lasp/src/lasp/device/lasp_rtaudiodaq.cpp

#include "lasp_rtaudiodaq.h"
#if LASP_HAS_RTAUDIO == 1
#include "debugtrace.hpp"
#include "lasp_daq.h"
#include <RtAudio.h>
#include <atomic>
#include <cassert>
#include <memory>

using std::atomic;
using std::cerr;
using std::endl;
using std::runtime_error;
using std::string;
using std::vector;

DEBUGTRACE_VARIABLES;
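
// Populate `devinfolist` with one DeviceInfo entry per device found, for every
// RtAudio API that was compiled in. Devices whose capabilities could not be
// probed are skipped.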
void fillRtAudioDeviceInfo(vector<DeviceInfo> &devinfolist) {

  vector<RtAudio::Api> apis;
  RtAudio::getCompiledApi(apis);

  for (auto api : apis) {
    RtAudio rtaudio(api);
    us count = rtaudio.getDeviceCount();
    for (us devno = 0; devno < count; devno++) {

      RtAudio::DeviceInfo devinfo = rtaudio.getDeviceInfo(devno);
      if (!devinfo.probed) {
        // Device capabilities not successfully probed. Continue to next
        continue;
      }
      // Our device info struct
      DeviceInfo d;
      switch (api) {
      case RtAudio::LINUX_ALSA:
        d.api = rtaudioAlsaApi;
        break;
      case RtAudio::LINUX_PULSE:
        d.api = rtaudioPulseaudioApi;
        break;
      case RtAudio::WINDOWS_WASAPI:
        d.api = rtaudioWasapiApi;
        break;
      case RtAudio::WINDOWS_DS:
        d.api = rtaudioDsApi;
        break;
      case RtAudio::WINDOWS_ASIO:
        d.api = rtaudioAsioApi;
        break;
      default:
        cerr << "Not implemented RtAudio API, skipping." << endl;
        continue;
        break;
      }

      d.device_name = devinfo.name;
      d.api_specific_devindex = devno;

      for (us j = 0; j < devinfo.sampleRates.size(); j++) {
        us rate = devinfo.sampleRates[j];
        d.availableSampleRates.push_back((double)rate);
        if (devinfo.preferredSampleRate == rate) {
          d.prefSampleRateIndex = j;
        }
      }

      d.noutchannels = devinfo.outputChannels;
      d.ninchannels = devinfo.inputChannels;

      d.availableInputRanges = {1.0};

      RtAudioFormat formats = devinfo.nativeFormats;
      if (formats & RTAUDIO_SINT8) {
        d.availableDataTypes.push_back(
            DataTypeDescriptor::DataType::dtype_int8);
      }
      if (formats & RTAUDIO_SINT16) {
        d.availableDataTypes.push_back(
            DataTypeDescriptor::DataType::dtype_int16);
      }
      /* if (formats & RTAUDIO_SINT32) { */
      /*   d.availableDataTypes.push_back( */
      /*       DataTypeDescriptor::DataType::dtype_int24); */
      /* } */
      if (formats & RTAUDIO_FLOAT32) {
        d.availableDataTypes.push_back(
            DataTypeDescriptor::DataType::dtype_fl32);
      }
      if (formats & RTAUDIO_FLOAT64) {
        d.availableDataTypes.push_back(
            DataTypeDescriptor::DataType::dtype_fl64);
      }
      if (d.availableDataTypes.size() == 0) {
        // No usable sample format found: warn and skip this device, otherwise
        // the preferred-index computation below would underflow.
        std::cerr << "RtAudio: No data types found in device!" << endl;
        continue;
      }
      d.prefDataTypeIndex = d.availableDataTypes.size() - 1;

      d.availableFramesPerBlock = {512, 1024, 2048, 4096, 8192};
      d.prefFramesPerBlockIndex = 1;

      devinfolist.push_back(d);
    }
  }
}
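
// Forward declarations of the C-style callbacks handed to RtAudio. RtAudio
// calls these from its audio thread; they forward to the RtAudioDaq instance
// passed in through the userData pointer.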
static int mycallback(void *outputBuffer, void *inputBuffer,
                      unsigned int nFrames, double streamTime,
                      RtAudioStreamStatus status, void *userData);

static void myerrorcallback(RtAudioError::Type, const string &errorText);
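
// Daq implementation on top of the RtAudio library. The stream runs either
// input-only or output-only (duplex mode is rejected in the constructor) and
// exchanges blocks of nFramesPerBlock frames with the user callbacks.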
class RtAudioDaq : public Daq {

  RtAudio rtaudio;
  const us nFramesPerBlock;

  RtAudioDaq(const RtAudioDaq &) = delete;
  RtAudioDaq &operator=(const RtAudioDaq &) = delete;

  InDaqCallback _incallback;
  OutDaqCallback _outcallback;

  std::atomic<StreamStatus> _streamStatus{};

public:
  RtAudioDaq(const DeviceInfo &devinfo, const DaqConfiguration &config)
      : Daq(devinfo, config),
        rtaudio(static_cast<RtAudio::Api>(devinfo.api.api_specific_subcode)),
        nFramesPerBlock(Daq::framesPerBlock()) {

    DEBUGTRACE_ENTER;
    // We make sure not to run RtAudio in duplex mode. This seems to be buggy
    // and untested. Better to use a hardware-type loopback into the system.
    if (duplexMode()) {
      throw runtime_error("RtAudio backend cannot run in duplex mode.");
    }
    assert(!monitorOutput);

    // Duplex mode is excluded above, so exactly one of the two sides is
    // configured here.
    std::unique_ptr<RtAudio::StreamParameters> inParams, outParams;

    if (neninchannels() > 0) {
      inParams = std::make_unique<RtAudio::StreamParameters>();
      // +1 to get the count.
      inParams->nChannels = getHighestInChannel() + 1;
      if (inParams->nChannels < 1) {
        throw runtime_error("Invalid input number of channels");
      }
      inParams->firstChannel = 0;
      inParams->deviceId = devinfo.api_specific_devindex;
    } else {
      outParams = std::make_unique<RtAudio::StreamParameters>();
      outParams->nChannels = getHighestOutChannel() + 1;
      if (outParams->nChannels < 1) {
        throw runtime_error("Invalid output number of channels");
      }
      outParams->firstChannel = 0;
      outParams->deviceId = devinfo.api_specific_devindex;
    }
    RtAudio::StreamOptions streamoptions;
    streamoptions.flags = RTAUDIO_HOG_DEVICE | RTAUDIO_NONINTERLEAVED;
    streamoptions.numberOfBuffers = 2;
    streamoptions.streamName = "RtAudio stream";
    streamoptions.priority = 0;

    RtAudioFormat format;
    using Dtype = DataTypeDescriptor::DataType;
    const Dtype dtype = dataType();
    switch (dtype) {
    case Dtype::dtype_fl32:
      format = RTAUDIO_FLOAT32;
      break;
    case Dtype::dtype_fl64:
      format = RTAUDIO_FLOAT64;
      break;
    case Dtype::dtype_int8:
      format = RTAUDIO_SINT8;
      break;
    case Dtype::dtype_int16:
      format = RTAUDIO_SINT16;
      break;
    case Dtype::dtype_int32:
      format = RTAUDIO_SINT32;
      break;
    default:
      throw runtime_error("Invalid data type specified for DAQ stream.");
      break;
    }

    // Copy here, as it is used to return the *actual* number of frames per
    // block.
    unsigned int nFramesPerBlock_copy = nFramesPerBlock;

    // Final step: open the stream.
    rtaudio.openStream(outParams.get(), inParams.get(), format,
                       static_cast<us>(samplerate()), &nFramesPerBlock_copy,
                       mycallback, (void *)this, &streamoptions,
                       &myerrorcallback);
  }
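
  // Start the stream. Streams are single-sided in this backend, so at most one
  // of inCallback / outCallback may be provided.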
  virtual void start(InDaqCallback inCallback,
                     OutDaqCallback outCallback) override {

    DEBUGTRACE_ENTER;
    assert(!monitorOutput);

    if (getStreamStatus().runningOK()) {
      throw runtime_error("Stream already running");
    }

    // Only one of the two callbacks may be given, as duplex mode is not
    // supported.
    if (inCallback && outCallback) {
      throw runtime_error("Either input or output stream possible for RtAudio. "
                          "Stream duplex mode not provided.");
    }

    if (inCallback) {
      _incallback = inCallback;
      if (neninchannels() == 0) {
        throw runtime_error(
            "Input callback given, but stream does not provide input data");
      }
    }
    if (outCallback) {
      _outcallback = outCallback;
      if (nenoutchannels() == 0) {
        throw runtime_error(
            "Output callback given, but stream does not provide output data");
      }
    }

    rtaudio.startStream();

    // If we are here, we are running without errors.
    StreamStatus status;
    status.isRunning = true;
    _streamStatus = status;
  }
  StreamStatus getStreamStatus() const override { return _streamStatus; }

  void stop() override {
    DEBUGTRACE_ENTER;
    if (getStreamStatus().runningOK()) {
      rtaudio.stopStream();
    }
  }
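
  // Called (via mycallback) from RtAudio's audio thread for every block of
  // nFrames frames. Returning non-zero makes RtAudio stop the stream.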
  int streamCallback(void *outputBuffer, void *inputBuffer,
                     unsigned int nFrames, double streamTime,
                     RtAudioStreamStatus status) {

    /* DEBUGTRACE_ENTER; */

    using se = StreamStatus::StreamError;
    int rval = 0;

    // Store the error in the stream status and flag the stream as no longer
    // running.
    auto stopWithError = [&](se e) {
      DEBUGTRACE_PRINT("stopWithError");
      StreamStatus stat = _streamStatus;
      stat.errorType = e;
      stat.isRunning = false;
      _streamStatus = stat;
      rval = 1;
    };

    switch (status) {
    case RTAUDIO_INPUT_OVERFLOW:
      stopWithError(se::inputXRun);
      return 1;
      break;
    case RTAUDIO_OUTPUT_UNDERFLOW:
      stopWithError(se::outputXRun);
      return 1;
      break;
    default:
      break;
    }
    // Descriptor of the configured sample type; sw is the sample width in
    // bytes.
    const auto &dtype_descr = dtypeDescr();
    const auto dtype = dataType();

    us neninchannels = this->neninchannels();
    us nenoutchannels = this->nenoutchannels();

    us sw = dtype_descr.sw;
    if (nFrames != nFramesPerBlock) {
      cerr << "RtAudio backend error: nFrames does not match block size!"
           << endl;
      stopWithError(se::logicError);
      return 1;
    }
    if (inputBuffer) {

      std::vector<uint8_t *> ptrs;
      ptrs.reserve(neninchannels);

      /* DaqData(neninchannels_inc_mon, nFramesPerBlock, dtype); */

      // The stream is opened non-interleaved, so channel ch starts at an
      // offset of ch * nFramesPerBlock samples into the input buffer.
      for (int ch = getLowestInChannel(); ch <= getHighestInChannel(); ch++) {
        if (inchannel_config.at(ch).enabled) {
          ptrs.push_back(
              &static_cast<uint8_t *>(inputBuffer)[sw * ch * nFramesPerBlock]);
        }
      }

      DaqData d{neninchannels, nFramesPerBlock, dtype};
      d.copyInFromRaw(ptrs);

      assert(_incallback);
      bool ret = _incallback(d);
      if (!ret) {
        stopWithError(se::noError);
        return 1;
      }
    }
    if (outputBuffer) {

      std::vector<uint8_t *> ptrs;
      ptrs.reserve(nenoutchannels);

      // Non-interleaved layout: channel ch starts at an offset of
      // ch * nFramesPerBlock samples into the output buffer.
      for (int ch = 0; ch <= getHighestOutChannel(); ch++) {
        if (outchannel_config.at(ch).enabled) {
          ptrs.push_back(
              &static_cast<uint8_t *>(outputBuffer)[sw * ch * nFramesPerBlock]);
        }
      }

      DaqData d{nenoutchannels, nFramesPerBlock, dtype};

      assert(_outcallback);
      bool ret = _outcallback(d);
      if (!ret) {
        stopWithError(se::noError);
        return 1;
      }

      // Copy the data generated by the output callback to the enabled channels
      // in RtAudio's output buffer.
      us j = 0;
      for (auto ptr : ptrs) {
        d.copyToRaw(j, ptr);
        j++;
      }
    }

    return rval;
  }
  // RtAudio documentation says: if a stream is open, it will be stopped and
  // closed automatically on deletion. Therefore the destructor here is a
  // default one.
  ~RtAudioDaq() = default;
};
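
// Factory function: create a Daq instance that uses the RtAudio backend for
// the given device and configuration.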
std::unique_ptr<Daq> createRtAudioDevice(const DeviceInfo &devinfo,
                                         const DaqConfiguration &config) {

  return std::make_unique<RtAudioDaq>(devinfo, config);
}
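
// RtAudio reports asynchronous stream errors and warnings here; they are only
// logged to stderr.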
void myerrorcallback(RtAudioError::Type, const string &errorText) {
  cerr << "RtAudio backend stream error: " << errorText << endl;
}

int mycallback(void *outputBuffer, void *inputBuffer, unsigned int nFrames,
               double streamTime, RtAudioStreamStatus status, void *userData) {

  return static_cast<RtAudioDaq *>(userData)->streamCallback(
      outputBuffer, inputBuffer, nFrames, streamTime, status);
}
#endif // LASP_HAS_RTAUDIO == 1