lasp/cpp_src/device/lasp_rtaudiodaq.cpp

/* #define DEBUGTRACE_ENABLED */
#include <mutex>
#include "debugtrace.hpp"
#include "lasp_mathtypes.h"
#include "lasp_rtaudiodaq.h"
#if LASP_HAS_RTAUDIO == 1
#include "RtAudio.h"
#include "lasp_daq.h"
#include <atomic>
#include <cassert>
using std::atomic;
using std::cerr;
using std::endl;
using rte = std::runtime_error;
using std::vector;
using lck = std::scoped_lock<std::mutex>;
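// Upper limit on the number of channels exposed per RtAudio device.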
const unsigned RTAUDIO_MAX_CHANNELS = 8;
class RtAudioDeviceInfo : public DeviceInfo
{
public:
/**
* @brief Device ID, specific to the sub-API in use. Important for the RtAudio
* backend, as RtAudio is able to handle different APIs.
*/
int ID; // Copy of RtAudio::DeviceInfo::ID
virtual std::unique_ptr<DeviceInfo> clone() const override
{
return std::make_unique<RtAudioDeviceInfo>(*this);
}
};
void fillRtAudioDeviceInfo(DeviceInfoList &devinfolist)
{
DEBUGTRACE_ENTER;
vector<RtAudio::Api> apis;
RtAudio::getCompiledApi(apis);
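// Walk over every API this RtAudio build was compiled with and enumerate its
// devices; each device becomes one RtAudioDeviceInfo entry in the list.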
for (auto api : apis)
{
RtAudio rtaudio(api);
const us count = rtaudio.getDeviceCount();
const auto ids = rtaudio.getDeviceIds();
for (us i = 0; i < count; i++)
{
us id = ids.at(i);
RtAudio::DeviceInfo devinfo = rtaudio.getDeviceInfo(id);
// "Our device info struct"
RtAudioDeviceInfo d;
switch (api)
{
case RtAudio::LINUX_ALSA:
d.api = rtaudioAlsaApi;
break;
case RtAudio::LINUX_PULSE:
d.api = rtaudioPulseaudioApi;
break;
case RtAudio::WINDOWS_WASAPI:
d.api = rtaudioWasapiApi;
break;
case RtAudio::WINDOWS_DS:
d.api = rtaudioDsApi;
break;
case RtAudio::WINDOWS_ASIO:
d.api = rtaudioAsioApi;
break;
default:
cerr << "Not implemented RtAudio API, skipping." << endl;
continue;
break;
}
d.device_name = devinfo.name;
d.ID = id;
/// When 48 kHz is available, we overwrite the default sample rate with it,
/// as that is our preferred rate.
bool rate_48k_found = false;
for (us j = 0; j < devinfo.sampleRates.size(); j++)
{
us rate_int = devinfo.sampleRates[j];
d.availableSampleRates.push_back((double)rate_int);
if (!rate_48k_found)
{
if (devinfo.preferredSampleRate == rate_int)
{
d.prefSampleRateIndex = j;
}
if (rate_int == 48000)
{
d.prefSampleRateIndex = j;
rate_48k_found = true;
}
}
}
d.noutchannels = std::min(devinfo.outputChannels, RTAUDIO_MAX_CHANNELS);
d.ninchannels = std::min(devinfo.inputChannels, RTAUDIO_MAX_CHANNELS);
d.availableInputRanges = {1.0};
d.availableOutputRanges = {1.0};
RtAudioFormat formats = devinfo.nativeFormats;
if (formats & RTAUDIO_SINT8)
{
d.availableDataTypes.push_back(
DataTypeDescriptor::DataType::dtype_int8);
}
if (formats & RTAUDIO_SINT16)
{
d.availableDataTypes.push_back(
DataTypeDescriptor::DataType::dtype_int16);
}
/* if (formats & RTAUDIO_SINT24) { */
/* d.availableDataTypes.push_back(DataTypeDescriptor::DataType::dtype_int24); */
/* } */
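// Note: a native 32-bit integer format is exposed as float32 here, presumably
// relying on RtAudio's internal conversion between the requested stream format
// and the device's native format.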
if (formats & RTAUDIO_SINT32)
{
d.availableDataTypes.push_back(
DataTypeDescriptor::DataType::dtype_fl32);
}
if (formats & RTAUDIO_FLOAT64)
{
d.availableDataTypes.push_back(
DataTypeDescriptor::DataType::dtype_fl64);
}
if (d.availableDataTypes.size() == 0)
{
std::cerr << "RtAudio: No data types found in device!" << endl;
}
d.prefDataTypeIndex = d.availableDataTypes.size() - 1;
d.availableFramesPerBlock = {512, 1024, 2048, 4096, 8192};
d.prefFramesPerBlockIndex = 2;
devinfolist.push_back(std::make_unique<RtAudioDeviceInfo>(d));
}
}
}
static int mycallback(void *outputBuffer, void *inputBuffer,
unsigned int nFrames, double streamTime,
RtAudioStreamStatus status, void *userData);
class RtAudioDaq : public Daq
{
RtAudio rtaudio;
const us nFramesPerBlock;
RtAudioDaq(const RtAudioDaq &) = delete;
RtAudioDaq &operator=(const RtAudioDaq &) = delete;
InDaqCallback _incallback;
OutDaqCallback _outcallback;
std::atomic<StreamStatus> _streamStatus{};
public:
RtAudioDaq(const DeviceInfo &devinfo_gen, const DaqConfiguration &config)
: Daq(devinfo_gen, config), rtaudio(static_cast<RtAudio::Api>(
devinfo_gen.api.api_specific_subcode)),
nFramesPerBlock(Daq::framesPerBlock()) {
DEBUGTRACE_ENTER;
// We make sure not to run RtAudio in duplex mode. This seems to be buggy
// and untested. Better to use a hardware-type loopback into the system.
if (duplexMode()) {
throw rte("RtAudio backend cannot run in duplex mode.");
}
assert(!monitorOutput);
const RtAudioDeviceInfo &devinfo =
static_cast<const RtAudioDeviceInfo &>(devinfo_gen);
std::unique_ptr<RtAudio::StreamParameters> inParams, outParams;
if (neninchannels() > 0) {
inParams = std::make_unique<RtAudio::StreamParameters>();
/// RtAudio lacks good bookkeeping when the first channel is not equal to
/// 0. For now, our fix is to shift out the channels we want, and let
/// RtAudio pass on all channels.
inParams->firstChannel = 0;
inParams->nChannels = devinfo.ninchannels;
inParams->deviceId = devinfo.ID;
}
else
{
outParams = std::make_unique<RtAudio::StreamParameters>();
/// RtAudio lacks good bookkeeping when the first channel is not equal to
/// 0. For now, our fix is to shift out the channels we want, and let
/// RtAudio pass on all channels.
outParams->firstChannel = 0;
outParams->nChannels = devinfo.noutchannels;
outParams->deviceId = devinfo.ID;
}
RtAudio::StreamOptions streamoptions;
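// RTAUDIO_NONINTERLEAVED makes each channel occupy a contiguous block of
// nFramesPerBlock samples (the layout assumed by the pointer arithmetic in
// streamCallback()); RTAUDIO_HOG_DEVICE requests exclusive access to the
// device where the underlying API supports it.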
streamoptions.flags = RTAUDIO_HOG_DEVICE | RTAUDIO_NONINTERLEAVED;
streamoptions.numberOfBuffers = 2;
streamoptions.streamName = "LASP RtAudio DAQ stream";
streamoptions.priority = 0;
RtAudioFormat format;
using Dtype = DataTypeDescriptor::DataType;
const Dtype dtype = dataType();
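// Map the configured LASP sample data type to the corresponding RtAudio
// sample format.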
switch (dtype)
{
case Dtype::dtype_fl32:
DEBUGTRACE_PRINT("Datatype float32");
format = RTAUDIO_FLOAT32;
break;
case Dtype::dtype_fl64:
DEBUGTRACE_PRINT("Datatype float64");
format = RTAUDIO_FLOAT64;
break;
case Dtype::dtype_int8:
DEBUGTRACE_PRINT("Datatype int8");
format = RTAUDIO_SINT8;
break;
case Dtype::dtype_int16:
DEBUGTRACE_PRINT("Datatype int16");
format = RTAUDIO_SINT16;
break;
case Dtype::dtype_int32:
DEBUGTRACE_PRINT("Datatype int32");
format = RTAUDIO_SINT32;
break;
default:
throw rte("Invalid data type specified for DAQ stream.");
break;
}
// Copy here, as it is used to return the *actual* number of frames per
// block.
unsigned int nFramesPerBlock_copy = nFramesPerBlock;
// Final step: open the stream.
RtAudioErrorType err = rtaudio.openStream(outParams.get(), inParams.get(), format,
static_cast<us>(samplerate()), &nFramesPerBlock_copy,
mycallback, (void *)this, &streamoptions);
if (err != RTAUDIO_NO_ERROR)
{
throw std::runtime_error(string("Error opening stream: ") + rtaudio.getErrorText());
}
if (nFramesPerBlock_copy != nFramesPerBlock)
{
throw rte(string("Got different number of frames per block back from RtAudio "
2024-01-19 11:32:03 +00:00
"backend: ") +
std::to_string(nFramesPerBlock_copy) + ". I do not know what to do.");
}
}
virtual void start(InDaqCallback inCallback,
OutDaqCallback outCallback) override final
{
DEBUGTRACE_ENTER;
assert(!monitorOutput);
if (getStreamStatus().runningOK())
{
throw rte("Stream already running");
}
// At most one of the two callbacks may be given: no duplex operation.
if (inCallback && outCallback)
{
throw rte("Either input or output stream possible for RtAudio. "
2022-11-11 12:51:10 +00:00
"Stream duplex mode not provided.");
2022-06-13 19:30:02 +00:00
}
if (neninchannels() > 0)
{
if (!inCallback)
{
throw rte(
"Stream provides input data, but no input callback was given");
}
_incallback = inCallback;
}
if (nenoutchannels() > 0)
{
if (!outCallback)
{
throw rte(
"Stream provides output data, but no output callback was given");
}
_outcallback = outCallback;
}
// Start the stream. Throws on error.
const auto err = rtaudio.startStream();
if (err != RTAUDIO_NO_ERROR)
{
throw std::runtime_error(string("Error starting stream: ") + rtaudio.getErrorText());
}
// If we are here, we are running without errors.
StreamStatus status;
status.isRunning = true;
_streamStatus = status;
}
StreamStatus getStreamStatus() const override final { return _streamStatus; }
void stop() override final
{
DEBUGTRACE_ENTER;
if (getStreamStatus().runningOK())
{
const auto err = rtaudio.stopStream();
if (err != RTAUDIO_NO_ERROR) {
std::cerr << "Error occurred while stopping the stream: " << rtaudio.getErrorText() << endl;
}
}
StreamStatus s = _streamStatus;
s.isRunning = false;
s.errorType = StreamStatus::StreamError::noError;
_streamStatus = s;
}
int streamCallback(void *outputBuffer, void *inputBuffer,
unsigned int nFrames, RtAudioStreamStatus status)
{
DEBUGTRACE_ENTER;
using se = StreamStatus::StreamError;
int rval = 0;
auto stopWithError = [&](se e)
{
DEBUGTRACE_PRINT("stopWithError");
StreamStatus stat = _streamStatus;
stat.errorType = e;
stat.isRunning = false;
_streamStatus = stat;
rval = 1;
};
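// RtAudio reports buffer over- and underruns through the status argument;
// treat them as fatal and stop the stream with the corresponding error.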
switch (status)
{
case RTAUDIO_INPUT_OVERFLOW:
stopWithError(se::inputXRun);
return 1;
break;
case RTAUDIO_OUTPUT_UNDERFLOW:
stopWithError(se::outputXRun);
return 1;
break;
default:
break;
}
const auto &dtype_descr = dtypeDescr();
const auto dtype = dataType();
const us neninchannels = this->neninchannels();
const us nenoutchannels = this->nenoutchannels();
const us sw = dtype_descr.sw;
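// sw is the sample width in bytes. Since the stream is non-interleaved,
// channel ch starts at byte offset sw * ch * nFramesPerBlock within the raw
// buffer.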
if (nFrames != nFramesPerBlock)
{
cerr << "RtAudio backend error: nFrames does not match block size!"
<< endl;
stopWithError(se::logicError);
return 1;
}
if (inputBuffer)
{
assert(_incallback);
std::vector<byte_t *> ptrs;
ptrs.reserve(neninchannels);
const us ch_min = getLowestEnabledInChannel();
const us ch_max = getHighestEnabledInChannel();
assert(ch_min < ninchannels);
assert(ch_max < ninchannels);
/// Only pass on the pointers of the channels we want
for (us ch = ch_min; ch <= ch_max; ch++)
{
if (inchannel_config.at(ch).enabled)
{
byte_t *ptr =
static_cast<byte_t *>(inputBuffer) + sw * ch * nFramesPerBlock;
ptrs.push_back(ptr);
}
}
DaqData d{nFramesPerBlock, neninchannels, dtype};
d.copyInFromRaw(ptrs);
_incallback(d);
}
if (outputBuffer)
{
assert(_outcallback);
std::vector<byte_t *> ptrs;
ptrs.reserve(nenoutchannels);
/* outCallback */
const us ch_min = getLowestEnabledOutChannel();
const us ch_max = getHighestEnabledOutChannel();
assert(ch_min < noutchannels);
assert(ch_max < noutchannels);
/// Only pass on the pointers of the channels we want
for (us ch = ch_min; ch <= ch_max; ch++)
{
if (outchannel_config.at(ch).enabled)
{
ptrs.push_back(static_cast<byte_t *>(outputBuffer) +
sw * ch * nFramesPerBlock);
}
}
DaqData d{nFramesPerBlock, nenoutchannels, dtype};
_outcallback(d);
// Copy over the buffer
us j = 0;
for (auto ptr : ptrs)
{
d.copyToRaw(j, ptr);
j++;
}
}
return rval;
}
// RtAudio documentation says: if a stream is open, it will be stopped and
// closed automatically on deletion. Therefore the destructor here is a
// default one.
~RtAudioDaq() = default;
};
std::unique_ptr<Daq> createRtAudioDevice(const DeviceInfo &devinfo,
const DaqConfiguration &config)
{
return std::make_unique<RtAudioDaq>(devinfo, config);
}
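/* Example usage (a minimal sketch; the DeviceInfo/DaqConfiguration setup and
 * the exact callback types are defined elsewhere in LASP, and `myInCallback`
 * is a hypothetical name):
 *
 *   auto daq = createRtAudioDevice(devinfo, config); // input-only config
 *   daq->start(myInCallback, nullptr);
 *   // ... data arrives in myInCallback as DaqData blocks ...
 *   daq->stop();
 */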
int mycallback(
void *outputBuffer, void *inputBuffer, unsigned int nFrames,
__attribute__((unused)) double streamTime, // streamTime is not used
RtAudioStreamStatus status, void *userData)
{
return static_cast<RtAudioDaq *>(userData)->streamCallback(
outputBuffer, inputBuffer, nFrames, status);
}
#endif // LASP_HAS_RTAUDIO == 1