392 lines
10 KiB
C++
392 lines
10 KiB
C++
#include "lasp_cpprtaudio.h"
|
|
#include <RtAudio.h>
|
|
#include <atomic>
|
|
#include <thread>
|
|
#include <cstring>
|
|
#include <cassert>
|
|
|
|
#if MS_WIN64
|
|
typedef uint8_t u_int8_t;
|
|
#endif
|
|
|
|
using std::atomic;
|
|
|
|
/**
 * Enumerate all devices of all compiled-in RtAudio backends and append
 * a DeviceInfo entry for each successfully probed device.
 *
 * @param devinfolist List to which found devices are appended.
 */
void fillRtAudioDeviceInfo(vector<DeviceInfo> &devinfolist) {

  vector<RtAudio::Api> apis;
  RtAudio::getCompiledApi(apis);

  for (auto api : apis) {
    RtAudio rtaudio(api);
    us count = rtaudio.getDeviceCount();
    for (us devno = 0; devno < count; devno++) {

      RtAudio::DeviceInfo devinfo = rtaudio.getDeviceInfo(devno);
      if (!devinfo.probed) {
        // Device capabilities not successfully probed. Continue to next
        continue;
      }

      DeviceInfo d;
      // Translate the RtAudio backend identifier to our API descriptor.
      switch (api) {
        case RtAudio::LINUX_ALSA:
          d.api = rtaudioAlsaApi;
          break;
        case RtAudio::LINUX_PULSE:
          d.api = rtaudioPulseaudioApi;
          break;
        case RtAudio::WINDOWS_WASAPI:
          d.api = rtaudioWasapiApi;
          break;
        case RtAudio::WINDOWS_DS:
          d.api = rtaudioDsApi;
          break;
        case RtAudio::WINDOWS_ASIO:
          d.api = rtaudioAsioApi;
          break;
        default:
          cerr << "Not implemented RtAudio API, skipping." << endl;
          continue;
      }

      d.device_name = devinfo.name;
      d.api_specific_devindex = devno;

      // Copy the supported sample rates; remember the index of the
      // device's preferred rate.
      for (us j = 0; j < devinfo.sampleRates.size(); j++) {
        us rate = devinfo.sampleRates[j];
        d.availableSampleRates.push_back((double) rate);
        if (devinfo.preferredSampleRate == rate) {
          d.prefSampleRateIndex = j;
        }
      }

      d.noutchannels = devinfo.outputChannels;
      d.ninchannels = devinfo.inputChannels;

      d.availableInputRanges = {1.0};

      // Map RtAudio's native sample-format flags to our data types.
      RtAudioFormat formats = devinfo.nativeFormats;
      if (formats & RTAUDIO_SINT8) {
        d.availableDataTypes.push_back(dtype_int8);
      }
      if (formats & RTAUDIO_SINT16) {
        d.availableDataTypes.push_back(dtype_int16);
      }
      // NOTE(review): SINT32 mapped to the 24-bit data type — presumably
      // because 24-bit samples are delivered in 32-bit words; confirm
      // against the rest of the project.
      if (formats & RTAUDIO_SINT32) {
        d.availableDataTypes.push_back(dtype_int24);
      }
      // BUGFIX: this branch previously re-tested RTAUDIO_SINT32 (a
      // copy-paste of the branch above) while pushing the 32-bit float
      // type. Test the float flag it actually corresponds to.
      if (formats & RTAUDIO_FLOAT32) {
        d.availableDataTypes.push_back(dtype_fl32);
      }
      if (formats & RTAUDIO_FLOAT64) {
        d.availableDataTypes.push_back(dtype_fl64);
      }
      if (d.availableDataTypes.size() == 0) {
        std::cerr << "RtAudio: No data types found in device!" << endl;
      }

      // Prefer the last (widest) data type found above.
      d.prefDataTypeIndex = d.availableDataTypes.size() - 1;

      // RtAudio does not enumerate block sizes; offer a fixed set of
      // powers of two and prefer 1024 frames per block.
      d.availableFramesPerBlock.push_back(512);
      d.availableFramesPerBlock.push_back(1024);
      d.availableFramesPerBlock.push_back(2048);
      d.availableFramesPerBlock.push_back(4096);
      d.availableFramesPerBlock.push_back(8192);
      d.prefFramesPerBlockIndex = 1;

      devinfolist.push_back(d);
    }
  }
}
|
|
|
|
// RtAudio stream callback: moves sample data between RtAudio's buffers
// and the AudioDaq queues. Defined at the bottom of this file; declared
// here so the AudioDaq class below can befriend it.
int mycallback(void *outputBuffer, void *inputBuffer,
               unsigned int nFrames,
               double streamTime,
               RtAudioStreamStatus status,
               void *userData);

// RtAudio error callback: logs the error text to stderr.
void myerrorcallback(RtAudioError::Type,const string& errorText);
|
|
|
|
class AudioDaq: public Daq {
|
|
|
|
SafeQueue<void*> *inqueue = NULL;
|
|
SafeQueue<void*> *outqueue = NULL;
|
|
SafeQueue<void*> *outDelayqueue = NULL;
|
|
|
|
RtAudio* rtaudio = NULL;
|
|
RtAudio::StreamParameters* instreamparams = nullptr;
|
|
RtAudio::StreamParameters* outstreamparams = nullptr;
|
|
|
|
us nFramesPerBlock;
|
|
|
|
public:
|
|
AudioDaq(const DeviceInfo& devinfo,
|
|
const DaqConfiguration& config):
|
|
Daq(devinfo, config) {
|
|
|
|
nFramesPerBlock = this->framesPerBlock();
|
|
|
|
if(neninchannels(false) > 0) {
|
|
instreamparams = new RtAudio::StreamParameters();
|
|
instreamparams->nChannels = getHighestInChannel() + 1;
|
|
if(instreamparams->nChannels < 1) {
|
|
throw runtime_error("Invalid input number of channels");
|
|
}
|
|
instreamparams->firstChannel = 0;
|
|
instreamparams->deviceId = devinfo.api_specific_devindex;
|
|
}
|
|
|
|
if(nenoutchannels() > 0) {
|
|
outstreamparams = new RtAudio::StreamParameters();
|
|
outstreamparams->nChannels = getHighestOutChannel() + 1;
|
|
if(outstreamparams->nChannels < 1) {
|
|
throw runtime_error("Invalid output number of channels");
|
|
}
|
|
outstreamparams->firstChannel = 0;
|
|
outstreamparams->deviceId = devinfo.api_specific_devindex;
|
|
}
|
|
|
|
RtAudio::StreamOptions streamoptions;
|
|
streamoptions.flags = RTAUDIO_NONINTERLEAVED | RTAUDIO_HOG_DEVICE;
|
|
|
|
streamoptions.numberOfBuffers = 2;
|
|
streamoptions.streamName = "RtAudio stream";
|
|
streamoptions.priority = 0;
|
|
|
|
RtAudioFormat format;
|
|
DataType dtype = dataType();
|
|
if(dtype == dtype_fl32) {
|
|
format = RTAUDIO_FLOAT32;
|
|
} else if(dtype == dtype_fl64) {
|
|
format = RTAUDIO_FLOAT64;
|
|
} else if(dtype == dtype_int8) {
|
|
format = RTAUDIO_SINT8;
|
|
} else if(dtype == dtype_int16) {
|
|
format = RTAUDIO_SINT16;
|
|
} else if(dtype == dtype_int32) {
|
|
format = RTAUDIO_SINT32;
|
|
} else {
|
|
throw runtime_error("Invalid data type");
|
|
}
|
|
|
|
try {
|
|
rtaudio = new RtAudio((RtAudio::Api) devinfo.api.api_specific_subcode);
|
|
if(!rtaudio) {
|
|
throw runtime_error("RtAudio allocation failed");
|
|
}
|
|
rtaudio->openStream(
|
|
outstreamparams,
|
|
instreamparams,
|
|
format,
|
|
(us) samplerate(),
|
|
(unsigned*) &nFramesPerBlock,
|
|
&mycallback,
|
|
(void*) this,
|
|
&streamoptions,
|
|
&myerrorcallback
|
|
);
|
|
} catch(RtAudioError& e) {
|
|
if(rtaudio) delete rtaudio;
|
|
if(instreamparams) delete instreamparams;
|
|
if(outstreamparams) delete outstreamparams;
|
|
throw;
|
|
}
|
|
if(monitorOutput) {
|
|
outDelayqueue = new SafeQueue<void*>();
|
|
}
|
|
|
|
}
|
|
|
|
friend int mycallback(void *outputBuffer, void *inputBuffer,
|
|
unsigned int nFrames,
|
|
double streamTime,
|
|
RtAudioStreamStatus status,
|
|
void *userData);
|
|
|
|
|
|
void start(SafeQueue<void*> *inqueue, SafeQueue<void*> *outqueue) {
|
|
this->inqueue = inqueue;
|
|
this->outqueue = outqueue;
|
|
if(monitorOutput) {
|
|
this->outDelayqueue = new SafeQueue<void*>();
|
|
|
|
}
|
|
|
|
if(isRunning()){
|
|
throw runtime_error("Stream already running");
|
|
}
|
|
|
|
if(neninchannels(false) > 0 && !inqueue) {
|
|
throw runtime_error("inqueue argument not given");
|
|
}
|
|
if(nenoutchannels() > 0 && !outqueue) {
|
|
throw runtime_error("outqueue argument not given");
|
|
}
|
|
assert(rtaudio);
|
|
rtaudio->startStream();
|
|
|
|
}
|
|
|
|
void stop() {
|
|
|
|
if(!isRunning()) {
|
|
cerr << "Stream is already stopped" << endl;
|
|
}
|
|
else {
|
|
assert(rtaudio);
|
|
rtaudio->stopStream();
|
|
}
|
|
if(inqueue) {
|
|
inqueue = nullptr;
|
|
}
|
|
if(outqueue) {
|
|
outqueue = nullptr;
|
|
}
|
|
if(outDelayqueue) {
|
|
delete outDelayqueue;
|
|
outDelayqueue = nullptr;
|
|
}
|
|
}
|
|
bool isRunning() const {return (rtaudio && rtaudio->isStreamRunning());}
|
|
|
|
~AudioDaq() {
|
|
assert(rtaudio);
|
|
if(isRunning()) {
|
|
stop();
|
|
}
|
|
if(rtaudio->isStreamOpen()) {
|
|
rtaudio->closeStream();
|
|
}
|
|
|
|
if(rtaudio) delete rtaudio;
|
|
if(outDelayqueue) delete outDelayqueue;
|
|
if(instreamparams) delete instreamparams;
|
|
if(outstreamparams) delete outstreamparams;
|
|
|
|
}
|
|
};
|
|
|
|
|
|
/**
 * Factory for RtAudio-backed DAQ devices.
 *
 * @param devinfo Device to open.
 * @param config DAQ configuration to apply.
 * @return Owning pointer to the created device; the caller must delete
 *         it. Exceptions from construction are propagated.
 */
Daq* createRtAudioDevice(const DeviceInfo& devinfo,
                         const DaqConfiguration& config) {

  AudioDaq *device = nullptr;
  try {
    device = new AudioDaq(devinfo, config);
  } catch (runtime_error &e) {
    // device is still null when the constructor threw, so this delete
    // is a no-op; kept for symmetry with future multi-step setup.
    delete device;
    throw;
  }
  return device;
}
|
|
|
|
|
|
/**
 * RtAudio stream callback; runs on RtAudio's audio thread.
 *
 * Capture side: copies the enabled input channels into a freshly
 * malloc'd block and enqueues it on the daq's inqueue (ownership passes
 * to the consumer). When output monitoring is enabled, the most recent
 * played output block is prepended as the first channel.
 *
 * Playback side: dequeues a block from outqueue and scatters its
 * channels into the RtAudio output buffer, zero-filling disabled
 * channels.
 *
 * Buffers are non-interleaved (the stream is opened with
 * RTAUDIO_NONINTERLEAVED): each channel occupies a contiguous run of
 * bytesperchan bytes.
 *
 * @return 0 always (keep the stream running).
 */
int mycallback(
    void *outputBuffervoid,
    void *inputBuffervoid,
    unsigned int nFrames,
    double streamTime,
    RtAudioStreamStatus status,
    void *userData) {

  u_int8_t* inputBuffer = (u_int8_t*) inputBuffervoid;
  u_int8_t* outputBuffer = (u_int8_t*) outputBuffervoid;

  // userData is the AudioDaq that opened the stream (passed as `this`).
  AudioDaq* daq = (AudioDaq*) userData;
  DataType dtype = daq->dataType();
  // Enabled input channel count, including the monitor channel.
  us neninchannels_inc_mon = daq->neninchannels();
  us nenoutchannels = daq->nenoutchannels();

  bool monitorOutput = daq->monitorOutput;
  // Bytes per channel in this block: sample width times frame count.
  us bytesperchan = dtype.sw*nFrames;
  // When monitoring, channel 0 of our capture copy is reserved for the
  // looped-back output, so real input channels start one block later.
  us monitorOffset = ((us) monitorOutput)*bytesperchan;

  SafeQueue<void*> *inqueue = daq->inqueue;
  SafeQueue<void*> *outqueue = daq->outqueue;
  SafeQueue<void*> *outDelayqueue = daq->outDelayqueue;

  const boolvec& eninchannels = daq->eninchannels;
  const boolvec& enoutchannels = daq->enoutchannels;

  // ---------------- capture path ----------------
  if(inputBuffer || monitorOutput) {

    // Ownership of this block passes to the inqueue consumer.
    // NOTE(review): malloc result is not checked for NULL.
    u_int8_t *inbuffercpy = (u_int8_t*) malloc(bytesperchan*neninchannels_inc_mon);
    if(inputBuffer) {
      us j=0; // OUR buffer channel counter
      us i=0; // RtAudio channel counter
      // Compact the enabled channels behind the (optional) monitor slot.
      for(int ch=daq->getLowestInChannel();ch<=daq->getHighestInChannel();ch++) {
        if(eninchannels[ch]) {
          memcpy(
              &(inbuffercpy[monitorOffset+j*bytesperchan]),
              &(inputBuffer[i*bytesperchan]),
              bytesperchan);
          j++;
        }
        i++;
      }
    }
    if(monitorOutput) {
      assert(outDelayqueue);

      // Fill the monitor slot (first channel) with the oldest played
      // output block, or silence if playback hasn't produced one yet.
      if(!daq->outDelayqueue->empty()) {
        void* dat = daq->outDelayqueue->dequeue();
        memcpy((void*) inbuffercpy, dat, bytesperchan);
        free(dat);
      } else {
        cerr << "Warning: output delay queue appears empty!" << endl;
        memset(inbuffercpy, 0, bytesperchan);
      }
    }
    assert(inqueue);
    inqueue->enqueue(inbuffercpy);
  }

  // ---------------- playback path ----------------
  if(outputBuffer) {
    assert(outqueue);
    if(!outqueue->empty()) {
      u_int8_t* outbuffercpy = (u_int8_t*) outqueue->dequeue();
      us j=0; // OUR buffer channel counter
      us i=0; // RtAudio channel counter
      for(us ch=0;ch<=daq->getHighestOutChannel();ch++) {
        /* cerr << "Copying from queue... " << endl; */
        if(enoutchannels[ch]) {
          memcpy(
              &(outputBuffer[i*bytesperchan]),
              &(outbuffercpy[j*bytesperchan]),
              bytesperchan);
          j++;
        }
        else {
          /* cerr << "unused output channel in list" << endl; */
          // Disabled channel: emit silence.
          memset(
              &(outputBuffer[i*bytesperchan]),0,bytesperchan);
        }
        i++;
      }
      // When monitoring, the block is recycled via the delay queue and
      // freed on the capture side; otherwise free it here.
      if(!monitorOutput) {
        free(outbuffercpy);
      } else {
        assert(outDelayqueue);
        outDelayqueue->enqueue((void*) outbuffercpy);
      }
    }
    else {
      cerr << "RtAudio backend: stream output buffer underflow!" << endl;
    }
  }

  return 0;
}
|
|
// RtAudio error callback: print the error message to stderr. The error
// type argument is ignored.
void myerrorcallback(RtAudioError::Type,const string& errorText) {
  cerr << errorText << endl;
}
|