In-between pybind11 commit. Nothing working.
commit 7eaaf43653
parent f635cac209
@@ -144,8 +144,9 @@ set(SETUP_PY "${CMAKE_CURRENT_BINARY_DIR}/setup.py")
 
 set(DEPS "${CMAKE_CURRENT_SOURCE_DIR}/*.py"
          "${CMAKE_CURRENT_SOURCE_DIR}/lasp/*.py"
-         "wrappers"
-         "lasp_daq")
+         # "wrappers"
+         # "lasp_device_lib")
+)
 
 set(OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/build/timestamp")
 
@@ -9,8 +9,8 @@
 #include <optional>
 
 /**
- * @brief Data coming from / going to DAQ. Non-interleaved format, which means
- * data in buffer is ordered by channel: _ptr[sample+channel*nsamples]
+ * @brief Data coming from / going to DAQ. **Non-interleaved format**, which means
+ * data in buffer is ordered by channel: _ptr[sample+channel*nframes]
  */
 class DaqData {
 protected:
@@ -38,24 +38,13 @@ public:
   us size_bytes() const { return _data.size(); }
 };
 
-/**
- * @brief Typed DaqData, in which the type is specified
- *
- * @tparam T
- */
-template <typename T> class TypedDaqData : public DaqData {
-  T *data() { return static_cast<T *>(_data.data()); }
-};
 
 /**
  * @brief Callback of DAQ. Called with arguments of a vector of data
  * spans for each channel, and a datatype descriptor. Callback should return
  * false for a stop request.
  */
-using ChannelView = std::vector<gsl::span<uint8_t>>;
 using DaqCallback =
-    std::function<bool(ChannelView channel_data,
-                       const DataTypeDescriptor &dtype_descr)>;
+    std::function<bool(std::unique_ptr<DaqData>)>;
 
 /**
  * @brief Base cass for all DAQ instances
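For reference, the non-interleaved layout documented in the hunk above means that sample s of channel c lives at _ptr[s + c * nframes]. A minimal sketch of that indexing, assuming a plain double buffer; the names below are illustrative only and not part of this commit:

    #include <cstddef>

    // Non-interleaved layout: all samples of channel 0, then all samples of
    // channel 1, and so on. Sample s of channel c sits at buf[s + c * nframes].
    static double channel_sample(const double *buf, std::size_t nframes,
                                 std::size_t c, std::size_t s) {
      return buf[s + c * nframes];
    }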
@@ -1,14 +1,17 @@
-#if LASP_HAS_RTAUDIO == 1
 #include "lasp_rtaudiodaq.h"
+#if LASP_HAS_RTAUDIO == 1
+#include "lasp_daqconfig.h"
 #include <RtAudio.h>
 #include <atomic>
 #include <cassert>
 #include <cstdint>
 #include <thread>
 
+using std::atomic;
 using std::cerr;
 using std::endl;
-using std::atomic;
+using std::runtime_error;
+using std::vector;
 
 void fillRtAudioDeviceInfo(vector<DeviceInfo> &devinfolist) {
 
@@ -25,6 +28,7 @@ void fillRtAudioDeviceInfo(vector<DeviceInfo> &devinfolist) {
       // Device capabilities not successfully probed. Continue to next
       continue;
     }
+    // "Our device info struct"
     DeviceInfo d;
     switch (api) {
     case RtAudio::LINUX_ALSA:
@@ -66,19 +70,24 @@ void fillRtAudioDeviceInfo(vector<DeviceInfo> &devinfolist) {
 
     RtAudioFormat formats = devinfo.nativeFormats;
     if (formats & RTAUDIO_SINT8) {
-      d.availableDataTypes.push_back(dtype_int8);
+      d.availableDataTypes.push_back(
+          DataTypeDescriptor::DataType::dtype_int8);
     }
     if (formats & RTAUDIO_SINT16) {
-      d.availableDataTypes.push_back(dtype_int16);
+      d.availableDataTypes.push_back(
+          DataTypeDescriptor::DataType::dtype_int16);
     }
+    /* if (formats & RTAUDIO_SINT32) { */
+    /*   d.availableDataTypes.push_back(DataTypeDescriptor::DataType::dtype_int24);
+     */
+    /* } */
     if (formats & RTAUDIO_SINT32) {
-      d.availableDataTypes.push_back(dtype_int24);
-    }
-    if (formats & RTAUDIO_SINT32) {
-      d.availableDataTypes.push_back(dtype_fl32);
+      d.availableDataTypes.push_back(
+          DataTypeDescriptor::DataType::dtype_fl32);
     }
     if (formats & RTAUDIO_FLOAT64) {
-      d.availableDataTypes.push_back(dtype_fl64);
+      d.availableDataTypes.push_back(
+          DataTypeDescriptor::DataType::dtype_fl64);
     }
     if (d.availableDataTypes.size() == 0) {
       std::cerr << "RtAudio: No data types found in device!" << endl;
@@ -86,11 +95,7 @@ void fillRtAudioDeviceInfo(vector<DeviceInfo> &devinfolist) {
 
     d.prefDataTypeIndex = d.availableDataTypes.size() - 1;
 
-    d.availableFramesPerBlock.push_back(512);
-    d.availableFramesPerBlock.push_back(1024);
-    d.availableFramesPerBlock.push_back(2048);
-    d.availableFramesPerBlock.push_back(4096);
-    d.availableFramesPerBlock.push_back(8192);
+    d.availableFramesPerBlock = {512, 1024, 2048, 4096, 8192};
     d.prefFramesPerBlockIndex = 1;
 
     devinfolist.push_back(d);
@@ -103,211 +108,113 @@ int mycallback(void *outputBuffer, void *inputBuffer, unsigned int nFrames,
 
 void myerrorcallback(RtAudioError::Type, const string &errorText);
 
-class AudioDaq : public Daq {
+class RtAudioDaq : public Daq {
 
-  SafeQueue<void *> *inqueue = NULL;
-  SafeQueue<void *> *outqueue = NULL;
-  SafeQueue<void *> *outDelayqueue = NULL;
+  RtAudio rtaudio;
+  const us nFramesPerBlock;
 
-  RtAudio *rtaudio = NULL;
-  RtAudio::StreamParameters *instreamparams = nullptr;
-  RtAudio::StreamParameters *outstreamparams = nullptr;
+  RtAudioDaq(const RtAudioDaq &) = delete;
+  RtAudioDaq &operator=(const RtAudioDaq &) = delete;
 
-  us nFramesPerBlock;
 
 public:
-  AudioDaq(const DeviceInfo &devinfo, const DaqConfiguration &config)
-      : Daq(devinfo, config) {
+  RtAudioDaq(const DeviceInfo &devinfo, const DaqConfiguration &config)
+      : Daq(devinfo, config),
+        rtaudio(static_cast<RtAudio::Api>(devinfo.api.api_specific_subcode)),
+        nFramesPerBlock(Daq::framesPerBlock()) {
 
-    nFramesPerBlock = this->framesPerBlock();
+    std::unique_ptr<RtAudio::StreamParameters> inParams, outParams;
+    // We make sure not to run RtAudio in duplex mode. This seems to be buggy
+    // and untested. Better to use a hardware-type loopback into the system.
+    if (neninchannels(false) > 0 && nenoutchannels() > 0) {
+      throw runtime_error("RtAudio backend cannot run in duplex mode.");
+    }
+    assert(!monitorOutput);
 
-    if (neninchannels(false) > 0) {
-      instreamparams = new RtAudio::StreamParameters();
-      instreamparams->nChannels = getHighestInChannel() + 1;
-      if (instreamparams->nChannels < 1) {
+    if (neninchannels() > 0) {
+      inParams = std::make_unique<RtAudio::StreamParameters>();
+      // +1 to get the count.
+      inParams->nChannels = getHighestInChannel() + 1;
+      if (inParams->nChannels < 1) {
         throw runtime_error("Invalid input number of channels");
       }
-      instreamparams->firstChannel = 0;
-      instreamparams->deviceId = devinfo.api_specific_devindex;
-    }
+      inParams->firstChannel = 0;
+      inParams->deviceId = devinfo.api_specific_devindex;
 
-    if (nenoutchannels() > 0) {
-      outstreamparams = new RtAudio::StreamParameters();
-      outstreamparams->nChannels = getHighestOutChannel() + 1;
-      if (outstreamparams->nChannels < 1) {
+    } else {
+      outParams = std::make_unique<RtAudio::StreamParameters>();
+      outParams->nChannels = getHighestOutChannel() + 1;
+      if (outParams->nChannels < 1) {
         throw runtime_error("Invalid output number of channels");
       }
-      outstreamparams->firstChannel = 0;
-      outstreamparams->deviceId = devinfo.api_specific_devindex;
+      outParams->firstChannel = 0;
+      outParams->deviceId = devinfo.api_specific_devindex;
     }
 
     RtAudio::StreamOptions streamoptions;
-    streamoptions.flags = RTAUDIO_NONINTERLEAVED | RTAUDIO_HOG_DEVICE;
+    streamoptions.flags = RTAUDIO_HOG_DEVICE;
 
     streamoptions.numberOfBuffers = 2;
     streamoptions.streamName = "RtAudio stream";
     streamoptions.priority = 0;
 
     RtAudioFormat format;
-    const DataType dtype = DataType();
+    using Dtype = DataTypeDescriptor::DataType;
+    const Dtype dtype = dataType();
     switch (dtype) {
-    case dtype_fl32:
+    case Dtype::dtype_fl32:
      format = RTAUDIO_FLOAT32;
      break;
-    case dtype_fl64:
+    case Dtype::dtype_fl64:
      format = RTAUDIO_FLOAT64;
      break;
-    case dtype_int8:
+    case Dtype::dtype_int8:
      format = RTAUDIO_SINT8;
      break;
-    case dtype_int16:
+    case Dtype::dtype_int16:
      format = RTAUDIO_SINT16;
      break;
-    case dtype_int32:
+    case Dtype::dtype_int32:
      format = RTAUDIO_SINT32;
      break;
    default:
-      throw runtime_error("Invalid data type");
+      throw runtime_error("Invalid data type specified for DAQ stream.");
      break;
    }
 
-    try {
-      rtaudio = new RtAudio((RtAudio::Api)devinfo.api.api_specific_subcode);
-      if (!rtaudio) {
-        throw runtime_error("RtAudio allocation failed");
-      }
-      rtaudio->openStream(outstreamparams, instreamparams, format,
-                          (us)samplerate(), (unsigned *)&nFramesPerBlock,
-                          &mycallback, (void *)this, &streamoptions,
-                          &myerrorcallback);
-    } catch (RtAudioError &e) {
-      if (rtaudio)
-        delete rtaudio;
-      if (instreamparams)
-        delete instreamparams;
-      if (outstreamparams)
-        delete outstreamparams;
-      throw;
-    }
-    if (monitorOutput) {
-      outDelayqueue = new SafeQueue<void *>();
-    }
+    // Copy here, as it is used to return the *actual* number of frames per
+    // block.
+    unsigned int nFramesPerBlock_copy = nFramesPerBlock;
+
+    // Final step: open the stream.
+    rtaudio.openStream(&(*outParams), &(*inParams), format,
+                       static_cast<us>(samplerate()), &nFramesPerBlock_copy,
+                       &mycallback, (void *)this, &streamoptions,
+                       &myerrorcallback);
   }
 
-  friend int mycallback(void *outputBuffer, void *inputBuffer,
-                        unsigned int nFrames, double streamTime,
-                        RtAudioStreamStatus status, void *userData);
+  int streamCallback(void *outputBuffer, void *inputBuffer,
+                     unsigned int nFrames, double streamTime,
+                     RtAudioStreamStatus status) {
 
-  void start(SafeQueue<void *> *inqueue, SafeQueue<void *> *outqueue) {
-    this->inqueue = inqueue;
-    this->outqueue = outqueue;
+    auto dtype = dataType();
+    us neninchannels_inc_mon = neninchannels();
+    us nenoutchannels = this->nenoutchannels();
-    if (monitorOutput) {
-      this->outDelayqueue = new SafeQueue<void *>();
-    }
-
-    if (isRunning()) {
-      throw runtime_error("Stream already running");
-    }
-
-    if (neninchannels(false) > 0 && !inqueue) {
-      throw runtime_error("inqueue argument not given");
-    }
-    if (nenoutchannels() > 0 && !outqueue) {
-      throw runtime_error("outqueue argument not given");
-    }
-    assert(rtaudio);
-    rtaudio->startStream();
-  }
-
-  void stop() {
-
-    if (!isRunning()) {
-      cerr << "Stream is already stopped" << endl;
-    } else {
-      assert(rtaudio);
-      rtaudio->stopStream();
-    }
-    if (inqueue) {
-      inqueue = nullptr;
-    }
-    if (outqueue) {
-      outqueue = nullptr;
-    }
-    if (outDelayqueue) {
-      delete outDelayqueue;
-      outDelayqueue = nullptr;
-    }
-  }
-  bool isRunning() const { return (rtaudio && rtaudio->isStreamRunning()); }
-
-  ~AudioDaq() {
-    assert(rtaudio);
-    if (isRunning()) {
-      stop();
-    }
-    if (rtaudio->isStreamOpen()) {
-      rtaudio->closeStream();
-    }
-
-    if (rtaudio)
-      delete rtaudio;
-    if (outDelayqueue)
-      delete outDelayqueue;
-    if (instreamparams)
-      delete instreamparams;
-    if (outstreamparams)
-      delete outstreamparams;
-  }
-};
-
-Daq *createRtAudioDevice(const DeviceInfo &devinfo,
-                         const DaqConfiguration &config) {
-
-  AudioDaq *daq = NULL;
-
-  try {
-    daq = new AudioDaq(devinfo, config);
-
-  } catch (runtime_error &e) {
-    if (daq)
-      delete daq;
-    throw;
-  }
-  return daq;
-}
-
-int mycallback(void *outputBuffervoid, void *inputBuffervoid,
-               unsigned int nFrames, double streamTime,
-               RtAudioStreamStatus status, void *userData) {
-
-  u_int8_t *inputBuffer = (u_int8_t *)inputBuffervoid;
-  u_int8_t *outputBuffer = (u_int8_t *)outputBuffervoid;
-
-  AudioDaq *daq = (AudioDaq *)userData;
-  DataType dtype = daq->dataType();
-  us neninchannels_inc_mon = daq->neninchannels();
-  us nenoutchannels = daq->nenoutchannels();
-
-  bool monitorOutput = daq->monitorOutput;
+
     us bytesperchan = dtype_map.at(dtype).sw * nFrames;
     us monitorOffset = ((us)monitorOutput) * bytesperchan;
 
-  SafeQueue<void *> *inqueue = daq->inqueue;
-  SafeQueue<void *> *outqueue = daq->outqueue;
-  SafeQueue<void *> *outDelayqueue = daq->outDelayqueue;
-
-  const boolvec &eninchannels = daq->eninchannels;
-  const boolvec &enoutchannels = daq->enoutchannels;
-
-  if (inputBuffer || monitorOutput) {
+    if (inputBuffer) {
 
       u_int8_t *inbuffercpy =
           (u_int8_t *)malloc(bytesperchan * neninchannels_inc_mon);
       if (inputBuffer) {
         us j = 0; // OUR buffer channel counter
         us i = 0; // RtAudio channel counter
-        for (int ch = daq->getLowestInChannel(); ch <= daq->getHighestInChannel();
-             ch++) {
+        for (int ch = getLowestInChannel(); ch <= getHighestInChannel(); ch++) {
           if (eninchannels[ch]) {
             memcpy(&(inbuffercpy[monitorOffset + j * bytesperchan]),
                    &(inputBuffer[i * bytesperchan]), bytesperchan);
@@ -316,24 +223,9 @@ int mycallback(void *outputBuffervoid, void *inputBuffervoid,
             i++;
           }
         }
-        if (monitorOutput) {
-          assert(outDelayqueue);
-
-          if (!daq->outDelayqueue->empty()) {
-            void *dat = daq->outDelayqueue->dequeue();
-            memcpy((void *)inbuffercpy, dat, bytesperchan);
-            free(dat);
-          } else {
-            cerr << "Warning: output delay queue appears empty!" << endl;
-            memset(inbuffercpy, 0, bytesperchan);
-          }
-        }
-        assert(inqueue);
-        inqueue->enqueue(inbuffercpy);
       }
 
       if (outputBuffer) {
-        assert(outqueue);
         if (!outqueue->empty()) {
           u_int8_t *outbuffercpy = (u_int8_t *)outqueue->dequeue();
           us j = 0; // OUR buffer channel counter
@@ -362,8 +254,90 @@ int mycallback(void *outputBuffervoid, void *inputBuffervoid,
       }
 
       return 0;
+    }
   }
-void myerrorcallback(RtAudioError::Type, const string &errorText) {
+
+  virtual void
+  start(std::optional<DaqCallback> inCallback,
+        std::optional<DaqCallback> outCallback) {
+
+    assert(!monitorOutput);
+
+    if (isRunning()) {
+      throw runtime_error("Stream already running");
+    }
+
+    if (neninchannels(false) > 0 && !inqueue) {
+      throw runtime_error("inqueue argument not given");
+    }
+    if (nenoutchannels() > 0 && !outqueue) {
+      throw runtime_error("outqueue argument not given");
+    }
+    assert(rtaudio);
+    rtaudio->startStream();
+  }
+
+  void stop() {
+    if (!isRunning()) {
+      cerr << "Stream is already stopped" << endl;
+    } else {
+      assert(rtaudio);
+      rtaudio->stopStream();
+    }
+    if (inqueue) {
+      inqueue = nullptr;
+    }
+    if (outqueue) {
+      outqueue = nullptr;
+    }
+    if (outDelayqueue) {
+      delete outDelayqueue;
+      outDelayqueue = nullptr;
+    }
+  }
+  bool isRunning() const { return (rtaudio && rtaudio->isStreamRunning()); }
+
+  ~RtAudioDaq() {
+    assert(rtaudio);
+    if (isRunning()) {
+      stop();
+    }
+    if (rtaudio->isStreamOpen()) {
+      rtaudio->closeStream();
+    }
+  }
+}
+;
+
+Daq *createRtAudioDevice(const DeviceInfo &devinfo,
+                         const DaqConfiguration &config) {
+
+  AudioDaq *daq = NULL;
+
+  try {
+    daq = new AudioDaq(devinfo, config);
+
+  } catch (runtime_error &e) {
+    if (daq)
+      delete daq;
+    throw;
+  }
+  return daq;
+}
+
+int mycallback(void *outputBuffervoid, void *inputBuffervoid,
+               unsigned int nFrames, double streamTime,
+               RtAudioStreamStatus status, void *userData) {
+
+void myerrorcallback(RtAudioError::Type, const string &errorText) {
   cerr << errorText << endl;
 }
+
+int mycallback(void *outputBuffer, void *inputBuffer, unsigned int nFrames,
+               double streamTime, RtAudioStreamStatus status,
+               void *userData) {
+
+  return static_cast<RtAudioDaq *>(userData)->streamCallback(
+      outputBuffer, inputBuffer, nFrames, streamTime, status);
+}
+
 #endif // LASP_HAS_RTAUDIO == 1
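The new RtAudioDaq wires RtAudio's C-style callback back into the class by passing `this` as the userData pointer and forwarding to the streamCallback member, as the added mycallback above does. A self-contained sketch of that trampoline pattern, using illustrative names rather than the actual LASP/RtAudio signatures:

    #include <iostream>

    class Stream {
    public:
      // Member function that does the real per-block work.
      int onBlock(unsigned int nFrames) {
        std::cout << "received " << nFrames << " frames\n";
        return 0; // 0 = keep streaming
      }
    };

    // C-style callback handed to the audio backend; userData carries `this`.
    int trampoline(unsigned int nFrames, void *userData) {
      return static_cast<Stream *>(userData)->onBlock(nFrames);
    }

    int main() {
      Stream s;
      return trampoline(512, &s); // the backend would call this once per block
    }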
@@ -62,7 +62,7 @@ class DT9837A : public Daq {
   void threadFcn(std::optional<DaqCallback> inCallback,
                  std::optional<DaqCallback> outcallback);
 
 public:
   DT9837A(const DeviceInfo &devinfo, const DaqConfiguration &config);
 
   ~DT9837A() {
@@ -118,7 +118,7 @@ void DT9837A::start(std::optional<DaqCallback> inCallback,
 }
 
 class BufHandler {
 protected:
   DaqDeviceHandle _handle;
   const DataTypeDescriptor dtype_descr;
   us nchannels, nFramesPerBlock;
@@ -130,7 +130,7 @@ class BufHandler {
   us totalFramesCount = 0;
   long long buffer_mid_idx;
 
 public:
   BufHandler(DaqDeviceHandle handle, const DataTypeDescriptor dtype_descr,
              const us nchannels, const us nFramesPerBlock, DaqCallback cb,
              const double samplerate)
@@ -145,7 +145,8 @@ class BufHandler {
 };
 class InBufHandler : public BufHandler {
   bool monitorOutput;
-public:
+
+public:
   InBufHandler(DT9837A &daq, DaqCallback cb)
       : BufHandler(daq._handle, daq.dtypeDescr(), daq.neninchannels(),
                    daq._nFramesPerBlock, cb, daq.samplerate())
@@ -207,29 +208,34 @@ class InBufHandler : public BufHandler {
       topenqueued = false;
     }
 
-  void operator()() {
+  bool operator()() {
 
-    ChannelView cv(nchannels);
 
     auto runCallback = ([&](us totalOffset) {
       us monitoroffset = monitorOutput ? 1 : 0;
-      for (us channel = monitoroffset; channel < (nchannels - monitoroffset);
-           channel++) {
-        cv[channel] =
-            gsl::span(reinterpret_cast<uint8_t *>(
-                          &buf[totalOffset + channel * nFramesPerBlock]),
-                      nFramesPerBlock * sizeof(double));
-      }
+      DaqData data(nchannels, nFramesPerBlock,
+                   DataTypeDescriptor::DataType::dtype_fl64);
+      us ch_no = 0;
       if (monitorOutput) {
 
-        cv[0] = gsl::span(
             reinterpret_cast<uint8_t *>(
                 &buf[totalOffset + (nchannels - 1) * nFramesPerBlock]),
             nFramesPerBlock * sizeof(double));
       }
-      cb(cv, dtype_descr);
-    });
+      /* if(mon */
 
+      /* for (us channel = monitoroffset; channel < (nchannels - monitoroffset);
+       */
+      /*      channel++) { */
+      /*   cv[channel] = */
+      /*       gsl::span(reinterpret_cast<uint8_t *>( */
+      /*           &buf[totalOffset + channel * nFramesPerBlock]), */
+      /*           nFramesPerBlock * sizeof(double)); */
+      /* } */
+
+      /* cv[0] = gsl::span( */
+      /* cb(cv, dtype_descr); */
+    });
 
     ScanStatus status;
     TransferStatus transferStatus;
@@ -262,20 +268,19 @@ class InBufHandler : public BufHandler {
         topenqueued = true;
       }
     }
   }
   ~InBufHandler() {
     // At exit of the function, stop scanning.
     DEBUGTRACE_ENTER;
     UlError err = ulDaqInScanStop(_handle);
     if (err != ERR_NO_ERROR) {
       showErr(err);
     }
   }
 
 };
 
 class OutBufHandler : public BufHandler {
 public:
   OutBufHandler(DT9837A &daq, DaqCallback cb)
       : BufHandler(daq._handle, daq.dtypeDescr(), daq.neninchannels(),
                    daq._nFramesPerBlock, cb, daq.samplerate()) {
@@ -471,7 +476,7 @@ DT9837A::DT9837A(const DeviceInfo &devinfo, const DaqConfiguration &config)
         throw runtime_error("Fatal: could not set IEPE mode");
       }
     }
 }
 void fillUlDaqDeviceInfo(std::vector<DeviceInfo> &devinfolist) {
 
   DEBUGTRACE_ENTER;