Some comment improvement and cleanup
parent 6a006e27f9
commit da99618fc3
@@ -1,6 +1,6 @@
cmake_minimum_required (VERSION 3.16)
-project(LASP LANGUAGES CXX)
+project(LASP LANGUAGES C CXX)
# To allow linking to static libs from other directories
cmake_policy(SET CMP0079 NEW)
@@ -10,10 +10,6 @@ include("BuildType")
include("QueryPythonForPybind11")

-# Generate stubs for the Python module
-option(WITH_PY_STUBS
-    "Generate Python stub files (.pyi) for the Python module." On)
-
# Find the pybind11 package
find_pybind11_python_first()

@@ -50,7 +46,6 @@ find_package(BLAS REQUIRED)
add_definitions(-DLASP_MAX_NFFT=33554432) # 2**25

# ####################################### End of user-adjustable variables section

include(OSSpecific)

@@ -32,25 +32,25 @@ void fillRtAudioDeviceInfo(vector<DeviceInfo> &devinfolist) {
    // "Our device info struct"
    DeviceInfo d;
    switch (api) {
    case RtAudio::LINUX_ALSA:
      d.api = rtaudioAlsaApi;
      break;
    case RtAudio::LINUX_PULSE:
      d.api = rtaudioPulseaudioApi;
      break;
    case RtAudio::WINDOWS_WASAPI:
      d.api = rtaudioWasapiApi;
      break;
    case RtAudio::WINDOWS_DS:
      d.api = rtaudioDsApi;
      break;
    case RtAudio::WINDOWS_ASIO:
      d.api = rtaudioAsioApi;
      break;
    default:
      cerr << "Not implemented RtAudio API, skipping." << endl;
      continue;
      break;
    }

    d.device_name = devinfo.name;
@@ -80,7 +80,7 @@ void fillRtAudioDeviceInfo(vector<DeviceInfo> &devinfolist) {
    }
    /* if (formats & RTAUDIO_SINT32) { */
    /* d.availableDataTypes.push_back(DataTypeDescriptor::DataType::dtype_int24);
     */
    /* } */
    if (formats & RTAUDIO_SINT32) {
      d.availableDataTypes.push_back(
@@ -105,7 +105,7 @@ void fillRtAudioDeviceInfo(vector<DeviceInfo> &devinfolist) {
}

static int mycallback(void *outputBuffer, void *inputBuffer, unsigned int nFrames,
                      double streamTime, RtAudioStreamStatus status, void *userData);

static void myerrorcallback(RtAudioError::Type, const string &errorText);

@@ -122,91 +122,91 @@ class RtAudioDaq : public Daq {
  std::atomic<StreamStatus> _streamStatus{};

public:
  RtAudioDaq(const DeviceInfo &devinfo, const DaqConfiguration &config)
      : Daq(devinfo, config),
        rtaudio(static_cast<RtAudio::Api>(devinfo.api.api_specific_subcode)),
        nFramesPerBlock(Daq::framesPerBlock()) {

    DEBUGTRACE_ENTER;

    // We make sure not to run RtAudio in duplex mode. This seems to be buggy
    // and untested. Better to use a hardware-type loopback into the system.
    if (duplexMode()) {
      throw runtime_error("RtAudio backend cannot run in duplex mode.");
    }
    assert(!monitorOutput);

    std::unique_ptr<RtAudio::StreamParameters> inParams, outParams;

    if (neninchannels() > 0) {

      inParams = std::make_unique<RtAudio::StreamParameters>();

      // +1 to get the count.
      inParams->nChannels = getHighestInChannel() + 1;
      if (inParams->nChannels < 1) {
        throw runtime_error("Invalid input number of channels");
      }
      inParams->firstChannel = 0;
      inParams->deviceId = devinfo.api_specific_devindex;

    } else {

      outParams = std::make_unique<RtAudio::StreamParameters>();

      outParams->nChannels = getHighestOutChannel() + 1;
      if (outParams->nChannels < 1) {
        throw runtime_error("Invalid output number of channels");
      }
      outParams->firstChannel = 0;
      outParams->deviceId = devinfo.api_specific_devindex;
    }

    RtAudio::StreamOptions streamoptions;
    streamoptions.flags = RTAUDIO_HOG_DEVICE | RTAUDIO_NONINTERLEAVED;

    streamoptions.numberOfBuffers = 2;
    streamoptions.streamName = "RtAudio stream";
    streamoptions.priority = 0;

    RtAudioFormat format;
    using Dtype = DataTypeDescriptor::DataType;
    const Dtype dtype = dataType();
    switch (dtype) {
    case Dtype::dtype_fl32:
      format = RTAUDIO_FLOAT32;
      break;
    case Dtype::dtype_fl64:
      format = RTAUDIO_FLOAT64;
      break;
    case Dtype::dtype_int8:
      format = RTAUDIO_SINT8;
      break;
    case Dtype::dtype_int16:
      format = RTAUDIO_SINT16;
      break;
    case Dtype::dtype_int32:
      format = RTAUDIO_SINT32;
      break;
    default:
      throw runtime_error("Invalid data type specified for DAQ stream.");
      break;
    }

    // Copy here, as it is used to return the *actual* number of frames per
    // block.
    unsigned int nFramesPerBlock_copy = nFramesPerBlock;

    // Final step: open the stream.
    rtaudio.openStream(outParams.get(), inParams.get(), format,
                       static_cast<us>(samplerate()), &nFramesPerBlock_copy,
                       mycallback, (void *)this, &streamoptions,
                       &myerrorcallback);
  }

  virtual void start(InDaqCallback inCallback,
                     OutDaqCallback outCallback) override {

    DEBUGTRACE_ENTER;

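The "Copy here" comment in the constructor above hinges on the frames-per-block argument being passed by pointer: the backend may overwrite it with the value it actually grants, so a copy is handed over to keep the class member untouched. A minimal sketch of that pattern, with a made-up openDevice() standing in for the real backend call:

#include <cstdio>

// Stand-in for a driver call that takes the requested block size by pointer
// and writes back the size it actually granted (here: rounded up to 64).
static void openDevice(unsigned int *nFrames) {
  *nFrames = (*nFrames + 63u) / 64u * 64u;
}

int main() {
  const unsigned int nFramesPerBlock = 1000;           // the "member" value
  unsigned int nFramesPerBlock_copy = nFramesPerBlock; // hand out a copy
  openDevice(&nFramesPerBlock_copy);
  std::printf("requested %u frames, granted %u frames\n", nFramesPerBlock,
              nFramesPerBlock_copy);
  return 0;
}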
@@ -219,7 +219,7 @@ public:
    // Logical XOR
    if (inCallback && outCallback) {
      throw runtime_error("Either input or output stream possible for RtAudio. "
                          "Stream duplex mode not provided.");
    }

    if (inCallback) {
@@ -236,6 +236,7 @@ public:
            "Output callback given, but stream does not provide output data");
      }
    }

    rtaudio.startStream();

    // If we are here, we are running without errors.
@@ -251,12 +252,16 @@ public:
    if (getStreamStatus().runningOK()) {
      rtaudio.stopStream();
    }
+    StreamStatus s = _streamStatus;
+    s.isRunning = false;
+    s.errorType = StreamStatus::StreamError::noError;
+    _streamStatus = s;
  }

  int streamCallback(void *outputBuffer, void *inputBuffer,
                     unsigned int nFrames, double streamTime,
                     RtAudioStreamStatus status) {

    /* DEBUGTRACE_ENTER; */

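The lines added to stop() update the std::atomic<StreamStatus> member by copying the whole struct out, editing the copy, and assigning it back, which compiles to an atomic load followed by an atomic store rather than a single read-modify-write. A minimal sketch of the same pattern, assuming an illustrative trivially-copyable status struct rather than LASP's actual StreamStatus:

#include <atomic>
#include <cstdio>

struct Status {        // must be trivially copyable to live inside std::atomic
  bool isRunning{false};
  int errorType{0};    // 0 stands for "no error" in this sketch
};

static std::atomic<Status> streamStatus{};

int main() {
  Status s = streamStatus; // atomic load of the whole struct (copy out)
  s.isRunning = false;     // edit the local copy
  s.errorType = 0;
  streamStatus = s;        // atomic store (publish the edited copy)

  const Status now = streamStatus;
  std::printf("running=%d error=%d\n", now.isRunning ? 1 : 0, now.errorType);
  return 0;
}

Because load and store are separate operations, two threads doing this concurrently could interleave; the pattern is only race-free if a single thread writes the status at that point, which appears to be the assumption here.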
@@ -268,20 +273,21 @@ public:
      StreamStatus stat = _streamStatus;
      stat.errorType = e;
      stat.isRunning = false;
+      _streamStatus = stat;
      rval = 1;
    };

    switch (status) {
    case RTAUDIO_INPUT_OVERFLOW:
      stopWithError(se::inputXRun);
      return 1;
      break;
    case RTAUDIO_OUTPUT_UNDERFLOW:
      stopWithError(se::outputXRun);
      return 1;
      break;
    default:
      break;
    }

    const auto &dtype_descr = DataTypeDescriptor();
@@ -291,7 +297,7 @@ public:
    us sw = dtype_descr.sw;
    if (nFrames != nFramesPerBlock) {
      cerr << "RtAudio backend error: nFrames does not match block size!"
           << endl;
      stopWithError(se::logicError);
      return 1;
    }
@@ -303,7 +309,7 @@ public:
    for (int ch = getLowestInChannel(); ch <= getHighestInChannel(); ch++) {
      if (inchannel_config.at(ch).enabled) {
        ptrs.push_back(&static_cast<uint8_t *>(
            inputBuffer)[sw * ninchannels * ch * nFramesPerBlock]);
      }
    }
    DaqData d{neninchannels, nFramesPerBlock, dtype};
@@ -326,7 +332,7 @@ public:
    for (int ch = 0; ch <= getHighestOutChannel(); ch++) {
      if (outchannel_config.at(ch).enabled) {
        ptrs.push_back(&static_cast<uint8_t *>(
            outputBuffer)[sw * nenoutchannels * ch * nFramesPerBlock]);
      }
    }
    DaqData d{nenoutchannels, nFramesPerBlock, dtype};
@@ -354,7 +360,7 @@ public:
};

std::unique_ptr<Daq> createRtAudioDevice(const DeviceInfo &devinfo,
                                         const DaqConfiguration &config) {
  return std::make_unique<RtAudioDaq>(devinfo, config);
}

@@ -362,7 +368,7 @@ void myerrorcallback(RtAudioError::Type, const string &errorText) {
  cerr << "RtAudio backend stream error: " << errorText << endl;
}
int mycallback(void *outputBuffer, void *inputBuffer, unsigned int nFrames,
               double streamTime, RtAudioStreamStatus status, void *userData) {

  return static_cast<RtAudioDaq *>(userData)->streamCallback(
      outputBuffer, inputBuffer, nFrames, streamTime, status);
}
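mycallback and myerrorcallback above are the usual trampoline for a C-style callback API: the library only accepts a free function plus a void *userData, so the free function casts userData back to the object and forwards to the member function. A self-contained sketch of that pattern with invented names (not RtAudio's actual API):

#include <cstdio>

// Stand-in for a C API: it only knows a plain function pointer and an opaque
// user pointer, which it hands back on every invocation.
using Callback = int (*)(unsigned int nFrames, void *userData);

struct FakeCApi {
  Callback cb = nullptr;
  void *userData = nullptr;
  int fire(unsigned int nFrames) { return cb ? cb(nFrames, userData) : -1; }
};

class Handler {
public:
  int streamCallback(unsigned int nFrames) {
    std::printf("callback got %u frames\n", nFrames);
    return 0;
  }
};

// Free-function trampoline: recover the object from userData and forward.
static int trampoline(unsigned int nFrames, void *userData) {
  return static_cast<Handler *>(userData)->streamCallback(nFrames);
}

int main() {
  Handler h;
  FakeCApi api;
  api.cb = trampoline;
  api.userData = &h; // same role as the (void *)this passed to openStream
  return api.fire(512);
}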
third_party/fftpack/CMakeLists.txt (vendored, 2 changed lines)
@@ -6,4 +6,4 @@ add_library(fftpack

# Ling fft to math
target_link_libraries(fftpack PRIVATE m)
-interface_include_directories(fftpack .)
+target_include_directories(fftpack PUBLIC .)