Fixed latest bugs. Now everything seems to be working, at least with the RtAudio API
parent 4e0c09d356
commit 3d188281ab
@@ -154,14 +154,6 @@ Daq::Daq(const DeviceInfo &devinfo, const DaqConfiguration &config)
         throw runtime_error(
             "Output monitoring only possible when output is enabled");
     }
-    // Some sanity checks
-    if (eninchannels.size() != 4) {
-        throw runtime_error("Invalid length of enabled inChannels vector");
-    }
-
-    if (enoutchannels.size() != 1) {
-        throw runtime_error("Invalid length of enabled outChannels vector");
-    }
 }

@@ -193,6 +193,13 @@ class AudioDaq: public Daq {

     void start(SafeQueue<void*> *inqueue, SafeQueue<void*> *outqueue) {
+        this->inqueue = inqueue;
+        this->outqueue = outqueue;
+        if(monitorOutput) {
+            this->outDelayqueue = new SafeQueue<void*>();
+
+        }
+
         if(isRunning()){
             throw runtime_error("Stream already running");
         }

@@ -209,6 +216,7 @@ class AudioDaq: public Daq {
     }

     void stop() {
+
         if(!isRunning()) {
             cerr << "Stream is already stopped" << endl;
         }

@@ -216,6 +224,9 @@ class AudioDaq: public Daq {
             assert(rtaudio);
             rtaudio->stopStream();
         }
+        if(inqueue) delete inqueue;
+        if(outqueue) delete outqueue;
+        if(outDelayqueue) delete outDelayqueue;

     }
     bool isRunning() const {return (rtaudio && rtaudio->isStreamRunning());}

@@ -3,6 +3,7 @@ from .lasp_deviceinfo cimport DeviceInfo
 from .lasp_daqconfig cimport DaqConfiguration

 from cpython.ref cimport PyObject,Py_INCREF, Py_DECREF
+import numpy as np
 from .lasp_device_common import AvType

 __all__ = ['Daq']

@@ -59,9 +59,6 @@ class AvStream:

         self.input_sensitivity = np.asarray(self.input_sensitivity)

-        # Fill in numpy data type, and sample width
-        datatype = daqconfig.dataTypeIndex
-
         # Counters for the number of frames that have been coming in
         self._aframectr = Atomic(0)
         self._vframectr = Atomic(0)

@@ -88,6 +85,7 @@ class AvStream:
         self._daq = Daq(device, daqconfig)
         self.blocksize = self._daq.nFramesPerBlock
         self.samplerate = self._daq.samplerate
+        self.dtype = self._daq.getNumpyDataType()

     def nCallbacks(self):
         """Returns the current number of installed callbacks."""

@@ -164,23 +162,19 @@ class AvStream:
             # the output buffer to zero
             noutput_cb = len(self._callbacks[AvType.audio_output])

-            shouldhaveoutput = (self.avtype == AvType.audio_output or
-                                self.daqconfig.duplex_mode)
-
-            if noutput_cb == 0 and shouldhaveoutput and outdata is not None:
-                outdata[:, :] = 0
-
             # Loop over callbacks
             if outdata is not None:
-                for cb in self._callbacks[AvType.audio_output]:
                 try:
+                    if len(self._callbacks[AvType.audio_output]) == 0:
+                        outdata[:, :] = 0
+                    for cb in self._callbacks[AvType.audio_output]:
                         cb(indata, outdata, self._aframectr())
                 except Exception as e:
                     print(e)
                     return 2
             if indata is not None:
-                for cb in self._callbacks[AvType.audio_input]:
                 try:
+                    for cb in self._callbacks[AvType.audio_input]:
                         cb(indata, outdata, self._aframectr())
                 except Exception as e:
                     print(e)

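For reference, the hunk above replaces the duplex-mode check with a simpler rule: when no output callbacks are registered, the output block is zeroed so stale samples are never replayed. A minimal standalone sketch of that rule, using a hypothetical dispatch_output() helper rather than the stream internals:

    import numpy as np

    def dispatch_output(callbacks, indata, outdata, nframes):
        """Fill outdata via the registered callbacks; silence it when there are none."""
        try:
            if len(callbacks) == 0:
                outdata[:, :] = 0
            for cb in callbacks:
                cb(indata, outdata, nframes)
        except Exception as e:
            print(e)
            return 2
        return 0

    # No callbacks registered: the block comes back silenced.
    out = np.ones((2048, 2))
    assert dispatch_output([], None, out, 0) == 0 and not out.any()
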
@@ -199,8 +193,8 @@ class AvStream:
         self._vframectr <<= 0
         self._video_started <<= False

-        self._audiobackend.stop()
-        self._audiobackend = None
+        self._daq.stop()
+        self._daq = None

     def isRunning(self):
         return self._running()

@@ -37,7 +37,7 @@ class Recording:
         if stream.avtype != AvType.audio_input:
             raise RuntimeError('Stream does not have any input channels')
         self.blocksize = stream.blocksize
-        self.samplerate = stream.input_samplerate
+        self.samplerate = stream.samplerate
         self._running = Atomic(False)
         self._running_cond = Condition()
         self.rectime = rectime

@@ -81,7 +81,7 @@ class Recording:

             self._ad = f.create_dataset('audio',
                                         (1, stream.blocksize, nchannels),
-                                        dtype=stream.input_numpy_dtype,
+                                        dtype=stream.dtype,
                                         maxshape=(None, stream.blocksize,
                                                   nchannels),
                                         compression='gzip'

@@ -96,7 +96,7 @@ class Recording:
                                         compression='gzip'
                                         )

-            f.attrs['samplerate'] = stream.input_samplerate
+            f.attrs['samplerate'] = stream.samplerate
             f.attrs['nchannels'] = nchannels
             f.attrs['blocksize'] = stream.blocksize
             f.attrs['sensitivity'] = stream.input_sensitivity
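For reference, the Recording class now reads its format information from the stream attributes introduced above (stream.dtype, stream.samplerate, stream.blocksize). A minimal, self-contained sketch of the same h5py dataset layout, with made-up values in place of a live stream:

    import numpy as np
    import h5py

    blocksize, nchannels = 2048, 2        # hypothetical stream settings
    samplerate, dtype = 48000, np.int32

    with h5py.File('rec.h5', 'w') as f:
        ad = f.create_dataset('audio',
                              (1, blocksize, nchannels),
                              dtype=dtype,
                              maxshape=(None, blocksize, nchannels),
                              compression='gzip')
        ad[0] = np.zeros((blocksize, nchannels), dtype=dtype)  # first captured block
        f.attrs['samplerate'] = samplerate
        f.attrs['nchannels'] = nchannels
        f.attrs['blocksize'] = blocksize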