diff --git a/lasp/device/lasp_rtaudio.pyx b/lasp/device/lasp_rtaudio.pyx
index 056899c..b87dad5 100644
--- a/lasp/device/lasp_rtaudio.pyx
+++ b/lasp/device/lasp_rtaudio.pyx
@@ -26,7 +26,7 @@ cdef extern from "RtAudio.h" nogil:
         THREAD_ERROR
 
     ctypedef unsigned long RtAudioStreamFlags
-    RtAudioStreamFlags RT_AUDIO_NONINTERLEAVED
+    RtAudioStreamFlags RTAUDIO_NONINTERLEAVED
     RtAudioStreamFlags RTAUDIO_MINIMIZE_LATENCY
     RtAudioStreamFlags RTAUDIO_HOG_DEVICE
     RtAudioStreamFlags RTAUDIO_ALSA_USE_DEFAULT
@@ -144,10 +144,12 @@ cdef object fromBufferToNPYNoCopy(
                                  void* buf,
                                  size_t nchannels,
                                  size_t nframes):
-    cdef cnp.npy_intp[2] dims = [nchannels, nframes]
+    cdef cnp.npy_intp[2] dims = [nframes, nchannels]
+    # Interleaved data is C-style contiguous. Therefore, we can directly use
+    # SimpleNewFromData()
 
     array = cnp.PyArray_SimpleNewFromData(2, &dims[0], buffer_format_type,
-                                          buf).transpose()
+                                          buf)
 
     return array
 
@@ -160,8 +162,8 @@ cdef void fromNPYToBuffer(cnp.ndarray arr,
     memcpy(buf, arr.data, arr.size*arr.itemsize)
 
 
-cdef int audioCallback(void* outputBuffer,
-                       void* inputBuffer,
+cdef int audioCallback(void* outputbuffer,
+                       void* inputbuffer,
                        unsigned int nFrames,
                        double streamTime,
                        RtAudioStreamStatus status,
@@ -185,41 +187,42 @@ cdef int audioCallback(void* outputBuffer,
 
     # Obtain stream information
     npy_input = None
+    npy_output = None
 
     if stream.hasInput:
         try:
+            assert inputbuffer != NULL
             npy_format = _formats_rtkey[stream.sampleformat][2]
             npy_input = fromBufferToNPYNoCopy(
                     npy_format,
-                    inputBuffer,
+                    inputbuffer,
                     stream.inputParams.nChannels,
                     nFrames)
 
         except Exception as e:
-            print('Exception in Cython callback: ', str(e))
+            print('exception in cython callback: ', str(e))
+
+    if stream.hasOutput:
+        try:
+            assert outputbuffer != NULL
+            npy_format = _formats_rtkey[stream.sampleformat][2]
+            npy_output = fromBufferToNPYNoCopy(
+                    npy_format,
+                    outputbuffer,
+                    stream.outputParams.nChannels,
+                    nFrames)
+
+        except Exception as e:
+            print('exception in cython callback: ', str(e))
     try:
-        npy_output, rval = stream.pyCallback(npy_input,
-                                             nFrames,
-                                             streamTime)
+        rval = stream.pyCallback(npy_input,
+                                 npy_output,
+                                 nFrames,
+                                 streamTime)
     except Exception as e:
         print('Exception in Python callback: ', str(e))
        return 1
-
-    if stream.hasOutput:
-        if npy_output is None:
-            print('No output buffer given!')
-            return 1
-        IF LASP_DEBUG_CYTHON:
-            try:
-                assert outputBuffer != NULL, "Bug: RtAudio does not give output buffer!"
-                assert npy_output.shape[0] == stream.outputParams.nChannels, "Bug: channel mismatch in output buffer!"
-                assert npy_output.shape[1] == nFrames, "Bug: frame mismatch in output buffer!"
-                assert npy_output.itemsize == stream.sampleSize, "Bug: invalid sample type in output buffer!"
-            except AssertionError as e:
-                print(e)
-        fromNPYToBuffer(npy_output, outputBuffer)
-        return rval
+    return rval
 
 cdef void errorCallback(RtAudioError.Type _type,const string& errortxt) nogil:
     pass
diff --git a/lasp/lasp_avstream.py b/lasp/lasp_avstream.py
index d806b39..845a84d 100644
--- a/lasp/lasp_avstream.py
+++ b/lasp/lasp_avstream.py
@@ -67,7 +67,8 @@ class AvStream:
 
         if daqconfig.duplex_mode or avtype == AvType.audio_output:
             rtaudio_outputparams = {'deviceid': device.index,
-                                    'nchannels': device.outputchannels,
+                                    # TODO: Add option to specify the number of output channels to use
+                                    'nchannels': 1, #device.outputchannels,
                                     'firstchannel': 0}
             self.sampleformat = daqconfig.en_output_sample_format
             self.samplerate = int(daqconfig.en_output_rate)
@@ -191,24 +192,23 @@ class AvStream:
             cap.release()
             print('stopped videothread')
 
-    def _audioCallback(self, indata, nframes, streamtime):
+    def _audioCallback(self, indata, outdata, nframes, streamtime):
         """
        This is called (from a separate thread) for each audio block.
        """
        self._aframectr += 1
-        output_signal = None
         with self._callbacklock:
             for cb in self._callbacks[AvType.audio_input]:
                 try:
-                    cb(indata, self._aframectr())
+                    cb(indata, outdata, self._aframectr())
                 except Exception as e:
                     print(e)
             for cb in self._callbacks[AvType.audio_output]:
                 try:
-                    output_signal = cb(indata, self._aframectr())
+                    cb(indata, outdata, self._aframectr())
                 except Exception as e:
                     print(e)
-        return output_signal, 0 if self._running else 1
+        return 0 if self._running else 1
 
     def stop(self):
         self._running <<= False
diff --git a/lasp/lasp_record.py b/lasp/lasp_record.py
index de85589..c60c222 100644
--- a/lasp/lasp_record.py
+++ b/lasp/lasp_record.py
@@ -131,7 +131,7 @@ class Recording:
 
 
 
-    def _aCallback(self, frames, aframe):
+    def _aCallback(self, indata, outdata, aframe):
 
         curT = self._aframeno()*self.blocksize/self.samplerate
         recstatus = RecordStatus(
@@ -158,7 +158,7 @@ class Recording:
            return
 
         self._ad.resize(self._aframeno()+1, axis=0)
-        self._ad[self._aframeno(), :, :] = frames
+        self._ad[self._aframeno(), :, :] = indata
         self._aframeno += 1
 
     def _vCallback(self, frame, framectr):
@@ -168,10 +168,3 @@ class Recording:
 
         self._vd[vframeno, :, :] = frame
         self._vframeno += 1
-
-if __name__ == '__main__':
-    stream = AvStream()
-    rec = Recording('test', stream, 5)
-    with rec(wait=True):
-        sleep
-    rec.start()
diff --git a/scripts/lasp_siggen b/scripts/lasp_siggen
new file mode 100755
index 0000000..e906093
--- /dev/null
+++ b/scripts/lasp_siggen
@@ -0,0 +1,66 @@
+#!/usr/bin/python3
+import argparse
+import numpy as np
+
+
+parser = argparse.ArgumentParser(
+    description='Play a sine wave'
+)
+device_help = 'DAQ Device to play to'
+parser.add_argument('--device', '-d', help=device_help, type=str,
+                    default='Default')
+
+args = parser.parse_args()
+
+from lasp.lasp_avstream import AvStream, AvType
+from lasp.device import DAQConfiguration, RtAudio
+
+config = DAQConfiguration.loadConfigs()[args.device]
+
+rtaudio = RtAudio()
+count = rtaudio.getDeviceCount()
+devices = [rtaudio.getDeviceInfo(i) for i in range(count)]
+
+output_devices = {}
+for device in devices:
+    if device.outputchannels >= 0:
+        output_devices[device.name] = device
+
+try:
+    output_device = output_devices[config.output_device_name]
+except KeyError:
+    raise RuntimeError(f'output device {config.output_device_name} not available')
+
+samplerate = int(config.en_output_rate)
+stream = AvStream(output_device,
+                  AvType.audio_output,
+                  config)
+
+# freq = 440.
+freq = 1000.
+omg = 2*np.pi*freq
+
+
+def mycallback(indata, outdata, blockctr):
+    frames = outdata.shape[0]
+    nchannels = outdata.shape[1]
+    # nchannels = 1
+    streamtime = blockctr*frames/samplerate
+    t = np.linspace(streamtime, streamtime + frames/samplerate,
+                    frames)[np.newaxis, :]
+    outp = 0.01*np.sin(omg*t)
+    for i in range(nchannels):
+        outdata[:,i] = ((2**16-1)*outp).astype(np.int16)
+
+stream.addCallback(mycallback, AvType.audio_output)
+stream.start()
+
+input()
+
+print('Stopping stream...')
+stream.stop()
+
+print('Stream stopped')
+print('Closing stream...')
+stream.close()
+print('Stream closed')
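
Note (not part of the patch): the dims swap in fromBufferToNPYNoCopy() is easiest to see with a small numpy sketch. An interleaved RtAudio buffer stores samples frame by frame, so viewing it as (nframes, nchannels) is both the correct de-interleaving and a C-contiguous view, which is why SimpleNewFromData() can now wrap the raw pointer without the old .transpose(). The array below is only a stand-in for the raw device buffer; all names are illustrative.

    import numpy as np

    # Hypothetical interleaved buffer: 4 frames x 2 channels, as RtAudio
    # delivers it, i.e. L0 R0 L1 R1 L2 R2 L3 R3 (frame-major, channel-minor).
    raw = np.array([0, 100, 1, 101, 2, 102, 3, 103], dtype=np.int16)

    # New layout (dims = [nframes, nchannels]): each row is one frame, each
    # column one channel, and the view stays C-contiguous.
    new_view = raw.reshape(4, 2)
    assert new_view.flags['C_CONTIGUOUS']
    assert new_view[2, 1] == 102          # frame 2, second channel

    # Old layout (dims = [nchannels, nframes] followed by .transpose()): the
    # same memory is read channel-major, which does not match an interleaved
    # buffer and yields a non-contiguous view.
    old_view = raw.reshape(2, 4).transpose()
    assert not old_view.flags['C_CONTIGUOUS']
    assert old_view[2, 1] == 3            # not the expected 102

This sketch only illustrates the memory layout with reshape(); the callback itself wraps the device buffer via PyArray_SimpleNewFromData(), as shown in the patch.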