diff --git a/lasp/c/lasp_siggen.c b/lasp/c/lasp_siggen.c
index b10e65d..ee8c0e5 100644
--- a/lasp/c/lasp_siggen.c
+++ b/lasp/c/lasp_siggen.c
@@ -5,7 +5,7 @@
 // Description:
 // Signal generator implementation
 //////////////////////////////////////////////////////////////////////
-/* #define TRACERPLUS (-5) */
+#define TRACERPLUS (-5)
 #include "lasp_siggen.h"
 #include "lasp_alloc.h"
 #include "lasp_assert.h"
diff --git a/lasp/device/lasp_daqconfig.py b/lasp/device/lasp_daqconfig.py
index 9e3196d..f27cdef 100644
--- a/lasp/device/lasp_daqconfig.py
+++ b/lasp/device/lasp_daqconfig.py
@@ -42,6 +42,8 @@ class DAQConfiguration:
     Initialize a device descriptor

     Args:
+        duplex_mode: Set device to duplex mode, if possible
+        monitor_gen: If set to True, add a monitor channel to the recording.
         input_device_name: ASCII name with which to open the device when
         connected
         outut_device_name: ASCII name with which to open the device when
         connected
@@ -75,6 +77,7 @@ class DAQConfiguration:
     en_output_rate: int
     en_input_channels: list

+    monitor_gen: bool = False

     def getEnabledChannels(self):
         en_channels = []
diff --git a/lasp/lasp_avstream.py b/lasp/lasp_avstream.py
index 047024e..6c44007 100644
--- a/lasp/lasp_avstream.py
+++ b/lasp/lasp_avstream.py
@@ -6,6 +6,7 @@ Description: Read data from image stream and record sound at the same time
 import cv2 as cv
 from .lasp_atomic import Atomic
 from threading import Thread, Condition, Lock
+import numpy as np
 import time

 from .device import (RtAudio, DeviceInfo, DAQConfiguration,
@@ -18,36 +19,31 @@ video_x, video_y = 640, 480


 class AvType:
-    """
-    Specificying the type of data, for adding and removing callbacks from the
-    stream.
-    """
-    audio_input = 0
-    audio_output = 1
-    video = 2
+    """Specifying the type of data, for adding and removing callbacks from
+    the stream."""
+    audio_input = 1
+    audio_output = 2
+    video = 4


 class AvStream:
-    """
-    Audio and video data stream, to which callbacks can be added for processing
-    the data.
-    """
+    """Audio and video data stream, to which callbacks can be added for
+    processing the data."""

     def __init__(self, device: DeviceInfo,
                  avtype: AvType,
                  daqconfig: DAQConfiguration,
                  video=None):
-        """
-        Open a stream for audio in/output and video input. For audio output, by
-        default all available channels are opened for outputting data.
+        """Open a stream for audio in/output and video input. For audio output,
+        by default all available channels are opened for outputting data.

         Args:
             device: DeviceInfo for the audio device
             avtype: Type of stream. Input, output or duplex

             daqconfig: DAQConfiguration instance. If duplex mode flag is set,
-            please make sure that no output_device is None, as in that case the
+            please make sure that output_device is None, as in that case the
             output config will be taken from the input device.
             video:
         """
@@ -56,13 +52,18 @@ class AvStream:
         self._device = device
         self.avtype = avtype
         self.duplex_mode = daqconfig.duplex_mode
+        self.monitor_gen = daqconfig.monitor_gen

         # Determine highest input channel number
         channelconfigs = daqconfig.en_input_channels

         self.channel_names = []

-        self.sensitivity = self.daqconfig.getSensitivities()
+        if daqconfig.monitor_gen:
+            assert self.duplex_mode
+            self.channel_names.append('Generated signal')
+            self.sensitivity = np.concatenate([np.array([1.]),
+                                               self.sensitivity])

         rtaudio_inputparams = None
         rtaudio_outputparams = None
@@ -71,8 +72,8 @@ class AvStream:

         if self.duplex_mode or avtype == AvType.audio_output:
             rtaudio_outputparams = {'deviceid': device.index,
-                                    # TODO: Add option to specify the number of output channels to use
-                                    'nchannels': 1, #device.outputchannels,
+                                    # TODO: Add option to specify the number of output channels to use
+                                    'nchannels': 1,  # device.outputchannels,
                                     'firstchannel': 0}
             self.sampleformat = daqconfig.en_output_sample_format
             self.samplerate = int(daqconfig.en_output_rate)
@@ -93,24 +94,27 @@ class AvStream:
         try:
             self._rtaudio = RtAudio()
             self.blocksize = self._rtaudio.openStream(
-                rtaudio_outputparams,   # Outputparams
-                rtaudio_inputparams,    # Inputparams
-                self.sampleformat,      # Sampleformat
-                self.samplerate,
-                self.nframes_per_block, # Buffer size in frames
-                self._audioCallback)
+                    rtaudio_outputparams,  # Outputparams
+                    rtaudio_inputparams,  # Inputparams
+                    self.sampleformat,  # Sampleformat
+                    self.samplerate,
+                    self.nframes_per_block,  # Buffer size in frames
+                    self._audioCallback)

         except Exception as e:
             raise RuntimeError(f'Could not initialize DAQ device: {str(e)}')

+        # Fill in numpy data type, and sample width
         self.numpy_dtype = get_numpy_dtype_from_format_string(
             self.sampleformat)
         self.sampwidth = get_sampwidth_from_format_string(
             self.sampleformat)

+        # Counters for the number of frames that have come in
         self._aframectr = Atomic(0)
         self._vframectr = Atomic(0)

+        # Lock
         self._callbacklock = Lock()

         self._running = Atomic(False)
@@ -118,32 +122,34 @@ class AvStream:
         self._video = video
         self._video_started = Atomic(False)
+
+        # Storage for callbacks, specified by type
         self._callbacks = {
             AvType.audio_input: [],
             AvType.audio_output: [],
             AvType.video: []
         }
+
+        # Possible, but not tested for a long time: store video
         self._videothread = None

     def close(self):
         self._rtaudio.closeStream()
+        self._rtaudio = None

     def nCallbacks(self):
-        """
-        Returns the current number of installed callbacks
-        """
+        """Returns the current number of installed callbacks."""
         return len(self._callbacks[AvType.audio_input]) + \
-            len(self._callbacks[AvType.audio_output]) + \
-            len(self._callbacks[AvType.video])
+               len(self._callbacks[AvType.audio_output]) + \
+               len(self._callbacks[AvType.video])

     def addCallback(self, cb: callable, cbtype: AvType):
-        """
-        Add as stream callback to the list of callbacks
-        """
+        """Add a stream callback to the list of callbacks."""
         with self._callbacklock:
             outputcallbacks = self._callbacks[AvType.audio_output]
             if cbtype == AvType.audio_output and len(outputcallbacks) > 0:
-                raise RuntimeError('Only one audio output callback can be allowed')
+                raise RuntimeError(
+                    'Only one audio output callback can be allowed')

             if cb not in self._callbacks[cbtype]:
                 self._callbacks[cbtype].append(cb)
@@ -154,10 +160,8 @@ class AvStream:
                 self._callbacks[cbtype].remove(cb)

     def start(self):
-        """
-        Start the stream, which means the callbacks are called with stream
-        data (audio/video)
-        """
+        """Start the stream, which means the callbacks are called with stream
+        data (audio/video)"""
         if self._running:
             raise RuntimeError('Stream already started')
@@ -199,23 +203,33 @@ class AvStream:
         print('stopped videothread')

     def _audioCallback(self, indata, outdata, nframes, streamtime):
-        """
-        This is called (from a separate thread) for each audio block.
-        """
-        self._aframectr += 1
+        """This is called (from a separate thread) for each audio block."""
+        self._aframectr += nframes
         with self._callbacklock:
-            for cb in self._callbacks[AvType.audio_input]:
-                try:
-                    cb(indata, outdata, self._aframectr())
-                except Exception as e:
-                    print(e)
-                    return 1
+
+            # Count the number of output callbacks. If no output callbacks are
+            # present, and there should be output callbacks, we explicitly set
+            # the output buffer to zero
+            noutput_cb = len(self._callbacks[AvType.audio_output])
+            shouldhaveoutput = (self.avtype == AvType.audio_output or
+                                self.duplex_mode)
+            if noutput_cb == 0 and shouldhaveoutput:
+                outdata[:, :] = 0
+
+            # Loop over callbacks
             for cb in self._callbacks[AvType.audio_output]:
                 try:
                     cb(indata, outdata, self._aframectr())
                 except Exception as e:
                     print(e)
                     return 1
+            for cb in self._callbacks[AvType.audio_input]:
+                try:
+                    cb(indata, outdata, self._aframectr())
+                except Exception as e:
+                    print(e)
+                    return 1
+
         return 0 if self._running else 1

     def stop(self):
diff --git a/lasp/lasp_measurement.py b/lasp/lasp_measurement.py
index 8acbcff..b3d0972 100644
--- a/lasp/lasp_measurement.py
+++ b/lasp/lasp_measurement.py
@@ -164,6 +164,12 @@ class Measurement:
         self.N = (self.nblocks * self.blocksize)
         self.T = self.N / self.samplerate

+        try:
+            self._channel_names = f.attrs['channel_names']
+        except KeyError:
+            # No channel names found in measurement file
+            self._channel_names = [f'{i}' for i in range(self.nchannels)]
+
         # comment = read-write thing
         try:
             self._comment = f.attrs['comment']
@@ -189,6 +195,10 @@ class Measurement:
         """
         return os.path.splitext(self.fn_base)[0]

+    @property
+    def channelNames(self):
+        return self._channel_names
+
     @contextmanager
     def file(self, mode='r'):
         """
diff --git a/lasp/lasp_record.py b/lasp/lasp_record.py
index c60c222..069045c 100644
--- a/lasp/lasp_record.py
+++ b/lasp/lasp_record.py
@@ -3,7 +3,6 @@
 """
 Read data from stream and record sound and video at the same time
 """
-import numpy as np
 from .lasp_atomic import Atomic
 from threading import Condition
 from .lasp_avstream import AvType, AvStream
@@ -19,13 +18,15 @@ class RecordStatus:

 class Recording:

-    def __init__(self, fn, stream, rectime=None, wait = True,
+    def __init__(self, fn: str, stream: AvStream,
+                 rectime: float=None, wait: bool = True,
                  progressCallback=None):
         """
         Args:
             fn: Filename to record to. extension is added
-            stream: AvStream instance to record from
+            stream: AvStream instance to record from. Should have input
+            channels!
             rectime: Recording time, None for infinite
         """
         ext = '.h5'
@@ -33,6 +34,8 @@ class Recording:
             fn += ext

         self._stream = stream
+        if stream.avtype != AvType.audio_input:
+            raise RuntimeError('Stream does not have any input channels')
         self.blocksize = stream.blocksize
         self.samplerate = stream.samplerate
         self._running = Atomic(False)
@@ -43,7 +46,7 @@ class Recording:
         self._video_frame_positions = []
         self._curT_rounded_to_seconds = 0

-        self._aframeno = Atomic(0)
+        self._ablockno = Atomic(0)
         self._vframeno = 0

         self._progressCallback = progressCallback
@@ -53,28 +56,38 @@ class Recording:
         self._deleteFile = False

     def setDelete(self, val: bool):
+        """
+        Set the delete flag. If set, the measurement file is deleted at the end
+        of the recording. Typically used for cleaning up after canceling a
+        recording.
+        """
         self._deleteFile = val

     def __enter__(self):
         """
-        with self._recording(wait=False):
+        with Recording(fn, stream, wait=False):
             event_loop_here()

         or:

-        with Recording(wait=True):
+        with Recording(fn, stream, wait=True):
             pass
         """

         stream = self._stream
         f = self._f

+        nchannels = stream.nchannels
+        if stream.monitor_gen:
+            nchannels += 1
+
+        self.monitor_gen = stream.monitor_gen
         self._ad = f.create_dataset('audio',
-                                    (1, stream.blocksize, stream.nchannels),
+                                    (1, stream.blocksize, nchannels),
                                     dtype=stream.numpy_dtype,
                                     maxshape=(None, stream.blocksize,
-                                              stream.nchannels),
+                                              nchannels),
                                     compression='gzip'
                                     )
         if stream.hasVideo():
@@ -91,6 +104,7 @@ class Recording:
         f.attrs['nchannels'] = stream.nchannels
         f.attrs['blocksize'] = stream.blocksize
         f.attrs['sensitivity'] = stream.sensitivity
+        f.attrs['channel_names'] = stream.channel_names
         f.attrs['time'] = time.time()

         self._running <<= True
@@ -119,7 +133,7 @@ class Recording:
             stream.removeCallback(self._aCallback, AvType.audio_input)
             if stream.hasVideo():
                 stream.removeCallback(self._vCallback, AvType.video_input)
-            f['video_frame_positions'] = self._video_frame_positions
+            self._f['video_frame_positions'] = self._video_frame_positions

         self._f.close()
         print('\nEnding record')
@@ -130,10 +144,9 @@ class Recording:
                 print(f'Error deleting file: {self._fn}')

-
     def _aCallback(self, indata, outdata, aframe):
-        curT = self._aframeno()*self.blocksize/self.samplerate
+        curT = self._ablockno()*self.blocksize/self.samplerate
         recstatus = RecordStatus(
             curT = curT,
             done = False)
@@ -157,12 +170,16 @@ class Recording:
                 self._progressCallback(recstatus)
             return

-        self._ad.resize(self._aframeno()+1, axis=0)
-        self._ad[self._aframeno(), :, :] = indata
-        self._aframeno += 1
+        self._ad.resize(self._ablockno()+1, axis=0)
+        if self.monitor_gen:
+            self._ad[self._ablockno(), :, 0] = outdata
+            self._ad[self._ablockno(), :, 1:] = indata
+        else:
+            self._ad[self._ablockno(), :, :] = indata
+        self._ablockno += 1

     def _vCallback(self, frame, framectr):
-        self._video_frame_positions.append(self._aframeno())
+        self._video_frame_positions.append(self._ablockno())
         vframeno = self._vframeno
         self._vd.resize(vframeno+1, axis=0)
         self._vd[vframeno, :, :] = frame
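
Usage note: with monitor_gen enabled, the generated output signal is stored as the first channel ('Generated signal') of the recording, and the channel names are written to and read back from the measurement file. The snippet below is a minimal sketch of how the changed pieces fit together; the construction of the DeviceInfo and DAQConfiguration objects and the Measurement constructor signature are not part of this patch and are assumed here for illustration only.

    # Sketch only: `device` (DeviceInfo) and `daqconfig` (DAQConfiguration) are
    # assumed to come from the existing device-discovery code, untouched by this
    # patch. Only AvStream, Recording and Measurement.channelNames are shown above.
    from lasp.lasp_avstream import AvStream, AvType
    from lasp.lasp_record import Recording
    from lasp.lasp_measurement import Measurement

    daqconfig.duplex_mode = True    # monitor_gen asserts that duplex mode is set
    daqconfig.monitor_gen = True    # prepend the generated signal to the recording

    # Recording requires an input stream (it raises if avtype != audio_input)
    stream = AvStream(device, AvType.audio_input, daqconfig)
    stream.start()                  # assumed: stream runs during the recording

    with Recording('meas', stream, rectime=5., wait=True):
        pass                        # blocks until the recording time has elapsed
    stream.stop()

    # First channel name is 'Generated signal', followed by the input channels
    m = Measurement('meas.h5')      # assumed constructor signature
    print(m.channelNames)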