diff --git a/.gitignore b/.gitignore
index c88706a..c937b44 100644
--- a/.gitignore
+++ b/.gitignore
@@ -18,3 +18,4 @@ test/test_workers
 doc
 LASP.egg-info
 lasp_octave_fir.*
+lasp/aps_ui.py
diff --git a/lasp/CMakeLists.txt b/lasp/CMakeLists.txt
index 0ef002e..65536cd 100644
--- a/lasp/CMakeLists.txt
+++ b/lasp/CMakeLists.txt
@@ -12,7 +12,23 @@ include_directories(
     c
     )
 
+# add the command to generate the source code
+# add_custom_command (
+#   OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/Table.h
+#   COMMAND MakeTable ${CMAKE_CURRENT_BINARY_DIR}/Table.h
+#   DEPENDS MakeTable
+#   )
+
+add_custom_command(
+  OUTPUT "aps_ui.py"
+  COMMAND pyside-uic ${CMAKE_CURRENT_SOURCE_DIR}/ui/aps_ui.ui -o ${CMAKE_CURRENT_SOURCE_DIR}/aps_ui.py
+  DEPENDS "ui/aps_ui.ui"
+  )
+add_custom_target(ui ALL DEPENDS "aps_ui.py")
+
 set_source_files_properties(wrappers.c PROPERTIES COMPILE_FLAGS
   "${CMAKE_C_FLAGS} ${CYTHON_EXTRA_C_FLAGS}")
 cython_add_module(wrappers wrappers.pyx)
 target_link_libraries(wrappers lasp_lib )
+
+
diff --git a/lasp/fir_design/__init__.py b/lasp/fir_design/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/lasp/lasp_atomic.py b/lasp/lasp_atomic.py
new file mode 100644
index 0000000..9f10a13
--- /dev/null
+++ b/lasp/lasp_atomic.py
@@ -0,0 +1,51 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""
+Provides a simple atomic variable:
+
+>>> a = Atomic(0)
+
+Retrieve the value:
+>>> b = a()
+Set a new value:
+>>> a <<= b
+Get conversion to boolean:
+>>> if a:
+...     do_something()
+
+Atomic increment:
+>>> a += 1
+
+@author: J.A. de Jong - ASCEE
+"""
+from threading import Lock
+
+class Atomic:
+    def __init__(self, val):
+        self._val = val
+        self._lock = Lock()
+
+    def __iadd__(self, toadd):
+        with self._lock:
+            self._val += toadd
+        return self
+
+    def __isub__(self, tosub):
+        with self._lock:
+            self._val -= tosub
+        return self
+
+    def __bool__(self):
+        with self._lock:
+            # __bool__ must return a real bool, not the wrapped value
+            return bool(self._val)
+
+    def __ilshift__(self, other):
+        with self._lock:
+            self._val = other
+        return self
+
+    def __call__(self):
+        with self._lock:
+            return self._val
+
\ No newline at end of file
diff --git a/lasp/lasp_avstream.py b/lasp/lasp_avstream.py
new file mode 100644
index 0000000..65dca11
--- /dev/null
+++ b/lasp/lasp_avstream.py
@@ -0,0 +1,186 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""
+Created on Sat Mar 10 08:28:03 2018
+
+Description: read data from an image stream and record sound at the same time.
+"""
+import cv2 as cv
+import sounddevice as sd
+from .lasp_atomic import Atomic
+from threading import Thread, Condition, Lock
+import time
+__all__ = ['AvType', 'AvStream']
+
+# %%
+blocksize = 2048
+video_x, video_y = 640, 480
+dtype, sampwidth = 'int32', 4
+
+class AvType:
+    video = 0
+    audio = 1
+
+class AvStream:
+    def __init__(self, audiodeviceno=None, video=None, nchannels=None, samplerate=None):
+
+        audiodevice, audiodeviceno = self._findDevice(audiodeviceno)
+        if nchannels is None:
+            self.nchannels = audiodevice['max_input_channels']
+            if self.nchannels == 0:
+                raise RuntimeError('Device has no input channels')
+        else:
+            self.nchannels = nchannels
+
+        self.audiodeviceno = audiodeviceno
+        if samplerate is None:
+            self.samplerate = audiodevice['default_samplerate']
+        else:
+            self.samplerate = samplerate
+
+        self.blocksize = blocksize
+
+        self.video_x, self.video_y = video_x, video_y
+        self.dtype, self.sampwidth = dtype, sampwidth
+
+        self._aframectr = Atomic(0)
+        self._vframectr = Atomic(0)
+
+        self._callbacklock = Lock()
+
+        self._running = Atomic(False)
+        self._running_cond = Condition()
+
+        self._video = video
+        self._video_started = Atomic(False)
+        self._callbacks = []
+        self._audiothread = None
+        self._videothread = None
+
+    def addCallback(self, cb):
+        """
+        Add a callback that is called for every new audio or video frame.
+        """
+        with self._callbacklock:
+            if cb not in self._callbacks:
+                self._callbacks.append(cb)
+
+    def removeCallback(self, cb):
+        with self._callbacklock:
+            if cb in self._callbacks:
+                self._callbacks.remove(cb)
+
+    def _findDevice(self, deviceno):
+
+        if deviceno is None:
+            deviceno = 0
+            devices = sd.query_devices()
+            found = []
+            for device in devices:
+                name = device['name']
+                if 'Umik' in name:
+                    found.append((device, deviceno))
+                elif 'nanoSHARC' in name:
+                    found.append((device, deviceno))
+                deviceno += 1
+
+            if len(found) == 0:
+                print('Please choose one of the following:')
+                print(sd.query_devices())
+                raise RuntimeError('Could not find a proper device')
+
+            return found[0]
+        else:
+            return (sd.query_devices(deviceno, kind='input'), deviceno)
+
+    def start(self):
+        """
+        Start the audio stream and, if video is enabled, the video thread.
+        """
+
+        if self._running:
+            raise RuntimeError('Stream already started')
+
+        assert self._audiothread is None
+        assert self._videothread is None
+
+        self._running <<= True
+        self._audiothread = Thread(target=self._audioThread)
+        if self._video is not None:
+            self._videothread = Thread(target=self._videoThread)
+            self._videothread.start()
+        else:
+            self._video_started <<= True
+        self._audiothread.start()
+
+    def _audioThread(self):
+        # Audio input stream; samples are delivered to _audioCallback as int32
+        stream = sd.InputStream(
+            device=self.audiodeviceno,
+            dtype=self.dtype,
+            blocksize=self.blocksize,
+            channels=self.nchannels,
+            samplerate=self.samplerate,
+            callback=self._audioCallback)
+
+        with stream:
+            with self._running_cond:
+                while self._running:
+                    self._running_cond.wait()
+        print('stopped audiothread')
+
+    def _videoThread(self):
+        cap = cv.VideoCapture(self._video)
+        if not cap.isOpened():
+            cap.open(self._video)
+        vframectr = 0
+        loopctr = 0
+        while self._running:
+            ret, frame = cap.read()
+            # print(frame.shape)
+            if ret:
+                if vframectr == 0:
+                    self._video_started <<= True
+                with self._callbacklock:
+                    for cb in self._callbacks:
+                        cb(AvType.video, frame, self._aframectr(), vframectr)
+                vframectr += 1
+                self._vframectr += 1
+            else:
+
+                if loopctr == 10:
+                    print('Error: no video capture!')
+                time.sleep(0.2)
+                loopctr += 1
+
+        cap.release()
+        print('stopped videothread')
+
+    def _audioCallback(self, indata, nframes, time, status):
+        """This is called (from a separate thread) for each audio block."""
+        if not self._video_started:
+            return
+
+        with self._callbacklock:
+            for cb in self._callbacks:
+                cb(AvType.audio, indata, self._aframectr(), self._vframectr())
+        self._aframectr += 1
+
+    def stop(self):
+        self._running <<= False
+        with self._running_cond:
+            self._running_cond.notify()
+        self._audiothread.join()
+        self._audiothread = None
+        if self._video is not None:
+            self._videothread.join()
+            self._videothread = None
+        self._aframectr <<= 0
+        self._vframectr <<= 0
+        self._video_started <<= False
+
+    def isStarted(self):
+        return self._running()
+
+    def hasVideo(self):
+        return self._video is not None
diff --git a/lasp/lasp_playback.py b/lasp/lasp_playback.py
new file mode 100644
index 0000000..64c5a4e
--- /dev/null
+++ b/lasp/lasp_playback.py
@@ -0,0 +1,126 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""
+Created on Sat Mar 10 08:28:03 2018
+
+Description: play back audio (and optionally video) from a measurement file.
+"""
+import cv2 as cv
+import numpy as np
+import queue
+import sounddevice as sd
+import time
+from matplotlib.pyplot import plot, show
+from .lasp_atomic import Atomic
+from threading import Thread, Condition
+import h5py
+
+class Playback:
+    """
+    Play back a single audio channel from a measurement file, with video if present.
+    """
+    def __init__(self, fn1, channel=0, video=True, verbose=True):
+        """
+        Open the measurement file and read its metadata.
+        """
+        ext = '.h5'
+        if ext not in fn1:
+            fn = fn1 + ext
+        else:
+            fn = fn1
+
+        print('Filename: ', fn)
+        self._fn = fn
+
+        self.channel = channel
+        self._video = video
+        self._aframectr = Atomic(0)
+        self._running = Atomic(False)
+        self._running_cond = Condition()
+        if video:
+            self._video_queue = queue.Queue()
+
+        with h5py.File(fn, 'r') as f:
+            self.samplerate = f.attrs['samplerate']
+            self.nchannels = f.attrs['nchannels']
+            self.blocksize = f.attrs['blocksize']
+            self.nblocks = f['audio'].shape[0]
+            if verbose:
+                print('Sample rate: ', self.samplerate)
+                print('Number of audio frames: ', self.nblocks*self.blocksize)
+                print('Recording time: ', self.nblocks*self.blocksize/self.samplerate)
+
+
+            if video:
+                try:
+                    f['video']
+                    self._video_frame_positions = f['video_frame_positions'][:]
+                except KeyError:
+                    print('No video available in measurement file. Disabling video')
+                    self._video = False
+
+    @property
+    def T(self):
+        return self.nblocks*self.blocksize/self.samplerate
+
+    def start(self):
+        with h5py.File(self._fn, 'r') as f:
+            stream = sd.OutputStream(samplerate=self.samplerate,
+                                     blocksize=self.blocksize,
+                                     channels=1,
+                                     dtype='int32', callback=self.audio_callback)
+            self._ad = f['audio']
+            self._running <<= True
+            if self._video:
+                self._vd = f['video']
+                videothread = Thread(target=self.video_thread_fcn)
+                videothread.start()
+
+            with stream:
+                try:
+                    with self._running_cond:
+                        while self._running:
+                            self._running_cond.wait()
+                except KeyboardInterrupt:
+                    print('Keyboard interrupt. Quit playback')
+
+            if self._video:
+                videothread.join()
+
+    def audio_callback(self, outdata, frames, time, status):
+        """
+        sounddevice output callback: write one block of audio to outdata.
+        """
+        aframectr = self._aframectr()
+        if aframectr < self.nblocks:
+            outdata[:, 0] = self._ad[aframectr, :, self.channel]
+            self._aframectr += 1
+        else:
+            self._running <<= False
+            with self._running_cond:
+                self._running_cond.notify()
+
+    def video_thread_fcn(self):
+        frame_ctr = 0
+        nframes = self._vd.shape[0]
+        video_frame_positions = self._video_frame_positions
+        assert video_frame_positions.shape[0] == nframes
+
+        while self._running and frame_ctr < nframes:
+            frame = self._vd[frame_ctr]
+
+            # Find corresponding audio frame
+            corsp_aframe = video_frame_positions[frame_ctr]
+
+            while self._aframectr() <= corsp_aframe:
+                print('Sleep video...')
+                time.sleep(self.blocksize/self.samplerate/2)
+
+            cv.imshow("Video output. Press 'q' to quit", frame)
+            if cv.waitKey(1) & 0xFF == ord('q'):
+                self._running <<= False
+
+            frame_ctr += 1
+        print('Ending video playback thread')
+        cv.destroyAllWindows()
+
diff --git a/lasp/lasp_record.py b/lasp/lasp_record.py
new file mode 100644
index 0000000..cdbc9ad
--- /dev/null
+++ b/lasp/lasp_record.py
@@ -0,0 +1,124 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""
+Created on Sat Mar 10 08:28:03 2018
+
+Description: record audio (and optionally video) from an AvStream to a file.
+"""
+import numpy as np
+from .lasp_atomic import Atomic
+from threading import Condition
+from .lasp_avstream import AvType
+import h5py, time
+
+# %%
+class Recording:
+    def __init__(self, fn, stream, rectime=None):
+
+        ext = '.h5'
+        if ext not in fn:
+            fn += ext
+        self._stream = stream
+        self.blocksize = stream.blocksize
+        self.samplerate = stream.samplerate
+        self._running = Atomic(False)
+        self._running_cond = Condition()
+        self.rectime = rectime
+        self._fn = fn
+
+        self._video_frame_positions = []
+
+        self._aframeno = Atomic(0)
+        self._vframeno = 0
+
+    def start(self):
+        stream = self._stream
+
+        with h5py.File(self._fn, 'w') as f:
+            self._ad = f.create_dataset('audio',
+                                        (1, stream.blocksize, stream.nchannels),
+                                        dtype=stream.dtype,
+                                        maxshape=(None, stream.blocksize,
+                                                  stream.nchannels),
+                                        compression='gzip'
+                                        )
+            if stream.hasVideo():
+                video_x, video_y = stream.video_x, stream.video_y
+                self._vd = f.create_dataset('video',
+                                            (1, video_y, video_x, 3),
+                                            dtype='uint8',
+                                            maxshape=(None, video_y, video_x, 3),
+                                            compression='gzip'
+                                            )
+
+            f.attrs['samplerate'] = stream.samplerate
+            f.attrs['nchannels'] = stream.nchannels
+            f.attrs['blocksize'] = stream.blocksize
+            f.attrs['time'] = time.time()
+            self._running <<= True
+            # Videothread is going to start
+
+            if not stream.isStarted():
+                stream.start()
+
+            stream.addCallback(self._callback)
+            with self._running_cond:
+                try:
+                    print('Starting record....')
+                    while self._running:
+                        self._running_cond.wait()
+                except KeyboardInterrupt:
+                    print("Keyboard interrupt on record")
+                    self._running <<= False
+
+            stream.removeCallback(self._callback)
+
+            if stream.hasVideo():
+                f['video_frame_positions'] = self._video_frame_positions
+
+
+        print('\nEnding record')
+
+    def stop(self):
+        self._running <<= False
+        with self._running_cond:
+            self._running_cond.notify()
+
+    def _callback(self, _type, data, aframe, vframe):
+        if not self._stream.isStarted():
+            self._running <<= False
+            with self._running_cond:
+                self._running_cond.notify()
+
+        if _type == AvType.audio:
+            self._aCallback(data, aframe)
+        elif _type == AvType.video:
+            self._vCallback(data)
+
+    def _aCallback(self, frames, aframe):
+#        print(self._aframeno())
+        print('.', end='')
+        curT = self._aframeno()*self.blocksize/self.samplerate
+        if self.rectime is not None and curT > self.rectime:
+            # We are done!
+            self._running <<= False
+            with self._running_cond:
+                self._running_cond.notify()
+            return
+
+        self._ad.resize(self._aframeno()+1, axis=0)
+        self._ad[self._aframeno(), :, :] = frames
+        self._aframeno += 1
+
+
+    def _vCallback(self, frame):
+        self._video_frame_positions.append(self._aframeno())
+        vframeno = self._vframeno
+        self._vd.resize(vframeno+1, axis=0)
+        self._vd[vframeno, :, :] = frame
+        self._vframeno += 1
+
+if __name__ == '__main__':
+    from .lasp_avstream import AvStream
+    rec = Recording('test', AvStream(), rectime=5)  # record for 5 seconds
+    rec.start()
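
Example usage (a minimal sketch, not part of the patch): how the new AvStream, Recording and Playback classes are intended to fit together. The file name 'test', the 2 second recording time and the webcam index 0 are illustrative values only; when no device number is given, AvStream picks the first matching input device (Umik or nanoSHARC) and raises otherwise.

    from lasp.lasp_avstream import AvStream
    from lasp.lasp_record import Recording
    from lasp.lasp_playback import Playback

    # Find a supported input device; video=0 also captures webcam frames.
    stream = AvStream(video=0)

    # Record roughly two seconds of audio (and video) into test.h5.
    rec = Recording('test', stream, rectime=2.0)
    rec.start()          # blocks until rectime is reached
    stream.stop()

    # Play back channel 0 of the recording; video is shown if it was stored.
    pb = Playback('test', channel=0)
    pb.start()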