Update API for the AvStream.

parent 0109056b5f
commit b88ec2904c
@@ -1,4 +1,6 @@
 #!/usr/bin/python3
 __all__ = ['DAQConfiguration']
-from .lasp_daqconfig import DAQConfiguration, DAQInputChannel
-from .lasp_rtaudio import RtAudio
+from .lasp_daqconfig import DAQConfiguration, DAQInputChannel, DeviceInfo
+from .lasp_rtaudio import (RtAudio,
+                           get_numpy_dtype_from_format_string,
+                           get_sampwidth_from_format_string)
@@ -9,6 +9,8 @@ Data Acquistiion (DAQ) device descriptors, and the DAQ devices themselves
 
 """
 from dataclasses import dataclass, field
+import numpy as np
+
 
 @dataclass
 class DeviceInfo:
@@ -82,7 +84,9 @@ class DAQConfiguration:
         return en_channels
 
     def getSensitivities(self):
-        return [float(channel.sensitivity) for channel in self.getEnabledChannels()]
+        return np.array(
+            [float(channel.sensitivity) for channel in
+             self.getEnabledChannels()])
 
     @staticmethod
    def loadConfigs():
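
Note that getSensitivities() now returns a NumPy array instead of a plain list, so per-channel scaling can be applied by broadcasting. A minimal stand-alone sketch of the pattern (the sensitivity values below are made-up stand-ins, not values from this commit):

```python
import numpy as np

# Stand-in for DAQConfiguration.getSensitivities(), which now returns an array:
sensitivities = np.array([1.0, 0.5, 2.0])   # one entry per enabled channel
block = np.ones((2048, 3))                  # (nframes, nchannels) of raw samples
calibrated = block / sensitivities          # broadcasts along the channel axis
print(calibrated.shape)                     # -> (2048, 3)
```
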
@@ -101,12 +101,12 @@ cdef extern from "RtAudio.h" nogil:
     void showWarnings(bool value)
 
 _formats_strkey = {
-    '8-bit integers': (RTAUDIO_SINT8, 1),
-    '16-bit integers': (RTAUDIO_SINT16,2),
-    '24-bit integers': (RTAUDIO_SINT24,3),
-    '32-bit integers': (RTAUDIO_SINT32,4),
-    '32-bit floats': (RTAUDIO_FLOAT32, 4),
-    '64-bit floats': (RTAUDIO_FLOAT64, 8),
+    '8-bit integers': (RTAUDIO_SINT8, 1, np.int8),
+    '16-bit integers': (RTAUDIO_SINT16, 2, np.int16),
+    '24-bit integers': (RTAUDIO_SINT24, 3),
+    '32-bit integers': (RTAUDIO_SINT32, 4, np.int32),
+    '32-bit floats': (RTAUDIO_FLOAT32, 4, np.float32),
+    '64-bit floats': (RTAUDIO_FLOAT64, 8, np.float64),
 }
 _formats_rtkey = {
     RTAUDIO_SINT8: ('8-bit integers', 1, cnp.NPY_INT8),
@@ -116,6 +116,12 @@ _formats_rtkey = {
     RTAUDIO_FLOAT32: ('32-bit floats', 4, cnp.NPY_FLOAT32),
     RTAUDIO_FLOAT64: ('64-bit floats', 8, cnp.NPY_FLOAT64),
 }
 
+
+def get_numpy_dtype_from_format_string(format_string):
+    return _formats_strkey[format_string][-1]
+
+def get_sampwidth_from_format_string(format_string):
+    return _formats_strkey[format_string][-2]
 
 cdef class _Stream:
     cdef:
         object pyCallback
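
The two helpers added above look up the NumPy dtype and the sample width (in bytes) for a given sample-format string, i.e. the keys of `_formats_strkey`. A short sketch of how they could be used; the `lasp.device` import path is an assumption based on the re-export in the first hunk:

```python
# Assumed import path; the first hunk re-exports these names from .lasp_rtaudio.
from lasp.device import (get_numpy_dtype_from_format_string,
                         get_sampwidth_from_format_string)

fmt = '16-bit integers'
print(get_numpy_dtype_from_format_string(fmt))  # numpy.int16 (last tuple element)
print(get_sampwidth_from_format_string(fmt))    # 2 (bytes per sample)
```
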
@@ -1,4 +1,4 @@
-#!/usr/bin/env python3
+#!/usr/bin/env python3.6
 # -*- coding: utf-8 -*-
 """
 Description: Read data from image stream and record sound at the same time
@@ -6,62 +6,100 @@ Description: Read data from image stream and record sound at the same time
 import cv2 as cv
 from .lasp_atomic import Atomic
 from threading import Thread, Condition, Lock
 
 import time
-from .device import DAQConfiguration
-import numpy as np
+from .device import (RtAudio, DeviceInfo, DAQConfiguration,
+                     get_numpy_dtype_from_format_string,
+                     get_sampwidth_from_format_string)
 
 __all__ = ['AvType', 'AvStream']
 
 video_x, video_y = 640, 480
-dtype, sampwidth = 'int16', 2
 
 
 class AvType:
-    video = 0
-    audio = 1
+    """
+    Specificying the type of data, for adding and removing callbacks from the
+    stream.
+    """
+    audio_input = 0
+    audio_output = 1
+    video = 2
 
 
 class AvStream:
+    """
+    Audio and video data stream, to which callbacks can be added for processing
+    the data.
+    """
+
     def __init__(self,
-                 rtaudio,
-                 output_device,
-                 input_device,
-                 daqconfig, video=None):
+                 device: DeviceInfo,
+                 avtype: AvType,
+                 daqconfig: DAQConfiguration,
+                 video=None):
+        """
+        Open a stream for audio in/output and video input. For audio output, by
+        default all available channels are opened for outputting data.
+
+        Args:
+            device: DeviceInfo for the audio device
+            avtype: Type of stream. Input, output or duplex
+
+            daqconfig: DAQConfiguration instance. If duplex mode flag is set,
+            please make sure that no output_device is None, as in that case the
+            output config will be taken from the input device.
+            video:
+        """
 
         self.daqconfig = daqconfig
-        self.input_device = input_device
-        self.output_device = output_device
+        self._device = device
+        self.avtype = avtype
 
         # Determine highest input channel number
         channelconfigs = daqconfig.en_input_channels
-        max_input_channel = 0
 
-        self._rtaudio = rtaudio
-        self.samplerate = int(daqconfig.en_input_rate)
         self.sensitivity = self.daqconfig.getSensitivities()
 
-        for i, channelconfig in enumerate(channelconfigs):
-            if channelconfig.channel_enabled:
-                self.nchannels = i+1
+        rtaudio_inputparams = None
+        rtaudio_outputparams = None
+
+        if daqconfig.duplex_mode or avtype == AvType.audio_output:
+            rtaudio_outputparams = {'deviceid': device.index,
+                                    'nchannels': device.outputchannels,
+                                    'firstchannel': 0}
+            self.sampleformat = daqconfig.en_output_sample_format
+            self.samplerate = int(daqconfig.en_output_rate)
+
+        if avtype == AvType.audio_input:
+            for i, channelconfig in enumerate(channelconfigs):
+                if channelconfig.channel_enabled:
+                    self.nchannels = i+1
+            rtaudio_inputparams = {'deviceid': device.index,
+                                   'nchannels': self.nchannels,
+                                   'firstchannel': 0}
+
+            # Here, we override the sample format in case of duplex mode.
+            self.sampleformat = daqconfig.en_input_sample_format
+            self.samplerate = int(daqconfig.en_input_rate)
 
         try:
-            if input_device is not None:
-                inputparams = {'deviceid': input_device.index,
-                               'nchannels': self.nchannels,
-                               'firstchannel': 0}
-
-            self.blocksize = rtaudio.openStream(
-                None, # Outputparams
-                inputparams, #Inputparams
-                daqconfig.en_input_sample_format, # Sampleformat
-                self.samplerate,
-                2048,
-                self._audioCallback)
+            self._rtaudio = RtAudio()
+            self.blocksize = self._rtaudio.openStream(
+                rtaudio_outputparams, # Outputparams
+                rtaudio_inputparams, # Inputparams
+                self.sampleformat, # Sampleformat
+                self.samplerate,
+                2048, # Buffer size in frames
+                self._audioCallback)
 
         except Exception as e:
             raise RuntimeError(f'Could not initialize DAQ device: {str(e)}')
 
-        self.video_x, self.video_y = video_x, video_y
-        self.dtype, self.sampwidth = dtype, sampwidth
+        self.numpy_dtype = get_numpy_dtype_from_format_string(
+            self.sampleformat)
+        self.sampwidth = get_sampwidth_from_format_string(
+            self.sampleformat)
 
         self._aframectr = Atomic(0)
         self._vframectr = Atomic(0)
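
The constructor now takes a single DeviceInfo plus an AvType instead of the old (rtaudio, output_device, input_device, daqconfig) signature, and creates its own RtAudio instance internally. A hedged sketch of opening an input stream under the new API; how the DeviceInfo and DAQConfiguration instances are obtained is assumed and not shown in this commit:

```python
from lasp.lasp_avstream import AvStream, AvType
from lasp.device import DAQConfiguration

# Assumed: select a device and a matching configuration elsewhere.
input_device = ...   # a DeviceInfo describing the capture device
config = ...         # a DAQConfiguration with the en_input_* settings filled in

stream = AvStream(input_device,        # DeviceInfo (replaces rtaudio + input_device)
                  AvType.audio_input,  # stream type instead of separate devices
                  config)              # DAQConfiguration
stream.start()
```
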
@@ -73,7 +111,11 @@ class AvStream:
 
         self._video = video
         self._video_started = Atomic(False)
-        self._callbacks = []
+        self._callbacks = {
+            AvType.audio_input: [],
+            AvType.audio_output: [],
+            AvType.video: []
+        }
         self._videothread = None
 
     def close(self):
@@ -85,18 +127,21 @@ class AvStream:
         """
         return len(self._callbacks)
 
-    def addCallback(self, cb):
+    def addCallback(self, cb: callable, cbtype: AvType):
         """
         Add as stream callback to the list of callbacks
         """
         with self._callbacklock:
-            if cb not in self._callbacks:
-                self._callbacks.append(cb)
+            outputcallbacks = self._callbacks[AvType.audio_output]
+            if cbtype == AvType.audio_output and len(outputcallbacks) > 0:
+                raise RuntimeError('Only one audio output callback can be allowed')
+            if cb not in self._callbacks[cbtype]:
+                self._callbacks[cbtype].append(cb)
 
-    def removeCallback(self, cb):
+    def removeCallback(self, cb, cbtype: AvType):
         with self._callbacklock:
-            if cb in self._callbacks:
-                self._callbacks.remove(cb)
+            if cb in self._callbacks[cbtype]:
+                self._callbacks[cbtype].remove(cb)
 
     def start(self):
         """
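
Callbacks are now stored per stream type, so both addCallback and removeCallback take an AvType. A sketch, assuming `stream` is the AvStream from the constructor above; the callback signatures follow the calls in the video loop and `_audioCallback` below (input callbacks receive the data block and the audio frame counter, video callbacks the frame and the video frame counter):

```python
def on_audio(indata, aframectr):
    # Called from the audio thread for every captured block.
    print('audio block', aframectr, indata.shape)

def on_video(frame, vframectr):
    # Called from the video thread for every captured frame.
    print('video frame', vframectr)

stream.addCallback(on_audio, AvType.audio_input)
stream.addCallback(on_video, AvType.video)
# ... later:
stream.removeCallback(on_audio, AvType.audio_input)
stream.removeCallback(on_video, AvType.video)
```
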
@@ -130,16 +175,15 @@ class AvStream:
             if vframectr == 0:
                 self._video_started <<= True
             with self._callbacklock:
-                for cb in self._callbacks:
-                    cb(AvType.video, frame, self._aframectr(), vframectr)
+                for cb in self._callbacks[AvType.video]:
+                    cb(frame, vframectr)
             vframectr += 1
             self._vframectr += 1
         else:
+            loopctr += 1
             if loopctr == 10:
                 print('Error: no video capture!')
             time.sleep(0.2)
-            loopctr += 1
 
         cap.release()
         print('stopped videothread')
@@ -149,10 +193,13 @@ class AvStream:
        This is called (from a separate thread) for each audio block.
        """
        self._aframectr += 1
+       output_signal = None
        with self._callbacklock:
-           for cb in self._callbacks:
-               cb(AvType.audio, indata, self._aframectr(), self._vframectr())
-       return None, 0 if self._running else 1
+           for cb in self._callbacks[AvType.audio_input]:
+               cb(indata, self._aframectr())
+           for cb in self._callbacks[AvType.audio_output]:
+               output_data = cb(indata, self._aframectr())
+       return output_signal, 0 if self._running else 1
 
     def stop(self):
         self._running <<= False
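
The output branch above polls the registered audio-output callback once per captured block and collects its return value; addCallback (above) enforces that at most one such callback exists. A minimal sketch of an output callback, assuming `stream` is the AvStream instance and using the blocksize and numpy_dtype attributes set in the constructor; the channel count here is an illustrative assumption:

```python
import numpy as np

def on_output(indata, aframectr):
    # Produce one block of output samples per callback invocation.
    # Two output channels is an assumption made for this sketch only.
    return np.zeros((stream.blocksize, 2), dtype=stream.numpy_dtype)

stream.addCallback(on_output, AvType.audio_output)  # only one output callback allowed
```
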
@@ -164,6 +211,7 @@ class AvStream:
         self._aframectr <<= 0
         self._vframectr <<= 0
         self._video_started <<= False
+        self._rtaudio.stopStream()
 
     def isRunning(self):
         return self._running()
@@ -1,4 +1,4 @@
-#!/usr/bin/env python3
+#!/usr/bin/python3
 # -*- coding: utf-8 -*-
 """
 Read data from stream and record sound and video at the same time
@@ -12,6 +12,7 @@ import time
 
+
 class Recording:
 
     def __init__(self, fn, stream, rectime=None):
         """
 
@@ -23,6 +24,7 @@ class Recording:
         ext = '.h5'
         if ext not in fn:
             fn += ext
+
         self._stream = stream
         self.blocksize = stream.blocksize
         self.samplerate = stream.samplerate
@@ -43,7 +45,7 @@ class Recording:
         with h5py.File(self._fn, 'w') as f:
             self._ad = f.create_dataset('audio',
                                         (1, stream.blocksize, stream.nchannels),
-                                        dtype=stream.dtype,
+                                        dtype=stream.numpy_dtype,
                                         maxshape=(None, stream.blocksize,
                                                   stream.nchannels),
                                         compression='gzip'
@@ -69,7 +71,10 @@ class Recording:
         if not stream.isRunning():
             stream.start()
 
-        stream.addCallback(self._callback)
+        stream.addCallback(self._aCallback, AvType.audio_input)
+        if stream.hasVideo():
+            stream.addCallback(self._aCallback, AvType.audio_input)
+
         with self._running_cond:
             try:
                 print('Starting record....')
@@ -79,9 +84,10 @@ class Recording:
                 print("Keyboard interrupt on record")
                 self._running <<= False
 
-        stream.removeCallback(self._callback)
+        stream.removeCallback(self._aCallback, AvType.audio_input)
 
         if stream.hasVideo():
+            stream.removeCallback(self._vCallback, AvType.video_input)
             f['video_frame_positions'] = self._video_frame_positions
 
         print('\nEnding record')
@@ -91,18 +97,8 @@ class Recording:
             with self._running_cond:
                 self._running_cond.notify()
 
-    def _callback(self, _type, data, aframe, vframe):
-        if not self._stream.isRunning():
-            self._running <<= False
-            with self._running_cond:
-                self._running_cond.notify()
-
-        if _type == AvType.audio:
-            self._aCallback(data, aframe)
-        elif _type == AvType.video:
-            self._vCallback(data)
-
     def _aCallback(self, frames, aframe):
 
         curT = self._aframeno()*self.blocksize/self.samplerate
         curT_rounded_to_seconds = int(curT)
         if curT_rounded_to_seconds > self._curT_rounded_to_seconds:
@@ -122,7 +118,7 @@ class Recording:
         self._ad[self._aframeno(), :, :] = frames
         self._aframeno += 1
 
-    def _vCallback(self, frame):
+    def _vCallback(self, frame, framectr):
         self._video_frame_positions.append(self._aframeno())
         vframeno = self._vframeno
         self._vd.resize(vframeno+1, axis=0)
@@ -19,7 +19,7 @@ parser.add_argument('--input-daq', '-i', help=device_help, type=str,
 args = parser.parse_args()
 
 
-from lasp.lasp_avstream import AvStream
+from lasp.lasp_avstream import AvStream, AvType
 from lasp.lasp_record import Recording
 from lasp.device import DAQConfiguration, RtAudio
 
@@ -42,9 +42,8 @@ except KeyError:
 
 print(input_device)
 
-stream = AvStream(rtaudio,
-                  None, # No output device
-                  input_device,
+stream = AvStream(input_device,
+                  AvType.audio_input,
                   config)
 
 rec = Recording(args.filename, stream, args.duration)