Recording is working again. Sometimes it hangs when closing the stream.
parent 3ea745245f
commit 0109056b5f
@@ -1,6 +1,9 @@
import numpy as np
cimport numpy as cnp

# Do this, or segfaults will be your destination
cnp.import_array()

DEF LASP_FLOAT = "@LASP_FLOAT@"
DEF LASP_DEBUG_CYTHON = "@LASP_DEBUG_CYTHON@"
@@ -74,6 +74,16 @@ class DAQConfiguration:

    en_input_channels: list

    def getEnabledChannels(self):
        en_channels = []
        for chan in self.en_input_channels:
            if chan.channel_enabled:
                en_channels.append(chan)
        return en_channels

    def getSensitivities(self):
        return [float(channel.sensitivity) for channel in self.getEnabledChannels()]

    @staticmethod
    def loadConfigs():
        """
@@ -109,12 +109,12 @@ _formats_strkey = {
    '64-bit floats': (RTAUDIO_FLOAT64, 8),
}
_formats_rtkey = {
    RTAUDIO_SINT8: ('8-bit integers', 1),
    RTAUDIO_SINT16: ('16-bit integers',2),
    RTAUDIO_SINT8: ('8-bit integers', 1, cnp.NPY_INT8),
    RTAUDIO_SINT16: ('16-bit integers',2, cnp.NPY_INT16),
    RTAUDIO_SINT24: ('24-bit integers',3),
    RTAUDIO_SINT32: ('32-bit integers',4),
    RTAUDIO_FLOAT32: ('32-bit floats', 4),
    RTAUDIO_FLOAT64: ('64-bit floats', 8),
    RTAUDIO_SINT32: ('32-bit integers',4, cnp.NPY_INT32),
    RTAUDIO_FLOAT32: ('32-bit floats', 4, cnp.NPY_FLOAT32),
    RTAUDIO_FLOAT64: ('64-bit floats', 8, cnp.NPY_FLOAT64),
}
cdef class _Stream:
    cdef:
@@ -133,14 +133,15 @@ cdef class _Stream:
# It took me quite a long time to fully understand Cython's idiosyncrasies
# concerning C(++) callbacks, the GIL and passing Python objects as pointers
# into C(++) functions. But finally, here it is!
cdef object fromBufferToNPYNoCopy(buffer_format_type,
cdef object fromBufferToNPYNoCopy(
                                  cnp.NPY_TYPES buffer_format_type,
                                  void* buf,
                                  size_t nchannels,
                                  size_t nframes):
    cdef cnp.npy_intp[2] dims = [nchannels, nframes];
    cdef cnp.npy_intp[2] dims = [nchannels, nframes]

    array = cnp.PyArray_SimpleNewFromData(2, dims, buffer_format_type,
                                          buf)
    array = cnp.PyArray_SimpleNewFromData(2, &dims[0], buffer_format_type,
                                          buf).transpose()

    return array
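The helper above wraps the raw RtAudio block in a NumPy array without copying: PyArray_SimpleNewFromData only stores a pointer to the existing buffer, so the resulting array is valid only as long as that buffer is. A rough pure-Python sketch of the same no-copy idea using np.frombuffer (the helper name and the frombuffer route are illustrative, not part of this commit):

    import numpy as np

    def view_audio_block(buf, nchannels, nframes, dtype):
        """View a raw sample buffer as a 2-D NumPy array without copying.

        buf is anything exposing the buffer protocol (bytes, memoryview, ...).
        The result aliases that memory, so copy it before the driver reuses it.
        """
        arr = np.frombuffer(buf, dtype=dtype, count=nchannels * nframes)
        # Same layout assumption as the Cython code above: dims (nchannels,
        # nframes), followed by a transpose so callers see (nframes, nchannels).
        return arr.reshape(nchannels, nframes).T

For a stereo int16 block, view_audio_block(raw, 2, 1024, np.int16) would give a (1024, 2) view over the same memory.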
@@ -163,7 +164,9 @@ cdef int audioCallback(void* outputBuffer,
    Calls the Python callback function and converts data

    """
    cdef int rval = 0
    cdef:
        int rval = 0
        cnp.NPY_TYPES npy_format

    with gil:
        if status == RTAUDIO_INPUT_OVERFLOW:
@@ -177,8 +180,17 @@ cdef int audioCallback(void* outputBuffer,
        # Obtain stream information
        npy_input = None
        if stream.hasInput:
            assert inputBuffer != NULL
            # cdef
            try:
                assert inputBuffer != NULL
                npy_format = _formats_rtkey[stream.sampleformat][2]
                npy_input = fromBufferToNPYNoCopy(
                    npy_format,
                    inputBuffer,
                    stream.inputParams.nChannels,
                    nFrames)

            except Exception as e:
                print('Exception in Cython callback: ', str(e))
        try:
            npy_output, rval = stream.pyCallback(npy_input,
                                                 nFrames,
@@ -192,15 +204,15 @@ cdef int audioCallback(void* outputBuffer,
            if npy_output is None:
                print('No output buffer given!')
                return 1
            IF LASP_DEBUG_CYTHON:
                try:
                    assert outputBuffer != NULL, "Bug: RtAudio does not give output buffer!"
                    assert npy_output.shape[0] == stream.outputParams.nChannels, "Bug: channel mismatch in output buffer!"
                    assert npy_output.shape[1] == nFrames, "Bug: frame mismatch in output buffer!"
                    assert npy_output.itemsize == stream.sampleSize, "Bug: invalid sample type in output buffer!"
                except AssertionError as e:
                    print(e)
            fromNPYToBuffer(npy_output, outputBuffer)
            IF LASP_DEBUG_CYTHON:
                try:
                    assert outputBuffer != NULL, "Bug: RtAudio does not give output buffer!"
                    assert npy_output.shape[0] == stream.outputParams.nChannels, "Bug: channel mismatch in output buffer!"
                    assert npy_output.shape[1] == nFrames, "Bug: frame mismatch in output buffer!"
                    assert npy_output.itemsize == stream.sampleSize, "Bug: invalid sample type in output buffer!"
                except AssertionError as e:
                    print(e)
            fromNPYToBuffer(npy_output, outputBuffer)
    return rval

cdef void errorCallback(RtAudioError.Type _type,const string& errortxt) nogil:
@@ -253,6 +265,7 @@ cdef class RtAudio:
                          sampleformats = sampleformats,
                          prefsamplerate = devinfo.preferredSampleRate)

    @cython.nonecheck(True)
    def openStream(self,object outputParams,
                   object inputParams,
                   str sampleformat,
@@ -291,6 +304,7 @@ cdef class RtAudio:

        self._stream = _Stream()
        self._stream.pyCallback = pyCallback
        self._stream.sampleformat = _formats_strkey[sampleformat][0]

        if outputParams is not None:
            rtOutputParams_ptr = &self._stream.outputParams
@@ -300,7 +314,7 @@ cdef class RtAudio:
            self._stream.hasOutput = True

        if inputParams is not None:
            rtOutputParams_ptr = &self._stream.inputParams
            rtInputParams_ptr = &self._stream.inputParams
            rtInputParams_ptr.deviceId = inputParams['deviceid']
            rtInputParams_ptr.nChannels = inputParams['nchannels']
            rtInputParams_ptr.firstChannel = inputParams['firstchannel']
@@ -321,6 +335,9 @@ cdef class RtAudio:
        except Exception as e:
            print('Exception occured in stream opening: ', str(e))
            self._stream = None
            raise

        return self._stream.bufferFrames

    def startStream(self):
        self._rtaudio.startStream()
@@ -22,22 +22,41 @@ class AvType:

class AvStream:
    def __init__(self,
                 rtaudio_input,
                 rtaudio_output,
                 input_device,
                 output_device, daqconfig, video=None):

                 rtaudio,
                 output_device,
                 input_device,
                 daqconfig, video=None):

        self.daqconfig = daqconfig
        self.input_device = input_device
        self.output_device = output_device

        # Determine highest input channel number
        channelconfigs = daqconfig.en_input_channels
        max_input_channel = 0

        self._rtaudio = rtaudio
        self.samplerate = int(daqconfig.en_input_rate)
        self.sensitivity = self.daqconfig.getSensitivities()

        for i, channelconfig in enumerate(channelconfigs):
            if channelconfig.channel_enabled:
                self.nchannels = i+1

        try:
            daq = DAQDevice(daqconfig)
            self.nchannels = len(daq.channels_en)
            self.samplerate = daq.input_rate
            self.blocksize = daq.blocksize
            self.sensitivity = np.asarray(daqconfig.input_sensitivity)[
                daq.channels_en]
            if input_device is not None:
                inputparams = {'deviceid': input_device.index,
                               'nchannels': self.nchannels,
                               'firstchannel': 0}

                self.blocksize = rtaudio.openStream(
                    None, # Outputparams
                    inputparams, #Inputparams
                    daqconfig.en_input_sample_format, # Sampleformat
                    self.samplerate,
                    2048,
                    self._audioCallback)

        except Exception as e:
            raise RuntimeError(f'Could not initialize DAQ device: {str(e)}')
@@ -55,9 +74,11 @@ class AvStream:
        self._video = video
        self._video_started = Atomic(False)
        self._callbacks = []
        self._audiothread = None
        self._videothread = None

    def close(self):
        self._rtaudio.closeStream()

    def nCallbacks(self):
        """
        Returns the current number of installed callbacks
@@ -86,31 +107,15 @@ class AvStream:
        if self._running:
            raise RuntimeError('Stream already started')

        assert self._audiothread is None
        assert self._videothread is None

        self._running <<= True
        self._audiothread = Thread(target=self._audioThread)
        if self._video is not None:
            self._videothread = Thread(target=self._videoThread)
            self._videothread.start()
        else:
            self._video_started <<= True
        self._audiothread.start()

    def _audioThread(self):
        # Raw stream to allow for in24 packed data type
        try:
            daq = DAQDevice(self.daqconfig)
            # Get a single block first and do not process it. This one often
            # contains quite some rubbish.
            data = daq.read()
            while self._running:
                # print('read data...')
                data = daq.read()
                self._audioCallback(data)
        except RuntimeError as e:
            print(f'Runtime error occured during audio capture: {str(e)}')
        self._rtaudio.startStream()

    def _videoThread(self):
        cap = cv.VideoCapture(self._video)
@@ -139,22 +144,20 @@ class AvStream:
        cap.release()
        print('stopped videothread')

    def _audioCallback(self, indata):
        """This is called (from a separate thread) for each audio block."""
        if not self._video_started:
            return

    def _audioCallback(self, indata, nframes, streamtime):
        """
        This is called (from a separate thread) for each audio block.
        """
        self._aframectr += 1
        with self._callbacklock:
            for cb in self._callbacks:
                cb(AvType.audio, indata, self._aframectr(), self._vframectr())
        self._aframectr += 1
        return None, 0 if self._running else 1

    def stop(self):
        self._running <<= False
        with self._running_cond:
            self._running_cond.notify()
        self._audiothread.join()
        self._audiothread = None
        if self._video:
            self._videothread.join()
            self._videothread = None
@@ -1,8 +1,6 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Mar 10 08:28:03 2018

Read data from stream and record sound and video at the same time
"""
import numpy as np
@@ -34,6 +32,7 @@ class Recording:
        self._fn = fn

        self._video_frame_positions = []
        self._curT_rounded_to_seconds = 0

        self._aframeno = Atomic(0)
        self._vframeno = 0
@@ -104,8 +103,14 @@ class Recording:
            self._vCallback(data)

    def _aCallback(self, frames, aframe):
        print('.', end='')
        curT = self._aframeno()*self.blocksize/self.samplerate
        curT_rounded_to_seconds = int(curT)
        if curT_rounded_to_seconds > self._curT_rounded_to_seconds:
            self._curT_rounded_to_seconds = curT_rounded_to_seconds
            print(f'{curT_rounded_to_seconds}', end='', flush=True)
        else:
            print('.', end='', flush=True)

        if self.rectime is not None and curT > self.rectime:
            # We are done!
            self._running <<= False
@@ -26,7 +26,7 @@ parser.add_argument('--gain-setting', '-g',
                    type=float, default=gain_default)

parser.add_argument(
    'fn', help='File name of calibration measurement', type=str)
    'fn', help='File name of calibration measurement', type=str, default=None)

parser.add_argument('--channel', help='Channel of the device to calibrate, '
                    + 'default = 0',
@@ -51,10 +51,11 @@ prms = P_REF*10**(args.spl/20)
sens = Vrms / prms

print(f'Computed sensitivity: {sens[args.channel]:.5} V/Pa')
print('Searching for files in directory to apply sensitivity value to...')
dir_ = os.path.dirname(args.fn)
for f in os.listdir(dir_):
    yn = input(f'Apply sensitivity to {f}? [Y/n]')
    if yn in ['', 'Y', 'y']:
        meas = Measurement(os.path.join(dir_, f))
        meas.sensitivity = sens
if args.fn:
    print('Searching for files in directory to apply sensitivity value to...')
    dir_ = os.path.dirname(args.fn)
    for f in os.listdir(dir_):
        yn = input(f'Apply sensitivity to {f}? [Y/n]')
        if yn in ['', 'Y', 'y']:
            meas = Measurement(os.path.join(dir_, f))
            meas.sensitivity = sens
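For reference, the arithmetic named in the hunk header above: prms = P_REF*10**(spl/20) converts the calibrator's level in dB SPL into an RMS pressure (re P_REF = 20 µPa), and sens = Vrms/prms is then the sensitivity in V/Pa. A quick check with made-up numbers (the 94 dB calibrator level and the voltage are example values, not from this commit):

    P_REF = 20e-6                    # reference pressure in Pa
    spl = 94.0                       # calibrator level in dB SPL
    Vrms = 0.05                      # measured RMS voltage in V

    prms = P_REF * 10**(spl / 20)    # ~1.002 Pa for 94 dB SPL
    sens = Vrms / prms               # ~0.0499 V/Pa
    print(f'Computed sensitivity: {sens:.5} V/Pa')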
@@ -1,5 +1,8 @@
#!/usr/bin/python
#!/usr/bin/python3
import argparse


parser = argparse.ArgumentParser(
    description='Acquire data and store a measurement file'
)
@@ -8,38 +11,49 @@ parser.add_argument('filename', type=str,
                    ' Extension is automatically added.')
parser.add_argument('--duration', '-d', type=float,
                    help='The recording duration in [s]')
parser.add_argument('--comment', '-c', type=str,
                    help='Add a measurement comment, optionally')

device_help = 'DAQ Device to record from'
parser.add_argument('--input-daq', '-i', help=device_help, type=str,
                    choices=['roga', 'umik', 'nidaq', 'default'], default='roga')
                    default='Default')

args = parser.parse_args()

device_str = args.input_daq

if device_str == 'nidaq':

    # Not-integrated way to record with the NI USB 4431 DAQ device
    from lasp.device.record_ni import USBDAQRecording
    rec = USBDAQRecording(args.filename, [0, 1])
    rec.start()
    exit(0)


from lasp.lasp_record import Recording
from lasp.lasp_avstream import AvStream
from lasp.device.lasp_daqconfig import default_soundcard, roga_plugndaq, umik
from lasp.lasp_record import Recording
from lasp.device import DAQConfiguration, RtAudio

if 'roga' == device_str:
    device = roga_plugndaq
elif 'default' == device_str:
    device = default_soundcard
elif 'umik' == device_str:
    device = umik
config = DAQConfiguration.loadConfigs()[args.input_daq]

print(config)
rtaudio = RtAudio()
count = rtaudio.getDeviceCount()
devices = [rtaudio.getDeviceInfo(i) for i in range(count)]

input_devices = {}
for device in devices:
    if device.inputchannels >= 0:
        input_devices[device.name] = device

try:
    input_device = input_devices[config.input_device_name]
except KeyError:
    raise RuntimeError(f'Input device {config.input_device_name} not available')

print(input_device)

stream = AvStream(rtaudio,
                  None, # No output device
                  input_device,
                  config)

stream = AvStream(device)
rec = Recording(args.filename, stream, args.duration)
rec.start()

print('Stopping stream...')
stream.stop()

print('Stream stopped')
print('Closing stream...')
stream.close()
print('Stream closed')