#!/usr/bin/env python3.6
# -*- coding: utf-8 -*-
"""
Description: Read data from image stream and record sound at the same time
"""
#import cv2 as cv
from .lasp_atomic import Atomic
from threading import Thread, Condition, Lock
import numpy as np
import time
from .device import (RtAudio, DeviceInfo, DAQConfiguration,
get_numpy_dtype_from_format_string,
get_sampwidth_from_format_string, AvType)
__all__ = ['AvStream']

video_x, video_y = 640, 480


class AvStream:
    """Audio and video data stream, to which callbacks can be added for
    processing the data."""

    def __init__(self,
                 device: DeviceInfo,
                 avtype: AvType,
                 daqconfig: DAQConfiguration,
                 video=None):
"""Open a stream for audio in/output and video input. For audio output,
by default all available channels are opened for outputting data.
Args:
device: DeviceInfo for the audio device
avtype: Type of stream. Input, output or duplex
daqconfig: DAQConfiguration instance. If duplex mode flag is set,
please make sure that output_device is None, as in that case the
output config will be taken from the input device.
video:
"""
self.daqconfig = daqconfig
self.device = device
self.avtype = avtype
self.duplex_mode = daqconfig.duplex_mode
        self.output_channel_names = [
            ch.channel_name for ch in daqconfig.getEnabledOutputChannels()]
        if daqconfig.monitor_gen:
            # Copy the output lists: they are extended with the input
            # channels below, which should not alter the output channel lists.
            self.input_channel_names = list(self.output_channel_names)
            self.input_sensitivity = list(
                daqconfig.getEnabledOutputChannelSensitivities())
        else:
            self.input_channel_names = []
            self.input_sensitivity = []
        self.input_sensitivity += \
            daqconfig.getEnabledInputChannelSensitivities()
        self.input_sensitivity = np.asarray(self.input_sensitivity)
self.input_channel_names += [
ch.channel_name for ch in daqconfig.getEnabledInputChannels()]
self.input_samplerate = float(daqconfig.en_input_rate)
self.output_samplerate = float(daqconfig.en_output_rate)
        # Counters for the number of audio and video frames that have come in
self._aframectr = Atomic(0)
self._vframectr = Atomic(0)
        # Lock that protects the callback lists below
        self._callbacklock = Lock()
self._running = Atomic(False)
self._running_cond = Condition()
self._video = video
self._video_started = Atomic(False)
# Storage for callbacks, specified by type
self._callbacks = {
AvType.audio_input: [],
AvType.audio_output: [],
AvType.video: []
}
        # Possible, but not tested for a long time: storing video
self._videothread = None
self._rtaudio = RtAudio(daqconfig.api)
self.blocksize = self._rtaudio.openStream(self)
# Fill in numpy data type, and sample width
self.input_numpy_dtype = get_numpy_dtype_from_format_string(
daqconfig.en_input_sample_format)
self.output_numpy_dtype = get_numpy_dtype_from_format_string(
daqconfig.en_output_sample_format)
self.input_sampwidth = get_sampwidth_from_format_string(
daqconfig.en_input_sample_format)
self.output_sampwidth = get_sampwidth_from_format_string(
daqconfig.en_output_sample_format)
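
    # The dtype and sample width attributes above are meant for consumers of
    # this stream. A minimal sketch (illustration only; `raw` is a
    # hypothetical bytes object obtained elsewhere):
    #
    #   data = np.frombuffer(raw, dtype=stream.input_numpy_dtype)
    #   data = data.reshape(-1, len(stream.input_channel_names))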

    def close(self):
        """Close the underlying audio stream and release the handle."""
        self._rtaudio.closeStream()
        self._rtaudio = None

    def nCallbacks(self):
"""Returns the current number of installed callbacks."""
return len(self._callbacks[AvType.audio_input]) + \
len(self._callbacks[AvType.audio_output]) + \
len(self._callbacks[AvType.video])

    def addCallback(self, cb: callable, cbtype: AvType):
        """Add a stream callback to the list of callbacks."""
        with self._callbacklock:
            outputcallbacks = self._callbacks[AvType.audio_output]
            if cbtype == AvType.audio_output and len(outputcallbacks) > 0:
                raise RuntimeError(
                    'Only one audio output callback is allowed')
if cb not in self._callbacks[cbtype]:
self._callbacks[cbtype].append(cb)

    def removeCallback(self, cb, cbtype: AvType):
        """Remove a callback that was previously added with addCallback."""
with self._callbacklock:
if cb in self._callbacks[cbtype]:
self._callbacks[cbtype].remove(cb)
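
    # A minimal sketch of an input callback (illustration only; the names
    # `print_nframes` and `stream` are hypothetical). Audio callbacks are
    # invoked from the audio thread as cb(indata, outdata, aframectr), where
    # `aframectr` is the running frame count:
    #
    #   def print_nframes(indata, outdata, aframectr):
    #       print(f'Got a block of audio, total frames so far: {aframectr}')
    #
    #   stream.addCallback(print_nframes, AvType.audio_input)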

    def start(self):
        """Start the stream, so that the installed callbacks start receiving
        stream data (audio/video)."""
if self._running:
raise RuntimeError('Stream already started')
assert self._videothread is None
self._running <<= True
if self._video is not None:
self._videothread = Thread(target=self._videoThread)
self._videothread.start()
else:
self._video_started <<= True
self._rtaudio.startStream()

    def _videoThread(self):
        """Video thread function: read frames from the capture source and
        pass them to the installed video callbacks."""
        # OpenCV is imported here, so it is only required when video capture
        # is actually used
        import cv2 as cv
        cap = cv.VideoCapture(self._video)
        if not cap.isOpened():
            cap.open(self._video)
        vframectr = 0
        loopctr = 0
        while self._running:
            ret, frame = cap.read()
            # print(frame.shape)
            if ret:
                if vframectr == 0:
                    self._video_started <<= True
                with self._callbacklock:
                    for cb in self._callbacks[AvType.video]:
                        cb(frame, vframectr)
                vframectr += 1
                self._vframectr += 1
            else:
                loopctr += 1
                if loopctr == 10:
                    print('Error: no video capture!')
            time.sleep(0.2)
        cap.release()
        print('stopped videothread')

    def _audioCallback(self, indata, outdata, nframes):
        """This is called (from a separate thread) for each audio block."""
self._aframectr += nframes
with self._callbacklock:
# Count the number of output callbacks. If no output callbacks are
# present, and there should be output callbacks, we explicitly set
# the output buffer to zero
noutput_cb = len(self._callbacks[AvType.audio_output])
shouldhaveoutput = (self.avtype == AvType.audio_output or
self.daqconfig.duplex_mode)
if noutput_cb == 0 and shouldhaveoutput and outdata is not None:
outdata[:, :] = 0
# Loop over callbacks
            if outdata is not None:
                for cb in self._callbacks[AvType.audio_output]:
                    try:
                        cb(indata, outdata, self._aframectr())
                    except Exception as e:
                        print(e)
                        # Nonzero return values signal the audio backend to
                        # abort/stop the stream
                        return 2
            if indata is not None:
                for cb in self._callbacks[AvType.audio_input]:
                    try:
                        cb(indata, outdata, self._aframectr())
                    except Exception as e:
                        print(e)
                        return 1
        # Return 0 to keep streaming, 1 to stop the stream
        return 0 if self._running else 1

    def stop(self):
        """Stop the stream and the video thread (if any), and reset the
        frame counters."""
        self._running <<= False
        with self._running_cond:
            self._running_cond.notify()
        if self._video is not None:
            self._videothread.join()
            self._videothread = None
self._aframectr <<= 0
self._vframectr <<= 0
self._video_started <<= False
self._rtaudio.stopStream()

    def isRunning(self):
        """Returns True if the stream is currently running."""
        return self._running()

    def hasVideo(self):
        """Returns True if a video source is configured for this stream."""
        return self._video is not None
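

# A minimal usage sketch (illustration only). It assumes `device` and
# `daqconfig` have been obtained elsewhere, e.g. from the device enumeration
# and configuration code in the .device module; those names are not defined
# in this file:
#
#   stream = AvStream(device, AvType.audio_input, daqconfig)
#
#   def handle_block(indata, outdata, aframectr):
#       pass  # process one block of input samples here
#
#   stream.addCallback(handle_block, AvType.audio_input)
#   stream.start()
#   # ... measure for a while ...
#   stream.stop()
#   stream.close()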