# lasp/lasp/device/lasp_rtaudio.pyx
import sys
include "config.pxi"
cimport cython
from .lasp_daqconfig import DeviceInfo
from libcpp.string cimport string
from libcpp.vector cimport vector
from libc.stdlib cimport malloc, free
from libc.stdio cimport printf, fprintf, stderr
from libc.string cimport memcpy, memset
from cpython.ref cimport PyObject, Py_INCREF, Py_DECREF
from libcpp cimport bool
# The numpy (np/cnp) names and libcpp bool are used throughout this module;
# if config.pxi already provides them, the (c)imports below are harmless
# duplicates.
cimport numpy as cnp
import numpy as np
__all__ = ['AvType', 'RtAudio', 'get_numpy_dtype_from_format_string',
'get_sampwidth_from_format_string']
class AvType:
"""Specificying the type of data, for adding and removing callbacks from
the stream."""
audio_input = 1
audio_output = 2
video = 4
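# A minimal usage sketch (an assumption based on the power-of-two values
# above, not something this module requires): the AvType values can be
# combined as bit flags when a callback is interested in more than one
# data type.
#
#   wanted = AvType.audio_input | AvType.audio_output
#   if wanted & AvType.audio_input:
#       pass  # handle audio input blocks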
cdef extern from "RtAudio.h" nogil:
ctypedef unsigned long RtAudioStreamStatus
RtAudioStreamStatus RTAUDIO_INPUT_OVERFLOW
RtAudioStreamStatus RTAUDIO_OUTPUT_UNDERFLOW
cdef cppclass RtAudioError:
ctypedef enum Type:
WARNING
DEBUG_WARNING
UNSPECIFIED
NO_DEVICES_FOUND
INVALID_DEVICE
MEMORY_ERROR
INVALID_PARAMETER
INVALID_USE
DRIVER_ERROR
SYSTEM_ERROR
THREAD_ERROR
ctypedef unsigned long RtAudioStreamFlags
RtAudioStreamFlags RTAUDIO_NONINTERLEAVED
RtAudioStreamFlags RTAUDIO_MINIMIZE_LATENCY
RtAudioStreamFlags RTAUDIO_HOG_DEVICE
RtAudioStreamFlags RTAUDIO_SCHEDULE_REALTIME
RtAudioStreamFlags RTAUDIO_ALSA_USE_DEFAULT
RtAudioStreamFlags RTAUDIO_JACK_DONT_CONNECT
ctypedef unsigned long RtAudioFormat
RtAudioFormat RTAUDIO_SINT8
RtAudioFormat RTAUDIO_SINT16
RtAudioFormat RTAUDIO_SINT24
RtAudioFormat RTAUDIO_SINT32
RtAudioFormat RTAUDIO_FLOAT32
RtAudioFormat RTAUDIO_FLOAT64
ctypedef int (*RtAudioCallback)(void* outputBuffer,
void* inputBuffer,
unsigned int nFramesPerBlock,
double streamTime,
RtAudioStreamStatus status,
void* userData)
ctypedef void (*RtAudioErrorCallback)(RtAudioError.Type _type,
const string& errortxt)
cdef cppclass cppRtAudio "RtAudio":
enum Api:
UNSPECIFIED
LINUX_ALSA
LINUX_PULSE
MACOSX_CORE
WINDOWS_WASAPI
WINDOWS_ASIO
WINDOWS_DS
cppclass DeviceInfo:
bool probed
string name
unsigned int outputChannels
unsigned int inputChannels
unsigned int duplexChannels
bool isDefaultOutput
bool isDefaultInput
vector[unsigned int] sampleRates
unsigned int preferredSampleRate
RtAudioFormat nativeFormats
cppclass StreamOptions:
StreamOptions()
RtAudioStreamFlags flags
unsigned int numberOfBuffers
string streamName
int priority
cppclass StreamParameters:
StreamParameters()
unsigned int deviceId
unsigned int nChannels
unsigned int firstChannel
@staticmethod
void getCompiledApi(vector[cppRtAudio.Api]& apis)
@staticmethod
cppRtAudio.Api getCompiledApiByName(string& name)
@staticmethod
string getApiDisplayName(cppRtAudio.Api api)
@staticmethod
string getApiName(cppRtAudio.Api api)
# RtAudio() except +
cppRtAudio() except +
cppRtAudio(cppRtAudio.Api api) except +
# ~RtAudio() Destructors should not be listed
unsigned int getDeviceCount()
DeviceInfo getDeviceInfo(unsigned int device)
unsigned int getDefaultOutputDevice()
unsigned int getDefaultInputDevice()
void openStream(StreamParameters* outputParameters,
                        StreamParameters* inputParameters,
RtAudioFormat _format,
unsigned int sampleRate,
unsigned int* bufferFrames,
RtAudioCallback callback,
void* userData,
void* StreamOptions,
RtAudioErrorCallback) except +
void closeStream()
void startStream() except +
void stopStream() except +
void abortStream() except +
bool isStreamOpen()
bool isStreamRunning()
double getStreamTime()
void setStreamTime(double) except +
long getStreamLatency()
unsigned int getStreamSampleRate()
void showWarnings(bool value)
cdef extern from "lasp_cppthread.h" nogil:
cdef cppclass CPPThread[T,F]:
CPPThread(F threadfunction, T data)
void join()
void CPPsleep(unsigned int ms)
cdef extern from "lasp_cppqueue.h" nogil:
cdef cppclass SafeQueue[T]:
SafeQueue()
void enqueue(T t)
T dequeue()
size_t size() const
bool empty() const
cdef extern from "atomic" namespace "std" nogil:
cdef cppclass atomic[T]:
T load()
void store(T)
cdef extern from "lasp_pyarray.h":
PyObject* data_to_ndarray(void* data,
int n_rows,int n_cols,
int typenum,
bool transfer_ownership,
bool F_contiguous)
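# data_to_ndarray wraps a raw sample buffer in a Numpy array of shape
# (n_rows, n_cols). With transfer_ownership=True the returned array takes
# over the malloc'ed memory (so the caller must not free it); with False the
# caller remains responsible for the buffer. See its two uses in
# audioCallbackPythonThreadFunction below.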
_formats_strkey = {
'8-bit integers': (RTAUDIO_SINT8, 1, np.int8),
'16-bit integers': (RTAUDIO_SINT16, 2, np.int16),
    # Note: there is no numpy dtype for 24-bit integer samples, hence no
    # dtype entry here (the same holds in _formats_rtkey below).
    '24-bit integers': (RTAUDIO_SINT24, 3),
'32-bit integers': (RTAUDIO_SINT32, 4, np.int32),
'32-bit floats': (RTAUDIO_FLOAT32, 4, np.float32),
'64-bit floats': (RTAUDIO_FLOAT64, 8, np.float64),
}
_formats_rtkey = {
RTAUDIO_SINT8: ('8-bit integers', 1, cnp.NPY_INT8),
RTAUDIO_SINT16: ('16-bit integers', 2, cnp.NPY_INT16),
RTAUDIO_SINT24: ('24-bit integers', 3),
RTAUDIO_SINT32: ('32-bit integers', 4, cnp.NPY_INT32),
RTAUDIO_FLOAT32: ('32-bit floats', 4, cnp.NPY_FLOAT32),
RTAUDIO_FLOAT64: ('64-bit floats', 8, cnp.NPY_FLOAT64),
}
def get_numpy_dtype_from_format_string(format_string):
return _formats_strkey[format_string][-1]
def get_sampwidth_from_format_string(format_string):
return _formats_strkey[format_string][-2]
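# A minimal usage sketch of the two helpers above (values follow directly
# from _formats_strkey):
#
#   get_numpy_dtype_from_format_string('32-bit floats')  # -> np.float32
#   get_sampwidth_from_format_string('32-bit floats')    # -> 4 (bytes/sample)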
# It took me quite a long time to fully understand Cython's idiosyncrasies
# concerning C(++) callbacks, the GIL and passing Python objects as pointers
# into C(++) functions. But finally, here it is!
# cdef void fromNPYToBuffer(cnp.ndarray arr,
# void* buf):
# """
# Copy a Python numpy array over to a buffer
# No checks, just memcpy! Careful!
# """
# memcpy(buf, arr.data, arr.size*arr.itemsize)
cdef void copyChannel(void* to, void* from_,
unsigned bytesperchan,
unsigned toindex,
unsigned fromindex) nogil:
memcpy(<void*> &((<char*> to)[bytesperchan*toindex]),
<void*> &((<char*> from_)[bytesperchan*fromindex]),
bytesperchan)
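# copyChannel relies on the non-interleaved buffer layout requested further
# below via RTAUDIO_NONINTERLEAVED: every channel occupies a contiguous block
# of bytesperchan bytes, so channel k starts at byte offset k*bytesperchan.
# Illustrative call (hypothetical buffers):
#
#   # copy device channel 2 into forwarded channel 0 of a scratch buffer
#   copyChannel(dest, src, bytesperchan, 0, 2)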
ctypedef struct _Stream:
PyObject* pyCallback
RtAudioFormat sampleformat
    # Flag used to signal the thread that it should stop.
atomic[bool] stopThread
# Number of frames per block
unsigned nFramesPerBlock
# Number of bytes per channel
unsigned int nBytesPerChan
# Number of blocks to delay the output before adding to the input
unsigned int outputDelayBlocks
# The structures as used by RtAudio
cppRtAudio.StreamParameters inputParams
cppRtAudio.StreamParameters outputParams
bool* inputChannelsEnabled
bool* outputChannelsEnabled
unsigned ninputchannels_forwarded
unsigned noutputchannels_forwarded
# If these queue pointers are NULL, it means the stream does not have an
# input, or output.
SafeQueue[void*] *outputDelayQueue
SafeQueue[void*] *inputQueue
SafeQueue[void*] *outputQueue
CPPThread[void*, void (*)(void*)] *thread
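# Data-flow overview (as implemented below): the RtAudio thread runs
# audioCallback, which copies the enabled input channels into a freshly
# malloc'ed buffer and pushes it onto inputQueue, and pops ready blocks from
# outputQueue into the device output buffer. audioCallbackPythonThreadFunction
# runs in a separate CPPThread: it wraps those buffers as Numpy arrays and
# calls the Python callback. The optional outputDelayQueue feeds generated
# output back into the input after outputDelayBlocks blocks, which implements
# output monitoring in duplex mode.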
cdef int audioCallback(void* outputbuffer,
void* inputbuffer,
unsigned int nFramesPerBlock,
double streamTime,
RtAudioStreamStatus status,
void* userData) nogil:
"""
Calls the Python callback function and converts data
"""
cdef:
int rval = 0
_Stream* stream
void* outputbuffercpy = NULL
void* inputbuffercpy = NULL
unsigned j, i
unsigned bytesperchan
unsigned noutputchannels_forwarded, ninputchannels_forwarded
bint ch_en
stream = <_Stream*>(userData)
bytesperchan = stream.nBytesPerChan
ninputchannels_forwarded = stream.ninputchannels_forwarded
noutputchannels_forwarded = stream.noutputchannels_forwarded
# with gil:
# print(f'bytesperchan: {bytesperchan}')
# print(f'ninputchannels_forwarded:: {ninputchannels_forwarded}')
# print(f'noutputchannels_forwarded:: {noutputchannels_forwarded}')
# fprintf(stderr, "Stream heartbeat...\n")
# Returning 2 means aborting the stream immediately
if status == RTAUDIO_INPUT_OVERFLOW:
fprintf(stderr, 'Input overflow.\n')
stream.stopThread.store(True)
return 2
elif status == RTAUDIO_OUTPUT_UNDERFLOW:
fprintf(stderr, 'Output underflow.\n')
# stream.stopThread.store(True)
return 0
if nFramesPerBlock != stream.nFramesPerBlock:
        printf('Number of frames mismatch in callback data!\n')
stream.stopThread.store(True)
return 2
if inputbuffer:
# fprintf(stderr, "enter copying input buffer code\n")
# with gil:
# assert stream.inputQueue is not NULL
inputbuffercpy = malloc(bytesperchan*ninputchannels_forwarded)
if not inputbuffercpy:
fprintf(stderr, "Error allocating buffer\n")
return 2
if stream.outputDelayQueue:
if stream.outputDelayQueue.size() > stream.outputDelayBlocks:
outputbuffercpy = stream.outputDelayQueue.dequeue()
memcpy(inputbuffercpy, outputbuffercpy,
bytesperchan*noutputchannels_forwarded)
# Cleanup buffer
free(outputbuffercpy)
outputbuffercpy = NULL
else:
memset(inputbuffercpy, 0,
bytesperchan*noutputchannels_forwarded)
# Starting channel for copying input channels according to the channel
# map
j = stream.noutputchannels_forwarded
else:
j = 0
i = 0
for i in range(stream.inputParams.nChannels):
ch_en = stream.inputChannelsEnabled[i]
if ch_en:
copyChannel(inputbuffercpy, inputbuffer, bytesperchan, j, i)
j+=1
i+=1
stream.inputQueue.enqueue(inputbuffercpy)
if outputbuffer:
# with gil:
# assert stream.outputQueue
# fprintf(stderr, "enter copying output buffer code\n")
if stream.outputQueue.empty():
fprintf(stderr, 'Stream output buffer underflow, zero-ing buffer...\n')
memset(outputbuffer, 0, stream.outputParams.nChannels*bytesperchan)
else:
outputbuffercpy = stream.outputQueue.dequeue()
# fprintf(stderr, 'Copying data to stream output buffer...\n')
j = 0
i = 0
for i in range(stream.outputParams.nChannels):
ch_en = stream.outputChannelsEnabled[i]
if ch_en:
copyChannel(outputbuffer, outputbuffercpy, bytesperchan, i, j)
j+=1
else:
# If channel is not enabled, we set the data to zero
memset(<void*> &((<char*> outputbuffer)[bytesperchan*i]), 0, bytesperchan)
pass
i+=1
if stream.outputDelayQueue:
# fprintf(stderr, "Adding to delay queue\n")
stream.outputDelayQueue.enqueue(outputbuffercpy)
else:
free(outputbuffercpy)
outputbuffercpy = NULL
return 0
cdef void audioCallbackPythonThreadFunction(void* voidstream) nogil:
cdef:
_Stream* stream
cnp.NPY_TYPES npy_format
void* inputbuffer = NULL
void* outputbuffer = NULL
unsigned noutputchannels_forwarded
unsigned ninputchannels_forwarded
unsigned nBytesPerChan
unsigned nFramesPerBlock
void* _TESTDATA
stream = <_Stream*> voidstream
ninputchannels_forwarded = stream.ninputchannels_forwarded
noutputchannels_forwarded = stream.noutputchannels_forwarded
nBytesPerChan = stream.nBytesPerChan
nFramesPerBlock = stream.nFramesPerBlock
with gil:
npy_format = _formats_rtkey[stream.sampleformat][2]
callback = <object> stream.pyCallback
print(f'noutputchannels_forwarded: {noutputchannels_forwarded}')
print(f'ninputchannels_forwarded: {ninputchannels_forwarded}')
print(f'nBytesPerChan: {nBytesPerChan}')
print(f'nFramesPerBlock: {nFramesPerBlock}')
while True:
if stream.stopThread.load() == True:
printf('Stopping thread...\n')
return
if stream.inputQueue:
# fprintf(stderr, "Waiting on input queue\n")
inputbuffer = stream.inputQueue.dequeue()
if not inputbuffer:
printf('Stopping thread...\n')
return
if stream.outputQueue:
# fprintf(stderr, 'Allocating output buffer...\n')
outputbuffer = malloc(nBytesPerChan*noutputchannels_forwarded)
# memset(outputbuffer, 0, nBytesPerChan*noutputchannels_forwarded)
with gil:
# Obtain stream information
npy_input = None
npy_output = None
if stream.inputQueue:
# assert(inputbuffer)
try:
# print(f'========ninputchannels_forwarded: {ninputchannels_forwarded}')
# print(f'========nFramesPerBlock: {nFramesPerBlock}')
# print(f'========npy_format: {npy_format}')
npy_input = <object> data_to_ndarray(
inputbuffer,
nFramesPerBlock,
ninputchannels_forwarded,
npy_format,
True, # Do transfer ownership
True) # F-contiguous is True: data is Fortran-cont.
# fprintf(stderr, "Copying array...\n")
# fprintf(stderr, "End Copying array...\n")
except Exception as e:
print('exception in cython callback for audio input: ', str(e))
return
if stream.outputQueue:
# fprintf(stderr, 'Copying output buffer to Numpy...\n')
try:
npy_output = <object> data_to_ndarray(
outputbuffer,
nFramesPerBlock,
noutputchannels_forwarded,
npy_format,
False, # Do not transfer ownership
True) # F-contiguous
except Exception as e:
print('exception in Cython callback for audio output: ', str(e))
return
try:
# fprintf(stderr, "Python callback...\n")
rval = callback(npy_input,
npy_output,
nFramesPerBlock,
)
# fprintf(stderr, "Return from Python callback...\n")
except Exception as e:
print('Exception in Cython callback: ', str(e))
return
if stream.outputQueue:
# fprintf(stderr, 'Enqueuing output buffer...\n')
stream.outputQueue.enqueue(outputbuffer)
if not stream.inputQueue:
# fprintf(stderr, 'No input queue!\n')
while stream.outputQueue.size() > 10 and not stream.stopThread.load():
# printf('Sleeping...\n')
# No input queue to wait on, so we relax a bit here.
CPPsleep(1);
        # The output buffer is freed by the audio thread, so it should not be
        # touched here.
        outputbuffer = NULL
        # The input buffer memory is now owned by Numpy, so it should not be
        # freed here.
inputbuffer = NULL
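# The Python callback stored in _Stream.pyCallback is invoked from the thread
# function above. A minimal sketch of the expected signature (the function
# name is illustrative; this module only requires the call form used above):
#
#   def audio_callback(npy_input, npy_output, nFramesPerBlock):
#       # npy_input:  Fortran-contiguous (nFramesPerBlock x nin) array,
#       #             or None for output-only streams
#       # npy_output: Fortran-contiguous (nFramesPerBlock x nout) array to be
#       #             filled in place, or None for input-only streams
#       return 0    # the return value is currently not inspected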
cdef void errorCallback(RtAudioError.Type _type,const string& errortxt) nogil:
fprintf(stderr, 'RtAudio error callback called: ')
    fprintf(stderr, '%s', errortxt.c_str())
fprintf(stderr, '\n')
cdef class RtAudio:
cdef:
cppRtAudio* _rtaudio
_Stream* _stream
int api
def __cinit__(self, unsigned int iapi):
cdef:
cppRtAudio.Api api = <cppRtAudio.Api> iapi
self._rtaudio = new cppRtAudio(api)
self._stream = NULL
self._rtaudio.showWarnings(True)
self.api = api
def __dealloc__(self):
if self._stream is not NULL:
# fprintf(stderr, 'Force closing stream...')
self._rtaudio.closeStream()
self.cleanupStream(self._stream)
del self._rtaudio
@staticmethod
def getApi():
cdef:
vector[cppRtAudio.Api] apis
cppRtAudio.getCompiledApi(apis)
apidict = {}
for api in apis:
apidict[<int> api] = {
'displayname': cppRtAudio.getApiDisplayName(api).decode('utf-8'),
'name': cppRtAudio.getApiName(api).decode('utf-8')
}
return apidict
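    # A minimal sketch (hypothetical values): list the compiled backends and
    # pick one of the returned keys to construct an RtAudio instance.
    #
    #   apis = RtAudio.getApi()
    #   # e.g. {1: {'displayname': 'ALSA', 'name': 'alsa'}, ...}
    #   rtaudio = RtAudio(next(iter(apis)))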
cpdef unsigned int getDeviceCount(self):
return self._rtaudio.getDeviceCount()
cpdef unsigned int getDefaultOutputDevice(self):
return self._rtaudio.getDefaultOutputDevice()
cpdef unsigned int getDefaultInputDevice(self):
return self._rtaudio.getDefaultInputDevice()
def getDeviceInfo(self, unsigned int device):
"""
        Return device information for the given device index.
"""
sampleformats = []
cdef:
cppRtAudio.DeviceInfo devinfo
devinfo = self._rtaudio.getDeviceInfo(device)
nf = devinfo.nativeFormats
for format_ in [ RTAUDIO_SINT8, RTAUDIO_SINT16, RTAUDIO_SINT24,
RTAUDIO_SINT32, RTAUDIO_FLOAT32, RTAUDIO_FLOAT64]:
if nf & format_:
sampleformats.append(_formats_rtkey[format_][0])
return DeviceInfo(
api = self.api,
index = device,
probed = devinfo.probed,
name = devinfo.name.decode('utf-8'),
outputchannels = devinfo.outputChannels,
inputchannels = devinfo.inputChannels,
duplexchannels = devinfo.duplexChannels,
samplerates = devinfo.sampleRates,
sampleformats = sampleformats,
prefsamplerate = devinfo.preferredSampleRate)
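    # A minimal device-enumeration sketch built on the methods above (the
    # attribute access assumes DeviceInfo exposes its constructor fields):
    #
    #   rtaudio = RtAudio(next(iter(RtAudio.getApi())))
    #   for idx in range(rtaudio.getDeviceCount()):
    #       info = rtaudio.getDeviceInfo(idx)
    #       print(info.name, info.samplerates, info.sampleformats)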
@cython.nonecheck(True)
def openStream(self,
avstream
):
"""
        Open a stream with the parameters specified by the given AvStream
        instance.

        Args:
            avstream: AvStream instance

        Returns:
            The actual number of frames per block, as chosen by the backend.
"""
if self._stream is not NULL:
raise RuntimeError('Stream is already opened.')
daqconfig = avstream.daqconfig
avtype = avstream.avtype
device = avstream.device
cdef:
bint duplex_mode = daqconfig.duplex_mode
bint monitorOutput = daqconfig.monitor_gen
unsigned int outputDelayBlocks = daqconfig.outputDelayBlocks
cppRtAudio.StreamParameters *rtOutputParams_ptr = NULL
cppRtAudio.StreamParameters *rtInputParams_ptr = NULL
cppRtAudio.StreamOptions streamoptions
size_t sw
unsigned int nFramesPerBlock = int(daqconfig.nFramesPerBlock)
int firstinputchannel, firstoutputchannel
int lastinputchannel, lastoutputchannel
unsigned int ninputchannels_forwarded=0
unsigned int ninputchannels_rtaudio=0
unsigned int noutputchannels_rtaudio=0
unsigned int noutputchannels_forwarded=0
unsigned int samplerate
int i
bint input_stream=False, output_stream=False
bool* inputChannelsEnabled
bool* outputChannelsEnabled
        if daqconfig.nFramesPerBlock > 8192 or daqconfig.nFramesPerBlock < 512:
            raise ValueError('Invalid number of frames per block '
                             '(should be between 512 and 8192)')
        if daqconfig.outputDelayBlocks < 0 or daqconfig.outputDelayBlocks > 10:
            raise ValueError('Invalid number of output delay blocks '
                             '(should be between 0 and 10)')
try:
# Determine sample rate and sample format, determine whether we are an
# input or an output stream, or both
if avtype == AvType.audio_input or duplex_mode:
                # In duplex mode, the input sample format and rate take
                # precedence over the output settings.
sampleformat = daqconfig.en_input_sample_format
samplerate = int(daqconfig.en_input_rate)
input_stream = True
if duplex_mode:
output_stream = True
else:
sampleformat = daqconfig.en_output_sample_format
samplerate = int(daqconfig.en_output_rate)
output_stream = True
print(f'Is input stream: {input_stream}')
print(f'Is output stream: {output_stream}')
sw = get_sampwidth_from_format_string(sampleformat)
print(f'samplewidth: {sw}')
# All set, allocate the stream!
self._stream = <_Stream*> malloc(sizeof(_Stream))
if self._stream == NULL:
raise MemoryError('Could not allocate stream: memory error.')
self._stream.pyCallback = <PyObject*> avstream._audioCallback
# Increase reference count to the callback
Py_INCREF(<object> avstream._audioCallback)
self._stream.sampleformat = _formats_strkey[sampleformat][0]
self._stream.stopThread.store(False)
self._stream.inputQueue = NULL
self._stream.outputQueue = NULL
self._stream.outputDelayQueue = NULL
self._stream.thread = NULL
self._stream.outputDelayBlocks = outputDelayBlocks
self._stream.ninputchannels_forwarded = 0
self._stream.noutputchannels_forwarded = 0
self._stream.inputChannelsEnabled = NULL
self._stream.outputChannelsEnabled = NULL
# Create channel maps for input channels, set RtAudio input stream
# parameters
if input_stream:
firstinputchannel = daqconfig.firstEnabledInputChannelNumber()
lastinputchannel = daqconfig.lastEnabledInputChannelNumber()
ninputchannels_rtaudio = lastinputchannel-firstinputchannel+1
# print(firstinputchannel)
# print(lastinputchannel)
# print(ninputchannels_rtaudio)
if lastinputchannel < 0 or ninputchannels_rtaudio < 1:
raise ValueError('Not enough input channels selected')
input_ch = daqconfig.input_channel_configs
inputChannelsEnabled = <bool*> malloc(sizeof(bool)*ninputchannels_rtaudio)
self._stream.inputChannelsEnabled = inputChannelsEnabled
for i in range(firstinputchannel, lastinputchannel+1):
ch_en = input_ch[i].channel_enabled
if ch_en:
ninputchannels_forwarded += 1
                    # The enabled-flags array is indexed relative to the
                    # first opened channel.
                    inputChannelsEnabled[i - firstinputchannel] = ch_en
rtInputParams_ptr = &self._stream.inputParams
rtInputParams_ptr.deviceId = device.index
rtInputParams_ptr.nChannels = ninputchannels_rtaudio
rtInputParams_ptr.firstChannel = firstinputchannel
self._stream.inputQueue = new SafeQueue[void*]()
self._stream.ninputchannels_forwarded = ninputchannels_forwarded
# Create channel maps for output channels
if output_stream:
firstoutputchannel = daqconfig.firstEnabledOutputChannelNumber()
lastoutputchannel = daqconfig.lastEnabledOutputChannelNumber()
noutputchannels_rtaudio = lastoutputchannel-firstoutputchannel+1
# print(firstoutputchannel)
# print(lastoutputchannel)
# print(noutputchannels_rtaudio)
if lastoutputchannel < 0 or noutputchannels_rtaudio < 1:
raise ValueError('Not enough output channels selected')
output_ch = daqconfig.output_channel_configs
outputChannelsEnabled = <bool*> malloc(sizeof(bool)*noutputchannels_rtaudio)
self._stream.outputChannelsEnabled = outputChannelsEnabled
for i in range(firstoutputchannel, lastoutputchannel+1):
ch_en = output_ch[i].channel_enabled
if ch_en:
noutputchannels_forwarded += 1
                    # The enabled-flags array is indexed relative to the
                    # first opened channel.
                    outputChannelsEnabled[i - firstoutputchannel] = ch_en
rtOutputParams_ptr = &self._stream.outputParams
rtOutputParams_ptr.deviceId = device.index
rtOutputParams_ptr.nChannels = noutputchannels_rtaudio
rtOutputParams_ptr.firstChannel = firstoutputchannel
self._stream.outputQueue = new SafeQueue[void*]()
self._stream.noutputchannels_forwarded = noutputchannels_forwarded
if monitorOutput and duplex_mode:
self._stream.outputDelayQueue = new SafeQueue[void*]()
self._stream.ninputchannels_forwarded += noutputchannels_forwarded
streamoptions.flags = RTAUDIO_HOG_DEVICE
streamoptions.flags |= RTAUDIO_NONINTERLEAVED
streamoptions.numberOfBuffers = 4
streamoptions.streamName = "LASP Audio stream".encode('utf-8')
streamoptions.priority = 1
self._rtaudio.openStream(rtOutputParams_ptr,
rtInputParams_ptr,
_formats_strkey[sampleformat][0],
samplerate,
&nFramesPerBlock,
audioCallback,
<void*> self._stream,
&streamoptions, # Stream options
errorCallback # Error callback
)
self._stream.nBytesPerChan = nFramesPerBlock*sw
self._stream.nFramesPerBlock = nFramesPerBlock
except Exception as e:
            print('Exception occurred in stream opening: ', e)
self.cleanupStream(self._stream)
self._stream = NULL
raise e
with nogil:
self._stream.thread = new CPPThread[void*, void (*)(void*)](audioCallbackPythonThreadFunction,
<void*> self._stream)
# Allow it to start
CPPsleep(500)
pass
return nFramesPerBlock
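    # A stream-lifecycle sketch, assuming `avstream` is an AvStream instance
    # that provides the .daqconfig, .avtype, .device and ._audioCallback
    # attributes used above (setting up the AvStream itself is outside the
    # scope of this module):
    #
    #   nframes = rtaudio.openStream(avstream)
    #   rtaudio.startStream()
    #   ...                      # stream runs; the callback receives blocks
    #   rtaudio.stopStream()
    #   rtaudio.closeStream()    # joins the helper thread and frees buffers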
cdef cleanupStream(self, _Stream* stream):
# printf('Entrance function cleanupStream...\n')
cdef:
void* ptr
if stream == NULL:
return
with nogil:
if stream.thread:
stream.stopThread.store(True)
if stream.inputQueue:
# If waiting in the input queue, hereby we let it run.
stream.inputQueue.enqueue(NULL)
# printf('Joining thread...\n')
                # The GIL must be released while joining (hence the enclosing
                # nogil block): exiting the thread function requires acquiring
                # the GIL, and joining while holding it would deadlock.
stream.thread.join()
# printf('Thread joined!\n')
del stream.thread
stream.thread = NULL
if stream.inputChannelsEnabled:
free(stream.inputChannelsEnabled)
if stream.outputChannelsEnabled:
free(stream.outputChannelsEnabled)
if stream.outputQueue:
while not stream.outputQueue.empty():
free(stream.outputQueue.dequeue())
del stream.outputQueue
if stream.inputQueue:
while not stream.inputQueue.empty():
free(stream.inputQueue.dequeue())
del stream.inputQueue
if stream.outputDelayQueue:
while not stream.outputDelayQueue.empty():
free(stream.outputDelayQueue.dequeue())
del stream.outputDelayQueue
fprintf(stderr, "End cleanup stream queues...\n")
if stream.pyCallback:
Py_DECREF(<object> stream.pyCallback)
stream.pyCallback = NULL
# fprintf(stderr, "End cleanup callback...\n")
free(stream)
def startStream(self):
self._rtaudio.startStream()
def stopStream(self):
if self._stream is NULL:
raise RuntimeError('Stream is not opened')
try:
self._rtaudio.stopStream()
except:
pass
def closeStream(self):
# print('closeStream')
if self._stream is NULL:
raise RuntimeError('Stream is not opened')
# Closing stream
self._rtaudio.closeStream()
self.cleanupStream(self._stream)
self._stream = NULL
def abortStream(self):
if self._stream is NULL:
raise RuntimeError('Stream is not opened')
self._rtaudio.abortStream()
def isStreamOpen(self):
return self._rtaudio.isStreamOpen()
def isStreamRunning(self):
return self._rtaudio.isStreamRunning()
def getStreamTime(self):
return self._rtaudio.getStreamTime()
def setStreamTime(self, double time):
return self._rtaudio.setStreamTime(time)