diff --git a/python_src/lasp/__init__.py b/python_src/lasp/__init__.py index e6537f1..d28c3e8 100644 --- a/python_src/lasp/__init__.py +++ b/python_src/lasp/__init__.py @@ -3,12 +3,10 @@ LASP: Library for Acoustic Signal Processing """ -from .lasp_cpp import * -from ._version import __version__, __version_tuple__ -LASP_VERSION_MAJOR = __version_tuple__[0] -LASP_VERSION_MINOR = __version_tuple__[1] -from .lasp_common import * +from .lasp_version import __version__ +from .lasp_common import * +from .lasp_cpp import * # from .lasp_imptube import * # TwoMicImpedanceTube from .lasp_measurement import * # Measurement, scaleBlockSens @@ -17,6 +15,7 @@ from .lasp_slm import * # SLM, Dummy from .lasp_record import * # RecordStatus, Recording from .lasp_daqconfigs import * from .lasp_measurementset import * + # from .lasp_siggen import * # SignalType, NoiseType, SiggenMessage, SiggenData, Siggen # from .lasp_weighcal import * # WeighCal # from .tools import * # SmoothingType, smoothSpectralData, SmoothingWidth diff --git a/python_src/lasp/lasp_daqconfigs.py b/python_src/lasp/lasp_daqconfigs.py index c7bf381..df6463d 100644 --- a/python_src/lasp/lasp_daqconfigs.py +++ b/python_src/lasp/lasp_daqconfigs.py @@ -1,5 +1,6 @@ # -*- coding: utf-8 -*- -from .lasp_cpp import DaqConfiguration, LASP_VERSION_MAJOR +from .lasp_cpp import DaqConfiguration +from .lasp_version import LASP_VERSION_MAJOR """! Author: J.A. de Jong - ASCEE diff --git a/python_src/lasp/lasp_measurement.py b/python_src/lasp/lasp_measurement.py index 899bb74..f798577 100644 --- a/python_src/lasp/lasp_measurement.py +++ b/python_src/lasp/lasp_measurement.py @@ -1,6 +1,7 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- from __future__ import annotations + """! Author: J.A. de Jong - ASCEE @@ -43,7 +44,7 @@ The video dataset can possibly be not present in the data. """ -__all__ = ['Measurement', 'scaleBlockSens'] +__all__ = ["Measurement", "scaleBlockSens"] from contextlib import contextmanager import h5py as h5 import numpy as np @@ -51,7 +52,8 @@ from .lasp_config import LASP_NUMPY_FLOAT_TYPE from scipy.io import wavfile import os, time, wave, logging from .lasp_common import SIQtys, Qty, getFreq -from .lasp_cpp import Window, DaqChannel, LASP_VERSION_MAJOR, AvPowerSpectra +from .lasp_version import LASP_VERSION_MAJOR, LASP_VERSION_MINOR +from .lasp_cpp import Window, DaqChannel, AvPowerSpectra from typing import List from functools import lru_cache @@ -72,7 +74,7 @@ def getSampWidth(dtype): elif dtype == np.float64: return 8 else: - raise ValueError('Invalid data type: %s' % dtype) + raise ValueError("Invalid data type: %s" % dtype) def scaleBlockSens(block, sens): @@ -87,9 +89,9 @@ def scaleBlockSens(block, sens): assert sens.size == block.shape[1] if np.issubdtype(block.dtype.type, np.integer): sw = getSampWidth(block.dtype) - fac = 2**(8 * sw - 1) - 1 + fac = 2 ** (8 * sw - 1) - 1 else: - fac = 1. 
+ fac = 1.0 return block.astype(LASP_NUMPY_FLOAT_TYPE) / fac / sens[np.newaxis, :] @@ -107,7 +109,7 @@ class IterRawData: """ assert isinstance(channels, list) - fa = f['audio'] + fa = f["audio"] self.fa = fa self.i = 0 @@ -117,8 +119,8 @@ class IterRawData: # nchannels = fa.shape[2] self.channels = channels - self.istart = kwargs.pop('istart', 0) - self.istop = kwargs.pop('istop', blocksize*nblocks) + self.istart = kwargs.pop("istart", 0) + self.istop = kwargs.pop("istop", blocksize * nblocks) self.firstblock = self.istart // blocksize self.lastblock = self.istop // blocksize @@ -128,11 +130,11 @@ class IterRawData: self.firstblock_start_offset = self.istart % blocksize if self.istop < 0: - self.istop += blocksize*nblocks + self.istop += blocksize * nblocks if self.istop <= self.istart: - raise ValueError('Stop index is smaller than start index') + raise ValueError("Stop index is smaller than start index") - if self.istop != blocksize*nblocks: + if self.istop != blocksize * nblocks: self.lastblock_stop_offset = self.istop % blocksize else: self.lastblock_stop_offset = blocksize @@ -171,6 +173,7 @@ class IterData(IterRawData): Iterate over blocks of data, scaled with sensitivity and integer scaling between 0 and 1 """ + def __init__(self, fa, channels, sensitivity, **kwargs): super().__init__(fa, channels, **kwargs) self.sens = np.asarray(sensitivity)[self.channels] @@ -187,8 +190,8 @@ class Measurement: def __init__(self, fn): """Initialize a Measurement object based on the filename.""" - if '.h5' not in fn: - fn += '.h5' + if ".h5" not in fn: + fn += ".h5" # Full filepath self.fn = fn @@ -198,26 +201,26 @@ class Measurement: # Open the h5 file in read-plus mode, to allow for changing the # measurement comment. - with h5.File(fn, 'r') as f: + with h5.File(fn, "r") as f: # Check for video data try: - f['video'] + f["video"] self.has_video = True except KeyError: self.has_video = False - self.nblocks, self.blocksize, self.nchannels = f['audio'].shape - dtype = f['audio'].dtype + self.nblocks, self.blocksize, self.nchannels = f["audio"].shape + dtype = f["audio"].dtype self.dtype = dtype self.sampwidth = getSampWidth(dtype) - self.samplerate = f.attrs['samplerate'] - self.N = (self.nblocks * self.blocksize) + self.samplerate = f.attrs["samplerate"] + self.N = self.nblocks * self.blocksize self.T = self.N / self.samplerate try: - self.version_major = f.attrs['LASP_VERSION_MAJOR'] - self.version_minor = f.attrs['LASP_VERSION_MINOR'] + self.version_major = f.attrs["LASP_VERSION_MAJOR"] + self.version_minor = f.attrs["LASP_VERSION_MINOR"] except KeyError: self.version_major = 0 self.version_minor = 1 @@ -225,45 +228,47 @@ class Measurement: # Due to a previous bug, the channel names were not stored # consistently, i.e. as 'channel_names' and later camelcase. 
try: - self._channelNames = f.attrs['channelNames'] + self._channelNames = f.attrs["channelNames"] except KeyError: try: - self._channelNames = f.attrs['channel_names'] - logging.info("Measurement file obtained which stores channel names with *old* attribute 'channel_names'") + self._channelNames = f.attrs["channel_names"] + logging.info( + "Measurement file obtained which stores channel names with *old* attribute 'channel_names'" + ) except KeyError: # No channel names found in measurement file - logging.info('No channel name data found in measurement') - self._channelNames = [f'Unnamed {i}' for i in range(self.nchannels)] + logging.info("No channel name data found in measurement") + self._channelNames = [f"Unnamed {i}" for i in range(self.nchannels)] # comment = read-write thing - if 'comment' in f.attrs: - self._comment = f.attrs['comment'] + if "comment" in f.attrs: + self._comment = f.attrs["comment"] else: - self._comment = '' + self._comment = "" # Sensitivity try: - sens = f.attrs['sensitivity'] - self._sens = sens * \ - np.ones(self.nchannels) if isinstance( - sens, float) else sens + sens = f.attrs["sensitivity"] + self._sens = ( + sens * np.ones(self.nchannels) if isinstance(sens, float) else sens + ) except KeyError: self._sens = np.ones(self.nchannels) # The time is cached AND ALWAYS ASSUMED TO BE AN IMMUTABLE OBJECT. # It is also cached. Changing the measurement timestamp should not # be done. - self._time = f.attrs['time'] + self._time = f.attrs["time"] # Quantity stored as channel. self._qtys = None try: - qtys_enum_idx = f.attrs['qtys_enum_idx'] + qtys_enum_idx = f.attrs["qtys_enum_idx"] self._qtys = [SIQtys.fromInt(idx) for idx in qtys_enum_idx] except KeyError: try: - qtys_json = f.attrs['qtys'] + qtys_json = f.attrs["qtys"] # Load quantity data self._qtys = [Qty.from_json(qty_json) for qty_json in qtys_json] except KeyError: @@ -273,17 +278,19 @@ class Measurement: if self._qtys is None: self._qtys = [SIQtys.default() for i in range(self.nchannels)] - logging.debug(f'Physical quantity data not available in measurement file. Assuming {SIQtys.default}') + logging.debug( + f"Physical quantity data not available in measurement file. Assuming {SIQtys.default}" + ) def setAttribute(self, atrname, value): """ Set an attribute in the measurement file, and keep a local copy in memory for efficient accessing. 
""" - with self.file('r+') as f: + with self.file("r+") as f: # Update comment attribute in the file f.attrs[atrname] = value - setattr(self, '_' + atrname, value) + setattr(self, "_" + atrname, value) @property def name(self): @@ -297,14 +304,13 @@ class Measurement: @channelNames.setter def channelNames(self, newchnames): if len(newchnames) != self.nchannels: - raise RuntimeError('Invalid length of new channel names') - self.setAttribute('channelNames', newchnames) + raise RuntimeError("Invalid length of new channel names") + self.setAttribute("channelNames", newchnames) @property def channelConfig(self): chcfg = [] - for chname, sens, qty in zip(self.channelNames, self.sensitivity, - self.qtys): + for chname, sens, qty in zip(self.channelNames, self.sensitivity, self.qtys): ch = DaqChannel() ch.enabled = True ch.name = chname @@ -334,26 +340,26 @@ class Measurement: @qtys.setter def qtys(self, newqtys): if not len(newqtys) == len(self._qtys): - raise ValueError('Invalid number of quantities') + raise ValueError("Invalid number of quantities") qtys_int = [qty.toInt() for qty in newqtys] # Use setAttribute here, but thos store the jsonified version as well, # which we have to overwrite again with the deserialized ones. This is # actually not a very nice way of coding. - with self.file('r+') as f: + with self.file("r+") as f: # Update comment attribute in the file - f.attrs['qtys_enum_idx'] = qtys_int + f.attrs["qtys_enum_idx"] = qtys_int self._qtys = newqtys @contextmanager - def file(self, mode='r'): + def file(self, mode="r"): """Contextmanager which opens the storage file and yields the file. Args: mode: Opening mode for the file. Should either be 'r', or 'r+' """ - if mode not in ('r', 'r+'): - raise ValueError('Invalid file opening mode.') + if mode not in ("r", "r+"): + raise ValueError("Invalid file opening mode.") with h5.File(self.fn, mode) as f: yield f @@ -373,9 +379,9 @@ class Measurement: Args: cmt: Comment text string to set """ - with self.file('r+') as f: + with self.file("r+") as f: # Update comment attribute in the file - f.attrs['comment'] = cmt + f.attrs["comment"] = cmt self._comment = cmt @property @@ -400,7 +406,7 @@ class Measurement: """ time_struct = time.localtime(self.time) - time_string = time.strftime('%Y-%m-%d %H:%M:%S', time_struct) + time_string = time.strftime("%Y-%m-%d %H:%M:%S", time_struct) return time_string def rms(self, channels=None, substract_average=False): @@ -415,8 +421,8 @@ class Measurement: Returns: 1D array with rms values for each channel """ - meansquare = 0. # Mean square of the signal, including its average - sum_ = 0. # Sumf of the values of the signal, used to compute average + meansquare = 0.0 # Mean square of the signal, including its average + sum_ = 0.0 # Sumf of the values of the signal, used to compute average N = 0 with self.file() as f: for block in self.iterData(channels): @@ -425,7 +431,7 @@ class Measurement: N += Nblock meansquare += np.sum(block**2, axis=0) / self.N - avg = sum_/N + avg = sum_ / N # In fact, this is not the complete RMS, as in includes the DC # If p = p_dc + p_osc, then rms(p_osc) = sqrt(ms(p)-ms(p_dc)) if substract_average: @@ -461,7 +467,7 @@ class Measurement: return np.concatenate(rawdata, axis=0) def iterData(self, channels, **kwargs): - sensitivity = kwargs.pop('sensitivity', self.sensitivity) + sensitivity = kwargs.pop("sensitivity", self.sensitivity) if channels is None: channels = list(range(self.nchannels)) with self.file() as f: @@ -496,9 +502,9 @@ class Measurement: Cross-power-spectra. 
C[freq, ch_i, ch_j] = C_ij """ - nfft = kwargs.pop('nfft', 2048) - window = kwargs.pop('windowType', Window.WindowType.Hann) - overlap = kwargs.pop('overlap', 50.0) + nfft = kwargs.pop("nfft", 2048) + window = kwargs.pop("windowType", Window.WindowType.Hann) + overlap = kwargs.pop("overlap", 50.0) if channels is None: channels = list(range(self.nchannels)) @@ -527,35 +533,37 @@ class Measurement: """ # Create blocks of equal length N Ntot = self.N - Nblocks = Ntot//N + Nblocks = Ntot // N # TODO: This method graps the whole measurement file into memory. Can # only be done with relatively small measurement files. signal = self.data(channels) # Estimate noise power in block - blocks = [signal[i*N:(i+1)*N] for i in range(Nblocks)] + blocks = [signal[i * N : (i + 1) * N] for i in range(Nblocks)] if noiseCorrection: # The difference between the measured signal in the previous block and # the current block - en = [None] + [blocks[i] - blocks[i-1] for i in range(1,Nblocks)] + en = [None] + [blocks[i] - blocks[i - 1] for i in range(1, Nblocks)] - noise_est = [None] + [-np.average(en[i]*en[i+1]) for i in range(1,len(en)-1)] + noise_est = [None] + [ + -np.average(en[i] * en[i + 1]) for i in range(1, len(en) - 1) + ] # Create weighting coefficients - sum_inverse_noise = sum([1/n for n in noise_est[1:]]) - c_n = [1/(ni*sum_inverse_noise) for ni in noise_est[1:]] + sum_inverse_noise = sum([1 / n for n in noise_est[1:]]) + c_n = [1 / (ni * sum_inverse_noise) for ni in noise_est[1:]] else: - c_n = [1/(Nblocks-2)]*(Nblocks-2) + c_n = [1 / (Nblocks - 2)] * (Nblocks - 2) assert np.isclose(sum(c_n), 1.0) - assert Nblocks-2 == len(c_n) + assert Nblocks - 2 == len(c_n) # Average signal over blocks avg = np.zeros((blocks[0].shape), dtype=float) - for n in range(0, Nblocks-2): - avg += c_n[n]*blocks[n+1] + for n in range(0, Nblocks - 2): + avg += c_n[n] * blocks[n + 1] return avg @@ -578,7 +586,6 @@ class Measurement: return freq, CS - @property def sensitivity(self): """Sensitivity of the data in U^-1, from floating point data scaled @@ -607,9 +614,9 @@ class Measurement: valid &= sens.shape[0] == self.nchannels valid &= sens.dtype == float if not valid: - raise ValueError('Invalid sensitivity value(s) given') - with self.file('r+') as f: - f.attrs['sensitivity'] = sens + raise ValueError("Invalid sensitivity value(s) given") + with self.file("r+") as f: + f.attrs["sensitivity"] = sens self._sens = sens def checkOverflow(self, channels): @@ -621,19 +628,17 @@ class Measurement: for block in self.iterData(channels): dtype = block.dtype - if dtype.kind == 'i': + if dtype.kind == "i": # minvalue = np.iinfo(dtype).min maxvalue = np.iinfo(dtype).max - if np.max(np.abs(block)) >= 0.9*maxvalue: + if np.max(np.abs(block)) >= 0.9 * maxvalue: return True else: # Cannot check for floating point values. return False return False - - def exportAsWave(self, fn=None, force=False, dtype=None, - normalize=False, **kwargs): + def exportAsWave(self, fn=None, force=False, dtype=None, normalize=False, **kwargs): """Export measurement file as wave. In case the measurement data is stored as floats, the values are scaled to the proper integer (PCM) data format. 
@@ -654,14 +659,16 @@ class Measurement: fn = self.fn fn = os.path.splitext(fn)[0] - if os.path.splitext(fn)[1] != '.wav': - fn += '.wav' + if os.path.splitext(fn)[1] != ".wav": + fn += ".wav" if os.path.exists(fn) and not force: - raise RuntimeError(f'File already exists: {fn}') + raise RuntimeError(f"File already exists: {fn}") - if not np.isclose(self.samplerate%1,0): - raise RuntimeError(f'Sample rates should be approximately integer for exporting to Wave to work') + if not np.isclose(self.samplerate % 1, 0): + raise RuntimeError( + f"Sample rates should be approximately integer for exporting to Wave to work" + ) # TODO: With VERY large measurment files, this is not possible! Is this # a theoretical case? @@ -673,20 +680,20 @@ class Measurement: maxabs = np.max(np.abs(data)) data = data / maxabs # "data /= maxabs" fails if dtpyes differ - if dtype==None: + if dtype == None: dtype = data.dtype # keep existing logging.debug(f"dtype not passed as arg; using dtype = {dtype}") # dtype conversion - if dtype=='int16': + if dtype == "int16": newtype = np.int16 newsampwidth = 2 - elif dtype=='int32': + elif dtype == "int32": newtype = np.int32 newsampwidth = 4 - elif dtype=='float32': + elif dtype == "float32": newtype = np.float32 - elif dtype=='float64': + elif dtype == "float64": newtype = np.float64 else: logging.debug(f"cannot handle this dtype {dtype}") @@ -697,22 +704,24 @@ class Measurement: sensone = np.ones_like(self.sensitivity) data = scaleBlockSens(data, sensone) - if dtype=='int16' or dtype=='int32': + if dtype == "int16" or dtype == "int32": # Scale data to integer range and convert to integers - scalefac = 2**(8*newsampwidth-1)-1 - data = (data*scalefac).astype(newtype) + scalefac = 2 ** (8 * newsampwidth - 1) - 1 + data = (data * scalefac).astype(newtype) wavfile.write(fn, int(self.samplerate), data.astype(newtype)) @staticmethod - def fromtxt(fn, - skiprows, - samplerate, - sensitivity, - mfn=None, - timestamp=None, - delimiter='\t', - firstcoltime=True): + def fromtxt( + fn, + skiprows, + samplerate, + sensitivity, + mfn=None, + timestamp=None, + delimiter="\t", + firstcoltime=True, + ): """Converts a txt file to a LASP Measurement file, opens the associated Measurement object and returns it. The measurement file will have the same file name as the txt file, except with h5 extension. @@ -732,50 +741,57 @@ class Measurement: sample time. """ if not os.path.exists(fn): - raise ValueError(f'File {fn} does not exist.') + raise ValueError(f"File {fn} does not exist.") if timestamp is None: timestamp = os.path.getmtime(fn) if mfn is None: - mfn = os.path.splitext(fn)[0] + '.h5' + mfn = os.path.splitext(fn)[0] + ".h5" else: - mfn = os.path.splitext(mfn)[0] + '.h5' + mfn = os.path.splitext(mfn)[0] + ".h5" dat = np.loadtxt(fn, skiprows=skiprows, delimiter=delimiter) if firstcoltime: time = dat[:, 0] if not np.isclose(time[1] - time[0], 1 / samplerate): - raise ValueError('Samplerate given does not agree with ' - 'samplerate in file') + raise ValueError( + "Samplerate given does not agree with " "samplerate in file" + ) # Chop off first column dat = dat[:, 1:] nchannels = dat.shape[1] if nchannels != sensitivity.shape[0]: raise ValueError( - f'Invalid sensitivity length given. Should be: {nchannels}') + f"Invalid sensitivity length given. 
Should be: {nchannels}" + ) - with h5.File(mfn, 'w') as hf: - hf.attrs['samplerate'] = samplerate - hf.attrs['sensitivity'] = sensitivity - hf.attrs['time'] = timestamp - hf.attrs['blocksize'] = 1 - hf.attrs['nchannels'] = nchannels - ad = hf.create_dataset('audio', (1, dat.shape[0], dat.shape[1]), - dtype=dat.dtype, - maxshape=(1, dat.shape[0], dat.shape[1]), - compression='gzip') + with h5.File(mfn, "w") as hf: + hf.attrs["samplerate"] = samplerate + hf.attrs["sensitivity"] = sensitivity + hf.attrs["time"] = timestamp + hf.attrs["blocksize"] = 1 + hf.attrs["nchannels"] = nchannels + ad = hf.create_dataset( + "audio", + (1, dat.shape[0], dat.shape[1]), + dtype=dat.dtype, + maxshape=(1, dat.shape[0], dat.shape[1]), + compression="gzip", + ) ad[0] = dat return Measurement(mfn) @staticmethod - def fromnpy(data, - samplerate, - sensitivity, - mfn, - timestamp=None, - qtys: List[SIQtys] = None, - channelNames: List[str] = None, - force=False) -> Measurement: + def fromnpy( + data, + samplerate, + sensitivity, + mfn, + timestamp=None, + qtys: List[SIQtys] = None, + channelNames: List[str] = None, + force=False, + ) -> Measurement: """ Converts a numpy array to a LASP Measurement file, opens the associated Measurement object and returns it. The measurement file will @@ -784,76 +800,79 @@ class Measurement: Args: data: Numpy array, first column is sample, second is channel. Can also be specified with a single column for single-channel data. - + samplerate: Sampling frequency in [Hz] - + sensitivity: 1D array of channel sensitivities in [U^-1], where U is the recorded unit. - + mfn: Filepath of the file where the data is stored. - + timestamp: If given, a custom timestamp for the measurement (integer containing seconds since epoch). qtys: If a list of physical quantity data is given here channelNames: Name of the channels - + force: If True, overwrites existing files with specified `mfn` name. """ - if os.path.splitext(mfn)[1] != '.h5': - mfn += '.h5' + if os.path.splitext(mfn)[1] != ".h5": + mfn += ".h5" if os.path.exists(mfn) and not force: - raise ValueError(f'File {mfn} already exist.') + raise ValueError(f"File {mfn} already exist.") if timestamp is None: timestamp = int(time.time()) if data.ndim != 2: data = data[:, np.newaxis] - try: len(sensitivity) except: - raise ValueError('Sensitivity should be given as array-like data type') + raise ValueError("Sensitivity should be given as array-like data type") sensitivity = np.asarray(sensitivity) nchannels = data.shape[1] if nchannels != sensitivity.shape[0]: raise ValueError( - f'Invalid sensitivity length given. Should be: {nchannels}') + f"Invalid sensitivity length given. 
Should be: {nchannels}" + ) if channelNames is not None: if len(channelNames) != nchannels: raise RuntimeError("Illegal length of channelNames list given") if qtys is None: - qtys = [SIQtys.AP]*nchannels + qtys = [SIQtys.AP] * nchannels else: if len(qtys) != nchannels: raise RuntimeError("Illegal length of qtys list given") qtyvals = [qty.value for qty in qtys] - with h5.File(mfn, 'w') as hf: - hf.attrs['samplerate'] = samplerate - hf.attrs['sensitivity'] = sensitivity - hf.attrs['time'] = timestamp - hf.attrs['blocksize'] = 1 - hf.attrs['nchannels'] = nchannels + with h5.File(mfn, "w") as hf: + hf.attrs["samplerate"] = samplerate + hf.attrs["sensitivity"] = sensitivity + hf.attrs["time"] = timestamp + hf.attrs["blocksize"] = 1 + hf.attrs["nchannels"] = nchannels # Add physical quantity indices - hf.attrs['qtys_enum_idx'] = [qtyval.toInt() for qtyval in qtyvals] + hf.attrs["qtys_enum_idx"] = [qtyval.toInt() for qtyval in qtyvals] # Add channel names in case given if channelNames is not None: - hf.attrs['channelNames'] = channelNames + hf.attrs["channelNames"] = channelNames - ad = hf.create_dataset('audio', (1, data.shape[0], data.shape[1]), - dtype=data.dtype, - maxshape=(1, data.shape[0], data.shape[1]), - compression='gzip') + ad = hf.create_dataset( + "audio", + (1, data.shape[0], data.shape[1]), + dtype=data.dtype, + maxshape=(1, data.shape[0], data.shape[1]), + compression="gzip", + ) ad[0] = data return Measurement(mfn) @@ -866,9 +885,11 @@ class Measurement: base_fn = os.path.splitext(fn)[0] if newfn is None: - newfn = base_fn + '.h5' + newfn = base_fn + ".h5" if os.path.exists(newfn) and not force: - raise RuntimeError(f'Measurement file name {newfn} already exists in path, set "force" to true to overwrite') + raise RuntimeError( + f'Measurement file name {newfn} already exists in path, set "force" to true to overwrite' + ) samplerate, data = wavfile.read(fn) if data.ndim == 2: @@ -879,16 +900,19 @@ class Measurement: data = data[:, np.newaxis] sensitivity = np.ones(nchannels) - with h5.File(newfn, 'w') as hf: - hf.attrs['samplerate'] = samplerate - hf.attrs['nchannels'] = nchannels - hf.attrs['time'] = timestamp - hf.attrs['blocksize'] = 1 - hf.attrs['sensitivity'] = sensitivity - ad = hf.create_dataset('audio', (1, nframes, nchannels), - dtype=data.dtype, - maxshape=(1, nframes, nchannels), - compression='gzip') + with h5.File(newfn, "w") as hf: + hf.attrs["samplerate"] = samplerate + hf.attrs["nchannels"] = nchannels + hf.attrs["time"] = timestamp + hf.attrs["blocksize"] = 1 + hf.attrs["sensitivity"] = sensitivity + ad = hf.create_dataset( + "audio", + (1, nframes, nchannels), + dtype=data.dtype, + maxshape=(1, nframes, nchannels), + compression="gzip", + ) ad[0] = data return Measurement(newfn) diff --git a/python_src/lasp/lasp_record.py b/python_src/lasp/lasp_record.py index 158665b..2b666cf 100644 --- a/python_src/lasp/lasp_record.py +++ b/python_src/lasp/lasp_record.py @@ -7,8 +7,8 @@ import dataclasses, logging, os, time, h5py, threading import numpy as np from .lasp_atomic import Atomic -from .lasp_cpp import (LASP_VERSION_MAJOR, LASP_VERSION_MINOR, InDataHandler, - StreamMgr) +from .lasp_cpp import InDataHandler, StreamMgr +from .lasp_version import LASP_VERSION_MAJOR, LASP_VERSION_MINOR @dataclasses.dataclass @@ -84,10 +84,10 @@ class Recording: try: # Open the file - self.f = h5py.File(self.fn, "w", 'stdio') + self.f = h5py.File(self.fn, "w", "stdio") self.f.flush() except Exception as e: - logging.error(f'Error creating measurement file {e}') + logging.error(f"Error 
creating measurement file {e}") raise # This flag is used to delete the file on finish(), and can be used @@ -191,7 +191,7 @@ class Recording: """ if self.stop(): - logging.debug('Stop flag set, early return in inCallback') + logging.debug("Stop flag set, early return in inCallback") # Stop flag is raised. We do not add any data anymore. return True @@ -225,12 +225,12 @@ class Recording: with self.file_mtx: self.f.flush() # Remove indata handler, which also should remove callback function - # from StreamMgr. This, however does not have to happen + # from StreamMgr. This, however does not have to happen # instantaneously. For which we have to implement extra mutex # guards in this class del self.indh self.indh = None - + # Remove handle to dataset otherwise the h5 file is not closed # properly. del self.ad diff --git a/python_src/lasp/lasp_version.py b/python_src/lasp/lasp_version.py new file mode 100644 index 0000000..fd4b171 --- /dev/null +++ b/python_src/lasp/lasp_version.py @@ -0,0 +1,4 @@ +import importlib.metadata + +__version__ = importlib.metadata.version(__package__ or __name__) +LASP_VERSION_MAJOR, LASP_VERSION_MINOR = [int(a) for a in __version__.split(".")] diff --git a/requirements.txt b/requirements.txt deleted file mode 100644 index 28f5ba9..0000000 --- a/requirements.txt +++ /dev/null @@ -1,6 +0,0 @@ -appdirs -dataclasses_json -scipy -matplotlib -h5py -pytest
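
For reference, the new python_src/lasp/lasp_version.py introduced above replaces both the generated _version module and the LASP_VERSION_MAJOR/LASP_VERSION_MINOR constants previously imported from lasp_cpp: the version is now resolved at import time from installed package metadata. The sketch below mirrors that logic; the version_tuple() wrapper, the "lasp" distribution-name default, and the [:2] slice (added so that a patch-level version string such as "1.2.3" does not break the two-value unpacking) are illustrative assumptions, not part of the patch.

    # Sketch only -- mirrors the logic of lasp_version.py from this patch.
    import importlib.metadata

    def version_tuple(dist: str = "lasp") -> tuple[int, int]:
        """Return (major, minor) of an installed distribution."""
        version = importlib.metadata.version(dist)   # e.g. "1.0" or "1.0.2"
        major, minor = (int(part) for part in version.split(".")[:2])
        return major, minor

    if __name__ == "__main__":
        # Requires the distribution to be installed (e.g. pip install -e .)
        print(version_tuple())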
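A short usage sketch of the reformatted Measurement.fromnpy() constructor touched by this patch, assuming the lasp package is installed and importable; the array shape, sample rate, and file name below are made up for illustration.

    # Illustrative only: create a measurement file from a NumPy array.
    import numpy as np
    from lasp import Measurement

    data = np.random.randn(48000, 2)        # 1 s of 2-channel float data
    m = Measurement.fromnpy(
        data,
        samplerate=48000,
        sensitivity=np.ones(2),             # one entry per channel, in [U^-1]
        mfn="example_measurement",          # ".h5" is appended automatically
        force=True,                         # overwrite an existing file
    )
    print(m.samplerate, m.nchannels, m.T)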