Merge branch 'develop' into windows_ready
All checks were successful: continuous-integration/drone/push Build is passing

commit ebf5ac3de4
@@ -67,10 +67,7 @@ void fillUlDaqDeviceInfo(DeviceInfoList &devinfolist) {
                           DataTypeDescriptor::DataType::dtype_fl64);
       devinfo.prefDataTypeIndex = 0;
 
-      devinfo.availableSampleRates = {8000, 10000, 11025, 16000, 20000,
-                                      22050, 24000, 32000, 44056, 44100,
-                                      47250, 48000, 50000, 50400, 51000};
+      devinfo.availableSampleRates = ULDAQ_SAMPLERATES;
 
       devinfo.prefSampleRateIndex = 11;
 
       devinfo.availableFramesPerBlock = {512, 1024, 2048, 4096, 8192};
@@ -70,7 +70,7 @@ DT9837A::DT9837A(const DeviceInfo &devinfo, const DaqConfiguration &config)
     throw rte("Unsensible number of samples per block chosen");
   }
 
-  if (samplerate() < 10000 || samplerate() > 51000) {
+  if (samplerate() < ULDAQ_SAMPLERATES.at(0) || samplerate() > ULDAQ_SAMPLERATES.at(ULDAQ_SAMPLERATES.size() - 1)) {
     throw rte("Invalid sample rate");
   }
 
@@ -14,6 +14,14 @@ using std::cerr;
 using std::endl;
 using rte = std::runtime_error;
 
+/**
+ * @brief List of available sampling frequencies for DT9837A
+ */
+const std::vector<d> ULDAQ_SAMPLERATES = {8000, 10000, 11025, 16000, 20000,
+                                          22050, 24000, 32000, 44056, 44100,
+                                          47250, 48000, 50000, 50400, 51000};
+
+
 /**
  * @brief UlDaq-specific device information. Adds a copy of the underlying
  * DaqDeDaqDeviceDescriptor.
@@ -23,7 +23,8 @@ y[n] = 1/ba[3] * ( ba[0] * x[n] + ba[1] * x[n-1] + ba[2] * x[n-2] +
 
 """
 __all__ = ['peaking', 'biquadTF', 'notch', 'lowpass', 'highpass',
-           'highshelf', 'lowshelf', 'LPcompensator', 'HPcompensator']
+           'highshelf', 'lowshelf', 'LP1compensator', 'LP2compensator',
+           'HP1compensator', 'HP2compensator']
 
 from numpy import array, cos, pi, sin, sqrt
 from scipy.interpolate import interp1d
@@ -157,7 +158,32 @@ def lowshelf(fs, f0, Q, gain):
     a2 = (A+1) + (A-1)*cos(w0) - 2*sqrt(A)*alpha
     return array([b0/a0, b1/a0, b2/a0, a0/a0, a1/a0, a2/a0])
 
-def LPcompensator(fs, f0o, Qo, f0n, Qn):
+def LP1compensator(fs, f0o, f0n):
+    """
+    Shelving type filter that, when multiplied with a first-order low-pass
+    filter, alters the response of that filter to a different first-order
+    low-pass filter.
+
+    Args:
+        fs: Sampling frequency [Hz]
+        f0o: Cut-off frequency of the original filter [Hz]
+        f0n: Desired cut-off frequency [Hz]
+    """
+
+    omg0o = 2*pi*f0o
+    omg0n = 2*pi*f0n
+
+    z = -omg0o
+    p = -omg0n
+    k = p/z
+
+    zd, pd, kd = bilinear_zpk(z, p, k, fs)
+
+    sos = zpk2sos(zd,pd,kd)
+
+    return sos[0]
+
+def LP2compensator(fs, f0o, Qo, f0n, Qn):
     """
     Shelving type filter that, when multiplied with a second-order low-pass
     filter, alters the response of that filter to a different second-order
@@ -194,7 +220,32 @@ def LPcompensator(fs, f0o, Qo, f0n, Qn):
 
     return sos[0]
 
-def HPcompensator(fs, f0o, Qo, f0n, Qn):
+def HP1compensator(fs, f0o, f0n):
+    """
+    Shelving type filter that, when multiplied with a first-order high-pass
+    filter, alters the response of that filter to a different first-order
+    high-pass filter.
+
+    Args:
+        fs: Sampling frequency [Hz]
+        f0o: Cut-on frequency of the original filter [Hz]
+        f0n: Desired cut-on frequency [Hz]
+    """
+
+    omg0o = 2*pi*f0o
+    omg0n = 2*pi*f0n
+
+    z = -omg0o
+    p = -omg0n
+    k = 1
+
+    zd, pd, kd = bilinear_zpk(z, p, k, fs)
+
+    sos = zpk2sos(zd,pd,kd)
+
+    return sos[0]
+
+def HP2compensator(fs, f0o, Qo, f0n, Qn):
     """
     Shelving type filter that, when multiplied with a second-order high-pass
     filter, alters the response of that filter to a different second-order
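The new LP1compensator builds a one-zero/one-pole analog section (zero at the old cut-off, pole at the new one, gain chosen for unit DC gain) and discretizes it with the bilinear transform. The following standalone sketch is not part of the commit; it assumes bilinear_zpk and zpk2sos come from scipy.signal (the unqualified calls in the diff suggest such an import) and checks that cascading a first-order low-pass at f0o with the compensator reproduces a first-order low-pass at f0n:

import numpy as np
from scipy.signal import bilinear_zpk, zpk2sos, sosfreqz

def lp1(fs, f0):
    # First-order analog low-pass H(s) = w0/(s + w0), discretized with Tustin
    w0 = 2*np.pi*f0
    zd, pd, kd = bilinear_zpk([], [-w0], w0, fs)
    return zpk2sos(zd, pd, kd)

def lp1compensator(fs, f0o, f0n):
    # Same construction as LP1compensator in the diff: zero at -w0o, pole at
    # -w0n, gain w0n/w0o so the DC gain of the compensator is 1.
    w0o, w0n = 2*np.pi*f0o, 2*np.pi*f0n
    zd, pd, kd = bilinear_zpk(-w0o, -w0n, w0n/w0o, fs)
    return zpk2sos(zd, pd, kd)

fs, f0o, f0n = 48000, 1000, 250
sos_cascade = np.vstack([lp1(fs, f0o), lp1compensator(fs, f0o, f0n)])
w, h_casc = sosfreqz(sos_cascade, worN=512, fs=fs)
_, h_ref = sosfreqz(lp1(fs, f0n), worN=512, fs=fs)
print(np.max(np.abs(h_casc - h_ref)))  # should be near machine precision

Since the bilinear transform is a rational substitution, discretizing the two analog sections separately and cascading them gives exactly the discretization of their product, which is why the responses agree to rounding error.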
@@ -52,6 +52,7 @@ import os, time, wave, logging
 from .lasp_common import SIQtys, Qty, getFreq
 from .lasp_cpp import Window, DaqChannel, LASP_VERSION_MAJOR, AvPowerSpectra
 from typing import List
+from functools import lru_cache
 
 
 def getSampWidth(dtype):
@@ -248,6 +249,9 @@ class Measurement:
         except KeyError:
             self._sens = np.ones(self.nchannels)
 
+        # The time is cached AND ALWAYS ASSUMED TO BE AN IMMUTABLE OBJECT.
+        # It is also cached. Changing the measurement timestamp should not
+        # be done.
         self._time = f.attrs['time']
 
         # Quantity stored as channel.
@@ -260,7 +264,7 @@ class Measurement:
         try:
             qtys_json = f.attrs['qtys']
             # Load quantity data
             self._qtys = [Qty.from_json(qty_json) for qty_json in qtys_json]
         except KeyError:
             # If quantity data is not available, this is an 'old'
             # measurement file.
@@ -270,7 +274,6 @@ class Measurement:
             self._qtys = [SIQtys.default() for i in range(self.nchannels)]
             logging.debug(f'Physical quantity data not available in measurement file. Assuming {SIQtys.default}')
 
-
     def setAttribute(self, atrname, value):
         """
         Set an attribute in the measurement file, and keep a local copy in
@@ -375,6 +378,7 @@ class Measurement:
         self._comment = cmt
 
     @property
+    @lru_cache()
    def recTime(self):
         """Returns the total recording time of the measurement, in float
         seconds."""
@@ -385,6 +389,19 @@ class Measurement:
         """Returns the measurement time in seconds since the epoch."""
         return self._time
 
+    @property
+    @lru_cache()
+    def timestr(self):
+        """
+        Return a properly formatted string of the measurement time, in order of
+
+        year-month-day hour etc.
+
+        """
+        time_struct = time.localtime(self.time)
+        time_string = time.strftime('%Y-%m-%d %H:%M:%S', time_struct)
+        return time_string
+
     def rms(self, channels=None, substract_average=False):
         """Returns the root mean square values for each channel
 
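Stacking @property on top of @lru_cache(), as done above for recTime and timestr, means property(lru_cache()(method)): the wrapped method is memoized on self, so it runs at most once per instance and later accesses are served from the cache. A minimal sketch outside the commit (the class and the timing are purely illustrative); note that lru_cache keeps a strong reference to self, so functools.cached_property (Python >= 3.8) is a common alternative for the same pattern:

import time
from functools import lru_cache

class Example:
    @property
    @lru_cache()
    def expensive(self):
        time.sleep(0.1)          # stands in for reading from the measurement file
        return time.strftime('%Y-%m-%d %H:%M:%S')

e = Example()
t0 = time.time()
e.expensive                      # first access: runs the body
t1 = time.time()
e.expensive                      # second access: served from the cache, no sleep
t2 = time.time()
print(f'first: {t1 - t0:.3f} s, second: {t2 - t1:.6f} s')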
@@ -408,7 +425,7 @@ class Measurement:
                 meansquare += np.sum(block**2, axis=0) / self.N
 
         avg = sum_/N
         # In fact, this is not the complete RMS, as in includes the DC
         # If p = p_dc + p_osc, then rms(p_osc) = sqrt(ms(p)-ms(p_dc))
         if substract_average:
             meansquare -= avg**2
@@ -419,7 +436,7 @@ class Measurement:
             return self.rms(substract_average=True)
 
     def rawData(self, channels=None, **kwargs):
         """Returns the raw data as stored in the measurement file,
         without any transformations applied
 
         args:
@@ -472,10 +489,10 @@ class Measurement:
             window: Window type
             overlap: Overlap percentage (value between 0.0 and up to and
             including 100.0)
             weighting:
 
         Returns:
             Cross-power-spectra. C[freq, ch_i, ch_j] = C_ij
 
         """
         nfft = kwargs.pop('nfft', 2048)
@@ -517,14 +534,14 @@ class Measurement:
 
         # Estimate noise power in block
         blocks = [signal[i*N:(i+1)*N] for i in range(Nblocks)]
 
         if noiseCorrection:
             # The difference between the measured signal in the previous block and
             # the current block
             en = [None] + [blocks[i] - blocks[i-1] for i in range(1,Nblocks)]
 
             noise_est = [None] + [-np.average(en[i]*en[i+1]) for i in range(1,len(en)-1)]
 
             # Create weighting coefficients
             sum_inverse_noise = sum([1/n for n in noise_est[1:]])
             c_n = [1/(ni*sum_inverse_noise) for ni in noise_est[1:]]
@@ -533,7 +550,7 @@ class Measurement:
 
             assert np.isclose(sum(c_n), 1.0)
             assert Nblocks-2 == len(c_n)
 
             # Average signal over blocks
             avg = np.zeros((blocks[0].shape), dtype=float)
             for n in range(0, Nblocks-2):
@@ -558,7 +575,7 @@ class Measurement:
         CS = ps.compute(avg)
         freq = getFreq(self.samplerate, N)
 
         return freq, CS
 
 
     @property
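For context, the noise-correction branch shown in the hunks above weights nominally identical blocks of a periodic excitation by the inverse of their estimated noise power, using -mean(en[i]*en[i+1]) as the per-block noise estimate (the signal cancels in consecutive-block differences, and independent noise makes that cross-term average to minus the noise variance). The sketch below is not from the repository; in particular, the mapping of weight c_n[n] to block n+1 is an assumption here, since the body of the averaging loop is not visible in these hunks:

import numpy as np

rng = np.random.default_rng(0)
N, Nblocks = 1024, 8
period = np.sin(2*np.pi*5*np.arange(N)/N)          # one period of the excitation
noise_levels = np.linspace(0.05, 0.5, Nblocks)     # some blocks noisier than others
blocks = [period + s*rng.standard_normal(N) for s in noise_levels]

# Per-block noise power estimate, as in the hunk above
en = [None] + [blocks[i] - blocks[i-1] for i in range(1, Nblocks)]
noise_est = [None] + [-np.average(en[i]*en[i+1]) for i in range(1, len(en)-1)]

# Normalized inverse-noise weights, as in the hunk above
sum_inverse_noise = sum([1/n for n in noise_est[1:]])
c_n = [1/(ni*sum_inverse_noise) for ni in noise_est[1:]]
assert np.isclose(sum(c_n), 1.0)

# Assumed mapping: weight n is applied to interior block n+1
avg = np.zeros(blocks[0].shape, dtype=float)
for n in range(Nblocks-2):
    avg += c_n[n]*blocks[n+1]

print('plain average error   :', np.std(np.mean(blocks, axis=0) - period))
print('weighted average error:', np.std(avg - period))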
@@ -614,8 +631,8 @@ class Measurement:
             return False
 
 
-    def exportAsWave(self, fn=None, force=False, newsampwidth=None,
-                     normalize=True, **kwargs):
+    def exportAsWave(self, fn=None, force=False, dtype=None,
+                     normalize=False, **kwargs):
         """Export measurement file as wave. In case the measurement data is
         stored as floats, the values are scaled to the proper integer (PCM)
         data format.
@@ -627,9 +644,8 @@ class Measurement:
             force: If True, overwrites any existing files with the given name
             , otherwise a RuntimeError is raised.
 
-            newsampwidth: sample width in bytes with which to export the data.
-            This should only be given in case the measurement data is stored as
-            floating point values, otherwise an error is thrown
+            dtype: if not None, convert data to this data type.
+            Options are 'int16', 'int32', 'float32'.
 
             normalize: If set: normalize the level to something sensible.
         """
@@ -643,39 +659,49 @@ class Measurement:
         if os.path.exists(fn) and not force:
             raise RuntimeError(f'File already exists: {fn}')
 
 
         if not np.isclose(self.samplerate%1,0):
             raise RuntimeError(f'Sample rates should be approximately integer for exporting to Wave to work')
 
         # TODO: With VERY large measurment files, this is not possible! Is this
         # a theoretical case?
+        # TODO: add sensitivity? Then use self.data() instead of self.rawData()
         data = self.rawData(**kwargs)
-        if np.issubdtype(data.dtype, np.floating) and newsampwidth is None:
-            raise ValueError('Newsampwidth parameter should be given for floating point raw data')
 
         if normalize:
             # Scale back to maximum of absolute value
             maxabs = np.max(np.abs(data))
             data = data / maxabs  # "data /= maxabs" fails if dtpyes differ
 
-        if newsampwidth is not None:
-            # Convert to floats, then to new sample width
-            sensone = np.ones_like(self.sensitivity)
-            data = scaleBlockSens(data, sensone)
+        if dtype==None:
+            dtype = data.dtype  # keep existing
+            logging.debug(f"dtype not passed as arg; using dtype = {dtype}")
 
-            if newsampwidth == 2:
-                newtype = np.int16
-            elif newsampwidth == 4:
-                newtype = np.int32
-            else:
-                raise ValueError('Invalid sample width, should be 2 or 4')
+        # dtype conversion
+        if dtype=='int16':
+            newtype = np.int16
+            newsampwidth = 2
+        elif dtype=='int32':
+            newtype = np.int32
+            newsampwidth = 4
+        elif dtype=='float32':
+            newtype = np.float32
+        elif dtype=='float64':
+            newtype = np.float64
+        else:
+            logging.debug(f"cannot handle this dtype {dtype}")
+            pass
 
+        # Convert range to [-1, 1]
+        # TODO: this is wrong for float data where full scale > 1
+        sensone = np.ones_like(self.sensitivity)
+        data = scaleBlockSens(data, sensone)
 
+        if dtype=='int16' or dtype=='int32':
+            # Scale data to integer range and convert to integers
             scalefac = 2**(8*newsampwidth-1)-1
 
-            # Scale data to integer range, and then convert to integers
             data = (data*scalefac).astype(newtype)
 
-        wavfile.write(fn, int(self.samplerate), data)
+        wavfile.write(fn, int(self.samplerate), data.astype(newtype))
 
     @staticmethod
     def fromtxt(fn,
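The reworked exportAsWave scales float data into the integer range with 2**(8*sampwidth - 1) - 1 before casting, then writes the result with scipy.io's wavfile, as the final line of the hunk shows. A standalone sketch of the int16 path (the file name and the test signal here are made up for illustration; the scaling assumes float data with full scale within [-1, 1]):

import numpy as np
from scipy.io import wavfile

fs = 48000
t = np.arange(fs) / fs
data = 0.5*np.sin(2*np.pi*440*t)          # float data, full scale assumed <= 1

newsampwidth, newtype = 2, np.int16       # the dtype='int16' branch above
scalefac = 2**(8*newsampwidth - 1) - 1    # 32767 for 16-bit PCM
pcm = (data*scalefac).astype(newtype)     # scale to integer range, then cast

wavfile.write('tone_int16.wav', int(fs), pcm)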