Merge branch 'develop' into AlternativeSmoothing

Casper Jansen 2023-02-23 14:24:30 +01:00
commit 1b46616607
59 changed files with 2489 additions and 1556 deletions

110
.drone.yml Normal file

@ -0,0 +1,110 @@
kind: pipeline
type: docker
name: archlinux
clone:
depth: 50
steps:
- name: archlinux_build
image: archlinux_build:latest
pull: if-not-exists
volumes:
- name: archlinux_ccache
path: /root/.ccache
commands:
# The following command is not required; it is already included in the Docker
# image of archlinux_build
# - pacman -S --noconfirm ccache openblas fftw pulseaudio pybind11
- git submodule update --init --recursive
- cmake .
# More than two jobs make ascee2 unresponsive for now
- make -j2
- name: archlinux_test
image: archlinux_build:latest
pull: if-not-exists
commands:
# The following command is not required; it is already included in the Docker
# image of archlinux_build
# - pacman -S --noconfirm openblas python-pytest fftw pulseaudio python-pip python-scipy python-h5py
- pip install -r requirements.txt
- pip install .
- pytest
# - name: release-arch
# commands:
# -
volumes:
- name: archlinux_ccache
host:
path: /tmp/archlinux_ccache
---
kind: pipeline
type: docker
name: ubuntu
clone:
depth: 3
volumes:
- name: archlinux_ccache
path: /root/.ccache
steps:
- name: ubuntu_build
image: ubuntu_build:latest
pull: if-not-exists
volumes:
- name: ubuntu_ccache
path: /root/.ccache
commands:
# The following commands are not required; they are already included in the
# Docker image of ubuntu_build
#- apt update
#- apt install -y git cmake python3-pybind11 libopenblas-dev python3-pip python3-scipy libusb-1.0-0-dev libpulse-dev python3-h5py fftw-dev
- git submodule update --init --recursive
- cmake .
# More than two jobs make ascee2 unresponsive for now
- make -j2
- name: ubuntu_test
image: ubuntu_build:latest
pull: if-not-exists
commands:
# The following commands are not required; they are already included in the
# Docker image of ubuntu_build
#- apt update
#- apt install -y python3-pytest fftw pulseaudio python3-pip python3-scipy python3-h5py
- pip install -r requirements.txt
- pip install .
- pytest-3
volumes:
- name: ubuntu_ccache
host:
path: /tmp/ubuntu_ccache
---
kind: pipeline
type: docker
name: documentation_build
clone:
depth: 3
steps:
- name: build_docker_master
image: plugins/docker
settings:
repo: ascee/lasp_ascee_nl
tags: latest
username:
from_secret: docker_username
password:
from_secret: docker_password
when:
branch: master

1
.gitignore vendored

@ -20,3 +20,4 @@ doc
.spyproject
.cache
_skbuild
acme_log.log

16
Dockerfile Normal file

@ -0,0 +1,16 @@
FROM archlinux
MAINTAINER J.A. de Jong - j.a.dejong@ascee.nl
RUN pacman --noconfirm -Sy
RUN pacman --noconfirm -S git doxygen graphviz lighttpd python-pip
RUN pip install doxypypy
WORKDIR /root
RUN git clone https://code.ascee.nl/ascee/lasp
WORKDIR /root/lasp
RUN doxygen
RUN rm -rf /srv/http
RUN mv doc/html /srv/http
CMD /usr/bin/lighttpd -D -f /etc/lighttpd/lighttpd.conf
EXPOSE 80

360
Doxyfile

@ -1,4 +1,4 @@
# Doxyfile 1.8.17
# Doxyfile 1.9.1
# This file describes the settings to be used by the documentation system
# doxygen (www.doxygen.org) for a project.
@ -51,7 +51,7 @@ PROJECT_BRIEF = "Library for Acoustic Signal Processing"
# pixels and the maximum width should not exceed 200 pixels. Doxygen will copy
# the logo to the output directory.
PROJECT_LOGO = /home/anne/wip/mycode/lasp/img/LASP_200px.png
PROJECT_LOGO = img/LASP_200px.png
# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) path
# into which the generated documentation will be written. If a relative path is
@ -93,6 +93,14 @@ ALLOW_UNICODE_NAMES = NO
OUTPUT_LANGUAGE = English
# The OUTPUT_TEXT_DIRECTION tag is used to specify the direction in which all
# documentation generated by doxygen is written. Doxygen will use this
# information to generate all generated output in the proper direction.
# Possible values are: None, LTR, RTL and Context.
# The default value is: None.
OUTPUT_TEXT_DIRECTION = None
# If the BRIEF_MEMBER_DESC tag is set to YES, doxygen will include brief member
# descriptions after the members that are listed in the file and class
# documentation (similar to Javadoc). Set to NO to disable this.
@ -225,7 +233,7 @@ MULTILINE_CPP_IS_BRIEF = NO
# documentation blocks is shown as doxygen documentation.
# The default value is: YES.
PYTHON_DOCSTRING = YES
PYTHON_DOCSTRING = NO
# If the INHERIT_DOCS tag is set to YES then an undocumented member inherits the
# documentation from any documented member that it re-implements.
@ -250,16 +258,16 @@ TAB_SIZE = 4
# the documentation. An alias has the form:
# name=value
# For example adding
# "sideeffect=@par Side Effects:^^"
# "sideeffect=@par Side Effects:\n"
# will allow you to put the command \sideeffect (or @sideeffect) in the
# documentation, which will result in a user-defined paragraph with heading
# "Side Effects:". Note that you cannot put \n's in the value part of an alias
# to insert newlines (in the resulting output). You can put ^^ in the value part
# of an alias to insert a newline as if a physical newline was in the original
# file. When you need a literal { or } or , in the value part of an alias you
# have to escape them by means of a backslash (\), this can lead to conflicts
# with the commands \{ and \} for these it is advised to use the version @{ and
# @} or use a double escape (\\{ and \\})
# "Side Effects:". You can put \n's in the value part of an alias to insert
# newlines (in the resulting output). You can put ^^ in the value part of an
# alias to insert a newline as if a physical newline was in the original file.
# When you need a literal { or } or , in the value part of an alias you have to
# escape them by means of a backslash (\), this can lead to conflicts with the
# commands \{ and \} for these it is advised to use the version @{ and @} or use
# a double escape (\\{ and \\})
ALIASES =
@ -304,8 +312,8 @@ OPTIMIZE_OUTPUT_SLICE = NO
# extension. Doxygen has a built-in mapping, but you can override or extend it
# using this tag. The format is ext=language, where ext is a file extension, and
# language is one of the parsers supported by doxygen: IDL, Java, JavaScript,
# Csharp (C#), C, C++, Lex, D, PHP, md (Markdown), Objective-C, Python, Slice,
# VHDL, Fortran (fixed format Fortran: FortranFixed, free formatted Fortran:
# Csharp (C#), C, C++, D, PHP, md (Markdown), Objective-C, Python, Slice, VHDL,
# Fortran (fixed format Fortran: FortranFixed, free formatted Fortran:
# FortranFree, unknown formatted Fortran: Fortran. In the later case the parser
# tries to guess whether the code is fixed or free formatted code, this is the
# default for Fortran type files). For instance to make doxygen treat .inc files
@ -458,7 +466,7 @@ LOOKUP_CACHE_SIZE = 0
# than 0 to get more control over the balance between CPU load and processing
# speed. At this moment only the input processing can be done using multiple
# threads. Since this is still an experimental feature the default is set to 1,
# which effectively disables parallel processing. Please report any issues you
# which effectively disables parallel processing. Please report any issues you
# encounter. Generating dot graphs in parallel is controlled by the
# DOT_NUM_THREADS setting.
# Minimum value: 0, maximum value: 32, default value: 1.
@ -602,12 +610,6 @@ HIDE_SCOPE_NAMES = NO
HIDE_COMPOUND_REFERENCE= NO
# If the SHOW_HEADERFILE tag is set to YES then the documentation for a class
# will show which file needs to be included to use the class.
# The default value is: YES.
SHOW_HEADERFILE = YES
# If the SHOW_INCLUDE_FILES tag is set to YES then doxygen will put a list of
# the files that are included by a file in the documentation of that file.
# The default value is: YES.
@ -765,8 +767,7 @@ FILE_VERSION_FILTER =
# output files in an output format independent way. To create the layout file
# that represents doxygen's defaults, run doxygen with the -l option. You can
# optionally specify a file name after the option, if omitted DoxygenLayout.xml
# will be used as the name of the layout file. See also section "Changing the
# layout of pages" for information.
# will be used as the name of the layout file.
#
# Note that if you run doxygen from a directory containing a file called
# DoxygenLayout.xml, doxygen will parse it automatically even if the LAYOUT_FILE
@ -812,26 +813,18 @@ WARNINGS = YES
WARN_IF_UNDOCUMENTED = YES
# If the WARN_IF_DOC_ERROR tag is set to YES, doxygen will generate warnings for
# potential errors in the documentation, such as documenting some parameters in
# a documented function twice, or documenting parameters that don't exist or
# using markup commands wrongly.
# potential errors in the documentation, such as not documenting some parameters
# in a documented function, or documenting parameters that don't exist or using
# markup commands wrongly.
# The default value is: YES.
WARN_IF_DOC_ERROR = YES
# If WARN_IF_INCOMPLETE_DOC is set to YES, doxygen will warn about incomplete
# function parameter documentation. If set to NO, doxygen will accept that some
# parameters have no documentation without warning.
# The default value is: YES.
WARN_IF_INCOMPLETE_DOC = YES
# This WARN_NO_PARAMDOC option can be enabled to get warnings for functions that
# are documented, but have no documentation for their parameters or return
# value. If set to NO, doxygen will only warn about wrong parameter
# documentation, but not about the absence of documentation. If EXTRACT_ALL is
# set to YES then this flag will automatically be disabled. See also
# WARN_IF_INCOMPLETE_DOC
# value. If set to NO, doxygen will only warn about wrong or incomplete
# parameter documentation, but not about the absence of documentation. If
# EXTRACT_ALL is set to YES then this flag will automatically be disabled.
# The default value is: NO.
WARN_NO_PARAMDOC = NO
@ -857,10 +850,7 @@ WARN_FORMAT = "$file:$line: $text"
# The WARN_LOGFILE tag can be used to specify a file to which warning and error
# messages should be written. If left blank the output is written to standard
# error (stderr). In case the file specified cannot be opened for writing the
# warning and error messages are written to standard error. When as file - is
# specified the warning and error messages are written to standard output
# (stdout).
# error (stderr).
WARN_LOGFILE =
@ -898,55 +888,16 @@ INPUT_ENCODING = UTF-8
#
# If left blank the following patterns are tested:*.c, *.cc, *.cxx, *.cpp,
# *.c++, *.java, *.ii, *.ixx, *.ipp, *.i++, *.inl, *.idl, *.ddl, *.odl, *.h,
# *.hh, *.hxx, *.hpp, *.h++, *.l, *.cs, *.d, *.php, *.php4, *.php5, *.phtml,
# *.inc, *.m, *.markdown, *.md, *.mm, *.dox (to be provided as doxygen C
# comment), *.py, *.pyw, *.f90, *.f95, *.f03, *.f08, *.f18, *.f, *.for, *.vhd,
# *.vhdl, *.ucf, *.qsf and *.ice.
# *.hh, *.hxx, *.hpp, *.h++, *.cs, *.d, *.php, *.php4, *.php5, *.phtml, *.inc,
# *.m, *.markdown, *.md, *.mm, *.dox (to be provided as doxygen C comment),
# *.py, *.pyw, *.f90, *.f95, *.f03, *.f08, *.f18, *.f, *.for, *.vhd, *.vhdl,
# *.ucf, *.qsf and *.ice.
FILE_PATTERNS = *.c \
*.cc \
*.cxx \
*.cpp \
*.c++ \
*.java \
*.ii \
*.ixx \
*.ipp \
*.i++ \
*.inl \
*.idl \
*.ddl \
*.odl \
*.h \
*.hh \
*.hxx \
*.hpp \
*.h++ \
*.cs \
*.d \
*.php \
*.php4 \
*.php5 \
*.phtml \
*.inc \
*.m \
*.markdown \
*.md \
*.mm \
*.dox \
*.py \
*.pyw \
*.f90 \
*.f95 \
*.f03 \
*.f08 \
*.f \
*.for \
*.tcl \
*.vhd \
*.vhdl \
*.ucf \
*.qsf
*.py=scripts/py_filter
# The RECURSIVE tag can be used to specify whether or not subdirectories should
# be searched for input files as well.
@ -983,7 +934,7 @@ EXCLUDE_PATTERNS =
# (namespaces, classes, functions, etc.) that should be excluded from the
# output. The symbol name can be a fully qualified name, a word, or if the
# wildcard * is used, a substring. Examples: ANamespace, AClass,
# ANamespace::AClass, ANamespace::*Test
# AClass::ANamespace, ANamespace::*Test
#
# Note that the wildcards are matched against the file with absolute path, so to
# exclude all test directories use the pattern */test/*
@ -1158,6 +1109,44 @@ USE_HTAGS = NO
VERBATIM_HEADERS = YES
# If the CLANG_ASSISTED_PARSING tag is set to YES then doxygen will use the
# clang parser (see:
# http://clang.llvm.org/) for more accurate parsing at the cost of reduced
# performance. This can be particularly helpful with template rich C++ code for
# which doxygen's built-in parser lacks the necessary type information.
# Note: The availability of this option depends on whether or not doxygen was
# generated with the -Duse_libclang=ON option for CMake.
# The default value is: NO.
CLANG_ASSISTED_PARSING = NO
# If clang assisted parsing is enabled and the CLANG_ADD_INC_PATHS tag is set to
# YES then doxygen will add the directory of each input to the include path.
# The default value is: YES.
CLANG_ADD_INC_PATHS = YES
# If clang assisted parsing is enabled you can provide the compiler with command
# line options that you would normally use when invoking the compiler. Note that
# the include paths will already be set by doxygen for the files and directories
# specified with INPUT and INCLUDE_PATH.
# This tag requires that the tag CLANG_ASSISTED_PARSING is set to YES.
CLANG_OPTIONS =
# If clang assisted parsing is enabled you can provide the clang parser with the
# path to the directory containing a file called compile_commands.json. This
# file is the compilation database (see:
# http://clang.llvm.org/docs/HowToSetupToolingForLLVM.html) containing the
# options used when the source files were built. This is equivalent to
# specifying the -p option to a clang tool, such as clang-check. These options
# will then be passed to the parser. Any options specified with CLANG_OPTIONS
# will be added as well.
# Note: The availability of this option depends on whether or not doxygen was
# generated with the -Duse_libclang=ON option for CMake.
CLANG_DATABASE_PATH =
#---------------------------------------------------------------------------
# Configuration options related to the alphabetical class index
#---------------------------------------------------------------------------
@ -1268,7 +1257,7 @@ HTML_EXTRA_FILES =
# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. Doxygen
# will adjust the colors in the style sheet and background images according to
# this color. Hue is specified as an angle on a color-wheel, see
# this color. Hue is specified as an angle on a colorwheel, see
# https://en.wikipedia.org/wiki/Hue for more information. For instance the value
# 0 represents red, 60 is yellow, 120 is green, 180 is cyan, 240 is blue, 300
# purple, and 360 is red again.
@ -1278,7 +1267,7 @@ HTML_EXTRA_FILES =
HTML_COLORSTYLE_HUE = 220
# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of the colors
# in the HTML output. For a value of 0 the output will use gray-scales only. A
# in the HTML output. For a value of 0 the output will use grayscales only. A
# value of 255 will produce the most vivid colors.
# Minimum value: 0, maximum value: 255, default value: 100.
# This tag requires that the tag GENERATE_HTML is set to YES.
@ -1360,13 +1349,6 @@ GENERATE_DOCSET = NO
DOCSET_FEEDNAME = "Doxygen generated docs"
# This tag determines the URL of the docset feed. A documentation feed provides
# an umbrella under which multiple documentation sets from a single provider
# (such as a company or product suite) can be grouped.
# This tag requires that the tag GENERATE_DOCSET is set to YES.
DOCSET_FEEDURL =
# This tag specifies a string that should uniquely identify the documentation
# set bundle. This should be a reverse domain-name style string, e.g.
# com.mycompany.MyDocSet. Doxygen will append .docset to the name.
@ -1392,12 +1374,8 @@ DOCSET_PUBLISHER_NAME = Publisher
# If the GENERATE_HTMLHELP tag is set to YES then doxygen generates three
# additional HTML index files: index.hhp, index.hhc, and index.hhk. The
# index.hhp is a project file that can be read by Microsoft's HTML Help Workshop
# on Windows. In the beginning of 2021 Microsoft took the original page, with
# a.o. the download links, offline (the HTML help workshop was already many years
# in maintenance mode). You can download the HTML help workshop from the web
# archives at Installation executable (see:
# http://web.archive.org/web/20160201063255/http://download.microsoft.com/downlo
# ad/0/A/9/0A939EF6-E31C-430F-A3DF-DFAE7960D564/htmlhelp.exe).
# (see:
# https://www.microsoft.com/en-us/download/details.aspx?id=21138) on Windows.
#
# The HTML Help Workshop contains a compiler that can convert all HTML output
# generated by doxygen into a single compiled HTML file (.chm). Compiled HTML
@ -1556,28 +1534,16 @@ DISABLE_INDEX = NO
# to work a browser that supports JavaScript, DHTML, CSS and frames is required
# (i.e. any modern browser). Windows users are probably better off using the
# HTML help feature. Via custom style sheets (see HTML_EXTRA_STYLESHEET) one can
# further fine tune the look of the index (see "Fine-tuning the output"). As an
# example, the default style sheet generated by doxygen has an example that
# shows how to put an image at the root of the tree instead of the PROJECT_NAME.
# Since the tree basically has the same information as the tab index, you could
# consider setting DISABLE_INDEX to YES when enabling this option.
# further fine-tune the look of the index. As an example, the default style
# sheet generated by doxygen has an example that shows how to put an image at
# the root of the tree instead of the PROJECT_NAME. Since the tree basically has
# the same information as the tab index, you could consider setting
# DISABLE_INDEX to YES when enabling this option.
# The default value is: NO.
# This tag requires that the tag GENERATE_HTML is set to YES.
GENERATE_TREEVIEW = YES
# When both GENERATE_TREEVIEW and DISABLE_INDEX are set to YES, then the
# FULL_SIDEBAR option determines if the side bar is limited to only the treeview
# area (value NO) or if it should extend to the full height of the window (value
# YES). Setting this to YES gives a layout similar to
# https://docs.readthedocs.io with more room for contents, but less room for the
# project logo, title, and description. If either GENERATE_TREEVIEW or
# DISABLE_INDEX is set to NO, this option has no effect.
# The default value is: NO.
# This tag requires that the tag GENERATE_HTML is set to YES.
FULL_SIDEBAR = NO
# The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values that
# doxygen will group on one line in the generated HTML documentation.
#
@ -1602,13 +1568,6 @@ TREEVIEW_WIDTH = 250
EXT_LINKS_IN_WINDOW = NO
# If the OBFUSCATE_EMAILS tag is set to YES, doxygen will obfuscate email
# addresses.
# The default value is: YES.
# This tag requires that the tag GENERATE_HTML is set to YES.
OBFUSCATE_EMAILS = YES
# If the HTML_FORMULA_FORMAT option is set to svg, doxygen will use the pdf2svg
# tool (see https://github.com/dawbarton/pdf2svg) or inkscape (see
# https://inkscape.org) to generate formulas as SVG images instead of PNGs for
@ -1657,29 +1616,11 @@ FORMULA_MACROFILE =
USE_MATHJAX = NO
# With MATHJAX_VERSION it is possible to specify the MathJax version to be used.
# Note that the different versions of MathJax have different requirements with
# regards to the different settings, so it is possible that also other MathJax
# settings have to be changed when switching between the different MathJax
# versions.
# Possible values are: MathJax_2 and MathJax_3.
# The default value is: MathJax_2.
# This tag requires that the tag USE_MATHJAX is set to YES.
MATHJAX_VERSION = MathJax_2
# When MathJax is enabled you can set the default output format to be used for
# the MathJax output. For more details about the output format see MathJax
# version 2 (see:
# http://docs.mathjax.org/en/v2.7-latest/output.html) and MathJax version 3
# (see:
# http://docs.mathjax.org/en/latest/web/components/output.html).
# the MathJax output. See the MathJax site (see:
# http://docs.mathjax.org/en/v2.7-latest/output.html) for more details.
# Possible values are: HTML-CSS (which is slower, but has the best
# compatibility. This is the name for Mathjax version 2, for MathJax version 3
# this will be translated into chtml), NativeMML (i.e. MathML. Only supported
# for MathJax 2. For MathJax version 3 chtml will be used instead.), chtml (This
# is the name for Mathjax version 3, for MathJax version 2 this will be
# translated into HTML-CSS) and SVG.
# compatibility), NativeMML (i.e. MathML) and SVG.
# The default value is: HTML-CSS.
# This tag requires that the tag USE_MATHJAX is set to YES.
@ -1692,21 +1633,15 @@ MATHJAX_FORMAT = HTML-CSS
# MATHJAX_RELPATH should be ../mathjax. The default value points to the MathJax
# Content Delivery Network so you can quickly see the result without installing
# MathJax. However, it is strongly recommended to install a local copy of
# MathJax from https://www.mathjax.org before deployment. The default value is:
# - in case of MathJax version 2: https://cdn.jsdelivr.net/npm/mathjax@2
# - in case of MathJax version 3: https://cdn.jsdelivr.net/npm/mathjax@3
# MathJax from https://www.mathjax.org before deployment.
# The default value is: https://cdn.jsdelivr.net/npm/mathjax@2.
# This tag requires that the tag USE_MATHJAX is set to YES.
MATHJAX_RELPATH = https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.2/
# The MATHJAX_EXTENSIONS tag can be used to specify one or more MathJax
# extension names that should be enabled during MathJax rendering. For example
# for MathJax version 2 (see
# https://docs.mathjax.org/en/v2.7-latest/tex.html#tex-and-latex-extensions):
# MATHJAX_EXTENSIONS = TeX/AMSmath TeX/AMSsymbols
# For example for MathJax version 3 (see
# http://docs.mathjax.org/en/latest/input/tex/extensions/index.html):
# MATHJAX_EXTENSIONS = ams
# This tag requires that the tag USE_MATHJAX is set to YES.
MATHJAX_EXTENSIONS =
@ -1886,31 +1821,29 @@ PAPER_TYPE = a4
EXTRA_PACKAGES =
# The LATEX_HEADER tag can be used to specify a user-defined LaTeX header for
# the generated LaTeX document. The header should contain everything until the
# first chapter. If it is left blank doxygen will generate a standard header. It
# is highly recommended to start with a default header using
# doxygen -w latex new_header.tex new_footer.tex new_stylesheet.sty
# and then modify the file new_header.tex. See also section "Doxygen usage" for
# information on how to generate the default header that doxygen normally uses.
# The LATEX_HEADER tag can be used to specify a personal LaTeX header for the
# generated LaTeX document. The header should contain everything until the first
# chapter. If it is left blank doxygen will generate a standard header. See
# section "Doxygen usage" for information on how to let doxygen write the
# default header to a separate file.
#
# Note: Only use a user-defined header if you know what you are doing!
# Note: The header is subject to change so you typically have to regenerate the
# default header when upgrading to a newer version of doxygen. The following
# commands have a special meaning inside the header (and footer): For a
# description of the possible markers and block names see the documentation.
# Note: Only use a user-defined header if you know what you are doing! The
# following commands have a special meaning inside the header: $title,
# $datetime, $date, $doxygenversion, $projectname, $projectnumber,
# $projectbrief, $projectlogo. Doxygen will replace $title with the empty
# string, for the replacement values of the other commands the user is referred
# to HTML_HEADER.
# This tag requires that the tag GENERATE_LATEX is set to YES.
LATEX_HEADER =
# The LATEX_FOOTER tag can be used to specify a user-defined LaTeX footer for
# the generated LaTeX document. The footer should contain everything after the
# last chapter. If it is left blank doxygen will generate a standard footer. See
# The LATEX_FOOTER tag can be used to specify a personal LaTeX footer for the
# generated LaTeX document. The footer should contain everything after the last
# chapter. If it is left blank doxygen will generate a standard footer. See
# LATEX_HEADER for more information on how to generate a default footer and what
# special commands can be used inside the footer. See also section "Doxygen
# usage" for information on how to generate the default footer that doxygen
# normally uses. Note: Only use a user-defined footer if you know what you are
# doing!
# special commands can be used inside the footer.
#
# Note: Only use a user-defined footer if you know what you are doing!
# This tag requires that the tag GENERATE_LATEX is set to YES.
LATEX_FOOTER =
@ -1955,7 +1888,8 @@ USE_PDFLATEX = YES
# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \batchmode
# command to the generated LaTeX files. This will instruct LaTeX to keep running
# if errors occur, instead of asking the user for help.
# if errors occur, instead of asking the user for help. This option is also used
# when generating formulas in HTML.
# The default value is: NO.
# This tag requires that the tag GENERATE_LATEX is set to YES.
@ -1968,6 +1902,16 @@ LATEX_BATCHMODE = NO
LATEX_HIDE_INDICES = NO
# If the LATEX_SOURCE_CODE tag is set to YES then doxygen will include source
# code with syntax highlighting in the LaTeX output.
#
# Note that which sources are shown also depends on other settings such as
# SOURCE_BROWSER.
# The default value is: NO.
# This tag requires that the tag GENERATE_LATEX is set to YES.
LATEX_SOURCE_CODE = NO
# The LATEX_BIB_STYLE tag can be used to specify the style to use for the
# bibliography, e.g. plainnat, or ieeetr. See
# https://en.wikipedia.org/wiki/BibTeX and \cite for more info.
@ -2048,6 +1992,16 @@ RTF_STYLESHEET_FILE =
RTF_EXTENSIONS_FILE =
# If the RTF_SOURCE_CODE tag is set to YES then doxygen will include source code
# with syntax highlighting in the RTF output.
#
# Note that which sources are shown also depends on other settings such as
# SOURCE_BROWSER.
# The default value is: NO.
# This tag requires that the tag GENERATE_RTF is set to YES.
RTF_SOURCE_CODE = NO
#---------------------------------------------------------------------------
# Configuration options related to the man page output
#---------------------------------------------------------------------------
@ -2144,6 +2098,15 @@ GENERATE_DOCBOOK = NO
DOCBOOK_OUTPUT = docbook
# If the DOCBOOK_PROGRAMLISTING tag is set to YES, doxygen will include the
# program listings (including syntax highlighting and cross-referencing
# information) to the DOCBOOK output. Note that enabling this will significantly
# increase the size of the DOCBOOK output.
# The default value is: NO.
# This tag requires that the tag GENERATE_DOCBOOK is set to YES.
DOCBOOK_PROGRAMLISTING = NO
#---------------------------------------------------------------------------
# Configuration options for the AutoGen Definitions output
#---------------------------------------------------------------------------
@ -2156,6 +2119,10 @@ DOCBOOK_OUTPUT = docbook
GENERATE_AUTOGEN_DEF = NO
#---------------------------------------------------------------------------
# Configuration options related to Sqlite3 output
#---------------------------------------------------------------------------
#---------------------------------------------------------------------------
# Configuration options related to the Perl module output
#---------------------------------------------------------------------------
@ -2322,6 +2289,15 @@ EXTERNAL_PAGES = YES
# Configuration options related to the dot tool
#---------------------------------------------------------------------------
# If the CLASS_DIAGRAMS tag is set to YES, doxygen will generate a class diagram
# (in HTML and LaTeX) for classes with base or super classes. Setting the tag to
# NO turns the diagrams off. Note that this option also works with HAVE_DOT
# disabled, but it is recommended to install and use dot, since it yields more
# powerful graphs.
# The default value is: YES.
CLASS_DIAGRAMS = YES
# You can include diagrams made with dia in doxygen documentation. Doxygen will
# then run dia to produce the diagram and insert it in the documentation. The
# DIA_PATH tag allows you to specify the directory where the dia binary resides.
@ -2340,7 +2316,7 @@ HIDE_UNDOC_RELATIONS = YES
# http://www.graphviz.org/), a graph visualization toolkit from AT&T and Lucent
# Bell Labs. The other options in this section have no effect if this option is
# set to NO
# The default value is: NO.
# The default value is: YES.
HAVE_DOT = YES
@ -2378,14 +2354,11 @@ DOT_FONTSIZE = 10
DOT_FONTPATH =
# If the CLASS_GRAPH tag is set to YES (or GRAPH) then doxygen will generate a
# graph for each documented class showing the direct and indirect inheritance
# relations. In case HAVE_DOT is set as well dot will be used to draw the graph,
# otherwise the built-in generator will be used. If the CLASS_GRAPH tag is set
# to TEXT the direct and indirect inheritance relations will be shown as texts /
# links.
# Possible values are: NO, YES, TEXT and GRAPH.
# If the CLASS_GRAPH tag is set to YES then doxygen will generate a graph for
# each documented class showing the direct and indirect inheritance relations.
# Setting this tag to YES will force the CLASS_DIAGRAMS tag to NO.
# The default value is: YES.
# This tag requires that the tag HAVE_DOT is set to YES.
CLASS_GRAPH = YES
@ -2514,13 +2487,6 @@ GRAPHICAL_HIERARCHY = YES
DIRECTORY_GRAPH = YES
# The DIR_GRAPH_MAX_DEPTH tag can be used to limit the maximum number of levels
# of child directories generated in directory dependency graphs by dot.
# Minimum value: 1, maximum value: 25, default value: 1.
# This tag requires that the tag DIRECTORY_GRAPH is set to YES.
DIR_GRAPH_MAX_DEPTH = 1
# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images
# generated by dot. For an explanation of the image formats see the section
# output formats in the documentation of the dot tool (Graphviz (see:
@ -2528,7 +2494,9 @@ DIR_GRAPH_MAX_DEPTH = 1
# Note: If you choose svg you need to set HTML_FILE_EXTENSION to xhtml in order
# to make the SVG files visible in IE 9+ (other browsers do not have this
# requirement).
# Possible values are: png, jpg, gif, svg, png:gd, png:gd:gd, png:cairo,
# Possible values are: png, png:cairo, png:cairo:cairo, png:cairo:gd, png:gd,
# png:gd:gd, jpg, jpg:cairo, jpg:cairo:gd, jpg:gd, jpg:gd:gd, gif, gif:cairo,
# gif:cairo:gd, gif:gd, gif:gd:gd, svg, png:gd, png:gd:gd, png:cairo,
# png:cairo:gd, png:cairo:cairo, png:cairo:gdiplus, png:gdiplus and
# png:gdiplus:gdiplus.
# The default value is: png.
@ -2574,10 +2542,10 @@ MSCFILE_DIRS =
DIAFILE_DIRS =
# When using plantuml, the PLANTUML_JAR_PATH tag should be used to specify the
# path where java can find the plantuml.jar file or to the filename of jar file
# to be used. If left blank, it is assumed PlantUML is not used or called during
# a preprocessing step. Doxygen will generate a warning when it encounters a
# \startuml command in this case and will not generate output for the diagram.
# path where java can find the plantuml.jar file. If left blank, it is assumed
# PlantUML is not used or called during a preprocessing step. Doxygen will
# generate a warning when it encounters a \startuml command in this case and
# will not generate output for the diagram.
PLANTUML_JAR_PATH =
@ -2639,8 +2607,6 @@ DOT_MULTI_TARGETS = NO
# If the GENERATE_LEGEND tag is set to YES doxygen will generate a legend page
# explaining the meaning of the various boxes and arrows in the dot generated
# graphs.
# Note: This tag requires that UML_LOOK isn't set, i.e. the doxygen internal
# graphical representation for inheritance and collaboration diagrams is used.
# The default value is: YES.
# This tag requires that the tag HAVE_DOT is set to YES.
@ -2649,8 +2615,8 @@ GENERATE_LEGEND = YES
# If the DOT_CLEANUP tag is set to YES, doxygen will remove the intermediate
# files that are used to generate the various graphs.
#
# Note: This setting is not only used for dot files but also for msc temporary
# files.
# Note: This setting is not only used for dot files but also for msc and
# plantuml temporary files.
# The default value is: YES.
DOT_CLEANUP = YES

174
README.md

@ -1,108 +1,112 @@
# Library for Acoustic Signal Processing
Welcome to LASP: Library for Acoustic Signal Processing. LASP is a C++ library
with a Python interface which is supposed to process (multi-) microphone
acoustic data in real time on a PC and output results.
- Master branch: [![Build Status](https://drone.ascee.nl/api/badges/ASCEE/lasp/status.svg?ref=refs/heads/master)](https://drone.ascee.nl/ASCEE/lasp)
- Develop branch: [![Build Status](https://drone.ascee.nl/api/badges/ASCEE/lasp/status.svg?ref=refs/heads/develop)](https://drone.ascee.nl/ASCEE/lasp)
At the time of this writing, we are not yet sure whether the Raspberry
Pi will have enough computational power to this end, but maybe by the time it
is finished, a new, faster generation will be available :).
Welcome to LASP: Library for Acoustic Signal Processing. LASP is a C++ library
with a Python interface which is supposed to acquire and process (multi) sensor data in real time on a PC and output results.
Current features that are implemented:
- Compile-time determination of the floating-point accuracy (32/64 bit)
- Fast convolution FIR filter implementation
- Sample rate decimation by an integer factor of 4.
- Octave filterbank FIR filters designed to comply with IEC 61260
(1995).
- Communication with data acquisition (DAQ) devices, of which:
- Internal sound cards via the [RtAudio](http://www.music.mcgill.ca/~gary/rtaudio) backend. Many thanks to Gary P. Scavone et al.
- [Measurement Computing](https://www.mccdaq.com) [DT9838A](https://www.mccdaq.com/Products/Sound-Vibration-DAQ/DT9837) signal analyzer.
- Configuration of DAQ devices: AC coupling, IEPE, sensitivity physical
quantities.
- Recording of signals from these DAQ devices, and storing in a HDF5 file.
- Filter designers to create A/C sound pressure weighting
- Biquad filter designers for low pass, high pass, peaking and notch filters
- A Peak Programme Meter (PPM) to monitor signal levels from DAQ and to watch
for signal clipping.
- A signal generator to create sine waves, sweeps and noise (white / pink).
- Equalizers to equalize the output prior to sending.
- Averaged power spectra and power spectral density determination
using Welch's method. Taper functions of Hann, Hamming, Bartlett and
Blackman are provided.
- A thread-safe job queue including routines to create worker threads.
- Several linear algebra routines (wrappers around BLAS and LAPACK).
- A nice debug tracer implementation
- Third octave filter bank FIR filters designed to comply with IEC 61260
- (One third) octave filter bank filters designed to comply with IEC 61260
(1995).
- Slow and fast time updates of (A/C/Z) weighted sound pressure levels
- Full Sound Level Meter implementation
- Real time Sound Level meter, Power / Transfer function estimator
- Spectra data smoothing algorithms
- Sensor calibration for microphones
Future features (wish-list)
- Conventional and delay-and-sum beam-forming algorithms
For now, the source code is well-documented but it requires some
additional documentation (the math behind it). This will be published
in a sister repository (https://code.ascee.nl/ascee/lasp-doc).
- Conventional and delay-and-sum beam-forming algorithms
- Impedance tube measurement processing
For now, the source code is well-documented on [lasp.ascee.nl](https://lasp.ascee.nl), but it requires some
additional documentation (the math behind it). This is maintained
in a sister repository, [lasp-doc](https://code.ascee.nl/ascee/lasp-doc).
If you have any question(s), please feel free to contact us: info@ascee.nl.
# Installation
# Building from source
## Dependencies
Two commands that install all requirements (for Ubuntu / Linux Mint)
- `pip install scipy numpy build scikit-build appdirs`
- `sudo apt install libusb-dev libpulse-dev libboost-dev`
## Runtime dependencies (Linux)
- FFTW (for really fast FFTs). If compiled with Fftpack, this library is not
required.
- libUlDAQ, for the Measurement Computing DT9837A USB DAQ box
- GNU Autotools, for compiling libUlDAQ
- RtAudio, for Audio DAQ backends
- libusb
- BLAS (OpenBLAS, other).
## Editable install
In the root directory of the repository, run:
- `pip3 install --user -e .`
- `cmake .`
- `make -j`
- `$ sudo apt install python3-pybind11 libopenblas-dev python3-pip python3-scipy libusb-1.0-0-dev libpulse-dev cmake-curses-gui python3-h5py`
- `$ pip3 install --user -r requirements.txt`
## Build dependencies
Optional dependencies, which can be turned ON/OFF using CMake:
- Build tools: a compiler, [CMake](http://cmake.org), and the Python packages:
- Scipy
- Numpy
- appdirs
These can all be installed using:
- The following Python packages also need to be available:
- `Scipy` (which includes Numpy). Install with `sudo apt install
python3-scipy`, or `pacman -S scipy`.
- `appdirs`, which can be grabbed from [PyPI](https://pypi.org)
## Compilation of LASP
### Archlinux
Compiling the code on Archlinux requires the following packages to be available:
- openblas-lapack (AUR)
- Python>=3.7
- Numpy (Python-numpy)
- Cython
### Ubuntu / Linux Mint
*Only tested with Linux Mint 18.04.* We require the following packages for
compilation:
- build-essential
- cython
- python3-numpy
- libopenblas
If building RtAudio with the ALSA backend:
If building RtAudio with the ALSA backend, you will also require the following packages:
- libclalsadrv-dev
- libopenblas-base
- libopenblas-dev
- libusb-1.0-dev
If building RtAudio with the Jack Audio Connection Kit (JACK) backend, you will also require the following packages:
- libjack-jackd2-dev
## Download & build
- `$ git clone --recursive https://code.ascee.nl/ASCEE/lasp.git`
- `$ cd lasp`
For a release build:
- `$ cmake .`
or optionally for a custom build:
- `$ ccmake .`
Configure and run:
- `$ make -j`
### Build documentation
Install the documentation dependencies:
`$ sudo apt install doxygen graphviz`
`$ pip install doxypypy`
While still in the lasp directory:
`$ doxygen`
This will build the documentation. It can be read by:
`$ <YOUR-BROWSER> doc/html/index.html`
Or via docker:
`$ docker build -t lasp_ascee_nl:latest .`
## Install
For an editable install (while developing):
- `$ pip3 install --prefix=$HOME/.local -e .`
To install locally, for a fixed version:
- `$ pip3 install --prefix=$HOME/.local .`
## Usage
- See examples directories for IPython notebooks.
- Please refer to the [documentation](https://lasp.ascee.nl/) for features.
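
As a quick orientation, below is a minimal, self-contained sketch of the kind of processing LASP provides (Welch-averaged power spectra of a test signal). It intentionally uses only `numpy` and `scipy` rather than LASP's own classes, whose exact call signatures are not documented here; see the IPython notebooks and the [documentation](https://lasp.ascee.nl/) for the real API.

```python
import numpy as np
from scipy.signal import welch

fs = 48000                          # sample rate [Hz]
t = np.arange(fs) / fs              # one second of samples
x = np.sin(2 * np.pi * 1000 * t)    # 1 kHz sine, unit amplitude

# Welch-averaged power spectral density with a Hann taper, comparable in
# spirit to LASP's averaged power spectra implementation.
f, Pxx = welch(x, fs=fs, window='hann', nperseg=2048)

# Overall level re. full scale; for a unit-amplitude sine this is ~ -3 dB.
level_dB = 10 * np.log10(np.sum(Pxx) * (f[1] - f[0]))
print(f"Overall level: {level_dB:.1f} dB")
```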

View File

@ -2,15 +2,10 @@
import numpy as np
from lasp import SeriesBiquad, AvPowerSpectra
from lasp.filter import SPLFilterDesigner
import matplotlib.pyplot as plt
from scipy.signal import sosfreqz
# plt.close('all')
plt.close('all')
# def test_cppslm2():
# """
# Generate a sine wave, now A-weighted
# """
fs = 48000
omg = 2*np.pi*1000

View File

@ -1,17 +0,0 @@
[project] # Project metadata
name = "lasp"
readme = "README.md"
requires-python = ">=3.8"
license = { "file" = "LICENSE" }
authors = [{ "name" = "J.A. de Jong et al.", "email" = "info@ascee.nl" }]
classifiers = [
"Topic :: Scientific/Engineering",
"Programming Language :: Python :: 3.8",
"Operating System :: POSIX :: Linux",
"Operating System :: Microsoft :: Windows",
]
# urls = { "Documentation" = "https://" }
dynamic = ["version", "description"]

3
requirements.txt Normal file

@ -0,0 +1,3 @@
appdirs
dataclasses_json
matplotlib

4
scripts/py_filter Executable file

@ -0,0 +1,4 @@
#!/bin/bash
# Script used to filter Python files such that it can be used together with
# Doxygen
doxypypy -a -c $1

View File

@ -1,16 +1,22 @@
import glob
import glob, os
import platform
from setuptools import setup
if 'Linux' in platform.platform():
extension = list(glob.glob('src/lasp/lasp_cpp.cpython*'))
if len(extension) == 0:
ext_name_glob = 'lasp_cpp.cpython*'
extensions = list(glob.glob('src/lasp/' + ext_name_glob))
# Split off the path from the file name.
ext_names = [os.path.split(a)[1] for a in extensions]
print(extensions)
if len(extensions) == 0:
raise RuntimeError('Please first run CMake to build extension')
elif len(extension) > 1:
elif len(extensions) > 1:
raise RuntimeError('Too many extension files found')
pkgdata = extension
pkgdata = ext_names
else:
raise RuntimeError('Not yet Windows-proof')
@ -24,9 +30,6 @@ classifiers = [
keywords = ["DSP", "DAQ", "Signal processing"]
with open('README.md', 'r') as f:
readme = f.read()
setup(
name="lasp",
@ -40,12 +43,11 @@ setup(
classifiers=classifiers,
keywords=keywords,
license="MIT",
readme=readme,
dependencies=["numpy", "scipy", "appdirs", "h5py", "appdirs",
"dataclasses_json"],
package_dir={"": "src"},
packages=['lasp', 'lasp.filter', 'lasp.tools'],
data_files = pkgdata,
include_package_data=True,
package_dir={'': 'src'},
package_data={'lasp': pkgdata},
python_requires='>=3.8',
)
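
A small, hedged sanity check of the packaging logic above: after `cmake . && make -j2` has produced the compiled extension in `src/lasp/` and an editable `pip3 install -e .` has run, the extension should be importable from the `lasp` package. The module name `lasp_cpp` is inferred from the glob pattern in this file; adjust it if the extension is named differently.

```python
# Hypothetical check; assumes the extension built by CMake is named
# lasp_cpp.cpython-*.so, as implied by the glob pattern in setup.py.
import importlib

lasp_cpp = importlib.import_module("lasp.lasp_cpp")
print(lasp_cpp.__file__)  # expected to point at src/lasp/lasp_cpp.cpython-*.so
```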

View File

@ -1,4 +1,3 @@
# Comments are what is imported, state of 6-8-2021
"""
LASP: Library for Acoustic Signal Processing
@ -14,6 +13,7 @@ from .lasp_octavefilter import *
from .lasp_slm import * # SLM, Dummy
from .lasp_record import * # RecordStatus, Recording
from .lasp_daqconfigs import *
from .lasp_measurementset import *
# from .lasp_siggen import * # SignalType, NoiseType, SiggenMessage, SiggenData, Siggen
# from .lasp_weighcal import * # WeighCal
# from .tools import * # SmoothingType, smoothSpectralData, SmoothingWidth

View File

@ -8,6 +8,7 @@ add_library(lasp_device_lib OBJECT
lasp_rtaudiodaq.cpp
lasp_streammgr.cpp
lasp_uldaq.cpp
lasp_uldaq_impl.cpp
)
# Callback requires certain arguments that are not used by code. This disables

View File

@ -44,7 +44,6 @@ public:
systemError,
threadError,
logicError,
apiSpecificError
};
/**
@ -59,6 +58,7 @@ public:
{StreamError::threadError, "Thread error"},
{StreamError::logicError, "Logic error (probably a bug)"},
};
bool isRunning = false;
/**
* @brief Check if stream has error
@ -78,6 +78,25 @@ public:
* @return as described above.
*/
bool runningOK() const { return isRunning && !error(); }
}; // End of class StreamStatus
using rte = std::runtime_error;
/**
* @brief Used for internal throwing of exceptions.
*/
class StreamException : public rte {
using StreamError = StreamStatus::StreamError;
public:
StreamStatus::StreamError e;
StreamException(const StreamStatus::StreamError e)
: rte(StreamStatus::errorMessages.at(e)), e(e) {}
StreamException(const StreamStatus::StreamError e,
const std::string &additional_info)
: rte(StreamStatus::errorMessages.at(e) + ": " + additional_info),
e(e) {}
operator StreamError() { return e; }
};
/**

View File

@ -1,8 +1,8 @@
/* #define DEBUGTRACE_ENABLED */
#include "debugtrace.hpp"
#include <armadillo>
#include "lasp_daqdata.h"
#include "debugtrace.hpp"
#include "lasp_mathtypes.h"
#include <armadillo>
#include <cassert>
#include <memory>
@ -34,19 +34,20 @@ DaqData::DaqData(const us nframes, const us nchannels,
DaqData::DaqData(const DaqData &o) : DaqData(o.nframes, o.nchannels, o.dtype) {
DEBUGTRACE_ENTER;
/* std::copy(o._data, &o._data[sw * nchannels * nframes], _data); */
memcpy(_data, o._data, sw * nchannels * nframes);
}
/* DaqData::DaqData(DaqData &&o) */
/* : nframes(o.nframes), nchannels(o.nchannels), dtype(o.dtype), */
/* dtype_descr(std::move(o.dtype_descr)), sw(o.sw) { */
DaqData::DaqData(DaqData &&o)
: nframes(o.nframes), nchannels(o.nchannels), dtype(o.dtype),
dtype_descr(std::move(o.dtype_descr)), sw(o.sw) {
/* DEBUGTRACE_ENTER; */
/* _data = o._data; */
/* /// Nullptrs do not get deleted */
/* o._data = nullptr; */
/* } */
/// Steal from other one
DEBUGTRACE_ENTER;
_data = o._data;
/// Nullptrs do not get deleted
o._data = nullptr;
}
DaqData::~DaqData() {
DEBUGTRACE_ENTER;
@ -59,12 +60,15 @@ void DaqData::copyInFromRaw(const std::vector<byte_t *> &ptrs) {
us ch = 0;
assert(ptrs.size() == nchannels);
for (auto &ptr : ptrs) {
assert(ch < nchannels);
memcpy(&_data[sw * ch * nframes], ptr, sw * nframes);
/* std::copy(ptr, ptr + sw * nframes, &_data[sw * ch * nframes]); */
copyInFromRaw(ch, ptr);
ch++;
}
}
void DaqData::copyInFromRaw(const us channel, const byte_t *ptr) {
DEBUGTRACE_ENTER;
assert(ptr);
memcpy(&_data[sw * channel * nframes], ptr, sw * nframes);
}
void DaqData::copyToRaw(const us channel, byte_t *ptr) {
/* std::copy(raw_ptr(0, channel), raw_ptr(nframes, channel), ptr); */
@ -141,7 +145,7 @@ d DaqData::toFloat(const us frame, const us channel) const {
vd DaqData::toFloat(const us channel_no) const {
DEBUGTRACE_ENTER;
using DataType = DataTypeDescriptor::DataType;
cerr << (int) dtype << endl;
/* cerr << (int)dtype << endl; */
switch (dtype) {
case (DataType::dtype_int8): {
return toFloat<int8_t>(channel_no);
@ -166,7 +170,6 @@ vd DaqData::toFloat(const us channel_no) const {
return vd();
}
dmat DaqData::toFloat() const {
DEBUGTRACE_ENTER;
@ -204,17 +207,92 @@ dmat DaqData::toFloat() const {
return dmat();
}
template <typename T>
void DaqData::fromFloat(const us frame, const us channel, const d val) {
DEBUGTRACE_ENTER;
#if LASP_DEBUG == 1
check_type<T>();
#endif
if constexpr (std::is_integral<T>::value) {
value<T>(frame, channel) =
static_cast<T>(val * std::numeric_limits<T>::max());
} else {
value<T>(frame, channel) = static_cast<T>(val);
}
}
void DaqData::fromFloat(const us frame, const us channel, const d val) {
using DataType = DataTypeDescriptor::DataType;
switch (dtype) {
case (DataType::dtype_int8):
return fromFloat<int8_t>(frame, channel, val);
break;
case (DataType::dtype_int16):
return fromFloat<int16_t>(frame, channel, val);
break;
case (DataType::dtype_int32):
return fromFloat<int32_t>(frame, channel, val);
break;
case (DataType::dtype_fl32):
return fromFloat<float>(frame, channel, val);
break;
case (DataType::dtype_fl64):
return fromFloat<double>(frame, channel, val);
break;
default:
throw std::runtime_error("BUG");
} // End of switch
}
void DaqData::fromFloat(const us channel, const vd &vals) {
if (vals.size() != nframes) {
throw rte("Invalid number of frames in channel data");
}
using DataType = DataTypeDescriptor::DataType;
switch (dtype) {
case (DataType::dtype_int8):
for (us frame = 0; frame < nframes; frame++) {
fromFloat<int8_t>(frame, channel, vals(frame));
}
break;
case (DataType::dtype_int16):
for (us frame = 0; frame < nframes; frame++) {
fromFloat<int16_t>(frame, channel, vals(frame));
}
break;
case (DataType::dtype_int32):
for (us frame = 0; frame < nframes; frame++) {
fromFloat<int32_t>(frame, channel, vals(frame));
}
break;
case (DataType::dtype_fl32):
for (us frame = 0; frame < nframes; frame++) {
fromFloat<float>(frame, channel, vals(frame));
}
break;
case (DataType::dtype_fl64):
for (us frame = 0; frame < nframes; frame++) {
fromFloat<double>(frame, channel, vals(frame));
}
break;
default:
throw std::runtime_error("BUG");
} // End of switch
}
void DaqData::print() const {
cout << "Number of frames: " << nframes << endl;
cout << "Number of channels: " << nchannels << endl;
cout << "DataType: " << dtype_map.at(dtype).name << endl;
cout << "First sample of first channel (as float)" << toFloat(0, 0) << endl;
cout << "Last sample of first channel (as float)" << toFloat(nframes-1,0) << endl;
cout << "Last sample of last channel (as float)" << toFloat(nframes-1,nchannels-1) << endl;
cout << "Last sample of first channel (as float)" << toFloat(nframes - 1, 0)
<< endl;
cout << "Last sample of last channel (as float)"
<< toFloat(nframes - 1, nchannels - 1) << endl;
dmat data = toFloat();
vrd max = arma::max(data, 0);
vrd min = arma::min(data, 0);
cout << "Maximum value in buf: " << max << endl;
cout << "Minumum value in buf: " << min << endl;
}
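
The new `fromFloat` overloads above scale floating-point values in [-1, 1] to the full range of the native integer sample type (multiplication by `std::numeric_limits<T>::max()`), mirroring how `toFloat` scales integer samples back to floats. A small numpy illustration of that round trip, as a sketch rather than LASP code:

```python
import numpy as np

def from_float(vals, dtype=np.int16):
    # Scale floats in [-1, 1] to the full signed-integer range, as
    # DaqData::fromFloat does with numeric_limits<T>::max().
    return (np.asarray(vals) * np.iinfo(dtype).max).astype(dtype)

def to_float(raw):
    # Scale integer samples back to [-1, 1], mirroring DaqData::toFloat.
    return raw.astype(np.float64) / np.iinfo(raw.dtype).max

x = np.array([0.0, 0.5, -1.0])
print(to_float(from_float(x)))  # ~[ 0. , 0.5, -1. ] up to quantization error
```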

View File

@ -65,9 +65,9 @@ public:
* @brief Initialize using no allocation
*/
DaqData(const DaqData &);
/* DaqData(DaqData &&); */
DaqData(DaqData &&);
DaqData &operator=(const DaqData &) = delete;
virtual ~DaqData();
~DaqData();
/**
* @brief Return pointer to the raw data corresponding to a certain sample
@ -104,6 +104,15 @@ public:
*/
void copyInFromRaw(const std::vector<byte_t *> &ptrs);
/**
* @brief Copy data for a single channel from a raw pointer of channel data.
* Overwrites any existing available data.
*
* @param channel The channel to copy to
* @param ptr Pointer to the channel data
*/
void copyInFromRaw(const us channel, const byte_t *ptr);
/**
* @brief Copy contents of DaqData for a certain channel to a raw pointer.
*
@ -126,7 +135,7 @@ public:
* column vector of floats. For data that is not already floating-point,
* the data is scaled back from MAX_INT to +1.0.
*
* @param channel The channel to convert
* @param channel_no The channel number to convert
*
* @return Array of floats
*/
@ -137,13 +146,33 @@ public:
* column vector of floats. For data that is not already floating-point,
* the data is scaled back from MAX_INT to +1.0.
*
* @param channel The channel to convert
* @param frame_no The frame number to convert
* @param channel_no The channel number to convert
*
* @return Float value
*/
d toFloat(const us frame_no, const us channel_no) const;
/**
* @brief Convert to channel data of native type from floating point values.
* Useful for 'changing' raw data in any way.
*
* @param frame_no The frame
* @param channel_no The channel
* @param data The value
*/
void fromFloat(const us frame_no, const us channel_no,
const d data);
/**
* @brief Convert to channel data of native type from floating point values.
* Useful for 'changing' raw data in any way.
*
* @param channel The channel to convert
* @param data Data to convert from float values
*/
void fromFloat(const us channel, const arma::Col<d> &data);
// Return value based on type
template <typename T> T &value(const us frame, const us channel) {
#if LASP_DEBUG == 1
@ -188,6 +217,11 @@ protected:
template <typename T> arma::Col<d> toFloat(const us channel_no) const;
template <typename T> d toFloat(const us frame_no, const us channel_no) const;
/* template <typename T> void fromFloat(const dmat&); */
template <typename T>
void fromFloat(const us channel_no, const arma::Col<d> &vals);
template <typename T>
void fromFloat(const us frame_no, const us channel_no, const d val);
/**
* @brief Return a value as floating point. Does a conversion from integer to
* float, if the stored type is integer. Also scales by its maximum value.

View File

@ -1,80 +0,0 @@
__all__ = ['DaqChannel']
import json, logging
from dataclasses import dataclass, field
from typing import List
import numpy as np
from dataclasses_json import dataclass_json
from ..lasp_common import Qty, SIQtys
@dataclass_json
@dataclass(eq=False)
class DaqChannel:
channel_enabled: bool
channel_name: str = 'Unnamed channel'
sensitivity: float = 1.0
range_index: int = 0
ACCoupling_enabled: bool = False
IEPE_enabled: bool = False
channel_metadata: str = ''
def __post_init__(self):
# logging.debug(f'__post_init__({self.channel_name})')
self._qty = SIQtys.default()
# Whether a digital high-pass filter should be used on input data.
# Negative means disabled. A positive number corresponds to the cut-on
# frequency of the installed highpass filter.
self._highpass = -1.0
try:
meta = json.loads(self.channel_metadata)
if 'qty' in meta:
# The quantity itself is stored as a JSON string, in the JSON
# object called channel_metadata.
self._qty = Qty.from_json(meta['qty'])
if 'highpass' in meta:
self._highpass = meta['highpass']
except json.JSONDecodeError:
logging.debug(f'No JSON data found in DaqChannel {self.channel_name}')
@property
def qty(self):
return self._qty
@qty.setter
def qty(self, newqty):
self._qty = newqty
self._store('qty', newqty.to_json())
@property
def highpass(self):
return self._highpass
@highpass.setter
def highpass(self, newvalue: float):
newvalue = float(newvalue)
self._highpass = newvalue
self._store('highpass', newvalue)
def _store(self, name, val):
try:
meta = json.loads(self.channel_metadata)
except json.JSONDecodeError:
meta = {}
meta[name] = val
self.channel_metadata = json.dumps(meta)
def __eq__(self, other):
"""
We overwrite the default equal-check as the floating point values
cannot be exactly checked due to conversion from/to JSON
"""
return (self.channel_enabled == other.channel_enabled and
self.channel_name == other.channel_name and
self.range_index == other.range_index and
self.ACCoupling_enabled == other.ACCoupling_enabled and
self.IEPE_enabled == other.IEPE_enabled and
self.channel_metadata == other.channel_metadata and
np.isclose(self.highpass,other.highpass))

View File

@ -12,8 +12,8 @@
#endif
std::vector<DeviceInfo> DeviceInfo::getDeviceInfo() {
std::vector<DeviceInfo> devs;
DeviceInfoList DeviceInfo::getDeviceInfo() {
DeviceInfoList devs;
#if LASP_HAS_ULDAQ ==1
fillUlDaqDeviceInfo(devs);
#endif

View File

@ -1,17 +1,34 @@
#pragma once
#include "lasp_config.h"
#include "lasp_types.h"
#include "lasp_daqconfig.h"
#include "lasp_types.h"
#include <memory>
/** \addtogroup device
* @{
*/
using DeviceInfoList = std::vector<std::unique_ptr<DeviceInfo>>;
/**
* @brief Structure containing device info parameters
*/
class DeviceInfo {
public:
/**
* @brief Virtual destructor. Allows this class to be derived from.
*/
virtual ~DeviceInfo() {}
/**
* @brief Clone a device info.
*
* @return Cloned device info
*/
virtual std::unique_ptr<DeviceInfo> clone() const {
return std::make_unique<DeviceInfo>(*this);
}
/**
* @brief Backend API corresponding to device
*/
@ -21,12 +38,6 @@ public:
*/
string device_name = "";
/**
* @brief Specific for the device (Sub-API). Important for the RtAudio
* backend, as RtAudio is able to handle different API's.
*/
int api_specific_devindex = -1;
/**
* @brief The available data types the device can output
*/
@ -97,10 +108,29 @@ public:
}
/**
* @brief Whether the device has an internal monitor of the output signal.
* @brief Whether the device has an internal monitor of the output signal. If
* true, the device is able to monitor output signals internally and able to
* present output signals as virtual input signals. This only works together with
* Daq's that are able to run in full duplex mode.
*/
bool hasInternalOutputMonitor = false;
/**
* @brief This flag is used to be able to indicate that the device cannot run
* input and output streams independently, without opening the device in
* duplex mode. This is for example true for the UlDaq: only one handle to
* the device can be given at the same time.
*/
bool duplexModeForced = false;
/**
* @brief The physical quantity of the output signal. For 'normal' audio
* devices, this is typically a 'number' between +/- full scale. For some
* devices however, the output quantity corresponds to a physical signal,
* such as Volts.
*/
DaqChannel::Qty physicalOutputQty = DaqChannel::Qty::Number;
/**
* @brief String representation of DeviceInfo
*
@ -109,11 +139,9 @@ public:
operator string() const {
using std::endl;
std::stringstream str;
str << api.apiname + " " << api_specific_devindex << endl
<< " number of input channels: " << ninchannels << endl
/**
* @brief
*/
str << api.apiname + " " << endl
<< " number of input channels: " << ninchannels
<< endl
<< " number of output channels: " << noutchannels << endl;
return str.str();
}
@ -123,7 +151,7 @@ public:
*
* @return The device info's for each found device.
*/
static std::vector<DeviceInfo> getDeviceInfo();
static DeviceInfoList getDeviceInfo();
};
/** @} */

View File

@ -17,9 +17,19 @@ using rte = std::runtime_error;
using std::vector;
using lck = std::scoped_lock<std::mutex>;
DEBUGTRACE_VARIABLES;
class RtAudioDeviceInfo : public DeviceInfo {
public:
/**
* @brief Specific for the device (Sub-API). Important for the RtAudio
* backend, as RtAudio is able to handle different API's.
*/
int _api_devindex;
virtual std::unique_ptr<DeviceInfo> clone() const override {
// Clone as the derived type so that _api_devindex is preserved.
return std::make_unique<RtAudioDeviceInfo>(*this);
}
};
void fillRtAudioDeviceInfo(vector<DeviceInfo> &devinfolist) {
void fillRtAudioDeviceInfo(DeviceInfoList &devinfolist) {
DEBUGTRACE_ENTER;
vector<RtAudio::Api> apis;
@ -36,7 +46,7 @@ void fillRtAudioDeviceInfo(vector<DeviceInfo> &devinfolist) {
continue;
}
// "Our device info struct"
DeviceInfo d;
RtAudioDeviceInfo d;
switch (api) {
case RtAudio::LINUX_ALSA:
d.api = rtaudioAlsaApi;
@ -60,14 +70,29 @@ void fillRtAudioDeviceInfo(vector<DeviceInfo> &devinfolist) {
}
d.device_name = devinfo.name;
d.api_specific_devindex = devno;
d._api_devindex = devno;
/// When 48k is available we overwrite the default sample rate with the 48
/// kHz value, which is our preferred rate.
bool rate_48k_found = false;
for (us j = 0; j < devinfo.sampleRates.size(); j++) {
us rate = devinfo.sampleRates[j];
d.availableSampleRates.push_back((double)rate);
if (devinfo.preferredSampleRate == rate) {
us rate_int = devinfo.sampleRates[j];
d.availableSampleRates.push_back((double)rate_int);
if (!rate_48k_found) {
if (devinfo.preferredSampleRate == rate_int) {
d.prefSampleRateIndex = j;
}
if (rate_int == 48000) {
d.prefSampleRateIndex = j;
rate_48k_found = true;
}
}
}
d.noutchannels = devinfo.outputChannels;
@ -103,9 +128,9 @@ void fillRtAudioDeviceInfo(vector<DeviceInfo> &devinfolist) {
d.prefDataTypeIndex = d.availableDataTypes.size() - 1;
d.availableFramesPerBlock = {512, 1024, 2048, 4096, 8192};
d.prefFramesPerBlockIndex = 1;
d.prefFramesPerBlockIndex = 2;
devinfolist.push_back(d);
devinfolist.push_back(std::make_unique<RtAudioDeviceInfo>(d));
}
}
}
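The 48 kHz preference introduced above reduces to a small selection rule: pick the index of 48000 Hz when the device offers it, otherwise fall back to the device's own preferred rate. A standalone sketch of that rule (illustrative only, not code from this commit):

#include <cstddef>
#include <vector>

// Returns the index of the preferred sample rate in `rates`: 48 kHz when
// available, otherwise the index of the device-reported preferred rate.
std::size_t preferredRateIndex(const std::vector<double> &rates,
                               double devicePreferred) {
  std::size_t idx = 0;
  bool found48k = false;
  for (std::size_t j = 0; j < rates.size(); j++) {
    if (!found48k && rates[j] == devicePreferred) {
      idx = j; // device default, used unless 48 kHz shows up
    }
    if (rates[j] == 48000.0) {
      idx = j;
      found48k = true;
    }
  }
  return idx;
}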
@ -130,9 +155,9 @@ class RtAudioDaq : public Daq {
std::atomic<StreamStatus> _streamStatus{};
public:
RtAudioDaq(const DeviceInfo &devinfo, const DaqConfiguration &config)
: Daq(devinfo, config),
rtaudio(static_cast<RtAudio::Api>(devinfo.api.api_specific_subcode)),
RtAudioDaq(const DeviceInfo &devinfo_gen, const DaqConfiguration &config)
: Daq(devinfo_gen, config), rtaudio(static_cast<RtAudio::Api>(
devinfo_gen.api.api_specific_subcode)),
nFramesPerBlock(Daq::framesPerBlock()) {
DEBUGTRACE_ENTER;
@ -143,6 +168,8 @@ public:
throw rte("RtAudio backend cannot run in duplex mode.");
}
assert(!monitorOutput);
const RtAudioDeviceInfo &devinfo =
static_cast<const RtAudioDeviceInfo &>(devinfo_gen);
std::unique_ptr<RtAudio::StreamParameters> inParams, outParams;
@ -156,7 +183,7 @@ public:
throw rte("Invalid input number of channels");
}
inParams->firstChannel = 0;
inParams->deviceId = devinfo.api_specific_devindex;
inParams->deviceId = devinfo._api_devindex;
} else {
@ -167,7 +194,7 @@ public:
throw rte("Invalid output number of channels");
}
outParams->firstChannel = 0;
outParams->deviceId = devinfo.api_specific_devindex;
outParams->deviceId = devinfo._api_devindex;
}
RtAudio::StreamOptions streamoptions;
@ -218,7 +245,7 @@ public:
if (nFramesPerBlock_copy != nFramesPerBlock) {
throw rte("Got different number of frames per block back from RtAudio "
"backend. Do not know what to do");
"backend. I do not know what to do.");
}
}
@ -397,8 +424,10 @@ std::unique_ptr<Daq> createRtAudioDevice(const DeviceInfo &devinfo,
void myerrorcallback(RtAudioError::Type, const string &errorText) {
cerr << "RtAudio backend stream error: " << errorText << endl;
}
int mycallback(void *outputBuffer, void *inputBuffer, unsigned int nFrames,
double streamTime, RtAudioStreamStatus status, void *userData) {
int mycallback(
void *outputBuffer, void *inputBuffer, unsigned int nFrames,
__attribute__((unused)) double streamTime, // Not used parameter streamTime
RtAudioStreamStatus status, void *userData) {
return static_cast<RtAudioDaq *>(userData)->streamCallback(
outputBuffer, inputBuffer, nFrames, status);

View File

@ -10,5 +10,5 @@ std::unique_ptr<Daq> createRtAudioDevice(const DeviceInfo& devinfo,
*
* @param devinfolist List to append to
*/
void fillRtAudioDeviceInfo(std::vector<DeviceInfo> &devinfolist);
void fillRtAudioDeviceInfo(DeviceInfoList &devinfolist);

View File

@ -1,6 +1,7 @@
/* #define DEBUGTRACE_ENABLED */
#include "lasp_streammgr.h"
#include "debugtrace.hpp"
#include "lasp_biquadbank.h"
#include "lasp_thread.h"
#include <algorithm>
#include <assert.h>
@ -92,12 +93,47 @@ bool StreamMgr::inCallback(const DaqData &data) {
std::scoped_lock<std::mutex> lck(_inDataHandler_mtx);
assert(_inputFilters.size() == data.nchannels);
if (std::count_if(_inputFilters.cbegin(), _inputFilters.cend(),
[](const auto &a) { return bool(a); }) > 0) {
/// Found a filter in vector of input filters. So we have to apply the
/// filters to each channel.
DaqData input_filtered(data.nframes, data.nchannels, data.dtype);
for (us ch = 0; ch < data.nchannels; ch++) {
if (_inputFilters[ch]) {
DEBUGTRACE_PRINT("Filter ch:");
DEBUGTRACE_PRINT(ch);
vd inout = data.toFloat(ch);
_inputFilters[ch]->filter(inout);
input_filtered.fromFloat(ch, inout);
} else {
DEBUGTRACE_PRINT("No filter ch:");
DEBUGTRACE_PRINT(ch);
input_filtered.copyInFromRaw(ch, data.raw_ptr(0, ch));
}
}
for (auto &handler : _inDataHandlers) {
bool res = handler->inCallback(input_filtered);
if (!res) {
return false;
}
}
} else {
/// No input filters
for (auto &handler : _inDataHandlers) {
bool res = handler->inCallback(data);
if (!res)
if (!res) {
return false;
}
}
}
return true;
}
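The filtering branch above follows a simple per-channel pattern: channels that have a filter get a filtered copy, channels without one are copied through untouched, and the handlers always see data of the same shape. Stripped of the DaqData packing, the idea is roughly the following sketch (Armadillo types, ChannelFilter standing in for SeriesBiquad; not the actual StreamMgr code):

#include <armadillo>
#include <memory>
#include <vector>

// Stand-in for SeriesBiquad: anything with an in-place filter(vec&) method.
struct ChannelFilter {
  virtual void filter(arma::vec &inout) = 0;
  virtual ~ChannelFilter() = default;
};

// Apply an optional filter to each column (channel) of `data`.
arma::mat applyChannelFilters(
    const arma::mat &data,
    const std::vector<std::unique_ptr<ChannelFilter>> &filters) {
  arma::mat out(data.n_rows, data.n_cols);
  for (arma::uword ch = 0; ch < data.n_cols; ch++) {
    arma::vec col = data.col(ch);
    if (ch < filters.size() && filters[ch]) {
      filters[ch]->filter(col); // filtered in place
    }
    out.col(ch) = col; // channels without a filter pass through unchanged
  }
  return out;
}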
@ -225,22 +261,23 @@ void StreamMgr::startStream(const DaqConfiguration &config) {
[](auto &i) { return i.enabled; });
// Find the first device that matches with the configuration
DeviceInfo devinfo;
{
std::scoped_lock lck(_devices_mtx);
auto devinfo2 = std::find_if(
_devices.cbegin(), _devices.cend(),
[&config](const DeviceInfo &d) { return config.match(d); });
if (devinfo2 == std::cend(_devices)) {
DeviceInfo *devinfo = nullptr;
bool found = false;
for (auto &devinfoi : _devices) {
if (config.match(*devinfoi)) {
devinfo = devinfoi.get();
break;
}
}
if (!devinfo) {
throw rte("Could not find a device with name " + config.device_name +
" in list of devices.");
}
devinfo = *devinfo2;
}
isInput |= (config.monitorOutput && devinfo.hasInternalOutputMonitor);
isInput |= (config.monitorOutput && devinfo->hasInternalOutputMonitor);
DEBUGTRACE_PRINT(isInput);
bool isDuplex = isInput && isOutput;
@ -258,48 +295,92 @@ void StreamMgr::startStream(const DaqConfiguration &config) {
"first stop existing stream");
} else if (_inputStream) {
if (_inputStream->duplexMode() && isOutput) {
throw rte(
"Error: output stream is already running (in duplex mode). Please "
throw rte("Error: output stream is already running (in duplex mode). "
"Please "
"first stop existing stream");
}
}
if (_outputStream && isInput && _outputStream->duplexModeForced &&
config.match(*_outputStream)) {
throw rte("This device is already opened for output. If input is also "
"required, please enable duplex mode for this device");
}
if (_inputStream && isOutput && _inputStream->duplexModeForced &&
config.match(*_inputStream)) {
throw rte("This device is already opened for input. If output is also "
"required, please enable duplex mode for this device");
}
InDaqCallback inCallback;
OutDaqCallback outCallback;
using namespace std::placeholders;
std::unique_ptr<Daq> daq = Daq::createDaq(devinfo, config);
std::unique_ptr<Daq> daq = Daq::createDaq(*devinfo, config);
assert(daq);
std::unique_ptr<Daq> *stream_placeholder;
if (isInput) {
/// Give incallback as parameter to stream
inCallback = std::bind(&StreamMgr::inCallback, this, _1);
stream_placeholder = std::addressof(_inputStream);
/// Reset handlers in case of an input stream
for (auto &handler : _inDataHandlers) {
handler->reset(daq.get());
}
d fs = daq->samplerate();
/// Create input filters
_inputFilters.clear();
/// No input filter for monitor channel.
if (config.monitorOutput && devinfo->hasInternalOutputMonitor) {
_inputFilters.push_back(nullptr);
}
for (auto &ch : daq->inchannel_config) {
if (ch.enabled) {
if (ch.digitalHighPassCutOn < 0) {
_inputFilters.push_back(nullptr);
} else if (ch.digitalHighPassCutOn == 0) {
throw rte("Digital highpass cuton should be > 0 if activated");
} else {
// Put in a digital high-pass filter.
_inputFilters.emplace_back(std::make_unique<SeriesBiquad>(
SeriesBiquad::firstOrderHighPass(fs, ch.digitalHighPassCutOn)));
}
}
} // End of input filter creation
}
if (isOutput) {
/// Give outcallback as parameter to stream
outCallback = std::bind(&StreamMgr::outCallback, this, _1);
/// Reset signal generator in case of an output stream
if (_siggen) {
DEBUGTRACE_PRINT("Resetting _siggen with new samplerate of ");
DEBUGTRACE_PRINT(daq->samplerate());
_siggen->reset(daq->samplerate());
}
outCallback = std::bind(&StreamMgr::outCallback, this, _1);
if(!isDuplex) {
stream_placeholder = std::addressof(_outputStream);
}
}
/// Start the DAQ. If it fails, everything is still nicely cleaned up and
/// the daq unique_ptr cleans up resources nicely.
daq->start(inCallback, outCallback);
// Move daq ptr to right place
*stream_placeholder = std::move(daq);
if (isInput) {
_inputStream = std::move(daq);
} else {
_outputStream = std::move(daq);
}
}
void StreamMgr::stopStream(const StreamType t) {
DEBUGTRACE_ENTER;
checkRightThread();
switch (t) {
case (StreamType::input): {
if (t == StreamType::input) {
if (!_inputStream) {
throw rte("Input stream is not running");
}
@ -309,21 +390,18 @@ void StreamMgr::stopStream(const StreamType t) {
for (auto &handler : _inDataHandlers) {
handler->reset(nullptr);
}
} break;
case (StreamType::output): {
} else {
/// t == output
/// Kill input stream in case that one is a duplex stream
if (_inputStream && _inputStream->duplexMode()) {
_inputStream.reset();
} else {
if (!_outputStream) {
throw rte("Output stream is not running");
}
_outputStream.reset();
} // end else
} break;
default:
throw rte("BUG");
break;
}
}

View File

@ -65,6 +65,8 @@ public:
void stop();
};
class SeriesBiquad;
/**
* @brief Stream manager. Used to manage the input and output streams.
* Implemented as a singleton: only one stream manager can be in existence for
@ -96,10 +98,16 @@ class StreamMgr {
* implemented as to generate the same data for all output channels.
*/
std::shared_ptr<Siggen> _siggen;
/**
* @brief Filters on input stream. For example, a digital high pass filter.
*/
std::vector<std::unique_ptr<SeriesBiquad>> _inputFilters;
std::mutex _siggen_mtx;
std::mutex _devices_mtx;
std::vector<DeviceInfo> _devices;
DeviceInfoList _devices;
StreamMgr();
@ -137,9 +145,13 @@ class StreamMgr {
*
* @return A copy of the internal stored list of devices
*/
std::vector<DeviceInfo> getDeviceInfo() const {
DeviceInfoList getDeviceInfo() const {
std::scoped_lock lck(const_cast<std::mutex &>(_devices_mtx));
return _devices;
DeviceInfoList d2;
for(const auto& dev: _devices) {
d2.push_back(dev->clone());
}
return d2;
}
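Because _devices now holds polymorphic DeviceInfo subclasses (RtAudioDeviceInfo, UlDaqDeviceInfo), getDeviceInfo() has to deep-copy through the virtual clone() instead of copying the vector directly. The same idiom in isolation, as a sketch:

#include <memory>
#include <vector>

struct Base {
  virtual std::unique_ptr<Base> clone() const {
    return std::make_unique<Base>(*this);
  }
  virtual ~Base() = default;
};

struct Derived : Base {
  int backend_specific = 42;
  std::unique_ptr<Base> clone() const override {
    // Copy the *derived* type; copying Base here would slice off this member.
    return std::make_unique<Derived>(*this);
  }
};

// Deep copy of a list of polymorphic objects, as done in getDeviceInfo().
std::vector<std::unique_ptr<Base>>
deepCopy(const std::vector<std::unique_ptr<Base>> &in) {
  std::vector<std::unique_ptr<Base>> out;
  for (const auto &p : in) {
    out.push_back(p->clone());
  }
  return out;
}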
/**

View File

@ -1,593 +1,22 @@
/* #define DEBUGTRACE_ENABLED */
#include "debugtrace.hpp"
#include "lasp_config.h"
#if LASP_HAS_ULDAQ == 1
#include "lasp_daqconfig.h"
#include "lasp_uldaq.h"
#include <algorithm>
#include <atomic>
#include <cassert>
#include <chrono>
#include <gsl/gsl-lite.hpp>
#include <iostream>
#include <memory>
#include <mutex>
#include <stdexcept>
#include <thread>
#include "lasp_uldaq_impl.h"
#include <uldaq.h>
#include <vector>
using namespace std::literals::chrono_literals;
using std::atomic;
using std::cerr;
using std::endl;
using rte = std::runtime_error;
#include "debugtrace.hpp"
DEBUGTRACE_VARIABLES;
const us MAX_DEV_COUNT_PER_API = 100;
/**
* @brief Reserve some space for an error message from UlDaq
*/
const us UL_ERR_MSG_LEN = 512;
/**
* @brief Show the error to default error stream and return a string
* corresponding to the error
*
* @param err Error string
*/
string showErr(UlError err) {
string errstr;
errstr.reserve(UL_ERR_MSG_LEN);
if (err != ERR_NO_ERROR) {
char errmsg[UL_ERR_MSG_LEN];
errstr = "UlDaq API Error: ";
ulGetErrMsg(err, errmsg);
errstr += errmsg;
std::cerr << "\b\n**************** UlDAQ backend error **********\n";
std::cerr << errstr << std::endl;
std::cerr << "***********************************************\n\n";
return errstr;
}
return errstr;
}
class DT9837A : public Daq {
DaqDeviceHandle _handle = 0;
std::mutex _daqmutex;
std::thread _thread;
atomic<bool> _stopThread{false};
atomic<StreamStatus> _streamStatus;
const us _nFramesPerBlock;
void threadFcn(InDaqCallback inCallback, OutDaqCallback outcallback);
public:
DaqDeviceHandle getHandle() const { return _handle; }
DT9837A(const DeviceInfo &devinfo, const DaqConfiguration &config);
~DT9837A() {
UlError err;
if (isRunning()) {
stop();
}
if (_handle) {
err = ulDisconnectDaqDevice(_handle);
showErr(err);
err = ulReleaseDaqDevice(_handle);
showErr(err);
}
}
bool isRunning() const {
DEBUGTRACE_ENTER;
return _thread.joinable();
}
virtual void start(InDaqCallback inCallback,
OutDaqCallback outCallback) override final;
virtual StreamStatus getStreamStatus() const override {
return _streamStatus;
}
void stop() override final {
DEBUGTRACE_ENTER;
StreamStatus status = _streamStatus;
if (!isRunning()) {
throw rte("No data acquisition running");
}
_stopThread = true;
if (_thread.joinable()) {
_thread.join();
}
_stopThread = false;
status.isRunning = false;
_streamStatus = status;
}
friend class InBufHandler;
friend class OutBufHandler;
};
void DT9837A::start(InDaqCallback inCallback, OutDaqCallback outCallback) {
DEBUGTRACE_ENTER;
if (isRunning()) {
throw rte("DAQ is already running");
}
if (neninchannels() > 0) {
if (!inCallback)
throw rte("DAQ requires a callback for input data");
}
if (nenoutchannels() > 0) {
if (!outCallback)
throw rte("DAQ requires a callback for output data");
}
assert(neninchannels() + nenoutchannels() > 0);
_thread = std::thread(&DT9837A::threadFcn, this, inCallback, outCallback);
}
class BufHandler {
protected:
DT9837A &daq;
const DataTypeDescriptor dtype_descr;
us nchannels, nFramesPerBlock;
double samplerate;
std::vector<double> buf;
bool topenqueued, botenqueued;
us increment = 0;
us totalFramesCount = 0;
long long buffer_mid_idx;
public:
BufHandler(DT9837A &daq, const us nchannels)
: daq(daq), dtype_descr(daq.dtypeDescr()), nchannels(nchannels),
nFramesPerBlock(daq.framesPerBlock()), samplerate(daq.samplerate()),
buf(2 * nchannels *
nFramesPerBlock, // Watch the two here, the top and the bottom!
0),
buffer_mid_idx(nchannels * nFramesPerBlock) {
assert(nchannels > 0);
}
};
class InBufHandler : public BufHandler {
bool monitorOutput;
InDaqCallback cb;
public:
InBufHandler(DT9837A &daq, InDaqCallback cb)
: BufHandler(daq, daq.neninchannels()), cb(cb)
{
DEBUGTRACE_ENTER;
assert(daq.getHandle() != 0);
monitorOutput = daq.monitorOutput;
DaqInScanFlag inscanflags = DAQINSCAN_FF_DEFAULT;
ScanOption scanoptions = SO_CONTINUOUS;
UlError err = ERR_NO_ERROR;
std::vector<DaqInChanDescriptor> indescs;
boolvec eninchannels_without_mon = daq.eninchannels(false);
// Initialize input, if any
dvec ranges = daq.inputRangeForEnabledChannels(false);
for (us chin = 0; chin < 4; chin++) {
if (eninchannels_without_mon[chin] == true) {
DaqInChanDescriptor indesc;
indesc.type = DAQI_ANALOG_SE;
indesc.channel = chin;
double rangeval = ranges.at(chin);
Range rangenum;
if (fabs(rangeval - 1.0) < 1e-8) {
rangenum = BIP1VOLTS;
} else if (fabs(rangeval - 10.0) < 1e-8) {
rangenum = BIP10VOLTS;
} else {
std::cerr << "Fatal: input range value is invalid" << endl;
return;
}
indesc.range = rangenum;
indescs.push_back(indesc);
}
}
// Add possibly last channel as monitor
if (monitorOutput) {
DaqInChanDescriptor indesc;
indesc.type = DAQI_DAC;
indesc.channel = 0;
/// The output only has a range of 10V, therefore the monitor of the
/// output also has to be set to this value.
indesc.range = BIP10VOLTS;
indescs.push_back(indesc);
}
assert(indescs.size() == nchannels);
DEBUGTRACE_MESSAGE("Starting input scan");
err = ulDaqInScan(daq.getHandle(), indescs.data(), nchannels,
2 * nFramesPerBlock, // Watch the 2 here!
&samplerate, scanoptions, inscanflags, buf.data());
if (err != ERR_NO_ERROR) {
showErr(err);
throw rte("Could not start input DAQ");
}
}
void start() {
ScanStatus status;
TransferStatus transferStatus;
UlError err = ulDaqInScanStatus(daq.getHandle(), &status, &transferStatus);
if (err != ERR_NO_ERROR) {
showErr(err);
throw rte("Unable to start input on DAQ");
}
totalFramesCount = transferStatus.currentTotalCount;
topenqueued = true;
botenqueued = true;
}
/**
* @brief InBufHandler::operator()()
*
* @return true on success
*/
bool operator()() {
/* DEBUGTRACE_ENTER; */
bool ret = true;
auto runCallback = ([&](us totalOffset) {
/* DEBUGTRACE_ENTER; */
DaqData data(nFramesPerBlock, nchannels,
DataTypeDescriptor::DataType::dtype_fl64);
us monitorOffset = monitorOutput ? 1 : 0;
/* /// Put the output monitor in front */
if (monitorOutput) {
for (us frame = 0; frame < nFramesPerBlock; frame++) {
data.value<double>(frame, 0) =
buf[totalOffset + (frame * nchannels) + (nchannels - 1)];
}
}
for (us channel = 0; channel < nchannels - monitorOffset; channel++) {
/* DEBUGTRACE_PRINT(channel); */
for (us frame = 0; frame < nFramesPerBlock; frame++) {
data.value<double>(frame, channel + monitorOffset) =
buf[totalOffset + (frame * nchannels) + channel];
}
}
return cb(data);
});
ScanStatus status;
TransferStatus transferStatus;
UlError err = ulDaqInScanStatus(daq.getHandle(), &status, &transferStatus);
if (err != ERR_NO_ERROR) {
showErr(err);
return false;
}
increment = transferStatus.currentTotalCount - totalFramesCount;
totalFramesCount += increment;
if (increment > nFramesPerBlock) {
cerr << "Error: overrun for input of DAQ!" << endl;
return false;
}
assert(status == SS_RUNNING);
if (transferStatus.currentIndex < (long long)buffer_mid_idx) {
topenqueued = false;
if (!botenqueued) {
ret = runCallback(nchannels * nFramesPerBlock);
botenqueued = true;
}
} else {
botenqueued = false;
if (!topenqueued) {
ret = runCallback(0);
topenqueued = true;
}
}
return ret;
}
~InBufHandler() {
// At exit of the function, stop scanning.
DEBUGTRACE_ENTER;
UlError err = ulDaqInScanStop(daq.getHandle());
if (err != ERR_NO_ERROR) {
showErr(err);
}
}
};
class OutBufHandler : public BufHandler {
OutDaqCallback cb;
public:
OutBufHandler(DT9837A &daq, OutDaqCallback cb)
: BufHandler(daq, daq.nenoutchannels()), cb(cb) {
DEBUGTRACE_MESSAGE("Starting output scan");
DEBUGTRACE_PRINT(nchannels);
AOutScanFlag outscanflags = AOUTSCAN_FF_DEFAULT;
ScanOption scanoptions = SO_CONTINUOUS;
UlError err =
ulAOutScan(daq.getHandle(), 0, 0, BIP10VOLTS,
2 * nFramesPerBlock, // Watch the 2 here!
&samplerate, scanoptions, outscanflags, buf.data());
if (err != ERR_NO_ERROR) {
showErr(err);
throw rte("Unable to start output on DAQ");
}
}
void start() {
ScanStatus status;
TransferStatus transferStatus;
UlError err = ulAOutScanStatus(daq.getHandle(), &status, &transferStatus);
if (err != ERR_NO_ERROR) {
showErr(err);
throw rte("Unable to start output on DAQ");
}
if (status != SS_RUNNING) {
throw rte("Unable to start output on DAQ");
}
totalFramesCount = transferStatus.currentTotalCount;
topenqueued = true;
botenqueued = true;
}
/**
* @brief OutBufHandler::operator()
*
* @return true on success
*/
bool operator()() {
/* DEBUGTRACE_ENTER; */
bool res = true;
assert(daq.getHandle() != 0);
UlError err = ERR_NO_ERROR;
ScanStatus status;
TransferStatus transferStatus;
err = ulAOutScanStatus(daq.getHandle(), &status, &transferStatus);
if (err != ERR_NO_ERROR) {
showErr(err);
return false;
}
if (status != SS_RUNNING) {
return false;
}
increment = transferStatus.currentTotalCount - totalFramesCount;
totalFramesCount += increment;
if (increment > nFramesPerBlock) {
cerr << "Error: underrun for output of DAQ!" << endl;
return false;
}
if (transferStatus.currentIndex < buffer_mid_idx) {
topenqueued = false;
if (!botenqueued) {
DaqData d(nFramesPerBlock,1,
DataTypeDescriptor::DataType::dtype_fl64);
res = cb(d);
d.copyToRaw(0, reinterpret_cast<byte_t*>(&(buf[buffer_mid_idx])));
botenqueued = true;
}
} else {
botenqueued = false;
if (!topenqueued) {
DaqData d(nFramesPerBlock,1,
DataTypeDescriptor::DataType::dtype_fl64);
res = cb(d);
d.copyToRaw(0, reinterpret_cast<byte_t*>(&(buf[0])));
topenqueued = true;
}
}
return res;
}
~OutBufHandler() {
DEBUGTRACE_ENTER;
UlError err = ulAOutScanStop(daq.getHandle());
if (err != ERR_NO_ERROR) {
showErr(err);
}
}
};
void DT9837A::threadFcn(InDaqCallback inCallback, OutDaqCallback outCallback) {
void fillUlDaqDeviceInfo(DeviceInfoList &devinfolist) {
DEBUGTRACE_ENTER;
/* cerr << "******************\n" */
/* "Todo: the current way of handling timing in this DAQ thread is not " */
/* "really robust, due " */
/* "to input / output callbacks that can be too time-consuming. We have " */
/* "to fix the " */
/* "sleep_for to properly deal with longer callbacks." */
/* "\n*****************" */
/* << endl; */
try {
std::unique_ptr<OutBufHandler> obh;
std::unique_ptr<InBufHandler> ibh;
StreamStatus status = _streamStatus;
status.isRunning = true;
_streamStatus = status;
if (nenoutchannels() > 0) {
assert(outCallback);
obh = std::make_unique<OutBufHandler>(*this, outCallback);
}
if (neninchannels() > 0) {
assert(inCallback);
ibh = std::make_unique<InBufHandler>(*this, inCallback);
}
if (obh)
obh->start();
if (ibh)
ibh->start();
const double sleeptime_s =
static_cast<double>(_nFramesPerBlock) / (16 * samplerate());
const us sleeptime_us = static_cast<us>(sleeptime_s * 1e6);
while (!_stopThread) {
if (ibh) {
if (!(*ibh)()) {
_stopThread = true;
}
}
if (obh) {
if (!(*obh)()) {
_stopThread = true;
}
} else {
std::this_thread::sleep_for(std::chrono::microseconds(sleeptime_us));
}
}
} catch (rte &e) {
StreamStatus status = _streamStatus;
status.isRunning = false;
status.errorType = StreamStatus::StreamError::systemError;
_streamStatus = status;
cerr << "\n******************\n";
cerr << "Catched error in UlDAQ thread: " << e.what() << endl;
cerr << "\n******************\n";
}
StreamStatus status = _streamStatus;
status.isRunning = false;
_streamStatus = status;
_stopThread = false;
}
std::unique_ptr<Daq> createUlDaqDevice(const DeviceInfo &devinfo,
const DaqConfiguration &config) {
return std::make_unique<DT9837A>(devinfo, config);
}
DT9837A::DT9837A(const DeviceInfo &devinfo, const DaqConfiguration &config)
: Daq(devinfo, config),
_nFramesPerBlock(availableFramesPerBlock.at(framesPerBlockIndex)) {
// Some sanity checks
if (inchannel_config.size() != 4) {
throw rte("Invalid length of enabled inChannels vector");
}
if (outchannel_config.size() != 1) {
throw rte("Invalid length of enabled outChannels vector");
}
if (_nFramesPerBlock < 24 || _nFramesPerBlock > 8192) {
throw rte("Unsensible number of samples per block chosen");
}
if (samplerate() < 10000 || samplerate() > 51000) {
throw rte("Invalid sample rate");
}
DaqDeviceDescriptor devdescriptors[MAX_DEV_COUNT_PER_API];
DaqDeviceDescriptor descriptor;
DaqDeviceInterface interfaceType = ANY_IFC;
UlError err;
unsigned int numdevs = MAX_ULDAQ_DEV_COUNT_PER_API;
us numdevs = MAX_DEV_COUNT_PER_API;
err = ulGetDaqDeviceInventory(interfaceType, devdescriptors,
(unsigned *)&numdevs);
if (err != ERR_NO_ERROR) {
throw rte("Device inventarization failed");
}
if ((us)api_specific_devindex >= numdevs) {
throw rte("Device number {deviceno} too high {err}. This could "
"happen when the device is currently not connected");
}
descriptor = devdescriptors[api_specific_devindex];
// get a handle to the DAQ device associated with the first descriptor
_handle = ulCreateDaqDevice(descriptor);
if (_handle == 0) {
throw rte("Unable to create a handle to the specified DAQ "
"device. Is the device currently in use? Please make sure to set "
"the DAQ configuration in duplex mode if simultaneous input and "
"output is required.");
}
err = ulConnectDaqDevice(_handle);
if (err != ERR_NO_ERROR) {
ulReleaseDaqDevice(_handle);
_handle = 0;
throw rte(string("Unable to connect to device: " + showErr(err)));
}
for (us ch = 0; ch < 4; ch++) {
err = ulAISetConfigDbl(_handle, AI_CFG_CHAN_SENSOR_SENSITIVITY, ch, 1.0);
showErr(err);
if (err != ERR_NO_ERROR) {
throw rte("Fatal: could normalize channel sensitivity");
}
CouplingMode cm = inchannel_config.at(ch).ACCouplingMode ? CM_AC : CM_DC;
err = ulAISetConfig(_handle, AI_CFG_CHAN_COUPLING_MODE, ch, cm);
if (err != ERR_NO_ERROR) {
showErr(err);
throw rte("Fatal: could not set AC/DC coupling mode");
}
IepeMode iepe =
inchannel_config.at(ch).IEPEEnabled ? IEPE_ENABLED : IEPE_DISABLED;
err = ulAISetConfig(_handle, AI_CFG_CHAN_IEPE_MODE, ch, iepe);
if (err != ERR_NO_ERROR) {
showErr(err);
throw rte("Fatal: could not set IEPE mode");
}
}
}
void fillUlDaqDeviceInfo(std::vector<DeviceInfo> &devinfolist) {
DEBUGTRACE_ENTER;
UlError err;
unsigned int numdevs = MAX_DEV_COUNT_PER_API;
DaqDeviceDescriptor devdescriptors[MAX_DEV_COUNT_PER_API];
DaqDeviceDescriptor devdescriptors[MAX_ULDAQ_DEV_COUNT_PER_API];
DaqDeviceDescriptor descriptor;
DaqDeviceInterface interfaceType = ANY_IFC;
@ -602,11 +31,14 @@ void fillUlDaqDeviceInfo(std::vector<DeviceInfo> &devinfolist) {
descriptor = devdescriptors[i];
DeviceInfo devinfo;
UlDaqDeviceInfo devinfo;
devinfo._uldaqDescriptor = descriptor;
devinfo.api = uldaqapi;
string name, interface;
if (string(descriptor.productName) != "DT9837A") {
throw rte("Unknown UlDAQ type");
string productname = descriptor.productName;
if (productname != "DT9837A") {
throw rte("Unknown UlDAQ type: " + productname);
}
switch (descriptor.devInterface) {
@ -629,7 +61,8 @@ void fillUlDaqDeviceInfo(std::vector<DeviceInfo> &devinfolist) {
name += string(descriptor.productName) + " " + string(descriptor.uniqueId);
devinfo.device_name = std::move(name);
devinfo.api_specific_devindex = i;
devinfo.physicalOutputQty = DaqChannel::Qty::Voltage;
devinfo.availableDataTypes.push_back(
DataTypeDescriptor::DataType::dtype_fl64);
devinfo.prefDataTypeIndex = 0;
@ -654,8 +87,16 @@ void fillUlDaqDeviceInfo(std::vector<DeviceInfo> &devinfolist) {
devinfo.hasInternalOutputMonitor = true;
devinfo.duplexModeForced = true;
// Finally, this devinfo is pushed back in list
devinfolist.push_back(devinfo);
devinfolist.push_back(std::make_unique<UlDaqDeviceInfo>(devinfo));
}
}
std::unique_ptr<Daq> createUlDaqDevice(const DeviceInfo &devinfo,
const DaqConfiguration &config) {
return std::make_unique<DT9837A>(devinfo, config);
}
#endif // LASP_HAS_ULDAQ

View File

@ -1,14 +1,18 @@
#pragma once
#include "lasp_daq.h"
/**
* @brief The maximum number of devices that can be enumerated when calling
* ulGetDaqDeviceInventory()
*/
const us MAX_ULDAQ_DEV_COUNT_PER_API = 100;
std::unique_ptr<Daq> createUlDaqDevice(const DeviceInfo &devinfo,
const DaqConfiguration &config);
/**
* @brief Fill device info list with UlDaq specific devices, if any.
*
* @param devinfolist Info list to append to
* @param devinfolist Info list to append to.
*/
void fillUlDaqDeviceInfo(std::vector<DeviceInfo> &devinfolist);
void fillUlDaqDeviceInfo(DeviceInfoList& devinfolist);

View File

@ -0,0 +1,480 @@
/* #define DEBUGTRACE_ENABLED */
#include "debugtrace.hpp"
#include "lasp_config.h"
#if LASP_HAS_ULDAQ == 1
#include "lasp_daqconfig.h"
#include "lasp_uldaq.h"
#include "lasp_uldaq_impl.h"
using namespace std::literals::chrono_literals;
/**
* @brief Reserve some space for an error message from UlDaq
*/
const us UL_ERR_MSG_LEN = 512;
/**
* @brief Return a string corresponding to the UlDaq API error
*
* @param err error code
*
* @return Error string
*/
string getErrMsg(UlError err) {
string errstr;
errstr.reserve(UL_ERR_MSG_LEN);
char errmsg[UL_ERR_MSG_LEN];
errstr = "UlDaq API Error: ";
ulGetErrMsg(err, errmsg);
errstr += errmsg;
return errstr;
}
inline void showErr(string errstr) {
std::cerr << "\b\n**************** UlDAQ backend error **********\n";
std::cerr << errstr << std::endl;
std::cerr << "***********************************************\n\n";
}
inline void showErr(UlError err) {
if (err != ERR_NO_ERROR)
showErr(getErrMsg(err));
}
DT9837A::~DT9837A() {
UlError err;
if (isRunning()) {
stop();
}
if (_handle) {
err = ulDisconnectDaqDevice(_handle);
showErr(err);
err = ulReleaseDaqDevice(_handle);
showErr(err);
}
}
DT9837A::DT9837A(const DeviceInfo &devinfo, const DaqConfiguration &config)
: Daq(devinfo, config),
_nFramesPerBlock(availableFramesPerBlock.at(framesPerBlockIndex)) {
// Some sanity checks
if (inchannel_config.size() != 4) {
throw rte("Invalid length of enabled inChannels vector");
}
if (outchannel_config.size() != 1) {
throw rte("Invalid length of enabled outChannels vector");
}
if (_nFramesPerBlock < 24 || _nFramesPerBlock > 8192) {
throw rte("Unreasonable number of samples per block chosen");
}
if (samplerate() < 10000 || samplerate() > 51000) {
throw rte("Invalid sample rate");
}
const UlDaqDeviceInfo *_info =
dynamic_cast<const UlDaqDeviceInfo *>(&devinfo);
if (_info == nullptr) {
throw rte("BUG: Could not cast DeviceInfo to UlDaqDeviceInfo");
}
// get a handle to the DAQ device associated with the first descriptor
_handle = ulCreateDaqDevice(_info->_uldaqDescriptor);
if (_handle == 0) {
throw rte("Unable to create a handle to the specified DAQ "
"device. Is the device currently in use? Please make sure to set "
"the DAQ configuration in duplex mode if simultaneous input and "
"output is required.");
}
UlError err = ulConnectDaqDevice(_handle);
if (err != ERR_NO_ERROR) {
ulReleaseDaqDevice(_handle);
_handle = 0;
throw rte("Unable to connect to device: " + getErrMsg(err));
}
/// Loop over input channels, set parameters
for (us ch = 0; ch < 4; ch++) {
err = ulAISetConfigDbl(_handle, AI_CFG_CHAN_SENSOR_SENSITIVITY, ch, 1.0);
showErr(err);
if (err != ERR_NO_ERROR) {
throw rte("Fatal: could not normalize channel sensitivity");
}
CouplingMode cm = inchannel_config.at(ch).ACCouplingMode ? CM_AC : CM_DC;
err = ulAISetConfig(_handle, AI_CFG_CHAN_COUPLING_MODE, ch, cm);
if (err != ERR_NO_ERROR) {
showErr(err);
throw rte("Fatal: could not set AC/DC coupling mode");
}
IepeMode iepe =
inchannel_config.at(ch).IEPEEnabled ? IEPE_ENABLED : IEPE_DISABLED;
err = ulAISetConfig(_handle, AI_CFG_CHAN_IEPE_MODE, ch, iepe);
if (err != ERR_NO_ERROR) {
showErr(err);
throw rte("Fatal: could not set IEPE mode");
}
}
}
bool DT9837A::isRunning() const {
DEBUGTRACE_ENTER;
return _thread.joinable();
}
void DT9837A::stop() {
DEBUGTRACE_ENTER;
StreamStatus status = _streamStatus;
if (!isRunning()) {
throw rte("No data acquisition running");
}
_stopThread = true;
if (_thread.joinable()) {
_thread.join();
}
_stopThread = false;
status.isRunning = false;
_streamStatus = status;
}
/**
* @brief Throws an appropriate stream exception based on the UlError number.
* The mapping is based on the error numbers as given in uldaq.h. There are a
* lot of errors defined here (109 in total). Except for some, we will map
* most of them to a driver error.
*
* @param err Error code returned by the UlDaq API
*/
inline void throwUlException(UlError err) {
if (err == ERR_NO_ERROR) {
return;
}
string errstr = getErrMsg(err);
showErr(errstr);
Daq::StreamStatus::StreamError serr;
if ((int)err == 18) {
serr = Daq::StreamStatus::StreamError::inputXRun;
} else if ((int)err == 19) {
serr = Daq::StreamStatus::StreamError::outputXRun;
} else {
serr = Daq::StreamStatus::StreamError::driverError;
}
throw Daq::StreamException(serr, errstr);
}
void DT9837A::start(InDaqCallback inCallback, OutDaqCallback outCallback) {
DEBUGTRACE_ENTER;
if (isRunning()) {
throw rte("DAQ is already running");
}
if (neninchannels() > 0) {
if (!inCallback)
throw rte("DAQ requires a callback for input data");
}
if (nenoutchannels() > 0) {
if (!outCallback)
throw rte("DAQ requires a callback for output data");
}
assert(neninchannels() + nenoutchannels() > 0);
_thread = std::thread(&DT9837A::threadFcn, this, inCallback, outCallback);
}
InBufHandler::InBufHandler(DT9837A &daq, InDaqCallback cb)
: BufHandler(daq, daq.neninchannels()), cb(cb)
{
DEBUGTRACE_ENTER;
assert(daq.getHandle() != 0);
monitorOutput = daq.monitorOutput;
DaqInScanFlag inscanflags = DAQINSCAN_FF_DEFAULT;
ScanOption scanoptions = SO_CONTINUOUS;
UlError err = ERR_NO_ERROR;
std::vector<DaqInChanDescriptor> indescs;
boolvec eninchannels_without_mon = daq.eninchannels(false);
// Set ranges for each input. Below asks only channels that are not a
// monitor channel (hence the false flag).
dvec ranges = daq.inputRangeForEnabledChannels(false);
for (us chin = 0; chin < 4; chin++) {
if (eninchannels_without_mon[chin] == true) {
DaqInChanDescriptor indesc;
indesc.type = DAQI_ANALOG_SE;
indesc.channel = chin;
double rangeval = ranges.at(chin);
Range rangenum;
if (fabs(rangeval - 1.0) < 1e-8) {
rangenum = BIP1VOLTS;
} else if (fabs(rangeval - 10.0) < 1e-8) {
rangenum = BIP10VOLTS;
} else {
std::cerr << "Fatal: input range value is invalid" << endl;
throw Daq::StreamException(Daq::StreamStatus::StreamError::logicError);
}
indesc.range = rangenum;
indescs.push_back(indesc);
}
}
// Add possibly last channel as monitor
if (monitorOutput) {
DaqInChanDescriptor indesc;
indesc.type = DAQI_DAC;
indesc.channel = 0;
/// The output only has a range of 10V, therefore the monitor of the
/// output also has to be set to this value.
indesc.range = BIP10VOLTS;
indescs.push_back(indesc);
}
assert(indescs.size() == nchannels);
DEBUGTRACE_MESSAGE("Starting input scan");
err = ulDaqInScan(daq.getHandle(), indescs.data(), nchannels,
2 * nFramesPerBlock, // Watch the 2 here!
&samplerate, scanoptions, inscanflags, buf.data());
throwUlException(err);
}
void InBufHandler::start() {
ScanStatus status;
TransferStatus transferStatus;
UlError err = ulDaqInScanStatus(daq.getHandle(), &status, &transferStatus);
throwUlException(err);
totalFramesCount = transferStatus.currentTotalCount;
topenqueued = true;
botenqueued = true;
}
bool InBufHandler::operator()() {
/* DEBUGTRACE_ENTER; */
bool ret = true;
auto runCallback = ([&](us totalOffset) {
/* DEBUGTRACE_ENTER; */
DaqData data(nFramesPerBlock, nchannels, dtype_descr.dtype);
us monitorOffset = monitorOutput ? 1 : 0;
/* /// Put the output monitor in front */
if (monitorOutput) {
for (us frame = 0; frame < nFramesPerBlock; frame++) {
data.value<double>(frame, 0) =
buf[totalOffset + (frame * nchannels) + (nchannels - 1)];
}
}
for (us channel = 0; channel < nchannels - monitorOffset; channel++) {
/* DEBUGTRACE_PRINT(channel); */
for (us frame = 0; frame < nFramesPerBlock; frame++) {
data.value<double>(frame, channel + monitorOffset) =
buf[totalOffset + (frame * nchannels) + channel];
}
}
return cb(data);
});
ScanStatus status;
TransferStatus transferStatus;
UlError err = ulDaqInScanStatus(daq.getHandle(), &status, &transferStatus);
throwUlException(err);
us increment = transferStatus.currentTotalCount - totalFramesCount;
totalFramesCount += increment;
if (increment > nFramesPerBlock) {
throw Daq::StreamException(Daq::StreamStatus::StreamError::inputXRun);
}
assert(status == SS_RUNNING);
if (transferStatus.currentIndex < (long long)buffer_mid_idx) {
topenqueued = false;
if (!botenqueued) {
ret = runCallback(nchannels * nFramesPerBlock);
botenqueued = true;
}
} else {
botenqueued = false;
if (!topenqueued) {
ret = runCallback(0);
topenqueued = true;
}
}
return ret;
}
InBufHandler::~InBufHandler() {
// At exit of the function, stop scanning.
DEBUGTRACE_ENTER;
UlError err = ulDaqInScanStop(daq.getHandle());
if (err != ERR_NO_ERROR) {
showErr(err);
}
}
OutBufHandler::OutBufHandler(DT9837A &daq, OutDaqCallback cb)
: BufHandler(daq, daq.nenoutchannels()), cb(cb) {
DEBUGTRACE_MESSAGE("Starting output scan");
DEBUGTRACE_PRINT(nchannels);
AOutScanFlag outscanflags = AOUTSCAN_FF_DEFAULT;
ScanOption scanoptions = SO_CONTINUOUS;
UlError err = ulAOutScan(daq.getHandle(), 0, 0, BIP10VOLTS,
2 * nFramesPerBlock, // Watch the 2 here!
&samplerate, scanoptions, outscanflags, buf.data());
throwUlException(err);
}
void OutBufHandler::start() {
ScanStatus status;
TransferStatus transferStatus;
UlError err = ulAOutScanStatus(daq.getHandle(), &status, &transferStatus);
if (err != ERR_NO_ERROR) {
showErr(err);
throw rte("Unable to start output on DAQ");
}
if (status != SS_RUNNING) {
throw rte("Unable to start output on DAQ");
}
totalFramesCount = transferStatus.currentTotalCount;
topenqueued = true;
botenqueued = true;
}
bool OutBufHandler::operator()() {
/* DEBUGTRACE_ENTER; */
bool res = true;
assert(daq.getHandle() != 0);
UlError err = ERR_NO_ERROR;
ScanStatus status;
TransferStatus transferStatus;
err = ulAOutScanStatus(daq.getHandle(), &status, &transferStatus);
throwUlException(err);
if (status != SS_RUNNING) {
return false;
}
us increment = transferStatus.currentTotalCount - totalFramesCount;
totalFramesCount += increment;
if (increment > nFramesPerBlock) {
throw Daq::StreamException(Daq::StreamStatus::StreamError::outputXRun);
}
if (transferStatus.currentIndex < buffer_mid_idx) {
topenqueued = false;
if (!botenqueued) {
DaqData d(nFramesPerBlock, 1, dtype_descr.dtype);
res = cb(d);
d.copyToRaw(0, reinterpret_cast<byte_t *>(&(buf[buffer_mid_idx])));
botenqueued = true;
}
} else {
botenqueued = false;
if (!topenqueued) {
DaqData d(nFramesPerBlock, 1, dtype_descr.dtype);
res = cb(d);
d.copyToRaw(0, reinterpret_cast<byte_t *>(&(buf[0])));
topenqueued = true;
}
}
return res;
}
OutBufHandler::~OutBufHandler() {
DEBUGTRACE_ENTER;
UlError err = ulAOutScanStop(daq.getHandle());
if (err != ERR_NO_ERROR) {
showErr(err);
}
}
void DT9837A::threadFcn(InDaqCallback inCallback, OutDaqCallback outCallback) {
DEBUGTRACE_ENTER;
try {
std::unique_ptr<OutBufHandler> obh;
std::unique_ptr<InBufHandler> ibh;
StreamStatus status = _streamStatus;
status.isRunning = true;
_streamStatus = status;
if (nenoutchannels() > 0) {
assert(outCallback);
obh = std::make_unique<OutBufHandler>(*this, outCallback);
}
if (neninchannels() > 0) {
assert(inCallback);
ibh = std::make_unique<InBufHandler>(*this, inCallback);
}
if (obh)
obh->start();
if (ibh)
ibh->start();
const double sleeptime_s =
static_cast<double>(_nFramesPerBlock) / (16 * samplerate());
const us sleeptime_us = static_cast<us>(sleeptime_s * 1e6);
while (!_stopThread) {
if (ibh) {
if (!(*ibh)()) {
_stopThread = true;
break;
}
}
if (obh) {
if (!(*obh)()) {
_stopThread = true;
break;
}
} else {
std::this_thread::sleep_for(std::chrono::microseconds(sleeptime_us));
}
}
/// Update stream status that we are not running anymore
status.isRunning = false;
_streamStatus = status;
_stopThread = false;
} catch (StreamException &e) {
StreamStatus status = _streamStatus;
// Copy over error type
status.errorType = e.e;
_streamStatus = status;
/*
cerr << "\n******************\n";
cerr << "Catched error in UlDAQ thread: " << e.what() << endl;
cerr << "\n******************\n";
*/
}
}
#endif // LASP_HAS_ULDAQ

View File

@ -0,0 +1,154 @@
#pragma once
#include "lasp_daq.h"
#include <algorithm>
#include <cassert>
#include <chrono>
#include <iostream>
#include <stdexcept>
#include <thread>
#include <uldaq.h>
#include <vector>
using std::atomic;
using std::cerr;
using std::endl;
using rte = std::runtime_error;
/**
* @brief UlDaq-specific device information. Adds a copy of the underlying
* DaqDeviceDescriptor.
*/
class UlDaqDeviceInfo : public DeviceInfo {
public:
DaqDeviceDescriptor _uldaqDescriptor;
virtual std::unique_ptr<DeviceInfo> clone() const override {
return std::make_unique<UlDaqDeviceInfo>(*this);
}
};
class DT9837A : public Daq {
DaqDeviceHandle _handle = 0;
std::mutex _daqmutex;
std::thread _thread;
atomic<bool> _stopThread{false};
atomic<StreamStatus> _streamStatus;
const us _nFramesPerBlock;
void threadFcn(InDaqCallback inCallback, OutDaqCallback outcallback);
public:
DaqDeviceHandle getHandle() const { return _handle; }
/**
* @brief Create a DT9837A instance.
*
* @param devinfo DeviceInfo to connect to
* @param config DaqConfiguration settings
*/
DT9837A(const DeviceInfo &devinfo, const DaqConfiguration &config);
virtual ~DT9837A();
bool isRunning() const;
void stop() override final;
friend class InBufHandler;
friend class OutBufHandler;
virtual void start(InDaqCallback inCallback,
OutDaqCallback outCallback) override final;
virtual StreamStatus getStreamStatus() const override {
return _streamStatus;
}
};
/**
* @brief Helper class for managing input and output samples of the DAQ device.
*/
class BufHandler {
protected:
/**
* @brief Reference to underlying Daq
*/
DT9837A &daq;
/**
* @brief The type of data, in this case always double precision floats
*/
const DataTypeDescriptor dtype_descr = dtype_desc_fl64;
/**
* @brief The number of channels, number of frames per callback (block).
*/
us nchannels, nFramesPerBlock;
/**
* @brief Sampling frequency in Hz
*/
double samplerate;
std::vector<double> buf;
/**
* @brief Whether the top / bottom half of the buffer has already been handled
* (passed to, or filled by, the callback).
*/
bool topenqueued, botenqueued;
/**
* @brief Counter for the total number of frames acquired / sent since the
* start of the stream.
*/
us totalFramesCount = 0;
long long buffer_mid_idx;
public:
/**
* @brief Initialize bufhandler
*
* @param daq
* @param nchannels
*/
BufHandler(DT9837A &daq, const us nchannels)
: daq(daq), dtype_descr(daq.dtypeDescr()), nchannels(nchannels),
nFramesPerBlock(daq.framesPerBlock()), samplerate(daq.samplerate()),
buf(2 * nchannels *
nFramesPerBlock, // Watch the two here, the top and the bottom!
0),
buffer_mid_idx(nchannels * nFramesPerBlock) {
assert(nchannels > 0);
}
};
/**
* @brief Specific helper for the input buffer
*/
class InBufHandler : public BufHandler {
bool monitorOutput;
InDaqCallback cb;
public:
InBufHandler(DT9837A &daq, InDaqCallback cb);
void start();
/**
* @brief InBufHandler::operator()()
*
* @return true on success
*/
bool operator()();
~InBufHandler();
};
class OutBufHandler : public BufHandler {
OutDaqCallback cb;
public:
OutBufHandler(DT9837A &daq, OutDaqCallback cb);
void start();
/**
* @brief OutBufHandler::operator()
*
* @return true on success
*/
bool operator()();
~OutBufHandler();
};
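The BufHandler classes above implement a classic double-buffering scheme: `buf` holds two blocks of nchannels * nFramesPerBlock samples, and while the UlDaq driver works on one half, the other half is handed to (or filled by) the callback. The index arithmetic reduces to roughly the following; this is an illustrative sketch, not the committed code:

#include <cstddef>
#include <functional>
#include <vector>

// Service the half of a double buffer that the driver is *not* using.
// `currentIndex` is the driver's position within `buf`, as reported by
// ulDaqInScanStatus() / ulAOutScanStatus().
void serviceDoubleBuffer(std::vector<double> &buf, long long currentIndex,
                         bool &topHandled, bool &botHandled,
                         const std::function<void(double *block)> &handleBlock) {
  const long long mid = static_cast<long long>(buf.size() / 2);
  if (currentIndex < mid) {
    // Driver is busy in the top half: the bottom half is stable.
    topHandled = false;
    if (!botHandled) {
      handleBlock(buf.data() + mid);
      botHandled = true;
    }
  } else {
    // Driver is busy in the bottom half: the top half is stable.
    botHandled = false;
    if (!topHandled) {
      handleBlock(buf.data());
      topHandled = true;
    }
  }
}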

View File

@ -7,6 +7,7 @@ set(lasp_dsp_files
lasp_window.cpp
lasp_fft.cpp
lasp_rtaps.cpp
lasp_rtsignalviewer.cpp
lasp_avpowerspectra.cpp
lasp_biquadbank.cpp
lasp_thread.cpp
@ -14,6 +15,7 @@ set(lasp_dsp_files
lasp_slm.cpp
lasp_threadedindatahandler.cpp
lasp_ppm.cpp
lasp_clip.cpp
)

View File

@ -41,6 +41,45 @@ SeriesBiquad::SeriesBiquad(const vd &filter_coefs) {
"Filter coefficients should have fourth element (a0) equal to 1.0");
}
}
SeriesBiquad SeriesBiquad::firstOrderHighPass(const d fs, const d cuton_Hz) {
if(fs <= 0) {
throw rte("Invalid sampling frequency: " + std::to_string(fs) + " [Hz]");
}
if(cuton_Hz <= 0) {
throw rte("Invalid cuton frequency: " + std::to_string(cuton_Hz) + " [Hz]");
}
if(cuton_Hz >= 0.98*fs/2) {
throw rte("Invalid cuton frequency. We limit this to 0.98 * fs / 2. Given value: " + std::to_string(cuton_Hz) + " [Hz]");
}
const d tau = 1/(2*arma::datum::pi*cuton_Hz);
const d facnum = 2*fs*tau/(1+2*fs*tau);
const d facden = (1-2*fs*tau)/(1+2*fs*tau);
vd coefs(6);
// b0
coefs(0) = facnum;
// b1
coefs(1) = -facnum;
// b2
coefs(2) = 0;
// a0
coefs(3) = 1;
// a1
coefs(4) = facden;
// a2
coefs(5) = 0;
return SeriesBiquad(coefs);
}
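The facnum/facden values above are the bilinear-transform coefficients of the analog first order high-pass prototype. Reconstructed from the code (not stated in the commit itself), with tau = 1/(2 pi f_c) and sampling frequency f_s:

\[ H(s) = \frac{s\tau}{1+s\tau}, \qquad s \rightarrow 2 f_s \frac{1-z^{-1}}{1+z^{-1}} \quad\Rightarrow\quad H(z) = \frac{b_0 + b_1 z^{-1}}{1 + a_1 z^{-1}} \]
\[ b_0 = \frac{2 f_s \tau}{1 + 2 f_s \tau} = \mathrm{facnum}, \qquad b_1 = -b_0, \qquad a_1 = \frac{1 - 2 f_s \tau}{1 + 2 f_s \tau} = \mathrm{facden} \]

b2 and a2 are zero because the filter is only first order, and a0 is normalized to 1, as required by the SeriesBiquad constructor.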
std::unique_ptr<Filter> SeriesBiquad::clone() const {
// sos.as_col() concatenates all columns, exactly what we want.
return std::make_unique<SeriesBiquad>(sos.as_col());

View File

@ -39,6 +39,17 @@ public:
virtual ~SeriesBiquad() override {}
void reset() override final;
std::unique_ptr<Filter> clone() const override final;
/**
* @brief Create a SeriesBiquad object for a first order high-pass filter
*
* @param fs Sampling frequency [Hz]
* @param cuton_Hz Cuton-frequency [Hz]
*
* @return SeriesBiquad object with a single biquad, corresponding to a first
* order high pass filter.
*/
static SeriesBiquad firstOrderHighPass(const d fs, const d cuton_Hz);
};
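A minimal usage sketch of the new factory function; the 20 Hz cut-on and the function name removeDrift are example choices made here, not library defaults:

#include "lasp_biquadbank.h"

// Remove DC offset / low-frequency drift from a block of samples, in place.
void removeDrift(vd &samples, const d fs) {
  SeriesBiquad hp = SeriesBiquad::firstOrderHighPass(fs, 20.0);
  hp.filter(samples);
}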
/**

View File

@ -0,0 +1,94 @@
/* #define DEBUGTRACE_ENABLED */
#include "debugtrace.hpp"
#include "lasp_clip.h"
#include <mutex>
using std::cerr;
using std::endl;
using Lck = std::scoped_lock<std::mutex>;
using rte = std::runtime_error;
ClipHandler::ClipHandler(StreamMgr &mgr)
: ThreadedInDataHandler(mgr){
DEBUGTRACE_ENTER;
}
bool ClipHandler::inCallback_threaded(const DaqData &d) {
DEBUGTRACE_ENTER;
Lck lck(_mtx);
dmat data = d.toFloat();
const us nchannels = d.nchannels;
assert(data.n_cols == nchannels);
if (nchannels != _clip_time.size()) {
DEBUGTRACE_PRINT("Resizing clip indication");
_clip_time = vd(nchannels, arma::fill::value(-1));
}
/// Obtain max abs values for each column, convert this row-vec to a column
/// vector, and scale with the max-range for each channel
vd maxabs = arma::max(arma::abs(data), 0).as_col() / _max_range;
/// Find indices for channels that have a clip
arma::uvec clips = maxabs > clip_point;
for (us i = 0; i < nchannels; i++) {
if (clips(i)) {
/// Reset clip counter
_clip_time(i) = 0;
} else if (_clip_time(i) > clip_indication_time) {
/// Reset to 'unclipped'
_clip_time(i) = -1;
} else if (_clip_time(i) >= 0) {
/// Add a bit of clip time
_clip_time(i) += _dt;
}
}
return true;
}
arma::uvec ClipHandler::getCurrentValue() const {
DEBUGTRACE_ENTER;
Lck lck(_mtx);
arma::uvec clips(_clip_time.size(), arma::fill::zeros);
clips.elem(arma::find(_clip_time >= 0)).fill(1);
return {clips};
}
void ClipHandler::reset(const Daq *daq) {
DEBUGTRACE_ENTER;
Lck lck(_mtx);
if (daq) {
const us nchannels = daq->neninchannels();
_max_range.resize(nchannels);
dvec ranges = daq->inputRangeForEnabledChannels();
assert(ranges.size() == nchannels);
for(us i=0;i<daq->neninchannels();i++) {
_max_range[i] = ranges[i];
}
_clip_time.fill(-1);
const d fs = daq->samplerate();
/* DEBUGTRACE_PRINT(fs); */
_dt = daq->framesPerBlock() / fs;
}
}
ClipHandler::~ClipHandler() {
DEBUGTRACE_ENTER;
Lck lck(_mtx);
stop();
}
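The clip bookkeeping in inCallback_threaded is a small per-channel state machine: -1 means 'not clipping / indication expired', 0 means 'clipped right now', and anything in between counts up until clip_indication_time has passed. In isolation, the update rule for a single channel looks roughly like this (sketch, not the committed code):

// Update the clip timer for one channel.
//  clipTime: -1 = not clipping, >= 0 = seconds since the last clip event
//  maxAbsScaled: max(|x|) of the block, divided by the channel's full scale
//  dt: block duration in seconds; clipPoint/indicationTime as in ClipHandler
double updateClipTimer(double clipTime, double maxAbsScaled, double dt,
                       double clipPoint = 0.98, double indicationTime = 2.0) {
  if (maxAbsScaled > clipPoint) {
    return 0.0;           // clip detected: restart the indication timer
  }
  if (clipTime > indicationTime) {
    return -1.0;          // indication period over: back to 'no clip'
  }
  if (clipTime >= 0.0) {
    return clipTime + dt; // still indicating: advance the timer
  }
  return -1.0;            // was not clipping, still not clipping
}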

77
src/lasp/dsp/lasp_clip.h Normal file
View File

@ -0,0 +1,77 @@
// lasp_clip.h
//
// Author: J.A. de Jong, C.R.D. Jansen - ASCEE
//
// Description: Clip handler
#pragma once
#include <memory>
#include "lasp_filter.h"
#include "lasp_mathtypes.h"
#include "lasp_threadedindatahandler.h"
/**
* \addtogroup dsp
* @{
*
* \addtogroup rt
* @{
*/
/**
* @brief Clipping detector (Clip). Detects when a signal overdrives the input
* */
class ClipHandler: public ThreadedInDataHandler {
/**
* @brief Assuming full scale of a signal is +/- 1.0. If a scaled absolute
* value above this fraction is found, the channel is considered to be
* clipping.
*/
static inline const d clip_point = 0.98;
/**
* @brief How long it takes in [s] after a clip event has happened, that we
* are actually still in 'clip' mode.
*/
static inline const d clip_indication_time = 2.0;
/**
* @brief Inverse of block sampling frequency [s]: (framesPerBlock/fs)
*/
d _dt;
mutable std::mutex _mtx;
/**
* @brief How long ago the last clip has happened. Negative in case no clip
* has happened.
*/
vd _clip_time;
/**
* @brief Storage for maximum values
*/
vd _max_range;
public:
/**
* @brief Constructs Clipping indicator
*
* @param mgr Stream Mgr to operate on
*/
ClipHandler(StreamMgr& mgr);
~ClipHandler();
/**
* @brief Get the current values of the clip handler. Returns a vector with a
* 1 for each channel that is currently clipping, and a 0 otherwise.
*
* @return clipping indication
*/
arma::uvec getCurrentValue() const;
bool inCallback_threaded(const DaqData& ) override final;
void reset(const Daq*) override final;
};
/** @} */
/** @} */

View File

@ -135,7 +135,7 @@ cmat Fft::fft(const dmat &freqdata) {
/// * WARNING *. This was source of a serious bug. It is not possible to run
/// FFT's and IFFT's on the same _impl, as it overwrites the same memory.
/// Uncommenting the line below results in faulty results.
/// #pragma omp parallel for
// #pragma omp parallel for
for (us colno = 0; colno < freqdata.n_cols; colno++) {
res.col(colno) = _impl->fft(freqdata.col(colno));
}
@ -152,7 +152,7 @@ dmat Fft::ifft(const cmat &freqdata) {
/// * WARNING *. This was source of a serious bug. It is not possible to run
/// FFT's and IFFT's on the same _impl, as it overwrites the same memory.
/// Uncommenting the line below results in faulty results.
/// #pragma omp parallel for
// #pragma omp parallel for
for (us colno = 0; colno < freqdata.n_cols; colno++) {
res.col(colno) = _impl->ifft(freqdata.col(colno));

View File

@ -84,7 +84,14 @@ class PPMHandler: public ThreadedInDataHandler {
*/
std::tuple<vd,arma::uvec> getCurrentValue() const;
bool inCallback_threaded(const DaqData& ) override final;
/**
* @brief Implements callback on thread
*
* @param d DaqData to work with
*
* @return true when stream should continue.
*/
bool inCallback_threaded(const DaqData& d) override final;
void reset(const Daq*) override final;
};

View File

@ -19,6 +19,10 @@
* @{
*/
/**
* @brief Real time spectral estimator using Welch method of spectral
* estimation.
*/
class RtAps : public ThreadedInDataHandler {
std::unique_ptr<Filter> _filterPrototype;
@ -42,7 +46,8 @@ public:
* @param mgr StreamMgr singleton reference
* @param freqWeightingFilter Optionally: the frequency weighting filter.
* Nullptr should be given for Z-weighting.
* @param For all other arguments, see constructor of AvPowerSpectra
*
* For all other arguments, see constructor of AvPowerSpectra
*/
RtAps(StreamMgr &mgr, const Filter *freqWeightingFilter, const us nfft = 2048,
const Window::WindowType w = Window::WindowType::Hann,
@ -57,7 +62,14 @@ public:
*/
ccube getCurrentValue() const;
bool inCallback_threaded(const DaqData &) override final;
/**
* @brief Implements the work to do when new DaqData arrives
*
* @param d DaqData to use for computing/updating spectra
*
* @return true if stream should continue.
*/
bool inCallback_threaded(const DaqData & d) override final;
void reset(const Daq *) override final;
};

View File

@ -0,0 +1,104 @@
/* #define DEBUGTRACE_ENABLED */
#include "debugtrace.hpp"
#include "lasp_rtsignalviewer.h"
#include <algorithm>
#include <mutex>
using std::cerr;
using std::endl;
using Lck = std::scoped_lock<std::mutex>;
using rte = std::runtime_error;
RtSignalViewer::RtSignalViewer(StreamMgr &mgr, const d approx_time_hist,
const us resolution, const us channel)
: ThreadedInDataHandler(mgr), _approx_time_hist(approx_time_hist),
_resolution(resolution), _channel(channel) {
DEBUGTRACE_ENTER;
if (approx_time_hist <= 0) {
throw rte("Invalid time history. Should be > 0");
}
if (resolution <= 1) {
throw rte("Invalid resolution. Should be > 1");
}
}
bool RtSignalViewer::inCallback_threaded(const DaqData &data) {
DEBUGTRACE_ENTER;
Lck lck(_sv_mtx);
/// Push new data in time buffer, scaled by sensitivity
_tb.push(data.toFloat(_channel) / _sens(_channel));
_dat.col(0) += _nsamples_per_point / _fs;
/// Required number of samples for a new 'point'
while (_tb.size() > _nsamples_per_point) {
// Roll forward time column
_dat.col(0) += _nsamples_per_point / _fs;
vd newvals = _tb.pop(_nsamples_per_point);
d newmax = newvals.max();
d newmin = newvals.min();
vd mincol = _dat.col(1);
vd maxcol = _dat.col(2);
_dat(arma::span(0, _resolution-2),1) = mincol(arma::span(1, _resolution-1));
_dat(arma::span(0, _resolution-2),2) = maxcol(arma::span(1, _resolution-1));
_dat(_resolution-1, 1) = newmin;
_dat(_resolution-1, 2) = newmax;
}
return true;
}
RtSignalViewer::~RtSignalViewer() {
Lck lck(_sv_mtx);
stop();
}
void RtSignalViewer::reset(const Daq *daq) {
DEBUGTRACE_ENTER;
Lck lck(_sv_mtx);
// Reset time buffer
_tb.reset();
if (daq) {
_fs = daq->samplerate();
/// Update sensitivity values
_sens.resize(daq->neninchannels());
us i = 0;
for (const auto &ch : daq->enabledInChannels()) {
_sens[i] = ch.sensitivity;
i++;
}
/// The number of samples per point, based on which minmax are computed
_nsamples_per_point =
std::max<us>(static_cast<us>(_approx_time_hist / _resolution * _fs), 1);
const d dt_points = _nsamples_per_point / _fs;
const d tend = dt_points * _resolution;
// Initialize data array
_dat.resize(_resolution, 3);
_dat.col(0) = arma::linspace(0, tend, _resolution);
_dat.col(1).zeros();
_dat.col(2).zeros();
} else {
_sens.zeros();
_fs = 0;
_dat.reset();
}
}
dmat RtSignalViewer::getCurrentValue() const {
DEBUGTRACE_ENTER;
Lck lck(_sv_mtx);
return _dat;
}

View File

@ -0,0 +1,93 @@
// lasp_rtsignalviewer.h
//
// Author: J.A. de Jong - ASCEE
//
// Description: Real Time Signal Viewer.
#pragma once
#include "lasp_avpowerspectra.h"
#include "lasp_filter.h"
#include "lasp_mathtypes.h"
#include "lasp_threadedindatahandler.h"
#include "lasp_timebuffer.h"
#include <memory>
#include <mutex>
/**
* \addtogroup dsp
* @{
*
* \addtogroup rt
* @{
*/
/**
* @brief Real time signal viewer. Shows envelope of the signal based on amount
* of history shown.
*/
class RtSignalViewer : public ThreadedInDataHandler {
/**
* @brief Storage for sensitivity values
*/
vd _sens;
/**
* @brief Storage for time samples
*/
TimeBuffer _tb;
/**
* @brief The amount of time history to show
*/
const d _approx_time_hist;
/**
* @brief The number of points to show
*/
const us _resolution;
/**
* @brief The channel to show
*/
const us _channel;
us _nsamples_per_point;
d _fs;
dmat _dat;
/**
* @brief Mutex only for _signalviewer.
*/
mutable std::mutex _sv_mtx;
public:
/**
* @brief Construct a real time signal viewer.
*
* @param mgr Stream manager
* @param approx_time_hist The *approximate* time history to show. The exact
* time history will be calculated such that an integer number of samples
* fits in a single 'point'.
* @param resolution Number of time points
* @param channel The channel number
*/
RtSignalViewer(StreamMgr &mgr, const d approx_time_hist, const us resolution,
const us channel);
~RtSignalViewer();
/**
* @brief Returns a 2D array, with:
* - first column: time instances.
* - second column: minimum value of signal
* - third column: maximum value of signal
*
*/
dmat getCurrentValue() const;
bool inCallback_threaded(const DaqData &) override final;
void reset(const Daq *) override final;
};
/** @} */
/** @} */
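The viewer reduces the raw signal to one (min, max) pair per display point, which is the usual way to draw a long signal envelope at a fixed resolution. A standalone sketch of that reduction (Armadillo, not the committed code):

#include <algorithm>
#include <armadillo>

// Reduce `signal` to `resolution` (min, max) pairs for envelope display.
// Returns a resolution x 2 matrix; a trailing partial block is ignored.
arma::mat minMaxEnvelope(const arma::vec &signal, arma::uword resolution) {
  const arma::uword perPoint =
      std::max<arma::uword>(signal.n_elem / resolution, 1);
  arma::mat env(resolution, 2, arma::fill::zeros);
  for (arma::uword i = 0; i < resolution; i++) {
    const arma::uword start = i * perPoint;
    if (start + perPoint > signal.n_elem) break;
    arma::vec block = signal.subvec(start, start + perPoint - 1);
    env(i, 0) = block.min();
    env(i, 1) = block.max();
  }
  return env;
}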

View File

@ -6,10 +6,10 @@
// Signal generators implementation
//////////////////////////////////////////////////////////////////////
/* #define DEBUGTRACE_ENABLED */
#include "debugtrace.hpp"
#include "lasp_siggen_impl.h"
#include "debugtrace.hpp"
#include "lasp_mathtypes.h"
#include <cassert>
using rte = std::runtime_error;
@ -57,17 +57,20 @@ vd Periodic::genSignalUnscaled(const us nframes) {
Sweep::Sweep(const d fl, const d fu, const d Ts, const d Tq, const us flags)
: fl_(fl), fu_(fu), Ts(Ts), Tq(Tq), flags(flags) {
if (fl <= 0 || fu < fl || Ts <= 0) {
throw std::runtime_error("Invalid sweep parameters");
throw rte("Invalid sweep parameters");
}
if ((flags & ForwardSweep) && (flags & BackwardSweep)) {
throw std::runtime_error(
throw rte(
"Both forward and backward sweep flag set. Please only set either one "
"or none for a continuous sweep");
}
if ((flags & LinearSweep) && (flags & LogSweep)) {
throw std::runtime_error(
throw rte(
"Both logsweep and linear sweep flag set. Please only set either one.");
}
if (!((flags & LinearSweep) || (flags & LogSweep))) {
throw rte("Either LinearSweep or LogSweep should be given as a flag");
}
}
void Sweep::resetImpl() {
@ -107,15 +110,15 @@ void Sweep::resetImpl() {
if (forward_sweep || backward_sweep) {
/* Forward or backward sweep */
/* TRACE(15, "Forward or backward sweep"); */
us K = (us)(Dt * (fl * N + 0.5 * (N - 1) * (fu - fl)));
d eps_num = ((d)K) / Dt - fl * N - 0.5 * (N - 1) * (fu - fl);
d eps = eps_num / (0.5 * (N - 1));
us K = (us)(Dt * (fl * Ns + 0.5 * (Ns - 1) * (fu - fl)));
d eps_num = ((d)K) / Dt - fl * Ns - 0.5 * (Ns - 1) * (fu - fl);
d eps = eps_num / (0.5 * (Ns - 1));
/* iVARTRACE(15, K); */
/* dVARTRACE(15, eps); */
for (us n = 0; n < Ns; n++) {
_signal(n) = d_sin(phase);
d fn = fl + ((d)n) / N * (fu + eps - fl);
d fn = fl + ((d)n) / Ns * (fu + eps - fl);
phase += 2 * arma::datum::pi * Dt * fn;
}
} else {
@ -169,14 +172,14 @@ void Sweep::resetImpl() {
/* Forward or backward sweep */
DEBUGTRACE_PRINT("Forward or backward sweep");
d k1 = (fu / fl);
us K = (us)(Dt * fl * (k1 - 1) / (d_pow(k1, 1.0 / N) - 1));
us K = (us)(Dt * fl * (k1 - 1) / (d_pow(k1, 1.0 / Ns) - 1));
d k = k1;
/* Iterate k to the right solution */
d E;
for (us iter = 0; iter < 10; iter++) {
E = 1 + K / (Dt * fl) * (d_pow(k, 1.0 / N) - 1) - k;
d dEdk = K / (Dt * fl) * d_pow(k, 1.0 / N) / (N * k) - 1;
E = 1 + K / (Dt * fl) * (d_pow(k, 1.0 / Ns) - 1) - k;
d dEdk = K / (Dt * fl) * d_pow(k, 1.0 / Ns) / (Ns * k) - 1;
k -= E / dEdk;
}
@ -187,15 +190,15 @@ void Sweep::resetImpl() {
for (us n = 0; n < Ns; n++) {
_signal[n] = d_sin(phase);
d fn = fl * d_pow(k, ((d)n) / N);
d fn = fl * d_pow(k, ((d)n) / Ns);
phase += 2 * number_pi * Dt * fn;
}
} else {
DEBUGTRACE_PRINT("Continuous sweep");
const us Nf = N / 2;
const us Nb = N - Nf;
const us Nf = Ns / 2;
const us Nb = Ns - Nf;
const d k1 = (fu / fl);
const d phif1 =
2 * number_pi * Dt * fl * (k1 - 1) / (d_pow(k1, 1.0 / Nf) - 1);
@ -224,7 +227,6 @@ void Sweep::resetImpl() {
/* Iterate! */
k -= E / dEdk;
}
DEBUGTRACE_PRINT(K);
@ -254,5 +256,10 @@ void Sweep::resetImpl() {
/* This should be a very small number!! */
DEBUGTRACE_PRINT(phase);
}
} // End of log sweep
else {
// Either log or linear sweep had to be given as flags.
assert(false);
}
}
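For reference, the K and eps bookkeeping in the forward/backward linear sweep enforces phase continuity over one period. This is an interpretation reconstructed from the code, not text from the commit: with Delta t = 1/f_s and instantaneous frequency f_n = f_l + (n/N_s)(f_u + eps - f_l), the accumulated phase after N_s samples is

\[ \frac{\phi_{N_s}}{2\pi} = \Delta t \sum_{n=0}^{N_s-1} f_n = \Delta t\left(f_l N_s + \tfrac{1}{2}(N_s-1)(f_u + \varepsilon - f_l)\right). \]

Rounding the epsilon = 0 value down to the integer K and solving for epsilon gives

\[ \varepsilon = \frac{K/\Delta t - f_l N_s - \tfrac{1}{2}(N_s - 1)(f_u - f_l)}{\tfrac{1}{2}(N_s - 1)}, \]

which is exactly eps_num / (0.5 (Ns - 1)) in the code, so the sweep ends on a whole number of cycles and can be repeated without a phase jump.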

View File

@ -64,6 +64,12 @@ class Periodic: public Siggen {
vd _signal { 1, arma::fill::zeros};
us _cur_pos = 0;
public:
/**
* @brief Return copy of the generated sequence.
*
* @return As stated above
*/
vd getSequence() const { return _signal; }
virtual vd genSignalUnscaled(const us nframes) override final;
~Periodic() = default;

View File

@ -35,6 +35,7 @@ SLM::SLM(const d fs, const d Lref, const us downsampling_fac, const d tau,
{
DEBUGTRACE_ENTER;
DEBUGTRACE_PRINT(_alpha);
// Make sure thread pool is running
getPool();
@ -42,7 +43,7 @@ SLM::SLM(const d fs, const d Lref, const us downsampling_fac, const d tau,
if (Lref <= 0) {
throw rte("Invalid reference level");
}
if (tau <= 0) {
if (tau < 0) {
throw rte("Invalid time constant for Single pole lowpass filter");
}
if (fs <= 0) {
@ -67,8 +68,8 @@ std::vector<unique_ptr<Filter>> createBandPass(const dmat &coefs) {
return bf;
}
us SLM::suggestedDownSamplingFac(const d fs, const d tau) {
if(tau<0) throw rte("Invalid time weighting time constant");
if(fs<=0) throw rte("Invalid sampling frequency");
if (fs <= 0)
throw rte("Invalid sampling frequency");
// A reasonable 'framerate' for the sound level meter, based on the
// filtering time constant.
if (tau > 0) {
@ -170,8 +171,8 @@ dmat SLM::run(const vd &input_orig) {
/* DEBUGTRACE_PRINT(res.n_rows); */
/* DEBUGTRACE_PRINT(futs.size()); */
// Update the total number of samples harvested so far. NOTE: *This should be
// done AFTER the threads are done!!!*
// Update the total number of samples harvested so far. NOTE: *This should
// be done AFTER the threads are done!!!*
}
N += input.n_rows;

View File

@ -1,5 +1,4 @@
#pragma once
#include <boost/lockfree/spsc_queue.hpp>
#include "lasp_streammgr.h"
const us RINGBUFFER_SIZE = 1024;
@ -52,11 +51,11 @@ class ThreadedInDataHandler: public InDataHandler {
* @brief This function should be overridden with an actual implementation,
* of what should happen on a different thread.
*
* @param DaqData Input daq data
* @param d Input daq data
*
* @return true on success, false when an error occurred.
*/
virtual bool inCallback_threaded(const DaqData&) = 0;
virtual bool inCallback_threaded(const DaqData& d) = 0;
};

View File

@ -85,8 +85,8 @@ class FilterBankDesigner:
if self.nominal_txt(x) == nom_txt:
return x
raise ValueError(
f'Could not find a nominal frequency corresponding to {nom_txt}. Hint: use \'5k\' instead of \'5000\'.'
)
f'Could not find a nominal frequency corresponding to {nom_txt}. '
'Hint: use \'5k\' instead of \'5000\'.')
def sanitize_input(self, input_):
if isinstance(input_, int):
@ -172,7 +172,6 @@ class FilterBankDesigner:
Args:
x: Band designator
"""
SOS_ORDER = 5
fs = self.fs
@ -186,6 +185,18 @@ class FilterBankDesigner:
x = self.sanitize_input(x)
fu_n = fu / fnyq
# Handle fl & fu >= fNyquist: return 'no pass'
if fl_n >= 1:
return np.tile(np.asarray([0, 0, 0, 1, 0, 0]), (SOS_ORDER, 1))
# Handle fu >= fNyquist: return high pass
if fu_n >= 1:
highpass = butter(SOS_ORDER, fl_n, output='sos', btype='highpass')
allpass = np.tile(np.asarray([1, 0, 0, 1, 0, 0]),
(SOS_ORDER-highpass.shape[0], 1))
return np.vstack((highpass, allpass)) # same shape=(SOS_ORDER, 6)
# Regular bands
return butter(SOS_ORDER, [fl_n, fu_n], output='sos', btype='band')
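# A hedged usage sketch of the Nyquist-edge fallback above: at fs = 44100 Hz the
# highest octave band has its upper edge beyond fs/2, so the designer returns a
# high-pass padded with unit sections, keeping the (SOS_ORDER, 6) shape. The
# import path follows the compliance script added later in this changeset; that
# OctaveBankDesigner is exported there is an assumption.
from lasp.filter import OctaveBankDesigner
designer = OctaveBankDesigner(44100)
x = max(designer.xs)                # highest octave band designator (16 kHz band)
sos = designer.createSOSFilter(x)
print(sos.shape)                    # (5, 6): high-pass sections plus unit sections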
def firFreqResponse(self, x, freq):
@ -254,8 +265,8 @@ class FilterBankDesigner:
for i, x in enumerate(range(xl, xu + 1)):
fl = self.fl(x)
fu = self.fu(x)
# Find the indices in the frequency array which correspond to the
# frequency band x
# Find the indices in the frequency array which correspond to
# the frequency band x
if x != xu:
indices_cur = np.where((freq >= fl) & (freq < fu))
else:
@ -330,20 +341,20 @@ class OctaveBankDesigner(FilterBankDesigner):
@property
def xs(self):
"""All possible band designators for an octave band filter."""
return list(range(-6, 5))
return list(range(-7, 5))
def band_limits(self, x, filter_class=0):
"""Returns the octave band filter limits for filter designator x.
Args:
x: Filter offset power from the reference frequency of 1000 Hz.
filter_class: Either 0 or 1, defines the tolerances on the frequency
response
filter_class: Either 0 or 1, defines the tolerances on the
frequency response
Returns:
freq, llim, ulim: Tuple of Numpy arrays containing the frequencies of
the corner points of the filter frequency response limits, lower limits
in *deciBell*, upper limits in *deciBell*, respectively.
freq, llim, ulim: Tuple of Numpy arrays containing the frequencies
of the corner points of the filter frequency response limits, lower
limits in *deciBell*, upper limits in *deciBell*, respectively.
"""
b = 1
@ -393,7 +404,8 @@ class OctaveBankDesigner(FilterBankDesigner):
-3: '125',
-4: '63',
-5: '31.5',
-6: '16'
-6: '16',
-7: '8'
}
assert len(nominals) == len(self.xs)
return nominals[x]
@ -450,8 +462,7 @@ class OctaveBankDesigner(FilterBankDesigner):
if int(self.fs) in [44100, 48000, 96000]:
return 1.0
else:
raise ValueError('Unimplemented sampling frequency for SOS'
'filter design')
return 1.0
def sosFac_u(self, x):
"""Right side percentage of change in cut-on frequency for designing
@ -463,8 +474,7 @@ class OctaveBankDesigner(FilterBankDesigner):
if int(self.fs) in [44100, 48000, 96000]:
return 1.0
else:
raise ValueError('Unimplemented sampling frequency for SOS'
'filter design')
return 1.0
class ThirdOctaveBankDesigner(FilterBankDesigner):
@ -475,18 +485,16 @@ class ThirdOctaveBankDesigner(FilterBankDesigner):
one-third octave bands
Args:
fs: Sampling frequency in [Hz]
fs: Sampling frequency in [Hz] - can be None
"""
super().__init__(fs)
self.xs = list(range(-16, 14))
self.xs = list(range(-19, 14))
# Text corresponding to the nominal frequency
self._nominal_txt = [
self._nominal_txt = ['12.5', '16', '20',
'25', '31.5', '40', '50', '63', '80', '100', '125', '160', '200',
'250', '315', '400', '500', '630', '800', '1k', '1.25k', '1.6k',
'2k', '2.5k', '3.15k', '4k', '5k', '6.3k', '8k', '10k', '12.5k',
'16k', '20k'
]
'16k', '20k']
assert len(self.xs) == len(self._nominal_txt)
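# A quick hedged check of the extended band bookkeeping above; the import path
# is taken from the compliance-check script added later in this changeset and
# fs is an example value:
from lasp.filter import ThirdOctaveBankDesigner
designer = ThirdOctaveBankDesigner(48000)
print(designer.nominal_txt(0))            # '1k'  : x = 0 is the 1 kHz reference band
print(designer.nominal_txt(-19))          # '12.5': lowest band after this change
print(designer.nominal_txt_tox('12.5'))   # -19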
@ -511,13 +519,13 @@ class ThirdOctaveBankDesigner(FilterBankDesigner):
Args:
x: Filter offset power from the reference frequency of 1000 Hz.
filter_class: Either 0 or 1, defines the tolerances on the frequency
response
filter_class: Either 0 or 1, defines the tolerances on the
frequency response
Returns:
freq, llim, ulim: Tuple of Numpy arrays containing the frequencies of
the corner points of the filter frequency response limits, lower limits
in *deciBell*, upper limits in *deciBell*, respectively.
freq, llim, ulim: Tuple of Numpy arrays containing the frequencies
of the corner points of the filter frequency response limits, lower
limits in *deciBell*, upper limits in *deciBell*, respectively.
"""
fm = self.G**(x / self.b) * self.fr
@ -603,25 +611,15 @@ class ThirdOctaveBankDesigner(FilterBankDesigner):
"""Left side percentage of change in cut-on frequency for designing the
filter."""
# Idea: correct for frequency warping:
if np.isclose(self.fs, 48000):
return 1.00
elif np.isclose(self.fs, 41000):
warnings.warn(
f'Frequency {self.fs} might not result in correct filters')
return 1.00
elif np.isclose(self.fs, 32768):
return 1.00
if int(self.fs) in [48000]:
return 1.0
else:
raise ValueError('Unimplemented sampling frequency for SOS'
'filter design')
return 1.0
def sosFac_u(self, x):
"""Right side percentage of change in cut-on frequency for designing
the filter."""
if np.isclose(self.fs, 48000):
return 1
elif np.isclose(self.fs, 32768):
return 1.00
if int(self.fs) in [48000]:
return 1.0
else:
raise ValueError('Unimplemented sampling frequency for SOS'
'filter design')
return 1.0

View File

@ -73,15 +73,21 @@ class Qty:
return f'{self.name} [{self.unit_symb}]'
def __eq__(self, other):
# logging.debug('eq()')
"""
Equality is based on the name and unit name only; comparing the other
attributes (level unit, level reference name, etc.) breaks, as these are
tuples / a single string.
"""
# logging.debug(f'eq() {self.name} {other.name}')
return (self.name == other.name and
self.unit_name == other.unit_name)
def toInt(self):
"""
Convert quantity to its specific enum integer value
"""
return self.cpp_enum.value
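# Hedged illustration of the behaviour described above; SIQtys.AP (acoustic
# pressure) and its .value Qty instance are used elsewhere in this module:
qty = SIQtys.AP.value
print(qty.toInt())               # integer value of the underlying C++ enum
print(qty == SIQtys.AP.value)    # True: only name and unit_name are compared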
@unique
@ -147,10 +153,13 @@ class CalSetting:
qty: Qty
class CalibrationSettings:
one = CalSetting('94 dB SPL', 94.0 , 1.0, SIQtys.AP)
two = CalSetting('114 dB SPL', 114.0 , 10.0, SIQtys.AP)
one = CalSetting('94 dB SPL', 94.0 , 10**(94/20)*2e-5, SIQtys.AP.value)
two = CalSetting('1 Pa rms', 93.98, 1.0, SIQtys.AP.value)
three = CalSetting('114 dB SPL', 114.0 , 10**(114/20)*2e-5, SIQtys.AP.value)
four = CalSetting('10 Pa rms', 113.98, 10.0, SIQtys.AP.value)
five = CalSetting('93.7 dB SPL', 93.7 , 1.0, SIQtys.AP.value)
types = (one, two)
types = (one, two, three, four, five)
default = one
default_index = 0
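# A worked check of the reference values above (re p_ref = 20 µPa); plain
# arithmetic, independent of LASP:
import math
p_ref = 2e-5                            # [Pa]
print(10**(94 / 20) * p_ref)            # ~1.0024 Pa rms for 94 dB SPL
print(10**(114 / 20) * p_ref)           # ~10.024 Pa rms for 114 dB SPL
print(20 * math.log10(1.0 / p_ref))     # ~93.98 dB SPL for exactly 1 Pa rms
print(20 * math.log10(10.0 / p_ref))    # ~113.98 dB SPL for exactly 10 Pa rms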
@ -277,46 +286,57 @@ class this_lasp_shelve(Shelve):
return os.path.join(lasp_appdir, f'{node}_config.shelve')
class TimeWeighting:
none = (-1, 'Raw (no time weighting)')
# This is not actually a time weighting
none = (0, 'Raw (no time weighting)')
uufast = (1e-4, '0.1 ms')
ufast = (35e-3, 'Impulse (35 ms)')
fast = (0.125, 'Fast (0.125 s)')
slow = (1.0, 'Slow (1.0 s)')
tens = (10., '10 s')
infinite = (0, 'Infinite')
types_realtime = (ufast, fast, slow, tens, infinite)
types_all = (none, uufast, ufast, fast, slow, tens, infinite)
# All-averaging: compute the running average over the full history, comparable
# to an equivalent level. Only useful for power spectra; for the Sound Level
# Meter the equivalent level (Leq) already covers this.
averaging = (-1, 'All-averaging')
types_all = (none, uufast, ufast, fast, slow, tens, averaging)
# Used for the combobox of the real-time power spectra
types_realtimeaps = (ufast, fast, slow, tens, averaging)
# Used for the combobox of the real-time sound level meter
types_realtimeslm = (ufast, fast, slow, tens)
# Used for combobox of sound level meter - time dependent
types_slmt = (none, uufast, ufast, fast, slow, tens)
# Used for combobox of sound level meter - Lmax statistics
types_slmstats = (uufast, ufast, fast, slow, tens)
default = fast
default_index = 3
default_index_realtime = 1
@staticmethod
def fillComboBox(cb, realtime=False):
def fillComboBox(cb, types):
"""
Fill TimeWeightings to a combobox
Args:
cb: QComboBox to fill
types: The types to fill it with
"""
cb.clear()
if realtime:
types = TimeWeighting.types_realtime
defindex = TimeWeighting.default_index_realtime
else:
types = TimeWeighting.types_all
defindex = TimeWeighting.default_index
logging.debug(f'{types}')
for tw in types:
cb.addItem(tw[1], tw)
cb.setCurrentIndex(defindex)
if TimeWeighting.default == tw:
cb.setCurrentIndex(cb.count()-1)
@staticmethod
def getCurrent(cb):
if cb.count() == len(TimeWeighting.types_realtime):
return TimeWeighting.types_realtime[cb.currentIndex()]
else:
return TimeWeighting.types_all[cb.currentIndex()]
for type in TimeWeighting.types_all:
if cb.currentText() == type[1]:
return type
class FreqWeighting:

View File

@ -8,14 +8,23 @@
* Welcome to the LASP (Library for Acoustic Signal Processing) code
* documentation. The code comprises a part which is written in C++, a part
* that is written in Python, and a part that functions as glue, which is
* Pybind11 C++ glue code. An example of such a file is the current one.
* Pybind11 C++ glue code. An example of such a file is lasp_cpp.cpp.
* This is the internal documentation of LASP. It serves as background
* information for programmers.
*
* \section Installation
*
* For the installation manual, please refer to the README.md of the Git
* repository. It is recommended to install the software from source.
* For the installation manual, please refer to the <a
* href="https://code.ascee.nl/ASCEE/lasp/src/branch/master/README.md">README</a>
* of the Git repository.
*
*
* \section Usage
*
* Some usage examples are given in the <a href=
* "https://code.ascee.nl/ASCEE/lasp/src/branch/master/examples">examples</a>
* directory of the repository.
*
* */
/**
@ -48,7 +57,6 @@ PYBIND11_MODULE(lasp_cpp, m) {
m.attr("__version__") = std::to_string(LASP_VERSION_MAJOR) + "." +
std::to_string(LASP_VERSION_MINOR);
m.attr("LASP_VERSION_MAJOR") = LASP_VERSION_MAJOR;
m.attr("LASP_VERSION_MINOR") = LASP_VERSION_MINOR;
}

View File

@ -92,6 +92,18 @@ class DaqConfigurations:
output_config = DaqConfiguration.fromTOML(config_str[2])
return DaqConfigurations(duplex_mode, input_config, output_config)
@staticmethod
def loadRaw():
"""
Returns configuration presets in the raw form in which they are stored.
Returns:
all configurations, in the raw (stored) format
"""
with lasp_shelve() as sh:
configs_raw = sh.load(f"daqconfigs_v{LASP_VERSION_MAJOR}", {})
return configs_raw
def save(self, name: str):
"""
Save the current set of configurations to the shelve store.
@ -109,6 +121,17 @@ class DaqConfigurations:
configs_str[name] = [self.duplex_mode, input_str, output_str]
sh.store(f"daqconfigs_v{LASP_VERSION_MAJOR}", configs_str)
@staticmethod
def saveRaw(configs_raw):
"""
Save configuration presets, using already formatted (raw) data.
Args:
configs_raw: all configurations, in the raw format in which they are stored
"""
with lasp_shelve() as sh:
sh.store(f"daqconfigs_v{LASP_VERSION_MAJOR}", configs_raw)
@staticmethod
def delete(name: str):
"""

View File

@ -50,7 +50,8 @@ from .lasp_config import LASP_NUMPY_FLOAT_TYPE
from scipy.io import wavfile
import os, time, wave, logging
from .lasp_common import SIQtys, Qty, getFreq
from .lasp_cpp import Window, DaqChannel, LASP_VERSION_MAJOR
from .lasp_cpp import Window, DaqChannel, LASP_VERSION_MAJOR, AvPowerSpectra
from typing import List
def getSampWidth(dtype):
@ -195,7 +196,7 @@ class Measurement:
# Open the h5 file in read-plus mode, to allow for changing the
# measurement comment.
with h5.File(fn, 'r+') as f:
with h5.File(fn, 'r') as f:
# Check for video data
try:
f['video']
@ -233,10 +234,9 @@ class Measurement:
self._channelNames = [f'Unnamed {i}' for i in range(self.nchannels)]
# The comment attribute is read-write
try:
if 'comment' in f.keys():
self._comment = f.attrs['comment']
except KeyError:
f.attrs['comment'] = ''
else:
self._comment = ''
# Sensitivity
@ -250,7 +250,13 @@ class Measurement:
self._time = f.attrs['time']
# Quantity stored as channel.
self._qtys = None
try:
qtys_enum_idx = f.attrs['qtys_enum_idx']
self._qtys = [SIQtys.fromInt(idx) for idx in qtys_enum_idx]
except KeyError:
try:
qtys_json = f.attrs['qtys']
# Load quantity data
@ -260,18 +266,11 @@ class Measurement:
# measurement file.
pass
try:
qtys_enum_idx = f.attrs['qtys_enum_idx']
self._qtys = [SIQtys.fromInt(idx) for idx in qtys_enum_idx]
except KeyError:
pass
if self._qtys is None:
self._qtys = [SIQtys.default() for i in range(self.nchannels)]
logging.debug(f'Physical quantity data not available in measurement file. Assuming {SIQtys.default}')
logging.debug(f'Physical quantity data not available in measurement file. Assuming {SIQtys.default}')
def setAttribute(self, atrname, value):
"""
Set an attribute in the measurement file, and keep a local copy in
@ -311,14 +310,14 @@ class Measurement:
return chcfg
@channelConfig.setter
def channelConfig(self, chcfg):
def channelConfig(self, chcfg: List[DaqChannel]):
chname = []
sens = []
qtys = []
for ch in chcfg:
chname.append(ch.name)
sens.append(ch.sensitivity)
qtys.append(SIQtys.fromInt(ch.qty))
qtys.append(SIQtys.fromCppEnum(ch.qty))
self.channelNames = chname
self.sensitivity = sens
@ -332,11 +331,14 @@ class Measurement:
def qtys(self, newqtys):
if not len(newqtys) == len(self._qtys):
raise ValueError('Invalid number of quantities')
qtys_json = [qty.to_json() for qty in newqtys]
qtys_int = [qty.toInt() for qty in newqtys]
# Use setAttribute here, but that stores the jsonified version as well,
# which we then have to overwrite again with the deserialized ones. This
# is actually not a very nice way of coding.
self.setAttribute('qtys', qtys_json)
with self.file('r+') as f:
# Update comment attribute in the file
f.attrs['qtys_enum_idx'] = qtys_int
self._qtys = newqtys
@contextmanager
@ -477,20 +479,20 @@ class Measurement:
"""
nfft = kwargs.pop('nfft', 2048)
window = kwargs.pop('window', Window.hann)
window = kwargs.pop('windowType', Window.WindowType.Hann)
overlap = kwargs.pop('overlap', 50.0)
if channels is None:
channels = list(range(self.nchannels))
nchannels = len(channels)
aps = AvPowerSpectra(nfft, nchannels, overlap, window)
aps = AvPowerSpectra(nfft, window, overlap)
freq = getFreq(self.samplerate, nfft)
for data in self.iterData(channels, **kwargs):
CS = aps.addTimeData(data)
CS = aps.compute(data)
return freq, CS
return freq, aps.get_est()
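# Hedged sketch of the new AvPowerSpectra call convention used above: the
# constructor takes (nfft, window type, overlap percentage), blocks of time
# data go into compute(), and the running estimate is read with get_est().
# The signal below is synthetic; import paths follow this changeset.
import numpy as np
from lasp import AvPowerSpectra, Window
from lasp.lasp_common import getFreq

fs, nfft = 48000, 2048
aps = AvPowerSpectra(nfft, Window.WindowType.Hann, 50.0)
t = np.arange(10 * fs) / fs
data = np.sin(2 * np.pi * 1000 * t)[:, None]   # one channel, column vector
aps.compute(data)
CS = aps.get_est()                             # averaged cross-spectral estimate
freq = getFreq(fs, nfft)
print(freq.shape, np.asarray(CS).shape)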
def periodicAverage(self, N, channels=None, noiseCorrection=True, **kwargs):
"""

View File

@ -0,0 +1,66 @@
"""
Provides class MeasurementSet, a class used to perform checks and adjustments
on a group of measurements at the same time.
"""
__all__ = ['MeasurementSet']
from .lasp_measurement import Measurement
from typing import List
class MeasurementSet(list):
"""
Group of measurements that have some correspondence to one another. Class
is used to operate on multiple measurements at once.
"""
def __init__(self, mlist: List[Measurement] =[]):
"""
Initialize a measurement set
Args:
mlist: Measurement list
"""
if any([not isinstance(i, Measurement) for i in mlist]):
raise TypeError('Object in list should be of Measurement type')
super().__init__(mlist)
def measTimeSame(self):
"""
Returns True if all measurements have the same measurement
time (recorded time)
"""
if len(self) > 0:
first = self[0].N
return all([first == meas.N for meas in self])
else:
return False
def measSimilar(self):
"""
Similar means: the channel metadata and the measurement time are the same,
while the recorded data itself is, of course, different.
Returns:
True if measChannelsSame() and measTimeSame() else False
"""
return self.measTimeSame() and self.measChannelsSame()
def measChannelsSame(self):
"""
This method is used to check whether a set of measurements can be
accessed in a loop, i.e. for computing power spectra or sound levels on
a set of measurements, simultaneously. If the channel data is the same
(name, sensitivity, ...) it returns True.
"""
if len(self) > 0:
first = self[0].channelConfig
return all([first == meas.channelConfig for meas in self])
else:
return False
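# Hedged usage sketch, assuming the definitions above are in scope and that
# 'meas_a.h5' and 'meas_b.h5' are existing measurement files (example names):
ms = MeasurementSet([Measurement('meas_a.h5'), Measurement('meas_b.h5')])
if ms.measSimilar():
    # Channel metadata and recorded length match: safe to process in one loop
    for meas in ms:
        print(meas.N)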

View File

@ -266,6 +266,8 @@ class SosFilterBank:
return {}
filtered_data = self._fb.filter(data)
if filtered_data.ndim == 1:
filtered_data = filtered_data[:, None]
# Output given as a dictionary with nom_txt as the key
output = {}

View File

@ -135,7 +135,8 @@ class Recording:
f.attrs["channelNames"] = [ch.name for ch in in_ch]
# Add the start delay here, as firstFrames() is called right after the
# constructor is called.
# constructor is called. time.time() returns a floating point
# number of seconds since the epoch.
f.attrs["time"] = time.time() + self.startDelay
# In V2, we do not store JSON metadata anymore, but just an enumeration
@ -143,6 +144,7 @@ class Recording:
f.attrs["qtys_enum_idx"] = [ch.qty.value for ch in in_ch]
# Measured physical quantity metadata
# This was how it was in LASP version < 1.0
# f.attrs['qtys'] = [ch.qty.to_json() for ch in in_ch]
def firstFrames(self, adata):
@ -174,18 +176,15 @@ class Recording:
def inCallback(self, adata):
"""
This method should be called to grab data from the input queue, which
is filled by the stream, and put it into a file. It should be called at
a regular interval to prevent overflowing of the queue. It is called
within the start() method of the recording, if block is set to True.
Otherwise, it should be called from its parent at regular intervals.
For example, in Qt this can be done using a QTimer.
This method is called when a block of audio data from the stream is
available. It should return either True or False:
returning False stops the stream.
"""
if self.stop():
# Stop flag is raised. We do not add any data anymore.
return
return True
with self.file_mtx:

View File

@ -27,7 +27,6 @@ class SLM:
Multi-channel Sound Level Meter. Input data: time data with a certain
sampling frequency. Output: time-weighted (fast/slow) sound pressure
levels in dB(A/C/Z). Possibly in octave bands.
"""
def __init__(self,
@ -38,7 +37,8 @@ class SLM:
xmin=None,
xmax=None,
include_overall=True,
level_ref_value=P_REF):
level_ref_value=P_REF,
offset_t=0):
"""
Initialize a sound level meter object.
@ -55,7 +55,7 @@ class SLM:
include_overall: If true, a non-functioning filter is added which
is used to compute the overall level.
level_ref_value: Reference value for computing the levels in dB
offset_t: Offset to be added to output time data [s]
"""
self.fbdesigner = fbdesigner
@ -72,8 +72,10 @@ class SLM:
self.xs = list(range(xmin, xmax + 1))
nfilters = len(self.xs)
if include_overall: nfilters +=1
if include_overall:
nfilters += 1
self.include_overall = include_overall
self.offset_t = offset_t
spld = SPLFilterDesigner(fs)
if fw == FreqWeighting.A:
@ -136,7 +138,6 @@ class SLM:
# Initialize counter to 0
self.N = 0
def run(self, data):
"""
Add new fresh timedata to the Sound Level Meter
@ -153,7 +154,7 @@ class SLM:
Ncur = levels.shape[0]
tend = tstart + Ncur / self.fs_slm
t = np.linspace(tstart, tend, Ncur, endpoint=False)
t = np.linspace(tstart, tend, Ncur, endpoint=False) + self.offset_t
self.N += Ncur
output = {}
@ -168,7 +169,6 @@ class SLM:
return output
def return_as_dict(self, dat):
"""
Helper function used to return resulting data in a proper way.

View File

@ -25,18 +25,17 @@ void init_daq(py::module &m) {
.value("driverError", Daq::StreamStatus::StreamError::driverError)
.value("systemError", Daq::StreamStatus::StreamError::systemError)
.value("threadError", Daq::StreamStatus::StreamError::threadError)
.value("logicError", Daq::StreamStatus::StreamError::logicError)
.value("apiSpecificError",
Daq::StreamStatus::StreamError::apiSpecificError);
.value("logicError", Daq::StreamStatus::StreamError::logicError);
ss.def("errorMsg", &Daq::StreamStatus::errorMsg);
/// Daq
daq.def("neninchannels", &Daq::neninchannels, py::arg("include_monitor") = true);
daq.def("neninchannels", &Daq::neninchannels,
py::arg("include_monitor") = true);
daq.def("nenoutchannels", &Daq::nenoutchannels);
daq.def("samplerate", &Daq::samplerate);
daq.def("dataType", &Daq::dataType);
daq.def("framesPerBlock", &Daq::framesPerBlock);
daq.def("getStreamStatus", &Daq::getStreamStatus);
}

View File

@ -1,4 +1,3 @@
#include <pybind11/numpy.h>
#include <pybind11/stl.h>
#include <stdint.h>
#include "lasp_deviceinfo.h"
@ -36,5 +35,7 @@ void init_deviceinfo(py::module& m) {
devinfo.def_readonly("hasInputACCouplingSwitch",
&DeviceInfo::hasInputACCouplingSwitch);
devinfo.def_readonly("physicalOutputQty", &DeviceInfo::physicalOutputQty);
}

View File

@ -1,11 +1,13 @@
#include <armadillo>
#define DEBUGTRACE_ENABLED
/* #define DEBUGTRACE_ENABLED */
#include "arma_npy.h"
#include "debugtrace.hpp"
#include "lasp_ppm.h"
#include "lasp_clip.h"
#include "lasp_rtaps.h"
#include "lasp_rtsignalviewer.h"
#include "lasp_streammgr.h"
#include "lasp_threadedindatahandler.h"
#include <armadillo>
#include <atomic>
#include <chrono>
#include <pybind11/pybind11.h>
@ -185,13 +187,23 @@ void init_datahandler(py::module &m) {
ppm.def(py::init<StreamMgr &>());
ppm.def("getCurrentValue", [](const PPMHandler &ppm) {
std::tuple<vd, arma::uvec> tp = ppm.getCurrentValue();
return py::make_tuple(ColToNpy<d>(std::get<0>(tp)),
ColToNpy<arma::uword>(std::get<1>(tp)));
});
/// Clip Detector
py::class_<ClipHandler, ThreadedInDataHandler> clip(m, "ClipHandler");
clip.def(py::init<StreamMgr &>());
clip.def("getCurrentValue", [](const ClipHandler &clip) {
arma::uvec cval = clip.getCurrentValue();
return ColToNpy<arma::uword>(cval); // something goes wrong here
});
/// Real time Aps
///
py::class_<RtAps, ThreadedInDataHandler> rtaps(m, "RtAps");
@ -218,4 +230,18 @@ void init_datahandler(py::module &m) {
ccube val = rt.getCurrentValue();
return CubeToNpy<c>(val);
});
/// Real time Signal Viewer
///
py::class_<RtSignalViewer, ThreadedInDataHandler> rtsv(m, "RtSignalViewer");
rtsv.def(py::init<StreamMgr &, // StreamMgr
const d, // Time history
const us, // Resolution
const us // Channel number
>());
rtsv.def("getCurrentValue", [](RtSignalViewer &rt) {
dmat val = rt.getCurrentValue();
return MatToNpy<d>(val);
});
}

View File

@ -1,10 +1,6 @@
#include "lasp_avpowerspectra.h"
#include "lasp_biquadbank.h"
#include "lasp_fft.h"
#include "lasp_siggen.h"
#include "arma_npy.h"
#include "lasp_siggen_impl.h"
#include "lasp_slm.h"
#include "lasp_window.h"
#include <iostream>
#include <pybind11/pybind11.h>
@ -31,7 +27,9 @@ void init_siggen(py::module &m) {
siggen.def("setDCOffset", &Siggen::setDCOffset);
siggen.def("setMute", &Siggen::setMute);
siggen.def("setLevel", &Siggen::setLevel);
siggen.def("setLevel", &Siggen::setLevel, py::arg("newLevel"), py::arg("dB") = true);
siggen.def("setLevel", &Siggen::setLevel, py::arg("newLevel"),
py::arg("dB") = true);
siggen.def("reset", &Siggen::reset);
siggen.def("setFilter", &Siggen::setFilter);
@ -42,7 +40,10 @@ void init_siggen(py::module &m) {
sine.def(py::init<const d>());
sine.def("setFreq", &Sine::setFreq);
py::class_<Periodic, std::shared_ptr<Periodic>> periodic(m, "Periodic", siggen);
py::class_<Periodic, std::shared_ptr<Periodic>> periodic(m, "Periodic",
siggen);
periodic.def("getSequence",
[](const Sweep &s) { return ColToNpy<d>(s.getSequence()); });
py::class_<Sweep, std::shared_ptr<Sweep>> sweep(m, "Sweep", periodic);
sweep.def(py::init<const d, const d, const d, const d, const us>());
@ -50,6 +51,5 @@ void init_siggen(py::module &m) {
sweep.def_readonly_static("BackwardSweep", &Sweep::BackwardSweep);
sweep.def_readonly_static("LinearSweep", &Sweep::LinearSweep);
sweep.def_readonly_static("LogSweep", &Sweep::LogSweep);
}
/** @} */

View File

@ -79,6 +79,70 @@ class SmoothingType:
# TO DO: add possibility to insert data that is not lin spaced in frequency
def smoothCalcMatrix(freq, sw: SmoothingWidth):
"""
Args:
freq: array of frequencies of data points [Hz] - equally spaced
sw: SmoothingWidth
Returns:
Q: matrix to smooth power: {fsm} = [Q] * {fraw}
Warning: this method does not work on levels (dB)
"""
# Settings
tr = 2 # truncate window after 2x std; shorter is faster and less accurate
Noct = sw.value[0]
assert Noct > 0, "'Noct' must be absolute positive"
if Noct < 1:
raise Warning('Check if \'Noct\' is entered correctly')
# Initialize
L = len(freq)
Q = np.zeros(shape=(L, L), dtype=np.float16) # float16: keep size small
x0 = 1 if freq[0] == 0 else 0 # Skip first data point if zero frequency
# Loop over indices of raw frequency vector
for x in range(x0, L):
# Find indices of data points to calculate current (smoothed) magnitude
#
# Indices beyond [0, L] point to non-existing data. Beyond 0 does not
# occur in this implementation. Beyond L occurs when the smoothing
# window nears the end of the series.
# If one end of the window is truncated, the other end
# could be truncated as well, to prevent an error on magnitude data
# with a slope. It however results in unsmoothed looking data at the
# end.
fc = freq[x] # center freq. of smoothing window
fl = fc / np.sqrt(2**(tr/Noct)) # lower cutoff
fu = fc * np.sqrt(2**(tr/Noct)) # upper cutoff
# If the upper (frequency) side of the window is truncated because
# there is no data beyond the Nyquist frequency, also truncate the
# other side to keep it symmetric in a log(frequency) scale.
# So: fu / fc = fc / fl
fNq = freq[-1]
if fu > fNq:
fu = fNq # no data beyond fNq
fl = fc**2 / fu # keep window symmetric
# Find indices corresponding to frequencies
xl = bisect.bisect_left(freq, fl) # index corresponding to fl
xu = bisect.bisect_left(freq, fu)
xl = xu-1 if xu-xl <= 0 else xl # Guarantee window length of at least 1
# Calculate window
xg = np.arange(xl, xu) # indices
fg = freq[xg] # [Hz] corresponding freq
gs = np.sqrt( 1/ ((1+((fg/fc - fc/fg)*(1.507*Noct))**6)) ) # raw window
gs /= np.sum(gs) # normalize: integral=1
Q[x, xl:xu] = gs # add to matrix
return Q
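# Hedged sketch of how the smoothing matrix is applied: power is smoothed as a
# single matrix-vector product, Psm = Q @ P (see smoothSpectralData below). The
# stand-in class mirrors the one used in the __main__ demo at the bottom of
# this file; the data is synthetic.
import numpy as np

class sw_stub:                          # stand-in for a SmoothingWidth member
    value = [3]                         # 1/3-octave smoothing (Noct = 3)

freq = np.linspace(0, 24e3, 1025)       # e.g. rfft frequencies for nfft = 2048
P = np.random.rand(len(freq))           # some (positive) power spectrum
Q = smoothCalcMatrix(freq, sw_stub)
Psm = Q @ P                             # smoothed power, same length as P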
def smoothSpectralData(freq, M, sw: SmoothingWidth,
st: SmoothingType = SmoothingType.levels):
"""
@ -92,6 +156,10 @@ def smoothSpectralData(freq, M, sw: SmoothingWidth,
side. The deviation is largest when Noct is small (e.g. coarse smoothing).
Casper Jansen, 07-05-2021
Update 16-01-2023: speed up algorithm
- Smoothing is performed using matrix multiplication
- The smoothing matrix is not calculated if it already exists
Args:
freq: array of frequencies of data points [Hz] - equally spaced
M: array of either power, transfer function or dB points. Depending on
@ -104,9 +172,6 @@ def smoothSpectralData(freq, M, sw: SmoothingWidth,
"""
# TODO: Make this function multi-dimensional array aware.
# Settings
tr = 2 # truncate window after 2x std; shorter is faster and less accurate
# Safety
MM = copy.deepcopy(M)
Noct = sw.value[0]
@ -135,169 +200,33 @@ def smoothSpectralData(freq, M, sw: SmoothingWidth,
# P is power while smoothing. x are indices of P.
Psm = np.zeros_like(P) # Smoothed power - to be calculated
x0 = 1 if freq[0] == 0 else 0 # Skip first data point if zero frequency
if freq[0] == 0:
Psm[0] = P[0] # Reuse old value in case first data..
# ..point is skipped. Not plotted any way.
# Loop through data points
for x in range(x0, L):
# Find indices of data points to calculate current (smoothed) magnitude
#
# Indices beyond [0, L] point to non-existing data. Beyond 0 does not
# occur in this implementation. Beyond L occurs when the smoothing
# window nears the end of the series.
# If one end of the window is truncated, the other end
# could be truncated as well, to prevent an error on magnitude data
# with a slope. It however results in unsmoothed looking data at the
# end.
fc = freq[x] # center freq. of smoothing window
fl = fc / np.sqrt(2**(tr/Noct)) # lower cutoff
fu = fc * np.sqrt(2**(tr/Noct)) # upper cutoff
# Re-use smoothing matrix Q if available. Otherwise, calculate.
# Store in dict 'Qdict'
nfft = int(2*(len(freq)-1))
key = f"nfft{nfft}_Noct{Noct}" # matrix name
# If the upper (frequency) side of the window is truncated because
# there is no data beyond the Nyquist frequency, also truncate the
# other side to keep it symmetric in a log(frequency) scale.
# So: fu / fc = fc / fl
fNq = freq[-1]
if fu > fNq:
fu = fNq # no data beyond fNq
fl = fc**2 / fu # keep window symmetric
if 'Qdict' not in globals(): # Guarantee Qdict exists
global Qdict
Qdict = {}
# Find indices corresponding to frequencies
xl = bisect.bisect_left(freq, fl) # index corresponding to fl
xu = bisect.bisect_left(freq, fu)
# Guarantee window length of at least 1
if xu - xl <= 0:
xl = xu - 1
# Calculate window
g = np.zeros(xu-xl)
for n, xi in enumerate(range(xl, xu)):
fi = freq[xi] # current frequency
g[n] = np.sqrt( 1/ ((1+((fi/fc - fc/fi)*(1.507*Noct))**6)) )
g /= np.sum(g) # normalize: integral=1
if key in Qdict:
Q = Qdict[key]
else:
Q = smoothCalcMatrix(freq, sw)
Qdict[key] = Q
# Apply smoothing
Psm[x] = np.dot(g, P[xl:xu])
Psm = np.matmul(Q, P)
if st == SmoothingType.levels:
Psm = 10*np.log10(Psm)
return Psm
## OLD VERSION
#from scipy.signal.windows import gaussian
#def smoothSpectralData(freq, M, sw: SmoothingWidth,
# st: SmoothingType = SmoothingType.levels):
# """
# Apply fractional octave smoothing to magnitude data in frequency domain.
# Smoothing is performed to power, using a sliding Gaussian window with
# variable length. The window is truncated after 2x std at either side.
#
# The implementation is not exact, because f is linearly spaced and
# fractional octave smoothing is related to log spaced data. In this
# implementation, the window extends with a fixed frequency step to either
# side. The deviation is largest when Noct is small (e.g. coarse smoothing).
# Casper Jansen, 07-05-2021
#
# Args:
# freq: array of frequencies of data points [Hz] - equally spaced
# M: array of either power, transfer functin or dB points. Depending on
# the smoothing type `st`, the smoothing is applied.
#
# Returns:
# freq : array frequencies of data points [Hz]
# Msm : float smoothed magnitude of data points
#
# """
# # TODO: Make this function multi-dimensional array aware.
#
# # Settings
# tr = 2 # truncate window after 2x std; shorter is faster and less accurate
#
# # Safety
# Noct = sw.value[0]
# assert Noct > 0, "'Noct' must be absolute positive"
# if Noct < 1: raise Warning('Check if \'Noct\' is entered correctly')
# assert len(freq)==len(M), 'f and M should have equal length'
#
# if st == SmoothingType.ps:
# assert np.min(M) >= 0, 'absolute magnitude M cannot be negative'
# if st == SmoothingType.levels and isinstance(M.dtype, complex):
# raise RuntimeError('Decibel input should be real-valued')
#
# # Initialize
# L = M.shape[0] # number of data points
#
# P = M
# if st == SmoothingType.levels:
# P = 10**(P/10)
# # TODO: This does not work due to complex numbers. Should be split up in
# # magnitude and phase.
# # elif st == SmoothingType.tf:
# # P = P**2
#
# # P is power while smoothing. x are indices of P.
# Psm = np.zeros_like(P) # Smoothed power - to be calculated
# x0 = 1 if freq[0]==0 else 0 # Skip first data point if zero frequency
# Psm[0] = P[0] # Reuse old value in case first data..
# # ..point is skipped. Not plotted any way.
# df = freq[1] - freq[0] # Frequency step
#
# # Loop through data points
# for x in range(x0, L):
# # Find indices of data points to calculate current (smoothed) magnitude
# fc = freq[x] # center freq. of smoothing window
# Df = tr * fc / Noct # freq. range of smoothing window
#
# # desired lower index of frequency array to be used during smoothing
# xl = int(np.ceil(x - 0.5*Df/df))
#
# # upper index + 1 (because half-open interval)
# xu = int(np.floor(x + 0.5*Df/df)) + 1
#
# # Create window, suitable for frequency lin-spaced data points
# Np = xu - xl # number of points
# std = Np / (2 * tr)
# wind = gaussian(Np, std) # Gaussian window
#
# # Clip indices to valid range
# #
# # Indices beyond [0, L] point to non-existing data. This occurs when
# # the smoothing windows nears the beginning or end of the series.
# # Optional: if one end of the window is clipped, the other end
# # could be clipped as well, to prevent an error on magnitude data with
# # a slope. It however results in unsmoothed looking data at the ends.
# if xl < 0:
# rl = 0 - xl # remove this number of points at the lower end
# xl = xl + rl # .. from f
# wind = wind[rl:] # .. and from window
#
# # rl = 0 - xl # remove this number of points at the lower end
# # xl = xl + rl # .. from f
# # xu = xu - rl
# # wind = wind[rl:-rl] # .. and from window
#
# if xu > L:
# ru = xu - L # remove this number of points at the upper end
# xu = xu - ru
# wind = wind[:-ru]
#
# # ru = xu - L # remove this number of points at the upper end
# # xl = xl + ru
# # xu = xu - ru
# # wind = wind[ru:-ru]
#
# # Apply smoothing
# wind_int = np.sum(wind) # integral
# Psm[x] = np.dot(wind, P[xl:xu]) / wind_int # apply window
#
# if st == SmoothingType.levels:
# Psm = 10*np.log10(Psm)
#
# return Psm
def smoothSpectralDataAlt(freq, Mdb, sw: SmoothingWidth):
Noct = 1/sw.value[0]
@ -382,7 +311,7 @@ if __name__ == "__main__":
Noct = 2 # Noct = 6 for 1/6 oct. smoothing
# Create dummy data set 1: noise
fmin = 3e3 # [Hz] min freq
fmin = 1e3 # [Hz] min freq
fmax = 24e3 # [Hz] max freq
Ndata = 200 # number of data points
freq = np.linspace(fmin, fmax, Ndata) # frequency points
@ -402,10 +331,13 @@ if __name__ == "__main__":
class sw:
value = [Noct]
st = SmoothingType.levels # so data is given in dB
# st = SmoothingType.ps # so data is given in power
# Smooth
Msm = smoothSpectralData(freq, M, sw, st)
fsm = freq
# Plot - lin frequency
plt.figure()
plt.plot(freq, M, '.b')
@ -414,6 +346,7 @@ if __name__ == "__main__":
plt.ylabel('magnitude')
plt.xlim([100, fmax])
plt.title('lin frequency')
plt.legend(['Raw', 'Smooth'])
# Plot - log frequency
plt.figure()
@ -423,3 +356,4 @@ if __name__ == "__main__":
plt.ylabel('magnitude')
plt.xlim([100, fmax])
plt.title('log frequency')
plt.legend(['Raw', 'Smooth 1'])

View File

@ -0,0 +1,77 @@
"""
This script checks whether the 3rd octave equalizer is norm compliant.
Created on Tue Dec 31 14:43:19 2019
@author: anne
"""
from lasp.filter import (ThirdOctaveBankDesigner)
import matplotlib.pyplot as plt
import numpy as np
from scipy.signal import freqz, iirdesign, sosfreqz, butter, cheby2
plt.close('all')
# %% Run check
def check_one(fs):
"""Check for fs = fs"""
# Create filter bank
nsections = 5 # filter property
designer = ThirdOctaveBankDesigner(fs)
# Test
xmin = designer.nominal_txt_tox('25') # lowest band to test, by nominal name
xmax = designer.nominal_txt_tox('20k') # highest band
compliant = True # remains True if all filters comply
fnyq = fs/2
fll = designer.fl(xmin) / 1.1 # plot limits
fuu = designer.fu(xmax) * 1.1
fig = plt.figure()
plt.xlabel('f (Hz)')
plt.ylabel('Magnitude (dB)')
for x in range(xmin, xmax+1): # loop over bands
fl = designer.fl(x) # lower cut off
fu = designer.fu(x) # upper
fln = fl/fnyq # lower cutoff, normalized [0, 1]
fun = fu/fnyq # upper
sos = designer.createSOSFilter(x)
fnorm, h = sosfreqz(sos, worN=2**18, whole=False)
freq = fnorm*fnyq/np.pi
h_dB = 20*np.log10(np.abs(h) + np.finfo(float).eps)
if not designer.testStandardCompliance(x, freq, h_dB):
print('==============================')
print(designer.nominal_txt(x), ' does not comply\n')
compliant = False
plt.semilogx(freq, h_dB)
# Upper / lower limits
freqlim, ulim, llim = designer.band_limits(x)
plt.semilogx(freqlim, ulim)
plt.semilogx(freqlim, llim)
plt.ylim(-5, 1)
plt.xlim(fll, fuu)
print(f"Passed test : {compliant} for fs = {fs} Hz")
return compliant
if __name__ == "__main__":
# Make list of fs to be tested
# Taken from https://en.wikipedia.org/wiki/Sampling_(signal_processing)
fs_all = [8000, 11025, 16000, 22050, 32000, 37800, 44056, 44100, 47250,
48000, 50000, 50400, 64000, 88200, 96000, 176400, 192000, 352800]
allcompliant = True
for fs in fs_all:
compliant = check_one(fs)
if not compliant:
allcompliant = False
print(f"\n\nAll passed test : {allcompliant}")

View File

@ -8,8 +8,6 @@ Created on Mon Jan 15 19:45:33 2018
import numpy as np
from lasp import AvPowerSpectra, Window
import matplotlib.pyplot as plt
# plt.close('all')
def test_aps1():
nfft = 16384

View File

@ -2,7 +2,7 @@
import numpy as np
from lasp import cppSLM
from lasp.filter import SPLFilterDesigner
import matplotlib.pyplot as plt
def test_cppslm1():
"""
@ -11,7 +11,8 @@ def test_cppslm1():
fs = 48000
omg = 2 * np.pi * 1000
slm = cppSLM.fromBiquads(fs, 2e-5, 1, 0.125, [1.,0,0,1,0,0])
slm = cppSLM.fromBiquads(fs, 2e-5, 1, 0.125,
np.array([[1., 0, 0, 1, 0, 0]]).T)
t = np.linspace(0, 10, 10 * fs, endpoint=False)
@ -40,7 +41,10 @@ def test_cppslm2():
omg = 2 * np.pi * 1000
filt = SPLFilterDesigner(fs).A_Sos_design()
slm = cppSLM.fromBiquads(fs, 2e-5, 0, 0.125, filt.flatten(), [1.,0,0,1,0,0])
slm = cppSLM.fromBiquads(fs, 2e-5, 0, 0.125,
filt.flatten(), # Pre-filter coefs
np.array([[1., 0, 0, 1, 0, 0]]).T # Bandpass coefs
)
t = np.linspace(0, 10, 10 * fs, endpoint=False)
@ -60,12 +64,15 @@ def test_cppslm2():
# (Fast, Slow etc)
assert np.isclose(out[-1, 0], level, atol=1e-2)
def test_cppslm3():
fs = 48000
omg = 2 * np.pi * 1000
filt = SPLFilterDesigner(fs).A_Sos_design()
slm = cppSLM.fromBiquads(fs, 2e-5, 0, 0.125, filt.flatten(), [1.,0,0,1,0,0])
slm = cppSLM.fromBiquads(fs, 2e-5, 0, 0.125,
filt.flatten(),
np.array([[1., 0, 0, 1, 0, 0]]).T)
t = np.linspace(0, 10, 10 * fs, endpoint=False)
in_ = 10 * np.sin(omg * t) * np.sqrt(2) + np.random.randn()
@ -74,16 +81,14 @@ def test_cppslm3():
# Compute overall level
level = 20 * np.log10(rms / 2e-5)
# Output of SLM
out = slm.run(in_)
Lpeak = 20 * np.log10(np.max(np.abs(in_) / 2e-5))
Lpeak
slm.Lpeak()
assert np.isclose(out[-1,0], slm.Leq()[0][0], atol=1e-2)
assert np.isclose(Lpeak, slm.Lpeak()[0][0], atol=2e0)
assert np.isclose(out[-1, 0], slm.Leq()[0], atol=1e-2)
assert np.isclose(Lpeak, slm.Lpeak()[0], atol=2e0)
if __name__ == '__main__':

View File

@ -5,8 +5,6 @@ Testing code for power spectra
"""
import numpy as np
from lasp import PowerSpectra, Window
# import matplotlib.pyplot as plt
# plt.close('all')
def test_ps():
"""

View File

@ -1,37 +0,0 @@
#!/usr/bin/python3
import numpy as np
from lasp import SLM
nframes = 0
samplerate = 48000
omg = 2*np.pi*1000
# def mycallback(input_, nframes, streamtime):
# t = np.linspace(streamtime, streamtime + nframes/samplerate,
# nframes)[np.newaxis,:]
# outp = 0.1*np.sin(omg*t)
# return outp, 0
# if __name__ == '__main__':
# pa = RtAudio()
# count = pa.getDeviceCount()
# # dev = pa.getDeviceInfo(0)
# for i in range(count):
# dev = pa.getDeviceInfo(i)
# print(dev)
# outputparams = {'deviceid': 0, 'nchannels': 1, 'firstchannel': 0}
# pa.openStream(outputparams, None , Format_FLOAT64,samplerate, 512, mycallback)
# pa.startStream()
# input()
# pa.stopStream()
# pa.closeStream()