Merge branch 'develop' into rt_signalviewer

All checks were successful: continuous-integration/drone/push build is passing.

Author: Thijs Hekman, 2023-02-20 09:38:42 +01:00
Commit: cacfc7fe6c
43 changed files with 1152 additions and 763 deletions

.drone.yml (new file, 110 lines added)

@ -0,0 +1,110 @@
kind: pipeline
type: docker
name: archlinux
clone:
depth: 50
steps:
- name: archlinux_build
image: archlinux_build:latest
pull: if-not-exists
volumes:
- name: archlinux_ccache
path: /root/.ccache
commands:
# The following command is not required, we included this in the docker
# image of archlinux_build
# - pacman -S --noconfirm ccache openblas fftw pulseaudio pybind11
- git submodule update --init --recursive
- cmake .
# More than two makes ascee2 unresponsive for now
- make -j2
- name: archlinux_test
image: archlinux_build:latest
pull: if-not-exists
commands:
# The following command is not required, we included this in the docker
# image of archlinux_build
# - pacman -S --noconfirm openblas python-pytest fftw pulseaudio python-pip python-scipy python-h5py
- pip install -r requirements.txt
- pip install .
- pytest
# - name: release-arch
# commands:
# -
volumes:
- name: archlinux_ccache
host:
path: /tmp/archlinux_ccache
---
kind: pipeline
type: docker
name: ubuntu
clone:
depth: 3
volumes:
- name: archlinux_ccache
path: /root/.ccache
steps:
- name: ubuntu_build
image: ubuntu_build:latest
pull: if-not-exists
volumes:
- name: ubuntu_ccache
path: /root/.ccache
commands:
# The following commands are not required, we included this in the docker
# image of ubuntu_build
#- apt update
#- apt install -y git cmake python3-pybind11 libopenblas-dev python3-pip python3-scipy libusb-1.0-0-dev libpulse-dev python3-h5py fftw-dev
- git submodule update --init --recursive
- cmake .
# More than two makes ascee2 unresponsive for now
- make -j2
- name: ubuntu_test
image: ubuntu_build:latest
pull: if-not-exists
commands:
# The following commands are not required, we included this in the docker
# image of ubuntu_build
#- apt update
#- apt install -y python3-pytest fftw pulseaudio python3-pip python3-scipy python3-h5py
- pip install -r requirements.txt
- pip install .
- pytest-3
volumes:
- name: ubuntu_ccache
host:
path: /tmp/ubuntu_ccache
---
kind: pipeline
type: docker
name: documentation_build
clone:
depth: 3
steps:
- name: build_docker_master
image: plugins/docker
settings:
repo: ascee/lasp_ascee_nl
tags: latest
username:
from_secret: docker_username
password:
from_secret: docker_password
when:
branch: master

.gitignore (1 line changed)

@ -20,3 +20,4 @@ doc
.spyproject
.cache
_skbuild
acme_log.log

Dockerfile (new file, 16 lines added)

@ -0,0 +1,16 @@
FROM archlinux
MAINTAINER J.A. de Jong - j.a.dejong@ascee.nl
RUN pacman --noconfirm -Sy
RUN pacman --noconfirm -S git doxygen graphviz lighttpd python-pip
RUN pip install doxypypy
WORKDIR /root
RUN git clone https://code.ascee.nl/ascee/lasp
WORKDIR /root/lasp
RUN doxygen
RUN rm -rf /srv/http
RUN mv doc/html /srv/http
CMD /usr/bin/lighttpd -D -f /etc/lighttpd/lighttpd.conf
EXPOSE 80

Doxyfile (360 lines changed)

@ -1,4 +1,4 @@
# Doxyfile 1.8.17
# Doxyfile 1.9.1
# This file describes the settings to be used by the documentation system
# doxygen (www.doxygen.org) for a project.
@ -51,7 +51,7 @@ PROJECT_BRIEF = "Library for Acoustic Signal Processing"
# pixels and the maximum width should not exceed 200 pixels. Doxygen will copy
# the logo to the output directory.
PROJECT_LOGO = /home/anne/wip/mycode/lasp/img/LASP_200px.png
PROJECT_LOGO = img/LASP_200px.png
# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) path
# into which the generated documentation will be written. If a relative path is
@ -93,6 +93,14 @@ ALLOW_UNICODE_NAMES = NO
OUTPUT_LANGUAGE = English
# The OUTPUT_TEXT_DIRECTION tag is used to specify the direction in which all
# documentation generated by doxygen is written. Doxygen will use this
# information to generate all generated output in the proper direction.
# Possible values are: None, LTR, RTL and Context.
# The default value is: None.
OUTPUT_TEXT_DIRECTION = None
# If the BRIEF_MEMBER_DESC tag is set to YES, doxygen will include brief member
# descriptions after the members that are listed in the file and class
# documentation (similar to Javadoc). Set to NO to disable this.
@ -225,7 +233,7 @@ MULTILINE_CPP_IS_BRIEF = NO
# documentation blocks is shown as doxygen documentation.
# The default value is: YES.
PYTHON_DOCSTRING = YES
PYTHON_DOCSTRING = NO
# If the INHERIT_DOCS tag is set to YES then an undocumented member inherits the
# documentation from any documented member that it re-implements.
@ -250,16 +258,16 @@ TAB_SIZE = 4
# the documentation. An alias has the form:
# name=value
# For example adding
# "sideeffect=@par Side Effects:^^"
# "sideeffect=@par Side Effects:\n"
# will allow you to put the command \sideeffect (or @sideeffect) in the
# documentation, which will result in a user-defined paragraph with heading
# "Side Effects:". Note that you cannot put \n's in the value part of an alias
# to insert newlines (in the resulting output). You can put ^^ in the value part
# of an alias to insert a newline as if a physical newline was in the original
# file. When you need a literal { or } or , in the value part of an alias you
# have to escape them by means of a backslash (\), this can lead to conflicts
# with the commands \{ and \} for these it is advised to use the version @{ and
# @} or use a double escape (\\{ and \\})
# "Side Effects:". You can put \n's in the value part of an alias to insert
# newlines (in the resulting output). You can put ^^ in the value part of an
# alias to insert a newline as if a physical newline was in the original file.
# When you need a literal { or } or , in the value part of an alias you have to
# escape them by means of a backslash (\), this can lead to conflicts with the
# commands \{ and \} for these it is advised to use the version @{ and @} or use
# a double escape (\\{ and \\})
ALIASES =
@ -304,8 +312,8 @@ OPTIMIZE_OUTPUT_SLICE = NO
# extension. Doxygen has a built-in mapping, but you can override or extend it
# using this tag. The format is ext=language, where ext is a file extension, and
# language is one of the parsers supported by doxygen: IDL, Java, JavaScript,
# Csharp (C#), C, C++, Lex, D, PHP, md (Markdown), Objective-C, Python, Slice,
# VHDL, Fortran (fixed format Fortran: FortranFixed, free formatted Fortran:
# Csharp (C#), C, C++, D, PHP, md (Markdown), Objective-C, Python, Slice, VHDL,
# Fortran (fixed format Fortran: FortranFixed, free formatted Fortran:
# FortranFree, unknown formatted Fortran: Fortran. In the later case the parser
# tries to guess whether the code is fixed or free formatted code, this is the
# default for Fortran type files). For instance to make doxygen treat .inc files
@ -458,7 +466,7 @@ LOOKUP_CACHE_SIZE = 0
# than 0 to get more control over the balance between CPU load and processing
# speed. At this moment only the input processing can be done using multiple
# threads. Since this is still an experimental feature the default is set to 1,
# which effectively disables parallel processing. Please report any issues you
# which efficively disables parallel processing. Please report any issues you
# encounter. Generating dot graphs in parallel is controlled by the
# DOT_NUM_THREADS setting.
# Minimum value: 0, maximum value: 32, default value: 1.
@ -602,12 +610,6 @@ HIDE_SCOPE_NAMES = NO
HIDE_COMPOUND_REFERENCE= NO
# If the SHOW_HEADERFILE tag is set to YES then the documentation for a class
# will show which file needs to be included to use the class.
# The default value is: YES.
SHOW_HEADERFILE = YES
# If the SHOW_INCLUDE_FILES tag is set to YES then doxygen will put a list of
# the files that are included by a file in the documentation of that file.
# The default value is: YES.
@ -765,8 +767,7 @@ FILE_VERSION_FILTER =
# output files in an output format independent way. To create the layout file
# that represents doxygen's defaults, run doxygen with the -l option. You can
# optionally specify a file name after the option, if omitted DoxygenLayout.xml
# will be used as the name of the layout file. See also section "Changing the
# layout of pages" for information.
# will be used as the name of the layout file.
#
# Note that if you run doxygen from a directory containing a file called
# DoxygenLayout.xml, doxygen will parse it automatically even if the LAYOUT_FILE
@ -812,26 +813,18 @@ WARNINGS = YES
WARN_IF_UNDOCUMENTED = YES
# If the WARN_IF_DOC_ERROR tag is set to YES, doxygen will generate warnings for
# potential errors in the documentation, such as documenting some parameters in
# a documented function twice, or documenting parameters that don't exist or
# using markup commands wrongly.
# potential errors in the documentation, such as not documenting some parameters
# in a documented function, or documenting parameters that don't exist or using
# markup commands wrongly.
# The default value is: YES.
WARN_IF_DOC_ERROR = YES
# If WARN_IF_INCOMPLETE_DOC is set to YES, doxygen will warn about incomplete
# function parameter documentation. If set to NO, doxygen will accept that some
# parameters have no documentation without warning.
# The default value is: YES.
WARN_IF_INCOMPLETE_DOC = YES
# This WARN_NO_PARAMDOC option can be enabled to get warnings for functions that
# are documented, but have no documentation for their parameters or return
# value. If set to NO, doxygen will only warn about wrong parameter
# documentation, but not about the absence of documentation. If EXTRACT_ALL is
# set to YES then this flag will automatically be disabled. See also
# WARN_IF_INCOMPLETE_DOC
# value. If set to NO, doxygen will only warn about wrong or incomplete
# parameter documentation, but not about the absence of documentation. If
# EXTRACT_ALL is set to YES then this flag will automatically be disabled.
# The default value is: NO.
WARN_NO_PARAMDOC = NO
@ -857,10 +850,7 @@ WARN_FORMAT = "$file:$line: $text"
# The WARN_LOGFILE tag can be used to specify a file to which warning and error
# messages should be written. If left blank the output is written to standard
# error (stderr). In case the file specified cannot be opened for writing the
# warning and error messages are written to standard error. When as file - is
# specified the warning and error messages are written to standard output
# (stdout).
# error (stderr).
WARN_LOGFILE =
@ -898,55 +888,16 @@ INPUT_ENCODING = UTF-8
#
# If left blank the following patterns are tested:*.c, *.cc, *.cxx, *.cpp,
# *.c++, *.java, *.ii, *.ixx, *.ipp, *.i++, *.inl, *.idl, *.ddl, *.odl, *.h,
# *.hh, *.hxx, *.hpp, *.h++, *.l, *.cs, *.d, *.php, *.php4, *.php5, *.phtml,
# *.inc, *.m, *.markdown, *.md, *.mm, *.dox (to be provided as doxygen C
# comment), *.py, *.pyw, *.f90, *.f95, *.f03, *.f08, *.f18, *.f, *.for, *.vhd,
# *.vhdl, *.ucf, *.qsf and *.ice.
# *.hh, *.hxx, *.hpp, *.h++, *.cs, *.d, *.php, *.php4, *.php5, *.phtml, *.inc,
# *.m, *.markdown, *.md, *.mm, *.dox (to be provided as doxygen C comment),
# *.py, *.pyw, *.f90, *.f95, *.f03, *.f08, *.f18, *.f, *.for, *.vhd, *.vhdl,
# *.ucf, *.qsf and *.ice.
FILE_PATTERNS = *.c \
*.cc \
*.cxx \
*.cpp \
*.c++ \
*.java \
*.ii \
*.ixx \
*.ipp \
*.i++ \
*.inl \
*.idl \
*.ddl \
*.odl \
*.h \
*.hh \
*.hxx \
*.hpp \
*.h++ \
*.cs \
*.d \
*.php \
*.php4 \
*.php5 \
*.phtml \
*.inc \
*.m \
*.markdown \
*.md \
*.mm \
*.dox \
*.py \
*.pyw \
*.f90 \
*.f95 \
*.f03 \
*.f08 \
*.f \
*.for \
*.tcl \
*.vhd \
*.vhdl \
*.ucf \
*.qsf
*.py=scripts/py_filter
# The RECURSIVE tag can be used to specify whether or not subdirectories should
# be searched for input files as well.
@ -983,7 +934,7 @@ EXCLUDE_PATTERNS =
# (namespaces, classes, functions, etc.) that should be excluded from the
# output. The symbol name can be a fully qualified name, a word, or if the
# wildcard * is used, a substring. Examples: ANamespace, AClass,
# ANamespace::AClass, ANamespace::*Test
# AClass::ANamespace, ANamespace::*Test
#
# Note that the wildcards are matched against the file with absolute path, so to
# exclude all test directories use the pattern */test/*
@ -1158,6 +1109,44 @@ USE_HTAGS = NO
VERBATIM_HEADERS = YES
# If the CLANG_ASSISTED_PARSING tag is set to YES then doxygen will use the
# clang parser (see:
# http://clang.llvm.org/) for more accurate parsing at the cost of reduced
# performance. This can be particularly helpful with template rich C++ code for
# which doxygen's built-in parser lacks the necessary type information.
# Note: The availability of this option depends on whether or not doxygen was
# generated with the -Duse_libclang=ON option for CMake.
# The default value is: NO.
CLANG_ASSISTED_PARSING = NO
# If clang assisted parsing is enabled and the CLANG_ADD_INC_PATHS tag is set to
# YES then doxygen will add the directory of each input to the include path.
# The default value is: YES.
CLANG_ADD_INC_PATHS = YES
# If clang assisted parsing is enabled you can provide the compiler with command
# line options that you would normally use when invoking the compiler. Note that
# the include paths will already be set by doxygen for the files and directories
# specified with INPUT and INCLUDE_PATH.
# This tag requires that the tag CLANG_ASSISTED_PARSING is set to YES.
CLANG_OPTIONS =
# If clang assisted parsing is enabled you can provide the clang parser with the
# path to the directory containing a file called compile_commands.json. This
# file is the compilation database (see:
# http://clang.llvm.org/docs/HowToSetupToolingForLLVM.html) containing the
# options used when the source files were built. This is equivalent to
# specifying the -p option to a clang tool, such as clang-check. These options
# will then be passed to the parser. Any options specified with CLANG_OPTIONS
# will be added as well.
# Note: The availability of this option depends on whether or not doxygen was
# generated with the -Duse_libclang=ON option for CMake.
CLANG_DATABASE_PATH =
#---------------------------------------------------------------------------
# Configuration options related to the alphabetical class index
#---------------------------------------------------------------------------
@ -1268,7 +1257,7 @@ HTML_EXTRA_FILES =
# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. Doxygen
# will adjust the colors in the style sheet and background images according to
# this color. Hue is specified as an angle on a color-wheel, see
# this color. Hue is specified as an angle on a colorwheel, see
# https://en.wikipedia.org/wiki/Hue for more information. For instance the value
# 0 represents red, 60 is yellow, 120 is green, 180 is cyan, 240 is blue, 300
# purple, and 360 is red again.
@ -1278,7 +1267,7 @@ HTML_EXTRA_FILES =
HTML_COLORSTYLE_HUE = 220
# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of the colors
# in the HTML output. For a value of 0 the output will use gray-scales only. A
# in the HTML output. For a value of 0 the output will use grayscales only. A
# value of 255 will produce the most vivid colors.
# Minimum value: 0, maximum value: 255, default value: 100.
# This tag requires that the tag GENERATE_HTML is set to YES.
@ -1360,13 +1349,6 @@ GENERATE_DOCSET = NO
DOCSET_FEEDNAME = "Doxygen generated docs"
# This tag determines the URL of the docset feed. A documentation feed provides
# an umbrella under which multiple documentation sets from a single provider
# (such as a company or product suite) can be grouped.
# This tag requires that the tag GENERATE_DOCSET is set to YES.
DOCSET_FEEDURL =
# This tag specifies a string that should uniquely identify the documentation
# set bundle. This should be a reverse domain-name style string, e.g.
# com.mycompany.MyDocSet. Doxygen will append .docset to the name.
@ -1392,12 +1374,8 @@ DOCSET_PUBLISHER_NAME = Publisher
# If the GENERATE_HTMLHELP tag is set to YES then doxygen generates three
# additional HTML index files: index.hhp, index.hhc, and index.hhk. The
# index.hhp is a project file that can be read by Microsoft's HTML Help Workshop
# on Windows. In the beginning of 2021 Microsoft took the original page, with
# a.o. the download links, offline the HTML help workshop was already many years
# in maintenance mode). You can download the HTML help workshop from the web
# archives at Installation executable (see:
# http://web.archive.org/web/20160201063255/http://download.microsoft.com/downlo
# ad/0/A/9/0A939EF6-E31C-430F-A3DF-DFAE7960D564/htmlhelp.exe).
# (see:
# https://www.microsoft.com/en-us/download/details.aspx?id=21138) on Windows.
#
# The HTML Help Workshop contains a compiler that can convert all HTML output
# generated by doxygen into a single compiled HTML file (.chm). Compiled HTML
@ -1556,28 +1534,16 @@ DISABLE_INDEX = NO
# to work a browser that supports JavaScript, DHTML, CSS and frames is required
# (i.e. any modern browser). Windows users are probably better off using the
# HTML help feature. Via custom style sheets (see HTML_EXTRA_STYLESHEET) one can
# further fine tune the look of the index (see "Fine-tuning the output"). As an
# example, the default style sheet generated by doxygen has an example that
# shows how to put an image at the root of the tree instead of the PROJECT_NAME.
# Since the tree basically has the same information as the tab index, you could
# consider setting DISABLE_INDEX to YES when enabling this option.
# further fine-tune the look of the index. As an example, the default style
# sheet generated by doxygen has an example that shows how to put an image at
# the root of the tree instead of the PROJECT_NAME. Since the tree basically has
# the same information as the tab index, you could consider setting
# DISABLE_INDEX to YES when enabling this option.
# The default value is: NO.
# This tag requires that the tag GENERATE_HTML is set to YES.
GENERATE_TREEVIEW = YES
# When both GENERATE_TREEVIEW and DISABLE_INDEX are set to YES, then the
# FULL_SIDEBAR option determines if the side bar is limited to only the treeview
# area (value NO) or if it should extend to the full height of the window (value
# YES). Setting this to YES gives a layout similar to
# https://docs.readthedocs.io with more room for contents, but less room for the
# project logo, title, and description. If either GENERATE_TREEVIEW or
# DISABLE_INDEX is set to NO, this option has no effect.
# The default value is: NO.
# This tag requires that the tag GENERATE_HTML is set to YES.
FULL_SIDEBAR = NO
# The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values that
# doxygen will group on one line in the generated HTML documentation.
#
@ -1602,13 +1568,6 @@ TREEVIEW_WIDTH = 250
EXT_LINKS_IN_WINDOW = NO
# If the OBFUSCATE_EMAILS tag is set to YES, doxygen will obfuscate email
# addresses.
# The default value is: YES.
# This tag requires that the tag GENERATE_HTML is set to YES.
OBFUSCATE_EMAILS = YES
# If the HTML_FORMULA_FORMAT option is set to svg, doxygen will use the pdf2svg
# tool (see https://github.com/dawbarton/pdf2svg) or inkscape (see
# https://inkscape.org) to generate formulas as SVG images instead of PNGs for
@ -1657,29 +1616,11 @@ FORMULA_MACROFILE =
USE_MATHJAX = NO
# With MATHJAX_VERSION it is possible to specify the MathJax version to be used.
# Note that the different versions of MathJax have different requirements with
# regards to the different settings, so it is possible that also other MathJax
# settings have to be changed when switching between the different MathJax
# versions.
# Possible values are: MathJax_2 and MathJax_3.
# The default value is: MathJax_2.
# This tag requires that the tag USE_MATHJAX is set to YES.
MATHJAX_VERSION = MathJax_2
# When MathJax is enabled you can set the default output format to be used for
# the MathJax output. For more details about the output format see MathJax
# version 2 (see:
# http://docs.mathjax.org/en/v2.7-latest/output.html) and MathJax version 3
# (see:
# http://docs.mathjax.org/en/latest/web/components/output.html).
# the MathJax output. See the MathJax site (see:
# http://docs.mathjax.org/en/v2.7-latest/output.html) for more details.
# Possible values are: HTML-CSS (which is slower, but has the best
# compatibility. This is the name for Mathjax version 2, for MathJax version 3
# this will be translated into chtml), NativeMML (i.e. MathML. Only supported
# for NathJax 2. For MathJax version 3 chtml will be used instead.), chtml (This
# is the name for Mathjax version 3, for MathJax version 2 this will be
# translated into HTML-CSS) and SVG.
# compatibility), NativeMML (i.e. MathML) and SVG.
# The default value is: HTML-CSS.
# This tag requires that the tag USE_MATHJAX is set to YES.
@ -1692,21 +1633,15 @@ MATHJAX_FORMAT = HTML-CSS
# MATHJAX_RELPATH should be ../mathjax. The default value points to the MathJax
# Content Delivery Network so you can quickly see the result without installing
# MathJax. However, it is strongly recommended to install a local copy of
# MathJax from https://www.mathjax.org before deployment. The default value is:
# - in case of MathJax version 2: https://cdn.jsdelivr.net/npm/mathjax@2
# - in case of MathJax version 3: https://cdn.jsdelivr.net/npm/mathjax@3
# MathJax from https://www.mathjax.org before deployment.
# The default value is: https://cdn.jsdelivr.net/npm/mathjax@2.
# This tag requires that the tag USE_MATHJAX is set to YES.
MATHJAX_RELPATH = https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.2/
# The MATHJAX_EXTENSIONS tag can be used to specify one or more MathJax
# extension names that should be enabled during MathJax rendering. For example
# for MathJax version 2 (see
# https://docs.mathjax.org/en/v2.7-latest/tex.html#tex-and-latex-extensions):
# MATHJAX_EXTENSIONS = TeX/AMSmath TeX/AMSsymbols
# For example for MathJax version 3 (see
# http://docs.mathjax.org/en/latest/input/tex/extensions/index.html):
# MATHJAX_EXTENSIONS = ams
# This tag requires that the tag USE_MATHJAX is set to YES.
MATHJAX_EXTENSIONS =
@ -1886,31 +1821,29 @@ PAPER_TYPE = a4
EXTRA_PACKAGES =
# The LATEX_HEADER tag can be used to specify a user-defined LaTeX header for
# the generated LaTeX document. The header should contain everything until the
# first chapter. If it is left blank doxygen will generate a standard header. It
# is highly recommended to start with a default header using
# doxygen -w latex new_header.tex new_footer.tex new_stylesheet.sty
# and then modify the file new_header.tex. See also section "Doxygen usage" for
# information on how to generate the default header that doxygen normally uses.
# The LATEX_HEADER tag can be used to specify a personal LaTeX header for the
# generated LaTeX document. The header should contain everything until the first
# chapter. If it is left blank doxygen will generate a standard header. See
# section "Doxygen usage" for information on how to let doxygen write the
# default header to a separate file.
#
# Note: Only use a user-defined header if you know what you are doing!
# Note: The header is subject to change so you typically have to regenerate the
# default header when upgrading to a newer version of doxygen. The following
# commands have a special meaning inside the header (and footer): For a
# description of the possible markers and block names see the documentation.
# Note: Only use a user-defined header if you know what you are doing! The
# following commands have a special meaning inside the header: $title,
# $datetime, $date, $doxygenversion, $projectname, $projectnumber,
# $projectbrief, $projectlogo. Doxygen will replace $title with the empty
# string, for the replacement values of the other commands the user is referred
# to HTML_HEADER.
# This tag requires that the tag GENERATE_LATEX is set to YES.
LATEX_HEADER =
# The LATEX_FOOTER tag can be used to specify a user-defined LaTeX footer for
# the generated LaTeX document. The footer should contain everything after the
# last chapter. If it is left blank doxygen will generate a standard footer. See
# The LATEX_FOOTER tag can be used to specify a personal LaTeX footer for the
# generated LaTeX document. The footer should contain everything after the last
# chapter. If it is left blank doxygen will generate a standard footer. See
# LATEX_HEADER for more information on how to generate a default footer and what
# special commands can be used inside the footer. See also section "Doxygen
# usage" for information on how to generate the default footer that doxygen
# normally uses. Note: Only use a user-defined footer if you know what you are
# doing!
# special commands can be used inside the footer.
#
# Note: Only use a user-defined footer if you know what you are doing!
# This tag requires that the tag GENERATE_LATEX is set to YES.
LATEX_FOOTER =
@ -1955,7 +1888,8 @@ USE_PDFLATEX = YES
# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \batchmode
# command to the generated LaTeX files. This will instruct LaTeX to keep running
# if errors occur, instead of asking the user for help.
# if errors occur, instead of asking the user for help. This option is also used
# when generating formulas in HTML.
# The default value is: NO.
# This tag requires that the tag GENERATE_LATEX is set to YES.
@ -1968,6 +1902,16 @@ LATEX_BATCHMODE = NO
LATEX_HIDE_INDICES = NO
# If the LATEX_SOURCE_CODE tag is set to YES then doxygen will include source
# code with syntax highlighting in the LaTeX output.
#
# Note that which sources are shown also depends on other settings such as
# SOURCE_BROWSER.
# The default value is: NO.
# This tag requires that the tag GENERATE_LATEX is set to YES.
LATEX_SOURCE_CODE = NO
# The LATEX_BIB_STYLE tag can be used to specify the style to use for the
# bibliography, e.g. plainnat, or ieeetr. See
# https://en.wikipedia.org/wiki/BibTeX and \cite for more info.
@ -2048,6 +1992,16 @@ RTF_STYLESHEET_FILE =
RTF_EXTENSIONS_FILE =
# If the RTF_SOURCE_CODE tag is set to YES then doxygen will include source code
# with syntax highlighting in the RTF output.
#
# Note that which sources are shown also depends on other settings such as
# SOURCE_BROWSER.
# The default value is: NO.
# This tag requires that the tag GENERATE_RTF is set to YES.
RTF_SOURCE_CODE = NO
#---------------------------------------------------------------------------
# Configuration options related to the man page output
#---------------------------------------------------------------------------
@ -2144,6 +2098,15 @@ GENERATE_DOCBOOK = NO
DOCBOOK_OUTPUT = docbook
# If the DOCBOOK_PROGRAMLISTING tag is set to YES, doxygen will include the
# program listings (including syntax highlighting and cross-referencing
# information) to the DOCBOOK output. Note that enabling this will significantly
# increase the size of the DOCBOOK output.
# The default value is: NO.
# This tag requires that the tag GENERATE_DOCBOOK is set to YES.
DOCBOOK_PROGRAMLISTING = NO
#---------------------------------------------------------------------------
# Configuration options for the AutoGen Definitions output
#---------------------------------------------------------------------------
@ -2156,6 +2119,10 @@ DOCBOOK_OUTPUT = docbook
GENERATE_AUTOGEN_DEF = NO
#---------------------------------------------------------------------------
# Configuration options related to Sqlite3 output
#---------------------------------------------------------------------------
#---------------------------------------------------------------------------
# Configuration options related to the Perl module output
#---------------------------------------------------------------------------
@ -2322,6 +2289,15 @@ EXTERNAL_PAGES = YES
# Configuration options related to the dot tool
#---------------------------------------------------------------------------
# If the CLASS_DIAGRAMS tag is set to YES, doxygen will generate a class diagram
# (in HTML and LaTeX) for classes with base or super classes. Setting the tag to
# NO turns the diagrams off. Note that this option also works with HAVE_DOT
# disabled, but it is recommended to install and use dot, since it yields more
# powerful graphs.
# The default value is: YES.
CLASS_DIAGRAMS = YES
# You can include diagrams made with dia in doxygen documentation. Doxygen will
# then run dia to produce the diagram and insert it in the documentation. The
# DIA_PATH tag allows you to specify the directory where the dia binary resides.
@ -2340,7 +2316,7 @@ HIDE_UNDOC_RELATIONS = YES
# http://www.graphviz.org/), a graph visualization toolkit from AT&T and Lucent
# Bell Labs. The other options in this section have no effect if this option is
# set to NO
# The default value is: NO.
# The default value is: YES.
HAVE_DOT = YES
@ -2378,14 +2354,11 @@ DOT_FONTSIZE = 10
DOT_FONTPATH =
# If the CLASS_GRAPH tag is set to YES (or GRAPH) then doxygen will generate a
# graph for each documented class showing the direct and indirect inheritance
# relations. In case HAVE_DOT is set as well dot will be used to draw the graph,
# otherwise the built-in generator will be used. If the CLASS_GRAPH tag is set
# to TEXT the direct and indirect inheritance relations will be shown as texts /
# links.
# Possible values are: NO, YES, TEXT and GRAPH.
# If the CLASS_GRAPH tag is set to YES then doxygen will generate a graph for
# each documented class showing the direct and indirect inheritance relations.
# Setting this tag to YES will force the CLASS_DIAGRAMS tag to NO.
# The default value is: YES.
# This tag requires that the tag HAVE_DOT is set to YES.
CLASS_GRAPH = YES
@ -2514,13 +2487,6 @@ GRAPHICAL_HIERARCHY = YES
DIRECTORY_GRAPH = YES
# The DIR_GRAPH_MAX_DEPTH tag can be used to limit the maximum number of levels
# of child directories generated in directory dependency graphs by dot.
# Minimum value: 1, maximum value: 25, default value: 1.
# This tag requires that the tag DIRECTORY_GRAPH is set to YES.
DIR_GRAPH_MAX_DEPTH = 1
# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images
# generated by dot. For an explanation of the image formats see the section
# output formats in the documentation of the dot tool (Graphviz (see:
@ -2528,7 +2494,9 @@ DIR_GRAPH_MAX_DEPTH = 1
# Note: If you choose svg you need to set HTML_FILE_EXTENSION to xhtml in order
# to make the SVG files visible in IE 9+ (other browsers do not have this
# requirement).
# Possible values are: png, jpg, gif, svg, png:gd, png:gd:gd, png:cairo,
# Possible values are: png, png:cairo, png:cairo:cairo, png:cairo:gd, png:gd,
# png:gd:gd, jpg, jpg:cairo, jpg:cairo:gd, jpg:gd, jpg:gd:gd, gif, gif:cairo,
# gif:cairo:gd, gif:gd, gif:gd:gd, svg, png:gd, png:gd:gd, png:cairo,
# png:cairo:gd, png:cairo:cairo, png:cairo:gdiplus, png:gdiplus and
# png:gdiplus:gdiplus.
# The default value is: png.
@ -2574,10 +2542,10 @@ MSCFILE_DIRS =
DIAFILE_DIRS =
# When using plantuml, the PLANTUML_JAR_PATH tag should be used to specify the
# path where java can find the plantuml.jar file or to the filename of jar file
# to be used. If left blank, it is assumed PlantUML is not used or called during
# a preprocessing step. Doxygen will generate a warning when it encounters a
# \startuml command in this case and will not generate output for the diagram.
# path where java can find the plantuml.jar file. If left blank, it is assumed
# PlantUML is not used or called during a preprocessing step. Doxygen will
# generate a warning when it encounters a \startuml command in this case and
# will not generate output for the diagram.
PLANTUML_JAR_PATH =
@ -2639,8 +2607,6 @@ DOT_MULTI_TARGETS = NO
# If the GENERATE_LEGEND tag is set to YES doxygen will generate a legend page
# explaining the meaning of the various boxes and arrows in the dot generated
# graphs.
# Note: This tag requires that UML_LOOK isn't set, i.e. the doxygen internal
# graphical representation for inheritance and collaboration diagrams is used.
# The default value is: YES.
# This tag requires that the tag HAVE_DOT is set to YES.
@ -2649,8 +2615,8 @@ GENERATE_LEGEND = YES
# If the DOT_CLEANUP tag is set to YES, doxygen will remove the intermediate
# files that are used to generate the various graphs.
#
# Note: This setting is not only used for dot files but also for msc temporary
# files.
# Note: This setting is not only used for dot files but also for msc and
# plantuml temporary files.
# The default value is: YES.
DOT_CLEANUP = YES

README.md (174 lines changed)

@ -1,108 +1,112 @@
# Library for Acoustic Signal Processing
Welcome to LASP: Library for Acoustic Signal Processing. LASP is a C++ library
with a Python interface which is supposed to process (multi-) microphone
acoustic data in real time on a PC and output results.
- Master branch: [![Build Status](https://drone.ascee.nl/api/badges/ASCEE/lasp/status.svg?ref=refs/heads/master)](https://drone.ascee.nl/ASCEE/lasp)
- Develop branch: [![Build Status](https://drone.ascee.nl/api/badges/ASCEE/lasp/status.svg?ref=refs/heads/develop)](https://drone.ascee.nl/ASCEE/lasp)
At the point in time of this writing, we are yet unsure whether the Raspberry
PI will have enough computational power to this end, but may be by the time it
is finished, we have a new faster generation :).
Welcome to LASP: Library for Acoustic Signal Processing. LASP is a C++ library
with a Python interface which is supposed to acquire and process (multi) sensor data in real time on a PC and output results.
Current features that are implemented:
- Compile-time determination of the floating-point accuracy (32/64 bit)
- Fast convolution FIR filter implementation
- Sample rate decimation by an integer factor of 4.
- Octave filterbank FIR filters designed to comply with IEC 61260
(1995).
- Communication with data acquisition (DAQ) devices, of which:
- Internal sound cards via the [RtAudio](http://www.music.mcgill.ca/~gary/rtaudio) backend. Many thanks to Gary P. Scavone et al.
- [Measurement Computing](https://www.mccdaq.com) [DT9838A](https://www.mccdaq.com/Products/Sound-Vibration-DAQ/DT9837) signal analyzer.
- Configuration of DAQ devices: AC coupling, IEPE, sensitivity physical
quantities.
- Recording of signals from these DAQ devices, and storing in a HDF5 file.
- Filter designers to create A/C sound pressure weighting
- Biquad filter designers for low pass, high pass, peaking and notch filters
- A Peak Programme Meter (PPM) to monitor signal levels from DAQ and to watch
for signal clipping.
- A signal generator to create sine waves, sweeps and noise (white / pink).
- Equalizers to equalize the output prior to sending.
- Averaged power spectra and power spectral density determination
using Welch' method. Taper functions of Hann, Hamming, Bartlett and
Blackman are provided.
- A thread-safe job queue including routines to create worker threads.
- Several linear algebra routines (wrappers around BLAS and LAPACK).
- A nice debug tracer implementation
- Third octave filter bank FIR filters designed to comply with IEC 61260
- (One third) octave filter bank filters designed to comply with IEC 61260
(1995).
- Slow and fast time updates of (A/C/Z) weighted sound pressure levels
- Full Sound Level Meter implementation
- Real time Sound Level meter, Power / Transfer function estimator
- Spectra data smoothing algorithms
- Sensor calibration for microphones
Future features (wish-list)
- Conventional and delay-and-sum beam-forming algorithms
For now, the source code is well-documented but it requires some
additional documentation (the math behind it). This will be published
in a sister repository (https://code.ascee.nl/ascee/lasp-doc).
- Conventional and delay-and-sum beam-forming algorithms
- Impedance tube measurement processing
For now, the source code is well-documented on [lasp.ascee.nl](https://lasp.ascee.nl) but it requires some
additional documentation (the math behind it). This is maintained
in a sister repository [lasp-doc](https://code.ascee.nl/ascee/lasp-doc). The
most recent
If you have any question(s), please feel free to contact us: info@ascee.nl.
# Installation
# Building from source
## Dependencies
Two commands that install all requirements (for Ubuntu / Linux Mint)
- `pip install scipy numpy build scikit-build appdirs`
- `sudo apt install libusb-dev libpulse-dev libboost-dev`
## Runtime dependencies (Linux)
- FFTW (For really fast FFT's). If compiled with Fftpack, this library is not
required.
- libUlDAQ, for the Measurement Computing DT9837A USB DAQ box
- GNU Autotools, for compiling libUlDAQ
- RtAudio, for Audio DAQ backends
- libusb
- BLAS (OpenBLAS, other).
## Editable install
In the root directory of the repository, run:
- `pip3 install --user -e .`
- `cmake .`
- `make -j`
- `$ sudo apt install python3-pybind11 libopenblas-dev python3-pip python3-scipy libusb-1.0-0-dev libpulse-dev cmake-curses-gui python3-h5py`
- `$ pip3 install --user -r requirements.txt`
## Build dependencies
Optional dependencies, which can be turned ON/OFF using CMake:
- Build tools: a compiler, [CMake](http://cmake.org), and the Python packages:
- Scipy
- Numpy
- appdirs
These can all be installed using:
- The following Python packages need also be available:
- `Scipy` (which includes Numpy). Install with `sudo apt install
python3-scipy`, or `pacman -S scipy`.
- `appdirs`, which can be grabbed from [PyPI](https://pypi.org)
## Compilation of LASP
### Archlinux
Compiling the code on Archlinux requires the following packages to be available:
- openblas-lapack (AUR)
- Python>=3.7
- Numpy (Python-numpy)
- Cython
### Ubuntu / Linux Mint
*Only tested with Linux Mint 18.04*, we require the following packages for
compilation:
- build-essential
- cython
- python3-numpy
- libopenblas
If building RtAudio with the ALSA backend:
If building RtAudio with the ALSA backend, you will also require the following packages:
- libclalsadrv-dev
- libopenblas-base
- libopenblas-dev
- libusb-1.0-dev
If building RtAudio with the Jack Audio Connection Kit (JACK) backend, you will also require the following packages:
- libjack-jackd2-dev
## Download & build
- `$ git clone --recursive https://code.ascee.nl/ASCEE/lasp.git`
- `$ cd lasp`
For a release build:
- `$ cmake .`
or optionally for a custom build:
- `$ ccmake .`
Configure and run:
- `$ make -j`
### Build documentation
In directory:
`$ sudo apt install doxygen graphviz`
`$ pip install doxypypy`
While still in lasp dir:
`$ doxygen`
This will build the documentation. It can be read by:
`$ <YOUR-BROWSER> doc/html/index.html`
Or via docker:
`$ docker build -t lasp_ascee_nl:latest .`
## Install
For an editable install (while developing):
- `$ pip3 install --prefix=$HOME/.local -e .`
To install locally, for a fixed version:
- `$ pip3 install --prefix=$HOME/.local .`
## Usage
- See examples directories for IPython notebooks.
- Please refer to the [documentation](https://lasp.ascee.nl/) for features.


@ -2,15 +2,10 @@
import numpy as np
from lasp import SeriesBiquad, AvPowerSpectra
from lasp.filter import SPLFilterDesigner
import matplotlib.pyplot as plt
from scipy.signal import sosfreqz
# plt.close('all')
plt.close('all')
# def test_cppslm2():
# """
# Generate a sine wave, now A-weighted
# """
fs = 48000
omg = 2*np.pi*1000


@ -1,17 +0,0 @@
[project] # Project metadata
name = "lasp"
readme = "README.md"
requires-python = ">=3.8"
license = { "file" = "LICENSE" }
authors = [{ "name" = "J.A. de Jong et al.", "email" = "info@ascee.nl" }]
classifiers = [
"Topic :: Scientific/Engineering",
"Programming Language :: Python :: 3.8",
"Operating System :: POSIX :: Linux",
"Operating System :: Microsoft :: Windows",
]
# urls = { "Documentation" = "https://" }
dynamic = ["version", "description"]

requirements.txt (new file, 3 lines added)

@ -0,0 +1,3 @@
appdirs
dataclasses_json
matplotlib

scripts/py_filter (new executable file, 4 lines added)

@ -0,0 +1,4 @@
#!/bin/bash
# Script used to filter Python files such that it can be used together with
# Doxygen
doxypypy -a -c $1


@ -1,16 +1,22 @@
import glob
import glob, os
import platform
from setuptools import setup
if 'Linux' in platform.platform():
extension = list(glob.glob('src/lasp/lasp_cpp.cpython*'))
if len(extension) == 0:
ext_name_glob = 'lasp_cpp.cpython*'
extensions = list(glob.glob('src/lasp/' + ext_name_glob))
# Split off the path from the file name.
ext_names = [os.path.split(a)[1] for a in extensions]
print(extensions)
if len(extensions) == 0:
raise RuntimeError('Please first run CMake to build extension')
elif len(extension) > 1:
elif len(extensions) > 1:
raise RuntimeError('Too many extension files found')
pkgdata = extension
pkgdata = ext_names
else:
raise RuntimeError('Not yet Windows-proof')
@ -24,9 +30,6 @@ classifiers = [
keywords = ["DSP", "DAQ", "Signal processing"]
with open('README.md', 'r') as f:
readme = f.read()
setup(
name="lasp",
@ -40,12 +43,11 @@ setup(
classifiers=classifiers,
keywords=keywords,
license="MIT",
readme=readme,
dependencies=["numpy", "scipy", "appdirs", "h5py", "appdirs",
"dataclasses_json"],
"dataclasses_json"],
package_dir={"": "src"},
packages=['lasp', 'lasp.filter', 'lasp.tools'],
data_files = pkgdata,
include_package_data=True,
package_dir={'': 'src'},
package_data={'lasp': pkgdata},
python_requires='>=3.8',
)


@ -1,4 +1,3 @@
# Comments are what is imported, state of 6-8-2021
"""
LASP: Library for Acoustic Signal Processing
@ -14,6 +13,7 @@ from .lasp_octavefilter import *
from .lasp_slm import * # SLM, Dummy
from .lasp_record import * # RecordStatus, Recording
from .lasp_daqconfigs import *
from .lasp_measurementset import *
# from .lasp_siggen import * # SignalType, NoiseType, SiggenMessage, SiggenData, Siggen
# from .lasp_weighcal import * # WeighCal
# from .tools import * # SmoothingType, smoothSpectralData, SmoothingWidth


@ -109,7 +109,7 @@ public:
* Overwrites any existing available data.
*
* @param channel The channel to copy to
* @param ptrs Pointers to data from channels
* @param ptr Pointers to data from channels
*/
void copyInFromRaw(const us channel, const byte_t *ptr);
@ -135,7 +135,7 @@ public:
* column vector of floats. For data that is not already floating-point,
* the data is scaled back from MAX_INT to +1.0.
*
* @param channel The channel to convert
* @param channel_no The channel number to convert
*
* @return Array of floats
*/
@ -146,8 +146,8 @@ public:
* column vector of floats. For data that is not already floating-point,
* the data is scaled back from MAX_INT to +1.0.
*
* @param channel The channel to convert
* @param frame_no The frame number to convert
* @param channel_no The channel number to convert
*
* @return Float value
*/
@ -157,9 +157,9 @@ public:
* @brief Convert to channel data of native type from floating point values.
* Useful for 'changing' raw data in any way.
*
* @param channel_no
* @param channel_no
* @param data
* @param frame_no The frame
* @param channel_no The channel
* @param data The value
*/
void fromFloat(const us frame_no, const us channel_no,
const d data);
@ -169,7 +169,7 @@ public:
* Useful for 'changing' raw data in any way.
*
* @param channel The channel to convert
* @param data
* @param data Data to convert from float values
*/
void fromFloat(const us channel, const arma::Col<d> &data);
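
The toFloat / fromFloat pair documented above converts between the DAQ's native sample format and floating-point values scaled to +/- 1.0 of full scale. Below is a minimal standalone sketch of that scaling idea for one fixed sample type; it is illustrative only, the real DaqData handles several native sample types and the per-frame / per-channel layout.

#include <cstdint>
#include <limits>
#include <vector>

// Map raw 32-bit integer samples to floats in [-1.0, +1.0] (cf. toFloat).
std::vector<double> rawToFloat(const std::vector<int32_t> &raw) {
  const double scale = 1.0 / std::numeric_limits<int32_t>::max();
  std::vector<double> out(raw.size());
  for (std::size_t i = 0; i < raw.size(); i++) out[i] = raw[i] * scale;
  return out;
}

// Inverse mapping, as fromFloat does for a whole channel of data.
std::vector<int32_t> floatToRaw(const std::vector<double> &data) {
  const double scale = std::numeric_limits<int32_t>::max();
  std::vector<int32_t> out(data.size());
  for (std::size_t i = 0; i < data.size(); i++)
    out[i] = static_cast<int32_t>(data[i] * scale);
  return out;
}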


@ -12,8 +12,8 @@
#endif
std::vector<DeviceInfo> DeviceInfo::getDeviceInfo() {
std::vector<DeviceInfo> devs;
DeviceInfoList DeviceInfo::getDeviceInfo() {
DeviceInfoList devs;
#if LASP_HAS_ULDAQ ==1
fillUlDaqDeviceInfo(devs);
#endif


@ -1,17 +1,34 @@
#pragma once
#include "lasp_config.h"
#include "lasp_types.h"
#include "lasp_daqconfig.h"
#include "lasp_types.h"
#include <memory>
/** \addtogroup device
* @{
*/
using DeviceInfoList = std::vector<std::unique_ptr<DeviceInfo>>;
/**
* @brief Structure containing device info parameters
*/
class DeviceInfo {
public:
/**
* @brief Virtual destructor, so that derived classes can safely be deleted through a DeviceInfo pointer.
*/
virtual ~DeviceInfo() {}
/**
* @brief Clone a device info.
*
* @return Cloned device info
*/
virtual std::unique_ptr<DeviceInfo> clone() const {
return std::make_unique<DeviceInfo>(*this);
}
/**
* @brief Backend API corresponding to device
*/
@ -21,12 +38,6 @@ public:
*/
string device_name = "";
/**
* @brief Specific for the device (Sub-API). Important for the RtAudio
* backend, as RtAudio is able to handle different API's.
*/
int api_specific_devindex = -1;
/**
* @brief The available data types the device can output
*/
@ -104,6 +115,14 @@ public:
*/
bool hasInternalOutputMonitor = false;
/**
* @brief This flag is used to be able to indicate that the device cannot run
* input and output streams independently, without opening the device in
* duplex mode. This is for example true for the UlDaq: only one handle to
* the device can be given at the same time.
*/
bool duplexModeForced = false;
/**
* @brief The physical quantity of the output signal. For 'normal' audio
* devices, this is typically a 'number' between +/- full scale. For some
@ -112,7 +131,6 @@ public:
*/
DaqChannel::Qty physicalOutputQty = DaqChannel::Qty::Number;
/**
* @brief String representation of DeviceInfo
*
@ -121,11 +139,9 @@ public:
operator string() const {
using std::endl;
std::stringstream str;
str << api.apiname + " " << api_specific_devindex << endl
<< " number of input channels: " << ninchannels << endl
/**
* @brief
*/
str << api.apiname + " " << endl
<< " number of input channels: " << ninchannels
<< endl
<< " number of output channels: " << noutchannels << endl;
return str.str();
}
@ -135,7 +151,7 @@ public:
*
* @return The device info's for each found device.
*/
static std::vector<DeviceInfo> getDeviceInfo();
static DeviceInfoList getDeviceInfo();
};
/** @} */
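
The changes above replace value-typed DeviceInfo storage with DeviceInfoList, a vector of std::unique_ptr<DeviceInfo>, so that backend-specific subclasses (RtAudio, UlDaq) can live in the same list without slicing; the virtual clone() provides the polymorphic deep copy used when the list is handed out. A minimal standalone sketch of that pattern, with simplified types rather than the full LASP declarations:

#include <memory>
#include <string>
#include <vector>

struct DeviceInfo {
  std::string device_name;
  virtual ~DeviceInfo() = default;
  // Polymorphic copy: overridden by derived classes so that copying a list
  // never slices off backend-specific members.
  virtual std::unique_ptr<DeviceInfo> clone() const {
    return std::make_unique<DeviceInfo>(*this);
  }
};

struct RtAudioDeviceInfo : public DeviceInfo {
  int _api_devindex = -1;  // backend-specific index, kept out of the base class
  std::unique_ptr<DeviceInfo> clone() const override {
    return std::make_unique<RtAudioDeviceInfo>(*this);
  }
};

using DeviceInfoList = std::vector<std::unique_ptr<DeviceInfo>>;

// Deep copy of a device list, as StreamMgr::getDeviceInfo() does further below.
DeviceInfoList copyList(const DeviceInfoList &in) {
  DeviceInfoList out;
  for (const auto &dev : in) {
    out.push_back(dev->clone());
  }
  return out;
}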


@ -17,7 +17,19 @@ using rte = std::runtime_error;
using std::vector;
using lck = std::scoped_lock<std::mutex>;
void fillRtAudioDeviceInfo(vector<DeviceInfo> &devinfolist) {
class RtAudioDeviceInfo : public DeviceInfo {
public:
/**
* @brief Specific for the device (Sub-API). Important for the RtAudio
* backend, as RtAudio is able to handle different API's.
*/
int _api_devindex;
virtual std::unique_ptr<DeviceInfo> clone() const override {
return std::make_unique<RtAudioDeviceInfo>(*this);
}
};
void fillRtAudioDeviceInfo(DeviceInfoList &devinfolist) {
DEBUGTRACE_ENTER;
vector<RtAudio::Api> apis;
@ -34,7 +46,7 @@ void fillRtAudioDeviceInfo(vector<DeviceInfo> &devinfolist) {
continue;
}
// "Our device info struct"
DeviceInfo d;
RtAudioDeviceInfo d;
switch (api) {
case RtAudio::LINUX_ALSA:
d.api = rtaudioAlsaApi;
@ -58,7 +70,7 @@ void fillRtAudioDeviceInfo(vector<DeviceInfo> &devinfolist) {
}
d.device_name = devinfo.name;
d.api_specific_devindex = devno;
d._api_devindex = devno;
/// When 48k is available we overwrite the default sample rate with the 48
/// kHz value, which is our preferred rate.
@ -118,7 +130,7 @@ void fillRtAudioDeviceInfo(vector<DeviceInfo> &devinfolist) {
d.availableFramesPerBlock = {512, 1024, 2048, 4096, 8192};
d.prefFramesPerBlockIndex = 2;
devinfolist.push_back(d);
devinfolist.push_back(std::make_unique<RtAudioDeviceInfo>(d));
}
}
}
@ -143,9 +155,9 @@ class RtAudioDaq : public Daq {
std::atomic<StreamStatus> _streamStatus{};
public:
RtAudioDaq(const DeviceInfo &devinfo, const DaqConfiguration &config)
: Daq(devinfo, config),
rtaudio(static_cast<RtAudio::Api>(devinfo.api.api_specific_subcode)),
RtAudioDaq(const DeviceInfo &devinfo_gen, const DaqConfiguration &config)
: Daq(devinfo_gen, config), rtaudio(static_cast<RtAudio::Api>(
devinfo_gen.api.api_specific_subcode)),
nFramesPerBlock(Daq::framesPerBlock()) {
DEBUGTRACE_ENTER;
@ -156,6 +168,8 @@ public:
throw rte("RtAudio backend cannot run in duplex mode.");
}
assert(!monitorOutput);
const RtAudioDeviceInfo &devinfo =
static_cast<const RtAudioDeviceInfo &>(devinfo_gen);
std::unique_ptr<RtAudio::StreamParameters> inParams, outParams;
@ -169,7 +183,7 @@ public:
throw rte("Invalid input number of channels");
}
inParams->firstChannel = 0;
inParams->deviceId = devinfo.api_specific_devindex;
inParams->deviceId = devinfo._api_devindex;
} else {
@ -180,7 +194,7 @@ public:
throw rte("Invalid output number of channels");
}
outParams->firstChannel = 0;
outParams->deviceId = devinfo.api_specific_devindex;
outParams->deviceId = devinfo._api_devindex;
}
RtAudio::StreamOptions streamoptions;


@ -10,5 +10,5 @@ std::unique_ptr<Daq> createRtAudioDevice(const DeviceInfo& devinfo,
*
* @param devinfolist List to append to
*/
void fillRtAudioDeviceInfo(std::vector<DeviceInfo> &devinfolist);
void fillRtAudioDeviceInfo(DeviceInfoList &devinfolist);


@ -261,22 +261,23 @@ void StreamMgr::startStream(const DaqConfiguration &config) {
[](auto &i) { return i.enabled; });
// Find the first device that matches with the configuration
std::scoped_lock lck(_devices_mtx);
DeviceInfo devinfo;
{
std::scoped_lock lck(_devices_mtx);
DeviceInfo *devinfo = nullptr;
bool found = false;
auto devinfo2 = std::find_if(
_devices.cbegin(), _devices.cend(),
[&config](const DeviceInfo &d) { return config.match(d); });
if (devinfo2 == std::cend(_devices)) {
throw rte("Could not find a device with name " + config.device_name +
" in list of devices.");
for (auto &devinfoi : _devices) {
if (config.match(*devinfoi)) {
devinfo = devinfoi.get();
break;
}
devinfo = *devinfo2;
}
if (!devinfo) {
throw rte("Could not find a device with name " + config.device_name +
" in list of devices.");
}
isInput |= (config.monitorOutput && devinfo.hasInternalOutputMonitor);
isInput |= (config.monitorOutput && devinfo->hasInternalOutputMonitor);
DEBUGTRACE_PRINT(isInput);
bool isDuplex = isInput && isOutput;
@ -300,11 +301,23 @@ void StreamMgr::startStream(const DaqConfiguration &config) {
}
}
if (_outputStream && isInput && _outputStream->duplexModeForced &&
config.match(*_outputStream)) {
throw rte("This device is already opened for output. If input is also "
"required, please enable duplex mode for this device");
}
if (_inputStream && isOutput && _inputStream->duplexModeForced &&
config.match(*_inputStream)) {
throw rte("This device is already opened for input. If output is also "
"required, please enable duplex mode for this device");
}
InDaqCallback inCallback;
OutDaqCallback outCallback;
using namespace std::placeholders;
std::unique_ptr<Daq> daq = Daq::createDaq(devinfo, config);
std::unique_ptr<Daq> daq = Daq::createDaq(*devinfo, config);
assert(daq);
@ -321,7 +334,7 @@ void StreamMgr::startStream(const DaqConfiguration &config) {
/// Create input filters
_inputFilters.clear();
/// No input filter for monitor channel.
if (config.monitorOutput && devinfo.hasInternalOutputMonitor) {
if (config.monitorOutput && devinfo->hasInternalOutputMonitor) {
_inputFilters.push_back(nullptr);
}
for (auto &ch : daq->inchannel_config) {


@ -107,7 +107,7 @@ class StreamMgr {
std::mutex _siggen_mtx;
std::mutex _devices_mtx;
std::vector<DeviceInfo> _devices;
DeviceInfoList _devices;
StreamMgr();
@ -145,9 +145,13 @@ class StreamMgr {
*
* @return A copy of the internal stored list of devices
*/
std::vector<DeviceInfo> getDeviceInfo() const {
DeviceInfoList getDeviceInfo() const {
std::scoped_lock lck(const_cast<std::mutex &>(_devices_mtx));
return _devices;
DeviceInfoList d2;
for(const auto& dev: _devices) {
d2.push_back(dev->clone());
}
return d2;
}
/**


@ -6,13 +6,12 @@
#include "lasp_uldaq_impl.h"
#include <uldaq.h>
void fillUlDaqDeviceInfo(std::vector<DeviceInfo> &devinfolist,
void *vDescriptors) {
void fillUlDaqDeviceInfo(DeviceInfoList &devinfolist) {
DEBUGTRACE_ENTER;
DaqDeviceDescriptor *descriptors_copy =
static_cast<DaqDeviceDescriptor *>(vDescriptors);
UlError err;
unsigned int numdevs = MAX_ULDAQ_DEV_COUNT_PER_API;
@ -32,16 +31,14 @@ void fillUlDaqDeviceInfo(std::vector<DeviceInfo> &devinfolist,
descriptor = devdescriptors[i];
// Copy structure over, if given as not nullptr
if (descriptors_copy) {
descriptors_copy[i] = descriptor;
}
UlDaqDeviceInfo devinfo;
devinfo._uldaqDescriptor = descriptor;
DeviceInfo devinfo;
devinfo.api = uldaqapi;
string name, interface;
if (string(descriptor.productName) != "DT9837A") {
throw rte("Unknown UlDAQ type");
string productname = descriptor.productName;
if (productname != "DT9837A") {
throw rte("Unknown UlDAQ type: " + productname);
}
switch (descriptor.devInterface) {
@ -66,7 +63,6 @@ void fillUlDaqDeviceInfo(std::vector<DeviceInfo> &devinfolist,
devinfo.physicalOutputQty = DaqChannel::Qty::Voltage;
devinfo.api_specific_devindex = i;
devinfo.availableDataTypes.push_back(
DataTypeDescriptor::DataType::dtype_fl64);
devinfo.prefDataTypeIndex = 0;
@ -91,8 +87,10 @@ void fillUlDaqDeviceInfo(std::vector<DeviceInfo> &devinfolist,
devinfo.hasInternalOutputMonitor = true;
devinfo.duplexModeForced = true;
// Finally, this devinfo is pushed back in list
devinfolist.push_back(devinfo);
devinfolist.push_back(std::make_unique<UlDaqDeviceInfo>(devinfo));
}
}


@ -13,10 +13,6 @@ std::unique_ptr<Daq> createUlDaqDevice(const DeviceInfo &devinfo,
/**
* @brief Fill device info list with UlDaq specific devices, if any.
*
* @param devinfolist Info list to append to
* @param descriptors Pointer to array
* DaqDeviceDescriptors[MAX_ULDAQ_DEV_COUNT_PER_API]. If a pointer is given, a
* copy of the device descriptors is set to the memory of this pointer. We use
* a void* pointer here to not expose the implementation of UlDaq.
* @param devinfolist Info list to append to.
*/
void fillUlDaqDeviceInfo(std::vector<DeviceInfo>&, void *descriptors = nullptr);
void fillUlDaqDeviceInfo(DeviceInfoList& devinfolist);


@ -35,7 +35,10 @@ inline void showErr(string errstr) {
std::cerr << errstr << std::endl;
std::cerr << "***********************************************\n\n";
}
inline void showErr(UlError err) { showErr(getErrMsg(err)); }
inline void showErr(UlError err) {
if (err != ERR_NO_ERROR)
showErr(getErrMsg(err));
}
DT9837A::~DT9837A() {
UlError err;
@ -71,21 +74,14 @@ DT9837A::DT9837A(const DeviceInfo &devinfo, const DaqConfiguration &config)
throw rte("Invalid sample rate");
}
std::vector<DeviceInfo> devs;
DaqDeviceDescriptor devdescriptors[MAX_ULDAQ_DEV_COUNT_PER_API];
fillUlDaqDeviceInfo(devs, static_cast<void *>(devdescriptors));
if (devs.size() == 0) {
throw rte("Unable to find any UlDaq devices");
}
if (devinfo.api_specific_devindex < 0 ||
devinfo.api_specific_devindex >= (int)MAX_ULDAQ_DEV_COUNT_PER_API) {
throw rte("Invalid device index");
const UlDaqDeviceInfo *_info =
dynamic_cast<const UlDaqDeviceInfo *>(&devinfo);
if (_info == nullptr) {
throw rte("BUG: Could not cast DeviceInfo to UlDaqDeviceInfo");
}
// get a handle to the DAQ device associated with the first descriptor
_handle = ulCreateDaqDevice(devdescriptors[devinfo.api_specific_devindex]);
_handle = ulCreateDaqDevice(_info->_uldaqDescriptor);
if (_handle == 0) {
throw rte("Unable to create a handle to the specified DAQ "


@ -14,6 +14,19 @@ using std::cerr;
using std::endl;
using rte = std::runtime_error;
/**
* @brief UlDaq-specific device information. Adds a copy of the underlying
* DaqDeviceDescriptor.
*/
class UlDaqDeviceInfo : public DeviceInfo {
public:
DaqDeviceDescriptor _uldaqDescriptor;
virtual std::unique_ptr<DeviceInfo> clone() const {
return std::make_unique<UlDaqDeviceInfo>(*this);
}
};
class DT9837A : public Daq {
DaqDeviceHandle _handle = 0;
@ -123,7 +136,6 @@ public:
*/
bool operator()();
~InBufHandler();
};
class OutBufHandler : public BufHandler {
OutDaqCallback cb;


@ -15,6 +15,7 @@ set(lasp_dsp_files
lasp_slm.cpp
lasp_threadedindatahandler.cpp
lasp_ppm.cpp
lasp_clip.cpp
)


@ -0,0 +1,94 @@
/* #define DEBUGTRACE_ENABLED */
#include "debugtrace.hpp"
#include "lasp_clip.h"
#include <mutex>
using std::cerr;
using std::endl;
using Lck = std::scoped_lock<std::mutex>;
using rte = std::runtime_error;
ClipHandler::ClipHandler(StreamMgr &mgr)
: ThreadedInDataHandler(mgr){
DEBUGTRACE_ENTER;
}
bool ClipHandler::inCallback_threaded(const DaqData &d) {
DEBUGTRACE_ENTER;
Lck lck(_mtx);
dmat data = d.toFloat();
const us nchannels = d.nchannels;
assert(data.n_cols == nchannels);
if (nchannels != _clip_time.size()) {
DEBUGTRACE_PRINT("Resizing clip indication");
_clip_time = vd(nchannels, arma::fill::value(-1));
}
/// Obtain max abs values for each column, convert this row-vec to a column
/// vector, and scale with the max-range for each channel
vd maxabs = arma::max(arma::abs(data), 0).as_col() / _max_range;
/// Find indices for channels that have a clip
arma::uvec clips = maxabs > clip_point;
for (us i = 0; i < nchannels; i++) {
if (clips(i)) {
/// Reset clip counter
_clip_time(i) = 0;
} else if (_clip_time(i) > clip_indication_time) {
/// Reset to 'unclipped'
_clip_time(i) = -1;
} else if (_clip_time(i) >= 0) {
/// Add a bit of clip time
_clip_time(i) += _dt;
}
}
return true;
}
arma::uvec ClipHandler::getCurrentValue() const {
DEBUGTRACE_ENTER;
Lck lck(_mtx);
arma::uvec clips(_clip_time.size(), arma::fill::zeros);
clips.elem(arma::find(_clip_time >= 0)).fill(1);
return {clips};
}
void ClipHandler::reset(const Daq *daq) {
DEBUGTRACE_ENTER;
Lck lck(_mtx);
if (daq) {
const us nchannels = daq->neninchannels();
_max_range.resize(nchannels);
dvec ranges = daq->inputRangeForEnabledChannels();
assert(ranges.size() == nchannels);
for(us i=0;i<daq->neninchannels();i++) {
_max_range[i] = ranges[i];
}
_clip_time.fill(-1);
const d fs = daq->samplerate();
/* DEBUGTRACE_PRINT(fs); */
_dt = daq->framesPerBlock() / fs;
}
}
ClipHandler::~ClipHandler() {
DEBUGTRACE_ENTER;
Lck lck(_mtx);
stop();
}

src/lasp/dsp/lasp_clip.h Normal file

@ -0,0 +1,77 @@
// lasp_clip.h
//
// Author: J.A. de Jong, C.R.D. Jansen - ASCEE
//
// Description: Clip handler
#pragma once
#include <memory>
#include "lasp_filter.h"
#include "lasp_mathtypes.h"
#include "lasp_threadedindatahandler.h"
/**
* \addtogroup dsp
* @{
*
* \addtogroup rt
* @{
*/
/**
* @brief Clipping detector (Clip). Detects when a signal overdrives the input.
* */
class ClipHandler: public ThreadedInDataHandler {
/**
* @brief Assuming full scale of a signal is +/- 1.0. If a value is found
* that exceeds this fraction of full scale, the channel is marked as clipping.
*/
static inline const d clip_point = 0.98;
/**
* @brief How long [s] after a clip event has happened the channel is still
* reported as being in 'clip' mode.
*/
static inline const d clip_indication_time = 2.0;
/**
* @brief Inverse of block sampling frequency [s]: (framesPerBlock/fs)
*/
d _dt;
mutable std::mutex _mtx;
/**
* @brief How long ago the last clip has happened. Negative in case no clip
* has happened.
*/
vd _clip_time;
/**
* @brief Storage for maximum values
*/
vd _max_range;
public:
/**
* @brief Constructs Clipping indicator
*
* @param mgr Stream Mgr to operate on
*/
ClipHandler(StreamMgr& mgr);
~ClipHandler();
/**
* @brief Get the current values of the clip handler. Returns, per channel, a
* boolean that is True when that channel has recently clipped.
*
* @return clipping indication
*/
arma::uvec getCurrentValue() const;
bool inCallback_threaded(const DaqData& ) override final;
void reset(const Daq*) override final;
};
/** @} */
/** @} */
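For reference, the hold behaviour that clip_point, clip_indication_time and _clip_time describe can be illustrated with a small Python/NumPy sketch. This is a minimal re-statement of the per-block bookkeeping in lasp_clip.cpp, not the class API; the block size, sampling rate and channel values below are made-up examples.

import numpy as np

clip_point = 0.98            # fraction of full scale regarded as a clip
clip_indication_time = 2.0   # [s] how long the indication is held
dt = 2048 / 48000            # [s] block duration (framesPerBlock / fs), assumed

# clip_time per channel: -1 = not clipped, >= 0 = seconds since the last clip
clip_time = np.full(2, -1.0)

def update(clip_time, maxabs_scaled):
    """One block update, mirroring ClipHandler::inCallback_threaded."""
    for i, m in enumerate(maxabs_scaled):
        if m > clip_point:
            clip_time[i] = 0.0                   # (re)start the hold timer
        elif clip_time[i] > clip_indication_time:
            clip_time[i] = -1.0                  # hold expired: back to 'unclipped'
        elif clip_time[i] >= 0:
            clip_time[i] += dt                   # still holding: accumulate time
    return clip_time >= 0                        # boolean clip indication per channel

print(update(clip_time, [0.99, 0.3]))            # -> [ True False]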


@ -135,7 +135,7 @@ cmat Fft::fft(const dmat &freqdata) {
/// * WARNING *. This was source of a serious bug. It is not possible to run
/// FFT's and IFFT's on the same _impl, as it overwrites the same memory.
/// Uncommenting the line below results in faulty results.
/// #pragma omp parallel for
// #pragma omp parallel for
for (us colno = 0; colno < freqdata.n_cols; colno++) {
res.col(colno) = _impl->fft(freqdata.col(colno));
}
@ -152,7 +152,7 @@ dmat Fft::ifft(const cmat &freqdata) {
/// * WARNING *. This was source of a serious bug. It is not possible to run
/// FFT's and IFFT's on the same _impl, as it overwrites the same memory.
/// Uncommenting the line below results in faulty results.
/// #pragma omp parallel for
// #pragma omp parallel for
for (us colno = 0; colno < freqdata.n_cols; colno++) {
res.col(colno) = _impl->ifft(freqdata.col(colno));


@ -84,7 +84,14 @@ class PPMHandler: public ThreadedInDataHandler {
*/
std::tuple<vd,arma::uvec> getCurrentValue() const;
bool inCallback_threaded(const DaqData& ) override final;
/**
* @brief Implements callback on thread
*
* @param d DaqData to work with
*
* @return true when stream should continue.
*/
bool inCallback_threaded(const DaqData& d) override final;
void reset(const Daq*) override final;
};


@ -46,7 +46,8 @@ public:
* @param mgr StreamMgr singleton reference
* @param freqWeightingFilter Optionally: the frequency weighting filter.
* Nullptr should be given for Z-weighting.
* @param For all other arguments, see constructor of AvPowerSpectra
*
* For all other arguments, see constructor of AvPowerSpectra
*/
RtAps(StreamMgr &mgr, const Filter *freqWeightingFilter, const us nfft = 2048,
const Window::WindowType w = Window::WindowType::Hann,
@ -61,7 +62,14 @@ public:
*/
ccube getCurrentValue() const;
bool inCallback_threaded(const DaqData &) override final;
/**
* @brief Implements the work to do when new DaqData arrives
*
* @param d DaqData to use for computing/updating spectra
*
* @return true if stream should continue.
*/
bool inCallback_threaded(const DaqData & d) override final;
void reset(const Daq *) override final;
};


@ -13,28 +13,29 @@ using rte = std::runtime_error;
using std::unique_ptr;
SLM::SLM(const d fs, const d Lref, const us downsampling_fac, const d tau,
std::unique_ptr<Filter> pre_filter,
std::vector<std::unique_ptr<Filter>> bandpass)
: _pre_filter(std::move(pre_filter)), _bandpass(std::move(bandpass)),
_alpha(exp(-1 / (fs * tau))),
_sp_storage(_bandpass.size(), arma::fill::zeros), // Storage for
// components of
// single pole low pass
// filter
Lrefsq(Lref*Lref), // Reference level
downsampling_fac(downsampling_fac),
std::unique_ptr<Filter> pre_filter,
std::vector<std::unique_ptr<Filter>> bandpass)
: _pre_filter(std::move(pre_filter)), _bandpass(std::move(bandpass)),
_alpha(exp(-1 / (fs * tau))),
_sp_storage(_bandpass.size(), arma::fill::zeros), // Storage for
// components of
// single pole low pass
// filter
Lrefsq(Lref * Lref), // Reference level
downsampling_fac(downsampling_fac),
// Initialize mean square
Pm(_bandpass.size(), arma::fill::zeros),
// Initialize mean square
Pm(_bandpass.size(), arma::fill::zeros),
// Initialize max
Pmax(_bandpass.size(), arma::fill::zeros),
// Initialize max
Pmax(_bandpass.size(), arma::fill::zeros),
// Initialize peak
Ppeak(_bandpass.size(), arma::fill::zeros)
// Initialize peak
Ppeak(_bandpass.size(), arma::fill::zeros)
{
DEBUGTRACE_ENTER;
DEBUGTRACE_PRINT(_alpha);
// Make sure thread pool is running
getPool();
@ -42,7 +43,7 @@ SLM::SLM(const d fs, const d Lref, const us downsampling_fac, const d tau,
if (Lref <= 0) {
throw rte("Invalid reference level");
}
if (tau <= 0) {
if (tau < 0) {
throw rte("Invalid time constant for Single pole lowpass filter");
}
if (fs <= 0) {
@ -66,43 +67,43 @@ std::vector<unique_ptr<Filter>> createBandPass(const dmat &coefs) {
}
return bf;
}
us SLM::suggestedDownSamplingFac(const d fs,const d tau) {
if(tau<0) throw rte("Invalid time weighting time constant");
if(fs<=0) throw rte("Invalid sampling frequency");
us SLM::suggestedDownSamplingFac(const d fs, const d tau) {
if (fs <= 0)
throw rte("Invalid sampling frequency");
// A reasonable 'framerate' for the sound level meter, based on the
// filtering time constant.
if (tau > 0) {
d fs_slm = 10 / tau;
if(fs_slm < 30) {
if (fs_slm < 30) {
fs_slm = 30;
}
return std::max((us) 1, static_cast<us>(fs / fs_slm));
return std::max((us)1, static_cast<us>(fs / fs_slm));
} else {
return 1;
}
}
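As a numeric illustration of the heuristic above (the fs and tau values are just an example): the meter 'frame rate' is taken as 10 / tau, but never below 30 Hz, and the downsampling factor is the ratio of the sampling rate to that frame rate.

# Sketch of suggestedDownSamplingFac for fs = 48 kHz, 'Fast' weighting (tau = 0.125 s)
fs, tau = 48000.0, 0.125
fs_slm = max(10.0 / tau, 30.0)     # -> 80 Hz meter update rate
fac = max(1, int(fs / fs_slm))     # -> 600: one output sample per 600 input samples
print(fs_slm, fac)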
SLM SLM::fromBiquads(const d fs, const d Lref, const us downsampling_fac,
const d tau, const vd &pre_filter_coefs,
const dmat &bandpass_coefs) {
const d tau, const vd &pre_filter_coefs,
const dmat &bandpass_coefs) {
DEBUGTRACE_ENTER;
return SLM(fs, Lref, downsampling_fac, tau,
std::make_unique<SeriesBiquad>(pre_filter_coefs),
createBandPass(bandpass_coefs));
std::make_unique<SeriesBiquad>(pre_filter_coefs),
createBandPass(bandpass_coefs));
}
SLM SLM::fromBiquads(const d fs, const d Lref, const us downsampling_fac,
const d tau, const dmat &bandpass_coefs) {
const d tau, const dmat &bandpass_coefs) {
DEBUGTRACE_ENTER;
return SLM(fs, Lref, downsampling_fac, tau,
nullptr, // Pre-filter
createBandPass(bandpass_coefs) // Bandpass coefficients
);
nullptr, // Pre-filter
createBandPass(bandpass_coefs) // Bandpass coefficients
);
}
vd SLM::run_single(vd work,const us i) {
vd SLM::run_single(vd work, const us i) {
// Filter input in-place
_bandpass[i]->filter(work);
@ -126,7 +127,7 @@ vd SLM::run_single(vd work,const us i) {
for (us j = 0; j < work.n_rows; j++) {
// Update mean square of signal, work is here still signal power
Pm(i) = (Pm(i) * static_cast<d>(N_local) + work(j)) /
(static_cast<d>(N_local) + 1);
(static_cast<d>(N_local) + 1);
N_local++;
@ -142,7 +143,7 @@ vd SLM::run_single(vd work,const us i) {
Pmax(i) = std::max(Pmax(i), arma::max(work));
// Convert to levels in dB
work = 10*arma::log10((work+arma::datum::eps)/Lrefsq);
work = 10 * arma::log10((work + arma::datum::eps) / Lrefsq);
return work;
}
@ -170,8 +171,8 @@ dmat SLM::run(const vd &input_orig) {
/* DEBUGTRACE_PRINT(res.n_rows); */
/* DEBUGTRACE_PRINT(futs.size()); */
// Update the total number of samples harvested so far. NOTE: *This should be
// done AFTER the threads are done!!!*
// Update the total number of samples harvested so far. NOTE: *This should
// be done AFTER the threads are done!!!*
}
N += input.n_rows;


@ -51,11 +51,11 @@ class ThreadedInDataHandler: public InDataHandler {
* @brief This function should be overridden with an actual implementation,
* of what should happen on a different thread.
*
* @param DaqData Input daq data
* @param d Input daq data
*
* @return true on success. False when an error occurred.
*/
virtual bool inCallback_threaded(const DaqData&) = 0;
virtual bool inCallback_threaded(const DaqData& d) = 0;
};


@ -85,8 +85,8 @@ class FilterBankDesigner:
if self.nominal_txt(x) == nom_txt:
return x
raise ValueError(
f'Could not find a nominal frequency corresponding to {nom_txt}. Hint: use \'5k\' instead of \'5000\'.'
)
f'Could not find a nominal frequency corresponding to {nom_txt}. '
'Hint: use \'5k\' instead of \'5000\'.')
def sanitize_input(self, input_):
if isinstance(input_, int):
@ -172,7 +172,6 @@ class FilterBankDesigner:
Args:
x: Band designator
"""
SOS_ORDER = 5
fs = self.fs
@ -186,6 +185,18 @@ class FilterBankDesigner:
x = self.sanitize_input(x)
fu_n = fu / fnyq
# Handle fl & fu >= fNyquist: return 'no pass'
if fl_n >= 1:
return np.tile(np.asarray([0, 0, 0, 1, 0, 0]), (SOS_ORDER, 1))
# Handle fu >= fNyquist: return high pass
if fu_n >= 1:
highpass = butter(SOS_ORDER, fl_n, output='sos', btype='highpass')
allpass = np.tile(np.asarray([1, 0, 0, 1, 0, 0]),
(SOS_ORDER-highpass.shape[0], 1))
return np.vstack((highpass, allpass)) # same shape=(SOS_ORDER, 6)
# Regular bands
return butter(SOS_ORDER, [fl_n, fu_n], output='sos', btype='band')
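The two fallbacks above rely on the fact that an SOS section [0, 0, 0, 1, 0, 0] blocks the signal entirely, while [1, 0, 0, 1, 0, 0] passes it unchanged. A short scipy check (independent of the designer class) confirms this:

import numpy as np
from scipy.signal import sosfilt

x = np.random.randn(1000)
nopass = np.tile([0., 0, 0, 1, 0, 0], (5, 1))    # b = [0, 0, 0], a = [1, 0, 0]
unit = np.tile([1., 0, 0, 1, 0, 0], (5, 1))      # b = [1, 0, 0], a = [1, 0, 0]

assert np.allclose(sosfilt(nopass, x), 0.0)      # 'no pass': output is zero
assert np.allclose(sosfilt(unit, x), x)          # unit filter: output equals input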
def firFreqResponse(self, x, freq):
@ -254,8 +265,8 @@ class FilterBankDesigner:
for i, x in enumerate(range(xl, xu + 1)):
fl = self.fl(x)
fu = self.fu(x)
# Find the indices in the frequency array which correspond to the
# frequency band x
# Find the indices in the frequency array which correspond to
# the frequency band x
if x != xu:
indices_cur = np.where((freq >= fl) & (freq < fu))
else:
@ -330,20 +341,20 @@ class OctaveBankDesigner(FilterBankDesigner):
@property
def xs(self):
"""All possible band designators for an octave band filter."""
return list(range(-6, 5))
return list(range(-7, 5))
def band_limits(self, x, filter_class=0):
"""Returns the octave band filter limits for filter designator x.
Args:
x: Filter offset power from the reference frequency of 1000 Hz.
filter_class: Either 0 or 1, defines the tolerances on the frequency
response
filter_class: Either 0 or 1, defines the tolerances on the
frequency response
Returns:
freq, llim, ulim: Tuple of Numpy arrays containing the frequencies of
the corner points of the filter frequency response limits, lower limits
in *deciBell*, upper limits in *deciBell*, respectively.
freq, llim, ulim: Tuple of Numpy arrays containing the frequencies
of the corner points of the filter frequency response limits, lower
limits in *deciBell*, upper limits in *deciBell*, respectively.
"""
b = 1
@ -393,7 +404,8 @@ class OctaveBankDesigner(FilterBankDesigner):
-3: '125',
-4: '63',
-5: '31.5',
-6: '16'
-6: '16',
-7: '8'
}
assert len(nominals) == len(self.xs)
return nominals[x]
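With the band designator range extended down to -7, the octave bank now also covers the 8 Hz nominal band. A quick check; the import path follows the test script further below, the constructor argument and sampling frequency are assumptions:

from lasp.filter import OctaveBankDesigner   # import path as in the test scripts

designer = OctaveBankDesigner(48000)         # fs assumed, as for ThirdOctaveBankDesigner
print(designer.xs)                           # -> [-7, -6, ..., 4]
print(designer.nominal_txt(-7))              # -> '8'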
@ -450,8 +462,7 @@ class OctaveBankDesigner(FilterBankDesigner):
if int(self.fs) in [44100, 48000, 96000]:
return 1.0
else:
raise ValueError('Unimplemented sampling frequency for SOS'
'filter design')
return 1.0
def sosFac_u(self, x):
"""Right side percentage of change in cut-on frequency for designing
@ -463,8 +474,7 @@ class OctaveBankDesigner(FilterBankDesigner):
if int(self.fs) in [44100, 48000, 96000]:
return 1.0
else:
raise ValueError('Unimplemented sampling frequency for SOS'
'filter design')
return 1.0
class ThirdOctaveBankDesigner(FilterBankDesigner):
@ -475,18 +485,16 @@ class ThirdOctaveBankDesigner(FilterBankDesigner):
one-third octave bands
Args:
fs: Sampling frequency in [Hz]
fs: Sampling frequency in [Hz] - can be None
"""
super().__init__(fs)
self.xs = list(range(-16, 14))
self.xs = list(range(-19, 14))
# Text corresponding to the nominal frequency
self._nominal_txt = [
self._nominal_txt = ['12.5', '16', '20',
'25', '31.5', '40', '50', '63', '80', '100', '125', '160', '200',
'250', '315', '400', '500', '630', '800', '1k', '1.25k', '1.6k',
'2k', '2.5k', '3.15k', '4k', '5k', '6.3k', '8k', '10k', '12.5k',
'16k', '20k'
]
'16k', '20k']
assert len(self.xs) == len(self._nominal_txt)
@ -511,13 +519,13 @@ class ThirdOctaveBankDesigner(FilterBankDesigner):
Args:
x: Filter offset power from the reference frequency of 1000 Hz.
filter_class: Either 0 or 1, defines the tolerances on the frequency
response
filter_class: Either 0 or 1, defines the tolerances on the
frequency response
Returns:
freq, llim, ulim: Tuple of Numpy arrays containing the frequencies of
the corner points of the filter frequency response limits, lower limits
in *deciBell*, upper limits in *deciBell*, respectively.
freq, llim, ulim: Tuple of Numpy arrays containing the frequencies
of the corner points of the filter frequency response limits, lower
limits in *deciBell*, upper limits in *deciBell*, respectively.
"""
fm = self.G**(x / self.b) * self.fr
@ -603,25 +611,15 @@ class ThirdOctaveBankDesigner(FilterBankDesigner):
"""Left side percentage of change in cut-on frequency for designing the
filter."""
# Idea: correct for frequency warping:
if np.isclose(self.fs, 48000):
return 1.00
elif np.isclose(self.fs, 41000):
warnings.warn(
f'Frequency {self.fs} might not result in correct filters')
return 1.00
elif np.isclose(self.fs, 32768):
return 1.00
if int(self.fs) in [48000]:
return 1.0
else:
raise ValueError('Unimplemented sampling frequency for SOS'
'filter design')
return 1.0
def sosFac_u(self, x):
"""Right side percentage of change in cut-on frequency for designing
the filter."""
if np.isclose(self.fs, 48000):
return 1
elif np.isclose(self.fs, 32768):
return 1.00
if int(self.fs) in [48000]:
return 1.0
else:
raise ValueError('Unimplemented sampling frequency for SOS'
'filter design')
return 1.0


@ -286,46 +286,57 @@ class this_lasp_shelve(Shelve):
return os.path.join(lasp_appdir, f'{node}_config.shelve')
class TimeWeighting:
none = (-1, 'Raw (no time weighting)')
# This is not actually a time weighting
none = (0, 'Raw (no time weighting)')
uufast = (1e-4, '0.1 ms')
ufast = (35e-3, 'Impulse (35 ms)')
fast = (0.125, 'Fast (0.125 s)')
slow = (1.0, 'Slow (1.0 s)')
tens = (10., '10 s')
infinite = (0, 'Infinite')
types_realtime = (ufast, fast, slow, tens, infinite)
types_all = (none, uufast, ufast, fast, slow, tens, infinite)
# All-averaging: compute the running average over all history, comparable
# to an equivalent level. Only useful for power spectra; for the Sound
# Level Meter, the equivalent level (Leq) serves that purpose.
averaging = (-1, 'All-averaging')
types_all = (none, uufast, ufast, fast, slow, tens, averaging)
# Used for combobox of real time power spectra
types_realtimeaps = (ufast, fast, slow, tens, averaging)
# Used for combobox of real time sound level meter
types_realtimeslm = (ufast, fast, slow, tens)
# Used for combobox of sound level meter - time dependent
types_slmt = (none, uufast, ufast, fast, slow, tens)
# Used for combobox of sound level meter - Lmax statistics
types_slmstats = (uufast, ufast, fast, slow, tens)
default = fast
default_index = 3
default_index_realtime = 1
@staticmethod
def fillComboBox(cb, realtime=False):
def fillComboBox(cb, types):
"""
Fill TimeWeightings to a combobox
Args:
cb: QComboBox to fill
types: The types to fill it with
"""
cb.clear()
if realtime:
types = TimeWeighting.types_realtime
defindex = TimeWeighting.default_index_realtime
else:
types = TimeWeighting.types_all
defindex = TimeWeighting.default_index
logging.debug(f'{types}')
for tw in types:
cb.addItem(tw[1], tw)
cb.setCurrentIndex(defindex)
if TimeWeighting.default == tw:
cb.setCurrentIndex(cb.count()-1)
@staticmethod
def getCurrent(cb):
if cb.count() == len(TimeWeighting.types_realtime):
return TimeWeighting.types_realtime[cb.currentIndex()]
else:
return TimeWeighting.types_all[cb.currentIndex()]
for type in TimeWeighting.types_all:
if cb.currentText() == type[1]:
return type
class FreqWeighting:


@ -8,14 +8,23 @@
* Welcome to the LASP (Library for Acoustic Signal Processing) code
* documentation. The code comprises a part which is written in C++, a part
* that is written in Python, and a part that functions as glue, which is
* Pybind11 C++ glue code. An example of such a file is the current one.
* Pybind11 C++ glue code. An example of such a file is lasp_cpp.cpp.
* This is the internal documentation of LASP. It serves as background
* information for programmers.
*
* \section Installation
*
* For the installation manual, please refer to the README.md of the Git
* repository. It is recommended to install the software from source.
* For the installation manual, please refer to the <a
* href="https://code.ascee.nl/ASCEE/lasp/src/branch/master/README.md">README</a>
* of the Git repository.
*
*
* \section Usage
*
* Some usage examples are given in the <a href=
* "https://code.ascee.nl/ASCEE/lasp/src/branch/master/examples">examples</a>
* directory of the repository.
*
* */
/**
@ -48,7 +57,6 @@ PYBIND11_MODULE(lasp_cpp, m) {
m.attr("__version__") = std::to_string(LASP_VERSION_MAJOR) + "." +
std::to_string(LASP_VERSION_MINOR);
m.attr("LASP_VERSION_MAJOR") = LASP_VERSION_MAJOR;
m.attr("LASP_VERSION_MINOR") = LASP_VERSION_MINOR;
}


@ -196,7 +196,7 @@ class Measurement:
# Open the h5 file in read-plus mode, to allow for changing the
# measurement comment.
with h5.File(fn, 'r+') as f:
with h5.File(fn, 'r') as f:
# Check for video data
try:
f['video']
@ -234,10 +234,9 @@ class Measurement:
self._channelNames = [f'Unnamed {i}' for i in range(self.nchannels)]
# comment = read-write thing
try:
if 'comment' in f.keys():
self._comment = f.attrs['comment']
except KeyError:
f.attrs['comment'] = ''
else:
self._comment = ''
# Sensitivity


@ -0,0 +1,66 @@
"""
Provides class MeasurementSet, a class used to perform checks and adjustments
on a group of measurements at the same time.
"""
__all__ = ['MeasurementSet']
from .lasp_measurement import Measurement
from typing import List
class MeasurementSet(list):
"""
Group of measurements that have some correspondence to one another. Class
is used to operate on multiple measurements at once.
"""
def __init__(self, mlist: List[Measurement] =[]):
"""
Initialize a measurement set
Args:
mlist: Measurement list
"""
if any([not isinstance(i, Measurement) for i in mlist]):
raise TypeError('Object in list should be of Measurement type')
super().__init__(mlist)
def measTimeSame(self):
"""
Returns True if all measurements have the same measurement
time (recorded time)
"""
if len(self) > 0:
first = self[0].N
return all([first == meas.N for meas in self])
else:
return False
def measSimilar(self):
"""
Similar means: the channel metadata is the same and the measurement time
is the same; only the recorded data itself differs.
Returns:
True if measChannelsSame() and measTimeSame() else False
"""
return self.measTimeSame() and self.measChannelsSame()
def measChannelsSame(self):
"""
This method is used to check whether a set of measurements can be
accessed in a loop, i.e. for computing power spectra or sound levels on
a set of measurements, simultaneously. If the channel data is the same
(name, sensitivity, ...) it returns True.
"""
if len(self) > 0:
first = self[0].channelConfig
return all([first == meas.channelConfig for meas in self])
else:
return False
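A hypothetical usage sketch of this class follows. The module name of the new file and the .h5 file names are assumptions for illustration; Measurement is constructed from a measurement file as elsewhere in LASP.

from lasp.lasp_measurement import Measurement
from lasp.lasp_measurementset import MeasurementSet  # module name assumed

# Placeholder file names; any existing LASP measurement files would do
mset = MeasurementSet([Measurement('meas1.h5'), Measurement('meas2.h5')])

if mset.measSimilar():
    # Channel metadata and recording length match, so the measurements can
    # safely be processed in one loop, channel by channel.
    for meas in mset:
        print(meas.N)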


@ -32,10 +32,10 @@ class SLM:
def __init__(self,
fs,
fbdesigner=None,
tw: TimeWeighting =TimeWeighting.fast,
fw: FreqWeighting =FreqWeighting.A,
xmin = None,
xmax = None,
tw: TimeWeighting = TimeWeighting.fast,
fw: FreqWeighting = FreqWeighting.A,
xmin=None,
xmax=None,
include_overall=True,
level_ref_value=P_REF,
offset_t=0):
@ -72,7 +72,8 @@ class SLM:
self.xs = list(range(xmin, xmax + 1))
nfilters = len(self.xs)
if include_overall: nfilters +=1
if include_overall:
nfilters += 1
self.include_overall = include_overall
self.offset_t = offset_t
@ -91,7 +92,7 @@ class SLM:
# This is a bit of a hack, as the 5 is hard-coded here, but should in
# fact come from somewhere else.
sos_overall = np.array([1,0,0,1,0,0]*5, dtype=float)
sos_overall = np.array([1, 0, 0, 1, 0, 0]*5, dtype=float)
if fbdesigner is not None:
assert fbdesigner.fs == fs
@ -108,14 +109,14 @@ class SLM:
# Create a unit impulse response filter, every third index equals
# 1, so b0 = 1 and a0 is 1 (by definition)
# a0 = 1, b0 = 1, rest is zero
sos[:,-1] = sos_overall
sos[:, -1] = sos_overall
self.nom_txt.append('overall')
else:
# No filterbank, means we do only compute the overall values. This
# means that in case of include_overall, it creates two overall
# channels. That would be confusing, so we do not allow it.
sos = sos_overall[:,np.newaxis]
sos = sos_overall[:, np.newaxis]
self.nom_txt.append('overall')
# Downsampling factor, determine from single pole low pass filter time
@ -137,7 +138,6 @@ class SLM:
# Initialize counter to 0
self.N = 0
def run(self, data):
"""
Add new fresh timedata to the Sound Level Meter
@ -169,7 +169,6 @@ class SLM:
return output
def return_as_dict(self, dat):
"""
Helper function used to return resulting data in a proper way.


@ -2,6 +2,7 @@
#include "arma_npy.h"
#include "debugtrace.hpp"
#include "lasp_ppm.h"
#include "lasp_clip.h"
#include "lasp_rtaps.h"
#include "lasp_rtsignalviewer.h"
#include "lasp_streammgr.h"
@ -192,6 +193,17 @@ void init_datahandler(py::module &m) {
ColToNpy<arma::uword>(std::get<1>(tp)));
});
/// Clip Detector
py::class_<ClipHandler, ThreadedInDataHandler> clip(m, "ClipHandler");
clip.def(py::init<StreamMgr &>());
clip.def("getCurrentValue", [](const ClipHandler &clip) {
arma::uvec cval = clip.getCurrentValue();
return ColToNpy<arma::uword>(cval); // something goes wrong here
});
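From Python, the new binding could then be used roughly as follows. This is a hypothetical sketch: the re-export through the lasp package and the StreamMgr.getInstance() accessor are assumptions, not shown in this diff.

from lasp import StreamMgr, ClipHandler   # assumed re-exports of lasp_cpp

mgr = StreamMgr.getInstance()             # assumed singleton accessor
clip = ClipHandler(mgr)                   # attaches to the input stream

# Once the input stream is running, poll the per-channel indication:
print(clip.getCurrentValue())             # array with 0/1 per enabled channel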
/// Real time Aps
///
py::class_<RtAps, ThreadedInDataHandler> rtaps(m, "RtAps");


@ -78,6 +78,70 @@ class SmoothingType:
# TO DO: add possibility to insert data that is not lin spaced in frequency
def smoothCalcMatrix(freq, sw: SmoothingWidth):
"""
Args:
freq: array of frequencies of data points [Hz] - equally spaced
sw: SmoothingWidth
Returns:
Q: matrix to smooth power: {fsm} = [Q] * {fraw}
Warning: this method does not work on levels (dB)
"""
# Settings
tr = 2 # truncate window after 2x std; shorter is faster and less accurate
Noct = sw.value[0]
assert Noct > 0, "'Noct' must be absolute positive"
if Noct < 1:
raise Warning('Check if \'Noct\' is entered correctly')
# Initialize
L = len(freq)
Q = np.zeros(shape=(L, L), dtype=np.float16) # float16: keep size small
x0 = 1 if freq[0] == 0 else 0 # Skip first data point if zero frequency
# Loop over indices of raw frequency vector
for x in range(x0, L):
# Find indices of data points to calculate current (smoothed) magnitude
#
# Indices beyond [0, L] point to non-existing data. Beyond 0 does not
# occur in this implementation. Beyond L occurs when the smoothing
# window nears the end of the series.
# If one end of the window is truncated, the other end
# could be truncated as well, to prevent an error on magnitude data
# with a slope. It however results in unsmoothed looking data at the
# end.
fc = freq[x] # center freq. of smoothing window
fl = fc / np.sqrt(2**(tr/Noct)) # lower cutoff
fu = fc * np.sqrt(2**(tr/Noct)) # upper cutoff
# If the upper (frequency) side of the window is truncated because
# there is no data beyond the Nyquist frequency, also truncate the
# other side to keep it symmetric in a log(frequency) scale.
# So: fu / fc = fc / fl
fNq = freq[-1]
if fu > fNq:
fu = fNq # no data beyond fNq
fl = fc**2 / fu # keep window symmetric
# Find indices corresponding to frequencies
xl = bisect.bisect_left(freq, fl) # index corresponding to fl
xu = bisect.bisect_left(freq, fu)
xl = xu-1 if xu-xl <= 0 else xl # Guarantee window length of at least 1
# Calculate window
xg = np.arange(xl, xu) # indices
fg = freq[xg] # [Hz] corresponding freq
gs = np.sqrt( 1/ ((1+((fg/fc - fc/fg)*(1.507*Noct))**6)) ) # raw window
gs /= np.sum(gs) # normalize: integral=1
Q[x, xl:xu] = gs # add to matrix
return Q
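Given the relation {fsm} = [Q] * {fraw} from the docstring, applying the smoothing reduces to a single matrix product. A minimal sketch with made-up data; the stand-in sw class mirrors the one used in the __main__ test further below, and smoothCalcMatrix is assumed to be in scope (same module):

import numpy as np

freq = np.linspace(0, 24000, 513)    # equally spaced frequency axis [Hz]
P = np.ones_like(freq)               # power (not dB!) to be smoothed

class sw:                            # stand-in for a SmoothingWidth member
    value = [3]                      # Noct = 3, i.e. 1/3 octave smoothing

Q = smoothCalcMatrix(freq, sw)       # (513, 513) smoothing matrix
Psm = Q @ P                          # smoothed power, same shape as P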
def smoothSpectralData(freq, M, sw: SmoothingWidth,
st: SmoothingType = SmoothingType.levels):
"""
@ -91,6 +155,10 @@ def smoothSpectralData(freq, M, sw: SmoothingWidth,
side. The deviation is largest when Noct is small (e.g. coarse smoothing).
Casper Jansen, 07-05-2021
Update 16-01-2023: speed up algorithm
- Smoothing is performed using matrix multiplication
- The smoothing matrix is not calculated if it already exists
Args:
freq: array of frequencies of data points [Hz] - equally spaced
M: array of either power, transfer function or dB points. Depending on
@ -103,9 +171,6 @@ def smoothSpectralData(freq, M, sw: SmoothingWidth,
"""
# TODO: Make this function multi-dimensional array aware.
# Settings
tr = 2 # truncate window after 2x std; shorter is faster and less accurate
# Safety
MM = copy.deepcopy(M)
Noct = sw.value[0]
@ -134,169 +199,33 @@ def smoothSpectralData(freq, M, sw: SmoothingWidth,
# P is power while smoothing. x are indices of P.
Psm = np.zeros_like(P) # Smoothed power - to be calculated
x0 = 1 if freq[0] == 0 else 0 # Skip first data point if zero frequency
Psm[0] = P[0] # Reuse old value in case first data..
if freq[0] == 0:
Psm[0] = P[0] # Reuse old value in case first data..
# ..point is skipped. Not plotted anyway.
# Loop through data points
for x in range(x0, L):
# Find indices of data points to calculate current (smoothed) magnitude
#
# Indices beyond [0, L] point to non-existing data. Beyond 0 does not
# occur in this implementation. Beyond L occurs when the smoothing
# window nears the end of the series.
# If one end of the window is truncated, the other end
# could be truncated as well, to prevent an error on magnitude data
# with a slope. It however results in unsmoothed looking data at the
# end.
fc = freq[x] # center freq. of smoothing window
fl = fc / np.sqrt(2**(tr/Noct)) # lower cutoff
fu = fc * np.sqrt(2**(tr/Noct)) # upper cutoff
# Re-use smoothing matrix Q if available. Otherwise, calculate.
# Store in dict 'Qdict'
nfft = int(2*(len(freq)-1))
key = f"nfft{nfft}_Noct{Noct}" # matrix name
# If the upper (frequency) side of the window is truncated because
# there is no data beyond the Nyquist frequency, also truncate the
# other side to keep it symmetric in a log(frequency) scale.
# So: fu / fc = fc / fl
fNq = freq[-1]
if fu > fNq:
fu = fNq # no data beyond fNq
fl = fc**2 / fu # keep window symmetric
if 'Qdict' not in globals(): # Guarantee Qdict exists
global Qdict
Qdict = {}
# Find indices corresponding to frequencies
xl = bisect.bisect_left(freq, fl) # index corresponding to fl
xu = bisect.bisect_left(freq, fu)
if key in Qdict:
Q = Qdict[key]
else:
Q = smoothCalcMatrix(freq, sw)
Qdict[key] = Q
# Guarantee window length of at least 1
if xu - xl <= 0:
xl = xu - 1
# Calculate window
g = np.zeros(xu-xl)
for n, xi in enumerate(range(xl, xu)):
fi = freq[xi] # current frequency
g[n] = np.sqrt( 1/ ((1+((fi/fc - fc/fi)*(1.507*Noct))**6)) )
g /= np.sum(g) # normalize: integral=1
# Apply smoothing
Psm[x] = np.dot(g, P[xl:xu])
# Apply smoothing
Psm = np.matmul(Q, P)
if st == SmoothingType.levels:
Psm = 10*np.log10(Psm)
return Psm
## OLD VERSION
#from scipy.signal.windows import gaussian
#def smoothSpectralData(freq, M, sw: SmoothingWidth,
# st: SmoothingType = SmoothingType.levels):
# """
# Apply fractional octave smoothing to magnitude data in frequency domain.
# Smoothing is performed to power, using a sliding Gaussian window with
# variable length. The window is truncated after 2x std at either side.
#
# The implementation is not exact, because f is linearly spaced and
# fractional octave smoothing is related to log spaced data. In this
# implementation, the window extends with a fixed frequency step to either
# side. The deviation is largest when Noct is small (e.g. coarse smoothing).
# Casper Jansen, 07-05-2021
#
# Args:
# freq: array of frequencies of data points [Hz] - equally spaced
# M: array of either power, transfer functin or dB points. Depending on
# the smoothing type `st`, the smoothing is applied.
#
# Returns:
# freq : array frequencies of data points [Hz]
# Msm : float smoothed magnitude of data points
#
# """
# # TODO: Make this function multi-dimensional array aware.
#
# # Settings
# tr = 2 # truncate window after 2x std; shorter is faster and less accurate
#
# # Safety
# Noct = sw.value[0]
# assert Noct > 0, "'Noct' must be absolute positive"
# if Noct < 1: raise Warning('Check if \'Noct\' is entered correctly')
# assert len(freq)==len(M), 'f and M should have equal length'
#
# if st == SmoothingType.ps:
# assert np.min(M) >= 0, 'absolute magnitude M cannot be negative'
# if st == SmoothingType.levels and isinstance(M.dtype, complex):
# raise RuntimeError('Decibel input should be real-valued')
#
# # Initialize
# L = M.shape[0] # number of data points
#
# P = M
# if st == SmoothingType.levels:
# P = 10**(P/10)
# # TODO: This does not work due to complex numbers. Should be split up in
# # magnitude and phase.
# # elif st == SmoothingType.tf:
# # P = P**2
#
# # P is power while smoothing. x are indices of P.
# Psm = np.zeros_like(P) # Smoothed power - to be calculated
# x0 = 1 if freq[0]==0 else 0 # Skip first data point if zero frequency
# Psm[0] = P[0] # Reuse old value in case first data..
# # ..point is skipped. Not plotted any way.
# df = freq[1] - freq[0] # Frequency step
#
# # Loop through data points
# for x in range(x0, L):
# # Find indices of data points to calculate current (smoothed) magnitude
# fc = freq[x] # center freq. of smoothing window
# Df = tr * fc / Noct # freq. range of smoothing window
#
# # desired lower index of frequency array to be used during smoothing
# xl = int(np.ceil(x - 0.5*Df/df))
#
# # upper index + 1 (because half-open interval)
# xu = int(np.floor(x + 0.5*Df/df)) + 1
#
# # Create window, suitable for frequency lin-spaced data points
# Np = xu - xl # number of points
# std = Np / (2 * tr)
# wind = gaussian(Np, std) # Gaussian window
#
# # Clip indices to valid range
# #
# # Indices beyond [0, L] point to non-existing data. This occurs when
# # the smoothing windows nears the beginning or end of the series.
# # Optional: if one end of the window is clipped, the other end
# # could be clipped as well, to prevent an error on magnitude data with
# # a slope. It however results in unsmoothed looking data at the ends.
# if xl < 0:
# rl = 0 - xl # remove this number of points at the lower end
# xl = xl + rl # .. from f
# wind = wind[rl:] # .. and from window
#
# # rl = 0 - xl # remove this number of points at the lower end
# # xl = xl + rl # .. from f
# # xu = xu - rl
# # wind = wind[rl:-rl] # .. and from window
#
# if xu > L:
# ru = xu - L # remove this number of points at the upper end
# xu = xu - ru
# wind = wind[:-ru]
#
# # ru = xu - L # remove this number of points at the upper end
# # xl = xl + ru
# # xu = xu - ru
# # wind = wind[ru:-ru]
#
# # Apply smoothing
# wind_int = np.sum(wind) # integral
# Psm[x] = np.dot(wind, P[xl:xu]) / wind_int # apply window
#
# if st == SmoothingType.levels:
# Psm = 10*np.log10(Psm)
#
# return Psm
# %% Test
if __name__ == "__main__":
@ -310,7 +239,7 @@ if __name__ == "__main__":
Noct = 2 # Noct = 6 for 1/6 oct. smoothing
# Create dummy data set 1: noise
fmin = 3e3 # [Hz] min freq
fmin = 1e3 # [Hz] min freq
fmax = 24e3 # [Hz] max freq
Ndata = 200 # number of data points
freq = np.linspace(fmin, fmax, Ndata) # frequency points
@ -330,10 +259,13 @@ if __name__ == "__main__":
class sw:
value = [Noct]
st = SmoothingType.levels # so data is given in dB
# st = SmoothingType.ps # so data is given in power
# Smooth
Msm = smoothSpectralData(freq, M, sw, st)
fsm = freq
# Plot - lin frequency
plt.figure()
plt.plot(freq, M, '.b')
@ -342,12 +274,14 @@ if __name__ == "__main__":
plt.ylabel('magnitude')
plt.xlim([100, fmax])
plt.title('lin frequency')
plt.legend(['Raw', 'Smooth'])
# Plot - log frequency
plt.figure()
plt.semilogx(freq, M, '.b')
plt.semilogx(fsm, Msm, 'r')
plt.semilogx(fsm, Msm, 'r')
plt.xlabel('f (Hz)')
plt.ylabel('magnitude')
plt.xlim([100, fmax])
plt.title('log frequency')
plt.legend(['Raw', 'Smooth 1'])


@ -0,0 +1,77 @@
"""
This script checks whether the 3rd octave equalizer is norm compliant.
Created on Tue Dec 31 14:43:19 2019
@author: anne
"""
from lasp.filter import (ThirdOctaveBankDesigner)
import matplotlib.pyplot as plt
import numpy as np
from scipy.signal import freqz, iirdesign, sosfreqz, butter, cheby2
plt.close('all')
# %% Run check
def check_one(fs):
"""Check for fs = fs"""
# Create filter bank
nsections = 5 # filter property
designer = ThirdOctaveBankDesigner(fs)
# Test
xmin = designer.nominal_txt_tox('25') # lowest band, nominal name
xmax = designer.nominal_txt_tox('20k') # highest band
compliant = True # remains True if all filters comply
fnyq = fs/2
fll = designer.fl(xmin) / 1.1 # plot limits
fuu = designer.fu(xmax) * 1.1
fig = plt.figure()
plt.xlabel('f (Hz)')
plt.ylabel('Magnitude (dB)')
for x in range(xmin, xmax+1): # loop over bands
fl = designer.fl(x) # lower cut off
fu = designer.fu(x) # upper
fln = fl/fnyq # lower cutoff, normalized [0, 1]
fun = fu/fnyq # upper
sos = designer.createSOSFilter(x)
fnorm, h = sosfreqz(sos, worN=2**18, whole=False)
freq = fnorm*fnyq/np.pi
h_dB = 20*np.log10(np.abs(h) + np.finfo(float).eps)
if not designer.testStandardCompliance(x, freq, h_dB):
print('==============================')
print(designer.nominal_txt(x), ' does not comply\n')
compliant = False
plt.semilogx(freq, h_dB)
# Upper / lower limits
freqlim, ulim, llim = designer.band_limits(x)
plt.semilogx(freqlim, ulim)
plt.semilogx(freqlim, llim)
plt.ylim(-5, 1)
plt.xlim(fll, fuu)
print(f"Passed test : {compliant} for fs = {fs} Hz")
return compliant
if __name__ == "__main__":
# Make list of fs to be tested
# Taken from https://en.wikipedia.org/wiki/Sampling_(signal_processing)
fs_all = [8000, 11025, 16000, 22050, 32000, 37800, 44056, 44100, 47250,
48000, 50000, 50400, 64000, 88200, 96000, 176400, 192000, 352800]
allcompliant = True
for fs in fs_all:
compliant = check_one(fs)
if not compliant:
allcompliant = False
print(f"\n\nAll passed test : {allcompliant}")


@ -8,8 +8,6 @@ Created on Mon Jan 15 19:45:33 2018
import numpy as np
from lasp import AvPowerSpectra, Window
import matplotlib.pyplot as plt
# plt.close('all')
def test_aps1():
nfft = 16384


@ -2,26 +2,27 @@
import numpy as np
from lasp import cppSLM
from lasp.filter import SPLFilterDesigner
import matplotlib.pyplot as plt
def test_cppslm1():
"""
Generate a sine wave
"""
fs = 48000
omg = 2*np.pi*1000
omg = 2 * np.pi * 1000
slm = cppSLM.fromBiquads(fs, 2e-5, 1, 0.125, [1.,0,0,1,0,0])
slm = cppSLM.fromBiquads(fs, 2e-5, 1, 0.125,
np.array([[1., 0, 0, 1, 0, 0]]).T)
t = np.linspace(0, 10, 10*fs, endpoint=False)
t = np.linspace(0, 10, 10 * fs, endpoint=False)
# Input signal with an rms of 1 Pa
in_ = np.sin(omg*t)*np.sqrt(2)
in_ = np.sin(omg * t) * np.sqrt(2)
# Compute overall RMS
rms = np.sqrt(np.sum(in_**2)/in_.size)
rms = np.sqrt(np.sum(in_**2) / in_.size)
# Compute overall level
level = 20*np.log10(rms/2e-5)
level = 20 * np.log10(rms / 2e-5)
# Output of SLM
out = slm.run(in_)
@ -29,7 +30,7 @@ def test_cppslm1():
# Output of SLM should be close to theoretical
# level, at least for reasonable time constants
# (Fast, Slow etc)
assert(np.isclose(out[-1,0], level))
assert (np.isclose(out[-1, 0], level))
def test_cppslm2():
@ -37,20 +38,23 @@ def test_cppslm2():
Generate a sine wave, now A-weighted
"""
fs = 48000
omg = 2*np.pi*1000
omg = 2 * np.pi * 1000
filt = SPLFilterDesigner(fs).A_Sos_design()
slm = cppSLM.fromBiquads(fs, 2e-5, 0, 0.125, filt.flatten(), [1.,0,0,1,0,0])
slm = cppSLM.fromBiquads(fs, 2e-5, 0, 0.125,
filt.flatten(), # Pre-filter coefs
np.array([[1., 0, 0, 1, 0, 0]]).T # Bandpass coefs
)
t = np.linspace(0, 10, 10*fs, endpoint=False)
t = np.linspace(0, 10, 10 * fs, endpoint=False)
# Input signal with an rms of 1 Pa
in_ = np.sin(omg*t) *np.sqrt(2)
in_ = np.sin(omg * t) * np.sqrt(2)
# Compute overall RMS
rms = np.sqrt(np.sum(in_**2)/in_.size)
rms = np.sqrt(np.sum(in_**2) / in_.size)
# Compute overall level
level = 20*np.log10(rms/2e-5)
level = 20 * np.log10(rms / 2e-5)
# Output of SLM
out = slm.run(in_)
@ -58,32 +62,33 @@ def test_cppslm2():
# Output of SLM should be close to theoretical
# level, at least for reasonable time constants
# (Fast, Slow etc)
assert np.isclose(out[-1,0], level, atol=1e-2)
assert np.isclose(out[-1, 0], level, atol=1e-2)
def test_cppslm3():
fs = 48000
omg = 2*np.pi*1000
omg = 2 * np.pi * 1000
filt = SPLFilterDesigner(fs).A_Sos_design()
slm = cppSLM.fromBiquads(fs, 2e-5, 0, 0.125, filt.flatten(), [1.,0,0,1,0,0])
t = np.linspace(0, 10, 10*fs, endpoint=False)
slm = cppSLM.fromBiquads(fs, 2e-5, 0, 0.125,
filt.flatten(),
np.array([[1., 0, 0, 1, 0, 0]]).T)
t = np.linspace(0, 10, 10 * fs, endpoint=False)
in_ = 10*np.sin(omg*t) * np.sqrt(2)+np.random.randn()
in_ = 10 * np.sin(omg * t) * np.sqrt(2) + np.random.randn()
# Compute overall RMS
rms = np.sqrt(np.sum(in_**2)/in_.size)
rms = np.sqrt(np.sum(in_**2) / in_.size)
# Compute overall level
level = 20*np.log10(rms/2e-5)
level = 20 * np.log10(rms / 2e-5)
# Output of SLM
out = slm.run(in_)
Lpeak = 20*np.log10(np.max(np.abs(in_)/2e-5))
Lpeak = 20 * np.log10(np.max(np.abs(in_) / 2e-5))
Lpeak
slm.Lpeak()
assert np.isclose(out[-1,0], slm.Leq()[0][0], atol=1e-2)
assert np.isclose(Lpeak, slm.Lpeak()[0][0], atol=2e0)
assert np.isclose(out[-1, 0], slm.Leq()[0], atol=1e-2)
assert np.isclose(Lpeak, slm.Lpeak()[0], atol=2e0)
if __name__ == '__main__':


@ -5,8 +5,6 @@ Testing code for power spectra
"""
import numpy as np
from lasp import PowerSpectra, Window
# import matplotlib.pyplot as plt
# plt.close('all')
def test_ps():
"""


@ -1,37 +0,0 @@
#!/usr/bin/python3
import numpy as np
from lasp import SLM
nframes = 0
samplerate = 48000
omg = 2*np.pi*1000
# def mycallback(input_, nframes, streamtime):
# t = np.linspace(streamtime, streamtime + nframes/samplerate,
# nframes)[np.newaxis,:]
# outp = 0.1*np.sin(omg*t)
# return outp, 0
# if __name__ == '__main__':
# pa = RtAudio()
# count = pa.getDeviceCount()
# # dev = pa.getDeviceInfo(0)
# for i in range(count):
# dev = pa.getDeviceInfo(i)
# print(dev)
# outputparams = {'deviceid': 0, 'nchannels': 1, 'firstchannel': 0}
# pa.openStream(outputparams, None , Format_FLOAT64,samplerate, 512, mycallback)
# pa.startStream()
# input()
# pa.stopStream()
# pa.closeStream()