Protect makeARGB with tests and benchmarks (#1697)

* update to working

* cupy tests, too

* doubling up and down

* add more realism to the benchmarks

* name to reflect scale

* use different numbers to mean different numbers

(that sure does sound tautological)

* more sensible: order, error

* thorough check of lots of makeARGB arg combos

* docstring for tool usage

* no print needed

* better error messages

* test makeARGB using cupy, too

* skip without cupy available

* switch from conda to venv

* skip cupy runs when not available

* use endian-proof makeARGB shim in tests

* generate the asv conf to suit the system

* document running asv

* comments for future matrix goals

* put all makeARGB tests together; name for clarity

* subprocess.check_output is standard for all supported pythons

* better handle lack of git version

* use makeARGB shim

* small fixes and improvements
Commit 6fed6d42b3 (parent a465f93d9b)
Martin Chase, 2021-04-15 15:51:21 -07:00, committed via GitHub
8 changed files with 4713 additions and 433 deletions

.gitignore (1 addition)

@@ -108,3 +108,4 @@ rtr.cvs
.tags*
.asv/
asv.conf.json

CONTRIBUTING.md

@@ -70,3 +70,14 @@ As PyQtGraph supports a wide array of Qt-bindings, and python versions, we make
### Continuous Integration
For our Continuous Integration, we utilize Azure Pipelines. Tested configurations are visible on the [README](README.md). More information on coverage and test failures can be found on the respective tabs of the [build results page](https://dev.azure.com/pyqtgraph/pyqtgraph/_build?definitionId=1).
### Benchmarks
( *Still under development* ) To ensure this library remains performant, we use [airspeed velocity (asv)](https://asv.readthedocs.io/en/stable/) to run benchmarks. When developing core functions and classes, be aware of the impact your changes have on their speed. To configure and run asv:
```
pip install asv
python setup.py asv_config
asv run
```
( TODO publish results )
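Until automatic publication is wired up, results can be rendered and inspected locally. A minimal sketch, assuming benchmarks have already been collected by `asv run`:
```
asv publish   # render collected results as static HTML into .asv/html
asv preview   # serve the rendered HTML on a local web server
```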

asv.conf.json (deleted)

@@ -1,140 +0,0 @@
{
    // The version of the config file format. Do not change, unless
    // you know what you are doing.
    "version": 1,

    // The name of the project being benchmarked
    "project": "pyqtgraph",

    // The project's homepage
    "project_url": "http://pyqtgraph.org/",

    // The URL or local path of the source code repository for the
    // project being benchmarked
    "repo": ".",

    // List of branches to benchmark. If not provided, defaults to "master"
    // (for git) or "default" (for mercurial).
    "branches": ["master"], // for git
    // "branches": ["default"], // for mercurial

    // The DVCS being used. If not set, it will be automatically
    // determined from "repo" by looking at the protocol in the URL
    // (if remote), or by looking for special directories, such as
    // ".git" (if local).
    // "dvcs": "git",

    // The tool to use to create environments. May be "conda",
    // "virtualenv" or other value depending on the plugins in use.
    // If missing or the empty string, the tool will be automatically
    // determined by looking for tools on the PATH environment
    // variable.
    "environment_type": "conda",

    // timeout in seconds for installing any dependencies in environment
    // defaults to 10 min
    //"install_timeout": 600,

    // the base URL to show a commit for the project.
    "show_commit_url": "http://github.com/pyqtgraph/pyqtgraph/commit/",

    // The Pythons you'd like to test against. If not provided, defaults
    // to the current version of Python used to run `asv`.
    "pythons": ["2.7", "3.8"],

    // The matrix of dependencies to test. Each key is the name of a
    // package (in PyPI) and the values are version numbers. An empty
    // list or empty string indicates to just test against the default
    // (latest) version. null indicates that the package is to not be
    // installed. If the package to be tested is only available from
    // PyPi, and the 'environment_type' is conda, then you can preface
    // the package name by 'pip+', and the package will be installed via
    // pip (with all the conda available packages installed first,
    // followed by the pip installed packages).
    //
    "matrix": {
        "numpy": [],
        "numba": [],
        "pyqt": ["4", "5"],
    },

    // Combinations of libraries/python versions can be excluded/included
    // from the set to test. Each entry is a dictionary containing additional
    // key-value pairs to include/exclude.
    //
    // An exclude entry excludes entries where all values match. The
    // values are regexps that should match the whole string.
    //
    // An include entry adds an environment. Only the packages listed
    // are installed. The 'python' key is required. The exclude rules
    // do not apply to includes.
    //
    // In addition to package names, the following keys are available:
    //
    // - python
    //     Python version, as in the *pythons* variable above.
    // - environment_type
    //     Environment type, as above.
    // - sys_platform
    //     Platform, as in sys.platform. Possible values for the common
    //     cases: 'linux2', 'win32', 'cygwin', 'darwin'.
    //
    "exclude": [
        {"python": "3.8", "pyqt": "4"},
    ],
    //
    // "include": [
    //     // additional env for python2.7
    //     {"python": "2.7", "numpy": "1.8"},
    //     // additional env if run on windows+conda
    //     {"platform": "win32", "environment_type": "conda", "python": "2.7", "libpython": ""},
    // ],

    // The directory (relative to the current directory) that benchmarks are
    // stored in. If not provided, defaults to "benchmarks"
    "benchmark_dir": "benchmarks",

    // The directory (relative to the current directory) to cache the Python
    // environments in. If not provided, defaults to "env"
    "env_dir": ".asv/env",

    // The directory (relative to the current directory) that raw benchmark
    // results are stored in. If not provided, defaults to "results".
    "results_dir": ".asv/results",

    // The directory (relative to the current directory) that the html tree
    // should be written to. If not provided, defaults to "html".
    "html_dir": ".asv/html",

    // The number of characters to retain in the commit hashes.
    // "hash_length": 8,

    // `asv` will cache wheels of the recent builds in each
    // environment, making them faster to install next time. This is
    // number of builds to keep, per environment.
    "build_cache_size": 5

    // The commits after which the regression search in `asv publish`
    // should start looking for regressions. Dictionary whose keys are
    // regexps matching to benchmark names, and values corresponding to
    // the commit (exclusive) after which to start looking for
    // regressions. The default is to start from the first commit
    // with results. If the commit is `null`, regression detection is
    // skipped for the matching benchmark.
    //
    // "regressions_first_commits": {
    //     "some_benchmark": "352cdf",  // Consider regressions only after this commit
    //     "another_benchmark": null,   // Skip regression detection altogether
    // }

    // The thresholds for relative change in results, after which `asv
    // publish` starts reporting regressions. Dictionary of the same
    // form as in ``regressions_first_commits``, with values
    // indicating the thresholds. If multiple entries match, the
    // maximum is taken. If no entry matches, the default is 5%.
    //
    // "regressions_thresholds": {
    //     "some_benchmark": 0.01,   // Threshold of 1%
    //     "another_benchmark": 0.5, // Threshold of 50%
    // }
}

benchmarks/makeARGB.py

@@ -1,72 +1,130 @@
import numpy as np

import pyqtgraph as pg
from pyqtgraph.functions import makeARGB

try:
    import cupy as cp

    pg.setConfigOption("useCupy", True)
except ImportError:
    cp = None


class _TimeSuite(object):
    def __init__(self):
        super(_TimeSuite, self).__init__()
        self.float_data = None
        self.uint8_data = None
        self.uint8_lut = None
        self.uint16_data = None
        self.uint16_lut = None
        self.output = None
        self.cupy_output = None

    def setup(self):
        size = (self.size, self.size)
        self.float_data, self.uint16_data, self.uint8_data, self.uint16_lut, self.uint8_lut = self._create_data(
            size, np
        )
        self.output = np.zeros(size + (4,), dtype=np.ubyte)
        makeARGB(self.uint16_data["data"])  # prime the cpu
        if cp:
            self.cupy_output = cp.zeros(size + (4,), dtype=cp.ubyte)
            makeARGB(cp.asarray(self.uint16_data["data"]))  # prime the gpu

    @staticmethod
    def _create_data(size, xp):
        float_data = {
            "data": xp.random.normal(size=size),
            "levels": [-4.0, 4.0],
        }
        uint16_data = {
            "data": xp.random.randint(100, 4500, size=size).astype("uint16"),
            "levels": [250, 3000],
        }
        uint8_data = {
            "data": xp.random.randint(0, 255, size=size).astype("ubyte"),
            "levels": [20, 220],
        }
        c_map = xp.array([[-500.0, 255.0], [-255.0, 255.0], [0.0, 500.0]])
        uint8_lut = xp.zeros((256, 4), dtype="ubyte")
        for i in range(3):
            uint8_lut[:, i] = xp.clip(xp.linspace(c_map[i][0], c_map[i][1], 256), 0, 255)
        uint8_lut[:, 3] = 255
        uint16_lut = xp.zeros((2 ** 16, 4), dtype="ubyte")
        for i in range(3):
            uint16_lut[:, i] = xp.clip(xp.linspace(c_map[i][0], c_map[i][1], 2 ** 16), 0, 255)
        uint16_lut[:, 3] = 255
        return float_data, uint16_data, uint8_data, uint16_lut, uint8_lut


def make_test(dtype, use_cupy, use_levels, lut_name, func_name):
    def time_test(self):
        data = getattr(self, dtype + "_data")
        levels = data["levels"] if use_levels else None
        lut = getattr(self, lut_name + "_lut", None) if lut_name is not None else None
        for _ in range(10):
            img_data = data["data"]
            output = self.output
            if use_cupy:
                img_data = cp.asarray(img_data)
                output = self.cupy_output
            makeARGB(
                img_data, lut=lut, levels=levels, output=output,
            )
            if use_cupy:
                output.get(out=self.output)

    time_test.__name__ = func_name
    return time_test


for cupy in [True, False]:
    if cupy and cp is None:
        continue
    for dtype in ["float", "uint16", "uint8"]:
        for levels in [True, False]:
            if dtype == "float" and not levels:
                continue
            for lutname in [None, "uint8", "uint16"]:
                name = (
                    f'time_10x_makeARGB_{"cupy" if cupy else ""}{dtype}_{"" if levels else "no"}levels_{lutname or "no"}lut'
                )
                setattr(_TimeSuite, name, make_test(dtype, cupy, levels, lutname, name))


class Time0256Suite(_TimeSuite):
    def __init__(self):
        self.size = 256
        super(Time0256Suite, self).__init__()


class Time0512Suite(_TimeSuite):
    def __init__(self):
        self.size = 512
        super(Time0512Suite, self).__init__()


class Time1024Suite(_TimeSuite):
    def __init__(self):
        self.size = 1024
        super(Time1024Suite, self).__init__()


class Time2048Suite(_TimeSuite):
    def __init__(self):
        self.size = 2048
        super(Time2048Suite, self).__init__()


class Time3072Suite(_TimeSuite):
    def __init__(self):
        self.size = 3072
        super(Time3072Suite, self).__init__()


class Time4096Suite(_TimeSuite):
    def __init__(self):
        self.size = 4096
        super(Time4096Suite, self).__init__()
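For a quick sanity check outside of asv, the generated benchmark methods can be called directly. A minimal sketch, assuming this module is importable (for instance, run from inside the benchmarks directory); the method name is one of the combinations generated above:
```
# direct smoke run of one generated benchmark (asv normally drives these)
from makeARGB import Time0256Suite

ts = Time0256Suite()
ts.setup()
ts.time_10x_makeARGB_float_levels_nolut()  # float data, with levels, no LUT
```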

test_functions.py

@@ -1,12 +1,12 @@
 # -*- coding: utf-8 -*-
-import pyqtgraph as pg
-import numpy as np
-import sys
-from copy import deepcopy
-from collections import OrderedDict
-from numpy.testing import assert_array_almost_equal, assert_almost_equal
-import pytest
+from copy import deepcopy
+import numpy as np
+import pytest
+from numpy.testing import assert_array_almost_equal
+import pyqtgraph as pg

 np.random.seed(12345)
@@ -147,207 +147,6 @@ def test_rescaleData():
    assert np.allclose(s1, s2)


def makeARGB(*args, **kwds):
    img, alpha = pg.makeARGB(*args, **kwds)
    if kwds.get('useRGBA'):  # endian independent
        out = img
    elif sys.byteorder == 'little':  # little-endian ARGB32 to B,G,R,A
        out = img
    else:  # big-endian ARGB32 to B,G,R,A
        out = img[..., [3, 2, 1, 0]]
    return out, alpha
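The channel reordering in this shim exists because ARGB32 is defined on a 32-bit word, so its in-memory byte order follows the host CPU. A minimal numpy-only sketch of that fact:
```
import sys

import numpy as np

# one ARGB32 pixel (A=0xAA, R=0xBB, G=0xCC, B=0xDD) stored as a 32-bit word
pixel = np.array([0xAABBCCDD], dtype=np.uint32)
as_bytes = pixel.view(np.uint8)
if sys.byteorder == "little":
    assert list(as_bytes) == [0xDD, 0xCC, 0xBB, 0xAA]  # memory order is B,G,R,A
else:
    assert list(as_bytes) == [0xAA, 0xBB, 0xCC, 0xDD]  # memory order is A,R,G,B
```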
def test_makeARGB():
    # Many parameters to test here:
    #  * data dtype (ubyte, uint16, float, others)
    #  * data ndim (2 or 3)
    #  * levels (None, 1D, or 2D)
    #  * lut dtype
    #  * lut size
    #  * lut ndim (1 or 2)
    #  * useRGBA argument
    # Need to check that all input values map to the correct output values, especially
    # at and beyond the edges of the level range.

    def checkArrays(a, b):
        # because py.test output is difficult to read for arrays
        if not np.all(a == b):
            comp = []
            for i in range(a.shape[0]):
                if a.shape[1] > 1:
                    comp.append('[')
                for j in range(a.shape[1]):
                    m = a[i, j] == b[i, j]
                    comp.append('%d,%d  %s  %s  %s%s' %
                                (i, j, str(a[i, j]).ljust(15), str(b[i, j]).ljust(15),
                                 m, ' ********' if not np.all(m) else ''))
                if a.shape[1] > 1:
                    comp.append(']')
            raise Exception("arrays do not match:\n%s" % '\n'.join(comp))

    def checkImage(img, check, alpha, alphaCheck):
        assert img.dtype == np.ubyte
        assert alpha is alphaCheck
        if alpha is False:
            checkArrays(img[..., 3], 255)

        if np.isscalar(check) or check.ndim == 3:
            checkArrays(img[..., :3], check)
        elif check.ndim == 2:
            checkArrays(img[..., :3], check[..., np.newaxis])
        elif check.ndim == 1:
            checkArrays(img[..., :3], check[..., np.newaxis, np.newaxis])
        else:
            raise Exception('invalid check array ndim')

    # uint8 data tests
    im1 = np.arange(256).astype('ubyte').reshape(256, 1)
    im2, alpha = makeARGB(im1, levels=(0, 255))
    checkImage(im2, im1, alpha, False)

    im3, alpha = makeARGB(im1, levels=(0.0, 255.0))
    checkImage(im3, im1, alpha, False)

    im4, alpha = makeARGB(im1, levels=(255, 0))
    checkImage(im4, 255 - im1, alpha, False)

    im5, alpha = makeARGB(np.concatenate([im1] * 3, axis=1),
                          levels=[(0, 255), (0.0, 255.0), (255, 0)])
    checkImage(im5, np.concatenate([im1, im1, 255 - im1], axis=1), alpha, False)

    im2, alpha = makeARGB(im1, levels=(128, 383))
    checkImage(im2[:128], 0, alpha, False)
    checkImage(im2[128:], im1[:128], alpha, False)

    # uint8 data + uint8 LUT
    lut = np.arange(256)[::-1].astype(np.uint8)
    im2, alpha = makeARGB(im1, lut=lut)
    checkImage(im2, lut, alpha, False)

    # lut larger than maxint
    lut = np.arange(511).astype(np.uint8)
    im2, alpha = makeARGB(im1, lut=lut)
    checkImage(im2, lut[::2], alpha, False)

    # lut smaller than maxint
    lut = np.arange(128).astype(np.uint8)
    im2, alpha = makeARGB(im1, lut=lut)
    checkImage(im2, np.linspace(0, 127.5, 256, dtype='ubyte'), alpha, False)

    # lut + levels
    lut = np.arange(256)[::-1].astype(np.uint8)
    im2, alpha = makeARGB(im1, lut=lut, levels=[-128, 384])
    checkImage(im2, np.linspace(191.5, 64.5, 256, dtype='ubyte'), alpha, False)

    im2, alpha = makeARGB(im1, lut=lut, levels=[64, 192])
    checkImage(im2, np.clip(np.linspace(384.5, -127.5, 256), 0, 255).astype('ubyte'), alpha, False)

    # uint8 data + uint16 LUT
    lut = np.arange(4096)[::-1].astype(np.uint16) // 16
    im2, alpha = makeARGB(im1, lut=lut)
    checkImage(im2, np.arange(256)[::-1].astype('ubyte'), alpha, False)

    # uint8 data + float LUT
    lut = np.linspace(10., 137., 256)
    im2, alpha = makeARGB(im1, lut=lut)
    checkImage(im2, lut.astype('ubyte'), alpha, False)

    # uint8 data + 2D LUT
    lut = np.zeros((256, 3), dtype='ubyte')
    lut[:, 0] = np.arange(256)
    lut[:, 1] = np.arange(256)[::-1]
    lut[:, 2] = 7
    im2, alpha = makeARGB(im1, lut=lut)
    checkImage(im2, lut[:, None, ::-1], alpha, False)

    # check useRGBA
    im2, alpha = makeARGB(im1, lut=lut, useRGBA=True)
    checkImage(im2, lut[:, None, :], alpha, False)

    # uint16 data tests
    im1 = np.arange(0, 2**16, 256).astype('uint16')[:, None]
    im2, alpha = makeARGB(im1, levels=(512, 2**16))
    checkImage(im2, np.clip(np.linspace(-2, 253, 256), 0, 255).astype('ubyte'), alpha, False)

    lut = (np.arange(512, 2**16)[::-1] // 256).astype('ubyte')
    im2, alpha = makeARGB(im1, lut=lut, levels=(512, 2**16 - 256))
    checkImage(im2, np.clip(np.linspace(257, 2, 256), 0, 255).astype('ubyte'), alpha, False)

    lut = np.zeros(2**16, dtype='ubyte')
    lut[1000:1256] = np.arange(256)
    lut[1256:] = 255
    im1 = np.arange(1000, 1256).astype('uint16')[:, None]
    im2, alpha = makeARGB(im1, lut=lut)
    checkImage(im2, np.arange(256).astype('ubyte'), alpha, False)

    # float data tests
    im1 = np.linspace(1.0, 17.0, 256)[:, None]
    im2, alpha = makeARGB(im1, levels=(5.0, 13.0))
    checkImage(im2, np.clip(np.linspace(-128, 383, 256), 0, 255).astype('ubyte'), alpha, False)

    lut = (np.arange(1280)[::-1] // 10).astype('ubyte')
    im2, alpha = makeARGB(im1, lut=lut, levels=(1, 17))
    checkImage(im2, np.linspace(127.5, 0, 256).astype('ubyte'), alpha, False)

    # nans in image

    # 2d input image, one pixel is nan
    im1 = np.ones((10, 12))
    im1[3, 5] = np.nan
    im2, alpha = makeARGB(im1, levels=(0, 1))
    assert alpha
    assert im2[3, 5, 3] == 0  # nan pixel is transparent
    assert im2[0, 0, 3] == 255  # doesn't affect other pixels

    # 3d RGB input image, any color channel of a pixel is nan
    im1 = np.ones((10, 12, 3))
    im1[3, 5, 1] = np.nan
    im2, alpha = makeARGB(im1, levels=(0, 1))
    assert alpha
    assert im2[3, 5, 3] == 0  # nan pixel is transparent
    assert im2[0, 0, 3] == 255  # doesn't affect other pixels

    # 3d RGBA input image, any color channel of a pixel is nan
    im1 = np.ones((10, 12, 4))
    im1[3, 5, 1] = np.nan
    im2, alpha = makeARGB(im1, levels=(0, 1), useRGBA=True)
    assert alpha
    assert im2[3, 5, 3] == 0  # nan pixel is transparent

    # test sanity checks
    class AssertExc(object):
        def __init__(self, exc=Exception):
            self.exc = exc

        def __enter__(self):
            return self

        def __exit__(self, *args):
            assert args[0] is self.exc, "Should have raised %s (got %s)" % (self.exc, args[0])
            return True

    with AssertExc(TypeError):  # invalid image shape
        pg.makeARGB(np.zeros((2,), dtype='float'))
    with AssertExc(TypeError):  # invalid image shape
        pg.makeARGB(np.zeros((2, 2, 7), dtype='float'))
    with AssertExc():  # float images require levels arg
        pg.makeARGB(np.zeros((2, 2), dtype='float'))
    with AssertExc():  # bad levels arg
        pg.makeARGB(np.zeros((2, 2), dtype='float'), levels=[1])
    with AssertExc():  # bad levels arg
        pg.makeARGB(np.zeros((2, 2), dtype='float'), levels=[1, 2, 3])
    with AssertExc():  # can't mix 3-channel levels and LUT
        pg.makeARGB(np.zeros((2, 2)), lut=np.zeros((10, 3), dtype='ubyte'), levels=[(0, 1)] * 3)
    with AssertExc():  # multichannel levels must have same number of channels as image
        pg.makeARGB(np.zeros((2, 2, 3), dtype='float'), levels=[(1, 2)] * 4)
    with AssertExc():  # 3d levels not allowed
        pg.makeARGB(np.zeros((2, 2, 3), dtype='float'), levels=np.zeros([3, 2, 2]))
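The `AssertExc` helper above is a hand-rolled stand-in for `pytest.raises`; the same sanity checks could be expressed with the stock context manager. A sketch (not part of this diff):
```
import numpy as np
import pytest

import pyqtgraph as pg

with pytest.raises(TypeError):  # invalid image shape
    pg.makeARGB(np.zeros((2,), dtype='float'))
with pytest.raises(Exception):  # float images require levels arg
    pg.makeARGB(np.zeros((2, 2), dtype='float'))
```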
def test_eq():
    eq = pg.functions.eq

(file diff suppressed because it is too large)

setup.py

@@ -134,6 +134,7 @@ setup(
         'test': helpers.TestCommand,
         'debug': helpers.DebugCommand,
         'mergetest': helpers.MergeTestCommand,
+        'asv_config': helpers.ASVConfigCommand,
         'style': helpers.StyleCommand},
     packages=allPackages,
     python_requires=">=3.7",

tools/setupHelpers.py

@@ -1,21 +1,16 @@
 # -*- coding: utf-8 -*-
-import os, sys, re
-try:
-    from subprocess import check_output, check_call
-except ImportError:
-    import subprocess as sp
-    def check_output(*args, **kwds):
-        kwds['stdout'] = sp.PIPE
-        proc = sp.Popen(*args, **kwds)
-        output = proc.stdout.read()
-        proc.wait()
-        if proc.returncode != 0:
-            ex = Exception("Process had nonzero return value "
-                           + "%d " % proc.returncode)
-            ex.returncode = proc.returncode
-            ex.output = output
-            raise ex
-        return output
+from contextlib import suppress
+import json
+import os
+import re
+import shutil
+import subprocess
+import sys
+from distutils.core import Command
+from typing import Dict, Any
+
+from generateChangelog import generateDebianChangelog

 # Maximum allowed repository size difference (in kB) following merge.
 # This is used to prevent large files from being inappropriately added to
@@ -229,9 +224,9 @@ def unitTests():
     """
     try:
         if sys.version[0] == '3':
-            out = check_output('PYTHONPATH=. py.test-3', shell=True)
+            out = subprocess.check_output('PYTHONPATH=. py.test-3', shell=True)
         else:
-            out = check_output('PYTHONPATH=. py.test', shell=True)
+            out = subprocess.check_output('PYTHONPATH=. py.test', shell=True)
         ret = 0
     except Exception as e:
         out = e.output
@@ -295,12 +290,12 @@ def checkMergeSize(
     try:
         print("Check out target branch:\n" + setup)
-        check_call(setup, shell=True)
-        targetSize = int(check_output(checkSize, shell=True))
+        subprocess.check_call(setup, shell=True)
+        targetSize = int(subprocess.check_output(checkSize, shell=True))
         print("TARGET SIZE: %d kB" % targetSize)
         print("Merge source branch:\n" + merge)
-        check_call(merge, shell=True)
-        mergeSize = int(check_output(checkSize, shell=True))
+        subprocess.check_call(merge, shell=True)
+        mergeSize = int(subprocess.check_output(checkSize, shell=True))
         print("MERGE SIZE: %d kB" % mergeSize)
         diff = mergeSize - targetSize
@@ -355,7 +350,7 @@ def getInitVersion(pkgroot):

 def gitCommit(name):
     """Return the commit ID for the given name."""
-    commit = check_output(
+    commit = subprocess.check_output(
         ['git', 'show', name],
         universal_newlines=True).split('\n')[0]
     assert commit[:7] == 'commit '
@@ -375,11 +370,16 @@ def getGitVersion(tagPrefix):
     if not os.path.isdir(os.path.join(path, '.git')):
         return None

-    v = check_output(['git',
-                      'describe',
-                      '--tags',
-                      '--dirty',
-                      '--match="%s*"' % tagPrefix]).strip().decode('utf-8')
+    try:
+        v = (
+            subprocess.check_output(
+                ["git", "describe", "--tags", "--dirty", '--match="%s*"' % tagPrefix],
+                stderr=subprocess.DEVNULL)
+            .strip()
+            .decode("utf-8")
+        )
+    except (FileNotFoundError, subprocess.CalledProcessError):
+        return None

     # chop off prefix
     assert v.startswith(tagPrefix)
@@ -414,7 +414,7 @@ def getGitVersion(tagPrefix):

 def getGitBranch():
     m = re.search(
         r'\* (.*)',
-        check_output(['git', 'branch'],
+        subprocess.check_output(['git', 'branch'],
                      universal_newlines=True))
     if m is None:
         return ''
@@ -476,9 +476,55 @@ def getVersionStrings(pkg):
     return version, forcedVersion, gitVersion, initVersion


-from distutils.core import Command
-import shutil, subprocess
-from generateChangelog import generateDebianChangelog
+DEFAULT_ASV: Dict[str, Any] = {
+    "version": 1,
+    "project": "pyqtgraph",
+    "project_url": "http://pyqtgraph.org/",
+    "repo": ".",
+    "branches": ["master"],
+    "environment_type": "virtualenv",
+    "show_commit_url": "http://github.com/pyqtgraph/pyqtgraph/commit/",
+    # "pythons": ["3.7", "3.8", "3.9"],
+    "matrix": {
+        # "numpy": ["1.17", "1.18", "1.19", ""],
+        "numpy": "",
+        "pyqt5": ["", None],
+        "pyside2": ["", None],
+    },
+    "exclude": [
+        {"pyqt5": "", "pyside2": ""},
+        {"pyqt5": None, "pyside2": None}
+    ],
+    "benchmark_dir": "benchmarks",
+    "env_dir": ".asv/env",
+    "results_dir": ".asv/results",
+    "html_dir": ".asv/html",
+    "build_cache_size": 5
+}
+
+
+class ASVConfigCommand(Command):
+    description = "Setup the ASV benchmarking config for this system"
+    user_options = []
+
+    def initialize_options(self) -> None:
+        pass
+
+    def finalize_options(self) -> None:
+        pass
+
+    def run(self) -> None:
+        config = DEFAULT_ASV
+        with suppress(FileNotFoundError, subprocess.CalledProcessError):
+            cuda_check = subprocess.check_output(["nvcc", "--version"])
+            match = re.search(r"release (\d{1,2}\.\d)", cuda_check.decode("utf-8"))
+            ver = match.groups()[0]  # e.g. 11.0
+            ver_str = ver.replace(".", "")  # e.g. 110
+            config["matrix"][f"cupy-cuda{ver_str}"] = ""
+
+        with open("asv.conf.json", "w") as conf_file:
+            conf_file.write(json.dumps(config, indent=2))
 class DebCommand(Command):
     description = "build .deb package using `debuild -us -uc`"

@@ -500,8 +546,7 @@ class DebCommand(Command):
         debName = "python-" + pkgName
         debDir = self.debDir

-        assert os.getcwd() == self.cwd, 'Must be in package root: '
-        + '%s' % self.cwd
+        assert os.getcwd() == self.cwd, 'Must be in package root: %s' % self.cwd

         if os.path.isdir(debDir):
             raise Exception('DEB build dir already exists: "%s"' % debDir)
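The CUDA detection in `ASVConfigCommand.run` hinges on a single regex over the nvcc banner. A worked illustration; the banner text here is an assumption, as real output varies by CUDA release:
```
import re

banner = "Cuda compilation tools, release 11.0, V11.0.194"  # hypothetical nvcc output
match = re.search(r"release (\d{1,2}\.\d)", banner)
ver = match.groups()[0]  # "11.0"
ver_str = ver.replace(".", "")  # "110"
print(f"cupy-cuda{ver_str}")  # this package name is added to the asv matrix
```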