pyqtgraph/tests/graphicsItems/test_ImageItem.py

# -*- coding: utf-8 -*-
import time
import pytest
from pyqtgraph.Qt import QtGui, QtTest, QtCore
import numpy as np
import pyqtgraph as pg
from tests.image_testing import assertImageApproved, TransposedImageItem
try:
    import cupy
except ImportError:
    cupy = None

app = pg.mkQApp()
@pytest.mark.skipif(cupy is None, reason="CuPy unavailable to test")
def test_useCupy_can_be_set_after_init():
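    # useCupy is switched on only after the ImageItem has been created;
    # setImage() must then accept a cupy array without raising.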
    prev_setting = pg.getConfigOption("useCupy")
    try:
        pg.setConfigOption("useCupy", False)
        w = pg.GraphicsLayoutWidget()
        w.show()
        view = pg.ViewBox()
        w.setCentralWidget(view)
        w.resize(200, 200)
        img = cupy.random.randint(0, 255, size=(32, 32)).astype(cupy.uint8)

        ii = pg.ImageItem()
        view.addItem(ii)
        pg.setConfigOption("useCupy", True)
        ii.setImage(img)
        w.hide()
    finally:
        pg.setConfigOption("useCupy", prev_setting)


@pytest.mark.skipif(cupy is None, reason="CuPy unavailable to test")
def test_ensuring_substrate():
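    # _ensure_proper_substrate() should hand back the input unchanged when it is
    # already an array of the requested module, and convert it otherwise.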
    prev_setting = pg.getConfigOption("useCupy")
    try:
        pg.setConfigOption("useCupy", True)
        ii = pg.ImageItem()

        data = cupy.random.randint(0, 255, size=(32, 32)).astype(cupy.uint8)
        assert data is ii._ensure_proper_substrate(data, cupy)
        assert isinstance(ii._ensure_proper_substrate(data, cupy), cupy.ndarray)
        assert data is not ii._ensure_proper_substrate(data, np)
        assert isinstance(ii._ensure_proper_substrate(data, np), np.ndarray)

        data = np.random.randint(0, 255, size=(32, 32)).astype(np.uint8)
        assert data is ii._ensure_proper_substrate(data, np)
        assert isinstance(ii._ensure_proper_substrate(data, np), np.ndarray)
        assert data is not ii._ensure_proper_substrate(data, cupy)
        assert isinstance(ii._ensure_proper_substrate(data, cupy), cupy.ndarray)
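        # a non-array input such as a plain range should be converted to an
        # array of the requested module in either case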
        data = range(0, 255)
        assert data is not ii._ensure_proper_substrate(data, np)
        assert isinstance(ii._ensure_proper_substrate(data, np), np.ndarray)
        assert data is not ii._ensure_proper_substrate(data, cupy)
        assert isinstance(ii._ensure_proper_substrate(data, cupy), cupy.ndarray)
    finally:
        pg.setConfigOption("useCupy", prev_setting)


def test_ImageItem(transpose=False):
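    # Exercise ImageItem's main rendering paths (mono float, int16, uint8, bool
    # and RGBA data, LUTs, levels, composition modes and auto-downsampling)
    # against stored reference images.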
    w = pg.GraphicsLayoutWidget()
    w.show()
    view = pg.ViewBox()
    w.setCentralWidget(view)
    w.resize(200, 200)
    img = TransposedImageItem(border=0.5, transpose=transpose)
    view.addItem(img)

    # test mono float
    np.random.seed(0)
    data = np.random.normal(size=(20, 20))
    dmax = data.max()
    data[:10, 1] = dmax + 10
    data[1, :10] = dmax + 12
    data[3, :10] = dmax + 13
    img.setImage(data)
    QtTest.QTest.qWaitForWindowExposed(w)
    time.sleep(0.1)
    app.processEvents()
    assertImageApproved(w, 'imageitem/init', 'Init image item. View is auto-scaled, image axis 0 marked by 1 line, axis 1 is marked by 2 lines. Origin in bottom-left.')

    # ..with colormap
    cmap = pg.ColorMap([0, 0.25, 0.75, 1], [[0, 0, 0, 255], [255, 0, 0, 255], [255, 255, 0, 255], [255, 255, 255, 255]])
    img.setLookupTable(cmap.getLookupTable())
    assertImageApproved(w, 'imageitem/lut', 'Set image LUT.')

    # ..and different levels
    img.setLevels([dmax+9, dmax+13])
    assertImageApproved(w, 'imageitem/levels1', 'Levels show only axis lines.')

    img.setLookupTable(None)

    # test mono int
    data = np.fromfunction(lambda x,y: x+y*10, (129, 128)).astype(np.int16)
    img.setImage(data)
    assertImageApproved(w, 'imageitem/gradient_mono_int', 'Mono int gradient.')

    img.setLevels([640, 641])
    assertImageApproved(w, 'imageitem/gradient_mono_int_levels', 'Mono int gradient w/ levels to isolate diagonal.')

    # test mono byte
    data = np.fromfunction(lambda x,y: x+y, (129, 128)).astype(np.ubyte)
    img.setImage(data)
    assertImageApproved(w, 'imageitem/gradient_mono_byte', 'Mono byte gradient.')

    img.setLevels([127, 128])
    assertImageApproved(w, 'imageitem/gradient_mono_byte_levels', 'Mono byte gradient w/ levels to isolate diagonal.')

    # test monochrome image
    data = np.zeros((10, 10), dtype='uint8')
    data[:5,:5] = 1
    data[5:,5:] = 1
    img.setImage(data)
    assertImageApproved(w, 'imageitem/monochrome', 'Ubyte image with only 0,1 values.')

    # test bool
    data = data.astype(bool)
    img.setImage(data)
    assertImageApproved(w, 'imageitem/bool', 'Boolean mask.')

    # test RGBA byte
    data = np.zeros((100, 100, 4), dtype='ubyte')
    data[..., 0] = np.linspace(0, 255, 100).reshape(100, 1)
    data[..., 1] = np.linspace(0, 255, 100).reshape(1, 100)
    data[..., 3] = 255
    img.setImage(data)
    assertImageApproved(w, 'imageitem/gradient_rgba_byte', 'RGBA byte gradient.')

    img.setLevels([[128, 129], [128, 255], [0, 1], [0, 255]])
    assertImageApproved(w, 'imageitem/gradient_rgba_byte_levels', 'RGBA byte gradient. Levels set to show x=128 and y>128.')

    # test RGBA float
    data = data.astype(float)
    img.setImage(data / 1e9)
    assertImageApproved(w, 'imageitem/gradient_rgba_float', 'RGBA float gradient.')

    # checkerboard to test alpha
    img2 = TransposedImageItem(transpose=transpose)
    img2.setImage(np.fromfunction(lambda x,y: (x+y)%2, (10, 10)), levels=[-1,2])
    view.addItem(img2)
    img2.setScale(10)
    img2.setZValue(-10)

    data[..., 0] *= 1e-9
    data[..., 1] *= 1e9
    data[..., 3] = np.fromfunction(lambda x,y: np.sin(0.1 * (x+y)), (100, 100))
    img.setImage(data, levels=[[0, 128e-9],[0, 128e9],[0, 1],[-1, 1]])
    assertImageApproved(w, 'imageitem/gradient_rgba_float_alpha', 'RGBA float gradient with alpha.')

    # test composition mode
    img.setCompositionMode(QtGui.QPainter.CompositionMode.CompositionMode_Plus)
    assertImageApproved(w, 'imageitem/gradient_rgba_float_additive', 'RGBA float gradient with alpha and additive composition mode.')

    img2.hide()
    img.setCompositionMode(QtGui.QPainter.CompositionMode.CompositionMode_SourceOver)

    # test downsampling
    data = np.fromfunction(lambda x,y: np.cos(0.002 * x**2), (800, 100))
    img.setImage(data, levels=[-1, 1])
    assertImageApproved(w, 'imageitem/resolution_without_downsampling', 'Resolution test without downsampling.')
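    # With the ~200 px wide view showing an 800-sample axis, auto-downsampling
    # is expected to reduce that axis by a factor of 4, as the asserts below check.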
    img.setAutoDownsample(True)
    assertImageApproved(w, 'imageitem/resolution_with_downsampling_x', 'Resolution test with downsampling across x axis.')
    assert img._lastDownsample == (4, 1)

    img.setImage(data.T, levels=[-1, 1])
    assertImageApproved(w, 'imageitem/resolution_with_downsampling_y', 'Resolution test with downsampling across y axis.')
    assert img._lastDownsample == (1, 4)

    w.hide()


def test_ImageItem_axisorder():
    # All image tests pass again using the opposite axis order
    origMode = pg.getConfigOption('imageAxisOrder')
    altMode = 'row-major' if origMode == 'col-major' else 'col-major'
    pg.setConfigOptions(imageAxisOrder=altMode)
    try:
        test_ImageItem(transpose=True)
    finally:
        pg.setConfigOptions(imageAxisOrder=origMode)


def test_setRect():
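    # setRect() (and the rect= constructor argument) should produce the same
    # QTransform as the hand-built scale + translate reference below.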
    def assert_equal_transforms(tr1, tr2):
        dic = { # there seems to be no easy way to get the matrix in one call:
            'tr11': ( tr1.m11(), tr2.m11() ),
            'tr12': ( tr1.m12(), tr2.m12() ),
            'tr13': ( tr1.m13(), tr2.m13() ),
            'tr21': ( tr1.m21(), tr2.m21() ),
            'tr22': ( tr1.m22(), tr2.m22() ),
            'tr23': ( tr1.m23(), tr2.m23() ),
            'tr31': ( tr1.m31(), tr2.m31() ),
            'tr32': ( tr1.m32(), tr2.m32() ),
            'tr33': ( tr1.m33(), tr2.m33() )
        }
        log_string = 'Matrix element mismatch\n'
        good = True
        for key, values in dic.items():
            val1, val2 = values
            if val1 != val2:
                good = False
                log_string += f'{key}: {val1} != {val2}\n'
        assert good, log_string

    tr = QtGui.QTransform()  # construct a reference transform
    tr.scale(2, 4)           # scale 2x2 image to 4x8
    tr.translate(-1, -1)     # after shifting by -1, -1
    # the transformed 2x2 image would cover (-2,-4) to (2,4).
    # Now have setRect construct the same transform:
    imgitem = pg.ImageItem(np.eye(2), rect=(-2, -4, 4, 8))  # test tuple of floats
    assert_equal_transforms(tr, imgitem.transform())

    imgitem = pg.ImageItem(np.eye(2), rect=QtCore.QRectF(-2, -4, 4, 8))  # test QRectF
    assert_equal_transforms(tr, imgitem.transform())

    imgitem = pg.ImageItem(np.eye(2))
    imgitem.setRect(-2, -4, 4, 8)  # test individual parameters
    assert_equal_transforms(tr, imgitem.transform())

    imgitem = pg.ImageItem(np.eye(2))
    imgitem.setRect(QtCore.QRect(-2, -4, 4, 8))  # test QRect argument
    assert_equal_transforms(tr, imgitem.transform())


def test_dividebyzero():
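    # Regression test: an extremely zoomed-out view must not cause a
    # divide-by-zero in the auto-downsampling path when the image is rendered.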
    im = pg.image(np.random.normal(size=(100, 100)))
    im.imageItem.setAutoDownsample(True)
    im.view.setRange(xRange=[-5e+25, 5e+25], yRange=[-5e+25, 5e+25])
    app.processEvents()
    QtTest.QTest.qWait(1000)
    # must manually call im.imageItem.render here or the exception
    # will only exist on the Qt event loop
    im.imageItem.render()