a472f8c5de
* Remove all usage of python2_3.py. Technically these functions were exported at the top level of the library, so this removes them without warning... If we want to, we can bring them back there, but I honestly don't think it's needed, as we are py3-only now and have been for multiple releases. This may introduce a number of 'useless cast' issues or similar, but those were always happening anyway. This PR brought to you by sed.
* Update varname in hdf example to avoid collision with builtin
* Clean up some leftover comments surrounding imports of compat code
* Unnecessary string casts
* Additional unnecessary casts
* Syntax error fix
* More unnecessary casts
* Yet more unnecessary casts
64 lines
2.0 KiB
Python
# -*- coding: utf-8 -*-
import initExample ## Add path to library (just for examples; you do not need this)

import time
import numpy as np
import pyqtgraph.multiprocess as mp
import pyqtgraph as pg

print("\n=================\nParallelize")

## Do a simple task:
##   for x in range(N):
##       sum([x*i for i in range(M)])
##
## We'll do this three times:
## - once without Parallelize
## - once with Parallelize, but forced to use a single worker
## - once with Parallelize automatically determining how many workers to use
##
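## (For reference: sum(x*i for i in range(M)) has the closed form x*M*(M-1)/2,
##  which is handy for sanity-checking the results computed below.)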

tasks = range(10)
results = [None] * len(tasks)
results2 = results[:]
results3 = results[:]
size = 2000000

pg.mkQApp()
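## (mkQApp ensures a QApplication exists; the ProgressDialog used below is a
##  Qt dialog and cannot be shown without one.)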

### Purely serial processing
start = time.time()
with pg.ProgressDialog('processing serially..', maximum=len(tasks)) as dlg:
    for i, x in enumerate(tasks):
        tot = 0
        for j in range(size):
            tot += j * x
        results[i] = tot
        dlg += 1
        if dlg.wasCanceled():
            raise Exception('processing canceled')
print("Serial time: %0.2f" % (time.time() - start))

### Use parallelize, but force a single worker
### (this simulates the behavior seen on windows, which lacks os.fork)
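### (With workers=1, Parallelize should simply run the loop in this process, so
###  any difference from the serial timing reflects the Parallelize overhead.)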
start = time.time()
with mp.Parallelize(enumerate(tasks), results=results2, workers=1, progressDialog='processing serially (using Parallelizer)..') as tasker:
    for i, x in tasker:
        tot = 0
        for j in range(size):
            tot += j * x
        tasker.results[i] = tot
print("\nParallel time, 1 worker: %0.2f" % (time.time() - start))
print("Results match serial: %s" % str(results2 == results))

### Use parallelize with multiple workers
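### (Roughly: tasks are split across forked worker processes, and each worker's
###  writes to tasker.results are merged back into results3 when the block exits.)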
start = time.time()
with mp.Parallelize(enumerate(tasks), results=results3, progressDialog='processing in parallel..') as tasker:
    for i, x in tasker:
        tot = 0
        for j in range(size):
            tot += j * x
        tasker.results[i] = tot
print("\nParallel time, %d workers: %0.2f" % (mp.Parallelize.suggestedWorkerCount(), time.time() - start))
print("Results match serial: %s" % str(results3 == results))
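
## Illustrative variation (commented out; not part of the original benchmark):
## the 'workers' argument used above can also request an explicit pool size.
## 'results4' is a hypothetical extra results list for this sketch.
# results4 = [None] * len(tasks)
# with mp.Parallelize(enumerate(tasks), results=results4, workers=2, progressDialog='processing with 2 workers..') as tasker:
#     for i, x in tasker:
#         tasker.results[i] = sum(j * x for j in range(size))
# print("Results match serial: %s" % str(results4 == results))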