Multiprocessing updates / fixes:

- ForkedProcess is much more careful with inherited state -- closes file handles, removes atexit and excepthook callbacks
- Remote processes copy sys.path from parent
- Parallelizer has ProgressDialog support
- Many docstring updates
- Added some test code for remote GraphicsView rendering
Luke Campagnola, 2012-06-21 22:00:04 -04:00
parent cc93c7ba43
commit d1fdbadd19
9 changed files with 643 additions and 129 deletions

@@ -0,0 +1,20 @@
# -*- coding: utf-8 -*-
import initExample ## Add path to library (just for examples; you do not need this)
from pyqtgraph.Qt import QtGui, QtCore
import pyqtgraph as pg
app = pg.mkQApp()
v = pg.RemoteGraphicsView()
v.show()
QtGui = v.pg.QtGui
rect = QtGui.QGraphicsRectItem(0,0,10,10)
rect.setPen(QtGui.QPen(QtGui.QColor(255,255,0)))
v.scene().addItem(rect)
## Start Qt event loop unless running in interactive mode or using pyside.
import sys
if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
    app.exec_()

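The same pattern extends to any QGraphicsItem: objects built through v.pg are created in the remote process, so they can be added to the remote scene exactly as above. A hedged variation reusing only the names from this example:

ellipse = QtGui.QGraphicsEllipseItem(2, 2, 6, 6)     ## constructed in the remote process
ellipse.setPen(QtGui.QPen(QtGui.QColor(0, 255, 0)))
v.scene().addItem(ellipse)                           ## scene() returns a proxy to the remote scene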
@@ -1,38 +1,10 @@
# -*- coding: utf-8 -*-
import initExample ## Add path to library (just for examples; you do not need this)
import numpy as np
import pyqtgraph.multiprocess as mp
from pyqtgraph.multiprocess.parallelizer import Parallelize #, Parallelizer
import pyqtgraph as pg
import time
print "\n=================\nParallelize"
tasks = [1,2,4,8]
results = [None] * len(tasks)
size = 2000000
start = time.time()
with Parallelize(enumerate(tasks), results=results, workers=1) as tasker:
    for i, x in tasker:
        print i, x
        tot = 0
        for j in xrange(size):
            tot += j * x
        results[i] = tot
print results
print "serial:", time.time() - start

start = time.time()
with Parallelize(enumerate(tasks), results=results) as tasker:
    for i, x in tasker:
        print i, x
        tot = 0
        for j in xrange(size):
            tot += j * x
        results[i] = tot
print results
print "parallel:", time.time() - start

examples/parallelize.py (new file)

@@ -0,0 +1,63 @@
# -*- coding: utf-8 -*-
import initExample ## Add path to library (just for examples; you do not need this)
import numpy as np
import pyqtgraph.multiprocess as mp
import pyqtgraph as pg
import time
print "\n=================\nParallelize"
## Do a simple task:
## for x in range(N):
## sum([x*i for i in range(M)])
##
## We'll do this three times
## - once without Parallelize
## - once with Parallelize, but forced to use a single worker
## - once with Parallelize automatically determining how many workers to use
##
tasks = range(10)
results = [None] * len(tasks)
results2 = results[:]
results3 = results[:]
size = 2000000
pg.mkQApp()
### Purely serial processing
start = time.time()
with pg.ProgressDialog('processing serially..', maximum=len(tasks)) as dlg:
    for i, x in enumerate(tasks):
        tot = 0
        for j in xrange(size):
            tot += j * x
        results[i] = tot
        dlg += 1
        if dlg.wasCanceled():
            raise Exception('processing canceled')
print "Serial time: %0.2f" % (time.time() - start)
### Use parallelize, but force a single worker
### (this simulates the behavior seen on windows, which lacks os.fork)
start = time.time()
with mp.Parallelize(enumerate(tasks), results=results2, workers=1, progressDialog='processing serially (using Parallelizer)..') as tasker:
    for i, x in tasker:
        tot = 0
        for j in xrange(size):
            tot += j * x
        tasker.results[i] = tot
print "\nParallel time, 1 worker: %0.2f" % (time.time() - start)
print "Results match serial: ", results2 == results
### Use parallelize with multiple workers
start = time.time()
with mp.Parallelize(enumerate(tasks), results=results3, progressDialog='processing in parallel..') as tasker:
    for i, x in tasker:
        tot = 0
        for j in xrange(size):
            tot += j * x
        tasker.results[i] = tot
print "\nParallel time, %d workers: %0.2f" % (mp.Parallelize.suggestedWorkerCount(), time.time() - start)
print "Results match serial: ", results3 == results

@@ -20,3 +20,5 @@ TODO:
"""
from processes import *
from parallelizer import Parallelize, CanceledError
from remoteproxy import proxy

multiprocess/bootstrap.py (new file)

@@ -0,0 +1,15 @@
"""For starting up remote processes"""
import sys, pickle
if __name__ == '__main__':
    name, port, authkey, targetStr, path = pickle.load(sys.stdin)
    if path is not None:
        ## rewrite sys.path without assigning a new object--no idea who already has a reference to the existing list.
        while len(sys.path) > 0:
            sys.path.pop()
        sys.path.extend(path)
    #import pyqtgraph
    #import pyqtgraph.multiprocess.processes
    target = pickle.loads(targetStr)  ## unpickling the target should import everything we need
    target(name, port, authkey)
    sys.exit(0)

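For context, this is roughly how a parent would feed the script above (mirroring the Process changes later in this commit; the name, port, authkey, and path values here are illustrative only):

import os, sys, subprocess, pickle
from pyqtgraph.multiprocess.processes import startEventLoop

bootstrap = os.path.abspath('multiprocess/bootstrap.py')  ## illustrative path
proc = subprocess.Popen((sys.executable, bootstrap), stdin=subprocess.PIPE)
## the child reads one pickled 5-tuple from stdin: (name, port, authkey, pickled target, sys.path)
pickle.dump(('demo_child', 50000, 'authkey', pickle.dumps(startEventLoop), sys.path), proc.stdin)
proc.stdin.close()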
@@ -2,6 +2,10 @@ import os, sys, time, multiprocessing
from processes import ForkedProcess
from remoteproxy import ExitError
class CanceledError(Exception):
    """Raised when the progress dialog is canceled during a processing operation."""
    pass

class Parallelize:
    """
    Class for ultra-simple inline parallelization on multi-core CPUs

@@ -29,35 +33,78 @@ class Parallelize:
        print results

    The only major caveat is that *result* in the example above must be picklable.
    The only major caveat is that *result* in the example above must be picklable,
    since it is automatically sent via pipe back to the parent process.
    """
    def __init__(self, tasks, workers=None, block=True, **kwds):
    def __init__(self, tasks, workers=None, block=True, progressDialog=None, **kwds):
        """
        Args:
          tasks - list of objects to be processed (Parallelize will determine how to distribute the tasks)
          workers - number of worker processes or None to use number of CPUs in the system
          kwds - objects to be shared by proxy with child processes
        =============== ===================================================================
        Arguments:
        tasks           list of objects to be processed (Parallelize will determine how to
                        distribute the tasks)
        workers         number of worker processes or None to use number of CPUs in the
                        system
        progressDialog  optional dict of arguments for ProgressDialog
                        to update while tasks are processed
        kwds            objects to be shared by proxy with child processes (they will
                        appear as attributes of the tasker)
        =============== ===================================================================
        """
        self.block = block

        ## Generate progress dialog.
        ## Note that we want to avoid letting forked child processes play with progress dialogs..
        self.showProgress = False
        if progressDialog is not None:
            self.showProgress = True
            if isinstance(progressDialog, basestring):
                progressDialog = {'labelText': progressDialog}
            import pyqtgraph as pg
            self.progressDlg = pg.ProgressDialog(**progressDialog)

        if workers is None:
            workers = multiprocessing.cpu_count()
            workers = self.suggestedWorkerCount()
        if not hasattr(os, 'fork'):
            workers = 1
        self.workers = workers
        self.tasks = list(tasks)
        self.kwds = kwds
        self.kwds = kwds.copy()
        self.kwds['_taskStarted'] = self._taskStarted

    def __enter__(self):
        self.proc = None
        workers = self.workers
        if workers == 1:
            return Tasker(None, self.tasks, self.kwds)
        if self.workers == 1:
            return self.runSerial()
        else:
            return self.runParallel()

    def __exit__(self, *exc_info):
        if self.proc is not None:  ## worker
            try:
                if exc_info[0] is not None:
                    sys.excepthook(*exc_info)
            finally:
                #print os.getpid(), 'exit'
                os._exit(0)
        else:  ## parent
            if self.showProgress:
                self.progressDlg.__exit__(None, None, None)

    def runSerial(self):
        if self.showProgress:
            self.progressDlg.__enter__()
            self.progressDlg.setMaximum(len(self.tasks))
        self.progress = {os.getpid(): []}
        return Tasker(None, self.tasks, self.kwds)

    def runParallel(self):
        self.childs = []

        ## break up tasks into one set per worker
        workers = self.workers
        chunks = [[] for i in xrange(workers)]
        i = 0
        for i in range(len(self.tasks)):
@@ -72,30 +119,74 @@ class Parallelize:
            else:
                self.childs.append(proc)

        ## process events from workers until all have exited.
        activeChilds = self.childs[:]
        while len(activeChilds) > 0:
            for ch in activeChilds:

        ## Keep track of the progress of each worker independently.
        self.progress = {ch.childPid: [] for ch in self.childs}
        ## for each child process, self.progress[pid] is a list
        ## of task indexes. The last index is the task currently being
        ## processed; all others are finished.

        try:
            if self.showProgress:
                self.progressDlg.__enter__()
                self.progressDlg.setMaximum(len(self.tasks))
            ## process events from workers until all have exited.
            activeChilds = self.childs[:]
            pollInterval = 0.01
            while len(activeChilds) > 0:
                waitingChildren = 0
                rem = []
                try:
                    ch.processRequests()
                except ExitError:
                    rem.append(ch)
            for ch in rem:
                activeChilds.remove(ch)
            time.sleep(0.1)
                for ch in activeChilds:
                    try:
                        n = ch.processRequests()
                        if n > 0:
                            waitingChildren += 1
                    except ExitError:
                        #print ch.childPid, 'process finished'
                        rem.append(ch)
                        if self.showProgress:
                            self.progressDlg += 1
                #print "remove:", [ch.childPid for ch in rem]
                for ch in rem:
                    activeChilds.remove(ch)
                    os.waitpid(ch.childPid, 0)
                #print [ch.childPid for ch in activeChilds]

                if self.showProgress and self.progressDlg.wasCanceled():
                    for ch in activeChilds:
                        ch.kill()
                    raise CanceledError()

                ## adjust polling interval--prefer to get exactly 1 event per poll cycle.
                if waitingChildren > 1:
                    pollInterval *= 0.7
                elif waitingChildren == 0:
                    pollInterval /= 0.7
                pollInterval = max(min(pollInterval, 0.5), 0.0005)  ## but keep it within reasonable limits

                time.sleep(pollInterval)
        finally:
            if self.showProgress:
                self.progressDlg.__exit__(None, None, None)

        return []  ## no tasks for parent process.

    def __exit__(self, *exc_info):
        if exc_info[0] is not None:
            sys.excepthook(*exc_info)
        if self.proc is not None:
            os._exit(0)

    def wait(self):
        ## wait for all child processes to finish
        pass

    @staticmethod
    def suggestedWorkerCount():
        return multiprocessing.cpu_count()  ## is this really the best option?

    def _taskStarted(self, pid, i, **kwds):
        ## called remotely by tasker to indicate it has started working on task i
        #print pid, 'reported starting task', i
        if self.showProgress:
            if len(self.progress[pid]) > 0:
                self.progressDlg += 1
            if pid == os.getpid():  ## single-worker process
                if self.progressDlg.wasCanceled():
                    raise CanceledError()
        self.progress[pid].append(i)
class Tasker:
    def __init__(self, proc, tasks, kwds):

@@ -106,9 +197,13 @@ class Tasker:
    def __iter__(self):
        ## we could fix this up such that tasks are retrieved from the parent process one at a time..
        for task in self.tasks:
        for i, task in enumerate(self.tasks):
            self.index = i
            #print os.getpid(), 'starting task', i
            self._taskStarted(os.getpid(), i, _callSync='off')
            yield task
        if self.proc is not None:
            #print os.getpid(), 'no more tasks'
            self.proc.close()

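Taken together, the new progressDialog argument and CanceledError give callers one place to handle user cancellation. A minimal usage sketch, assuming only the names defined in this file and re-exported from pyqtgraph.multiprocess:

import pyqtgraph as pg
import pyqtgraph.multiprocess as mp

pg.mkQApp()  ## ProgressDialog requires a QApplication
tasks = range(100)
results = [None] * len(tasks)
try:
    with mp.Parallelize(enumerate(tasks), results=results, progressDialog='crunching..') as tasker:
        for i, x in tasker:
            tasker.results[i] = x * x
except mp.CanceledError:
    print "processing canceled; partial results:", results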
@@ -1,10 +1,51 @@
from remoteproxy import RemoteEventHandler, ExitError, NoResultError, LocalObjectProxy, ObjectProxy
import subprocess, atexit, os, sys, time, random, socket
import subprocess, atexit, os, sys, time, random, socket, signal
import cPickle as pickle
import multiprocessing.connection
__all__ = ['Process', 'QtProcess', 'ForkedProcess', 'ExitError', 'NoResultError']
class Process(RemoteEventHandler):
    def __init__(self, name=None, target=None):
    """
    Bases: RemoteEventHandler

    This class is used to spawn and control a new python interpreter.
    It uses subprocess.Popen to start the new process and communicates with it
    using multiprocessing.Connection objects over a network socket.

    By default, the remote process will immediately enter an event-processing
    loop that carries out requests sent from the parent process.

    Remote control works mainly through proxy objects::

        proc = Process()              ## starts process, returns handle
        rsys = proc._import('sys')    ## asks remote process to import 'sys', returns
                                      ## a proxy which references the imported module
        rsys.stdout.write('hello\n')  ## This message will be printed from the remote
                                      ## process. Proxy objects can usually be used
                                      ## exactly as regular objects are.
        proc.close()                  ## Request the remote process shut down

    Requests made via proxy objects may be synchronous or asynchronous and may
    return objects either by proxy or by value (if they are picklable). See
    ProxyObject for more information.
    """
    def __init__(self, name=None, target=None, copySysPath=True):
        """
        ============  =============================================================
        Arguments:
        name          Optional name for this process used when printing messages
                      from the remote process.
        target        Optional function to call after starting remote process.
                      By default, this is startEventLoop(), which causes the remote
                      process to process requests from the parent process until it
                      is asked to quit. If you wish to specify a different target,
                      it must be picklable (bound methods are not).
        copySysPath   If true, copy the contents of sys.path to the remote process
        ============  =============================================================
        """
        if target is None:
            target = startEventLoop
        if name is None:
@@ -25,8 +66,12 @@ class Process(RemoteEventHandler):
                port += 1

        ## start remote process, instruct it to run target function
        self.proc = subprocess.Popen((sys.executable, __file__, 'remote'), stdin=subprocess.PIPE)
        pickle.dump((name+'_child', port, authkey, target), self.proc.stdin)
        sysPath = sys.path if copySysPath else None
        bootstrap = os.path.abspath(os.path.join(os.path.dirname(__file__), 'bootstrap.py'))
        self.proc = subprocess.Popen((sys.executable, bootstrap), stdin=subprocess.PIPE)
        targetStr = pickle.dumps(target)  ## double-pickle target so that child has a chance to
                                          ## set its sys.path properly before unpickling the target
        pickle.dump((name+'_child', port, authkey, targetStr, sysPath), self.proc.stdin)
        self.proc.stdin.close()

        ## open connection for remote process
@@ -60,16 +105,29 @@ def startEventLoop(name, port, authkey):

class ForkedProcess(RemoteEventHandler):
    """
    ForkedProcess is a substitute for Process that uses os.fork() to generate a new process.
    This is much faster than starting a completely new interpreter, but carries some caveats
    and limitations:
    - open file handles are shared with the parent process, which is potentially dangerous
    - it is not possible to have a QApplication in both parent and child process
      (unless both QApplications are created _after_ the call to fork())
    - generally not thread-safe. Also, threads are not copied by fork(); the new process
      will have only one thread that starts wherever fork() was called in the parent process.
    - forked processes are unceremoniously terminated when join() is called; they are not
      given any opportunity to clean up. (This prevents them calling any cleanup code that
      was only intended to be used by the parent process)
    This is much faster than starting a completely new interpreter and child processes
    automatically have a copy of the entire program state from before the fork. This
    makes it an appealing approach when parallelizing expensive computations. (see
    also Parallelizer)

    However, fork() comes with some caveats and limitations:

    - fork() is not available on Windows.
    - It is not possible to have a QApplication in both parent and child process
      (unless both QApplications are created _after_ the call to fork())
      Attempts by the forked process to access Qt GUI elements created by the parent
      will most likely cause the child to crash.
    - Likewise, database connections are unlikely to function correctly in a forked child.
    - Threads are not copied by fork(); the new process
      will have only one thread that starts wherever fork() was called in the parent process.
    - Forked processes are unceremoniously terminated when join() is called; they are not
      given any opportunity to clean up. (This prevents them calling any cleanup code that
      was only intended to be used by the parent process)
    - Normally when fork()ing, open file handles are shared with the parent process,
      which is potentially dangerous. ForkedProcess is careful to close all file handles
      that are not explicitly needed--stdout, stderr, and a single pipe to the parent
      process.
    """
    def __init__(self, name=None, target=0, preProxy=None):

@@ -101,16 +159,46 @@ class ForkedProcess(RemoteEventHandler):
        pid = os.fork()
        if pid == 0:
            self.isParent = False
            ## We are now in the forked process; need to be extra careful what we touch while here.
            ##  - no reading/writing file handles/sockets owned by parent process (stdout is ok)
            ##  - don't touch QtGui or QApplication at all; these are landmines.
            ##  - don't let the process call exit handlers
            ##  -

            ## close all file handles we do not want shared with parent
            conn.close()
            sys.stdin.close()  ## otherwise we screw with interactive prompts.
            fid = remoteConn.fileno()
            os.closerange(3, fid)
            os.closerange(fid+1, 4096)  ## just guessing on the maximum descriptor count..

            ## Override any custom exception hooks
            def excepthook(*args):
                import traceback
                traceback.print_exception(*args)
            sys.excepthook = excepthook

            ## Make it harder to access QApplication instance
            if 'PyQt4.QtGui' in sys.modules:
                sys.modules['PyQt4.QtGui'].QApplication = None
            sys.modules.pop('PyQt4.QtGui', None)
            sys.modules.pop('PyQt4.QtCore', None)

            ## sabotage atexit callbacks
            atexit._exithandlers = []
            atexit.register(lambda: os._exit(0))

            RemoteEventHandler.__init__(self, remoteConn, name+'_child', pid=os.getppid())

            if target is not None:
                target()

            ppid = os.getppid()
            self.forkedProxies = {}
            for name, proxyId in proxyIDs.iteritems():
                self.forkedProxies[name] = ObjectProxy(ppid, proxyId=proxyId, typeStr=repr(preProxy[name]))

            if target is not None:
                target()
        else:
            self.isParent = True
            self.childPid = pid
@@ -127,10 +215,11 @@ class ForkedProcess(RemoteEventHandler):
                self.processRequests()  # exception raised when the loop should exit
                time.sleep(0.01)
            except ExitError:
                sys.exit(0)
                break
            except:
                print "Error occurred in forked event loop:"
                sys.excepthook(*sys.exc_info())
        sys.exit(0)

    def join(self, timeout=10):
        if self.hasJoined:
@@ -138,10 +227,19 @@ class ForkedProcess(RemoteEventHandler):
        #os.kill(pid, 9)
        try:
            self.close(callSync='sync', timeout=timeout, noCleanup=True)  ## ask the child process to exit and require that it return a confirmation.
            os.waitpid(self.childPid, 0)
        except IOError:  ## probably remote process has already quit
            pass
        self.hasJoined = True

    def kill(self):
        """Immediately kill the forked remote process.
        This is generally safe because forked processes are already
        expected to _avoid_ any cleanup at exit."""
        os.kill(self.childPid, signal.SIGKILL)
        self.hasJoined = True
##Special set of subclasses that implement a Qt event loop instead.

@@ -165,8 +263,33 @@ class RemoteQtEventHandler(RemoteEventHandler):
            #raise
class QtProcess(Process):
    def __init__(self, name=None):
        Process.__init__(self, name, target=startQtEventLoop)
    """
    QtProcess is essentially the same as Process, with two major differences:

    - The remote process starts by running startQtEventLoop() which creates a
      QApplication in the remote process and uses a QTimer to trigger
      remote event processing. This allows the remote process to have its own
      GUI.
    - A QTimer is also started on the parent process which polls for requests
      from the child process. This allows Qt signals emitted within the child
      process to invoke slots on the parent process and vice-versa.

    Example::

        proc = QtProcess()
        rQtGui = proc._import('PyQt4.QtGui')
        btn = rQtGui.QPushButton('button on child process')
        btn.show()

        def slot():
            print 'slot invoked on parent process'
        btn.clicked.connect(proxy(slot))  # be sure to send a proxy of the slot
    """
    def __init__(self, **kwds):
        if 'target' not in kwds:
            kwds['target'] = startQtEventLoop
        Process.__init__(self, **kwds)
        self.startEventTimer()

    def startEventTimer(self):
@@ -201,8 +324,3 @@ def startQtEventLoop(name, port, authkey):
    app.exec_()

if __name__ == '__main__':
    if len(sys.argv) == 2 and sys.argv[1] == 'remote':  ## module has been invoked as script in new python interpreter.
        name, port, authkey, target = pickle.load(sys.stdin)
        target(name, port, authkey)
        sys.exit(0)

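A hedged sketch of driving ForkedProcess directly, using only calls shown in this file (the proxy semantics mirror the Process docstring above; exact behavior may differ):

from pyqtgraph.multiprocess.processes import ForkedProcess

proc = ForkedProcess()      ## child forks here and enters its event loop
rsys = proc._import('sys')  ## proxy to the child's sys module
rsys.stdout.write('hello from the fork\n', _callSync='off')
proc.join()                 ## ask the child to exit and reap it; proc.kill() would SIGKILL instead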
@@ -9,7 +9,26 @@ class NoResultError(Exception):
class RemoteEventHandler(object):
    """
    This class handles communication between two processes. One instance is present on
    each process and listens for communication from the other process. This enables
    (amongst other things) ObjectProxy instances to look up their attributes and call
    their methods.
    This class is responsible for carrying out actions on behalf of the remote process.
    Each instance holds one end of a Connection which allows python
    objects to be passed between processes.

    For the most common operations, see _import(), close(), and transfer()

    To handle and respond to incoming requests, RemoteEventHandler requires that its
    processRequests method is called repeatedly (this is usually handled by the Process
    classes defined in multiprocess.processes).
    """
    handlers = {}  ## maps {process ID : handler}. This allows unpickler to determine which process
                   ## an object proxy belongs to
@@ -55,19 +74,25 @@ class RemoteEventHandler(object):
    def processRequests(self):
        """Process all pending requests from the pipe, return
        after no more events are immediately available. (non-blocking)"""
        after no more events are immediately available. (non-blocking)

        Returns the number of events processed.
        """
        if self.exited:
            raise ExitError()

        numProcessed = 0
        while self.conn.poll():
            try:
                self.handleRequest()
                numProcessed += 1
            except ExitError:
                self.exited = True
                raise
            except:
                print "Error in process %s" % self.name
                sys.excepthook(*sys.exc_info())

        return numProcessed

    def handleRequest(self):
        """Handle a single request from the remote process.
@@ -175,6 +200,7 @@ class RemoteEventHandler(object):
            self.send(request='result', reqId=reqId, callSync='off', opts=dict(result=result))

    def replyError(self, reqId, *exc):
        print "error:", self.name, reqId, exc[1]
        excStr = traceback.format_exception(*exc)
        try:
            self.send(request='error', reqId=reqId, callSync='off', opts=dict(exception=exc[1], excString=excStr))
@@ -282,7 +308,9 @@ class RemoteEventHandler(object):
        try:
            optStr = pickle.dumps(opts)
        except:
            print "Error pickling:", opts
            print "==== Error pickling this object: ===="
            print opts
            print "======================================="
            raise

        request = (request, reqId, optStr)
@@ -381,8 +409,8 @@ class RemoteEventHandler(object):
    def transfer(self, obj, **kwds):
        """
        Transfer an object to the remote host (the object must be picklable) and return
        a proxy for the new remote object.
        Transfer an object by value to the remote host (the object must be picklable)
        and return a proxy for the new remote object.
        """
        return self.send(request='transfer', opts=dict(obj=obj), **kwds)
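A brief sketch contrasting transfer() with ordinary proxy calls (assuming a running Process as in multiprocess.processes):

proc = Process()
lst = proc.transfer([1, 2, 3])  ## sent by value; returns a proxy to the remote copy
lst.append(4, _callSync='off')  ## mutates the remote list, no return value requested
print lst._getValue()           ## pull the remote list back by value: [1, 2, 3, 4]
proc.close()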
@@ -395,7 +423,12 @@ class RemoteEventHandler(object):

class Request:
    ## used internally for tracking asynchronous requests and returning results
    """
    Request objects are returned when calling an ObjectProxy in asynchronous mode
    or if a synchronous call has timed out. Use hasResult() to ask whether
    the result of the call has been returned yet. Use result() to get
    the returned value.
    """
    def __init__(self, process, reqId, description=None, timeout=10):
        self.proc = process
        self.description = description
@@ -405,10 +438,13 @@ class Request:
        self.timeout = timeout

    def result(self, block=True, timeout=None):
        """Return the result for this request.
        """
        Return the result for this request.

        If block is True, wait until the result has arrived or *timeout* seconds passes.
        If the timeout is reached, raise an exception. (use timeout=None to disable)
        If block is False, raises an exception if the result has not arrived yet."""
        If the timeout is reached, raise NoResultError. (use timeout=None to disable)
        If block is False, raise NoResultError immediately if the result has not arrived yet.
        """
        if self.gotResult:
            return self._result
@@ -434,16 +470,24 @@ class Request:
    def hasResult(self):
        """Returns True if the result for this request has arrived."""
        try:
            #print "check result", self.description
            self.result(block=False)
        except NoResultError:
            #print "  -> not yet"
            pass
        return self.gotResult
class LocalObjectProxy(object):
    """Used for wrapping local objects to ensure that they are sent by proxy to a remote host."""
    """
    Used for wrapping local objects to ensure that they are sent by proxy to a remote host.

    Note that 'proxy' is just a shorter alias for LocalObjectProxy.

    For example::

        data = [1,2,3,4,5]
        remotePlot.plot(data)         ## by default, lists are pickled and sent by value
        remotePlot.plot(proxy(data))  ## force the object to be sent by proxy
    """
    nextProxyId = 0
    proxiedObjects = {}  ## maps {proxyId: object}
@@ -501,7 +545,44 @@ class ObjectProxy(object):
    attributes on existing proxy objects.

    For the most part, this object can be used exactly as if it
    were a local object.
    were a local object::

        rsys = proc._import('sys')  # returns proxy to sys module on remote process
        rsys.stdout                 # proxy to remote sys.stdout
        rsys.stdout.write           # proxy to remote sys.stdout.write
        rsys.stdout.write('hello')  # calls sys.stdout.write('hello') on remote machine
                                    # and returns the result (None)

    When calling a proxy to a remote function, the call can be made synchronous
    (result of call is returned immediately), asynchronous (result is returned later),
    or return can be disabled entirely::

        ros = proc._import('os')

        ## synchronous call; result is returned immediately
        pid = ros.getpid()

        ## asynchronous call
        request = ros.getpid(_callSync='async')
        while not request.hasResult():
            time.sleep(0.01)
        pid = request.result()

        ## disable return when we know it isn't needed
        rsys.stdout.write('hello', _callSync='off')

    Additionally, values returned from a remote function call are automatically
    returned either by value (must be picklable) or by proxy.
    This behavior can be forced::

        rnp = proc._import('numpy')
        arrProxy = rnp.array([1,2,3,4], _returnType='proxy')
        arrValue = rnp.array([1,2,3,4], _returnType='value')

    The default callSync and returnType behaviors (as well as others) can be set
    for each proxy individually using ObjectProxy._setProxyOptions() or globally using
    proc.setProxyOptions().
    """
    def __init__(self, processId, proxyId, typeStr='', parent=None):
        object.__init__(self)
@@ -574,6 +655,13 @@ class ObjectProxy(object):
        """
        self._proxyOptions.update(kwds)

    def _getValue(self):
        """
        Return the value of the proxied object
        (the remote object must be picklable)
        """
        return self._handler.getObjValue(self)

    def _getProxyOption(self, opt):
        val = self._proxyOptions[opt]
        if val is None:
@@ -591,20 +679,31 @@ class ObjectProxy(object):
        return "<ObjectProxy for process %d, object 0x%x: %s >" % (self._processId, self._proxyId, self._typeStr)

    def __getattr__(self, attr):
        #if '_processId' not in self.__dict__:
            #raise Exception("ObjectProxy has no processId")
        #proc = Process._processes[self._processId]
        deferred = self._getProxyOption('deferGetattr')
        if deferred is True:
    def __getattr__(self, attr, **kwds):
        """
        Calls __getattr__ on the remote object and returns the attribute
        by value or by proxy depending on the options set (see
        ObjectProxy._setProxyOptions and RemoteEventHandler.setProxyOptions)

        If the option 'deferGetattr' is True for this proxy, then a new proxy object
        is returned _without_ asking the remote object whether the named attribute exists.
        This can save time when making multiple chained attribute requests,
        but may also defer a possible AttributeError until later, making
        them more difficult to debug.
        """
        opts = self._getProxyOptions()
        for k in opts:
            if '_'+k in kwds:
                opts[k] = kwds.pop('_'+k)
        if opts['deferGetattr'] is True:
            return self._deferredAttr(attr)
        else:
            opts = self._getProxyOptions()
            #opts = self._getProxyOptions()
            return self._handler.getObjAttr(self, attr, **opts)

    def _deferredAttr(self, attr):
        return DeferredObjectProxy(self, attr)

    def __call__(self, *args, **kwds):
        """
        Attempts to call the proxied object from the remote process.
@@ -613,44 +712,34 @@ class ObjectProxy(object):
        _callSync    'off', 'sync', or 'async'
        _returnType  'value', 'proxy', or 'auto'

        If the remote call raises an exception on the remote process,
        it will be re-raised on the local process.
        """
        #opts = {}
        #callSync = kwds.pop('_callSync', self.)
        #if callSync is not None:
            #opts['callSync'] = callSync
        #returnType = kwds.pop('_returnType', self._defaultReturnValue)
        #if returnType is not None:
            #opts['returnType'] = returnType
        opts = self._getProxyOptions()
        for k in opts:
            if '_'+k in kwds:
                opts[k] = kwds.pop('_'+k)
        #print "call", opts
        return self._handler.callObj(obj=self, args=args, kwds=kwds, **opts)

    def _getValue(self):
        ## this just gives us an easy way to change the behavior of the special methods
        #proc = Process._processes[self._processId]
        return self._handler.getObjValue(self)

    ## Explicitly proxy special methods. Is there a better way to do this??
    def _getSpecialAttr(self, attr):
        #return self.__getattr__(attr)
        ## this just gives us an easy way to change the behavior of the special methods
        return self._deferredAttr(attr)
    def __getitem__(self, *args):
        return self._getSpecialAttr('__getitem__')(*args)

    def __setitem__(self, *args):
        return self._getSpecialAttr('__setitem__')(*args)
        return self._getSpecialAttr('__setitem__')(*args, _callSync='off')

    def __setattr__(self, *args):
        return self._getSpecialAttr('__setattr__')(*args)
        return self._getSpecialAttr('__setattr__')(*args, _callSync='off')

    def __str__(self, *args):
        return self._getSpecialAttr('__str__')(*args, _returnType=True)
        return self._getSpecialAttr('__str__')(*args, _returnType='value')

    def __len__(self, *args):
        return self._getSpecialAttr('__len__')(*args)
@@ -670,6 +759,21 @@ class ObjectProxy(object):
    def __pow__(self, *args):
        return self._getSpecialAttr('__pow__')(*args)

    def __iadd__(self, *args):
        return self._getSpecialAttr('__iadd__')(*args, _callSync='off')

    def __isub__(self, *args):
        return self._getSpecialAttr('__isub__')(*args, _callSync='off')

    def __idiv__(self, *args):
        return self._getSpecialAttr('__idiv__')(*args, _callSync='off')

    def __imul__(self, *args):
        return self._getSpecialAttr('__imul__')(*args, _callSync='off')

    def __ipow__(self, *args):
        return self._getSpecialAttr('__ipow__')(*args, _callSync='off')

    def __rshift__(self, *args):
        return self._getSpecialAttr('__rshift__')(*args)
@@ -679,6 +783,15 @@ class ObjectProxy(object):
    def __floordiv__(self, *args):
        return self._getSpecialAttr('__floordiv__')(*args)

    def __irshift__(self, *args):
        return self._getSpecialAttr('__irshift__')(*args, _callSync='off')

    def __ilshift__(self, *args):
        return self._getSpecialAttr('__ilshift__')(*args, _callSync='off')

    def __ifloordiv__(self, *args):
        return self._getSpecialAttr('__ifloordiv__')(*args, _callSync='off')
@@ -704,7 +817,16 @@ class ObjectProxy(object):
        return self._getSpecialAttr('__or__')(*args)

    def __xor__(self, *args):
        return self._getSpecialAttr('__or__')(*args)
        return self._getSpecialAttr('__xor__')(*args)

    def __iand__(self, *args):
        return self._getSpecialAttr('__iand__')(*args, _callSync='off')

    def __ior__(self, *args):
        return self._getSpecialAttr('__ior__')(*args, _callSync='off')

    def __ixor__(self, *args):
        return self._getSpecialAttr('__ixor__')(*args, _callSync='off')

    def __mod__(self, *args):
        return self._getSpecialAttr('__mod__')(*args)
@@ -746,6 +868,37 @@ class ObjectProxy(object):
        return self._getSpecialAttr('__rmod__')(*args)

class DeferredObjectProxy(ObjectProxy):
    """
    This class represents an attribute (or sub-attribute) of a proxied object.
    It is used to speed up attribute requests. Take the following scenario::

        rsys = proc._import('sys')
        rsys.stdout.write('hello')

    For this simple example, a total of 4 synchronous requests are made to
    the remote process:

    1) import sys
    2) getattr(sys, 'stdout')
    3) getattr(stdout, 'write')
    4) write('hello')

    This takes a lot longer than running the equivalent code locally. To
    speed things up, we can 'defer' the two attribute lookups so they are
    only carried out when necessary::

        rsys = proc._import('sys')
        rsys._setProxyOptions(deferGetattr=True)
        rsys.stdout.write('hello')

    This example only makes two requests to the remote process; the two
    attribute lookups return DeferredObjectProxy instances immediately
    without contacting the remote process. When the call to write() is made,
    all attribute requests are processed at the same time.

    Note that if the attributes requested do not exist on the remote object,
    making the call to write() will raise an AttributeError.
    """
    def __init__(self, parentProxy, attribute):
        ## can't set attributes directly because setattr is overridden.
        for k in ['_processId', '_typeStr', '_proxyId', '_handler']:
@@ -756,4 +909,10 @@ class DeferredObjectProxy(ObjectProxy):
    def __repr__(self):
        return ObjectProxy.__repr__(self) + '.' + '.'.join(self._attributes)

    def _undefer(self):
        """
        Return a non-deferred ObjectProxy referencing the same object
        """
        return self._parent.__getattr__(self._attributes[-1], _deferGetattr=False)

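A short sketch tying deferGetattr and _undefer() together (names as defined in this file; proc is a running Process):

rsys = proc._import('sys')
rsys._setProxyOptions(deferGetattr=True)
dstdout = rsys.stdout        ## DeferredObjectProxy; no remote request made yet
dstdout.write('hello\n')     ## one round trip resolves the lookups and the call together
stdout = dstdout._undefer()  ## force the attribute lookup; returns a plain ObjectProxy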
@@ -0,0 +1,70 @@
from pyqtgraph.Qt import QtGui, QtCore
import pyqtgraph.multiprocess as mp
import pyqtgraph as pg
import numpy as np
import ctypes, os

__all__ = ['RemoteGraphicsView']

class RemoteGraphicsView(QtGui.QWidget):
    def __init__(self, parent=None, *args, **kwds):
        self._img = None
        self._imgReq = None
        QtGui.QWidget.__init__(self)
        self._proc = mp.QtProcess()
        self.pg = self._proc._import('pyqtgraph')
        rpgRemote = self._proc._import('pyqtgraph.widgets.RemoteGraphicsView')
        self._view = rpgRemote.Renderer(*args, **kwds)
        self._view._setProxyOptions(deferGetattr=True)
        self._view.sceneRendered.connect(mp.proxy(self.remoteSceneChanged))

    def scene(self):
        return self._view.scene()

    def resizeEvent(self, ev):
        ret = QtGui.QWidget.resizeEvent(self, ev)
        self._view.resize(self.size(), _callSync='off')
        return ret

    def remoteSceneChanged(self, data):
        self._img = pg.makeQImage(data, alpha=True)
        self.update()

    def paintEvent(self, ev):
        if self._img is None:
            return
        p = QtGui.QPainter(self)
        p.drawImage(self.rect(), self._img, self.rect())
        p.end()

class Renderer(pg.GraphicsView):
    sceneRendered = QtCore.Signal(object)

    def __init__(self, *args, **kwds):
        pg.GraphicsView.__init__(self, *args, **kwds)
        self.scene().changed.connect(self.update)
        self.img = None
        self.renderTimer = QtCore.QTimer()
        self.renderTimer.timeout.connect(self.renderView)
        self.renderTimer.start(16)

    def update(self):
        self.img = None
        return pg.GraphicsView.update(self)

    def resize(self, size):
        pg.GraphicsView.resize(self, size)
        self.update()

    def renderView(self):
        if self.img is None:
            self.img = QtGui.QImage(self.width(), self.height(), QtGui.QImage.Format_ARGB32)
            self.img.fill(0xffffffff)
            p = QtGui.QPainter(self.img)
            self.render(p, self.viewRect(), self.rect())
            p.end()
            self.data = np.fromstring(ctypes.string_at(int(self.img.bits()), self.img.byteCount()), dtype=np.ubyte).reshape(self.height(), self.width(), 4).transpose(1, 0, 2)
            #self.data = ctypes.string_at(int(self.img.bits()), self.img.byteCount())
            self.sceneRendered.emit(self.data)
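The Renderer-to-parent image path above reduces to a QImage -> numpy -> QImage round trip. A condensed sketch of just that conversion, using the same PyQt4-era calls as the code above:

import ctypes
import numpy as np
import pyqtgraph as pg
from pyqtgraph.Qt import QtGui

app = pg.mkQApp()
img = QtGui.QImage(64, 64, QtGui.QImage.Format_ARGB32)
img.fill(0xffffffff)
## copy the raw ARGB bytes out of the QImage and reshape to (width, height, 4)
raw = ctypes.string_at(int(img.bits()), img.byteCount())
data = np.fromstring(raw, dtype=np.ubyte).reshape(img.height(), img.width(), 4).transpose(1, 0, 2)
## the receiving side rebuilds a QImage from the array (see remoteSceneChanged above)
img2 = pg.makeQImage(data, alpha=True)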