I spent quite a bit of time looking into how to use the multiprocessing package, but couldn't find anything on how to use it inside a QGIS plugin. I am developing a plugin that runs an optimization for several elements, and I would like to parallelize it.
I found a useful link on multi-threading inside a Python plugin (http://snorf.net/blog/2013/12/07/multithreading-in-qgis-python-plugins/), but nothing on using the multiprocessing module, which might be easier?
I have been trying with a very basic example. I am only showing the run function from the plugin here:
def run(self):
    """Run method that performs all the real work"""
    # show the dialog
    self.dlg.show()
    # Run the dialog event loop
    result = self.dlg.exec_()
    # See if OK was pressed and run code
    if result:
        # Get number of cores
        nProcs = mp.cpu_count()
        # Start a pool of processes
        p = mp.Pool(nProcs)
        # Define function
        def cube(x):
            return x**3
        # Run in parallel
        results = p.map(cube, range(1, 7))
When I run this code from the plugin in QGIS, it opens several QGIS windows, which then return errors (can't load layers, etc.). What am I missing? Do I need to start a worker on another thread first and then use multiprocessing there? Or should I use another function from multiprocessing?
Please let me know if the question needs edits. I am working under Windows 7, using QGIS 2.10.
Thanks,
UPDATE
I created a worker class to implement the function and sent it to a new thread, but I get the same problem when I use multiprocessing in that thread.
The class I created is as follows:
# Imports assumed by this snippet (QGIS 2.x / PyQt4, Python 2):
import inspect
import itertools as itools
import multiprocessing as mp
import Queue as queue
from PyQt4.QtCore import QObject, QThread, pyqtSignal

class Worker(QObject):
    '''Example worker'''

    def __init__(self, result_queue, f, attr=[], repet=None, nbCores=None):
        QObject.__init__(self)
        if not hasattr(f, '__call__'):
            # Check that f is a function
            raise TypeError('Worker expected a function as second argument')
        if not isinstance(attr, list) and not repet == None:
            # Check that attr is a list if repet is provided
            raise TypeError('Input problem:\nThe arguments for the function should be in a list if repet is provided')
        if not all(isinstance(elem, list) for elem in attr) and repet == None and len(inspect.getargspec(f).args) > 1:
            # Check that attr is a list of lists if repet is not provided
            raise TypeError('Input problem:\nThe arguments for the function should be a list of lists if repet is not provided')
        if not repet == None and (not isinstance(repet, int) or repet == 0):
            # Check that repet, if provided, is a strictly positive integer
            raise TypeError('If provided, repet should be None or a strictly positive integer')
        self.result_queue = result_queue
        self.f = f
        self.attr = attr
        self.repet = repet
        self.nbCores = nbCores
        if self.nbCores == None:
            self.nbCores = mp.cpu_count() - 1

    def fStar(self, arg):
        """Convert the function to taking a list as arguments"""
        return self.f(*arg)

    def run(self):
        ret = None
        try:
            if self.repet == 1:
                # Estimate the function based on the provided arguments
                ret = self.f(*self.attr)  # The star unpacks the list into arguments
            else:
                pool = mp.Pool(processes=self.nbCores)
                if self.repet > 1:
                    ret = pool.map(self.fStar, itools.repeat(self.attr, self.repet))
                elif self.repet == None:
                    ret = pool.map(self.fStar, self.attr)
                pool.close()
                pool.join()
        except Exception, e:
            # I can't re-raise the exception here, it makes QGIS crash
            pass
        self.result_queue.put(ret)  # Pass the result to the queue

    finished = pyqtSignal(object)
    error = pyqtSignal(Exception, basestring)
I start the worker and send it to a new thread using the following function:
def startWorker(f, attr, repet=None, nbCores=None):
    # Create a result queue
    result_queue = queue.Queue()
    # Create a new worker instance
    worker = Worker(result_queue, f, attr, repet, nbCores)
    # Start the worker in a new thread
    thread = QThread()
    worker.moveToThread(thread)
    thread.started.connect(worker.run)
    thread.start()
    # Clean up when the thread is finished
    worker.deleteLater()
    thread.quit()
    thread.wait()
    thread.deleteLater()
    # Export the results from the queue
    res = []
    while not result_queue.empty():
        r = result_queue.get()
        if r is None:
            continue
        res.append(r)
    return res
As in my initial question, I simply replaced results = p.map(cube, range(1,7)) with a call to the startWorker function.
Please let me know if you have any idea how to make this work. I implemented the work in multiple threads, but it would be much faster to use several cores...
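One lead I found while searching, which I have not verified in my own setup: on Windows, multiprocessing launches each child process by re-running sys.executable, and inside QGIS that is the QGIS application itself, which would explain the extra QGIS windows. The documented workaround for embedded interpreters is multiprocessing.set_executable(); here is a minimal sketch, with an illustrative path and a hypothetical helper name:

import os
import sys
import multiprocessing as mp

def run_pool_safely(func, args):
    """Hypothetical helper: run a Pool from inside an embedded interpreter."""
    if sys.platform.startswith('win'):
        # Point multiprocessing at the stand-alone interpreter bundled with
        # QGIS instead of qgis.exe itself (path is illustrative).
        mp.set_executable(os.path.join(sys.exec_prefix, 'pythonw.exe'))
        sys.argv = [None]  # some embedded setups reportedly need a dummy argv
    pool = mp.Pool(mp.cpu_count() - 1)
    try:
        return pool.map(func, args)
    finally:
        pool.close()
        pool.join()

Note also that, as far as I understand, functions passed to Pool.map on Windows need to be picklable, so defining cube inside run() (or mapping over a bound method like fStar) may be part of the problem as well.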
I need to pass a kwarg to the parent class of my equivalent of FingerFactoryFromService using super.
I know I am actually passing the kwarg to IFingerFactory, because that is also where I pass the service that ends up in FingerFactoryFromService.__init__, and I can understand that it is getting tripped up somewhere in the component system, but I cannot think of any other way to do it.
The error I keep getting is
exceptions.TypeError: 'test' is an invalid keyword argument for this function
Versions of code in my virtualenv are:
pip (1.4.1)
setuptools (1.1.6)
Twisted (13.1.0)
wsgiref (0.1.2)
zope.interface (4.0.5)
This is a cutdown example from the finger tutorial demonstrating the issue:
from twisted.protocols import basic
from twisted.application import internet, service
from twisted.internet import protocol, reactor, defer
from twisted.python import components
from zope.interface import Interface, implements  # @UnresolvedImport


class IFingerService(Interface):

    def getUser(user):  # @NoSelf
        """
        Return a deferred returning a string.
        """

    def getUsers():  # @NoSelf
        """
        Return a deferred returning a list of strings.
        """


class IFingerFactory(Interface):

    def getUser(user):  # @NoSelf
        """
        Return a deferred returning a string.
        """

    def buildProtocol(addr):  # @NoSelf
        """
        Return a protocol returning a string.
        """


def catchError(err):
    return "Internal error in server"


class FingerProtocol(basic.LineReceiver):

    def lineReceived(self, user):
        d = self.factory.getUser(user)
        d.addErrback(catchError)

        def writeValue(value):
            self.transport.write(value + '\r\n')
            self.transport.loseConnection()

        d.addCallback(writeValue)


class FingerService(service.Service):
    implements(IFingerService)

    def __init__(self, filename):
        self.filename = filename
        self.users = {}

    def _read(self):
        self.users.clear()
        for line in file(self.filename):
            user, status = line.split(':', 1)
            user = user.strip()
            status = status.strip()
            self.users[user] = status
        self.call = reactor.callLater(30, self._read)  # @UndefinedVariable

    def getUser(self, user):
        print user
        return defer.succeed(self.users.get(user, "No such user"))

    def getUsers(self):
        return defer.succeed(self.users.keys())

    def startService(self):
        self._read()
        service.Service.startService(self)

    def stopService(self):
        service.Service.stopService(self)
        self.call.cancel()


class FingerFactoryFromService(protocol.ServerFactory):
    implements(IFingerFactory)

    protocol = FingerProtocol

    #def __init__(self, srv):
    def __init__(self, srv, test=None):
        self.service = srv
        ## I need to call super here because my equivalent of ServerFactory requires
        ## a kwarg, but this cutdown example doesn't, so I just assign it to a property
        # super(FingerFactoryFromService, self).__init__(test=test)
        self.test_thing = test or 'Default Something'

    def getUser(self, user):
        return self.service.getUser(user)


components.registerAdapter(FingerFactoryFromService,
                           IFingerService,
                           IFingerFactory)

application = service.Application('finger')
serviceCollection = service.IServiceCollection(application)

finger_service = FingerService('/etc/passwd')
finger_service.setServiceParent(serviceCollection)

#line_finger_factory = IFingerFactory(finger_service)
line_finger_factory = IFingerFactory(finger_service, test='Something')

line_finger_server = internet.TCPServer(1079, line_finger_factory)
line_finger_server.setServiceParent(serviceCollection)
This has nothing to do with the component system. What you want to do is override the Factory's buildProtocol method, as documented here:
https://twistedmatrix.com/documents/current/core/howto/servers.html#auto9
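For instance, a minimal sketch of that approach against the cutdown example above (not tested; the extra attribute handed to the protocol is illustrative):

class FingerFactoryFromService(protocol.ServerFactory):
    implements(IFingerFactory)

    protocol = FingerProtocol

    def __init__(self, srv, test=None):
        self.service = srv
        self.test_thing = test or 'Default Something'

    def buildProtocol(self, addr):
        # Build the protocol ourselves instead of relying on the default
        # ServerFactory behaviour, so we can hand extra state to it.
        proto = self.protocol()
        proto.factory = self
        proto.test_thing = self.test_thing  # illustrative extra attribute
        return proto

    def getUser(self, user):
        return self.service.getUser(user)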
We have code that invokes a variable number of context managers depending on runtime parameters:
from contextlib import nested, contextmanager
@contextmanager
def my_context(arg):
    print("entering", arg)
    try:
        yield arg
    finally:
        print("exiting", arg)

def my_fn(items):
    with nested(*(my_context(arg) for arg in items)) as managers:
        print("processing under", managers)

my_fn(range(3))
However, contextlib.nested is deprecated since Python 2.7:
DeprecationWarning: With-statements now directly support multiple context managers
The answers to Multiple variables in Python 'with' statement indicate that contextlib.nested has some "confusing error prone quirks", but the suggested alternative of using the multiple-manager with statement won't work for a variable number of context managers (and also breaks backward compatibility).
Are there any alternatives to contextlib.nested that aren't deprecated and (preferably) don't have the same bugs?
Or should I continue to use contextlib.nested and ignore the warning? If so, should I plan for contextlib.nested to be removed at some time in the future?
The new Python 3 contextlib.ExitStack class was added as a replacement for contextlib.nested() (see issue 13585).
It is coded in such a way that you can use it in Python 2 directly:
import sys
from collections import deque


class ExitStack(object):
    """Context manager for dynamic management of a stack of exit callbacks

    For example:

        with ExitStack() as stack:
            files = [stack.enter_context(open(fname)) for fname in filenames]
            # All opened files will automatically be closed at the end of
            # the with statement, even if attempts to open files later
            # in the list raise an exception

    """
    def __init__(self):
        self._exit_callbacks = deque()

    def pop_all(self):
        """Preserve the context stack by transferring it to a new instance"""
        new_stack = type(self)()
        new_stack._exit_callbacks = self._exit_callbacks
        self._exit_callbacks = deque()
        return new_stack

    def _push_cm_exit(self, cm, cm_exit):
        """Helper to correctly register callbacks to __exit__ methods"""
        def _exit_wrapper(*exc_details):
            return cm_exit(cm, *exc_details)
        _exit_wrapper.__self__ = cm
        self.push(_exit_wrapper)

    def push(self, exit):
        """Registers a callback with the standard __exit__ method signature

        Can suppress exceptions the same way __exit__ methods can.

        Also accepts any object with an __exit__ method (registering a call
        to the method instead of the object itself)
        """
        # We use an unbound method rather than a bound method to follow
        # the standard lookup behaviour for special methods
        _cb_type = type(exit)
        try:
            exit_method = _cb_type.__exit__
        except AttributeError:
            # Not a context manager, so assume it's a callable
            self._exit_callbacks.append(exit)
        else:
            self._push_cm_exit(exit, exit_method)
        return exit  # Allow use as a decorator

    def callback(self, callback, *args, **kwds):
        """Registers an arbitrary callback and arguments.

        Cannot suppress exceptions.
        """
        def _exit_wrapper(exc_type, exc, tb):
            callback(*args, **kwds)
        # We changed the signature, so using @wraps is not appropriate, but
        # setting __wrapped__ may still help with introspection
        _exit_wrapper.__wrapped__ = callback
        self.push(_exit_wrapper)
        return callback  # Allow use as a decorator

    def enter_context(self, cm):
        """Enters the supplied context manager

        If successful, also pushes its __exit__ method as a callback and
        returns the result of the __enter__ method.
        """
        # We look up the special methods on the type to match the with statement
        _cm_type = type(cm)
        _exit = _cm_type.__exit__
        result = _cm_type.__enter__(cm)
        self._push_cm_exit(cm, _exit)
        return result

    def close(self):
        """Immediately unwind the context stack"""
        self.__exit__(None, None, None)

    def __enter__(self):
        return self

    def __exit__(self, *exc_details):
        # We manipulate the exception state so it behaves as though
        # we were actually nesting multiple with statements
        frame_exc = sys.exc_info()[1]

        def _fix_exception_context(new_exc, old_exc):
            while 1:
                exc_context = new_exc.__context__
                if exc_context in (None, frame_exc):
                    break
                new_exc = exc_context
            new_exc.__context__ = old_exc

        # Callbacks are invoked in LIFO order to match the behaviour of
        # nested context managers
        suppressed_exc = False
        while self._exit_callbacks:
            cb = self._exit_callbacks.pop()
            try:
                if cb(*exc_details):
                    suppressed_exc = True
                    exc_details = (None, None, None)
            except:
                new_exc_details = sys.exc_info()
                # simulate the stack of exceptions by setting the context
                _fix_exception_context(new_exc_details[1], exc_details[1])
                if not self._exit_callbacks:
                    raise
                exc_details = new_exc_details
        return suppressed_exc
Use this as your context manager, then add nested context managers at will:
with ExitStack() as stack:
    managers = [stack.enter_context(my_context(arg)) for arg in items]
    print("processing under", managers)
For your example context manager, this prints:
>>> my_fn(range(3))
('entering', 0)
('entering', 1)
('entering', 2)
('processing under', [0, 1, 2])
('exiting', 2)
('exiting', 1)
('exiting', 0)
You can also install the contextlib2 module; it includes ExitStack as a backport.
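A minimal sketch of the same pattern using the backport package (assuming contextlib2 is installed from PyPI):

from contextlib2 import ExitStack

def my_fn(items):
    with ExitStack() as stack:
        managers = [stack.enter_context(my_context(arg)) for arg in items]
        print("processing under", managers)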
It's a little vexing that the Python 3 maintainers chose to break backwards compatibility, since implementing nested in terms of ExitStack is pretty straightforward:
try:
    from contextlib import nested  # Python 2
except ImportError:
    from contextlib import ExitStack, contextmanager

    @contextmanager
    def nested(*contexts):
        """
        Reimplementation of nested in Python 3.
        """
        with ExitStack() as stack:
            for ctx in contexts:
                stack.enter_context(ctx)
            yield contexts
import sys
import contextlib


class nodeA(object):
    def __init__(self):
        print('__init__ nodeA')
    def __enter__(self):
        print('__enter__ nodeA')
    def __exit__(self, a, b, c):
        print('__exit__ nodeA')


class nodeB(object):
    def __init__(self):
        print('__init__ nodeB')
    def __enter__(self):
        print('__enter__ nodeB')
    def __exit__(self, a, b, c):
        print('__exit__ nodeB')


class nodeC(object):
    def __init__(self):
        print('__init__ nodeC')
    def __enter__(self):
        print('__enter__ nodeC')
    def __exit__(self, a, b, c):
        print('__exit__ nodeC')


print('Start...')
a = nodeA()
b = nodeB()
c = nodeC()

print('Python version: %s' % (sys.version))
if sys.version.startswith('2'):
    print('Use python 2!')
    with contextlib.nested(a, b, c):
        print('hallo?')
if sys.version.startswith('3'):
    print('Use python 3!')
    with contextlib.ExitStack() as stack:
        [stack.enter_context(arg) for arg in [a, b, c]]
print('...end!')
Are there any exemplary examples of the GoF Observer implemented in Python? I have a bit of code which currently has debugging code laced through the key class (currently generating messages to stderr if a magic env variable is set). Additionally, the class has an interface for incrementally returning results as well as storing them (in memory) for post-processing. (The class itself is a job manager for concurrently executing commands on remote machines over ssh.)
Currently the usage of the class looks something like:
job = SSHJobMan(hostlist, cmd)
job.start()
while not job.done():
    for each in job.poll():
        incrementally_process(job.results[each])
    time.sleep(0.2)  # or other more useful work

post_process(job.results)
An alernative usage model is:
job = SSHJobMan(hostlist, cmd)
job.wait() # implicitly performs a start()
process(job.results)
This all works fine for the current utility. However, it does lack flexibility. For example, I currently support a brief output format or a progress bar as incremental results; I also support brief, complete, and "merged message" outputs for the post_process() function.
However, I'd like to support multiple results/output streams (progress bar to the terminal, debugging and warnings to a log file, outputs from successful jobs to one file/directory, error messages and other results from non-successful jobs to another, etc).
This sounds like a situation that calls for Observer ... have instances of my class accept registration from other objects and call them back with specific types of events as they occur.
I'm looking at PyPubSub since I saw several references to that in SO related questions. I'm not sure I'm ready to add the external dependency to my utility but I could see value in using their interface as a model for mine if that's going to make it easier for others to use. (The project is intended as both a standalone command line utility and a class for writing other scripts/utilities).
In short I know how to do what I want ... but there are numerous ways to accomplish it. I want suggestions on what's most likely to work for other users of the code in the long run.
The code itself is at: classh.
However it does lack flexibility.
Well... actually, this looks like a good design to me if an asynchronous API is what you want. It usually is. Maybe all you need is to switch from stderr to Python's logging module, which has a sort of publish/subscribe model of its own, what with Logger.addHandler() and so on.
If you do want to support observers, my advice is to keep it simple. You really only need a few lines of code.
class Event(object):
    pass

class Observable(object):
    def __init__(self):
        self.callbacks = []

    def subscribe(self, callback):
        self.callbacks.append(callback)

    def fire(self, **attrs):
        e = Event()
        e.source = self
        for k, v in attrs.items():
            setattr(e, k, v)
        for fn in self.callbacks:
            fn(e)
Your Job class can subclass Observable. When something of interest happens, call self.fire(type="progress", percent=50) or the like.
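A minimal sketch of how that might look (the Job class and the event names here are illustrative, not from the original code):

class Job(Observable):
    def __init__(self, name):
        Observable.__init__(self)
        self.name = name

    def run(self):
        self.fire(type="started", job=self.name)
        # ... do the actual work here ...
        self.fire(type="progress", percent=50)
        self.fire(type="finished", job=self.name)

# A consumer subscribes with any callable that accepts the event object.
def print_event(e):
    print("event: %s from %r" % (e.type, e.source))

job = Job("uptime on host1")
job.subscribe(print_event)
job.run()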
I think people in the other answers overdo it. You can easily achieve events in Python with fewer than 15 lines of code.
You simply have two classes: Event and Observer. Any class that wants to listen for an event needs to inherit from Observer and set itself to listen (observe) for a specific event. When an Event is instantiated and fired, all observers listening to that event will run the specified callback functions.
class Observer():
    _observers = []

    def __init__(self):
        self._observers.append(self)
        self._observables = {}

    def observe(self, event_name, callback):
        self._observables[event_name] = callback


class Event():
    def __init__(self, name, data, autofire=True):
        self.name = name
        self.data = data
        if autofire:
            self.fire()

    def fire(self):
        for observer in Observer._observers:
            if self.name in observer._observables:
                observer._observables[self.name](self.data)
Example:
class Room(Observer):
    def __init__(self):
        print("Room is ready.")
        Observer.__init__(self)  # Observer's init needs to be called

    def someone_arrived(self, who):
        print(who + " has arrived!")

room = Room()
room.observe('someone arrived', room.someone_arrived)

Event('someone arrived', 'Lenard')
Output:
Room is ready.
Lenard has arrived!
A few more approaches...
Example: the logging module
Maybe all you need is to switch from stderr to Python's logging module, which has a powerful publish/subscribe model.
It's easy to get started producing log records.
# producer
import logging

log = logging.getLogger("myjobs")  # that's all the setup you need

class MyJob(object):
    def run(self):
        log.info("starting job")
        n = 10
        for i in range(n):
            log.info("%.1f%% done" % (100.0 * i / n))
        log.info("work complete")
On the consumer side there's a bit more work. Unfortunately configuring logger output takes, like, 7 whole lines of code to do. ;)
# consumer
import myjobs, sys, logging

if user_wants_log_output:
    ch = logging.StreamHandler(sys.stderr)
    ch.setLevel(logging.INFO)
    formatter = logging.Formatter(
        "%(asctime)s - %(name)s - %(levelname)s - %(message)s")
    ch.setFormatter(formatter)
    myjobs.log.addHandler(ch)
    myjobs.log.setLevel(logging.INFO)

myjobs.MyJob().run()
On the other hand there's an amazing amount of stuff in the logging package. If you ever need to send log data to a rotating set of files, an email address, and the Windows Event Log, you're covered.
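For instance, a sketch of adding a rotating file on top of the console handler above (the handler settings are illustrative):

import logging, logging.handlers

fh = logging.handlers.RotatingFileHandler(
    "myjobs.log", maxBytes=1024 * 1024, backupCount=5)
fh.setLevel(logging.INFO)
fh.setFormatter(logging.Formatter(
    "%(asctime)s - %(name)s - %(levelname)s - %(message)s"))
myjobs.log.addHandler(fh)  # records now go to stderr and to the file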
Example: simplest possible observer
But you don't need to use any library at all. An extremely simple way to support observers is to call a method that does nothing.
# producer
class MyJob(object):
    def on_progress(self, pct):
        """Called when progress is made. pct is the percent complete.
        By default this does nothing. The user may override this method
        or even just assign to it."""
        pass

    def run(self):
        n = 10
        for i in range(n):
            self.on_progress(100.0 * i / n)
        self.on_progress(100.0)

# consumer
import sys, myjobs

job = myjobs.MyJob()
job.on_progress = lambda pct: sys.stdout.write("%.1f%% done\n" % pct)
job.run()
Sometimes instead of writing a lambda, you can just say job.on_progress = progressBar.update, which is nice.
This is about as simple as it gets. One drawback is that it doesn't naturally support multiple listeners subscribing to the same events.
Example: C#-like events
With a bit of support code, you can get C#-like events in Python. Here's the code:
# glue code
class event(object):
    def __init__(self, func):
        self.__doc__ = func.__doc__
        self._key = ' ' + func.__name__

    def __get__(self, obj, cls):
        try:
            return obj.__dict__[self._key]
        except KeyError, exc:
            be = obj.__dict__[self._key] = boundevent()
            return be


class boundevent(object):
    def __init__(self):
        self._fns = []

    def __iadd__(self, fn):
        self._fns.append(fn)
        return self

    def __isub__(self, fn):
        self._fns.remove(fn)
        return self

    def __call__(self, *args, **kwargs):
        for f in self._fns[:]:
            f(*args, **kwargs)
The producer declares the event using a decorator:
# producer
class MyJob(object):
    @event
    def progress(pct):
        """Called when progress is made. pct is the percent complete."""

    def run(self):
        n = 10
        for i in range(n + 1):
            self.progress(100.0 * i / n)

# consumer
import sys, myjobs

job = myjobs.MyJob()
job.progress += lambda pct: sys.stdout.write("%.1f%% done\n" % pct)
job.run()
This works exactly like the "simple observer" code above, but you can add as many listeners as you like using +=. (Unlike C#, there are no event handler types, you don't have to new EventHandler(foo.bar) when subscribing to an event, and you don't have to check for null before firing the event. Like C#, events do not squelch exceptions.)
How to choose
If logging does everything you need, use that. Otherwise do the simplest thing that works for you. The key thing to note is that you don't need to take on a big external dependency.
How about an implementation where objects aren't kept alive just because they're observing something? Below please find an implementation of the observer pattern with the following features:
Usage is pythonic. To add an observer to a bound method .bar of instance foo, just do foo.bar.addObserver(observer).
Observers are not kept alive by virtue of being observers. In other words, the observer code uses no strong references.
No sub-classing necessary (descriptors ftw).
Can be used with unhashable types.
Can be used as many times as you want in a single class.
(bonus) As of today the code exists in a proper downloadable, installable package on github.
Here's the code (the github package or PyPI package have the most up to date implementation):
import weakref
import functools


class ObservableMethod(object):
    """
    A proxy for a bound method which can be observed.

    I behave like a bound method, but other bound methods can subscribe to be
    called whenever I am called.
    """

    def __init__(self, obj, func):
        self.func = func
        functools.update_wrapper(self, func)
        self.objectWeakRef = weakref.ref(obj)
        self.callbacks = {}  # observing object ID -> weak ref, methodNames

    def addObserver(self, boundMethod):
        """
        Register a bound method to observe this ObservableMethod.

        The observing method will be called whenever this ObservableMethod is
        called, and with the same arguments and keyword arguments. If a
        boundMethod has already been registered as a callback, trying to add
        it again does nothing. In other words, there is no way to sign up an
        observer to be called back multiple times.
        """
        obj = boundMethod.__self__
        ID = id(obj)
        if ID in self.callbacks:
            s = self.callbacks[ID][1]
        else:
            wr = weakref.ref(obj, Cleanup(ID, self.callbacks))
            s = set()
            self.callbacks[ID] = (wr, s)
        s.add(boundMethod.__name__)

    def discardObserver(self, boundMethod):
        """
        Un-register a bound method.
        """
        obj = boundMethod.__self__
        if id(obj) in self.callbacks:
            self.callbacks[id(obj)][1].discard(boundMethod.__name__)

    def __call__(self, *arg, **kw):
        """
        Invoke the method which I proxy, and all of its callbacks.

        The callbacks are called with the same *args and **kw as the main
        method.
        """
        result = self.func(self.objectWeakRef(), *arg, **kw)
        for ID in self.callbacks:
            wr, methodNames = self.callbacks[ID]
            obj = wr()
            for methodName in methodNames:
                getattr(obj, methodName)(*arg, **kw)
        return result

    @property
    def __self__(self):
        """
        Get a strong reference to the object owning this ObservableMethod.

        This is needed so that ObservableMethod instances can observe other
        ObservableMethod instances.
        """
        return self.objectWeakRef()


class ObservableMethodDescriptor(object):

    def __init__(self, func):
        """
        To each instance of the class using this descriptor, I associate an
        ObservableMethod.
        """
        self.instances = {}  # Instance id -> (weak ref, ObservableMethod)
        self._func = func

    def __get__(self, inst, cls):
        if inst is None:
            return self
        ID = id(inst)
        if ID in self.instances:
            wr, om = self.instances[ID]
            if not wr():
                msg = "Object id %d should have been cleaned up" % (ID,)
                raise RuntimeError(msg)
        else:
            wr = weakref.ref(inst, Cleanup(ID, self.instances))
            om = ObservableMethod(inst, self._func)
            self.instances[ID] = (wr, om)
        return om

    def __set__(self, inst, val):
        raise RuntimeError("Assigning to ObservableMethod not supported")


def event(func):
    return ObservableMethodDescriptor(func)


class Cleanup(object):
    """
    I manage removing elements from a dict whenever I'm called.

    Use me as a weakref.ref callback to remove an object's id from a dict
    when that object is garbage collected.
    """
    def __init__(self, key, d):
        self.key = key
        self.d = d

    def __call__(self, wr):
        del self.d[self.key]
To use this, we just decorate the methods we want to make observable with @event. Here's an example:
class Foo(object):
    def __init__(self, name):
        self.name = name

    @event
    def bar(self):
        print("%s called bar" % (self.name,))

    def baz(self):
        print("%s called baz" % (self.name,))

a = Foo('a')
b = Foo('b')
a.bar.addObserver(b.bar)
a.bar()
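If I am reading the descriptor correctly, that last call should print "a called bar" followed by "b called bar": the proxied method runs first, and the registered observer is then invoked with the same arguments.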
From wikipedia:
from collections import defaultdict


class Observable(defaultdict):

    def __init__(self):
        defaultdict.__init__(self, object)

    def emit(self, *args):
        '''Pass parameters to all observers and update states.'''
        for subscriber in self:
            response = subscriber(*args)
            self[subscriber] = response

    def subscribe(self, subscriber):
        '''Add a new subscriber to self.'''
        self[subscriber]

    def stat(self):
        '''Return a tuple containing the state of each observer.'''
        return tuple(self.values())
The Observable is used like this.
myObservable = Observable ()
# subscribe some inlined functions.
# myObservable[lambda x, y: x * y] would also work here.
myObservable.subscribe(lambda x, y: x * y)
myObservable.subscribe(lambda x, y: float(x) / y)
myObservable.subscribe(lambda x, y: x + y)
myObservable.subscribe(lambda x, y: x - y)
# emit parameters to each observer
myObservable.emit(6, 2)
# get updated values
myObservable.stat() # returns: (8, 3.0, 4, 12)
Based on Jason's answer, I implemented the C#-like events example as a fully-fledged python module including documentation and tests. I love fancy pythonic stuff :)
So, if you want some ready-to-use solution, you can just use the code on github.
Example: twisted log observers
To register an observer yourCallable() (a callable that accepts a dictionary) to receive all log events (in addition to any other observers):
twisted.python.log.addObserver(yourCallable)
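A minimal sketch of such an observer (the event-dictionary keys used here, 'message' and 'isError', are the common ones, but treat the details as assumptions):

from twisted.python import log

def yourCallable(event):
    # Each log event arrives as a dict; 'message' is a tuple of strings
    # and 'isError' flags error events.
    text = ' '.join(str(m) for m in event.get('message', ()))
    if event.get('isError'):
        print('ERROR: %s' % text)
    else:
        print(text)

log.addObserver(yourCallable)
log.msg("hello observers")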
Example: complete producer/consumer example
From Twisted-Python mailing list:
#!/usr/bin/env python
"""Serve as a sample implementation of a twisted producer/consumer
system, with a simple TCP server which asks the user how many random
integers they want, and it sends the result set back to the user, one
result per line."""

import random

from zope.interface import implements
from twisted.internet import interfaces, reactor
from twisted.internet.protocol import Factory
from twisted.protocols.basic import LineReceiver


class Producer:
    """Send back the requested number of random integers to the client."""

    implements(interfaces.IPushProducer)

    def __init__(self, proto, cnt):
        self._proto = proto
        self._goal = cnt
        self._produced = 0
        self._paused = False

    def pauseProducing(self):
        """When we've produced data too fast, pauseProducing() will be
        called (reentrantly from within resumeProducing's transport.write
        method, most likely), so set a flag that causes production to pause
        temporarily."""
        self._paused = True
        print('pausing connection from %s' % (self._proto.transport.getPeer()))

    def resumeProducing(self):
        self._paused = False
        while not self._paused and self._produced < self._goal:
            next_int = random.randint(0, 10000)
            self._proto.transport.write('%d\r\n' % (next_int))
            self._produced += 1
        if self._produced == self._goal:
            self._proto.transport.unregisterProducer()
            self._proto.transport.loseConnection()

    def stopProducing(self):
        pass


class ServeRandom(LineReceiver):
    """Serve up random data."""

    def connectionMade(self):
        print('connection made from %s' % (self.transport.getPeer()))
        self.transport.write('how many random integers do you want?\r\n')

    def lineReceived(self, line):
        cnt = int(line.strip())
        producer = Producer(self, cnt)
        self.transport.registerProducer(producer, True)
        producer.resumeProducing()

    def connectionLost(self, reason):
        print('connection lost from %s' % (self.transport.getPeer()))


factory = Factory()
factory.protocol = ServeRandom
reactor.listenTCP(1234, factory)
print('listening on 1234...')
reactor.run()
OP asks "Are there any exemplary examples of the GoF Observer implemented in Python?"
This is an example in Python 3.7. This Observable class meets the requirement of creating a relationship between one observable and many observers while remaining independent of their structure.
from functools import partial
from dataclasses import dataclass, field
import sys
from typing import List, Callable


@dataclass
class Observable:
    observers: List[Callable] = field(default_factory=list)

    def register(self, observer: Callable):
        self.observers.append(observer)

    def deregister(self, observer: Callable):
        self.observers.remove(observer)

    def notify(self, *args, **kwargs):
        for observer in self.observers:
            observer(*args, **kwargs)


def usage_demo():
    observable = Observable()

    # Register two anonymous observers using lambda.
    observable.register(
        lambda *args, **kwargs: print(f'Observer 1 called with args={args}, kwargs={kwargs}'))
    observable.register(
        lambda *args, **kwargs: print(f'Observer 2 called with args={args}, kwargs={kwargs}'))

    # Create an observer function, register it, then deregister it.
    def callable_3():
        print('Observer 3 NOT called.')

    observable.register(callable_3)
    observable.deregister(callable_3)

    # Create a general purpose observer function and register four observers.
    def callable_x(*args, **kwargs):
        print(f'{args[0]} observer called with args={args}, kwargs={kwargs}')

    for gui_field in ['Form field 4', 'Form field 5', 'Form field 6', 'Form field 7']:
        observable.register(partial(callable_x, gui_field))

    observable.notify('test')


if __name__ == '__main__':
    sys.exit(usage_demo())
A functional approach to observer design:
from functools import wraps

def add_listener(obj, method_name, listener):
    # Get any existing listeners
    listener_attr = method_name + '_listeners'
    listeners = getattr(obj, listener_attr, None)

    # If this is the first listener, then set up the method wrapper
    if not listeners:
        listeners = [listener]
        setattr(obj, listener_attr, listeners)

        # Get the object's method
        method = getattr(obj, method_name)

        @wraps(method)
        def method_wrapper(*args, **kwargs):
            method(*args, **kwargs)
            for l in listeners:
                l(obj, *args, **kwargs)  # Listener also gets the object argument

        # Replace the original method with the wrapper
        setattr(obj, method_name, method_wrapper)
    else:
        # Event is already set up, so just add another listener
        listeners.append(listener)


def remove_listener(obj, method_name, listener):
    # Get any existing listeners
    listener_attr = method_name + '_listeners'
    listeners = getattr(obj, listener_attr, None)

    if listeners:
        # Remove the listener
        next((listeners.pop(i)
              for i, l in enumerate(listeners)
              if l == listener),
             None)

        # If this was the last listener, then remove the method wrapper
        if not listeners:
            method = getattr(obj, method_name)
            delattr(obj, listener_attr)
            setattr(obj, method_name, method.__wrapped__)
These methods can then be used to add a listener to any class method. For example:
class MyClass(object):
    def __init__(self, prop):
        self.prop = prop

    def some_method(self, num, string):
        print('method:', num, string)


def listener_method(obj, num, string):
    print('listener:', num, string, obj.prop)


my = MyClass('my_prop')

add_listener(my, 'some_method', listener_method)
my.some_method(42, 'with listener')

remove_listener(my, 'some_method', listener_method)
my.some_method(42, 'without listener')
And the output is:
method: 42 with listener
listener: 42 with listener my_prop
method: 42 without listener