I want to implement a timer to measure how long a block of code takes to run. I then want to do this across an entire application containing multiple modules (40+) across multiple directories (4+).
My timer is created with two functions that are within a class with a structure like this:
class SubClass(Class1):
    def getStartTime(self):
        start = time.time()
        return start

    def logTiming(self, classstring, start):
        fin = time.time() - start
        logging.getLogger('perf_log_handler').info((classstring + ' sec').format(round(fin, 3)))
The first function gets the start time, and the second function calculates the time for the block to run and then logs it to a logger.
This code is in a module that we'll call module1.py.
In practice, generically, it is used like this:
class SubSubClass(SubClass):
    def Some_Process(self):
        stim = super().getStartTime()
        # code..............................
        # ...
        super().logTiming("The Process took: {}", stim)
        return Result_Of_Process
This code resides in a module called module2.py and already works and successfully logs. My problem is that, structured like this, I can seemingly only use the timer inside code that falls under the umbrella of SubClass, where it is defined (outside it, my application fails to render and I get a "can't find page" error in my browser). But I want to use this timer everywhere in all the application modules, globally: whether the module is in another directory, whether a block of code sits inside other classes and subclasses in other modules, everywhere.
What is the easiest, most efficient way to create this timing instrument so that I can use it anywhere in my application? I understand I may have to define it completely differently. I am very new to all of this, so any help is appreciated.
OPTION 1) You should define another module, for example, "mytimer.py" fully dedicated to the timer:
import time

class MyTimer():
    def __init__(self):
        self.start = time.time()

    def log(self):
        now = time.time()
        return now - self.start
And then, from any line of your code, for example, in module2.py:
from mytimer import MyTimer

class SomeClass():
    def Some_Function(self):
        t = MyTimer()
        # ....
        t.log()  # returns the elapsed seconds
        return ...
OPTION 2) You could also use a simple function instead of a class:
import time

def mytimer(start=None, tag=""):
    if start is None:
        start = time.time()
    now = time.time()
    delay = float(now - start)
    print("%(tag)s %(delay).2f seconds." % {'tag': tag, 'delay': delay})
    return now
And then, in your code:
from mytimer import mytimer

class SomeClass():
    def Some_Function(self):
        t = mytimer(tag='BREAK0')
        # ....
        t = mytimer(start=t, tag='BREAK1')
        # ....
        t = mytimer(start=t, tag='BREAK2')
        # ....
        t = mytimer(start=t, tag='BREAK3')
        return ...
I am not quite sure what you are looking for, but once upon a time I used a decorator for a similar type of problem.
The snippet below is the closest I can remember to what I implemented at that time. Hopefully it is useful to you.
Brief explanation
timed is a decorator that wraps methods of a Python object and times each call.
The class contains a log that is updated by the wrapper as the @timed methods are called.
Note that if you want to make the @property act as a "class property" you can draw inspiration from this post.
from time import sleep, time

# -----------------
# Define Decorators
# -----------------
def timed(wrapped):
    def wrapper(self, *arg, **kwargs):
        start = time()
        res = wrapped(self, *arg, **kwargs)
        stop = time()
        self.log = {'method': wrapped.__name__, 'called': start, 'elapsed': stop - start}
        return res
    return wrapper

# -----------------
# Define Classes
# -----------------
class Test(object):
    __log = []

    @property
    def log(self):
        return self.__log

    @log.setter
    def log(self, kwargs):
        self.__log.append(kwargs)

    @timed
    def test(self):
        print("Running timed method")
        sleep(2)

    @timed
    def test2(self, a, b=2):
        print("Running another timed method")
        sleep(2)
        return a + b

# ::::::::::::::::::
if __name__ == '__main__':
    t = Test()
    res = t.test()
    res = t.test2(1)
    print(t.log)
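Running this prints the two "Running..." messages and then the accumulated log: a list with one dict per timed call, each carrying the 'method', 'called' and 'elapsed' keys.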
I am trying to implement an FTP server using Twisted that limits the size of the uploaded file. Ideally this would happen before the transfer starts, but it is not really a problem if it exits gracefully during the transfer if it is too large.
I have started from the very basic ftpserver.py and slowly been pulling in more of the underlying classes from ftp.py to get down to the innards.
Current code below, please excuse the 'hack-and-slash' style employed until I can get it working.
#!/usr/bin/python
import os

from twisted.protocols.ftp import FTPFactory, FTPShell, FTPAnonymousShell, IFTPShell, IsADirectoryError, errnoToFailure
from twisted.cred.portal import Portal
from twisted.cred.checkers import AllowAnonymousAccess
from twisted.internet import reactor, defer
from twisted.python import filepath, failure

class FileConsumer1(object):
    def __init__(self, fObj):
        self.fObj = fObj

    def registerProducer(self, producer, streaming):
        self.producer = producer
        assert streaming

    def unregisterProducer(self):
        self.producer = None
        self.fObj.close()

    def write(self, bytes):
        size = os.fstat(self.fObj.fileno()).st_size + len(bytes)
        if size > 10:
            raise Exception("File too large")  # WHAT GOES HERE?
        self.fObj.write(bytes)

class FileWriter1(object):
    def __init__(self, fObj):
        self.fObj = fObj
        self._receive = False

    def receive(self):
        assert not self._receive, "Can only call IWriteFile.receive *once* per instance"
        self._receive = True
        return defer.succeed(FileConsumer1(self.fObj))

    def close(self):
        return defer.succeed(None)

class FTPShell1(FTPShell):
    def openForWriting(self, path):
        p = self._path(path)
        if p.isdir():
            return defer.fail(IsADirectoryError(path))
        try:
            fObj = p.open('w')
        except (IOError, OSError), e:
            return errnoToFailure(e.errno, path)
        except:
            return defer.fail()
        return defer.succeed(FileWriter1(fObj))

class FTPRealm1(object):
    def __init__(self, root):
        self.path = filepath.FilePath(root)

    def requestAvatar(self, avatarId, mind, *interfaces):
        avatar = FTPShell1(self.path)
        return (IFTPShell, avatar, getattr(avatar, 'logout', lambda: None))

p = Portal(FTPRealm1('./'), [AllowAnonymousAccess()])
f = FTPFactory(p)
reactor.listenTCP(4021, f)
reactor.run()
Clearly the if size > 10 check will need a bigger limit, but how should I be indicating there's a problem at this point? As it stands, Twisted catches that exception, but it's not very elegant. As far as I can see from examination of ftp.py there's nothing obvious I can return here. Can I pass down a deferred in some way? How should I be closing down the transfer elegantly?
Thanks,
Here's a revised version:
#!/usr/bin/python
import os

from zope.interface import Interface, implements
from twisted.protocols.ftp import FTPFactory, FTPShell, FTPAnonymousShell, IFTPShell, IWriteFile, BaseFTPRealm, FTPCmdError, EXCEEDED_STORAGE_ALLOC, IsADirectoryError, errnoToFailure
from twisted.cred.portal import Portal
from twisted.cred.checkers import AllowAnonymousAccess
from twisted.internet import reactor, defer, interfaces
from twisted.python import filepath

class ExceededStorageAllocError(FTPCmdError):
    errorCode = EXCEEDED_STORAGE_ALLOC

class FileConsumer(object):
    implements(interfaces.IConsumer)

    def __init__(self):
        self.data = ""
        self.error = None

    def registerProducer(self, producer, streaming):
        self.producer = producer
        assert streaming

    def unregisterProducer(self):
        if self.producer:
            self.producer.stopProducing()
        self.producer = None

    def write(self, bytes):
        self.data += bytes
        if len(self.data) > 10:
            self.unregisterProducer()
            self.error = ExceededStorageAllocError()

class FileWriter(object):
    implements(IWriteFile)

    def __init__(self, path):
        self.path = path

    def receive(self):
        self.consumer = FileConsumer()
        return defer.succeed(self.consumer)

    def close(self):
        if self.consumer.error:
            return defer.fail(self.consumer.error)
        try:
            f = self.path.open('w')
        except (IOError, OSError), e:
            return errnoToFailure(e.errno, self.path)
        f.write(self.consumer.data)
        return defer.succeed(None)

class FTPShell1(FTPShell):
    makeDirectory = FTPAnonymousShell.makeDirectory
    removeDirectory = FTPAnonymousShell.removeDirectory

    def openForWriting(self, path):
        p = self._path(path)
        if p.isdir():
            return defer.fail(IsADirectoryError(path))
        return defer.succeed(FileWriter(p))

class FTPRealm1(BaseFTPRealm):
    def __init__(self, root):
        self.root = root

    def requestAvatar(self, avatarId, mind, *interfaces):
        avatar = FTPShell1(filepath.FilePath(self.root))
        return (IFTPShell, avatar, getattr(avatar, 'logout', lambda: None))

p = Portal(FTPRealm1('./'), [AllowAnonymousAccess()])
f = FTPFactory(p)
reactor.listenTCP(4021, f)
reactor.run()
which accumulates the received data within the FileConsumer, then aborts if the file is too long. The close() method of the FileWriter then either reports that error or writes the complete buffer to the file.
The only real issue I'm having with this is that when run, the exception is displayed on the server:
Unexpected error received during transfer:
Traceback (most recent call last):
Failure: __main__.ExceededStorageAllocError:
As a quick disclaimer, I'm very bad with Twisted's producer/consumer model, so this may not work. As always, I'm not responsible if things blow up ;)
You seem to be on the correct path, so pat yourself on the back for that. I think if you call unregisterProducer when a file is too large, the consumer should stop consuming. You may also need to call self.producer.stopProducing(), but don't quote me on that.
def unregisterProducer(self):
    self.producer.stopProducing()
    self.fObj.close()

def write(self, bytes):
    size = os.fstat(self.fObj.fileno()).st_size + len(bytes)
    if size > 10:
        self.unregisterProducer()
        # log statements would go here
        # do some clean up too
        return  # don't fall through and write past the limit
    self.fObj.write(bytes)
If my mental Python interpreter is correct, this should simply stop consuming the file. As far as what you should return to the client, you're going to have to read the RFC about FTP to figure that out.
PS
As tedious as it may seem, please use the @implementer decorator. Most times you'll be fine, but there may be instances where unexpected tracebacks appear.
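For example, a minimal sketch of the decorator spelling that zope.interface provides, shown on the FileConsumer from the revised code above (the decorator replaces the implements(...) call inside the class body):

from zope.interface import implementer
from twisted.internet import interfaces

@implementer(interfaces.IConsumer)
class FileConsumer(object):
    # ... body unchanged from the revised version above
    pass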
I wrote some code to get data using pySerial as below.
My class depends on the serial class, which doesn't meet the "loose coupling" rule.
Should I use an interface to decouple my class?
Thanks a lot for your instruction.
import serial

class ArduinoConnect:
    def __init__(self):
        pass

    def serial_connect(self, serial_port, serial_baudrate):
        self._serial_port = serial_port
        try:
            self.ser = serial.Serial(
                port=self._serial_port,
                baudrate=serial_baudrate,
                parity=serial.PARITY_NONE,
                stopbits=serial.STOPBITS_ONE,
                bytesize=serial.EIGHTBITS,
            )
        except serial.serialutil.SerialException, e:
            print str(e)

    def serial_disconnect(self):
        self.ser.close()

    def get_quaternion(self, number_of_data=50):
        buff = []
        self.ser.write('q')
        self.ser.write(chr(number_of_data))
        for j in range(number_of_data):
            in_string = self.ser.readline()
            buff_line = in_string.split(",")
            buff_line.pop()
            buff_line = self.hex_to_quaternion(buff_line)
            buff.append(list(buff_line))
        return buff

    def hex_to_quaternion(self, list_of_hex=None):
        # ......
        pass

arduino = ArduinoConnect()
arduino.serial_connect(serial_port="COM5", serial_baudrate=115200)
print arduino.get_quaternion()
arduino.serial_disconnect()
I adjusted my code as recommended. Dependency injection helps separate out the serial handling, and a factory method encapsulates the DI wiring.
Is there anything else I could do to meet the "loose coupling" rule?
Thanks for your help.
import serial

class ArduinoConnect:
    def __init__(self, serial_to_arduino):
        self._serial_to_arduino = serial_to_arduino

    def get_quaternion(self, number_of_data=50):
        buff = []
        self._serial_to_arduino.write('q')
        self._serial_to_arduino.write(chr(number_of_data))
        for j in range(number_of_data):
            in_string = self._serial_to_arduino.readline()
            buff_line = in_string.split(",")
            buff_line.pop()
            buff_line = self.hex_to_quaternion(buff_line)
            buff.append(list(buff_line))
        return buff

    def hex_to_quaternion(self, list_of_hex):
        # ......
        pass

    def __getattr__(self, attr):
        return getattr(self._serial_to_arduino, attr)

class SerialToArduino:
    def __init__(self):
        pass

    def serial_connect(self, serial_port="COM5", serial_baudrate=115200):
        self._serial_port = serial_port
        try:
            self.ser = serial.Serial(
                port=self._serial_port,
                baudrate=serial_baudrate,
                parity=serial.PARITY_NONE,
                stopbits=serial.STOPBITS_ONE,
                bytesize=serial.EIGHTBITS,
            )
        except serial.serialutil.SerialException, e:
            print str(e)

    def serial_disconnect(self):
        self.ser.close()

    def readline(self):
        return self.ser.readline()

    def write(self, data):
        self.ser.write(data=data)

def get_ArduinoConnect():
    'factory method'
    return ArduinoConnect(serial_to_arduino=SerialToArduino())

arduino = get_ArduinoConnect()
arduino.serial_connect(serial_port="COM5", serial_baudrate=115200)
print arduino.get_quaternion()
arduino.serial_disconnect()
I can think of two possible solutions:
Implement an "adapter" to expose methods to the main class and hide the implementation of Serial, so that your main class avoids a dependency on the concrete Serial class.
Do dependency injection, something like this:
def serial_connect(self, engine, serial_port, serial_baudrate):
Update for 1: I referred to http://en.wikipedia.org/wiki/Adapter_pattern, which is commonly used when you want to separate the concrete implementation from the abstraction. Think of it as a travel plug adapter.
It is particularly useful for a language like Java with strict interfaces and everything. In your case, because Python doesn't have an "interface" construct, you can simulate one with an abstract class:
class AbstractAdapter(object):
    def serial_connect(self, serial_port="COM5", serial_baudrate=115200):
        raise NotImplementedError("Needs implementation")
    # do the same thing for the rest of the methods
Then in ArduinoConnect, you can check for the type
def __init__(self, serial_to_arduino):
    if not isinstance(serial_to_arduino, AbstractAdapter):
        raise TypeError("Wrong type")
This forces your serial_to_arduino to extend AbstractAdapter, which enforces the implementation of all abstract methods, hence an adapter.
This might not be the most "pythonic" way to do things, but from an OOP point of view, you can do it that way to get the highest level of loose coupling (in my opinion).
P/s: Actually, I think the correct pattern in this case should be Strategy; the two are pretty similar in terms of implementation, but they are meant for different purposes. You can read more about patterns like Strategy, Proxy, Command, and Mediator, which are often used to achieve loose coupling. A minimal Strategy-style sketch follows.
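To illustrate, here is a sketch building on the AbstractAdapter above (FakeSerial and its canned data are my own invention, not from the post): any object honouring the adapter's methods can be swapped in, for instance a fake transport for testing.

class FakeSerial(AbstractAdapter):
    """A stand-in strategy that needs no hardware -- handy for tests."""
    def serial_connect(self, serial_port="COM5", serial_baudrate=115200):
        pass
    def serial_disconnect(self):
        pass
    def readline(self):
        return "1A,2B,3C,"  # canned data instead of a real read
    def write(self, data):
        pass

# ArduinoConnect never touches serial.Serial directly, so strategies swap freely:
arduino = ArduinoConnect(serial_to_arduino=FakeSerial())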
I am using xmlrpclib to interactively remote-control some lab equipment with IPython. I love IPython's autocompletion features, and I would also like to have it via xmlrpclib. So far I have managed to accomplish method name completion and method help with the following approach.
A little test server simulating a motorized stage (this is only useful if you want to test my client code):
import time  # needed for StageSimulation
from SimpleXMLRPCServer import SimpleXMLRPCServer, SimpleXMLRPCRequestHandler

class StageSimulation:
    """ A very simple simulation of a motorized linear stage """
    v = 5         # mm/s -- speed of stage
    goalPos = 0   # mm   -- goal position in mm
    goalTime = 0  # sec  -- time when goal position should be reached

    def getPos(self):
        """ Return actual position of stage """
        delta_t = self.goalTime - time.time()  # remaining moving time
        if delta_t <= 0:  # stage is not moving
            return self.goalPos
        else:             # stage is moving
            return self.goalPos - self.v*delta_t

    def move(self, goalPos, v=5):
        """ Move stage to position ``goalPos`` with speed ``v`` """
        p0 = self.getPos()
        delta_p = goalPos - p0
        if v*delta_p < 0:  # sign of v wrong
            v *= -1
        self.goalTime = time.time() + delta_p/v
        self.goalPos, self.v = goalPos, v

# Restrict to a particular path (see Python docs)
class RequestHandler(SimpleXMLRPCRequestHandler):
    rpc_paths = ('/RPC2',)

if __name__ == '__main__':
    """ Instantiate server """
    host, hport = "localhost", 8787
    LogXMLRPCRequests = False
    server = SimpleXMLRPCServer((host, hport), allow_none=True,
                                requestHandler=RequestHandler)
    server.register_introspection_functions()
    StS = StageSimulation()
    server.register_instance(StS)
    try:
        server.serve_forever()
    except KeyboardInterrupt:
        print("Terminated server.")
My client instantiates an object in which all known methods are registered:
import xmlrpclib

class XMLRPCClientObject(object):
    """XMLRPC client which allows tab completion on server instances.

    This is achieved by reading all method names from the server and
    using them to generate local wrappers to them.
    """
    def __init__(self, url):
        """ Connect to server with ``url`` and generate methods """
        self.SP = xmlrpclib.ServerProxy(url)
        self.generateMethods()

    def generateMethods(self):
        """ Read names of server methods and use them for local wrappers """
        SP = self.SP
        for n in SP.system.listMethods():
            f = getattr(SP, n)
            f.__doc__ = SP.system.methodHelp(n)  # add doc string
            f.__name__ = n                       # needed to make help() work
            setattr(self, n, f)                  # register as local method

if __name__ == "__main__":
    """ main function connects to test server """
    S = XMLRPCClientObject("http://localhost:8787")
In addition to the method name completion, I would also like to have parameter name completion, as in S.move(goal<TAB>. An approach would be to utilize xmlrpc.ServerProxy.system.methodSignature(), but system_methodSignature() is not supported by SimpleXMLRPCServer. Does anybody have an idea how to retrieve the signatures of the server methods?
I tend to think the Python inspect module can help; it provides the basics you would need for your desired features.
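For example, a sketch (untested, and it repurposes the system.methodSignature hook to return parameter names rather than the type signatures the XML-RPC spec describes): inside the server's __main__ block, after register_introspection_functions(), register a replacement that reports argument names via inspect.getargspec:

import inspect

def methodSignature(name):
    """Return the argument names of a registered method (sketch)."""
    method = getattr(StS, name, None)
    if method is None:
        return []
    args = inspect.getargspec(method).args
    return [a for a in args if a != 'self']  # e.g. ['goalPos', 'v'] for move

# Overrides the "signatures not supported" stub installed by
# register_introspection_functions():
server.register_function(methodSignature, 'system.methodSignature')

The client could then feed those names into IPython's completion machinery; which completer hook to use is left open here.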
Are there any exemplary examples of the GoF Observer implemented in Python? I have a bit of code which currently has debugging code laced through the key class (currently generating messages to stderr if a magic environment variable is set). Additionally, the class has an interface for incrementally returning results as well as storing them (in memory) for post-processing. (The class itself is a job manager for concurrently executing commands on remote machines over ssh.)
Currently the usage of the class looks something like:
job = SSHJobMan(hostlist, cmd)
job.start()
while not job.done():
    for each in job.poll():
        incrementally_process(job.results[each])
    time.sleep(0.2)  # or other more useful work
post_process(job.results)
An alternative usage model is:
job = SSHJobMan(hostlist, cmd)
job.wait()  # implicitly performs a start()
process(job.results)
This all works fine for the current utility. However, it does lack flexibility. For example, I currently support a brief output format or a progress bar as incremental results, and I also support brief, complete, and "merged message" outputs for the post_process() function.
However, I'd like to support multiple results/output streams (progress bar to the terminal, debugging and warnings to a log file, outputs from successful jobs to one file/directory, error messages and other results from non-successful jobs to another, etc).
This sounds like a situation that calls for Observer ... have instances of my class accept registration from other objects and call them back with specific types of events as they occur.
I'm looking at PyPubSub since I saw several references to that in SO related questions. I'm not sure I'm ready to add the external dependency to my utility but I could see value in using their interface as a model for mine if that's going to make it easier for others to use. (The project is intended as both a standalone command line utility and a class for writing other scripts/utilities).
In short I know how to do what I want ... but there are numerous ways to accomplish it. I want suggestions on what's most likely to work for other users of the code in the long run.
The code itself is at: classh.
However it does lack flexibility.
Well... actually, this looks like a good design to me if an asynchronous API is what you want. It usually is. Maybe all you need is to switch from stderr to Python's logging module, which has a sort of publish/subscribe model of its own, what with Logger.addHandler() and so on.
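For instance, a minimal sketch of that publish/subscribe flavour (the logger and handler choices here are illustrative): each handler added with addHandler() acts as a subscriber to the records the logger publishes.

import logging, sys

log = logging.getLogger("jobs")
log.setLevel(logging.INFO)
log.addHandler(logging.StreamHandler(sys.stderr))  # one "subscriber"
log.addHandler(logging.FileHandler("jobs.log"))    # another "subscriber"
log.info("both handlers receive this record")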
If you do want to support observers, my advice is to keep it simple. You really only need a few lines of code.
class Event(object):
    pass

class Observable(object):
    def __init__(self):
        self.callbacks = []

    def subscribe(self, callback):
        self.callbacks.append(callback)

    def fire(self, **attrs):
        e = Event()
        e.source = self
        for k, v in attrs.items():
            setattr(e, k, v)
        for fn in self.callbacks:
            fn(e)
Your Job class can subclass Observable. When something of interest happens, call self.fire(type="progress", percent=50) or the like.
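A hypothetical usage sketch (the Job class and field names are illustrative, not the poster's actual API):

class Job(Observable):
    def poll(self):
        # ... do some work, then publish an event:
        self.fire(type="progress", percent=50)

job = Job()
job.subscribe(lambda e: print(e.type, e.percent))
job.poll()  # prints: progress 50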
I think people in the other answers overdo it. You can easily achieve events in Python with less than 15 lines of code.
You simply have two classes: Event and Observer. Any class that wants to listen for an event needs to inherit from Observer and register to listen (observe) for a specific event. When an Event is instantiated and fired, all observers listening to that event will run the specified callback functions.
class Observer():
    _observers = []

    def __init__(self):
        self._observers.append(self)
        self._observables = {}

    def observe(self, event_name, callback):
        self._observables[event_name] = callback


class Event():
    def __init__(self, name, data, autofire=True):
        self.name = name
        self.data = data
        if autofire:
            self.fire()

    def fire(self):
        for observer in Observer._observers:
            if self.name in observer._observables:
                observer._observables[self.name](self.data)
Example:
class Room(Observer):
    def __init__(self):
        print("Room is ready.")
        Observer.__init__(self)  # Observer's init needs to be called

    def someone_arrived(self, who):
        print(who + " has arrived!")

room = Room()
room.observe('someone arrived', room.someone_arrived)

Event('someone arrived', 'Lenard')
Output:
Room is ready.
Lenard has arrived!
A few more approaches...
Example: the logging module
Maybe all you need is to switch from stderr to Python's logging module, which has a powerful publish/subscribe model.
It's easy to get started producing log records.
# producer
import logging

log = logging.getLogger("myjobs")  # that's all the setup you need

class MyJob(object):
    def run(self):
        log.info("starting job")
        n = 10
        for i in range(n):
            log.info("%.1f%% done" % (100.0 * i / n))
        log.info("work complete")
On the consumer side there's a bit more work. Unfortunately configuring logger output takes, like, 7 whole lines of code to do. ;)
# consumer
import myjobs, sys, logging

if user_wants_log_output:
    ch = logging.StreamHandler(sys.stderr)
    ch.setLevel(logging.INFO)
    formatter = logging.Formatter(
        "%(asctime)s - %(name)s - %(levelname)s - %(message)s")
    ch.setFormatter(formatter)
    myjobs.log.addHandler(ch)
    myjobs.log.setLevel(logging.INFO)

myjobs.MyJob().run()
On the other hand there's an amazing amount of stuff in the logging package. If you ever need to send log data to a rotating set of files, an email address, and the Windows Event Log, you're covered.
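For example, shipping records to a rotating set of files is one handler away (a sketch; the file name and size limits are arbitrary choices, not part of the original answer):

import logging.handlers

rh = logging.handlers.RotatingFileHandler(
    "myjobs.log", maxBytes=1000000, backupCount=3)  # keep ~3 MB of history
myjobs.log.addHandler(rh)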
Example: simplest possible observer
But you don't need to use any library at all. An extremely simple way to support observers is to call a method that does nothing.
# producer
class MyJob(object):
    def on_progress(self, pct):
        """Called when progress is made. pct is the percent complete.
        By default this does nothing. The user may override this method
        or even just assign to it."""
        pass

    def run(self):
        n = 10
        for i in range(n):
            self.on_progress(100.0 * i / n)
        self.on_progress(100.0)

# consumer
import sys, myjobs
job = myjobs.MyJob()
job.on_progress = lambda pct: sys.stdout.write("%.1f%% done\n" % pct)
job.run()
Sometimes instead of writing a lambda, you can just say job.on_progress = progressBar.update, which is nice.
This is about as simple as it gets. One drawback is that it doesn't naturally support multiple listeners subscribing to the same events.
Example: C#-like events
With a bit of support code, you can get C#-like events in Python. Here's the code:
# glue code
class event(object):
    def __init__(self, func):
        self.__doc__ = func.__doc__
        self._key = ' ' + func.__name__

    def __get__(self, obj, cls):
        try:
            return obj.__dict__[self._key]
        except KeyError:
            be = obj.__dict__[self._key] = boundevent()
            return be

class boundevent(object):
    def __init__(self):
        self._fns = []

    def __iadd__(self, fn):
        self._fns.append(fn)
        return self

    def __isub__(self, fn):
        self._fns.remove(fn)
        return self

    def __call__(self, *args, **kwargs):
        for f in self._fns[:]:
            f(*args, **kwargs)
The producer declares the event using a decorator:
# producer
class MyJob(object):
    @event
    def progress(pct):
        """Called when progress is made. pct is the percent complete."""

    def run(self):
        n = 10
        for i in range(n+1):
            self.progress(100.0 * i / n)

# consumer
import sys, myjobs
job = myjobs.MyJob()
job.progress += lambda pct: sys.stdout.write("%.1f%% done\n" % pct)
job.run()
This works exactly like the "simple observer" code above, but you can add as many listeners as you like using +=. (Unlike C#, there are no event handler types, you don't have to new EventHandler(foo.bar) when subscribing to an event, and you don't have to check for null before firing the event. Like C#, events do not squelch exceptions.)
How to choose
If logging does everything you need, use that. Otherwise do the simplest thing that works for you. The key thing to note is that you don't need to take on a big external dependency.
How about an implementation where objects aren't kept alive just because they're observing something? Below please find an implementation of the observer pattern with the following features:
Usage is pythonic. To add an observer to a bound method .bar of instance foo, just do foo.bar.addObserver(observer).
Observers are not kept alive by virtue of being observers. In other words, the observer code uses no strong references.
No sub-classing necessary (descriptors ftw).
Can be used with unhashable types.
Can be used as many times as you want in a single class.
(bonus) As of today the code exists in a proper downloadable, installable package on github.
Here's the code (the github package or PyPI package have the most up to date implementation):
import weakref
import functools

class ObservableMethod(object):
    """
    A proxy for a bound method which can be observed.

    I behave like a bound method, but other bound methods can subscribe to be
    called whenever I am called.
    """
    def __init__(self, obj, func):
        self.func = func
        functools.update_wrapper(self, func)
        self.objectWeakRef = weakref.ref(obj)
        self.callbacks = {}  # observing object ID -> (weak ref, methodNames)

    def addObserver(self, boundMethod):
        """
        Register a bound method to observe this ObservableMethod.

        The observing method will be called whenever this ObservableMethod is
        called, and with the same arguments and keyword arguments. If a
        boundMethod has already been registered as a callback, trying to add
        it again does nothing. In other words, there is no way to sign up an
        observer to be called back multiple times.
        """
        obj = boundMethod.__self__
        ID = id(obj)
        if ID in self.callbacks:
            s = self.callbacks[ID][1]
        else:
            wr = weakref.ref(obj, Cleanup(ID, self.callbacks))
            s = set()
            self.callbacks[ID] = (wr, s)
        s.add(boundMethod.__name__)

    def discardObserver(self, boundMethod):
        """
        Un-register a bound method.
        """
        obj = boundMethod.__self__
        if id(obj) in self.callbacks:
            self.callbacks[id(obj)][1].discard(boundMethod.__name__)

    def __call__(self, *arg, **kw):
        """
        Invoke the method which I proxy, and all of its callbacks.

        The callbacks are called with the same *arg and **kw as the main
        method.
        """
        result = self.func(self.objectWeakRef(), *arg, **kw)
        for ID in self.callbacks:
            wr, methodNames = self.callbacks[ID]
            obj = wr()
            for methodName in methodNames:
                getattr(obj, methodName)(*arg, **kw)
        return result

    @property
    def __self__(self):
        """
        Get a strong reference to the object owning this ObservableMethod.

        This is needed so that ObservableMethod instances can observe other
        ObservableMethod instances.
        """
        return self.objectWeakRef()


class ObservableMethodDescriptor(object):
    def __init__(self, func):
        """
        To each instance of the class using this descriptor, I associate an
        ObservableMethod.
        """
        self.instances = {}  # instance id -> (weak ref, ObservableMethod)
        self._func = func

    def __get__(self, inst, cls):
        if inst is None:
            return self
        ID = id(inst)
        if ID in self.instances:
            wr, om = self.instances[ID]
            if not wr():
                msg = "Object id %d should have been cleaned up" % (ID,)
                raise RuntimeError(msg)
        else:
            wr = weakref.ref(inst, Cleanup(ID, self.instances))
            om = ObservableMethod(inst, self._func)
            self.instances[ID] = (wr, om)
        return om

    def __set__(self, inst, val):
        raise RuntimeError("Assigning to ObservableMethod not supported")


def event(func):
    return ObservableMethodDescriptor(func)


class Cleanup(object):
    """
    I remove elements from a dict whenever I'm called.

    Use me as a weakref.ref callback to remove an object's id from a dict
    when that object is garbage collected.
    """
    def __init__(self, key, d):
        self.key = key
        self.d = d

    def __call__(self, wr):
        del self.d[self.key]
To use this we just decorate methods we want to make observable with @event. Here's an example:
class Foo(object):
    def __init__(self, name):
        self.name = name

    @event
    def bar(self):
        print("%s called bar" % (self.name,))

    def baz(self):
        print("%s called baz" % (self.name,))

a = Foo('a')
b = Foo('b')
a.bar.addObserver(b.bar)
a.bar()
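Calling a.bar() prints "a called bar" followed by "b called bar", since b.bar was registered as an observer of a.bar.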
From Wikipedia:
from collections import defaultdict

class Observable(defaultdict):
    def __init__(self):
        defaultdict.__init__(self, object)

    def emit(self, *args):
        '''Pass parameters to all observers and update states.'''
        for subscriber in self:
            response = subscriber(*args)
            self[subscriber] = response

    def subscribe(self, subscriber):
        '''Add a new subscriber to self.'''
        self[subscriber]

    def stat(self):
        '''Return a tuple containing the state of each observer.'''
        return tuple(self.values())
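Note that subscribe() works by merely indexing self[subscriber]: because Observable is a defaultdict with object as its default factory, looking up a missing key inserts it, mapping the subscriber to a fresh object() as its placeholder state.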
The Observable is used like this.
myObservable = Observable()

# subscribe some inlined functions.
# myObservable[lambda x, y: x * y] would also work here.
myObservable.subscribe(lambda x, y: x * y)
myObservable.subscribe(lambda x, y: float(x) / y)
myObservable.subscribe(lambda x, y: x + y)
myObservable.subscribe(lambda x, y: x - y)

# emit parameters to each observer
myObservable.emit(6, 2)

# get updated values
myObservable.stat()  # returns (12, 3.0, 8, 4) in subscription order on Python 3.7+
Based on Jason's answer, I implemented the C#-like events example as a fully-fledged Python module, including documentation and tests. I love fancy pythonic stuff :)
So, if you want some ready-to-use solution, you can just use the code on github.
Example: twisted log observers
To register an observer yourCallable() (a callable that accepts a dictionary) to receive all log events (in addition to any other observers):
twisted.python.log.addObserver(yourCallable)
Example: complete producer/consumer example
From Twisted-Python mailing list:
#!/usr/bin/env python
"""Serve as a sample implementation of a twisted producer/consumer
system, with a simple TCP server which asks the user how many random
integers they want, and it sends the result set back to the user, one
result per line."""

import random

from zope.interface import implements
from twisted.internet import interfaces, reactor
from twisted.internet.protocol import Factory
from twisted.protocols.basic import LineReceiver

class Producer:
    """Send back the requested number of random integers to the client."""
    implements(interfaces.IPushProducer)

    def __init__(self, proto, cnt):
        self._proto = proto
        self._goal = cnt
        self._produced = 0
        self._paused = False

    def pauseProducing(self):
        """When we've produced data too fast, pauseProducing() will be
        called (reentrantly from within resumeProducing's transport.write
        method, most likely), so set a flag that causes production to pause
        temporarily."""
        self._paused = True
        print('pausing connection from %s' % (self._proto.transport.getPeer()))

    def resumeProducing(self):
        self._paused = False
        while not self._paused and self._produced < self._goal:
            next_int = random.randint(0, 10000)
            self._proto.transport.write('%d\r\n' % (next_int))
            self._produced += 1
        if self._produced == self._goal:
            self._proto.transport.unregisterProducer()
            self._proto.transport.loseConnection()

    def stopProducing(self):
        pass

class ServeRandom(LineReceiver):
    """Serve up random data."""
    def connectionMade(self):
        print('connection made from %s' % (self.transport.getPeer()))
        self.transport.write('how many random integers do you want?\r\n')

    def lineReceived(self, line):
        cnt = int(line.strip())
        producer = Producer(self, cnt)
        self.transport.registerProducer(producer, True)
        producer.resumeProducing()

    def connectionLost(self, reason):
        print('connection lost from %s' % (self.transport.getPeer()))

factory = Factory()
factory.protocol = ServeRandom
reactor.listenTCP(1234, factory)
print('listening on 1234...')
reactor.run()
OP asks "Are there any exemplary examples of the GoF Observer implemented in Python?"
This is an example in Python 3.7. This Observable class meets the requirement of creating a relationship between one observable and many observers while remaining independent of their structure.
from functools import partial
from dataclasses import dataclass, field
import sys
from typing import List, Callable

@dataclass
class Observable:
    observers: List[Callable] = field(default_factory=list)

    def register(self, observer: Callable):
        self.observers.append(observer)

    def deregister(self, observer: Callable):
        self.observers.remove(observer)

    def notify(self, *args, **kwargs):
        for observer in self.observers:
            observer(*args, **kwargs)

def usage_demo():
    observable = Observable()

    # Register two anonymous observers using lambda.
    observable.register(
        lambda *args, **kwargs: print(f'Observer 1 called with args={args}, kwargs={kwargs}'))
    observable.register(
        lambda *args, **kwargs: print(f'Observer 2 called with args={args}, kwargs={kwargs}'))

    # Create an observer function, register it, then deregister it.
    def callable_3():
        print('Observer 3 NOT called.')

    observable.register(callable_3)
    observable.deregister(callable_3)

    # Create a general purpose observer function and register four observers.
    def callable_x(*args, **kwargs):
        print(f'{args[0]} observer called with args={args}, kwargs={kwargs}')

    for gui_field in ['Form field 4', 'Form field 5', 'Form field 6', 'Form field 7']:
        observable.register(partial(callable_x, gui_field))

    observable.notify('test')

if __name__ == '__main__':
    sys.exit(usage_demo())
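Running usage_demo() prints lines from observers 1 and 2 and the four form-field observers; observer 3 stays silent because it was deregistered before notify() ran.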
A functional approach to observer design:
from functools import wraps

def add_listener(obj, method_name, listener):
    # Get any existing listeners
    listener_attr = method_name + '_listeners'
    listeners = getattr(obj, listener_attr, None)

    # If this is the first listener, then set up the method wrapper
    if not listeners:
        listeners = [listener]
        setattr(obj, listener_attr, listeners)

        # Get the object's method
        method = getattr(obj, method_name)

        @wraps(method)
        def method_wrapper(*args, **kwargs):
            method(*args, **kwargs)
            for l in listeners:
                l(obj, *args, **kwargs)  # Listener also gets the object argument

        # Replace the original method with the wrapper
        setattr(obj, method_name, method_wrapper)
    else:
        # Event is already set up, so just add another listener
        listeners.append(listener)

def remove_listener(obj, method_name, listener):
    # Get any existing listeners
    listener_attr = method_name + '_listeners'
    listeners = getattr(obj, listener_attr, None)

    if listeners:
        # Remove the listener
        next((listeners.pop(i)
              for i, l in enumerate(listeners)
              if l == listener),
             None)

        # If this was the last listener, then remove the method wrapper
        if not listeners:
            method = getattr(obj, method_name)
            delattr(obj, listener_attr)
            setattr(obj, method_name, method.__wrapped__)
These methods can then be used to add a listener to any class method. For example:
class MyClass(object):
    def __init__(self, prop):
        self.prop = prop

    def some_method(self, num, string):
        print('method:', num, string)

def listener_method(obj, num, string):
    print('listener:', num, string, obj.prop)

my = MyClass('my_prop')

add_listener(my, 'some_method', listener_method)
my.some_method(42, 'with listener')

remove_listener(my, 'some_method', listener_method)
my.some_method(42, 'without listener')
And the output is:
method: 42 with listener
listener: 42 with listener my_prop
method: 42 without listener