What is the right way to do "loose coupling" in Python?

I wrote some code to get data using pySerial, as below.
My class depends on the Serial class, which doesn't meet the "loose coupling" rule.
Should I use an interface to decouple my class?
Thanks a lot for your instruction.
import serial

class ArduinoConnect:
    def __init__(self):
        pass

    def serial_connect(self, serial_port, serial_baudrate):
        self._serial_port = serial_port
        try:
            self.ser = serial.Serial(
                port=self._serial_port,
                baudrate=serial_baudrate,
                parity=serial.PARITY_NONE,
                stopbits=serial.STOPBITS_ONE,
                bytesize=serial.EIGHTBITS,
            )
        except serial.serialutil.SerialException as e:
            print str(e)

    def serial_disconnect(self):
        self.ser.close()

    def get_quaternion(self, number_of_data=50):
        buff = []
        self.ser.write('q')
        self.ser.write(chr(number_of_data))
        for j in range(number_of_data):
            in_string = self.ser.readline()
            buff_line = in_string.split(",")
            buff_line.pop()
            buff_line = self.hex_to_quaternion(buff_line)
            buff.append(list(buff_line))
        return buff

    def hex_to_quaternion(self, list_of_hex=None):
        # ......
        pass

arduino = ArduinoConnect()
arduino.serial_connect(serial_port="COM5", serial_baudrate=115200)
print arduino.get_quaternion()
arduino.serial_disconnect()
I adjusted my code as recommended.
DI helps separate out the serial handling, and a factory method encapsulates the DI wiring.
Is there anything else I could do to meet the "loose coupling" rule?
Thanks for your help.
import serial

class ArduinoConnect:
    def __init__(self, serial_to_arduino):
        self._serial_to_arduino = serial_to_arduino

    def get_quaternion(self, number_of_data=50):
        buff = []
        self._serial_to_arduino.write('q')
        self._serial_to_arduino.write(chr(number_of_data))
        for j in range(number_of_data):
            in_string = self._serial_to_arduino.readline()
            buff_line = in_string.split(",")
            buff_line.pop()
            buff_line = self.hex_to_quaternion(buff_line)
            buff.append(list(buff_line))
        return buff

    def hex_to_quaternion(self, list_of_hex):
        # ......
        pass

    def __getattr__(self, attr):
        return getattr(self._serial_to_arduino, attr)

class SerialToArduino:
    def __init__(self):
        pass

    def serial_connect(self, serial_port="COM5", serial_baudrate=115200):
        self._serial_port = serial_port
        try:
            self.ser = serial.Serial(
                port=self._serial_port,
                baudrate=serial_baudrate,
                parity=serial.PARITY_NONE,
                stopbits=serial.STOPBITS_ONE,
                bytesize=serial.EIGHTBITS,
            )
        except serial.serialutil.SerialException as e:
            print str(e)

    def serial_disconnect(self):
        self.ser.close()

    def readline(self):
        return self.ser.readline()

    def write(self, data):
        self.ser.write(data=data)

def get_ArduinoConnect():
    'factory method'
    return ArduinoConnect(serial_to_arduino=SerialToArduino())

arduino = get_ArduinoConnect()
arduino.serial_connect(serial_port="COM5", serial_baudrate=115200)
print arduino.get_quaternion()
arduino.serial_disconnect()
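One payoff of this DI structure, as a hedged sketch (FakeSerial and its canned line are made up for illustration, not part of the original code): ArduinoConnect can now be exercised without hardware, because anything with write() and readline() satisfies the implicit interface.

class FakeSerial:
    """Stand-in for SerialToArduino: returns canned lines instead of
    touching a real port."""
    def __init__(self, lines):
        self._lines = iter(lines)
    def write(self, data):
        pass  # outgoing commands are ignored in tests
    def readline(self):
        return next(self._lines)

arduino = ArduinoConnect(serial_to_arduino=FakeSerial(["0x1,0x2,0x3,0x4,\n"]))
# Once hex_to_quaternion is filled in, this runs with no serial port attached:
# print arduino.get_quaternion(number_of_data=1)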

I can think of two possible solutions:
Implement an "adapter" to expose methods to the main class and hide the implementation of Serial, so that your main class avoids a dependency on the concrete class Serial.
Do dependency injection, something like this:
def serial_connect(self, engine, serial_port, serial_baudrate):
Update for 1: I referred to http://en.wikipedia.org/wiki/Adapter_pattern, which is commonly used when you want to separate the concrete implementation from the abstraction. Think of it as the travel plug adapter.
It is particularly useful for languages like Java, with strict interfaces and everything. In Python we don't have an "interface" construct, but you can simulate one using an abstract class:
class AbstractAdapter(object):
    def serial_connect(self, serial_port="COM5", serial_baudrate=115200):
        raise NotImplementedError("Needs implementation")
    # do the same thing for the rest of the methods
Then in ArduinoConnect, you can check for the type:
def __init__(self, serial_to_arduino):
    if not isinstance(serial_to_arduino, AbstractAdapter):
        raise TypeError("Wrong type")
This forces your serial_to_arduino to extend AbstractAdapter, which enforces the implementation of all the abstract methods, hence an adapter.
This might not be the most "pythonic" way to do things, but from an OOP point of view it gives you the highest level of loose coupling (in my opinion).
P.S.: Actually, I think the correct pattern in this case is Strategy; the two are pretty similar in terms of implementation, but they are meant for different purposes. You can read more about patterns like Strategy, Proxy, Command, and Mediator, which are often used to achieve loose coupling.
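For completeness: Python's abc module gives the same enforcement without an isinstance check at every call site. A minimal sketch under that assumption (the method set just mirrors the adapter above):

from abc import ABCMeta, abstractmethod

class SerialAdapter(object):
    __metaclass__ = ABCMeta  # Python 2 spelling; on Python 3 use: class SerialAdapter(metaclass=ABCMeta)

    @abstractmethod
    def serial_connect(self, serial_port="COM5", serial_baudrate=115200):
        pass

    @abstractmethod
    def readline(self):
        pass

    @abstractmethod
    def write(self, data):
        pass

# Instantiating SerialAdapter itself, or a subclass that forgets to
# implement one of these methods, raises TypeError automatically.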

Related

Twisted: cache list of instances

I am currently working through the Twisted developer guides and would like some guidance on extending the use case of one of the provided examples, namely caching instances of objects.
The example code is:
cache_classes.py
from twisted.spread import pb

class MasterDuckPond(pb.Cacheable):
    def __init__(self, ducks):
        self.observers = []
        self.ducks = ducks
    def count(self):
        print "I have [%d] ducks" % len(self.ducks)
    def addDuck(self, duck):
        self.ducks.append(duck)
        for o in self.observers: o.callRemote('addDuck', duck)
    def removeDuck(self, duck):
        self.ducks.remove(duck)
        for o in self.observers: o.callRemote('removeDuck', duck)
    def getStateToCacheAndObserveFor(self, perspective, observer):
        self.observers.append(observer)
        # you should ignore pb.Cacheable-specific state, like self.observers
        return self.ducks # in this case, just a list of ducks
    def stoppedObserving(self, perspective, observer):
        self.observers.remove(observer)

class SlaveDuckPond(pb.RemoteCache):
    # This is a cache of a remote MasterDuckPond
    def count(self):
        return len(self.cacheducks)
    def getDucks(self):
        return self.cacheducks
    def setCopyableState(self, state):
        print " cache - sitting, er, setting ducks"
        self.cacheducks = state
    def observe_addDuck(self, newDuck):
        print " cache - addDuck"
        self.cacheducks.append(newDuck)
    def observe_removeDuck(self, deadDuck):
        print " cache - removeDuck"
        self.cacheducks.remove(deadDuck)

pb.setUnjellyableForClass(MasterDuckPond, SlaveDuckPond)
cache_sender.py
#!/usr/bin/env python
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
from twisted.spread import pb, jelly
from twisted.python import log
from twisted.internet import reactor
from cache_classes import MasterDuckPond

class Sender:
    def __init__(self, pond):
        self.pond = pond
    def phase1(self, remote):
        self.remote = remote
        d = remote.callRemote("takePond", self.pond)
        d.addCallback(self.phase2).addErrback(log.err)
    def phase2(self, response):
        self.pond.addDuck("ugly duckling")
        self.pond.count()
        reactor.callLater(1, self.phase3)
    def phase3(self):
        d = self.remote.callRemote("checkDucks")
        d.addCallback(self.phase4).addErrback(log.err)
    def phase4(self, dummy):
        self.pond.removeDuck("one duck")
        self.pond.count()
        self.remote.callRemote("checkDucks")
        d = self.remote.callRemote("ignorePond")
        d.addCallback(self.phase5)
    def phase5(self, dummy):
        d = self.remote.callRemote("shutdown")
        d.addCallback(self.phase6)
    def phase6(self, dummy):
        reactor.stop()

def main():
    master = MasterDuckPond(["one duck", "two duck"])
    master.count()
    sender = Sender(master)
    factory = pb.PBClientFactory()
    reactor.connectTCP("localhost", 8800, factory)
    deferred = factory.getRootObject()
    deferred.addCallback(sender.phase1)
    reactor.run()

if __name__ == '__main__':
    main()
cache_receiver.py:
#!/usr/bin/env python
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
from twisted.application import service, internet
from twisted.internet import reactor
from twisted.spread import pb
import cache_classes

class Receiver(pb.Root):
    def remote_takePond(self, pond):
        self.pond = pond
        print "got pond:", pond # a DuckPondCache
        self.remote_checkDucks()
    def remote_checkDucks(self):
        print "[%d] ducks: " % self.pond.count(), self.pond.getDucks()
    def remote_ignorePond(self):
        # stop watching the pond
        print "dropping pond"
        # gc causes __del__ causes 'decache' msg causes stoppedObserving
        self.pond = None
    def remote_shutdown(self):
        reactor.stop()

application = service.Application("copy_receiver")
internet.TCPServer(8800, pb.PBServerFactory(Receiver())).setServiceParent(
    service.IServiceCollection(application))
This example seems pretty straightforward, the MasterDuckPond is controlled by the sending side, and the SlaveDuckPond is a cache that tracks changes to the master.
However, how would I go about updating/caching an entire list of instantiated objects?
Don't use PB. The protocol is overly complicated for at least 99% of use-cases (meaning you will have to work a lot harder than necessary to understand, implement, and maintain your project). There are no other implementations of it and there probably never will be (which means you're stuck with Python and Twisted - which is not to say those things are bad, but there are a lot of other things out there that may also be good). The level of maintenance is minimal (so if you find bugs, there's a small chance someone will help you get them fixed but that's about all you can expect - and you may not even get that if the bugs aren't trivial).
Give HTTP a try. It can do a lot. If you combine it with a data format like Cap'n Proto, CBOR, or even JSON, it can do even more.
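A minimal sketch of that suggestion using Twisted's own HTTP support (twisted.web) with JSON; the route and payload here are invented for illustration:

from twisted.web import server, resource
from twisted.internet import reactor
import json

class PondResource(resource.Resource):
    isLeaf = True
    def render_GET(self, request):
        # Serve the pond state as plain JSON instead of PB-cached objects.
        request.setHeader(b"content-type", b"application/json")
        return json.dumps({"ducks": ["one duck", "two duck"]}).encode("utf-8")

reactor.listenTCP(8800, server.Site(PondResource()))
reactor.run()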

dynamically adding a resource to a python coap server with coapthon library

I am trying to build a CoAP server to which I can add a new resource without having to stop the server, recode it, and restart. My server is supposed to host two types of resources, "Sensors (Sens-Me)" and "Actuators (Act-Me)". I want a new Actuator instance to be added to the server when I press the A key, and likewise a Sensor when I press S. Below is my code:
from coapthon.resources.resource import Resource
from coapthon.server.coap import CoAP

class Sensor(Resource):
    def __init__(self, name="Sensor", coap_server=None):
        super(Sensor, self).__init__(name, coap_server, visible=True, observable=True, allow_children=True)
        self.payload = "This is a new sensor"
        self.resource_type = "rt1"
        self.content_type = "application/json"
        self.interface_type = "if1"
        self.var = 0
    def render_GET(self, request):
        self.payload = "new sensor value ::{}".format(str(int(self.var + 1)))
        self.var += 1
        return self

class Actuator(Resource):
    def __init__(self, name="Actuator", coap_server=None):
        super(Actuator, self).__init__(name, coap_server, visible=True, observable=True)
        self.payload = "This is an actuator"
        self.resource_type = "rt1"
    def render_GET(self, request):
        return self

class CoAPServer(CoAP):
    def __init__(self, host, port, multicast=False):
        CoAP.__init__(self, (host, port), multicast)
        self.add_resource('sens-Me/', Sensor())
        self.add_resource('act-Me/', Actuator())
        print "CoAP server started on {}:{}".format(str(host), str(port))
        print self.root.dump()

def main():
    ip = "0.0.0.0"
    port = 5683
    multicast = False
    server = CoAPServer(ip, port, multicast)
    try:
        server.listen(10)
        print "executed after listen"
    except KeyboardInterrupt:
        server.close()

if __name__ == "__main__":
    main()
I am not sure what exactly you want to do.
Is it just to replace a resource on the same route, or to add a new one?
Replace a resource
This is not possible in the current CoAPthon version; see the source:
https://github.com/Tanganelli/CoAPthon/blob/b6983fbf48399bc5687656be55ac5b9cce4f4718/coapthon/server/coap.py#L279
try:
    res = self.root[actual_path]
except KeyError:
    res = None
if res is None:
    if len(paths) != i:
        return False
    resource.path = actual_path
    self.root[actual_path] = resource
Alternatively, you can solve it in the scope of the request: keep a registry of handlers that the resources consult, and swap those handlers on a user input event (a sketch follows below). You still won't be able to add new routes that way.
If you absolutely need that feature, you may request it from the developer or contribute to the project.
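A minimal sketch of that registry idea, reusing the Resource base class from the question (HANDLERS and the route key are hypothetical names):

# The resource stays registered on a fixed route; what changes at runtime
# is the handler it delegates to.
HANDLERS = {'sens-Me/': lambda request: "default sensor payload"}

class RegistrySensor(Resource):
    def __init__(self, name="RegistrySensor", coap_server=None):
        super(RegistrySensor, self).__init__(name, coap_server, visible=True, observable=True)
    def render_GET(self, request):
        # Look up the current handler on every request, so another thread
        # can swap it without touching the route table.
        self.payload = HANDLERS['sens-Me/'](request)
        return self

# later, e.g. from an input-polling thread:
# HANDLERS['sens-Me/'] = lambda request: "new behaviour, same route"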
Add a new resource
I have extended your snippet a little bit.
I have little experience in Python, so I am not sure I've done everything properly, but it works.
There is a separate thread polling user input and adding the same resource each time; add the code you need there.
from coapthon.resources.resource import Resource
from coapthon.server.coap import CoAP
from threading import Thread
import sys

class Sensor(Resource):
    def __init__(self, name="Sensor", coap_server=None):
        super(Sensor, self).__init__(name, coap_server, visible=True, observable=True, allow_children=True)
        self.payload = "This is a new sensor"
        self.resource_type = "rt1"
        self.content_type = "application/json"
        self.interface_type = "if1"
        self.var = 0
    def render_GET(self, request):
        self.payload = "new sensor value ::{}".format(str(int(self.var + 1)))
        self.var += 1
        return self

class Actuator(Resource):
    def __init__(self, name="Actuator", coap_server=None):
        super(Actuator, self).__init__(name, coap_server, visible=True, observable=True)
        self.payload = "This is an actuator"
        self.resource_type = "rt1"
    def render_GET(self, request):
        return self

class CoAPServer(CoAP):
    def __init__(self, host, port, multicast=False):
        CoAP.__init__(self, (host, port), multicast)
        self.add_resource('sens-Me/', Sensor())
        self.add_resource('act-Me/', Actuator())
        print "CoAP server started on {}:{}".format(str(host), str(port))
        print self.root.dump()

def pollUserInput(server):
    while 1:
        user_input = raw_input("Some input please: ")
        print user_input
        server.add_resource('sens-Me2/', Sensor())

def main():
    ip = "0.0.0.0"
    port = 5683
    multicast = False
    server = CoAPServer(ip, port, multicast)
    thread = Thread(target=pollUserInput, args=(server,))
    thread.setDaemon(True)
    thread.start()
    try:
        server.listen(10)
        print "executed after listen"
    except KeyboardInterrupt:
        print server.root.dump()
        server.close()
        sys.exit()

if __name__ == "__main__":
    main()

Callback function does not see correct values in instance

I'm seeing a strange phenomenon in Python with callback functions and handlers.
I use ZMQ to handle my communication and use a stream for the socket. I have the base class:
import multiprocessing
import zmq
from concurrent.futures import ThreadPoolExecutor
from zmq.eventloop import ioloop, zmqstream
from zmq.utils import jsonapi as json

# Types of messages
TYPE_A = 'type_a'
TYPE_B = 'type_b'

class ZmqProcess(multiprocessing.Process):
    def __init__(self):
        super(ZmqProcess, self).__init__()
        self.context = None
        self.loop = None
        self.handle_stream = None
    def setup(self):
        self.context = zmq.Context()
        self.loop = ioloop.IOLoop.instance()
    def send(self, msg_type, msg, host, port):
        sock = zmq.Context().socket(zmq.PAIR)
        sock.connect('tcp://%s:%s' % (host, port))
        sock.send_json([msg_type, msg])
    def stream(self, sock_type, addr):
        sock = self.context.socket(sock_type)
        if isinstance(addr, str):
            addr = addr.split(':')
        host, port = addr if len(addr) == 2 else (addr[0], None)
        if port:
            sock.bind('tcp://%s:%s' % (host, port))
        else:
            port = sock.bind_to_random_port('tcp://%s' % host)
        stream = zmqstream.ZMQStream(sock, self.loop)
        return stream, int(port)

class MessageHandler(object):
    def __init__(self, json_load=-1):
        self._json_load = json_load
        self.pool = ThreadPoolExecutor(max_workers=10)
    def __call__(self, msg):
        i = self._json_load
        msg_type, data = json.loads(msg[i])
        msg[i] = data
        if msg_type.startswith('_'):
            raise AttributeError('%s starts with an "_"' % msg_type)
        getattr(self, msg_type)(*msg)
And I have a class that inherits from it:
import zmq
import zmq_base

class ZmqServerMeta(zmq_base.ZmqProcess):
    def __init__(self, bind_addr, handlers):
        super(ZmqServerMeta, self).__init__()
        self.bind_addr = bind_addr
        self.handlers = handlers
    def setup(self):
        super(ZmqServerMeta, self).setup()
        self.handle_stream, _ = self.stream(zmq.PAIR, self.bind_addr)
        self.handle_stream.on_recv(StreamHandler(self.handle_stream, self.stop,
                                                 self.handlers))
    def run(self):
        self.setup()
        self.loop.start()
    def stop(self):
        self.loop.stop()

class StreamHandler(zmq_base.MessageHandler):
    def __init__(self, handle_stream, stop, handlers):
        super(StreamHandler, self).__init__()
        self._handle_stream = handle_stream
        self._stop = stop
        self._handlers = handlers
    def type_a(self, data):
        if zmq_base.TYPE_A in self._handlers:
            if self._handlers[zmq_base.TYPE_A]:
                for handle in self._handlers[zmq_base.TYPE_A]:
                    self.pool.submit(handle, data)
            else:
                pass
        else:
            pass
    def type_b(self, data):
        if zmq_base.TYPE_B in self._handlers:
            if self._handlers[zmq_base.TYPE_B]:
                for handle in self._handlers[zmq_base.TYPE_B]:
                    self.pool.submit(handle, data)
            else:
                pass
        else:
            pass
    def endit(self):
        self._stop()
Additionally, I have a class that I want to use as storage. And here is where the trouble starts:
import threading
import zmq_server_meta as server
import zmq_base as base

class Storage:
    def __init__(self):
        self.list = []
        self.list_lock = threading.RLock()
        self.zmq_server = None
        self.host = '127.0.0.1'
        self.port = 5432
        self.bind_addr = (self.host, self.port)
    def setup(self):
        handlers = {base.TYPE_A: [self.remove]}
        self.zmq_server = server.ZmqServerMeta(handlers=handlers, bind_addr=self.bind_addr)
        self.zmq_server.start()
    def add(self, data):
        with self.list_lock:
            try:
                self.list.append(data)
            except:
                print "Didn't work"
    def remove(self, msg):
        with self.list_lock:
            try:
                self.list.remove(msg)
            except:
                print "Didn't work"
The idea is that this class stores some global information that it receives.
It is all started in a file to test:
import sys
import time
import storage
import zmq_base as base
import zmq_server_meta as server

def printMsg(msg):
    print msg

store = storage.Storage()
store.setup()

handlers = {base.TYPE_B: [printMsg]}
client = server.ZmqServerMeta(handlers=handlers, bind_addr=('127.0.0.1', 5431))
client.start()

message = "Test"
store.add(message)
client.send(base.TYPE_A, message, '127.0.0.1', 5432)
I simplified it to reduce clutter. Instead of just adding the element directly, a message is usually sent and a response comes back. The response the client sends should be processed by the correct callback, remove(), which should remove an element from the list. The problem is that remove() sees an empty list, although there should be an element in it. If I check from the testing file, I can see the element after it was added, and if I call remove() from there, I see a non-empty list and can remove the element. My question is: why does the callback see an empty list, and how can I make sure it sees the correct elements?
Kind regards
Patrick
I believe the problem lies in the fact that the ZmqProcess class inherits from multiprocessing.Process. Multiprocessing does not allow sharing objects among different processes, except through a shared memory map using Value or Array (see the documentation: https://docs.python.org/3/library/multiprocessing.html#sharing-state-between-processes).
If you want to use your custom object, you can use a server process / proxy object, which is described on the same page of the documentation.
So you can, for instance, create a manager in the __init__ method of the Storage class with self.manager = Manager(), and then set self.list = self.manager.list(). This should do the trick.
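A minimal, self-contained sketch of that proxy-list mechanism (separate from the ZMQ code above, just to show that a mutation made in one process is visible in another):

from multiprocessing import Manager, Process

def worker(shared):
    # Runs in a child process; the append is forwarded to the manager's
    # server process, so the parent sees it too.
    shared.append("added in child process")

if __name__ == '__main__':
    manager = Manager()
    shared = manager.list()  # proxy list, shareable across processes
    p = Process(target=worker, args=(shared,))
    p.start()
    p.join()
    print(list(shared))  # ['added in child process']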

Access method variable from other method python

I have a problem accessing client.close() in readholdingregisters() from closeconnection() in the code below:
class EnergyMeter:
    def __init__(self, model, gatewayipaddress, port, deviceid):
        self.model = model
        self.gatewayipaddress = gatewayipaddress
        self.port = port
        self.deviceid = deviceid

    def readholdingregisters(self, startingregister, numberofregisters):
        from pymodbus3.client.sync import ModbusTcpClient as ModbusClient
        client = ModbusClient(self.gatewayipaddress, port=self.port)
        client.connect()
        if #some code here:
            #some other code here
            return concatResult
        else:
            return otherResult
Here I want to close the connection opened in readholdingregisters():
def closeconnection(self):
    EnergyMeter.readholdingregisters().client.close()  # ??????????????????
Can you please give me a good solution for how to do it?
The usual approach is to make client a data member of the class:
self.client = ModbusClient(self.gatewayipaddress, port=self.port)
self.client.connect()
#some code here

def closeconnection(self):
    self.readholdingregisters()
    self.client.close()
However, if you want to access the method through the class, as in EnergyMeter.readholdingregisters(), then it should be declared as a staticmethod or classmethod, and the method should return the client instance.
from pymodbus3.client.sync import ModbusTcpClient as ModbusClient

class EnergyMeter:
    def __init__(self, model, gatewayipaddress, port, deviceid):
        self.model = model
        self.gatewayipaddress = gatewayipaddress
        self.port = port
        self.deviceid = deviceid
        self.client = ModbusClient(self.gatewayipaddress, port=self.port)
        self.client.connect()

    def readholdingregisters(self, startingregister, numberofregisters):
        if #some code here:
            #some other code here
            return concatResult
        else:
            return otherResult
Then, to close, you call energy_meter.client.close() for whatever energy_meter you want to close.
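If the real goal is just to guarantee that close() runs, a context manager is another option. This is a sketch, not from the original answer, assuming only the connect()/close() calls shown above (the register-reading logic stays elided):

from pymodbus3.client.sync import ModbusTcpClient as ModbusClient

class EnergyMeter(object):
    def __init__(self, gatewayipaddress, port):
        self.gatewayipaddress = gatewayipaddress
        self.port = port
        self.client = None
    def __enter__(self):
        self.client = ModbusClient(self.gatewayipaddress, port=self.port)
        self.client.connect()
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        # Runs even if the body of the with-block raises.
        self.client.close()

# usage:
# with EnergyMeter("192.168.0.10", port=502) as meter:
#     ... read registers via meter.client ...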

Python Observer Pattern: Examples, Tips? [closed]

Are there any exemplary examples of the GoF Observer implemented in Python? I have a bit of code which currently has debugging code laced through the key class (generating messages to stderr if a magic environment variable is set). Additionally, the class has an interface for incrementally returning results as well as storing them (in memory) for post-processing. (The class itself is a job manager for concurrently executing commands on remote machines over ssh.)
Currently the usage of the class looks something like:
job = SSHJobMan(hostlist, cmd)
job.start()
while not job.done():
    for each in job.poll():
        incrementally_process(job.results[each])
    time.sleep(0.2) # or other more useful work
post_process(job.results)
An alternative usage model is:
job = SSHJobMan(hostlist, cmd)
job.wait() # implicitly performs a start()
process(job.results)
This all works fine for the current utility. However, it does lack flexibility. For example, I currently support a brief output format or a progress bar as incremental results, and I also support brief, complete, and "merged message" outputs for the post_process() function.
However, I'd like to support multiple results/output streams (progress bar to the terminal, debugging and warnings to a log file, outputs from successful jobs to one file/directory, error messages and other results from non-successful jobs to another, etc).
This sounds like a situation that calls for Observer ... have instances of my class accept registration from other objects and call them back with specific types of events as they occur.
I'm looking at PyPubSub since I saw several references to that in SO related questions. I'm not sure I'm ready to add the external dependency to my utility but I could see value in using their interface as a model for mine if that's going to make it easier for others to use. (The project is intended as both a standalone command line utility and a class for writing other scripts/utilities).
In short I know how to do what I want ... but there are numerous ways to accomplish it. I want suggestions on what's most likely to work for other users of the code in the long run.
The code itself is at: classh.
However it does lack flexibility.
Well... actually, this looks like a good design to me if an asynchronous API is what you want. It usually is. Maybe all you need is to switch from stderr to Python's logging module, which has a sort of publish/subscribe model of its own, what with Logger.addHandler() and so on.
If you do want to support observers, my advice is to keep it simple. You really only need a few lines of code.
class Event(object):
    pass

class Observable(object):
    def __init__(self):
        self.callbacks = []
    def subscribe(self, callback):
        self.callbacks.append(callback)
    def fire(self, **attrs):
        e = Event()
        e.source = self
        for k, v in attrs.items():
            setattr(e, k, v)
        for fn in self.callbacks:
            fn(e)
Your Job class can subclass Observable. When something of interest happens, call self.fire(type="progress", percent=50) or the like.
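A hypothetical usage sketch for the Observable above (Job and the event fields are invented):

class Job(Observable):
    def run(self):
        self.fire(type="progress", percent=50)
        self.fire(type="done")

def on_event(e):
    # Every event carries .source plus whatever keywords fire() was given.
    print(getattr(e, "type", None), getattr(e, "percent", None))

job = Job()
job.subscribe(on_event)
job.run()
# prints: progress 50
#         done None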
I think people in the other answers overdo it. You can easily achieve events in Python with fewer than 15 lines of code.
You simply have two classes: Event and Observer. Any class that wants to listen for an event needs to inherit from Observer and register to listen (observe) for a specific event. When an Event is instantiated and fired, all observers listening to that event will run the specified callback functions.
class Observer():
    _observers = []
    def __init__(self):
        self._observers.append(self)
        self._observables = {}
    def observe(self, event_name, callback):
        self._observables[event_name] = callback

class Event():
    def __init__(self, name, data, autofire = True):
        self.name = name
        self.data = data
        if autofire:
            self.fire()
    def fire(self):
        for observer in Observer._observers:
            if self.name in observer._observables:
                observer._observables[self.name](self.data)
Example:
class Room(Observer):
    def __init__(self):
        print("Room is ready.")
        Observer.__init__(self) # Observer's init needs to be called
    def someone_arrived(self, who):
        print(who + " has arrived!")

room = Room()
room.observe('someone arrived', room.someone_arrived)
Event('someone arrived', 'Lenard')
Output:
Room is ready.
Lenard has arrived!
A few more approaches...
Example: the logging module
Maybe all you need is to switch from stderr to Python's logging module, which has a powerful publish/subscribe model.
It's easy to get started producing log records.
# producer
import logging
log = logging.getLogger("myjobs") # that's all the setup you need

class MyJob(object):
    def run(self):
        log.info("starting job")
        n = 10
        for i in range(n):
            log.info("%.1f%% done" % (100.0 * i / n))
        log.info("work complete")
On the consumer side there's a bit more work. Unfortunately configuring logger output takes, like, 7 whole lines of code to do. ;)
# consumer
import myjobs, sys, logging
if user_wants_log_output:
    ch = logging.StreamHandler(sys.stderr)
    ch.setLevel(logging.INFO)
    formatter = logging.Formatter(
        "%(asctime)s - %(name)s - %(levelname)s - %(message)s")
    ch.setFormatter(formatter)
    myjobs.log.addHandler(ch)
    myjobs.log.setLevel(logging.INFO)

myjobs.MyJob().run()
On the other hand there's an amazing amount of stuff in the logging package. If you ever need to send log data to a rotating set of files, an email address, and the Windows Event Log, you're covered.
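For instance, a rotating set of log files is only a few more lines (a sketch; the filename and size limits are arbitrary):

import logging, logging.handlers

fh = logging.handlers.RotatingFileHandler(
    "myjobs.log", maxBytes=1000000, backupCount=5)  # keep ~5 MB of history
fh.setLevel(logging.INFO)
logging.getLogger("myjobs").addHandler(fh)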
Example: simplest possible observer
But you don't need to use any library at all. An extremely simple way to support observers is to call a method that does nothing.
# producer
class MyJob(object):
    def on_progress(self, pct):
        """Called when progress is made. pct is the percent complete.
        By default this does nothing. The user may override this method
        or even just assign to it."""
        pass
    def run(self):
        n = 10
        for i in range(n):
            self.on_progress(100.0 * i / n)
        self.on_progress(100.0)
# consumer
import sys, myjobs
job = myjobs.MyJob()
job.on_progress = lambda pct: sys.stdout.write("%.1f%% done\n" % pct)
job.run()
Sometimes instead of writing a lambda, you can just say job.on_progress = progressBar.update, which is nice.
This is about as simple as it gets. One drawback is that it doesn't naturally support multiple listeners subscribing to the same events.
Example: C#-like events
With a bit of support code, you can get C#-like events in Python. Here's the code:
# glue code
class event(object):
    def __init__(self, func):
        self.__doc__ = func.__doc__
        self._key = ' ' + func.__name__
    def __get__(self, obj, cls):
        try:
            return obj.__dict__[self._key]
        except KeyError:
            be = obj.__dict__[self._key] = boundevent()
            return be

class boundevent(object):
    def __init__(self):
        self._fns = []
    def __iadd__(self, fn):
        self._fns.append(fn)
        return self
    def __isub__(self, fn):
        self._fns.remove(fn)
        return self
    def __call__(self, *args, **kwargs):
        for f in self._fns[:]:
            f(*args, **kwargs)
The producer declares the event using a decorator:
# producer
class MyJob(object):
    @event
    def progress(pct):
        """Called when progress is made. pct is the percent complete."""
    def run(self):
        n = 10
        for i in range(n+1):
            self.progress(100.0 * i / n)

# consumer
import sys, myjobs
job = myjobs.MyJob()
job.progress += lambda pct: sys.stdout.write("%.1f%% done\n" % pct)
job.run()
This works exactly like the "simple observer" code above, but you can add as many listeners as you like using +=. (Unlike C#, there are no event handler types, you don't have to new EventHandler(foo.bar) when subscribing to an event, and you don't have to check for null before firing the event. Like C#, events do not squelch exceptions.)
How to choose
If logging does everything you need, use that. Otherwise do the simplest thing that works for you. The key thing to note is that you don't need to take on a big external dependency.
How about an implementation where objects aren't kept alive just because they're observing something? Below please find an implementation of the observer pattern with the following features:
Usage is pythonic. To add an observer to a bound method .bar of instance foo, just do foo.bar.addObserver(observer).
Observers are not kept alive by virtue of being observers. In other words, the observer code uses no strong references.
No sub-classing necessary (descriptors ftw).
Can be used with unhashable types.
Can be used as many times as you want in a single class.
(bonus) As of today the code exists in a proper downloadable, installable package on github.
Here's the code (the github package or PyPI package have the most up to date implementation):
import weakref
import functools

class ObservableMethod(object):
    """
    A proxy for a bound method which can be observed.
    I behave like a bound method, but other bound methods can subscribe to be
    called whenever I am called.
    """
    def __init__(self, obj, func):
        self.func = func
        functools.update_wrapper(self, func)
        self.objectWeakRef = weakref.ref(obj)
        self.callbacks = {}  # observing object ID -> weak ref, methodNames

    def addObserver(self, boundMethod):
        """
        Register a bound method to observe this ObservableMethod.
        The observing method will be called whenever this ObservableMethod is
        called, and with the same arguments and keyword arguments. If a
        boundMethod has already been registered as a callback, trying to add
        it again does nothing. In other words, there is no way to sign up an
        observer to be called back multiple times.
        """
        obj = boundMethod.__self__
        ID = id(obj)
        if ID in self.callbacks:
            s = self.callbacks[ID][1]
        else:
            wr = weakref.ref(obj, Cleanup(ID, self.callbacks))
            s = set()
            self.callbacks[ID] = (wr, s)
        s.add(boundMethod.__name__)

    def discardObserver(self, boundMethod):
        """
        Un-register a bound method.
        """
        obj = boundMethod.__self__
        if id(obj) in self.callbacks:
            self.callbacks[id(obj)][1].discard(boundMethod.__name__)

    def __call__(self, *arg, **kw):
        """
        Invoke the method which I proxy, and all of its callbacks.
        The callbacks are called with the same *arg and **kw as the main
        method.
        """
        result = self.func(self.objectWeakRef(), *arg, **kw)
        for ID in self.callbacks:
            wr, methodNames = self.callbacks[ID]
            obj = wr()
            for methodName in methodNames:
                getattr(obj, methodName)(*arg, **kw)
        return result

    @property
    def __self__(self):
        """
        Get a strong reference to the object owning this ObservableMethod.
        This is needed so that ObservableMethod instances can observe other
        ObservableMethod instances.
        """
        return self.objectWeakRef()

class ObservableMethodDescriptor(object):
    def __init__(self, func):
        """
        To each instance of the class using this descriptor, I associate an
        ObservableMethod.
        """
        self.instances = {}  # Instance id -> (weak ref, ObservableMethod)
        self._func = func

    def __get__(self, inst, cls):
        if inst is None:
            return self
        ID = id(inst)
        if ID in self.instances:
            wr, om = self.instances[ID]
            if not wr():
                msg = "Object id %d should have been cleaned up" % (ID,)
                raise RuntimeError(msg)
        else:
            wr = weakref.ref(inst, Cleanup(ID, self.instances))
            om = ObservableMethod(inst, self._func)
            self.instances[ID] = (wr, om)
        return om

    def __set__(self, inst, val):
        raise RuntimeError("Assigning to ObservableMethod not supported")

def event(func):
    return ObservableMethodDescriptor(func)

class Cleanup(object):
    """
    I remove elements from a dict whenever I'm called.
    Use me as a weakref.ref callback to remove an object's id from a dict
    when that object is garbage collected.
    """
    def __init__(self, key, d):
        self.key = key
        self.d = d
    def __call__(self, wr):
        del self.d[self.key]
To use this we just decorate methods we want to make observable with @event. Here's an example:
class Foo(object):
    def __init__(self, name):
        self.name = name

    @event
    def bar(self):
        print("%s called bar" % (self.name,))

    def baz(self):
        print("%s called baz" % (self.name,))

a = Foo('a')
b = Foo('b')
a.bar.addObserver(b.bar)
a.bar()
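A follow-up sketch (not from the original answer) of the weak-reference behaviour: dropping the observer is enough to unsubscribe it.

# a.bar() above printed both "a called bar" and "b called bar".
del b       # drop the only strong reference to b
a.bar()     # now prints only "a called bar" (CPython frees b immediately,
            # and the weakref Cleanup removes b from the callbacks dict)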
From wikipedia:
from collections import defaultdict

class Observable(defaultdict):
    def __init__(self):
        defaultdict.__init__(self, object)
    def emit(self, *args):
        '''Pass parameters to all observers and update states.'''
        for subscriber in self:
            response = subscriber(*args)
            self[subscriber] = response
    def subscribe(self, subscriber):
        '''Add a new subscriber to self.'''
        self[subscriber]
    def stat(self):
        '''Return a tuple containing the state of each observer.'''
        return tuple(self.values())
The Observable is used like this:

myObservable = Observable()

# subscribe some inlined functions.
# myObservable[lambda x, y: x * y] would also work here.
myObservable.subscribe(lambda x, y: x * y)
myObservable.subscribe(lambda x, y: float(x) / y)
myObservable.subscribe(lambda x, y: x + y)
myObservable.subscribe(lambda x, y: x - y)

# emit parameters to each observer
myObservable.emit(6, 2)

# get updated values, in subscription order on Python 3.7+
myObservable.stat() # returns: (12, 3.0, 8, 4)
Based on Jason's answer, I implemented the C#-like events example as a fully-fledged python module including documentation and tests. I love fancy pythonic stuff :)
So, if you want some ready-to-use solution, you can just use the code on github.
Example: twisted log observers
To register an observer yourCallable() (a callable that accepts a dictionary) to receive all log events (in addition to any other observers):
twisted.python.log.addObserver(yourCallable)
Example: complete producer/consumer example
From Twisted-Python mailing list:
#!/usr/bin/env python
"""Serve as a sample implementation of a twisted producer/consumer
system, with a simple TCP server which asks the user how many random
integers they want, and it sends the result set back to the user, one
result per line."""

import random

from zope.interface import implements
from twisted.internet import interfaces, reactor
from twisted.internet.protocol import Factory
from twisted.protocols.basic import LineReceiver

class Producer:
    """Send back the requested number of random integers to the client."""
    implements(interfaces.IPushProducer)

    def __init__(self, proto, cnt):
        self._proto = proto
        self._goal = cnt
        self._produced = 0
        self._paused = False

    def pauseProducing(self):
        """When we've produced data too fast, pauseProducing() will be
        called (reentrantly from within resumeProducing's transport.write
        method, most likely), so set a flag that causes production to pause
        temporarily."""
        self._paused = True
        print('pausing connection from %s' % (self._proto.transport.getPeer()))

    def resumeProducing(self):
        self._paused = False
        while not self._paused and self._produced < self._goal:
            next_int = random.randint(0, 10000)
            self._proto.transport.write('%d\r\n' % (next_int))
            self._produced += 1
        if self._produced == self._goal:
            self._proto.transport.unregisterProducer()
            self._proto.transport.loseConnection()

    def stopProducing(self):
        pass

class ServeRandom(LineReceiver):
    """Serve up random data."""
    def connectionMade(self):
        print('connection made from %s' % (self.transport.getPeer()))
        self.transport.write('how many random integers do you want?\r\n')

    def lineReceived(self, line):
        cnt = int(line.strip())
        producer = Producer(self, cnt)
        self.transport.registerProducer(producer, True)
        producer.resumeProducing()

    def connectionLost(self, reason):
        print('connection lost from %s' % (self.transport.getPeer()))

factory = Factory()
factory.protocol = ServeRandom
reactor.listenTCP(1234, factory)
print('listening on 1234...')
reactor.run()
OP asks "Are there any exemplary examples of the GoF Observer implemented in Python?"
This is an example in Python 3.7. This Observable class meets the requirement of creating a relationship between one observable and many observers while remaining independent of their structure.
from functools import partial
from dataclasses import dataclass, field
import sys
from typing import List, Callable

@dataclass
class Observable:
    observers: List[Callable] = field(default_factory=list)

    def register(self, observer: Callable):
        self.observers.append(observer)

    def deregister(self, observer: Callable):
        self.observers.remove(observer)

    def notify(self, *args, **kwargs):
        for observer in self.observers:
            observer(*args, **kwargs)

def usage_demo():
    observable = Observable()

    # Register two anonymous observers using lambda.
    observable.register(
        lambda *args, **kwargs: print(f'Observer 1 called with args={args}, kwargs={kwargs}'))
    observable.register(
        lambda *args, **kwargs: print(f'Observer 2 called with args={args}, kwargs={kwargs}'))

    # Create an observer function, register it, then deregister it.
    def callable_3():
        print('Observer 3 NOT called.')

    observable.register(callable_3)
    observable.deregister(callable_3)

    # Create a general purpose observer function and register four observers.
    def callable_x(*args, **kwargs):
        print(f'{args[0]} observer called with args={args}, kwargs={kwargs}')

    for gui_field in ['Form field 4', 'Form field 5', 'Form field 6', 'Form field 7']:
        observable.register(partial(callable_x, gui_field))

    observable.notify('test')

if __name__ == '__main__':
    sys.exit(usage_demo())
A functional approach to observer design:
from functools import wraps

def add_listener(obj, method_name, listener):
    # Get any existing listeners
    listener_attr = method_name + '_listeners'
    listeners = getattr(obj, listener_attr, None)

    # If this is the first listener, then set up the method wrapper
    if not listeners:
        listeners = [listener]
        setattr(obj, listener_attr, listeners)

        # Get the object's method
        method = getattr(obj, method_name)

        @wraps(method)
        def method_wrapper(*args, **kwags):
            method(*args, **kwags)
            for l in listeners:
                l(obj, *args, **kwags) # Listener also has object argument

        # Replace the original method with the wrapper
        setattr(obj, method_name, method_wrapper)
    else:
        # Event is already set up, so just add another listener
        listeners.append(listener)

def remove_listener(obj, method_name, listener):
    # Get any existing listeners
    listener_attr = method_name + '_listeners'
    listeners = getattr(obj, listener_attr, None)

    if listeners:
        # Remove the listener
        next((listeners.pop(i)
              for i, l in enumerate(listeners)
              if l == listener),
             None)

        # If this was the last listener, then remove the method wrapper
        if not listeners:
            method = getattr(obj, method_name)
            delattr(obj, listener_attr)
            setattr(obj, method_name, method.__wrapped__)
These methods can then be used to add a listener to any class method. For example:
class MyClass(object):
    def __init__(self, prop):
        self.prop = prop
    def some_method(self, num, string):
        print('method:', num, string)

def listener_method(obj, num, string):
    print('listener:', num, string, obj.prop)

my = MyClass('my_prop')

add_listener(my, 'some_method', listener_method)
my.some_method(42, 'with listener')

remove_listener(my, 'some_method', listener_method)
my.some_method(42, 'without listener')
And the output is:
method: 42 with listener
listener: 42 with listener my_prop
method: 42 without listener
