Best pattern for cross-referencing two objects in Python

My program uses two different communication channels, and in some cases I need to publish events received on one channel to the other. So the basic structure is something like this:
--fileA.py--

class ChannelA:
    def __init__(self, data_store):
        self.store = data_store

    def read(self):
        # Listen for messages on this channel and publish them to another channel.
        self.store.update()

    def publish(self):
        # Send a message over this channel
        self.store.update()

--fileB.py--

class ChannelB:
    def __init__(self, data_store):
        self.store = data_store

    def read(self):
        # Listen for messages on this channel and publish them to another channel.
        self.store.update()

    def publish(self):
        # Send a message over this channel
        self.store.update()

--main.py--

from fileStore import DataStore
from fileA import ChannelA
from fileB import ChannelB

def main():
    data = DataStore()
    channel_A = ChannelA(data)
    channel_A.read()
    channel_B = ChannelB(data)
    channel_B.read()

if __name__ == '__main__':
    main()
What I would like to achieve in the end is that, for example, inside ChannelA I could call ChannelB.publish(), and vice versa.
I could of course pass the ChannelA instance to the ChannelB constructor as a parameter, but I can't do the same for ChannelA, since ChannelB has not been created yet at that point:
class ChannelB:
    def __init__(self, data_store, channel):
        self.store = data_store
        self.channel = channel
One simple way of doing this would be to initialize both communication channels inside a single class and call the methods there, but that is something I would like to avoid if possible.
Another simple way of solving this would be, for example, to define both instances at module level in main and later access them via deferred imports:
--main.py--

from fileStore import DataStore
from fileA import ChannelA
from fileB import ChannelB

def publish_A():
    channel_A.publish()

def publish_B():
    channel_B.publish()

data = DataStore()
channel_A = ChannelA(data)
channel_B = ChannelB(data)

def main():
    channel_A.read()
    channel_B.read()

if __name__ == '__main__':
    main()

--fileA.py--

...
def publish(self):
    # Deferred import to avoid a circular import at module load time
    import main
    main.publish_B()
    self.store.update()
...
My question here is, what is the best or preferred way to solve such cases in Python?

So what I ended up doing was the following:
--fileA.py--

class ChannelA:
    def __init__(self, data_store):
        self.store = data_store
        self.publish_other = None

    def on_publish_other(self, callback):
        self.publish_other = callback

    def read(self):
        # Listen for messages on this channel and publish them to another channel.
        self.store.update()

    def publish(self):
        # Send a message over this channel
        if self.publish_other:
            self.publish_other()
        self.store.update()

--main.py--

from fileStore import DataStore
from fileA import ChannelA
from fileB import ChannelB

def main():
    data = DataStore()
    channel_A = ChannelA(data)
    channel_B = ChannelB(data)
    channel_A.on_publish_other(channel_B.publish)
    channel_A.read()
    channel_B.read()

if __name__ == '__main__':
    main()
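For reference, the same callback idea can be generalized into a small publish/subscribe helper when more than two channels need to exchange events. The EventBus class, the topic names, and the constructor change sketched below are illustrative assumptions, not part of the program above:

from collections import defaultdict

class EventBus:
    """Minimal publish/subscribe helper (illustrative sketch)."""

    def __init__(self):
        self._subscribers = defaultdict(list)

    def subscribe(self, topic, callback):
        self._subscribers[topic].append(callback)

    def publish(self, topic, *args, **kwargs):
        for callback in self._subscribers[topic]:
            callback(*args, **kwargs)

# Hypothetical wiring in main.py: each channel would take the bus as a
# constructor argument and publish to a topic instead of calling a
# hard-coded peer.
# bus = EventBus()
# channel_A = ChannelA(data, bus)
# channel_B = ChannelB(data, bus)
# bus.subscribe('from_A', channel_B.publish)
# bus.subscribe('from_B', channel_A.publish)

Each channel then depends only on the bus, so the construction order of the channels no longer matters.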

Related

Background task with asyncio

I have one class produceMessages() that keeps feeding messages to its attribute message_logs.
I want to process these messages in the other class, processMessages(). However, the code in processMessages() won't execute until produceMessages() is done, which is never, as I want these messages to keep coming forever.
Looking at the documentation, I've found that the asyncio library might help me, but I'm struggling to get the example below to work:
This is first_file.py
import asyncio
import random

class produceMessages():
    def __init__(self, timeout=10):
        self.timeout = timeout
        self.message_logs = []

    async def run(self):
        while True:
            self.message_logs.append(random.uniform(0, 1))
            await asyncio.sleep(self.timeout)
This is second_file.py
import first_file
import asyncio
import time

class processMessages():
    def __init__(self):
        self.producer = first_file.produceMessages()
        asyncio.run(self.producer.run())

    def print_logs(self):
        print(self.producer.message_logs)
        time.sleep(1)

x = processMessages()
x.print_logs()
How can I make this work? Thanks
I would recommend you try the threading library. This is how I would approach it:
import first_file
import asyncio
import threading
import time

class processMessages():
    def __init__(self):
        self.test = first_file.produceMessages()
        # Run the producer's coroutine in a daemon thread with its own event
        # loop so it keeps collecting messages without blocking the rest.
        self.t = threading.Thread(target=lambda: asyncio.run(self.test.run()),
                                  daemon=True)
        self.t.start()
        self.t2 = threading.Thread(target=self.print_logs)

    def print_logs(self):
        print(self.test.message_logs)
        time.sleep(1)

x = processMessages()
x.t2.start()
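As an alternative to threads, the producer and the consumer can also share a single asyncio event loop. A minimal sketch under that assumption; the consume() coroutine and its one-second polling interval are illustrative, not part of the question:

import asyncio
import first_file

async def consume(producer):
    # Periodically inspect the logs collected so far.
    while True:
        print(producer.message_logs)
        await asyncio.sleep(1)

async def main():
    producer = first_file.produceMessages(timeout=1)
    # Schedule the producer as a background task instead of awaiting it,
    # so consume() can run concurrently on the same event loop.
    asyncio.create_task(producer.run())
    await consume(producer)

asyncio.run(main())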

Assign Locust task sets programmatically

I am trying to assign the Locust tasks dynamically based on the host value passed through the UI. In this example, if the host is passed in as "hello", the test should run the hello task; otherwise it should run the world task.
from locust import HttpUser, TaskSet, task, events

class RandomTask1(TaskSet):
    @task(1)
    def something(self):
        print("Hello!")

class RandomTask2(TaskSet):
    @task(1)
    def something(self):
        print("World!")

class LoadTestUser(HttpUser):
    def on_start(self):
        host_config = self.host
        if host_config == "hello":
            tasks = {RandomTask1: 1}
        else:
            tasks = {RandomTask2: 1}
The example above does not work and I get the following error:
Exception: No tasks defined on LoadTestUser. use the @task decorator or set the tasks property of the User (or mark it as abstract = True if you only intend to subclass it)
Any idea how I can achieve something like this? I have simplified this for the example, but for all intents and purposes, let's assume that the locust instance is already running and cannot be stopped or restarted and the tasks need to be assigned dynamically.
Edit:
Tried doing this:
class LoadTestUser(HttpUser):
    def on_start(self):
        if self.host == "hello":
            self.tasks = {HelloTask: 1}
        else:
            self.tasks = {WorldTask: 1}

    @task
    def nothing(self):
        pass

class HelloTask(TaskSet):
    @task
    def something(self):
        print("Hello")

class WorldTask(TaskSet):
    @task
    def something(self):
        print("World")
Now I see the following error:
Traceback (most recent call last):
  File "/Users/user/project/venv/lib/python3.8/site-packages/locust/user/task.py", line 285, in run
    self.schedule_task(self.get_next_task())
  File "/Users/user/project/venv/lib/python3.8/site-packages/locust/user/task.py", line 420, in get_next_task
    return random.choice(self.user.tasks)
  File "/Users/user/opt/anaconda3/lib/python3.8/random.py", line 291, in choice
    return seq[i]
KeyError: 0
Create a single task and put the logic for what you want it to run inside that task.
class LoadTestUser(HttpUser):
    def something1(self):
        print("Hello!")

    def something2(self):
        print("World!")

    @task
    def task_logic(self):
        if self.host == "hello":
            self.something1()
        else:
            self.something2()
However, you can also just address the error you're getting directly. You need to have a task defined on the class even if you intend to overwrite or change the tasks with your TaskSets. There's an example in the documentation; just add a task containing only pass so it doesn't do anything, and then your overrides should work.
class LoadTestUser(HttpUser):
    def on_start(self):
        host_config = self.host
        if host_config == "hello":
            self.tasks = {RandomTask1: 1}
        else:
            self.tasks = {RandomTask2: 1}

    @task
    def nothing(self):
        pass
EDIT:
This should work, but it looks like there may be a bug in the current version of Locust where it only accepts a dictionary for tasks when Locust first starts and then only accepts a list afterward. Until that's fixed, the example in the other answer works.
import sys
from locust import HttpUser, TaskSet, task, events

class LoadTestUser(HttpUser):
    def locust_class(self, name):
        module = sys.modules[__name__]
        return getattr(module, f"{name.capitalize()}Task")

    def get_weighted_tasks(self, task_list):
        new_tasks = []
        for item in task_list:
            if "locust_task_weight" in dir(item):
                for i in range(item.locust_task_weight):
                    new_tasks.append(item)
        return new_tasks

    def get_locust_tasks(self, cls):
        tasks = []
        for maybe_task in cls.__dict__.values():
            if hasattr(maybe_task, "locust_task_weight"):
                tasks.append(maybe_task)
        return tasks

    def on_start(self):
        task_cls = self.locust_class(self.host)
        task_list = self.get_locust_tasks(task_cls)
        self.tasks = self.get_weighted_tasks(task_list)

    @task(1)
    def nothing(self):
        pass

class HelloTask(TaskSet):
    @task(1)
    def something(self):
        print("Hello")

    @task(100)
    def something_else(self):
        print("hello")

class WorldTask(TaskSet):
    @task(1)
    def something(self):
        print("World")

    @task(10)
    def something_else(self):
        print("world")

Callback function does not see correct values in instance

I'm seeing a strange phenomenon in Python with callback functions and handlers.
I use ZMQ to handle my communication and use a stream for the socket. I have the base class:
import multiprocessing
import zmq
from concurrent.futures import ThreadPoolExecutor
from zmq.eventloop import ioloop, zmqstream
from zmq.utils import jsonapi as json

# Types of messages
TYPE_A = 'type_a'
TYPE_B = 'type_b'

class ZmqProcess(multiprocessing.Process):
    def __init__(self):
        super(ZmqProcess, self).__init__()
        self.context = None
        self.loop = None
        self.handle_stream = None

    def setup(self):
        self.context = zmq.Context()
        self.loop = ioloop.IOLoop.instance()

    def send(self, msg_type, msg, host, port):
        sock = zmq.Context().socket(zmq.PAIR)
        sock.connect('tcp://%s:%s' % (host, port))
        sock.send_json([msg_type, msg])

    def stream(self, sock_type, addr):
        sock = self.context.socket(sock_type)
        if isinstance(addr, str):
            addr = addr.split(':')
        host, port = addr if len(addr) == 2 else (addr[0], None)
        if port:
            sock.bind('tcp://%s:%s' % (host, port))
        else:
            port = sock.bind_to_random_port('tcp://%s' % host)
        stream = zmqstream.ZMQStream(sock, self.loop)
        return stream, int(port)

class MessageHandler(object):
    def __init__(self, json_load=-1):
        self._json_load = json_load
        self.pool = ThreadPoolExecutor(max_workers=10)

    def __call__(self, msg):
        i = self._json_load
        msg_type, data = json.loads(msg[i])
        msg[i] = data
        if msg_type.startswith('_'):
            raise AttributeError('%s starts with an "_"' % msg_type)
        getattr(self, msg_type)(*msg)
And I have a class that inherits from it:
import zmq
import zmq_base

class ZmqServerMeta(zmq_base.ZmqProcess):
    def __init__(self, bind_addr, handlers):
        super(ZmqServerMeta, self).__init__()
        self.bind_addr = bind_addr
        self.handlers = handlers

    def setup(self):
        super(ZmqServerMeta, self).setup()
        self.handle_stream, _ = self.stream(zmq.PAIR, self.bind_addr)
        self.handle_stream.on_recv(StreamHandler(self.handle_stream, self.stop,
                                                 self.handlers))

    def run(self):
        self.setup()
        self.loop.start()

    def stop(self):
        self.loop.stop()

class StreamHandler(zmq_base.MessageHandler):
    def __init__(self, handle_stream, stop, handlers):
        super(StreamHandler, self).__init__()
        self._handle_stream = handle_stream
        self._stop = stop
        self._handlers = handlers

    def type_a(self, data):
        if zmq_base.TYPE_A in self._handlers:
            if self._handlers[zmq_base.TYPE_A]:
                for handle in self._handlers[zmq_base.TYPE_A]:
                    self.pool.submit(handle, data)
            else:
                pass
        else:
            pass

    def type_b(self, data):
        if zmq_base.TYPE_B in self._handlers:
            if self._handlers[zmq_base.TYPE_B]:
                for handle in self._handlers[zmq_base.TYPE_B]:
                    self.pool.submit(handle, data)
            else:
                pass
        else:
            pass

    def endit(self):
        self._stop()
Additionally, I have a class that I want to use as storage. And here is where the trouble starts:
import threading
import zmq_server_meta as server
import zmq_base as base

class Storage:
    def __init__(self):
        self.list = []
        self.list_lock = threading.RLock()
        self.zmq_server = None
        self.host = '127.0.0.1'
        self.port = 5432
        self.bind_addr = (self.host, self.port)

    def setup(self):
        handlers = {base.TYPE_A: [self.remove]}
        self.zmq_server = server.ZmqServerMeta(handlers=handlers, bind_addr=self.bind_addr)
        self.zmq_server.start()

    def add(self, data):
        with self.list_lock:
            try:
                self.list.append(data)
            except:
                print "Didn't work"

    def remove(self, msg):
        with self.list_lock:
            try:
                self.list.remove(msg)
            except:
                print "Didn't work"
The idea is that this class stores some global information that it receives.
It is all started in a file to test:
import sys
import time
import storage
import zmq_base as base
import zmq_server_meta as server

def printMsg(msg):
    print msg

store = storage.Storage()
store.setup()

handlers = {base.TYPE_B: [printMsg]}
client = server.ZmqServerMeta(handlers=handlers, bind_addr=('127.0.0.1', 5431))
client.start()

message = "Test"
store.add(message)
client.send(base.TYPE_A, message, '127.0.0.1', 5432)
I simplified it to reduce clutter. Instead of just adding the message locally, it is usually sent out and a response comes back; that response to what the client sends should be processed by the correct callback, remove(), which should remove the element from the list. The problem is that the remove() callback sees an empty list, although there should be an element in it. If I check from the testing file, I can see the element after it was added, and if I call remove() from there, I see a non-empty list and can remove the element. My question is: why does the callback see an empty list, and how can I make sure it sees the correct elements in the list?
Kind regards
Patrick
I believe the problem lies in the fact that the ZmqProcess class inherits from multiprocessing.Process. Multiprocessing does not allow objects to be shared among different processes, except via a shared memory map using Value or Array (see the documentation: https://docs.python.org/3/library/multiprocessing.html#sharing-state-between-processes).
If you want to use your custom object, you can use a server process / proxy object, which can be found on the same page of the documentation.
So you can, for instance, define a manager in the __init__ method of the Storage class, like self.manager = Manager(), and afterwards set self.list = self.manager.list(). This should do the trick.
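A minimal sketch of that suggestion applied to the Storage class from the question; everything not shown stays exactly as before:

from multiprocessing import Manager

class Storage:
    def __init__(self):
        self.manager = Manager()
        self.list = self.manager.list()        # proxy list shared across processes
        self.list_lock = self.manager.RLock()  # the lock must be shareable as well
        self.zmq_server = None
        self.host = '127.0.0.1'
        self.port = 5432
        self.bind_addr = (self.host, self.port)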

Twisted python: the correct way to pass a kwarg through the component system to a factory

I need to pass a kwarg to the parent class of my equivalent of FingerFactoryFromService using super.
I know I am actually passing the kwarg to IFingerFactory, because that is also where I pass the service that ends up in FingerFactoryFromService.__init__. I can understand that it is getting tripped up somewhere in the component system, but I cannot think of any other way.
The error I keep getting is
exceptions.TypeError: 'test' is an invalid keyword argument for this function
Versions of code in my virtualenv are:
pip (1.4.1)
setuptools (1.1.6)
Twisted (13.1.0)
wsgiref (0.1.2)
zope.interface (4.0.5)
This is a cutdown example from the finger tutorial demonstrating the issue:
from twisted.protocols import basic
from twisted.application import internet, service
from twisted.internet import protocol, reactor, defer
from twisted.python import components
from zope.interface import Interface, implements  # @UnresolvedImport

class IFingerService(Interface):

    def getUser(user):  # @NoSelf
        """
        Return a deferred returning a string.
        """

    def getUsers():  # @NoSelf
        """
        Return a deferred returning a list of strings.
        """

class IFingerFactory(Interface):

    def getUser(user):  # @NoSelf
        """
        Return a deferred returning a string.
        """

    def buildProtocol(addr):  # @NoSelf
        """
        Return a protocol returning a string.
        """

def catchError(err):
    return "Internal error in server"

class FingerProtocol(basic.LineReceiver):

    def lineReceived(self, user):
        d = self.factory.getUser(user)
        d.addErrback(catchError)

        def writeValue(value):
            self.transport.write(value + '\r\n')
            self.transport.loseConnection()

        d.addCallback(writeValue)

class FingerService(service.Service):
    implements(IFingerService)

    def __init__(self, filename):
        self.filename = filename
        self.users = {}

    def _read(self):
        self.users.clear()
        for line in file(self.filename):
            user, status = line.split(':', 1)
            user = user.strip()
            status = status.strip()
            self.users[user] = status
        self.call = reactor.callLater(30, self._read)  # @UndefinedVariable

    def getUser(self, user):
        print user
        return defer.succeed(self.users.get(user, "No such user"))

    def getUsers(self):
        return defer.succeed(self.users.keys())

    def startService(self):
        self._read()
        service.Service.startService(self)

    def stopService(self):
        service.Service.stopService(self)
        self.call.cancel()

class FingerFactoryFromService(protocol.ServerFactory):
    implements(IFingerFactory)

    protocol = FingerProtocol

    #def __init__(self, srv):
    def __init__(self, srv, test=None):
        self.service = srv
        ## I need to call super here because my equivalent of ServerFactory requires
        ## a kwarg, but this cutdown example doesn't, so I just assign it to a property
        # super(FingerFactoryFromService, self).__init__(test=test)
        self.test_thing = test or 'Default Something'

    def getUser(self, user):
        return self.service.getUser(user)

components.registerAdapter(FingerFactoryFromService,
                           IFingerService,
                           IFingerFactory)

application = service.Application('finger')
serviceCollection = service.IServiceCollection(application)

finger_service = FingerService('/etc/passwd')
finger_service.setServiceParent(serviceCollection)

#line_finger_factory = IFingerFactory(finger_service)
line_finger_factory = IFingerFactory(finger_service, test='Something')

line_finger_server = internet.TCPServer(1079, line_finger_factory)
line_finger_server.setServiceParent(serviceCollection)
This has nothing to do with the component system. What you want to do is override the Factory's buildProtocol method, as documented here:
https://twistedmatrix.com/documents/current/core/howto/servers.html#auto9
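A minimal sketch of what that looks like for the cut-down example, assuming the extra value only needs to reach the factory and its protocols (test_thing is the question's attribute name; the rest is illustrative, not the tutorial's code):

class FingerFactoryFromService(protocol.ServerFactory):
    implements(IFingerFactory)

    protocol = FingerProtocol

    def __init__(self, srv):
        self.service = srv
        self.test_thing = 'Default Something'

    def getUser(self, user):
        return self.service.getUser(user)

    def buildProtocol(self, addr):
        # Let ServerFactory build the protocol as usual, then hand the
        # setting to the new instance instead of threading a kwarg
        # through the component system.
        proto = protocol.ServerFactory.buildProtocol(self, addr)
        proto.test_thing = self.test_thing
        return proto

# After adaptation, configure the factory directly:
# line_finger_factory = IFingerFactory(finger_service)
# line_finger_factory.test_thing = 'Something'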

How to generate reports using HtmlTestRunner for unittests which involve xml-rpc communication?

If it is a unit test that involves a single XML-RPC request/response between an XML-RPC client and server, it works fine and generates a report on one end, but if the scenario scales to more than one XML-RPC request, the report is not generated.
Here goes the code snippet template:
On one machine ABC I have,
class ABC():
    def add():
        .......
    def subtract():
        .......

class MachineABCTest(unittest.TestCase):
    def test_ABC(self):
        x1 = abc_client.multiply()  # method which resides on machine XYZ
        y1 = abc_client.divide()    # method which resides on machine XYZ

suite = unittest.TestLoader().loadTestsFromTestCase(MachineABCTest)
runner = HTMLTestRunner.HTMLTestRunner(stream=fp, verbosity=2)
runner.run(suite)

class ServerThread(threading.Thread):
    def __init__(self):
        threading.Thread.__init__(self)
        self.abc_server = SimpleThreadedXMLRPCServer(("XXX.XXX.XXX.1", 8000))
        self.abc_server.register_instance(ABC())  # methods add & subtract will get registered

    def run(self):
        self.abc_server.serve_forever()

abc_server = ServerThread()
abc_server.start()
And on the other machine,
class XYZ():
    def multiply():
        .......
    def divide():
        .......

class MachineXYZTest(unittest.TestCase):
    def test_XYZ(self):
        x2 = xyz_client.add()       # method which resides on machine ABC
        x2 = xyz_client.subtract()  # method which resides on machine ABC

suite = unittest.TestLoader().loadTestsFromTestCase(MachineXYZTest)
runner = HTMLTestRunner.HTMLTestRunner(stream=fp, verbosity=2)
runner.run(suite)

class ServerThread(threading.Thread):
    def __init__(self):
        threading.Thread.__init__(self)
        self.xyz_server = SimpleThreadedXMLRPCServer(("XXX.XXX.XXX.2", 8000))
        self.xyz_server.register_instance(XYZ())  # methods multiply & divide will get registered

    def run(self):
        self.xyz_server.serve_forever()

xyz_server = ServerThread()
xyz_server.start()
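As an aside, SimpleThreadedXMLRPCServer is not a standard-library class; a common way to define it (an assumption about the helper used above, shown only to make the snippets self-contained) is to mix ThreadingMixIn into SimpleXMLRPCServer:

# Assumed definition of SimpleThreadedXMLRPCServer: a SimpleXMLRPCServer
# that handles each request in its own thread (Python 2 module names, to
# match the rest of the snippet; on Python 3 the imports come from
# socketserver and xmlrpc.server).
from SocketServer import ThreadingMixIn
from SimpleXMLRPCServer import SimpleXMLRPCServer

class SimpleThreadedXMLRPCServer(ThreadingMixIn, SimpleXMLRPCServer):
    daemon_threads = True
    allow_reuse_address = True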
There's nothing wrong with the import statements either:

import HTMLTestRunner
reload(HTMLTestRunner)

dir = "D:\\Test\\ABC_XYZ_Test"
fp = file(os.path.join(dir, "TestResult.html"), "wb")

which I have included in both files.
It would have generated a report if there were only one call being made from ABC, like

x1 = abc_client.multiply()  # method which resides on machine XYZ

with xyz_server serving that request. In that case the report is generated on the client end (here, ABC).
The problem occurs only when there is two-way communication or more than one request is made (from ABC to XYZ or vice versa), as in the code snippets above.
Thanks.
