I can't find a way to make my threads persistent between the first and second call of my script.
So far, when I run python script_1.py A the script runs the if option == 'A' block and starts the thread. Then, the script exits and the thread is cleaned up. So, when I run python script_1.py B the isAlive attribute can't be used.
Is there any way to keep the thread persistent between the two invocations?
The code for script_1.py is:
# script_1.py (as posted in the question): dispatches on a command-line option.
# NOTE(review): `option` is never defined in this snippet -- presumably it
# comes from sys.argv; confirm against the full script.
from script_2 import imp

script_2 = imp()

if option == 'A':
    script_2.start()   # starts the worker thread, then this process exits
elif option == 'B':
    script_2.stop()    # runs in a NEW process: the thread started by 'A' is gone
and for script_2.py is:
from threading import Thread


class workerThread(Thread):
    # Worker that spins until `active` is flipped to True.
    def __init__(self, _parent):
        Thread.__init__(self)
        self.parent = _parent   # back-reference to the owning `imp` instance
        self.active = False     # loop-control flag; True means "stop"

    def run(self):
        # NOTE(review): busy-wait -- burns CPU, and nothing in this file ever
        # sets self.active, so the loop only ends when the process dies.
        while(self.active == False):
            print 'I am here'
        print 'and now I am here'
class imp():
    # Facade that owns the worker thread.
    def __init__(self):
        self.threadObj = None   # set by start()

    def start(self):
        self.threadObj = workerThread(self)
        self.threadObj.start()

    def stop(self):
        # NOTE(review): in a fresh interpreter invocation threadObj is None,
        # which is exactly the persistence problem the question describes.
        if self.threadObj.isAlive() == True:
            print 'it is alive'
A solution would be:
from threading import Thread
from socket import *
from time import sleep   # NOTE(review): imported but unused in this mockup


class workerThread(Thread):
    """Self-starting worker that listens on TCP port 9866 until a client
    sends 'quit', then closes the listening socket and dies."""

    def __init__(self):
        Thread.__init__(self)
        self.sock = socket()
        self.sock.bind(('', 9866))   # control channel for the second script
        self.sock.listen(4)
        self.start()                 # run() starts immediately on construction

    def run(self):
        while 1:
            ns, na = self.sock.accept()   # blocks until a client connects
            # Accept either bytes (Python 3 client) or str (Python 2 client).
            # NOTE(review): connections that do not send 'quit' are never
            # closed here -- acceptable for a 5-minute mockup only.
            if ns.recv(8192) in (b'quit', 'quit'):
                ns.close()
                break
        self.sock.close()
        print('Worker died')


# Module-level side effect: importing this module starts the worker.
imp = workerThread()
And the first script:
if option == 'A':
    # First invocation: the import starts the worker thread (see script_2),
    # then idle forever to keep this process -- and its thread -- alive.
    from time import sleep
    from script_2 import imp
    while 1:
        sleep(0.1)
elif option == 'B':
    # Second invocation: connect to the running instance and tell it to quit.
    from socket import *
    s = socket()
    s.connect(('127.0.0.1', 9866))
    s.send('quit') # b'quit' if you're using Python3
    s.close()
It's not even close to elegant, but it's a 5min mockup of what you could do.
To make this closer to useable code, I'd go with:
# Sketch: use a filesystem-based UNIX socket instead of TCP.
# NOTE(review): socket.fromfd() expects a file DESCRIPTOR, not a path --
# for a path you would bind an AF_UNIX socket instead; verify before use.
self.sock = fromfd('/path/to/socket', AF_UNIX, SOCK_DGRAM)
and register it with an ePoll object within the worker thread.
import select

# Register the listening socket with an epoll object inside the worker.
self.watch = select.epoll()
self.watch.register(self.sock.fileno(), select.EPOLLIN)

while 1:
    # Poll with a 100 ms timeout so the loop stays responsive.
    for fd, event in self.watch.poll(0.1):
        if fd == self.sock.fileno() and event == select.EPOLLIN:
            # Readable event on the listening socket: a new client.
            ns, na = self.sock.accept()
            # store socket and register it
        elif event == select.EPOLLIN:
            # Data ready on an already-accepted client socket.
            data = storedSockets[fd].recv(8192)
            # and do work on it
Anyway, you will need to keep the first instance of your execution running, and create some form of communication channel for the second instance you start up. I used sockets as an example, which I think works rather well — especially in conjunction with Unix sockets and epoll, because the speed is fantastic. You could also use memcache.
Related
I have a script that initiates two classes (control of a led strip and temp/hum sensor).
Each class runs a while loop that can be terminated with signal_handler() which basically calls sys.exit(0). I was thinking about handling the exit of the main program with signal_handler() as I did for the classes themselves.
However, when I try to CTRL + C out of the script, the program exits with error (see below the code) and the lights program doesn't exit properly (i.e., lights are still on when they should be off if exiting gracefully).
import threading
from light_controller import LightController
from thermometer import Thermometer
import signal


def signal_handler():
    # NOTE(review): signal handlers are invoked as handler(signum, frame);
    # this zero-argument signature raises TypeError when Ctrl+C arrives.
    # NOTE(review): `sys` is used below but never imported in this snippet.
    print("\nhouse.py terminated with Ctrl+C.")
    if l_thread.is_alive():
        # NOTE(review): joining a daemon thread whose loop never exits
        # will block forever -- the threads have no stop mechanism.
        l_thread.join()
    if t_thread.is_alive():
        t_thread.join()
    sys.exit(0)


signal.signal(signal.SIGINT, signal_handler)

lights = LightController()
temp = Thermometer()

t_thread = threading.Thread(target = temp.run)
t_thread.daemon = True
t_thread.start()

l_thread = threading.Thread(target = lights.run)
l_thread.daemon = True
l_thread.start()
Thermometer() terminated with Ctrl+C.
Exception ignored in: <module 'threading' from '/usr/lib/python3.7/threading.py'>
Traceback (most recent call last):
File "/usr/lib/python3.7/threading.py", line 1281, in _shutdown
t.join()
File "/usr/lib/python3.7/threading.py", line 1032, in join
self._wait_for_tstate_lock()
File "/usr/lib/python3.7/threading.py", line 1048, in _wait_for_tstate_lock
elif lock.acquire(block, timeout):
File "/home/pi/Desktop/house/thermometer.py", line 51, in signal_handler
sys.exit(0)
My take is that this is happening because I have the signal_handler() replicated in the two classes and the main program. Both classes will run infinite loops and might be used by themselves, so I rather keep the signal_handler() inside each of the two classes.
I'm not sure if it's possible to actually keep it like this. I also don't know if sys.exit() is actually the way to get out without causing errors down the line.
I am OK with using a different exit method for the main program house.py instead of CTRL+C.
Update
Thank you for the spellcheck!
Here's the code for the classes.
thermometer.py
from luma.core.interface.serial import i2c
from luma.core.render import canvas
from luma.oled.device import ssd1306, ssd1325, ssd1331, sh1106
from luma.core.error import DeviceNotFoundError
import os
import time
import signal
import sys
import socket
from PIL import ImageFont, ImageDraw
# adafruit
import board
import busio
from adafruit_htu21d import HTU21D
class Thermometer(object):
    """docstring for Thermometer"""
    # Reads a (currently stubbed-out) HTU21D temperature/humidity sensor and
    # renders the readings plus the host IP on a mini I2C OLED display.

    def __init__(self):
        super(Thermometer, self).__init__()
        # TODO: Check for pixelmix.ttf in folder
        self.drawfont = "pixelmix.ttf"   # bitmap font for the OLED text
        self.sleep_secs = 30             # seconds between display refreshes
        try:
            # Install our own SIGINT handler.  NOTE(review): this competes
            # with handlers installed by LightController and by the main
            # program -- last one installed wins.
            signal.signal(signal.SIGINT, self.signal_handler)
            self.serial = i2c(port=1, address=0x3C)
            self.oled_device = ssd1306(self.serial, rotate=0)
        except DeviceNotFoundError:
            print("I2C mini OLED display not found.")
            sys.exit(1)
        try:
            # Create library object using our Bus I2C port
            #self.i2c_port = busio.I2C(board.SCL, board.SDA)
            #self.temp_sensor = HTU21D(self.i2c_port)
            print("Running temp in debug mode")
        except ValueError:
            print("Temperature sensor not found")
            sys.exit(1)

    def getIP(self):
        # Discover the outward-facing IP by "connecting" a UDP socket to a
        # public resolver (no packets are sent) and reading the local address.
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        s.connect(("8.8.8.8", 80))
        ip = s.getsockname()[0]
        s.close()
        return ip

    def signal_handler(self, sig, frame):
        # SIGINT handler: raises SystemExit in whatever frame is running.
        print("\nThermometer() terminated with Ctrl+C.")
        sys.exit(0)

    def run(self):
        # Main loop: measure, draw, sleep.  Exits via the SystemExit raised
        # by signal_handler (caught below) or on any unexpected error.
        try:
            while True:
                # Measure things
                temp_value = 25   # stub values while the sensor code is commented out
                hum_value = 50
                #temp_value = round(self.temp_sensor.temperature, 1)
                #hum_value = round(self.temp_sensor.relative_humidity, 1)
                # Display results
                with canvas(self.oled_device) as draw:
                    draw.rectangle(self.oled_device.bounding_box, outline="white", fill="black")
                    font = ImageFont.truetype(self.drawfont, 10)
                    ip = self.getIP()
                    draw.text((5, 5), "IP: " + ip, fill="white", font=font)
                    font = ImageFont.truetype(self.drawfont, 12)
                    draw.text((5, 20), f"T: {temp_value} C", fill="white", font=font)
                    draw.text((5, 40), f"H: {hum_value}%", fill="white", font=font)
                # TODO ADD SAVING Here
                time.sleep(self.sleep_secs)
        except SystemExit:
            print("Exiting...")
            sys.exit(0)
        except:
            # NOTE(review): bare except hides real bugs; narrow it if possible.
            print("Unexpected error:", sys.exc_info()[0])
            sys.exit(2)


if __name__ == '__main__':
    thermo = Thermometer()
    thermo.run()
light_controller.py
import RPi.GPIO as GPIO
import time
import signal
import datetime
import sys
class LightController(object):
    """docstring for LightController"""
    # Drives an RGB LED strip over three PWM GPIO pins, switching the blue
    # channel on between the lights_on and lights_off hours of the day.

    def __init__(self):
        super(LightController, self).__init__()
        # Clean up on kill, hangup and Ctrl+C.  NOTE(review): the SIGINT
        # handler competes with the ones installed by Thermometer and by
        # the main program -- last one installed wins.
        signal.signal(signal.SIGTERM, self.safe_exit)
        signal.signal(signal.SIGHUP, self.safe_exit)
        signal.signal(signal.SIGINT, self.safe_exit)
        self.red_pin = 9
        self.green_pin = 11
        # might be white pin if hooking up a white LED here
        self.blue_pin = 10
        GPIO.setmode(GPIO.BCM)
        GPIO.setwarnings(False)
        GPIO.setup(self.red_pin, GPIO.OUT)
        GPIO.setup(self.green_pin, GPIO.OUT)
        GPIO.setup(self.blue_pin, GPIO.OUT)
        self.pwm_red = GPIO.PWM(self.red_pin, 500) # We need to activate PWM on LED so we can dim, use 1000 Hz
        self.pwm_green = GPIO.PWM(self.green_pin, 500)
        self.pwm_blue = GPIO.PWM(self.blue_pin, 500)
        # Start PWM at 0% duty cycle (off)
        self.pwm_red.start(0)
        self.pwm_green.start(0)
        self.pwm_blue.start(0)
        # NOTE(review): zip() is a one-shot iterator in Python 3 -- it is
        # exhausted after a single full iteration.
        self.pin_zip = zip([self.red_pin, self.green_pin, self.blue_pin],
                           [self.pwm_red, self.pwm_green, self.pwm_blue])
        # Config lights on-off cycle here
        self.lights_on = 7     # hour of day to switch on
        self.lights_off = 19   # hour of day to switch off
        print(f"Initalizing LightController with lights_on: {self.lights_on}h & lights_off: {self.lights_off}h")
        print("------------------------------")

    def change_intensity(self, pwm_object, intensity):
        # Thin wrapper over ChangeDutyCycle (intensity in 0-100).
        pwm_object.ChangeDutyCycle(intensity)

    def run(self):
        # Poll once per second and drive the blue channel by wall-clock hour.
        while True:
            #for pin, pwm_object in self.pin_zip:
            #    pwm_object.ChangeDutyCycle(100)
            #    time.sleep(10)
            #    pwm_object.ChangeDutyCycle(20)
            #    time.sleep(10)
            #    pwm_object.ChangeDutyCycle(0)
            current_hour = datetime.datetime.now().hour
            # evaluate between
            if self.lights_on <= current_hour <= self.lights_off:
                self.pwm_blue.ChangeDutyCycle(100)
            else:
                self.pwm_blue.ChangeDutyCycle(0)
            # run this once a second
            time.sleep(1)

    # ------- Safe Exit ---------- #
    def safe_exit(self, signum, frame):
        # Signal handler (SIGTERM/SIGHUP/SIGINT): exit via SystemExit.
        print("\nLightController() terminated with Ctrl+C.")
        sys.exit(0)


if __name__ == '__main__':
    controller = LightController()
    controller.run()
Option 1: Threading is hard
To expand on what I mean with "no internal loops" – threading is hard, so let's do something else instead.
I've added __enter__ and __exit__ to the Thermometer and LightController classes here; this makes them usable as context managers (i.e. with the with block). This is useful when you have objects that "own" other resources; in this case, the thermometer owns the serial device and the light controller touches GPIO.
Then, instead of each class having .run(), where they'd stay forever, let's have the "outer" program control that: it runs in a forever while loop, and asks each "device" to do its thing before waiting for a second again. (You could also use the stdlib sched module to have the classes register functions to run at different intervals, or be otherwise clever if the different classes happen to need different check intervals.)
Since there are no threads, there's no need to set up signal handlers either; a ctrl+c in the program bubbles up a KeyboardInterrupt exception like regular, and the with blocks' __exit__ handlers get their chance of cleaning up.
class Thermometer:
    # Context-manager variant: owns the serial device for the 'with' scope.
    def __enter__(self):
        self.serial = ...
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # TODO: Cleanup the i2c/ssd devices
        pass

    def step(self):
        """ Measure and draw things """
        # Measure things...
        # Draw things...


class LightController:
    # NOTE(review): __enter__ returns None here, so `as lights` below binds
    # None; it should `return self` like Thermometer.__enter__ does.
    def __enter__(self):
        GPIO.setmode(...)

    def __exit__(self, exc_type, exc_val, exc_tb):
        # TODO: cleanup GPIO
        pass

    def step(self):
        current_hour = datetime.datetime.now().hour
        # etc...


def main():
    # Single-threaded cooperative loop: each device does one step per second;
    # Ctrl+C bubbles KeyboardInterrupt and the with-blocks clean up.
    with LightController() as lights, Thermometer() as temp:
        while True:
            lights.step()
            temp.step()
            time.sleep(1)


if __name__ == '__main__':
    main()
Option 2: Threading is hard but let's do it anyway
Another option, to have your threads cooperate and shut down when you want to, is to use an Event to control their internal loops.
The idea here is that instead of time.sleep() in the loops, you have Event.wait() doing the waiting, since it accepts an optional timeout to hang around for to wait for the event being set (or not). In fact, on some OSes, time.sleep() is implemented as having the thread wait on an anonymous event.
When you want the threads to quit, you set the stop event, and they'll finish up what they're doing.
I've also packaged this concept up into a "DeviceThread" here for convenience's sake.
import threading
import time
class DeviceThread(threading.Thread):
    """Base class for a device-polling thread.

    Subclasses override initialize()/step()/cleanup(); run() invokes step()
    every `interval` seconds until the shared stop event is set.
    """

    interval = 1  # seconds between step() calls

    def __init__(self, stop_event):
        # Name the thread after the concrete subclass for readable logs.
        super().__init__(name=type(self).__name__)
        self.stop_event = stop_event

    def initialize(self):
        """Acquire device resources; overridden by subclasses."""
        pass

    def step(self):
        """Do one unit of work; overridden by subclasses."""
        pass

    def cleanup(self):
        """Release device resources; overridden by subclasses."""
        pass

    def run(self):
        try:
            self.initialize()
            while True:
                # wait() doubles as the sleep; it returns True once the
                # event is set, which is our signal to finish up.
                if self.stop_event.wait(self.interval):
                    break
                self.step()
        finally:
            # Runs on normal stop and on exceptions from initialize()/step().
            self.cleanup()
class ThermometerThread(DeviceThread):
    # Concrete device thread; the bodies are elided (...) in this sketch.
    def initialize(self):
        self.serial = ...

    def cleanup(self):
        ... # close serial port

    def step(self):
        ... # measure and draw
def main():
    # One shared stop event controls every device thread.
    stop_event = threading.Event()
    threads = [ThermometerThread(stop_event)]
    for thread in threads:
        thread.start()
    try:
        while True:
            # Nothing to do in the main thread...
            time.sleep(1)
    except KeyboardInterrupt:
        # Ctrl+C: signal all threads, then wait for each to finish cleanup.
        print("Caught keyboard interrupt, stopping threads")
        stop_event.set()
        for thread in threads:
            print(f"Waiting for {thread.name} to stop")
            thread.join()
I'm writing a Python module to read jstest output and make Xbox gamepad working in Python on Linux. I need to start in background infinite while loop in __init__ on another thread that looks like this:
import os
from threading import Thread
import time
import select
import subprocess


class Joystick:
    """Initializes base class and launches jstest and xboxdrv"""

    def __init__(self, refreshRate=2000, deadzone=4000):
        # Launch the xboxdrv daemon and keep a handle on its stdout.
        self.proc = subprocess.Popen(['xboxdrv', '-D', '-v', '--detach-kernel-driver', '--dpad-as-button'], stdout=subprocess.PIPE, bufsize=0)
        self.pipe = self.proc.stdout
        self.refresh = refreshRate
        self.refreshDelay = 1.0 / refreshRate
        self.refreshTime = 0 # indicates the next refresh
        self.deadzone = deadzone
        self.start()   # waits (up to 1 s) for the controller to appear
        self.xbox = subprocess.Popen(['jstest', '--normal', '/dev/input/js0'], stdout=subprocess.PIPE, bufsize=-1, universal_newlines=True)
        self.response = self.xbox.stdout.readline()
        # NOTE(review): this is the bug the question is about -- the ()
        # CALLS reload2 (an infinite loop) on the main thread instead of
        # passing it as the target.  Should be Thread(target=self.reload2).
        a = Thread(target=self.reload2())
        a.start()
        print("working")

    def reload2(self):
        # Continuously mirror jstest's latest output line into self.response.
        while True:
            self.response = self.xbox.stdout.readline()
            print("read")
            time.sleep(0.5)

    def start(self):
        # Scan xboxdrv's stdout for up to one second to confirm that a
        # controller/receiver is connected; raise IOError otherwise.
        global leftVibrateAmount, rightVibrateAmount
        leftVibrateAmount = 0
        rightVibrateAmount = 0
        readTime = time.time() + 1 # here we wait a while
        found = False
        while readTime > time.time() and not found:
            # Non-blocking readiness check on the daemon's stdout.
            readable, writeable, exception = select.select([self.pipe], [], [], 0)
            if readable:
                response = self.pipe.readline()
                # tries to detect if controller is connected
                if response == b'[ERROR] XboxdrvDaemon::run(): fatal exception: DBusSubsystem::request_name(): failed to become primary owner of dbus name\n':
                    raise IOError("Another instance of xboxdrv is running.")
                elif response == b'[INFO] XboxdrvDaemon::connect(): connecting slot to thread\n':
                    found = True
                    self.reading = response
                elif response == b'':
                    raise IOError('Are you running as sudo?')
        if not found:
            self.pipe.close()
            # halt if controller not found
            raise IOError("Xbox controller/receiver isn't connected")
The loop is defined to start running in __init__ function like so:
a = threading.Thread(target=self.reload2) # code hangs here
a.start()
But each time I create variable "a", whole program hangs in while loop, which should be running in another thread.
Thanks for help.
You may be having issues with your __init__. I put it in a simple class as an example, and it runs as expected.
import time
from threading import Thread
class InfiniteLooper():
    """Demo: spawn a background thread from __init__ without hanging."""

    def __init__(self):
        # Pass the bound method itself -- reload2, not reload2() -- otherwise
        # the infinite loop would execute here instead of on the new thread.
        worker = Thread(target=self.reload2)
        print('Added thread')
        worker.start()
        print('Thread started')

    def reload2(self):
        """Prompt forever, half a second between reads."""
        while True:
            self.response = input('Enter something')
            print('read')
            time.sleep(0.5)


loop = InfiniteLooper()
Output:
Added thread
Thread started
Enter something
1
read
Enter something
1
read
As you can see, the "Enter something" appears after I've added the thread and started it. It also loops fine
I have an application listening on a specific TCP port to handle received requests (listen.py). After that, I have another one (trigger.py) that depending on the requested parameters triggers the respective operation.
Now, lets say the operation A was triggered (opA.py). Operation A uses a worker thread to start (worker.py). When the user request listen.py to stop operation A, the started thread is supposed to stop.
UPDATED:
The problem is that the thread is never stopped, since the real issue lies in trigger.py. The OperationA instance is lost once the code exits. So, I can never call stopOperation, since it shows me AttributeError: 'NoneType' object has no attribute 'stopOperation'
Any ideas of How to solve this?
listen.py
from trigger import Trigger
'''
code to handle requests here:
1st: param -> 'start'
2nd: param -> 'stop'
'''
# NOTE(review): a fresh Trigger is built per request, so any state it held
# is lost between requests -- this is the core problem in the question.
t = Trigger()
t.execute(param)   # `param` comes from the received request (not shown here)
trigger.py
from opA import OperationA


class Trigger():
    # NOTE(review): `execute` is missing `self`; as written, the instance
    # call t.execute(param) passes the Trigger object as `param`.
    def execute(param):
        # A new OperationA per call: the instance started by 'start' is no
        # longer reachable when 'stop' arrives -- hence the AttributeError.
        opA = OperationA()
        if param == 'start':
            opA.startOperation()
        elif param == 'stop':
            opA.stopOperation()
opA.py
from worker import ThreadParam


class OperationThread(ThreadParam):
    # Spins until killSignal() flips self.running to True.
    def run(self):
        while (self.running == False):
            '''
            do something here
            '''


class OperationA():
    # NOTE(review): `_init__` is missing a leading underscore, so this
    # constructor never runs; the thread is also bound to a local variable
    # instead of self.listenThread -- both contribute to the AttributeError.
    def _init__(self):
        listenThread = OperationThread(self)

    def startOperation(self):
        self.listenThread.start()

    def stopOperation(self):
        if self.listenThread.isAlive() == True:
            print 'Thread is alive'
            self.listenThread.killSignal()
        else:
            print 'Thread is dead'
worker.py
from threading import Thread


class ThreadParam(Thread):
    """Thread base that remembers its parent and exposes a kill flag.

    `running` is (confusingly) the KILL flag: False while the worker should
    keep going, True once killSignal() has been called.
    """

    def __init__(self, _parent):
        Thread.__init__(self)
        self.parent = _parent   # object that owns this thread
        self.running = False    # kill flag; see killSignal()

    def killSignal(self):
        """Ask the worker loop to terminate."""
        self.running = True
A minimal useful Trigger might look like this:
class Trigger(object):
    """Keeps the running operation on self so a later 'stop' can reach it."""

    def __init__(self):
        self.operation = None   # the currently running OperationA, if any

    def execute(self, command):
        if command == 'start':
            # Refuse to start twice; remember the instance for 'stop'.
            assert self.operation is None
            self.operation = OperationA()
            self.operation.start_operation()
        elif command == 'stop':
            self.operation.stop_operation()
            self.operation = None
        else:
            print 'Unknown command', repr(command)
I'm working on server written in python. When the client sends a cmd the server will call a function with unknown running time. So to avoid blocking I used threading. But when looking at the child process it seems that they're not terminating, causing a lot of memory usage.
EDIT : Here is the tree of the directory : http://pastebin.com/WZDxLquC
Following answers I found on stackoverflow I implemented a custom Thread class:
sThreads.py :
import threading


class Thread(threading.Thread):
    """threading.Thread plus a cooperative 'stopped' event.

    NOTE(review): stop() only sets a flag; the target function must poll
    isStopped() itself -- nothing here forcibly terminates the thread,
    which is why the asker's threads never actually stop.
    """

    def __init__(self, aFun, args = ()):
        # Positional args map to threading.Thread(group, target, name, args).
        super(Thread, self).__init__(None, aFun, None, args)
        self.stopped = threading.Event()

    def stop(self):
        self.stopped.set()

    def isStopped(self):
        return self.stopped.isSet()
Then here is the server's loop:
some where in mainServer.py:
def serve_forever(self, aCustomClass, aSize = 1024):
    # Accept one client at a time, then spawn a worker thread per received
    # message and a responder thread whenever work is outstanding.
    # (Indentation reconstructed from the flattened paste -- verify nesting.)
    while True:
        self.conn, self.addr = self.sock.accept()
        msg = self.recvMSG(4096)
        if(msg):
            self.handShake(msg)
            print 'Accepted !'
            while True:
                msg = self.recvMSG(aSize)
                if(msg):
                    t = sThreads.Thread(self.handle, (aCustomClass,))
                    t.start()
                    # NOTE(review): threads are appended but never removed,
                    # so this list (and its Thread objects) grows unbounded.
                    self.currentThreads.append(t)
                    if(self.workers > 0):
                        tt = sThreads.Thread(self.respond)
                        tt.start()
                    if(self.workers == 0 and len(self.currentThreads) > 0):
                        for th in self.currentThreads:
                            th.stop()   # only sets the flag; see sThreads.py
Using a custom Thread class will not solve the issue and it still does not stop the terminated threads!
EDIT : added the handle() and respond() methods :
def handle(self, aClass):
    # Worker: decode the incoming stream, let the custom class process it,
    # and queue the (msgToSend, wantToSend) result for the responder.
    self.workers += 1
    self.queue.put(aClass._onRecieve(self.decodeStream()))

def respond(self):
    # Responder: drain results while any worker is outstanding.
    # NOTE(review): queue.get() blocks forever when the queue stays empty --
    # the asker identified this as the source of the hanging threads.
    while self.workers > 0:
        msgToSend, wantToSend = self.queue.get()
        self.workers -= 1
        if(wantToSend):
            print 'I want to send :', msgToSend
            continue #Send is not yet implemented !
It seems that self.queue.get() was causing all the issue ...
I'm trying to start a data queue server under a managing process (so that it can later be turned into a service), and while the data queue server function works fine in the main process, it does not work in a process created using multiprocessing.Process.
The dataQueueServer and dataQueueClient code is based on the code from the multiprocessing module documentation here.
When run on its own, dataQueueServer works well. However, when run using a multiprocessing.Process's start() in mpqueue, it doesn't work (when tested with the client). I am using the dataQueueClient without changes to test both cases.
The code does reach the serve_forever in both cases, so I think the server is working, but something is blocking it from communicating back to the client in the mpqueue case.
I have placed the loop that runs the serve_forever() part under a thread, so that it can be stoppable.
Here is the code:
mpqueue # this is the "manager" process trying to spawn the server in a child process
import time
import multiprocessing
import threading
import dataQueueServer
class Printer():
    # Thread-safe print: serializes output from multiple threads.
    def __init__(self):
        self.lock = threading.Lock()

    def tsprint(self, text):
        with self.lock:
            print text
class QueueServer(multiprocessing.Process):
    # Child process intended to host the queue server's MainLoop thread.
    def __init__(self, name = '', printer = None):
        multiprocessing.Process.__init__(self)
        # NOTE(review): this shadows multiprocessing.Process's own `name`.
        self.name = name
        self.printer = printer
        # NOTE(review): MainLoop (and its pre-built queue_server) is created
        # in the PARENT process and transferred to the child on start() --
        # a likely cause of the reported cross-process trouble.
        self.ml = dataQueueServer.MainLoop(name = 'ml', printer = self.printer)

    def run(self):
        self.printer.tsprint(self.ml)
        self.ml.start()

    def stop(self):
        # NOTE(review): executes in the parent process, so it toggles the
        # parent's copy of ml, not the one running inside the child.
        self.ml.stop()
if __name__ == '__main__':
    printer = Printer()
    qs = QueueServer(name = 'QueueServer', printer = printer)
    printer.tsprint(qs)
    printer.tsprint('starting')
    qs.start()
    printer.tsprint('started.')
    printer.tsprint('Press Ctrl-C to quit')
    try:
        # Idle until Ctrl+C, then ask the server process to stop.
        while True:
            time.sleep(60)
    except KeyboardInterrupt:
        printer.tsprint('\nTrying to exit cleanly...')
        qs.stop()
        printer.tsprint('stopped')
dataQueueServer
import time
import threading
from multiprocessing.managers import BaseManager
from multiprocessing import Queue
HOST = ''
PORT = 50010
AUTHKEY = 'authkey'
## Define some helper functions for use by the main process loop
class Printer():
    # Thread-safe print (duplicate of the Printer in mpqueue).
    def __init__(self):
        self.lock = threading.Lock()

    def tsprint(self, text):
        with self.lock:
            print text
class QueueManager(BaseManager):
    # Bare subclass to hang register() calls on.
    pass
class MainLoop(threading.Thread):
    """A thread based loop manager, allowing termination signals to be sent
    to the thread"""

    def __init__(self, name = '', printer = None):
        threading.Thread.__init__(self)
        self._stopEvent = threading.Event()
        self.daemon = True
        self.name = name
        if printer is None:
            self.printer = Printer()
        else:
            self.printer = printer
        ## create the queue
        self.queue = Queue()
        ## Add a function to the handler to return the queue to clients
        # NOTE(review): self.QM is the QueueManager CLASS itself, so every
        # MainLoop instance shares (and overwrites) the same registration.
        self.QM = QueueManager
        self.QM.register('get_queue', callable=lambda:self.queue)
        self.queue_manager = self.QM(address=(HOST, PORT), authkey=AUTHKEY)
        self.queue_server = self.queue_manager.get_server()

    def __del__(self):
        self.printer.tsprint( 'closing...')

    def run(self):
        self.printer.tsprint( '{}: started serving'.format(self.name))
        # NOTE(review): serve_forever() never returns, so _stopEvent is
        # never consulted -- stop() cannot actually stop this thread.
        self.queue_server.serve_forever()

    def stop(self):
        self.printer.tsprint ('{}: stopping'.format(self.name))
        self._stopEvent.set()

    def stopped(self):
        return self._stopEvent.isSet()
def start():
    # Convenience: build and start a MainLoop with its own printer.
    printer = Printer()
    ml = MainLoop(name = 'ml', printer = printer)
    ml.start()
    return ml


def stop(ml):
    # Ask the loop to stop (see the NOTE on MainLoop.run about why this
    # does not actually terminate serve_forever()).
    ml.stop()


if __name__ == '__main__':
    ml = start()
    raw_input("\nhit return to stop")
    stop(ml)
And a client:
dataQueueClient
import datetime
from multiprocessing.managers import BaseManager

# Number of messages to push through the shared queue (N = 10**n).
n = 0
N = 10**n

HOST = ''
PORT = 50010
AUTHKEY = 'authkey'


def now():
    return datetime.datetime.now()


def gen(n, func, *args, **kwargs):
    # Yield func(*args, **kwargs) n times (a lazy "repeat call" generator).
    k = 0
    while k < n:
        yield func(*args, **kwargs)
        k += 1


class QueueManager(BaseManager):
    pass


# Connect to the running server and fetch a proxy for its shared queue.
QueueManager.register('get_queue')
m = QueueManager(address=(HOST, PORT), authkey=AUTHKEY)
m.connect()
queue = m.get_queue()


def load(msg, q):
    return q.put(msg)


def get(q):
    return q.get()


# Time the load phase: push N messages through the proxied queue.
lgen = gen(N, load, msg = 'hello', q = queue)
t0 = now()
while True:
    try:
        lgen.next()
    except StopIteration:
        break
t1 = now()
print 'loaded %d items in ' % N, t1-t0

# Time the drain phase.
t0 = now()
while queue.qsize() > 0:
    queue.get()
t1 = now()
print 'got %d items in ' % N, t1-t0
So it seems like the solution is simple enough: Don't use serve_forever(), and use manager.start() instead.
According to Eli Bendersky, the BaseManager (and its extended version SyncManager) already spawns the server in a new process (and looking at the multiprocessing.managers code confirms this). The problem I have been experiencing stems from the form used in the example, in which the server is started under the main process.
I still don't understand why the current example doesn't work when run under a child process, but that's no longer an issue.
Here's the working (and much simplified from OP) code to manage multiple queue servers:
Server:
from multiprocessing import Queue
from multiprocessing.managers import SyncManager
HOST = ''
PORT0 = 5011
PORT1 = 5012
PORT2 = 5013
AUTHKEY = 'authkey'
name0 = 'qm0'
name1 = 'qm1'
name2 = 'qm2'
description = 'Queue Server'
def CreateQueueServer(HOST, PORT, AUTHKEY, name = None, description = None):
    """Start a queue server in its own process and return its manager.

    Exposes three remote callables to clients: get_queue() -> the shared
    Queue, get_name() -> name, get_description() -> description.

    Returns the started SyncManager; call .shutdown() on it when done.
    """
    q = Queue()
    class QueueManager(SyncManager):
        pass
    QueueManager.register('get_queue', callable = lambda: q)
    # BUG FIX: `callable=` must be a callable.  The original passed the
    # `name`/`description` VALUES themselves, so invoking get_name() or
    # get_description() from a client would fail (a str is not callable).
    QueueManager.register('get_name', callable = lambda: name)
    QueueManager.register('get_description', callable = lambda: description)
    manager = QueueManager(address = (HOST, PORT), authkey = AUTHKEY)
    manager.start() # This actually starts the server (in a child process)
    return manager
# Start three queue servers
# Each call spawns a manager process listening on its own port; the script
# then blocks on raw_input so the servers stay alive until Return is hit.
qm0 = CreateQueueServer(HOST, PORT0, AUTHKEY, name0, description)
qm1 = CreateQueueServer(HOST, PORT1, AUTHKEY, name1, description)
qm2 = CreateQueueServer(HOST, PORT2, AUTHKEY, name2, description)
raw_input("return to end")
Client:
from multiprocessing.managers import SyncManager
HOST = ''
PORT0 = 5011
PORT1 = 5012
PORT2 = 5013
AUTHKEY = 'authkey'
def QueueServerClient(HOST, PORT, AUTHKEY):
    # Build a manager connected (as a client) to the server at HOST:PORT.
    # register() here only declares which remote methods the proxy exposes;
    # no `callable=` is given because the implementations live server-side.
    class QueueManager(SyncManager):
        pass
    QueueManager.register('get_queue')
    QueueManager.register('get_name')
    QueueManager.register('get_description')
    manager = QueueManager(address = (HOST, PORT), authkey = AUTHKEY)
    manager.connect() # This starts the connected client
    return manager
# create three connected managers, one per queue server
qc0 = QueueServerClient(HOST, PORT0, AUTHKEY)
qc1 = QueueServerClient(HOST, PORT1, AUTHKEY)
qc2 = QueueServerClient(HOST, PORT2, AUTHKEY)
# Get the queue objects from the clients (proxies to the server-side queues)
q0 = qc0.get_queue()
q1 = qc1.get_queue()
q2 = qc2.get_queue()
# put stuff in the queues
q0.put('some stuff')
q1.put('other stuff')
q2.put({1:123, 2:'abc'})
# check their sizes
print 'q0 size', q0.qsize()
print 'q1 size', q1.qsize()
print 'q2 size', q2.qsize()
# pull some stuff and print it
print q0.get()
print q1.get()
print q2.get()
Adding an additional server to share a dictionary with the information of the running queue servers, so that consumers can easily tell what's available where, is easy enough using that model. One thing to note, though, is that the shared dictionary requires slightly different syntax than a normal dictionary: dictionary[0] = something will not work. You need to use dictionary.update([(key, value), (otherkey, othervalue)]) and dictionary.get(key) syntax, which propagates to all other clients connected to this dictionary.