I'm trying to use multiprocessing for a function that can potentially segfault (I have no control over this ATM). In cases where the child process hits a segfault, I want only that child to fail, but all other child tasks to continue and return their results.
I've already switched from multiprocessing.Pool to concurrent.futures.ProcessPoolExecutor to avoid the issue of the child process hanging forever (or until an arbitrary timeout) as documented in this bug: https://bugs.python.org/issue22393.
However, the issue I face now is that when the first child task hits a segfault, all in-flight child processes get marked as broken (concurrent.futures.process.BrokenProcessPool).
Is there a way to only mark actually broken child processes as broken?
Code I'm running in Python 3.7.4:
import concurrent.futures
import ctypes
from time import sleep

def do_something(x):
    print(f"{x}; in do_something")
    sleep(x * 3)
    if x == 2:
        # raise a segmentation fault internally
        return x, ctypes.string_at(0)
    return x, x - 1

nums = [1, 2, 3, 1.5]
executor = concurrent.futures.ProcessPoolExecutor()
result_futures = []
for num in nums:
    # Using submit with a list instead of map lets you get past the first exception
    # Example: https://stackoverflow.com/a/53346191/7619676
    future = executor.submit(do_something, num)
    result_futures.append(future)

# Wait for all results
concurrent.futures.wait(result_futures)

# After a segfault is hit for any child process (i.e. it is "terminated abruptly"),
# the process pool becomes unusable and all running/pending child processes'
# results are set to broken
for future in result_futures:
    try:
        print(future.result())
    except concurrent.futures.process.BrokenProcessPool:
        print("broken")
Result:
(1, 0)
broken
broken
(1.5, 0.5)
Desired result:
(1, 0)
broken
(3, 2)
(1.5, 0.5)
multiprocessing.Pool and concurrent.futures.ProcessPoolExecutor both make assumptions about how to handle the concurrency of the interactions between the workers and the main process that are violated if any one process is killed or segfaults, so they do the safe thing and mark the whole pool as broken. To get around this, you will need to build up your own pool with different assumptions directly using multiprocessing.Process instances.
This might sound intimidating but a list and a multiprocessing.Manager will get you pretty far:
import multiprocessing
import ctypes
import queue
from time import sleep

def do_something(job, result):
    while True:
        x = job.get()
        print(f"{x}; in do_something")
        sleep(x * 3)
        if x == 2:
            # raise a segmentation fault internally
            return x, ctypes.string_at(0)
        result.put((x, x - 1))

nums = [1, 2, 3, 1.5]

if __name__ == "__main__":
    # you ARE using the spawn context, right?
    ctx = multiprocessing.get_context("spawn")
    manager = ctx.Manager()
    job_queue = manager.Queue(maxsize=-1)
    result_queue = manager.Queue(maxsize=-1)
    pool = [
        ctx.Process(target=do_something, args=(job_queue, result_queue), daemon=True)
        for _ in range(multiprocessing.cpu_count())
    ]
    for proc in pool:
        proc.start()
    for num in nums:
        job_queue.put(num)
    try:
        while True:
            # Timeout is our only signal that no more results are coming
            print(result_queue.get(timeout=10))
    except queue.Empty:
        print("Done!")
    print(pool)  # will see one dead Process
    for proc in pool:
        proc.kill()  # avoid stderr spam
This "Pool" is a little inflexible, and you will probably want to customize it for your application's specific needs. But you can definitely skip right over segfaulting workers.
When I went down this rabbit hole, where I was interested in cancelling specific submissions to a worker pool, I eventually wound up writing a whole library to integrate into Trio async apps: trio-parallel. Hopefully you won't need to go that far!
Based on @Richard Sheridan's answer, I ended up using the code below. This version doesn't require setting a timeout, which is something I couldn't do for my use case.
import ctypes
import multiprocessing
from typing import Dict
from time import sleep

def do_something(x, result):
    print(f"{x} starting")
    sleep(x * 3)
    if x == 2:
        # raise a segmentation fault internally
        y = ctypes.string_at(0)
    y = x
    print(f"{x} done")
    result.put(y)

def wait_for_process_slot(
    processes: Dict,
    concurrency: int = multiprocessing.cpu_count() - 1,
    wait_sec: int = 1,
) -> int:
    """Blocks main process if `concurrency` processes are already running.

    Alternative to `multiprocessing.Semaphore.acquire`
    useful for when child processes might fail and not be able to signal.
    Relies instead on the main's (parent's) tracking of `multiprocessing.Process`es.
    """
    while True:
        counter = sum(1 for p in processes.values() if p.is_alive())
        if counter < concurrency:
            return counter
        sleep(wait_sec)

if __name__ == "__main__":
    # "spawn" results in an OSError b/c pickling a segfault fails?
    ctx = multiprocessing.get_context()
    manager = ctx.Manager()
    results_queue = manager.Queue(maxsize=-1)
    concurrency = multiprocessing.cpu_count() - 1  # reserve 1 CPU for waiting
    nums = [3, 1, 2, 1.5]
    all_processes = {}
    for idx, num in enumerate(nums):
        num_running_processes = wait_for_process_slot(all_processes, concurrency)
        p = ctx.Process(target=do_something, args=(num, results_queue), daemon=True)
        all_processes.update({idx: p})
        p.start()

    # Wait for the last batch of processes not blocked by wait_for_process_slot to finish
    for p in all_processes.values():
        p.join()

    # Check last batch of processes for bad processes
    # Relies on all processes having finished (the p.joins above)
    bad_nums = [idx for idx, p in all_processes.items() if p.exitcode != 0]
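As a side note: on POSIX, a child killed by a signal reports a negative exitcode (the negated signal number), so if you want to distinguish segfaults from ordinary non-zero exits, a small sketch like this should work:

import signal

for idx, p in all_processes.items():
    if p.exitcode == -signal.SIGSEGV:
        print(f"task {idx} died with a segmentation fault")
    elif p.exitcode != 0:
        print(f"task {idx} failed with exit code {p.exitcode}")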
I'm using queue.Queue from the Python standard library, and I want to be able to print out the contents using a method that does not pop items off the original queue or create a new queue object.
I have tried doing a get and then putting the contents back, but this is too high cost.
# Ideally it would look like the following
from queue import Queue
q = Queue()
q.print()
q.put(1)
q.print()
>> [] # Or something like this
>> [1] # Or something like this
>>> print(list(q.queue))
Does this work for you?
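If other threads may be mutating the queue while you print, you can take a point-in-time snapshot under the queue's internal lock. A small sketch, with the caveat that both q.queue and q.mutex are CPython implementation details rather than documented API:

import queue

def snapshot(q):
    # Copy the contents atomically without removing anything.
    with q.mutex:
        return list(q.queue)

q = queue.Queue()
q.put(1)
q.put(2)
print(snapshot(q))  # [1, 2]
print(q.qsize())    # 2 -- nothing was popped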
Assuming you are using Python 2, you can use something like this:

import Queue
q = Queue.Queue()
q.put(1)
q.put(2)
q.put(3)
print q.queue

You can also loop over it:

for q_item in q.queue:
    print q_item

But unless you are dealing with threads, I would use a normal list as a Queue implementation.
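For example, a plain list, or collections.deque if you want O(1) pops from the left, is trivially printable (Python 3 syntax in this sketch):

from collections import deque

q = deque()
q.append(1)
q.append(2)
print(list(q))      # [1, 2] -- contents visible without popping
print(q.popleft())  # 1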
Sorry, I am a bit late to answer this question, but going by this comment, I extended the Queue in the multiprocessing package as per your requirements. Hopefully it will help someone in the future.
import multiprocessing as mp
from multiprocessing import queues

class IterQueue(queues.Queue):
    def __init__(self, *args, **kwargs):
        ctx = mp.get_context()
        kwargs['ctx'] = ctx
        super().__init__(*args, **kwargs)

    # <---- Iter Protocol ------>
    def __iter__(self):
        return self

    def __next__(self):
        try:
            if not self.empty():
                return self.get()  # block=True | default
            else:
                raise StopIteration
        except ValueError:  # the Queue is closed
            raise StopIteration
Given below is a sample usage of this IterQueue I wrote:
def sample_func(queue_ref):
    for i in range(10):
        queue_ref.put(i)

IQ = IterQueue()
p = mp.Process(target=sample_func, args=(IQ,))
p.start()
p.join()

print(list(IQ))  # [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
I have tested this IterQueue for even a few more complex scenarios, and it seems to be working fine. Let me know if you think this works, or it could fail in some situation.
Not sure if this is still a question, but using the .queue attribute (i.e. q.queue here) works for me. This also works for the other queue types in the module.
import queue
q = queue.Queue()
print(list(q.queue))
q.put(1)
print(list(q.queue))
If you are not using the standard library queue, the most common way to print the contents is to roll your own Queue class with a print method:

class Queue:
    def __init__(self):
        self.items = []

    def push(self, e):
        self.items.append(e)

    def pop(self):
        head = self.items[0]
        self.items = self.items[1:]
        return head

    def print(self):
        for e in self.items:
            print(e)

q = Queue()
q.push(1)
q.push(23)
q.print()
OUTPUT
1
23
Suppose I have the following multiprocessing structure:
import multiprocessing as mp

def worker(working_queue, output_queue):
    while True:
        if working_queue.empty() == True:
            break
        else:
            picked = working_queue.get()
            res_item = "Number " + str(picked)
            output_queue.put(res_item)
    return

if __name__ == '__main__':
    static_input = xrange(100)
    working_q = mp.Queue()
    output_q = mp.Queue()
    results_bank = []
    for i in static_input:
        working_q.put(i)
    processes = [mp.Process(target=worker, args=(working_q, output_q)) for i in range(2)]
    for proc in processes:
        proc.start()
    for proc in processes:
        proc.join()
    results_bank = []
    while True:
        if output_q.empty() == True:
            break
        results_bank.append(output_q.get_nowait())
    if len(results_bank) == len(static_input):
        print "Good run"
    else:
        print "Bad run"
My question: How would I 'batch' write my results to a single file while the working_queue is still 'working' (or at least, not finished)?
Note: My actual data structure is not sensitive to unordered results relative to inputs (despite my example using integers).
Also, I think that batch/set writing from the output queue is better practice than writing from the growing results bank object. However, I am open to solutions relying on either approach. I am new to multiprocessing, so I am unsure of the best practice or most efficient solution(s) for this question.
If you wish to use mp.Processes and mp.Queues, here is a way to process the results in batches. The main idea is in the writer function, below:
import itertools as IT
import multiprocessing as mp

SENTINEL = None
static_len = 100

def worker(working_queue, output_queue):
    for picked in iter(working_queue.get, SENTINEL):
        res_item = "Number {:2d}".format(picked)
        output_queue.put(res_item)

def writer(output_queue, threshold=10):
    result_length = 0
    items = iter(output_queue.get, SENTINEL)
    for batch in iter(lambda: list(IT.islice(items, threshold)), []):
        print('\n'.join(batch))
        result_length += len(batch)
    state = 'Good run' if result_length == static_len else 'Bad run'
    print(state)

if __name__ == '__main__':
    num_workers = 2
    static_input = range(static_len)
    working_q = mp.Queue()
    output_q = mp.Queue()

    writer_proc = mp.Process(target=writer, args=(output_q,))
    writer_proc.start()

    for i in static_input:
        working_q.put(i)
    processes = [mp.Process(target=worker, args=(working_q, output_q))
                 for i in range(num_workers)]
    for proc in processes:
        proc.start()

    # Put one SENTINEL per worker in the Queue to tell the workers to exit their for-loop
    for i in range(num_workers):
        working_q.put(SENTINEL)
    for proc in processes:
        proc.join()

    output_q.put(SENTINEL)
    writer_proc.join()
When passed two arguments, iter expects a callable and a sentinel:
iter(callable, sentinel). The callable (i.e. a function) gets called repeatedly until it returns a value equal to the sentinel. So
items = iter(output_queue.get, SENTINEL)
defines items to be an iterable which, when iterated over, will return items from output_queue
until output_queue.get() returns SENTINEL.
The for-loop:
for batch in iter(lambda: list(IT.islice(items, threshold)), []):
calls the lambda function repeatedly until an empty list is returned. When called, the lambda function returns a list of up to threshold number of items from the iterable items. Thus, this is an idiom for "grouping by n items without padding". See this post for more on this idiom.
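For instance, here is the idiom in isolation (a standalone sketch, not part of the original code above):

import itertools as IT

items = iter(range(7))
for batch in iter(lambda: list(IT.islice(items, 3)), []):
    print(batch)
# prints [0, 1, 2], then [3, 4, 5], then [6]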
Note that it is not a good practice to test working_q.empty(). It could lead to a race condition. For example, suppose we have the 2 worker processes on these lines when the working_q has only 1 item left in it:
def worker(working_queue, output_queue):
    while True:
        if working_queue.empty() == True:        <-- Process-1
            break
        else:
            picked = working_queue.get()         <-- Process-2
            res_item = "Number " + str(picked)
            output_queue.put(res_item)
    return
Suppose Process-1 calls working_queue.empty() while there is still one item in the queue. So it returns False. Then Process-2 calls working_queue.get() and obtains the last item. Then Process-1 gets to line picked = working_queue.get() and hangs because there are no more items in the queue.
Therefore, use sentinels (as shown above) to concretely signal when a for-loop
or while-loop should stop instead of checking queue.empty().
There is no operation like a "batch q.get", but it is good practice to put/pop batches of items instead of one item at a time.
Which is exactly what multiprocessing.Pool.map is doing with its chunksize parameter :)
For writing output as soon as possible, there is Pool.imap_unordered, which returns an iterable instead of a list.
import multiprocessing

def work(item):
    return "Number " + str(item)

static_input = range(100)
chunksize = 10
with multiprocessing.Pool() as pool:
    for out in pool.imap_unordered(work, static_input, chunksize):
        print(out)
I am attempting to use a partial function so that pool.map() can target a function that has more than one parameter (in this case a Lock() object).
Here is example code (taken from an answer to a previous question of mine):
from functools import partial
import multiprocessing

def target(lock, iterable_item):
    for item in items:
        # Do cool stuff
        if (... some condition here ...):
            lock.acquire()
            # Write to stdout or logfile, etc.
            lock.release()

def main():
    iterable = [1, 2, 3, 4, 5]
    pool = multiprocessing.Pool()
    l = multiprocessing.Lock()
    func = partial(target, l)
    pool.map(func, iterable)
    pool.close()
    pool.join()
However, when I run this code, I get the error:
RuntimeError: Lock objects should only be shared between processes through inheritance.
What am I missing here? How can I share the lock between my subprocesses?
You can't pass normal multiprocessing.Lock objects to Pool methods, because they can't be pickled. There are two ways to get around this. One is to create a Manager() and pass a Manager.Lock():
def main():
    iterable = [1, 2, 3, 4, 5]
    pool = multiprocessing.Pool()
    m = multiprocessing.Manager()
    l = m.Lock()
    func = partial(target, l)
    pool.map(func, iterable)
    pool.close()
    pool.join()
This is a little bit heavyweight, though; using a Manager requires spawning another process to host the Manager server. And all calls to acquire/release the lock have to be sent to that server via IPC.
The other option is to pass the regular multiprocessing.Lock() at Pool creation time, using the initializer kwarg. This will make your lock instance global in all the child workers:
def target(iterable_item):
    for item in items:
        # Do cool stuff
        if (... some condition here ...):
            lock.acquire()
            # Write to stdout or logfile, etc.
            lock.release()

def init(l):
    global lock
    lock = l

def main():
    iterable = [1, 2, 3, 4, 5]
    l = multiprocessing.Lock()
    pool = multiprocessing.Pool(initializer=init, initargs=(l,))
    pool.map(target, iterable)
    pool.close()
    pool.join()
The second solution has the side-effect of no longer requiring partial.
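As a side note (not part of the original answer): concurrent.futures.ProcessPoolExecutor has accepted the same initializer/initargs pair since Python 3.7, so the pattern carries over directly. A minimal sketch reusing the init and target functions from above:

import multiprocessing
import concurrent.futures

if __name__ == "__main__":
    l = multiprocessing.Lock()
    with concurrent.futures.ProcessPoolExecutor(
        initializer=init, initargs=(l,)
    ) as executor:
        list(executor.map(target, [1, 2, 3, 4, 5]))  # force evaluation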
Here's a version (using a Barrier instead of a Lock, but you get the idea) which also works on Windows (where the lack of fork causes additional trouble):
import multiprocessing as mp

def procs(uid_barrier):
    uid, barrier = uid_barrier
    print(uid, 'waiting')
    barrier.wait()
    print(uid, 'past barrier')

def main():
    N_PROCS = 10
    with mp.Manager() as man:
        barrier = man.Barrier(N_PROCS)
        with mp.Pool(N_PROCS) as p:
            p.map(procs, ((uid, barrier) for uid in range(N_PROCS)))

if __name__ == '__main__':
    mp.freeze_support()
    main()
I'm having a lot of trouble trying to understand just how the multiprocessing queue works in Python and how to implement it. Let's say I have two Python modules that access data from a shared file; let's call these two modules a writer and a reader. My plan is to have both the reader and the writer put requests into two separate multiprocessing queues, and then have a third process pop these requests in a loop and execute them as such.
My main problem is that I really don't know how to implement multiprocessing.Queue correctly. You cannot really instantiate the object for each process, since they will be separate queues. How do you make sure that all processes relate to a shared queue (or in this case, queues)?
This is a simple example of a reader and writer sharing a single queue... The writer sends a bunch of integers to the reader; when the writer runs out of numbers, it sends 'DONE', which lets the reader know to break out of the read loop.
You can spawn as many reader processes as you like...
from multiprocessing import Process, Queue
import time
import sys

def reader_proc(queue):
    """Read from the queue; this spawns as a separate Process"""
    while True:
        msg = queue.get()  # Read from the queue and do nothing
        if msg == "DONE":
            break

def writer(count, num_of_reader_procs, queue):
    """Write integers into the queue. A reader_proc() will read them from the queue"""
    for ii in range(0, count):
        queue.put(ii)  # Put 'count' numbers into queue

    ### Tell all readers to stop...
    for ii in range(0, num_of_reader_procs):
        queue.put("DONE")

def start_reader_procs(qq, num_of_reader_procs):
    """Start the reader processes and return all in a list to the caller"""
    all_reader_procs = list()
    for ii in range(0, num_of_reader_procs):
        ### reader_p() reads from qq as a separate process...
        ### you can spawn as many reader_p() as you like
        ### however, there is usually a point of diminishing returns
        reader_p = Process(target=reader_proc, args=((qq),))
        reader_p.daemon = True
        reader_p.start()  # Launch reader_p() as another proc
        all_reader_procs.append(reader_p)
    return all_reader_procs

if __name__ == "__main__":
    num_of_reader_procs = 2
    qq = Queue()  # writer() writes to qq from _this_ process
    for count in [10**4, 10**5, 10**6]:
        assert 0 < num_of_reader_procs < 4
        all_reader_procs = start_reader_procs(qq, num_of_reader_procs)
        writer(count, len(all_reader_procs), qq)  # Queue stuff to all reader_p()
        print("All reader processes are pulling numbers from the queue...")
        _start = time.time()
        for idx, a_reader_proc in enumerate(all_reader_procs):
            print("    Waiting for reader_p.join() index %s" % idx)
            a_reader_proc.join()  # Wait for a_reader_proc() to finish
            print("        reader_p() idx:%s is done" % idx)
        print(
            "Sending {0} integers through Queue() took {1} seconds".format(
                count, (time.time() - _start)
            )
        )
        print("")
Here's a dead simple usage of multiprocessing.Queue and multiprocessing.Process that allows callers to send an "event" plus arguments to a separate process that dispatches the event to a "do_" method on the process. (Python 3.4+)
import multiprocessing as mp
import collections

Msg = collections.namedtuple('Msg', ['event', 'args'])

class BaseProcess(mp.Process):
    """A process backed by an internal queue for simple one-way message passing.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.queue = mp.Queue()

    def send(self, event, *args):
        """Puts the event and args as a `Msg` on the queue
        """
        msg = Msg(event, args)
        self.queue.put(msg)

    def dispatch(self, msg):
        event, args = msg
        handler = getattr(self, "do_%s" % event, None)
        if not handler:
            raise NotImplementedError("Process has no handler for [%s]" % event)
        handler(*args)

    def run(self):
        while True:
            msg = self.queue.get()
            self.dispatch(msg)
Usage:
class MyProcess(BaseProcess):
    def do_helloworld(self, arg1, arg2):
        print(arg1, arg2)

if __name__ == "__main__":
    process = MyProcess()
    process.start()
    process.send('helloworld', 'hello', 'world')
The send happens in the parent process, the do_* happens in the child process.
I left out any exception handling that would obviously interrupt the run loop and exit the child process. You can also customize it by overriding run to control blocking or whatever else.
This is really only useful in situations where you have a single worker process, but I think it's a relevant answer to this question to demonstrate a common scenario with a little more object-orientation.
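If you do need a clean way to stop the child, one option is a sketch like the following (the StoppableProcess name and do_stop handler are mine, not part of the example above):

class StoppableProcess(BaseProcess):
    def do_stop(self):
        # Raising here unwinds dispatch() and breaks run()'s loop below.
        raise SystemExit(0)

    def run(self):
        try:
            while True:
                self.dispatch(self.queue.get())
        except SystemExit:
            pass  # fall out of the loop; the child exits cleanly

# Usage:
#   process = StoppableProcess()
#   process.start()
#   process.send('stop')
#   process.join()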
I had a look at multiple answers across Stack Overflow and the web while trying to set up a way of doing multiprocessing using queues for passing around large pandas dataframes. It seemed to me that every answer was re-iterating the same kind of solutions without any consideration of the multitude of edge cases one will definitely come across when setting up calculations like these. The problem is that there are many things at play at the same time: the number of tasks, the number of workers, the duration of each task, and possible exceptions during task execution. All of these make synchronization tricky, and most answers do not address how you can go about it. So this is my take after fiddling around for a few hours; hopefully it will be generic enough for most people to find it useful.
Some thoughts before any coding examples. Since queue.Empty or queue.qsize() or any other similar method is unreliable for flow control, any code like the following
while True:
    try:
        task = pending_queue.get_nowait()
    except queue.Empty:
        break
is bogus. This will kill the worker even if, milliseconds later, another task turns up in the queue. The worker will not recover, and after a while ALL the workers will disappear as they randomly find the queue momentarily empty. The end result will be that the main multiprocessing function (the one with the join() on the processes) will return without all the tasks having completed. Nice. Good luck debugging through that if you have thousands of tasks and a few are missing.
The other issue is the use of sentinel values. Many people have suggested adding a sentinel value to the queue to flag its end. But to flag it to whom exactly? If there are N workers, assuming N is the number of cores available give or take, then a single sentinel value will only flag the end of the queue to one worker. All the other workers will sit waiting for more work when there is none left. Typical examples I've seen are
while True:
    task = pending_queue.get()
    if task == SOME_SENTINEL_VALUE:
        break
One worker will get the sentinel value while the rest will wait indefinitely. No post I came across mentioned that you need to submit the sentinel value to the queue AT LEAST as many times as you have workers so that ALL of them get it.
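In other words, something along these lines (a two-line sketch using the names above):

NUM_WORKERS = 4  # however many worker processes you started
for _ in range(NUM_WORKERS):
    pending_queue.put(SOME_SENTINEL_VALUE)  # one sentinel per worker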
The other issue is the handling of exceptions during task execution. Again, these should be caught and managed. Moreover, if you have a completed_tasks queue, you should independently count in a deterministic way how many items are in the queue before you decide that the job is done. Again, relying on queue sizes is bound to fail and return unexpected results.
In the example below, the par_proc() function will receive a list of tasks including the functions with which these tasks should be executed alongside any named arguments and values.
import multiprocessing as mp
import dill as pickle
import queue
import time
import psutil

SENTINEL = None

def do_work(tasks_pending, tasks_completed):
    # Get the current worker's name
    worker_name = mp.current_process().name

    while True:
        try:
            task = tasks_pending.get_nowait()
        except queue.Empty:
            print(worker_name + ' found an empty queue. Sleeping for a while before checking again...')
            time.sleep(0.01)
        else:
            try:
                if task == SENTINEL:
                    print(worker_name + ' no more work left to be done. Exiting...')
                    break

                print(worker_name + ' received some work... ')
                time_start = time.perf_counter()
                work_func = pickle.loads(task['func'])
                result = work_func(**task['task'])
                tasks_completed.put({work_func.__name__: result})
                time_end = time.perf_counter() - time_start
                print(worker_name + ' done in {} seconds'.format(round(time_end, 5)))

            except Exception as e:
                print(worker_name + ' task failed. ' + str(e))
                tasks_completed.put({work_func.__name__: None})

def par_proc(job_list, num_cpus=None):

    # Get the number of cores
    if not num_cpus:
        num_cpus = psutil.cpu_count(logical=False)

    print('* Parallel processing')
    print('* Running on {} cores'.format(num_cpus))

    # Set-up the queues for sending and receiving data to/from the workers
    tasks_pending = mp.Queue()
    tasks_completed = mp.Queue()

    # Gather processes and results here
    processes = []
    results = []

    # Count tasks
    num_tasks = 0

    # Add the tasks to the queue
    for job in job_list:
        for task in job['tasks']:
            expanded_job = {}
            num_tasks = num_tasks + 1
            expanded_job.update({'func': pickle.dumps(job['func'])})
            expanded_job.update({'task': task})
            tasks_pending.put(expanded_job)

    # Use as many workers as there are cores (usually chokes the system so better use less)
    num_workers = num_cpus

    # We need as many sentinels as there are worker processes so that ALL processes exit when there is no more
    # work left to be done.
    for c in range(num_workers):
        tasks_pending.put(SENTINEL)

    print('* Number of tasks: {}'.format(num_tasks))

    # Set-up and start the workers
    for c in range(num_workers):
        p = mp.Process(target=do_work, args=(tasks_pending, tasks_completed))
        p.name = 'worker' + str(c)
        processes.append(p)
        p.start()

    # Gather the results
    completed_tasks_counter = 0
    while completed_tasks_counter < num_tasks:
        results.append(tasks_completed.get())
        completed_tasks_counter = completed_tasks_counter + 1

    for p in processes:
        p.join()

    return results
And here is a test to run the above code against:
def test_parallel_processing():
    def heavy_duty1(arg1, arg2, arg3):
        return arg1 + arg2 + arg3

    def heavy_duty2(arg1, arg2, arg3):
        return arg1 * arg2 * arg3

    task_list = [
        {'func': heavy_duty1, 'tasks': [{'arg1': 1, 'arg2': 2, 'arg3': 3}, {'arg1': 1, 'arg2': 3, 'arg3': 5}]},
        {'func': heavy_duty2, 'tasks': [{'arg1': 1, 'arg2': 2, 'arg3': 3}, {'arg1': 1, 'arg2': 3, 'arg3': 5}]},
    ]

    results = par_proc(task_list)

    job1 = sum([y for x in results if 'heavy_duty1' in x.keys() for y in list(x.values())])
    job2 = sum([y for x in results if 'heavy_duty2' in x.keys() for y in list(x.values())])

    assert job1 == 15
    assert job2 == 21
plus another one with some exceptions:
def test_parallel_processing_exceptions():
    def heavy_duty1_raises(arg1, arg2, arg3):
        raise ValueError('Exception raised')
        return arg1 + arg2 + arg3

    def heavy_duty2(arg1, arg2, arg3):
        return arg1 * arg2 * arg3

    task_list = [
        {'func': heavy_duty1_raises, 'tasks': [{'arg1': 1, 'arg2': 2, 'arg3': 3}, {'arg1': 1, 'arg2': 3, 'arg3': 5}]},
        {'func': heavy_duty2, 'tasks': [{'arg1': 1, 'arg2': 2, 'arg3': 3}, {'arg1': 1, 'arg2': 3, 'arg3': 5}]},
    ]

    results = par_proc(task_list)

    job1 = sum([y for x in results if 'heavy_duty1' in x.keys() for y in list(x.values())])
    job2 = sum([y for x in results if 'heavy_duty2' in x.keys() for y in list(x.values())])

    assert not job1
    assert job2 == 21
Hope that is helpful.
in "from queue import Queue" there is no module called queue, instead multiprocessing should be used. Therefore, it should look like "from multiprocessing import Queue"
I just made a simple and general example demonstrating how to pass a message over a Queue between two standalone programs. It doesn't directly answer the OP's question, but it should make the concept clear enough.
Server:
multiprocessing-queue-manager-server.py
import asyncio
import concurrent.futures
import multiprocessing
import multiprocessing.managers
import queue
import sys
import threading
from typing import Any, AnyStr, Dict, Union

class QueueManager(multiprocessing.managers.BaseManager):
    def get_queue(self, ident: Union[AnyStr, int, type(None)] = None) -> multiprocessing.Queue:
        pass

def get_queue(ident: Union[AnyStr, int, type(None)] = None) -> multiprocessing.Queue:
    global q
    if not ident in q:
        q[ident] = multiprocessing.Queue()
    return q[ident]

q: Dict[Union[AnyStr, int, type(None)], multiprocessing.Queue] = dict()
delattr(QueueManager, 'get_queue')

def init_queue_manager_server():
    if not hasattr(QueueManager, 'get_queue'):
        QueueManager.register('get_queue', get_queue)

def serve(no: int, term_ev: threading.Event):
    manager: QueueManager
    with QueueManager(authkey=QueueManager.__name__.encode()) as manager:
        print(f"Server address {no}: {manager.address}")

        while not term_ev.is_set():
            try:
                item: Any = manager.get_queue().get(timeout=0.1)
                print(f"Client {no}: {item} from {manager.address}")
            except queue.Empty:
                continue

async def main(n: int):
    init_queue_manager_server()
    term_ev: threading.Event = threading.Event()
    executor: concurrent.futures.ThreadPoolExecutor = concurrent.futures.ThreadPoolExecutor()
    i: int
    for i in range(n):
        asyncio.ensure_future(asyncio.get_running_loop().run_in_executor(executor, serve, i, term_ev))

    # Gracefully shut down
    try:
        await asyncio.get_running_loop().create_future()
    except asyncio.CancelledError:
        term_ev.set()
        executor.shutdown()
        raise

if __name__ == '__main__':
    asyncio.run(main(int(sys.argv[1])))
Client:
multiprocessing-queue-manager-client.py
import multiprocessing
import multiprocessing.managers
import os
import sys
from typing import AnyStr, Union

class QueueManager(multiprocessing.managers.BaseManager):
    def get_queue(self, ident: Union[AnyStr, int, type(None)] = None) -> multiprocessing.Queue:
        pass

delattr(QueueManager, 'get_queue')

def init_queue_manager_client():
    if not hasattr(QueueManager, 'get_queue'):
        QueueManager.register('get_queue')

def main():
    init_queue_manager_client()

    manager: QueueManager = QueueManager(sys.argv[1], authkey=QueueManager.__name__.encode())
    manager.connect()

    message = f"A message from {os.getpid()}"
    print(f"Message to send: {message}")
    manager.get_queue().put(message)

if __name__ == '__main__':
    main()
Usage
Server:
$ python3 multiprocessing-queue-manager-server.py N
N is an integer indicating how many servers should be created. Copy one of the <server-address-N> lines output by the server and make it the first argument of each multiprocessing-queue-manager-client.py.
Client:
python3 multiprocessing-queue-manager-client.py <server-address-1>
Result
Server:
Client 1: <item> from <server-address-1>
Gist: https://gist.github.com/89062d639e40110c61c2f88018a8b0e5
UPD: Created a package here.
Server:
import ipcq
with ipcq.QueueManagerServer(address=ipcq.Address.AUTO, authkey=ipcq.AuthKey.AUTO) as server:
    server.get_queue().get()
Client:
import ipcq
client = ipcq.QueueManagerClient(address=ipcq.Address.AUTO, authkey=ipcq.AuthKey.AUTO)
client.get_queue().put('a message')
We implemented two versions of this: first, a simple multi-thread pool that can execute many types of callables, making our lives much easier, and second, a version that uses processes, which is less flexible in terms of callables and requires an extra call to dill.
Setting frozen_pool to True will freeze execution until finish_pool_queue is called, in either class.
Thread Version:
'''
Created on Nov 4, 2019

@author: Kevin
'''
from threading import Lock, Thread
from Queue import Queue
import traceback
from helium.loaders.loader_retailers import print_info
from time import sleep
import signal
import os

class ThreadPool(object):
    def __init__(self, queue_threads, *args, **kwargs):
        self.frozen_pool = kwargs.get('frozen_pool', False)
        self.print_queue = kwargs.get('print_queue', True)
        self.pool_results = []
        self.lock = Lock()
        self.queue_threads = queue_threads
        self.queue = Queue()
        self.threads = []

        for i in range(self.queue_threads):
            t = Thread(target=self.make_pool_call)
            t.daemon = True
            t.start()
            self.threads.append(t)

    def make_pool_call(self):
        while True:
            if self.frozen_pool:
                #print '--> Queue is frozen'
                sleep(1)
                continue

            item = self.queue.get()
            if item is None:
                break

            call = item.get('call', None)
            args = item.get('args', [])
            kwargs = item.get('kwargs', {})
            keep_results = item.get('keep_results', False)

            try:
                result = call(*args, **kwargs)
                if keep_results:
                    self.lock.acquire()
                    self.pool_results.append((item, result))
                    self.lock.release()
            except Exception as e:
                self.lock.acquire()
                print e
                traceback.print_exc()
                self.lock.release()
                os.kill(os.getpid(), signal.SIGUSR1)

            self.queue.task_done()

    def finish_pool_queue(self):
        self.frozen_pool = False

        while self.queue.unfinished_tasks > 0:
            if self.print_queue:
                print_info('--> Thread pool... %s' % self.queue.unfinished_tasks)
            sleep(5)

        self.queue.join()

        for i in range(self.queue_threads):
            self.queue.put(None)

        for t in self.threads:
            t.join()

        del self.threads[:]

    def get_pool_results(self):
        return self.pool_results

    def clear_pool_results(self):
        del self.pool_results[:]
Process Version:
'''
Created on Nov 4, 2019

@author: Kevin
'''
import traceback
from helium.loaders.loader_retailers import print_info
from time import sleep
import signal
import os
from multiprocessing import Queue, Process, Value, Array, JoinableQueue, Lock,\
    RawArray, Manager
from dill import dill
import ctypes
from helium.misc.utils import ignore_exception
from mem_top import mem_top
import gc

class ProcessPool(object):
    def __init__(self, queue_processes, *args, **kwargs):
        self.frozen_pool = Value(ctypes.c_bool, kwargs.get('frozen_pool', False))
        self.print_queue = kwargs.get('print_queue', True)
        self.manager = Manager()
        self.pool_results = self.manager.list()
        self.queue_processes = queue_processes
        self.queue = JoinableQueue()
        self.processes = []

        for i in range(self.queue_processes):
            p = Process(target=self.make_pool_call)
            p.start()
            self.processes.append(p)

        print 'Processes', self.queue_processes

    def make_pool_call(self):
        while True:
            if self.frozen_pool.value:
                sleep(1)
                continue

            item_pickled = self.queue.get()

            if item_pickled is None:
                #print '--> Ending'
                self.queue.task_done()
                break

            item = dill.loads(item_pickled)

            call = item.get('call', None)
            args = item.get('args', [])
            kwargs = item.get('kwargs', {})
            keep_results = item.get('keep_results', False)

            try:
                result = call(*args, **kwargs)
                if keep_results:
                    self.pool_results.append(dill.dumps((item, result)))
                else:
                    del call, args, kwargs, keep_results, item, result
            except Exception as e:
                print e
                traceback.print_exc()
                os.kill(os.getpid(), signal.SIGUSR1)

            self.queue.task_done()

    def finish_pool_queue(self, callable=None):
        self.frozen_pool.value = False

        while self.queue._unfinished_tasks.get_value() > 0:
            if self.print_queue:
                print_info('--> Process pool... %s' % (self.queue._unfinished_tasks.get_value()))
            if callable:
                callable()
            sleep(5)

        for i in range(self.queue_processes):
            self.queue.put(None)

        self.queue.join()
        self.queue.close()

        for p in self.processes:
            with ignore_exception: p.join(10)
            with ignore_exception: p.terminate()

        with ignore_exception: del self.processes[:]

    def get_pool_results(self):
        return self.pool_results

    def clear_pool_results(self):
        del self.pool_results[:]

def test(eg):
    print 'EG', eg
Call with either:
tp = ThreadPool(queue_threads=2)
tp.queue.put({'call': test, 'args': [random.randint(0, 100)]})
tp.finish_pool_queue()
or
pp = ProcessPool(queue_processes=2)
pp.queue.put(dill.dumps({'call': test, 'args': [random.randint(0, 100)]}))
pp.queue.put(dill.dumps({'call': test, 'args': [random.randint(0, 100)]}))
pp.finish_pool_queue()
A multi-producer, multi-consumer example, verified. It should be easy to modify it to cover other cases: single/multi producers, single/multi consumers.
from multiprocessing import Process, JoinableQueue
import time
import os

q = JoinableQueue()

def producer():
    for item in range(30):
        time.sleep(2)
        q.put(item)
    pid = os.getpid()
    print(f'producer {pid} done')

def worker():
    while True:
        item = q.get()
        pid = os.getpid()
        print(f'pid {pid} Working on {item}')
        print(f'pid {pid} Finished {item}')
        q.task_done()

for i in range(5):
    p = Process(target=worker, daemon=True)
    p.start()

# send thirty task requests to the workers
producers = []
for i in range(2):
    p = Process(target=producer)
    producers.append(p)
    p.start()

# make sure producers are done
for p in producers:
    p.join()

# block until all work is done
q.join()
print('All work completed')
Explanation:
1. Two producers and five consumers in this example.
2. JoinableQueue is used to make sure all elements stored in the queue will be processed. task_done is for a worker to notify that an element is done. q.join() waits for all elements marked as done.
3. With #2, there is no need to join-wait for every worker.
4. But it is important to join-wait for every producer to store its elements into the queue. Otherwise, the program exits immediately.