For some reason, my program is hanging using multiprocessing and queues, even though I set timeouts and check if the queue is empty. This happens on both Windows and Linux.
There are multiple processes that receive inputs (here a, b and c) and should send back results (here they simply echo the inputs a, b and c).
From what I can see, after "All arguments given" is printed, the processes send back results for a and b over and over again, although a and b were provided only once.
import multiprocessing as mp
import queue

class Multithreading:

    def __init__(self, n_processes):
        self._processes = [
            _Thread(name='Process-{}'.format(i))
            for i in range(n_processes)]

    def __enter__(self):
        for process in self._processes:
            process.start()
            print(f'Started {process.name}')
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        for process in self._processes:
            process.event_stopped.set()
            process.join()

    def run(self):
        args = ['a', 'b', 'c']
        n_calls = len(args)
        for i, arg in enumerate(args):
            m = i % len(self._processes)
            print(f'Setting arguments to {self._processes[m].name}')
            is_started = False
            while not is_started:
                try:
                    self._processes[m].queue_inputs.put(arg, timeout=0.05)
                    is_started = True
                    print(f'Argument given to {self._processes[m].name}')
                except queue.Full:
                    pass
        print(f'All arguments given')
        for i in range(n_calls):
            m = i % len(self._processes)
            print(f'Checking result from {self._processes[m].name}')
            arg = None
            while True:
                try:
                    arg = self._processes[m].queue_results.get(timeout=0.05)
                    print('Received {}'.format(arg))
                    break
                except queue.Empty:
                    print(f'Empty in {self._processes[m].name}, arg = {arg}')

class _Thread(mp.Process):

    def __init__(self, name):
        super().__init__(name=name, target=self._run)
        self.queue_inputs = mp.Queue()
        self.queue_results = mp.Queue()
        self.event_stopped = mp.Event()

    def _run(self):
        print(f'Running {self.name}')
        while not self.event_stopped.is_set():
            try:
                arg = self.queue_inputs.get(timeout=0.05)
                print(f'{self.name} received {arg}')
                while not self.event_stopped.is_set():
                    try:
                        self.queue_results.put(arg, timeout=0.05)
                        print(f'{self.name} sent {arg}')
                    except queue.Full:
                        pass
            except queue.Empty:
                pass

if __name__ == '__main__':
    for _ in range(100000000):
        with Multithreading(n_processes=2) as m:
            m.run()
I would expect the timeouts of the put and get methods to raise the corresponding exceptions, but apparently they do not.
The problem is in _Thread._run:
def _run(self):
    print(f'Running {self.name}')
    while not self.event_stopped.is_set():             # Ok, loop until event_stopped is set
        try:
            arg = self.queue_inputs.get(timeout=0.05)  # Ok, try to get an item
            print(f'{self.name} received {arg}')
            while not self.event_stopped.is_set():     # Oops, what is this loop for???
                try:
                    self.queue_results.put(arg, timeout=0.05)
                    print(f'{self.name} sent {arg}')
                except queue.Full:
                    pass
        except queue.Empty:
            pass
Your current code loops infinitely (or until its queue_results queue becomes full or event_stopped is set) on the same item, repeatedly adding it to the output queue. Replacing the offending while with an if is enough to fix the problem:
...
while not self.event_stopped.is_set():             # Ok, loop until event_stopped is set
    try:
        arg = self.queue_inputs.get(timeout=0.05)  # Ok, try to get an item
        print(f'{self.name} received {arg}')
        if not self.event_stopped.is_set():        # ignore the item if stopped in the meanwhile
            try:
                ...
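For completeness, a sketch of the full corrected _run with that one change applied (same names as in the question's code):

def _run(self):
    print(f'Running {self.name}')
    while not self.event_stopped.is_set():
        try:
            # try to get an item; the timeout lets us re-check the stop event
            arg = self.queue_inputs.get(timeout=0.05)
            print(f'{self.name} received {arg}')
            if not self.event_stopped.is_set():  # an if, not a while
                try:
                    self.queue_results.put(arg, timeout=0.05)
                    print(f'{self.name} sent {arg}')
                except queue.Full:
                    pass
        except queue.Empty:
            pass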
Related
I like the default Python multiprocessing.Pool, but it's still a pain that it isn't easy to show the current progress being made during the pool's execution. In lieu of that, I attempted to create my own custom multiprocess pool mapper, and it looks like this:
from multiprocessing import Process, Pool, cpu_count
from iterable_queue import IterableQueue

def _proc_action(f, in_queue, out_queue):
    try:
        for val in in_queue:
            out_queue.put(f(val))
    except (KeyboardInterrupt, EOFError):
        pass

def progress_pool_map(f, ls, n_procs=cpu_count()):
    in_queue = IterableQueue()
    out_queue = IterableQueue()
    err = None
    try:
        procs = [Process(target=_proc_action, args=(f, in_queue, out_queue))
                 for _ in range(n_procs)]
        [p.start() for p in procs]
        for elem in ls:
            in_queue.put(elem)
        in_queue.close()
        bar = 0
        for _ in ls:
            elem = next(out_queue)
            bar += 1
            if bar % 1000 == 0:
                print(bar)
            yield elem
        out_queue.close()
    except (KeyboardInterrupt, EOFError) as e:
        in_queue.close()
        out_queue.close()
        err = e
    print("Joining processes")
    [p.join() for p in procs]
    print("Closing processes")
    [p.close() for p in procs]
    if err:
        raise err
It works fairly well, and prints a value to the console for every 1000 items processed. The progress display itself is something I can worry about in the future. Right now, however, my issue is that when cancelled, the operation does anything but fail gracefully. When I try to interrupt the map, it hangs on Joining processes and never makes it to Closing processes. If I hit Ctrl+C again, an infinite spew of BrokenPipeErrors fills the console until I send an EOF and stop my program.
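(A common cause of this kind of hang is that join() waits on children that are still blocked writing to a queue whose buffer nobody drains; multiprocessing's documented escape hatches are draining the queue before joining, or Queue.cancel_join_thread(). A hedged sketch of the drain-then-join shape, reusing the empty_remaining helper from the class below:

def shutdown(procs, in_queue, out_queue):
    in_queue.close()
    out_queue.close()
    # drain so no child stays blocked on a full queue buffer
    for _ in out_queue.empty_remaining():
        pass
    for p in procs:
        p.join()

This is a sketch of the general shape, not a verified fix for the code above.)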
Here's iterable_queue.py, for reference:
from multiprocessing.queues import Queue
from multiprocessing import get_context, Value
import queue

class QueueClosed(Exception):
    pass

class IterableQueue(Queue):
    def __init__(self, maxsize=0, *, ctx=None):
        super().__init__(
            maxsize=maxsize,
            ctx=ctx if ctx is not None else get_context()
        )
        self.closed = Value('b', False)

    def close(self):
        with self.closed.get_lock():
            if not self.closed.value:
                self.closed.value = True
                super().put((None, False))
                # throws BrokenPipeError in another thread without this sleep in between
                # terrible hack, must fix at some point
                import time; time.sleep(0.01)
                super().close()

    def __iter__(self):
        return self

    def __next__(self):
        try:
            return self.get()
        except QueueClosed:
            raise StopIteration

    def get(self, *args, **kwargs):
        try:
            result, is_open = super().get(*args, **kwargs)
        except OSError:
            raise QueueClosed
        if not is_open:
            super().put((None, False))
            raise QueueClosed
        return result

    def __bool__(self):
        return bool(self.closed.value)

    def put(self, val, *args, **kwargs):
        with self.closed.get_lock():
            if self.closed.value:
                raise QueueClosed
            super().put((val, True), *args, **kwargs)

    def get_nowait(self):
        return self.get(block=False)

    def put_nowait(self, val):  # fixed: the original was missing the val parameter
        return self.put(val, block=False)

    def empty_remaining(self, block=False):
        try:
            while True:
                yield self.get(block=block)
        except (queue.Empty, QueueClosed):
            pass

    def clear(self):
        for _ in self.empty_remaining():
            pass

    def __enter__(self):
        return self

    def __exit__(self, *args):
        self.close()
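(For orientation, a minimal usage sketch of this class, following the semantics defined above: iteration ends once the queue has been closed and drained.

# producer side
q = IterableQueue()
for x in range(10):
    q.put(x)
q.close()            # signals consumers that no more items will arrive

# consumer side: iteration stops at the close() sentinel
for item in q:
    print(item)

This is in-process only for illustration; in the mapper above the consumer lives in a child process.)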
I'm using threading.Thread and t.start() with a list of callables to do long-running multithreaded processing. My main thread blocks until all threads have finished. I'd like, however, for execution to return immediately if one of the callables throws an exception, and for the other threads to be terminated.
Using t.join() to check that a thread has executed provides no information about failures due to exceptions.
Here is the code:
import json
import requests
import threading

class ThreadServices:

    def __init__(self):
        self.obj = ""

    def execute_services(self, arg1, arg2):
        try:
            result = call_some_process(arg1, arg2)  # some method
            # save results somewhere
        except Exception, e:
            # raise exception
            print e

    def invoke_services(self, stubs):
        """
        Thread Spawning Function
        """
        try:
            p1 = ""  # some value
            p2 = ""  # some value
            # Call service 1
            t1 = threading.Thread(target=self.execute_services, args=(p1, p2,))
            # Start thread
            t1.start()
            # Block till thread completes execution
            t1.join()
            thread_pool = list()
            for stub in stubs:
                # Start parallel execution of threads
                t = threading.Thread(target=self.execute_services,
                                     args=(p1, p2))
                t.start()
                thread_pool.append(t)
            for thread in thread_pool:
                # Block till all the threads complete execution:
                # wait for all the parallel tasks to complete
                thread.join()
            # Start another process thread
            t2 = threading.Thread(target=self.execute_services,
                                  args=(p1, p2))
            t2.start()
            # Block till this thread completes execution
            t2.join()
            requests.post(url, data=json.dumps({"status_code": 200}))
        except Exception, e:
            print e
            requests.post(url, data=json.dumps({"status_code": 500}))
        # Don't return anything as this function is invoked as a thread from
        # the main calling function

class Service(ThreadServices):
    """
    Service Class
    """

    def main_thread(self, request, context):
        """
        Main Thread: Invokes Task Execution Sequence in ThreadedService
        :param request:
        :param context:
        :return:
        """
        try:
            main_thread = threading.Thread(target=self.invoke_services,
                                           args=(request,))
            main_thread.start()
            return True
        except Exception, e:
            return False
When I call Service().main_thread(request, context) and there is some exception executing t1, I need it to be raised in main_thread and to return False. How can I implement this for this structure? Thanks!!
For one thing, you are complicating matters too much. I would do it this way:
from thread import start_new_thread as thread
from time import sleep

class Task:
    """One thread per task.
    This you should do by subclassing threading.Thread().
    This is just a conceptual example.
    """
    def __init__ (self, func, args=(), kwargs={}):
        self.func = func
        self.args = args
        self.kwargs = kwargs
        self.error = None
        self.done = 0
        self.result = None

    def _run (self):
        self.done = 0
        self.error = None
        self.result = None
        # So this is what you should do in subclassed Thread():
        try: self.result = self.func(*self.args, **self.kwargs)
        except Exception, e:
            self.error = e
        self.done = 1

    def start (self):
        thread(self._run, ())

    def wait (self, retrexc=1):
        """Used in place of threading.Thread.join(), but it returns the result
        of the function self.func() and manages errors."""
        while not self.done: sleep(0.001)
        if self.error:
            if retrexc: return self.error
            raise self.error
        return self.result

# And this is how you should use your pool:
def do_something (tasknr):
    print tasknr-20
    if tasknr%7==0: raise Exception, "Dummy exception!"
    return tasknr**120/82.0

pool = []
for task in xrange(20, 50):
    t = Task(do_something, (task,))
    t.start()  # fixed: the original snippet forgot to start the task
    pool.append(t)

# And only then wait for each one:
results = []
for task in pool:
    results.append(task.wait())
print results
This way you can make task.wait() raise the error instead. The thread will already have stopped. So all you need to do is remove the task's reference from pool, or the whole pool, after you are done. You can even:
results = []
for task in pool:
    try: results.append(task.wait(0))
    except Exception, e:
        print task.args, "Error:", str(e)
print results
Now, do not use this Task() class as-is, as it needs a lot of things added before it can be used for real.
Just subclass threading.Thread() and implement a similar concept by overriding run() and join(), or by adding new functions like wait().
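A minimal sketch of that subclassing approach (ExcThread is an illustrative name, not from the answer above; the code works on both Python 2 and 3):

import threading

class ExcThread(threading.Thread):
    """Thread that captures an exception from run() and re-raises it in join()."""
    def __init__(self, *args, **kwargs):
        super(ExcThread, self).__init__(*args, **kwargs)
        self._exc = None

    def run(self):
        try:
            super(ExcThread, self).run()  # runs the target as usual
        except BaseException as e:
            self._exc = e                 # stash the exception for the joiner

    def join(self, timeout=None):
        super(ExcThread, self).join(timeout)
        if self._exc is not None:
            raise self._exc               # surfaces the failure in the caller

With this, t.join() in the main thread raises whatever the target raised, so the caller can catch it and return False.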
I have a process running with asyncio which should run forever.
I can interact with that process with a ProcessIterator, which can (left out here) send data to stdin and fetch data from stdout.
I can access the data with async for fd, data in ProcessIterator(...):.
The problem is that the execution of this async iterator must be time-limited. If the time runs out, the timeout() function is called,
but the exception does not originate out of the __anext__ function, so the consumer is not notified of the timeout.
How can I raise this exception in the async iterator?
I found no way of calling awaitable.throw(something) or similar for it.
class ProcessIterator:
    def __init__(self, process, loop, run_timeout):
        self.process = process
        self.loop = loop
        self.run_timeout = run_timeout
        # set the global timer
        self.overall_timer = self.loop.call_later(
            self.run_timeout, self.timeout)

    def timeout(self):
        # XXX: how do i pass this exception into the iterator?
        raise ProcTimeoutError(
            self.process.args,
            self.run_timeout,
            was_global,
        )

    async def __aiter__(self):
        return self

    async def __anext__(self):
        if self.process.exited:
            raise StopAsyncIteration()
        else:
            # fetch output from the process asyncio.Queue()
            entry = await self.process.output_queue.get()
            if entry == StopIteration:
                raise StopAsyncIteration()
            return entry
The usage of the async iterator is now roughly:
async def test_coro(loop):
    code = 'import time; print("rofl"); time.sleep(5); print("lol")'
    proc = Process([sys.executable, '-u', '-c', code])
    await proc.create()
    try:
        async for fd, line in ProcessIterator(proc, loop, run_timeout=1):
            print("%d: %s" % (fd, line))
    except ProcTimeoutError as exc:
        # XXX This is the exception I'd like to get here! How can I throw it?
        print("timeout: %s" % exc)
    await proc.wait()
tl;dr: How can I throw a timed exception so it originates from an async iterator?
EDIT: Added solution 2
Solution 1:
Can the timeout() callback store the ProcTimeoutError exception in an instance variable? Then __anext__() can check the instance variable and raise the exception if it is set.
class ProcessIterator:
    def __init__(self, process, loop, run_timeout):
        self.process = process
        self.loop = loop
        self.error = None
        self.run_timeout = run_timeout
        # set the global timer
        self.overall_timer = self.loop.call_later(
            self.run_timeout, self.timeout)

    def timeout(self):
        # XXX: set instance variable
        self.error = ProcTimeoutError(
            self.process.args,
            self.run_timeout,
            was_global
        )

    async def __aiter__(self):
        return self

    async def __anext__(self):
        # XXX: if error is set, then raise the exception
        if self.error:
            raise self.error
        elif self.process.exited:
            raise StopAsyncIteration()
        else:
            # fetch output from the process asyncio.Queue()
            entry = await self.process.output_queue.get()
            if entry == StopIteration:
                raise StopAsyncIteration()
            return entry
Solution 2:
Put the exception on the process.output_queue.
....
def timeout(self):
    # XXX: put the timeout error on the queue
    # (put_nowait, since this is a plain callback, not a coroutine)
    self.process.output_queue.put_nowait(ProcTimeoutError(
        self.process.args,
        self.run_timeout,
        was_global
    ))
....
# fetch output from the process asyncio.Queue()
entry = await self.process.output_queue.get()
if entry == StopIteration:
    raise StopAsyncIteration()
elif isinstance(entry, ProcTimeoutError):
    raise entry
....
If there may be entries on the queue, use a priority queue. Assign ProcTimeoutError a higher priority than the other entries, e.g., (0, ProcTimeoutError) vs (1, other_entry).
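A rough sketch of that priority-queue variant (assuming output_queue becomes an asyncio.PriorityQueue; the tuple layout and helper names are illustrative):

import asyncio
import itertools

queue = asyncio.PriorityQueue()
_counter = itertools.count()  # tiebreaker so payloads are never compared

def on_timeout(exc):
    # priority 0 sorts before priority 1, so the error jumps the line
    queue.put_nowait((0, next(_counter), exc))

def on_data(item):
    queue.put_nowait((1, next(_counter), item))

async def next_entry():
    prio, _, entry = await queue.get()
    if isinstance(entry, Exception):
        raise entry
    return entry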
Please check out the timeout context manager from asyncio:

async with asyncio.timeout(10):
    async for i in get_iter():
        process(i)

It is not released yet, but you can copy-paste the implementation from the asyncio master branch.
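(For reference, this eventually shipped in Python 3.11 as asyncio.timeout. A minimal sketch of its use, with get_iter and process as the placeholders from the snippet above; it raises the built-in TimeoutError when the deadline passes:

import asyncio

async def consume():
    try:
        async with asyncio.timeout(10):   # Python 3.11+
            async for i in get_iter():
                process(i)
    except TimeoutError:
        print("iteration timed out")
)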
You could use get_nowait, which will return an entry or throw QueueEmpty immediately. Wrapping it in a while loop on self.error, with some async sleep, should do the trick. Something like:
async def __anext__(self):
    if self.process.exited:
        raise StopAsyncIteration()
    else:
        while self.error is None:
            try:
                entry = self.process.output_queue.get_nowait()
                if entry == StopIteration:
                    raise StopAsyncIteration()
                return entry
            except asyncio.QueueEmpty:
                # some sleep to give back control to the ioloop
                # since we are using nowait
                await asyncio.sleep(0.1)
        else:
            raise self.error
And as a hint, this is the approach used in Tornado's Queue.get implementation with a timeout:
def get(self, timeout=None):
    """Remove and return an item from the queue.

    Returns a Future which resolves once an item is available, or raises
    `tornado.gen.TimeoutError` after a timeout.
    """
    future = Future()
    try:
        future.set_result(self.get_nowait())
    except QueueEmpty:
        self._getters.append(future)
        _set_timeout(future, timeout)
    return future
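The asyncio analogue of that pattern is asyncio.wait_for, which attaches a timeout to any awaitable. A minimal sketch (the helper name is illustrative):

import asyncio

async def get_with_timeout(queue, timeout):
    """Like queue.get(), but raises asyncio.TimeoutError after `timeout` seconds."""
    return await asyncio.wait_for(queue.get(), timeout)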
This is the solution I have come up with for now.
See kevin/process.py in https://github.com/SFTtech/kevin for the upstream version.
It also features line counting and output timeouts, which I stripped from this example.
class Process:
    def __init__(self, command, loop=None):
        self.loop = loop or asyncio.get_event_loop()
        self.created = False
        self.killed = asyncio.Future()
        self.proc = self.loop.subprocess_exec(
            lambda: WorkerInteraction(self),  # see upstream repo
            *command)
        self.transport = None
        self.protocol = None

    async def create(self):
        self.transport, self.protocol = await self.proc

    def communicate(self, timeout):
        if self.killed.done():
            raise Exception("process was already killed "
                            "and no output is waiting")
        return ProcessIterator(self, self.loop, timeout)

class ProcessIterator:
    """
    Asynchronous iterator for the process output.
    Use like `async for (fd, data) in ProcessIterator(...):`
    """

    def __init__(self, process, loop, run_timeout):
        self.process = process
        self.loop = loop
        self.run_timeout = run_timeout
        self.overall_timer = None
        if self.run_timeout < INF:
            # set the global timer
            self.overall_timer = self.loop.call_later(
                self.run_timeout,
                functools.partial(self.timeout, was_global=True))

    def timeout(self, was_global=False):
        if not self.process.killed.done():
            self.process.killed.set_exception(ProcTimeoutError(
                self.process.args,
                self.run_timeout,
            ))

    async def __aiter__(self):
        return self

    async def __anext__(self):
        # either the process exits,
        # there's an exception (process killed, timeout, ...)
        # or the queue gives us the next data item.
        # wait for the first of those events.
        done, pending = await asyncio.wait(
            [self.process.protocol.queue.get(), self.process.killed],
            return_when=asyncio.FIRST_COMPLETED)

        # at least one of them is done now:
        for future in done:
            # if something failed, cancel the pending futures
            # and raise the exception.
            # this happens e.g. for a timeout.
            if future.exception():
                for future_pending in pending:
                    future_pending.cancel()
                # kill the process before throwing the error!
                await self.process.pwn()
                raise future.exception()

            # fetch output from the process
            entry = future.result()

            # it can be StopIteration to indicate the last data chunk
            # as the process exited on its own.
            if entry == StopIteration:
                if not self.process.killed.done():
                    self.process.killed.set_result(entry)

                # raise the stop iteration
                await self.stop_iter(enough=False)

            return entry

        raise Exception("internal fail: no future was done!")

    async def stop_iter(self, enough=False):
        # stop the timer
        if self.overall_timer:
            self.overall_timer.cancel()
        retcode = self.process.returncode()
        raise StopAsyncIteration()
The magic function is this:
done, pending = await asyncio.wait(
    [self.process.protocol.queue.get(), self.process.killed],
    return_when=asyncio.FIRST_COMPLETED)
When the timeout occurs, the queue fetching is aborted reliably.
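(One caveat for readers on current Python: since 3.11, asyncio.wait no longer accepts bare coroutines, only tasks and futures, so the queue read has to be wrapped first. A sketch of the modern form of that call:

# modern asyncio (3.11+): wrap the coroutine in a task/future first
getter = asyncio.ensure_future(self.process.protocol.queue.get())
done, pending = await asyncio.wait(
    [getter, self.process.killed],
    return_when=asyncio.FIRST_COMPLETED)
)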
I'm using the following code to multithread urllib2. However, what is the best way to limit the number of threads that it consumes?
import threading
import urllib2
import Queue

class ApiMultiThreadHelper:

    def __init__(self, api_calls):
        self.q = Queue.Queue()
        self.api_datastore = {}
        self.api_calls = api_calls
        self.userpass = '#####'

    def query_api(self, q, api_query):
        self.q.put(self.issue_request(api_query))

    def issue_request(self, api_query):
        self.api_datastore.update({api_query: {}})
        for lookup in ["call1", "call2"]:
            query = api_query + lookup
            request = urllib2.Request(query)
            request.add_header("Authorization", "Basic %s" % self.userpass)
            f = urllib2.urlopen(request)
            response = f.read()
            f.close()
            self.api_datastore[api_query].update({lookup: response})
        return True

    def go(self):
        threads = []
        for i in self.api_calls:
            t = threading.Thread(target=self.query_api, args=(self.q, i))
            t.start()
            threads.append(t)
        for t in threads:
            t.join()
You should use a thread pool. Here's an implementation I made years ago (Python 3.x friendly):
import traceback
from threading import Thread
try:
    import queue as Queue  # Python 3.x
except ImportError:
    import Queue            # Python 2.x

class ThreadPool(object):

    def __init__(self, no=10):
        self.alive = True
        self.tasks = Queue.Queue()
        self.threads = []
        for _ in range(no):
            t = Thread(target=self.worker)
            t.start()
            self.threads.append(t)

    def worker(self):
        while self.alive:
            try:
                fn, args, kwargs = self.tasks.get(timeout=0.5)
            except Queue.Empty:
                continue
            except ValueError:
                self.tasks.task_done()
                continue
            try:
                fn(*args, **kwargs)
            except Exception:
                # might wanna add some better error handling
                traceback.print_exc()
            self.tasks.task_done()

    def add_job(self, fn, args=[], kwargs={}):
        self.tasks.put((fn, args, kwargs))

    def join(self):
        self.tasks.join()

    def deactivate(self):
        self.alive = False
        for t in self.threads:
            t.join()
You can also find a similar class in the multiprocessing.pool module (don't ask me why it is there). You can then refactor your code like this:
def go(self):
    tp = ThreadPool(20)  # <-- 20 thread workers
    for i in self.api_calls:
        tp.add_job(self.query_api, args=(self.q, i))
    tp.join()
    tp.deactivate()
Number of threads is now defined a priori.
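On Python 3, the stdlib equivalent is concurrent.futures.ThreadPoolExecutor. A minimal sketch of the same go() on top of it (method names taken from the question; the executor usage is standard):

from concurrent.futures import ThreadPoolExecutor

def go(self):
    # max_workers caps the number of concurrent threads, like ThreadPool(20)
    with ThreadPoolExecutor(max_workers=20) as executor:
        futures = [executor.submit(self.query_api, self.q, i)
                   for i in self.api_calls]
        for future in futures:
            future.result()  # re-raises any exception from the worker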
I'm boggled over why a function called in a thread always returns the same value. I've confirmed that the parameters are different for each call. If I call the function after acquiring a lock, it returns the correct value. This obviously defeats the purpose of using threads, because the function is then just called sequentially, one thread after another. Here is what I have. The function is called get_related_properties, and I've made a note of it in the code:
class ThreadedGetMultipleRelatedProperties():

    def __init__(self, property_values, **kwargs):
        self.property_values = property_values
        self.kwargs = kwargs
        self.timeout = kwargs.get('timeout', 20)
        self.lock = threading.RLock()

    def get_result_dict(self):
        queue = QueueWithTimeout()
        result_dictionary = {}
        num_threads = len(self.property_values)
        threads = []
        for i in range(num_threads):
            t = GetMultipleRelatedPropertiesThread(queue,
                                                   result_dictionary,
                                                   self.lock)
            t.setDaemon(True)
            try:
                threads.append(t)
                t.start()
            except:
                return {"Error": "Unable to process results at this time."}
        for property_value in self.property_values:
            kwargs_copy = dict.copy(self.kwargs)  # fixed: was dict.copy(kwargs)
            kwargs_copy['property_value'] = property_value
            queue.put(kwargs_copy)  # fixed: was queue.put(self.kwargs_copy)
        queue.join_with_timeout(self.timeout)
        # cleanup threads
        for i in range(num_threads):
            queue.put(None)
        for t in threads:
            t.join()
        return result_dictionary

class GetMultipleRelatedPropertiesThread(threading.Thread):

    def __init__(self, queue, result_dictionary, lock):
        threading.Thread.__init__(self)
        self.queue = queue
        self.result_dictionary = result_dictionary
        self.lock = lock

    def run(self):
        from mixpanel_helpers import get_related_properties
        while True:
            kwargs = self.queue.get()
            if kwargs == None:
                break
            current_property_value = kwargs.get('property_value')
            self.lock.acquire()
            # The function call below always returns the same value
            # if called before acquire
            result = get_related_properties(**kwargs)
            try:
                self.result_dictionary[current_property_value] = result
            finally:
                self.lock.release()
            # signals to the queue that the job is done
            self.queue.task_done()
Here is get_related_properties. It makes other calls, though, so I'm not sure the problem lives in here:
def get_related_properties(property_name,
                           property_value,
                           related_properties,
                           properties={},
                           **kwargs):
    kwargs['exclude_detailed_data'] = True
    properties[property_name] = property_value
    result = get_multiple_mixpanel_results(properties=properties,
                                           filter_on_values=related_properties,
                                           **kwargs)
    result_dictionary = {}
    for related_property in related_properties:
        try:
            # grab the last result here, because it'll more likely
            # have the most up-to-date properties
            current_result = result[related_property][0]['__results'][0]['label']
        except Exception as e:
            current_result = None
        try:
            related_property = int(related_property)
        except:
            pass
        result_dictionary[related_property] = current_result
    return result_dictionary
An additional note: I've also tried to copy the function using Python's copy module, both a deep and a shallow copy, and call the copy, but neither of those worked.