gevent/threading causes some deadlock - python

I have this code, whose purpose is to dedupe requests:
def dedup_requests(f):
    pending = {}

    @functools.wraps(f)
    def wrapped(*args, **kwargs):
        key = _make_call_key(args, kwargs)
        if key not in pending:
            pending[key] = gevent.spawn(f, *args, **kwargs)
        result = pending[key].get()
        if key in pending:
            del pending[key]
        return result

    return wrapped
I suspect it is causing a deadlock somehow (this happens once in a while, and I can't reproduce it).
It happens both when using threading and gevent.
Is the recurring use of get allowed?
Can this code even produce a deadlock when threading is not involved?
Note that it runs under other gevent tasks, so spawned tasks might spawn additional tasks, in case that's an issue.
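For reference, this is roughly how the wrapped function gets exercised (an illustrative sketch only, assuming the decorator and _make_call_key above; fetch and the values are made up):
import gevent

@dedup_requests
def fetch(x):
    gevent.sleep(0.1)  # simulate I/O
    return x * 2

# several greenlets issue the same call concurrently;
# only one underlying fetch() should be spawned
jobs = [gevent.spawn(fetch, 21) for _ in range(10)]
gevent.joinall(jobs)
print([job.value for job in jobs])  # expected: [42] * 10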

Though I still don't exactly understand the source of the deadlock (my best guess is that get doesn't really work as expected when called more than once), this seems to work:
from gevent import lock

def queue_identical_calls(f, max_size=100):
    pending = {}

    @functools.wraps(f)
    def wrapped(*args, **kwargs):
        key = _make_call_key(args, kwargs)
        if key not in pending:
            pending[key] = lock.BoundedSemaphore(1)
        lock_for_current_call = pending[key]
        lock_for_current_call.acquire()
        result = f(*args, **kwargs)
        lock_for_current_call.release()
        if len(pending) > max_size:
            pending.clear()
        return result

    return wrapped

Your problem is that your code is not async. You need to have the spawned function itself handle the key update, and then test for your values in a while loop. This is an example of async working; you can prove it by noticing that the last element sometimes shows up first in the output.
import gevent
import random

pending = {}

def dedup_requests(key, *args, **kwargs):
    global pending
    if key not in pending:
        gevent.spawn(ftest, key, *args, **kwargs)

def ftest(key, *args, **kwargs):
    global pending
    z = random.randint(1, 7)
    gevent.sleep(z)
    pending[key] = z
    return z

l = ['test', 'test2', 'test3']
for i in l:
    dedup_requests(i)

while 1:
    if set(pending.keys()) != set(l):
        print(pending)
    else:
        print(pending)
        break
    gevent.sleep(1)


saving function output in multithread python [duplicate]

The function foo below returns a string 'foo'. How can I get the value 'foo' which is returned from the thread's target?
from threading import Thread

def foo(bar):
    print('hello {}'.format(bar))
    return 'foo'

thread = Thread(target=foo, args=('world!',))
thread.start()
return_value = thread.join()
The "one obvious way to do it", shown above, doesn't work: thread.join() returned None.
One way I've seen is to pass a mutable object, such as a list or a dictionary, to the thread's constructor, along with an index or other identifier of some sort. The thread can then store its results in its dedicated slot in that object. For example:
def foo(bar, result, index):
    print 'hello {0}'.format(bar)
    result[index] = "foo"

from threading import Thread

threads = [None] * 10
results = [None] * 10

for i in range(len(threads)):
    threads[i] = Thread(target=foo, args=('world!', results, i))
    threads[i].start()

# do some other stuff

for i in range(len(threads)):
    threads[i].join()

print " ".join(results)  # what sound does a metasyntactic locomotive make?
If you really want join() to return the return value of the called function, you can do this with a Thread subclass like the following:
from threading import Thread

def foo(bar):
    print 'hello {0}'.format(bar)
    return "foo"

class ThreadWithReturnValue(Thread):
    def __init__(self, group=None, target=None, name=None,
                 args=(), kwargs={}, Verbose=None):
        Thread.__init__(self, group, target, name, args, kwargs, Verbose)
        self._return = None

    def run(self):
        if self._Thread__target is not None:
            self._return = self._Thread__target(*self._Thread__args,
                                                **self._Thread__kwargs)

    def join(self):
        Thread.join(self)
        return self._return

twrv = ThreadWithReturnValue(target=foo, args=('world!',))
twrv.start()
print twrv.join()  # prints foo
That gets a little hairy because of some name mangling, and it accesses "private" data structures that are specific to the Thread implementation... but it works.
For Python 3:
class ThreadWithReturnValue(Thread):
    def __init__(self, group=None, target=None, name=None,
                 args=(), kwargs={}, Verbose=None):
        Thread.__init__(self, group, target, name, args, kwargs)
        self._return = None

    def run(self):
        if self._target is not None:
            self._return = self._target(*self._args,
                                        **self._kwargs)

    def join(self, *args):
        Thread.join(self, *args)
        return self._return
FWIW, the multiprocessing module has a nice interface for this using the Pool class. And if you want to stick with threads rather than processes, you can just use the multiprocessing.pool.ThreadPool class as a drop-in replacement.
def foo(bar, baz):
    print 'hello {0}'.format(bar)
    return 'foo' + baz

from multiprocessing.pool import ThreadPool
pool = ThreadPool(processes=1)

async_result = pool.apply_async(foo, ('world', 'foo'))  # tuple of args for foo

# do some other stuff in the main process

return_val = async_result.get()  # get the return value from your function.
In Python 3.2+, the stdlib concurrent.futures module provides a higher-level API for threading, including passing return values or exceptions from a worker thread back to the main thread:
import concurrent.futures

def foo(bar):
    print('hello {}'.format(bar))
    return 'foo'

with concurrent.futures.ThreadPoolExecutor() as executor:
    future = executor.submit(foo, 'world!')
    return_value = future.result()
print(return_value)
Jake's answer is good, but if you don't want to use a threadpool (you don't know how many threads you'll need, but create them as needed) then a good way to transmit information between threads is the built-in Queue.Queue class, as it offers thread safety.
I created the following decorator to make it act in a similar fashion to the threadpool:
import threading
import Queue

def threaded(f, daemon=False):
    def wrapped_f(q, *args, **kwargs):
        '''this function calls the decorated function and puts the
        result in a queue'''
        ret = f(*args, **kwargs)
        q.put(ret)

    def wrap(*args, **kwargs):
        '''this is the function returned from the decorator. It fires off
        wrapped_f in a new thread and returns the thread object with
        the result queue attached'''
        q = Queue.Queue()
        t = threading.Thread(target=wrapped_f, args=(q,) + args, kwargs=kwargs)
        t.daemon = daemon
        t.start()
        t.result_queue = q
        return t

    return wrap
Then you just use it as:
@threaded
def long_task(x):
    import time
    x = x + 5
    time.sleep(5)
    return x

# does not block, returns Thread object
y = long_task(10)
print y

# this blocks, waiting for the result
result = y.result_queue.get()
print result
The decorated function creates a new thread each time it's called and returns a Thread object that contains the queue that will receive the result.
UPDATE
It's been quite a while since I posted this answer, but it still gets views so I thought I would update it to reflect the way I do this in newer versions of Python:
Python 3.2 added the concurrent.futures module, which provides a high-level interface for parallel tasks. It provides ThreadPoolExecutor and ProcessPoolExecutor, so you can use a thread or process pool with the same API.
One benefit of this API is that submitting a task to an Executor returns a Future object, which will complete with the return value of the callable you submit.
This makes attaching a queue object unnecessary, which simplifies the decorator quite a bit:
from concurrent.futures import ThreadPoolExecutor
from functools import wraps

_DEFAULT_POOL = ThreadPoolExecutor()

def threadpool(f, executor=None):
    @wraps(f)
    def wrap(*args, **kwargs):
        return (executor or _DEFAULT_POOL).submit(f, *args, **kwargs)

    return wrap
This will use the module-level default thread pool executor if one is not passed in.
The usage is very similar to before:
@threadpool
def long_task(x):
    import time
    x = x + 5
    time.sleep(5)
    return x

# does not block, returns Future object
y = long_task(10)
print(y)

# this blocks, waiting for the result
result = y.result()
print(result)
If you're using Python 3.4+, one really nice feature of using this method (and Future objects in general) is that the returned future can be wrapped to turn it into an asyncio.Future with asyncio.wrap_future. This makes it work easily with coroutines:
result = await asyncio.wrap_future(long_task(10))
If you don't need access to the underlying concurrent.Future object, you can include the wrap in the decorator:
import asyncio
from concurrent.futures import ThreadPoolExecutor
from functools import wraps

_DEFAULT_POOL = ThreadPoolExecutor()

def threadpool(f, executor=None):
    @wraps(f)
    def wrap(*args, **kwargs):
        return asyncio.wrap_future((executor or _DEFAULT_POOL).submit(f, *args, **kwargs))

    return wrap
Then, whenever you need to push cpu intensive or blocking code off the event loop thread, you can put it in a decorated function:
@threadpool
def some_long_calculation():
    ...

# this will suspend while the function is executed on a threadpool
result = await some_long_calculation()
Another solution that doesn't require changing your existing code:
import Queue              # Python 2.x
#from queue import Queue  # Python 3.x

from threading import Thread

def foo(bar):
    print 'hello {0}'.format(bar)    # Python 2.x
    #print('hello {0}'.format(bar))  # Python 3.x
    return 'foo'

que = Queue.Queue()  # Python 2.x
#que = Queue()       # Python 3.x

t = Thread(target=lambda q, arg1: q.put(foo(arg1)), args=(que, 'world!'))
t.start()
t.join()
result = que.get()
print result    # Python 2.x
#print(result)  # Python 3.x
It can be also easily adjusted to a multi-threaded environment:
import Queue              # Python 2.x
#from queue import Queue  # Python 3.x

from threading import Thread

def foo(bar):
    print 'hello {0}'.format(bar)    # Python 2.x
    #print('hello {0}'.format(bar))  # Python 3.x
    return 'foo'

que = Queue.Queue()  # Python 2.x
#que = Queue()       # Python 3.x

threads_list = list()

t = Thread(target=lambda q, arg1: q.put(foo(arg1)), args=(que, 'world!'))
t.start()
threads_list.append(t)

# Add more threads here
...
threads_list.append(t2)
...
threads_list.append(t3)
...

# Join all the threads
for t in threads_list:
    t.join()

# Check thread's return value
while not que.empty():
    result = que.get()
    print result    # Python 2.x
    #print(result)  # Python 3.x
UPDATE:
I think there's a significantly simpler and more concise way to save the result of the thread, and in a way that keeps the interface virtually identical to the threading.Thread class (please let me know if there are edge cases - I haven't tested as much as my original post below):
import threading

class ConciseResult(threading.Thread):
    def run(self):
        self.result = self._target(*self._args, **self._kwargs)
To be robust and avoid potential errors:
import threading

class ConciseRobustResult(threading.Thread):
    def run(self):
        try:
            if self._target is not None:
                self.result = self._target(*self._args, **self._kwargs)
        finally:
            # Avoid a refcycle if the thread is running a function with
            # an argument that has a member that points to the thread.
            del self._target, self._args, self._kwargs
Short explanation: we override only the run method of threading.Thread, and modify nothing else. This allows us to use everything else the threading.Thread class does for us, without needing to worry about missing potential edge cases such as _private attribute assignments or custom attribute modifications in the way that my original post does.
We can verify that we only modify the run method by looking at the output of help(ConciseResult) and help(ConciseRobustResult). The only method/attribute/descriptor included under Methods defined here: is run, and everything else comes from the inherited threading.Thread base class (see the Methods inherited from threading.Thread: section).
To test either of these implementations using the example code below, substitute ConciseResult or ConciseRobustResult for ThreadWithResult in the main function below.
Original post using a closure function in the init method:
Most answers I've found are long and require being familiar with other modules or advanced python features, and will be rather confusing to someone unless they're already familiar with everything the answer talks about.
Working code for a simplified approach:
import threading

class ThreadWithResult(threading.Thread):
    def __init__(self, group=None, target=None, name=None, args=(), kwargs={}, *, daemon=None):
        def function():
            self.result = target(*args, **kwargs)
        super().__init__(group=group, target=function, name=name, daemon=daemon)
Example code:
import time, random

def function_to_thread(n):
    count = 0
    while count < 3:
        print(f'still running thread {n}')
        count += 1
        time.sleep(3)
    result = random.random()
    print(f'Return value of thread {n} should be: {result}')
    return result

def main():
    thread1 = ThreadWithResult(target=function_to_thread, args=(1,))
    thread2 = ThreadWithResult(target=function_to_thread, args=(2,))
    thread1.start()
    thread2.start()
    thread1.join()
    thread2.join()
    print(thread1.result)
    print(thread2.result)

main()
Explanation:
I wanted to simplify things significantly, so I created a ThreadWithResult class and had it inherit from threading.Thread. The nested function (named function) in __init__ calls the threaded function we want to save the value of, and saves the result of that nested function as the instance attribute self.result after the thread finishes executing.
Creating an instance of this is identical to creating an instance of threading.Thread. Pass in the function you want to run on a new thread to the target argument and any arguments that your function might need to the args argument and any keyword arguments to the kwargs argument.
e.g.
my_thread = ThreadWithResult(target=my_function, args=(arg1, arg2, arg3))
I think this is significantly easier to understand than the vast majority of answers, and this approach requires no extra imports! I included the time and random module to simulate the behavior of a thread, but they're not required to achieve the functionality asked in the original question.
I know I'm answering this looong after the question was asked, but I hope this can help more people in the future!
EDIT: I created the save-thread-result PyPI package to allow you to access the same code above and reuse it across projects (GitHub code is here). The PyPI package fully extends the threading.Thread class, so you can set any attributes you would set on threading.Thread on the ThreadWithResult class as well!
The original answer above goes over the main idea behind this subclass, but for more information, see the more detailed explanation (from the module docstring) here.
Quick usage example:
pip3 install -U save-thread-result  # MacOS/Linux
pip install -U save-thread-result   # Windows

python3  # MacOS/Linux
python   # Windows

from save_thread_result import ThreadWithResult

# As of Release 0.0.3, you can also specify values for
# `group`, `name`, and `daemon` if you want to set those
# values manually.
thread = ThreadWithResult(
    target=my_function,
    args=(my_function_arg1, my_function_arg2, ...),
    kwargs={my_function_kwarg1: kwarg1_value, my_function_kwarg2: kwarg2_value, ...}
)

thread.start()
thread.join()
if getattr(thread, 'result', None):
    print(thread.result)
else:
    # thread.result attribute not set - something caused
    # the thread to terminate BEFORE the thread finished
    # executing the function passed in through the
    # `target` argument
    print('ERROR! Something went wrong while executing this thread, and the function you passed in did NOT complete!!')

# seeing help about the class and information about the threading.Thread super class methods and attributes available:
help(ThreadWithResult)
Parris / kindall's join/return answer, ported to Python 3:
from threading import Thread

def foo(bar):
    print('hello {0}'.format(bar))
    return "foo"

class ThreadWithReturnValue(Thread):
    def __init__(self, group=None, target=None, name=None, args=(), kwargs=None, *, daemon=None):
        Thread.__init__(self, group, target, name, args, kwargs, daemon=daemon)
        self._return = None

    def run(self):
        if self._target is not None:
            self._return = self._target(*self._args, **self._kwargs)

    def join(self):
        Thread.join(self)
        return self._return

twrv = ThreadWithReturnValue(target=foo, args=('world!',))
twrv.start()
print(twrv.join())  # prints foo
Note, the Thread class is implemented differently in Python 3: the private attributes are self._target, self._args and self._kwargs rather than the name-mangled self._Thread__target and friends.
I stole kindall's answer and cleaned it up just a little bit.
The key part is adding *args and **kwargs to join() in order to handle the timeout
class threadWithReturn(Thread):
    def __init__(self, *args, **kwargs):
        super(threadWithReturn, self).__init__(*args, **kwargs)
        self._return = None

    def run(self):
        if self._Thread__target is not None:
            self._return = self._Thread__target(*self._Thread__args, **self._Thread__kwargs)

    def join(self, *args, **kwargs):
        super(threadWithReturn, self).join(*args, **kwargs)
        return self._return
UPDATED ANSWER BELOW
This is my most popularly upvoted answer, so I decided to update with code that will run on both py2 and py3.
Additionally, I see many answers to this question that show a lack of comprehension regarding Thread.join(). Some completely fail to handle the timeout arg. But there is also a corner-case that you should be aware of regarding instances when you have (1) a target function that can return None and (2) you also pass the timeout arg to join(). Please see "TEST 4" to understand this corner case.
ThreadWithReturn class that works with py2 and py3:
import sys
from threading import Thread
from builtins import super  # https://stackoverflow.com/a/30159479

_thread_target_key, _thread_args_key, _thread_kwargs_key = (
    ('_target', '_args', '_kwargs')
    if sys.version_info >= (3, 0) else
    ('_Thread__target', '_Thread__args', '_Thread__kwargs')
)

class ThreadWithReturn(Thread):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._return = None

    def run(self):
        target = getattr(self, _thread_target_key)
        if target is not None:
            self._return = target(
                *getattr(self, _thread_args_key),
                **getattr(self, _thread_kwargs_key)
            )

    def join(self, *args, **kwargs):
        super().join(*args, **kwargs)
        return self._return
Some sample tests are shown below:
import time, random

# TEST TARGET FUNCTION
def giveMe(arg, seconds=None):
    if seconds is not None:
        time.sleep(seconds)
    return arg

# TEST 1
my_thread = ThreadWithReturn(target=giveMe, args=('stringy',))
my_thread.start()
returned = my_thread.join()
# (returned == 'stringy')

# TEST 2
my_thread = ThreadWithReturn(target=giveMe, args=(None,))
my_thread.start()
returned = my_thread.join()
# (returned is None)

# TEST 3
my_thread = ThreadWithReturn(target=giveMe, args=('stringy',), kwargs={'seconds': 5})
my_thread.start()
returned = my_thread.join(timeout=2)
# (returned is None)  # because join() timed out before giveMe() finished

# TEST 4
my_thread = ThreadWithReturn(target=giveMe, args=(None,), kwargs={'seconds': 5})
my_thread.start()
returned = my_thread.join(timeout=random.randint(1, 10))
Can you identify the corner-case that we may possibly encounter with TEST 4?
The problem is that we expect giveMe() to return None (see TEST 2), but we also expect join() to return None if it times out.
returned is None means either:
(1) that's what giveMe() returned, or
(2) join() timed out
This example is trivial since we know that giveMe() will always return None. But in real-world instance (where the target may legitimately return None or something else) we'd want to explicitly check for what happened.
Below is how to address this corner-case:
# TEST 4
my_thread = ThreadWithReturn(target=giveMe, args=(None,), kwargs={'seconds': 5})
my_thread.start()
returned = my_thread.join(timeout=random.randint(1, 10))

if my_thread.is_alive():
    # returned is None because join() timed out
    # this also means that giveMe() is still running in the background
    pass
    # handle this based on your app's logic
else:
    # join() is finished, and so is giveMe()
    # BUT we could also be in a race condition, so we need to update returned, just in case
    returned = my_thread.join()
Using Queue:
import threading, queue

def calc_square(num, out_queue1):
    l = []
    for x in num:
        l.append(x * x)
    out_queue1.put(l)

arr = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
out_queue1 = queue.Queue()
t1 = threading.Thread(target=calc_square, args=(arr, out_queue1))
t1.start()
t1.join()
print(out_queue1.get())
My solution to the problem is to wrap the function and thread in a class. It does not require using pools, queues, or C-type variable passing. It is also non-blocking; you check status instead. See the example of how to use it at the end of the code.
import threading

class ThreadWorker():
    '''
    The basic idea is: given a function, create an object.
    The object can then run the function in a thread.
    It provides a wrapper to start it, check its status, and get data out of the function.
    '''
    def __init__(self, func):
        self.thread = None
        self.data = None
        self.func = self.save_data(func)

    def save_data(self, func):
        '''modify function to save its returned data'''
        def new_func(*args, **kwargs):
            self.data = func(*args, **kwargs)
        return new_func

    def start(self, params):
        self.data = None
        if self.thread is not None:
            if self.thread.isAlive():
                return 'running'  # could raise exception here
        # unless thread exists and is alive, start or restart it
        self.thread = threading.Thread(target=self.func, args=params)
        self.thread.start()
        return 'started'

    def status(self):
        if self.thread is None:
            return 'not_started'
        else:
            if self.thread.isAlive():
                return 'running'
            else:
                return 'finished'

    def get_results(self):
        if self.thread is None:
            return 'not_started'  # could return exception
        else:
            if self.thread.isAlive():
                return 'running'
            else:
                return self.data

def add(x, y):
    return x + y

add_worker = ThreadWorker(add)
print add_worker.start((1, 2,))
print add_worker.status()
print add_worker.get_results()
Taking into consideration @iman's comment on @JakeBiesinger's answer, I have recomposed it to use a variable number of threads:
from multiprocessing.pool import ThreadPool

def foo(bar, baz):
    print 'hello {0}'.format(bar)
    return 'foo' + baz

numOfThreads = 3
results = []

pool = ThreadPool(numOfThreads)

for i in range(0, numOfThreads):
    results.append(pool.apply_async(foo, ('world', 'foo')))  # tuple of args for foo

# do some other stuff in the main process
# ...
# ...

results = [r.get() for r in results]
print results
pool.close()
pool.join()
I'm using this wrapper, which comfortably turns any function into one that runs in a Thread, taking care of its return value or exception. It doesn't add Queue overhead.
import sys
import threading

def threading_func(f):
    """Decorator for running a function in a thread and handling its return
    value or exception"""
    def start(*args, **kw):
        def run():
            try:
                th.ret = f(*args, **kw)
            except:
                th.exc = sys.exc_info()

        def get(timeout=None):
            th.join(timeout)
            if th.exc:
                raise th.exc[0], th.exc[1], th.exc[2]  # py2
                ##raise th.exc[1]  # py3
            return th.ret

        th = threading.Thread(None, run)
        th.exc = None
        th.get = get
        th.start()
        return th
    return start
Usage Examples
def f(x):
    return 2.5 * x

th = threading_func(f)(4)
print("still running?:", th.is_alive())
print("result:", th.get(timeout=1.0))

@threading_func
def th_mul(a, b):
    return a * b

th = th_mul("text", 2.5)
try:
    print(th.get())
except TypeError:
    print("exception thrown ok.")
Notes on the threading module
Comfortable return-value and exception handling of a threaded function is a frequent "Pythonic" need and should indeed already be offered by the threading module - possibly directly in the standard Thread class. ThreadPool has way too much overhead for simple tasks - 3 managing threads, lots of bureaucracy. Unfortunately Thread's layout was copied from Java originally - which you can see, e.g., in the still-useless first (!) constructor parameter group.
Based on what kindall mentioned, here's the more generic solution that works with Python 3:
import threading

class ThreadWithReturnValue(threading.Thread):
    def __init__(self, *init_args, **init_kwargs):
        threading.Thread.__init__(self, *init_args, **init_kwargs)
        self._return = None

    def run(self):
        self._return = self._target(*self._args, **self._kwargs)

    def join(self):
        threading.Thread.join(self)
        return self._return
Usage
th = ThreadWithReturnValue(target=requests.get, args=('http://www.google.com',))
th.start()
response = th.join()
response.status_code # => 200
join() always returns None; I think you should subclass Thread to handle return codes and so on.
You can define a mutable object above the scope of the threaded function, and add the result to it. (I also modified the code to be Python 3 compatible.)
returns = {}

def foo(bar):
    print('hello {0}'.format(bar))
    returns[bar] = 'foo'

from threading import Thread
t = Thread(target=foo, args=('world!',))
t.start()
t.join()
print(returns)
This returns {'world!': 'foo'}
If you use the function input as the key to your results dict, every unique input is guaranteed an entry in the results (duplicate inputs would overwrite each other's entry).
Define your target to:
1) take an argument q
2) replace any return foo statements with q.put(foo); return
so a function
def func(a):
    ans = a * a
    return ans
would become
def func(a, q):
    ans = a * a
    q.put(ans)
    return
and then you would proceed as such
from Queue import Queue
from threading import Thread

ans_q = Queue()
arg_tups = [(i, ans_q) for i in xrange(10)]
threads = [Thread(target=func, args=arg_tup) for arg_tup in arg_tups]
_ = [t.start() for t in threads]
_ = [t.join() for t in threads]
results = [ans_q.get() for _ in xrange(len(threads))]
And you can use function decorators/wrappers to make it so you can use your existing functions as target without modifying them, but follow this basic scheme.
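For example, a hedged sketch of such a wrapper (the names returns_via_queue and square are illustrative, not part of the scheme above):
from queue import Queue  # Queue.Queue on Python 2
from threading import Thread
from functools import wraps

def returns_via_queue(func):
    '''Wrap func so that it puts its return value on the queue it is handed.'''
    @wraps(func)
    def wrapper(q, *args, **kwargs):
        q.put(func(*args, **kwargs))
    return wrapper

def square(a):
    return a * a

ans_q = Queue()
t = Thread(target=returns_via_queue(square), args=(ans_q, 3))
t.start()
t.join()
print(ans_q.get())  # 9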
GuySoft's idea is great, but I think the object does not necessarily have to inherit from Thread, and start() could be removed from the interface:
from threading import Thread
import queue

class ThreadWithReturnValue(object):
    def __init__(self, target=None, args=(), **kwargs):
        self._que = queue.Queue()
        self._t = Thread(target=lambda q, arg1, kwargs1: q.put(target(*arg1, **kwargs1)),
                         args=(self._que, args, kwargs), )
        self._t.start()

    def join(self):
        self._t.join()
        return self._que.get()

def foo(bar):
    print('hello {0}'.format(bar))
    return "foo"

twrv = ThreadWithReturnValue(target=foo, args=('world!',))
print(twrv.join())  # prints foo
This is a pretty old question, but I wanted to share a simple solution that has worked for me and helped my dev process.
The methodology behind this answer is that the "new" target function, inner, assigns the result of the original function (passed through the __init__ function) to the result instance attribute of the wrapper, via a closure.
This allows the wrapper class to hold onto the return value for callers to access at any time.
NOTE: This method doesn't need to use any mangled or private methods of the threading.Thread class, although generator functions have not been considered (the OP did not mention yield functions).
Enjoy!
from threading import Thread as _Thread

class ThreadWrapper:
    def __init__(self, target, *args, **kwargs):
        self.result = None
        self._target = self._build_threaded_fn(target)
        self.thread = _Thread(
            target=self._target,
            *args,
            **kwargs
        )

    def _build_threaded_fn(self, func):
        def inner(*args, **kwargs):
            self.result = func(*args, **kwargs)
        return inner
Additionally, you can run pytest (assuming you have it installed) with the following code to demonstrate the results:
import time
from commons import ThreadWrapper

def test():
    def target():
        time.sleep(1)
        return 'Hello'

    wrapper = ThreadWrapper(target=target)
    wrapper.thread.start()

    r = wrapper.result
    assert r is None

    time.sleep(2)

    r = wrapper.result
    assert r == 'Hello'
As mentioned, a multiprocessing pool is much slower than basic threading. Using queues, as proposed in some answers here, is a very effective alternative. I have used it with dictionaries in order to run a lot of small threads and recover multiple answers by combining them with dictionaries:
#!/usr/bin/env python3

import threading
# use Queue for python2
import queue
import random

LETTERS = 'abcdefghijklmnopqrstuvwxyz'
LETTERS = [x for x in LETTERS]

NUMBERS = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]

def randoms(k, q):
    result = dict()
    result['letter'] = random.choice(LETTERS)
    result['number'] = random.choice(NUMBERS)
    q.put({k: result})

threads = list()
q = queue.Queue()
results = dict()

for name in ('alpha', 'oscar', 'yankee',):
    threads.append(threading.Thread(target=randoms, args=(name, q)))
    threads[-1].start()

_ = [t.join() for t in threads]

while not q.empty():
    results.update(q.get())

print(results)
Here is the version that I created of kindall's answer.
This version makes it so that all you have to do is input your command with arguments to create the new thread.
This was made with Python 3.8:
from threading import Thread
from typing import Any

def test(plug, plug2, plug3):
    print(f"hello {plug}")
    print(f'I am the second plug : {plug2}')
    print(plug3)
    return 'I am the return Value!'

def test2(msg):
    return f'I am from the second test: {msg}'

def test3():
    print('hello world')

def NewThread(com, Returning: bool, *arguments) -> Any:
    """
    Will create a new thread for a function/command.

    :param com: Command to be Executed
    :param arguments: Arguments to be sent to Command
    :param Returning: True/False Will this command need to return anything
    """
    class NewThreadWorker(Thread):
        def __init__(self, group=None, target=None, name=None, args=(), kwargs=None, *,
                     daemon=None):
            Thread.__init__(self, group, target, name, args, kwargs, daemon=daemon)
            self._return = None

        def run(self):
            if self._target is not None:
                self._return = self._target(*self._args, **self._kwargs)

        def join(self):
            Thread.join(self)
            return self._return

    ntw = NewThreadWorker(target=com, args=(*arguments,))
    ntw.start()
    if Returning:
        return ntw.join()

if __name__ == "__main__":
    print(NewThread(test, True, 'hi', 'test', test2('hi')))
    NewThread(test3, True)
You can use pool.apply_async() of ThreadPool() to return the value from test() as shown below:
from multiprocessing.pool import ThreadPool

def test(num1, num2):
    return num1 + num2

pool = ThreadPool(processes=1)           # Here
result = pool.apply_async(test, (2, 3))  # Here
print(result.get())  # 5
And, you can also use submit() of concurrent.futures.ThreadPoolExecutor() to return the value from test() as shown below:
from concurrent.futures import ThreadPoolExecutor

def test(num1, num2):
    return num1 + num2

with ThreadPoolExecutor(max_workers=1) as executor:
    future = executor.submit(test, 2, 3)  # Here
    print(future.result())  # 5
And, instead of return, you can use the array result as shown below:
from threading import Thread

def test(num1, num2, r):
    r[0] = num1 + num2  # Instead of "return"

result = [None]  # Here
thread = Thread(target=test, args=(2, 3, result))
thread.start()
thread.join()
print(result[0])  # 5
And instead of return, you can also use the queue result as shown below:
from threading import Thread
import queue

def test(num1, num2, q):
    q.put(num1 + num2)  # Instead of "return"

queue = queue.Queue()  # Here
thread = Thread(target=test, args=(2, 3, queue))
thread.start()
thread.join()
print(queue.get())  # 5
The shortest and simplest way I've found to do this is to take advantage of Python classes and their dynamic properties. You can retrieve the current thread from within the context of your spawned thread using threading.current_thread(), and assign the return value to a property.
import threading

def some_target_function():
    # Your code here.
    threading.current_thread().return_value = "Some return value."

your_thread = threading.Thread(target=some_target_function)
your_thread.start()
your_thread.join()

return_value = your_thread.return_value
print(return_value)
One usual solution is to wrap your function foo with a small wrapper that stores the result, like
result = queue.Queue()

def task_wrapper(*args):
    result.put(target(*args))
Then the whole code may look like this:
import queue
import threading

result = queue.Queue()

def task_wrapper(*args):
    result.put(target(*args))

# target, args_list and max_num are placeholders from this answer's context
def run_all():
    threads = [threading.Thread(target=task_wrapper, args=args) for args in args_list]
    for t in threads:
        t.start()
        # throttle: busy-wait until the number of live threads drops below max_num
        while True:
            if len(threading.enumerate()) < max_num:
                break
    for t in threads:
        t.join()
    return result
Note
One important issue is that the return values may be unordered.
(In fact, the return values don't have to be saved to a queue; you can choose any thread-safe data structure.)
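If ordering matters, one sketch (not from the answer above, reusing its target/args_list placeholders) is to key each result by the index of its input instead of using a queue:
import threading

results = {}  # each thread writes a distinct key, so writes don't collide

def task_wrapper(index, *args):
    results[index] = target(*args)

threads = [threading.Thread(target=task_wrapper, args=(i,) + tuple(args))
           for i, args in enumerate(args_list)]
for t in threads:
    t.start()
for t in threads:
    t.join()

ordered = [results[i] for i in range(len(args_list))]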
kindall's answer in Python 3:
class ThreadWithReturnValue(Thread):
    def __init__(self, group=None, target=None, name=None,
                 args=(), kwargs={}, *, daemon=None):
        Thread.__init__(self, group, target, name, args, kwargs, daemon=daemon)
        self._return = None

    def run(self):
        try:
            if self._target:
                self._return = self._target(*self._args, **self._kwargs)
        finally:
            del self._target, self._args, self._kwargs

    def join(self, timeout=None):
        Thread.join(self, timeout)
        return self._return
I know this thread is old... but I faced the same problem... If you are willing to use thread.join(), this works:
import threading

class test:
    def __init__(self):
        self.msg = ""

    def hello(self, bar):
        print('hello {}'.format(bar))
        self.msg = "foo"

    def main(self):
        thread = threading.Thread(target=self.hello, args=('world!',))
        thread.start()
        thread.join()
        print(self.msg)

g = test()
g.main()
Best way... Define a global variable, then change the variable in the threaded function. Nothing to pass in or retrieve back
from threading import Thread

# global var
random_global_var = 5

def function():
    global random_global_var
    random_global_var += 1

domath = Thread(target=function)
domath.start()
domath.join()
print(random_global_var)
# result: 6

Is there a way to prevent the infinite loop after creating an iterable custom object in Python - 3.x?

I'm setting up a library to spawn as many threads as I can and have them complete any task they are assigned to. When unit-testing the creation of the object containing n threads and trying to iterate over each one at the object level, I run into an infinite loop.
I fixed the pickling issue with race conditions, but now, when trying to output in a for loop how many threads were created, I encounter an infinite loop while debugging.
from __future__ import print_function

try:
    import sys
    from threading import Thread, Lock, current_thread
    from queue import Queue
except ImportError:
    raise ImportError

class Worker(Thread):
    """ Thread executing tasks from a given tasks queue """
    def __init__(self, tasks):
        Thread.__init__(self)
        self.tasks = tasks
        self.daemon = True
        self.start()

    def run(self):
        while True:
            func, args, kargs = self.tasks.get()
            try:
                # Acquire locking mechanism for threads to prevent race condition
                Lock.acquire()
                func(*args, **kargs)
                # Release locking mechanism
                Lock.release()
            except Exception as e:
                # An exception happened in this thread
                raise e
            finally:
                if self.tasks is None:
                    # Mark process done once there are no more tasks to process
                    self.tasks.task_done()

class SpawnThreads:
    """Pool of threads consuming tasks from a queue."""
    def __init__(self, num_threads: int):
        self.tasks = Queue(num_threads)
        self.num_threads = num_threads
        for _ in range(self.num_threads):
            Worker(self.tasks)

    def __iter__(self):
        return self

    def __next__(self):
        next_value = 0
        while next_value < self.num_threads:
            try:
                next_value += 1
            except Exception:
                raise StopIteration
        return next_value

    def add_task(self, func, *args, **kargs):
        """Add a task to the queue."""
        self.tasks.put((func, args, kargs))

    def task_list(self, func, args_list):
        """Add a list of tasks to the queue."""
        for args in args_list:
            self.add_task(func, args)

    def wait_completion(self):
        """Wait for completion of all the tasks in the queue."""
        self.tasks.join()

    def get_qsize(self):
        """Return the approximate size of the queue."""
        return self.tasks.qsize()

    def get_current_thread(self):
        return current_thread()
This is the unit test that evaluates the creation of the thread-spawning object, then iterates over and accesses each individual thread.
import pytest
import unittest

import SpawnThreads

class TestThreadFactory(unittest.TestCase):

    def test_spawn_threads(self):
        workforce = SpawnThreads(5)
        self.assertIsNotNone(workforce)
        print(workforce)
        for w in workforce:
            print(w)
The expected output should be the address space/object, which is each thread (5 total).
More Specifically I would like to see this result 5 times in the console:
<ThreadFactory.thread_factory.SpawnThreads object at 0x0000024E851F5208>
I am getting the integer 5 returned infinitely instead of the 5 addresses of threads.
Your __next__ is definitionally always going to return 5 and never end. __next__ isn't a generator function, there is no state on entry aside from what's on self. So you always loop until next_value (a stateless local variable) is equal to self.num_threads (a value which never changes) and return it; your __next__ could simplify to just return self.num_threads, with no chance of StopIteration ever being raised (thus the infinite loop).
If you want it to return different values (specifically, each of your workers), you'll need state for that:
class SpawnThreads:
    """Pool of threads consuming tasks from a queue."""
    def __init__(self, num_threads: int):
        self.tasks = Queue(num_threads)
        self.next_value = 0  # Initial next_value in instance state
        self.num_threads = num_threads
        # Store list of workers
        self.workers = [Worker(self.tasks) for _ in range(self.num_threads)]

    def __iter__(self):
        return self

    def __next__(self):
        # Check if iteration has finished
        if self.next_value >= self.num_threads:
            raise StopIteration

        retval = self.workers[self.next_value]  # Save value to return to local
        self.next_value += 1                    # Increment state for next use
        return retval                           # Return value
Those last three lines could be replaced with an alternative tricksy approach to avoid the local variable if you really care:
try:
    return self.workers[self.next_value]
finally:
    self.next_value += 1
Or even better, you could use Python built-ins to do the work for you:
class SpawnThreads:
    """Pool of threads consuming tasks from a queue."""
    def __init__(self, num_threads: int):
        self.tasks = Queue(num_threads)
        self.num_threads = num_threads
        self.workers = [Worker(self.tasks) for _ in range(self.num_threads)]
        self.next_worker_iter = iter(self.workers)  # Iterates workers

    def __iter__(self):
        return self

    def __next__(self):
        # Let the list iterator do the work of maintaining state,
        # raising StopIteration, etc.
        return next(self.next_worker_iter)
This approach is simpler, faster, and as a bonus, thread-safe, at least on CPython (if two threads iterate the same SpawnThreads instance, each of the workers will be produced exactly once, rather than values potentially being skipped or repeated).
If the goal is to make an iterable (can be iterated multiple times), not an iterator (can be iterated once from beginning to end and never again), the simplest solution is to make __iter__ return an iterator itself, removing the need for __next__ entirely:
class SpawnThreads:
    """Pool of threads consuming tasks from a queue."""
    def __init__(self, num_threads: int):
        self.tasks = Queue(num_threads)
        self.num_threads = num_threads
        self.workers = [Worker(self.tasks) for _ in range(self.num_threads)]

    def __iter__(self):
        # Makes this a generator function that produces each Worker once
        yield from self.workers
        # Alternatively (as a plain method, not a generator):
        #     return iter(self.workers)
        # though that exposes more implementation details than an anonymous generator
The problem is this method:
def __next__(self):
    next_value = 0
    while next_value < self.num_threads:
        try:
            next_value += 1
        except Exception:
            raise StopIteration
    return next_value
It's basically the same as
def __next__(self):
    return self.num_threads
As you can see, there isn't any iterator state: it will just return the same number forever. next_value += 1 will never throw an exception; next_value is just an integer.
To achieve what you want, just store the threads in a container and return an iterator to that container. Modify SpawnThreads:
def __init__(self, num_threads: int):
    self.tasks = Queue(num_threads)
    self.num_threads = num_threads
    self.threads = []
    for _ in range(self.num_threads):
        self.threads.append(Worker(self.tasks))

def __iter__(self):
    return iter(self.threads)

# remove the __next__() method

Waiting for tornado future to complete

I have a function that returns a future. I want to create a decorator to the function which waits for the future to complete and then return the result essentially converting the async function to blocking function (which I will use in my REST API). Is there a way to do that?
def sync(fn):
    def wrapped(*args, **kwargs):
        return IOLoop.instance().run_sync(lambda: fn(*args, **kwargs))
    return wrapped

@gen.coroutine
def my_coro():
    # ...
    pass

sync_fn = sync(my_coro)
result = sync_fn()
To resolve a future you need to yield it. Something like this might work:
from tornado import gen

def blocking(func):
    def new_func(*args, **kwargs):
        result = yield func(*args, **kwargs)
        return result
    return gen.coroutine(new_func)

Debounce Celery tasks?

Is there a standard method for debouncing Celery tasks?
For example, so that a task can be "started" multiple times, but will only be run once after some delay:
def debounce_task(task):
    if task_is_queued(task):
        return
    task.apply_async(countdown=30)
Here's how we do it with Redis counters. All of this can probably be generalized in a decorator but we only use it for a specific task (webhooks)
Your public-facing task is what you call from other functions. It'll need to increment a key in Redis. The key is formed by the arguments of your function, whatever they may be (this ensures the counter is unique amongst individual tasks)
@task
def your_public_task(*args, **kwargs):
    cache_key = make_public_task_cache_key(*args, **kwargs)
    get_redis().incr(cache_key)
    _your_task.apply_async(args=args, kwargs=kwargs, countdown=settings.QUEUE_DELAY)
Note the cache key functions are shared (you want the same cache key in each function), and the countdown setting.
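make_public_task_cache_key itself isn't shown here; a possible sketch (assuming the arguments have stable reprs) would be:
import hashlib

def make_public_task_cache_key(*args, **kwargs):
    # Any deterministic serialization of the call works, as long as
    # both tasks compute the key the same way.
    raw = repr((args, sorted(kwargs.items())))
    return 'your_public_task:' + hashlib.md5(raw.encode()).hexdigest()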
Then, the actual task executing the code does the following:
@task
def _your_task(*args, **kwargs):
    cache_key = make_public_task_cache_key(*args, **kwargs)
    counter = get_redis().getset(cache_key, 0)
    # redis makes the zero a string.
    if counter == '0':
        return
    # ... execute your actual task code.
This lets you hit your_public_task.delay(..) as many times as you want, within your QUEUE_DELAY, and it'll only fire off once.
bartek has the idea: use redis counters, which are atomic (and should be easily available if your broker is redis). Although his solution is throttling, not debouncing. The difference is minor though (getset vs decr).
Queue up the task:
conn = get_redis()
conn.incr(key)
task.apply_async(args=args, kwargs=kwargs, countdown=countdown)
Then in the task:
conn = get_redis()
counter = conn.decr(key)
if counter > 0:
    # task is still queued
    return
# continue on to rest of task
It's hard to make it a decorator, since you need to decorate both the task and the call to the task itself. So you will need a decorator before the Celery @task decorator and one after it.
For now I've just made some functions that help me call the task, and one that checks at the start of the task.
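Those helpers might look something like this (a sketch of the described pattern with illustrative names; make_task_key is hypothetical):
def queue_debounced(task, countdown, *args, **kwargs):
    '''Use this instead of task.delay(): bump the counter, then schedule.'''
    key = make_task_key(task.name, args, kwargs)
    get_redis().incr(key)
    task.apply_async(args=args, kwargs=kwargs, countdown=countdown)

def should_run(task_name, args, kwargs):
    '''Call this at the start of the task body: True only for the last queued call.'''
    key = make_task_key(task_name, args, kwargs)
    return get_redis().decr(key) <= 0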
Here's how you can do it with Mongo.
NOTE: I had to make the design a little more forgiving, as Celery tasks aren't guaranteed to execute the exact moment eta is met or the countdown runs out.
Also, Mongo expiring indexes are only cleaned up every minute or so; So you can't base the design around records being deleted the moment the eta is up.
Anyhow, the flow is something like this:
Client code calls my_task.
preflight increments a call counter, and returns it as flight_id
_my_task is set to be executed after TTL seconds.
When _my_task runs, it checks whether its flight_id is still current. If it's not, it aborts.
... sometime later... mongo cleans up stale entries in the collection, via an expiring index.
@celery.task(track_started=False, ignore_result=True)
def my_task(my_arg):
    flight_id = preflight(inflight_collection, 'my_task', HASH(my_arg), TTL)
    _my_task.apply_async((my_arg,), {'flight_id': flight_id}, countdown=TTL)

@celery.task(track_started=False, ignore_result=True)
def _my_task(my_arg, flight_id=None):
    if not check_for_takeoff(inflight_collection, 'my_task', HASH(my_arg), flight_id):
        return
    # ... actual work ... #
Library code:
TTL = 5 * 60      # Run tasks after 5 minutes
EXPIRY = 6 * TTL  # This needs to be much larger than TTL.

# We need to store a list of task-executions currently pending
inflight_collection = db['celery_In_Flight']
inflight_collection.create_index([('fn', pymongo.ASCENDING,),
                                  ('key', pymongo.ASCENDING,)])
inflight_collection.create_index('eta', expiresAfterSeconds=EXPIRY)

def preflight(collection, fn, key, ttl):
    eta = datetime.datetime.now() + datetime.timedelta(seconds=ttl)
    result = collection.find_one_and_update({
        'fn': fn,
        'key': key,
    }, {
        '$set': {
            'eta': eta
        },
        '$inc': {
            'flightId': 1
        }
    }, upsert=True, return_document=pymongo.ReturnDocument.AFTER)
    print 'Preflight[{}][{}] = {}'.format(fn, key, result['flightId'])
    return result['flightId']

def check_for_takeoff(collection, fn, key, flight_id):
    result = collection.find_one({
        'fn': fn,
        'key': key
    })
    ready = result is None or result['flightId'] == flight_id
    print 'Check[{}][{}] = {}, {}'.format(fn, key, result['flightId'], ready)
    return ready
Here's the solution I came up with: https://gist.github.com/wolever/3cf2305613052f3810a271e09d42e35c
And copied here, for posterity:
import time

import redis

def get_redis_connection():
    return redis.Redis()

class TaskDebouncer(object):
    """ A simple Celery task debouncer.

        Usage::

            def debounce_process_corpus(corpus):
                # Only one task with ``key`` will be allowed to execute at a
                # time. For example, if the task was resizing an image, the key
                # might be the image's URL.
                key = "process_corpus:%s" %(corpus.id, )
                TaskDebouncer.delay(
                    key, my_task, args=[corpus.id], countdown=0,
                )

            @task(bind=True)
            def process_corpus(self, corpus_id, debounce_key=None):
                debounce = TaskDebouncer(debounce_key, keepalive=30)

                corpus = Corpus.load(corpus_id)

                try:
                    for item in corpus:
                        item.process()

                        # If ``debounce.keepalive()`` isn't called every
                        # ``keepalive`` interval (the ``keepalive=30`` in the
                        # call to ``TaskDebouncer(...)``) the task will be
                        # considered dead and another one will be allowed to
                        # start.
                        debounce.keepalive()

                finally:
                    # ``finalize()`` will mark the task as complete and allow
                    # subsequent tasks to execute. If it returns true, there
                    # was another attempt to start a task with the same key
                    # while this task was running. Depending on your business
                    # logic, this might indicate that the task should be
                    # retried.
                    needs_retry = debounce.finalize()
                    if needs_retry:
                        raise self.retry(max_retries=None)
    """

    def __init__(self, key, keepalive=60):
        if key:
            self.key = key.partition("!")[0]
            self.run_key = key
        else:
            self.key = None
            self.run_key = None
        self._keepalive = keepalive
        self.cxn = get_redis_connection()
        self.init()
        self.keepalive()

    @classmethod
    def delay(cls, key, task, args=None, kwargs=None, countdown=30):
        cxn = get_redis_connection()
        now = int(time.time())
        first = cxn.set(key, now, nx=True, ex=countdown + 10)
        if not first:
            now = cxn.get(key)
        run_key = "%s!%s" % (key, now)
        if first:
            kwargs = dict(kwargs or {})
            kwargs["debounce_key"] = run_key
            task.apply_async(args=args, kwargs=kwargs, countdown=countdown)
        return (first, run_key)

    def init(self):
        self.initial = self.key and self.cxn.get(self.key)

    def keepalive(self, expire=None):
        if self.key is None:
            return
        expire = expire if expire is not None else self._keepalive
        self.cxn.expire(self.key, expire)

    def is_out_of_date(self):
        if self.key is None:
            return False
        return self.cxn.get(self.key) != self.initial

    def finalize(self):
        if self.key is None:
            return False
        with self.cxn.pipeline() as pipe:
            while True:
                try:
                    pipe.watch(self.key)
                    if pipe.get(self.key) != self.initial:
                        return True
                    pipe.multi()
                    pipe.delete(self.key)
                    pipe.execute()
                    break
                except redis.WatchError:
                    continue
        return False
Here's a more filled-out solution based on https://stackoverflow.com/a/28157498/4391298 but turned into a decorator and reaching into the Kombu connection pool to reuse your Redis counter.
import logging
from functools import wraps

# Not strictly required
from django.core.exceptions import ImproperlyConfigured
from django.core.cache.utils import make_template_fragment_key

from celery.utils import gen_task_name

LOGGER = logging.getLogger(__name__)

def debounced_task(**options):
    """Debounced task decorator."""

    try:
        countdown = options.pop('countdown')
    except KeyError:
        raise ImproperlyConfigured("Debounced tasks require a countdown")

    def factory(func):
        """Decorator factory."""
        try:
            name = options.pop('name')
        except KeyError:
            name = gen_task_name(app, func.__name__, func.__module__)

        @wraps(func)
        def inner(*args, **kwargs):
            """Decorated function."""
            key = make_template_fragment_key(name, [args, kwargs])
            with app.pool.acquire_channel(block=True) as (_, channel):
                depth = channel.client.decr(key)
                if depth <= 0:
                    try:
                        func(*args, **kwargs)
                    except:
                        # The task failed (or is going to retry), set the
                        # count back to where it was
                        channel.client.set(key, depth)
                        raise
                else:
                    LOGGER.debug("%s calls pending to %s",
                                 depth, name)

        task = app._task_from_fun(inner, **options, name=name + '__debounced')

        @wraps(func)
        def debouncer(*args, **kwargs):
            """
            Debouncer that calls the real task.
            This is the task we are scheduling."""
            key = make_template_fragment_key(name, [args, kwargs])
            with app.pool.acquire_channel(block=True) as (_, channel):
                # Mark this key to expire after the countdown, in case our
                # task never runs or runs too many times, we want to clean
                # up our Redis to eventually resolve the issue.
                channel.client.expire(key, countdown + 10)
                depth = channel.client.incr(key)

            LOGGER.debug("Requesting %s in %i seconds (depth=%s)",
                         name, countdown, depth)
            task.si(*args, **kwargs).apply_async(countdown=countdown)

        return app._task_from_fun(debouncer, **options, name=name)

    return factory

python function fails to return unless the last statement is slow

I'm working on a subclass of threading.Thread which allows its methods to be called and run in the thread represented by the object that they are called on as opposed to the usual behavior. I do this by using decorators on the target method that place the call to the method in a collections.deque and using the run method to process the deque.
The run method uses a while not self.__stop: loop and a threading.Condition object to wait for a call to be placed in the deque, and then calls self.__process_calls. The else clause of the while loop makes a final call to __process_calls. If self.__stop is set, an exception is raised on any attempt to call one of the 'callable' methods from another thread.
The problem is that __process_calls fails to return unless its last statement is a print, which I discovered during debugging. I've tried a = 1 and an explicit return, but neither works. With any print statement as the final statement of the function, though, it returns and the thread doesn't hang. Any ideas what's going on?
EDIT: It was pointed out by David Zaslavsky that the print works because it takes a while, and I've confirmed that.
The code's a little long, but hopefully my explanation above is clear enough to help understand it.
import threading
import collections

class BrokenPromise(Exception): pass
class CallableThreadError(Exception): pass
class CallToNonRunningThreadError(CallableThreadError): pass

class Promise(object):
    def __init__(self, deque, condition):
        self._condition = condition
        self._deque = deque

    def read(self, timeout=None):
        if not self._deque:
            with self._condition:
                if timeout:
                    self._condition.wait(timeout)
                else:
                    self._condition.wait()
        if self._deque:
            value = self._deque.popleft()
            del self._deque
            del self._condition
            return value
        else:
            raise BrokenPromise

    def ready(self):
        return bool(self._deque)

class CallableThread(threading.Thread):
    def __init__(self, *args, **kwargs):
        # _enqueued_calls is used to store tuples that encode a function call.
        # It is processed by the run method
        self.__enqueued_calls = collections.deque()
        # _enqueue_call_permission is for callers to signal that they have
        # placed something on the queue
        self.__enqueue_call_permission = threading.Condition()
        self.__stop = False
        super(CallableThread, self).__init__(*args, **kwargs)

    @staticmethod
    def blocking_method(f):
        u"""A decorator function to implement a blocking method on a thread"""
        # the returned function enqueues the decorated function and blocks
        # until the decorated function is called and returns. It then returns
        # the value unmodified. The code in register runs in the calling thread
        # and the decorated method runs in the thread that it is called on
        f = CallableThread.nonblocking_method_with_promise(f)
        def register(self, *args, **kwargs):
            p = f(self, *args, **kwargs)
            return p.read()
        return register

    @staticmethod
    def nonblocking_method_with_promise(f):
        u"""A decorator function to implement a non-blocking method on a
        thread
        """
        # the returned function enqueues the decorated function and returns a
        # Promise object. The code in register runs in the calling thread
        # and the decorated method runs in the thread that it is called on.
        def register(self, *args, **kwargs):
            call_complete = threading.Condition()
            response_deque = collections.deque()
            self.__push_call(f, args, kwargs, response_deque, call_complete)
            return Promise(response_deque, call_complete)
        return register

    @staticmethod
    def nonblocking_method(f):
        def register(self, *args, **kwargs):
            self.__push_call(f, args, kwargs)
        return register

    def run(self):
        while not self.__stop:  # while we've not been killed
            with self.__enqueue_call_permission:
                # get the condition so that we can wait on it if we need to.
                if not self.__enqueued_calls:
                    self.__enqueue_call_permission.wait()
            self.__process_calls()
        else:
            # if we exit because self.__stop == True, finish processing
            # the pending calls if there are any
            self.__process_calls()

    def stop(self):
        u""" Signal the thread to stop"""
        with self.__enqueue_call_permission:
            # we do this in case the run method is stuck waiting on an update
            self.__stop = True
            self.__enqueue_call_permission.notify()

    def __process_calls(self):
        print "processing calls"
        while self.__enqueued_calls:
            ((f, args, kwargs),
             response_deque, call_complete) = self.__enqueued_calls.popleft()
            if call_complete:
                with call_complete:
                    response_deque.append(f(self, *args, **kwargs))
                    call_complete.notify()
            else:
                f(self, *args, **kwargs)
        # this is where you place the print statement if you want to see the
        # behavior

    def __push_call(self, f, args, kwargs, response_deque=None,
                    call_complete=None):
        if self.__stop:
            raise CallToNonRunningThreadError(
                "This thread is no longer accepting calls")
        with self.__enqueue_call_permission:
            self.__enqueued_calls.append(((f, args, kwargs),
                                          response_deque, call_complete))
            self.__enqueue_call_permission.notify()

#if __name__=='__main__': i lost the indent on the following code in copying but
#it doesn't matter in this context

class TestThread(CallableThread):
    u"""Increment a counter on each call and print the value"""
    counter = 0

    @CallableThread.nonblocking_method_with_promise
    def increment(self):
        self.counter += 1
        return self.counter

class LogThread(CallableThread):
    @CallableThread.nonblocking_method
    def log(self, message):
        print message

l = LogThread()
l.start()
l.log("logger started")

t = TestThread()
t.start()
l.log("test thread started")

p = t.increment()
l.log("promise aquired")

v = p.read()
l.log("promise read")
l.log("{0} read from promise".format(v))

l.stop()
t.stop()
l.join()
t.join()
__process_calls is modifying __enqueued_calls without owning the lock. This may be creating a race condition.
Edit: deque may be "thread-safe" (i.e. not corrupted by thread accesses), but checking its state still needs to be locked.
The stop condition is also not safe.
Comments inline:
def run(self):
    while not self.__stop:  # while we've not been killed
        with self.__enqueue_call_permission:
            # get the condition so that we can wait on it if we need too.
            ### should be checking __stop here, it could have been modified before
            ### you took the lock.
            if not self.__enqueued_calls:
                self.__enqueue_call_permission.wait()
        self.__process_calls()
    else:
        # if we exit because self._run == False, finish processing
        # the pending calls if there are any
        self.__process_calls()
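A sketch of run() with the checks done under the lock (one way to address the points above; this is my reading, not the original poster's code):
def run(self):
    while True:
        with self.__enqueue_call_permission:
            # re-check __stop while holding the lock, so a stop() that fired
            # before we acquired it can't be missed
            while not self.__enqueued_calls and not self.__stop:
                self.__enqueue_call_permission.wait()
            if self.__stop and not self.__enqueued_calls:
                return
        # deque.popleft()/append() are themselves thread-safe, so the calls
        # can be processed outside the lock once the state check is done
        self.__process_calls()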
