The function foo below returns a string 'foo'. How can I get the value 'foo' which is returned from the thread's target?
from threading import Thread
def foo(bar):
    print('hello {}'.format(bar))
    return 'foo'
thread = Thread(target=foo, args=('world!',))
thread.start()
return_value = thread.join()
The "one obvious way to do it", shown above, doesn't work: thread.join() returned None.
One way I've seen is to pass a mutable object, such as a list or a dictionary, to the thread's constructor, along with an index or other identifier of some sort. The thread can then store its results in its dedicated slot in that object. For example:
def foo(bar, result, index):
    print 'hello {0}'.format(bar)
    result[index] = "foo"
from threading import Thread
threads = [None] * 10
results = [None] * 10
for i in range(len(threads)):
    threads[i] = Thread(target=foo, args=('world!', results, i))
    threads[i].start()
# do some other stuff
for i in range(len(threads)):
    threads[i].join()
print " ".join(results) # what sound does a metasyntactic locomotive make?
If you really want join() to return the return value of the called function, you can do this with a Thread subclass like the following:
from threading import Thread
def foo(bar):
    print 'hello {0}'.format(bar)
    return "foo"
class ThreadWithReturnValue(Thread):
    def __init__(self, group=None, target=None, name=None,
                 args=(), kwargs={}, Verbose=None):
        Thread.__init__(self, group, target, name, args, kwargs, Verbose)
        self._return = None
    def run(self):
        if self._Thread__target is not None:
            self._return = self._Thread__target(*self._Thread__args,
                                                **self._Thread__kwargs)
    def join(self):
        Thread.join(self)
        return self._return
twrv = ThreadWithReturnValue(target=foo, args=('world!',))
twrv.start()
print twrv.join() # prints foo
That gets a little hairy because of some name mangling, and it accesses "private" data structures that are specific to Thread implementation... but it works.
For Python 3:
class ThreadWithReturnValue(Thread):
    def __init__(self, group=None, target=None, name=None,
                 args=(), kwargs={}):
        Thread.__init__(self, group, target, name, args, kwargs)
        self._return = None
    def run(self):
        if self._target is not None:
            self._return = self._target(*self._args,
                                        **self._kwargs)
    def join(self, *args):
        Thread.join(self, *args)
        return self._return
FWIW, the multiprocessing module has a nice interface for this using the Pool class. And if you want to stick with threads rather than processes, you can just use the multiprocessing.pool.ThreadPool class as a drop-in replacement.
def foo(bar, baz):
    print 'hello {0}'.format(bar)
    return 'foo' + baz
from multiprocessing.pool import ThreadPool
pool = ThreadPool(processes=1)
async_result = pool.apply_async(foo, ('world', 'foo')) # tuple of args for foo
# do some other stuff in the main process
return_val = async_result.get() # get the return value from your function.
In Python 3.2+, the stdlib concurrent.futures module provides a higher-level API to threading, including passing return values or exceptions from a worker thread back to the main thread:
import concurrent.futures
def foo(bar):
    print('hello {}'.format(bar))
    return 'foo'

with concurrent.futures.ThreadPoolExecutor() as executor:
    future = executor.submit(foo, 'world!')
    return_value = future.result()
print(return_value)
Jake's answer is good, but if you don't want to use a threadpool (e.g., you don't know how many threads you'll need in advance and want to create them as needed), then a good way to transmit information between threads is the built-in Queue.Queue class, as it offers thread safety.
I created the following decorator to make it act in a similar fashion to the threadpool:
def threaded(f, daemon=False):
    import threading
    import Queue
    def wrapped_f(q, *args, **kwargs):
        '''this function calls the decorated function and puts the
        result in a queue'''
        ret = f(*args, **kwargs)
        q.put(ret)
    def wrap(*args, **kwargs):
        '''this is the function returned from the decorator. It fires off
        wrapped_f in a new thread and returns the thread object with
        the result queue attached'''
        q = Queue.Queue()
        t = threading.Thread(target=wrapped_f, args=(q,) + args, kwargs=kwargs)
        t.daemon = daemon
        t.start()
        t.result_queue = q
        return t
    return wrap
Then you just use it as:
@threaded
def long_task(x):
    import time
    x = x + 5
    time.sleep(5)
    return x

# does not block, returns Thread object
y = long_task(10)
print y

# this blocks, waiting for the result
result = y.result_queue.get()
print result
The decorated function creates a new thread each time it's called and returns a Thread object that contains the queue that will receive the result.
UPDATE
It's been quite a while since I posted this answer, but it still gets views so I thought I would update it to reflect the way I do this in newer versions of Python:
Python 3.2 added the concurrent.futures module, which provides a high-level interface for parallel tasks. It provides ThreadPoolExecutor and ProcessPoolExecutor, so you can use a thread or process pool with the same API.
One benefit of this API is that submitting a task to an Executor returns a Future object, which will complete with the return value of the callable you submit.
This makes attaching a queue object unnecessary, which simplifies the decorator quite a bit:
from concurrent.futures import ThreadPoolExecutor
from functools import wraps

_DEFAULT_POOL = ThreadPoolExecutor()

def threadpool(f, executor=None):
    @wraps(f)
    def wrap(*args, **kwargs):
        return (executor or _DEFAULT_POOL).submit(f, *args, **kwargs)
    return wrap
This will use a default module-level thread pool executor if one is not passed in.
The usage is very similar to before:
@threadpool
def long_task(x):
    import time
    x = x + 5
    time.sleep(5)
    return x

# does not block, returns Future object
y = long_task(10)
print(y)

# this blocks, waiting for the result
result = y.result()
print(result)
If you're using Python 3.4+, one really nice feature of using this method (and Future objects in general) is that the returned future can be wrapped to turn it into an asyncio.Future with asyncio.wrap_future. This makes it work easily with coroutines:
result = await asyncio.wrap_future(long_task(10))
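For a fuller picture, here is a minimal sketch of that line inside a coroutine (assuming the @threadpool-decorated long_task from above; asyncio.run needs Python 3.7+):

import asyncio

async def main():
    # wrap_future turns the concurrent.futures.Future returned by
    # long_task(10) into an awaitable asyncio.Future
    result = await asyncio.wrap_future(long_task(10))
    print(result)

asyncio.run(main())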
If you don't need access to the underlying concurrent.futures.Future object, you can include the wrap in the decorator:
import asyncio
from concurrent.futures import ThreadPoolExecutor
from functools import wraps

_DEFAULT_POOL = ThreadPoolExecutor()

def threadpool(f, executor=None):
    @wraps(f)
    def wrap(*args, **kwargs):
        return asyncio.wrap_future((executor or _DEFAULT_POOL).submit(f, *args, **kwargs))
    return wrap
Then, whenever you need to push CPU-intensive or blocking code off the event loop thread, you can put it in a decorated function:
@threadpool
def some_long_calculation():
    ...

# this will suspend while the function is executed on a threadpool
result = await some_long_calculation()
Another solution that doesn't require changing your existing code:
import Queue # Python 2.x
#from queue import Queue # Python 3.x
from threading import Thread
def foo(bar):
print 'hello {0}'.format(bar) # Python 2.x
#print('hello {0}'.format(bar)) # Python 3.x
return 'foo'
que = Queue.Queue() # Python 2.x
#que = Queue() # Python 3.x
t = Thread(target=lambda q, arg1: q.put(foo(arg1)), args=(que, 'world!'))
t.start()
t.join()
result = que.get()
print result # Python 2.x
#print(result) # Python 3.x
It can also be easily adjusted to a multi-threaded environment:
import Queue # Python 2.x
#from queue import Queue # Python 3.x
from threading import Thread
def foo(bar):
print 'hello {0}'.format(bar) # Python 2.x
#print('hello {0}'.format(bar)) # Python 3.x
return 'foo'
que = Queue.Queue() # Python 2.x
#que = Queue() # Python 3.x
threads_list = list()
t = Thread(target=lambda q, arg1: q.put(foo(arg1)), args=(que, 'world!'))
t.start()
threads_list.append(t)
# Add more threads here
...
threads_list.append(t2)
...
threads_list.append(t3)
...
# Join all the threads
for t in threads_list:
    t.join()

# Check thread's return value
while not que.empty():
    result = que.get()
    print result # Python 2.x
    #print(result) # Python 3.x
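For illustration, the elided parts above could be filled in with a loop like this (a sketch in Python 3, reusing foo from above; the argument values are made up):

from queue import Queue
from threading import Thread

que = Queue()
threads_list = []

for arg in ('world!', 'foo', 'bar'):
    t = Thread(target=lambda q, arg1: q.put(foo(arg1)), args=(que, arg))
    t.start()
    threads_list.append(t)

for t in threads_list:
    t.join()

while not que.empty():
    print(que.get())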
UPDATE:
I think there's a significantly simpler and more concise way to save the result of the thread, one that keeps the interface virtually identical to the threading.Thread class (please let me know if there are edge cases - I haven't tested this as much as my original post below):
import threading
class ConciseResult(threading.Thread):
    def run(self):
        self.result = self._target(*self._args, **self._kwargs)
To be robust and avoid potential errors:
import threading
class ConciseRobustResult(threading.Thread):
    def run(self):
        try:
            if self._target is not None:
                self.result = self._target(*self._args, **self._kwargs)
        finally:
            # Avoid a refcycle if the thread is running a function with
            # an argument that has a member that points to the thread.
            del self._target, self._args, self._kwargs
Short explanation: we override only the run method of threading.Thread, and modify nothing else. This allows us to use everything else the threading.Thread class does for us, without needing to worry about missing potential edge cases such as _private attribute assignments or custom attribute modifications in the way that my original post does.
We can verify that we only modify the run method by looking at the output of help(ConciseResult) and help(ConciseRobustResult). The only method/attribute/descriptor included under Methods defined here: is run, and everything else comes from the inherited threading.Thread base class (see the Methods inherited from threading.Thread: section).
To test either of these implementations using the example code below, substitute ConciseResult or ConciseRobustResult for ThreadWithResult in the main function below.
Original post using a closure function in the init method:
Most answers I've found are long and require being familiar with other modules or advanced python features, and will be rather confusing to someone unless they're already familiar with everything the answer talks about.
Working code for a simplified approach:
import threading
class ThreadWithResult(threading.Thread):
    def __init__(self, group=None, target=None, name=None, args=(), kwargs={}, *, daemon=None):
        def function():
            self.result = target(*args, **kwargs)
        super().__init__(group=group, target=function, name=name, daemon=daemon)
Example code:
import time, random
def function_to_thread(n):
    count = 0
    while count < 3:
        print(f'still running thread {n}')
        count += 1
        time.sleep(3)
    result = random.random()
    print(f'Return value of thread {n} should be: {result}')
    return result
def main():
    thread1 = ThreadWithResult(target=function_to_thread, args=(1,))
    thread2 = ThreadWithResult(target=function_to_thread, args=(2,))
    thread1.start()
    thread2.start()
    thread1.join()
    thread2.join()
    print(thread1.result)
    print(thread2.result)
main()
Explanation:
I wanted to simplify things significantly, so I created a ThreadWithResult class and had it inherit from threading.Thread. The nested function function in __init__ calls the threaded function we want to save the value of, and saves the result of that nested function as the instance attribute self.result after the thread finishes executing.
Creating an instance of this is identical to creating an instance of threading.Thread. Pass in the function you want to run on a new thread to the target argument and any arguments that your function might need to the args argument and any keyword arguments to the kwargs argument.
e.g.
my_thread = ThreadWithResult(target=my_function, args=(arg1, arg2, arg3))
I think this is significantly easier to understand than the vast majority of answers, and this approach requires no extra imports! I included the time and random modules to simulate the behavior of a thread, but they're not required to achieve the functionality asked about in the original question.
I know I'm answering this looong after the question was asked, but I hope this can help more people in the future!
EDIT: I created the save-thread-result PyPI package to allow you to access the same code above and reuse it across projects (GitHub code is here). The PyPI package fully extends the threading.Thread class, so you can set any attributes you would set on threading.Thread on the ThreadWithResult class as well!
The original answer above goes over the main idea behind this subclass, but for more information, see the more detailed explanation (from the module docstring) here.
Quick usage example:
pip3 install -U save-thread-result # MacOS/Linux
pip install -U save-thread-result # Windows
python3 # MacOS/Linux
python # Windows
from save_thread_result import ThreadWithResult
# As of Release 0.0.3, you can also specify values for
# `group`, `name`, and `daemon` if you want to set those
# values manually.
thread = ThreadWithResult(
    target = my_function,
    args   = (my_function_arg1, my_function_arg2, ...),
    kwargs = {my_function_kwarg1: kwarg1_value, my_function_kwarg2: kwarg2_value, ...}
)
thread.start()
thread.join()
if getattr(thread, 'result', None):
    print(thread.result)
else:
    # thread.result attribute not set - something caused
    # the thread to terminate BEFORE the thread finished
    # executing the function passed in through the
    # `target` argument
    print('ERROR! Something went wrong while executing this thread, and the function you passed in did NOT complete!!')
# seeing help about the class and information about the threading.Thread super class methods and attributes available:
help(ThreadWithResult)
Parris / kindall's join/return answer ported to Python 3:
from threading import Thread
def foo(bar):
    print('hello {0}'.format(bar))
    return "foo"

class ThreadWithReturnValue(Thread):
    def __init__(self, group=None, target=None, name=None, args=(), kwargs=None, *, daemon=None):
        Thread.__init__(self, group, target, name, args, kwargs, daemon=daemon)
        self._return = None
    def run(self):
        if self._target is not None:
            self._return = self._target(*self._args, **self._kwargs)
    def join(self):
        Thread.join(self)
        return self._return
twrv = ThreadWithReturnValue(target=foo, args=('world!',))
twrv.start()
print(twrv.join()) # prints foo
Note, the Thread class is implemented differently in Python 3.
I stole kindall's answer and cleaned it up just a little bit.
The key part is adding *args and **kwargs to join() in order to handle the timeout:
class threadWithReturn(Thread):
    def __init__(self, *args, **kwargs):
        super(threadWithReturn, self).__init__(*args, **kwargs)
        self._return = None
    def run(self):
        if self._Thread__target is not None:
            self._return = self._Thread__target(*self._Thread__args, **self._Thread__kwargs)
    def join(self, *args, **kwargs):
        super(threadWithReturn, self).join(*args, **kwargs)
        return self._return
UPDATED ANSWER BELOW
This is my most upvoted answer, so I decided to update it with code that will run on both py2 and py3.
Additionally, I see many answers to this question that show a lack of comprehension regarding Thread.join(). Some completely fail to handle the timeout arg. But there is also a corner case you should be aware of when you have (1) a target function that can return None and (2) you also pass the timeout arg to join(). Please see "TEST 4" to understand this corner case.
ThreadWithReturn class that works with py2 and py3:
import sys
from threading import Thread
from builtins import super # https://stackoverflow.com/a/30159479

_thread_target_key, _thread_args_key, _thread_kwargs_key = (
    ('_target', '_args', '_kwargs')
    if sys.version_info >= (3, 0) else
    ('_Thread__target', '_Thread__args', '_Thread__kwargs')
)

class ThreadWithReturn(Thread):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._return = None

    def run(self):
        target = getattr(self, _thread_target_key)
        if target is not None:
            self._return = target(
                *getattr(self, _thread_args_key),
                **getattr(self, _thread_kwargs_key)
            )

    def join(self, *args, **kwargs):
        super().join(*args, **kwargs)
        return self._return
Some sample tests are shown below:
import time, random

# TEST TARGET FUNCTION
def giveMe(arg, seconds=None):
    if seconds is not None:
        time.sleep(seconds)
    return arg
# TEST 1
my_thread = ThreadWithReturn(target=giveMe, args=('stringy',))
my_thread.start()
returned = my_thread.join()
# (returned == 'stringy')
# TEST 2
my_thread = ThreadWithReturn(target=giveMe, args=(None,))
my_thread.start()
returned = my_thread.join()
# (returned is None)
# TEST 3
my_thread = ThreadWithReturn(target=giveMe, args=('stringy',), kwargs={'seconds': 5})
my_thread.start()
returned = my_thread.join(timeout=2)
# (returned is None) # because join() timed out before giveMe() finished
# TEST 4
my_thread = ThreadWithReturn(target=giveMe, args=(None,), kwargs={'seconds': 5})
my_thread.start()
returned = my_thread.join(timeout=random.randint(1, 10))
Can you identify the corner-case that we may possibly encounter with TEST 4?
The problem is that we expect giveMe() to return None (see TEST 2), but we also expect join() to return None if it times out.
returned is None means either:
(1) that's what giveMe() returned, or
(2) join() timed out
This example is trivial, since we know that giveMe() will always return None. But in a real-world instance (where the target may legitimately return None or something else) we'd want to explicitly check for what happened.
Below is how to address this corner-case:
# TEST 4
my_thread = ThreadWithReturn(target=giveMe, args=(None,), kwargs={'seconds': 5})
my_thread.start()
returned = my_thread.join(timeout=random.randint(1, 10))

if my_thread.is_alive():
    # returned is None because join() timed out
    # this also means that giveMe() is still running in the background
    pass
    # handle this based on your app's logic
else:
    # join() is finished, and so is giveMe()
    # BUT we could also be in a race condition, so we need to update returned, just in case
    returned = my_thread.join()
Using Queue:
import threading, queue
def calc_square(num, out_queue1):
    l = []
    for x in num:
        l.append(x * x)
    out_queue1.put(l)

arr = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
out_queue1 = queue.Queue()
t1 = threading.Thread(target=calc_square, args=(arr, out_queue1))
t1.start()
t1.join()
print(out_queue1.get())
My solution to the problem is to wrap the function and thread in a class. It does not require using pools, queues, or C-type variable passing. It is also non-blocking; you check its status instead. See the example of how to use it at the end of the code.
import threading
class ThreadWorker():
    '''
    The basic idea is: given a function, create an object.
    The object can then run the function in a thread.
    It provides a wrapper to start it, check its status, and get data out of the function.
    '''
    def __init__(self, func):
        self.thread = None
        self.data = None
        self.func = self.save_data(func)

    def save_data(self, func):
        '''modify function to save its returned data'''
        def new_func(*args, **kwargs):
            self.data = func(*args, **kwargs)
        return new_func

    def start(self, params):
        self.data = None
        if self.thread is not None:
            if self.thread.is_alive():
                return 'running' # could raise exception here
        # unless thread exists and is alive, start or restart it
        self.thread = threading.Thread(target=self.func, args=params)
        self.thread.start()
        return 'started'

    def status(self):
        if self.thread is None:
            return 'not_started'
        else:
            if self.thread.is_alive():
                return 'running'
            else:
                return 'finished'

    def get_results(self):
        if self.thread is None:
            return 'not_started' # could return exception
        else:
            if self.thread.is_alive():
                return 'running'
            else:
                return self.data

def add(x, y):
    return x + y
add_worker = ThreadWorker(add)
print add_worker.start((1,2,))
print add_worker.status()
print add_worker.get_results()
Taking into consideration @iman's comment on @JakeBiesinger's answer, I have recomposed it to have a varying number of threads:
from multiprocessing.pool import ThreadPool

def foo(bar, baz):
    print 'hello {0}'.format(bar)
    return 'foo' + baz

numOfThreads = 3
results = []

pool = ThreadPool(numOfThreads)
for i in range(0, numOfThreads):
    results.append(pool.apply_async(foo, ('world', 'foo'))) # tuple of args for foo

# do some other stuff in the main process
# ...
# ...

results = [r.get() for r in results]
print results
pool.close()
pool.join()
I'm using this wrapper, which conveniently turns any function into one that runs in a Thread, taking care of its return value or exception. It doesn't add Queue overhead.
import sys
import threading

def threading_func(f):
    """Decorator for running a function in a thread and handling its return
    value or exception"""
    def start(*args, **kw):
        def run():
            try:
                th.ret = f(*args, **kw)
            except:
                th.exc = sys.exc_info()
        def get(timeout=None):
            th.join(timeout)
            if th.exc:
                raise th.exc[0], th.exc[1], th.exc[2] # py2
                ##raise th.exc[1] #py3
            return th.ret
        th = threading.Thread(None, run)
        th.exc = None
        th.get = get
        th.start()
        return th
    return start
Usage Examples
def f(x):
    return 2.5 * x

th = threading_func(f)(4)
print("still running?:", th.is_alive())
print("result:", th.get(timeout=1.0))

@threading_func
def th_mul(a, b):
    return a * b

th = th_mul("text", 2.5)
try:
    print(th.get())
except TypeError:
    print("exception thrown ok.")
Notes on threading module
Comfortable return value and exception handling of a threaded function is a frequent "Pythonic" need and should indeed already be offered by the threading module - possibly directly in the standard Thread class. ThreadPool has way too much overhead for simple tasks - 3 managing threads, lots of bureaucracy. Unfortunately Thread's layout was copied from Java originally - which you see, e.g., in the still-useless first (!) constructor parameter, group.
Based on what kindall mentioned, here's a more generic solution that works with Python 3:
import threading
class ThreadWithReturnValue(threading.Thread):
    def __init__(self, *init_args, **init_kwargs):
        threading.Thread.__init__(self, *init_args, **init_kwargs)
        self._return = None
    def run(self):
        self._return = self._target(*self._args, **self._kwargs)
    def join(self):
        threading.Thread.join(self)
        return self._return
Usage
th = ThreadWithReturnValue(target=requests.get, args=('http://www.google.com',))
th.start()
response = th.join()
response.status_code # => 200
join() always returns None; I think you should subclass Thread to handle return codes and so on.
You can define a mutable object outside the scope of the threaded function, and add the result to it. (I also modified the code to be Python 3 compatible.)
returns = {}
def foo(bar):
    print('hello {0}'.format(bar))
    returns[bar] = 'foo'
from threading import Thread
t = Thread(target=foo, args=('world!',))
t.start()
t.join()
print(returns)
This returns {'world!': 'foo'}
If you use the function input as the key to your results dict, every unique input is guaranteed to give an entry in the results.
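For example, a minimal sketch with several threads, where each input becomes its own key (the argument values here are made up for illustration):

from threading import Thread

returns = {}

def foo(bar):
    returns[bar] = 'foo ' + bar

threads = [Thread(target=foo, args=(arg,)) for arg in ('a', 'b', 'c')]
for t in threads:
    t.start()
for t in threads:
    t.join()
print(returns)  # e.g. {'a': 'foo a', 'b': 'foo b', 'c': 'foo c'} (key order may vary)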
Define your target to:
1) take an argument q
2) replace any statements of the form return foo with q.put(foo); return
so a function
def func(a):
    ans = a * a
    return ans
would become
def func(a, q):
    ans = a * a
    q.put(ans)
    return
and then you would proceed as such
from Queue import Queue
from threading import Thread

ans_q = Queue()
arg_tups = [(i, ans_q) for i in xrange(10)]
threads = [Thread(target=func, args=arg_tup) for arg_tup in arg_tups]
_ = [t.start() for t in threads]
_ = [t.join() for t in threads]
results = [ans_q.get() for _ in xrange(len(threads))]
And you can use function decorators/wrappers to make it so you can use your existing functions as target without modifying them, but follow this basic scheme.
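A hedged sketch of such a wrapper (the name returns_via_queue is invented for illustration; written for Python 3, where the module is queue rather than Queue):

from queue import Queue
from threading import Thread

def returns_via_queue(f):
    '''wrap f so that its return value is put on a queue instead of returned'''
    def wrapped(q, *args, **kwargs):
        q.put(f(*args, **kwargs))
    return wrapped

def func(a):  # the existing function, unmodified
    return a * a

ans_q = Queue()
t = Thread(target=returns_via_queue(func), args=(ans_q, 3))
t.start()
t.join()
print(ans_q.get())  # 9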
GuySoft's idea is great, but I think the object does not necessarily have to inherit from Thread, and start() could be removed from the interface:
from threading import Thread
import queue
class ThreadWithReturnValue(object):
    def __init__(self, target=None, args=(), **kwargs):
        self._que = queue.Queue()
        self._t = Thread(target=lambda q, arg1, kwargs1: q.put(target(*arg1, **kwargs1)),
                         args=(self._que, args, kwargs))
        self._t.start()

    def join(self):
        self._t.join()
        return self._que.get()
def foo(bar):
    print('hello {0}'.format(bar))
    return "foo"
twrv = ThreadWithReturnValue(target=foo, args=('world!',))
print(twrv.join()) # prints foo
This is a pretty old question, but I wanted to share a simple solution that has worked for me and helped my dev process.
The idea behind this answer is that the "new" target function, inner, assigns the result of the original function (passed through the __init__ method) to the result instance attribute of the wrapper via a closure.
This allows the wrapper class to hold onto the return value for callers to access at anytime.
NOTE: This method doesn't need to use any mangled or private methods of the threading.Thread class, although generator functions that yield have not been considered (the OP did not mention them).
Enjoy!
from threading import Thread as _Thread
class ThreadWrapper:
    def __init__(self, target, *args, **kwargs):
        self.result = None
        self._target = self._build_threaded_fn(target)
        self.thread = _Thread(
            target=self._target,
            *args,
            **kwargs
        )

    def _build_threaded_fn(self, func):
        def inner(*args, **kwargs):
            self.result = func(*args, **kwargs)
        return inner
Additionally, you can run pytest (assuming you have it installed) with the following code to demonstrate the results:
import time
from commons import ThreadWrapper
def test():
    def target():
        time.sleep(1)
        return 'Hello'

    wrapper = ThreadWrapper(target=target)
    wrapper.thread.start()

    r = wrapper.result
    assert r is None

    time.sleep(2)

    r = wrapper.result
    assert r == 'Hello'
As mentioned, the multiprocessing pool is much slower than basic threading. Using queues, as proposed in some answers here, is a very effective alternative. I have used it with dictionaries in order to run a lot of small threads and collect multiple answers by combining them with dictionaries:
#!/usr/bin/env python3
import threading
# use Queue for python2
import queue
import random
LETTERS = 'abcdefghijklmnopqrstuvwxyz'
LETTERS = [x for x in LETTERS]
NUMBERS = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]

def randoms(k, q):
    result = dict()
    result['letter'] = random.choice(LETTERS)
    result['number'] = random.choice(NUMBERS)
    q.put({k: result})

threads = list()
q = queue.Queue()
results = dict()

for name in ('alpha', 'oscar', 'yankee',):
    threads.append(threading.Thread(target=randoms, args=(name, q)))
    threads[-1].start()

_ = [t.join() for t in threads]

while not q.empty():
    results.update(q.get())

print(results)
Here is the version that I created of @Kindall's answer.
This version makes it so that all you have to do is input your command with arguments to create the new thread.
This was made with Python 3.8:
from threading import Thread
from typing import Any
def test(plug, plug2, plug3):
    print(f"hello {plug}")
    print(f'I am the second plug : {plug2}')
    print(plug3)
    return 'I am the return Value!'

def test2(msg):
    return f'I am from the second test: {msg}'

def test3():
    print('hello world')

def NewThread(com, Returning: bool, *arguments) -> Any:
    """
    Will create a new thread for a function/command.

    :param com: Command to be Executed
    :param arguments: Arguments to be sent to Command
    :param Returning: True/False Will this command need to return anything
    """
    class NewThreadWorker(Thread):
        def __init__(self, group=None, target=None, name=None, args=(), kwargs=None, *,
                     daemon=None):
            Thread.__init__(self, group, target, name, args, kwargs, daemon=daemon)
            self._return = None

        def run(self):
            if self._target is not None:
                self._return = self._target(*self._args, **self._kwargs)

        def join(self):
            Thread.join(self)
            return self._return

    ntw = NewThreadWorker(target=com, args=(*arguments,))
    ntw.start()
    if Returning:
        return ntw.join()
if __name__ == "__main__":
    print(NewThread(test, True, 'hi', 'test', test2('hi')))
    NewThread(test3, True)
You can use pool.apply_async() of ThreadPool() to return the value from test() as shown below:
from multiprocessing.pool import ThreadPool
def test(num1, num2):
    return num1 + num2
pool = ThreadPool(processes=1) # Here
result = pool.apply_async(test, (2, 3)) # Here
print(result.get()) # 5
And, you can also use submit() of concurrent.futures.ThreadPoolExecutor() to return the value from test() as shown below:
from concurrent.futures import ThreadPoolExecutor
def test(num1, num2):
    return num1 + num2

with ThreadPoolExecutor(max_workers=1) as executor:
    future = executor.submit(test, 2, 3) # Here
    print(future.result()) # 5
And, instead of return, you can use the array result as shown below:
from threading import Thread
def test(num1, num2, r):
    r[0] = num1 + num2 # Instead of "return"
result = [None] # Here
thread = Thread(target=test, args=(2, 3, result))
thread.start()
thread.join()
print(result[0]) # 5
And instead of return, you can also use the queue result as shown below:
from threading import Thread
import queue
def test(num1, num2, q):
    q.put(num1 + num2) # Instead of "return"
queue = queue.Queue() # Here
thread = Thread(target=test, args=(2, 3, queue))
thread.start()
thread.join()
print(queue.get()) # 5
The shortest and simplest way I've found to do this is to take advantage of Python classes and their dynamic attributes. You can retrieve the current thread from within the context of your spawned thread using threading.current_thread(), and assign the return value to an attribute.
import threading
def some_target_function():
    # Your code here.
    threading.current_thread().return_value = "Some return value."
your_thread = threading.Thread(target=some_target_function)
your_thread.start()
your_thread.join()
return_value = your_thread.return_value
print(return_value)
One usual solution is to wrap your target function with a wrapper like
result = queue.Queue()

def task_wrapper(*args):
    result.put(target(*args))
Then the whole code may look like this (wrapped in a function here so the final return is valid; run_all, target, args_list, and max_num are illustrative names):
import queue
import threading

def run_all(target, args_list, max_num):
    result = queue.Queue()

    def task_wrapper(*args):
        result.put(target(*args))

    threads = [threading.Thread(target=task_wrapper, args=args) for args in args_list]
    for t in threads:
        t.start()
    # throttle: wait until the number of live threads drops below max_num
    while True:
        if len(threading.enumerate()) < max_num:
            break
    for t in threads:
        t.join()
    return result
Note
One important issue is that the return values may be unordered; a sketch of one way to preserve ordering follows below.
(In fact, the return value is not necessarily saved to a queue, since you can choose any arbitrary thread-safe data structure.)
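Here is that sketch: tag each value with its index before putting it on the queue (a hedged illustration; target and args_list are assumed from above, and run_all_ordered is an invented name):

import queue
import threading

def run_all_ordered(target, args_list):
    result = queue.Queue()

    def task_wrapper(index, *args):
        result.put((index, target(*args)))  # tag each value with its slot

    threads = [threading.Thread(target=task_wrapper, args=(i,) + tuple(args))
               for i, args in enumerate(args_list)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()

    ordered = [None] * len(threads)
    while not result.empty():
        i, value = result.get()
        ordered[i] = value
    return ordered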
Kindall's answer in Python 3
class ThreadWithReturnValue(Thread):
    def __init__(self, group=None, target=None, name=None,
                 args=(), kwargs={}, *, daemon=None):
        Thread.__init__(self, group, target, name, args, kwargs, daemon=daemon)
        self._return = None

    def run(self):
        try:
            if self._target:
                self._return = self._target(*self._args, **self._kwargs)
        finally:
            del self._target, self._args, self._kwargs

    def join(self, timeout=None):
        Thread.join(self, timeout)
        return self._return
I know this thread is old... but I faced the same problem. If you are willing to use thread.join(), you can store the result on an instance attribute:
import threading

class test:
    def __init__(self):
        self.msg = ""

    def hello(self, bar):
        print('hello {}'.format(bar))
        self.msg = "foo"

    def main(self):
        thread = threading.Thread(target=self.hello, args=('world!',))
        thread.start()
        thread.join()
        print(self.msg)

g = test()
g.main()
Best way... Define a global variable, then change the variable in the threaded function. Nothing to pass in or retrieve back.
from threading import Thread

# global var
random_global_var = 5

def function():
    global random_global_var
    random_global_var += 1

domath = Thread(target=function)
domath.start()
domath.join()
print(random_global_var)
# result: 6
Related
The function foo below returns a string 'foo'. How can I get the value 'foo' which is returned from the thread's target?
from threading import Thread
def foo(bar):
print('hello {}'.format(bar))
return 'foo'
thread = Thread(target=foo, args=('world!',))
thread.start()
return_value = thread.join()
The "one obvious way to do it", shown above, doesn't work: thread.join() returned None.
One way I've seen is to pass a mutable object, such as a list or a dictionary, to the thread's constructor, along with a an index or other identifier of some sort. The thread can then store its results in its dedicated slot in that object. For example:
def foo(bar, result, index):
print 'hello {0}'.format(bar)
result[index] = "foo"
from threading import Thread
threads = [None] * 10
results = [None] * 10
for i in range(len(threads)):
threads[i] = Thread(target=foo, args=('world!', results, i))
threads[i].start()
# do some other stuff
for i in range(len(threads)):
threads[i].join()
print " ".join(results) # what sound does a metasyntactic locomotive make?
If you really want join() to return the return value of the called function, you can do this with a Thread subclass like the following:
from threading import Thread
def foo(bar):
print 'hello {0}'.format(bar)
return "foo"
class ThreadWithReturnValue(Thread):
def __init__(self, group=None, target=None, name=None,
args=(), kwargs={}, Verbose=None):
Thread.__init__(self, group, target, name, args, kwargs, Verbose)
self._return = None
def run(self):
if self._Thread__target is not None:
self._return = self._Thread__target(*self._Thread__args,
**self._Thread__kwargs)
def join(self):
Thread.join(self)
return self._return
twrv = ThreadWithReturnValue(target=foo, args=('world!',))
twrv.start()
print twrv.join() # prints foo
That gets a little hairy because of some name mangling, and it accesses "private" data structures that are specific to Thread implementation... but it works.
For Python 3:
class ThreadWithReturnValue(Thread):
def __init__(self, group=None, target=None, name=None,
args=(), kwargs={}, Verbose=None):
Thread.__init__(self, group, target, name, args, kwargs)
self._return = None
def run(self):
if self._target is not None:
self._return = self._target(*self._args,
**self._kwargs)
def join(self, *args):
Thread.join(self, *args)
return self._return
FWIW, the multiprocessing module has a nice interface for this using the Pool class. And if you want to stick with threads rather than processes, you can just use the multiprocessing.pool.ThreadPool class as a drop-in replacement.
def foo(bar, baz):
print 'hello {0}'.format(bar)
return 'foo' + baz
from multiprocessing.pool import ThreadPool
pool = ThreadPool(processes=1)
async_result = pool.apply_async(foo, ('world', 'foo')) # tuple of args for foo
# do some other stuff in the main process
return_val = async_result.get() # get the return value from your function.
In Python 3.2+, stdlib concurrent.futures module provides a higher level API to threading, including passing return values or exceptions from a worker thread back to the main thread:
import concurrent.futures
def foo(bar):
print('hello {}'.format(bar))
return 'foo'
with concurrent.futures.ThreadPoolExecutor() as executor:
future = executor.submit(foo, 'world!')
return_value = future.result()
print(return_value)
Jake's answer is good, but if you don't want to use a threadpool (you don't know how many threads you'll need, but create them as needed) then a good way to transmit information between threads is the built-in Queue.Queue class, as it offers thread safety.
I created the following decorator to make it act in a similar fashion to the threadpool:
def threaded(f, daemon=False):
import Queue
def wrapped_f(q, *args, **kwargs):
'''this function calls the decorated function and puts the
result in a queue'''
ret = f(*args, **kwargs)
q.put(ret)
def wrap(*args, **kwargs):
'''this is the function returned from the decorator. It fires off
wrapped_f in a new thread and returns the thread object with
the result queue attached'''
q = Queue.Queue()
t = threading.Thread(target=wrapped_f, args=(q,)+args, kwargs=kwargs)
t.daemon = daemon
t.start()
t.result_queue = q
return t
return wrap
Then you just use it as:
#threaded
def long_task(x):
import time
x = x + 5
time.sleep(5)
return x
# does not block, returns Thread object
y = long_task(10)
print y
# this blocks, waiting for the result
result = y.result_queue.get()
print result
The decorated function creates a new thread each time it's called and returns a Thread object that contains the queue that will receive the result.
UPDATE
It's been quite a while since I posted this answer, but it still gets views so I thought I would update it to reflect the way I do this in newer versions of Python:
Python 3.2 added in the concurrent.futures module which provides a high-level interface for parallel tasks. It provides ThreadPoolExecutor and ProcessPoolExecutor, so you can use a thread or process pool with the same api.
One benefit of this api is that submitting a task to an Executor returns a Future object, which will complete with the return value of the callable you submit.
This makes attaching a queue object unnecessary, which simplifies the decorator quite a bit:
_DEFAULT_POOL = ThreadPoolExecutor()
def threadpool(f, executor=None):
#wraps(f)
def wrap(*args, **kwargs):
return (executor or _DEFAULT_POOL).submit(f, *args, **kwargs)
return wrap
This will use a default module threadpool executor if one is not passed in.
The usage is very similar to before:
#threadpool
def long_task(x):
import time
x = x + 5
time.sleep(5)
return x
# does not block, returns Future object
y = long_task(10)
print y
# this blocks, waiting for the result
result = y.result()
print result
If you're using Python 3.4+, one really nice feature of using this method (and Future objects in general) is that the returned future can be wrapped to turn it into an asyncio.Future with asyncio.wrap_future. This makes it work easily with coroutines:
result = await asyncio.wrap_future(long_task(10))
If you don't need access to the underlying concurrent.Future object, you can include the wrap in the decorator:
_DEFAULT_POOL = ThreadPoolExecutor()
def threadpool(f, executor=None):
#wraps(f)
def wrap(*args, **kwargs):
return asyncio.wrap_future((executor or _DEFAULT_POOL).submit(f, *args, **kwargs))
return wrap
Then, whenever you need to push cpu intensive or blocking code off the event loop thread, you can put it in a decorated function:
#threadpool
def some_long_calculation():
...
# this will suspend while the function is executed on a threadpool
result = await some_long_calculation()
Another solution that doesn't require changing your existing code:
import Queue # Python 2.x
#from queue import Queue # Python 3.x
from threading import Thread
def foo(bar):
print 'hello {0}'.format(bar) # Python 2.x
#print('hello {0}'.format(bar)) # Python 3.x
return 'foo'
que = Queue.Queue() # Python 2.x
#que = Queue() # Python 3.x
t = Thread(target=lambda q, arg1: q.put(foo(arg1)), args=(que, 'world!'))
t.start()
t.join()
result = que.get()
print result # Python 2.x
#print(result) # Python 3.x
It can be also easily adjusted to a multi-threaded environment:
import Queue # Python 2.x
#from queue import Queue # Python 3.x
from threading import Thread
def foo(bar):
print 'hello {0}'.format(bar) # Python 2.x
#print('hello {0}'.format(bar)) # Python 3.x
return 'foo'
que = Queue.Queue() # Python 2.x
#que = Queue() # Python 3.x
threads_list = list()
t = Thread(target=lambda q, arg1: q.put(foo(arg1)), args=(que, 'world!'))
t.start()
threads_list.append(t)
# Add more threads here
...
threads_list.append(t2)
...
threads_list.append(t3)
...
# Join all the threads
for t in threads_list:
t.join()
# Check thread's return value
while not que.empty():
result = que.get()
print result # Python 2.x
#print(result) # Python 3.x
UPDATE:
I think there's a significantly simpler and more concise way to save the result of the thread, and in a way that keeps the interface virtually identical to the threading.Thread class (please let me know if there are edge cases - I haven't tested as much as my original post below):
import threading
class ConciseResult(threading.Thread):
def run(self):
self.result = self._target(*self._args, **self._kwargs)
To be robust and avoid potential errors:
import threading
class ConciseRobustResult(threading.Thread):
def run(self):
try:
if self._target is not None:
self.result = self._target(*self._args, **self._kwargs)
finally:
# Avoid a refcycle if the thread is running a function with
# an argument that has a member that points to the thread.
del self._target, self._args, self._kwargs
Short explanation: we override only the run method of threading.Thread, and modify nothing else. This allows us to use everything else the threading.Thread class does for us, without needing to worry about missing potential edge cases such as _private attribute assignments or custom attribute modifications in the way that my original post does.
We can verify that we only modify the run method by looking at the output of help(ConciseResult) and help(ConciseRobustResult). The only method/attribute/descriptor included under Methods defined here: is run, and everything else comes from the inherited threading.Thread base class (see the Methods inherited from threading.Thread: section).
To test either of these implementations using the example code below, substitute ConciseResult or ConciseRobustResult for ThreadWithResult in the main function below.
Original post using a closure function in the init method:
Most answers I've found are long and require being familiar with other modules or advanced python features, and will be rather confusing to someone unless they're already familiar with everything the answer talks about.
Working code for a simplified approach:
import threading
class ThreadWithResult(threading.Thread):
def __init__(self, group=None, target=None, name=None, args=(), kwargs={}, *, daemon=None):
def function():
self.result = target(*args, **kwargs)
super().__init__(group=group, target=function, name=name, daemon=daemon)
Example code:
import time, random
def function_to_thread(n):
count = 0
while count < 3:
print(f'still running thread {n}')
count +=1
time.sleep(3)
result = random.random()
print(f'Return value of thread {n} should be: {result}')
return result
def main():
thread1 = ThreadWithResult(target=function_to_thread, args=(1,))
thread2 = ThreadWithResult(target=function_to_thread, args=(2,))
thread1.start()
thread2.start()
thread1.join()
thread2.join()
print(thread1.result)
print(thread2.result)
main()
Explanation:
I wanted to simplify things significantly, so I created a ThreadWithResult class and had it inherit from threading.Thread. The nested function function in __init__ calls the threaded function we want to save the value of, and saves the result of that nested function as the instance attribute self.result after the thread finishes executing.
Creating an instance of this is identical to creating an instance of threading.Thread. Pass in the function you want to run on a new thread to the target argument and any arguments that your function might need to the args argument and any keyword arguments to the kwargs argument.
e.g.
my_thread = ThreadWithResult(target=my_function, args=(arg1, arg2, arg3))
I think this is significantly easier to understand than the vast majority of answers, and this approach requires no extra imports! I included the time and random module to simulate the behavior of a thread, but they're not required to achieve the functionality asked in the original question.
I know I'm answering this looong after the question was asked, but I hope this can help more people in the future!
EDIT: I created the save-thread-result PyPI package to allow you to access the same code above and reuse it across projects (GitHub code is here). The PyPI package fully extends the threading.Thread class, so you can set any attributes you would set on threading.thread on the ThreadWithResult class as well!
The original answer above goes over the main idea behind this subclass, but for more information, see the more detailed explanation (from the module docstring) here.
Quick usage example:
pip3 install -U save-thread-result # MacOS/Linux
pip install -U save-thread-result # Windows
python3 # MacOS/Linux
python # Windows
from save_thread_result import ThreadWithResult
# As of Release 0.0.3, you can also specify values for
#`group`, `name`, and `daemon` if you want to set those
# values manually.
thread = ThreadWithResult(
target = my_function,
args = (my_function_arg1, my_function_arg2, ...)
kwargs = {my_function_kwarg1: kwarg1_value, my_function_kwarg2: kwarg2_value, ...}
)
thread.start()
thread.join()
if getattr(thread, 'result', None):
print(thread.result)
else:
# thread.result attribute not set - something caused
# the thread to terminate BEFORE the thread finished
# executing the function passed in through the
# `target` argument
print('ERROR! Something went wrong while executing this thread, and the function you passed in did NOT complete!!')
# seeing help about the class and information about the threading.Thread super class methods and attributes available:
help(ThreadWithResult)
Parris / kindall's answer join/return answer ported to Python 3:
from threading import Thread
def foo(bar):
print('hello {0}'.format(bar))
return "foo"
class ThreadWithReturnValue(Thread):
def __init__(self, group=None, target=None, name=None, args=(), kwargs=None, *, daemon=None):
Thread.__init__(self, group, target, name, args, kwargs, daemon=daemon)
self._return = None
def run(self):
if self._target is not None:
self._return = self._target(*self._args, **self._kwargs)
def join(self):
Thread.join(self)
return self._return
twrv = ThreadWithReturnValue(target=foo, args=('world!',))
twrv.start()
print(twrv.join()) # prints foo
Note, the Thread class is implemented differently in Python 3.
I stole kindall's answer and cleaned it up just a little bit.
The key part is adding *args and **kwargs to join() in order to handle the timeout
class threadWithReturn(Thread):
def __init__(self, *args, **kwargs):
super(threadWithReturn, self).__init__(*args, **kwargs)
self._return = None
def run(self):
if self._Thread__target is not None:
self._return = self._Thread__target(*self._Thread__args, **self._Thread__kwargs)
def join(self, *args, **kwargs):
super(threadWithReturn, self).join(*args, **kwargs)
return self._return
UPDATED ANSWER BELOW
This is my most popularly upvoted answer, so I decided to update with code that will run on both py2 and py3.
Additionally, I see many answers to this question that show a lack of comprehension regarding Thread.join(). Some completely fail to handle the timeout arg. But there is also a corner-case that you should be aware of regarding instances when you have (1) a target function that can return None and (2) you also pass the timeout arg to join(). Please see "TEST 4" to understand this corner case.
ThreadWithReturn class that works with py2 and py3:
import sys
from threading import Thread
from builtins import super # https://stackoverflow.com/a/30159479
_thread_target_key, _thread_args_key, _thread_kwargs_key = (
('_target', '_args', '_kwargs')
if sys.version_info >= (3, 0) else
('_Thread__target', '_Thread__args', '_Thread__kwargs')
)
class ThreadWithReturn(Thread):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._return = None
def run(self):
target = getattr(self, _thread_target_key)
if target is not None:
self._return = target(
*getattr(self, _thread_args_key),
**getattr(self, _thread_kwargs_key)
)
def join(self, *args, **kwargs):
super().join(*args, **kwargs)
return self._return
Some sample tests are shown below:
import time, random
# TEST TARGET FUNCTION
def giveMe(arg, seconds=None):
if not seconds is None:
time.sleep(seconds)
return arg
# TEST 1
my_thread = ThreadWithReturn(target=giveMe, args=('stringy',))
my_thread.start()
returned = my_thread.join()
# (returned == 'stringy')
# TEST 2
my_thread = ThreadWithReturn(target=giveMe, args=(None,))
my_thread.start()
returned = my_thread.join()
# (returned is None)
# TEST 3
my_thread = ThreadWithReturn(target=giveMe, args=('stringy',), kwargs={'seconds': 5})
my_thread.start()
returned = my_thread.join(timeout=2)
# (returned is None) # because join() timed out before giveMe() finished
# TEST 4
my_thread = ThreadWithReturn(target=giveMe, args=(None,), kwargs={'seconds': 5})
my_thread.start()
returned = my_thread.join(timeout=random.randint(1, 10))
Can you identify the corner-case that we may possibly encounter with TEST 4?
The problem is that we expect giveMe() to return None (see TEST 2), but we also expect join() to return None if it times out.
returned is None means either:
(1) that's what giveMe() returned, or
(2) join() timed out
This example is trivial since we know that giveMe() will always return None. But in real-world instance (where the target may legitimately return None or something else) we'd want to explicitly check for what happened.
Below is how to address this corner-case:
# TEST 4
my_thread = ThreadWithReturn(target=giveMe, args=(None,), kwargs={'seconds': 5})
my_thread.start()
returned = my_thread.join(timeout=random.randint(1, 10))
if my_thread.isAlive():
# returned is None because join() timed out
# this also means that giveMe() is still running in the background
pass
# handle this based on your app's logic
else:
# join() is finished, and so is giveMe()
# BUT we could also be in a race condition, so we need to update returned, just in case
returned = my_thread.join()
Using Queue :
import threading, queue
def calc_square(num, out_queue1):
l = []
for x in num:
l.append(x*x)
out_queue1.put(l)
arr = [1,2,3,4,5,6,7,8,9,10]
out_queue1=queue.Queue()
t1=threading.Thread(target=calc_square, args=(arr,out_queue1))
t1.start()
t1.join()
print (out_queue1.get())
My solution to the problem is to wrap the function and thread in a class. Does not require using pools,queues, or c type variable passing. It is also non blocking. You check status instead. See example of how to use it at end of code.
import threading
class ThreadWorker():
'''
The basic idea is given a function create an object.
The object can then run the function in a thread.
It provides a wrapper to start it,check its status,and get data out the function.
'''
def __init__(self,func):
self.thread = None
self.data = None
self.func = self.save_data(func)
def save_data(self,func):
'''modify function to save its returned data'''
def new_func(*args, **kwargs):
self.data=func(*args, **kwargs)
return new_func
def start(self,params):
self.data = None
if self.thread is not None:
if self.thread.isAlive():
return 'running' #could raise exception here
#unless thread exists and is alive start or restart it
self.thread = threading.Thread(target=self.func,args=params)
self.thread.start()
return 'started'
def status(self):
if self.thread is None:
return 'not_started'
else:
if self.thread.isAlive():
return 'running'
else:
return 'finished'
def get_results(self):
if self.thread is None:
return 'not_started' #could return exception
else:
if self.thread.isAlive():
return 'running'
else:
return self.data
def add(x,y):
return x +y
add_worker = ThreadWorker(add)
print add_worker.start((1,2,))
print add_worker.status()
print add_worker.get_results()
Taking into consideration #iman comment on #JakeBiesinger answer I have recomposed it to have various number of threads:
from multiprocessing.pool import ThreadPool
def foo(bar, baz):
print 'hello {0}'.format(bar)
return 'foo' + baz
numOfThreads = 3
results = []
pool = ThreadPool(numOfThreads)
for i in range(0, numOfThreads):
results.append(pool.apply_async(foo, ('world', 'foo'))) # tuple of args for foo)
# do some other stuff in the main process
# ...
# ...
results = [r.get() for r in results]
print results
pool.close()
pool.join()
I'm using this wrapper, which comfortably turns any function for running in a Thread - taking care of its return value or exception. It doesn't add Queue overhead.
def threading_func(f):
"""Decorator for running a function in a thread and handling its return
value or exception"""
def start(*args, **kw):
def run():
try:
th.ret = f(*args, **kw)
except:
th.exc = sys.exc_info()
def get(timeout=None):
th.join(timeout)
if th.exc:
raise th.exc[0], th.exc[1], th.exc[2] # py2
##raise th.exc[1] #py3
return th.ret
th = threading.Thread(None, run)
th.exc = None
th.get = get
th.start()
return th
return start
Usage Examples
def f(x):
return 2.5 * x
th = threading_func(f)(4)
print("still running?:", th.is_alive())
print("result:", th.get(timeout=1.0))
#threading_func
def th_mul(a, b):
return a * b
th = th_mul("text", 2.5)
try:
print(th.get())
except TypeError:
print("exception thrown ok.")
Notes on threading module
Comfortable return value & exception handling of a threaded function is a frequent "Pythonic" need and should indeed already be offered by the threading module - possibly directly in the standard Thread class. ThreadPool has way too much overhead for simple tasks - 3 managing threads, lots of bureaucracy. Unfortunately Thread's layout was copied from Java originally - which you see e.g. from the still useless 1st (!) constructor parameter group.
Based of what kindall mentioned, here's the more generic solution that works with Python3.
import threading
class ThreadWithReturnValue(threading.Thread):
def __init__(self, *init_args, **init_kwargs):
threading.Thread.__init__(self, *init_args, **init_kwargs)
self._return = None
def run(self):
self._return = self._target(*self._args, **self._kwargs)
def join(self):
threading.Thread.join(self)
return self._return
Usage
th = ThreadWithReturnValue(target=requests.get, args=('http://www.google.com',))
th.start()
response = th.join()
response.status_code # => 200
join always return None, i think you should subclass Thread to handle return codes and so.
You can define a mutable above the scope of the threaded function, and add the result to that. (I also modified the code to be python3 compatible)
returns = {}
def foo(bar):
print('hello {0}'.format(bar))
returns[bar] = 'foo'
from threading import Thread
t = Thread(target=foo, args=('world!',))
t.start()
t.join()
print(returns)
This returns {'world!': 'foo'}
If you use the function input as the key to your results dict, every unique input is guaranteed to give an entry in the results
Define your target to
1) take an argument q
2) replace any statements return foo with q.put(foo); return
so a function
def func(a):
ans = a * a
return ans
would become
def func(a, q):
ans = a * a
q.put(ans)
return
and then you would proceed as such
from Queue import Queue
from threading import Thread
ans_q = Queue()
arg_tups = [(i, ans_q) for i in xrange(10)]
threads = [Thread(target=func, args=arg_tup) for arg_tup in arg_tups]
_ = [t.start() for t in threads]
_ = [t.join() for t in threads]
results = [q.get() for _ in xrange(len(threads))]
And you can use function decorators/wrappers to make it so you can use your existing functions as target without modifying them, but follow this basic scheme.
GuySoft's idea is great, but I think the object does not necessarily have to inherit from Thread and start() could be removed from interface:
from threading import Thread
import queue
class ThreadWithReturnValue(object):
def __init__(self, target=None, args=(), **kwargs):
self._que = queue.Queue()
self._t = Thread(target=lambda q,arg1,kwargs1: q.put(target(*arg1, **kwargs1)) ,
args=(self._que, args, kwargs), )
self._t.start()
def join(self):
self._t.join()
return self._que.get()
def foo(bar):
print('hello {0}'.format(bar))
return "foo"
twrv = ThreadWithReturnValue(target=foo, args=('world!',))
print(twrv.join()) # prints foo
This is a pretty old question, but I wanted to share a simple solution that has worked for me and helped my dev process.
The methodology behind this answer is the fact that the "new" target function, inner is assigning the result of the original function (passed through the __init__ function) to the result instance attribute of the wrapper through something called closure.
This allows the wrapper class to hold onto the return value for callers to access at anytime.
NOTE: This method doesn't need to use any mangled methods or private methods of the threading.Thread class, although yield functions have not been considered (OP did not mention yield functions).
Enjoy!
from threading import Thread as _Thread
class ThreadWrapper:
def __init__(self, target, *args, **kwargs):
self.result = None
self._target = self._build_threaded_fn(target)
self.thread = _Thread(
target=self._target,
*args,
**kwargs
)
def _build_threaded_fn(self, func):
def inner(*args, **kwargs):
self.result = func(*args, **kwargs)
return inner
Additionally, you can run pytest (assuming you have it installed) with the following code to demonstrate the results:
import time
from commons import ThreadWrapper
def test():
def target():
time.sleep(1)
return 'Hello'
wrapper = ThreadWrapper(target=target)
wrapper.thread.start()
r = wrapper.result
assert r is None
time.sleep(2)
r = wrapper.result
assert r == 'Hello'
As mentioned multiprocessing pool is much slower than basic threading. Using queues as proposeded in some answers here is a very effective alternative. I have use it with dictionaries in order to be able run a lot of small threads and recuperate multiple answers by combining them with dictionaries:
#!/usr/bin/env python3
import threading
# use Queue for python2
import queue
import random
LETTERS = 'abcdefghijklmnopqrstuvwxyz'
LETTERS = [ x for x in LETTERS ]
NUMBERS = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
def randoms(k, q):
result = dict()
result['letter'] = random.choice(LETTERS)
result['number'] = random.choice(NUMBERS)
q.put({k: result})
threads = list()
q = queue.Queue()
results = dict()
for name in ('alpha', 'oscar', 'yankee',):
threads.append( threading.Thread(target=randoms, args=(name, q)) )
threads[-1].start()
_ = [ t.join() for t in threads ]
while not q.empty():
results.update(q.get())
print(results)
Here is the version that I created of @kindall's answer.
This version makes it so that all you have to do is pass in your function and its arguments to create a new thread.
This was made with Python 3.8:
from threading import Thread
from typing import Any
def test(plug, plug2, plug3):
print(f"hello {plug}")
print(f'I am the second plug : {plug2}')
print(plug3)
return 'I am the return Value!'
def test2(msg):
return f'I am from the second test: {msg}'
def test3():
print('hello world')
def NewThread(com, Returning: bool, *arguments) -> Any:
"""
Will create a new thread for a function/command.
    :param com: Command to be executed
    :param Returning: True/False, whether this command needs to return anything
    :param arguments: Arguments to be sent to the command
    """
class NewThreadWorker(Thread):
def __init__(self, group = None, target = None, name = None, args = (), kwargs = None, *,
daemon = None):
Thread.__init__(self, group, target, name, args, kwargs, daemon = daemon)
self._return = None
def run(self):
if self._target is not None:
self._return = self._target(*self._args, **self._kwargs)
def join(self):
Thread.join(self)
return self._return
ntw = NewThreadWorker(target = com, args = (*arguments,))
ntw.start()
if Returning:
return ntw.join()
if __name__ == "__main__":
print(NewThread(test, True, 'hi', 'test', test2('hi')))
NewThread(test3, True)
You can use pool.apply_async() of ThreadPool() to return the value from test() as shown below:
from multiprocessing.pool import ThreadPool
def test(num1, num2):
return num1 + num2
pool = ThreadPool(processes=1) # Here
result = pool.apply_async(test, (2, 3)) # Here
print(result.get()) # 5
And, you can also use submit() of concurrent.futures.ThreadPoolExecutor() to return the value from test() as shown below:
from concurrent.futures import ThreadPoolExecutor
def test(num1, num2):
return num1 + num2
with ThreadPoolExecutor(max_workers=1) as executor:
future = executor.submit(test, 2, 3) # Here
print(future.result()) # 5
And, instead of return, you can use the array result as shown below:
from threading import Thread
def test(num1, num2, r):
r[0] = num1 + num2 # Instead of "return"
result = [None] # Here
thread = Thread(target=test, args=(2, 3, result))
thread.start()
thread.join()
print(result[0]) # 5
And instead of return, you can also use the queue result as shown below:
from threading import Thread
import queue
def test(num1, num2, q):
q.put(num1 + num2) # Instead of "return"
queue = queue.Queue() # Here
thread = Thread(target=test, args=(2, 3, queue))
thread.start()
thread.join()
print(queue.get()) # 5
The shortest and simplest way I've found to do this is to take advantage of the fact that Python objects allow dynamic attributes. You can retrieve the current thread from within the context of your spawned thread using threading.current_thread(), and assign the return value to an attribute on it.
import threading
def some_target_function():
# Your code here.
threading.current_thread().return_value = "Some return value."
your_thread = threading.Thread(target=some_target_function)
your_thread.start()
your_thread.join()
return_value = your_thread.return_value
print(return_value)
One usual solution is to wrap your target function with a small wrapper like
result = queue.Queue()
def task_wrapper(*args):
result.put(target(*args))
Then the whole code may look like this:
result = queue.Queue()
def task_wrapper(*args):
result.put(target(*args))
threads = [threading.Thread(target=task_wrapper, args=args) for args in args_list]
for t in threads:
t.start()
# crude throttle: wait until the number of live threads drops below max_num
while len(threading.enumerate()) >= max_num:
    pass
for t in threads:
t.join()
return result
Note
One important issue is that the return values may be unordered.
(In fact, the return value is not necessarily saved to a queue; you can choose any thread-safe data structure.) One way to preserve the order is sketched below.
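A sketch of that idea, building on the snippet above (target and args_list are the names used there; the index tagging is my own addition):

import threading
import queue

result = queue.Queue()

def task_wrapper(index, *args):
    # pair each return value with the index of its argument tuple
    result.put((index, target(*args)))

threads = [threading.Thread(target=task_wrapper, args=(i,) + tuple(args))
           for i, args in enumerate(args_list)]
for t in threads:
    t.start()
for t in threads:
    t.join()
# drain the queue and sort by index to restore the submission order
ordered = [value for _, value in sorted(result.get() for _ in threads)]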
kindall's answer, in Python 3:
class ThreadWithReturnValue(Thread):
    def __init__(self, group=None, target=None, name=None,
                 args=(), kwargs={}, *, daemon=None):
        Thread.__init__(self, group, target, name, args, kwargs, daemon=daemon)  # daemon is keyword-only
self._return = None
def run(self):
try:
if self._target:
self._return = self._target(*self._args, **self._kwargs)
finally:
del self._target, self._args, self._kwargs
def join(self,timeout=None):
Thread.join(self,timeout)
return self._return
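Usage is the same as with a plain Thread; a quick sketch:

twrv = ThreadWithReturnValue(target=lambda bar: 'foo' + bar, args=('!',))
twrv.start()
print(twrv.join())   # prints foo!

Note that if you pass join(timeout=...) and the timeout expires, you get None back even though the target may still be running and may eventually return a real value.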
I know this thread is old.... but I faced the same problem. If you are willing to use thread.join(), you can store the result on an instance attribute:
import threading
class test:
def __init__(self):
self.msg=""
def hello(self,bar):
print('hello {}'.format(bar))
self.msg="foo"
def main(self):
thread = threading.Thread(target=self.hello, args=('world!',))
thread.start()
thread.join()
print(self.msg)
g=test()
g.main()
A simple way: define a global variable, then change the variable in the threaded function. Nothing to pass in or retrieve back.
from threading import Thread
# global var
random_global_var = 5
def function():
global random_global_var
random_global_var += 1
domath = Thread(target=function)
domath.start()
domath.join()
print(random_global_var)
# result: 6
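Be aware, though, that += on a shared global is a read-modify-write and is not guaranteed to be atomic, so with many threads you would normally guard the update with a lock; a minimal sketch:

from threading import Thread, Lock

counter = 0
counter_lock = Lock()

def increment():
    global counter
    with counter_lock:   # serialize the read-modify-write
        counter += 1

threads = [Thread(target=increment) for _ in range(100)]
for t in threads:
    t.start()
for t in threads:
    t.join()
print(counter)   # reliably 100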
Is there a Pool class for worker threads, similar to the multiprocessing module's Pool class?
For example, I like the easy way to parallelize a map function:
def long_running_func(p):
c_func_no_gil(p)
p = multiprocessing.Pool(4)
xs = p.map(long_running_func, range(100))
however I would like to do it without the overhead of creating new processes.
I know about the GIL. However, in my use case, the function will be an IO-bound C function for which the Python wrapper will release the GIL before the actual function call.
Do I have to write my own threading pool?
I just found out that there actually is a thread-based Pool interface in the multiprocessing module; however, it is somewhat hidden and not properly documented.
It can be imported via
from multiprocessing.pool import ThreadPool
It is implemented using a dummy Process class wrapping a python thread. This thread-based Process class can be found in multiprocessing.dummy which is mentioned briefly in the docs. This dummy module supposedly provides the whole multiprocessing interface based on threads.
In Python 3 you can use concurrent.futures.ThreadPoolExecutor, i.e.:
from concurrent.futures import ThreadPoolExecutor

executor = ThreadPoolExecutor(max_workers=10)
a = executor.submit(my_function)
See the docs for more info and examples.
Yes, and it seems to have (more or less) the same API.
import multiprocessing
def worker(lnk):
....
def start_process():
.....
....
if PROCESS:
pool = multiprocessing.Pool(processes=POOL_SIZE, initializer=start_process)
else:
pool = multiprocessing.pool.ThreadPool(processes=POOL_SIZE,
initializer=start_process)
pool.map(worker, inputs)
....
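For reference, a filled-in, runnable version of that sketch; the worker body, the PROCESS flag, and POOL_SIZE are placeholders of my own, and the initializer is left out for brevity:

import multiprocessing
import multiprocessing.pool

POOL_SIZE = 4
PROCESS = False   # flip to True to use real processes instead of threads

def worker(lnk):
    return len(lnk)   # placeholder for real work

if __name__ == '__main__':
    if PROCESS:
        pool = multiprocessing.Pool(processes=POOL_SIZE)
    else:
        pool = multiprocessing.pool.ThreadPool(processes=POOL_SIZE)
    print(pool.map(worker, ['a', 'bb', 'ccc']))   # prints [1, 2, 3]
    pool.close()
    pool.join()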
For something very simple and lightweight (slightly modified from here):
from Queue import Queue
from threading import Thread
class Worker(Thread):
"""Thread executing tasks from a given tasks queue"""
def __init__(self, tasks):
Thread.__init__(self)
self.tasks = tasks
self.daemon = True
self.start()
def run(self):
while True:
func, args, kargs = self.tasks.get()
try:
func(*args, **kargs)
except Exception, e:
print e
finally:
self.tasks.task_done()
class ThreadPool:
"""Pool of threads consuming tasks from a queue"""
def __init__(self, num_threads):
self.tasks = Queue(num_threads)
for _ in range(num_threads):
Worker(self.tasks)
def add_task(self, func, *args, **kargs):
"""Add a task to the queue"""
self.tasks.put((func, args, kargs))
def wait_completion(self):
"""Wait for completion of all the tasks in the queue"""
self.tasks.join()
if __name__ == '__main__':
from random import randrange
from time import sleep
delays = [randrange(1, 10) for i in range(100)]
def wait_delay(d):
print 'sleeping for (%d)sec' % d
sleep(d)
pool = ThreadPool(20)
for i, d in enumerate(delays):
pool.add_task(wait_delay, d)
pool.wait_completion()
To support callbacks on task completion you can just add the callback to the task tuple.
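For example, one way to do that without touching the Worker class is to fold the function and its callback into a single zero-argument task; the CallbackThreadPool subclass and its method name are my own:

class CallbackThreadPool(ThreadPool):
    """ThreadPool that reports each task's return value to a callback"""
    def add_task_with_callback(self, callback, func, *args, **kargs):
        def task():
            # run the task, then hand its return value to the callback
            callback(func(*args, **kargs))
        self.add_task(task)

# e.g. pool.add_task_with_callback(results.append, wait_delay, 3)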
To use the thread pool in Python you can use this library:
from multiprocessing.dummy import Pool as ThreadPool
and then use it like this:
pool = ThreadPool(threads)
results = pool.map(service, tasks)
pool.close()
pool.join()
return results
Here, threads is the number of threads that you want, and tasks is the list of tasks to map onto the service function.
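For instance, with a made-up service function:

from multiprocessing.dummy import Pool as ThreadPool

def service(task):
    return task * 2   # placeholder for real work

tasks = [1, 2, 3, 4]
pool = ThreadPool(4)
results = pool.map(service, tasks)
pool.close()
pool.join()
print(results)   # prints [2, 4, 6, 8]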
Yes, there is a threading pool similar to the multiprocessing Pool; however, it is somewhat hidden and not properly documented. You can import it the following way:
from multiprocessing.pool import ThreadPool
Here is a simple example (it is adapted from a test suite, so BytesIO comes from io, and self.read_csv and tm come from the original test context):
def test_multithread_stringio_read_csv(self):
# see gh-11786
max_row_range = 10000
num_files = 100
bytes_to_df = [
'\n'.join(
['%d,%d,%d' % (i, i, i) for i in range(max_row_range)]
).encode() for j in range(num_files)]
files = [BytesIO(b) for b in bytes_to_df]
# read all files in many threads
pool = ThreadPool(8)
results = pool.map(self.read_csv, files)
first_result = results[0]
for result in results:
tm.assert_frame_equal(first_result, result)
Here's the result I finally ended up using. It's a modified version of the classes by dgorissen above.
File: threadpool.py
from queue import Queue, Empty
import threading
from threading import Thread
class Worker(Thread):
    """Thread executing tasks from a given tasks queue. The thread is
    signalable, so it can be told to exit."""
    _TIMEOUT = 2
def __init__(self, tasks, th_num):
Thread.__init__(self)
self.tasks = tasks
self.daemon, self.th_num = True, th_num
self.done = threading.Event()
self.start()
def run(self):
while not self.done.is_set():
try:
func, args, kwargs = self.tasks.get(block=True,
timeout=self._TIMEOUT)
try:
func(*args, **kwargs)
except Exception as e:
print(e)
finally:
self.tasks.task_done()
except Empty as e:
pass
return
def signal_exit(self):
""" Signal to thread to exit """
self.done.set()
class ThreadPool:
"""Pool of threads consuming tasks from a queue"""
def __init__(self, num_threads, tasks=[]):
self.tasks = Queue(num_threads)
self.workers = []
self.done = False
self._init_workers(num_threads)
for task in tasks:
self.tasks.put(task)
def _init_workers(self, num_threads):
for i in range(num_threads):
self.workers.append(Worker(self.tasks, i))
def add_task(self, func, *args, **kwargs):
"""Add a task to the queue"""
self.tasks.put((func, args, kwargs))
def _close_all_threads(self):
""" Signal all threads to exit and lose the references to them """
for workr in self.workers:
workr.signal_exit()
self.workers = []
def wait_completion(self):
"""Wait for completion of all the tasks in the queue"""
self.tasks.join()
def __del__(self):
self._close_all_threads()
def create_task(func, *args, **kwargs):
return (func, args, kwargs)
To use the pool
from random import randrange
from time import sleep
delays = [randrange(1, 10) for i in range(30)]
def wait_delay(d):
print('sleeping for (%d)sec' % d)
sleep(d)
pool = ThreadPool(20)
for i, d in enumerate(delays):
pool.add_task(wait_delay, d)
pool.wait_completion()
Another way is to add the task to a thread pool's queue:
import concurrent.futures
with concurrent.futures.ThreadPoolExecutor(max_workers=cpus) as executor:
for i in range(10):
a = executor.submit(arg1, arg2,....)
The overhead of creating the new processes is minimal, especially when it's just 4 of them. I doubt this is a performance hot spot of your application. Keep it simple, optimize where you have to and where profiling results point to.
There is no built in thread based pool. However, it can be very quick to implement a producer/consumer queue with the Queue class.
From:
https://docs.python.org/2/library/queue.html
from threading import Thread
from Queue import Queue
def worker():
while True:
item = q.get()
do_work(item)
q.task_done()
q = Queue()
for i in range(num_worker_threads):
t = Thread(target=worker)
t.daemon = True
t.start()
for item in source():
q.put(item)
q.join() # block until all tasks are done
If you don't mind executing someone else's code, here's mine:
Note: There is a lot of extra code you may want to remove [added for clarification and to demonstrate how it works].
Note: Python naming conventions were used for method names and variable names instead of camelCase.
Working procedure:
The MultiThread class starts the given number of SingleThread instances, which share a lock, a work queue, an exit flag, and a results list.
Each SingleThread is started by MultiThread as soon as all instances are created.
We can add work items using MultiThread (it will take care of the locking).
The SingleThreads process the work queue, taking the lock around queue access.
Once your work is done, you can destroy all threads with the shared boolean flag.
Here, the work can be anything. It can even automatically import (uncomment the import line) and call into a module with the given arguments.
Results are appended to the shared results list, and we can fetch them using get_results.
Code:
import threading
import queue
class SingleThread(threading.Thread):
def __init__(self, name, work_queue, lock, exit_flag, results):
threading.Thread.__init__(self)
self.name = name
self.work_queue = work_queue
self.lock = lock
self.exit_flag = exit_flag
self.results = results
def run(self):
# print("Coming %s with parameters %s", self.name, self.exit_flag)
while not self.exit_flag:
# print(self.exit_flag)
self.lock.acquire()
if not self.work_queue.empty():
work = self.work_queue.get()
module, operation, args, kwargs = work.module, work.operation, work.args, work.kwargs
self.lock.release()
print("Processing : " + operation + " with parameters " + str(args) + " and " + str(kwargs) + " by " + self.name + "\n")
# module = __import__(module_name)
result = str(getattr(module, operation)(*args, **kwargs))
print("Result : " + result + " for operation " + operation + " and input " + str(args) + " " + str(kwargs))
self.results.append(result)
else:
self.lock.release()
# process_work_queue(self.work_queue)
class MultiThread:
def __init__(self, no_of_threads):
self.exit_flag = bool_instance()
self.queue_lock = threading.Lock()
self.threads = []
self.work_queue = queue.Queue()
self.results = []
for index in range(0, no_of_threads):
thread = SingleThread("Thread" + str(index+1), self.work_queue, self.queue_lock, self.exit_flag, self.results)
thread.start()
self.threads.append(thread)
def add_work(self, work):
self.queue_lock.acquire()
        self.work_queue.put(work)  # put() is thread-safe on its own; avoid the private _put()
self.queue_lock.release()
def destroy(self):
self.exit_flag.value = True
for thread in self.threads:
thread.join()
def get_results(self):
return self.results
class Work:
def __init__(self, module, operation, args, kwargs={}):
self.module = module
self.operation = operation
self.args = args
self.kwargs = kwargs
class SimpleOperations:
def sum(self, *args):
return sum([int(arg) for arg in args])
@staticmethod
def mul(a, b, c=0):
return int(a) * int(b) + int(c)
class bool_instance:
def __init__(self, value=False):
self.value = value
def __setattr__(self, key, value):
if key != "value":
raise AttributeError("Only value can be set!")
if not isinstance(value, bool):
raise AttributeError("Only True/False can be set!")
self.__dict__[key] = value
# super.__setattr__(key, bool(value))
def __bool__(self):
return self.value
if __name__ == "__main__":
multi_thread = MultiThread(5)
multi_thread.add_work(Work(SimpleOperations(), "mul", [2, 3], {"c":4}))
while True:
data_input = input()
if data_input == "":
pass
elif data_input == "break":
break
else:
work = data_input.split()
multi_thread.add_work(Work(SimpleOperations(), work[0], work[1:], {}))
multi_thread.destroy()
print(multi_thread.get_results())
I'm trying to implement threading (using decorators) in my application, but I can't understand some things about locks and managing threads.
import threading
def run_in_thread(fn):
def run(*k, **kw):
t = threading.Thread(target=fn, args=k, kwargs=kw)
t.start()
return run
class A:
@run_in_thread
def method1(self):
for x in range(10000):
print x
@run_in_thread
def method2(self):
for y in list('wlkefjwfejwiefwhfwfkjshkjadgfjhkewgfjwjefjwe'):
print y
def stop_thread(self):
pass
c = A()
c.method1()
c.method2()
As I understand it, method1 and method2 are not synchronized; that kind of synchronization is implemented with locks. How can I add locks to my decorator function?
How can I implement a way to stop long-running threads using decorators?
If you extend the function to
def run_in_thread(fn):
def run(*k, **kw):
t = threading.Thread(target=fn, args=k, kwargs=kw)
t.start()
return t # <-- this is new!
return run
i.e., if you let the wrapper function return the created thread, you can do
c = A()
t1 = c.method1()
t1.join() # wait for it to finish
t2 = c.method2()
# ...
i.e., get the thread the original method runs in, do whatever you want with it (e.g., join it), and only then call the next method.
If you don't need the thread in a given case, you are free to omit it.
If you want to synchronize the two threads, you simply need to add locks inside the decorated functions, not inside the decorators themselves.
There is no simple way to directly stop a Thread; the only way is to use an Event to signal the thread that it must exit. A sketch combining both ideas is shown below.
For threading decorators you can take a look at pebble.
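Here is a minimal sketch of both ideas; lock and stop_event are names I made up, and the decorator is the one from the question, extended to return the thread:

import threading

lock = threading.Lock()          # shared by all decorated methods
stop_event = threading.Event()   # set this to ask the threads to exit

def run_in_thread(fn):
    def run(*k, **kw):
        t = threading.Thread(target=fn, args=k, kwargs=kw)
        t.start()
        return t
    return run

class A:
    @run_in_thread
    def method1(self):
        for x in range(10000):
            if stop_event.is_set():   # cooperative stop
                return
            with lock:                # synchronize with other threads
                print(x)

c = A()
t1 = c.method1()
stop_event.set()   # ask the worker to stop early
t1.join()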
Maybe semaphores could help in decorators; something like this, calculating the factorials of the numbers 0-999:
import threading
from functools import wraps
from math import factorial
DIC = {}
def limit(number):
''' This decorator limits the number of simultaneous Threads
'''
sem = threading.Semaphore(number)
def wrapper(func):
@wraps(func)
def wrapped(*args):
with sem:
return func(*args)
return wrapped
return wrapper
def run_async(f):
    ''' This decorator executes a function in a Thread.
        (Renamed from async, which became a reserved word in Python 3.7.) '''
    @wraps(f)
    def wrapper(*args, **kwargs):
        thr = threading.Thread(target=f, args=args, kwargs=kwargs)
        thr.start()
    return wrapper

@run_async
@limit(10)  # the semaphore is acquired inside the spawned thread, so at most
            # 10 factorials are computed at a time
def calcula_fatorial(number):
DIC.update({number: factorial(number)})
@limit(10)
def main(lista):
for elem in lista:
calcula_fatorial(elem)
if __name__ == '__main__':
from pprint import pprint
main(range(1000))
pprint(DIC)
I'm working on a subclass of threading.Thread which allows its methods to be called and run in the thread represented by the object that they are called on, as opposed to the usual behavior. I do this by using decorators on the target method that place the call to the method in a collections.deque, and by using the run method to process the deque.
The run method uses a while not self.__stop: statement and a threading.Condition object to wait for a call to be placed in the deque, and then calls self.__process_calls. The else clause of the while loop makes a final call to __process_calls. If self.__stop is set, an exception is raised on any attempt to call one of the 'callable' methods from another thread.
The problem is that __process_calls fails to return unless its last statement is a print, which I discovered during debugging. I've tried a = 1 and an explicit return, but neither works. With any print statement as the final statement of the function, though, it returns and the thread doesn't hang. Any ideas what's going on?
EDIT: It was pointed out by David Zaslavsky that the print works because it takes a while, and I've confirmed that.
The code's a little long, but hopefully my explanation above is clear enough to help understand it.
import threading
import collections
class BrokenPromise(Exception): pass
class CallableThreadError(Exception): pass
class CallToNonRunningThreadError(CallableThreadError): pass
class Promise(object):
def __init__(self, deque, condition):
self._condition = condition
self._deque = deque
def read(self, timeout=None):
if not self._deque:
with self._condition:
if timeout:
self._condition.wait(timeout)
else:
self._condition.wait()
if self._deque:
value = self._deque.popleft()
del self._deque
del self._condition
return value
else:
raise BrokenPromise
def ready(self):
return bool(self._deque)
class CallableThread(threading.Thread):
def __init__(self, *args, **kwargs):
# _enqueued_calls is used to store tuples that encode a function call.
# It is processed by the run method
self.__enqueued_calls = collections.deque()
# _enqueue_call_permission is for callers to signal that they have
# placed something on the queue
self.__enqueue_call_permission = threading.Condition()
self.__stop = False
super(CallableThread, self).__init__(*args, **kwargs)
@staticmethod
def blocking_method(f):
u"""A decorator function to implement a blocking method on a thread"""
# the returned function enqueues the decorated function and blocks
# until the decorated function is called and returns. It then returns
# the value unmodified. The code in register runs in the calling thread
# and the decorated method runs in the thread that it is called on
f = CallableThread.nonblocking_method_with_promise(f)
def register(self, *args, **kwargs):
p = f(self, *args, **kwargs)
return p.read()
return register
@staticmethod
def nonblocking_method_with_promise(f):
u"""A decorator function to implement a non-blocking method on a
thread
"""
# the returned function enqueues the decorated function and returns a
# Promise object. The code in register runs in the calling thread
# and the decorated method runs in the thread that it is called on.
def register(self, *args, **kwargs):
call_complete = threading.Condition()
response_deque = collections.deque()
self.__push_call(f, args, kwargs, response_deque, call_complete)
return Promise(response_deque, call_complete)
return register
@staticmethod
def nonblocking_method(f):
def register(self, *args, **kwargs):
self.__push_call(f, args, kwargs)
return register
def run(self):
while not self.__stop: # while we've not been killed
with self.__enqueue_call_permission:
# get the condition so that we can wait on it if we need too.
if not self.__enqueued_calls:
self.__enqueue_call_permission.wait()
self.__process_calls()
else:
# if we exit because self._run == False, finish processing
# the pending calls if there are any
self.__process_calls()
def stop(self):
u""" Signal the thread to stop"""
with self.__enqueue_call_permission:
# we do this in case the run method is stuck waiting on an update
self.__stop = True
self.__enqueue_call_permission.notify()
def __process_calls(self):
print "processing calls"
while self.__enqueued_calls:
((f, args, kwargs),
response_deque, call_complete) = self.__enqueued_calls.popleft()
if call_complete:
with call_complete:
response_deque.append(f(self, *args, **kwargs))
call_complete.notify()
else:
f(self, *args, **kwargs)
# this is where you place the print statement if you want to see the
# behavior
def __push_call(self, f, args, kwargs, response_deque=None,
call_complete=None):
if self.__stop:
raise CallToNonRunningThreadError(
"This thread is no longer accepting calls")
with self.__enqueue_call_permission:
self.__enqueued_calls.append(((f, args, kwargs),
response_deque, call_complete))
self.__enqueue_call_permission.notify()
# if __name__ == '__main__': (I lost the indent on the following code in
# copying, but it doesn't matter in this context)
class TestThread(CallableThread):
u"""Increment a counter on each call and print the value"""
counter = 0
@CallableThread.nonblocking_method_with_promise
def increment(self):
self.counter += 1
return self.counter
class LogThread(CallableThread):
@CallableThread.nonblocking_method
def log(self, message):
print message
l = LogThread()
l.start()
l.log("logger started")
t = TestThread()
t.start()
l.log("test thread started")
p = t.increment()
l.log("promise aquired")
v = p.read()
l.log("promise read")
l.log("{0} read from promise".format(v))
l.stop()
t.stop()
l.join()
t.join()
__process_calls is modifying __enqueued_calls without owning the lock. This may be creating a race condition.
Edit: deque may be "thread-safe" (i.e., not corrupted by concurrent accesses), but checking its state still should be done under the lock.
The stop condition is also not safe.
Comments inline:
def run(self):
while not self.__stop: # while we've not been killed
with self.__enqueue_call_permission:
# get the condition so that we can wait on it if we need too.
### should be checking __stop here, it could have been modified before
### you took the lock.
if not self.__enqueued_calls:
self.__enqueue_call_permission.wait()
self.__process_calls()
else:
# if we exit because self._run == False, finish processing
# the pending calls if there are any
self.__process_calls()
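Putting those comments into practice, run could re-check the flag while holding the lock; a sketch keeping the original names, as a method of CallableThread:

def run(self):
    while True:
        with self.__enqueue_call_permission:
            if self.__stop:
                # re-checked under the lock, so a stop() that ran just
                # before we acquired it cannot be missed
                break
            if not self.__enqueued_calls:
                # wait() releases the lock; notify() from __push_call
                # or from stop() wakes us up again
                self.__enqueue_call_permission.wait()
            self.__process_calls()
    # finish processing any calls enqueued before the stop
    self.__process_calls()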