I need to update the GUI after a thread completes and call this update_ui function from the main thread (like a software interrupt, maybe?). How can a worker thread call a function in the main thread?
Sample code:
def thread():
    ...some long task
    update_ui()  # But call this in main thread somehow

def main():
    start_new_thread(thread)
    ...other functionality

def update_ui():
    Tkinter_widget.update()
I tried to use a Queue or a flag accessible to both threads, but then I have to wait/poll continuously to check whether the value has been updated before calling the function, and that waiting makes the UI unresponsive. e.g.
flag = True

def thread():
    ...some long task
    flag = False

def main():
    start_new_thread(thread)
    while flag:
        sleep(1)
    update_ui()
    ...other functionality
Your code appears to be somewhat hypothetical. Here is some code that does what you describe. It creates three labels and initializes their text, then starts three threads. Each thread, after a period of time, updates the tkinter variable associated with a label created in the main thread. If the main thread really needs to do the updating itself, queuing does work, but the program must be modified to accomplish that.
import threading
import time
from tkinter import *
import queue
import sys

def createGUI(master, widget_var):
    for i in range(3):
        Label(master, textvariable=widget_var[i]).grid(row=i, column=0)
        widget_var[i].set("Thread " + str(i) + " started")

def sometask(thread_id, delay, queue):
    print("Delaying", delay)
    time.sleep(delay)
    tdict = {'id': thread_id, 'message': 'success'}
    # You can put simple strings/ints, whatever in the queue instead
    queue.put(tdict)
    return

def updateGUI(master, q, widget_var, td):
    if not q.empty():
        tdict = q.get()
        widget_var[tdict['id']].set("Thread " + str(tdict['id']) + " completed with status: " + tdict['message'])
        td.append(1)
    if len(td) == 3:
        print("All threads completed")
        master.after(1000, timedExit)
    else:
        master.after(100, lambda w=master, que=q, v=widget_var, tcount=td: updateGUI(w, que, v, tcount))

def timedExit():
    sys.exit()

root = Tk()
message_q = queue.Queue()

widget_var = []
threads_done = []
for i in range(3):
    v = StringVar()
    widget_var.append(v)
    t = threading.Thread(target=sometask, args=(i, 3 + i * 3, message_q))
    t.start()

createGUI(root, widget_var)
updateGUI(root, message_q, widget_var, threads_done)
root.mainloop()
I am trying to add threading to a Python 3.6.3 Tkinter program so that a function can run while the GUI stays responsive, including if the user wants to close the program while the function is running.
In the example below I have tried to run a simple printing to console function on a separate thread to the GUI mainloop so the user could click the X in the top right to close the program while the loop is running if they so wish.
The error I am getting is:
TypeError: start() takes 1 positional argument but 2 were given
try:
    import tkinter as tk
    import queue as queue
except:
    import Tkinter as tk
    import Queue as queue

import threading

def center(toplevel, desired_width=None, desired_height=None):
    toplevel.update_idletasks()
    w, h = toplevel.winfo_screenwidth() - 20, toplevel.winfo_screenheight() - 100
    if desired_width and desired_height:
        size = (desired_width, desired_height)
    else:
        size = tuple(int(Q) for Q in toplevel.geometry().split("+")[0].split("x"))
    toplevel.geometry("%dx%d+%d+%d" % (size + (w/2 - size[0]/2, h/2 - size[1]/2)))

class ThreadedTask(threading.Thread):
    def __init__(self, queue):
        threading.Thread.__init__(self)
        self.queue = queue

    def run(self, func):
        func()

class app(tk.Tk):
    def __init__(self):
        tk.Tk.__init__(self)
        center(self, desired_width=500, desired_height=400)
        self.queue = queue.Queue()
        self.run_func_button = tk.Button(self,
                                         text="Run Function",
                                         font=("Calibri", 20, "bold"),
                                         command=self.run_func)
        self.run_func_button.pack()

    def run_func(self):
        ThreadedTask(self.queue).start(self.count_to_1500)

    def count_to_1500(self):
        for i in range(1500):
            print(i)

app_start = app()
app_start.mainloop()
See the threading docs - start() doesn't take any arguments, but you call .start(self.count_to_1500) - and this gives your error.
You could use
Thread(target=self.count_to_1500).start()
or
Thread(target=self.count_to_1500, args=(self.queue,)).start()
if you define
def count_to_1500(self, queue):
EDIT: working example with a thread which puts data into a queue and a method which gets data from the queue.
try:
    import tkinter as tk
    import queue as queue
except:
    import Tkinter as tk
    import Queue as queue

import threading
import time

def center(toplevel, desired_width=None, desired_height=None):
    toplevel.update_idletasks()
    w, h = toplevel.winfo_screenwidth() - 20, toplevel.winfo_screenheight() - 100
    if desired_width and desired_height:
        size = (desired_width, desired_height)
    else:
        size = tuple(int(Q) for Q in toplevel.geometry().split("+")[0].split("x"))
    toplevel.geometry("%dx%d+%d+%d" % (size + (w/2 - size[0]/2, h/2 - size[1]/2)))

class app(tk.Tk):
    def __init__(self):
        tk.Tk.__init__(self)
        center(self, desired_width=500, desired_height=400)
        self.queue = queue.Queue()
        self.run_func_button = tk.Button(self,
                                         text="Run Function",
                                         font=("Calibri", 20, "bold"),
                                         command=self.run_func)
        self.run_func_button.pack()

    def run_func(self):
        threading.Thread(target=self.count_to_1500).start()
        threading.Thread(target=self.count_to_1500_with_queue, args=(self.queue,)).start()
        self.check_queue()

    def count_to_1500(self):
        for i in range(10):
            print('1:', i)
            time.sleep(0.2)

    def count_to_1500_with_queue(self, queue):
        for i in range(10):
            print('put:', i)
            queue.put(i)
            time.sleep(1)
        queue.put('last')

    def check_queue(self):
        print("check queue")
        data = None
        if not self.queue.empty():
            data = self.queue.get()
            print('get:', data)
        if data != 'last':
            self.after(200, self.check_queue)

app_start = app()
app_start.mainloop()
Thread.start takes no parameters: https://docs.python.org/3/library/threading.html
The correct way to use a Thread is:
# Will call func(*args, **kwargs)
t = threading.Thread(target=func, args=(), kwargs={})
t.start()
t.join()
The join is important. Without it you will leave dangling threads behind, and non-daemon threads that are still running will also prevent your app from shutting down cleanly.
Another pattern is to use a daemon thread, which processes a queue. daemon threads are automatically killed when the program exits.
import threading
import Queue  # Python 2; on Python 3 use "import queue as Queue"

def worker(q):
    while True:
        try:
            f = q.get()
            q.task_done()
            if f is None:
                return
            f()
        except Exception:
            import traceback
            traceback.print_exc()

q = Queue.Queue()
t = threading.Thread(target=worker, args=(q,))
t.daemon = True
t.start()

# f is a no-arg function to be executed
q.put(f)

# Call at shutdown
q.join()
To run several tasks at the same time, start many threads.
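For instance, a minimal sketch of that (the task function and the thread count are just illustrative):

import threading

def task(n):
    print("task", n, "running")

# Start several threads, then join them all so we only continue
# once every task has finished.
threads = [threading.Thread(target=task, args=(i,)) for i in range(4)]
for t in threads:
    t.start()
for t in threads:
    t.join()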
Yet another method, use multiprocessing.pool.ThreadPool
from multiprocessing.pool import ThreadPool
# Create at startup
pool = ThreadPool(8)
# For each threaded task
pool.apply_async(func, args, kwds)
# Call at shutdown
pool.close()
pool.join()
... which works, more or less, as the above.
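As a rough, self-contained sketch of that pattern (the work function and pool size are just illustrative):

from multiprocessing.pool import ThreadPool

def work(x):
    return x * x

pool = ThreadPool(4)
# apply_async returns an AsyncResult; get() blocks until that task is done
results = [pool.apply_async(work, (i,)) for i in range(10)]
print([r.get() for r in results])
pool.close()
pool.join()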
I recommend reading:
https://docs.python.org/2/library/multiprocessing.html#multiprocessing-programming
Given the following class:
from abc import ABCMeta, abstractmethod
from time import sleep
import threading
from threading import active_count, Thread

class ScraperPool(metaclass=ABCMeta):
    Queue = []
    ResultList = []

    def __init__(self, Queue, MaxNumWorkers=0, ItemsPerWorker=50):
        # Initialize attributes
        self.MaxNumWorkers = MaxNumWorkers
        self.ItemsPerWorker = ItemsPerWorker
        self.Queue = Queue  # For testing purposes.

    def initWorkerPool(self, PrintIDs=True):
        for w in range(self.NumWorkers()):
            Thread(target=self.worker, args=(w + 1, PrintIDs,)).start()
            sleep(1)  # Explicitly wait one second for this worker to start.

    def run(self):
        self.initWorkerPool()
        # Wait until all workers (i.e. threads) are done.
        while active_count() > 1:
            print("Active threads: " + str(active_count()))
            sleep(5)
        self.HandleResults()

    def worker(self, id, printID):
        if printID:
            print("Starting worker " + str(id) + ".")
        while len(self.Queue) > 0:
            self.scraperMethod()
        if printID:
            print("Worker " + str(id) + " is quitting.")
        # TODO: kill this thread.
        return

    def NumWorkers(self):
        return 1  # Simplified for testing purposes.

    @abstractmethod
    def scraperMethod(self):
        pass

class TestScraper(ScraperPool):
    def scraperMethod(self):
        # print("I am scraping.")
        # print("Scraping. Threads#: " + str(active_count()))
        temp_item = self.Queue[-1]
        self.Queue.pop()
        self.ResultList.append(temp_item)

    def HandleResults(self):
        print(self.ResultList)

ScraperPool.register(TestScraper)

scraper = TestScraper(Queue=["Jaap", "Piet"])
scraper.run()

print(threading.active_count())
# print(scraper.ResultList)
When all the threads are done, there's still one active thread - threading.active_count() on the last line gets me that number.
The active thread is <_MainThread(MainThread, started 12960)> - as printed with threading.enumerate().
Can I assume that all my threads are done when active_count() == 1?
Or can, for instance, imported modules start additional threads, so that my threads are actually already done while active_count() > 1 - which is also the condition for the loop I'm using in the run method?
You can assume that your threads are done when active_count() reaches 1. The problem is, if any other module creates a thread, you'll never get to 1. You should manage your threads explicitly.
Example: You can put the threads in a list and join them one at a time. The relevant changes to your code are:
def __init__(self, Queue, MaxNumWorkers=0, ItemsPerWorker=50):
    # Initialize attributes
    self.MaxNumWorkers = MaxNumWorkers
    self.ItemsPerWorker = ItemsPerWorker
    self.Queue = Queue  # For testing purposes.
    self.WorkerThreads = []

def initWorkerPool(self, PrintIDs=True):
    for w in range(self.NumWorkers()):
        thread = Thread(target=self.worker, args=(w + 1, PrintIDs,))
        self.WorkerThreads.append(thread)
        thread.start()
        sleep(1)  # Explicitly wait one second for this worker to start.

def run(self):
    self.initWorkerPool()
    # Wait until all workers (i.e. threads) are done. Waiting in order,
    # so some threads further in the list may finish first, but we
    # will get to all of them eventually.
    while self.WorkerThreads:
        self.WorkerThreads.pop(0).join()
    self.HandleResults()
According to the docs, active_count() includes the main thread, so if you're at 1 you're most likely done, but if something else in your program starts new threads then you may be done before active_count() hits 1.
I would recommend implementing an explicit join method on your ScraperPool: keep track of your workers and explicitly join them to the main thread when needed, instead of checking whether you're done with active_count() calls.
Also remember about the GIL...
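For example, a minimal sketch of what such an explicit join could look like on the question's ScraperPool (the workers attribute and join_workers name are illustrative, not from the original code):

# Sketch only: track the worker threads and join them explicitly.
def initWorkerPool(self, PrintIDs=True):
    self.workers = []                     # hypothetical attribute
    for w in range(self.NumWorkers()):
        t = Thread(target=self.worker, args=(w + 1, PrintIDs))
        t.start()
        self.workers.append(t)

def join_workers(self):                   # hypothetical helper
    for t in self.workers:
        t.join()                          # blocks until that worker finishes

def run(self):
    self.initWorkerPool()
    self.join_workers()                   # no active_count() polling needed
    self.HandleResults()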
I have a Python GUI program that needs to do the same task in several threads. The problem is that the threads I start don't execute in parallel but sequentially: the first one runs and finishes, then the second one, and so on. I want them to run independently.
The main components are:
1. Menu (view)
2. ProcesStarter (controller)
3. Process (controller)
The Menu is where you click on the "Start" button which calls a function at ProcesStarter.
The ProcesStarter creates objects of Process and threads, and starts all threads in a for-loop.
Menu:
class VotingFrame(BaseFrame):
    def create_widgets(self):
        self.start_process = tk.Button(root, text="Start Process", command=lambda: self.start_process())
        self.start_process.grid(row=3, column=0, sticky=tk.W)

    def start_process(self):
        procesor = XProcesStarter()
        procesor_thread = Thread(target=procesor.start_process())
        procesor_thread.start()
ProcesStarter:
class XProcesStarter:
    def start_process(self):
        print "starting new process..."
        # thread count
        thread_count = self.get_thread_count()
        # initialize Process objects with data, and start threads
        for i in range(thread_count):
            vote_process = XProcess(self.get_proxy_list(), self.get_url())
            t = Thread(target=vote_process.start_process())
            t.start()
Process:
class XProcess():
    def __init__(self, proxy_list, url, browser_show=False):
        # init code

    def start_process(self):
        # code for process
When I press the "Start Process" button in the GUI, the GUI is locked until both threads finish execution.
The idea is that threads should work in the background and work in parallel.
you call procesor.start_process() immediately when specifying it as the target of the Thread:
#use this
procesor_thread = Thread(target=procesor.start_process)
#not this
procesor_thread = Thread(target=procesor.start_process())
# this is called right away ^
If you call it right away, it returns None, which is a valid target for Thread (it just does nothing). That is why everything happens sequentially: the real work runs in the main thread and the started threads do nothing.
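A tiny sketch of the difference (work is just an illustrative function):

import threading
import time

def work():
    time.sleep(1)
    print("work done")

# Correct: pass the function object; it runs in the new thread.
threading.Thread(target=work).start()

# Wrong: work() runs right here in the calling thread, and the new
# thread's target becomes its return value (None), so it does nothing.
threading.Thread(target=work()).start()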
One way to use a class as the target of a thread is to use the class as the target, and the arguments to the constructor as args.
from threading import Thread
from time import sleep
from random import randint

class XProcesStarter:
    def __init__(self, thread_count):
        print("starting new process...")
        self._i = 0
        for i in range(thread_count):
            t = Thread(
                target=XProcess,
                args=(self.get_proxy_list(), self.get_url())
            )
            t.start()

    def get_proxy_list(self):
        self._i += 1
        return "Proxy list #%s" % self._i

    def get_url(self):
        self._i += 1
        return "URL #%d" % self._i

class XProcess():
    def __init__(self, proxy_list, url, browser_show=False):
        r = 0.001 * randint(1, 5000)
        sleep(r)
        print(proxy_list)
        print(url)

def main():
    t = Thread(target=XProcesStarter, args=(4,))
    t.start()

if __name__ == '__main__':
    main()
This code runs in Python 2 and Python 3.
The reason is that the target of a Thread object must be a callable (search for "callable" and "__call__" in the Python documentation for a complete explanation).
EDIT: The other way has been explained in other people's answers (see Tadhg McDonald-Jensen).
I think your issue is that in both places where you start threads, you're actually calling the method you want to pass as the target to the thread. That runs its code in the main thread (and then tries to start the new thread on the return value, if any, once it's done).
Try:
procesor_thread = Thread(target=procesor.start_process) # no () after start_process
And:
t = Thread(target=vote_process.start_process) # no () here either
I have a background Process (using Process from multiprocessing) that pushes objects to my GUI, but this background process keeps locking up the GUI and the pushed changes are never displayed. The objects are being put into my queue, but the update method in my GUI isn't being called regularly. What can I do to make the GUI update more regularly? My GUI is written in Tkinter.
My background process has an infinite loop within it because I always need to keep reading the USB port for more data, so basically my code looks like this:
TracerAccess.py
import usb
from types import *
import sys
from multiprocessing import Process, Queue
import time

__idVendor__ = 0xFFFF
__idProduct__ = 0xFFFF
END_POINT = 0x82

def __printHEXList__(list):
    print ' '.join('%02x' % b for b in list)

def checkDeviceConnected():
    dev = usb.core.find(idVendor=__idVendor__, idProduct=__idProduct__)
    if dev is None:
        return False
    else:
        return True

class LowLevelAccess():
    def __init__(self):
        self.rawIn = []
        self.tracer = usb.core.find(idVendor=__idVendor__, idProduct=__idProduct__)
        if self.tracer is None:
            raise ValueError("Device not connected")
        self.tracer.set_configuration()

    def readUSB(self):
        """
        This method reads the USB data from the simtracer.
        """
        try:
            tmp = self.tracer.read(END_POINT, 10000, None, 100000).tolist()
            while(self.checkForEmptyData(tmp)):
                tmp = self.tracer.read(END_POINT, 10000, None, 100000).tolist()
            self.rawIn = tmp
        except:
            time.sleep(1)
            self.readUSB()

    def checkForEmptyData(self, raw):
        if(len(raw) == 10 or raw[10] is 0x60 or len(raw) == 11):
            return True
        else:
            return False

class DataAbstraction:
    def __init__(self, queue):
        self.queue = queue
        self.lowLevel = LowLevelAccess()

    def readInput(self):
        while True:
            self.lowLevel.readUSB()
            raw = self.lowLevel.rawIn
            self.queue.put(raw)
ui.py
from Tkinter import *
import time
import TracerAccess as io
from multiprocessing import Process, Queue
from Queue import Empty
from math import ceil

def findNumberOfLines(message):
    lines = message.split("\n")
    return len(lines)

class Application(Frame):
    def addTextToRaw(self, text, changeColour=False, numberOfLines=0):
        self.rawText.config(state=NORMAL)
        if changeColour is True:
            self.rawText.insert(END, text, 'oddLine')
        else:
            self.rawText.insert(END, text)
        self.rawText.config(state=DISABLED)

    def updateRaw(self, text):
        if(self.numberOfData() % 2 is not 0):
            self.addTextToRaw(text, True)
        else:
            self.addTextToRaw(text)

    def startTrace(self):
        self.dataAbstraction = io.DataAbstraction(self.queue)
        self.splitProc = Process(target=self.dataAbstraction.readInput())
        self.stopButton.config(state="normal")
        self.startButton.config(state="disabled")
        self.splitProc.start()

    def pollQueue(self):
        try:
            data = self.queue.get(0)
            self.dataReturned.append(data)
            self.updateRaw(str(data).upper())
            self.rawText.tag_config("oddLine", background="#F3F6FA")
        except Empty:
            pass
        finally:
            try:
                if(self.splitProc.is_alive() is False):
                    self.stopButton.config(state="disabled")
                    self.startButton.config(state="normal")
            except AttributeError:
                pass
            self.master.after(10, self.pollQueue)

    def stopTrace(self):
        self.splitProc.join()
        self.stopButton.config(state="disabled")
        self.startButton.config(state="normal")

    def createWidgets(self):
        self.startButton = Button(self)
        self.startButton["text"] = "Start"
        self.startButton["command"] = self.startTrace
        self.startButton.grid(row=0, column=0)

        self.stopButton = Button(self)
        self.stopButton["text"] = "Stop"
        self.stopButton["command"] = self.stopTrace
        self.stopButton.config(state="disabled")
        self.stopButton.grid(row=0, column=1)

        self.rawText = Text(self, state=DISABLED, width=82)
        self.rawText.grid(row=1, columnspan=4)

    def __init__(self, master):
        Frame.__init__(self, master)
        self.queue = Queue()
        self.master.after(10, self.pollQueue)
        self.pack()
        self.dataReturned = []
        self.createWidgets()

    def numberOfData(self):
        return len(self.dataReturned)
Main.py
from Tkinter import Tk
import ui as ui

if __name__ == "__main__":
    root = Tk()
    root.columnconfigure(0, weight=1)
    app = ui.Application(root)
    app.mainloop()
So the background process never finishes; however, when I end the process, the UI starts to be displayed before closing. The problem may have appeared because of my design for the TracerAccess.py module, as I developed it after moving straight from Java with little to no design experience in Python.
What multiprocessing.Process does, internally, is really a fork(), which effectively duplicates your process. You can perhaps visualize it as:
/ ["background" process] -------------\
[main process] --+ +-- [main process]
\ [main process continued] -----------/
p.join() attempts to "join" the two processes back to one. This effectively means: waiting until the background process is finished. Here's the actual (full) code from the .join() function:
def join(self, timeout=None):
    '''
    Wait until child process terminates
    '''
    assert self._parent_pid == os.getpid(), 'can only join a child process'
    assert self._popen is not None, 'can only join a started process'
    res = self._popen.wait(timeout)
    if res is not None:
        _current_process._children.discard(self)
Note how self._popen.wait is called.
This is obviously not what you want.
What you probably want, in the context of Tkinter, is to use the Tk event loop, for example like this (Python 3, but the concept also works on Python 2):
from multiprocessing import Process, Queue
import time, tkinter, queue, random, sys

class Test:
    def __init__(self, root):
        self.root = root
        txt = tkinter.Text(root)
        txt.pack()

        self.q = Queue()
        p = Process(target=self.bg)
        p.start()

        self.checkqueue()
        print('__init__ done', end=' ')

    def bg(self):
        print('Starting bg loop', end=' ')
        n = 42
        while True:
            # Burn some CPU cycles
            (int(random.random() * 199999)) ** (int(random.random() * 1999999))
            n += 1
            self.q.put(n)
        print('bg task finished', end=' ')

    def checkqueue(self):
        try:
            print(self.q.get_nowait(), end=' ')
        except queue.Empty:
            print('Queue empty', end=' ')
        sys.stdout.flush()

        # Run myself again after 1 second
        self.root.after(1000, self.checkqueue)

root = tkinter.Tk()
Test(root)
root.mainloop()
You don't call .join(); instead you use the .after() method, which schedules a function to run after n milliseconds (if you've ever used JavaScript, think setTimeout()), to read the queue.
Depending on the actual content of your bg() function, you may not even need multiprocessing at all; just scheduling a function with .after() may be enough.
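For example, a minimal sketch of doing periodic work with nothing but after() (Python 3 names; the 100 ms interval is arbitrary):

import tkinter as tk

root = tk.Tk()
label = tk.Label(root, text="0")
label.pack()

count = [0]

def do_chunk():
    # Do a small piece of work, update the GUI, then reschedule ourselves.
    count[0] += 1
    label.config(text=str(count[0]))
    root.after(100, do_chunk)  # milliseconds

root.after(100, do_chunk)
root.mainloop()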
Also see:
http://tkinter.unpythonic.net/wiki/UsingTheEventLoop
If I have two threading.Event() objects, and wish to sleep until either one of them is set, is there an efficient way to do that in python? Clearly I could do something with polling/timeouts, but I would like to really have the thread sleep until one is set, akin to how select is used for file descriptors.
So in the following implementation, what would an efficient non-polling implementation of wait_for_either look like?
a = threading.Event()
b = threading.Event()
wait_for_either(a, b)
Here is a non-polling non-excessive thread solution: modify the existing Events to fire a callback whenever they change, and handle setting a new event in that callback:
import threading

def or_set(self):
    self._set()
    self.changed()

def or_clear(self):
    self._clear()
    self.changed()

def orify(e, changed_callback):
    e._set = e.set
    e._clear = e.clear
    e.changed = changed_callback
    e.set = lambda: or_set(e)
    e.clear = lambda: or_clear(e)

def OrEvent(*events):
    or_event = threading.Event()

    def changed():
        bools = [e.is_set() for e in events]
        if any(bools):
            or_event.set()
        else:
            or_event.clear()

    for e in events:
        orify(e, changed)
    changed()
    return or_event
Sample usage:
def wait_on(name, e):
    print "Waiting on %s..." % (name,)
    e.wait()
    print "%s fired!" % (name,)

def test():
    import time
    e1 = threading.Event()
    e2 = threading.Event()
    or_e = OrEvent(e1, e2)

    threading.Thread(target=wait_on, args=('e1', e1)).start()
    time.sleep(0.05)
    threading.Thread(target=wait_on, args=('e2', e2)).start()
    time.sleep(0.05)
    threading.Thread(target=wait_on, args=('or_e', or_e)).start()
    time.sleep(0.05)

    print "Firing e1 in 2 seconds..."
    time.sleep(2)
    e1.set()
    time.sleep(0.05)

    print "Firing e2 in 2 seconds..."
    time.sleep(2)
    e2.set()
    time.sleep(0.05)
The result of which was:
Waiting on e1...
Waiting on e2...
Waiting on or_e...
Firing e1 in 2 seconds...
e1 fired!or_e fired!
Firing e2 in 2 seconds...
e2 fired!
This should be thread-safe. Any comments are welcome.
EDIT: Oh and here is your wait_for_either function, though the way I wrote the code, it's best to make and pass around an or_event. Note that the or_event shouldn't be set or cleared manually.
def wait_for_either(e1, e2):
    OrEvent(e1, e2).wait()
I think the standard library provides a pretty canonical solution to this problem that I don't see brought up in this question: condition variables. You have your main thread wait on a condition variable, and poll the set of events each time it is notified. It is only notified when one of the events is updated, so there is no wasteful polling. Here is a Python 3 example:
from threading import Thread, Event, Condition
from time import sleep
from random import random

event1 = Event()
event2 = Event()
cond = Condition()

def thread_func(event, i):
    delay = random()
    print("Thread {} sleeping for {}s".format(i, delay))
    sleep(delay)
    event.set()
    with cond:
        cond.notify()
    print("Thread {} done".format(i))

with cond:
    Thread(target=thread_func, args=(event1, 1)).start()
    Thread(target=thread_func, args=(event2, 2)).start()
    print("Threads started")
    while not (event1.is_set() or event2.is_set()):
        print("Entering cond.wait")
        cond.wait()
        print("Exited cond.wait ({}, {})".format(event1.is_set(), event2.is_set()))
print("Main thread done")
Example output:
Thread 1 sleeping for 0.31569427100177794s
Thread 2 sleeping for 0.486548134317051s
Threads started
Entering cond.wait
Thread 1 done
Exited cond.wait (True, False)
Main thread done
Thread 2 done
Note that with no extra threads or unnecessary polling, you can wait for an arbitrary predicate to become true (e.g. for any particular subset of the events to be set). There's also a wait_for wrapper for the while not pred: cond.wait() pattern, which can make your code a bit easier to read.
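For instance, the waiting loop above could be collapsed with wait_for roughly like this (a sketch, not part of the original answer):

from threading import Condition, Event, Thread
import time

event1, event2 = Event(), Event()
cond = Condition()

def fire(ev, delay):
    time.sleep(delay)
    ev.set()
    with cond:
        cond.notify()

Thread(target=fire, args=(event1, 1)).start()
Thread(target=fire, args=(event2, 2)).start()

with cond:
    # wait_for re-checks the predicate each time the condition is notified
    cond.wait_for(lambda: event1.is_set() or event2.is_set())
print("at least one event is set")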
One solution (with polling) would be to do sequential waits on each Event in a loop
def wait_for_either(a, b):
    while True:
        if a.wait(tunable_timeout):
            break
        if b.wait(tunable_timeout):
            break
I think that if you tune the timeout well enough the results would be OK.
The best non-polling approach I can think of is to wait for each one in a different thread and set a shared Event, which the main thread then waits on.
def repeat_trigger(waiter, trigger):
    waiter.wait()
    trigger.set()

def wait_for_either(a, b):
    trigger = threading.Event()
    ta = threading.Thread(target=repeat_trigger, args=(a, trigger))
    tb = threading.Thread(target=repeat_trigger, args=(b, trigger))
    ta.start()
    tb.start()
    # Now do the union waiting
    trigger.wait()
Pretty interesting, so I wrote an OOP version of the previous solution:
class EventUnion(object):
    """Register Event objects and wait for release when any of them is set"""
    def __init__(self, ev_list=None):
        self._trigger = Event()
        if ev_list:
            # Make a list of threads, one for each Event
            self._t_list = [
                Thread(target=self._triggerer, args=(ev,))
                for ev in ev_list
            ]
        else:
            self._t_list = []

    def register(self, ev):
        """Register a new Event"""
        self._t_list.append(Thread(target=self._triggerer, args=(ev,)))

    def wait(self, timeout=None):
        """Start waiting until any one of the registered Events is set"""
        # Start all the threads
        for t in self._t_list:
            t.start()
        # Now do the union waiting
        return self._trigger.wait(timeout)

    def _triggerer(self, ev):
        ev.wait()
        self._trigger.set()
This is an old question, but I hope this helps someone coming from Google.
The accepted answer is fairly old and will cause an infinite loop for twice-"orified" events.
Here is an implementation using concurrent.futures
import concurrent.futures
from concurrent.futures import ThreadPoolExecutor

def wait_for_either(events, timeout=None, t_pool=None):
    '''blocks until one of the events gets set

    PARAMETERS
    events (list): list of threading.Event objects
    timeout (float): timeout for events (used for polling)
    t_pool (concurrent.futures.ThreadPoolExecutor): optional
    '''
    if any(event.is_set() for event in events):
        # sanity check
        pass
    else:
        t_pool = t_pool or ThreadPoolExecutor(max_workers=len(events))
        tasks = []
        for event in events:
            tasks.append(t_pool.submit(event.wait))
        concurrent.futures.wait(tasks, timeout=timeout, return_when='FIRST_COMPLETED')
        # cleanup
        for task in tasks:
            try:
                task.result(timeout=0)
            except concurrent.futures.TimeoutError:
                pass
Testing the function
import threading
import time
from datetime import datetime, timedelta

def bomb(myevent, sleep_s):
    '''set event after sleep_s seconds'''
    with lock:
        print('explodes in ', datetime.now() + timedelta(seconds=sleep_s))
    time.sleep(sleep_s)
    myevent.set()
    with lock:
        print('BOOM!')

lock = threading.RLock()  # so prints don't get jumbled
a = threading.Event()
b = threading.Event()
t_pool = ThreadPoolExecutor(max_workers=2)

threading.Thread(target=bomb, args=(a, 5), daemon=True).start()
threading.Thread(target=bomb, args=(b, 120), daemon=True).start()

with lock:
    print('1 second timeout, no ThreadPool', datetime.now())
wait_for_either([a, b], timeout=1)
with lock:
    print('wait_event_or done', datetime.now())

print('=' * 15)

with lock:
    print('wait for event1', datetime.now())
wait_for_either([a, b], t_pool=t_pool)
with lock:
    print('wait_event_or done', datetime.now())
Starting extra threads seems a clear solution, though not very efficient.
The function wait_events below will block until any one of the events is set.
from threading import Thread, Event

def wait_events(*events):
    event_share = Event()

    def set_event_share(event):
        event.wait()
        event.clear()
        event_share.set()

    for event in events:
        Thread(target=set_event_share, args=(event,)).start()

    event_share.wait()
wait_events(event1, event2, event3)
Extending Claudiu's answer, where you can wait for either:
event 1 OR event 2
event 1 AND event 2
from threading import Thread, Event, _Event

class ConditionalEvent(_Event):
    def __init__(self, events_list, condition):
        _Event.__init__(self)
        self.event_list = events_list
        self.condition = condition
        for e in events_list:
            self._setup(e, self._state_changed)
        self._state_changed()

    def _state_changed(self):
        bools = [e.is_set() for e in self.event_list]
        if self.condition == 'or':
            if any(bools):
                self.set()
            else:
                self.clear()
        elif self.condition == 'and':
            if all(bools):
                self.set()
            else:
                self.clear()

    def _custom_set(self, e):
        e._set()
        e._state_changed()

    def _custom_clear(self, e):
        e._clear()
        e._state_changed()

    def _setup(self, e, changed_callback):
        e._set = e.set
        e._clear = e.clear
        e._state_changed = changed_callback
        e.set = lambda: self._custom_set(e)
        e.clear = lambda: self._custom_clear(e)
Example usage will be very similar to before:
import time
e1 = Event()
e2 = Event()
# Example to wait for triggering of event 1 OR event 2
or_e = ConditionalEvent([e1, e2], 'or')
# Example to wait for triggering of event 1 AND event 2
and_e = ConditionalEvent([e1, e2], 'and')
Not pretty, but you can use two additional threads to multiplex the events...
def wait_for_either(a, b):
    flag = threading.Event()  # fires when either event is set

    class Event_Waiter(threading.Thread):
        def __init__(self, event):
            threading.Thread.__init__(self)
            self.e = event

        def run(self):
            self.e.wait()
            flag.set()

    a_thread = Event_Waiter(a)
    b_thread = Event_Waiter(b)
    a_thread.start()
    b_thread.start()
    flag.wait()
Note, you may have to worry about accidentally getting both events if they arrive too quickly. The helper threads (a_thread and b_thread) should synchronize around setting flag, and the winner should then stop the other helper (possibly resetting that thread's event if it was consumed).
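One possible sketch of that synchronization (the winner_lock and waiter names are illustrative; the losing helper is simply left running as a daemon thread rather than killed, since Python threads can't be killed directly):

import threading

def wait_for_either(a, b):
    flag = threading.Event()
    winner_lock = threading.Lock()
    winner = []  # records which event fired first

    def waiter(name, event):
        event.wait()
        with winner_lock:
            if not winner:       # only the first arrival registers and fires
                winner.append(name)
                flag.set()

    threading.Thread(target=waiter, args=("a", a), daemon=True).start()
    threading.Thread(target=waiter, args=("b", b), daemon=True).start()
    flag.wait()
    return winner[0]             # "a" or "b"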
import logging
import threading
import time

logging.basicConfig(level=logging.DEBUG)  # so the debug messages are shown

def wait_for_event_timeout(*events):
    while not all([e.isSet() for e in events]):
        # Check to see if the event is set. Timeout 1 sec.
        ev_wait_bool = [e.wait(1) for e in events]
        # Process if all events are set. Change all to any to process if any event set
        if all(ev_wait_bool):
            logging.debug('processing event')
        else:
            logging.debug('doing other work')

e1 = threading.Event()
e2 = threading.Event()

t3 = threading.Thread(name='non-block-multi',
                      target=wait_for_event_timeout,
                      args=(e1, e2))
t3.start()

logging.debug('Waiting before calling Event.set()')
time.sleep(5)
e1.set()
time.sleep(10)
e2.set()
logging.debug('Event is set')