Close asyncio loop on KeyboardInterrupt - Run stop routine - python

I'm using python to create a script which runs and interacts with some processes simultaneously. For that I'm using asyncio to implement this parallelism. The main problem is how to run another cleanup routine when a KeyboardInterrupt or a SIGINT occurs.
Here's an example code I wrote to show the problem:
# Reproduction script from the question: two bash `sleep` subprocesses run
# concurrently under asyncio; the (unsolved) goal is to run BackgroundTask.stop
# on SIGINT / KeyboardInterrupt. (Indentation restored; logic unchanged.)
import asyncio
import logging
import signal
from time import sleep


class Process:
    """Thin wrapper around a bash subprocess."""

    async def start(self, arguments):
        # Spawn bash with the given arguments and wait for it to exit.
        self._process = await asyncio.create_subprocess_exec("/bin/bash", *arguments)
        return await self._process.wait()

    async def stop(self):
        # Send SIGTERM to the subprocess (does not wait for it to die).
        self._process.terminate()


class BackgroundTask:
    """Runs two processes in parallel and defines an ordered shutdown."""

    async def start(self):
        # Very important process which needs to run while process 2 is running
        self._process1 = Process()
        self._process1_task = asyncio.create_task(self._process1.start(["-c", "sleep 100"]))
        self._process2 = Process()
        self._process2_task = asyncio.create_task(self._process2.start(["-c", "sleep 50"]))
        await asyncio.wait([self._process1_task, self._process2_task], return_when=asyncio.ALL_COMPLETED)

    async def stop(self):
        # Stop process
        await self._process1.stop()
        # Call a cleanup process which cleans up process 1
        cleanup_process = Process()
        await cleanup_process.start(["-c", "sleep 10"])
        # After that we can stop our second process
        await self._process2.stop()


backgroundTask = BackgroundTask()


async def main():
    await asyncio.create_task(backgroundTask.start())


logging.basicConfig(level=logging.DEBUG)
asyncio.run(main(), debug=True)
This code creates a background task which starts two processes (in this example two bash sleep commands) and waits for them to finish. This works fine and both commands run in parallel.
The main problem is the stop routine. I'd like to run the stop method when the program receives a SIGINT or KeyboardInterrupt, which first stops the process1, then starts a cleanup method and stops process2 afterwards. This is necessary because the cleanup command depends on process2.
What I've tried (instead of the asyncio.run() and the async main):
# The asker's attempted replacement for asyncio.run(): catch KeyboardInterrupt
# and run the stop routine on the (already interrupted) loop.
# As the question text explains, this cancels the running tasks instead of
# shutting them down in order. (Indentation restored; logic unchanged.)
def main():
    try:
        asyncio.get_event_loop().run_until_complete(backgroundTask.start())
    except KeyboardInterrupt:
        # Reuse the same loop to run the ordered shutdown.
        asyncio.get_event_loop().run_until_complete(backgroundTask.stop())


main()
This of course doesn't work as expected, because as soon as a KeyboardInterrupt exception occurs the backgroundTask.start Task is canceled and the backgroundTask.stop is started in the main loop, so my processes are canceled and can't be stopped properly.
So is there a way to detect the KeyboardInterrupt without canceling the current main loop and run my backgroundTask.stop method instead?

You want to add a signal handler as shown in this example in the docs:
# Example from the asyncio documentation: register SIGINT/SIGTERM handlers on
# the running loop so a signal stops the loop instead of raising
# KeyboardInterrupt mid-task. (Indentation restored; logic unchanged.)
import asyncio
import functools
import os
import signal


def ask_exit(signame, loop):
    """Signal handler: report the signal name and stop the event loop."""
    print("got signal %s: exit" % signame)
    loop.stop()


async def main():
    loop = asyncio.get_running_loop()

    # Bind one handler per signal; partial() freezes the signal name and loop.
    for signame in {'SIGINT', 'SIGTERM'}:
        loop.add_signal_handler(
            getattr(signal, signame),
            functools.partial(ask_exit, signame, loop))

    await asyncio.sleep(3600)


print("Event loop running for 1 hour, press Ctrl+C to interrupt.")
print(f"pid {os.getpid()}: send SIGINT or SIGTERM to exit.")

asyncio.run(main())
That's a bit of an overcomplicated/outdated example though, consider it more like this (your coroutine code goes where the asyncio.sleep call is):
# Minimal version of the docs example: stop the loop directly on SIGINT or
# SIGTERM. (Indentation restored; logic unchanged.)
import asyncio
from signal import SIGINT, SIGTERM


async def main():
    loop = asyncio.get_running_loop()
    # loop.stop is already a no-argument callable, so no partial() is needed.
    for signal_enum in [SIGINT, SIGTERM]:
        loop.add_signal_handler(signal_enum, loop.stop)
    await asyncio.sleep(3600)  # Your code here


asyncio.run(main())
At this point a Ctrl + C will break the loop and raise a RuntimeError, which you can catch by putting the asyncio.run call in a try/except block like so:
# Stopping the loop from a signal handler makes asyncio.run raise RuntimeError
# ("Event loop stopped before Future completed."); match on the message so any
# other RuntimeError still propagates. (Indentation restored; logic unchanged.)
try:
    asyncio.run(main())
except RuntimeError as exc:
    expected_msg = "Event loop stopped before Future completed."
    if exc.args and exc.args[0] == expected_msg:
        print("Bye")
    else:
        raise
That's not very satisfying though (what if something else caused the same error?), so I'd prefer to raise a distinct error. Also, if you're exiting on the command line, the proper thing to do is to return the proper exit code (in fact, the code in the example just uses the name, but it's actually an IntEnum with that numeric exit code in it!)
# Signal handling with a dedicated exception: raising SignalHaltError (a
# SystemExit subclass) from the handler exits with the signal's numeric value
# as the process exit code.
# Fixes vs. the pasted text: indentation restored, the mangled `#property`
# decorator restored to `@property`, and the run call moved under a
# `__main__` guard so the module can be imported without side effects.
import asyncio
from functools import partial
from signal import SIGINT, SIGTERM
from sys import stderr


class SignalHaltError(SystemExit):
    """SystemExit that records which signal caused the exit."""

    def __init__(self, signal_enum):
        self.signal_enum = signal_enum
        print(repr(self), file=stderr)
        # SystemExit's argument becomes the process exit code.
        super().__init__(self.exit_code)

    @property
    def exit_code(self):
        # Signals is an IntEnum: SIGINT.value == 2, SIGTERM.value == 15, etc.
        return self.signal_enum.value

    def __repr__(self):
        return f"\nExitted due to {self.signal_enum.name}"


def immediate_exit(signal_enum, loop):
    """Signal handler: stop the loop, then raise the halt exception."""
    loop.stop()
    raise SignalHaltError(signal_enum=signal_enum)


async def main():
    loop = asyncio.get_running_loop()
    for signal_enum in [SIGINT, SIGTERM]:
        exit_func = partial(immediate_exit, signal_enum=signal_enum, loop=loop)
        loop.add_signal_handler(signal_enum, exit_func)
    await asyncio.sleep(3600)


if __name__ == "__main__":
    print("Event loop running for 1 hour, press Ctrl+C to interrupt.")
    asyncio.run(main())
Which when Ctrl + C'd out of gives:
python cancelling_original.py
⇣
Event loop running for 1 hour, press Ctrl+C to interrupt.
^C
Exitted due to SIGINT
echo $?
⇣
2
Now there's some code I'd be happy to serve! :^)
P.S. here it is with type annotations:
from __future__ import annotations
import asyncio
from asyncio.events import AbstractEventLoop
from functools import partial
from signal import Signals, SIGINT, SIGTERM
from sys import stderr
from typing import Coroutine
class SignalHaltError(SystemExit):
def __init__(self, signal_enum: Signals):
self.signal_enum = signal_enum
print(repr(self), file=stderr)
super().__init__(self.exit_code)
#property
def exit_code(self) -> int:
return self.signal_enum.value
def __repr__(self) -> str:
return f"\nExitted due to {self.signal_enum.name}"
def immediate_exit(signal_enum: Signals, loop: AbstractEventLoop) -> None:
loop.stop()
raise SignalHaltError(signal_enum=signal_enum)
async def main() -> Coroutine:
loop = asyncio.get_running_loop()
for signal_enum in [SIGINT, SIGTERM]:
exit_func = partial(immediate_exit, signal_enum=signal_enum, loop=loop)
loop.add_signal_handler(signal_enum, exit_func)
return await asyncio.sleep(3600)
print("Event loop running for 1 hour, press Ctrl+C to interrupt.")
asyncio.run(main())
The advantage of a custom exception here is that you can then catch it specifically, and avoid the traceback being dumped to the screen
# Catch the custom SignalHaltError so a signal-triggered exit does not dump a
# traceback. Note: the pasted snippet had an `else: raise` clause here, but a
# bare `raise` with no active exception fails with a RuntimeError on normal
# completion — the success path should simply fall through.
try:
    asyncio.run(main())
except SignalHaltError as exc:
    # log.debug(exc)
    pass

Related

How to run a blocking task asynchronously with ProcessPoolExecutor and asyncio?

I'm trying to run a blocking task asynchronously with ProcessPoolExecutor (it works with ThreadPoolExecutor but I need ProcessPoolExecutor for a CPU-bound task). Here is my code:
# The question's failing script, indentation restored, behavior unchanged.
# The deliberate flaw (explained in the answer below): blocking_task is defined
# inside main(), so worker processes cannot import it by name and the pool
# breaks with BrokenProcessPool. `global` does not fix this.
import asyncio
import time
from concurrent.futures import ProcessPoolExecutor


async def run_in_thread(task, *args):
    """Run `task(*args)` in a process pool without blocking the event loop."""
    with ProcessPoolExecutor() as process_pool:
        loop = asyncio.get_event_loop()
        result = await loop.run_in_executor(process_pool, task, *args)
        return result


async def main_task():
    # Heartbeat proving the event loop stays responsive.
    while True:
        await asyncio.sleep(1)
        print("ticker")


async def main():
    asyncio.create_task(main_task())

    # BUG (intentional, per the question): nested definition is invisible to
    # the worker processes spawned by ProcessPoolExecutor.
    global blocking_task

    def blocking_task():
        time.sleep(5)
        print("blocking task done!")

    await run_in_thread(blocking_task)


if __name__ == "__main__":
    asyncio.run(main())
And I get this error :
result = await loop.run_in_executor(process_pool, task, *args)
concurrent.futures.process.BrokenProcessPool: A process in the process pool was terminated abruptly while the future was running or pending.
I don't understand where is the issue, can someone please help me?
I'd also like to understand why it works with ThreadPoolExecutor but not ProcessPoolExecutor
I was expecting the code to print :
ticker
ticker
ticker
ticker
ticker
blocking task done!
Move the definition of blocking_task to the outer level of the module. As the script stands this function is invisible to other Processes. The code of the function isn't sent directly to the other Process, only its name. The other Process performs its own separate import of the script but the name isn't defined at the top level.
It's the same logic as if you tried to import this script into another script. Let's say this script is in a file named foo.py. After you do import foo, there is no function named foo.blocking_task so you would be unable to call it.
This is a little bit more clear if you looked at the whole traceback, instead of just the last line.
Incidentally, using the global statement in front of the function definition isn't the same thing as moving the definition to the top level. In your script the name blocking_task does not exist at module level until the main() function actually runs (which the secondary Process never does). In the working script below, the name blocking_task exists as soon as the module is imported.
# The answer's working script: blocking_task now lives at module level, so the
# worker processes can import it by name. (Indentation restored; logic
# unchanged.)
import asyncio
import time
from concurrent.futures import ProcessPoolExecutor


async def run_in_thread(task, *args):
    """Run `task(*args)` in a process pool without blocking the event loop."""
    with ProcessPoolExecutor() as process_pool:
        loop = asyncio.get_event_loop()
        result = await loop.run_in_executor(process_pool, task, *args)
        return result


async def main_task():
    # Heartbeat proving the event loop stays responsive.
    while True:
        await asyncio.sleep(1)
        print("ticker")


def blocking_task():
    # Module-level: picklable by name for the worker processes.
    time.sleep(5)
    print("blocking task done!")


async def main():
    asyncio.create_task(main_task())
    await run_in_thread(blocking_task)


if __name__ == "__main__":
    asyncio.run(main())
This prints exactly what you were expecting.

How do you get tkinter to work with asyncio?

How do you get tkinter to work with asyncio? My studies suggest this general question does resolve into the specific problem of getting tkinter to await a coroutine function.
Context
If tkinter's event loop is blocked the loop will freeze until the blocking function returns. If the event loop also happens to be running a GUI that will freeze as well. The traditional solution to this problem is to move any blocking code into a thread.
The new asyncio module is able to schedule threaded calls using the coroutine function asyncio.to_thread(func), which runs a blocking (non-coroutine) function in a separate thread. I gather this avoids the difficulties of writing correct threaded code.
Baseline: blocked.py
As a starting point I wrote a baseline program (See code below). It creates a tkinter event loop which attempts to
destroy itself and end the program after 2000ms. That attempt is thwarted by a blocking function which runs for 4s.
The program output is:
08:51:57: Program started.
08:51:58: blocking_func started.
08:52:02: blocking_func completed.
08:52:02: Tk event loop terminated.
08:52:02: Program ended.
Process finished with exit code 0
1st try: async_blocked.py
The blocking code has been refactored as a coroutine function so there are two event loops - tkinter's and asyncio's. The function blocking_io_handler is scheduled onto tkinter's event loop which runs it successfully. The coroutine function blocking_func is scheduled onto asyncio's loop where it starts successfully.
The problem is it doesn't start until after tkinter's event loop has terminated. Asyncio's loop was available throughout the execution of the coroutine function main so it was available when tk_root.mainloop() was executed. In spite of this asyncio was helpless because control was not yielded by an await statement during the execution of tk_root.mainloop. It had to wait for the await asyncio.sleep(3) statement which ran later and, by then, tkinter had stopped running.
At that time the await expression returns control to the async loop for three seconds — enough to start the four second blocking_func but not enough for it to finish.
08:38:22: Program started.
08:38:22: blocking_io_handler started.
08:38:22: blocking_io_handler completed.
08:38:24: Tk event loop terminated.
08:38:24: blocking_func started.
08:38:27: Program ended.
Process finished with exit code 0
2nd try: asyncth_blocked.py
This code replaces the function asyncio.create_task with the coroutine function asyncio.to_thread. This fails
with a runtime warning:
07:26:46: Program started.
07:26:47: blocking_io_handler started.
07:26:47: blocking_io_handler completed.
RuntimeWarning: coroutine 'to_thread' was never awaited
asyncio.to_thread(blocking_func)
RuntimeWarning: Enable tracemalloc to get the object allocation traceback
07:26:49: Tk event loop terminated.
07:26:49: Program ended.
> Process finished with exit code 0
3rd try: asyncth_blocked_2.py
asyncio.to_thread must be awaited because it is a coroutine function and not a regular function:
await asyncio.to_thread(blocking_func).
Since the await keyword is a syntax error inside a regular function, def blocking_io_handler has to be changed into a coroutine function: async def blocking_io_handler.
These changes are shown in asyncth_blocked_2.py which produces this output:
07:52:29: Program started.
RuntimeWarning:
coroutine 'blocking_io_handler' was never awaited
func(*args)
RuntimeWarning: Enable tracemalloc to get the object allocation traceback
07:52:31: Tk event loop terminated.
07:52:31: Program ended.
Process finished with exit code 0
Conclusion
For tkinter to work with asyncio the scheduled function call tk_root.after(0, blocking_io_handler) has to be somehow turned into a scheduled coroutine function call. This is the only way the asyncio loop will have a chance to run scheduled async
tasks.
Is it possible?
Code
"""blocked.py"""
import time
import tkinter as tk
def timestamped_msg(msg: str):
print(f"{time.strftime('%X')}: {msg}")
def blocking_func():
timestamped_msg('blocking_func started.')
time.sleep(4)
timestamped_msg('blocking_func completed.')
def main():
timestamped_msg('Program started.')
tk_root = tk.Tk()
tk_root.after(0, blocking_func)
tk_root.after(2000, tk_root.destroy)
tk_root.mainloop()
timestamped_msg('Tk event loop terminated.')
timestamped_msg('Program ended.')
if __name__ == '__main__':
main()
"""async_blocked.py"""
import asyncio
import time
import tkinter as tk
def timestamped_msg(msg: str):
print(f"{time.strftime('%X')}: {msg}")
async def blocking_func():
timestamped_msg('blocking_func started.')
await asyncio.sleep(4)
timestamped_msg('blocking_func completed.')
def blocking_io_handler():
timestamped_msg('blocking_io_handler started.')
asyncio.create_task(blocking_func())
timestamped_msg('blocking_io_handler completed.')
async def main():
timestamped_msg('Program started.')
tk_root = tk.Tk()
tk_root.after(0, blocking_io_handler)
tk_root.after(2000, tk_root.destroy)
tk_root.mainloop()
timestamped_msg('Tk event loop terminated.')
await asyncio.sleep(3)
timestamped_msg('Program ended.')
if __name__ == '__main__':
asyncio.run(main())
"""asyncth_blocked.py"""
import asyncio
import time
import tkinter as tk
def timestamped_msg(msg: str):
print(f"{time.strftime('%X')}: {msg}")
async def blocking_func():
timestamped_msg('blocking_func started.')
await asyncio.sleep(4)
timestamped_msg('blocking_func completed.')
def blocking_io_handler():
timestamped_msg('blocking_io_handler started.')
asyncio.to_thread(blocking_func)
timestamped_msg('blocking_io_handler completed.')
async def main():
timestamped_msg('Program started.')
tk_root = tk.Tk()
tk_root.after(0, blocking_io_handler)
tk_root.after(2000, tk_root.destroy)
tk_root.mainloop()
timestamped_msg('Tk event loop terminated.')
timestamped_msg('Program ended.')
if __name__ == '__main__':
asyncio.run(main())
"""asyncth_blocked_2.py"""
import asyncio
import time
import tkinter as tk
def timestamped_msg(msg: str):
print(f"{time.strftime('%X')}: {msg}")
async def blocking_func():
timestamped_msg('blocking_func started.')
await asyncio.sleep(4)
timestamped_msg('blocking_func completed.')
async def blocking_io_handler():
timestamped_msg('blocking_io_handler started.')
await asyncio.to_thread(blocking_func)
timestamped_msg('blocking_io_handler completed.')
async def main():
timestamped_msg('Program started.')
tk_root = tk.Tk()
tk_root.after(0, blocking_io_handler)
tk_root.after(2000, tk_root.destroy)
tk_root.mainloop()
timestamped_msg('Tk event loop terminated.')
timestamped_msg('Program ended.')
if __name__ == '__main__':
asyncio.run(main())
Tkinter's Problem with Blocking IO Calls
The statement asyncio.sleep(60) will block tkinter for a minute if both are running in the same thread.
Blocking coroutine functions cannot run in the same thread as tkinter.
Similarly, the statement time.sleep(60) will block both tkinter and asyncio for a minute if all three are running in the same
thread.
Blocking non-coroutine functions cannot run in the same thread as either tkinter or asyncio.
Sleep commands have been used here to simplify this example of the blocking problem. The principles shown are applicable to internet or database accesses.
Solution
A solution is to create three distinct environments and take care when moving data between them.
Environment 1 - Main Thread
This is Python's MainThread. It's where Python starts and Tkinter lives. No blocking code can be allowed in this environment.
Environment 2 - Asyncio's Thread
This is where asyncio and all its coroutine functions live. Blocking functions are only allowed if they are coroutine
functions.
Environment 3 - Multiple single use threads
This is where non-coroutine blocking functions run. Since these are capable of blocking each other each needs
its own thread.
Data
Data returned from blocking IO to tkinter should be returned in threadsafe queues using a producer/consumer pattern.
Arguments and return values should not be passed between environments using regular functions. Use the threadsafe calling protocols provided by Python as illustrated below.
Wrong code
func(*args, **kwargs)
return_value = func(*args, **kwargs)
print(*args, **kwargs)
Correct code
threading.Thread(func, *args, **kwargs).start()
The return_value is not directly available. Use a queue.
future = asyncio.run_coroutine_threadsafe(func(*args, **kwargs), loop)
return_value = future.result().
print: Use a threadsafe queue to move printable objects to a single print thread. (See the SafePrinter context manager in the code below).
The Polling Problem
With tkinter, asyncio, and threading all running together there are three event loops controlling different stuff. Bad things can
happen when they mix. For example threading's Queue.get() will block environment 1 where tkinter's loop is trying to
control events. In this particular case, Queue.get_nowait() has to be used with polling via tkinter's after command. See the code below for other examples of unusual polling of queues.
GUI
Console output
0.001s In Print Thread of 2 without a loop: The SafePrinter is open for output.
0.001s In MainThread of 2 without a loop --- main starting
0.001s In Asyncio Thread of 3 without a loop --- aio_main starting
0.001s In MainThread of 3 without a loop --- tk_main starting
0.305s In Asyncio Thread of 3 with a loop --- manage_aio_loop starting
0.350s In MainThread of 3 without a loop --- tk_callbacks starting
0.350s In MainThread of 3 without a loop --- tk_callback_consumer starting
0.350s In Asyncio Thread of 3 with a loop --- aio_blocker starting. block=3.1s.
0.350s In MainThread of 3 without a loop --- aio_exception_handler starting. block=3.1s
0.351s In MainThread of 3 without a loop --- aio_exception_handler starting. block=1.1s
0.351s In Asyncio Thread of 4 with a loop --- aio_blocker starting. block=1.1s.
0.351s In IO Block Thread (3.2s) of 4 without a loop --- io_exception_handler starting. block=3.2s.
0.351s In IO Block Thread (3.2s) of 4 without a loop --- io_blocker starting. block=3.2s.
0.351s In IO Block Thread (1.2s) of 5 without a loop --- io_exception_handler starting. block=1.2s.
0.351s In IO Block Thread (1.2s) of 5 without a loop --- io_blocker starting. block=1.2s.
0.351s In MainThread of 5 without a loop --- tk_callbacks ending - All blocking callbacks have been scheduled.
1.451s In Asyncio Thread of 5 with a loop --- aio_blocker ending. block=1.1s.
1.459s In MainThread of 5 without a loop --- aio_exception_handler ending. block=1.1s
1.555s In IO Block Thread (1.2s) of 5 without a loop --- io_blocker ending. block=1.2s.
1.555s In IO Block Thread (1.2s) of 5 without a loop --- io_exception_handler ending. block=1.2s.
3.450s In Asyncio Thread of 4 with a loop --- aio_blocker ending. block=3.1s.
3.474s In MainThread of 4 without a loop --- aio_exception_handler ending. block=3.1s
3.553s In IO Block Thread (3.2s) of 4 without a loop --- io_blocker ending. block=3.2s.
3.553s In IO Block Thread (3.2s) of 4 without a loop --- io_exception_handler ending. block=3.2s.
4.140s In MainThread of 3 without a loop --- tk_callback_consumer ending
4.140s In MainThread of 3 without a loop --- tk_main ending
4.141s In Asyncio Thread of 3 with a loop --- manage_aio_loop ending
4.141s In Asyncio Thread of 3 without a loop --- aio_main ending
4.141s In MainThread of 2 without a loop --- main ending
4.141s In Print Thread of 2 without a loop: The SafePrinter has closed.
Process finished with exit code 0
Code
""" tkinter_demo.py
Created with Python 3.10
"""
import asyncio
import concurrent.futures
import functools
import itertools
import queue
import sys
import threading
import time
import tkinter as tk
import tkinter.ttk as ttk
from collections.abc import Iterator
from contextlib import AbstractContextManager
from dataclasses import dataclass
from types import TracebackType
from typing import Optional, Type
# Global reference to loop allows access from different environments.
aio_loop: Optional[asyncio.AbstractEventLoop] = None
def io_blocker(task_id: int, tk_q: queue.Queue, block: float = 0) -> None:
""" Block the thread and put a 'Hello World' work package into Tkinter's work queue.
This is a producer for Tkinter's work queue. It will run in a special thread created solely for running this
function. The statement `time.sleep(block)` can be replaced with any non-awaitable blocking code.
Args:
task_id: Sequentially issued tkinter task number.
tk_q: tkinter's work queue.
block: block time
Returns:
Nothing. The work package is returned via the threadsafe tk_q.
"""
safeprint(f'io_blocker starting. {block=}s.')
time.sleep(block)
# Exceptions for testing handlers. Uncomment these to see what happens when exceptions are raised.
# raise IOError('Just testing an expected error.')
# raise ValueError('Just testing an unexpected error.')
work_package = f"Task #{task_id} {block}s: 'Hello Threading World'."
tk_q.put(work_package)
safeprint(f'io_blocker ending. {block=}s.')
def io_exception_handler(task_id: int, tk_q: queue.Queue, block: float = 0) -> None:
""" Exception handler for non-awaitable blocking callback.
It will run in a special thread created solely for running io_blocker.
Args:
task_id: Sequentially issued tkinter task number.
tk_q: tkinter's work queue.
block: block time
"""
safeprint(f'io_exception_handler starting. {block=}s.')
try:
io_blocker(task_id, tk_q, block)
except IOError as exc:
safeprint(f'io_exception_handler: {exc!r} was handled correctly. ')
finally:
safeprint(f'io_exception_handler ending. {block=}s.')
async def aio_blocker(task_id: int, tk_q: queue.Queue, block: float = 0) -> None:
""" Asynchronously block the thread and put a 'Hello World' work package into Tkinter's work queue.
This is a producer for Tkinter's work queue. It will run in the same thread as the asyncio loop. The statement
`await asyncio.sleep(block)` can be replaced with any awaitable blocking code.
Args:
task_id: Sequentially issued tkinter task number.
tk_q: tkinter's work queue.
block: block time
Returns:
Nothing. The work package is returned via the threadsafe tk_q.
"""
safeprint(f'aio_blocker starting. {block=}s.')
await asyncio.sleep(block)
# Exceptions for testing handlers. Uncomment these to see what happens when exceptions are raised.
# raise IOError('Just testing an expected error.')
# raise ValueError('Just testing an unexpected error.')
work_package = f"Task #{task_id} {block}s: 'Hello Asynchronous World'."
# Put the work package into the tkinter's work queue.
while True:
try:
# Asyncio can't wait for the thread blocking `put` method…
tk_q.put_nowait(work_package)
except queue.Full:
# Give control back to asyncio's loop.
await asyncio.sleep(0)
else:
# The work package has been placed in the queue so we're done.
break
safeprint(f'aio_blocker ending. {block=}s.')
def aio_exception_handler(mainframe: ttk.Frame, future: concurrent.futures.Future, block: float,
first_call: bool = True) -> None:
""" Exception handler for future coroutine callbacks.
This non-coroutine function uses tkinter's event loop to wait for the future to finish.
It runs in the Main Thread.
Args:
mainframe: The after method of this object is used to poll this function.
future: The future running the future coroutine callback.
block: The block time parameter used to identify which future coroutine callback is being reported.
first_call: If True will cause an opening line to be printed on stdout.
"""
if first_call:
safeprint(f'aio_exception_handler starting. {block=}s')
poll_interval = 100 # milliseconds
try:
# Python will not raise exceptions during future execution until `future.result` is called. A zero timeout is
# required to avoid blocking the thread.
future.result(0)
# If the future hasn't completed, reschedule this function on tkinter's event loop.
except concurrent.futures.TimeoutError:
mainframe.after(poll_interval, functools.partial(aio_exception_handler, mainframe, future, block,
first_call=False))
# Handle an expected error.
except IOError as exc:
safeprint(f'aio_exception_handler: {exc!r} was handled correctly. ')
else:
safeprint(f'aio_exception_handler ending. {block=}s')
def tk_callback_consumer(tk_q: queue.Queue, mainframe: ttk.Frame, row_itr: Iterator):
""" Display queued 'Hello world' messages in the Tkinter window.
This is the consumer for Tkinter's work queue. It runs in the Main Thread. After starting, it runs
continuously until the GUI is closed by the user.
"""
# Poll continuously while queue has work needing processing.
poll_interval = 0
try:
# Tkinter can't wait for the thread blocking `get` method…
work_package = tk_q.get_nowait()
except queue.Empty:
# …so be prepared for an empty queue and slow the polling rate.
poll_interval = 40
else:
# Process a work package.
label = ttk.Label(mainframe, text=work_package)
label.grid(column=0, row=(next(row_itr)), sticky='w', padx=10)
finally:
# Have tkinter call this function again after the poll interval.
mainframe.after(poll_interval, functools.partial(tk_callback_consumer, tk_q, mainframe, row_itr))
def tk_callbacks(mainframe: ttk.Frame, row_itr: Iterator):
""" Set up 'Hello world' callbacks.
This runs in the Main Thread.
Args:
mainframe: The mainframe of the GUI used for displaying results from the work queue.
row_itr: A generator of line numbers for displaying items from the work queue.
"""
safeprint('tk_callbacks starting')
task_id_itr = itertools.count(1)
# Create the job queue and start its consumer.
tk_q = queue.Queue()
safeprint('tk_callback_consumer starting')
tk_callback_consumer(tk_q, mainframe, row_itr)
# Schedule the asyncio blocker.
for block in [3.1, 1.1]:
# This is a concurrent.futures.Future not an asyncio.Future because it isn't threadsafe. Also,
# it doesn't have a wait with timeout which we shall need.
task_id = next(task_id_itr)
future = asyncio.run_coroutine_threadsafe(aio_blocker(task_id, tk_q, block), aio_loop)
# Can't use Future.add_done_callback here. It doesn't return until the future is done and that would block
# tkinter's event loop.
aio_exception_handler(mainframe, future, block)
# Run the thread blocker.
for block in [3.2, 1.2]:
task_id = next(task_id_itr)
threading.Thread(target=io_exception_handler, args=(task_id, tk_q, block),
name=f'IO Block Thread ({block}s)').start()
safeprint('tk_callbacks ending - All blocking callbacks have been scheduled.\n')
def tk_main():
""" Run tkinter.
This runs in the Main Thread.
"""
safeprint('tk_main starting\n')
row_itr = itertools.count()
# Create the Tk root and mainframe.
root = tk.Tk()
mainframe = ttk.Frame(root, padding="15 15 15 15")
mainframe.grid(column=0, row=0)
# Add a close button
button = ttk.Button(mainframe, text='Shutdown', command=root.destroy)
button.grid(column=0, row=next(row_itr), sticky='w')
# Add an information widget.
label = ttk.Label(mainframe, text=f'\nWelcome to hello_world*4.py.\n')
label.grid(column=0, row=next(row_itr), sticky='w')
# Schedule the 'Hello World' callbacks
mainframe.after(0, functools.partial(tk_callbacks, mainframe, row_itr))
# The asyncio loop must start before the tkinter event loop.
while not aio_loop:
time.sleep(0)
root.mainloop()
safeprint(' ', timestamp=False)
safeprint('tk_callback_consumer ending')
safeprint('tk_main ending')
async def manage_aio_loop(aio_initiate_shutdown: threading.Event):
""" Run the asyncio loop.
This provides an always available asyncio service for tkinter to make any number of simultaneous blocking IO
calls. 'Any number' includes zero.
This runs in Asyncio's thread and in asyncio's loop.
"""
safeprint('manage_aio_loop starting')
# Communicate the asyncio loop status to tkinter via a global variable.
global aio_loop
aio_loop = asyncio.get_running_loop()
# If there are no awaitables left in the queue asyncio will close.
# The usual wait command — Event.wait() — would block the current thread and the asyncio loop.
while not aio_initiate_shutdown.is_set():
await asyncio.sleep(0)
safeprint('manage_aio_loop ending')
def aio_main(aio_initiate_shutdown: threading.Event):
""" Start the asyncio loop.
This non-coroutine function runs in Asyncio's thread.
"""
safeprint('aio_main starting')
asyncio.run(manage_aio_loop(aio_initiate_shutdown))
safeprint('aio_main ending')
def main():
"""Set up working environments for asyncio and tkinter.
This runs in the Main Thread.
"""
safeprint('main starting')
# Start the permanent asyncio loop in a new thread.
# aio_shutdown is signalled between threads. `asyncio.Event()` is not threadsafe.
aio_initiate_shutdown = threading.Event()
aio_thread = threading.Thread(target=aio_main, args=(aio_initiate_shutdown,), name="Asyncio's Thread")
aio_thread.start()
tk_main()
# Close the asyncio permanent loop and join the thread in which it runs.
aio_initiate_shutdown.set()
aio_thread.join()
safeprint('main ending')
#dataclass
class SafePrinter(AbstractContextManager):
_time_0 = time.perf_counter()
_print_q = queue.Queue()
_print_thread: threading.Thread | None = None
def __enter__(self):
""" Run the safeprint consumer method in a print thread.
Returns:
Thw safeprint producer method. (a.k.a. the runtime context)
"""
self._print_thread = threading.Thread(target=self._safeprint_consumer, name='Print Thread')
self._print_thread.start()
return self._safeprint
def __exit__(self, __exc_type: Type[BaseException] | None, __exc_value: BaseException | None,
__traceback: TracebackType | None) -> bool | None:
""" Close the print and join the print thread.
Args:
None or the exception raised during the execution of the safeprint producer method.
__exc_type:
__exc_value:
__traceback:
Returns:
False to indicate that any exception raised in self._safeprint has not been handled.
"""
self._print_q.put(None)
self._print_thread.join()
return False
def _safeprint(self, msg: str, *, timestamp: bool = True, reset: bool = False):
"""Put a string into the print queue.
'None' is a special msg. It is not printed but will close the queue and this context manager.
The exclusive thread and a threadsafe print queue ensure race free printing.
This is the producer in the print queue's producer/consumer pattern.
It runs in the same thread as the calling function
Args:
msg: The message to be printed.
timestamp: Print a timestamp (Default = True).
reset: Reset the time to zero (Default = False).
"""
if reset:
self._time_0 = time.perf_counter()
if timestamp:
self._print_q.put(f'{self._timestamp()} --- {msg}')
else:
self._print_q.put(msg)
def _safeprint_consumer(self):
"""Get strings from the print queue and print them on stdout.
The print statement is not threadsafe, so it must run in its own thread.
This is the consumer in the print queue's producer/consumer pattern.
"""
print(f'{self._timestamp()}: The SafePrinter is open for output.')
while True:
msg = self._print_q.get()
# Exit function when any producer function places 'None'.
if msg is not None:
print(msg)
else:
break
print(f'{self._timestamp()}: The SafePrinter has closed.')
def _timestamp(self) -> str:
"""Create a timestamp with useful status information.
This is a support function for the print queue producers. It runs in the same thread as the calling function
so the returned data does not cross between threads.
Returns:
timestamp
"""
secs = time.perf_counter() - self._time_0
try:
asyncio.get_running_loop()
except RuntimeError as exc:
if exc.args[0] == 'no running event loop':
loop_text = 'without a loop'
else:
raise
else:
loop_text = 'with a loop'
return f'{secs:.3f}s In {threading.current_thread().name} of {threading.active_count()} {loop_text}'
if __name__ == '__main__':
    # Keep the print thread open for the whole program run; `safeprint` is
    # the producer callable returned by SafePrinter.__enter__.
    with SafePrinter() as safeprint:
        # `main` is defined earlier in the file (outside this excerpt).
        sys.exit(main())

Asyncio.sleep causes script to End Immediately

In my simple asyncio Python program below, bar_loop is supposed to run continuously with a 1 second delay between loops.
Things run as expected when we have simply
async def bar_loop(self):
while True:
print('bar')
However, when we add an await asyncio.sleep(1), the loop ends instead of continuing to loop.
async def bar_loop(self):
while True:
print('bar')
await asyncio.sleep(1)
Why does asyncio.sleep() cause bar_loop to exit immediately? How can we let it loop with a 1 sec delay?
Full Example:
import asyncio
from typing import Optional
class Foo:
    """Owns a background task that prints 'bar' once per second."""

    def __init__(self):
        # Handle to the background task; stays None until start() runs.
        self.bar_loop_task: Optional[asyncio.Task] = None

    async def start(self):
        """Spawn bar_loop() as a concurrently running task."""
        self.bar_loop_task = asyncio.create_task(self.bar_loop())

    async def stop(self):
        """Request cancellation of the bar loop, if it was ever started."""
        task = self.bar_loop_task
        if task is None:
            return
        task.cancel()

    async def bar_loop(self):
        """Print 'bar' forever, pausing one second between prints."""
        while True:
            print('bar')
            await asyncio.sleep(1)
if __name__ == '__main__':
    try:
        foo = Foo()
        asyncio.run(foo.start())
    except KeyboardInterrupt:
        # NOTE(review): this second asyncio.run() starts a *new* event loop;
        # foo.bar_loop_task belongs to the first (already closed) loop, so
        # this cleanup pattern is broken -- which is the point of the question.
        asyncio.run(foo.stop())
Using Python 3.9.5 on Ubuntu 20.04.
This behavior has nothing to do with calling asyncio.sleep, but with the expected behavior of creating a task and doing nothing else.
Tasks will run in parallel in the asyncio loop, while other code that uses just coroutines and await expressions can be thought of as running in a linear pattern - however, as tasks are "out of the way" of the - let's call it - "visible path of execution", they also won't prevent that flow from reaching its end.
In this case, your program simply reaches the end of the start method, with nothing left being "awaited", the asyncio loop simply finishes its execution.
If you have no explicit code to run in parallel to bar_loop, just await for the task. Change your start method to read:
async def start(self):
    """Start the bar loop and block until it finishes or is cancelled.

    Awaiting the task keeps the event loop alive; without this, asyncio.run()
    returns as soon as start() ends and the pending task is destroyed.
    """
    self.bar_loop_task = asyncio.create_task(self.bar_loop())
    try:
        await self.bar_loop_task
    except asyncio.CancelledError:
        # The original snippet had the placeholder `except XXX:` with a
        # comment-only body, which is not valid Python.  Cancellation (via
        # stop()) is the expected way the task ends, so treat it as a clean
        # shutdown; other exceptions from inside the task still propagate.
        pass

Terminate external program run through asyncio with specific signal

I need to terminate external programs which run from an asyncio Python script with a specific signal, say SIGTERM. My problem is that programs always receives SIGINT even if I send them SIGTERM signal.
Here is a test case, source code for a fakeprg used in the test below can be found here.
import asyncio
import traceback
import os
import os.path
import sys
import time
import signal
import shlex
from functools import partial
class ExtProgramRunner:
    """Run external programs forever, relaunching them when they exit, and
    forward SIGTERM to them when the script receives SIGINT/SIGTERM.

    NOTE(review): this is pre-Python-3.5 asyncio style.  `asyncio.async`
    (a keyword clash since 3.7), generator coroutines with `yield from`, and
    `asyncio.Task.all_tasks` are all removed in modern Python.  The
    '#asyncio.coroutine' lines below are `@asyncio.coroutine` decorators
    mangled by the post's formatting.
    """

    # Shared class-level state: keep-running flag and live subprocess handles.
    run = True
    processes = []

    def __init__(self):
        pass

    def start(self, loop):
        # Install handlers so SIGINT/SIGTERM schedule the stop() coroutine,
        # then kick off the cancel monitor and the program launcher.
        self.current_loop = loop
        self.current_loop.add_signal_handler(signal.SIGINT, lambda: asyncio.async(self.stop('SIGINT')))
        self.current_loop.add_signal_handler(signal.SIGTERM, lambda: asyncio.async(self.stop('SIGTERM')))
        asyncio.async(self.cancel_monitor())
        asyncio.Task(self.run_external_programs())

    #asyncio.coroutine
    def stop(self, sig):
        # Forward SIGTERM to every child, then cancel all asyncio tasks so
        # cancel_monitor() notices and stops the loop.
        print("Got {} signal".format(sig))
        self.run = False
        for process in self.processes:
            print("sending SIGTERM signal to the process with pid {}".format(process.pid))
            process.send_signal(signal.SIGTERM)
        print("Canceling all tasks")
        for task in asyncio.Task.all_tasks():
            task.cancel()

    #asyncio.coroutine
    def cancel_monitor(self):
        # Poll until this coroutine itself is cancelled; on cancellation,
        # stop the event loop so main() can clean up.
        while True:
            try:
                yield from asyncio.sleep(0.05)
            except asyncio.CancelledError:
                break
        print("Stopping loop")
        self.current_loop.stop()

    #asyncio.coroutine
    def run_external_programs(self):
        os.makedirs("/tmp/files0", exist_ok=True)
        os.makedirs("/tmp/files1", exist_ok=True)
        # schedule tasks for execution
        asyncio.Task(self.run_cmd_forever("/tmp/fakeprg /tmp/files0 1000"))
        asyncio.Task(self.run_cmd_forever("/tmp/fakeprg /tmp/files1 5000"))

    #asyncio.coroutine
    def run_cmd_forever(self, cmd):
        # Launch `cmd`, wait for it to exit, drop it from the bookkeeping
        # list, then relaunch -- until stop() clears self.run.
        args = shlex.split(cmd)
        while self.run:
            process = yield from asyncio.create_subprocess_exec(*args)
            self.processes.append(process)
            exit_code = yield from process.wait()
            for idx, p in enumerate(self.processes):
                if process.pid == p.pid:
                    self.processes.pop(idx)
            print("External program '{}' exited with exit code {}, relauching".format(cmd, exit_code))
def main():
    """Build the event loop, start the daemon, and run until the loop stops."""
    loop = asyncio.get_event_loop()
    try:
        daemon = ExtProgramRunner()
        loop.call_soon(daemon.start, loop)
        # start main event loop
        loop.run_forever()
    except KeyboardInterrupt:
        # SIGINT is already routed through add_signal_handler; nothing to do.
        pass
    except asyncio.CancelledError as exc:
        print("asyncio.CancelledError")
    except Exception as exc:
        print(exc, file=sys.stderr)
        print("====", file=sys.stderr)
        print(traceback.format_exc(), file=sys.stderr)
    finally:
        print("Stopping daemon...")
        loop.close()

if __name__ == '__main__':
    main()
The reason for this is: when you start your Python program (the parent) and it starts its /tmp/fakeprg processes (the children), each is a different process with its own pid, but they all run in the same foreground process group. Your shell is bound to this group, so when you hit Ctrl-C (SIGINT), Ctrl-Z (SIGTSTP) or Ctrl-\ (SIGQUIT), the signal is sent to all processes in the foreground process group.
In your code this happens before the parent can even send the signal to its children through send_signal, so this line sends a signal to an already dead process (and should fail, so IMO that's an issue with asyncio).
To solve that, you can explicitly put your child process into a separate process group, like this:
asyncio.create_subprocess_exec(*args, preexec_fn=os.setpgrp)

How to interrupt Tornado coroutine

Suppose I have two functions that work like this:
# NOTE(review): '#tornado.gen.coroutine' here is the @tornado.gen.coroutine
# decorator, mangled by the post's formatting.
#tornado.gen.coroutine
def f():
    # Print four ticks, half a second apart.
    for i in range(4):
        print("f", i)
        yield tornado.gen.sleep(0.5)
# '#tornado.gen.coroutine' is a mangled @tornado.gen.coroutine decorator.
#tornado.gen.coroutine
def g():
    # Sleep one second, then fail to demonstrate error propagation.
    yield tornado.gen.sleep(1)
    print("Let's raise RuntimeError")
    raise RuntimeError
In general, function f might contain endless loop and never return (e.g. it can process some queue).
What I want to do is to be able to interrupt it, at any time it yields.
The most obvious way doesn't work. Exception is only raised after function f exits (if it's endless, it obviously never happens).
# '#tornado.gen.coroutine' is a mangled @tornado.gen.coroutine decorator.
#tornado.gen.coroutine
def main():
    try:
        # Yielding the list waits for *both* futures to resolve, so g()'s
        # RuntimeError is only delivered after f() has finished -- exactly
        # the behavior the question is about.
        yield [f(), g()]
    except Exception as e:
        print("Caught", repr(e))
    while True:
        yield tornado.gen.sleep(10)

if __name__ == "__main__":
    tornado.ioloop.IOLoop.instance().run_sync(main)
Output:
f 0
f 1
Let's raise RuntimeError
f 2
f 3
Traceback (most recent call last):
File "/tmp/test/lib/python3.4/site-packages/tornado/gen.py", line 812, in run
yielded = self.gen.send(value)
StopIteration
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
<...>
File "test.py", line 16, in g
raise RuntimeError
RuntimeError
That is, exception is only raised when both of the coroutines return (both futures resolve).
This is partially solved by tornado.gen.WaitIterator, but it's buggy (unless I'm mistaken). But that's not the point.
It still doesn't solve the problem of interrupting existing coroutines. Coroutine continues to run even though the function that started it exits.
EDIT: it seems like coroutine cancellation is something not really supported in Tornado, unlike in Python's asyncio, where you can easily throw CancelledError at every yield point.
If you use WaitIterator according to the instructions, and use a toro.Event to signal between coroutines, it works as expected:
from datetime import timedelta
import tornado.gen
import tornado.ioloop
import toro  # third-party; Event was later merged into Tornado 4.2 itself

# Module-level event used by main() to tell f() to finish early.
stop = toro.Event()
# '#tornado.gen.coroutine' is a mangled @tornado.gen.coroutine decorator.
#tornado.gen.coroutine
def f():
    # Tick four times, but bail out early if `stop` is set during a wait.
    for i in range(4):
        print("f", i)
        # wait raises Timeout if not set before the deadline.
        try:
            yield stop.wait(timedelta(seconds=0.5))
            print("f done")
            return
        except toro.Timeout:
            print("f continuing")
# '#tornado.gen.coroutine' is a mangled @tornado.gen.coroutine decorator.
#tornado.gen.coroutine
def g():
    # Sleep one second, then fail so main() sets the stop event.
    yield tornado.gen.sleep(1)
    print("Let's raise RuntimeError")
    raise RuntimeError
# '#tornado.gen.coroutine' is a mangled @tornado.gen.coroutine decorator.
#tornado.gen.coroutine
def main():
    # Consume results as they arrive; on the first error, set `stop` so f()
    # exits at its next wait instead of running to completion.
    wait_iterator = tornado.gen.WaitIterator(f(), g())
    while not wait_iterator.done():
        try:
            result = yield wait_iterator.next()
        except Exception as e:
            print("Error {} from {}".format(e, wait_iterator.current_future))
            stop.set()
        else:
            print("Result {} received from {} at {}".format(
                result, wait_iterator.current_future,
                wait_iterator.current_index))

if __name__ == "__main__":
    tornado.ioloop.IOLoop.instance().run_sync(main)
For now, pip install toro to get the Event class. Tornado 4.2 will include Event, see the changelog.
Since version 5, Tornado runs on asyncio event loop.
On Python 3, the IOLoop is always a wrapper around the asyncio event loop, and asyncio.Future and asyncio.Task are used instead of their Tornado counterparts.
Hence you can use asyncio Task cancellation, i.e. asyncio.Task.cancel.
Your example with a queue reading while-true loop, might look like this.
import logging
from asyncio import CancelledError
from tornado import ioloop, gen
async def read_off_a_queue():
    """Pretend to consume a work item every second until cancelled."""
    while True:
        try:
            await gen.sleep(1)
        except CancelledError:
            # Task.cancel() raises CancelledError at the await point.
            logging.debug('Reader cancelled')
            break
        else:
            logging.debug('Pretend a task is consumed')
async def do_some_work():
    """Simulate five seconds of work, then fail."""
    await gen.sleep(5)
    logging.debug('do_some_work is raising')
    raise RuntimeError
async def main():
    logging.debug('Starting queue reader in background')
    # convert_yielded wraps the coroutine in an asyncio.Task (Tornado 5+
    # runs on the asyncio event loop), giving a handle we can cancel.
    reader_task = gen.convert_yielded(read_off_a_queue())
    try:
        await do_some_work()
    except RuntimeError:
        logging.debug('do_some_work failed, cancelling reader')
        reader_task.cancel()
        # give the task a chance to clean up, in case it
        # catches CancelledError and awaits something
        try:
            await reader_task
        except CancelledError:
            pass

if __name__ == '__main__':
    logging.basicConfig(level='DEBUG')
    ioloop.IOLoop.instance().run_sync(main)
If you run it, you should see:
DEBUG:asyncio:Using selector: EpollSelector
DEBUG:root:Starting queue reader in background
DEBUG:root:Pretend a task is consumed
DEBUG:root:Pretend a task is consumed
DEBUG:root:Pretend a task is consumed
DEBUG:root:Pretend a task is consumed
DEBUG:root:do_some_work is raising
DEBUG:root:do_some_work failed, cancelling reader
DEBUG:root:Reader cancelled
Warning: This is not a working solution. Look at the commentary. Still if you're new (as myself), this example can show the logical flow. Thanks #nathaniel-j-smith and #wgh
What is the difference compared with using something more primitive, like a global variable, for instance?
import asyncio

# NOTE(review): `event` is created but never used in this example -- the
# "primitive" alternative below relies on the plain global `aflag` instead.
event = asyncio.Event()
aflag = False
async def short():
    """Print 'short repeat' once a second until the global flag flips."""
    while True:
        if aflag:
            break
        print('short repeat')
        await asyncio.sleep(1)
    print('short end')
async def long():
    """Run for three seconds, then raise the flag that stops short()."""
    global aflag
    print('LONG START')
    await asyncio.sleep(3)
    # Flipping the module-level flag is what makes short() exit its loop.
    aflag = True
    print('LONG END')
async def main():
    # Run both coroutines concurrently; gather returns when both finish.
    await asyncio.gather(long(), short())

if __name__ == '__main__':
    asyncio.run(main())
It is for asyncio, but I guess the idea stays the same. This is a semi-question (why would Event be better?). Yet the solution yields the exact result the author needs:
LONG START
short repeat
short repeat
short repeat
LONG END
short end
UPDATE:
this slides may be really helpful in understanding core of a problem.

Categories