I have a process running with asyncio which should run forever.
I can interact with that process through a ProcessIterator, which (left out here) can send data to stdin and fetch data from stdout.
I can access the data with async for fd, data in ProcessIterator(...):.
The problem is that the execution of this async iterator must be time-limited. When the time runs out, the timeout() function is called, but the exception it raises does not originate from the __anext__ function, so the consumer of the iterator is never notified of the timeout.
How can I raise this exception inside the async iterator?
I found no way of calling awaitable.throw(something) or similar for it.
class ProcessIterator:
def __init__(self, process, loop, run_timeout):
self.process = process
self.loop = loop
self.run_timeout = run_timeout
# set the global timer
self.overall_timer = self.loop.call_later(
self.run_timeout, self.timeout)
    def timeout(self):
        # XXX: how do i pass this exception into the iterator?
        raise ProcTimeoutError(
            self.process.args,
            self.run_timeout,
            was_global,  # defined in the parts stripped from this example
        )
    def __aiter__(self):  # a plain (non-async) method since Python 3.5.2
        return self
async def __anext__(self):
if self.process.exited:
raise StopAsyncIteration()
else:
# fetch output from the process asyncio.Queue()
entry = await self.process.output_queue.get()
if entry == StopIteration:
raise StopAsyncIteration()
return entry
The usage of the async iterator is now roughly:
async def test_coro(loop):
    code = 'import time; print("rofl"); time.sleep(5); print("lol")'
proc = Process([sys.executable, '-u', '-c', code])
await proc.create()
try:
async for fd, line in ProcessIterator(proc, loop, run_timeout=1):
print("%d: %s" % (fd, line))
    except ProcTimeoutError as exc:
# XXX This is the exception I'd like to get here! How can i throw it?
print("timeout: %s" % exc)
await proc.wait()
tl;dr: How can I throw a timed exception so that it originates from an async iterator?
EDIT: Added solution 2
Solution 1:
Can the timeout() callback store the ProcTimeoutError exception in an instance variable? Then __anext__() can check the instance variable and raise the exception if it is set.
class ProcessIterator:
def __init__(self, process, loop, run_timeout):
self.process = process
self.loop = loop
self.error = None
self.run_timeout = run_timeout
# set the global timer
self.overall_timer = self.loop.call_later(
self.run_timeout, self.timeout)
def timeout(self):
# XXX: set instance variable
self.error = ProcTimeoutError(
self.process.args,
self.run_timeout,
was_global
)
    def __aiter__(self):  # a plain (non-async) method since Python 3.5.2
        return self
async def __anext__(self):
# XXX: if error is set, then raise the exception
if self.error:
raise self.error
elif self.process.exited:
raise StopAsyncIteration()
else:
# fetch output from the process asyncio.Queue()
entry = await self.process.output_queue.get()
if entry == StopIteration:
raise StopAsyncIteration()
return entry
Solution 2:
Put the exception on the process.output_queue. This also avoids a drawback of solution 1: there, __anext__ can be parked in queue.get() and will only notice self.error once the next regular entry arrives, whereas putting the error on the queue wakes the pending get() immediately.
....
    def timeout(self):
        # XXX: put the error on the queue; use put_nowait, because a
        # call_later callback cannot await the put() coroutine
        self.process.output_queue.put_nowait(ProcTimeoutError(
            self.process.args,
            self.run_timeout,
            was_global
        ))
....
# fetch output from the process asyncio.Queue()
entry = await self.process.output_queue.get()
if entry == StopIteration:
raise StopAsyncIteration()
        elif isinstance(entry, ProcTimeoutError):
            raise entry
....
If there may be other entries on the queue, use a priority queue and assign ProcTimeoutError a higher priority than the regular entries, e.g. (0, ProcTimeoutError) vs. (1, other_entry) — asyncio.PriorityQueue retrieves the smallest entry first.
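For illustration, a minimal runnable sketch of that priority trick (the names here are hypothetical; a counter is added as a tie-breaker so the payloads themselves never have to be comparable):

import asyncio
import itertools

counter = itertools.count()  # tie-breaker for entries with equal priority

async def demo():
    q = asyncio.PriorityQueue()
    # normal output entries get priority 1
    q.put_nowait((1, next(counter), (1, b"some output line")))
    # the timeout error jumps the queue with priority 0
    q.put_nowait((0, next(counter), TimeoutError("global timeout")))
    prio, _, entry = await q.get()
    assert isinstance(entry, TimeoutError)  # the error comes out first

asyncio.run(demo())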
Please check out the timeout context manager from asyncio:
async with asyncio.timeout(10):
    async for i in get_iter():
        process(i)
It is not released yet, but you can copy-paste the implementation from the asyncio master branch (it eventually shipped as asyncio.timeout in Python 3.11).
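On interpreters that predate asyncio.timeout, asyncio.wait_for can bound the whole iteration instead; a sketch, reusing the get_iter()/process() placeholders from above:

import asyncio

async def consume():
    async for i in get_iter():
        process(i)

async def main():
    try:
        await asyncio.wait_for(consume(), timeout=10)
    except asyncio.TimeoutError:
        print("iteration timed out")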
You could use get_nowait, which returns an entry or raises QueueEmpty immediately. Wrapping it in a while loop on self.error, with some async sleep to give control back to the event loop, should do the trick. Something like:
async def __anext__(self):
if self.process.exited:
raise StopAsyncIteration()
else:
while self.error is None:
try:
entry = self.process.output_queue.get_nowait()
if entry == StopIteration:
raise StopAsyncIteration()
return entry
except asyncio.QueueEmpty:
# some sleep to give back control to ioloop
# since we using nowait
await asyncio.sleep(0.1)
else:
raise self.error
As a hint, here is the approach used in Tornado's Queue.get implementation with a timeout:
def get(self, timeout=None):
"""Remove and return an item from the queue.
Returns a Future which resolves once an item is available, or raises
`tornado.gen.TimeoutError` after a timeout.
"""
future = Future()
try:
future.set_result(self.get_nowait())
except QueueEmpty:
self._getters.append(future)
_set_timeout(future, timeout)
return future
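The asyncio counterpart of that Tornado pattern, as a sketch: asyncio.wait_for wraps queue.get() in a task and cancels it when the deadline passes, so the timeout really does originate from the awaited call (ProcTimeoutError stands in for the question's exception type):

import asyncio

async def get_with_timeout(queue, args, run_timeout):
    try:
        return await asyncio.wait_for(queue.get(), run_timeout)
    except asyncio.TimeoutError:
        # translate into the domain-specific error from the question
        raise ProcTimeoutError(args, run_timeout, True)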
This is the solution I have come up with so far.
See kevin/process.py in https://github.com/SFTtech/kevin for the upstream version.
It also features line counting and output timeouts, which I stripped from this example.
class Process:
def __init__(self, command, loop=None):
self.loop = loop or asyncio.get_event_loop()
self.created = False
self.killed = asyncio.Future()
self.proc = self.loop.subprocess_exec(
lambda: WorkerInteraction(self), # see upstream repo
*command)
self.transport = None
self.protocol = None
async def create(self):
self.transport, self.protocol = await self.proc
def communicate(self, timeout):
if self.killed.done():
raise Exception("process was already killed "
"and no output is waiting")
return ProcessIterator(self, self.loop, timeout)
class ProcessIterator:
"""
Asynchronous iterator for the process output.
Use like `async for (fd, data) in ProcessIterator(...):`
"""
def __init__(self, process, loop, run_timeout):
self.process = process
self.loop = loop
self.run_timeout = run_timeout
self.overall_timer = None
if self.run_timeout < INF:
# set the global timer
self.overall_timer = self.loop.call_later(
self.run_timeout,
functools.partial(self.timeout, was_global=True))
def timeout(self):
if not self.process.killed.done():
self.process.killed.set_exception(ProcTimeoutError(
self.process.args,
self.run_timeout,
))
    def __aiter__(self):  # a plain (non-async) method since Python 3.5.2
        return self
async def __anext__(self):
# either the process exits,
# there's an exception (process killed, timeout, ...)
# or the queue gives us the next data item.
# wait for the first of those events.
done, pending = await asyncio.wait(
[self.process.protocol.queue.get(), self.process.killed],
return_when=asyncio.FIRST_COMPLETED)
# at least one of them is done now:
for future in done:
# if something failed, cancel the pending futures
# and raise the exception
# this happens e.g. for a timeout.
if future.exception():
for future_pending in pending:
future_pending.cancel()
# kill the process before throwing the error!
await self.process.pwn()
raise future.exception()
# fetch output from the process
entry = future.result()
# it can be stopiteration to indicate the last data chunk
# as the process exited on its own.
if entry == StopIteration:
if not self.process.killed.done():
self.process.killed.set_result(entry)
# raise the stop iteration
await self.stop_iter(enough=False)
return entry
raise Exception("internal fail: no future was done!")
    async def stop_iter(self, enough=False):
# stop the timer
if self.overall_timer:
self.overall_timer.cancel()
retcode = self.process.returncode()
raise StopAsyncIteration()
The magic part is this call:
done, pending = await asyncio.wait(
[self.process.protocol.queue.get(), self.process.killed],
return_when=asyncio.FIRST_COMPLETED)
When the timeout occurs, the queue fetching is aborted reliably.
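Here is a self-contained sketch of that race, with hypothetical stand-ins for the real process objects. One caveat: newer Python versions require the queue.get() coroutine to be wrapped in a task before being passed to asyncio.wait:

import asyncio

async def demo():
    loop = asyncio.get_running_loop()
    queue = asyncio.Queue()
    killed = loop.create_future()
    # simulate the global timer firing before any output arrives
    loop.call_later(0.1, killed.set_exception, TimeoutError("global timeout"))
    getter = asyncio.ensure_future(queue.get())
    done, pending = await asyncio.wait(
        [getter, killed], return_when=asyncio.FIRST_COMPLETED)
    for future in pending:
        future.cancel()  # the pending get() is dropped reliably
    for future in done:
        if future.exception():
            print("would raise:", future.exception())

asyncio.run(demo())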
Related
For some reason, my program hangs when using multiprocessing and queues, even though I set timeouts and check whether the queue is empty. This happens on both Windows and Linux.
There are multiple processes that receive inputs (here a, b and c) and should send back results (here they just echo the inputs a, b and c back).
From what I see, after all arguments have been handed out, the processes send back the results for a and b over and over again, although a and b were provided only once.
import multiprocessing as mp
import queue
class Multithreading:
def __init__(self, n_processes):
self._processes = [
_Thread(name='Process-{}'.format(i))
for i in range(n_processes)]
def __enter__(self):
for process in self._processes:
process.start()
print(f'Started {process.name}')
return self
def __exit__(self, exc_type, exc_val, exc_tb):
for process in self._processes:
process.event_stopped.set()
process.join()
def run(self):
args = ['a', 'b', 'c']
n_calls = len(args)
for i, arg in enumerate(args):
m = i % len(self._processes)
print(f'Setting arguments to {self._processes[m].name}')
is_started = False
while not is_started:
try:
self._processes[m].queue_inputs.put(arg, timeout=0.05)
is_started = True
print(f'Argument given to {self._processes[m].name}')
except queue.Full:
pass
print(f'All arguments given')
for i in range(n_calls):
m = i % len(self._processes)
print(f'Checking result from {self._processes[m].name}')
arg = None
while True:
try:
arg = self._processes[m].queue_results.get(timeout=0.05)
print('Received {}'.format(arg))
break
except queue.Empty:
print(f'Empty in {self._processes[m].name}, arg = {arg}')
pass
class _Thread(mp.Process):
def __init__(self, name):
super().__init__(name=name, target=self._run)
self.queue_inputs = mp.Queue()
self.queue_results = mp.Queue()
self.event_stopped = mp.Event()
def _run(self):
print(f'Running {self.name}')
while not self.event_stopped.is_set():
try:
arg = self.queue_inputs.get(timeout=0.05)
print(f'{self.name} received {arg}')
while not self.event_stopped.is_set():
try:
self.queue_results.put(arg, timeout=0.05)
print(f'{self.name} sent {arg}')
except queue.Full:
pass
except queue.Empty:
pass
if __name__ == '__main__':
for _ in range(100000000):
with Multithreading(n_processes=2) as m:
m.run()
I would expect the timeouts of the put and get methods to raise the corresponding exceptions, but apparently they do not.
The problem is in _Thread._run:
def _run(self):
print(f'Running {self.name}')
while not self.event_stopped.is_set(): # Ok, loop until event_stopped
try:
arg = self.queue_inputs.get(timeout=0.05) # Ok, try to get an item
print(f'{self.name} received {arg}')
while not self.event_stopped.is_set(): # Oops, what is this loop for???
try:
self.queue_results.put(arg, timeout=0.05)
print(f'{self.name} sent {arg}')
except queue.Full:
pass
except queue.Empty:
pass
Your current code loops indefinitely (or until its queue_results queue becomes full or event_stopped is set) on the same item, repeatedly adding it to its output queue. Replacing the offending while with an if is enough to fix the problem:
...
while not self.event_stopped.is_set(): # Ok, loop until event_stopped
try:
arg = self.queue_inputs.get(timeout=0.05) # Ok, try to get an item
print(f'{self.name} received {arg}')
                if not self.event_stopped.is_set():  # ignore the item if stopped in the meanwhile
try:
...
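For completeness, a sketch of the corrected _run assembled in full (the same fragments as above, just joined):

    def _run(self):
        print(f'Running {self.name}')
        while not self.event_stopped.is_set():
            try:
                arg = self.queue_inputs.get(timeout=0.05)
                print(f'{self.name} received {arg}')
                if not self.event_stopped.is_set():
                    try:
                        self.queue_results.put(arg, timeout=0.05)
                        print(f'{self.name} sent {arg}')
                    except queue.Full:
                        pass
            except queue.Empty:
                pass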
Is there a way to raise an exception in the child process when the main process gets a KeyboardInterrupt exception (instead of a loop polling for an event or queue value)?
For now I am using a Queue to communicate the KeyboardInterrupt triggered in the main process to the child processes. It gets noticed in the child process's main loop, and so far that lets me do a clean shutdown of the child process.
However, when the KeyboardInterrupt is triggered during the child's initialization, I have to check after every statement whether the user has aborted the main process. Another option would be to free the connection resource (which is used later) so that a general or connection-related exception is raised when it is next used.
Are there better ways to get a clean shutdown (daemon processes will not allow a clean shutdown, I think)?
def connect(self):
self.conn = mysql.connector.connect(
host="192.168.10.10",
user="homestead",
password="xxxx",
database="xxxx"
)
self.cursor = self.conn.cursor()
def dispose(self):
self.cursor.close()
self.conn.close()
def init(self):
# set up root logger
# ...
root_logger = logging.getLogger()
root_logger.addHandler(fh)
# ...
try: # init check 1 for KeyboardInterrupt exception in
# main process (*1)
row = self.task_queue.get(timeout=5) # or something like using an
# Event.is_set() whenever KeyboardInterrupt is raised
# in main process could be possible too
if row is None: # None is sent when KeyboardInterrupt exception
# in main process
self.task_queue.task_done()
                return False
# for example, calling self.dispose() here generates an exception at
# self.connect() because connection gets closed / freed (*2)
# or raise CustomException (*2b)?
except:
pass
# ...
self.connect()
# ...
try: # init check 2 for KeyboardInterrupt exception in main process (*3)
row = self.task_queue.get(timeout=5)
if row is None:
self.task_queue.task_done()
self.dispose()
                return False
# raise CustomException?
except:
pass
        return True
def run(self):
try:
self.init()
except KeyboardInterrupt: # would something like this be possible
# (or disrupt the code flow to elicit another exception like in *2
# , or raise a CustomException in *2b, both which get caught here, as
# an alternative)?
...
# this would be handy instead of checking after each statement
# in the init parts (*1, *3)
except:
logging.error("Something went wrong during initialization")
self.task_queue.task_done()
self.dispose()
return
while True:
            if self.conn.is_connected() == False:
                pass  # ....
row = None
empty = False
leave = False
try:
row = self.task_queue.get(timeout=5)
if row is None:
self.task_queue.task_done()
leave = True
                else:
                    pass  # save item
except:
empty = True
pass
if (leave == True):
break
self.dispose()
BTW: I have read some other topics like Python: while KeyboardInterrupt is forwarded to multiprocessing child process? and How to use KeyboardInterrupt from the main process to stop child processes?
Edit (added from main()):
def manage_ctrlC(*args):
sqlDataSaver.exit.set()
def main():
global tasks, sqlDataSaver
# Manage Ctrl_C keyboard event
signal.signal(signal.SIGINT, manage_ctrlC) # dummy, not used yet
# ...
tasks = multiprocessing.JoinableQueue()
sqlDataSaver = sqlExecutor(tasks) # inherits from multiprocessing.Process
sqlDataSaver.start()
@Tim Roberts:
You mean something like this? So each process has its own SIGINT handler, and a separate cleanup step is triggered by the exception raised in each handler?
from multiprocessing import *
import signal
import time
import sys
class SigInt(Exception):
"""SIG INT"""
pass
class MyProcess(Process):
def __init__(self, toExecute, sighandler):
Process.__init__(self)
self.toExecute = toExecute
self.sighandler = sighandler
def run(self):
# set up custom handler
signal.signal(signal.SIGINT, self.sighandler)
while True:
try:
self.toExecute()
except SigInt:
# clean up
break
print(current_process().name," process exited")
def manage_ctrlC_main(*args):
print('main crtl-c')
sys.exit()
def toExecute1():
time.sleep(1)
print("exec1");
def toExecute2():
time.sleep(1)
print("exec2");
def sigh1(signal, frame):
print('sig 1 handler')
raise SigInt
def sigh2(signal, frame):
print('sig 2 handler')
raise SigInt
def main():
global myProcess1, myProcess2
signal.signal(signal.SIGINT, manage_ctrlC_main)
myProcess1 = MyProcess(toExecute1,sigh1)
myProcess1.start()
time.sleep(4)
myProcess2 = MyProcess(toExecute2,sigh2)
myProcess2.start()
myProcess1.join()
myProcess2.join()
if __name__ == '__main__':
main()
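A follow-up note on why this works: on POSIX systems, Ctrl-C delivers SIGINT to every process in the foreground process group, and each child replaces the inherited handler with its own at the top of run(), so the signal becomes a catchable SigInt exception inside the child instead of an unhandled KeyboardInterrupt.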
I have written code for an async pool below. In __aexit__ I'm cancelling the _worker tasks after the tasks have finished. But when I run the code, the worker tasks are not getting cancelled and the code runs forever. This is what such a task looks like: <Task pending coro=<AsyncPool._worker() running at \async_pool.py:17> wait_for=<Future cancelled>>. The asyncio.wait_for is getting cancelled, but not the worker tasks.
class AsyncPool:
def __init__(self,coroutine,no_of_workers,timeout):
self._loop = asyncio.get_event_loop()
self._queue = asyncio.Queue()
self._no_of_workers = no_of_workers
self._coroutine = coroutine
self._timeout = timeout
self._workers = None
async def _worker(self):
while True:
try:
ret = False
queue_item = await self._queue.get()
ret = True
result = await asyncio.wait_for(self._coroutine(queue_item), timeout = self._timeout,loop= self._loop)
except Exception as e:
print(e)
finally:
if ret:
self._queue.task_done()
async def push_to_queue(self,item):
self._queue.put_nowait(item)
async def __aenter__(self):
assert self._workers == None
self._workers = [asyncio.create_task(self._worker()) for _ in range(self._no_of_workers)]
return self
async def __aexit__(self,type,value,traceback):
await self._queue.join()
for worker in self._workers:
worker.cancel()
await asyncio.gather(*self._workers, loop=self._loop, return_exceptions =True)
To use the Asyncpool:
async def something(item):
print("got", item)
await asyncio.sleep(item)
async def main():
async with AsyncPool(something, 5, 2) as pool:
for i in range(10):
await pool.push_to_queue(i)
asyncio.run(main())
The output in my terminal: the "got" lines, then several blank lines, after which the program hangs.
The problem is that your except Exception exception clause also catches cancellation, and ignores it. To add to the confusion, print(e) just prints an empty line in case of a CancelledError, which is where the empty lines in the output come from. (Changing it to print(type(e)) shows what's going on.)
To correct the issue, change except Exception to something more specific, like except asyncio.TimeoutError. This change is not needed in Python 3.8 where asyncio.CancelledError no longer derives from Exception, but from BaseException, so except Exception doesn't catch it.
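A sketch of the corrected worker under that change (same class as in the question; the deprecated loop= argument to wait_for is also dropped):

    async def _worker(self):
        while True:
            queue_item = await self._queue.get()
            try:
                await asyncio.wait_for(self._coroutine(queue_item),
                                       timeout=self._timeout)
            except asyncio.TimeoutError:
                print("timed out:", queue_item)
            finally:
                # reached on success, timeout, or cancellation during the job,
                # but not when cancelled while waiting in queue.get()
                self._queue.task_done()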
When you have an asyncio task that has been created and then cancelled, the task is still alive and needs to be "reclaimed", so you should await it. However, once you await such a cancelled task, since it will never produce the expected return value, an asyncio.CancelledError is raised, and you need to catch it somewhere.
Because of this behavior, I don't think you should gather them; instead, await each of the cancelled tasks, as they are supposed to return right away:
async def __aexit__(self,type,value,traceback):
await self._queue.join()
for worker in self._workers:
worker.cancel()
for worker in self._workers:
try:
await worker
except asyncio.CancelledError:
print("worker cancelled:", worker)
This appears to work. The event is a countdown timer, and when it expires it cancels the tasks.
import asyncio
from datetime import datetime as dt
from datetime import timedelta as td
import random
import time
class Program:
def __init__(self):
self.duration_in_seconds = 20
self.program_start = dt.now()
self.event_has_expired = False
self.canceled_success = False
async def on_start(self):
print("On Start Event Start! Applying Overrides!!!")
await asyncio.sleep(random.randint(3, 9))
async def on_end(self):
print("On End Releasing All Overrides!")
await asyncio.sleep(random.randint(3, 9))
async def get_sensor_readings(self):
print("getting sensor readings!!!")
await asyncio.sleep(random.randint(3, 9))
    async def evaluate_data(self):
print("checking data!!!")
await asyncio.sleep(random.randint(3, 9))
async def check_time(self):
if (dt.now() - self.program_start > td(seconds = self.duration_in_seconds)):
self.event_has_expired = True
print("Event is DONE!!!")
else:
print("Event is not done! ",dt.now() - self.program_start)
async def main(self):
# script starts, do only once self.on_start()
await self.on_start()
print("On Start Done!")
while not self.canceled_success:
readings = asyncio.ensure_future(self.get_sensor_readings())
            analysis = asyncio.ensure_future(self.evaluate_data())
checker = asyncio.ensure_future(self.check_time())
if not self.event_has_expired:
await readings
await analysis
await checker
else:
# close other tasks before final shutdown
readings.cancel()
analysis.cancel()
checker.cancel()
self.canceled_success = True
print("cancelled hit!")
# script ends, do only once self.on_end() when even is done
await self.on_end()
print('Done Deal!')
async def main():
    program = Program()
    await program.main()

asyncio.run(main())
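A small note on the loop body above: the three tasks are created with asyncio.ensure_future before the if, so they are already running concurrently, and the sequential awaits merely collect their results. An equivalent, arguably clearer way to write the three awaits would be:

            await asyncio.gather(readings, analysis, checker)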
I'm using threading.Thread and t.start() with a list of callables to do long-running multithreaded processing. My main thread blocks until all threads have finished. However, I'd like it to return immediately if one of the callables throws an exception, and to terminate the other threads.
Using t.join() to check that a thread has finished provides no information about failures due to exceptions.
Here is the code:
import json
import requests
import threading
class ThreadServices:
def __init__(self):
self.obj = ""
def execute_services(self, arg1, arg2):
try:
result = call_some_process(arg1, arg2) #some method
#save results somewhere
except Exception, e:
# raise exception
print e
def invoke_services(self, stubs):
"""
Thread Spanning Function
"""
try:
p1 = "" #some value
p2 = "" #some value
# Call service 1
            t1 = threading.Thread(target=self.execute_services, args=(p1, p2))
# Start thread
t1.start()
# Block till thread completes execution
t1.join()
thread_pool = list()
for stub in stubs:
# Start parallel execution of threads
t = threading.Thread(target=self.execute_services,
args=(p1, p2))
t.start()
thread_pool.append(t)
for thread in thread_pool:
                # Block till all the threads complete execution: wait for
                # all the parallel tasks to complete
thread.join()
# Start another process thread
            t2 = threading.Thread(target=self.execute_services,
                                  args=(p1, p2))
t2.start()
# Block till this thread completes execution
t2.join()
            requests.post(url, data=json.dumps({"status_code": 200}))
except Exception, e:
print e
            requests.post(url, data=json.dumps({"status_code": 500}))
# Don't return anything as this function is invoked as a thread from
# main calling function
class Service(ThreadServices):
"""
Service Class
"""
def main_thread(self, request, context):
"""
Main Thread:Invokes Task Execution Sequence in ThreadedService
:param request:
:param context:
:return:
"""
try:
main_thread = threading.Thread(target=self.invoke_services,
args=(request,))
main_thread.start()
return True
except Exception, e:
return False
When I call Service().main_thread(request, context) and there is some exception executing t1, I need to get it raised in main_thread and return False. How can I implement that with this structure? Thanks!!
For one thing, you are complicating matters too much. I would do it this way:
from thread import start_new_thread as thread
from time import sleep
class Task:
"""One thread per task.
This you should do with subclassing threading.Thread().
This is just conceptual example.
"""
def __init__ (self, func, args=(), kwargs={}):
self.func = func
self.args = args
self.kwargs = kwargs
self.error = None
self.done = 0
self.result = None
def _run (self):
self.done = 0
self.error = None
self.result = None
# So this is what you should do in subclassed Thread():
try: self.result = self.func(*self.args, **self.kwargs)
except Exception, e:
self.error = e
self.done = 1
def start (self):
thread(self._run,())
def wait (self, retrexc=1):
"""Used in place of threading.Thread.join(), but it returns the result of the function self.func() and manages errors.."""
while not self.done: sleep(0.001)
if self.error:
if retrexc: return self.error
raise self.error
return self.result
# And this is how you should use your pool:
def do_something (tasknr):
print tasknr-20
if tasknr%7==0: raise Exception, "Dummy exception!"
return tasknr**120/82.0
pool = []
for task in xrange(20, 50):
t = Task(do_something, (task,))
pool.append(t)
# And only then wait for each one:
results = []
for task in pool:
results.append(task.wait())
print results
This way you can make task.wait() raise the error instead. The thread would already be stopped. So all you need to do is remove their references from pool, or whole pool, after you are done. You can even:
results = []
for task in pool:
try: results.append(task.wait(0))
except Exception, e:
print task.args, "Error:", str(e)
print results
Now, do not use strictly this (I mean Task() class) as it needs a lot of things added to be used for real.
Just subclass threading.Thread() and implement the similar concept by overriding run() and join() or add new functions like wait().
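A sketch of that subclassing approach in Python 3 syntax (the names are illustrative): the thread stores any exception from the target, and join() re-raises it in the caller:

import threading

class Task(threading.Thread):
    def __init__(self, func, args=(), kwargs=None):
        super().__init__()
        self.func, self.args, self.kwargs = func, args, kwargs or {}
        self.error = None
        self.result = None

    def run(self):
        try:
            self.result = self.func(*self.args, **self.kwargs)
        except Exception as e:
            self.error = e  # stash it for the joining thread

    def join(self, timeout=None):
        super().join(timeout)
        if self.error is not None:
            raise self.error  # surface the worker's exception in the caller
        return self.result

Usage mirrors the pool above: start all tasks, then join them, and the first stored error is raised in the joining thread.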
I am trying to set a timer that will interrupt the running process and call a coroutine when it fires. However, I'm not sure what the right way to accomplish this is. I've found AbstractEventLoop.call_later, along with threading.Timer, but neither of these seems to work (or I'm using them incorrectly). The code is pretty basic and looks something like this:
def set_timer( time ):
self.timer = Timer( 10.0, timeout )
self.timer.start()
#v2
#self.timer = get_event_loop()
#self.timer.call_later( 10.0, timeout )
return
async def timeout():
await some_func()
return
What is the correct way to set a non-blocking timer that will call a callback function after some number of seconds? Being able to cancel the timer would be a bonus but is not a requirement. The major things I need are: non-blocking, and successfully calling the coroutine. Right now it either returns an error that the object can't be awaited (if I toss an await in) or complains that some_func was never awaited, and the expected output never happens.
Creating a Task using ensure_future is a common way to start a job running without blocking your execution flow. You can also cancel tasks.
I wrote an example implementation to give you something to start from:
import asyncio
class Timer:
def __init__(self, timeout, callback):
self._timeout = timeout
self._callback = callback
self._task = asyncio.ensure_future(self._job())
async def _job(self):
await asyncio.sleep(self._timeout)
await self._callback()
def cancel(self):
self._task.cancel()
async def timeout_callback():
await asyncio.sleep(0.1)
print('echo!')
async def main():
print('\nfirst example:')
timer = Timer(2, timeout_callback) # set timer for two seconds
await asyncio.sleep(2.5) # wait to see timer works
print('\nsecond example:')
timer = Timer(2, timeout_callback) # set timer for two seconds
await asyncio.sleep(1)
timer.cancel() # cancel it
await asyncio.sleep(1.5) # and wait to see it won't call callback
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
try:
loop.run_until_complete(main())
finally:
loop.run_until_complete(loop.shutdown_asyncgens())
loop.close()
Output:
first example:
echo!
second example:
Thanks Mikhail Gerasimov for your answer, it was very useful. Here is an extension to Mikhail's answer: an interval timer with some twists. Perhaps it is useful for some users.
import asyncio
class Timer:
def __init__(self, interval, first_immediately, timer_name, context, callback):
self._interval = interval
self._first_immediately = first_immediately
self._name = timer_name
self._context = context
self._callback = callback
self._is_first_call = True
self._ok = True
self._task = asyncio.ensure_future(self._job())
print(timer_name + " init done")
async def _job(self):
try:
while self._ok:
if not self._is_first_call or not self._first_immediately:
await asyncio.sleep(self._interval)
await self._callback(self._name, self._context, self)
self._is_first_call = False
except Exception as ex:
print(ex)
def cancel(self):
self._ok = False
self._task.cancel()
async def some_callback(timer_name, context, timer):
context['count'] += 1
print('callback: ' + timer_name + ", count: " + str(context['count']))
if timer_name == 'Timer 2' and context['count'] == 3:
timer.cancel()
print(timer_name + ": goodbye and thanks for all the fish")
timer1 = Timer(interval=1, first_immediately=True, timer_name="Timer 1", context={'count': 0}, callback=some_callback)
timer2 = Timer(interval=5, first_immediately=False, timer_name="Timer 2", context={'count': 0}, callback=some_callback)
try:
loop = asyncio.get_event_loop()
loop.run_forever()
except KeyboardInterrupt:
timer1.cancel()
timer2.cancel()
print("clean up done")
The solution proposed by Mikhail has one drawback. Calling cancel() cancels both the timer and the actual callback (if cancel() fires after the timeout has passed but the job is still in progress). Cancelling the job itself may not be the desired behavior.
An alternative approach is to use loop.call_later:
async def some_job():
print('Job started')
await asyncio.sleep(5)
print('Job is done')
loop = asyncio.get_event_loop() # or asyncio.get_running_loop()
timeout = 5
timer = loop.call_later(timeout, lambda: asyncio.ensure_future(some_job()))
timer.cancel() # cancels the timer, but not the job, if it's already started
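A follow-up sketch on that trade-off: to be able to cancel the job as well once the timer has fired, keep a reference to the task the callback creates (the names are illustrative):

job_task = None

def _start_job():
    global job_task
    job_task = asyncio.ensure_future(some_job())

timer = loop.call_later(timeout, _start_job)

# later:
timer.cancel()               # stops the timer if it hasn't fired yet
if job_task is not None:
    job_task.cancel()        # stops the job if it has already started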