Close asyncio loop - python

import asyncio
import aiohttp

aut_token = ("token")
tasks = []
iter_flag = True
interval = 0
seq = 0

class WAPI:
    async def receiver(WAPI_S):
        async for msg in WAPI_S:
            global interval
            global seq
            data = msg.json()
            seq = data.get("s")
            if data.get("op") == 10:
                interval = data.get("d").get("heartbeat_interval") / 1000
            if data.get("op") == 11:
                pass
        raise aiohttp.ClientError

    async def heartbeating(WAPI_S):
        while iter_flag:
            await WAPI_S.send_json({
                "op": 1,
                "d": seq
            })
            await asyncio.sleep(interval)

    async def event_manager():
        loop = asyncio.get_running_loop()
        try:
            async with aiohttp.ClientSession() as session:
                async with session.ws_connect("url") as WAPI_S:
                    task_receive = loop.create_task(WAPI.receiver(WAPI_S)); task_heartbeating = loop.create_task(WAPI.heartbeating(WAPI_S))
                    tasks.append(task_receive); tasks.append(task_heartbeating)
                    await asyncio.gather(*tasks)
        except aiohttp.ClientError:
            global iter_flag
            iter_flag = False
            await asyncio.sleep(interval)
            for task in tasks:
                task.cancel()
            try:
                loop.close()
            except:
                loop.stop()

asyncio.run(WAPI.event_manager())
I am trying to catch the ClientError exception and close the loop, however loop.close() throws "RuntimeError: Event loop stopped before Future completed."
How do I implement the interception correctly?

You do not need to keep track of your tasks manually; you can simply use
asyncio.all_tasks():
Return a set of not yet finished Task objects run by the loop.
And then:
pending = asyncio.all_tasks()
for c in pending:
    await asyncio.wait_for(c, timeout=5)
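For example, a minimal runnable sketch of that shutdown (the worker coroutine and the exclusion of the current task are my additions, not part of the original snippet):

import asyncio

async def worker(n):
    await asyncio.sleep(n)

async def main():
    for n in range(3):
        asyncio.create_task(worker(n))
    # collect everything still running, except this coroutine itself,
    # and give each task a bounded amount of time to finish
    pending = [t for t in asyncio.all_tasks() if t is not asyncio.current_task()]
    for c in pending:
        try:
            await asyncio.wait_for(c, timeout=5)
        except asyncio.TimeoutError:
            pass  # wait_for cancelled the task after the timeout

asyncio.run(main())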
Also, you are trying to stop the loop while you are in it.
This is the pattern I use most of the time:
async def main():
    <do some stuff>

loop = asyncio.get_event_loop()
try:
    loop.run_until_complete(main())
except ExceptionYouWantToHandle:
    <cleaning up>
finally:
    loop.stop()
And in event_manager you simply return once you get an exception, or you let the exception propagate.
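A concrete, runnable version of that pattern (using aiohttp.ClientError as the exception to handle, which is an assumption based on the question):

import asyncio
import aiohttp

async def main():
    # do the websocket work here; raising aiohttp.ClientError
    # simulates the connection failing
    raise aiohttp.ClientError

loop = asyncio.get_event_loop()
try:
    loop.run_until_complete(main())
except aiohttp.ClientError:
    print("cleaning up")  # cancel tasks, close sessions, etc.
finally:
    loop.stop()
    loop.close()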

Related

Closing a client

There is the following code:
import asyncio
import aiohttp

aut_token = ("token")
tasks = []
iter_flag = False

class WAPI:
    async def receiver(WAPI_S):
        async for msg in WAPI_S:
            data = msg.json()
            raise aiohttp.ClientError  # test

    async def heartbeating(WAPI_S):
        while iter_flag:
            await WAPI_S.send_json({
                "op": 1,
                "d": None
            })
            await asyncio.sleep(42.5)

    async def event_manager():
        loop = asyncio.get_running_loop()
        try:
            async with aiohttp.ClientSession().ws_connect("url") as WAPI_S:
                task_receive = loop.create_task(WAPI.receiver(WAPI_S)); task_heartbeating = loop.create_task(WAPI.heartbeating(WAPI_S))
                tasks.append(task_receive); tasks.append(task_heartbeating)
                await asyncio.gather(*tasks)
        except aiohttp.ClientError:
            global iter_flag
            iter_flag = False
            await asyncio.sleep(44)
            [task.cancel() for task in tasks]
            try:
                loop.close()
            except:
                loop.stop()

asyncio.run(WAPI.event_manager())
I want to shut down the client correctly when the exception is raised. My implementation throws a "RuntimeError: Event loop stopped before Future completed" exception while executing. How do I do it right?
In method event_manager, the statement:
async with aiohttp.ClientSession().ws_connect("url") as WAPI_S:
needs to be replaced with:
async with aiohttp.ClientSession() as session:
    async with session.ws_connect("url") as WAPI_S:
Also, it is considered anti-Pythonic to use a list comprehension for its side effects. See "Is it Pythonic to use list comprehensions for just side effects?" So you really should replace:
[task.cancel() for task in tasks]
with:
for task in tasks:
    task.cancel()
Putting this all together:
async def event_manager():
    loop = asyncio.get_running_loop()
    try:
        async with aiohttp.ClientSession() as session:
            async with session.ws_connect("url") as WAPI_S:
                task_receive = loop.create_task(WAPI.receiver(WAPI_S)); task_heartbeating = loop.create_task(WAPI.heartbeating(WAPI_S))
                tasks.append(task_receive); tasks.append(task_heartbeating)
                await asyncio.gather(*tasks)
    except aiohttp.ClientError:
        global iter_flag
        iter_flag = False
        await asyncio.sleep(44)
        for task in tasks:
            task.cancel()
        try:
            loop.close()
        except:
            loop.stop()
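Note that because the whole thing is driven by asyncio.run(WAPI.event_manager()), the inner loop.close()/loop.stop() calls are not needed at all; as the first answer above points out, simply returning from event_manager after the cleanup is enough, and asyncio.run() will close the loop for you. A minimal self-contained sketch of that behaviour (the simulated error is my own stand-in for illustration):

import asyncio

async def event_manager():
    try:
        raise RuntimeError("simulated connection error")
    except RuntimeError:
        # cancel tasks, flip flags, etc., then simply return;
        # no loop.close() or loop.stop() is needed here
        return

asyncio.run(event_manager())  # asyncio.run() closes the loop itself
print("loop closed cleanly")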

Python - Cancel task in asyncio?

I have written code for an async pool below. In __aexit__ I'm cancelling the _worker tasks after the queued tasks are finished. But when I run the code, the worker tasks are not getting cancelled and the code runs forever. This is what a task looks like: <Task pending coro=<AsyncPool._worker() running at \async_pool.py:17> wait_for=<Future cancelled>>. The asyncio.wait_for is getting cancelled but not the worker tasks.
import asyncio

class AsyncPool:
    def __init__(self, coroutine, no_of_workers, timeout):
        self._loop = asyncio.get_event_loop()
        self._queue = asyncio.Queue()
        self._no_of_workers = no_of_workers
        self._coroutine = coroutine
        self._timeout = timeout
        self._workers = None

    async def _worker(self):
        while True:
            try:
                ret = False
                queue_item = await self._queue.get()
                ret = True
                result = await asyncio.wait_for(self._coroutine(queue_item), timeout=self._timeout, loop=self._loop)
            except Exception as e:
                print(e)
            finally:
                if ret:
                    self._queue.task_done()

    async def push_to_queue(self, item):
        self._queue.put_nowait(item)

    async def __aenter__(self):
        assert self._workers == None
        self._workers = [asyncio.create_task(self._worker()) for _ in range(self._no_of_workers)]
        return self

    async def __aexit__(self, type, value, traceback):
        await self._queue.join()
        for worker in self._workers:
            worker.cancel()
        await asyncio.gather(*self._workers, loop=self._loop, return_exceptions=True)
To use the AsyncPool:
async def something(item):
    print("got", item)
    await asyncio.sleep(item)

async def main():
    async with AsyncPool(something, 5, 2) as pool:
        for i in range(10):
            await pool.push_to_queue(i)

asyncio.run(main())
The Output in my terminal:
The problem is that your except Exception clause also catches cancellation, and ignores it. To add to the confusion, print(e) just prints an empty line in the case of a CancelledError, which is where the empty lines in the output come from. (Changing it to print(type(e)) shows what's going on.)
To correct the issue, change except Exception to something more specific, like except asyncio.TimeoutError. This change is not needed in Python 3.8+, where asyncio.CancelledError no longer derives from Exception but from BaseException, so except Exception doesn't catch it.
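A simplified, standalone stand-in for _worker showing the narrower clause in action (this is not the original class; the queue, worker and timeout here are just illustrative):

import asyncio

async def worker(queue, coro, timeout):
    while True:
        item = await queue.get()
        try:
            await asyncio.wait_for(coro(item), timeout=timeout)
        except asyncio.TimeoutError:
            # only timeouts are swallowed; CancelledError is not caught,
            # so worker_task.cancel() below can actually end the loop
            print("timed out:", item)
        finally:
            queue.task_done()

async def main():
    queue = asyncio.Queue()
    queue.put_nowait(1)
    worker_task = asyncio.create_task(worker(queue, asyncio.sleep, 2))
    await queue.join()
    worker_task.cancel()
    await asyncio.gather(worker_task, return_exceptions=True)
    print("worker cancelled cleanly")

asyncio.run(main())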
When an asyncio task has been created and then cancelled, the task object is still alive and needs to be "reclaimed", so you want to await the worker. However, once you await such a cancelled task, it will never give you back the expected return value; instead, asyncio.CancelledError is raised, and you need to catch it somewhere.
Because of this behaviour, I don't think you should gather them; instead, await each of the cancelled tasks, as they are supposed to return right away:
async def __aexit__(self, type, value, traceback):
    await self._queue.join()
    for worker in self._workers:
        worker.cancel()
    for worker in self._workers:
        try:
            await worker
        except asyncio.CancelledError:
            print("worker cancelled:", worker)
This appears to work. The event is a counting timer, and when it expires it cancels the tasks.
import asyncio
from datetime import datetime as dt
from datetime import timedelta as td
import random
import time

class Program:
    def __init__(self):
        self.duration_in_seconds = 20
        self.program_start = dt.now()
        self.event_has_expired = False
        self.canceled_success = False

    async def on_start(self):
        print("On Start Event Start! Applying Overrides!!!")
        await asyncio.sleep(random.randint(3, 9))

    async def on_end(self):
        print("On End Releasing All Overrides!")
        await asyncio.sleep(random.randint(3, 9))

    async def get_sensor_readings(self):
        print("getting sensor readings!!!")
        await asyncio.sleep(random.randint(3, 9))

    async def evauluate_data(self):
        print("checking data!!!")
        await asyncio.sleep(random.randint(3, 9))

    async def check_time(self):
        if dt.now() - self.program_start > td(seconds=self.duration_in_seconds):
            self.event_has_expired = True
            print("Event is DONE!!!")
        else:
            print("Event is not done! ", dt.now() - self.program_start)

    async def main(self):
        # script starts, do self.on_start() only once
        await self.on_start()
        print("On Start Done!")
        while not self.canceled_success:
            readings = asyncio.ensure_future(self.get_sensor_readings())
            analysis = asyncio.ensure_future(self.evauluate_data())
            checker = asyncio.ensure_future(self.check_time())
            if not self.event_has_expired:
                await readings
                await analysis
                await checker
            else:
                # close other tasks before final shutdown
                readings.cancel()
                analysis.cancel()
                checker.cancel()
                self.canceled_success = True
                print("cancelled hit!")
        # script ends, do self.on_end() only once when the event is done
        await self.on_end()
        print('Done Deal!')
async def main():
    program = Program()
    await program.main()

asyncio.run(main())

Python asyncio blocks on coroutine, but not websockets

Consider the following code:
async def remote_data_retriever():
    remote = Remote(sock_path)
    while True:
        Cached.update_global(remote.get_global())
        await asyncio.sleep(RTR_RETR_INTERVAL)

async def on_message(websocket, path):
    async for message in websocket:
        data = Cached.get_global()
        await websocket.send(json.dumps(data.__dict__))

if __name__ == '__main__':
    ssl_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
    ssl_context.load_cert_chain(RTR_CERT_PATH)
    app_server = websockets.serve(on_message, RTR_LISTEN_HOST, RTR_LISTEN_PORT, ssl=ssl_context)
    try:
        asyncio.get_event_loop().run_until_complete(app_server)
        print('1')
        asyncio.get_event_loop().run_until_complete(remote_data_retriever())
        print('2')
        asyncio.get_event_loop().run_forever()
    except Exception as e:
        print(e)
This code will print '1' and then never print '2'. How do I correctly schedule a coroutine so it does NOT block on the following call?
asyncio.get_event_loop().run_until_complete(remote_data_retriever())
run_until_complete(task) starts the task at once and waits until it actually finishes, not just until its first await. But your second task uses while True, so it never ends.
You should instead add the tasks to the loop with create_task(task) and start the loop afterwards with run_forever().
asyncio.get_event_loop().create_task(app_server)
print('1')
asyncio.get_event_loop().create_task(remote_data_retriever())
print('2')
asyncio.get_event_loop().run_forever()
Then both tasks will run in one loop, and each await will suspend one task so the other can run.
Example code which everyone can run:
import asyncio

async def task_1():
    number = 0
    while True:
        number += 1
        print('task1', number)
        await asyncio.sleep(1)

async def task_2():
    number = 0
    while True:
        number += 1
        print('task2', number)
        await asyncio.sleep(1)

if __name__ == '__main__':
    loop = asyncio.get_event_loop()
    try:
        loop.create_task(task_1())
        print('1')
        loop.create_task(task_2())
        print('2')
        loop.run_forever()
    except Exception as e:
        print(e)
Your exact suggestion doesn't work, as create_task() throws an exception (correctly) claiming app_server is not a coroutine. However, based on your proposed code, I've been able to get it to work like this:
loop = asyncio.get_event_loop()
try:
    asyncio.ensure_future(app_server, loop=loop)
    print('1')
    loop.create_task(remote_data_retriever())
    print('2')
    loop.run_forever()
except Exception as e:
    print(e)
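This works because asyncio.ensure_future() accepts any awaitable (websockets.serve() returns an awaitable object, not a coroutine), while loop.create_task() requires a coroutine. An equivalent sketch that keeps create_task() by wrapping the awaitable (my own variation; start_server is a hypothetical helper, app_server and remote_data_retriever are from the question):

async def start_server():
    # wrap the awaitable in a coroutine so create_task() accepts it
    await app_server

loop = asyncio.get_event_loop()
try:
    loop.create_task(start_server())
    print('1')
    loop.create_task(remote_data_retriever())
    print('2')
    loop.run_forever()
except Exception as e:
    print(e)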

How to wait for object to change state

Inside my async handler I want to wait until a task's state changes. For now, I just check the state in an endless loop and wait. Here is an example, the wait_until_done function:
import asyncio

class LongTask:
    state = 'PENDING'

my_task = LongTask()

def done():
    my_task.state = 'DONE'

async def wait_until_done():
    while True:
        if my_task.state == 'PENDING':
            await asyncio.sleep(2)
        else:
            break
    print("Finally, the task is done")

def main(loop, *args, **kwargs):
    asyncio.ensure_future(wait_until_done())
    loop.call_later(delay=5, callback=done)

loop = asyncio.get_event_loop()
main(loop)
loop.run_forever()
Is there a better way of doing that?
Just to avoid confusion: I guess you are not talking about asyncio.Task, but about some variable state instead, right?
In that case you have Future and the synchronization primitives, which allow you to wait asynchronously until something changes.
If you need to switch between two states, asyncio.Event is probably what you want. Here's a little example:
import asyncio

my_task = asyncio.Event()

def done():
    my_task.set()

async def wait_until_done():
    await my_task.wait()  # await until the event is .set()
    print("Finally, the task is done")

async def main():
    loop.call_later(delay=5, callback=done)
    await wait_until_done()

loop = asyncio.get_event_loop()
try:
    loop.run_until_complete(main())
finally:
    loop.run_until_complete(loop.shutdown_asyncgens())
    loop.close()
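For completeness, a similar sketch built on a bare Future instead of an Event (my own variation; the original answer only shows the Event version):

import asyncio

async def main():
    loop = asyncio.get_running_loop()
    done_future = loop.create_future()

    # schedule the "done" signal after 5 seconds, like call_later above
    loop.call_later(5, done_future.set_result, 'DONE')

    result = await done_future  # suspends until set_result() is called
    print("Finally, the task is done:", result)

asyncio.run(main())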
Update: a more complex example that keeps the LongTask interface:
import asyncio

class LongTask:
    _event = asyncio.Event()

    @property
    def state(self):
        return 'PENDING' if not type(self)._event.is_set() else 'DONE'

    @state.setter
    def state(self, val):
        if val == 'PENDING':
            type(self)._event.clear()
        elif val == 'DONE':
            type(self)._event.set()
        else:
            raise ValueError('Bad state value.')

    async def is_done(self):
        return (await type(self)._event.wait())

my_task = LongTask()

def done():
    my_task.state = 'DONE'

async def wait_until_done():
    await my_task.is_done()
    print("Finally, the task is done")

async def main():
    loop.call_later(delay=5, callback=done)
    await wait_until_done()

loop = asyncio.get_event_loop()
try:
    loop.run_until_complete(main())
finally:
    loop.run_until_complete(loop.shutdown_asyncgens())
    loop.close()

Execute future only when accessed

I would like to do something like the following:
import asyncio

async def g():
    print('called g')
    return 'somevalue'

async def f():
    x = g()

loop = asyncio.get_event_loop()
loop.run_until_complete(f())
loop.close()
Where there is no output. Notice that I did not await g(). This generates a "coroutine 'g' was never awaited" warning, but I'm looking for behaviour where g most definitely did not run.
This is useful for me because I have a long-running operation with a complex setup, but I only need its result in certain situations, so why bother running it when it is not needed? Kind of an 'on demand' situation.
How can I do this?
One option is to use simple flags to signal tasks:
import asyncio
import random

async def g(info):
    print('> called g')
    if not info['skip']:
        print('* running g', info['id'])
        await asyncio.sleep(random.uniform(1, 3))
    else:
        print('- skiping g', info['id'])
    print('< done g', info['id'])
    return info['id']

async def main():
    data = [{
        'id': i,
        'skip': False
    } for i in range(10)]

    # schedule 10 tasks to run later
    tasks = [asyncio.ensure_future(g(info)) for info in data]

    # tell some tasks to skip processing
    data[2]['skip'] = True
    data[5]['skip'] = True
    data[6]['skip'] = True

    # wait for all results
    results = await asyncio.gather(*tasks)
    print(results)
    print("Done!")

loop = asyncio.get_event_loop()
loop.run_until_complete(main())
loop.close()
A different option would be using task.cancel:
import asyncio

async def coro(x):
    print('coro', x)
    return x

async def main():
    task1 = asyncio.ensure_future(coro(1))
    task2 = asyncio.ensure_future(coro(2))
    task3 = asyncio.ensure_future(coro(3))
    task2.cancel()
    for task in asyncio.as_completed([task1, task2, task3]):
        try:
            result = await task
            print("success", result)
        except asyncio.CancelledError as e:
            print("cancelled", e)

loop = asyncio.get_event_loop()
loop.run_until_complete(main())
loop.close()
