I'm trying to fire a coroutine from within a loop. Here's a simple example of what I'm trying to achieve:
import time
import random
import asyncio
def listen():
    while True:
        yield random.random()
        time.sleep(3)

async def dosomething(data: float):
    print("Working on data", data)
    asyncio.sleep(2)
    print("Processed data!")

async def main():
    for pos in listen():
        asyncio.create_task(dosomething(pos))

if __name__ == '__main__':
    loop = asyncio.get_event_loop()
    loop.run_until_complete(main())
Unfortunately, this doesn't work and my dosomething coroutine never executes... what am I doing wrong?
The asyncio.create_task function schedules the execution of a Task; it should be awaited if you want to wait until it is complete.
Moreover, asyncio.sleep(2) in your code should also be awaited, otherwise it will raise an error/warning.
The right way:
import time
import random
import asyncio
def listen():
    while True:
        yield random.random()
        time.sleep(3)

async def dosomething(data: float):
    print("Working on data", data)
    await asyncio.sleep(2)
    print("Processed data!")

async def main():
    for pos in listen():
        await asyncio.create_task(dosomething(pos))

if __name__ == '__main__':
    loop = asyncio.get_event_loop()
    loop.run_until_complete(main())
Sample output:
Working on data 0.9645515392725723
Processed data!
Working on data 0.9249656672476657
Processed data!
Working on data 0.13635467058997397
Processed data!
Working on data 0.03941252405458562
Processed data!
Working on data 0.6299882183389822
Processed data!
Working on data 0.9143748948769984
Processed data!
...
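Note that because each task is awaited as soon as it is created, the tasks above run one at a time, which matches the sample output. If you want the dosomething calls to overlap instead, here is a minimal sketch of one way to do it (my own variation, not part of the answer; it assumes the blocking generator is rewritten as an async generator so it no longer stalls the event loop):

import asyncio
import random

async def listen():
    while True:
        yield random.random()
        await asyncio.sleep(3)  # non-blocking pause between readings

async def dosomething(data: float):
    print("Working on data", data)
    await asyncio.sleep(2)
    print("Processed data!")

async def main():
    background = set()
    async for pos in listen():
        task = asyncio.create_task(dosomething(pos))
        background.add(task)                        # keep a strong reference
        task.add_done_callback(background.discard)  # drop it once finished

asyncio.run(main())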
I wanted to point out that, after playing around, I ended up using a producer/consumer architecture to achieve what I wanted. I appreciate that I didn't make my exact use case clear in the original question, but here's a simplified snippet of what I ended up implementing:
import asyncio
import random
from datetime import datetime
from pydantic import BaseModel
class Measurement(BaseModel):
    data: float
    time: datetime

async def measure(queue: asyncio.Queue):
    while True:
        # Replicate blocking call to receive data
        await asyncio.sleep(1)
        print("Measurement complete!")
        for i in range(3):
            data = Measurement(
                data=random.random(),
                time=datetime.utcnow()
            )
            await queue.put(data)
        await queue.put(None)

async def process(queue: asyncio.Queue):
    while True:
        data = await queue.get()
        print(f"Got measurement! {data}")
        # Replicate pause for http request
        await asyncio.sleep(0.3)
        print("Sent data to server")

loop = asyncio.get_event_loop()
queue = asyncio.Queue()
measurement = measure(queue)
processor = process(queue)
loop.run_until_complete(asyncio.gather(processor, measurement))
loop.close()
I should point out here (something I didn't quite understand at first) that it's imperative that any blocking calls you make can be await-ed; otherwise, you might find that the consumer never executes.
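For example, if the measurement came from a driver call that cannot be awaited, one option is to push it into the default executor so the event loop stays free. This is a minimal sketch under that assumption; blocking_measure() is a hypothetical stand-in, not part of my real code:

import asyncio
import time

def blocking_measure() -> float:
    time.sleep(1)  # hypothetical stand-in for a blocking driver call
    return 42.0

async def measure(queue: asyncio.Queue):
    loop = asyncio.get_running_loop()
    while True:
        # Run the blocking call in a worker thread so process() keeps running
        data = await loop.run_in_executor(None, blocking_measure)
        await queue.put(data)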
I need to be able to keep adding coroutines to the asyncio loop at runtime. I tried using create_task() thinking that this would do what I want, but it still needs to be awaited.
This is the code I had, not sure if there is a simple edit to make it work?
async def get_value_from_api():
    global ASYNC_CLIENT
    return ASYNC_CLIENT.get(api_address)

async def print_subs():
    count = await get_value_from_api()
    print(count)

async def save_subs_loop():
    while True:
        asyncio.create_task(print_subs())
        time.sleep(0.1)

async def start():
    global ASYNC_CLIENT
    async with httpx.AsyncClient() as ASYNC_CLIENT:
        await save_subs_loop()

asyncio.run(start())
I once created a similar pattern when I was mixing trio and kivy, as a demonstration of running multiple coroutines asynchronously.
It used a trio.MemoryChannel, which is roughly equivalent to asyncio.Queue; I'll just refer to it as a queue here.
The main idea is:
Wrap each task in a class which has a run function.
Give the class object its own async method that puts the object itself back into the queue when execution is done.
Create a global task-spawning loop that waits for objects in the queue and creates a task for each one.
import asyncio
import traceback

import httpx

async def task_1(client: httpx.AsyncClient):
    resp = await client.get("http://127.0.0.1:5000/")
    print(resp.read())
    await asyncio.sleep(0.1)  # without this would be IP ban

async def task_2(client: httpx.AsyncClient):
    resp = await client.get("http://127.0.0.1:5000/meow/")
    print(resp.read())
    await asyncio.sleep(0.5)

class CoroutineWrapper:
    def __init__(self, queue: asyncio.Queue, coro_func, *param):
        self.func = coro_func
        self.param = param
        self.queue = queue

    async def run(self):
        try:
            await self.func(*self.param)
        except Exception:
            traceback.print_exc()
            return

        # put itself back into queue
        await self.queue.put(self)

class KeepRunning:
    def __init__(self):
        # queue for gathering CoroutineWrapper
        self.queue = asyncio.Queue()

    def add_task(self, coro, *param):
        wrapped = CoroutineWrapper(self.queue, coro, *param)

        # add tasks to be executed in queue
        self.queue.put_nowait(wrapped)

    async def task_processor(self):
        task: CoroutineWrapper
        while task := await self.queue.get():
            # wait for a new CoroutineWrapper object, then schedule its async method
            asyncio.create_task(task.run())

async def main():
    keep_running = KeepRunning()

    async with httpx.AsyncClient() as client:
        keep_running.add_task(task_1, client)
        keep_running.add_task(task_2, client)
        await keep_running.task_processor()

asyncio.run(main())
Server
import time
from flask import Flask

app = Flask(__name__)

@app.route("/")
def hello():
    return str(time.time())

@app.route("/meow/")
def meow():
    return "meow"

app.run()
Output:
b'meow'
b'1639920445.965701'
b'1639920446.0767004'
b'1639920446.1887035'
b'1639920446.2986999'
b'1639920446.4067013'
b'meow'
b'1639920446.516704'
b'1639920446.6267014'
...
You can see the tasks running repeatedly at their own pace.
Old answer
It seems like you only want to cycle through a fixed number of tasks.
In that case, just iterate over a list of coroutines with itertools.cycle.
But this is no different from synchronous code, so let me know if you need it to be asynchronous.
import asyncio
import itertools

import httpx

async def main_task(client: httpx.AsyncClient):
    resp = await client.get("http://127.0.0.1:5000/")
    print(resp.read())
    await asyncio.sleep(0.1)  # without this would be IP ban

async def main():
    async with httpx.AsyncClient() as client:
        for coroutine in itertools.cycle([main_task]):
            await coroutine(client)

asyncio.run(main())
Server:
import time
from flask import Flask

app = Flask(__name__)

@app.route("/")
def hello():
    return str(time.time())

app.run()
Output:
b'1639918937.7694323'
b'1639918937.8804302'
b'1639918937.9914327'
b'1639918938.1014295'
b'1639918938.2124324'
b'1639918938.3204308'
...
asyncio.create_task() works as you describe it. The problem you are having is that you create an infinite loop here:
async def save_subs_loop():
    while True:
        asyncio.create_task(print_subs())
        time.sleep(0.1)  # do not use time.sleep() in async code EVER
save_subs_loop() keeps creating tasks, but control is never yielded back to the event loop, because there is no await in there. Try:
async def save_subs_loop():
    while True:
        asyncio.create_task(print_subs())
        await asyncio.sleep(0.1)  # yield control back to the loop to give tasks a chance to actually run
This problem is so common I'm thinking Python should raise a RuntimeError if it detects time.sleep() within a coroutine :-)
You might want to try the TaskThread framework:
It allows you to add tasks at runtime
Tasks are re-scheduled periodically (like in your while loop up there)
There is a consumer/producer framework built in (parent/child relationships), which you seem to need
Disclaimer: I wrote TaskThread out of necessity and it's been a life saver.
I'm scraping some websites, parallelizing the requests library using asyncio:
def run():
    asyncio.run(scrape())

def check_link(link):
    # .... code code code ...
    response = requests.get(link)
    # .... code code code ...
    write_some_stats_into_db()

async def scrape():
    # .... code code code ...
    task = asyncio.get_event_loop().run_in_executor(None, check_link, link)
    # .... code code code ...
    if done:
        for task in all_tasks:
            task.cancel()
I only need to find one 'correct' link; after that, I can stop the program. However, because check_link is run in the executor, its threads are automatically daemonized, so even after calling task.cancel(), I have to wait for all of the other still-running check_link calls to complete.
Do you have any ideas how to 'force-kill' the other running checks in the thread executor?
You can do it the following way. Actually, from my point of view, if you do not have to use asyncio for the task, use plain threads without any async loop, since asyncio makes your code more complicated:
import asyncio
from random import randint
import time
from functools import partial

# imagine that this is the links array
LINKS = list(range(1000))

# how many thread-workers you want to have simultaneously
WORKERS_NUM = 10

# stops the app
STOP_EVENT = asyncio.Event()
STOP_EVENT.clear()

def check_link(link: str) -> int:
    """Checks link in another thread and returns result."""
    time.sleep(3)
    r = randint(1, 11)
    print(f"{link}____{r}\n")
    return r

async def check_link_wrapper(q: asyncio.Queue):
    """Async wrapper around the sync function."""
    loop = asyncio.get_event_loop()
    while not STOP_EVENT.is_set():
        link = await q.get()
        if link is None:  # "poison pill": stop this worker
            break
        value = await loop.run_in_executor(None, func=partial(check_link, link))
        if value == 10:
            STOP_EVENT.set()
            print("Hurray! We got TEN!")

async def feeder(q: asyncio.Queue):
    """Send tasks and the "poison pill" to all workers."""
    # send tasks to workers
    for link in LINKS:
        await q.put(link)

    # ask workers to stop
    for _ in range(WORKERS_NUM):
        await q.put(None)

async def amain():
    """Main async function of the app."""
    # maxsize is one, since we want the app
    # to stop as fast as possible if the stop condition is met
    q = asyncio.Queue(maxsize=1)

    # we create a separate task, since we do not want to await the feeder;
    # we are interested only in the workers
    asyncio.create_task(feeder(q))

    await asyncio.gather(
        *[check_link_wrapper(q) for _ in range(WORKERS_NUM)],
    )

if __name__ == '__main__':
    asyncio.run(amain())
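And if you drop asyncio entirely, as suggested above, the same idea fits into plain threading. A minimal sketch of that alternative (my own illustration, not code from the answer above):

import time
import threading
from random import randint
from concurrent.futures import ThreadPoolExecutor

LINKS = list(range(1000))
STOP_EVENT = threading.Event()

def check_link(link: int) -> None:
    if STOP_EVENT.is_set():
        return  # a result was already found; skip the remaining links
    time.sleep(3)  # stand-in for requests.get(link)
    if randint(1, 11) == 10:
        print(f"Hurray! We got TEN on {link}!")
        STOP_EVENT.set()

# map() submits all links up front; workers that start after the event
# is set return immediately, so the pool drains quickly
with ThreadPoolExecutor(max_workers=10) as pool:
    pool.map(check_link, LINKS)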
Based on the solution that I got here: Running multiple sockets using asyncio in python,
I tried to also add the computation part using asyncio.
Setup: Python 3.7.4
import msgpack
import threading
import os
import asyncio
import concurrent.futures
import functools
import numpy as np
import nest_asyncio
nest_asyncio.apply()

class ThreadSafeElem(bytes):
    def __init__(self, *p_arg, **n_arg):
        self._lock = threading.Lock()
    def __enter__(self):
        self._lock.acquire()
        return self
    def __exit__(self, type, value, traceback):
        self._lock.release()

elem = ThreadSafeElem()

async def serialize(data):
    return msgpack.packb(data, use_bin_type=True)

async def serialize1(data1):
    return msgpack.packb(data1, use_bin_type=True)

async def process_data(data, data1):
    loop = asyncio.get_event_loop()
    future = await loop.run_in_executor(None, functools.partial(serialize, data))
    future1 = await loop.run_in_executor(None, functools.partial(serialize1, data1))
    return await asyncio.gather(future, future1)

################ Calculation #############################
def calculate_data():
    global elem
    while True:
        try:
            # ... data is calculated (some dictionary) ...
            elem, elem1 = asyncio.run(process_data(data, data1))
        except:
            pass
#####################################################################

def get_data():
    return elem

def get_data1():
    return elem1

########### START SERVER AND get data continuously ################
async def client_thread(reader, writer):
    while True:
        try:
            bytes_received = await reader.read(100)
            package_type = np.frombuffer(bytes_received, dtype=np.int8)
            if package_type == 1:
                nn_output = get_data1()
            if package_type == 2:
                nn_output = get_data()
            writer.write(nn_output)
            await writer.drain()
        except:
            pass

async def start_servers(host, port):
    server = await asyncio.start_server(client_thread, host, port)
    await server.serve_forever()

async def start_calculate():
    await asyncio.run(calculate_data())

def enable_sockets():
    try:
        host = '127.0.0.1'
        port = 60000
        sockets_number = 6
        loop = asyncio.get_event_loop()
        for i in range(sockets_number):
            loop.create_task(start_servers(host, port + i))
        loop.create_task(start_calculate())
        loop.run_forever()
    except:
        print("weird exceptions")
##############################################################################

enable_sockets()
The issue is that when I make a call from a client, the server does not give me anything.
I tested the program with dummy data and no asyncio on the calculation part, i.e. without this loop.create_task(start_calculate()), and the server responded correctly.
I also ran the calculation without adding it in enable_sockets and it worked. It is also running with this implementation, but the problem is that the server is not returning anything.
I did it like this because I need the calculation part to run continuously, and when one of the clients calls, to return the data available at that point.
An asyncio event loop cannot be nested inside another, and there is no point in doing so: asyncio.run (and similar) blocks the current thread until done. This does not increase parallelism, and merely disables any outer event loop.
If you want to nest another asyncio task, directly run it in the current event loop. If you want to run a non-cooperative, blocking task, run it in the event loop executor.
async def start_calculate():
    loop = asyncio.get_running_loop()
    await loop.run_in_executor(None, calculate_data)
The default executor uses threads – this allows running blocking tasks, but does not increase parallelism. Use a custom ProcessPoolExecutor to use additional cores:
import concurrent.futures

async def start_calculate():
    loop = asyncio.get_running_loop()
    with concurrent.futures.ProcessPoolExecutor() as pool:
        await loop.run_in_executor(pool, calculate_data)
Why do you call asyncio.run() multiple times?
This function always creates a new event loop and closes it at the end. It should be used as a main entry point for asyncio programs, and should ideally only be called once.
I would advise you to read the docs
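A minimal sketch of that single-entry-point pattern (my own illustration, with made-up job names):

import asyncio

async def job(name: str, delay: float):
    await asyncio.sleep(delay)
    print(f"{name} done")

async def main():
    # everything is scheduled inside the one loop that asyncio.run() creates
    await asyncio.gather(job("a", 0.1), job("b", 0.2))

if __name__ == "__main__":
    asyncio.run(main())  # the single entry point of the program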
I'm trying to create a WebSocket command line client that waits for messages from a WebSocket server but waits for user input at the same time.
Regularly polling multiple online sources every second works fine on the server (the one running at localhost:6789 in this example), but instead of using Python's normal sleep() method, it uses asyncio.sleep(), which makes sense because sleeping and asynchronously sleeping aren't the same thing, at least not under the hood.
Similarly, waiting for user input and asynchronously waiting for user input aren't the same thing, but I can't figure out how to asynchronously wait for user input in the same way that I can asynchronously wait for an arbitrary number of seconds, so that the client can deal with incoming messages from the WebSocket server while simultaneously waiting for user input.
The comment below in the else-clause of monitor_cmd() hopefully explains what I'm getting at:
import asyncio
import json
import websockets
async def monitor_ws():
    uri = 'ws://localhost:6789'
    async with websockets.connect(uri) as websocket:
        async for message in websocket:
            print(json.dumps(json.loads(message), indent=2, sort_keys=True))

async def monitor_cmd():
    while True:
        sleep_instead = False
        if sleep_instead:
            await asyncio.sleep(1)
            print('Sleeping works fine.')
        else:
            # Seems like I need the equivalent of:
            # line = await asyncio.input('Is this your line? ')
            line = input('Is this your line? ')
            print(line)

try:
    asyncio.get_event_loop().run_until_complete(asyncio.wait([
        monitor_ws(),
        monitor_cmd()
    ]))
except KeyboardInterrupt:
    quit()
This code just waits for input indefinitely and does nothing else in the meantime, and I understand why. What I don't understand is how to fix it. :)
Of course, if I'm thinking about this problem in the wrong way, I'd be very happy to learn how to remedy that as well.
You can use the aioconsole third-party package to interact with stdin in an asyncio-friendly manner:
line = await aioconsole.ainput('Is this your line? ')
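For instance, a self-contained sketch (the heartbeat() coroutine is my own addition, just to show the loop stays responsive while waiting for input):

import asyncio
import aioconsole

async def echo_forever():
    while True:
        line = await aioconsole.ainput('Is this your line? ')
        print(line)

async def heartbeat():
    while True:
        await asyncio.sleep(1)
        print('(event loop is still running)')

async def main():
    # both coroutines make progress: input no longer blocks the loop
    await asyncio.gather(echo_forever(), heartbeat())

asyncio.run(main())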
Borrowing heavily from aioconsole, if you would rather avoid using an external library you could define your own async input function:
import sys
import asyncio

async def ainput(string: str) -> str:
    await asyncio.get_event_loop().run_in_executor(
        None, lambda s=string: sys.stdout.write(s + ' '))
    return await asyncio.get_event_loop().run_in_executor(
        None, sys.stdin.readline)
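Usage is then the same as the aioconsole version: line = await ainput('Is this your line? ').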
Borrowing heavily from aioconsole, there are two ways to handle this.
Start a new daemon thread:
import sys
import asyncio
import threading
from concurrent.futures import Future

async def run_as_daemon(func, *args):
    future = Future()
    future.set_running_or_notify_cancel()

    def daemon():
        try:
            result = func(*args)
        except Exception as e:
            future.set_exception(e)
        else:
            future.set_result(result)

    threading.Thread(target=daemon, daemon=True).start()
    return await asyncio.wrap_future(future)

async def main():
    data = await run_as_daemon(sys.stdin.readline)
    print(data)

if __name__ == "__main__":
    asyncio.run(main())
Use a stream reader:
import sys
import asyncio

async def get_stream_reader(pipe) -> asyncio.StreamReader:
    loop = asyncio.get_event_loop()
    reader = asyncio.StreamReader(loop=loop)
    protocol = asyncio.StreamReaderProtocol(reader)
    await loop.connect_read_pipe(lambda: protocol, pipe)
    return reader

async def main():
    reader = await get_stream_reader(sys.stdin)
    data = await reader.readline()
    print(data)

if __name__ == "__main__":
    asyncio.run(main())
I'm programming a server in Python 3 which takes screenshots and sends them over websockets. I have a coroutine for handling connections, and I would like to create another coroutine that takes a screenshot at some interval. The screenshot coroutine will probably run in a different thread, and I will need to propagate the result to some shared variable with a read-write lock, to be able to send it. My questions (the result should be multiplatform, if possible):
How is it possible to schedule tasks like this? I created a server which runs forever, and I can create a periodic coroutine, but somehow I can't put them together in one loop.
What is a good way to propagate the result from one thread (or coroutine, if the server is single-threaded) to another?
I found this piece of code, which seems similar to what I need, but I can't get it to work (the second coroutine doesn't execute). Can someone correct this, with and without multithreading?
async def print_var():
    global number
    await asyncio.sleep(2)
    print(number)

async def inc_var():
    global number
    await asyncio.sleep(5)
    number += 1

number = 0
asyncio.get_event_loop().run_until_complete(print_var())
asyncio.async(inc_var)
asyncio.get_event_loop().run_forever()
Post-answer edit
In the end, after more hours of googling, I actually got it to work on a single thread, so there's no danger of a race condition. (But I'm still not sure what ensure_future does, and why it isn't called on the event loop.)
users = set()

def register(websocket):
    users.add(websocket)

def unregister(websocket):
    users.remove(websocket)

async def get_screenshot():
    global screenshot
    while True:
        screenshot = screenshot()
        await asyncio.sleep(0.2)

async def server(websocket, path):
    global screenshot
    register(websocket)
    try:
        async for message in websocket:
            respond(screenshot)
    finally:
        unregister(websocket)

def main():
    asyncio.get_event_loop().run_until_complete(
        websockets.serve(server, 'localhost', 6789))
    asyncio.ensure_future(get_screenshot())
    asyncio.get_event_loop().run_forever()

main()
In Python 3.7:
import asyncio
import websockets

CAPTURE_INTERVAL = 1
running = True
queues = set()

async def handle(ws, path):
    queue = asyncio.Queue()
    queues.add(queue)
    while running:
        data = await queue.get()
        if not data:
            break
        await ws.send(data)

def capture_screen():
    # Do some work here, preferably in C extension without holding the GIL
    return b'screenshot data'

async def main():
    global running
    loop = asyncio.get_running_loop()
    server = await websockets.serve(handle, 'localhost', 8765)
    try:
        while running:
            data = await loop.run_in_executor(None, capture_screen)
            for queue in queues:
                queue.put_nowait(data)
            await asyncio.sleep(CAPTURE_INTERVAL)
    finally:
        running = False
        for queue in queues:
            queue.put_nowait(None)
        server.close()
        await server.wait_closed()

if __name__ == '__main__':
    asyncio.run(main())
Please note, this is only for demonstrating the producer/consumer fan-out pattern. The queues are not essential: you can simply send data to all server.sockets in main() directly, while in handle() you only need to worry about incoming websocket messages. For example, a client may control the image compression rate like this:
import asyncio
import websockets

CAPTURE_INTERVAL = 1
DEFAULT = b'default'
qualities = {}

async def handle(ws, path):
    try:
        async for req in ws:
            qualities[ws] = req
    finally:
        qualities.pop(ws, None)

def capture_screen():
    # Do some work here, preferably in C extension without holding the GIL
    return {
        DEFAULT: b'default screenshot data',
        b'60': b'data at 60% quality',
        b'80': b'data at 80% quality',
    }

async def main():
    loop = asyncio.get_running_loop()
    server = await websockets.serve(handle, 'localhost', 8765)
    try:
        while True:
            data = await loop.run_in_executor(None, capture_screen)
            for ws in server.sockets:
                quality = qualities.get(ws, DEFAULT)
                if quality not in data:
                    quality = DEFAULT
                asyncio.create_task(ws.send(data[quality]))
            await asyncio.sleep(CAPTURE_INTERVAL)
    finally:
        server.close()
        await server.wait_closed()

if __name__ == '__main__':
    asyncio.run(main())