I am trying to use the pika AMQP client with RabbitMQ in Python, but I am struggling to wait for a connection to open properly. I want to be able to simply await an async function, but it hangs; the furthest it gets is printing "Future is done" from one of the helper functions. Ultimately I am trying to avoid deeply nested callbacks and would like to use async/await where possible. I thought this was on the right path, but I can't get it to work, so any advice would be appreciated. There is probably a better way; I am not very familiar with async patterns in Python and am basing most of my architecture on my familiarity with Node.
My class
import asyncio
from asyncio import Future

import pika

from src.services.pika_helpers import init_select_connection_async

asyncio.get_event_loop()

EXCHANGE_NAME = 'ACTIVE_LEARNING'

class AsyncRabbitMQ():
    def __init__(self):
        self.connection = None
        self.channel = None
        self.queue_name = None
        asyncio.run(self.initialize())

    async def update_future_cb(self):
        self.future_res.set_result("Yep!")
        self.future_res.done()

    async def initialize(self):
        selectConn = await init_select_connection_async()
        print(selectConn)
        print("Sleeping!")
        await asyncio.sleep(1)
        print("Done sleeping!")
        print("Calling callback!")
        return
My helper functions
def on_open_callback(connection, future):
    print("Calling callback")
    future.set_result(connection)
    future.done()
    print("Future is done")
    return True

async def init_select_connection_async():
    future_result = Future()
    print("Going to open connection")
    connection = pika.SelectConnection(
        on_open_callback=lambda v: on_open_callback(v, future_result),
        on_open_error_callback=print,
    )
    connection.ioloop.start()
    print("Connection opened")
    connection = await future_result
    return connection
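For what it's worth, connection.ioloop.start() runs pika's own select-based I/O loop and does not return, so execution never reaches await future_result; the open callback fires inside that loop, which is why "Future is done" is the last thing printed. Below is a minimal sketch of an asyncio-native alternative, assuming pika's AsyncioConnection adapter and a broker on localhost (init_connection_async is an illustrative name, not part of the question's code):

import asyncio

import pika
from pika.adapters.asyncio_connection import AsyncioConnection

async def init_connection_async():
    loop = asyncio.get_running_loop()
    future = loop.create_future()
    AsyncioConnection(
        pika.ConnectionParameters('localhost'),
        # the adapter calls this with the opened connection
        on_open_callback=future.set_result,
        # depending on the pika version, err may be an exception or a string
        on_open_error_callback=lambda conn, err: future.set_exception(
            err if isinstance(err, BaseException) else ConnectionError(str(err))
        ),
    )
    # AsyncioConnection drives itself on the running asyncio loop,
    # so awaiting the future does not block anything
    return await future

Because the adapter shares asyncio's loop, there is no separate ioloop.start() call to block on, and the rest of initialize() can stay plain async/await.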
Related
I need to be able to keep adding coroutines to the asyncio loop at runtime. I tried using create_task(), thinking that this would do what I want, but it still needs to be awaited.
This is the code I had; I'm not sure if there is a simple edit to make it work.
import asyncio
import time

import httpx

async def get_value_from_api():
    global ASYNC_CLIENT
    return ASYNC_CLIENT.get(api_address)

async def print_subs():
    count = await get_value_from_api()
    print(count)

async def save_subs_loop():
    while True:
        asyncio.create_task(print_subs())
        time.sleep(0.1)

async def start():
    global ASYNC_CLIENT
    async with httpx.AsyncClient() as ASYNC_CLIENT:
        await save_subs_loop()

asyncio.run(start())
I once created a similar pattern when I was mixing trio and kivy, as a demonstration of running multiple coroutines asynchronously.
It used a trio.MemoryChannel, which is roughly equivalent to asyncio.Queue; I'll just refer to it as a queue here.
The main idea is:
Wrap each task in a class that has a run method.
Have the object put itself back into the queue when its execution is done.
Create a global task-spawning loop that waits for objects in the queue and creates a task for each object it receives.
import asyncio
import traceback

import httpx

async def task_1(client: httpx.AsyncClient):
    resp = await client.get("http://127.0.0.1:5000/")
    print(resp.read())
    await asyncio.sleep(0.1)  # without this there would be an IP ban

async def task_2(client: httpx.AsyncClient):
    resp = await client.get("http://127.0.0.1:5000/meow/")
    print(resp.read())
    await asyncio.sleep(0.5)

class CoroutineWrapper:
    def __init__(self, queue: asyncio.Queue, coro_func, *param):
        self.func = coro_func
        self.param = param
        self.queue = queue

    async def run(self):
        try:
            await self.func(*self.param)
        except Exception:
            traceback.print_exc()
            return
        # put itself back into the queue
        await self.queue.put(self)

class KeepRunning:
    def __init__(self):
        # queue for gathering CoroutineWrapper objects
        self.queue = asyncio.Queue()

    def add_task(self, coro, *param):
        wrapped = CoroutineWrapper(self.queue, coro, *param)
        # add the task to be executed to the queue
        self.queue.put_nowait(wrapped)

    async def task_processor(self):
        task: CoroutineWrapper
        while task := await self.queue.get():
            # wait for a new CoroutineWrapper object, then schedule its execution
            asyncio.create_task(task.run())

async def main():
    keep_running = KeepRunning()
    async with httpx.AsyncClient() as client:
        keep_running.add_task(task_1, client)
        keep_running.add_task(task_2, client)
        await keep_running.task_processor()

asyncio.run(main())
Server
import time

from flask import Flask

app = Flask(__name__)

@app.route("/")
def hello():
    return str(time.time())

@app.route("/meow/")
def meow():
    return "meow"

app.run()
Output:
b'meow'
b'1639920445.965701'
b'1639920446.0767004'
b'1639920446.1887035'
b'1639920446.2986999'
b'1639920446.4067013'
b'meow'
b'1639920446.516704'
b'1639920446.6267014'
...
You can see the tasks running repeatedly, each at its own pace.
Old answer
It seems like you only want to cycle through a fixed set of tasks.
In that case, just iterate over a list of coroutines with itertools.cycle.
But this is no different from synchronous code, so let me know if you need it to be asynchronous.
import asyncio
import itertools

import httpx

async def main_task(client: httpx.AsyncClient):
    resp = await client.get("http://127.0.0.1:5000/")
    print(resp.read())
    await asyncio.sleep(0.1)  # without this there would be an IP ban

async def main():
    async with httpx.AsyncClient() as client:
        for coroutine in itertools.cycle([main_task]):
            await coroutine(client)

asyncio.run(main())
Server:
import time

from flask import Flask

app = Flask(__name__)

@app.route("/")
def hello():
    return str(time.time())

app.run()
Output:
b'1639918937.7694323'
b'1639918937.8804302'
b'1639918937.9914327'
b'1639918938.1014295'
b'1639918938.2124324'
b'1639918938.3204308'
...
asyncio.create_task() works as you describe. The problem is that you have created an infinite loop:
async def save_subs_loop():
    while True:
        asyncio.create_task(print_subs())
        time.sleep(0.1)  # do not use time.sleep() in async code EVER
save_subs_loop() keeps creating tasks, but control is never yielded back to the event loop because there is no await in there. Try:
async def save_subs_loop():
    while True:
        asyncio.create_task(print_subs())
        await asyncio.sleep(0.1)  # yield control back to the loop to give tasks a chance to actually run
This problem is so common that I'm thinking Python should raise a RuntimeError if it detects time.sleep() within a coroutine :-)
You might want to try the TaskThread framework:
It allows you to add tasks at runtime.
Tasks are re-scheduled periodically (like in your while loop up there).
There is a consumer/producer framework built in (parent/child relationships), which you seem to need.
Disclaimer: I wrote TaskThread out of necessity, and it's been a life saver.
Based on the solution that I got in Running multiple sockets using asyncio in python,
I tried to also add the computation part using asyncio.
Setup: Python 3.7.4
import msgpack
import threading
import os
import asyncio
import concurrent.futures
import functools

import numpy as np  # needed below for np.frombuffer
import nest_asyncio

nest_asyncio.apply()

class ThreadSafeElem(bytes):
    def __init__(self, *p_arg, **n_arg):
        self._lock = threading.Lock()

    def __enter__(self):
        self._lock.acquire()
        return self

    def __exit__(self, type, value, traceback):
        self._lock.release()

elem = ThreadSafeElem()

async def serialize(data):
    return msgpack.packb(data, use_bin_type=True)

async def serialize1(data1):
    return msgpack.packb(data1, use_bin_type=True)

async def process_data(data, data1):
    loop = asyncio.get_event_loop()
    future = await loop.run_in_executor(None, functools.partial(serialize, data))
    future1 = await loop.run_in_executor(None, functools.partial(serialize1, data1))
    return await asyncio.gather(future, future1)

################ Calculation #############################
def calculate_data():
    global elem
    while True:
        try:
            # ... data is calculated (some dictionary) ...
            elem, elem1 = asyncio.run(process_data(data, data1))
        except:
            pass
#####################################################################

def get_data():
    return elem

def get_data1():
    return elem1

########### START SERVER AND get data continuously ################
async def client_thread(reader, writer):
    while True:
        try:
            bytes_received = await reader.read(100)
            package_type = np.frombuffer(bytes_received, dtype=np.int8)
            if package_type == 1:
                nn_output = get_data1()
            if package_type == 2:
                nn_output = get_data()
            writer.write(nn_output)
            await writer.drain()
        except:
            pass

async def start_servers(host, port):
    server = await asyncio.start_server(client_thread, host, port)
    await server.serve_forever()

async def start_calculate():
    await asyncio.run(calculate_data())

def enable_sockets():
    try:
        host = '127.0.0.1'
        port = 60000
        sockets_number = 6
        loop = asyncio.get_event_loop()
        for i in range(sockets_number):
            loop.create_task(start_servers(host, port + i))
        loop.create_task(start_calculate())
        loop.run_forever()
    except:
        print("weird exceptions")
##############################################################################

enable_sockets()
The issue is that when I make a call from a client, the server does not give me anything back.
I tested the program with dummy data and no asyncio on the calculation part, i.e. without loop.create_task(start_calculate()), and the server responded correctly.
I also ran the calculation without adding it in enable_sockets and it worked. It also works with this implementation, but the problem is that the server is not returning anything.
I did it like this because I need the calculation part to run continuously, and when one of the clients calls, to return the data available at that point.
An asyncio event loop cannot be nested inside another, and there is no point in doing so: asyncio.run (and similar) blocks the current thread until done. This does not increase parallelism, and merely disables any outer event loop.
If you want to nest another asyncio task, directly run it in the current event loop. If you want to run a non-cooperative, blocking task, run it in the event loop executor.
async def start_calculate():
    loop = asyncio.get_running_loop()
    await loop.run_in_executor(None, calculate_data)
The default executor uses threads – this allows running blocking tasks, but does not increase parallelism. Use a custom ProcessPoolExecutor to use additional cores:
import concurrent.futures

async def start_calculate():
    loop = asyncio.get_running_loop()
    with concurrent.futures.ProcessPoolExecutor() as pool:
        await loop.run_in_executor(pool, calculate_data)
Why do you call asyncio.run() multiple times?
This function always creates a new event loop and closes it at the end. It should be used as a main entry point for asyncio programs, and should ideally only be called once.
I would advise you to read the docs
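A minimal sketch of that single-entry-point structure, with illustrative stand-in coroutines for the workloads above:

import asyncio

async def calculate():
    ...  # stand-in for the calculation part

async def serve():
    ...  # stand-in for one of the start_servers coroutines

async def main():
    # one running loop owns every task; no nested asyncio.run() calls
    await asyncio.gather(calculate(), serve())

asyncio.run(main())  # the single entry point of the program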
I want to implement a service based on web sockets in the Tornado framework. When a user closes a web socket, I want to notify the other users about this. However, on_close is apparently a synchronous function and my _broadcast(str) -> None function is async.
How can I call this function anyway?
import logging

from tornado import ioloop, web, websocket

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

class SocketHandler(websocket.WebSocketHandler):
    async def open(self, *args, conns, **kwargs):
        logger.info(f"Opened a new connection to client {id(self)}")
        self._conns = conns

    async def on_message(self, message):
        logger.info(f"Client {id(self)} sent message: {message}")
        await self._broadcast(message)

    def on_close(self):
        logger.info(f"Client {id(self)} has left the scene")
        self._conns.remove(self)
        self._broadcast("something")  # TODO

    async def _broadcast(self, msg):
        for conn in self._conns:
            try:
                await conn.write_message(msg)
            except websocket.WebSocketClosedError:
                pass

app = web.Application([
    (r'/ws', SocketHandler)
])

if __name__ == '__main__':
    app.listen(9000)
    ioloop.IOLoop.instance().start()
The simple solution you're looking for is to use asyncio.create_task when calling the coroutine:
def on_close(self):
    logger.info(f"Client {id(self)} has left the scene")
    self._conns.remove(self)
    asyncio.create_task(self._broadcast("something"))
(the legacy Tornado version of this function is tornado.gen.convert_yielded, but now that Tornado and asyncio are integrated there's no reason not to use the asyncio version for native coroutines)
But for this particular problem, the use of await in your _broadcast function is not ideal. Awaiting a write_message is used to provide flow control, but create_task doesn't do anything useful with the backpressure provided by await. (write_message is fairly unusual in that it is fully supported to call it both with and without await). In fact, it applies backpressure to the wrong things - one slow connection will slow notifications to all the others that come after it.
So in this case I'd advise making _broadcast a regular synchronous function:
def _broadcast(self, msg):
    for conn in self._conns:
        try:
            conn.write_message(msg)
        except websocket.WebSocketClosedError:
            pass
If you want to be better able to control memory usage (via the flow control provided by await write_message), you'll need a more complicated solution, probably involving a bounded queue for each connection: in on_close, use put_nowait to add the message to every connection's queue, then have a task per connection that reads from its queue and writes the message with await write_message, as sketched below.
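A minimal sketch of that bounded-queue idea, assuming asyncio-integrated Tornado; the class and helper names (QueuedSocketHandler, _drain_queue, queue_message) are illustrative, not Tornado API:

import asyncio

from tornado import websocket

class QueuedSocketHandler(websocket.WebSocketHandler):
    def open(self, *args, **kwargs):
        # bounded queue: put_nowait raises QueueFull instead of letting
        # a slow client consume unbounded memory
        self._queue = asyncio.Queue(maxsize=100)
        self._writer = asyncio.create_task(self._drain_queue())

    async def _drain_queue(self):
        # one writer task per connection, so a slow client only slows itself
        while True:
            msg = await self._queue.get()
            try:
                await self.write_message(msg)
            except websocket.WebSocketClosedError:
                return

    def queue_message(self, msg):
        try:
            self._queue.put_nowait(msg)
        except asyncio.QueueFull:
            pass  # drop the message (or close) when the client can't keep up

On close, the broadcaster would loop over the connections and call queue_message on each, so backpressure from one slow client fills only its own queue.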
I think a solution that involves using an asyncio.Queue should work for you.
I made a small class as a mock-up to test this out:
import asyncio
import time

class Thing:
    on_close_q = asyncio.Queue()

    def __init__(self):
        self.conns = range(3)

    def on_close(self, id):
        time.sleep(id)
        print(f'closing {id}')
        self.on_close_q.put_nowait((self, id))

    async def notify(self, msg):
        print('in notify')
        for conn in range(3):
            print(f'notifying {conn} {msg}')

async def monitor_on_close():
    print('monitoring')
    while True:
        instance, id = await Thing.on_close_q.get()
        await instance.notify(f'{id} is closed')
From there, you'll need to run monitor_on_close in the ioloop you get from Tornado. I've never used Tornado, but I think adding something like this to your __main__ block should work:
ioloop.IOLoop.current().add_callback(monitor_on_close)
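For example, combined with the __main__ block from the question (untested against Tornado, as noted above):

if __name__ == '__main__':
    app.listen(9000)
    # schedule the monitor on the same loop the server runs on
    ioloop.IOLoop.current().add_callback(monitor_on_close)
    ioloop.IOLoop.current().start()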
I'm trying to implement a websocket client in Python using websockets and the apparently mandatory asyncio, which I have never used before (and have a hard time understanding...).
I've read a lot on the subject and have seen (too) many examples here and everywhere, but I can't find a way to properly make a websocket client with a persistent connection.
I need to have a persistent connection because the commands need to be requested on the same connection, the first one being an authentication command.
The remote server is a 3rd party API I don't have any control over.
I suppose I could run an authentication request along with each command my program sends, but it does not feel right to open > auth > request > close for each command instead of keeping one connection alive for the whole program's life.
My implementation is a library using many classes, and I need to wrap the websocket connector/handler in one of them.
Here's what I have right now, based on examples I found here and there (with some obfuscated data):
import json
import asyncio

from websockets import connect

URL = 'wss://server.com/endpoint'

class Websocket:
    async def __aenter__(self):
        self._conn = connect(URL)
        self.websocket = await self._conn.__aenter__()
        return self

    async def __aexit__(self, *args, **kwargs):
        await self._conn.__aexit__(*args, **kwargs)

    async def send(self, message):
        await self.websocket.send(message)

    async def receive(self):
        return await self.websocket.recv()

class Handler:
    def __init__(self):
        self.wws = Websocket()
        self.loop = asyncio.get_event_loop()

    def command(self, cmd):
        return self.loop.run_until_complete(self.__async__command(cmd))

    async def __async__command(self, cmd):
        async with self.wws as echo:
            await echo.send(json.dumps(cmd))
            return await echo.receive()

def main():
    handler = Handler()
    foo = handler.command('authentication command')
    print('auth: ', foo)
    bar = handler.command('another command to run depending on the first authentication')
    print('command: ', bar)

if __name__ == '__main__':
    main()
Basically, right now I get these answers (simplified and obfuscated):
auth: Ok, authenticated
command: Command refused, not authenticated
I suppose my problem is that the block async with self.wws as echo: creates the connection, runs its code, then drops it, instead of keeping the connection alive. Since we are not using a usual __init__ here but some asyncio voodoo I don't understand, I'm kind of stuck.
I think your diagnosis is correct: the problem is that the async context manager is creating and closing a connection for each call of Handler.command ... really not what you want.
Instead, you could just synchronously establish the websocket connection during the init of Handler and then store the connected websocket (an instance of WebSocketClientProtocol) as a class member for later use, as in this sample code:
import json
import asyncio

from websockets import connect

URL = 'ws://localhost:8000'

class Handler:
    def __init__(self):
        self.ws = None
        self.loop = asyncio.get_event_loop()
        # perform a synchronous connect
        self.loop.run_until_complete(self.__async__connect())

    async def __async__connect(self):
        print("attempting connection to {}".format(URL))
        # perform async connect, and store the connected WebSocketClientProtocol
        # object, for later reuse for send & recv
        self.ws = await connect(URL)
        print("connected")

    def command(self, cmd):
        return self.loop.run_until_complete(self.__async__command(cmd))

    async def __async__command(self, cmd):
        await self.ws.send(json.dumps(cmd))
        return await self.ws.recv()

def main():
    handler = Handler()
    foo = handler.command('authentication command')
    print('auth: ', foo)
    bar = handler.command('another command to run depending on the first authentication')
    print('command: ', bar)

if __name__ == '__main__':
    main()
I am currently developing a server program in Python that uses the websockets and asyncio packages.
I got a basic script handling websockets working (Exhibit A). This script blocks when waiting for input, which is not what I want.
The solution I imagine is to start two asynchronous tasks - one that handles inputs and one that handles outputs - in a secondary event loop. I had to do some research about coroutines, and I came up with Exhibit B as a proof of concept for running two things simultaneously in an event loop.
Now what I'm stuck on is Exhibit C. When I attempted to use this in a practical scenario with the websockets package, I found that websocket.recv() never finishes (or the coroutine never un-pauses - I'm not sure exactly what's going on). In Exhibit A it works fine, and I've determined that the coroutine definitely runs at least up until that point.
Any ideas?
Exhibit A:
#!/usr/bin/python3
import asyncio
import time

import websockets

# This works great!
async def hello(websocket, path):
    while True:
        # This line waits for input from the socket
        name = await websocket.recv()
        print("< {}".format(name))
        # "echo... echo... echo... echo... echo..."
        greeting = ''.join(name + "... " for x in range(5))
        await websocket.send(greeting)
        print("> {}".format(greeting))
        time.sleep(0.1)

start_server = websockets.serve(hello, '', 26231)

asyncio.get_event_loop().run_until_complete(start_server)
asyncio.get_event_loop().run_forever()
Exhibit B:
#!/usr/bin/python3
import asyncio
import time

class Thing:
    def __init__(self):
        pass

    async def test(self):
        for x in range(10):
            print("Hello" + str(x))
            await asyncio.sleep(0)

    def run(self):
        # Add the task to the event loop twice
        asyncio.ensure_future(self.test())
        asyncio.ensure_future(self.test())

t = Thing()
t.run()

loop = asyncio.get_event_loop()
loop.run_forever()
Exhibit C:
#!/usr/bin/python3
import asyncio
import time

import websockets

class WebsocketRequest:
    def __init__(self, websocket):
        self.websocket = websocket

    # Works great
    async def handle_oputs(self):
        # This works fine - sends a message
        # every 10 seconds to the client
        while True:
            print("sending...")
            await self.websocket.send("Hello")
            print("> {}".format("Hello"))
            time.sleep(10)

    # Doesn't work
    async def handle_iputs(self):
        # This stops at the await and never receives
        # any input from the client :/
        while True:
            try:
                print("receiving...")
                # This is the line that causes sadness
                data = await self.websocket.recv()
                print("< {}".format(data))
            except:
                # This doesn't happen either
                print("Listener is dead")

    async def run(self):
        # This is the part where the coroutine for
        # a client gets split off into two of them
        # to handle input and output separately.
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        asyncio.ensure_future(self.handle_iputs())
        asyncio.ensure_future(self.handle_oputs())
        loop.run_forever()

class WebsocketServer:
    def __init__(self, address):
        self.ip = address[0]
        self.port = address[1]

    async def hello(self, websocket, path):
        req = WebsocketRequest(websocket)
        await req.run()

    def run(self):
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        start_server = websockets.serve(self.hello, self.ip, self.port)
        asyncio.get_event_loop().run_until_complete(start_server)
        asyncio.get_event_loop().run_forever()
Maybe the websocket module (unlike websockets) can help you.
The use of WebSocketApp is very simple:
import websocket

class ExampleClass(object):
    def __init__(self):
        websocket.enableTrace(True)
        self.ws = websocket.WebSocketApp("ws://echo.websocket.org",
                                         on_open=on_open,
                                         on_message=on_message,
                                         on_error=on_error,
                                         on_close=on_close)
        self.ws.run_forever()  # blocks and dispatches the callbacks

def on_message(ws, msg):
    print("Message Arrived:" + msg)

def on_error(ws, error):
    print(error)

def on_close(ws):
    print("Connection Closed")

def on_open(ws):
    ws.send("Hello!")
To download this module: https://pypi.python.org/pypi/websocket-client
time.sleep() is a blocking operation, so no other tasks can run and nothing else gets scheduled while it sleeps. Use await asyncio.sleep() instead.
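Applied to Exhibit C's handle_oputs, for example, the send loop becomes:

async def handle_oputs(self):
    # sends a message every 10 seconds to the client
    while True:
        print("sending...")
        await self.websocket.send("Hello")
        print("> {}".format("Hello"))
        await asyncio.sleep(10)  # yields to the event loop so recv() elsewhere can run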