Python asyncio: yield from wasn't used with future?

I'm attempting to use asyncio to make an asynchronous client/server setup.
For some reason I'm getting AssertionError: yield from wasn't used with future when running the client.
Searching for this error didn't turn up much.
What does this error mean and what's causing it?
#!/usr/bin/env python3
import asyncio
import pickle
import uuid

port = 9999

class ClientProtocol(asyncio.Protocol):
    def __init__(self, loop):
        self.loop = loop
        self.conn = None
        self.uuid = uuid.uuid4()
        self.other_clients = []

    def connection_made(self, transport):
        print("Connected to server")
        self.conn = transport
        m = "hello"
        self.conn.write(m)

    def data_received(self, data):
        print('Data received: {!r}'.format(data))

    def connection_lost(self, exc):
        print('The server closed the connection')
        print('Stop the event loop')
        self.loop.stop()

# note that in my use-case, main() is called continuously by an external game engine
client_init = False

def main():
    # use a global here only for the purpose of providing example code runnable outside of aforementioned game engine
    global client_init
    if client_init != True:
        loop = asyncio.get_event_loop()
        coro = loop.create_connection(lambda: ClientProtocol(loop), '127.0.0.1', port)
        task = asyncio.Task(coro)
        transport, protocol = loop.run_until_complete(coro)
        client_init = True

    # to avoid blocking the execution of main (and of game engine calling it), only run one iteration of the event loop
    loop.stop()
    loop.run_forever()

    if transport:
        transport.write("some data")

if __name__ == "__main__":
    main()
Traceback:
Traceback (most recent call last):
  File "TCPclient.py", line 57, in <module>
    main()
  File "TCPclient.py", line 45, in main
    transport, protocol = loop.run_until_complete(coro)
  File "/usr/lib/python3.5/asyncio/base_events.py", line 337, in run_until_complete
    return future.result()
  File "/usr/lib/python3.5/asyncio/futures.py", line 274, in result
    raise self._exception
  File "/usr/lib/python3.5/asyncio/tasks.py", line 239, in _step
    result = coro.send(None)
  File "/usr/lib/python3.5/asyncio/base_events.py", line 599, in create_connection
    yield from tasks.wait(fs, loop=self)
  File "/usr/lib/python3.5/asyncio/tasks.py", line 341, in wait
    return (yield from _wait(fs, timeout, return_when, loop))
  File "/usr/lib/python3.5/asyncio/tasks.py", line 424, in _wait
    yield from waiter
  File "/usr/lib/python3.5/asyncio/futures.py", line 359, in __iter__
    assert self.done(), "yield from wasn't used with future"
AssertionError: yield from wasn't used with future

The problem seems to be that you create a task from your coroutine, but then pass the coroutine to run_until_complete instead:
coro = loop.create_connection(lambda: ClientProtocol(loop), '127.0.0.1', port)
task = asyncio.Task(coro)
transport, protocol = loop.run_until_complete(coro)
Either pass the task:
coro = loop.create_connection(lambda: ClientProtocol(loop), '127.0.0.1', port)
task = asyncio.Task(coro)
transport, protocol = loop.run_until_complete(task)
Or don't create the task and just pass the coroutine; run_until_complete will create a task for you:
coro = loop.create_connection(lambda: ClientProtocol(loop), '127.0.0.1', port)
transport, protocol = loop.run_until_complete(coro)
In addition, you need to ensure that the strings you write are byte strings; string literals in Python 3 default to unicode. You can either encode them, or just write byte strings in the first place:
transport.write("some data".encode('utf-8'))
transport.write(b"some data")
EDIT: It's not clear to me why this is an issue; however, the source for run_until_complete has this to say:
WARNING: It would be disastrous to call run_until_complete()
with the same coroutine twice -- it would wrap it in two
different Tasks and that can't be good.
I suppose creating a task and then passing the coroutine to run_until_complete (which wraps it in a second task) has the same effect.
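To make that concrete, here's a minimal sketch (hypothetical names, not the poster's code) of the two-wrapper mistake and the correct call:
import asyncio

async def connect():  # stand-in for loop.create_connection(...)
    await asyncio.sleep(0)

loop = asyncio.get_event_loop()
coro = connect()
task = asyncio.ensure_future(coro)  # first wrapper: the coroutine is now driven by this Task

# loop.run_until_complete(coro)     # would wrap the same coroutine in a second Task
loop.run_until_complete(task)       # correct: run the Task that already wraps it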

How to construct proxy objects from multiprocessing.managers.SyncManager?

I have long-running file I/O tasks which I'd like to be able to move into a daemon/server process. A CLI tool would be used to queue new jobs to run, query the status of running jobs, and wait for individual jobs. Python's multiprocessing.managers looks like a nice simple way to handle the IPC. I'd like to be able to construct a SyncManager.Event for the client to wait on without blocking the server, but attempting to do so triggers a "server not yet started" assertion. Ironically, this assertion gets sent from the server to the client, so obviously the server is started somewhere.
Here's the minimal example:
#!/usr/bin/env python3
import time
import sys
import concurrent.futures
from multiprocessing.managers import SyncManager

def do_work(files):
    """Simulate doing some work on a set of files."""
    print(f"Starting work for {files}.")
    time.sleep(2)
    print(f"Finished work for {files}.")

# Process pool to do work in.
pool = concurrent.futures.ProcessPoolExecutor(max_workers=1)

class Job:
    job_counter = 1

    def __init__(self, files):
        """Set up a job and queue work for files on our process pool."""
        self._job_number = self.job_counter
        Job.job_counter += 1
        print(f"manager._state.value = {manager._state.value}")
        self._finished_event = manager.Event()
        print(f"Queued job {self.number()}.")
        future = pool.submit(do_work, files)
        future.add_done_callback(lambda f: self._finished_event.set())

    def number(self):
        return self._job_number

    def event(self):
        """Get an event which can be waited on for the job to complete."""
        return self._finished_event

class MyManager(SyncManager):
    pass

MyManager.register("Job", Job)

manager = MyManager(address=("localhost", 16000), authkey=b"qca-authkey")

if len(sys.argv) > 1 and sys.argv[1] == "server":
    manager.start()
    print(f"Manager listening at {manager.address}.")
    while True:
        time.sleep(1)
else:
    manager.connect()
    print(f"Connected to {manager.address}.")
    job = manager.Job(["a", "b", "c"])
    job.event().wait()
    print("Done")
If I run the client I see:
$ ./mp-manager.py
Connected to ('localhost', 16000).
Traceback (most recent call last):
  File "./mp-manager.py", line 54, in <module>
    job = manager.Job(["a", "b", "c"])
  File "/usr/lib/python3.8/multiprocessing/managers.py", line 740, in temp
    token, exp = self._create(typeid, *args, **kwds)
  File "/usr/lib/python3.8/multiprocessing/managers.py", line 625, in _create
    id, exposed = dispatch(conn, None, 'create', (typeid,)+args, kwds)
  File "/usr/lib/python3.8/multiprocessing/managers.py", line 91, in dispatch
    raise convert_to_error(kind, result)
multiprocessing.managers.RemoteError:
---------------------------------------------------------------------------
Traceback (most recent call last):
  File "/usr/lib/python3.8/multiprocessing/managers.py", line 210, in handle_request
    result = func(c, *args, **kwds)
  File "/usr/lib/python3.8/multiprocessing/managers.py", line 403, in create
    obj = callable(*args, **kwds)
  File "./mp-manager.py", line 24, in __init__
    self._finished_event = manager.Event()
  File "/usr/lib/python3.8/multiprocessing/managers.py", line 740, in temp
    token, exp = self._create(typeid, *args, **kwds)
  File "/usr/lib/python3.8/multiprocessing/managers.py", line 622, in _create
    assert self._state.value == State.STARTED, 'server not yet started'
AssertionError: server not yet started
---------------------------------------------------------------------------
The server output is:
$ ./mp-manager.py server
Manager listening at ('127.0.0.1', 16000).
manager._state.value = 0
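One plausible reading of that output: manager.Job(...) makes the manager's server process instantiate Job, and in that process the module-level manager was constructed at import time but never started or connected, so its _state is still 0 (State.INITIAL) when manager.Event() is called. A minimal workaround sketch under that assumption, connecting the server-side manager object before first use:
from multiprocessing.managers import State

class Job:
    job_counter = 1

    def __init__(self, files):
        self._job_number = Job.job_counter
        Job.job_counter += 1
        # Assumption: this constructor runs inside the manager's server
        # process, where the module-level manager was never connected.
        if manager._state.value != State.STARTED:
            manager.connect()
        self._finished_event = manager.Event()
        future = pool.submit(do_work, files)
        future.add_done_callback(lambda f: self._finished_event.set())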

aiohttp: calling asyncio from a running web.Application: RuntimeError: This event loop is already running

I am trying to make an API call from within my service and I am running into event loop issues. Can someone help me understand what I am doing wrong?
Basically I want to make a service that does some calculations based on data pulled from a different service.
I can call the code below from a CLI, but not when I start up the web app (i.e., when hitting http://127.0.0.1:8080/add):
loop = asyncio.get_event_loop()
data = loop.run_until_complete(run_fetch(loop, 'http://google.com'))
Sample code:
from aiohttp import web
import aiohttp
import asyncio

async def add(request):
    loop = asyncio.get_event_loop()
    data = loop.run_until_complete(run_fetch(loop, 'http://google.com'))
    return web.json_response(data)

async def fetch(client, url):
    async with client.get(url) as resp:
        assert resp.status == 200
        return await resp.text()

async def run_fetch(loop, url):
    async with aiohttp.ClientSession(loop=loop) as client:
        html = await fetch(client, url)
        return html

app = web.Application()
app.router.add_get('/add', add)
web.run_app(app, host='127.0.0.1', port=8080)
Exception:
Error handling request
Traceback (most recent call last):
  File ".../aiohttp/web_protocol.py", line 417, in start
    resp = yield from self._request_handler(request)
  File ".../aiohttp/web.py", line 289, in _handle
    resp = yield from handler(request)
  File ".../sample.py", line 11, in add
    data = loop.run_until_complete(run_fetch(loop, 'http://google.com'))
  File ".../python3.6/asyncio/base_events.py", line 454, in run_until_complete
    self.run_forever()
  File ".../python3.6/asyncio/base_events.py", line 408, in run_forever
    raise RuntimeError('This event loop is already running')
RuntimeError: This event loop is already running
run_until_complete is the way to run some async code from a sync context: under the hood it adds the provided future to the given ioloop, calls run_forever, and then returns the result of the resolved future (or raises its exception). It can't be used from inside a request handler, because the handler already runs on the event loop and the loop can't be re-entered.
Since add is itself a coroutine, you actually need to await run_fetch(loop, 'http://google.com').
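A corrected handler along those lines, as a minimal sketch of the suggested change:
async def add(request):
    # We're already inside the running event loop here, so await the
    # coroutine instead of calling loop.run_until_complete() on it.
    loop = asyncio.get_event_loop()
    data = await run_fetch(loop, 'http://google.com')
    return web.json_response(data)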

Tornado how to return error exception?

I want to run a method that I know doesn't work, and I want to get the error returned by the method.
This is my code:
def is_connect(s):
    print("ok connection")
    print(s)
    ioloop.stop()

try:
    current_job_ready = 0
    print("ok1")
    beanstalk = beanstalkt.Client(host='host', port=port)
    print("ok1")
    beanstalk.connect(callback=is_connect)
    ioloop = tornado.ioloop.IOLoop.instance()
    ioloop.start()
    print("ok2")
except IOError as e:
    print(e)
And this is the error I get when I run my program with the wrong port:
WARNING:tornado.general:Connect error on fd 7: ECONNREFUSED
ERROR:tornado.application:Exception in callback <functools.partial object at 0x7f5a0eac6f18>
Traceback (most recent call last):
  File "/usr/local/lib/python2.7/dist-packages/tornado/ioloop.py", line 604, in _run_callback
    ret = callback()
  File "/usr/local/lib/python2.7/dist-packages/tornado/stack_context.py", line 275, in null_wrapper
    return fn(*args, **kwargs)
  File "/usr/local/lib/python2.7/dist-packages/tornado/ioloop.py", line 619, in <lambda>
    self.add_future(ret, lambda f: f.result())
  File "/usr/local/lib/python2.7/dist-packages/tornado/concurrent.py", line 237, in result
    raise_exc_info(self._exc_info)
  File "/usr/local/lib/python2.7/dist-packages/tornado/gen.py", line 270, in wrapper
    result = func(*args, **kwargs)
TypeError: connect() takes exactly 1 argument (2 given)
I want to get e when I enter a wrong port or host.
How can I do this?
I tried adding raise IOError("connection error") after beanstalk = beanstalkt.Client(host='host', port=port), but this forces the error, and I only want an error when one actually occurs.
Here's where reading the code helps. In beanstalkt 0.6's connect, it creates an IOStream to connect to the server:
https://github.com/nephics/beanstalkt/blob/v0.6.0/beanstalkt/beanstalkt.py#L108
It registers your callback to be executed on success, but if the connection fails it'll just call Client._reconnect once per second forever. I think you should open a feature request in their GitHub project asking for an error-notification system for connect. With the current beanstalkt implementation, you just have to decide how long you're willing to wait for success:
import sys
from datetime import timedelta

import beanstalkt
from tornado.ioloop import IOLoop

def is_connect(s):
    print("ok connection")
    print(s)
    loop.remove_timeout(timeout)
    # Do something with Beanstalkd....

def connection_failed():
    sys.stderr.write("Connection failed!\n")
    # Could call IOLoop.stop() or just quit.
    sys.exit(1)

beanstalk = beanstalkt.Client(host='host', port=port)  # as in the question
loop = IOLoop.current()
timeout = loop.add_timeout(timedelta(seconds=1), connection_failed)
beanstalk.connect(callback=is_connect)
loop.start()

How to wait for task created by create_task() to complete?

I wrote a test program to try out create_task() in a case where the program needs to wait until the created task completes.
I tried using loop.run_until_complete() to wait for task completion, but it results in an error with a traceback.
/Users/jason/.virtualenvs/xxx/bin/python3.5 /Users/jason/asyncio/examples/hello_coroutine.py
Test
Hello World, is a task
Traceback (most recent call last):
  File "/Users/jason/asyncio/examples/hello_coroutine.py", line 42, in <module>
    loop.run_until_complete(test.greet_every_two_seconds())
  File "/Users/jason/asyncio/asyncio/base_events.py", line 373, in run_until_complete
    return future.result()
  File "/Users/jason/asyncio/asyncio/futures.py", line 274, in result
    raise self._exception
  File "/Users/jason/asyncio/asyncio/tasks.py", line 240, in _step
    result = coro.send(None)
  File "/Users/jason/asyncio/examples/hello_coroutine.py", line 33, in greet_every_two_seconds
    self.a()
  File "/Users/jason/asyncio/examples/hello_coroutine.py", line 26, in a
    t = loop.run_until_complete(self.greet_every_one_seconds(self.db_presell))
  File "/Users/jason/asyncio/asyncio/base_events.py", line 361, in run_until_complete
    self.run_forever()
  File "/Users/jason/asyncio/asyncio/base_events.py", line 326, in run_forever
    raise RuntimeError('Event loop is running.')
RuntimeError: Event loop is running.
The test code is as follows. The function a() must not be a coroutine.
How can I wait for the task to complete?
import asyncio

class Test(object):
    def __init__(self):
        print(self.__class__.__name__)

    @asyncio.coroutine
    def greet_every_one_seconds(self, value):
        print('Hello World, one second.')
        fut = asyncio.sleep(1, result=value)
        a = yield from fut
        print(a)

    def a(self):
        loop = asyncio.get_event_loop()
        task = loop.run_until_complete(self.greet_every_one_seconds(4))
        # How can I wait for the task until complete?

    @asyncio.coroutine
    def greet_every_two_seconds(self):
        while True:
            self.a()
            print('Hello World, two seconds.')
            yield from asyncio.sleep(2)

if __name__ == '__main__':
    test = Test()
    loop = asyncio.get_event_loop()
    try:
        loop.run_until_complete(test.greet_every_two_seconds())
    finally:
        loop.close()
How can i wait for the task until complete?
Use loop.run_until_complete(task) in an ordinary function, or await task in a coroutine.
Here's a complete code example that demonstrates both cases:
#!/usr/bin/env python3
import asyncio

async def some_coroutine(loop):
    task = loop.create_task(asyncio.sleep(1))  # just some task
    await task  # wait for it (inside a coroutine)

loop = asyncio.get_event_loop()

task = loop.create_task(asyncio.sleep(1))  # just some task
loop.run_until_complete(task)  # wait for it (outside of a coroutine)

loop.run_until_complete(some_coroutine(loop))

Autobahn Python Errno 99 cannot assign requested address

While trying to set up a WebSocket server, I encountered the following error.
The same code works fine with the LAN IP '192.168.x.x', but fails with a public IP/domain name.
Here is the error trace:
Traceback (most recent call last):
  File "WSServer.py", line 19, in <module>
    server = loop.run_until_complete(coro)
  File "/usr/lib64/python3.4/asyncio/base_events.py", line 208, in run_until_complete
    return future.result()
  File "/usr/lib64/python3.4/asyncio/futures.py", line 243, in result
    raise self._exception
  File "/usr/lib64/python3.4/asyncio/tasks.py", line 319, in _step
    result = coro.send(value)
  File "/usr/lib64/python3.4/asyncio/base_events.py", line 579, in create_server
    % (sa, err.strerror.lower()))
OSError: [Errno 99] error while attempting to bind on address ('121.6.x.x', 9000): cannot assign requested address
Python Server Code:
from autobahn.asyncio.websocket import WebSocketServerProtocol
class MyServerProtocol(WebSocketServerProtocol):
def onMessage(self, payload, isBinary):
print("message received")
self.sendMessage(payload, isBinary)
if __name__ == '__main__':
import asyncio
from autobahn.asyncio.websocket import WebSocketServerFactory
factory = WebSocketServerFactory()
factory.protocol = MyServerProtocol
loop = asyncio.get_event_loop()
coro = loop.create_server(factory, '121.6.x.x', 9000)
server = loop.run_until_complete(coro)
try:
loop.run_forever()
except KeyboardInterrupt:
pass
finally:
server.close()
loop.close()
Could the issue be related to the server settings, e.g. hostname or SELinux?
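For what it's worth, Errno 99 (EADDRNOTAVAIL) means the address passed to bind() is not assigned to any interface on the machine, which is the usual situation when the host sits behind NAT and the public IP belongs to the router. A minimal sketch of the common fix, assuming that setup:
# Bind to all local interfaces (or the LAN address) and let the router's
# port forwarding expose the server on the public IP/domain name.
coro = loop.create_server(factory, '0.0.0.0', 9000)
server = loop.run_until_complete(coro)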
