I am trying to understand and check how Tornado executes coroutines.
I noticed a behavior which makes me think that gen.coroutine doesn't work.
Look at the test below. It passes, but I expected to get l == ["S1", "BF", "S2", "S3"], because when submain yields asyncSubMain it should return to the event loop queue to pull the next callback, and that callback should be beforeYield, since it was scheduled earlier.
def test_call_coroutine_function(ioLoop):
    l = []

    def syncSubMain():
        return "Oh! at last I understood how tornado turns"

    @gen.coroutine
    def asyncSubMain():
        l.append("S2")
        return syncSubMain()

    def beforeYield():
        l.append("BF")

    @gen.coroutine
    def submain():
        l.append("S1")
        ioLoop.add_callback(beforeYield)
        y = yield asyncSubMain()
        l.append("S3")
        raise gen.Return(y)

    @gen.coroutine
    def main():
        x = yield submain()
        raise gen.Return(x)

    assert ioLoop.run_sync(main).startswith("Oh!")
    assert l == ["S1", "S2", "S3", "BF"]
The following test behaves the way I want, and I don't even use @gen.coroutine.
def test_sync_all_async(ioLoop):
    class C:
        f = 0
        l = []

        def mf(self):
            return 1

        # gen.coroutine is not needed, and if you just call a method
        # decorated with gen.coroutine it's executed synchronously
        def m3(self, a):
            self.l.append(a)
            self.f = self.mf()

        def m2(self):
            self.l.append("A")
            ioLoop.add_callback(self.m3, "B")
            self.l.append("C")

        def m(self):
            self.m2()

    c = C()
    ioLoop.run_sync(c.m)
    assert c.f == 1
    assert c.l == ["A", "C", "B"]
I thought that @gen.coroutine is just syntactic sugar for the test above.
From these tests it follows that either it isn't working, or it is something different from an event loop with callbacks.
I want to make an event listener like this:
@some.event
async def on_ready(some_info):
    print(some_info)

@some.event
async def on_error(err):
    print(err)
So it fires when something is ready, or when a message is received, as in WebSockets. I'm using this for Discord, since some info is only available once the bot is Identified or Ready.
I've seen something like:
def add_listener(func, name):
    # ...

def remove_listener(func, name):
    # ...
But I don't really know how to use it or create one
Here's a simple class-based solution I quickly coded:
class Event:
    def __init__(self):
        # Initialise a list of listeners
        self.__listeners = []

    # Define a getter for the 'on' property which returns the decorator.
    @property
    def on(self):
        # A decorator to run addListener on the input function.
        def wrapper(func):
            self.addListener(func)
            return func
        return wrapper

    # Add and remove functions from the list of listeners.
    def addListener(self, func):
        if func in self.__listeners: return
        self.__listeners.append(func)

    def removeListener(self, func):
        if func not in self.__listeners: return
        self.__listeners.remove(func)

    # Trigger events.
    def trigger(self, args=None):
        # Run all the functions that are saved.
        if args is None:
            args = []
        for func in self.__listeners: func(*args)
It allows you to create an Event that functions can 'subscribe' to:
evn = Event()
# Some code...
evn.trigger(['arg x','arg y'])
Functions can subscribe to the event with the decorator:
@evn.on
def some_function(x, y): pass
Or with the addListener method:
def some_function(x,y): pass
evn.addListener(some_function)
You can also remove listeners:
evn.removeListener(some_function)
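Putting it together, a complete runnable example using the class above (a minimal sketch; the listener and its arguments are mine):

evn = Event()

@evn.on
def greet(name):
    print("hello", name)

evn.trigger(["world"])      # prints: hello world
evn.removeListener(greet)
evn.trigger(["world"])      # prints nothing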
To create something similar to what you asked for, you can do something like this:
# some.py
from event import Event

class SomeClass:
    def __init__(self):
        # Private event variable
        self.__event = Event()
        # Public event variable (decorator)
        self.event = self.__event.on

some = SomeClass()
And then use it like so:
# main.py
from some import some

@some.event
async def on_ready(some_info):
    print(some_info)

@some.event
async def on_error(err):
    print(err)
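One caveat: the listeners above are async def functions, so Event.trigger's plain func(*args) call would only create coroutine objects without ever running them. A hedged variant that also schedules coroutines on a running asyncio loop might look like this (AsyncEvent is my name, not part of the original answer):

import asyncio
import inspect

class AsyncEvent(Event):
    def trigger(self, args=None):
        if args is None:
            args = []
        for func in self._Event__listeners:  # Event's name-mangled private list
            result = func(*args)
            # async def listeners return a coroutine object; schedule it
            # on the event loop instead of silently discarding it.
            if inspect.iscoroutine(result):
                asyncio.ensure_future(result)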
Quick example:
################################################################################
# the code for the "framework"

event1_listeners = []
event2_listeners = []

def listen_event1(func):
    event1_listeners.append(func)
    return func

def listen_event2(func):
    event2_listeners.append(func)
    return func

def process_event(event):
    if event["type"] == 1:
        for func in event1_listeners:
            func(event)
    elif event["type"] == 2:
        for func in event2_listeners:
            func(event)
    else:
        raise NotImplementedError(f"{event['type']=!r}")

################################################################################
# your code

@listen_event1
def handle_event1_v1(event):
    print(f"handle_event1_v1 : {event!r}")

@listen_event1
def handle_event1_v2(event):
    print(f"handle_event1_v2 : {event!r}")

@listen_event2
def handle_event2(event):
    print(f"handle_event2 : {event!r}")

################################################################################
# the events processed by the framework

process_event({"type": 1, "msg": "hello"})
process_event({"type": 2, "msg": "world"})
Output:

handle_event1_v1 : {'type': 1, 'msg': 'hello'}
handle_event1_v2 : {'type': 1, 'msg': 'hello'}
handle_event2 : {'type': 2, 'msg': 'world'}
Essentially, the decorators will store the function someplace, and when an event is received, the framework iterates over the functions registered for it.
Removing a listener dynamically is basically just removing the func reference from the list.
The decorator in this case is simply "sugar" so you don't have to call event1_listeners.append(func) yourself.
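Since the question also mentions remove_listener, the matching operation in this framework just drops the function from the list (a sketch following the same pattern; unlisten_event1 is my name):

def unlisten_event1(func):
    # Remove a previously registered handler; a no-op if it was never added.
    if func in event1_listeners:
        event1_listeners.remove(func)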
I am learning coroutines from an awesome presentation by David Beazley.
I am very confused by a simple Task example in which a generator starts running without a preceding next():
class Task(object):
    taskid = 0
    def __init__(self, target):
        Task.taskid += 1
        self.tid = Task.taskid   # Task id
        self.target = target     # Target coroutine
        self.sendval = None      # Value to send
    def run(self):
        return self.target.send(self.sendval)

# a very simple generator
def foo():
    print('Part 1')
    yield
    print("Part 2")
    yield
It starts without next():
In [8]: t1 = Task(foo())
In [9]: t1.run()
Part 1
Whereas with a plain foo generator:
In [10]: f1 = foo()
In [11]: f1.send(1)
TypeError: can't send non-None value to a just-started generator
Apparently it should be started with next() in advance. How can I understand this situation?
The error description tells you what is wrong and what you should do: you should send None to a just-created generator, that is, f1.send(None) instead of f1.send(1). This is exactly why your Task example works: self.sendval is initialized to None, so the first run() performs self.target.send(None), which is equivalent to next(self.target). Usually coroutines are used with a decorator like the one you provided in the question (class Task and its method run) or like the following one:
import functools

def coroutine(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        generator = func(*args, **kwargs)
        next(generator)  # prime the generator so it can receive send()
        return generator
    return wrapper
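For example, a coroutine primed this way can receive values immediately (a minimal usage sketch; printer is my example, not from the question):

@coroutine
def printer():
    while True:
        item = yield
        print(item)

p = printer()     # already advanced to the first yield by the decorator
p.send("hello")   # no TypeError; prints: hello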
I'm using python 3.5 to asynchronously return data from one method to another as follows:
async def A():
    # Need to get data here from B continuously
    val = await B()

async def B():
    # Need to get data here from C continuously as it is generated
    # inside the while loop of method C
    data = await C()
    # Modify and process the data and return it to A
    return await D(data)

async def C():
    i = 0
    while i < 5:
        await asyncio.sleep(1)
        # Return this data to method B one by one. Not sure how to do this??
        return i

async def D(val):
    # Do some processing of val and return it
    return val
I want to continuously stream data from method C and return it to method B, process each item as it is received, and return it to method A.
One way is to use an asyncio queue and pass it to method B from A, from where it further gets passed on to C.
Method C would keep writing its output into the queue.
Method B would read from the queue, process the data, and update the queue.
Method A reads from the queue at the end for the finally processed data.
Can we achieve this using coroutines or async methods themselves in any other way? I wish to avoid continuously reading and writing to queues for every request.
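For reference, a minimal sketch of the queue-based approach described above (the two-queue split, the None sentinel, and the data * 2 stand-in for D are my choices, not from the question):

import asyncio

async def C(raw):
    for i in range(5):
        await asyncio.sleep(1)
        await raw.put(i)            # stream each value as it is produced
    await raw.put(None)             # sentinel: no more data

async def B(raw, processed):
    while True:
        data = await raw.get()
        if data is None:
            await processed.put(None)
            return
        await processed.put(data * 2)   # stand-in for D(data)

async def A():
    raw, processed = asyncio.Queue(), asyncio.Queue()
    asyncio.ensure_future(C(raw))
    asyncio.ensure_future(B(raw, processed))
    while True:
        val = await processed.get()
        if val is None:
            return
        print(val)

asyncio.get_event_loop().run_until_complete(A())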
import asyncio
from async_generator import async_generator, yield_, yield_from_

async def fun(n):
    print("Finding %d-1" % n)
    await asyncio.sleep(n/2)
    result = n - 1
    print("%d - 1 = %d" % (n, result))
    return result

@async_generator
async def main(l):
    futures = [fun(n) for n in l]
    for i, future in enumerate(asyncio.as_completed(futures)):
        result = await future
        print("inside the main..")
        print(result)
        await yield_(result)

@async_generator
async def dealer():
    l = [2, 4, 6]
    gen = main(l)
    async for item in gen:
        print("inside the dealer....")
        await yield_(item)

async def dealer1():
    gen = dealer()
    async for item in gen:
        print("inside dealer 1")
        print(item)

if __name__ == "__main__":
    loop = asyncio.get_event_loop()
    # loop.run_until_complete(cc.main())
    loop.run_until_complete(dealer1())
    loop.close()
Async generators are supported natively in Python 3.6. If you are working with Python 3.5 you can use the async_generator library (https://pypi.python.org/pypi/async_generator/1.5).
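For comparison, on Python 3.6+ the same pipeline can be written with native async generators, using plain yield instead of await yield_ (a sketch reusing fun from above; the rest stays unchanged):

async def main(l):
    futures = [fun(n) for n in l]
    for future in asyncio.as_completed(futures):
        result = await future
        yield result

async def dealer():
    async for item in main([2, 4, 6]):
        yield item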
Let's say I have a bunch of coroutines that should be called using yield from. How can I detect that I made a mistake and called a coroutine directly, like a subroutine?
Here is some example code:
import asyncio

@asyncio.coroutine
def A(msg):
    print(msg)
    yield from asyncio.sleep(1)

@asyncio.coroutine
def B():
    while True:
        yield from A('1')
        A('2')
        yield from A('3')

loop = asyncio.get_event_loop()
loop.run_until_complete(B())
And the output :
1
3
1
3
1
3
...
Calling a coroutine like a subroutine does nothing: it neither raises an exception nor blocks the event loop, so the failure mode is very quiet.
I added a check_iterator decorator to your code. If you decorate your coroutine with it, it will print information whenever your coroutine is directly called and never accessed using __iter__. Depending on your actual code, you may need to make a more complete implementation and wrap __next__ as well. That would probably be somewhat lower-performance, though.
import asyncio
import functools

class IterWrap(object):
    def __init__(self, f, *args, **kwds):
        self.iterated = False
        self.info = [f, args, kwds]
        self.f = f(*args, **kwds)
    def __iter__(self):
        self.iterated = True
        return iter(self.f)
    def __del__(self):
        if not self.iterated:
            print("Did not yield: %s" % self.info)

def check_iterator(f):
    @functools.wraps(f)
    def wrapper(*args, **kwds):
        return IterWrap(f, *args, **kwds)
    return wrapper

@check_iterator
@asyncio.coroutine
def A(msg):
    print(msg)
    yield from asyncio.sleep(1)

@asyncio.coroutine
def B():
    while True:
        yield from A('1')
        A('2')
        yield from A('3')

loop = asyncio.get_event_loop()
loop.run_until_complete(B())
The results of running this on Python 3.4 are:
1
Did not yield: [<function A at 0xb6e3189c>, ('2',), {}]
3
1
Did not yield: [<function A at 0xb6e3189c>, ('2',), {}]
3
1
Did not yield: [<function A at 0xb6e3189c>, ('2',), {}]
3
1
Did not yield: [<function A at 0xb6e3189c>, ('2',), {}]
3
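As a side note, asyncio has a built-in check along these lines: when debug mode is enabled, coroutines that are created but never yielded from are reported when they are garbage collected (a hedged pointer, not a replacement for the decorator above):

# Enable asyncio debug mode, e.g. before starting the program:
#   PYTHONASYNCIODEBUG=1 python script.py
# or in code:
loop = asyncio.get_event_loop()
loop.set_debug(True)   # unused coroutines get logged as "was never yielded from"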
I have a generator where each iteration takes a long time to run. Is there a standard way to have it yield a value, then generate the next value while waiting to be called again?
The generator would be called each time a button is pressed in a GUI, and the user would be expected to consider the result after each button press.
EDIT: a workaround might be:

def initialize():
    global res
    res = next(gen)

def btn_callback():
    global res
    display(res)
    res = next(gen)
    if not res:
        return
If I wanted to do something like your workaround, I'd write a class like this:
class PrefetchedGenerator(object):
    def __init__(self, generator):
        self._data = generator.next()
        self._generator = generator
        self._ready = True

    def next(self):
        if not self._ready:
            self.prefetch()
        self._ready = False
        return self._data

    def prefetch(self):
        if not self._ready:
            self._data = self._generator.next()
            self._ready = True
It is more complicated than your version because it handles both never calling prefetch and calling prefetch too many times. The basic idea is that you call .next() when you want the next item, and prefetch() when you have "time" to kill.
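A hypothetical sketch of wiring it into the GUI callback from the question (slow_gen and display are assumed names, not from the answer):

gen = PrefetchedGenerator(slow_gen())

def btn_callback():
    display(gen.next())   # return the already-computed value immediately
    gen.prefetch()        # compute the next one while the user reads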
Your other option is a thread:
import threading
import Queue

class BackgroundGenerator(threading.Thread):
    def __init__(self, generator):
        threading.Thread.__init__(self)
        self.queue = Queue.Queue(1)
        self.generator = generator
        self.daemon = True
        self.start()

    def run(self):
        for item in self.generator:
            self.queue.put(item)
        self.queue.put(None)  # sentinel: generator exhausted

    def next(self):
        next_item = self.queue.get()
        if next_item is None:
            raise StopIteration
        return next_item
This will run separately from your main application. Your GUI should remain responsive no matter how long it takes to fetch each iteration.
No. A generator is not asynchronous. This isn't multiprocessing.
If you want to avoid waiting for the calculation, you should use the multiprocessing package so that an independent process can do your expensive calculation.
You want a separate process which is calculating and enqueueing results.
Your "generator" can then simply dequeue the available results.
You can definitely do this with generators: just write your generator so that each call to next alternates between computing the next value and returning it, by putting in multiple yield statements. Here is an example:
import itertools, time

def quick_gen():
    counter = itertools.count().next
    def long_running_func():
        time.sleep(2)
        return counter()
    while True:
        x = long_running_func()
        yield
        yield x
>>> itr = quick_gen()
>>> itr.next() # setup call, takes two seconds
>>> itr.next() # returns immediately
0
>>> itr.next() # setup call, takes two seconds
>>> itr.next() # returns immediately
1
Note that the generator does not automatically do the processing to get the next value; it is up to the caller to call next twice for each value. For your use case you would call next once as setup, and then each time the user clicks the button you would display the next value generated, then call next again for the prefetch.
I was after something similar. I wanted yield to return a value quickly (if it could) while a background thread processed the next one.
import Queue
import time
import threading

class MyGen():
    def __init__(self):
        self.queue = Queue.Queue()
        # Put a first element into the queue, and initialize our thread
        self.i = 1
        self.t = threading.Thread(target=self.worker, args=(self.queue, self.i))
        self.t.start()

    def __iter__(self):
        return self

    def worker(self, queue, i):
        time.sleep(1)  # Take a while to process
        queue.put(i**2)

    def __del__(self):
        self.stop()

    def stop(self):
        while True:  # Flush the queue
            try:
                self.queue.get(False)
            except Queue.Empty:
                break
        self.t.join()

    def next(self):
        # Start a thread to compute the next next.
        self.t.join()
        self.i += 1
        self.t = threading.Thread(target=self.worker, args=(self.queue, self.i))
        self.t.start()

        # Now deliver the already-queued element
        while True:
            try:
                print "request at", time.time()
                obj = self.queue.get(False)
                self.queue.task_done()
                return obj
            except Queue.Empty:
                pass
            time.sleep(.001)

if __name__ == '__main__':
    f = MyGen()
    for i in range(5):
        # time.sleep(2)  # Comment out to get items as they are ready
        print "*********"
        print f.next()
        print "returned at", time.time()
The code above gave the following results:
*********
request at 1342462505.96
1
returned at 1342462505.96
*********
request at 1342462506.96
4
returned at 1342462506.96
*********
request at 1342462507.96
9
returned at 1342462507.96
*********
request at 1342462508.96
16
returned at 1342462508.96
*********
request at 1342462509.96
25
returned at 1342462509.96