Tornado not running callback - python

This is a very simple client for a TCP chat. This is the main function:
@gen.coroutine
def main():
    factory = TCPClient()
    stream = yield factory.connect(af=socket.AF_INET, **options.options.group_dict("connect"))
    # Add notification callback
    ioloop.IOLoop.instance().add_callback(notification, stream)
    # Run application
    app = Application(stream)
    app.run()

if __name__ == '__main__':
    try:
        main()
        ioloop.IOLoop.instance().start()
    except KeyboardInterrupt:
        pass
The application runs, reads data from the console, and sends it to the socket. The main loop of the application:
@gen.coroutine
def run(self):
    while True:
        try:
            s = input('> ')
            command, text = self._parse_command(s)
            handler = self.handler(self._stream, self)
            yield handler.execute_command(command, text)
        except Exception as e:
            print(e)
And I have a console notification. This function reads a response from the socket and prints it to the console:
@gen.coroutine
def notification(stream):
    message_length = yield stream.read_bytes(2)
    length = struct.unpack("!H", message_length)[0]
    message = yield stream.read_bytes(length)
    # request = Message.unpack(message=message)
    sys.stdout.write('\r' + ' ' * (len(readline.get_line_buffer()) + 2) + '\r')
    print(message)
    sys.stdout.write('> ' + readline.get_line_buffer())
    sys.stdout.flush()
    ioloop.IOLoop.instance().add_callback(notification, stream)
I add this function as a callback to the IOLoop, but it never runs. How do I run notification in the background? Please help...
UPD:
I created a new thread and ran notification in it:
th = threading.Thread(target=notification, args=(self._stream, ))
th.run()
But it did not help...

input() is a blocking function; nothing else can happen while it is waiting for input. In order for the application to be responsive you must rework it to avoid blocking functions like input(), or to perform those functions on a separate thread.
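As a minimal sketch (not the asker's code), the Application.run loop could hand the blocking input() call to a thread pool so the IOLoop stays free to run notification(); this assumes a Tornado version whose coroutines accept a yielded concurrent.futures.Future (4.x and later):
from concurrent.futures import ThreadPoolExecutor
from tornado import gen

executor = ThreadPoolExecutor(max_workers=1)

@gen.coroutine
def run(self):
    while True:
        try:
            # input() runs on the executor thread; the IOLoop keeps running
            # and can execute the notification callback in the meantime.
            s = yield executor.submit(input, '> ')
            command, text = self._parse_command(s)
            handler = self.handler(self._stream, self)
            yield handler.execute_command(command, text)
        except Exception as e:
            print(e)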

Related

Restart a python function in a script after n minutes

I have a script that uses a server-sent event library to connect with a server that pushes events to me regularly. The issue is that the stream freezes after a long time, and I have to restart the script manually, which is not maintainable. The structure of the current code looks like this:
def listen(self):
    print("listening to events .....")
    try:
        url = settings.EVENT_URL + "/v1/events"
        auth_key = settings.KEY
        headers = {
            "Authorization": "Basic " + auth_key,
            "Accept": "text/event-stream",
        }
        response = self.with_urllib3(url, headers)
        client = sseclient.SSEClient(response)
        for event in client.events():
            # the script freezes here.
            logger.info(event.data)
            process(event.data)
I have tried doing something like:
def start(self):
    def wait():
        time.sleep(10 * 60)
    background = threading.Thread(name='background', target=self.listen)
    background.daemon = True
    background.start()
    wait()

try:
    self.start()
except:
    self.start()
finally:
    self.start()
But I don't know if this will work, mainly because the daemon thread keeps running in the background, which means I will end up with multiple copies of the task after a while.
What I need is a better way to call a function, return from it after some elapsed time, and immediately call it again. Thanks for any help.
You could consider a construction using the signal module, as shown below. Note, though, that the SIGALRM signal is not available on Windows.
import signal

TIMEOUT = 5

def _handle_alarm(_, __):
    raise TimeoutError("Some useful message")

def listen():
    print("Starting to listen...")
    import time
    time.sleep(10)  # stands in for the real work that may hang

signal.signal(signal.SIGALRM, _handle_alarm)  # register the handler once
while True:
    try:
        signal.alarm(TIMEOUT)  # deliver SIGALRM (-> TimeoutError) after TIMEOUT seconds
        listen()
        signal.alarm(0)        # cancel the pending alarm if listen() returned in time
    except TimeoutError:
        pass
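Since SIGALRM is unavailable on Windows, an alternative worth noting (a sketch, not from the original answer; it assumes with_urllib3 wraps urllib3.PoolManager.request as in the sseclient docs) is to give the underlying HTTP stream a read timeout so the event iterator raises when the connection goes silent, then reconnect in a loop:
import urllib3
import sseclient

def listen_forever(url, headers, read_timeout=600):
    http = urllib3.PoolManager()
    while True:
        try:
            response = http.request(
                'GET', url, headers=headers, preload_content=False,
                timeout=urllib3.Timeout(connect=10, read=read_timeout))
            client = sseclient.SSEClient(response)
            for event in client.events():
                process(event.data)  # process() as in the question
        except urllib3.exceptions.HTTPError:
            continue  # read timed out or connection dropped: reconnect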

Python gRPC cancel unary-stream call from client side

Using Python gRPC, I would like to be able to cancel a long-running unary-stream call from the client side, when a threading.Event is set.
def application(stub: StreamsStub, event: threading.Event):
    stream = stub.Application(ApplicationStreamRequest())
    try:
        for resp in stream:
            print(resp)
    except grpc.RpcError as e:
        print(e)
For the time being I am cancelling the stream using the channel.close() method, but of course this closes all connections rather than just this stream.
Could someone suggest how I can use the event to cancel the stream iterator? Thanks
Below is some code for a gRPC UnaryStream call. The server sends an unending number of replies, leaving the client to decide when to stop receiving them.
Instead of using a counter, you can have a thread go off and do some work and set an event; check the event before calling cancel(), instead of checking the counter.
Note: using Python 2.7
Protofile:
syntax = "proto3";
package my_package;
service HeartBeat {
rpc Beats(Counter) returns (stream Counter) {}
}
message Counter {
int32 counter = 1;
}
Client:
from __future__ import print_function
import grpc
import heartbeat_pb2
import heartbeat_pb2_grpc

def get_beats(stub, channel):
    try:
        result_iterator = stub.Beats(heartbeat_pb2.Counter(counter=0))
        for result in result_iterator:
            print("Count: {}".format(result.counter))
            if result.counter >= 3:  # We only want 3 'beats'
                result_iterator.cancel()
    except grpc.RpcError as rpc_error:
        if rpc_error.code() == grpc.StatusCode.CANCELLED:
            pass  # Otherwise, a traceback is printed

def run():
    with grpc.insecure_channel('localhost:9999') as channel:
        stub = heartbeat_pb2_grpc.HeartBeatStub(channel)
        get_beats(stub, channel)

if __name__ == '__main__':
    run()
Server:
from concurrent import futures
import grpc
from proto_generated import heartbeat_pb2
from proto_generated import heartbeat_pb2_grpc
import time

class HeartBeatServicer(heartbeat_pb2_grpc.HeartBeatServicer):
    def Beats(self, request, context):
        # Not required, only to show sending the server a message
        print("Beats: {}".format(request.counter))

        def response_message():
            i = 0
            while context.is_active():
                print("Sending {}".format(i))
                response = heartbeat_pb2.Counter(counter=i)
                i += 1
                time.sleep(1)  # Simulate doing work
                yield response

        return response_message()

def serve():
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
    heartbeat_pb2_grpc.add_HeartBeatServicer_to_server(
        HeartBeatServicer(), server)
    server.add_insecure_port('[::]:9999')
    server.start()
    server.wait_for_termination()

if __name__ == '__main__':
    serve()
The _Rendezvous object returned by an RPC call implements grpc.RpcError, grpc.Future, and grpc.Call, so cancelling the stream is as simple as calling stream.cancel() (from the grpc.Future interface).
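Tying that back to the original question, here is a minimal sketch (helper names assumed, not from the post) of using the threading.Event to cancel the stream from a side thread; the consuming loop then sees grpc.StatusCode.CANCELLED:
import threading
import grpc

def application(stub, event):
    stream = stub.Application(ApplicationStreamRequest())

    def cancel_on_event():
        event.wait()     # block until the caller sets the event
        stream.cancel()  # grpc.Future interface on the call object

    threading.Thread(target=cancel_on_event, daemon=True).start()
    try:
        for resp in stream:
            print(resp)
    except grpc.RpcError as e:
        if e.code() == grpc.StatusCode.CANCELLED:
            pass  # expected when the event fires
        else:
            raise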

Tornado websocket client losing response messages?

I need to process frames from a webcam and send a few selected frames to a remote websocket server. The server answers immediately with a confirmation message (much like an echo server).
Frame processing is slow and CPU-intensive, so I want to do it in a separate thread pool (producer) to use all the available cores. The client (consumer) just sits idle until the pool has something to send.
My current implementation (see below) works fine only if I add a small sleep inside the producer test loop. If I remove this delay I stop receiving any answers from the server (both from the echo server and from my real server). Even the first answer is lost, so I do not think this is a flood-protection mechanism.
What am I doing wrong?
import tornado
from tornado.websocket import websocket_connect
from tornado import gen, queues
import time

class TornadoClient(object):
    url = None
    onMessageReceived = None
    onMessageSent = None
    ioloop = tornado.ioloop.IOLoop.current()
    q = queues.Queue()

    def __init__(self, url, onMessageReceived, onMessageSent):
        self.url = url
        self.onMessageReceived = onMessageReceived
        self.onMessageSent = onMessageSent

    def enqueueMessage(self, msgData, binary=False):
        print("TornadoClient.enqueueMessage")
        self.ioloop.add_callback(self.addToQueue, (msgData, binary))
        print("TornadoClient.enqueueMessage done")

    @gen.coroutine
    def addToQueue(self, msgTuple):
        yield self.q.put(msgTuple)

    @gen.coroutine
    def main_loop(self):
        connection = None
        try:
            while True:
                while connection is None:
                    try:
                        print("Connecting...")
                        connection = yield websocket_connect(self.url)
                        print("Connected " + str(connection))
                    except Exception, e:
                        print("Exception on connection " + str(e))
                        connection = None
                        print("Retry in a few seconds...")
                        yield gen.Task(self.ioloop.add_timeout, time.time() + 3)
                try:
                    print("Waiting for data to send...")
                    msgData, binaryVal = yield self.q.get()
                    print("Writing...")
                    sendFuture = connection.write_message(msgData, binary=binaryVal)
                    print("Write scheduled...")
                finally:
                    self.q.task_done()
                yield sendFuture
                self.onMessageSent("Sent ok")
                print("Write done. Reading...")
                msg = yield connection.read_message()
                print("Got msg.")
                self.onMessageReceived(msg)
                if msg is None:
                    print("Connection lost")
                    connection = None
            print("main loop completed")
        except Exception, e:
            print("ExceptionExceptionException")
            print(e)
            connection = None
        print("Exit main_loop function")

    def start(self):
        self.ioloop.run_sync(self.main_loop)
        print("Main loop completed")

######### TEST METHODS #########
def sendMessages(client):
    time.sleep(2)  # TEST only: wait for client startup
    while True:
        client.enqueueMessage("msgData", binary=False)
        time.sleep(1)  # <--- comment this line to break it

def testPrintMessage(msg):
    print("Received: " + str(msg))

def testPrintSentMessage(msg):
    print("Sent: " + msg)

if __name__ == '__main__':
    from threading import Thread
    client = TornadoClient("ws://echo.websocket.org", testPrintMessage, testPrintSentMessage)
    thread = Thread(target=sendMessages, args=(client, ))
    thread.start()
    client.start()
My real problem
In my real program I use a window-like mechanism to protect the consumer (an autobahn.twisted.websocket server): the producer can send up to a maximum number of unacknowledged messages (the webcam frames), then stops and waits for half of the window to free up.
The consumer sends a "PROCESSED" message back, acknowledging one or more messages (just a counter, not by id).
What I see in the consumer log is that the messages are processed and the answer is sent back, but these acks vanish somewhere in the network.
I have little experience with async I/O, so I wanted to be sure that I'm not missing a yield, a decorator, or something else.
This is the consumer side log:
2017-05-13 18:59:54+0200 [-] TX Frame to tcp4:192.168.0.5:48964 : fin = True, rsv = 0, opcode = 1, mask = -, length = 21, repeat_length = None, chopsize = None, sync = False, payload = {"type": "PROCESSED"}
2017-05-13 18:59:54+0200 [-] TX Octets to tcp4:192.168.0.5:48964 : sync = False, octets = 81157b2274797065223a202250524f434553534544227d
This is neat code. I believe the reason you need a sleep in your sendMessages thread is that, otherwise, it keeps calling enqueueMessage as fast as possible, millions of times per second. Since enqueueMessage does not wait for the enqueued message to be processed, it keeps calling IOLoop.add_callback as fast as it can, without giving the loop enough opportunity to execute the callbacks.
The loop might make some progress running on the main thread, since you're not actually blocking it. But the sendMessages thread adds callbacks much faster than the loop can handle them. By the time the loop has popped one message from the queue and has begun to process it, millions of new callbacks are added already, which the loop must execute before it can advance to the next stage of message-processing.
Therefore, for your test code, I think it's correct to sleep between calls to enqueueMessage on the thread.
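One possible refinement (a sketch under the assumption that blocking the producer thread per message is acceptable; not a drop-in fix from the answer): have enqueueMessage wait until the IOLoop has actually queued the message, so callbacks cannot pile up faster than the loop drains them:
import concurrent.futures

def enqueueMessage(self, msgData, binary=False):
    done = concurrent.futures.Future()

    def _put():
        # Runs on the IOLoop thread.
        self.q.put((msgData, binary))  # tornado.queues.Queue; an unbounded put completes immediately
        done.set_result(None)

    self.ioloop.add_callback(_put)  # add_callback is thread-safe
    done.result()                   # block the producer thread until the item is queued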

Trying to implement 2 "threads" using `asyncio` module

I've played around with threading before in Python, but decided to give the asyncio module a try, especially since you can cancel a running task, which seemed like a nice detail. However, for some reason, I can't wrap my head around it.
Here's what I wanted to implement (sorry if I'm using incorrect terminology):
a downloader thread that downloads the same file every x seconds, checks its hash against the previous download and saves it if it's different.
a webserver thread that runs in the background, allowing control (pause, list, stop) of the downloader thread.
I used aiohttp for the webserver.
This is what I have so far:
class aiotest():
    def __init__(self):
        self._dl = None      # downloader future
        self._webapp = None  # web server future
        self.init_server()

    def init_server(self):
        print('Setting up web interface')
        app = web.Application()
        app.router.add_route('GET', '/stop', self.stop)
        print('added urls')
        self._webapp = app

    @asyncio.coroutine
    def _downloader(self):
        while True:
            try:
                print('Downloading and verifying file...')
                # Dummy sleep - to be replaced by actual code
                yield from asyncio.sleep(random.randint(3, 10))
                # Wait a predefined nr of seconds between downloads
                yield from asyncio.sleep(30)
            except asyncio.CancelledError:
                break

    @asyncio.coroutine
    def _supervisor(self):
        print('Starting downloader')
        self._dl = asyncio.async(self._downloader())

    def start(self):
        loop = asyncio.get_event_loop()
        loop.run_until_complete(self._supervisor())
        loop.close()

    @asyncio.coroutine
    def stop(self):
        print('Received STOP')
        self._dl.cancel()
        return web.Response(body=b"Stopping... ")
This class is called by:
t = aiotest()
t.start()
This doesn't work of course, and I feel that this is a horrible piece of code.
What's unclear to me:
I stop the downloader in the stop() method, but how would I go about stopping the webserver (e.g. in a shutdown() method)?
Does the downloader need a new event loop, or can I use the loop returned by asyncio.get_event_loop()?
Do I really need something like the supervisor for what I'm trying to implement? This seems so clunky. And how do I get supervisor to keep running instead of ending after a single execution as it does now?
One last, more general question: is asyncio supposed to replace the threading module (in the future)? Or does each have its own application?
I appreciate all the pointers, remarks and clarifications!
Why the current code is not working:
You're running the event loop until self._supervisor() is complete. self._supervisor() creates the task (that happens immediately) and then finishes immediately.
You're trying to run the event loop until _supervisor completes, but how and when are you going to start the server? The event loop should keep running until the server is stopped; _supervisor and other coroutines can be added as tasks on the same loop. aiohttp already has a function that starts both the server and the event loop, web.run_app, but we can also do it manually.
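For reference, the short form mentioned above looks roughly like this (a sketch; the manual version further down gives more control over startup and cleanup):
from aiohttp import web

app = web.Application()
# app.router.add_route(...) handlers go here, as in init_server()
web.run_app(app, host='0.0.0.0', port=8080)  # creates and runs the event loop for you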
Your questions:
Your server will run until you stop it. You can start and stop different coroutines while the server is running.
You need only one event loop for all the coroutines.
I don't think you need the supervisor.
On the more general question: asyncio helps you run different functions concurrently in a single thread of a single process. That's why asyncio is so cool and fast. Some of your synchronous, thread-based code can be rewritten using asyncio and its coroutines. Moreover, asyncio can interact with threads and processes.
That can be useful when you still need threads and processes; a small sketch of that follows.
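The example linked in the original answer is not reproduced here; as an illustration (pre-3.5 syntax to match the rest of the post), run_in_executor hands a blocking call to a thread pool without blocking the event loop:
import asyncio

@asyncio.coroutine
def call_blocking(loop, blocking_fn, *args):
    # None selects the loop's default ThreadPoolExecutor
    result = yield from loop.run_in_executor(None, blocking_fn, *args)
    return result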
Useful notes:
It's better to use the term coroutine rather than thread when talking about asyncio coroutines, which are not threads.
If you use Python 3.5, you can use the async/await syntax instead of @asyncio.coroutine/yield from (see the snippet below).
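For example, the downloader coroutine in both styles (illustrative only):
# Python 3.4 style, as in the rewrite below:
@asyncio.coroutine
def _downloader(self):
    yield from asyncio.sleep(1)

# Python 3.5+ style:
async def _downloader(self):
    await asyncio.sleep(1)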
I rewrote your code to show the idea. How to check it: run the program, watch the console, open http://localhost:8080/stop, watch the console, open http://localhost:8080/start, watch the console, then press CTRL+C.
import asyncio
import random
from contextlib import suppress
from aiohttp import web

class aiotest():
    def __init__(self):
        self._webapp = None
        self._d_task = None
        self.init_server()

    # SERVER:
    def init_server(self):
        app = web.Application()
        app.router.add_route('GET', '/start', self.start)
        app.router.add_route('GET', '/stop', self.stop)
        app.router.add_route('GET', '/kill_server', self.kill_server)
        self._webapp = app

    def run_server(self):
        # Create server:
        loop = asyncio.get_event_loop()
        handler = self._webapp.make_handler()
        f = loop.create_server(handler, '0.0.0.0', 8080)
        srv = loop.run_until_complete(f)
        try:
            # Start downloader at server start:
            asyncio.async(self.start(None))  # I'm using controllers here and below to be short,
                                             # but it's better to split controller and start func
            # Start server:
            loop.run_forever()
        except KeyboardInterrupt:
            pass
        finally:
            # Stop downloader when server stopped:
            loop.run_until_complete(self.stop(None))
            # Cleanup resources:
            srv.close()
            loop.run_until_complete(srv.wait_closed())
            loop.run_until_complete(self._webapp.shutdown())
            loop.run_until_complete(handler.finish_connections(60.0))
            loop.run_until_complete(self._webapp.cleanup())
            loop.close()

    @asyncio.coroutine
    def kill_server(self, request):
        print('Server killing...')
        loop = asyncio.get_event_loop()
        loop.stop()
        return web.Response(body=b"Server killed")

    # DOWNLOADER
    @asyncio.coroutine
    def start(self, request):
        if self._d_task is None:
            print('Downloader starting...')
            self._d_task = asyncio.async(self._downloader())
            return web.Response(body=b"Downloader started")
        else:
            return web.Response(body=b"Downloader already started")

    @asyncio.coroutine
    def stop(self, request):
        if (self._d_task is not None) and (not self._d_task.cancelled()):
            print('Downloader stopping...')
            self._d_task.cancel()
            # cancel() only tells the task it should be cancelled;
            # to let the task handle CancelledError, await it:
            with suppress(asyncio.CancelledError):
                yield from self._d_task
            self._d_task = None
            return web.Response(body=b"Downloader stopped")
        else:
            return web.Response(body=b"Downloader already stopped or stopping")

    @asyncio.coroutine
    def _downloader(self):
        while True:
            print('Downloading and verifying file...')
            # Dummy sleep - to be replaced by actual code
            yield from asyncio.sleep(random.randint(1, 2))
            # Wait a predefined nr of seconds between downloads
            yield from asyncio.sleep(1)

if __name__ == '__main__':
    t = aiotest()
    t.run_server()

How to monitor events from workers in a Celery-Django application?

According to the celery tutorial regarding real-time monitoring of celery workers, one can also programmatically capture the events produced by the workers and take action accordingly.
My question is: how can I integrate a monitor like the one in this example into a Celery-Django application?
EDIT:
The code example in the tutorial looks like:
from celery import Celery

def my_monitor(app):
    state = app.events.State()

    def announce_failed_tasks(event):
        state.event(event)
        task_id = event['uuid']
        print('TASK FAILED: %s[%s] %s' % (
            event['name'], task_id, state[task_id].info(), ))

    with app.connection() as connection:
        recv = app.events.Receiver(connection, handlers={
            'task-failed': announce_failed_tasks,
            'worker-heartbeat': announce_dead_workers,
        })
        recv.capture(limit=None, timeout=None, wakeup=True)

if __name__ == '__main__':
    celery = Celery(broker='amqp://guest@localhost//')
    my_monitor(celery)
So I want to capture the task-failed event sent by the worker, get its task_id as the tutorial shows, fetch the result for this task from the result backend configured for my application, and process it further. My problem is that it is not obvious to me how to get the application object, since in a django-celery project the instantiation of the Celery library is not transparent to me.
I am also open to any other idea as to how to process the results when a worker has finished executing a task.
OK, I found a way of doing this. I am not sure it is the solution, but it works for me. The monitor function basically connects directly to the broker and listens for different types of events. My code looks like this:
import sys

from celery.events import EventReceiver
from kombu import Connection as BrokerConnection

def my_monitor():
    connection = BrokerConnection('amqp://guest:guest@localhost:5672//')

    def on_event(event):
        print "EVENT HAPPENED: ", event

    def on_task_failed(event):
        exception = event['exception']
        print "TASK FAILED!", event, " EXCEPTION: ", exception

    while True:
        try:
            with connection as conn:
                recv = EventReceiver(conn,
                                     handlers={'task-failed': on_task_failed,
                                               'task-succeeded': on_event,
                                               'task-sent': on_event,
                                               'task-received': on_event,
                                               'task-revoked': on_event,
                                               'task-started': on_event,
                                               # OR: '*': on_event
                                               })
                recv.capture(limit=None, timeout=None)
        except (KeyboardInterrupt, SystemExit):
            print "EXCEPTION KEYBOARD INTERRUPT"
            sys.exit()
That's all. I run this in a different process from the normal application, meaning that I spawn a child process of my Celery application that only runs this function.
HTH
Beware of a couple of gotchas:
You need to set the CELERY_SEND_EVENTS flag to true in your Celery config.
You can also run the event monitor in a new thread from your worker.
Here is my implementation:
import threading
import time

class MonitorThread(object):
    def __init__(self, celery_app, interval=1):
        self.celery_app = celery_app
        self.interval = interval
        self.state = self.celery_app.events.State()
        self.thread = threading.Thread(target=self.run, args=())
        self.thread.daemon = True
        self.thread.start()

    def catchall(self, event):
        if event['type'] != 'worker-heartbeat':
            self.state.event(event)
        # logic here

    def run(self):
        while True:
            try:
                with self.celery_app.connection() as connection:
                    recv = self.celery_app.events.Receiver(connection, handlers={
                        '*': self.catchall
                    })
                    recv.capture(limit=None, timeout=None, wakeup=True)
            except (KeyboardInterrupt, SystemExit):
                raise
            except Exception:
                # unable to capture
                pass
            time.sleep(self.interval)

if __name__ == '__main__':
    app = get_celery_app()  # returns app
    MonitorThread(app)
    app.start()
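On the first gotcha above: task events are off by default, so a minimal configuration sketch might look like this (get_celery_app is the same placeholder used in the snippet above; the uppercase setting name matches the older Celery versions referenced here, newer versions call it worker_send_task_events):
app = get_celery_app()
app.conf.CELERY_SEND_EVENTS = True
# or start the workers with the -E flag, e.g.:
#   celery -A yourproject worker -E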
