Tornado Asynchronous Writes - python

The code below is a simplified version of a Tornado-based TCP server that currently hosts a Videotex system. It was derived from the Tornado documentation and has been running in a live environment for some time without issue; however, there is a feature I need to add.
The system currently waits until a character is received from the client before returning data via stream.write. As the client end typically runs at 1200 baud (via a telnet modem), the user has to wait until all stream writes have completed before the next user-entered character is processed.
What I would like to do is find a way to abandon writing data to stream.write if another character is received from the client.
I am new to Tornado and fairly new to Python; however, I have coded asynchronous functions and threaded solutions in the past using C#.
According to the documentation, stream.write is asynchronous, so I assume the call may return before the data has been completely written. That leaves me thinking I need a way to abandon/empty/advance the write buffer to stop the write operation when a new char is detected on stream.read.
One option that would seem to give me what I need is to somehow perform the stream.write calls on another thread; however, that approach seems inappropriate when using Tornado's IOLoop.
Is there a way to get the facility I am after? I have full control of the code and am happy to restructure the app if needed.
import logging
import struct
import os
import traceback

from tornado import gen
from tornado.ioloop import IOLoop
from tornado.iostream import StreamClosedError
from tornado.tcpserver import TCPServer

# Configure logging.
logger = logging.getLogger(os.path.basename(__file__))
logger.setLevel(logging.INFO)

# Cache this struct definition; important optimization.
int_struct = struct.Struct("<i")
_UNPACK_INT = int_struct.unpack
_PACK_INT = int_struct.pack


class TornadoServer(TCPServer):

    def start(self, port):
        self.port = port
        self.listen(port)

    @gen.coroutine
    def handle_stream(self, stream, address):
        logger.info("[viewdata] Connection from client address {0}.".format(address))
        try:
            while True:
                char = yield stream.read_bytes(1)  # waits here until a byte arrives
                asc = ord(char)
                logger.info('[viewdata] Byte Received {0} ({1})'.format(hex(asc), asc))
                # Do some processing using the received char and return the appropriate page of data
                stream.write('This is the data you asked for...'.encode())
        except StreamClosedError as ex:
            logger.info("[viewdata] {0} Disconnected: {1} Message: {2}".format(address, type(ex), str(ex)))
        except Exception as ex:
            logger.error("[viewdata] {0} Exception: {1} Message: {2}".format(address, type(ex), str(ex)))
            logger.error(traceback.format_exc())


if __name__ == '__main__':
    server = TornadoServer()
    server.start(25232)

    loop = IOLoop.current()
    loop.start()

The main idea is to move the long-running processing into a separate task.
When new client data arrives, you decide what to do with the task that is already running (in the example below I cancel it).
import logging
import os
import traceback
import threading

from tornado import gen
from tornado.ioloop import IOLoop
from tornado.iostream import StreamClosedError
from tornado.tcpserver import TCPServer

# Configure logging.
logger = logging.getLogger(os.path.basename(__file__))
logger.setLevel(logging.INFO)


class TornadoServer(TCPServer):

    def start(self, port):
        self.port = port
        self.listen(port)

    async def process_stream(self, stream, char, cancel_event):
        asc = ord(char)
        logger.info('[viewdata] Byte Received {0} ({1})'.format(hex(asc), asc))
        N = 5
        for i in range(N):
            if cancel_event.is_set():
                logger.info('[viewdata] Abort streaming')
                break
            # Do some processing using the received char and return the appropriate page of data
            msg = 'This is the {0} data you asked for...'.format(i)
            logger.info(msg)
            await stream.write('This is the part {0} of {1} you asked for...'.format(i, N).encode())
            await gen.sleep(1.0)  # make this processing longer..

    async def handle_stream(self, stream, address):
        process_stream_future = None
        cancel_event = None
        logger.info("[viewdata] Connection from client address {0}.".format(address))
        while True:
            try:
                char = await stream.read_bytes(1)  # waits here until a byte arrives

                # When client input is received, cancel the running job.
                if process_stream_future:
                    process_stream_future.cancel()
                if cancel_event:
                    cancel_event.set()

                cancel_event = threading.Event()
                process_stream_future = gen.convert_yielded(
                    self.process_stream(stream, char, cancel_event))
                IOLoop.current().add_future(process_stream_future, lambda f: f.result())
            except StreamClosedError as ex:
                logger.info("[viewdata] {0} Disconnected: {1} Message: {2}".format(address, type(ex), str(ex)))
                break
            except Exception as ex:
                logger.error("[viewdata] {0} Exception: {1} Message: {2}".format(address, type(ex), str(ex)))
                logger.error(traceback.format_exc())


if __name__ == '__main__':
    server = TornadoServer()
    server.listen(25232)

    loop = IOLoop.current()
    loop.start()
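A side note on the design: because everything above runs on a single IOLoop thread, the threading.Event is only ever used as a flag. A hypothetical variant could use tornado.locks.Event (same set()/is_set() interface, no locking overhead) and IOLoop.spawn_callback instead of convert_yielded/add_future. A minimal sketch, reusing process_stream exactly as defined in the answer above:
from tornado.ioloop import IOLoop
from tornado.iostream import StreamClosedError
from tornado.locks import Event  # single-threaded Event, fine here
from tornado.tcpserver import TCPServer

class TornadoServer(TCPServer):
    # process_stream(stream, char, cancel_event) as defined in the answer above

    async def handle_stream(self, stream, address):
        cancel_event = None
        while True:
            try:
                char = await stream.read_bytes(1)
            except StreamClosedError:
                break
            if cancel_event:
                cancel_event.set()  # ask the in-flight writer to stop
            cancel_event = Event()
            # Schedule the writer without awaiting it, so reads keep being serviced.
            IOLoop.current().spawn_callback(
                self.process_stream, stream, char, cancel_event)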

Related

implementing threading to consume queues from rabbitmq

This is an update to my previous question. I realized that I should have added some code to explain my issue further. I am currently trying to apply threading to queues consumed from a RabbitMQ exchange. As I am new to both RabbitMQ and threading, I am finding it difficult to combine and apply them. I was wondering if anyone could provide templates I could start from.
I am coding in Visual Studio, where a simulator is used to generate data, emulating the producer (a smart device).
The first part of the code assigns a few variables relevant to the project. I have imported the required libraries and a few extra scripts I have made myself. The imports on the second line are scripts that assist with communicating with the smart device.
import pika, sys, os
import SockAlertMessage_pb2, SockDataProcessedMessage_pb2, SockDataRawMessage_pb2, SockDataSessionEndMessage_pb2, SockDataSessionStartMessage_pb2, SockMessage_pb2
import numpy as np
import scipy
import ampd
import python_file_3
import heartpy
import time
import threading
from scipy.signal import detrend
from python_file_3 import filter_signal, get_hrv, get_rmssd, get_std, heart_rate
from ampd import find_peaks_original
The second part of the code declares the queues and establishes the connection with the RabbitMQ server:
sock_data_session_start_queue = 'sock_data_session_start_queue'
sock_data_session_end_queue = 'sock_data_session_end_queue'
sock_data_raw_queue = 'sock_data_raw_queue'
tx_queue = 'tbd'

# establish connection with rabbitmq server
connection = pika.BlockingConnection(pika.ConnectionParameters('localhost'))
channel = connection.channel()

# create rx message queues
channel.queue_declare(queue=sock_data_session_start_queue, durable=True)
channel.queue_declare(queue=sock_data_session_end_queue, durable=True)

# create tx message queue
channel.queue_declare(queue=tx_queue, durable=True)
# session registry used by the callbacks (not defined in the original snippet)
new_dict = {}

def sock_data_session_start_callback(ch, method, properties, body):
    message = SockDataSessionStartMessage_pb2.SockDataSessionStartMessage()
    message.ParseFromString(body)
    new_dict['1'] = message
    print(new_dict)
    # todo: create thread w/ state
    print(" [x] Start session %r" % message)
    # send message

def sock_data_session_end_callback(ch, method, properties, body):
    message = SockDataSessionEndMessage_pb2.SockDataSessionEndMessage()
    message.ParseFromString(body)
    # todo: destroy thread w/ state
    print(" [x] End session %r" % message)

def sock_data_raw_callback(ch, method, properties, body):
    message = SockDataRawMessage_pb2.SockDataRawMessage()
    message.ParseFromString(body)
    print(message)
    # todo: process raw data in the session's thread
    print(" [x] Sock data raw %r" % message)
if __name__ == '__main__':
    try:
        channel.basic_consume(queue=sock_data_session_start_queue, auto_ack=True, on_message_callback=sock_data_session_start_callback)
        channel.basic_consume(queue=sock_data_session_end_queue, auto_ack=True, on_message_callback=sock_data_session_end_callback)
        channel.basic_consume(queue=sock_data_raw_queue, auto_ack=True, on_message_callback=sock_data_raw_callback)
        print(' [*] Waiting for messages. To exit press CTRL+C')
        channel.start_consuming()
    except KeyboardInterrupt:
        print('Interrupted')
        # close connection
        connection.close()
        try:
            sys.exit(0)
        except SystemExit:
            os._exit(0)
The start and end data callbacks refer to data sessions: a session connection is acknowledged and started, and later ended. I believe the raw data callback is where I may need to implement my threads, where data will be processed and then sent back to another queue. The challenge is to make each data session a thread, and then do the processing inside it.
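One possible shape for this, sketched below under assumptions (SessionWorker, the session key, and the publish callable are hypothetical names, and the real signal processing from python_file_3 is stubbed out): the pika callbacks stay on the connection's thread and only hand work to a per-session worker thread through an in-memory queue. Note that pika connections and channels are not thread-safe, so the worker should publish results through its own connection, or hand them back to the consuming thread, rather than reusing the shared channel.
import queue
import threading

class SessionWorker(threading.Thread):
    """Hypothetical per-session worker: consumes raw payloads from its own
    in-memory queue, processes them, and hands results to a publish callable."""

    def __init__(self, session_id, publish):
        super().__init__(daemon=True)
        self.session_id = session_id
        self.publish = publish            # callable(bytes) supplied by the caller
        self.inbox = queue.Queue()
        self._stop = threading.Event()

    def run(self):
        while not self._stop.is_set():
            try:
                body = self.inbox.get(timeout=0.5)
            except queue.Empty:
                continue
            result = self.process(body)   # replace with filter_signal / heart_rate etc.
            self.publish(result)

    def process(self, body):
        return body                       # placeholder for the real processing

    def stop(self):
        self._stop.set()

# How the callbacks could drive the workers:
workers = {}

def on_session_start(session_id, publish):
    workers[session_id] = SessionWorker(session_id, publish)
    workers[session_id].start()

def on_raw_data(session_id, body):
    if session_id in workers:
        workers[session_id].inbox.put(body)

def on_session_end(session_id):
    worker = workers.pop(session_id, None)
    if worker:
        worker.stop()
The callbacks above would then call on_session_start, on_raw_data and on_session_end with whatever session identifier the protobuf messages carry.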

Python gRPC cancel unary-stream call from client side

Using Python gRPC, I would like to be able to cancel a long-running unary-stream call from the client side, when a threading.Event is set.
def application(stub: StreamsStub, event: threading.Event):
    stream = stub.Application(ApplicationStreamRequest())
    try:
        for resp in stream:
            print(resp)
    except grpc.RpcError as e:
        print(e)
For the time being I am cancelling the stream using the channel.close() method, but of course this closes all connections rather than just this stream.
Could someone suggest how I can use the event to cancel the stream iterator? Thanks
Below is some code for a gRPC UnaryStream call. The server sends an unending number of replies, leaving the client to decide when to stop receiving them.
Instead of driving it with a counter, you can have a thread go off, do some work, and set an event; check that event before calling cancel(), rather than checking the counter.
Note: using Python 2.7
Protofile:
syntax = "proto3";

package my_package;

service HeartBeat {
  rpc Beats(Counter) returns (stream Counter) {}
}

message Counter {
  int32 counter = 1;
}
Client:
from __future__ import print_function

import grpc

import heartbeat_pb2
import heartbeat_pb2_grpc

def get_beats(stub, channel):
    try:
        result_iterator = stub.Beats(heartbeat_pb2.Counter(counter=0))
        for result in result_iterator:
            print("Count: {}".format(result.counter))
            if result.counter >= 3:  # We only want 3 'beats'
                result_iterator.cancel()
    except grpc.RpcError as rpc_error:
        if rpc_error.code() == grpc.StatusCode.CANCELLED:
            pass  # Otherwise, a traceback is printed

def run():
    with grpc.insecure_channel('localhost:9999') as channel:
        stub = heartbeat_pb2_grpc.HeartBeatStub(channel)
        get_beats(stub, channel)

if __name__ == '__main__':
    run()
Server:
from concurrent import futures
import time

import grpc

from proto_generated import heartbeat_pb2
from proto_generated import heartbeat_pb2_grpc

class HeartBeatServicer(heartbeat_pb2_grpc.HeartBeatServicer):

    def Beats(self, request, context):
        # Not required, only to show sending the server a message
        print("Beats: {}".format(request.counter))

        def response_message():
            i = 0
            while context.is_active():
                print("Sending {}".format(i))
                response = heartbeat_pb2.Counter(counter=i)
                i += 1
                time.sleep(1)  # Simulate doing work
                yield response

        return response_message()

def serve():
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
    heartbeat_pb2_grpc.add_HeartBeatServicer_to_server(
        HeartBeatServicer(), server)
    server.add_insecure_port('[::]:9999')
    server.start()
    server.wait_for_termination()

if __name__ == '__main__':
    serve()
The _Rendezvous object returned by an RPC call implements grpc.RpcError, grpc.Future, and grpc.Call, so cancelling the stream is as simple as calling stream.cancel() (from the grpc.Future interface).
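To tie that back to the original question, here is a minimal sketch (Python 3, reusing the StreamsStub and ApplicationStreamRequest names from the question, which are assumed here) that cancels the stream as soon as a threading.Event is set, by letting a small watcher thread call cancel() on the call object:
import threading

import grpc

def application(stub, event):
    # ApplicationStreamRequest is assumed to come from the question's generated module.
    stream = stub.Application(ApplicationStreamRequest())

    # Watcher thread: block on the event, then cancel the in-flight call.
    # The returned call object implements grpc.Future, so cancel() is available.
    def cancel_on_event():
        event.wait()
        stream.cancel()

    threading.Thread(target=cancel_on_event, daemon=True).start()

    try:
        for resp in stream:
            print(resp)
    except grpc.RpcError as e:
        if e.code() == grpc.StatusCode.CANCELLED:
            pass  # expected when the event fired
        else:
            raise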

Gevent.monkey.patch_all breaks code that relies on socket.shutdown()

I'm currently working to add support for gevent-socketio to an existing Django project. I'm finding that the gevent.monkey.patch_all() call breaks the cancellation mechanism of a thread that is responsible for receiving data from a socket; we'll call the class SocketReadThread for now.
SocketReadThread is pretty simple: it calls recv() on a blocking socket. When it receives data it processes it and calls recv() again. The thread stops when an exception occurs or when recv() returns 0 bytes, as happens when socket.shutdown(SHUT_RDWR) is called in SocketReadThread.stop_reading().
The problem occurs when gevent.monkey.patch_all() replaces the default socket implementation. Instead of shutting down nicely I get the following exception:
error: [Errno 9] File descriptor was closed in another greenlet
I'm assuming this occurs because gevent makes my socket non-blocking in order to work its magic. This means that when I call socket.shutdown(socket.SHUT_RDWR), the greenlet that was doing the work for the monkey-patched socket.recv call tries to read from the closed file descriptor.
I coded an example to isolate this issue:
from gevent import monkey
monkey.patch_all()

import socket
import sys
import threading
import time

class SocketReadThread(threading.Thread):

    def __init__(self, socket):
        super(SocketReadThread, self).__init__()
        self._socket = socket

    def run(self):
        connected = True
        while connected:
            try:
                print "calling socket.recv"
                data = self._socket.recv(1024)
                if (len(data) < 1):
                    print "received nothing, assuming socket shutdown"
                    connected = False
                else:
                    print "Received something: {}".format(data)
            except socket.timeout as e:
                print "Socket timeout: {}".format(e)
                connected = False
            except:
                ex = sys.exc_info()[1]
                print "Unexpected exception occurred: {}".format(str(ex))
                raise ex

    def stop_reading(self):
        self._socket.shutdown(socket.SHUT_RDWR)
        self._socket.close()

if __name__ == '__main__':
    sock = socket.socket()
    sock.connect(('127.0.0.1', 4242))
    st = SocketReadThread(sock)
    st.start()
    time.sleep(3)
    st.stop_reading()
    st.join()
If you open a terminal and run nc -lp 4242 & (to give this program something to connect to) and then run this program, you will see the exception mentioned above. If you remove the call to monkey.patch_all() you will see that it works just fine.
My question is: how can I support cancellation of the SocketReadThread in a way that works with or without gevent monkey patching and doesn't require an arbitrary timeout that would make cancellation slow (i.e. calling recv() with a timeout and checking a conditional)?
I found that there were two different workarounds for this. The first was to simply catch and suppress the exception. This appears to work fine, since it is common practice for one thread to close a socket in order to make another thread exit from a blocking read. I don't know why greenlets would complain about this other than as a debugging aid; it is really just an annoyance.
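For reference, a minimal sketch of that first workaround. It assumes the gevent error surfaces as a socket.error with errno EBADF, as the message above suggests; if your gevent version raises a different type, widen the except accordingly. Only run() really changes from the original example:
import errno
import socket
import threading

class SocketReadThread(threading.Thread):
    """Same as the example above, but run() swallows the gevent EBADF error."""

    def __init__(self, sock):
        super(SocketReadThread, self).__init__()
        self._socket = sock

    def run(self):
        connected = True
        while connected:
            try:
                data = self._socket.recv(1024)
                if len(data) < 1:
                    connected = False   # peer closed or stop_reading() ran
                # else: process data here
            except socket.error as e:
                if e.errno == errno.EBADF:
                    connected = False   # closed in another greenlet: treat as shutdown
                else:
                    raise

    def stop_reading(self):
        self._socket.shutdown(socket.SHUT_RDWR)
        self._socket.close()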
The second option was to use the self-pipe trick (a quick search yields many explanations) as a mechanism to wake up a blocked thread. Essentially we create a second file descriptor (a socket is a kind of file descriptor to the OS) for signalling cancellation. We then use select as our blocking call, waiting for either incoming data on the socket or a cancellation request on the cancellation file descriptor. See the example code below.
from gevent import monkey
monkey.patch_all()

import os
import select
import socket
import sys
import threading
import time

class SocketReadThread(threading.Thread):

    def __init__(self, socket):
        super(SocketReadThread, self).__init__()
        self._socket = socket
        self._socket.setblocking(0)
        r, w = os.pipe()
        self._cancelpipe_r = os.fdopen(r, 'r')
        self._cancelpipe_w = os.fdopen(w, 'w')

    def run(self):
        connected = True
        read_fds = [self._socket, self._cancelpipe_r]
        while connected:
            print "Calling select"
            read_list, write_list, x_list = select.select(read_fds, [], [])
            print "Select returned"
            if self._cancelpipe_r in read_list:
                print "exiting"
                self._cleanup()
                connected = False
            elif self._socket in read_list:
                print "calling socket.recv"
                data = self._socket.recv(1024)
                if (len(data) < 1):
                    print "received nothing, assuming socket shutdown"
                    connected = False
                    self._cleanup()
                else:
                    print "Received something: {}".format(data)

    def stop_reading(self):
        print "writing to pipe"
        self._cancelpipe_w.write("\n")
        self._cancelpipe_w.flush()
        print "joining"
        self.join()
        print "joined"

    def _cleanup(self):
        self._cancelpipe_r.close()
        self._cancelpipe_w.close()
        self._socket.shutdown(socket.SHUT_RDWR)
        self._socket.close()

if __name__ == '__main__':
    sock = socket.socket()
    sock.connect(('127.0.0.1', 4242))
    st = SocketReadThread(sock)
    st.start()
    time.sleep(3)
    st.stop_reading()
Again, before running the above program run netcat -lp 4242 & to give it a listening socket to connect to.

Attaching ZMQStream with existing tornado ioloop

I have an application where every websocket connection (within Tornado's open callback) creates a zmq.SUB socket to an existing zmq.FORWARDER device. The idea is to receive data from zmq as callbacks, which can then be relayed to frontend clients over the websocket connection.
https://gist.github.com/abhinavsingh/6378134
ws.py
import zmq
from zmq.eventloop import ioloop
from zmq.eventloop.zmqstream import ZMQStream
ioloop.install()

from tornado.websocket import WebSocketHandler
from tornado.web import Application
from tornado.ioloop import IOLoop

ioloop = IOLoop.instance()

class ZMQPubSub(object):

    def __init__(self, callback):
        self.callback = callback

    def connect(self):
        self.context = zmq.Context()
        self.socket = self.context.socket(zmq.SUB)
        self.socket.connect('tcp://127.0.0.1:5560')
        self.stream = ZMQStream(self.socket)
        self.stream.on_recv(self.callback)

    def subscribe(self, channel_id):
        self.socket.setsockopt(zmq.SUBSCRIBE, channel_id)

class MyWebSocket(WebSocketHandler):

    def open(self):
        self.pubsub = ZMQPubSub(self.on_data)
        self.pubsub.connect()
        self.pubsub.subscribe("session_id")
        print 'ws opened'

    def on_message(self, message):
        print message

    def on_close(self):
        print 'ws closed'

    def on_data(self, data):
        print data

def main():
    application = Application([(r'/channel', MyWebSocket)])
    application.listen(10001)
    print 'starting ws on port 10001'
    ioloop.start()

if __name__ == '__main__':
    main()
forwarder.py
import logging
import zmq

# logger was not defined in the original snippet
logger = logging.getLogger(__name__)

def main():
    try:
        context = zmq.Context(1)

        frontend = context.socket(zmq.SUB)
        frontend.bind('tcp://*:5559')
        frontend.setsockopt(zmq.SUBSCRIBE, '')

        backend = context.socket(zmq.PUB)
        backend.bind('tcp://*:5560')

        print 'starting zmq forwarder'
        zmq.device(zmq.FORWARDER, frontend, backend)
    except KeyboardInterrupt:
        pass
    except Exception as e:
        logger.exception(e)
    finally:
        frontend.close()
        backend.close()
        context.term()

if __name__ == '__main__':
    main()
publish.py
import zmq

if __name__ == '__main__':
    context = zmq.Context()
    socket = context.socket(zmq.PUB)
    socket.connect('tcp://127.0.0.1:5559')
    socket.send('session_id helloworld')
    print 'sent data for channel session_id'
However, my ZMQPubSub class doesn't seem to be receiving any data at all.
I further experimented and realized that I need to call ioloop.IOLoop.instance().start() after registering the on_recv callback within ZMQPubSub. But that will just block execution.
I also tried passing the main ioloop instance to the ZMQStream constructor, but that doesn't help either.
Is there a way I can bind ZMQStream to the existing main ioloop instance without blocking flow within MyWebSocket.open?
In your now complete example, simply change frontend in your forwarder to a PULL socket and your publisher socket to PUSH, and it should behave as you expect.
The general principles of socket choice that are relevant here:
use PUB/SUB when you want to send a message to everyone who is ready to receive it (may be no one)
use PUSH/PULL when you want to send a message to exactly one peer, waiting for them to be ready
It may appear initially that you just want PUB-SUB, but once you start looking at each socket pair, you realize that they are very different. The forwarder-to-websocket side is definitely PUB-SUB: you may have zero-to-many receivers, and you just want to send messages to everyone who happens to be available when a message comes through. But the publisher side is different: there is only one receiver, and it definitely wants every message from the publishers.
So there you have it: the publisher-facing socket should be PULL and the subscriber-facing socket PUB. All your sockets:
PUSH -> [PULL-PUB] -> SUB
publish.py: socket is PUSH, connected to the PULL socket in forwarder.py
forwarder.py: the socket bound on 5559 (frontend) is PULL, the socket bound on 5560 (backend) is PUB
ws.py: SUB connects and subscribes to the forwarder's PUB socket.
The relevant behavior that makes PUB/SUB fail on the publisher side in your case is the slow joiner syndrome, which is described in The Guide. Essentially, subscribers take a finite time to tell publishers about their subscriptions, so if you send a message immediately after opening a PUB socket, the odds are it hasn't been told that it has any subscribers yet, so it's just discarding messages.
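For reference, a sketch of what that change looks like in the two files from the question; only the socket types differ from the originals, everything else is kept as-is.
forwarder.py
import zmq

def main():
    context = zmq.Context(1)
    frontend = context.socket(zmq.PULL)   # was zmq.SUB; PULL needs no SUBSCRIBE option
    frontend.bind('tcp://*:5559')
    backend = context.socket(zmq.PUB)     # unchanged: fans out to the websocket SUBs
    backend.bind('tcp://*:5560')
    # the device call just shuttles messages between the two sockets
    zmq.device(zmq.FORWARDER, frontend, backend)

if __name__ == '__main__':
    main()
publish.py
import zmq

if __name__ == '__main__':
    context = zmq.Context()
    socket = context.socket(zmq.PUSH)     # was zmq.PUB; queued until the forwarder takes it
    socket.connect('tcp://127.0.0.1:5559')
    socket.send(b'session_id helloworld')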
ZeroMQ subscribers have to subscribe to the messages they wish to receive; I don't see that in your code. I believe the Python way is this:
self.socket.setsockopt(zmq.SUBSCRIBE, "")

RabbitMQ, Pika and reconnection strategy

I'm using Pika to process data from RabbitMQ.
As I seemed to run into different kinds of problems, I decided to write a small test application to see how I can handle disconnects.
I wrote this test app, which does the following:
Connect to Broker, retry until successful
When connected create a queue.
Consume this queue and put result into a python Queue.Queue(0)
Get item from Queue.Queue(0) and produce it back into the broker queue.
I noticed two issues:
When I run my script from one host connecting to RabbitMQ on another host (inside a VM), the script exits at random moments without producing an error.
When I run my script on the same host on which RabbitMQ is installed, it runs fine and keeps running.
The first case might be explained by network issues or dropped packets, although I don't find the connection very robust.
When the script runs locally on the RabbitMQ server and I kill RabbitMQ, the script exits with the error: "ERROR pika SelectConnection: Socket Error on 3: 104"
So it looks like I can't get the reconnection strategy working as it should. Could someone have a look at the code to see what I'm doing wrong?
Thanks,
Jay
#!/bin/python
import logging
import threading
import Queue
import pika
from pika.reconnection_strategies import SimpleReconnectionStrategy
from pika.adapters import SelectConnection
import time
from threading import Lock

class Broker(threading.Thread):

    def __init__(self):
        threading.Thread.__init__(self)
        self.logging = logging.getLogger(__name__)
        self.to_broker = Queue.Queue(0)
        self.from_broker = Queue.Queue(0)
        self.parameters = pika.ConnectionParameters(host='sandbox', heartbeat=True)
        self.srs = SimpleReconnectionStrategy()
        self.properties = pika.BasicProperties(delivery_mode=2)

        self.connection = None
        while True:
            try:
                self.connection = SelectConnection(self.parameters, self.on_connected, reconnection_strategy=self.srs)
                break
            except Exception as err:
                self.logging.warning('Cant connect. Reason: %s' % err)
                time.sleep(1)

        self.daemon = True

    def run(self):
        while True:
            self.submitData(self.from_broker.get(block=True))

    def on_connected(self, connection):
        connection.channel(self.on_channel_open)

    def on_channel_open(self, new_channel):
        self.channel = new_channel
        self.channel.queue_declare(queue='sandbox', durable=True)
        self.channel.basic_consume(self.processData, queue='sandbox')

    def processData(self, ch, method, properties, body):
        self.logging.info('Received data from broker')
        self.channel.basic_ack(delivery_tag=method.delivery_tag)
        self.from_broker.put(body)

    def submitData(self, data):
        self.logging.info('Submitting data to broker.')
        self.channel.basic_publish(exchange='',
                                   routing_key='sandbox',
                                   body=data,
                                   properties=self.properties)

if __name__ == '__main__':
    format = ('%(asctime)s %(levelname)s %(name)s %(message)s')
    logging.basicConfig(level=logging.DEBUG, format=format)

    broker = Broker()
    broker.start()
    try:
        broker.connection.ioloop.start()
    except Exception as err:
        print err
The main problem with your script is that it interacts with a single channel from both your main thread (where the ioloop is running) and the "Broker" thread (which calls submitData in a loop). This is not safe.
Also, SimpleReconnectionStrategy does not seem to do anything useful. It does not cause a reconnect if the connection is interrupted. I believe this is a bug in Pika: https://github.com/pika/pika/issues/120
I attempted to refactor your code to make it work as I think you wanted it to, but ran into another problem. Pika does not appear to have a way to detect delivery failure, which means that data may be lost if the connection drops. This seems like such an obvious requirement! How can there be no way to detect that basic_publish failed? I tried all kinds of stuff including transactions and add_on_return_callback (all of which seemed clunky and overly complicated), but came up with nothing. If there truly is no way then pika only seems to be useful in situations that can tolerate loss of data sent to RabbitMQ, or in programs that only need to consume from RabbitMQ.
This is not reliable, but for reference, here's some code that solves your multi-thread problem:
import logging
import pika
import Queue
import sys
import threading
import time
from functools import partial
from pika.adapters import SelectConnection, BlockingConnection
from pika.exceptions import AMQPConnectionError
from pika.reconnection_strategies import SimpleReconnectionStrategy

log = logging.getLogger(__name__)

DEFAULT_PROPERTIES = pika.BasicProperties(delivery_mode=2)


class Broker(object):

    def __init__(self, parameters, on_channel_open, name='broker'):
        self.parameters = parameters
        self.on_channel_open = on_channel_open
        self.name = name

    def connect(self, forever=False):
        name = self.name
        while True:
            try:
                connection = SelectConnection(
                    self.parameters, self.on_connected)
                log.debug('%s connected', name)
            except Exception:
                if not forever:
                    raise
                log.warning('%s cannot connect', name, exc_info=True)
                time.sleep(10)
                continue

            try:
                connection.ioloop.start()
            finally:
                try:
                    connection.close()
                    connection.ioloop.start()  # allow connection to close
                except Exception:
                    pass

            if not forever:
                break

    def on_connected(self, connection):
        connection.channel(self.on_channel_open)


def setup_submitter(channel, data_queue, properties=DEFAULT_PROPERTIES):

    def on_queue_declared(frame):
        # PROBLEM pika does not appear to have a way to detect delivery
        # failure, which means that data could be lost if the connection
        # drops...
        channel.confirm_delivery(on_delivered)
        submit_data()

    def on_delivered(frame):
        if frame.method.NAME in ['Confirm.SelectOk', 'Basic.Ack']:
            log.info('submission confirmed %r', frame)
            # increasing this value seems to cause a higher failure rate
            time.sleep(0)
            submit_data()
        else:
            log.warn('submission failed: %r', frame)
            #data_queue.put(...)

    def submit_data():
        log.info('waiting on data queue')
        data = data_queue.get()
        log.info('got data to submit')
        channel.basic_publish(exchange='',
                              routing_key='sandbox',
                              body=data,
                              properties=properties,
                              mandatory=True)
        log.info('submitted data to broker')

    channel.queue_declare(
        queue='sandbox', durable=True, callback=on_queue_declared)


def blocking_submitter(parameters, data_queue,
                       properties=DEFAULT_PROPERTIES):
    while True:
        try:
            connection = BlockingConnection(parameters)
            channel = connection.channel()
            channel.queue_declare(queue='sandbox', durable=True)
        except Exception:
            log.error('connection failure', exc_info=True)
            time.sleep(1)
            continue

        while True:
            log.info('waiting on data queue')
            try:
                data = data_queue.get(timeout=1)
            except Queue.Empty:
                try:
                    connection.process_data_events()
                except AMQPConnectionError:
                    break
                continue

            log.info('got data to submit')
            try:
                channel.basic_publish(exchange='',
                                      routing_key='sandbox',
                                      body=data,
                                      properties=properties,
                                      mandatory=True)
            except Exception:
                log.error('submission failed', exc_info=True)
                data_queue.put(data)
                break
            log.info('submitted data to broker')


def setup_receiver(channel, data_queue):

    def process_data(channel, method, properties, body):
        log.info('received data from broker')
        data_queue.put(body)
        channel.basic_ack(delivery_tag=method.delivery_tag)

    def on_queue_declared(frame):
        channel.basic_consume(process_data, queue='sandbox')

    channel.queue_declare(
        queue='sandbox', durable=True, callback=on_queue_declared)


if __name__ == '__main__':
    if len(sys.argv) != 2:
        print 'usage: %s RABBITMQ_HOST' % sys.argv[0]
        sys.exit()

    format = ('%(asctime)s %(levelname)s %(name)s %(message)s')
    logging.basicConfig(level=logging.DEBUG, format=format)

    host = sys.argv[1]
    log.info('connecting to host: %s', host)
    parameters = pika.ConnectionParameters(host=host, heartbeat=True)
    data_queue = Queue.Queue(0)
    data_queue.put('message')  # prime the pump

    # run submitter in a thread
    setup = partial(setup_submitter, data_queue=data_queue)
    broker = Broker(parameters, setup, 'submitter')
    thread = threading.Thread(target=
        partial(broker.connect, forever=True))

    # uncomment these lines to use the blocking variant of the submitter
    #thread = threading.Thread(target=
    #    partial(blocking_submitter, parameters, data_queue))

    thread.daemon = True
    thread.start()

    # run receiver in main thread
    setup = partial(setup_receiver, data_queue=data_queue)
    broker = Broker(parameters, setup, 'receiver')
    broker.connect(forever=True)
