I'm trying to implement a WebSocket/WAMP client using Autobahn|Python and asyncio, and while it's somewhat working, there are parts that have eluded me.
What I'm really trying to do is implement WAMP in qt5/QML, but this
seemed like an easier path for the moment.
This simplified client, mostly copied from an online example, does work: it reads the time service when onJoin fires.
What I'd like to do is trigger this read from an external source.
The convoluted approach I've taken is to run the asyncio event loop in a thread, and then to send a command over a socket to trigger the read. So far I have been unable to figure out where to put the routine/coroutine so that it can be found from the reader routine.
I suspect there's a simpler way to go about this but I haven't found it
yet. Suggestions are welcome.
#!/usr/bin/python3
try:
import asyncio
except ImportError:
## Trollius >= 0.3 was renamed
import trollius as asyncio
from autobahn.asyncio import wamp, websocket
import threading
import time
from socket import socketpair
rsock, wsock = socketpair()
def reader() :
data = rsock.recv(100)
print("Received:", data.decode())
class MyFrontendComponent(wamp.ApplicationSession):
def onConnect(self):
self.join(u"realm1")
    @asyncio.coroutine
def onJoin(self, details):
print('joined')
## call a remote procedure
##
try:
now = yield from self.call(u'com.timeservice.now')
except Exception as e:
print("Error: {}".format(e))
else:
print("Current time from time service: {}".format(now))
def onLeave(self, details):
self.disconnect()
def onDisconnect(self):
asyncio.get_event_loop().stop()
def start_aloop() :
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
transport_factory = websocket.WampWebSocketClientFactory(session_factory,
debug = False,
debug_wamp = False)
coro = loop.create_connection(transport_factory, '127.0.0.1', 8080)
loop.add_reader(rsock,reader)
loop.run_until_complete(coro)
loop.run_forever()
loop.close()
if __name__ == '__main__':
session_factory = wamp.ApplicationSessionFactory()
session_factory.session = MyFrontendComponent
## 4) now enter the asyncio event loop
print('starting thread')
thread = threading.Thread(target=start_aloop)
thread.start()
time.sleep(5)
print("IN MAIN")
# emulate an outside call
wsock.send(b'a byte string')
You can listen on a socket asynchronously inside the event loop, using loop.sock_accept. You can just schedule a coroutine to set up the socket inside of onConnect or onJoin:
try:
import asyncio
except ImportError:
## Trollius >= 0.3 was renamed
import trollius as asyncio
from autobahn.asyncio import wamp, websocket
import socket
class MyFrontendComponent(wamp.ApplicationSession):
def onConnect(self):
self.join(u"realm1")
    @asyncio.coroutine
def setup_socket(self):
# Create a non-blocking socket
self.sock = socket.socket()
self.sock.setblocking(0)
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.sock.bind(('localhost', 8889))
self.sock.listen(5)
loop = asyncio.get_event_loop()
# Wait for connections to come in. When one arrives,
# call the time service and disconnect immediately.
while True:
conn, address = yield from loop.sock_accept(self.sock)
yield from self.call_timeservice()
conn.close()
    @asyncio.coroutine
def onJoin(self, details):
print('joined')
# Setup our socket server
asyncio.async(self.setup_socket())
## call a remote procedure
##
yield from self.call_timeservice()
    @asyncio.coroutine
def call_timeservice(self):
try:
now = yield from self.call(u'com.timeservice.now')
except Exception as e:
print("Error: {}".format(e))
else:
print("Current time from time service: {}".format(now))
... # The rest is the same
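To emulate the external trigger, anything that connects to port 8889 will fire one call to the time service. A minimal sketch of such a trigger, run from any other process:

import socket

# any connection to the listening socket triggers one call;
# no payload is needed, the connection itself is the signal
socket.create_connection(('localhost', 8889)).close()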
Thanks for the response dano. Not quite the solution I needed, but it pointed me in the right direction. Yes, I wish to have the client make remote RPC calls from an external trigger.
I came up with the following, which allows me to pass a string naming the specific call (though only one is implemented right now). I'm not sure how elegant it is.
import asyncio
from autobahn.asyncio import wamp, websocket
import threading
import time
import socket
rsock, wsock = socket.socketpair()
class MyFrontendComponent(wamp.ApplicationSession):
def onConnect(self):
self.join(u"realm1")
    @asyncio.coroutine
def setup_socket(self):
# Create a non-blocking socket
self.sock = rsock
self.sock.setblocking(0)
loop = asyncio.get_event_loop()
        # Wait for commands to arrive on the socket pair.
        # When one does, call the service it names.
while True:
rcmd = yield from loop.sock_recv(rsock,80)
yield from self.call_service(rcmd.decode())
    @asyncio.coroutine
def onJoin(self, details):
# Setup our socket server
asyncio.async(self.setup_socket())
    @asyncio.coroutine
def call_service(self,rcmd):
print(rcmd)
try:
now = yield from self.call(rcmd)
except Exception as e:
print("Error: {}".format(e))
else:
print("Current time from time service: {}".format(now))
def onLeave(self, details):
self.disconnect()
def onDisconnect(self):
asyncio.get_event_loop().stop()
def start_aloop() :
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
transport_factory = websocket.WampWebSocketClientFactory(session_factory,
debug = False,
debug_wamp = False)
coro = loop.create_connection(transport_factory, '127.0.0.1', 8080)
loop.run_until_complete(coro)
loop.run_forever()
loop.close()
if __name__ == '__main__':
session_factory = wamp.ApplicationSessionFactory()
session_factory.session = MyFrontendComponent
## 4) now enter the asyncio event loop
print('starting thread')
thread = threading.Thread(target=start_aloop)
thread.start()
time.sleep(5)
wsock.send(b'com.timeservice.now')
time.sleep(5)
wsock.send(b'com.timeservice.now')
time.sleep(5)
wsock.send(b'com.timeservice.now')
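For what it's worth, the socket pair could probably be avoided entirely: since start_aloop owns the loop object, the main thread can hand work to it with call_soon_threadsafe. A minimal sketch, assuming the loop and the session instance are stored somewhere the main thread can reach (e.g. module-level globals set from start_aloop and onJoin):

def trigger_call(loop, session, procedure):
    # runs in the main thread; asyncio.async() executes inside the
    # loop's own thread, where it wraps the coroutine in a Task
    loop.call_soon_threadsafe(asyncio.async, session.call_service(procedure))

# e.g. instead of wsock.send(b'com.timeservice.now'):
# trigger_call(loop, session, u'com.timeservice.now')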
I am trying to write a Python app that will run on a Raspberry Pi and have both a socket connection (socketio with uvicorn) and physical input listeners. I intend to listen for socket connections and GPIO events concurrently, without either blocking the other. This is what I have so far:
api.py
import uvicorn
import asyncio
from interaction.volume import VolumeControl
from system.platform_info import PlatformInfo
from connection.api_socket import app
class Api:
def __init__(self):
pass
def initialize_volume_listener(self):
volume_controller = VolumeControl()
volume_controller.start_listener()
def start(self):
PlatformInfo().print_info()
self.initialize_volume_listener()
uvicorn.run(app, host='127.0.0.1', port=5000, loop="asyncio")
volume_control.py
import asyncio
from gpiozero import Button
from connection.api_socket import volume_up
class VolumeControl:
def __init__(self):
self.volume_up_button = Button(4)
def volume_up(self):
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
future = asyncio.ensure_future(volume_up(None, None))
loop.run_until_complete(future)
loop.close()
def start_listener(self):
self.volume_up_button.when_pressed = self.volume_up
api_socket.py
import socketio
from system.platform_info import PlatformInfo
sio = socketio.AsyncServer(async_mode='asgi', cors_allowed_origins='*')
app = socketio.ASGIApp(sio)
@sio.on('connect')
async def test_connect(sid, environ):
system_info = PlatformInfo().get_info()
current_volume = 35
initial_data = {"system_info": system_info,
"settings": {"volume": current_volume}
}
await sio.emit('initial_data', initial_data, room=sid)
@sio.on('disconnect request')
async def disconnect_request(sid):
await sio.disconnect(sid)
@sio.on('disconnect')
async def test_disconnect(sid):
print('Client disconnected')
await sio.emit('disconnect', {'data': 'Connected', 'count': 0}, room=sid)
@sio.on('volume_up')
async def volume_up(sid, volume=None):
increased_volume = 25
await sio.emit('volume_up', {'volume': increased_volume})
@sio.on('volume_down')
async def volume_down(sid, volume=None):
decreased_volume = 25
await sio.emit('volume_down', {'volume': decreased_volume})
I have tried using asyncio, but I am fairly new to the async features of Python. The problem is that I wasn't able to run the button listener continuously, so that while socket functions are in progress I can still listen for button interactions, without either blocking the other. Right now the button listener does not work at all. I need it to keep running as long as the uvicorn app is up.
Any help will be appreciated.
Thanks.
gpiozero creates a new thread which executes the callback (this isn't documented very well). If the callback should be executed in the main asyncio loop then you need to pass control back to the main thread.
The call_soon_threadsafe method does that for you. Essentially it adds the callback to the list of tasks the main asyncio loop calls when an await happens.
However, asyncio loops are local to each thread; see get_running_loop.
So when the gpiozero object is created in the main asyncio thread, you need to make that loop object available to the object for when the callback is called.
Here's how I do that for a PIR that calls an asyncio MQTT method:
class PIR:
def __init__(self, mqtt, pin):
self.pir = MotionSensor(pin=pin)
self.pir.when_motion = self.motion
# store the mqtt client we'll need to call
self.mqtt = mqtt
# This PIR object is created in the main thread
# so store that loop object
self.loop = asyncio.get_running_loop()
def motion(self):
# motion is called in the gpiozero monitoring thread
# it has to use our stored copy of the loop and then
# tell that loop to call the callback:
self.loop.call_soon_threadsafe(self.mqtt.publish,
f'sensor/gpiod/pir/kitchen', True)
You probably want this:
import asyncio
from gpiozero import Button
from connection.api_socket import volume_up
class VolumeControl:
def __init__(self):
self.volume_up_button = Button(4)
        # note: get_running_loop() requires the loop to already be
        # running when VolumeControl is constructed
        self.loop = asyncio.get_running_loop()
    def volume_up_cb(self):
        # volume_up is a coroutine function, so it has to be scheduled
        # on the stored loop; run_coroutine_threadsafe (rather than a
        # bare call_soon_threadsafe) makes sure it is actually awaited
        asyncio.run_coroutine_threadsafe(volume_up(None, None), self.loop)
def start_listener(self):
self.volume_up_button.when_pressed = self.volume_up_cb
Much cleaner - and threadsafe! :)
@Miguel, thank you for your answer. As you suggested, I run the GPIO listener in a while loop on its own thread and use asyncio.run() inside that loop to call the related socketio function. It works as intended. Side note: I started the GPIO thread with daemon=True, which makes the GPIO loop quit as soon as the main thread (the uvicorn server) exits. The final code is as follows:
api_socket.py
import os  # needed for os.path.join below

@sio.on('video_load')
async def load_video(sid, video_number=3):
data = open(os.path.join(os.getcwd(), f'sample_videos/dummy_video_{str(video_number)}.mp4'), 'rb').read()
print('Streaming video...')
await sio.emit('video_load', {'source': data}, room=sid)
nfc_listener.py
import asyncio
import threading

import RPi.GPIO as GPIO
# assumption: the RFID reader class comes from the pi-rc522 library
from pirc522 import RFID

from connection.api_socket import load_video

class NFCListener:
reading = True
def __init__(self):
GPIO.setmode(GPIO.BOARD)
self.rdr = RFID()
util = self.rdr.util()
util.debug = True
self.listener_thread = threading.Thread(target=self.start_nfc, daemon=True)
def start_nfc(self):
selected_video = None
while self.reading:
self.rdr.wait_for_tag()
(error, data) = self.rdr.request()
if not error:
print("\nCard identified!")
(error, uid) = self.rdr.anticoll()
if not error:
# Print UID
card_uid = str(uid[0])+" "+str(uid[1])+" " + \
str(uid[2])+" "+str(uid[3])+" "+str(uid[4])
print(card_uid)
if card_uid == "166 107 86 20 143":
if selected_video != 2:
selected_video = 2
asyncio.run(load_video(None, selected_video))
else:
if selected_video != 3:
selected_video = 3
asyncio.run(load_video(None, selected_video))
def start_reading(self):
self.listener_thread.start()
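The listener thread still has to be started somewhere; a hypothetical wiring in api.py, mirroring the existing initialize_volume_listener pattern:

def initialize_nfc_listener(self):
    # start the daemon reader thread before uvicorn takes over
    # the main thread (NFCListener as defined above)
    NFCListener().start_reading()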
Summary
I have a client-server application which makes use of Websockets. The backend (server) part is implemented in Python using autobahn.
The server, in addition to serving a Websockets endpoint, runs a series of threads which feed the Websockets channel with data through a queue.Queue().
One of these threads has a problem: it crashes on a missing parameter and hangs while resolving the exception.
Implementation details
The server implementation (cut down to highlight the problem):
from autobahn.asyncio.websocket import WebSocketServerProtocol, WebSocketServerFactory
import time
import threading
import arrow
import queue
import asyncio
import json
# backends of components
import dummy
class MyServerProtocol(WebSocketServerProtocol):
def __init__(self):
super().__init__()
print("webserver initialized")
# global queue to handle updates from modules
self.events = queue.Queue()
# consumer
threading.Thread(target=self.push).start()
threading.Thread(target=dummy.Dummy().dummy, args=(self.events,)).start()
def push(self):
""" consume the content of the queue and push it to the browser """
while True:
update = self.events.get()
print(update)
if update:
self.sendMessage(json.dumps(update).encode('utf-8'), False)
print(update)
time.sleep(1)
def worker(self):
print("started thread")
while True:
try:
self.sendMessage(arrow.now().isoformat().encode('utf-8'), False)
except AttributeError:
print("not connected?")
time.sleep(3)
def onConnect(self, request):
print("Client connecting: {0}".format(request.peer))
def onOpen(self):
print("WebSocket connection open.")
def onClose(self, wasClean, code, reason):
print("WebSocket connection closed: {0}".format(reason))
if __name__ == '__main__':
factory = WebSocketServerFactory(u"ws://127.0.0.1:9100")
factory.protocol = MyServerProtocol
loop = asyncio.get_event_loop()
coro = loop.create_server(factory, '0.0.0.0', 9100)
loop.run_until_complete(coro)
loop.run_forever()
The dummy module imported in the code above:
import time
import arrow
class Dummy:
def __init__(self, events):
self.events = events
print("dummy initialized")
def dummy(self):
while True:
self.events.put({
'dummy': {
'time': arrow.now().isoformat()
}
})
time.sleep(1)
The problem
When running the code above and connecting from a client, I get on the output webserver initialized (which proves that the connection was initiated), and WebSocket connection to 'ws://127.0.0.1:9100/' failed: Error in connection establishment: net::ERR_CONNECTION_REFUSED on the client.
When debugging the code, I see that the call to threading.Thread(target=dummy.Dummy().dummy, args=(self.events,)).start() crashes and the debugger (PyCharm) leads me to C:\Program Files (x86)\Python36-32\Lib\asyncio\selector_events.py, specifically to the line 236
# It's now up to the protocol to handle the connection.
except Exception as exc:
if self._debug:
The thread hangs when executing if self._debug, but on the except line I can see (thanks to PyCharm) that
exc: __init__() missing 1 required positional argument: 'events'
My question
Why is this parameter missing? It is provided via the threading.Thread(target=dummy.Dummy().dummy, args=(self.events,)).start() call.
As a side question: why does the thread hang on the if condition?
Notes
there is never a Traceback thrown by my program (due to the hang)
removing this thread call resolves the issue (the client connects correctly)
The events arg is needed for the constructor, not the dummy method. I think you meant something more like:
d = Dummy(self.events)
threading.Thread(target=d.dummy).start()
I wrote three pieces of code with Tornado TCP and have encountered some difficulties.
My code is following:
client.py
'''tcp client'''
from socket import socket, AF_INET, SOCK_STREAM
s = socket(AF_INET, SOCK_STREAM)
s.connect(('localhost', 20000))
resp = s.recv(8192)
print('Response:', resp)
s.send(b'Hello\n')
s.close()
server.py
'''tcp server'''
#! /usr/bin/env python
#coding=utf-8
from tornado.tcpserver import TCPServer
from tornado.ioloop import IOLoop
from tornado.gen import *
clientDict = dict()  # save information about connected clients
class TcpConnection(object):
def __init__(self,stream,address):
self._stream=stream
self._address=address
self._stream.set_close_callback(self.on_close)
    @coroutine
def send_messages(self):
yield self.send_message(b'world \n')
response = yield self.read_message()
return response
def read_message(self):
return self._stream.read_until(b'\n')
def send_message(self,data):
return self._stream.write(data)
def on_close(self):
global clientDict
clientDict.pop(self._address)
print("the monitored %d has left",self._address)
class MonitorServer(TCPServer):
    @coroutine
def handle_stream(self,stream,address):
global clientDict
print("new connection",address,stream)
clientDict.setdefault(address, TcpConnection(stream,address))
if __name__=='__main__':
print('server start .....')
server=MonitorServer()
server.listen(20000)
IOLoop.instance().start()
main.py
import time
from threading import Thread
import copy
from server import *
def watchClient():
    '''call the "send" function when a new client connects'''
global clientDict
print('start watch')
lastClientList=list()
while True:
currentClientList=copy.deepcopy([key for key in clientDict.keys()])
difference=list(set(currentClientList).difference(set(lastClientList)))
if len(difference)>0:
send(difference)
lastClientList=copy.deepcopy(currentClientList)
time.sleep(5)
else:
time.sleep(5)
continue
def send(addressList):
'''send message to a new client and get response'''
global clientDict
for address in addressList:
response=clientDict[address].send_messages()
print(address," response :",response)
def listen():
server=MonitorServer()
server.listen(20000)
IOLoop.instance().start()
if __name__=='__main__':
listenThread=Thread(target=listen)
watchThead=Thread(target=watchClient)
watchThead.start()
listenThread.start()
When main.py runs, I want to get printed output like: address, response: b'hello\n'
But in fact what I get is:
('127.0.0.1', 41233) response :<tornado.concurrent.Future object at 0x7f2894d30518>
It never gives me the b'hello\n'.
My guess is that the non-coroutine function (def send(addressList)) can't get the response out of the coroutine (@coroutine def send_messages(self)).
How can I solve this?
By the way, I'd also like to know how to make clientDict an attribute of the MonitorServer class rather than a global.
Please help me! Thank you.
In general, anything that calls a coroutine should be a coroutine itself. Mixing threads and coroutines can be very tricky; most coroutine-based code is deliberately not thread-safe.
The correct way to call a coroutine from a non-coroutine function on a different thread is something like this:
import queue  # standard-library queue for the cross-thread handoff

from tornado.ioloop import IOLoop

def call_coro_from_thread(f, *args, **kwargs):
q = queue.Queue()
def wrapper():
fut = f(*args, **kwargs)
fut.add_done_callback(q.put)
IOLoop.instance().add_callback(wrapper)
fut = q.get()
return fut.result()
IOLoop.add_callback is necessary to safely transfer control to the IOLoop thread, and then the Queue is used to transfer the result back.
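Applied to your code, send() can then stay a plain function running on the watcher thread; a sketch under that assumption:

def send(addressList):
    '''send message to a new client and get its response'''
    global clientDict
    for address in addressList:
        # blocks this (non-IOLoop) thread until the coroutine finishes
        response = call_coro_from_thread(clientDict[address].send_messages)
        print(address, " response :", response)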
I need your help please.
This code only works once: a second wget times out.
wget http://localhost:9090
#!/usr/bin/env python
import trollius as asyncio
from trollius import From
import os
class Client(asyncio.Protocol):
def connection_made(self, transport):
self.connected = True
# save the transport
self.transport = transport
def data_received(self, data):
# forward data to the server
self.server_transport.write(data)
def connection_lost(self, *args):
self.connected = False
class Server(asyncio.Protocol):
clients = {}
def connection_made(self, transport):
# save the transport
self.transport = transport
    @asyncio.coroutine
def send_data(self, data):
# get a client by its peername
peername, port = self.transport.get_extra_info('peername')
client = self.clients.get(peername)
# create a client if peername is not known or the client disconnect
if client is None or not client.connected:
protocol, client = yield From(loop.create_connection(Client, 'google.com', 80))
client.server_transport = self.transport
self.clients[peername] = client
# forward data to the client
client.transport.write(data)
def data_received(self, data):
# use a task so this is executed async
asyncio.Task(self.send_data(data))
@asyncio.coroutine
def initialize(loop):
# use a coroutine to use yield from and get the async result of
# create_server
server = yield From(loop.create_server(Server, '127.0.0.1', 9090))
loop = asyncio.get_event_loop()
# main task to initialize everything
asyncio.Task(initialize(loop))
# run
loop.run_forever()
Does anyone know the reason?
Thanks!
You need a real 'loop' when you are writing socket servers in asyncio. Note that despite the 'sync'-looking coding style, infinite loops do not block execution here: each yield hands control back to the event loop. So you need an infinite while loop within your server. There are many samples; I recommend the websockets library samples!
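For reference, a minimal sketch of that per-connection loop, using trollius streams to match the code above (the echo behaviour is only a placeholder):

import trollius as asyncio
from trollius import From

@asyncio.coroutine
def handle(reader, writer):
    # this loop runs "forever" per connection, but every read yields
    # control back to the event loop, so nothing actually blocks
    while True:
        data = yield From(reader.read(8192))
        if not data:
            break
        writer.write(data)
    writer.close()

loop = asyncio.get_event_loop()
loop.run_until_complete(asyncio.start_server(handle, '127.0.0.1', 9090))
loop.run_forever()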
I'm using Pika to process data from RabbitMQ.
As I seemed to run into different kind of problems I decided to write a small test application to see how I can handle disconnects.
I wrote this test app which does following:
Connect to Broker, retry until successful
When connected create a queue.
Consume this queue and put result into a python Queue.Queue(0)
Get item from Queue.Queue(0) and produce it back into the broker queue.
What I noticed were 2 issues:
When I run my script from one host, connecting to RabbitMQ on another host (inside a VM), the script exits at random moments without producing an error.
When I run my script on the same host on which RabbitMQ is installed, it runs fine and keeps running.
The first case might be explained by network issues or dropped packets, although I don't find the connection very robust.
When the script runs locally on the RabbitMQ server and I kill RabbitMQ, the script exits with the error: "ERROR pika SelectConnection: Socket Error on 3: 104"
So it looks like I can't get the reconnection strategy working as it should. Could someone have a look at the code to see what I'm doing wrong?
Thanks,
Jay
#!/bin/python
import logging
import threading
import Queue
import pika
from pika.reconnection_strategies import SimpleReconnectionStrategy
from pika.adapters import SelectConnection
import time
from threading import Lock
class Broker(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
self.logging = logging.getLogger(__name__)
self.to_broker = Queue.Queue(0)
self.from_broker = Queue.Queue(0)
self.parameters = pika.ConnectionParameters(host='sandbox',heartbeat=True)
self.srs = SimpleReconnectionStrategy()
self.properties = pika.BasicProperties(delivery_mode=2)
self.connection = None
while True:
try:
self.connection = SelectConnection(self.parameters, self.on_connected, reconnection_strategy=self.srs)
break
except Exception as err:
self.logging.warning('Cant connect. Reason: %s' % err)
time.sleep(1)
self.daemon=True
def run(self):
while True:
self.submitData(self.from_broker.get(block=True))
pass
def on_connected(self,connection):
connection.channel(self.on_channel_open)
def on_channel_open(self,new_channel):
self.channel = new_channel
self.channel.queue_declare(queue='sandbox', durable=True)
self.channel.basic_consume(self.processData, queue='sandbox')
def processData(self, ch, method, properties, body):
self.logging.info('Received data from broker')
self.channel.basic_ack(delivery_tag=method.delivery_tag)
self.from_broker.put(body)
def submitData(self,data):
self.logging.info('Submitting data to broker.')
self.channel.basic_publish(exchange='',
routing_key='sandbox',
body=data,
properties=self.properties)
if __name__ == '__main__':
format=('%(asctime)s %(levelname)s %(name)s %(message)s')
logging.basicConfig(level=logging.DEBUG, format=format)
broker=Broker()
broker.start()
try:
broker.connection.ioloop.start()
except Exception as err:
print err
The main problem with your script is that it is interacting with a single channel from both your main thread (where the ioloop is running) and the "Broker" thread (which calls submitData in a loop). This is not safe.
Also, SimpleReconnectionStrategy does not seem to do anything useful. It does not cause a reconnect if the connection is interrupted. I believe this is a bug in Pika: https://github.com/pika/pika/issues/120
I attempted to refactor your code to make it work as I think you wanted it to, but ran into another problem. Pika does not appear to have a way to detect delivery failure, which means that data may be lost if the connection drops. This seems like such an obvious requirement! How can there be no way to detect that basic_publish failed? I tried all kinds of stuff including transactions and add_on_return_callback (all of which seemed clunky and overly complicated), but came up with nothing. If there truly is no way then pika only seems to be useful in situations that can tolerate loss of data sent to RabbitMQ, or in programs that only need to consume from RabbitMQ.
This is not reliable, but for reference, here's some code that solves your multi-thread problem:
import logging
import pika
import Queue
import sys
import threading
import time
from functools import partial
from pika.adapters import SelectConnection, BlockingConnection
from pika.exceptions import AMQPConnectionError
from pika.reconnection_strategies import SimpleReconnectionStrategy
log = logging.getLogger(__name__)
DEFAULT_PROPERTIES = pika.BasicProperties(delivery_mode=2)
class Broker(object):
def __init__(self, parameters, on_channel_open, name='broker'):
self.parameters = parameters
self.on_channel_open = on_channel_open
self.name = name
def connect(self, forever=False):
name = self.name
while True:
try:
connection = SelectConnection(
self.parameters, self.on_connected)
log.debug('%s connected', name)
except Exception:
if not forever:
raise
log.warning('%s cannot connect', name, exc_info=True)
time.sleep(10)
continue
try:
connection.ioloop.start()
finally:
try:
connection.close()
connection.ioloop.start() # allow connection to close
except Exception:
pass
if not forever:
break
def on_connected(self, connection):
connection.channel(self.on_channel_open)
def setup_submitter(channel, data_queue, properties=DEFAULT_PROPERTIES):
def on_queue_declared(frame):
# PROBLEM pika does not appear to have a way to detect delivery
# failure, which means that data could be lost if the connection
# drops...
channel.confirm_delivery(on_delivered)
submit_data()
def on_delivered(frame):
if frame.method.NAME in ['Confirm.SelectOk', 'Basic.Ack']:
log.info('submission confirmed %r', frame)
# increasing this value seems to cause a higher failure rate
time.sleep(0)
submit_data()
else:
log.warn('submission failed: %r', frame)
#data_queue.put(...)
def submit_data():
log.info('waiting on data queue')
data = data_queue.get()
log.info('got data to submit')
channel.basic_publish(exchange='',
routing_key='sandbox',
body=data,
properties=properties,
mandatory=True)
log.info('submitted data to broker')
channel.queue_declare(
queue='sandbox', durable=True, callback=on_queue_declared)
def blocking_submitter(parameters, data_queue,
properties=DEFAULT_PROPERTIES):
while True:
try:
connection = BlockingConnection(parameters)
channel = connection.channel()
channel.queue_declare(queue='sandbox', durable=True)
except Exception:
log.error('connection failure', exc_info=True)
time.sleep(1)
continue
while True:
log.info('waiting on data queue')
try:
data = data_queue.get(timeout=1)
except Queue.Empty:
try:
connection.process_data_events()
except AMQPConnectionError:
break
continue
log.info('got data to submit')
try:
channel.basic_publish(exchange='',
routing_key='sandbox',
body=data,
properties=properties,
mandatory=True)
except Exception:
log.error('submission failed', exc_info=True)
data_queue.put(data)
break
log.info('submitted data to broker')
def setup_receiver(channel, data_queue):
def process_data(channel, method, properties, body):
log.info('received data from broker')
data_queue.put(body)
channel.basic_ack(delivery_tag=method.delivery_tag)
def on_queue_declared(frame):
channel.basic_consume(process_data, queue='sandbox')
channel.queue_declare(
queue='sandbox', durable=True, callback=on_queue_declared)
if __name__ == '__main__':
if len(sys.argv) != 2:
print 'usage: %s RABBITMQ_HOST' % sys.argv[0]
sys.exit()
format=('%(asctime)s %(levelname)s %(name)s %(message)s')
logging.basicConfig(level=logging.DEBUG, format=format)
host = sys.argv[1]
log.info('connecting to host: %s', host)
parameters = pika.ConnectionParameters(host=host, heartbeat=True)
data_queue = Queue.Queue(0)
data_queue.put('message') # prime the pump
# run submitter in a thread
setup = partial(setup_submitter, data_queue=data_queue)
broker = Broker(parameters, setup, 'submitter')
thread = threading.Thread(target=
partial(broker.connect, forever=True))
# uncomment these lines to use the blocking variant of the submitter
#thread = threading.Thread(target=
# partial(blocking_submitter, parameters, data_queue))
thread.daemon = True
thread.start()
# run receiver in main thread
setup = partial(setup_receiver, data_queue=data_queue)
broker = Broker(parameters, setup, 'receiver')
broker.connect(forever=True)