Running RabbitMQ Pika with Quart - python

I am using the Quart framework, and I also need to use the RabbitMQ Pika connector, but I can't get them to play nicely because they both run infinite loops.
Entrypoint:
import asyncio
import sys

from quart import Quart
from .service import Service

app = Quart(__name__)

@app.before_serving
async def startup():
    app.service_task = asyncio.ensure_future(service.start())
    if not service.initialise():
        sys.exit()
Service Class:
import pika
from threading import Thread

class Service:

    def __init__(self, new_instance):
        self._connection = None
        self._channel = None
        self._messaging_thread = None

    def initialise(self):
        credentials = pika.PlainCredentials('username', 'password')
        parameters = pika.ConnectionParameters('localhost', credentials=credentials)
        self._connection = pika.BlockingConnection(parameters)
        self._channel = self._connection.channel()
        self._channel.queue_declare(queue='to_be_processed_queue')
        self._channel.basic_consume(queue='to_be_processed_queue',
                                    auto_ack=True,
                                    on_message_callback=self.callback)
        print('creating thread')
        self._messaging_thread = Thread(target=self.run_consume())
        #self._messaging_thread.start()
        print('Thread created...')

    def run_consume(self):
        try:
            self._channel.start_consuming()
        except KeyboardInterrupt:
            self._shutdown()
The code isn't even getting to the print('Thread created...') line, and I don't understand why. From this question I do understand that RabbitMQ isn't thread-safe, but I don't understand how else to run RabbitMQ.

Pika is not thread safe, as you have already spotted, but that is not why your program blocks.
Your problem might be here:
print('creating thread')
self._messaging_thread = Thread(target=self.run_consume())
#self._messaging_thread.start()
Does it work better if you remove the parentheses from run_consume? As written, you are not actually creating a thread; you are executing self.run_consume() on the spot, and it does not return.
self._messaging_thread = Thread(target=self.run_consume)
would be my first attempt.
However, as Pika is not thread safe, you must also move your connection and channel creation into the thread instead of doing it in the main program. It might happen to work if you do not touch them anywhere else, but the correct approach with Pika is to contain absolutely everything in the thread and not share any Pika structures between threads, as your code currently does.
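To illustrate that last point, here is a minimal sketch (queue name and placeholder credentials taken from the question, pika 1.x API assumed) in which every Pika object is created and used entirely inside the consumer thread:

import pika
from threading import Thread

class Service:

    def __init__(self):
        self._messaging_thread = None

    def initialise(self):
        # Note: no parentheses on the target -- pass the callable itself.
        self._messaging_thread = Thread(target=self._run_consume, daemon=True)
        self._messaging_thread.start()
        return True

    def _run_consume(self):
        # Connection and channel are created in this thread and never shared.
        credentials = pika.PlainCredentials('username', 'password')
        parameters = pika.ConnectionParameters('localhost', credentials=credentials)
        connection = pika.BlockingConnection(parameters)
        channel = connection.channel()
        channel.queue_declare(queue='to_be_processed_queue')
        channel.basic_consume(queue='to_be_processed_queue',
                              auto_ack=True,
                              on_message_callback=self._callback)
        channel.start_consuming()

    def _callback(self, channel, method, properties, body):
        print('received', body)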


Unable to perform RMQ publish from Locust "BlockingIOError: [WinError 10035]"

My project requires the client to send messages directly to RabbitMQ, and we need to load test this.
I tried Pika; it works fine in a plain Python file, but as soon as I tried to use it in Locust I started getting errors due to compatibility issues.
I tried Pika async, B-Rabbit, etc.; none of them works with Locust (gevent).
I don't even have to integrate with Locust: just importing locust in these Python files is enough to trigger the error.
I have read in several blogs that gevent is not compatible with Pika.
import pika

class RMQ:

    def __init__(self) -> None:
        self.connection = pika.BlockingConnection(
            pika.ConnectionParameters('localhost', credentials=pcredentails))
        self.channel = self.connection.channel()

    def connect(self):
        self.channel.basic_publish(exchange='locust_events', routing_key='python3', body='Hello World!')
        print("[x] Sent 'Hello World!'")

    def close(self):
        self.channel.close()
        self.connection.close()
Error:
BlockingIOError: [WinError 10035] A non-blocking socket operation could not be completed immediately
Could someone please let me know a possible way to solve this?
Note: B-rabbit does claim to be thread safe, but it still throws "Time out reading from server" when I publish, with a 12 s delay; this happens only when I use Locust, otherwise it is fast.
Pika has a GeventConnection connection class. That is what you should be using.
NOTE: the RabbitMQ team monitors the rabbitmq-users mailing list and only sometimes answers questions on StackOverflow.
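For reference, a minimal sketch of what using the GeventConnection adapter might look like (assuming pika >= 1.2, where the adapter lives in pika.adapters.gevent_connection; the callback-driven flow and the ioloop.start() call are assumed to mirror pika's other asynchronous adapters):

import pika
from pika.adapters.gevent_connection import GeventConnection

def on_channel_open(channel):
    # Publish once the channel is ready, then close so the ioloop can exit.
    channel.basic_publish(exchange='locust_events', routing_key='python3',
                          body=b'Hello World!')
    channel.connection.close()

def on_open(connection):
    connection.channel(on_open_callback=on_channel_open)

params = pika.ConnectionParameters('localhost')
conn = GeventConnection(params, on_open_callback=on_open)
conn.ioloop.start()  # gevent-friendly IO loop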
Having tried:
**Pika** = not compatible with gevent Locust (especially on Windows)
**B-rabbit**, **Rabbit-py** = slow with Locust and times out
I can confirm Kombu works perfectly with Locust; for anyone looking to implement queues with Locust, this is the solution:
https://github.com/celery/kombu
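For reference, here is a minimal standalone Kombu publish; a sketch assuming a default local broker URL and the exchange/queue names used in the fuller snippet further below:

from kombu import Connection, Exchange, Queue

exchange = Exchange('test_exchange', type='direct')
queue = Queue('test_queue', exchange, routing_key='test_key')

# 'amqp://guest:guest@localhost:5672//' is an assumed default broker URL.
with Connection('amqp://guest:guest@localhost:5672//') as conn:
    producer = conn.Producer()
    # declare=[queue] ensures the exchange and queue exist before publishing.
    producer.publish('Hello World!',
                     exchange=exchange,
                     routing_key='test_key',
                     declare=[queue])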
@Luke Bakken:
I'm using my own bespoke framework for my company, and it's not so simple to decouple this bit, so I created a quick snippet class for Kombu:
from locust import HttpClient, HttpUser, TaskSet, task, SingleRunner
from kombu import Connection, Exchange, Queue

class KombuClient(HttpClient):

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.connection = Connection('amqp://guest:guest@localhost:5672//')
        self.channel = self.connection.channel()

    def send_message(self, message):
        exchange = Exchange('test_exchange', type='direct')
        queue = Queue('test_queue', exchange, routing_key='test_key')
        queue.maybe_bind(self.connection)
        queue.declare()
        producer = self.connection.Producer(exchange=exchange, routing_key='test_key')
        producer.publish(message)
        print(" [x] Sent message: '{}'".format(message))

    def close_connection(self):
        self.connection.release()

class UserBehavior(TaskSet):

    @task
    def send_hello_message(self):
        self.client.send_message('Hello World!')

class WebsiteUser(HttpUser):
    tasks = [UserBehavior]
    min_wait = 5000
    max_wait = 9000
    client = KombuClient

def main():
    runner = SingleRunner(http_user_cls=WebsiteUser)
    runner.run(options={
        "host": "http://example.com",
        "num_users": 1000,
        "hatch_rate": 100
    })

if __name__ == "__main__":
    main()

How can I listen to a message queue in Flask microservices

Here is my Flask microservices scenario:
The activity server (A-server) exposes an API for ticket purchasing.
A-server pushes a message with the order number (order_no) to RabbitMQ after payment completion.
C-server inserts the order_no into MySQL once it receives that message.
My solution is to just start a Pika thread, and it seems to work.
# content of /user/src/app.py
import pika
from flask import Flask
from threading import Thread

def create_app(config=None):
    app = Flask(__name__)
    t = Thread(target=start_message_consumer)
    t.daemon = True
    t.start()
    return app

def start_message_consumer():
    credential = pika.credentials.PlainCredentials(xxxxx)
    connection = pika.BlockingConnection(pika.ConnectionParameters(xxx, credentials=credential))
    channel = connection.channel()
    channel.queue_declare(queue='order')

    def callback(ch, method, properties, body):
        # ... INSERT statement
        pass

    channel.basic_consume(callback, queue='order', no_ack=True)
    channel.start_consuming()
Do I need locks somewhere, or is there a better approach (like a Flask extension) to achieve this?

pika consuming from thread ignores KeyboardInterrupt

I am trying to run the following test using pika 0.10.0 from GitHub:
import logging
import sys
import threading

import pika

logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)

URL = 'amqp://guest:guest@127.0.0.1:5672/%2F?socket_timeout=0.25'

class BaseConsumer(threading.Thread):

    def run(self):
        self._connection = None
        self._channel = None
        self.connect()
        self.open_channel()
        self.consume()

    def connect(self):
        parameters = pika.URLParameters(URL)
        self._connection = pika.BlockingConnection(parameters)

    def open_channel(self):
        self._channel = self._connection.channel()
        self._channel.exchange_declare(exchange='exc1', exchange_type='topic', passive=False,
                                       durable=False, auto_delete=False, internal=False, arguments=None)
        self._channel.queue_declare(queue='test', passive=False, durable=False,
                                    exclusive=False, auto_delete=False, arguments=None)
        self._channel.queue_bind(
            'test', 'exc1', routing_key='rk', arguments=None)

    def consume(self):
        self._channel.basic_consume(self.on_message, 'test')
        try:
            self._channel.start_consuming()
        except KeyboardInterrupt:
            logging.info("Stop consuming now!")
            self._channel.stop_consuming()
            self._connection.close()

    def on_message(self, channel, method_frame, header_frame, body):
        print(method_frame.delivery_tag)
        print(body)
        channel.basic_ack(delivery_tag=method_frame.delivery_tag)

c1 = BaseConsumer()
c1.setDaemon(False)
c1.start()
The script connects to my MQ and is apparently able to consume messages from it. The problem is that I have no way of stopping the thread. Pressing CTRL-C only causes "^C" to appear in the console without interrupting the consumer.
The question is: how do I make Pika stop consuming when it is running inside a thread? I would like to note that I am following the guidelines of creating the connection in the consumer thread.
If, after starting the thread with c1.start(), I also run an infinite while loop and log something from there, then pressing CTRL-C will end the while loop, but the consumer thread will still ignore any additional CTRL-C.
Side question: is it possible to stop consuming inside the thread with some outside signalling, like a threading.Condition or something? I don't see how I can interfere with start_consuming.
Question: ... from there then pressing CTRL-C will end the while loop
Add a stop() method to your BaseConsumer, catch the KeyboardInterrupt there, and call stop():
try:
    BaseConsumer.run()
except KeyboardInterrupt:
    BaseConsumer.stop()
Read pika: asynchronous_consumer_example
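As for the side question about outside signalling: newer pika releases (0.12+/1.x) provide BlockingConnection.add_callback_threadsafe, which lets another thread schedule stop_consuming on the connection's own thread. A minimal sketch against the pika 1.x API, reusing the queue name from the question:

import threading
import pika

class Consumer(threading.Thread):

    def run(self):
        # All Pika objects live in this thread and are never shared.
        self._connection = pika.BlockingConnection(
            pika.ConnectionParameters('127.0.0.1'))
        self._channel = self._connection.channel()
        self._channel.queue_declare(queue='test')
        self._channel.basic_consume(queue='test',
                                    on_message_callback=self.on_message)
        self._channel.start_consuming()  # returns after stop_consuming()
        self._connection.close()

    def on_message(self, channel, method_frame, header_frame, body):
        print(body)
        channel.basic_ack(delivery_tag=method_frame.delivery_tag)

    def stop(self):
        # Called from the main thread; the actual stop runs on the
        # connection's own thread, which is the only safe place for it.
        self._connection.add_callback_threadsafe(self._channel.stop_consuming)

c1 = Consumer()
c1.start()
try:
    while c1.is_alive():
        c1.join(timeout=0.5)  # keep the main thread interruptible
except KeyboardInterrupt:
    c1.stop()
    c1.join()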

Trying to implement 2 "threads" using `asyncio` module

I've played around with threading before in Python, but decided to give the asyncio module a try, especially since you can cancel a running task, which seemed like a nice detail. However, for some reason, I can't wrap my head around it.
Here's what I wanted to implement (sorry if I'm using incorrect terminology):
a downloader thread that downloads the same file every x seconds, checks its hash against the previous download and saves it if it's different.
a webserver thread that runs in the background, allowing control (pause, list, stop) of the downloader thread.
I used aiohttp for the webserver.
This is what I have so far:
import asyncio
import random

from aiohttp import web

class aiotest():

    def __init__(self):
        self._dl = None      # downloader future
        self._webapp = None  # web server future
        self.init_server()

    def init_server(self):
        print('Setting up web interface')
        app = web.Application()
        app.router.add_route('GET', '/stop', self.stop)
        print('added urls')
        self._webapp = app

    @asyncio.coroutine
    def _downloader(self):
        while True:
            try:
                print('Downloading and verifying file...')
                # Dummy sleep - to be replaced by actual code
                yield from asyncio.sleep(random.randint(3, 10))
                # Wait a predefined nr of seconds between downloads
                yield from asyncio.sleep(30)
            except asyncio.CancelledError:
                break

    @asyncio.coroutine
    def _supervisor(self):
        print('Starting downloader')
        self._dl = asyncio.async(self._downloader())

    def start(self):
        loop = asyncio.get_event_loop()
        loop.run_until_complete(self._supervisor())
        loop.close()

    @asyncio.coroutine
    def stop(self):
        print('Received STOP')
        self._dl.cancel()
        return web.Response(body=b"Stopping... ")
This class is called by:
t = aiotest()
t.start()
This doesn't work of course, and I feel that this is a horrible piece of code.
What's unclear to me:
I stop the downloader in the stop() method, but how would I go about stopping the webserver (e.g. in a shutdown() method)?
Does the downloader need a new event loop, or can I use the loop returned by asyncio.get_event_loop()?
Do I really need something like the supervisor for what I'm trying to implement? This seems so clunky. And how do I get supervisor to keep running instead of ending after a single execution as it does now?
One last, more general question: is asyncio supposed to replace the threading module (in the future)? Or does each have its own application?
I appreciate all the pointers, remarks and clarifications!
Why the current code is not working:
You're running the event loop only until self._supervisor() is complete, and self._supervisor() merely creates a task (which happens immediately) and then finishes immediately.
You're trying to run the event loop until _supervisor completes, but how and when are you going to start the server? The event loop should keep running until the server is stopped; _supervisor or other work can be added as tasks to that same loop. aiohttp already has a function that starts both the server and the event loop, web.run_app, but we can also do it manually.
Your questions:
Your server will run until you stop it. You can start and stop other coroutines while the server is working.
You need only one event loop for all of your coroutines.
I don't think you need the supervisor.
On the more general question: asyncio helps you run different functions concurrently in a single thread in a single process. That's why asyncio is so cool and fast. Some of your sync threaded code can be rewritten with asyncio and its coroutines. Moreover, asyncio can interact with threads and processes, which can be useful if you still need them: here's an example.
Useful notes:
It's better to use the term coroutine rather than thread when talking about asyncio coroutines, which are not threads.
If you use Python 3.5+, you can use the async/await syntax instead of @asyncio.coroutine/yield from, as illustrated right below.
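For illustration, the same coroutine in both styles (@asyncio.coroutine was eventually removed in Python 3.11):

import asyncio

# Python 3.4 style, as used in this answer:
@asyncio.coroutine
def downloader_34():
    yield from asyncio.sleep(1)

# Python 3.5+ equivalent:
async def downloader_35():
    await asyncio.sleep(1)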
I rewrote your code to show the idea. To check it: run the program, watch the console, open http://localhost:8080/stop, watch the console, open http://localhost:8080/start, watch the console, then press CTRL+C.
import asyncio
import random
from contextlib import suppress

from aiohttp import web

class aiotest():

    def __init__(self):
        self._webapp = None
        self._d_task = None
        self.init_server()

    # SERVER:
    def init_server(self):
        app = web.Application()
        app.router.add_route('GET', '/start', self.start)
        app.router.add_route('GET', '/stop', self.stop)
        app.router.add_route('GET', '/kill_server', self.kill_server)
        self._webapp = app

    def run_server(self):
        # Create server:
        loop = asyncio.get_event_loop()
        handler = self._webapp.make_handler()
        f = loop.create_server(handler, '0.0.0.0', 8080)
        srv = loop.run_until_complete(f)
        try:
            # Start downloader at server start:
            asyncio.async(self.start(None))  # I'm using controllers here and below to be short,
                                             # but it's better to split controller and start func
            # Start server:
            loop.run_forever()
        except KeyboardInterrupt:
            pass
        finally:
            # Stop downloader when server stopped:
            loop.run_until_complete(self.stop(None))
            # Cleanup resources:
            srv.close()
            loop.run_until_complete(srv.wait_closed())
            loop.run_until_complete(self._webapp.shutdown())
            loop.run_until_complete(handler.finish_connections(60.0))
            loop.run_until_complete(self._webapp.cleanup())
            loop.close()

    @asyncio.coroutine
    def kill_server(self, request):
        print('Server killing...')
        loop = asyncio.get_event_loop()
        loop.stop()
        return web.Response(body=b"Server killed")

    # DOWNLOADER
    @asyncio.coroutine
    def start(self, request):
        if self._d_task is None:
            print('Downloader starting...')
            self._d_task = asyncio.async(self._downloader())
            return web.Response(body=b"Downloader started")
        else:
            return web.Response(body=b"Downloader already started")

    @asyncio.coroutine
    def stop(self, request):
        if (self._d_task is not None) and (not self._d_task.cancelled()):
            print('Downloader stopping...')
            self._d_task.cancel()
            # cancel() just tells the task it should be cancelled;
            # to let the task handle CancelledError, await it:
            with suppress(asyncio.CancelledError):
                yield from self._d_task
            self._d_task = None
            return web.Response(body=b"Downloader stopped")
        else:
            return web.Response(body=b"Downloader already stopped or stopping")

    @asyncio.coroutine
    def _downloader(self):
        while True:
            print('Downloading and verifying file...')
            # Dummy sleep - to be replaced by actual code
            yield from asyncio.sleep(random.randint(1, 2))
            # Wait a predefined nr of seconds between downloads
            yield from asyncio.sleep(1)

if __name__ == '__main__':
    t = aiotest()
    t.run_server()
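For comparison, a condensed sketch of the same idea on current Python (3.7+) and aiohttp 3.x, using async/await, web.run_app, and an assumed app['d_task'] slot for the downloader task:

import asyncio
import random

from aiohttp import web

async def downloader():
    while True:
        print('Downloading and verifying file...')
        await asyncio.sleep(random.randint(1, 2))  # dummy work
        await asyncio.sleep(1)                     # delay between downloads

async def start(request):
    if request.app['d_task'] is None:
        request.app['d_task'] = asyncio.create_task(downloader())
        return web.Response(text="Downloader started")
    return web.Response(text="Downloader already started")

async def stop(request):
    task = request.app['d_task']
    if task is not None:
        task.cancel()  # request cancellation; CancelledError is raised in the task
        request.app['d_task'] = None
        return web.Response(text="Downloader stopped")
    return web.Response(text="Downloader already stopped")

app = web.Application()
app['d_task'] = None
app.router.add_get('/start', start)
app.router.add_get('/stop', stop)

if __name__ == '__main__':
    web.run_app(app, port=8080)  # runs the event loop until CTRL+C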

RabbitMQ, Pika and reconnection strategy

I'm using Pika to process data from RabbitMQ.
As I seemed to run into different kinds of problems, I decided to write a small test application to see how I can handle disconnects.
I wrote this test app, which does the following:
Connect to the broker, retrying until successful.
When connected, declare a queue.
Consume this queue and put the result into a Python Queue.Queue(0).
Get items from the Queue.Queue(0) and produce them back into the broker queue.
What I noticed were two issues:
When I run my script from one host, connecting to RabbitMQ on another host (inside a VM), the script exits at random moments without producing an error.
When I run my script on the same host on which RabbitMQ is installed, it runs fine and keeps running.
This might be explained by network issues or dropped packets, although I find the connection not really robust.
When the script runs locally on the RabbitMQ server and I kill RabbitMQ, the script exits with the error: "ERROR pika SelectConnection: Socket Error on 3: 104"
So it looks like I can't get the reconnection strategy working as it should. Could someone have a look at the code to see what I'm doing wrong?
Thanks,
Jay
#!/bin/python
import logging
import threading
import Queue
import pika
from pika.reconnection_strategies import SimpleReconnectionStrategy
from pika.adapters import SelectConnection
import time
from threading import Lock

class Broker(threading.Thread):

    def __init__(self):
        threading.Thread.__init__(self)
        self.logging = logging.getLogger(__name__)
        self.to_broker = Queue.Queue(0)
        self.from_broker = Queue.Queue(0)
        self.parameters = pika.ConnectionParameters(host='sandbox', heartbeat=True)
        self.srs = SimpleReconnectionStrategy()
        self.properties = pika.BasicProperties(delivery_mode=2)
        self.connection = None
        while True:
            try:
                self.connection = SelectConnection(self.parameters, self.on_connected, reconnection_strategy=self.srs)
                break
            except Exception as err:
                self.logging.warning('Cant connect. Reason: %s' % err)
                time.sleep(1)
        self.daemon = True

    def run(self):
        while True:
            self.submitData(self.from_broker.get(block=True))

    def on_connected(self, connection):
        connection.channel(self.on_channel_open)

    def on_channel_open(self, new_channel):
        self.channel = new_channel
        self.channel.queue_declare(queue='sandbox', durable=True)
        self.channel.basic_consume(self.processData, queue='sandbox')

    def processData(self, ch, method, properties, body):
        self.logging.info('Received data from broker')
        self.channel.basic_ack(delivery_tag=method.delivery_tag)
        self.from_broker.put(body)

    def submitData(self, data):
        self.logging.info('Submitting data to broker.')
        self.channel.basic_publish(exchange='',
                                   routing_key='sandbox',
                                   body=data,
                                   properties=self.properties)

if __name__ == '__main__':
    format = ('%(asctime)s %(levelname)s %(name)s %(message)s')
    logging.basicConfig(level=logging.DEBUG, format=format)

    broker = Broker()
    broker.start()
    try:
        broker.connection.ioloop.start()
    except Exception as err:
        print err
The main problem with your script is that it interacts with a single channel from both your main thread (where the ioloop is running) and the "Broker" thread (which calls submitData in a loop). This is not safe.
Also, SimpleReconnectionStrategy does not seem to do anything useful. It does not cause a reconnect if the connection is interrupted. I believe this is a bug in Pika: https://github.com/pika/pika/issues/120
I attempted to refactor your code to make it work as I think you wanted it to, but ran into another problem. Pika does not appear to have a way to detect delivery failure, which means that data may be lost if the connection drops. This seems like such an obvious requirement! How can there be no way to detect that basic_publish failed? I tried all kinds of things, including transactions and add_on_return_callback (all of which seemed clunky and overly complicated), but came up with nothing. If there truly is no way, then Pika only seems useful in situations that can tolerate loss of data sent to RabbitMQ, or in programs that only need to consume from RabbitMQ.
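For reference, on modern pika (1.x) the BlockingChannel API does expose delivery failure through publisher confirms: after confirm_delivery(), basic_publish raises UnroutableError or NackError when the broker cannot take the message. A minimal sketch:

import pika
from pika.exceptions import NackError, UnroutableError

connection = pika.BlockingConnection(pika.ConnectionParameters('localhost'))
channel = connection.channel()
channel.queue_declare(queue='sandbox', durable=True)
channel.confirm_delivery()  # enable publisher confirms on this channel

try:
    channel.basic_publish(exchange='',
                          routing_key='sandbox',
                          body='message',
                          properties=pika.BasicProperties(delivery_mode=2),
                          mandatory=True)
    print('delivery confirmed')
except (UnroutableError, NackError) as exc:
    # The message was returned or nacked; requeue or log it here.
    print('delivery failed:', exc)

connection.close()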
This is not reliable, but for reference, here's some code that solves your multi-thread problem:
import logging
import pika
import Queue
import sys
import threading
import time
from functools import partial
from pika.adapters import SelectConnection, BlockingConnection
from pika.exceptions import AMQPConnectionError
from pika.reconnection_strategies import SimpleReconnectionStrategy

log = logging.getLogger(__name__)

DEFAULT_PROPERTIES = pika.BasicProperties(delivery_mode=2)


class Broker(object):

    def __init__(self, parameters, on_channel_open, name='broker'):
        self.parameters = parameters
        self.on_channel_open = on_channel_open
        self.name = name

    def connect(self, forever=False):
        name = self.name
        while True:
            try:
                connection = SelectConnection(
                    self.parameters, self.on_connected)
                log.debug('%s connected', name)
            except Exception:
                if not forever:
                    raise
                log.warning('%s cannot connect', name, exc_info=True)
                time.sleep(10)
                continue
            try:
                connection.ioloop.start()
            finally:
                try:
                    connection.close()
                    connection.ioloop.start()  # allow connection to close
                except Exception:
                    pass
            if not forever:
                break

    def on_connected(self, connection):
        connection.channel(self.on_channel_open)


def setup_submitter(channel, data_queue, properties=DEFAULT_PROPERTIES):
    def on_queue_declared(frame):
        # PROBLEM pika does not appear to have a way to detect delivery
        # failure, which means that data could be lost if the connection
        # drops...
        channel.confirm_delivery(on_delivered)
        submit_data()

    def on_delivered(frame):
        if frame.method.NAME in ['Confirm.SelectOk', 'Basic.Ack']:
            log.info('submission confirmed %r', frame)
            # increasing this value seems to cause a higher failure rate
            time.sleep(0)
            submit_data()
        else:
            log.warn('submission failed: %r', frame)
            #data_queue.put(...)

    def submit_data():
        log.info('waiting on data queue')
        data = data_queue.get()
        log.info('got data to submit')
        channel.basic_publish(exchange='',
                              routing_key='sandbox',
                              body=data,
                              properties=properties,
                              mandatory=True)
        log.info('submitted data to broker')

    channel.queue_declare(
        queue='sandbox', durable=True, callback=on_queue_declared)


def blocking_submitter(parameters, data_queue,
                       properties=DEFAULT_PROPERTIES):
    while True:
        try:
            connection = BlockingConnection(parameters)
            channel = connection.channel()
            channel.queue_declare(queue='sandbox', durable=True)
        except Exception:
            log.error('connection failure', exc_info=True)
            time.sleep(1)
            continue
        while True:
            log.info('waiting on data queue')
            try:
                data = data_queue.get(timeout=1)
            except Queue.Empty:
                try:
                    connection.process_data_events()
                except AMQPConnectionError:
                    break
                continue
            log.info('got data to submit')
            try:
                channel.basic_publish(exchange='',
                                      routing_key='sandbox',
                                      body=data,
                                      properties=properties,
                                      mandatory=True)
            except Exception:
                log.error('submission failed', exc_info=True)
                data_queue.put(data)
                break
            log.info('submitted data to broker')


def setup_receiver(channel, data_queue):
    def process_data(channel, method, properties, body):
        log.info('received data from broker')
        data_queue.put(body)
        channel.basic_ack(delivery_tag=method.delivery_tag)

    def on_queue_declared(frame):
        channel.basic_consume(process_data, queue='sandbox')

    channel.queue_declare(
        queue='sandbox', durable=True, callback=on_queue_declared)


if __name__ == '__main__':
    if len(sys.argv) != 2:
        print 'usage: %s RABBITMQ_HOST' % sys.argv[0]
        sys.exit()

    format = ('%(asctime)s %(levelname)s %(name)s %(message)s')
    logging.basicConfig(level=logging.DEBUG, format=format)

    host = sys.argv[1]
    log.info('connecting to host: %s', host)
    parameters = pika.ConnectionParameters(host=host, heartbeat=True)
    data_queue = Queue.Queue(0)
    data_queue.put('message')  # prime the pump

    # run submitter in a thread
    setup = partial(setup_submitter, data_queue=data_queue)
    broker = Broker(parameters, setup, 'submitter')
    thread = threading.Thread(target=
        partial(broker.connect, forever=True))
    # uncomment these lines to use the blocking variant of the submitter
    #thread = threading.Thread(target=
    #    partial(blocking_submitter, parameters, data_queue))
    thread.daemon = True
    thread.start()

    # run receiver in main thread
    setup = partial(setup_receiver, data_queue=data_queue)
    broker = Broker(parameters, setup, 'receiver')
    broker.connect(forever=True)
