Kombu/Celery messaging - python

I have a simple application that sends and receives messages with Kombu, and uses Celery to run the task. With Kombu alone I can receive the message properly: when I send "Hello", Kombu receives "Hello". But once I add the task, what Kombu receives is the Celery task ID.
The purpose of this project is to be able to schedule when messages are sent and received, hence Celery.
What I would like to know is: why is Kombu receiving the task ID instead of the sent message? I have searched and searched and have not found any related results on this matter. I am a beginner with these libraries and would appreciate some help fixing this.
My code:
task.py
from celery import Celery

app = Celery('tasks', broker='amqp://xx:xx@localhost/xx', backend='amqp://')

@app.task(name='task.add')
def add(x, y):
    return x + y
send.py
import kombu
from task import add

# declare connection with the broker
connection = kombu.Connection(hostname='xx',
                              userid='xx',
                              password='xx',
                              virtual_host='xx')
connection.connect()

# check if the connection is okay
if connection.connect() is False:
    print("not connected")
else:
    print("connected")

# RabbitMQ channel
channel = connection.channel()

# queue & exchange for kombu
exchange = kombu.Exchange('exchnge', type='direct')
queue = kombu.Queue('kombu_queue', exchange, routing_key='queue1')

# message here
x = input("Enter first name: ")
y = input("Enter last name: ")
result = add.delay(x, y)
print(result)

# send the message to the queue
producer = kombu.Producer(channel, exchange)
producer.publish(result,
                 exchange=exchange,
                 routing_key='queue1')
print("Message sent: [x]")
connection.release()
receive.py
import kombu

# receive
connection = kombu.Connection(hostname='xx',
                              userid='xx',
                              password='xx',
                              virtual_host='xx')
connection.connect()
channel = connection.channel()

exchange = kombu.Exchange('exchnge', type='direct')
queue = kombu.Queue('kombu_queue', exchange, routing_key='queue1')

print("Waiting for messages...")

def callback(body, message):
    print('Got message - %s' % body)
    message.ack()

consumer = kombu.Consumer(channel,
                          queues=queue,
                          callbacks=[callback])
consumer.consume()

while True:
    connection.drain_events()
I am using:
Kombu 3.0.26
Celery 3.1.18
RabbitMQ as the broker
What I sent:
xxx
yyy
What kombu receives:
Got message - d22880c9-b22c-48d8-bc96-5d839b224f2a

I found an answer to my problem, and for anyone who comes across this kind of problem, I'll share the answer that worked for me.
I found the solution here.
Or here - user jennaliu's answer may also help you if the first link doesn't.

You need to call result.get() to receive the actual value of add.delay(). What you are publishing (and therefore seeing as the message body) is the AsyncResult instance converted to a string - its task ID - which doesn't make much sense as a message.
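As a minimal sketch (assuming a Celery worker is running to execute the task, and reusing the channel, exchange and routing key from send.py above), the publish step would become:
result = add.delay(x, y)
value = result.get(timeout=10)   # block until the worker returns the real value

producer = kombu.Producer(channel, exchange)
producer.publish(str(value),     # publish the computed value, not the AsyncResult
                 exchange=exchange,
                 routing_key='queue1')
Keep in mind that get() blocks the sender until the task finishes, so if you only need Celery for scheduling you may prefer to publish from inside the task itself.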

Related

Move a message to subscription deadletter for failed HTTP request

I've been looking for resources but I can't seem to find what I need. I have an Azure Function with a Service Bus trigger. From this, I make an HTTP call with one of the values found in the Service Bus message.
An additional requirement for me is to dead-letter a message if the HTTP call fails. But as I understand it, the message is no longer present in the subscription because it was properly received. Is there a way for me to keep the message in the subscription, and then dispose of it once it is successful (or transfer it to the DLQ if not)?
I found this piece of code, but I'm not sure how it sends to the DLQ:
https://github.com/Azure/azure-sdk-for-python/blob/azure-servicebus_7.3.0/sdk/servicebus/azure-servicebus/samples/sync_samples/receive_deadlettered_messages.py
"""
Example to show receiving dead-lettered messages from a Service Bus Queue.
"""
# pylint: disable=C0111
import os
from azure.servicebus import ServiceBusClient, ServiceBusMessage, ServiceBusSubQueue
CONNECTION_STR = os.environ['SERVICE_BUS_CONNECTION_STR']
QUEUE_NAME = os.environ["SERVICE_BUS_QUEUE_NAME"]
servicebus_client = ServiceBusClient.from_connection_string(conn_str=CONNECTION_STR)
with servicebus_client:
sender = servicebus_client.get_queue_sender(queue_name=QUEUE_NAME)
messages = [ServiceBusMessage("Message to be deadlettered") for _ in range(10)]
with sender:
sender.send_messages(messages)
print('dead lettering messages')
receiver = servicebus_client.get_queue_receiver(queue_name=QUEUE_NAME)
with receiver:
received_msgs = receiver.receive_messages(max_message_count=10, max_wait_time=5)
for msg in received_msgs:
print(str(msg))
receiver.dead_letter_message(msg)
print('receiving deadlettered messages')
dlq_receiver = servicebus_client.get_queue_receiver(queue_name=QUEUE_NAME, sub_queue=ServiceBusSubQueue.DEAD_LETTER)
with dlq_receiver:
received_msgs = dlq_receiver.receive_messages(max_message_count=10, max_wait_time=5)
for msg in received_msgs:
print(str(msg))
dlq_receiver.complete_message(msg)
print("Receive is done.")
Here is a code snippet of mine:
async def main(msg: func.ServiceBusMessage):
    try:
        logging.info('Python ServiceBus queue trigger processed message: %s',
                     msg.get_body().decode('utf-8'))
        await asyncio.gather(wait(), wait())
        result = json.dumps({
            'message_id': msg.message_id,
            'metadata': msg.metadata
        })
        msgobj = json.loads(result)
        val = msgobj['metadata']['value']
        run_pipeline(val, msg)
    except Exception as e:
        logging.error(f"trigger failed: {e}")
TLDR; How do I keep the message in the subscription and either dispose of it (if successful) or send it to the DLQ if not?
The code that you pasted is for receiving dead-lettered messages from the dead-letter queue.
I found some code in the docs. You can use this snippet from their example:
from azure.servicebus import ServiceBusClient
import os

connstr = os.environ['SERVICE_BUS_CONNECTION_STR']
queue_name = os.environ['SERVICE_BUS_QUEUE_NAME']

with ServiceBusClient.from_connection_string(connstr) as client:
    with client.get_queue_receiver(queue_name) as receiver:
        for msg in receiver:
            print(str(msg))
            receiver.dead_letter_message(msg)
You can adapt the code above in your exception handler.
There are four methods to settle a message after receipt:
Complete:
Declares the message processing to be successfully completed, removing the message from the queue.
receiver.complete_message(msg)
Abandon:
Abandon processing of the message for the time being, returning the message immediately back to the queue to be picked up by another (or the same) receiver.
receiver.abandon_message(msg)
DeadLetter:
Transfer the message from the primary queue into the DLQ.
receiver.dead_letter_message(msg)
Defer:
Defer is subtly different from the prior settlement methods. It prevents the message from being directly received from the queue by setting it aside.
receiver.defer_message(msg)
To answer your question "How do I keep the message in the subscription and either dispose them (if successful) or send them to the DLQ if not?":
keep the message in the subscription: use abandon_message
dispose them (if successful): use complete_message
send them to the DLQ: use dead_letter_message
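Putting those settlement calls together, here is a rough sketch of the pattern (using the SDK receiver rather than the Functions trigger binding, since the receiver keeps the message locked until you settle it; CONNECTION_STR and QUEUE_NAME are as in the earlier sample, run_pipeline stands in for your own HTTP/processing step, and for a topic subscription you would use get_subscription_receiver instead):
from azure.servicebus import ServiceBusClient

with ServiceBusClient.from_connection_string(CONNECTION_STR) as client:
    with client.get_queue_receiver(queue_name=QUEUE_NAME) as receiver:
        for msg in receiver:
            try:
                run_pipeline(str(msg), msg)        # your processing / HTTP call
                receiver.complete_message(msg)     # success: remove from the queue
            except Exception:
                receiver.dead_letter_message(msg)  # failure: move to the DLQ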

Multiple consumer Rabbitmq through multiprocessing

New to Python.
I am trying to create multiple consumers for a RabbitMQ client.
I am using Pika and trying to do it with multiprocessing.
It seems to connect but is not able to sustain the loop.
Can you please help?
This part of the code should also take care of the writer side through the callback.
It should start the loop and consume continuously.
import multiprocessing
import time
import pika

# this is the writer part
def callback(ch, method, properties, body):
    print(" [x] %r received %r" % (multiprocessing.current_process(), body,))
    time.sleep(body.count('.'))
    # print(" [x] Done")
    ch.basic_ack(delivery_tag=method.delivery_tag)

def consume():
    credentials = pika.PlainCredentials(userid, password)
    parameters = pika.ConnectionParameters(url, port, '/', credentials)
    connection = pika.BlockingConnection(parameters=parameters)
    channel = connection.channel()
    channel.queue_declare(queue='queuename', durable=True)
    channel.basic_consume('queuename', callback)
    print(' [*] Waiting for messages. To exit press CTRL+C')
    channel.start_consuming()

userid = "user"
password = "pwd"
url = "localhost"
port = 5672

if __name__ == "__main__":
    workers = 5
    pool = multiprocessing.Pool(processes=workers)
    for i in range(0, workers):
        pool.apply_async(consume)
    # Stay alive
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        pool.terminate()
You aren't doing any exception handling in your sub-processes, so my guess is that exceptions are being thrown that you don't expect. This code works fine in my environment, using Pika 1.1.0 and Python 3.7.3.
Before I added exception handling, a TypeError would be thrown from body.count('.') because body is bytes, not str, in that case.
Please note that I'm using the correct method to wait for sub-processes, according to these docs.
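For illustration only, here is a sketch of what those two changes might look like (reusing the names from the question; the callback would also need body.count(b'.') since body is bytes):
import traceback

def consume():
    try:
        credentials = pika.PlainCredentials(userid, password)
        parameters = pika.ConnectionParameters(url, port, '/', credentials)
        connection = pika.BlockingConnection(parameters=parameters)
        channel = connection.channel()
        channel.queue_declare(queue='queuename', durable=True)
        channel.basic_consume('queuename', callback)
        print(' [*] Waiting for messages. To exit press CTRL+C')
        channel.start_consuming()
    except Exception:
        # surface errors that would otherwise vanish inside the pool worker
        traceback.print_exc()

if __name__ == "__main__":
    workers = 5
    pool = multiprocessing.Pool(processes=workers)
    for _ in range(workers):
        pool.apply_async(consume)
    pool.close()   # no more work will be submitted
    pool.join()    # wait for the consumer processes instead of busy-looping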
NOTE: the RabbitMQ team monitors the rabbitmq-users mailing list and only sometimes answers questions on StackOverflow.

Receive multiple amqp queues in python / pika

I'm trying to consume from multiple queues. I tried the code from https://stackoverflow.com/a/42351395/3303330, but it's necessary to call queue_declare. Hope you can help me, guys. Here is my code:
import pika
import time
from zeep import Client

parameters = pika.URLParameters('amqp://user:pass@theurl:5672/%2F')
connection = pika.BlockingConnection(parameters)
channel = connection.channel()
channel.queue_declare(queue='queue1', passive=True, durable=True, exclusive=False, auto_delete=False)
print(' [*] Waiting for messages. To exit press CTRL+C')

def callback(ch, method, header, body):
    print(" [x] Received %r" % body)
    time.sleep(body.count(b'.'))
    ch.basic_ack(delivery_tag=method.delivery_tag)

channel.basic_consume(callback, queue='queue1')
channel.start_consuming()
It is not necessary to declare a queue more than once as long as you declare it durable. You can declare more than one queue in your client code or using the RabbitMQ admin interface.
You can use your channel to consume messages from more than one queue. Just execute channel.basic_consume more than once using different queue parameter values.
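For example, continuing from the code above (queue2 is a hypothetical second queue; this keeps the older basic_consume(callback, queue=...) signature used in the question):
# One channel, one callback, two subscriptions
channel.basic_consume(callback, queue='queue1')
channel.basic_consume(callback, queue='queue2')  # hypothetical second queue
channel.start_consuming()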

Pika worker throws exception when running channel.declare_queue

I'm writing a Python client to accept job messages from a RabbitMQ broker and process the jobs, returning the results to another server. My script that sends messages to the RabbitMQ broker starts up fine, but my worker throws the following error when running channel.queue_declare(queue='task_queue'):
pika.exceptions.AMQPChannelError: (406, "PRECONDITION_FAILED - parameters for queue 'task_queue' in vhost '/' not equivalent")
Client:
import pika
connection = pika.BlockingConnection(pika.ConnectionParameters(host=cmdargs.server))
channel = connection.channel()
channel.queue_declare(queue='task_queue')
channel.basic_qos(prefetch_count=1)
channel.basic_consume(ProcJobCallback, queue='task_queue')
channel.start_consuming()
Server method that interacts with RabbitMQ:
def addNewJob(self, newJob):
    self.jobList.append(newJob)
    connection = pika.BlockingConnection(pika.ConnectionParameters(host='localhost'))
    channel = connection.channel()
    channel.queue_declare(queue='task_queue')
    for tile in newJob.TileStatus:
        message = "{0},{1},{2}".format(newJob, tile[0], tile[1])
        channel.basic_publish(exchange='',
                              routing_key='task_queue',
                              body=message,
                              properties=pika.BasicProperties(delivery_mode=2))
    connection.close()
Any help or insight is greatly appreciated.
EDIT: I discovered why I was getting an error with the code listed above. I was specifying delivery_mode=2 when publishing my messages, but when I declared the queue, I forgot to add the durable=True parameter.
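In other words, a sketch of the fix described in that edit (the same durable declaration has to be used by both the publisher and the consumer so the parameters match):
# Declare the queue durable on both sides so the parameters match, and so
# persistent messages (delivery_mode=2) survive a broker restart.
channel.queue_declare(queue='task_queue', durable=True)
channel.basic_publish(exchange='',
                      routing_key='task_queue',
                      body=message,
                      properties=pika.BasicProperties(delivery_mode=2))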
Are you sure you are connecting to the same server (host) on the publisher and consumer side?
connection = pika.BlockingConnection(pika.ConnectionParameters(host=cmdargs.server))
connection = pika.BlockingConnection(pika.ConnectionParameters(host='localhost'))
If your queue is durable, just remove the declaration "channel.queue_declare(queue='task_queue')"; that should be enough in your case.
I met the same problem when I tried to make the queue messages persistent with durable=True.
Try renaming the queue; that worked with my script. Killing the queue new_task and re-running your script also works.
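If you go the "kill the queue and re-run" route, a quick sketch (queue name assumed from the question) is to delete it once from a throwaway connection and let your script re-declare it with the new parameters:
import pika

connection = pika.BlockingConnection(pika.ConnectionParameters(host='localhost'))
channel = connection.channel()
channel.queue_delete(queue='task_queue')  # remove the old, mismatched queue
connection.close()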

Get Queue Size in Pika (AMQP Python)

Simple question, but Google or the Pika open source code did not help. Is there a way to query the current queue size (item counter) in Pika?
I know that this question is a bit old, but here is an example of doing this with pika.
Regarding AMQP and RabbitMQ, if you have already declared the queue, you can re-declare it with the passive flag on, keeping all other queue parameters identical. The response to this declaration (declare-ok) will include the number of messages in the queue.
Here is an example with pika 0.9.5:
import pika

def on_callback(msg):
    print msg

params = pika.ConnectionParameters(
    host='localhost',
    port=5672,
    credentials=pika.credentials.PlainCredentials('guest', 'guest'),
)

# Open a connection to RabbitMQ on localhost using all default parameters
connection = pika.BlockingConnection(parameters=params)

# Open the channel
channel = connection.channel()

# Declare the queue
channel.queue_declare(
    callback=on_callback,
    queue="test",
    durable=True,
    exclusive=False,
    auto_delete=False
)
# ...
# Re-declare the queue with passive flag
res = channel.queue_declare(
    callback=on_callback,
    queue="test",
    durable=True,
    exclusive=False,
    auto_delete=False,
    passive=True
)
print 'Messages in queue %d' % res.method.message_count
This will print the following:
<Method(['frame_type=1', 'channel_number=1', "method=<Queue.DeclareOk(['queue=test', 'message_count=0', 'consumer_count=0'])>"])>
<Method(['frame_type=1', 'channel_number=1', "method=<Queue.DeclareOk(['queue=test', 'message_count=0', 'consumer_count=0'])>"])>
Messages in queue 0
You get the number of messages from the message_count member.
Here is how you can get the queue length using pika (assuming you are using the default user and password on localhost); replace q_name with your queue name:
import pika

connection = pika.BlockingConnection()
channel = connection.channel()
q = channel.queue_declare(q_name)
q_len = q.method.message_count
Have you tried PyRabbit? It has a get_queue_depth() method which sounds like what you're looking for.
There are two ways to get the queue size in the AMQP protocol. You can either use Queue.Declare or Basic.Get.
If you are consuming messages as they arrive using Basic.Consume, then you can't get this info unless you disconnect (timeout) and redeclare the queue, or else get one message but don't ack it. In newer versions of AMQP you can actively requeue the message.
As for Pika, I don't know the specifics but Python clients for AMQP have been a thorn in my side. Often you will need to monkeypatch classes in order to get the info you need, or to allow a queue consumer to timeout so that you can do other things at periodic intervals like record stats or find out how many messages are in a queue.
Another way around this is to give up, and use the Pipe class to run sudo rabbitmqctl list_queues -p my_vhost. Then parse the output to find the size of all queues. If you do this you will need to configure /etc/sudoers to not ask for the usual sudo password.
I pray that someone else with more Pika experience answers this by pointing out how you can do all the things that I mentioned, in which case I will download Pika and kick the tires. But if that doesn't happen and you are having difficulty with monkeypatching the Pika code, then have a look at haigha. I found their code to be much more straightforward than other Python AMQP client libraries because they stick closer to the AMQP protocol.
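As a rough illustration of the Basic.Get route mentioned above (names assumed; the Basic.GetOk frame reports how many messages remain in the queue after the one you fetched):
import pika

connection = pika.BlockingConnection(pika.ConnectionParameters('localhost'))
channel = connection.channel()

# Fetch one message without registering a consumer; GetOk carries message_count.
method_frame, header_frame, body = channel.basic_get(queue='test')
if method_frame:
    print('Messages remaining after this one:', method_frame.message_count)
    channel.basic_nack(method_frame.delivery_tag, requeue=True)  # put it back
else:
    print('Queue is empty')
connection.close()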
I am late to the party, but here is an example of getting the queue count using pyrabbit or pyrabbit2 against AWS AmazonMQ over HTTPS; it should work on RabbitMQ as well:
from pyrabbit2.api import Client

cl = Client('b-xxxxxx.mq.ap-southeast-1.amazonaws.com', 'user', 'password', scheme='https')
if not cl.is_alive():
    raise Exception("Failed to connect to rabbitmq")

for i in cl.get_all_vhosts():
    print(i['name'])

queues = [q['name'] for q in cl.get_queues('/')]
print(queues)

itemCount = cl.get_queue_depth('/', 'event.stream.my-api')
print(itemCount)
Just posting this in case anyone else comes across this discussion. The answer with the most votes, i.e.:
# Re-declare the queue with passive flag
res = channel.queue_declare(
    callback=on_callback,
    queue="test",
    durable=True,
    exclusive=False,
    auto_delete=False,
    passive=True
)
was very helpful for me, but it comes with a serious caveat. According to the pika documentation, the passive flag is used to "Only check to see if the queue exists." As such, one would imagine you can use the queue_declare function with the passive flag to check if a queue exists in situations where there's a chance that the queue was never declared. From my testing, if you call this function with the passive flag and the queue does not exist, not only does the API throw an exception, it also causes the broker to disconnect your channel, so even if you catch the exception gracefully, you've lost your connection to the broker. I tested this with two different Python scripts against a plain vanilla RabbitMQ container running in minikube. I've run this test many times and I get the same behavior every time.
My test code:
import logging
import pika

logging.basicConfig(level="INFO")
logger = logging.getLogger(__name__)
logging.getLogger("pika").setLevel(logging.WARNING)

def on_callback(msg):
    logger.info(f"Callback msg: {msg}")

queue_name = "testy"
credentials = pika.PlainCredentials("guest", "guest")
connection = pika.BlockingConnection(
    pika.ConnectionParameters(host="localhost", port=5672, credentials=credentials)
)
logger.info("Connection established")
channel = connection.channel()
logger.info("Channel created")
channel.exchange_declare(exchange="svc-exchange", exchange_type="direct", durable=True)
response = channel.queue_declare(
    queue=queue_name, durable=True, exclusive=False, auto_delete=False, passive=True
)
logger.info(f"queue_declare response: {response}")
channel.queue_delete(queue=queue_name)
connection.close()
The output:
INFO:__main__:Connection established
INFO:__main__:Channel created
WARNING:pika.channel:Received remote Channel.Close (404): "NOT_FOUND - no queue 'testy' in vhost '/'" on <Channel number=1 OPEN conn=<SelectConnection OPEN transport=<pika.adapters.utils.io_services_utils._AsyncPlaintextTransport object at 0x1047e2700> params=<ConnectionParameters host=localhost port=5672 virtual_host=/ ssl=False>>>
Traceback (most recent call last):
  File "check_queue_len.py", line 29, in <module>
    response = channel.queue_declare(
  File "/Users/dbailey/dev/asc-service-deployment/venv/lib/python3.8/site-packages/pika/adapters/blocking_connection.py", line 2521, in queue_declare
    self._flush_output(declare_ok_result.is_ready)
  File "/Users/dbailey/dev/asc-service-deployment/venv/lib/python3.8/site-packages/pika/adapters/blocking_connection.py", line 1354, in _flush_output
    raise self._closing_reason  # pylint: disable=E0702
pika.exceptions.ChannelClosedByBroker: (404, "NOT_FOUND - no queue 'testy' in vhost '/'")
When I set passive to False:
scripts % python check_queue_len.py
INFO:__main__:Connection established
INFO:__main__:Channel created
INFO:__main__:queue_declare response: <METHOD(['channel_number=1', 'frame_type=1', "method=<Queue.DeclareOk(['consumer_count=0', 'message_count=0', 'queue=testy'])>"])>
Please let me know if I'm somehow missing something here.
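If you do need to probe for a queue this way, one workaround (my own sketch, not from the answer above) is to do the passive declare on a throwaway channel and catch the broker-initiated close, so the rest of the connection keeps working:
import pika

def queue_exists(connection, queue_name):
    """Passive-declare on a disposable channel; if the queue is missing,
    the broker closes only that channel with a 404."""
    channel = connection.channel()
    try:
        channel.queue_declare(queue=queue_name, passive=True)
        channel.close()
        return True
    except pika.exceptions.ChannelClosedByBroker:
        return False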
