Connecting OCPP CSMS to the front end to get realtime data - python

We have modified and implemented the code below to accommodate RemoteStartTransaction, and we are able to start and stop the charger quite well.
However, we are stuck trying to implement the following:
Web Interface - Using ReactJS
a) Has to show the LIVE status of the chargers
How do I make my web app listen to the CSMS and receive all messages for all chargers?
Do I need to write Boot and Status Notifications to the database as and when the CSMS receives them?
Basically, my question is how to achieve this and show realtime data.
Mobile Interface - Using Flutter
a) Has to start and stop a transaction (successful)
b) Has to keep getting the meter values, time, etc. to show on the mobile device in real time while charging.
On clicking the "Start Charging" button in the mobile app, we make an API call to "localhost:8080/remoteStart"; however, we just get a 200 response. How do I get realtime updates here to show the meter values, etc.?
Any help or guidance in the right direction will be very helpful. Thanks.
import asyncio
import websockets
from aiohttp import web
from functools import partial
from datetime import datetime

from ocpp.routing import on
from ocpp.v16 import ChargePoint as cp
from ocpp.v16.enums import Action, RegistrationStatus
from ocpp.v16 import call_result, call


class ChargePoint(cp):
    @on(Action.BootNotification)
    def on_boot_notification(self, charge_point_vendor, charge_point_model, **kwargs):
        return call_result.BootNotificationPayload(
            current_time=datetime.utcnow().isoformat(),
            interval=10,
            status=RegistrationStatus.accepted,
        )

    async def change_configuration(self, key: str, value: str):
        return await self.call(call.ChangeConfigurationPayload(key=key, value=value))


class CentralSystem:
    def __init__(self):
        self._chargers = {}

    def register_charger(self, cp: ChargePoint) -> asyncio.Queue:
        """ Register a new ChargePoint at the CSMS. The function returns a
        queue. The CSMS will put a message on the queue if the CSMS wants to
        close the connection.
        """
        queue = asyncio.Queue(maxsize=1)
        # Store a reference to the task so we can cancel it later if needed.
        task = asyncio.create_task(self.start_charger(cp, queue))
        self._chargers[cp] = task
        return queue

    async def start_charger(self, cp, queue):
        """ Start listening for messages of the charger. """
        try:
            await cp.start()
        except Exception as e:
            print(f"Charger {cp.id} disconnected: {e}")
        finally:
            # Make sure to remove the reference to the charger after it disconnected.
            del self._chargers[cp]

            # This will unblock the `on_connect()` handler and the connection
            # will be destroyed.
            await queue.put(True)

    async def change_configuration(self, key: str, value: str):
        for cp in self._chargers:
            await cp.change_configuration(key, value)

    def disconnect_charger(self, id: str):
        for cp, task in self._chargers.items():
            if cp.id == id:
                task.cancel()
                return

        raise ValueError(f"Charger {id} not connected.")


async def change_config(request):
    """ HTTP handler for changing configuration of all charge points. """
    data = await request.json()
    csms = request.app["csms"]

    await csms.change_configuration(data["key"], data["value"])

    return web.Response()


async def disconnect_charger(request):
    """ HTTP handler for disconnecting a charger. """
    data = await request.json()
    csms = request.app["csms"]

    try:
        csms.disconnect_charger(data["id"])
    except ValueError as e:
        print(f"Failed to disconnect charger: {e}")
        return web.Response(status=404)

    return web.Response()


async def on_connect(websocket, path, csms):
    """ For every new charge point that connects, create a ChargePoint instance
    and start listening for messages.
    The ChargePoint is registered at the CSMS.
    """
    charge_point_id = path.strip("/")
    cp = ChargePoint(charge_point_id, websocket)
    print(f"Charger {cp.id} connected.")

    # If this handler returns, the connection will be destroyed. Therefore we need some
    # synchronization mechanism that blocks until the CSMS wants to close the connection.
    # An `asyncio.Queue` is used for that.
    queue = csms.register_charger(cp)
    await queue.get()


async def create_websocket_server(csms: CentralSystem):
    handler = partial(on_connect, csms=csms)
    return await websockets.serve(handler, "0.0.0.0", 9000, subprotocols=["ocpp1.6"])


async def create_http_server(csms: CentralSystem):
    app = web.Application()
    app.add_routes([web.post("/", change_config)])
    app.add_routes([web.post("/disconnect", disconnect_charger)])

    # Put the CSMS in the app so it can be accessed from request handlers.
    # https://docs.aiohttp.org/en/stable/faq.html#where-do-i-put-my-database-connection-so-handlers-can-access-it
    app["csms"] = csms

    # https://docs.aiohttp.org/en/stable/web_advanced.html#application-runners
    runner = web.AppRunner(app)
    await runner.setup()
    site = web.TCPSite(runner, "localhost", 8080)
    return site


async def main():
    csms = CentralSystem()
    websocket_server = await create_websocket_server(csms)
    http_server = await create_http_server(csms)

    await asyncio.wait([websocket_server.wait_closed(), http_server.start()])


if __name__ == "__main__":
    asyncio.run(main())
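One way to get live data to the React and Flutter front ends (a minimal sketch, not part of the original code): let the CSMS handle StatusNotification and MeterValues and fan each incoming payload out to per-charger subscriber queues, which an aiohttp WebSocket route then streams to the browser or the app. The handler names, the subscribers registry and the /ws/{id} route below are illustrative assumptions.

import json
from collections import defaultdict

# Hypothetical in-memory fan-out registry: charge point id -> set of subscriber queues.
subscribers = defaultdict(set)


def publish(charge_point_id, event: dict):
    """ Push an event to every frontend currently watching this charger. """
    for q in subscribers[charge_point_id]:
        q.put_nowait(event)


class ChargePoint(cp):
    # ... BootNotification handler and change_configuration() from above ...

    @on(Action.StatusNotification)
    def on_status_notification(self, connector_id, error_code, status, **kwargs):
        publish(self.id, {"type": "StatusNotification",
                          "connector_id": connector_id, "status": status})
        return call_result.StatusNotificationPayload()

    @on(Action.MeterValues)
    def on_meter_values(self, connector_id, meter_value, **kwargs):
        publish(self.id, {"type": "MeterValues",
                          "connector_id": connector_id, "meter_value": meter_value})
        return call_result.MeterValuesPayload()


async def websocket_handler(request):
    """ aiohttp WebSocket endpoint the React/Flutter clients connect to. """
    charge_point_id = request.match_info["id"]
    ws = web.WebSocketResponse()
    await ws.prepare(request)

    q = asyncio.Queue()
    subscribers[charge_point_id].add(q)
    try:
        while True:
            event = await q.get()
            await ws.send_str(json.dumps(event))
    finally:
        subscribers[charge_point_id].discard(q)
    return ws


# Registered next to the other routes in create_http_server():
#   app.add_routes([web.get("/ws/{id}", websocket_handler)])

With something like this, the React app (and the Flutter app, after its /remoteStart call returns 200) would open ws://<csms-host>:8080/ws/<charge_point_id> and render whatever events arrive. Persisting BootNotification and StatusNotification in the database is only needed if you also want history; it is not required just to push live values.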

Related

EV Driver Authorization using RFID in OCPP protocol

I am new to the OCPP protocol and I am building a Python OCPP server that can communicate with an EV charger. This server has the feature "Authenticate user via RFID". I have created 2 Python files, Charge_Stattion.py:
# Charge_Stattion.py
import asyncio
import logging
import websockets

from ocpp.v201 import call
from ocpp.v201 import ChargePoint as cp

logging.basicConfig(level=logging.INFO)


class ChargePoint(cp):
    async def authentication(self):
        request = call.AuthorizePayload(
            id_token={'id_token': 'AA12345',
                      'type': 'ISO14443'})
        response = await self.call(request)
        print(response)


async def main():
    async with websockets.connect(
            'ws://localhost:9000/CP_1',
            subprotocols=['ocpp2.0.1']
    ) as ws:
        cp = ChargePoint('CP_1', ws)
        await asyncio.gather(cp.start(), cp.authentication())


if __name__ == '__main__':
    asyncio.run(main())
and Central_System.py:
# Central_System.py
import asyncio
import logging
import websockets
from datetime import datetime

from ocpp.routing import on
from ocpp.v201 import ChargePoint as cp
from ocpp.v201 import call_result
from ocpp.v201.enums import AuthorizationStatusType, Action

logging.basicConfig(level=logging.INFO)


class ChargePoint(cp):
    @on('BootNotification')
    async def on_boot_notification(self, charging_station, reason, **kwargs):
        return call_result.BootNotificationPayload(
            current_time=datetime.utcnow().isoformat(),
            interval=10,
            status='Accepted'
        )

    @on(Action.Authorize)
    async def on_authorize(self, id_token):
        return call_result.AuthorizePayload(
            id_token_info={"status": AuthorizationStatusType.accepted})


async def on_connect(websocket, path):
    """ For every new charge point that connects, create a ChargePoint
    instance and start listening for messages.
    """
    try:
        requested_protocols = websocket.request_headers[
            'Sec-WebSocket-Protocol']
    except KeyError:
        logging.info("Client hasn't requested any Subprotocol. "
                     "Closing Connection")
        return await websocket.close()

    if websocket.subprotocol:
        logging.info("Protocols Matched: %s", websocket.subprotocol)
    else:
        # In the websockets lib, if no subprotocols are supported by the
        # client and the server, it proceeds without a subprotocol,
        # so we have to manually close the connection.
        logging.warning('Protocols Mismatched | Expected Subprotocols: %s,'
                        ' but client supports %s | Closing connection',
                        websocket.available_subprotocols,
                        requested_protocols)
        return await websocket.close()

    charge_point_id = path.strip('/')
    cp = ChargePoint(charge_point_id, websocket)
    logging.info("abcxyz: %s", charge_point_id)

    await cp.start()


async def main():
    server = await websockets.serve(
        on_connect,
        '0.0.0.0',
        9000,
        subprotocols=['ocpp2.0.1']
    )
    logging.info("WebSocket Server Started")
    await server.wait_closed()


if __name__ == '__main__':
    asyncio.run(main())
Following the document here, I understand that the user must present an RFID card first, then the Charge Station will send an AuthorizeRequest containing the idToken from this RFID card to the Central System, and the Central System will send an AuthorizeResponse back to the Charge Station. In the 2 Python files above, I have implemented the process where the Charge Station sends an AuthorizeRequest to the Central System and the Central System sends back an AuthorizeResponse to the Charge Station. This picture demonstrates these processes.
My questions are:
How can I implement the process where the EV driver presents an RFID card to the Charge Station? Should I create 2 other Python files which represent the EV driver and the RFID card?
How can I know whether the Central System accepts this authentication, and how do I implement this?
Any help will be appreciated.
This is a simple flow
The EV owner registers himself as an EV client on some server, where the server provides a unique id, like "unique-client-id", and stores this value as an idTag in a database.
When this client goes to charge at some charging station, he inputs that unique id into the charging device, which sends the id in the following form over the websocket connection:
[2, "unique-id-representing-the-current-msg", "Authorize", {"idTag": "unique-client-id"}]
The OCPP server receives that message and looks up the received idTag in the database; if it exists, it sends back a response like the one below:
[3, "unique-id-representing-the-current-msg", {"idTagInfo": {"status": "Accepted"}}]
I recommend using sanic framework since it has both websocket and http support by default.
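A minimal sketch of that server-side lookup, assuming the ocpp v201 Central System from the question above and a hypothetical known_tags set standing in for the database:

from ocpp.routing import on
from ocpp.v201 import call_result
from ocpp.v201.enums import Action, AuthorizationStatusType

# Hypothetical stand-in for the database of registered id tokens.
known_tags = {"AA12345", "unique-client-id"}


class ChargePoint(cp):  # cp is ocpp.v201.ChargePoint, as imported in Central_System.py

    @on(Action.Authorize)
    async def on_authorize(self, id_token, **kwargs):
        # id_token arrives as a dict like {'id_token': 'AA12345', 'type': 'ISO14443'}
        status = (AuthorizationStatusType.accepted
                  if id_token.get("id_token") in known_tags
                  else AuthorizationStatusType.invalid)
        return call_result.AuthorizePayload(id_token_info={"status": status})

On the charge point side, presenting the RFID card is simply whatever triggers cp.authentication() in Charge_Stattion.py (a button press, an input() prompt, or a test script); you usually do not need separate files for the driver or the card, and the AuthorizeResponse printed by the client already tells you whether the Central System accepted the token.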

Python-socketio: How to connect one client to multiple servers?

There is plenty of information and there are examples when it comes to connecting many clients to one server. But I was wondering, is there a way for one client to connect to two servers at the same time? Here is my situation:
I have a python client that brings data from one server, analyzes it and sends an appropriate command to another server. There seems to be less information on this issue, if I may call it that.
Here is how I tried approaching the issue. First, I made a socketio.Client class, which would enable me to create two client instances. It did not work. What am I missing here?
import socketio


class SocketClient(socketio.Client):
    def __init__(self, server_ip):
        self.server_ip = server_ip  # server's ip address
        self.sio = socketio.Client(logger=True)

    def connect(self):
        self.sio.connect(self.server_ip, namespaces=['/my_namespace'])

    @self.sio.event
    def connect_error(self, error):
        print('connection error=> ', error)

    @self.sio.event
    def my_event(self, server_response):
        # Here I have to take the server_response
        # and send it to another server.
        # How do I do it?
        # self.sio.emit('some_event', server_response)
        # that does not work, as I do not have the second client instance
        pass

    @self.sio.event
    def my_other_event(self, server_response):
        # process the response
        pass


# initiate the two client instances:
if __name__ == '__main__':
    first_client = SocketClient('http://192.168.100.103')
    second_client = SocketClient('http://192.168.100.104')

    first_client.connect()
    second_client.connect()
After my first try did not work, I ditched the class-instance approach and went for a functional one:
import socketio

first_client = socketio.Client()
second_client = socketio.Client()


@second_client.event
@first_client.event
def connect():
    print(f'connected with id {first_client.sid}')


@second_client.event
@first_client.event
def connect_error(e):
    print('Error=> ', e)


@second_client.event
@first_client.event
def disconnect():
    print('disconnected')


@first_client.event
def my_event(server_response):
    # Here I have to take the server_response
    # and send it to another server.
    second_client.emit('some_event', server_response)  # is it even possible?


@second_client.event
def my_other_event(server_response):
    # handle the response
    pass


if __name__ == '__main__':
    first_client.connect('http://192.168.100.103')
    second_client.connect('http://192.168.100.104')
In both cases, I am technically creating two clients. I might as well make them into separate files like first_client.py and second_client.py.
See where I am going with this? The goal is to get the data from server one, process it, and send it to the other server, ideally with one client. Please forgive me if I am missing something very obvious here. Any help is much appreciated.
P.S. both servers are up and running without any problem.
I am using a Namespace to solve this problem.
First, make a Namespace class:
import asyncio
import socketio


class MyCustomNamespace(socketio.AsyncClientNamespace):
    async def on_connect(self):
        print("I'm connected!")

    async def on_disconnect(self):
        print("I'm disconnected!")

    async def on_my_event(self, data):
        await self.emit('my_response', data)

    async def on_message(self, data):
        print("[echo]:", data)


class mysio:
    def __init__(self) -> None:
        self.sio = socketio.AsyncClient(logger=False, engineio_logger=False)
        self.sio.register_namespace(MyCustomNamespace('/'))  # bind
Then make 2 clients.
Since wait() will block the process, I use create_task().
async def main():
    async def fun1():
        sio1 = mysio().sio
        await sio1.connect('http://192.168.3.85:11451')
        await sio1.emit('message', b'11111110001')
        await sio1.wait()

    async def fun2():
        sio2 = mysio().sio
        await sio2.connect('http://localhost:8080')
        await sio2.emit('message', 'from sio2')
        await sio2.wait()

    tasks = [asyncio.create_task(fun1()), asyncio.create_task(fun2())]
    await asyncio.wait(tasks)


asyncio.run(main())
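To actually relay what the first server sends on to the second server, which is what the question asks for, one possible sketch (the RelayNamespace class, event names and URLs are illustrative, not from the answer above) is to hand the first client's namespace a reference to the second client:

import asyncio
import socketio


class RelayNamespace(socketio.AsyncClientNamespace):
    def __init__(self, namespace, target_client):
        super().__init__(namespace)
        self.target = target_client  # the client connected to the second server

    async def on_my_event(self, data):
        # Forward whatever server one pushes to server two.
        await self.target.emit('some_event', data)


async def main():
    first_client = socketio.AsyncClient()
    second_client = socketio.AsyncClient()
    first_client.register_namespace(RelayNamespace('/', second_client))

    await second_client.connect('http://192.168.100.104')
    await first_client.connect('http://192.168.100.103')
    await asyncio.gather(first_client.wait(), second_client.wait())


asyncio.run(main())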

How to handle a bidirectional grpc stream asynchronously

I have a game, or for that matter any remote user interface, with a server and multiple clients which should communicate via the network.
Both client and server should be able to send updates asynchronously.
This seems to be a very natural service definition, which lets grpc manage sessions.
syntax = "proto3";
package mygame;
service Game {
rpc participate(stream ClientRequest) returns (ServerResponse);
}
message ClientRequest {
// Fields for the initial request and further updates
}
message ServerResponse {
// Game updates
}
Implementing the client is trivial (although the following code is obviously incomplete and simplified).
import queue

import grpc
import game_pb2_grpc


class Client:
    def __init__(self):
        self.channel = grpc.insecure_channel("localhost:50051")
        self.stub = game_pb2_grpc.GameStub(self.channel)
        self.output_queue = queue.Queue()

    def output_iter(self):
        while True:
            client_output_msg = self.output_queue.get()
            self.output_queue.task_done()
            yield client_output_msg

    def do_work(self):
        for response in self.stub.participate(self.output_iter()):
            print(response)  # handle update


with grpc.insecure_channel("localhost:50051") as channel:
    client = Client()
    client.do_work()
What seems hard is implementing the server without blocking.
import queue
from concurrent.futures import ThreadPoolExecutor

import grpc
import game_pb2_grpc


class Game(game_pb2_grpc.GameServicer):
    def __init__(self):
        self.pending_events = queue.Queue()

    def participate(self, request_iter, context):
        for client_update in request_iter:
            print(client_update)

            # !!!
            # The next bit won't happen if the client has no updates
            # !!!
            try:
                while True:
                    server_update = self.pending_events.get_nowait()
                    yield server_update
            except queue.Empty:
                pass


server = grpc.server(ThreadPoolExecutor(max_workers=100))
game_pb2_grpc.add_GameServicer_to_server(Game(), server)
server.add_insecure_port("[::]:50051")
server.start()
server.wait_for_termination()
As commented in the code, the client won't receive updates if it doesn't constantly send requests.
Maybe an async approach would be better, which might also solve other problems in this design.
PS: This issue has been solved with grpc in Go here; however, I don't see how to translate this to Python's grpc implementation.
I would be very happy about any help!
I was finally able to get it working using the Python asyncio API.
The basic idea is to decouple reading and writing into two coroutines using asyncio.create_task.
For anybody interested, here is a solution.
import asyncio

import grpc
import game_pb2
import game_pb2_grpc


class Game(game_pb2_grpc.GameServicer):
    async def read_client_requests(self, request_iter):
        async for client_update in request_iter:
            print("Received message from client:", client_update, end="")

    async def write_server_responses(self, context):
        for i in range(15):
            await context.write(game_pb2.ServerResponse(dummy_value=str(i)))
            await asyncio.sleep(0.5)

    async def participate(self, request_iter, context):
        read_task = asyncio.create_task(self.read_client_requests(request_iter))
        write_task = asyncio.create_task(self.write_server_responses(context))

        await read_task
        await write_task


async def serve():
    server = grpc.aio.server()
    game_pb2_grpc.add_GameServicer_to_server(Game(), server)
    server.add_insecure_port("[::]:50051")
    await server.start()
    await server.wait_for_termination()


if __name__ == "__main__":
    asyncio.run(serve())
Note that instead of the write coroutine, a yield would also be sufficient.
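For illustration, a sketch of that yield-based variant (assuming the same generated game_pb2 modules and dummy_value field as above): the read side still runs in a background task, while participate itself is an async generator that yields server updates.

class Game(game_pb2_grpc.GameServicer):
    async def read_client_requests(self, request_iter):
        async for client_update in request_iter:
            print("Received message from client:", client_update, end="")

    async def participate(self, request_iter, context):
        # Read client messages concurrently...
        read_task = asyncio.create_task(self.read_client_requests(request_iter))

        # ...while yielding server updates straight from this handler.
        for i in range(15):
            yield game_pb2.ServerResponse(dummy_value=str(i))
            await asyncio.sleep(0.5)

        await read_task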

Python gRPC cancel unary-stream call from client side

Using Python gRPC, I would like to be able to cancel a long-running unary-stream call from the client side, when a threading.Event is set.
def application(stub: StreamsStub, event: threading.Event):
    stream = stub.Application(ApplicationStreamRequest())
    try:
        for resp in stream:
            print(resp)
    except grpc.RpcError as e:
        print(e)
For the time being I am cancelling the stream using the channel.close() method, but of course this closes all connections rather than just this stream.
Could someone suggest how I can use the event to cancel the stream iterator? Thanks
Below is some code for a gRPC UnaryStream call. The server sends an unending number of replies, leaving the client to decide when to stop receiving them.
Instead of using a counter, you can have a thread go off, do some work, and set an event; check that event before calling cancel(), rather than checking the counter.
Note: using Python 2.7
Protofile:
syntax = "proto3";
package my_package;
service HeartBeat {
rpc Beats(Counter) returns (stream Counter) {}
}
message Counter {
int32 counter = 1;
}
Client:
from __future__ import print_function

import grpc
import heartbeat_pb2
import heartbeat_pb2_grpc


def get_beats(stub, channel):
    try:
        result_iterator = stub.Beats(heartbeat_pb2.Counter(counter=0))
        for result in result_iterator:
            print("Count: {}".format(result.counter))
            if result.counter >= 3:  # We only want 3 'beats'
                result_iterator.cancel()
    except grpc.RpcError as rpc_error:
        if rpc_error.code() == grpc.StatusCode.CANCELLED:
            pass  # Otherwise, a traceback is printed


def run():
    with grpc.insecure_channel('localhost:9999') as channel:
        stub = heartbeat_pb2_grpc.HeartBeatStub(channel)
        get_beats(stub, channel)


if __name__ == '__main__':
    run()
Server:
from concurrent import futures
import grpc
from proto_generated import heartbeat_pb2
from proto_generated import heartbeat_pb2_grpc
import time


class HeartBeatServicer(heartbeat_pb2_grpc.HeartBeatServicer):

    def Beats(self, request, context):
        # Not required, only to show sending the server a message
        print("Beats: {}".format(request.counter))

        def response_message():
            i = 0
            while context.is_active():
                print("Sending {}".format(i))
                response = heartbeat_pb2.Counter(counter=i)
                i += 1
                time.sleep(1)  # Simulate doing work
                yield response

        return response_message()


def serve():
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
    heartbeat_pb2_grpc.add_HeartBeatServicer_to_server(
        HeartBeatServicer(), server)
    server.add_insecure_port('[::]:9999')
    server.start()
    server.wait_for_termination()


if __name__ == '__main__':
    serve()
The _Rendezvous object returned by an RPC call implements grpc.RpcError, grpc.Future, and grpc.Call, so cancelling the stream is as simple as calling stream.cancel() (from the grpc.Future interface).
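Tying that back to the threading.Event in the question, a minimal sketch (stub, ApplicationStreamRequest and the watcher thread follow the question's naming and are illustrative):

import threading

import grpc


def application(stub, event: threading.Event):
    stream = stub.Application(ApplicationStreamRequest())

    # Watcher thread: once the event is set, cancel just this call, which makes
    # the iteration below raise an RpcError with StatusCode.CANCELLED.
    def cancel_on_event():
        event.wait()
        stream.cancel()

    threading.Thread(target=cancel_on_event, daemon=True).start()

    try:
        for resp in stream:
            print(resp)
    except grpc.RpcError as e:
        if e.code() != grpc.StatusCode.CANCELLED:
            raise  # a real error; CANCELLED is expected when the event fires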

How to do a simple Pika SelectConnection to send a message, in python?

I am trying to convert my code to send rabbitmq messages via Pika instead. I am having a lot of trouble understanding how to send a simple message using an asynchronous connection (such as SelectConnection).
In my old code, which uses the amqp library, I simply declare a class like this:
import amqp as amqp


class MQ():

    mqConn = None
    channel = None

    def __init__(self):
        self.connect()

    def connect(self):
        if self.mqConn is None:
            self.mqConn = amqp.Connection(host="localhost", userid="dev",
                                          password="dev", virtual_host="/", insist=False)
            self.channel = self.mqConn.channel()
        elif not self.mqConn.connected:
            self.mqConn = amqp.Connection(host="localhost", userid="dev",
                                          password="dev", virtual_host="/", insist=False)
            self.channel = self.mqConn.channel()

    def sendMQ(self, message):
        self.connect()
        lMessage = amqp.Message(message)
        self.channel.basic_publish(lMessage, exchange="DevMatrixE",
                                   routing_key="dev_matrix_q")
And then elsewhere in my code I call sendMQ("this is my message"), and then the code continues. I do not need to listen for acknowledgements etc.
Could someone please write a simple class utilizing pika and SelectConnection that would also work to just send a message using sendMQ("this is my message")? I've looked at the pika examples, but I don't know how to get around the ioloop and KeyboardInterrupt. I guess I'm just not sure how to make my code continue to run without all these try/excepts... Also, I'm not exactly sure how I can pass my message on through all the callbacks...
Any help is appreciated!
Thanks.
The whole thing is callback driven, as it is an async way of doing things. The async consumer is easy to understand: we can get the message by providing a callback function. However, the publisher part is a bit difficult to understand, at least for a beginner.
Usually we need a Queue to do the communication, and the publisher gets data from it periodically.
The key to using SelectConnection is to register your publish-message function with the event loop, which can be done with connection.add_timeout. After you are done with the publish, register the next round of your publish.
The next question is where to put the initial registration; it can be done in the channel-open callback.
Below is a code snippet for better understanding. Be aware that it is not production ready, because it only publishes messages at a maximum rate of 10 per second. You need to adjust the publish interval and publish more messages per callback.
class MQ(object):

    def __init__(self, queue):
        self.queue = queue

    def on_channel_open(self, chn):
        self.channel = chn
        self.connection.add_timeout(0.1, self.schedule_next_message)

    def schedule_next_message(self):
        try:
            msg = self.queue.get(True, 0.01)
            self.channel.basic_publish('YOUR EXCHANGE', 'YOUR ROUTING KEY', msg)
        except Queue.Empty:
            pass
        self.connection.add_timeout(0.1, self.schedule_next_message)

    def on_open(self, conn):
        self.connection = conn
        self.connection.channel(on_open_callback=self.on_channel_open)

    def run(self):
        # create a connection
        self.connection = pika.SelectConnection(
            pika.ConnectionParameters(heartbeat=600, host=args.mq_ip),
            self.on_open)
        try:
            self.connection.ioloop.start()
        except Exception:
            print("exception in publisher")
            self.connection.close()
            self.connection.ioloop.start()
Put MQ(queue).run() in a separate thread, and whenever you want to send a message to the MQ, just put it into the queue object.
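A minimal usage sketch of that "separate thread" idea, assuming the MQ class above (and the Python 2 style Queue module that its Queue.Empty handling implies):

import threading
import Queue  # Python 2 naming, to match the Queue.Empty used above

q = Queue.Queue()
mq = MQ(q)

publisher_thread = threading.Thread(target=mq.run)
publisher_thread.daemon = True
publisher_thread.start()

# Elsewhere in your code, the equivalent of the old sendMQ("this is my message"):
q.put("this is my message")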
I updated the code from TerrenceSun to work with the latest version of pika (currently v1.3.0) and also added a thread so everything works in a self-contained class:
(Note: I had to use call_later as Andrew suggested.)
# async_messenger.py : simple asynchronous rabbitmq message producer
# based on https://stackoverflow.com/questions/30332320/how-to-do-a-simple-pika-selectconnection-to-send-a-message-in-python

import os
import sys
import time
import traceback
import logging
import json
from optparse import OptionParser

import pika
import queue
import threading

'''
USAGE:
python async_messenger.py --debuglevel=1
cat ./async_messenger.log
'''

logger = logging.getLogger(__name__)


class AsyncMessenger:
    def __init__(self, debuglevel=0, queue=queue.Queue()):
        self.debuglevel = debuglevel
        if self.debuglevel > 0:
            print('AsyncMessenger: init debuglevel:', debuglevel)
        self.credentials = pika.PlainCredentials('guest', 'guest')
        self.parameters = pika.ConnectionParameters(host='localhost',
                                                    port=5672,
                                                    virtual_host='/',
                                                    credentials=self.credentials,
                                                    heartbeat=600)
        self.queue = queue
        self.exchange = 'YOUR EXCHANGE'
        self.routing_key = 'YOUR ROUTING KEY'
        self.msgThread = None

    # self.start -> (creates thread) -> self.run
    def run(self):
        print('AsyncMessenger: run')
        self.connection = pika.SelectConnection(parameters=self.parameters,
                                                on_open_callback=self.on_open)
        try:
            print('AsyncMessenger: run: connection.ioloop.start')
            self.connection.ioloop.start()
        except Exception as e:
            print("exception in publisher:", format(e))
            # traceback.print_exc(file=sys.stdout)
            self.connection.close()
            self.connection.ioloop.start()

    # run -> on_open
    def on_open(self, conn):
        print('AsyncMessenger: on_open')
        self.connection = conn
        self.connection.channel(on_open_callback=self.on_channel_open)

    # run -> on_open -> on_channel_open
    def on_channel_open(self, chn):
        print('AsyncMessenger: on_channel_open')
        self.channel = chn
        self.connection.ioloop.call_later(0.1, self.schedule_next_message)

    # run -> on_open -> on_channel_open -> schedule_next_message
    def schedule_next_message(self):
        if (self.debuglevel > 1): print('AsyncMessenger: schedule_next_message')
        try:
            msg = self.queue.get(True, 0.01)
            print('AsyncMessenger: queue msg:', msg)
            self.channel.basic_publish(self.exchange, self.routing_key, msg)
        except queue.Empty:
            pass
        self.connection.ioloop.call_later(0.1, self.schedule_next_message)

    def close(self):
        print('AsyncMessenger: close')
        self.connection.ioloop.stop()
        self.connection.close()

    # start our own self contained thread in class
    def start(self):
        print('AsyncMessenger: start')

        # function for worker thread
        def message_worker():
            self.run()

        # Turn-on the worker thread.
        self.msgThread = threading.Thread(target=message_worker, daemon=True)
        # start the threads
        self.msgThread.start()


def main():
    parser = OptionParser()
    parser.add_option("--debuglevel", action="store", type="int", \
                      nargs=1, dest="debuglevel", default=0)
    (options, args) = parser.parse_args()
    debuglevel = options.debuglevel

    log_file = './async_messenger.log'
    logging.basicConfig(filename=log_file, level=logging.INFO, format= \
                        '%(name)s : %(asctime)s : Line: %(lineno)d - %(levelname)s :: %(message)s', \
                        datefmt='%m/%d/%Y %I:%M:%S %p')
    logger = logging.getLogger(__name__)

    q = queue.Queue()
    asyncMessenger = AsyncMessenger(debuglevel, q)

    # Send task requests to the worker.
    for item in range(10):
        print('adding queue item:', item)
        # put a str so each item has len
        q.put(str(item))

    asyncMessenger.start()

    # keep checking queue, exit when empty
    while (q.qsize() > 0):
        time.sleep(1)

    asyncMessenger.close()

    # blocking wait for the threads to complete
    # Note: thread will wait forever unless we use: connection.ioloop.stop()
    asyncMessenger.msgThread.join()
    print('All work completed')


if __name__ == '__main__':
    main()
If all goes well, your output should look like this:
python async_messenger.py --debuglevel=1
AsyncMessenger: init debuglevel: 1
adding queue item: 0
adding queue item: 1
adding queue item: 2
adding queue item: 3
adding queue item: 4
adding queue item: 5
adding queue item: 6
adding queue item: 7
adding queue item: 8
adding queue item: 9
AsyncMessenger: start
AsyncMessenger: run
AsyncMessenger: run: connection.ioloop.start
AsyncMessenger: on_open
AsyncMessenger: on_channel_open
AsyncMessenger: queue msg: 0
AsyncMessenger: queue msg: 1
AsyncMessenger: queue msg: 2
AsyncMessenger: queue msg: 3
AsyncMessenger: queue msg: 4
AsyncMessenger: queue msg: 5
AsyncMessenger: queue msg: 6
AsyncMessenger: queue msg: 7
AsyncMessenger: queue msg: 8
AsyncMessenger: queue msg: 9
AsyncMessenger: close
All work completed
As a first approach, I recommend you start with the pub/sub examples provided at the end of this post. Once you understand this simple example, follow the tutorial linked right before the code blocks. The tutorial has 6 different use cases, with Python examples; the first 5 steps will make the way it works clear. You should understand the concepts of exchange (the entity that routes messages to each queue), binding key (the key used to connect an exchange and a queue), routing key (the key sent along with the message from the publisher and used by the exchange to route the message to one queue or another) and queue (a buffer that can store messages, can have one or more subscribers, and can receive messages from more than one exchange based on different binding keys). Besides, there is more than one type of exchange (fanout, topic (this one is probably the one you need), ...).
If this all sounds new, please follow the tutorial provided by RabbitMQ:
https://www.rabbitmq.com/tutorials/tutorial-one-python.html
pub.py:
#!/usr/bin/env python
import pika

connection = pika.BlockingConnection(pika.ConnectionParameters(
    host='localhost'))
channel = connection.channel()

channel.queue_declare(queue='hello')

channel.basic_publish(exchange='',
                      routing_key='hello',
                      body='Hello World!')
print " [x] Sent 'Hello World!'"
connection.close()
sub.py:
#!/usr/bin/env python
import pika

connection = pika.BlockingConnection(pika.ConnectionParameters(
    host='localhost'))
channel = connection.channel()

channel.queue_declare(queue='hello')

print ' [*] Waiting for messages. To exit press CTRL+C'

def callback(ch, method, properties, body):
    print " [x] Received %r" % (body,)

channel.basic_consume(callback,
                      queue='hello',
                      no_ack=True)

channel.start_consuming()
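To illustrate the exchange, binding key and routing key concepts described above, here is a small sketch of the topic-exchange variant; the exchange name and keys are made up, and the calls are written in current pika / Python 3 style:

#!/usr/bin/env python
import pika

connection = pika.BlockingConnection(pika.ConnectionParameters(host='localhost'))
channel = connection.channel()

# A topic exchange routes each message to the queues whose binding key matches its routing key.
channel.exchange_declare(exchange='topic_logs', exchange_type='topic')

# Subscriber side: a temporary exclusive queue bound with a wildcard binding key.
result = channel.queue_declare(queue='', exclusive=True)
queue_name = result.method.queue
channel.queue_bind(exchange='topic_logs', queue=queue_name, routing_key='sensor.*')

# Publisher side: the routing key decides which bound queues receive the message.
channel.basic_publish(exchange='topic_logs',
                      routing_key='sensor.temperature',
                      body='Hello World!')
print(" [x] Sent 'Hello World!' with routing key 'sensor.temperature'")

connection.close()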
