I am struggling to solve my issue and hope someone from the community can help me here.
Our requirement is locked and can't be changed, as the producer publishing to the queues is controlled by a different team.
The producer, which is written in Java, declares three queues (TASK, RESPONSE, TASK_RESPONSE) and listens on them with the help of the Spring framework.
A HashMap is sent to the TASK and TASK_RESPONSE queues from the Java AMQP client (the producer).
We need to consume these HashMaps and send the responses as follows:
If a message from the TASK queue is processed, the response needs to be sent on the RESPONSE queue incrementally.
If a message from the TASK_RESPONSE queue is processed, the response needs to be sent on the TASK_RESPONSE queue itself incrementally (RPC mode).
Now, we need to consume and publish in Python, since we need to do some background processing on the tasks.
I tried to work with Celery and Dramatiq, but was not able to figure out how it can be done with them, so I tried writing it myself (with the help of tutorials available online).
The problem is, I am able to consume the messages but not able to send replies to the RESPONSE queue. Here is my code.
from collections import OrderedDict
from concurrent.futures import ThreadPoolExecutor
import pika
import datetime
import logging
import json
from logging import StreamHandler
from time import sleep
from random import randint
from pika import SelectConnection
from settings import *
logging.basicConfig(handlers=[StreamHandler()], level=logging.INFO, format=logging.BASIC_FORMAT)
_logger = logging.getLogger(__name__)
class QueueConsumer(object):
    """The consumer class to manage connections to the AMQP server/queue"""

    def __init__(self, queue, logger, parameters, thread_id=0):
        self.channel = None
        self.connection = None
        self.queue_name_task = queue['task']
        self.queue_name_response = queue['response']
        self.logger = logger
        self.consumer_id = 'Consumer Thread: %d' % (thread_id,)
        self.parameters = pika.ConnectionParameters(**parameters)

    def consume(self):
        try:
            self.connection = SelectConnection(parameters=self.parameters,
                                               on_open_callback=self._on_connected)
            self.connection.ioloop.start()
        except Exception as e:
            self.logger.error('{} {}'.format(self.consumer_id, str(e)))
            self.connection.close()
            self.connection.ioloop.start()

    def _on_connected(self, connection):
        connection.channel(on_open_callback=self._on_channel_open)

    def _on_channel_open(self, channel):
        self.channel = channel
        try:
            # Declare Task Queue
            self.channel.queue_declare(queue=self.queue_name_task,
                                       exclusive=False,
                                       durable=True,
                                       auto_delete=False,
                                       callback=self._on_queue_declared)
            self.logger.info("{} Opened Channel....".format(self.consumer_id))
            # Declare Task Response Queue
            self.channel.queue_declare(queue=self.queue_name_response,
                                       exclusive=False,
                                       durable=True,
                                       auto_delete=False)
            self.logger.info("{} Opened Channel....".format(self.consumer_id))
        except Exception as e:
            self.logger.error('{} {}'.format(self.consumer_id, str(e)))

    def _on_queue_declared(self, frame):
        self.logger.debug('{} ... declaring queue'.format(self.consumer_id))
        self.channel.basic_qos(prefetch_count=1)
        try:
            self.channel.basic_consume(queue=self.queue_name_task,
                                       on_message_callback=self.handle_delivery,
                                       auto_ack=True)
            self.logger.info("{} Declared queue...".format(self.consumer_id))
        except Exception as e:
            self.logger.error('{} crashing:--> {}'.format(self.consumer_id, str(e)))

    def handle_delivery(self, channel, method, header, body):
        try:
            start_time = datetime.datetime.now()
            _logger.info("Received...")
            _logger.info("Content: %s" % body)
            req = json.loads(self.decode(body))
            # Do something
            sleep(randint(10, 20))
            time_taken = datetime.datetime.now() - start_time
            log_msg = "[{}] Time Taken: {}.{}".format(req['bar']['baz'], time_taken.seconds, time_taken.microseconds)
            _logger.info(log_msg)
            # Publish the result to another queue.
            try:
                self.channel.basic_publish(exchange='',
                                           routing_key=self.queue_name_response,
                                           properties=pika.BasicProperties(),
                                           body=log_msg)
                _logger.info("Message Published...\t(%s)" % self.queue_name_response)
            except Exception as e:
                self.logger.error('{} Message publishing failed:--> {}'.format(self.consumer_id, str(e)))
        except Exception as err:
            _logger.exception(err)

    def decode(self, body):
        try:
            _body = body.decode('utf-8')
        except AttributeError:
            _body = body
        return _body


if __name__ == "__main__":
    pika_parameters = OrderedDict([
        ('host', TF_BROKER_HOST),
        ('port', TF_BROKER_PORT),
        ('virtual_host', TF_BROKER_VHOST)
    ])
    queue = {'task': TF_IAAS_TASK_QUEUE, 'response': TF_IAAS_REPLY_QUEUE}
    try:
        with ThreadPoolExecutor(max_workers=TF_IAAS_THREAD_SIZE, thread_name_prefix=TF_IAAS_THREAD_PREFIX) as executor:
            start = 1
            for thread_id in range(start, (TF_IAAS_THREAD_SIZE + start)):
                executor.submit(QueueConsumer(queue, _logger, pika_parameters, thread_id).consume)
    except Exception as err:
        _logger.exception(err)
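For the TASK_RESPONSE (RPC mode) side of the requirement, AMQP replies are conventionally routed through the reply_to and correlation_id message properties rather than a hard-coded queue name. This is only a minimal sketch of that pattern, not part of the code above; the handler and process names are hypothetical:

def handle_rpc_delivery(self, channel, method, header, body):
    result = self.process(body)  # hypothetical processing step
    # Publish to the queue named in the incoming reply_to property and echo
    # back the correlation_id so the Java side can match response to request.
    channel.basic_publish(exchange='',
                          routing_key=header.reply_to,
                          properties=pika.BasicProperties(correlation_id=header.correlation_id),
                          body=result)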
Publish Messages On RabbitMQ
import pika
import json
import random
import datetime
from faker import Faker
from random import randint
fake = Faker('en_US')
if __name__ == '__main__':
    try:
        connection = pika.BlockingConnection(pika.ConnectionParameters(host='localhost'))
        channel = connection.channel()
        channel.queue_declare(queue='tf_task', durable=True)
        started_at = datetime.datetime.now()
        properties = pika.BasicProperties(delivery_mode=2)
        for i in range(0, 10000):
            body = {
                'foo': randint(i, i + 100),
                'bar': {
                    'baz': fake.name(),
                    'poo': float(random.randrange(155 + i, 389 + i)) / 100
                }
            }
            channel.basic_publish(exchange='',
                                  routing_key='tf_task',
                                  body=json.dumps(body),
                                  properties=properties)
            if i % 10000 == 0:
                duration = datetime.datetime.now() - started_at
                print(i, duration.total_seconds())
        print(" [x] Sent 'Hello World!'")
        connection.close()
        now = datetime.datetime.now()
        duration = now - started_at
        print(duration.total_seconds())
    except Exception as e:
        print(e)
I am trying to measure Bluetooth signal strength using the WinRT API in Python via winsdk. My workflow is to measure the Bluetooth signal strength of a device that is already connected to my Windows machine. I followed the guidance from the PyWinRT documentation found here:
Here is my code snippet:
import asyncio
import winsdk.windows.devices.enumeration as e
import winsdk.windows.devices.bluetooth as bl
async def scan():
    sig_strength = "System.Devices.Aep.SignalStrength"
    additionalProperties = [sig_strength]
    watcher = e.DeviceInformation.create_watcher(bl.BluetoothDevice.get_device_selector(), additionalProperties)
    received_queue = asyncio.Queue()

    def added_w(device_watcher, device_info_update):
        if device_info_update.name == "my_device":
            print("found!")
            for value, key in enumerate(device_info_update.properties):
                if key == "System.Devices.Aep.SignalStrength":
                    print("signal strength: {}".format(value))

    def updated_w(device_watcher, device_info_update):
        print("update for {} with kind {}".format(device_info_update.id, device_info_update.kind))

    def removed_w(device_watcher, device_info_update):
        pass

    def stopped_w(device_watcher, device_info_update):
        pass

    received_token = watcher.add_added(
        lambda s, e: event_loop.call_soon_threadsafe(added_w, s, e)
    )
    updated_token = watcher.add_updated(
        lambda s, e: event_loop.call_soon_threadsafe(updated_w, s, e)
    )
    removed_token = watcher.add_removed(
        lambda s, e: event_loop.call_soon_threadsafe(removed_w, s, e)
    )
    event_loop = asyncio.get_running_loop()
    stopped_future = event_loop.create_future()

    def handle_stopped(sender, event_args):
        stopped_future.set_result(event_args)

    try:
        print("scanning...")
        watcher.start()

        # this is the consumer for the received event queue
        async def print_received():
            while True:
                event_args = await received_queue.get()
                print(
                    "received:",
                    event_args.bluetooth_address.to_bytes(6, "big").hex(":"),
                    event_args.raw_signal_strength_in_d_bm, "dBm",
                )

        printer_task = asyncio.create_task(print_received())
        # since the print task is an infinite loop, we have to cancel it when we don't need it anymore
        stopped_future.add_done_callback(printer_task.cancel)
        # scan for 30 seconds or until an unexpected stopped event (due to error)
        done, pending = await asyncio.wait(
            [stopped_future, printer_task], timeout=30, return_when=asyncio.FIRST_COMPLETED
        )
        if stopped_future in done:
            print("unexpected stopped event", stopped_future.result().error)
        else:
            print("stopping...")
            watcher.stop()
            await stopped_future
    finally:
        # event handlers are removed in a finally block to ensure we don't leak
        watcher.remove_received(received_token)
        watcher.remove_stopped(handle_stopped)

asyncio.run(scan())
However, I only get a fixed RSSI value of 8 in my print inside the added_w function.
Any help on a potential solution would be greatly appreciated!
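A side note on the snippet above: enumerate() yields (index, item) pairs, so value in added_w is the position of the key in the iteration, not the property value, which would explain a constant printout. A minimal sketch of reading the property value directly, assuming the key is present in the properties mapping:

def added_w(device_watcher, device_info):
    if device_info.name == "my_device":
        # properties maps property names to values
        if "System.Devices.Aep.SignalStrength" in device_info.properties:
            rssi = device_info.properties["System.Devices.Aep.SignalStrength"]
            print("signal strength: {}".format(rssi))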
I have the following code:
from confluent_kafka.admin import AdminClient, NewTopic
a = AdminClient({'bootstrap.servers': 'localhost:9092'})
new_topics = [NewTopic(topic, num_partitions=3, replication_factor=1) for topic in ["topic1", "topic2"]]
fs = a.create_topics(new_topics)
for topic, f in fs.items():
    try:
        f.result()
        print("Topic {} created".format(topic))
    except Exception as e:
        print("Failed to create topic {}: {}".format(topic, e))
Creating the topics worked fine.
This is my producer:
from confluent_kafka import Producer
p = Producer({'bootstrap.servers': 'localhost:9092'})
some_data_source = ["hello", "wuff"]
def delivery_report(err, msg):
    if err is not None:
        print('Message delivery failed: {}'.format(err))
    else:
        print('Message delivered to {} [{}]'.format(msg.topic(), msg.partition()))

for data in some_data_source:
    p.poll(0)
    p.produce('mytopic', data.encode('utf-8'), callback=delivery_report)

p.flush()
Message delivered to mytopic [0]
Message delivered to mytopic [0]
Consumer:
from confluent_kafka import Consumer
c = Consumer({
    'bootstrap.servers': 'localhost:9092',
    'group.id': 'mygroup',
    'auto.offset.reset': 'earliest'
})
c.subscribe(['topic1'])

while True:
    msg = c.poll(1.0)
    print(msg)
    if msg is None:
        continue
    if msg.error():
        print("Consumer error: {}".format(msg.error()))
        continue
    print('Received message: {}'.format(msg.value().decode('utf-8')))

c.close()
When I subscribe to the topic (which works), I only get None every second. Am I doing something wrong here? Does it have something to do with 'group.id': 'mygroup'? Can anyone help me?
Your producer code is writing to the mytopic topic, which doesn't match your create script or what your consumer has subscribed to.
Also, if you don't want it to print None, move the print statement inside the if statement, since the poll function can return None.
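A minimal corrected sketch of the poll loop, assuming the producer is changed to write to topic1 so the topic names line up:

c.subscribe(['topic1'])

try:
    while True:
        msg = c.poll(1.0)
        if msg is None:
            continue  # nothing arrived within the timeout, poll again
        if msg.error():
            print("Consumer error: {}".format(msg.error()))
            continue
        print('Received message: {}'.format(msg.value().decode('utf-8')))
finally:
    c.close()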
As commented, you may also want to try further debugging with CLI tools
I am trying to connect two M5StickC devices to a PC via BLE to upload their sensor data.
I wrote a data acquisition Python script using the bleak library.
The data acquisition rate is very slow when I connect two devices.
How can I improve the data acquisition rate in my script?
I hope to get 20 readings per second.
import asyncio
from bleak import BleakClient
address1 = "D8:A0:1D:55:EE:8A"
UUID1 = "beb5483e-36e1-4688-b7f5-ea07361b26a8"
address2 = "94:B9:7E:93:21:76"
UUID2 = "beb5483e-36e1-4688-b7f5-ea07361b26a2"
async def main():
    client1 = BleakClient(address1)
    client2 = BleakClient(address2)
    print(client1.address)
    print(client2.address)
    await client1.connect()
    # await client2.connect()
    while True:  # very slow when client2 is connected as well
        print(await client1.read_gatt_char(UUID1))
        # print(await client2.read_gatt_char(UUID2))

if __name__ == "__main__":
    loop = asyncio.get_event_loop()
    loop.run_until_complete(main())
Edit:
Thank you for your comments.
I have updated my script following ukBaz's link.
The data upload rate is much improved, but I get data almost exclusively from a single M5StickC; data from the other one arrives only sparsely...
I will ask the bleak maintainers about this.
from bleak import BleakClient
import asyncio
address1 = "D8:A0:1D:55:EE:8A"
UUID1 = "beb5483e-36e1-4688-b7f5-ea07361b26a8"
address2 = "94:B9:7E:93:21:76"
UUID2 = "beb5483e-36e1-4688-b7f5-ea07361b26a2"
def callback(sender, data):
    print(sender, data)

def run(addresses, UUIDs):
    loop = asyncio.get_event_loop()
    tasks = asyncio.gather(*(connect_to_device(address, UUID) for address, UUID in zip(addresses, UUIDs)))
    loop.run_until_complete(tasks)

async def connect_to_device(address, UUID):
    print("starting", address, "loop")
    async with BleakClient(address, timeout=5.0) as client:
        print("connect to", address)
        while True:
            try:
                print(await client.read_gatt_char(UUID))
            except Exception as e:
                print(e)

if __name__ == "__main__":
    run([address1, address2], [UUID1, UUID2])
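Since each read_gatt_char call costs a full request/response round trip over the air, notifications usually achieve higher rates. A sketch using bleak's start_notify, assuming the M5StickC firmware is set up to notify on these characteristics:

import asyncio
from bleak import BleakClient

async def monitor(address, uuid):
    def on_notify(sender, data):
        # called by bleak whenever the peripheral pushes a new value
        print(address, data)

    async with BleakClient(address) as client:
        await client.start_notify(uuid, on_notify)
        await asyncio.sleep(30)  # keep the connection open while notifications arrive
        await client.stop_notify(uuid)

async def main():
    await asyncio.gather(monitor(address1, UUID1), monitor(address2, UUID2))

asyncio.run(main())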
I've written some API code to communicate with a website using WebSocketApp. It works fine on only two PCs. If I put my code on any other PC, the websocket doesn't receive any messages and closes. I've tried a lot of different machines and operating systems, many versions of Python (including the same one that works), and both wireless and wired connections, but nothing changed. There's no error or exception. What can it be?
EDIT: I don't own the website or the server. All other methods send messages and parse the response in on_socket_message.
import requests
import websocket
import time
from threading import Thread
from datetime import datetime
import json
from position import Position
from constants import ACTIVES
class IQOption():
    practice_balance = 0
    real_balance = 0
    server_time = 0
    positions = {}
    instruments_categories = ["cfd", "forex", "crypto"]
    top_assets_categories = ["forex", "crypto", "fx-option"]
    instruments_to_id = ACTIVES
    id_to_instruments = {y: x for x, y in ACTIVES.items()}
    market_data = {}
    binary_expiration_list = {}
    open_markets = {}
    digital_strike_list = {}
    candle_data = []
    latest_candle = 0
    position_id = 0
    quotes = []
    position_id_list = []

    def __init__(self, username, password, host="iqoption.com"):
        self.username = username
        self.password = password
        self.host = host
        self.session = requests.Session()
        self.generate_urls()
        self.socket = websocket.WebSocketApp(self.socket_url,
                                             on_open=self.on_socket_connect,
                                             on_message=self.on_socket_message,
                                             on_close=self.on_socket_close,
                                             on_error=self.on_socket_error)

    def generate_urls(self):
        """Generates Required Urls to operate the API"""
        # https://auth.iqoption.com/api/v1.0/login
        self.api_url = "https://{}/api/".format(self.host)
        self.socket_url = "wss://{}/echo/websocket".format(self.host)
        self.login_url = self.api_url + "v1.0/login"
        self.profile_url = self.api_url + "profile"
        self.change_account_url = self.profile_url + "/" + "changebalance"
        self.getprofile_url = self.api_url + "getprofile"

    def login(self):
        """Login and set Session Cookies"""
        print("LOGIN")
        data = {"email": self.username, "password": self.password}
        self.log_resp = self.session.request(url="https://auth.iqoption.com/api/v1.0/login", data=data, method="POST")
        requests.utils.add_dict_to_cookiejar(self.session.cookies, dict(platform="9"))
        self.__ssid = self.log_resp.cookies.get("ssid")
        print(self.__ssid)
        self.start_socket_connection()
        time.sleep(1)  # artificial delay to complete socket connection
        self.log_resp2 = self.session.request(url="https://eu.iqoption.com/api/getprofile", method="GET")
        ss = self.log_resp2._content.decode('utf-8')
        js_ss = json.loads(ss)
        self.parse_account_info(js_ss)
        self.balance_id = js_ss["result"]["balance_id"]
        self.get_instruments()
        self.get_top_assets()
        self.setOptions()
        # self.getFeatures()
        time.sleep(1)
        print(js_ss["isSuccessful"])
        return js_ss["isSuccessful"]

    def on_socket_message(self, socket, message):
        # do things
        pass

    def on_socket_connect(self, socket):
        """Called on Socket Connection"""
        self.initial_subscriptions()
        print("On connect")

    def initial_subscriptions(self):
        self.send_socket_message("ssid", self.__ssid)
        self.send_socket_message("subscribe", "tradersPulse")

    def on_socket_error(self, socket, error):
        """Called on Socket Error"""
        print(error)

    def on_socket_close(self, socket):
        """Called on Socket Close, does nothing"""

    def start_socket_connection(self):
        """Start Socket Connection"""
        self.socket_thread = Thread(target=self.socket.run_forever)
        self.socket_thread.start()

    def send_socket_message(self, name, msg):
        # print(msg)
        data = {"name": name, "msg": msg}
        self.socket.send(json.dumps(data))
Here is an example running under gevent with websockets. This makes it asynchronous (which I suspect is part of your problem) and allows for bidirectional communication.
import socket
import gevent
from gevent import monkey, signal, Timeout, sleep, spawn as gspawn
monkey.patch_all()
from gevent.pywsgi import WSGIServer
from geventwebsocket.handler import WebSocketHandler
from geventwebsocket import WebSocketError
import bottle
from bottle import get, route, template, request, response, abort, static_file
import ujson as json

@route('/static/<filepath:path>')
def server_static(filepath):
    return static_file(filepath, root='static')

@route('/ws/remote')
def handle_websocket():
    wsock = request.environ.get('wsgi.websocket')
    if not wsock:
        abort(400, 'Expected WebSocket request.')
    while 1:
        try:
            message = ''
            with Timeout(2, False) as timeout:
                message = wsock.receive()
            if message:
                message = json.loads(message)
                if 'command' in message:
                    r.command(message['command'])
        except WebSocketError:
            break
        except Exception as exc:
            print(str(exc))

@get('/')
def remote():
    return template('templates/remote.tpl', title='WebsocketTest', websocket=WEBSOCKET, command='command', status=status)

if __name__ == '__main__':
    r = None
    status = "Connecting..."
    gspawn(initialize)
    print('Started...')
    HOST = socket.gethostbyname(socket.gethostname())
    HOST = 'localhost'
    WEBSOCKET = 'ws://{}/ws/remote'.format(HOST)
    botapp = bottle.app()
    server = WSGIServer(("0.0.0.0", 80), botapp, handler_class=WebSocketHandler)

    def shutdown():
        print('Shutting down ...')
        server.stop(timeout=60)
        exit(signal.SIGTERM)

    gevent.signal(signal.SIGTERM, shutdown)
    gevent.signal(signal.SIGINT, shutdown)  # CTRL C
    server.serve_forever()
Then in your HTML you really should use the reconnecting-websocket library:
https://github.com/joewalnes/reconnecting-websocket
<button id="TRIGGERED" type="button" class="btn btn-outline-primary">TRIGGER</button>

<script type="text/javascript" src="/static/reconnecting-websocket.min.js"></script>
<script>
  var ws = new ReconnectingWebSocket('{{websocket}}');
  ws.reconnectInterval = 3000;
  ws.maxReconnectAttempts = 10;
  ws.onmessage = function (evt) {
    var wsmsg = JSON.parse(evt.data);
    console.log(evt.data);
  };
  $("button").click(function() {
    // console.log(this.id);
    ws.send(JSON.stringify({'{{command}}': this.id}));
  });
</script>
I am using the Azure Event Hub Python SDK to send messages to and receive messages from an event hub, following this link: https://github.com/Azure/azure-event-hubs-python/tree/develop. I can successfully send and receive messages, but how do I parse the messages and retrieve the data from the event data object? Please find the code below.
import os
import sys
#import logging
from azure.eventhub import EventHubClient, Receiver, Offset
ADDRESS = 'sb://####.servicebus.windows.net/#####'
USER = '##########'
KEY = '##################################'
CONSUMER_GROUP = "$default"
OFFSET = Offset("-1")
PARTITION = "1"
total = 0
last_sn = -1
last_offset = "-1"
try:
    if not ADDRESS:
        raise ValueError("No EventHubs URL supplied.")
    client = EventHubClient(ADDRESS, debug=False, username=USER, password=KEY)
    receiver = client.add_receiver(CONSUMER_GROUP, PARTITION, prefetch=5000, offset=OFFSET)
    client.run()
    try:
        batched_events = receiver.receive(timeout=20)
    except:
        raise
    finally:
        client.stop()
    for event_data in batched_events:
        last_offset = event_data.offset.value
        last_sn = event_data.sequence_number
        total += 1
        print("Partition {}, Received {}, sn={} offset={}".format(
            PARTITION,
            total,
            last_sn,
            last_offset))
except KeyboardInterrupt:
    pass
If I try to view the received event_data, I see the following:
event_data
<azure.eventhub.common.EventData at 0xd4f1358>
event_data.message
<uamqp.message.Message at 0xd4f1240>
Any help on how to parse this message to extract the data would be appreciated.
As of 1.1.0, there are new utility methods to extract the actual data of the message:
body_as_str
body_as_json
So, what used to be
import json
event_obj = json.loads(next(event_data.body).decode('UTF-8'))
Is now:
event_obj = event_data.body_as_json()
For people using Event Hubs SDK version 5.2.0, the latest as of today (GitHub, Reference Docs), it's the same as in 1.1.0, i.e. use body_as_str() or body_as_json(). But the client has changed: there's an EventHubProducerClient and an EventHubConsumerClient in the new version. To print the body of a received event:
from azure.eventhub import EventHubConsumerClient
connection_str = '<< CONNECTION STRING FOR THE EVENT HUBS NAMESPACE >>'
consumer_group = '<< CONSUMER GROUP >>'
eventhub_name = '<< NAME OF THE EVENT HUB >>'
client = EventHubConsumerClient.from_connection_string(
    connection_str, consumer_group, eventhub_name=eventhub_name
)

def on_event_batch(partition_context, events):
    partition_context.update_checkpoint()
    for e in events:
        print(e.body_as_str())

with client:
    client.receive_batch(
        on_event_batch=on_event_batch,
        starting_position="-1",  # "-1" is from the beginning of the partition.
    )
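For completeness, the sending side in 5.x uses EventHubProducerClient; a minimal sketch, reusing the same placeholder connection string and hub name as above:

from azure.eventhub import EventHubProducerClient, EventData

producer = EventHubProducerClient.from_connection_string(
    connection_str, eventhub_name=eventhub_name
)

with producer:
    batch = producer.create_batch()  # batches respect the hub's size limit
    batch.add(EventData('{"hello": "world"}'))
    producer.send_batch(batch)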