Getting Bluetooth signal strength with the WinRT API in Python

I am trying to measure Bluetooth signal strength using the WinRT API in Python via winsdk. My workflow is to measure the Bluetooth signal strength of a device that is already connected to my Windows machine. I followed the guidelines from the PyWinRT documentation found here:
Here is my code snippet:
import asyncio

import winsdk.windows.devices.enumeration as e
import winsdk.windows.devices.bluetooth as bl


async def scan():
    sig_strength = "System.Devices.Aep.SignalStrength"
    additionalProperties = [sig_strength]
    watcher = e.DeviceInformation.create_watcher(
        bl.BluetoothDevice.get_device_selector(), additionalProperties
    )
    received_queue = asyncio.Queue()

    def added_w(device_watcher, device_info_update):
        if device_info_update.name == "my_device":
            print("found!")
            for value, key in enumerate(device_info_update.properties):
                if key == "System.Devices.Aep.SignalStrength":
                    print("signal strength: {}".format(value))

    def updated_w(device_watcher, device_info_update):
        print("update for {} with kind {}".format(device_info_update.id, device_info_update.kind))

    def removed_w(device_watcher, device_info_update):
        pass

    def stopped_w(device_watcher, device_info_update):
        pass

    added_token = watcher.add_added(
        lambda s, e: event_loop.call_soon_threadsafe(added_w, s, e)
    )
    updated_token = watcher.add_updated(
        lambda s, e: event_loop.call_soon_threadsafe(updated_w, s, e)
    )
    removed_token = watcher.add_removed(
        lambda s, e: event_loop.call_soon_threadsafe(removed_w, s, e)
    )

    event_loop = asyncio.get_running_loop()
    stopped_future = event_loop.create_future()

    def handle_stopped(sender, event_args):
        stopped_future.set_result(event_args)

    # the stopped handler has to be registered, otherwise stopped_future never resolves
    stopped_token = watcher.add_stopped(
        lambda s, e: event_loop.call_soon_threadsafe(handle_stopped, s, e)
    )

    try:
        print("scanning...")
        watcher.start()

        # this is the consumer for the received event queue
        async def print_received():
            while True:
                event_args = await received_queue.get()
                print(
                    "received:",
                    event_args.bluetooth_address.to_bytes(6, "big").hex(":"),
                    event_args.raw_signal_strength_in_d_bm, "dBm",
                )

        printer_task = asyncio.create_task(print_received())
        # since the print task is an infinite loop, we have to cancel it when we don't need it anymore
        stopped_future.add_done_callback(printer_task.cancel)

        # scan for 30 seconds or until an unexpected stopped event (due to error)
        done, pending = await asyncio.wait(
            [stopped_future, printer_task], timeout=30, return_when=asyncio.FIRST_COMPLETED
        )
        if stopped_future in done:
            print("unexpected stopped event", stopped_future.result().error)
        else:
            print("stopping...")
            watcher.stop()
            await stopped_future
    finally:
        # event handlers are removed in a finally block to ensure we don't leak
        watcher.remove_added(added_token)
        watcher.remove_updated(updated_token)
        watcher.remove_removed(removed_token)
        watcher.remove_stopped(stopped_token)


asyncio.run(scan())
However, I only get a fixed "RSSI" value of 8 from the print in my added_w function.
Any help on a potential solution would be greatly appreciated!
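The constant 8 is most likely not an RSSI at all: enumerate() yields (index, key) pairs, so value here is just the position of the signal-strength key in the property map. The property set behaves like a mapping, so the value can be looked up by key directly. A minimal sketch of a corrected handler (assuming winsdk exposes properties with the usual mapping protocol; the key may be absent for some device kinds):

    def added_w(device_watcher, device_info):
        if device_info.name == "my_device":
            print("found!")
            props = device_info.properties
            # look the value up by key instead of enumerating;
            # enumerate() yields the index, not the value
            if "System.Devices.Aep.SignalStrength" in props:
                print("signal strength: {}".format(props["System.Devices.Aep.SignalStrength"]))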

Related

Can't consume messages from topic

I have the following code:
from confluent_kafka.admin import AdminClient, NewTopic

a = AdminClient({'bootstrap.servers': 'localhost:9092'})

new_topics = [NewTopic(topic, num_partitions=3, replication_factor=1) for topic in ["topic1", "topic2"]]
fs = a.create_topics(new_topics)

for topic, f in fs.items():
    try:
        f.result()
        print("Topic {} created".format(topic))
    except Exception as e:
        print("Failed to create topic {}: {}".format(topic, e))
Creating the topics worked fine.
This is my producer:
from confluent_kafka import Producer

p = Producer({'bootstrap.servers': 'localhost:9092'})
some_data_source = ["hello", "wuff"]

def delivery_report(err, msg):
    if err is not None:
        print('Message delivery failed: {}'.format(err))
    else:
        print('Message delivered to {} [{}]'.format(msg.topic(), msg.partition()))

for data in some_data_source:
    p.poll(0)
    p.produce('mytopic', data.encode('utf-8'), callback=delivery_report)

p.flush()
Message delivered to mytopic [0]
Message delivered to mytopic [0]
Consumer:
from confluent_kafka import Consumer

c = Consumer({
    'bootstrap.servers': 'localhost:9092',
    'group.id': 'mygroup',
    'auto.offset.reset': 'earliest'
})
c.subscribe(['topic1'])

while True:
    msg = c.poll(1.0)
    print(msg)
    if msg is None:
        continue
    if msg.error():
        print("Consumer error: {}".format(msg.error()))
        continue
    print('Received message: {}'.format(msg.value().decode('utf-8')))

c.close()
When I subscribe to the topic (which works), I only get None every second. Am I doing something wrong here? Does it have something to do with 'group.id': 'mygroup'? Can anyone help me?
Your producer code is writing to the mytopic topic, which matches neither your create script nor the topic your consumer has subscribed to.
Also, if you don't want it to print None, move the print statement inside the if statement, since the poll function can return None.
As commented, you may also want to try further debugging with CLI tools.
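For instance, a minimal fix is to point the producer at the topic the consumer actually subscribes to (either side could be changed instead):

    # produce to the topic the consumer subscribes to
    p.produce('topic1', data.encode('utf-8'), callback=delivery_report)

The kafka-console-consumer script that ships with Kafka (run with --bootstrap-server localhost:9092 --topic topic1 --from-beginning) is a quick way to confirm the messages actually landed on the topic.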

Socket error (An operation was attempted on something that is not a socket) on aiohttp function

import asyncio

import aiofiles
import aiohttp
from aiohttp import ClientTimeout

# progress, logger and verbose_print come from the surrounding project


async def simultaneous_chunked_download(urls_paths, label):
    timeout = ClientTimeout(total=60000)
    sem = asyncio.Semaphore(5)
    async with aiohttp.ClientSession(
        timeout=timeout, connector=aiohttp.TCPConnector(verify_ssl=False)
    ) as cs:

        async def _fetch(r, path):
            async with sem:
                async with aiofiles.open(path, "wb") as f:
                    async for chunk in r.content.iter_any():
                        if not chunk:
                            break
                        size = await f.write(chunk)
                        if not indeterminate:
                            bar._done += size
                            bar.show(bar._done)
                    if indeterminate:
                        bar._done += 1
                        bar.show(bar._done)

        indeterminate = False
        total_length = 0
        tasks = []
        for url, path in urls_paths.items():
            r = await cs.get(url)
            if not indeterminate:
                try:
                    total_length += r.content_length
                except Exception:
                    indeterminate = True
            tasks.append(_fetch(r, path))
            verbose_print(f"url: {url},\npath: {path}\n\n")

        if not indeterminate:
            bar = progress.Bar(
                expected_size=total_length, label=label, width=28, hide=False
            )
        else:
            bar = progress.Bar(
                expected_size=len(tasks), label=label, width=28, hide=False
            )
        logger._pause_file_output = True
        bar.show(0)
        bar._done = 0
        await asyncio.gather(*tasks)
        logger._pause_file_output = False
        bar.done()
The function I have above is for downloading a dictionary of URLs asynchronously and then printing out a progress bar. An example of its usage:
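For illustration, a minimal sketch of a call, with hypothetical URLs and destination paths (the label is only used for the progress bar):

    # hypothetical URLs and destination paths, for illustration only
    urls_paths = {
        "https://example.com/a.zip": "downloads/a.zip",
        "https://example.com/b.zip": "downloads/b.zip",
    }
    asyncio.run(simultaneous_chunked_download(urls_paths, "downloading..."))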
The code itself runs perfectly fine; however, I keep getting these errors:
While benign, they are an eyesore and could point towards a gap in my knowledge of both HTTP and asynchronous code, so I would rather try to get them fixed. However, I'm at a loss as to where or what is causing them, especially as, like I said, the code runs perfectly fine regardless.
If you would like a more practical, hands-on attempt at recreating this, the full code is on my GitHub repo on the dev branch: https://github.com/ohitstom/spicetify-easyinstall/tree/dev
Most of the program can be disregarded if you are testing this out; just press the install button and the problematic code will show itself towards the end.
Bear in mind this is a Spotify themer, so if you have Spotify/Spicetify installed you will want to use a VM.
FIXED!:
# Create App
globals.app = QtWidgets.QApplication(sys.argv)
globals.app.setStyleSheet(gui.QSS)

# Configure asyncio loop to work with PyQt5
asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
loop = QEventLoop(globals.app)
asyncio.set_event_loop(loop)

# Setup GUI
globals.gui = gui.MainWindow()
globals.gui.show()

# Set off loop
with loop:
    sys.exit(loop.run_until_complete(globals.gui.exit_request.wait()))


class MainWindow(QuickWidget):
    def __init__(self):
        super().__init__(
            name="main_window",
            ...etc
        )
        self.exit_request = asyncio.Event()
        ......etc

    def closeEvent(self, *args):
        self.exit_request.set()
Asyncio and aiohttp have some problems when running a lot of tasks concurrently on Windows; I've been having a lot of problems with this lately.
There are some workarounds available; the ones I use most are:
# set this before your event loop initialization or main function
asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
Or:
loop = asyncio.ProactorEventLoop()
asyncio.set_event_loop(loop)
loop.run_until_complete(your_main())

Trying to consume messages with python multiprocessing

I am not able to consume messages with the code below. I am able to consume if I just call startLoop directly. What am I missing here? Appreciate the help.
from confluent_kafka import Consumer, KafkaError, KafkaException, TopicPartition
from multiprocessing import Process
import sys

idlist = []

def setConfig(bootstrapServers, groupId, autoOffsetReset):
    consumerConf = {}
    consumerConf['bootstrap.servers'] = bootstrapServers
    consumerConf['group.id'] = groupId
    consumerConf['auto.offset.reset'] = autoOffsetReset
    print(consumerConf)
    return consumerConf

def createConsumer(consumerConf, topic):
    consumer = Consumer(consumerConf)
    consumer.subscribe([topic])
    print("consumer subscribed to topic {}".format(topic))
    return consumer
    # self.consumer.assign([TopicPartition(topic, partition)])

def startLoop(consumer):
    try:
        while True:
            message = consumer.poll(1.0)
            if message is None:
                print("none")
                continue
            elif message.error():
                if message.error().code() == KafkaError._PARTITION_EOF:
                    sys.stderr.write('EOF Partition - {} '.format(message.partition()))
                else:
                    sys.stderr.write('Consumer Error on Topic - {} '.format(message.topic()))
                    sys.stderr.write('''-- topic - {}
                    -- partition - {}
                    -- offset - {}'''.format(
                        message.topic(), message.partition(), message.offset()))
            else:
                print('Received message: {}'.format(message.value().decode('utf-8')))
                handleMessage(message.value())
    except KeyboardInterrupt:
        sys.stderr.write('Kafka Exception raised - {} '.format(message.topic()))
        sys.exit(1)
    finally:
        consumer.close()

# body of the message, i.e. message.value()
def handleMessage(body):
    global idlist
    idlist.append(body)
    print(idlist)

if __name__ == '__main__':
    config = setConfig('localhost:9092', groupId='group',
                       autoOffsetReset='smallest')
    consOne = createConsumer(config, 'test')
    # startLoop(consOne) works!
    processOne = Process(target=startLoop, args=(consOne, ), group=None)
    # doesn't work :(
    processOne.start()
    processOne.join()
consumer = Consumer({'bootstrap.servers': 'localhost:9092', 'group.id': 'group', 'auto.offset.reset': 'smallest'})
consumer.subscribe(['test'])

def startLoop():
    try:
        global consumer
        print(consumer)
        while True:
            message = consumer.poll(1.0)
            if message is None:
                print("none")
                continue
            elif message.error():
                if message.error().code() == KafkaError._PARTITION_EOF:
                    sys.stderr.write('EOF Partition - {} '.format(message.partition()))
                else:
                    sys.stderr.write('Consumer Error on Topic - {} '.format(message.topic()))
                    sys.stderr.write('''-- topic - {}
                    -- partition - {}
                    -- offset - {}'''.format(
                        message.topic(), message.partition(), message.offset()))
            else:
                print('Received message: {}'.format(message.value().decode('utf-8')))
                # handleMessage(message.value())
    except KeyboardInterrupt:
        sys.stderr.write('Kafka Exception raised - {} '.format(message.topic()))
        sys.exit(1)
    finally:
        consumer.close()

if __name__ == '__main__':
    processOne = Process(target=startLoop, group=None)
    # still consumes messages with startLoop() but not with processOne.start()
    # startLoop()
    processOne.start()
    processOne.join()
You are probably using multiprocessing in the wrong way. From the official documentation (Safe importing of main module | Programming guidelines):
Make sure that the main module can be safely imported by a new Python interpreter without causing unintended side effects (such as starting a new process).
So it is necessary to start the process inside if __name__ == '__main__':.
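There is a second pitfall here: the Consumer object wraps a librdkafka client handle, which generally cannot be pickled and handed to a child process (and its background threads do not survive a fork). A minimal sketch that avoids this by passing plain configuration to the child and building the consumer inside the target function (assuming the same local broker and test topic as above):

    from multiprocessing import Process
    from confluent_kafka import Consumer

    def consumeLoop(consumerConf, topic):
        # each process builds its own Consumer from plain config
        consumer = Consumer(consumerConf)
        consumer.subscribe([topic])
        try:
            while True:
                message = consumer.poll(1.0)
                if message is None:
                    continue
                if message.error():
                    print("Consumer error: {}".format(message.error()))
                    continue
                print('Received message: {}'.format(message.value().decode('utf-8')))
        finally:
            consumer.close()

    if __name__ == '__main__':
        config = {'bootstrap.servers': 'localhost:9092',
                  'group.id': 'group',
                  'auto.offset.reset': 'smallest'}
        processOne = Process(target=consumeLoop, args=(config, 'test'))
        processOne.start()
        processOne.join()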

Consuming and replying to separate queues | Pika implementation

I am struggling to solve my issue; I hope someone from the community can help me here.
Our requirement is locked and can't be changed, as the producer publishing to the queues is controlled by a different team.
The producer, which is written in Java, declares three queues (TASK, RESPONSE, TASK_RESPONSE) and listens on them with the help of the Spring framework.
A hashmap is sent to the TASK and TASK_RESPONSE queues from the Java AMQP client (producer).
We need to consume these hashmaps and send the responses as follows:
If the TASK queue is processed, the response needs to be sent on the RESPONSE queue incrementally.
If the TASK_RESPONSE queue is processed, the response needs to be sent on the TASK_RESPONSE queue incrementally (RPC mode).
Now, we need to consume and publish this in Python, since we need to do some background processing on the tasks.
I tried to work with Celery and Dramatiq, but was not able to figure out how it can be done with them, so I tried writing it myself (with the help of tutorials available online).
The problem is, I am able to consume the messages but not able to reply to the RESPONSE queue. Here is my code (a sketch of an RPC-style reply follows the code below).
from collections import OrderedDict
from concurrent.futures import ThreadPoolExecutor

import pika
import datetime
import logging
import json
from logging import StreamHandler
from time import sleep
from random import randint
from pika import SelectConnection

from settings import *

logging.basicConfig(handlers=[StreamHandler()], level=logging.INFO, format=logging.BASIC_FORMAT)
_logger = logging.getLogger(__name__)


class QueueConsumer(object):
    """The consumer class to manage connections to the AMQP server/queue"""

    def __init__(self, queue, logger, parameters, thread_id=0):
        self.channel = None
        self.connection = None
        self.queue_name_task = queue['task']
        self.queue_name_response = queue['response']
        self.logger = logger
        self.consumer_id = 'Consumer Thread: %d' % (thread_id,)
        self.parameters = pika.ConnectionParameters(**parameters)

    def consume(self):
        try:
            self.connection = SelectConnection(parameters=self.parameters, on_open_callback=self._on_connected)
            self.connection.ioloop.start()
        except Exception as e:
            self.logger.error('{} {}'.format(self.consumer_id, str(e)))
            self.connection.close()
            self.connection.ioloop.start()

    def _on_connected(self, connection):
        connection.channel(on_open_callback=self._on_channel_open)

    def _on_channel_open(self, channel):
        self.channel = channel
        try:
            # Declare Task Queue
            self.channel.queue_declare(queue=self.queue_name_task,
                                       exclusive=False,
                                       durable=True,
                                       auto_delete=False,
                                       callback=self._on_queue_declared)
            self.logger.info("{} Opened Channel....".format(self.consumer_id))
            # Declare Task Response Queue
            self.channel.queue_declare(queue=self.queue_name_response,
                                       exclusive=False,
                                       durable=True,
                                       auto_delete=False)
            self.logger.info("{} Opened Channel....".format(self.consumer_id))
        except Exception as e:
            self.logger.error('{} {}'.format(self.consumer_id, str(e)))

    def _on_queue_declared(self, frame):
        self.logger.debug('{} ... declaring queue'.format(self.consumer_id))
        self.channel.basic_qos(prefetch_count=1)
        try:
            self.channel.basic_consume(queue=self.queue_name_task,
                                       on_message_callback=self.handle_delivery,
                                       auto_ack=True)
            self.logger.info("{} Declared queue...".format(self.consumer_id))
        except Exception as e:
            self.logger.error('{} crashing:--> {}'.format(self.consumer_id, str(e)))

    def handle_delivery(self, channel, method, header, body):
        try:
            start_time = datetime.datetime.now()
            _logger.info("Received...")
            _logger.info("Content: %s" % body)
            req = json.loads(self.decode(body))
            # Do something
            sleep(randint(10, 20))
            time_taken = datetime.datetime.now() - start_time
            log_msg = "[{}] Time Taken: {}.{}".format(req['bar']['baz'], time_taken.seconds, time_taken.microseconds)
            _logger.info(log_msg)
            # Publish the result to another queue.
            try:
                self.channel.basic_publish(exchange='',
                                           routing_key=self.queue_name_response,
                                           properties=pika.BasicProperties(),
                                           body=log_msg)
                _logger.info("Message Published...\t(%s)" % self.queue_name_response)
            except Exception as e:
                self.logger.error('{} Message publishing failed:--> {}'.format(self.consumer_id, str(e)))
        except Exception as err:
            _logger.exception(err)

    def decode(self, body):
        try:
            _body = body.decode('utf-8')
        except AttributeError:
            _body = body
        return _body


if __name__ == "__main__":
    pika_parameters = OrderedDict([
        ('host', TF_BROKER_HOST),
        ('port', TF_BROKER_PORT),
        ('virtual_host', TF_BROKER_VHOST)
    ])
    queue = {'task': TF_IAAS_TASK_QUEUE, 'response': TF_IAAS_REPLY_QUEUE}
    try:
        with ThreadPoolExecutor(max_workers=TF_IAAS_THREAD_SIZE, thread_name_prefix=TF_IAAS_THREAD_PREFIX) as executor:
            start = 1
            for thread_id in range(start, (TF_IAAS_THREAD_SIZE + start)):
                executor.submit(QueueConsumer(queue, _logger, pika_parameters, thread_id).consume)
    except Exception as err:
        _logger.exception(err)
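For the TASK_RESPONSE (RPC) case, the usual AMQP pattern is to publish the reply to the queue named in the incoming message's reply_to property, echoing its correlation_id so the Java side can pair responses with requests. A minimal sketch of what the publish step inside handle_delivery could look like under that assumption (reply_to and correlation_id are standard pika BasicProperties fields, but whether the producer sets them depends on the Java team's configuration; log_msg stands in for the computed reply):

    def handle_delivery(self, channel, method, header, body):
        # ... consume and process the task as above, producing log_msg ...
        log_msg = "..."  # placeholder for the computed reply
        # fall back to the configured response queue if no reply_to was set
        reply_queue = header.reply_to or self.queue_name_response
        channel.basic_publish(exchange='',
                              routing_key=reply_queue,
                              properties=pika.BasicProperties(
                                  correlation_id=header.correlation_id),
                              body=log_msg)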
Publish Messages On RabbitMQ
import pika
import json
import random
import datetime
from faker import Faker
from random import randint

fake = Faker('en_US')

if __name__ == '__main__':
    try:
        connection = pika.BlockingConnection(pika.ConnectionParameters(
            host='localhost'))
        channel = connection.channel()
        channel.queue_declare(queue='tf_task', durable=True)
        started_at = datetime.datetime.now()
        properties = pika.BasicProperties(delivery_mode=2)
        for i in range(0, 10000):
            body = {
                'foo': randint(i, i + 100),
                'bar': {
                    'baz': fake.name(),
                    'poo': float(random.randrange(155 + i, 389 + i)) / 100
                }
            }
            channel.basic_publish(exchange='',
                                  routing_key='tf_task',
                                  body=json.dumps(body),
                                  properties=properties)
            if i % 10000 == 0:
                duration = datetime.datetime.now() - started_at
                print(i, duration.total_seconds())
        print(" [x] Sent 'Hello World!'")
        connection.close()
        now = datetime.datetime.now()
        duration = now - started_at
        print(duration.total_seconds())
    except Exception as e:
        print(e)

Why isn't trollius making this loop run asynchronously?

I am trying to write some simple loops to control objects in Pygazebo, but alas it only ever calls the method once and then the loop appears to block.
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 2 12:52:50 2015

@author: skylion
"""
import trollius  # NOTE: Trollius requires protobuf from Google
from trollius import From

import pygazebo
import pygazebo.msg.joint_cmd_pb2
import time


def apply_joint_force(world_name, robot_name, joint_name, force, duration=-1):
    @trollius.coroutine
    def joint_force_loop():
        manager = yield From(pygazebo.connect())
        print("connected")
        publisher = yield From(
            manager.advertise('/gazebo/' + world_name + '/' + robot_name + '/joint_cmd',
                              'gazebo.msgs.JointCmd'))
        message = pygazebo.msg.joint_cmd_pb2.JointCmd()
        message.name = robot_name + '::' + joint_name  # format should be: name_of_robot + '::name_of_joint'
        message.force = force
        # t_end = time.time() + duration  # The time that you want the controller to stop
        while True:  # time.time() < t_end or duration == -1:
            try:
                yield From(publisher.publish(message))
                yield From(trollius.sleep(1.0))
            except:
                pass  # Nothing
        print("Connection closed")

    wait_net_service('localhost', 11345)
    loop = trollius.new_event_loop()
    loop.run_until_complete(joint_force_loop())
    raise


def wait_net_service(server, port, timeout=None):
    """ Wait for network service to appear
    @param timeout: in seconds, if None or 0 wait forever
    @return: True or False; if timeout is None, may return only True or
             throw an unhandled network exception
    """
    import socket
    import errno
    s = socket.socket()
    if timeout:
        from time import time as now
        # time module is needed to calc timeout shared between two exceptions
        end = now() + timeout
    while True:
        try:
            if timeout:
                next_timeout = end - now()
                if next_timeout < 0:
                    return False
                else:
                    s.settimeout(next_timeout)
            s.connect((server, port))
            time.sleep(1)
        except socket.timeout:
            # this exception occurs only if timeout is set
            if timeout:
                return False
        except socket.error as err:
            # catch timeout exception from underlying network library
            # this one is different from socket.timeout
            if type(err.args) != tuple or (err.args[0] != errno.ETIMEDOUT and err.args[0] != errno.ECONNREFUSED):
                raise err
        else:
            s.close()
            return True
I thought @coroutine-decorated functions were supposed to run asynchronously? Do I just misunderstand how to use this code? Or am I doing something else wrong? This is my first time with concurrency in Python, by the way.
Also, this is how I am calling that function:
counter = 0
for joint_def in self.all_joint_props:
    print("each joint_def")
    apply_joint_force(world_name, robot_name, "hingejoint" + str(counter), joint_def[2])
    # print("Applying joint force")
Any idea why it keeps blocking the thread? Should I be using a different method for this? Any help would be appreciated.
So, the answer is quite simple, really. You have to queue up the multiple trollius.Task objects you want to run in a list before starting the event loop, and combine that with trollius.wait() to run them together. That way no single coroutine blocks the others.
Here is my code so far:
tasks = []
for joint_name in joint_names:
    tasks.append(trollius.Task(joint_force_loop(world_name, robot_name, joint_name, force, duration)))

loop = trollius.get_event_loop()
loop.run_until_complete(trollius.wait(tasks))
