I am trying to connect two M5StickC devices to a PC via BLE to upload their sensor data.
I wrote a data acquisition Python script using the bleak library.
The data acquisition rate is very slow when I connect two devices.
How can I improve the data acquisition rate in my script?
I would like to get 20 readings per second.
import asyncio
from bleak import BleakClient
address1 = "D8:A0:1D:55:EE:8A"
UUID1 = "beb5483e-36e1-4688-b7f5-ea07361b26a8"
address2 = "94:B9:7E:93:21:76"
UUID2 = "beb5483e-36e1-4688-b7f5-ea07361b26a2"
async def main():
    client1 = BleakClient(address1)
    client2 = BleakClient(address2)
    print(client1.address)
    print(client2.address)
    await client1.connect()
    # await client2.connect()
    while True:  # very slow unless the client2 lines are commented out
        print(await client1.read_gatt_char(UUID1))
        # print(await client2.read_gatt_char(UUID2))

if __name__ == "__main__":
    loop = asyncio.get_event_loop()
    loop.run_until_complete(main())
Edit:
Thank you for your comments.
I have updated my script following ukBaz's link.
The data upload rate is much improved, but I get data almost exclusively from one M5StickC; data from the other one arrives only sparsely...
I will ask the bleak maintainer about this.
from bleak import BleakClient
import asyncio
address1 = "D8:A0:1D:55:EE:8A"
UUID1 = "beb5483e-36e1-4688-b7f5-ea07361b26a8"
address2 = "94:B9:7E:93:21:76"
UUID2 = "beb5483e-36e1-4688-b7f5-ea07361b26a2"
def callback(sender, data):
    print(sender, data)

def run(addresses, UUIDs):
    loop = asyncio.get_event_loop()
    tasks = asyncio.gather(*(connect_to_device(address, UUID) for address, UUID in zip(addresses, UUIDs)))
    loop.run_until_complete(tasks)

async def connect_to_device(address, UUID):
    print("starting", address, "loop")
    async with BleakClient(address, timeout=5.0) as client:
        print("connect to", address)
        while True:
            try:
                print(await client.read_gatt_char(UUID))
            except Exception as e:
                print(e)

if __name__ == "__main__":
    run([address1, address2], [UUID1, UUID2])
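For reference, bleak also supports GATT notifications, which usually give a much higher and more even data rate than polling read_gatt_char in a loop. A minimal sketch that reuses the callback defined above (it assumes the M5StickC firmware actually sends notifications on these characteristics):

async def connect_and_subscribe(address, UUID, seconds=60.0):
    # Subscribe instead of polling; callback(sender, data) is invoked for every notification.
    async with BleakClient(address, timeout=5.0) as client:
        print("connected to", address)
        await client.start_notify(UUID, callback)
        await asyncio.sleep(seconds)  # keep the connection open while notifications arrive
        await client.stop_notify(UUID)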
Related
I am using Motor, but pymongo was my initial choice; I switched to Motor because it is an asynchronous MongoDB driver for Python.
My aim is to query MongoDB with a large number of concurrent calls and minimal waiting time.
There are about 1,000 symbols, and for each symbol I have to query its latest candlestick data from MongoDB from time to time in order to perform certain calculations. I need to query the latest 5K documents for each symbol, so the collection contains roughly 1000 * 5000 = 5,000,000 documents.
With Motor and asyncio, I use the following method to fetch documents asynchronously, but the code takes a really long time to run and I can't figure out why. I am using an 8-core CPU on a virtual machine.
Any help with this problem?
import asyncio
import motor.motor_asyncio

async def getCandleList(symbol):  # each symbol contains about 5K latest candles in the collection
    final_str = "{'symbol': '%s'}" % (symbol)
    resultType = 'candlestick_archive'
    dbName = 'candle_db'
    cursor = eval("db.{}.find({}).sort('timeStamp',-1)".format(dbName, final_str))
    finalList = await cursor.to_list(length=None)
    return finalList

async def taskForEachSymbol(symbol):
    while True:
        candleList = await getCandleList(symbol)
        await generateSignal(candleList)  # a function that generates certain signals in real time

def getAllTasks():
    awaitableTasks = []
    for symbol in symbolList:  # symbolList contains around 1k symbols
        awaitableTasks.append(asyncio.create_task(taskForEachSymbol(symbol)))
    return awaitableTasks

async def mainTask():
    awaitableTasks = getAllTasks()
    await asyncio.gather(*awaitableTasks, return_exceptions=False)

def main():  # plain function: it drives mainLoop itself
    mainLoop.run_until_complete(mainTask())
    print('completed! ... ')

if __name__ == '__main__':
    mainLoop = asyncio.new_event_loop()
    asyncio.set_event_loop(mainLoop)
    client = motor.motor_asyncio.AsyncIOMotorClient(io_loop=mainLoop)
    db = client.candles
    main()
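As a side note, the eval() string building is not required; Motor lets you call find() directly and cap the result set on the server. A minimal sketch, assuming 'candle_db' is the collection name and 'timeStamp' is indexed (names taken from the snippet above):

async def get_candle_list(db, symbol, limit=5000):
    # Direct Motor query: filter by symbol, newest first, capped at `limit` documents server-side.
    cursor = db['candle_db'].find({'symbol': symbol}).sort('timeStamp', -1).limit(limit)
    return await cursor.to_list(length=limit)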
async def simultaneous_chunked_download(urls_paths, label):
    timeout = ClientTimeout(total=60000)
    sem = asyncio.Semaphore(5)
    async with aiohttp.ClientSession(timeout=timeout, connector=aiohttp.TCPConnector(verify_ssl=False)) as cs:

        async def _fetch(r, path):
            async with sem:
                async with aiofiles.open(path, "wb") as f:
                    async for chunk in r.content.iter_any():
                        if not chunk:
                            break
                        size = await f.write(chunk)
                        if not indeterminate:
                            bar._done += size
                            bar.show(bar._done)
                    if indeterminate:
                        bar._done += 1
                        bar.show(bar._done)

        indeterminate = False
        total_length = 0
        tasks = []
        for url, path in urls_paths.items():
            r = await cs.get(url)
            if not indeterminate:
                try:
                    total_length += r.content_length
                except Exception:
                    indeterminate = True
            tasks.append(_fetch(r, path))
            verbose_print(f"url: {url},\npath: {path}\n\n")

        if not indeterminate:
            bar = progress.Bar(
                expected_size=total_length, label=label, width=28, hide=False
            )
        else:
            bar = progress.Bar(
                expected_size=len(tasks), label=label, width=28, hide=False
            )
        logger._pause_file_output = True
        bar.show(0)
        bar._done = 0
        await asyncio.gather(*tasks)
        logger._pause_file_output = False
        bar.done()
The function I have above downloads a dictionary of URLs asynchronously and then prints out a progress bar. An example of its usage:
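A hypothetical call (with placeholder URLs and paths) would look like this, from inside a coroutine:

urls_paths = {
    "https://example.com/file1.zip": "downloads/file1.zip",
    "https://example.com/file2.zip": "downloads/file2.zip",
}
await simultaneous_chunked_download(urls_paths, "Downloading files")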
The code itself runs perfectly fine; however, I keep getting these errors:
While benign, they are an eyesore and could point to gaps in my knowledge of both HTTP and asynchronous code, so I would rather get them fixed. However, I am at a loss as to where or what is causing them, especially since, as I said, the code runs perfectly fine regardless.
If you would like a more practical, hands-on attempt at recreating this, the full code is on the dev branch of my GitHub repo: https://github.com/ohitstom/spicetify-easyinstall/tree/dev
Most of the program can be disregarded if you are testing this out; just press the install button and the problematic code will show itself towards the end.
Bear in mind this is a Spotify themer, so if you have Spotify/Spicetify installed you will want to use a VM.
FIXED!:
# Create App
globals.app = QtWidgets.QApplication(sys.argv)
globals.app.setStyleSheet(gui.QSS)
# Configure asyncio loop to work with PyQt5
asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
loop = QEventLoop(globals.app)
asyncio.set_event_loop(loop)
# Setup GUI
globals.gui = gui.MainWindow()
globals.gui.show()
# Set off loop
with loop:
    sys.exit(loop.run_until_complete(globals.gui.exit_request.wait()))

class MainWindow(QuickWidget):
    def __init__(self):
        super().__init__(
            name="main_window",
            ...etc
        )
        self.exit_request = asyncio.Event()
        ......etc

    def closeEvent(self, *args):
        self.exit_request.set()
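One note on the snippet above: QEventLoop is not part of the standard library; it presumably comes from an asyncio/Qt bridge, qasync being a common choice:

from qasync import QEventLoop  # assumed import; substitute whichever asyncio/Qt bridge the project uses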
Asyncio and aiohttp have some problems when running a lot of tasks concurrently on Windows; I've been running into them a lot lately.
There are some workarounds available, the ones I use most are:
# set this before your event loop initialization or main function
asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
Or:
loop = asyncio.ProactorEventLoop()
asyncio.set_event_loop(loop)
loop.run_until_complete(your_main())
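In context, the first workaround only needs to run before the event loop is created. A minimal sketch (your_main is a stand-in for the download coroutine above):

import asyncio
import sys

async def your_main():
    ...  # e.g. call simultaneous_chunked_download() here

if __name__ == "__main__":
    if sys.platform == "win32":
        # avoid the Proactor-loop teardown noise on Windows
        asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
    asyncio.run(your_main())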
I am struggling to solve my issue; I hope someone from the community can help me here.
Our requirement is locked and can't be changed, as the producer publishing to the queues is controlled by a different team.
The producer, which is written in Java, declares three queues (TASK, RESPONSE, TASK_RESPONSE) and listens on them with the help of the Spring framework.
A hashmap is sent to the TASK and TASK_RESPONSE queues from the Java AMQP client (the producer).
We need to consume these hashmaps and send the responses as follows.
If a message from the TASK queue is processed, the response needs to be sent on the RESPONSE queue incrementally.
If a message from the TASK_RESPONSE queue is processed, the response needs to be sent back on the TASK_RESPONSE queue incrementally (RPC mode).
Now, we need to consume and publish these in Python, since we need to do some background processing on the tasks.
I tried to work with Celery and Dramatiq, but was not able to figure out how it can be done with them, so I tried writing it myself (with the help of tutorials available online).
The problem is that I am able to consume the messages, but I am not able to reply to the RESPONSE queue (reply_to). Here is my code.
from collections import OrderedDict
from concurrent.futures import ThreadPoolExecutor
import pika
import datetime
import logging
import json
from logging import StreamHandler
from time import sleep
from random import randint
from pika import SelectConnection
from settings import *

logging.basicConfig(handlers=[StreamHandler()], level=logging.INFO, format=logging.BASIC_FORMAT)
_logger = logging.getLogger(__name__)


class QueueConsumer(object):
    """The consumer class to manage connections to the AMQP server/queue"""

    def __init__(self, queue, logger, parameters, thread_id=0):
        self.channel = None
        self.connection = None
        self.queue_name_task = queue['task']
        self.queue_name_response = queue['response']
        self.logger = logger
        self.consumer_id = 'Consumer Thread: %d' % (thread_id,)
        self.parameters = pika.ConnectionParameters(**parameters)

    def consume(self):
        try:
            self.connection = SelectConnection(parameters=self.parameters, on_open_callback=self._on_connected)
            self.connection.ioloop.start()
        except Exception as e:
            self.logger.error('{} {}'.format(self.consumer_id, str(e)))
            self.connection.close()
            self.connection.ioloop.start()

    def _on_connected(self, connection):
        connection.channel(on_open_callback=self._on_channel_open)

    def _on_channel_open(self, channel):
        self.channel = channel
        try:
            # Declare Task Queue
            self.channel.queue_declare(queue=self.queue_name_task,
                                       exclusive=False,
                                       durable=True,
                                       auto_delete=False,
                                       callback=self._on_queue_declared)
            self.logger.info("{} Opened Channel....".format(self.consumer_id))

            # Declare Task Response Queue
            self.channel.queue_declare(queue=self.queue_name_response,
                                       exclusive=False,
                                       durable=True,
                                       auto_delete=False)
            self.logger.info("{} Opened Channel....".format(self.consumer_id))
        except Exception as e:
            self.logger.error('{} {}'.format(self.consumer_id, str(e)))

    def _on_queue_declared(self, frame):
        self.logger.debug('{} ... declaring queue'.format(self.consumer_id))
        self.channel.basic_qos(prefetch_count=1)
        try:
            self.channel.basic_consume(queue=self.queue_name_task,
                                       on_message_callback=self.handle_delivery,
                                       auto_ack=True)
            self.logger.info("{} Declared queue...".format(self.consumer_id))
        except Exception as e:
            self.logger.error('{} crashing:--> {}'.format(self.consumer_id, str(e)))

    def handle_delivery(self, channel, method, header, body):
        try:
            start_time = datetime.datetime.now()
            _logger.info("Received...")
            _logger.info("Content: %s" % body)
            req = json.loads(self.decode(body))

            # Do something
            sleep(randint(10, 20))

            time_taken = datetime.datetime.now() - start_time
            log_msg = "[{}] Time Taken: {}.{}".format(req['bar']['baz'], time_taken.seconds, time_taken.microseconds)
            _logger.info(log_msg)

            # Publish the result to another queue.
            try:
                self.channel.basic_publish(exchange='',
                                           routing_key=self.queue_name_response,
                                           properties=pika.BasicProperties(),
                                           body=log_msg)
                _logger.info("Message Published...\t(%s)" % self.queue_name_response)
            except Exception as e:
                self.logger.error('{} Message publishing failed:--> {}'.format(self.consumer_id, str(e)))
        except Exception as err:
            _logger.exception(err)

    def decode(self, body):
        try:
            _body = body.decode('utf-8')
        except AttributeError:
            _body = body
        return _body


if __name__ == "__main__":
    pika_parameters = OrderedDict([
        ('host', TF_BROKER_HOST),
        ('port', TF_BROKER_PORT),
        ('virtual_host', TF_BROKER_VHOST)
    ])
    queue = {'task': TF_IAAS_TASK_QUEUE, 'response': TF_IAAS_REPLY_QUEUE}
    try:
        with ThreadPoolExecutor(max_workers=TF_IAAS_THREAD_SIZE, thread_name_prefix=TF_IAAS_THREAD_PREFIX) as executor:
            start = 1
            for thread_id in range(start, (TF_IAAS_THREAD_SIZE + start)):
                executor.submit(QueueConsumer(queue, _logger, pika_parameters, thread_id).consume)
    except Exception as err:
        _logger.exception(err)
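For the RPC-style TASK_RESPONSE case, the usual pika pattern is to publish the reply to the queue named in the incoming message's reply_to property and echo its correlation_id. A minimal sketch of such a handler, meant as a QueueConsumer method like handle_delivery above (not tested against the Java producer; header is the incoming pika.BasicProperties):

def handle_rpc_delivery(self, channel, method, header, body):
    # In RPC mode the producer is expected to set reply_to and correlation_id on the message.
    result = self.decode(body)  # real processing goes here
    reply_queue = header.reply_to or self.queue_name_response
    channel.basic_publish(exchange='',
                          routing_key=reply_queue,
                          properties=pika.BasicProperties(correlation_id=header.correlation_id),
                          body=result)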
Publish Messages On RabbitMQ
import pika
import json
import random
import datetime
from faker import Faker
from random import randint
fake = Faker('en_US')
if __name__ == '__main__':
    try:
        connection = pika.BlockingConnection(pika.ConnectionParameters(
            host='localhost'))
        channel = connection.channel()
        channel.queue_declare(queue='tf_task', durable=True)
        started_at = datetime.datetime.now()
        properties = pika.BasicProperties(delivery_mode=2)
        for i in range(0, 10000):
            body = {
                'foo': randint(i, i + 100),
                'bar': {
                    'baz': fake.name(),
                    'poo': float(random.randrange(155 + i, 389 + i)) / 100
                }
            }
            channel.basic_publish(exchange='',
                                  routing_key='tf_task',
                                  body=json.dumps(body),
                                  properties=properties)
            if i % 10000 == 0:
                duration = datetime.datetime.now() - started_at
                print(i, duration.total_seconds())
        print(" [x] Sent 'Hello World!'")
        connection.close()
        now = datetime.datetime.now()
        duration = now - started_at
        print(duration.total_seconds())
    except Exception as e:
        print(e)
The socket module has a socket.recv_into method, so it can use a user-supplied buffer (like a bytearray) for zero-copy receives. But BaseEventLoop appears to have no method like that. Is there a way to use something like socket.recv_into in asyncio?
The low-level socket operations defined for BaseEventLoop require a socket.socket object to be passed in, e.g. BaseEventLoop.sock_recv(sock, nbytes). So, given that you have a socket.socket, you could call sock.recv_into(). Whether it is a good idea to do that is another question.
You may implement your own asyncio transport that utilizes .recv_into(), but yes, for now asyncio has no way to use .recv_into() out of the box.
Personally, I doubt there is a very big speedup: when you develop in C, zero-copy is extremely important, but for a high-level language like Python the benefits are much smaller.
Update: Starting with Python 3.7.0, which is in alpha release as I write this, the standard library's asyncio module documents AbstractEventLoop.sock_recv_into().
Edit: expanding answer as requested...
A call to asyncio's sock_recv_into() typically looks like:
byte_count = await loop.sock_recv_into(sock, buff)
The buff is a mutable object that implements Python's buffer protocol, examples of which include a bytearray and a memoryview on a bytearray. The code below demonstrates receiving into a bytearray using a memoryview.
Working demo code for asyncio sockets necessarily includes a bunch of scaffolding to set up both sides of the connections and run the event loop. The point here is the use of asyncio's sock_recv_into() in the sock_read_exactly() co-routine below.
#!/usr/bin/env python3
"""Demo the asyncio module's sock_recv_into() facility."""

import sys
assert sys.version_info[:2] >= (3, 7), (
    'asyncio sock_recv_into() new in Python 3.7')

import socket
import asyncio


def local_echo_server(port=0):
    """Trivial threaded echo server with sleep delay."""
    import threading
    import time
    import random

    ssock = socket.socket()
    ssock.bind(('127.0.0.1', port))
    _, port = ssock.getsockname()
    ssock.listen(5)

    def echo(csock):
        while True:
            data = csock.recv(8192)
            if not data:
                break
            time.sleep(random.random())
            csock.sendall(data)
        csock.shutdown(1)

    def serve():
        while True:
            csock, client_addr = ssock.accept()
            tclient = threading.Thread(target=echo, args=(csock,), daemon=True)
            tclient.start()

    tserve = threading.Thread(target=serve, daemon=True)
    tserve.start()
    return port


N_COROS = 100
nrunning = 0


async def sock_read_exactly(sock, size, loop=None):
    "Read and return size bytes from sock in event-loop loop."
    if loop is None: loop = asyncio.get_event_loop()
    bytebuff = bytearray(size)
    sofar = 0
    while sofar < size:
        memview = memoryview(bytebuff)[sofar:]
        nread = await loop.sock_recv_into(sock, memview)
        print('socket', sock.getsockname(), 'read %d bytes' % nread)
        if not nread:
            raise RuntimeError('Unexpected socket shutdown.')
        sofar += nread
    return bytebuff


async def echo_client(port):
    "Send random data to echo server and test that we get back the same."
    from os import urandom
    global nrunning

    loop = asyncio.get_event_loop()
    sock = socket.socket()
    sock.setblocking(False)
    await loop.sock_connect(sock, ('127.0.0.1', port))
    for size in [1, 64, 1024, 55555]:
        sending = urandom(size)
        await loop.sock_sendall(sock, sending)
        received = await sock_read_exactly(sock, size)
        assert received == sending
    nrunning -= 1
    if not nrunning:
        loop.stop()


if __name__ == '__main__':
    port = local_echo_server()
    print('port is', port)
    loop = asyncio.get_event_loop()
    for _ in range(N_COROS):
        loop.create_task(echo_client(port))
        nrunning += 1
    print('Start loop.')
    loop.run_forever()
I have a basic IRC bot that looks something like this (see below). What I would like to do is have something like the _5_mins function called every 5 minutes with a LoopingCall.
import sys
import re
from twisted.internet import reactor, task, defer, protocol
from twisted.python import log
from twisted.words.protocols import irc
from twisted.application import internet, service
import time
HOST, PORT = 'irc.freenode.net', 6667
class IrcProtocol(irc.IRCClient):
    nickname = 'BOTSNAME'
    password = 'NICKPASSWORD'
    timeout = 600.0

    def signedOn(self):
        pMess = "IDENTIFY %s" % self.password
        self.msg("NickServ", pMess)
        time.sleep(10)
        for channel in self.factory.channels:
            self.join(channel)

    def _5_mins(self):
        self.msg(self.factory.channels[0], "5 minutes have elapsed")


class IrcFactory(protocol.ReconnectingClientFactory):
    channels = ['#BOTCHANNEL']
    protocol = IrcProtocol


if __name__ == '__main__':
    reactor.connectTCP(HOST, PORT, IrcFactory())
    log.startLogging(sys.stdout)
    reactor.run()
elif __name__ == '__builtin__':
    application = service.Application('IrcBot')
    ircService = internet.TCPClient(HOST, PORT, IrcFactory())
    ircService.setServiceParent(application)
How do I alter the signedOn function to work with task.LoopingCall, or is there a better way?
EDIT: I was really close to a solution; the following is what I have gone with:
def signedOn(self):
    pMess = "IDENTIFY %s" % self.password
    self.msg("NickServ", pMess)
    time.sleep(10)
    for channel in self.factory.channels:
        self.join(channel)
    lc = task.LoopingCall(self._5_mins)
    lc.start(self.timeout)
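One detail worth noting: self.timeout is 600.0 seconds (10 minutes), so for a true 5-minute interval the call would be lc.start(300). A non-blocking variant of the same idea, sketched as IrcProtocol methods (_join_and_start is a hypothetical helper; reactor.callLater replaces the blocking time.sleep(10)):

def signedOn(self):
    pMess = "IDENTIFY %s" % self.password
    self.msg("NickServ", pMess)
    # join channels and start the timer 10 s later without blocking the reactor
    reactor.callLater(10, self._join_and_start)

def _join_and_start(self):
    for channel in self.factory.channels:
        self.join(channel)
    self._loop = task.LoopingCall(self._5_mins)
    self._loop.start(300, now=False)  # fire every 5 minutes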