I have a question about async client tests. Here is my test code:
class TestSocketClient:
    @classmethod
    def setup_class(cls):
        # enable parallel testing by adding the pytest worker id number to the default port
        worker_id = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
        worker_number = int(worker_id[2:])
        cls.mock_server = ServerMock()
        cls.mock_server.port = ServerMock.port + worker_number
        cls.username = os.environ.get("USERNAME", "")
        cls.password = os.environ.get("PASSWORD", "")
        cls.socket_client = SocketClient(username=cls.username, password=cls.password)
        cls.socket_client.hostname = "0.0.0.0"
        cls.socket_client.port = SocketClient.port + worker_number

    @pytest.mark.asyncio
    async def test_login(self):
        await self.mock_server.start()
        response = await self.socket_client.login()
        assert response == actual_response
        await self.socket_client.close()

    @pytest.mark.asyncio
    async def test_send_heartbeat(self):
        await self.mock_server.start()
        await self.socket_client.login()
        await self.socket_client.send_heartbeat()
        await self.socket_client.close()
I can run the tests under TestSocketClient individually and they pass. But when I run the test suite with pytest -n auto, the latter test case raises an error while attempting to bind on address ('0.0.0.0', 2056): address already in use. My question is how to make the test suite pass without port allocation issues so that it can run successfully in CI. I would also greatly appreciate any further advice on writing async tests (for example, what should I assert if I only want to test a request the client sends to the server? Should I assert on the message received on the server side, or just write something like assert_called_once on the client side?). Thanks in advance!
Updates:
I finally solved the problem by incrementing the port in each test, like below:
class TestSocketClient:
    ports_taken = set()

    @classmethod
    def setup_class(cls):
        cls.mock_server = ServerMock()
        cls.username = os.environ.get("USERNAME", "")
        cls.password = os.environ.get("PASSWORD", "")
        cls.socket_client = SocketClient(username=cls.username, password=cls.password)
        cls.socket_client.hostname = "0.0.0.0"
        cls.socket_client.port = cls.mock_server.port

    def bump(self):
        # record the starting port, then move both server and client to the next one
        if len(self.ports_taken) == 0:
            self.ports_taken.add(self.mock_server.port)
        new_port = max(self.ports_taken) + 1
        self.mock_server.port = new_port
        self.socket_client.port = new_port
        self.ports_taken.add(self.mock_server.port)

    async def start(self):
        self.bump()
        try:
            await self.mock_server.start()
        except OSError:
            # the port was grabbed by another worker in the meantime: bump and retry
            self.bump()
            await self.mock_server.start()

    @pytest.mark.asyncio
    async def test_login(self):
        await self.start()
        ...
Hope this could be helpful!
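As an alternative to bumping ports by hand, another common pattern is to ask the OS for a free port by binding to port 0 and reading back the assigned port. A minimal sketch, assuming (as above) that both ServerMock and SocketClient accept an arbitrary port:

import socket

def get_free_port() -> int:
    # Binding to port 0 makes the OS pick an unused port; we read it back and release the socket.
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        s.bind(("0.0.0.0", 0))
        return s.getsockname()[1]

# hypothetical usage in setup_class, mirroring the test class above:
# port = get_free_port()
# cls.mock_server.port = port
# cls.socket_client.port = port

There is still a small window between releasing the probe socket and the mock server binding the port, so keeping a retry like the one in start() above is still useful.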
For "address already in use", check the currently running processes with:
ps -ax | grep <your app/app port>
In the output you will notice that the process already exists. Please kill that process using sudo kill -9 <process_id> and then restart your service. I am not sure about async tests.
I am making my own userbot. I was trying to place each command in its own Python file (to make it easier to manage), but for some mythical reason only one file (the first on the list of imports) is being imported. I've tried to look through the documentation and even asked in the "Pyrogram Inn" chat on Telegram, but nobody seemed to respond.
import asyncio
from os import path

from modules.clients.main_user import user
from modules.commands.echo import command_echo, execute_echo
from modules.commands.help import command_help


def login():
    if path.exists("./config.ini"):
        print('Credentials config found')
    else:
        print("Login at https://my.telegram.org/apps and")
        api_id = int(input("enter your api_id: "))
        api_hash = input("enter your api_hash: ")
        with open(f'{str(__file__).replace("main.py", "")}/config.ini', 'w') as config:
            config.write(f"[pyrogram] \n api_id = {api_id} \n api_hash = {api_hash}")


if __name__ == "__main__":
    login()
    user.run()
In the example above only command_echo and execute_echo are being imported, while command_help is ignored; if I comment out the echo import, then help works.
Content of echo module:
from pyrogram import filters

from modules.clients.main_user import user, command_prefix

chats_involved = {}
loop = False


@user.on_message(filters.command('echo', command_prefix))
async def command_echo(client, message) -> None:
    """Enable repeating of all incoming messages in chat

    Args:
        client ([Client]): Pyrogram client, usually passed by decorator
        message ([Message]): Pyrogram message, usually passed by decorator
    """
    global loop
    chat_data = await user.get_chat(message.chat.id)
    chat_name = f'**{chat_data.title}**'
    data = str(message.text)
    if "enable" in data.lower() or "true" in data.lower():
        chats_involved[message.chat.id] = 1
        await message.edit(f"Module **echo** was enabled in {chat_name}")
    elif "disable" in data.lower() or "false" in data.lower():
        chats_involved[message.chat.id] = 0
        loop = False
        await message.edit(f"Module **echo** was disabled in {chat_name}")
    elif ("loop" in data.lower() or "kill" in data.lower()) and "YES" in data:
        loop = True
        await message.edit(f"**Loop** mode of **echo** is **activated**! Run, fools!")
    elif "loop" in data.lower() or "kill" in data.lower():
        if loop == True:
            loop = not loop
        await message.edit(f"**Loop** mode is very dangerous and can get you **BANNED**, to confirm activation run: ```{command_prefix}echo loop YES```")
    try:
        if chats_involved[message.chat.id] == 0 and loop:
            await message.reply(f"Not really, you forgot to enable **echo**, genius... run: ```{command_prefix}echo true```")
    except KeyError:
        pass  # TODO log some info or warning about chat not being in dictionary yet
    print(chats_involved)
    print(message.chat.id)
    # print(loop)


@user.on_message()
async def execute_echo(client, message):
    global loop
    if message.chat.id not in chats_involved:
        chats_involved[message.chat.id] = 0
    if chats_involved[message.chat.id] == 1:
        if message.text != f'{command_prefix}echo':
            if message.sticker is not None:
                while loop:
                    await message.reply_sticker(message.sticker.file_id)
                await message.reply_sticker(message.sticker.file_id)
            elif message.text is not None:
                print(loop)
                while loop:
                    await message.reply(message.text)
                await message.reply(message.text)
                # await message.reply(message)  # FOR DEBUG
Content of help module:
from pyrogram import filters

from modules.clients.main_user import user, command_prefix

commands = {
    "echo": f"""
**==Repeat messages after others==**
Usage: ```{command_prefix}echo [option]```
Options:
    true, enable : activate echo mode
    false, disable : deactivate echo mode
    loop, kill : repeats all messages it can see indefinitely,
                 requires further confirmation for your account's
                 safety but can be bypassed by confirming it ahead of time"""
}


# @user.on_message(filters.command('help', command_prefix))
@user.on_message()
async def command_help(client, message) -> None:
    data = str(message.text)
    for command in commands:
        await message.edit("TEST TEST!")
Content of "main_user" that's being imported in both cases:
from pyrogram import Client
user = Client("LuxTenebris")
command_prefix = '#'
Does anyone have any idea why it wouldn't work like I expected it to? I am really stuck on this one.
It was suggested that I use Smart Plugins for a modular system instead of my solution, which solves it.
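For anyone hitting the same issue, a minimal Smart Plugins layout looks roughly like the following. This is a sketch assuming Pyrogram 1.x; the plugins package name is an arbitrary choice, not taken from the code above. Each handler module lives inside that package and decorates functions with @Client.on_message, and the Client is simply pointed at the package:

# main.py
from pyrogram import Client

# Pyrogram discovers and registers handlers from every module under ./plugins
user = Client("LuxTenebris", plugins=dict(root="plugins"))
user.run()

# plugins/help.py
from pyrogram import Client, filters

@Client.on_message(filters.command('help', '#'))
async def command_help(client, message):
    await message.edit("TEST TEST!")

With this layout there are no cross-imports between command modules, so the import-order problem disappears.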
I created a simple Django Channels consumer that should connect to an external source, retrieve data and send it to the client. So: the user opens the page > the consumer connects to the external service and gets the data > the data is sent to the websocket.
Here is my code:
import json
import time
import asyncio

from channels.generic.websocket import WebsocketConsumer, AsyncConsumer, AsyncJsonWebsocketConsumer
from binance.client import Client
from binance.websockets import BinanceSocketManager

client = Client('', '')
trades = client.get_recent_trades(symbol='BNBBTC')
bm = BinanceSocketManager(client)


class EchoConsumer(AsyncJsonWebsocketConsumer):
    async def connect(self):
        await self.accept()
        await self.send_json('test')
        bm.start_trade_socket('BNBBTC', self.process_message)
        bm.start()

    def process_message(self, message):
        JSON1 = json.dumps(message)
        JSON2 = json.loads(JSON1)
        # define variables
        Rate = JSON2['p']
        Quantity = JSON2['q']
        Symbol = JSON2['s']
        Order = JSON2['m']
        asyncio.create_task(self.send_json(Rate))
        print(Rate)
This code works when I open one page; if I try to open a new window with a new account, though, it will throw the following error:
File "C:\Users\User\Desktop\Heroku\github\master\myapp\consumers.py", line 54, in connect
bm.start()
File "C:\Users\User\lib\threading.py", line 843, in start
raise RuntimeError("threads can only be started once")
threads can only be started once
I'm new to Channels, so this is a noob question, but how can I fix this problem? What I wanted to do was: one user opens the page and gets the data, another user opens the page and also gets the data. Is there no way to do that? Or am I simply misunderstanding how Django Channels and websockets work?
Do you really need a secondary thread?

class EchoConsumer(AsyncJsonWebsocketConsumer):

    symbol = ''

    async def connect(self):
        self.symbol = 'BNBBTC'
        # or, more probably, retrieve the value for "symbol" from query_string
        # so the client can specify which symbol he's interested in:
        # socket = new WebSocket("ws://.../?symbol=BNBBTC");
        await self.accept()

    async def process_message(self, message):
        # PSEUDO-CODE BELOW !
        if self.symbol == message['symbol']:
            await self.send({
                'type': 'websocket.send',
                'text': json.dumps(message),
            })
For extra flexibility, you might also accept a list of symbols from the client instead:
//HTML
socket = new WebSocket("ws://.../?symbols=XXX,YYY,ZZZ");
then (in the consumer):
class EchoConsumer(AsyncJsonWebsocketConsumer):

    symbols = []

    async def connect(self):
        # here we need to parse "?symbols=XXX,YYY,ZZZ" ...
        # the code below has been stolen from another project of mine and should be suitably adapted
        params = urllib.parse.parse_qs(self.scope.get('query_string', b'').decode('utf-8'))
        try:
            self.symbols = json.loads(params.get('symbols', ['[]'])[0])
        except:
            self.symbols = []

    def process_message(self, message):
        if message['symbol'] in self.symbols:
            ...
I'm no Django developer, but if I understand correctly, the connect function is being called more than once, and bm.start references the same thread, most likely made in bm.start_trade_socket (or somewhere else in connect). In conclusion: when bm.start is called a thread is started, and when it is called again you get that error.
Per the threading docs, start() starts the thread's activity. It should be called at most once per thread object, and it will always raise a RuntimeError if called more than once on the same thread object. You have made a global BinanceSocketManager object, bm.
Please refer to the code below; it may help you:
from channels.generic.websocket import WebsocketConsumer, AsyncConsumer, AsyncJsonWebsocketConsumer
from binance.client import Client
import json
from binance.websockets import BinanceSocketManager
import time
import asyncio


class EchoConsumer(AsyncJsonWebsocketConsumer):
    client = Client('', '')
    trades = client.get_recent_trades(symbol='BNBBTC')
    bm = BinanceSocketManager(client)

    async def connect(self):
        await self.accept()
        await self.send_json('test')
        self.bm.start_trade_socket('BNBBTC', self.process_message)
        self.bm.start()

    def process_message(self, message):
        JSON1 = json.dumps(message)
        JSON2 = json.loads(JSON1)
        # define variables
        Rate = JSON2['p']
        Quantity = JSON2['q']
        Symbol = JSON2['s']
        Order = JSON2['m']
        asyncio.create_task(self.send_json(Rate))
        print(Rate)
This downloads updated fasta files (protein sequences) from a database. I've gotten this to work faster using asyncio compared to requests; however, I'm not convinced the downloads are actually happening asynchronously.
import os
import aiohttp
import aiofiles
import asyncio

folder = '~/base/fastas/proteomes/'

upos = {'UP000005640': 'Human_Homo_sapien',
        'UP000002254': 'Dog_Boxer_Canis_Lupus_familiaris',
        'UP000002311': 'Yeast_Saccharomyces_cerevisiae',
        'UP000000589': 'Mouse_Mus_musculus',
        'UP000006718': 'Monkey_Rhesus_macaque_Macaca_mulatta',
        'UP000009130': 'Monkey_Cynomolgus_Macaca_fascicularis',
        'UP000002494': 'Rat_Rattus_norvegicus',
        'UP000000625': 'Escherichia_coli',
        }

# https://www.uniprot.org/uniprot/?query=proteome:UP000005640&format=fasta Example link
startline = r'https://www.uniprot.org/uniprot/?query=proteome:'
endline = r'&format=fasta&include=False'  # include is true to include isoforms, make false for only canonical sequences


async def fetch(session, link, folderlocation, name):
    async with session.get(link, timeout=0) as response:
        try:
            file = await aiofiles.open(folderlocation, mode='w')
            await file.write(await response.text())
            await file.close()
            print(name, 'ended')
        except FileNotFoundError:
            # target folder is missing: create it, then retry the write
            loc = ''.join((r'/'.join((folderlocation.split('/')[:-1])), '/'))
            command = ' '.join(('mkdir -p', loc))
            os.system(command)
            file = await aiofiles.open(folderlocation, mode='w')
            await file.write(await response.text())
            await file.close()
            print(name, 'ended')


async def rfunc():
    async with aiohttp.ClientSession() as session:
        for upo, name in upos.items():
            print(name, 'started')
            link = ''.join((startline, upo, endline))
            folderlocation = ''.join((folder, name, '.fasta'))
            await fetch(session, link, folderlocation, name)


loop = asyncio.get_event_loop()
loop.run_until_complete(rfunc())
My output from running this:
In [5]: runfile('~/base/Fasta Proteome Updater.py')
Human_Homo_sapien started
Human_Homo_sapien ended
Dog_Boxer_Canis_Lupus_familiaris started
Dog_Boxer_Canis_Lupus_familiaris ended
Yeast_Saccharomyces_cerevisiae started
Yeast_Saccharomyces_cerevisiae ended
Mouse_Mus_musculus started
Mouse_Mus_musculus ended
Monkey_Rhesus_macaque_Macaca_mulatta started
Monkey_Rhesus_macaque_Macaca_mulatta ended
Monkey_Cynomolgus_Macaca_fascicularis started
Monkey_Cynomolgus_Macaca_fascicularis ended
Rat_Rattus_norvegicus started
Rat_Rattus_norvegicus ended
Escherichia_coli started
Escherichia_coli ended
The printed output seems to signify the downloads are happening one at a time; is there something wrong here?
You are looping over the items to download and waiting (await) for each item to finish. To make them all happen at the same time, you need to schedule all downloads for execution at once, e.g. using asyncio.gather.
Then your code could look like this:
async def rfunc():
    async with aiohttp.ClientSession() as session:
        await asyncio.gather(
            *[
                fetch(
                    session,
                    ''.join((startline, upo, endline)),
                    ''.join((folder, name, '.fasta')),
                    name,
                ) for upo, name in upos.items()
            ]
        )


loop = asyncio.get_event_loop()
loop.run_until_complete(rfunc())
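As a side note, on Python 3.7+ the last two lines can be replaced with asyncio.run, which creates and closes the event loop for you:

asyncio.run(rfunc())  # Python 3.7+ alternative to driving the loop manually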
I'm currently building a Discord bot that uploads a file to Google Drive when a command is used. However, the command methods are asynchronous and the files().create() method is synchronous, and calling it simply causes the bot to get stuck.
@bot.command(pass_context=True)
@commands.has_role(name='Archivist')
async def archivechannel(ctx, channel: discord.Channel, filename):
    await bot.say("Archiving....")
    try:
        with open("{}.txt".format(filename), "w") as openfile:
            lines = []
            async for message in bot.logs_from(channel, limit=500, reverse=True):
                if not (message.author.bot or message.content.startswith("]")):
                    print("<{}> {}#{}: {}".format(message.timestamp, message.author.name, message.author.discriminator, message.content))
                    lines.append("<{}> {}#{}: {}\n".format(message.timestamp, message.author.name, message.author.discriminator, message.content))
            openfile.writelines(lines)
        await bot.say("Archive Complete!")
    except IOError:
        await bot.say("Error: IOException")
    await bot.say("Uploading....")
    metadata = {'name': "{}.txt".format(filename), 'mimetype': 'application/vnd.google.apps.document', 'parents': folderID}
    media = MediaFileUpload('{}.txt'.format(filename), mimetype='text/plain')
    res = service.files().create(body=metadata, media_body=media).execute()
    print(res)
The line causing the problem is:
res = service.files().create(body=metadata, media_body=media).execute()
The bot just gets stuck after saying "Uploading...." and doesn't upload anything.
Does anyone know how I can fix this?
Edit: Neither using a ThreadPoolExecutor nor the default executor has worked, nor has setting up a synchronous function that runs the create and execute methods, taking in the metadata and media parameters.
Edit 2: After doing some more screwing around, it turns out the problem is now in the following line:
media = MediaFileUpload('{}.txt'.format(filename), mimetype='text/plain')
However from my testing, for the question I asked, Patrick is correct and I have marked the question as answered.
You can run your blocking operation in another thread, while your asynchronous code waits for it to complete without blocking the event loop.
We'll create a new ThreadPoolExecutor, then use run_in_executor to use it to run the task.
from concurrent.futures import ThreadPoolExecutor


def upload_file(metadata, media):
    return service.files().create(body=metadata, media_body=media).execute()


@bot.command(pass_context=True)
@commands.has_role(name='Archivist')
async def archivechannel(ctx, channel: discord.Channel, filename):
    await bot.say("Archiving....")
    try:
        with open("{}.txt".format(filename), "w") as openfile:
            lines = []
            async for message in bot.logs_from(channel, limit=500, reverse=True):
                if not (message.author.bot or message.content.startswith("]")):
                    print("<{}> {}#{}: {}".format(message.timestamp, message.author.name, message.author.discriminator, message.content))
                    lines.append("<{}> {}#{}: {}\n".format(message.timestamp, message.author.name, message.author.discriminator, message.content))
            openfile.writelines(lines)
        await bot.say("Archive Complete!")
    except IOError:
        await bot.say("Error: IOException")
    await bot.say("Uploading....")
    metadata = {'name': "{}.txt".format(filename), 'mimetype': 'application/vnd.google.apps.document', 'parents': folderID}
    media = MediaFileUpload('{}.txt'.format(filename), mimetype='text/plain')
    with ThreadPoolExecutor() as pool:
        # run the blocking Drive upload in a worker thread so the event loop stays free
        res = await bot.loop.run_in_executor(
            pool, upload_file, metadata, media
        )
    print(res)
You may also be able to use the default executor by removing the context manager and passing None instead of pool. I'm having trouble finding information about the default executor, though, so you may want to experiment.
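That variant would look roughly like this (a sketch of the same call, not separately tested against discord.py):

# passing None uses the event loop's default executor (a shared ThreadPoolExecutor)
res = await bot.loop.run_in_executor(None, upload_file, metadata, media)
print(res)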
I need to call a Celery task for each gRPC request and return the result.
In the default gRPC implementation, each request is processed in a separate thread from a thread pool.
In my case, the server is supposed to process ~400 requests per second in batch mode. So one request may have to wait 1 second for the result due to the batch processing, which means the size of the thread pool must be larger than 400 to avoid blocking.
Can this be done asynchronously?
Thanks a lot.
class EventReporting(ss_pb2.BetaEventReportingServicer, ss_pb2.BetaDeviceMgtServicer):
    def ReportEvent(self, request, context):
        res = tasks.add.delay(1, 2)
        result = res.get()  # <- here I have to block
        return ss_pb2.GeneralReply(message='Hello, %s!' % result.message)
As noted by @Michael in a comment, as of version 1.32, gRPC now supports asyncio in its Python API. If you're using an earlier version, you can still use the asyncio API via the experimental API: from grpc.experimental import aio. An asyncio hello world example has also been added to the gRPC repo. The following code is a copy of the example server:
import logging
import asyncio
from grpc import aio

import helloworld_pb2
import helloworld_pb2_grpc


class Greeter(helloworld_pb2_grpc.GreeterServicer):
    async def SayHello(self, request, context):
        return helloworld_pb2.HelloReply(message='Hello, %s!' % request.name)


async def serve():
    server = aio.server()
    helloworld_pb2_grpc.add_GreeterServicer_to_server(Greeter(), server)
    listen_addr = '[::]:50051'
    server.add_insecure_port(listen_addr)
    logging.info("Starting server on %s", listen_addr)
    await server.start()
    await server.wait_for_termination()


if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    asyncio.run(serve())
See my other answer for how to implement the client.
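For completeness, a minimal asyncio client against the same Greeter service might look like the following (a sketch along the lines of the hello world example, not copied from the linked answer):

import asyncio
from grpc import aio

import helloworld_pb2
import helloworld_pb2_grpc


async def run():
    # open an async channel and await the RPC call
    async with aio.insecure_channel('localhost:50051') as channel:
        stub = helloworld_pb2_grpc.GreeterStub(channel)
        response = await stub.SayHello(helloworld_pb2.HelloRequest(name='you'))
        print("Greeter client received: " + response.message)


if __name__ == '__main__':
    asyncio.run(run())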
It can be done asynchronously if your call to res.get can be done asynchronously (if it is defined with the async keyword).
While grpc.server says it requires a futures.ThreadPoolExecutor, it will actually work with any futures.Executor that calls the behaviors submitted to it on some thread other than the one on which they were passed. Were you to pass to grpc.server a futures.Executor implemented by you that only used one thread to carry out four hundred (or more) concurrent calls to EventReporting.ReportEvent, your server should avoid the kind of blocking that you describe.
In my opinion, a good and simple way to implement an async gRPC server, much like aiohttp does for HTTP, is the following:
import asyncio
from concurrent import futures
import functools
import inspect
import threading

import grpc
from grpc import _server


def _loop_mgr(loop: asyncio.AbstractEventLoop):
    asyncio.set_event_loop(loop)
    loop.run_forever()

    # If we reach here, the loop was stopped.
    # We should gather any remaining tasks and finish them.
    pending = asyncio.Task.all_tasks(loop=loop)
    if pending:
        loop.run_until_complete(asyncio.gather(*pending))


class AsyncioExecutor(futures.Executor):

    def __init__(self, *, loop=None):
        super().__init__()
        self._shutdown = False
        self._loop = loop or asyncio.get_event_loop()
        self._thread = threading.Thread(target=_loop_mgr, args=(self._loop,),
                                        daemon=True)
        self._thread.start()

    def submit(self, fn, *args, **kwargs):
        if self._shutdown:
            raise RuntimeError('Cannot schedule new futures after shutdown')

        if not self._loop.is_running():
            raise RuntimeError("Loop must be started before any function can "
                               "be submitted")

        if inspect.iscoroutinefunction(fn):
            coro = fn(*args, **kwargs)
            return asyncio.run_coroutine_threadsafe(coro, self._loop)
        else:
            func = functools.partial(fn, *args, **kwargs)
            return self._loop.run_in_executor(None, func)

    def shutdown(self, wait=True):
        self._loop.stop()
        self._shutdown = True
        if wait:
            self._thread.join()

# --------------------------------------------------------------------------- #


async def _call_behavior(rpc_event, state, behavior, argument, request_deserializer):
    context = _server._Context(rpc_event, state, request_deserializer)
    try:
        return await behavior(argument, context), True
    except Exception as e:  # pylint: disable=broad-except
        with state.condition:
            if e not in state.rpc_errors:
                details = 'Exception calling application: {}'.format(e)
                _server.logging.exception(details)
                _server._abort(state, rpc_event.operation_call,
                               _server.cygrpc.StatusCode.unknown, _server._common.encode(details))
        return None, False


async def _take_response_from_response_iterator(rpc_event, state, response_iterator):
    try:
        return await response_iterator.__anext__(), True
    except StopAsyncIteration:
        return None, True
    except Exception as e:  # pylint: disable=broad-except
        with state.condition:
            if e not in state.rpc_errors:
                details = 'Exception iterating responses: {}'.format(e)
                _server.logging.exception(details)
                _server._abort(state, rpc_event.operation_call,
                               _server.cygrpc.StatusCode.unknown, _server._common.encode(details))
        return None, False


async def _unary_response_in_pool(rpc_event, state, behavior, argument_thunk,
                                  request_deserializer, response_serializer):
    argument = argument_thunk()
    if argument is not None:
        response, proceed = await _call_behavior(rpc_event, state, behavior,
                                                 argument, request_deserializer)
        if proceed:
            serialized_response = _server._serialize_response(
                rpc_event, state, response, response_serializer)
            if serialized_response is not None:
                _server._status(rpc_event, state, serialized_response)


async def _stream_response_in_pool(rpc_event, state, behavior, argument_thunk,
                                   request_deserializer, response_serializer):
    argument = argument_thunk()
    if argument is not None:
        # Notice this calls the normal `_call_behavior` not the awaitable version.
        response_iterator, proceed = _server._call_behavior(
            rpc_event, state, behavior, argument, request_deserializer)
        if proceed:
            while True:
                response, proceed = await _take_response_from_response_iterator(
                    rpc_event, state, response_iterator)
                if proceed:
                    if response is None:
                        _server._status(rpc_event, state, None)
                        break
                    else:
                        serialized_response = _server._serialize_response(
                            rpc_event, state, response, response_serializer)
                        print(response)
                        if serialized_response is not None:
                            print("Serialized Correctly")
                            proceed = _server._send_response(rpc_event, state,
                                                             serialized_response)
                            if not proceed:
                                break
                        else:
                            break
                else:
                    break


# Monkey-patch the private grpc server internals to use the awaitable handlers above.
_server._unary_response_in_pool = _unary_response_in_pool
_server._stream_response_in_pool = _stream_response_in_pool


if __name__ == '__main__':
    server = grpc.server(AsyncioExecutor())
    # Add Servicer and Start Server Here
link to original:
https://gist.github.com/seglberg/0b4487b57b4fd425c56ad72aba9971be