I need to call a Celery task for each gRPC request and return the result.
In the default gRPC implementation, each request is processed in a separate thread from a thread pool.
In my case, the server is supposed to process ~400 requests per second in batch mode. So one request may have to wait 1 second for the result because of the batching, which means the thread pool would need more than 400 threads to avoid blocking.
Can this be done asynchronously?
Thanks a lot.
class EventReporting(ss_pb2.BetaEventReportingServicer, ss_pb2.BetaDeviceMgtServicer):
    def ReportEvent(self, request, context):
        res = tasks.add.delay(1, 2)
        result = res.get()  # <-- here I have to block
        return ss_pb2.GeneralReply(message='Hello, %s!' % result.message)
As noted by @Michael in a comment, as of version 1.32, gRPC now supports asyncio in its Python API. If you're using an earlier version, you can still use the asyncio API via the experimental API: from grpc.experimental import aio. An asyncio hello world example has also been added to the gRPC repo. The following code is a copy of the example server:
import logging
import asyncio

from grpc import aio

import helloworld_pb2
import helloworld_pb2_grpc


class Greeter(helloworld_pb2_grpc.GreeterServicer):

    async def SayHello(self, request, context):
        return helloworld_pb2.HelloReply(message='Hello, %s!' % request.name)


async def serve():
    server = aio.server()
    helloworld_pb2_grpc.add_GreeterServicer_to_server(Greeter(), server)
    listen_addr = '[::]:50051'
    server.add_insecure_port(listen_addr)
    logging.info("Starting server on %s", listen_addr)
    await server.start()
    await server.wait_for_termination()


if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    asyncio.run(serve())
See my other answer for how to implement the client.
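For reference, here is a minimal sketch of the matching asyncio client, adapted from the same hello-world example in the gRPC repo (the host and port are the example's defaults):

import asyncio
import logging

from grpc import aio

import helloworld_pb2
import helloworld_pb2_grpc


async def run():
    # aio.insecure_channel returns an async context manager
    async with aio.insecure_channel('localhost:50051') as channel:
        stub = helloworld_pb2_grpc.GreeterStub(channel)
        response = await stub.SayHello(helloworld_pb2.HelloRequest(name='you'))
    print("Greeter client received: " + response.message)


if __name__ == '__main__':
    logging.basicConfig()
    asyncio.run(run())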
It can be done asynchronously if your call to res.get can be done asynchronously (if it is defined with the async keyword).
While grpc.server says it requires a futures.ThreadPoolExecutor, it will actually work with any futures.Executor that calls the behaviors submitted to it on some thread other than the one on which they were passed. Were you to pass to grpc.server a futures.Executor implemented by you that only used one thread to carry out four hundred (or more) concurrent calls to EventReporting.ReportEvent, your server should avoid the kind of blocking that you describe.
In my opinion this is a nice, simple implementation of an async gRPC server, similar to an aiohttp-based HTTP server.
import asyncio
from concurrent import futures
import functools
import inspect
import threading

import grpc  # needed for grpc.server() at the bottom
from grpc import _server
def _loop_mgr(loop: asyncio.AbstractEventLoop):
    asyncio.set_event_loop(loop)
    loop.run_forever()

    # If we reach here, the loop was stopped.
    # We should gather any remaining tasks and finish them.
    pending = asyncio.Task.all_tasks(loop=loop)
    if pending:
        loop.run_until_complete(asyncio.gather(*pending))


class AsyncioExecutor(futures.Executor):

    def __init__(self, *, loop=None):
        super().__init__()
        self._shutdown = False
        self._loop = loop or asyncio.get_event_loop()
        self._thread = threading.Thread(target=_loop_mgr, args=(self._loop,),
                                        daemon=True)
        self._thread.start()

    def submit(self, fn, *args, **kwargs):
        if self._shutdown:
            raise RuntimeError('Cannot schedule new futures after shutdown')

        if not self._loop.is_running():
            raise RuntimeError("Loop must be started before any function can "
                               "be submitted")

        if inspect.iscoroutinefunction(fn):
            coro = fn(*args, **kwargs)
            return asyncio.run_coroutine_threadsafe(coro, self._loop)
        else:
            func = functools.partial(fn, *args, **kwargs)
            return self._loop.run_in_executor(None, func)

    def shutdown(self, wait=True):
        self._loop.stop()
        self._shutdown = True
        if wait:
            self._thread.join()


# --------------------------------------------------------------------------- #


async def _call_behavior(rpc_event, state, behavior, argument, request_deserializer):
    context = _server._Context(rpc_event, state, request_deserializer)
    try:
        return await behavior(argument, context), True
    except Exception as e:  # pylint: disable=broad-except
        with state.condition:
            if e not in state.rpc_errors:
                details = 'Exception calling application: {}'.format(e)
                _server.logging.exception(details)
                _server._abort(state, rpc_event.operation_call,
                               _server.cygrpc.StatusCode.unknown,
                               _server._common.encode(details))
        return None, False


async def _take_response_from_response_iterator(rpc_event, state, response_iterator):
    try:
        return await response_iterator.__anext__(), True
    except StopAsyncIteration:
        return None, True
    except Exception as e:  # pylint: disable=broad-except
        with state.condition:
            if e not in state.rpc_errors:
                details = 'Exception iterating responses: {}'.format(e)
                _server.logging.exception(details)
                _server._abort(state, rpc_event.operation_call,
                               _server.cygrpc.StatusCode.unknown,
                               _server._common.encode(details))
        return None, False


async def _unary_response_in_pool(rpc_event, state, behavior, argument_thunk,
                                  request_deserializer, response_serializer):
    argument = argument_thunk()
    if argument is not None:
        response, proceed = await _call_behavior(rpc_event, state, behavior,
                                                 argument, request_deserializer)
        if proceed:
            serialized_response = _server._serialize_response(
                rpc_event, state, response, response_serializer)
            if serialized_response is not None:
                _server._status(rpc_event, state, serialized_response)


async def _stream_response_in_pool(rpc_event, state, behavior, argument_thunk,
                                   request_deserializer, response_serializer):
    argument = argument_thunk()
    if argument is not None:
        # Notice this calls the normal `_call_behavior`, not the awaitable version.
        response_iterator, proceed = _server._call_behavior(
            rpc_event, state, behavior, argument, request_deserializer)
        if proceed:
            while True:
                response, proceed = await _take_response_from_response_iterator(
                    rpc_event, state, response_iterator)
                if proceed:
                    if response is None:
                        _server._status(rpc_event, state, None)
                        break
                    else:
                        serialized_response = _server._serialize_response(
                            rpc_event, state, response, response_serializer)
                        print(response)
                        if serialized_response is not None:
                            print("Serialized Correctly")
                            proceed = _server._send_response(rpc_event, state,
                                                             serialized_response)
                            if not proceed:
                                break
                        else:
                            break
                else:
                    break


_server._unary_response_in_pool = _unary_response_in_pool
_server._stream_response_in_pool = _stream_response_in_pool


if __name__ == '__main__':
    server = grpc.server(AsyncioExecutor())
    # Add Servicer and Start Server Here
link to original:
https://gist.github.com/seglberg/0b4487b57b4fd425c56ad72aba9971be
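For illustration only, here is a rough sketch (not part of the original gist) of how the question's ReportEvent handler could be written as a coroutine under an executor like this: instead of blocking on res.get(), the Celery AsyncResult is polled with ready(), so a single event-loop thread can interleave many in-flight RPCs. ss_pb2 and tasks are the question's own modules, and the polling interval is arbitrary.

import asyncio


class EventReporting(ss_pb2.BetaEventReportingServicer, ss_pb2.BetaDeviceMgtServicer):
    async def ReportEvent(self, request, context):
        res = tasks.add.delay(1, 2)
        # poll the Celery result instead of blocking on res.get(),
        # so the loop is free to serve other requests in the meantime
        while not res.ready():
            await asyncio.sleep(0.05)
        return ss_pb2.GeneralReply(message='Hello, %s!' % res.result)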
Related
This is the base consumer class I'm using for creating new consumers. It works fine for an "enable.auto.commit": True consumer. But when I create a consumer with enable.auto.commit=False and one of the (KeyDeserializationError, ValueDeserializationError) exceptions occurs, I need to manually commit that message in the except block. Since this base class will also be used for auto-commit=True consumers, the line self.consumer.commit() gets called for those consumers as well.
Does calling commit() on an auto.commit=True consumer cause any issue internally? (It seemed fine when I tried it locally.)
What is the ideal way to handle (KeyDeserializationError, ValueDeserializationError) exceptions when auto.commit=False?
import logging
from typing import Any

# Imports assumed here: DeserializingConsumer and its errors come from confluent-kafka.
from confluent_kafka import DeserializingConsumer, KafkaException
from confluent_kafka.error import KeyDeserializationError, ValueDeserializationError

logger = logging.getLogger(__name__)


class KafkaConsumer(object):
    """Wrapper over Kafka Consumer"""

    def __init__(self, topics: list[str], **kwargs: Any):
        config = {
            **kwargs,
        }
        self.consumer = DeserializingConsumer(config)
        self.consumer.subscribe(topics=topics)

    def consume(self, poll_timeout_secs: float = 1.0):
        try:
            while True:
                try:
                    msg = self.consumer.poll(timeout=poll_timeout_secs)
                except (KeyDeserializationError, ValueDeserializationError) as err:
                    self.consumer.commit()
                    continue
                if msg is None:
                    continue
                if msg.error():
                    raise KafkaException(msg.error())
                else:
                    yield msg
        except:
            self.consumer.close()


# create a consumer object with auto.commit=True/False
kafka_consumer = KafkaConsumer(topics=topics, **kwargs)  # pass "enable.auto.commit": False for manual commit mode

# Actual consuming business logic
for message in kafka_consumer.consume():
    try:
        event = message.value()
        logger.info(f"message {event}")
    except Exception as e:
        logger.exception(f'Unable to consume data from kafka {e}')
    finally:
        pass
        # kafka_consumer.consumer.commit(message=message)  # in case of manual-commit consumer mode
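For what it's worth, one way to address the second point is to record the enable.auto.commit setting in the wrapper and only issue the manual commit() when auto-commit is off. Below is a minimal sketch of that idea, reusing the class above; the default of True mirrors librdkafka's default, and whether this is the "ideal" handling is left open.

class KafkaConsumer(object):
    """Wrapper over Kafka Consumer (sketch: commit manually only when auto-commit is off)."""

    def __init__(self, topics: list[str], **kwargs: Any):
        config = {**kwargs}
        self.auto_commit = config.get("enable.auto.commit", True)
        self.consumer = DeserializingConsumer(config)
        self.consumer.subscribe(topics=topics)

    def consume(self, poll_timeout_secs: float = 1.0):
        while True:
            try:
                msg = self.consumer.poll(timeout=poll_timeout_secs)
            except (KeyDeserializationError, ValueDeserializationError):
                if not self.auto_commit:
                    # skip past the undeserializable record by committing current offsets
                    self.consumer.commit()
                continue
            if msg is None:
                continue
            if msg.error():
                raise KafkaException(msg.error())
            yield msg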
There is something funny going on. I have the following external function.
from dns.resolver import Resolver  # dnspython's synchronous resolver


def get_A_records(domain, default="A"):
    """
    Get the A records of a domain
    """
    try:
        resolver = Resolver()
        a_record = resolver.query(domain, default)
        a_records = list()
        for r in a_record:
            a_dict = dict()
            a_dict["domain"] = domain
            a_dict["a_ip"] = r.address
            a_dict["rtype"] = default
            a_records.append(a_dict)
        return a_records
    except Exception as e:
        print(e)
        return {}
When I import this function and run it in the interactive shell, it completes in less than a second.
The function is meant to be used in Celery. But when I invoke it in a Celery task,
@app.task
def update_domain(domain):
    try:
        ips_v4 = get_A_records(domain)
    except Exception as e:
        print("Failure... recorded!")
I get an error (the resolution times out).
Why is that, and what is an alternative? Thanks!
I finally found a working solution, in case someone faces the same problem.
Use the asynchronous DNS module that dnspython provides.
from dns import asyncresolver
import dns


async def async_get_A_records(domain, default):
    """
    Asynchronous version of get_A_records
    """
    a_record = await asyncresolver.resolve(domain, default)
    a_records = list()
    for r in a_record:
        a_dict = dict()
        a_dict["domain"] = domain
        a_dict["a_ip"] = r.address
        a_dict["rtype"] = default
        a_records.append(a_dict)
    return a_records
Now, in the Celery app.task:
import asyncio


async def query_domain_a(domain, rtype):
    result = await async_get_A_records(domain, rtype)
    print(result)


@app.task
def update_domain(domain):
    try:
        asyncio.run(query_domain_a(domain, "A"))
        asyncio.run(query_domain_a(domain, "AAAA"))
    except Domain.DoesNotExist as E:
        print(E)
    except Exception as e:
        print(e)
And you won't see timeouts.
HEADS-UP: the Celery docs really discourage using asyncio inside tasks, since Celery tasks themselves are not asynchronous.
The goal
I want to make regular HTTP requests to a REST API. The requests happen at an interval ranging from a few seconds up to multiple hours, depending on the user input. I want to keep a single connection alive and close it in a smart way.
The questions
Is there an existing library that provides features similar to what I wrote with the class SessionOneToN?
Would it be possible to use a single session of type aiohttp.ClientSession that runs forever and is never closed?
What I have tried
What I have tried does work, but I wonder if there is an established library that can achieve these goals.
My code
My application uses the event loop module asyncio, and the HTTP module aiohttp.
My application opens a single session of type aiohttp.ClientSession and shares it between multiple asynchronous callers.
The callers can make their requests concurrently and asynchronously.
Once all callers have received their responses, the aiohttp.ClientSession is closed. A new aiohttp.ClientSession is opened as necessary when a caller makes a new request.
import aiohttp
import asyncio
from contextlib import asynccontextmanager
import sys

url = 'http://stackoverflow.com/questions/64305548/sharing-an-aiohttp-clientsession-between-multiple-asynchronous-callers'


# The callers look like so:
async def request():
    async with session() as s:
        async with s.get(url) as response:
            return await response.text()


# The session manager looks like so:
class SessionOneToN:
    """Manage one session that is reused by multiple clients, instantiating a new
    session object from the given session_class as required. Provide a context
    manager for accessing the session. The session_class returns a context
    manager when instantiated; this context manager is referred to as the
    internal context manager, and is entered when the first client enters the
    context manager returned by get_context, and is exited when the last client
    exits it."""

    def __init__(self, session_class):
        self.n = 0
        self.session_class = session_class
        self.context = None
        self.aenter_result = None

    async def plus(self):
        self.n += 1
        if self.n == 1:
            # self.context: the internal context manager
            self.context = context = self.session_class()
            # self.aenter_result: the result from entering the internal
            # context manager
            self.aenter_result = await context.__aenter__()
        assert self.aenter_result is not None
        return self.aenter_result

    def minus(self):
        self.n -= 1
        if self.n == 0:
            return True

    def cleanup(self):
        self.context = None
        self.aenter_result = None
    @asynccontextmanager
    async def get_context(self):
        try:
            aenter_result = await self.plus()
            yield aenter_result
        except:
            if self.minus():
                if not await self.context.__aexit__(*sys.exc_info()):
                    self.cleanup()
                    raise
                self.cleanup()
        else:
            if self.minus():
                await self.context.__aexit__(None, None, None)
                self.cleanup()
class SessionOneToNaiohttp:
    def __init__(self):
        self.sotn = SessionOneToN(aiohttp.ClientSession)

    def get_context(self):
        return self.sotn.get_context()


sotnaiohttp = SessionOneToNaiohttp()


def session():
    return sotnaiohttp.get_context()


response_text = asyncio.run(request())
print(response_text[0:10])
I have a script that loops through a range of URLs to pull item locations based on returned JSON data. However, the script takes 60 minutes to run, and 55 minutes of that (per cProfile) is spent waiting for JSON data to load.
I would like to run multiple POST requests at a time to speed this up, and have initially split the URL range into two halves to do this. Where I am getting stuck is how to implement multithreading or asyncio.
Slimmed down code:
import asyncio
import aiohttp

# using globals is not recommended
results = dict()

url = "https://www.website.com/store/ajax/search"
query = "store={}&size=18&query=17360031"


# this is the default URL opener taken from the aiohttp documentation
async def open_url(store, loop=None):
    async with aiohttp.ClientSession(loop=loop) as session:
        async with session.post(url, data={'searchQuery': query.format(store)}) as resp:
            return await resp.json(), store


async def processing(loop=None):
    # the 'global' keyword is needed to write to global variables
    global results
    # one of the simplest ways to parallelize requests is to create the futures,
    # and save each result to the global dict as it becomes ready
    tasks = [open_url(store, loop=loop) for store in range(0, 5)]
    for coro in asyncio.as_completed(tasks, loop=loop):
        try:
            data, store = await coro
            results[store] = data['searchResults']['results'][0]['location']['aisle']
        except (IndexError, KeyError):
            continue


if __name__ == '__main__':
    event_loop = asyncio.new_event_loop()
    event_loop.run_until_complete(processing(loop=event_loop))

    # Print Results
    for store, data in results.items():
        print(store, data)
json:
{u'count': 1,
u'results': [{u'department': {u'name': u'Home', u'storeDeptId': -1},
u'location': {u'aisle': [A], u'detailed': [A.536]},
u'score': u'0.507073'}],
u'totalCount': 1}
Even if you use multithreading or multiprocessing, each thread/process will still block until the JSON data is retrieved. This could speed things up a little, but it's still not your best choice.
Since you're using requests, try grequests, which combines requests with gevent. It lets you define a series of HTTP requests that run asynchronously, and as a result you'll get a huge speed boost. The usage is very simple: just create a list of requests (using grequests.get) and pass it to grequests.map.
Hope this helps!
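For illustration, here is a minimal sketch of that pattern, mirroring the POST request from the question (the URL and payload are the question's placeholders; grequests.post is the POST counterpart of grequests.get):

import grequests

url = "https://www.website.com/store/ajax/search"
stores = range(0, 5)

# build the requests first without sending them
reqs = [
    grequests.post(url, data={'searchQuery': 'store={}&size=18&query=17360031'.format(store)})
    for store in stores
]

# send them concurrently via gevent; responses come back in the same order
for store, resp in zip(stores, grequests.map(reqs)):
    if resp is not None:  # failed requests are returned as None
        print(store, resp.json())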
If you want to parallelize requests (I hope that is what you are asking for), this code snippet will help.
There is a request opener, and 2000 POST requests are sent via aiohttp and asyncio.
Python 3.5 is used.
import asyncio
import aiohttp

# using globals is not recommended
results = dict()

MAX_RETRIES = 5
MATCH_SLEEP_TIME = 3  # it is recommended to move these variables to another file, e.g. constants.py

url = "https://www.website.com/store/ajax/search"
query = "store={}&size=18&query=44159"


# this is the default URL opener taken from the aiohttp documentation
async def open_url(store, semaphore, loop=None):
    for _ in range(MAX_RETRIES):
        with await semaphore:
            try:
                async with aiohttp.ClientSession(loop=loop) as session:
                    async with session.post(url, data={'searchQuery': query.format(store)}) as resp:
                        return await resp.json(), store
            except ConnectionResetError:
                # you can handle more exceptions here, and sleep if they are raised
                await asyncio.sleep(MATCH_SLEEP_TIME, loop=loop)
                continue
    return None


async def processing(semaphore, loop=None):
    # the 'global' keyword is needed to write to global variables
    global results
    # one of the simplest ways to parallelize requests is to create the futures,
    # and save each result to the global dict as it becomes ready
    tasks = [open_url(store, semaphore, loop=loop) for store in range(0, 2000)]
    for coro in asyncio.as_completed(tasks, loop=loop):
        try:
            response = await coro
            if response is None:
                continue
            data, store = response
            results[store] = data['searchResults']['results'][0]['location']['aisle']
        except (IndexError, KeyError):
            continue


if __name__ == '__main__':
    event_loop = asyncio.new_event_loop()
    semaphore = asyncio.Semaphore(50, loop=event_loop)  # count of concurrent requests
    event_loop.run_until_complete(processing(semaphore, loop=event_loop))
I have a web crawler and an HTTP interface for it.
The crawler gets grouped URLs as a dictionary. I need to return a result in the same format, as JSON. But I ran into large memory usage that is not returned to the operating system. How can I implement this without the large memory usage?
Code:
#!/usr/bin/env python
# coding=utf-8
import collections

import tornado.web
import tornado.ioloop
import tornado.queues
import tornado.httpclient


class ResponseError(Exception):
    pass


class Crawler(object):
    client = tornado.httpclient.AsyncHTTPClient()

    def __init__(self, groups, concurrency=10, retries=3, validators=None):
        self.groups = groups
        self.concurrency = concurrency
        self.retries = retries
        self.validators = validators or []

        self.requests = tornado.queues.Queue()
        self.responses = collections.defaultdict(list)

    async def worker(self):
        while True:
            await self.consume()

    async def validate(self, response):
        for validator in self.validators:
            validator(response)

    async def save(self, response):
        self.responses[response.request.group].append(response.body.decode('utf-8'))

    async def consume(self):
        async for request in self.requests:
            try:
                response = await self.client.fetch(request, raise_error=False)
                await self.validate(response)
                await self.save(response)
            except ResponseError:
                if request.retries < self.retries:
                    request.retries += 1
                    await self.requests.put(request)
            finally:
                self.requests.task_done()

    async def produce(self):
        for group, urls in self.groups.items():
            for url in urls:
                request = tornado.httpclient.HTTPRequest(url)
                request.group = group
                request.retries = 0
                await self.requests.put(request)

    async def fetch(self):
        await self.produce()
        for __ in range(self.concurrency):
            tornado.ioloop.IOLoop.current().spawn_callback(self.worker)
        await self.requests.join()


class MainHandler(tornado.web.RequestHandler):
    async def get(self):
        urls = []
        with open('urls') as f:  # mock
            for line in f:
                urls.append(line.strip())

        crawler = Crawler({'default': urls})
        await crawler.fetch()
        self.write(crawler.responses)


if __name__ == '__main__':
    app = tornado.web.Application(
        (tornado.web.url(r'/', MainHandler),), debug=True
    )
    app.listen(8000)
    tornado.ioloop.IOLoop.current().start()
It looks to me like most of the memory usage is devoted to self.responses. Since you seem to be ordering responses by "group" before writing them to a file, I can understand why you do it this way. One idea is to store them in a database (MySQL, MongoDB, or whatever) with the "group" as a column or field value in the database record.
The database might be the final destination of your data, or else it might be a temporary place to store the data until crawler.fetch completes. Then, query all the data from the database, ordered by "group", and write it to the file.
This doesn't solve the problem, it just means that the database process is responsible for most of your memory usage, instead of the Python process. This may be preferable for you, however.
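As a minimal sketch of that idea (using the standard library's sqlite3 purely for illustration; the table and column names are made up), the save method could write each response straight to a database instead of accumulating it in self.responses:

import sqlite3

db = sqlite3.connect('responses.db')
db.execute('CREATE TABLE IF NOT EXISTS responses (grp TEXT, body TEXT)')


class DbCrawler(Crawler):
    async def save(self, response):
        # blocking insert; fine for a sketch, a real server might offload this to a thread
        db.execute('INSERT INTO responses (grp, body) VALUES (?, ?)',
                   (response.request.group, response.body.decode('utf-8')))
        db.commit()


# after crawler.fetch() completes, stream the rows back ordered by group:
# for grp, body in db.execute('SELECT grp, body FROM responses ORDER BY grp'):
#     ...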