Asyncio coroutine check - Python

I have this logging decorator in existing code. There is duplication because of the if asyncio.iscoroutinefunction(func) check: the async and sync branches are nearly identical.
Is there a better way to avoid this duplication while still keeping the async wrapper?
def log_it():
    def decorator(func):
        if asyncio.iscoroutinefunction(func):
            async def wrapper(*args, **kwargs):
                name = func.__name__
                event = args[0].__class__.__name__
                try:
                    result = await func(*args, **kwargs)
                except Exception as e:
                    LOGGER.error('Error in processing: {} - {}'.format(name, event))
                    LOGGER.exception(e)
                    raise e
                return result
        else:
            def wrapper(*args, **kwargs):
                name = func.__name__
                event = args[0].__class__.__name__
                try:
                    result = func(*args, **kwargs)
                except Exception as e:
                    LOGGER.error('Error in processing: {} - {}'.format(name, event))
                    LOGGER.exception(e)
                    raise e
                return result
        return wrapper
    return decorator
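
One way to cut the duplication down, as a minimal sketch (functools.wraps and the log_failure helper are my additions, not part of the original code): an async def is still needed for coroutines, so the two thin wrappers stay, but the shared logging moves into a single helper.

import asyncio
import functools
import logging

LOGGER = logging.getLogger(__name__)

def log_it():
    def decorator(func):
        def log_failure(name, event, exc):
            # Shared logging used by both the sync and async paths.
            LOGGER.error('Error in processing: {} - {}'.format(name, event))
            LOGGER.exception(exc)

        if asyncio.iscoroutinefunction(func):
            @functools.wraps(func)
            async def wrapper(*args, **kwargs):
                try:
                    return await func(*args, **kwargs)
                except Exception as e:
                    log_failure(func.__name__, args[0].__class__.__name__, e)
                    raise
        else:
            @functools.wraps(func)
            def wrapper(*args, **kwargs):
                try:
                    return func(*args, **kwargs)
                except Exception as e:
                    log_failure(func.__name__, args[0].__class__.__name__, e)
                    raise
        return wrapper
    return decorator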

Related

Catch any exception to avoid memory leak - Python bad/good practices

I want to make sure a method is called if any exception is raised. In this situation, is it OK (good or bad practice, or could it lead to unexpected consequences) to try/except any Exception? Here's an example of what I have in mind, using a decorator:
# implementation
import sys
import traceback

class AmazingClass:
    def __init__(self, arg=None):
        self.__att = arg

    @property
    def att(self):
        return self.__att

    def make_sure_it_quits(method):
        def inner(self, *args, **kwargs):
            try:
                return method(self, *args, **kwargs)
            except Exception as err:
                print(err, "- This was caught because it couldn't be foreseen.")
                traceback.print_exc()
                print("\nQuitting what is supposed to be quit...")
                self.quit()
        return inner

    @make_sure_it_quits
    def this_may_raise_errors(self, arg):
        try:
            self.__att += arg
        except TypeError as err:
            print("This I can handle! Cleaning and exiting...")
            self.quit()
            # sys.exit(1)  # exit, if it's the case

    def quit(self):
        self.__att = None
        print("Everything is very clean now!")
# examples
def no_errors():
    obj = AmazingClass("test")
    obj.this_may_raise_errors("_01")
    print(obj.att)

def with_error_01():
    obj = AmazingClass("test")
    obj.this_may_raise_errors(1)
    print(obj.att)

def with_error_02():
    obj = AmazingClass("test")
    obj.this_may_raise_errors()
    print(obj.att)

# main
if __name__ == '__main__':
    no_errors()
    with_error_01()
    with_error_02()
In this case, with_error_01 represents situations I know in advance that can happen, while with_error_02 is an unexpected use of the class.
In both cases, the traceback shows what went wrong and where. Also, the quit method must always be called in case of any error.
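
One pattern worth comparing against (my own sketch, not part of the question): catch everything and do the cleanup, but re-raise afterwards, so the exception is not silently swallowed and callers still see the real traceback.

import traceback

def make_sure_it_quits(method):
    def inner(self, *args, **kwargs):
        try:
            return method(self, *args, **kwargs)
        except Exception:
            # Record what went wrong and guarantee the cleanup, then re-raise
            # so the caller still sees the original exception.
            traceback.print_exc()
            self.quit()
            raise
    return inner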

Python 3: Thread still alive after join

This is my code:
def timeout(seconds_before_timeout):
    def deco(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            res = [
                Exception("function [%s] timeout [%s seconds] exceeded!"
                          % (func.__name__, seconds_before_timeout))
            ]

            def new_func():
                try:
                    res[0] = func(*args, **kwargs)
                except Exception as ex:
                    res[0] = ex

            thread = Thread(target=new_func)
            thread.daemon = True
            try:
                thread.start()
                thread.join(seconds_before_timeout)
            except Exception as ex:
                print("error starting thread")
                raise ex
            ret = res[0]
            if isinstance(ret, BaseException):
                raise ret
            return ret
        return wrapper
    return deco
And this is how I use the timeout function:
@timeout(2)
def listen_for_a_new_campaign(self):
    """
    Start listening for new campaign in list_campaign queue
    """
    while True:
        try:
            for method_frame, properties, body \
                    in self.obj_requester_channel.consume(LIST_CAMPAIGN_QUEUE):
                body_dict = literal_eval(body.decode("utf-8"))
                message_number = body_dict["Msg_Count"]
                n_message = min(message_number, BATCH_SIZE)
                identify(n_message)
                a_request = {
                    "campaign_request": body_dict,
                    "campaign_ack": method_frame.delivery_tag,
                    "n_message": n_message
                }
                identify(a_request)
                return a_request
            # Acknowledge the message
            n_requeued_messages = self.obj_requester_channel.cancel()
            print("Requeued %i messages" % n_requeued_messages)
            break
        except pika.exceptions.ConnectionWrongStateError:
            print("Create connection ...")
            self.create_connection()
            continue
        except pika.exceptions.ChannelWrongStateError:
            print("Create connection ...")
            self.create_connection()
            self.obj_requester_channel = self.obj_connection.channel()
            self.obj_requester_channel.queue_declare(queue=LIST_CAMPAIGN_QUEUE)
            self.obj_campaign_channel = self.obj_connection.channel()
            continue
When I run my program, I checked all the processes with htop, and below is the result: all the threads are still alive.
I don't know what's wrong with that.
When I ran this code on my laptop everything was OK, but when I deployed it to an EC2 instance I ran into this problem.
Help me!!
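
A note on what is probably happening (my own explanation, not part of the question): Thread.join(seconds_before_timeout) only stops waiting after the timeout; it never stops the thread itself. So every timed-out call leaves a daemon thread blocked inside consume() until the process exits, which is exactly what htop shows. The sketch below expresses the same timeout idea with concurrent.futures and makes that limitation explicit; the real fix is usually to make the blocking call itself return, for example via an inactivity timeout on the pika consume call, if I remember that API correctly.

import concurrent.futures
import functools

# A small shared pool; submissions beyond max_workers queue up instead of
# spawning an unbounded number of new threads.
_POOL = concurrent.futures.ThreadPoolExecutor(max_workers=4)

def timeout(seconds_before_timeout):
    def deco(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            future = _POOL.submit(func, *args, **kwargs)
            # Raises concurrent.futures.TimeoutError when the limit is hit.
            # Note: the worker thread is NOT killed; like Thread.join(timeout),
            # this only stops waiting for it. A function blocked in consume()
            # keeps its thread occupied until the process exits.
            return future.result(timeout=seconds_before_timeout)
        return wrapper
    return deco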

Async call without waiting while using PubNub with Django

This is a module that uses PubNub to publish a message to a topic from an API. By design, I've kept the PubNub object a singleton.
class Pubnub:
    instance = None

    @classmethod
    def get(cls):
        if cls.instance is None:
            cls.instance = cls()
        return cls.instance

    def __init__(self):
        with open('config/config.yaml', 'r') as stream:
            try:
                conf = yaml.load(stream)
                pnconfig = PNConfiguration()
                pnconfig.subscribe_key = conf['pubnub']['publish_key']
                pnconfig.publish_key = conf['pubnub']['subscribe_key']
                pnconfig.ssl = False
                self.pubnub = PubNub(pnconfig)
            except yaml.YAMLError as e:
                logger.error(str(e))

    def publish(self, channel):
        try:
            envelope = self.pubnub.publish().channel(channel).message({
                'message': True
            }).sync()
            print("publish timetoken: %d" % envelope.result.timetoken)
        except PubNubException as e:
            logger.error(str(e))
This is how I'm calling it,
class SendCommunityTextMessage(views.APIView):
    def post(self, request, **kwargs):
        try:
            client_id = request.GET['client_id']
            client_secret = request.GET['client_secret']
            if Authenticator.authenticate_client(client_id, client_secret):
                try:
                    # do something
                    try:
                        # do something more
                        pubbub = Pubnub.get()
                        pubbub.publish(receiver.hex_code)
                        return Response({"Success": CommunityTextMessageSerializer(message).data},
                                        status=status.HTTP_200_OK)
                    except KeyError as e:
                        return Response({"Failure": str(e)}, status=status.HTTP_400_BAD_REQUEST)
                except (User.DoesNotExist, CommunityRoom.DoesNotExist) as e:
                    return Response({"Failure": str(e)}, status=status.HTTP_404_NOT_FOUND)
            else:
                return Response({"Failure": "Invalid client"}, status=status.HTTP_403_FORBIDDEN)
        except KeyError as _:
            return Response({"Failure": "Probably a typo, read the docs to use this API."},
                            status=status.HTTP_400_BAD_REQUEST)
The issue is that this slows down the API by minutes. How can I call these two lines,
pubbub = Pubnub.get()
pubbub.publish(receiver.hex_code)
asynchronously, so the view returns without waiting for the call to finish?
Thanks in anticipation.
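
One straightforward option (my own sketch; publish_in_background and _PUBLISH_POOL are names I'm introducing, not part of the PubNub API) is to hand the blocking publish off to a small thread pool, so the view responds immediately:

from concurrent.futures import ThreadPoolExecutor

# A small shared pool so every request does not spawn a brand-new thread.
_PUBLISH_POOL = ThreadPoolExecutor(max_workers=2)

def publish_in_background(channel):
    # The blocking .sync() call inside publish() now runs off the request
    # thread; the view no longer waits for PubNub before returning.
    pubbub = Pubnub.get()
    _PUBLISH_POOL.submit(pubbub.publish, channel)

In the view, the two lines then become a single publish_in_background(receiver.hex_code) call.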

retry decorator implementation, retries not defined

I have implemented the following retry decorator.
def retry(delay=10, retries=4):
    def retry_decorator(f):
        @wraps(f)
        def f_retry(*args, **kwargs):
            while retries > 1:
                try:
                    return f(*args, **kwargs)
                except Exception as e:
                    msg = "Exception: {}, Retrying in {} seconds...".format(e, delay)
                    print(msg)
                    time.sleep(delay)
                    retries -= 1
            return f(*args, **kwargs)
        return f_retry
    return retry_decorator
I get the error that retries is not defined. However, retries is mentioned in the function definition. I am unable to figure out what went wrong here. Any help will be appreciated.
I made it work by collecting the variables retries and delay in a dictionary and then using that inside the function:
def retry(delay=10, retries=4):
    def retry_decorator(f):
        @wraps(f)
        def f_retry(*args, **kwargs):
            opt_dict = {'retries': retries, 'delay': delay}
            while opt_dict['retries'] > 1:
                try:
                    return f(*args, **kwargs)
                except Exception as e:
                    msg = "Exception: {}, Retrying in {} seconds...".format(e, delay)
                    print(msg)
                    time.sleep(opt_dict['delay'])
                    opt_dict['retries'] -= 1
            return f(*args, **kwargs)
        return f_retry
    return retry_decorator
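
For what it's worth, the underlying cause is that retries -= 1 assigns to retries inside f_retry, which makes it a local variable there, so Python refuses to read the enclosing value. A plain local copy (or a nonlocal declaration on Python 3) avoids the dictionary workaround; a minimal sketch of the local-copy variant:

import time
from functools import wraps

def retry(delay=10, retries=4):
    def retry_decorator(f):
        @wraps(f)
        def f_retry(*args, **kwargs):
            # Work on a local copy so the closure variable is never reassigned
            # and every call starts with a fresh retry budget.
            attempts_left = retries
            while attempts_left > 1:
                try:
                    return f(*args, **kwargs)
                except Exception as e:
                    print("Exception: {}, Retrying in {} seconds...".format(e, delay))
                    time.sleep(delay)
                    attempts_left -= 1
            return f(*args, **kwargs)
        return f_retry
    return retry_decorator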

`DummyExecutor` for Python's `futures`

Python's concurrent.futures package gives us ThreadPoolExecutor and ProcessPoolExecutor for running tasks in parallel.
However, for debugging it is sometimes useful to temporarily replace the true parallelism with a dummy one that carries out the tasks serially in the main thread, without spawning any threads or processes.
Is there an implementation of a DummyExecutor anywhere?
Something like this should do it:
from concurrent.futures import Future, Executor
from threading import Lock

class DummyExecutor(Executor):
    def __init__(self):
        self._shutdown = False
        self._shutdownLock = Lock()

    def submit(self, fn, *args, **kwargs):
        with self._shutdownLock:
            if self._shutdown:
                raise RuntimeError('cannot schedule new futures after shutdown')
            f = Future()
            try:
                result = fn(*args, **kwargs)
            except BaseException as e:
                f.set_exception(e)
            else:
                f.set_result(result)
            return f

    def shutdown(self, wait=True):
        with self._shutdownLock:
            self._shutdown = True

if __name__ == '__main__':
    def fnc(err):
        if err:
            raise Exception("test")
        else:
            return "ok"

    ex = DummyExecutor()
    print(ex.submit(fnc, True))
    print(ex.submit(fnc, False))
    ex.shutdown()
    ex.submit(fnc, True)  # raises exception
Locking is probably not needed in this case, but it can't hurt to have it.
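
A quick usage note (my own addition, reusing DummyExecutor and fnc from the example above): because submit runs the callable immediately, the returned futures are already completed, so result() and exception() behave exactly as they would for a finished ThreadPoolExecutor future.

ex = DummyExecutor()
ok = ex.submit(fnc, False)
err = ex.submit(fnc, True)
print(ok.result())       # prints "ok"
print(err.exception())   # prints "test" (the exception set by submit)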
Use this to mock your ThreadPoolExecutor
class MockThreadPoolExecutor():
    def __init__(self, **kwargs):
        pass

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, exc_traceback):
        pass

    def submit(self, fn, *args, **kwargs):
        # execute functions in series without creating threads
        # for easier unit testing
        result = fn(*args, **kwargs)
        return result

    def shutdown(self, wait=True):
        pass

if __name__ == "__main__":
    def sum(a, b):
        return a + b

    with MockThreadPoolExecutor(max_workers=3) as executor:
        future_result = list()
        for i in range(5):
            future_result.append(executor.submit(sum, i + 1, i + 2))
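
One caveat with this mock: submit returns the raw value rather than a Future, so code that calls result() on what it gets back will break. If that matters, a variant (my own sketch, building on the MockThreadPoolExecutor above) can wrap the value in a real Future to stay drop-in compatible:

from concurrent.futures import Future

class MockThreadPoolExecutorWithFutures(MockThreadPoolExecutor):
    def submit(self, fn, *args, **kwargs):
        # Same serial, in-process execution, but wrapped in a Future so callers
        # can use .result() / .exception() exactly as with the real executor.
        future = Future()
        try:
            future.set_result(fn(*args, **kwargs))
        except BaseException as exc:
            future.set_exception(exc)
        return future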
