Retry in python methods - python

This is the standard language-neutral approach
import logging
logger = logging.getLogger(__file__)
class SomeClass(object):
    # Maximum number of retries after the initial attempt.
    max_retry = 2

    def get(self, key):
        """Fetch *key*, retrying on failure; re-raise once retries are exhausted."""
        try:
            return self.__get_value(key)
        except Exception:
            logger.error("Exception occured even after retrying...")
            raise

    def __get_value(self, key, retry_num=0):
        """Fetch *key* from the connection, recursing up to max_retry times.

        :param key: key to look up on the connection
        :param retry_num: current retry depth (internal)
        """
        try:
            return self.connection().get(key)
        except Exception:
            logger.error("Exception occured")
            if retry_num < self.max_retry:
                retry_num += 1
                logger.warning("Retrying!!! Retry count - %s", retry_num)
                # BUG FIX: the recursive result must be returned; the original
                # dropped it, so a *successful* retry still yielded None.
                return self.__get_value(key, retry_num)
            else:
                raise
Is there any better pythonic way to do this?

Cleaner approach would be not to change state of the class since it's retry just for the function call (current implementation wouldn't work as expected when the method is called multiple times). I'd prefer a retry decorator (as loop with break when succeed) used as:
...
@retry(n_times=2, catch=Exception)
def get(self, key):
...

Related

Catch any exception to avoid memory leak - Python bad/good practices

I want to make sure a method is called if any exception is raised. In this situation, is it ok (good/bad practices or may lead to unexpected consequences) to try/except any Exception? Here's an example of what's on my mind using a decorator:
# implementation
import sys
import traceback
class AmazingClass:
    """Demo class whose decorated methods always call quit() on unexpected errors."""

    def __init__(self, arg=None):
        self.__att = arg  # wrapped value; reset to None by quit()

    @property  # restored from the mangled "#property"
    def att(self, ):
        """Read-only view of the private attribute."""
        return self.__att

    def make_sure_it_quits(method):
        """Decorator: run *method*; on any unforeseen exception, log it and quit()."""
        def inner(self, *args, **kwargs):
            try:
                return method(self, *args, **kwargs)
            except Exception as err:
                print(err, "- This was caught because it couldn't be foreseen.")
                traceback.print_exc()
                print("\nQuitting what is suppose to be quited...")
                self.quit()
        return inner

    @make_sure_it_quits  # restored from the mangled "#make_sure_it_quits"
    def this_may_raise_errors(self, arg):
        try:
            self.__att += arg
        except TypeError as err:
            # Foreseeable failure (e.g. str + int): clean up locally.
            print("This I can handle! Cleaning and exiting...")
            self.quit()
            # sys.exit(1) # exit, if it's the case

    def quit(self, ):
        """Release state. BUG FIX: the post assigned self.__arg (a typo), so
        the attribute was never actually cleared."""
        self.__att = None
        print("Everything is very clean now!")
# examples
def no_errors():
    """Happy path: both operands are strings, so concatenation succeeds."""
    instance = AmazingClass("test")
    instance.this_may_raise_errors("_01")
    print(instance.att)
def with_error_01():
    """Foreseeable failure: str + int raises TypeError inside the method."""
    obj = AmazingClass("test")
    # BUG FIX: the post called `this_may_raise_erros` (typo) -> AttributeError
    obj.this_may_raise_errors(1)
    print(obj.att)
def with_error_02():
    """Unexpected misuse: the required argument is missing, so the call
    itself raises TypeError and the decorator performs the cleanup."""
    instance = AmazingClass("test")
    instance.this_may_raise_errors()
    print(instance.att)
# main
if __name__ == '__main__':
no_errors()
with_error_01()
with_error_02()
In this case, with_error_01 represents situations I know in advance that can happen, while with_error_02 is an unexpected use of the class.
In both cases, the use of traceback shows what and where went wrong. Also, the method quit must always be called in case of any error.

How repeat http request after exception? [duplicate]

I have multiple functions in my script which make REST API requests. As I need to handle the error scenarios, I have put a retry mechanism in place as below.
no_of_retries = 3

def check_status():
    """Poll the endpoint, returning the first error-free JSON payload or None."""
    url = "http://something/something"
    for _attempt in range(no_of_retries):
        try:
            payload = requests.get(url, auth=HTTPBasicAuth(COMMON_USERNAME, COMMON_PASSWORD)).json()
        except Exception:
            # Network/parse failure: burn one attempt and try again.
            continue
        if 'error' not in payload:
            return payload
    return None
I have several different methods which do a similar operation. How can we do this in a better way to avoid duplication, perhaps using decorators?
You can use a decorator like this and handle your own exception.
def retry(times, exceptions):
    """
    Retry Decorator

    Retries the wrapped function/method `times` times if the exceptions listed
    in ``exceptions`` are thrown; the final attempt (times + 1 calls in total)
    is made outside the loop so its exception propagates to the caller.

    :param times: The number of times to repeat the wrapped function/method
    :type times: int
    :param exceptions: Exceptions that trigger a retry attempt
    :type exceptions: tuple of Exception classes
    """
    from functools import wraps  # local import keeps the snippet self-contained

    def decorator(func):
        @wraps(func)  # FIX: preserve __name__/__doc__ of the wrapped function
        def newfn(*args, **kwargs):
            attempt = 0
            while attempt < times:
                try:
                    return func(*args, **kwargs)
                except exceptions:
                    print(
                        'Exception thrown when attempting to run %s, attempt '
                        '%d of %d' % (func, attempt, times)
                    )
                    attempt += 1
            # Last chance: an exception raised here is not caught.
            return func(*args, **kwargs)
        return newfn
    return decorator
@retry(times=3, exceptions=(ValueError, TypeError))  # restored from mangled "#retry"
def foo1():
    """Always fails: after 3 caught attempts, the 4th call's error propagates."""
    print('Some code here ....')
    print('Oh no, we have exception')
    raise ValueError('Some error')

foo1()
if you do not mind installing a library you could use the tenacity (github.com/jd/tenacity) module. one of their examples:
import random
from tenacity import retry, stop_after_attempt

# @retry  # retry forever (restored from the mangled "#retry" comment)
@retry(stop=stop_after_attempt(3))  # give up after 3 attempts
def do_something_unreliable():
    """Fail most of the time to demonstrate tenacity's retry behaviour."""
    if random.randint(0, 10) > 1:
        raise IOError("Broken sauce, everything is hosed!!!111one")
    else:
        return "Awesome sauce!"

print(do_something_unreliable())
this also allows you to specify the number of tries or seconds you want to keep retrying.
for your case this might look something like this (not tested!):
@retry(stop=stop_after_attempt(3))  # restored from mangled "#retry"
def retry_get():
    """GET the endpoint; raise (to trigger tenacity's retry) on an error payload."""
    result = requests.get(
        url, auth=HTTPBasicAuth(COMMON_USERNAME, COMMON_PASSWORD)).json()
    # BUG FIX: the post raised when there was NO error, which would retry on
    # success forever; raise only when the payload actually reports an error.
    if 'error' in result:
        raise RequestException(result)
    return result
The third-party retry module is now widely accepted for this. You can also pass the list of exceptions to retry for, number of retries, delays, maximum delay, exponential back-off, etc.
$ pip install retry
Example usage:
from retry import retry

@retry(ZeroDivisionError, tries=3, delay=2)  # restored from mangled "#retry"
def make_trouble():
    '''Retry on ZeroDivisionError, raise error after 3 attempts, sleep 2 seconds between attempts.'''
Production level example
import logging
import time
import functools
import traceback
# Log format includes path, function and line number so retry logs are traceable.
LOG_FORMAT = "%(asctime)s - %(levelname)s - %(pathname)s - %(funcName)s - %(lineno)d -msg: %(message)s"
logging.basicConfig(level=logging.INFO, format=LOG_FORMAT)
def retry(retry_num, retry_sleep_sec):
    """
    retry help decorator.

    :param retry_num: number of attempts before giving up
    :param retry_sleep_sec: seconds to sleep between attempts
    :return: decorator
    """
    def decorator(func):
        """decorator"""
        # preserve information about the original function, or the func name will be "wrapper" not "func"
        @functools.wraps(func)  # BUG FIX: restored from the mangled "#functools.wraps(func)"
        def wrapper(*args, **kwargs):
            """wrapper"""
            last_err = None
            for attempt in range(retry_num):
                try:
                    return func(*args, **kwargs)  # return the raw function's return value
                except Exception as err:  # pylint: disable=broad-except
                    last_err = err
                    logging.error(err)
                    logging.error(traceback.format_exc())
                    time.sleep(retry_sleep_sec)
                    logging.error("Trying attempt %s of %s.", attempt + 1, retry_num)
            logging.error("func %s retry failed", func)
            # Chain the last real error so callers can see the root cause.
            raise Exception('Exceed max retry num: {} failed'.format(retry_num)) from last_err
        return wrapper
    return decorator
usage
# this means try your function 5 times, each time sleep 60 seconds
@retry(5, 60)  # restored from the mangled "#retry(5, 60)"
def your_func():
    """Placeholder showing how the decorator is applied."""
    pass
Formal reference: https://peps.python.org/pep-0318/
I would recommend using the retry library like @MohitC mentioned. However, if you're restricted from importing 3rd-party libraries like I was, you're welcome to try my version:
import time

def retry(tries=-1, delay=0, max_delay=None, backoff=1, exceptions=Exception, log=False):
    """Retry Decorator with arguments

    Args:
        tries (int): The maximum number of attempts. Defaults to -1 (infinite)
        delay (int, optional): Delay between attempts (seconds). Defaults to 0
        max_delay (int, optional): The maximum value of delay (seconds). Defaults to None (Unlimited)
        backoff (int, optional): Multiplier applied to delay between attempts (seconds). Defaults to 1 (No backoff)
        exceptions (tuple, optional): Types of exceptions to catch. Defaults to Exception (all)
        log (bool, optional): Print debug logs. Defaults to False
    """
    def retry_decorator(func):
        from functools import wraps

        @wraps(func)  # keep the wrapped function's metadata
        def retry_wrapper(*args, **kwargs):
            # BUG FIX: the original used ``nonlocal tries, delay, ...`` and
            # mutated the decorator's arguments, leaking state between calls:
            # a second call of the decorated function started with the
            # already-exhausted counter and the grown delay. Use per-call copies.
            remaining = tries
            wait = delay
            while remaining:
                try:
                    return func(*args, **kwargs)
                except exceptions:
                    remaining -= 1
                    # Reached to maximum tries
                    if not remaining:
                        raise
                    # Log the retry logs for the given function
                    if log:
                        print(f"Retrying {func.__name__} in {wait} seconds")
                    # Apply delay between requests
                    time.sleep(wait)
                    # Adjust the next delay according to backoff
                    wait *= backoff
                    # Adjust maximum delay duration
                    if max_delay is not None:
                        wait = min(wait, max_delay)
        return retry_wrapper
    return retry_decorator
Example Uses:
Simple:
@retry(10, delay=5)  # restored from the mangled "#retry(10, delay=5)"
def do_something(params):
    """Example func to retry up to 10 times, sleeping 5 s between attempts."""
    pass
Advanced:
# BUG FIX: ``(TimeoutError)`` is just TimeoutError itself, not a tuple;
# a one-element tuple needs the trailing comma.
@retry(10, delay=1, backoff=2, max_delay=10, exceptions=(TimeoutError,), log=True)
def do_something(params):
    # Example func to retry only for TimeoutErrors
    pass
Instead of using decorators, the probably better solution is to move the request to its own function, arriving at a structure similar to this:
no_of_retries = 3

def make_request(url):
    """GET *url* with basic auth, retrying up to no_of_retries times.

    Returns the first error-free JSON payload; otherwise the last payload
    received (which contained 'error'), or None if every attempt raised.
    """
    result = None  # BUG FIX: was unbound -> NameError when every attempt raised
    for _attempt in range(no_of_retries):
        try:
            result = requests.get(url, auth=HTTPBasicAuth(COMMON_USERNAME, COMMON_PASSWORD)).json()
        except Exception:
            continue
        if 'error' not in result:
            return result
    return result
def check_status():
    """Query the status endpoint (the result is currently unused)."""
    make_request("http://something/status")

def load_file():
    """Fetch the file endpoint (the result is currently unused)."""
    make_request("http://something/file")
This way, you avoid duplicate code while encapsulating the request. If you were to use a decorator, you would need to wrap the whole load_file() method which would prevent you from further processing the request's result within this function.
I have created a custom retry function. The function will retry if the first number is less than the second.
CODE:
import time

# CUSTOM EXCEPTION
class custom_error(Exception):
    # Raised internally by retry() to signal "condition not met, try again".
    pass
# RETRY FUNCTION.
def retry(func, retries=3):
    """Decorator: call *func* only while the first positional argument is
    greater than the second; otherwise raise custom_error and retry, up to
    *retries* times (returning None when the budget is exhausted)."""
    print(func)

    def retry_wrapper(*args, **kwargs):
        print(args)
        first, second = args[0], args[1]
        print(first, second)
        attempt_count = 0
        while attempt_count < retries:
            try:
                if first > second:
                    return func(*args, **kwargs)
                raise custom_error
            except custom_error:
                print("error")
                time.sleep(2)
                attempt_count += 1

    return retry_wrapper
@retry  # restored from the mangled "#retry"
def akash(a, b):
    """Divide a by b; only attempted while a > b (see retry above)."""
    c = a / b
    return c

# CALLING THE FUNCTION
a = akash(1, 2)
print(a)
OUTPUT:
<function akash at 0x00000187C3A66B00>
(1, 2)
1 2
error
error
error
Using functools on top of mrkiril's answer:
from functools import wraps, partial

def retry(f=None, times=10):
    """Retry decorator usable both bare (@retry) and with arguments
    (@retry(times=5)).

    The wrapped function is attempted up to *times* times with exceptions
    swallowed; one final attempt is then made whose exception propagates.
    """
    if f is None:
        # Called with arguments: return a decorator awaiting the function.
        return partial(retry, times=times)

    @wraps(f)
    def wrap(*args, **kwargs):
        attempt = 0
        while attempt < times:
            try:
                return f(*args, **kwargs)
            # BUG FIX: was a bare ``except:`` which also swallows
            # KeyboardInterrupt/SystemExit; catch Exception instead.
            except Exception:
                print(f"{f.__name__}, attempt {attempt} of {times}")
                attempt += 1
        # Final attempt outside the loop: its exception is not caught.
        return f(*args, **kwargs)
    return wrap
Then, wrap your function like the following:
import random

@retry  # restored from the mangled "#retry"; bare form uses the default times=10
def foo():
    """Raise ~5 times out of 6 so the decorator's retries are exercised."""
    if random.randint(0, 5) != 0:
        raise Exception

Timeout a function inside a class with a decorator

I'm trying to put a timeout on a function send.
I have found some elements in these posts :
https://stackoverflow.com/a/494273/3824723
https://stackoverflow.com/a/2282656/3824723
https://stackoverflow.com/a/11731208/3824723
The first one seems to apply to every function and not to a precise one, this is why I chose a decorator implementation like the second one.
I tried to mix it up and I have this :
from functools import wraps
import os
import signal
class TimeoutError(Exception):
    # Raised by the alarm handler; shadows the Python 3.3+ builtin of the
    # same name (kept as in the post, which targets Python 2).
    pass
def timeout_func(error_message="Timeout in send pipe!"):
    # NOTE(review): this is the asker's broken Python 2 attempt, kept as
    # posted. Applied as a bare "@timeout_func" it receives the function as
    # `error_message`, and `decorator` then gets two call arguments ->
    # "decorator() takes exactly 1 argument (2 given)" as seen below.
    def decorator(func):
        def _handle_timeout(signum, frame):
            # NOTE(review): `args` is not in scope in the handler; this would
            # raise NameError if the alarm ever fired.
            if args[0].action=="warn":
                print "WARNING : ",error_message
            elif args[0].action=="kill":
                raise TimeoutError(error_message)
        def wrapper(*args, **kwargs):
            print args
            # NOTE(review): signal.signal takes (signalnum, handler); the
            # extra third argument is invalid.
            signal.signal(signal.SIGALRM, _handle_timeout,args[0].action)
            # NOTE(review): Link defines `timeout`, not `seconds`.
            signal.alarm(args[0].seconds)
            print str(args)
            try:
                result = func(*args, **kwargs)
            finally:
                signal.alarm(0)
            return result
        return wraps(func)(wrapper)
    return decorator
class Link(object):
    # Pipe endpoint; `timeout` (seconds) and `action` ("warn"/"kill") drive
    # the timeout decorator above.
    def __init__(self,timeout=1,action="warn"):
        self.timeout=timeout
        self.action=action
    #timeout_func  <- NOTE(review): mangled "@timeout_func"; as a factory it
    # would also need to be *called*: "@timeout_func()".
    def send(self,value):
        print "working : ", value
It gives me this :
In [6]: l=Link()
In [7]: l.send(1)
--------------------------------------------------------------------------- TypeError Traceback (most recent call
last) in ()
----> 1 l.send(1)
TypeError: decorator() takes exactly 1 argument (2 given)
My issue is that I would like to pass the timeout value second to the decorator through the Link's self. I don't fully understand the whole decorator mechanism here, and can't figure out what is wrong.
Can someone explain me how this decorator works, and what should I modify to fix it? Or if you think of a simpler/more explicit solution to implement it ?
So I've been debugging my issue, and I found a working solution:
from functools import wraps
import signal
class TimeoutError(Exception):
    # Raised by the SIGALRM handler when the guarded call overruns its budget.
    pass
def timeout_func(f):
    """Decorator: abort *f* with TimeoutError after args[0].timeout seconds.

    args[0] is expected to be the instance (self) of the Link class, which
    carries the ``timeout`` attribute.
    """
    def _handle_timeout(signum, frame):
        raise TimeoutError("timeout error")

    @wraps(f)
    def wrapper(*args, **kwargs):
        # BUG FIX: the original signature was ``wrapper(*args)`` but the body
        # forwarded ``**kwargs`` -> NameError on every call.
        signal.signal(signal.SIGALRM, _handle_timeout)
        # setitimer accepts float seconds (signal.alarm is whole seconds only).
        signal.setitimer(signal.ITIMER_REAL, args[0].timeout)
        try:
            result = f(*args, **kwargs)
        finally:
            signal.alarm(0)  # cancel any pending alarm
        return result
    return wrapper
class Link(object):
    """Pipe endpoint whose send() is guarded by a SIGALRM-based timeout."""

    def __init__(self, timeout=0.1, action="warn"):
        self.timeout = timeout  # seconds; may be fractional (setitimer)
        self.action = action    # "warn" -> log and continue, "kill" -> re-raise

    def send(self, value):  # I use this func to handle the exceptions
        try:
            self.send_timeout(value)  # call the timeout-guarded function
        except TimeoutError as e:  # handle Timeout exception
            if self.action == "warn":
                print("WARNING : Timeout error in pipe send!")
            elif self.action == "kill":
                print("Killing Link : ", e)
                raise
        except (Exception, KeyboardInterrupt) as e:
            print("Exception in link : ", e)
            raise

    @timeout_func  # apply timeout decorator (restored from mangled "#timeout_func")
    def send_timeout(self, value):
        # DO STUFF HERE
        pass  # FIX: a body is required -- a lone comment is a syntax error
To call it :
l=Link()
l.send("any value")
I use signal.setitimer(signal.ITIMER_REAL, args[0].timeout) because it allows setting a timeout smaller than 1 second, which was not the case with signal.alarm(), which only accepts an integer number of seconds.

python stop exception passing

I have a custom InvalidError, and I want my function handles two kinds of errors: one is InvalidError, the other are all other errors. I tried in this way:
# NOTE(review): fragment taken from inside a function (bare `return`s); the
# `if` body is only a comment, so it is not runnable exactly as posted.
try:
    a = someFunc()
    if a:
        # do things
    else:
        raise InvalidError('Invalid Error!')
# The specific handler must precede the generic one -- it does here, so
# InvalidError should indeed be reported as "Invalid".
except InvalidError as e:
    return "Invalid"
except Exception as ex:
    return "Other"
But seems I will get Other either way. How can I achieve my functionality in right way?
Can you tell us how you created your InvalidError class? It is working:
class InvalidError(Exception):
    # Custom exception; deriving from Exception is all that's required for
    # the except clause above to distinguish it.
    pass
>>> try:
... raise InvalidError("dsfsdf")
... except InvalidError as my_exception:
... print "yes"
... except Exception as e:
... print "No"
...
yes
One way of doing this would be to create a context manager class. Within the contect manager you can ignore whatever error you like,
E.g.
class ctx_mgr:
    """Context manager that swallows InvalidError and propagates everything else."""

    def __enter__(self):
        # Hand back a fresh placeholder object, mirroring the original.
        return object()

    def __exit__(self, exc_type, exc_value, exc_tb):
        # Returning a truthy value suppresses the in-flight exception;
        # only InvalidError compares equal here.
        return exc_type == InvalidError
# BUG FIX: the context manager must be instantiated; ``with ctx_mgr:`` hands
# the *class* to the with-statement, and __enter__ is defined on instances.
with ctx_mgr():
    a = someFunc()

Short form to return method result if condition passed

I'm wondering if there's any pythonic or short-form method to achieve the following:
error_response = self.check_conditions(request)
# If we have an error response, return it, otherwise continue as normal.
if error_response:
return error_response
Something like:
(return self.check_conditions(request)) or pass
Alternatively, is it possible for a function to return the calling method, such as:
self.check_conditions(request)
def check_conditions(self, request):
error_response = do_stuff()
if error_response:
return_parent error_response
I get the feeling the second concept is breaking a ton of programming laws to prevent chaos and the apocalypse, just a thought though :)
No, there is no short form for a conditional return.
But, to get to the second part of your question:
There are exceptions in Python. You can write something like this:
class MyErrorResponse(Exception): pass
class MyClass:
    ...

    def check_conditions(self, request):
        """Raise MyErrorResponse when do_stuff() reports an error."""
        error_response = do_stuff()
        if error_response:
            # Surfacing the error as an exception lets callers short-circuit.
            raise MyErrorResponse(error_response)

    def do_the_main_stuff(self, request=None):
        # BUG FIX: the post omitted ``self`` (making ``self.check_conditions``
        # a NameError) and called check_conditions without its required
        # ``request`` argument; forward it here.
        try:
            self.check_conditions(request)
            ...
        except MyErrorResponse as e:
            return e.args[0]
That depends a lot on what check_conditions does under the hood. It's likely that you can move error handling down a level of abstraction and handle things directly:
Compare:
# Module-level error flag shared between foo() and check_conditions().
error = False

def foo(request):
    # NOTE(review): a global mutable flag works but couples these functions
    # and is not reset between calls; the second snippet below shows the
    # exception-based alternative.
    global error
    try:
        result = do_something_with(request)
    except SomeWellDefinedError:
        error = True

def check_conditions(request):
    # Runs foo() for its side effect, then reports the module-level flag.
    foo(request)
    return error
def main():
    """Drive check_conditions and react to the reported error flag."""
    error_response = check_conditions(some_request)
    if error_response:
        # freak out!
        pass  # FIX: the post's snippet ended at the comment (syntax error)
With
def foo(request):
    # Handle (or deliberately re-raise) the well-defined error at the level
    # that actually knows what went wrong, instead of signalling via a flag.
    try:
        result = do_something_with(request)
    except SomeWellDefinedError:
        # you can try to handle the error here, or...
        raise # uh oh!
def main():
    """Call foo and handle its well-defined error at this level instead."""
    try:
        foo(some_request)
    except SomeWellDefinedError:
        # handle the error here, instead!
        pass  # FIX: the post's snippet ended at the comment (syntax error)

Categories