retVal = None
retries = 5
success = False
while retries > 0 and success == False:
    try:
        retVal = graph.put_event(**args)
        success = True
    except:
        retries = retries - 1
        logging.info('Facebook put_event timed out. Retrying.')
return success, retVal
In the code above, how can I wrap this whole thing up as a function and make it so that any command (in this example, 'graph.put_event(**args)') can be passed in as a parameter to be executed within the function?
To directly answer your question:
def foo(func, *args, **kwargs):
    retVal = None
    retries = 5
    success = False
    while retries > 0 and success == False:
        try:
            retVal = func(*args, **kwargs)
            success = True
        except:
            retries = retries - 1
            logging.info('Facebook put_event timed out. Retrying.')
    return success, retVal
This can then be called as such:
s, r = foo(graph.put_event, arg1, arg2, kwarg1="hello", kwarg2="world")
As an aside, given the above task, I would write it along the lines of:
class CustomException(Exception): pass

# Note: untested code...
def foo(func, *args, **kwargs):
    retries = 5
    while retries > 0:
        try:
            return func(*args, **kwargs)
        except:
            retries -= 1
            # maybe sleep a short while
    raise CustomException

# to be used as such
try:
    rv = foo(graph.put_event, arg1, arg2, kwarg1="hello", kwarg2="world")
except CustomException:
    pass  # handle failure
def do_event(evt, *args, **kwargs):
    ...
    retVal = evt(*args, **kwargs)
    ...
I have multiple functions in my script which make REST API requests. As I need to handle error scenarios, I have put a retry mechanism in place as below.
no_of_retries = 3

def check_status():
    for i in range(0, no_of_retries):
        url = "http://something/something"
        try:
            result = requests.get(url, auth=HTTPBasicAuth(COMMON_USERNAME, COMMON_PASSWORD)).json()
            if 'error' not in result:
                return result
            else:
                continue
        except Exception as e:
            continue
    return None
I have several different methods which do similar operations. How can we do this in a better way to avoid duplication, maybe using decorators?
You can use a decorator like this and handle your own exception.
def retry(times, exceptions):
    """
    Retry Decorator
    Retries the wrapped function/method `times` times if the exceptions listed
    in ``exceptions`` are thrown
    :param times: The number of times to repeat the wrapped function/method
    :type times: int
    :param exceptions: Exceptions that trigger a retry attempt
    :type exceptions: tuple of Exceptions
    """
    def decorator(func):
        def newfn(*args, **kwargs):
            attempt = 0
            while attempt < times:
                try:
                    return func(*args, **kwargs)
                except exceptions:
                    print(
                        'Exception thrown when attempting to run %s, attempt '
                        '%d of %d' % (func, attempt, times)
                    )
                    attempt += 1
            return func(*args, **kwargs)
        return newfn
    return decorator

@retry(times=3, exceptions=(ValueError, TypeError))
def foo1():
    print('Some code here ....')
    print('Oh no, we have exception')
    raise ValueError('Some error')

foo1()
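Applied to the question's request code, the same decorator might be used like this (an untested sketch; COMMON_USERNAME and COMMON_PASSWORD are the question's own placeholders):

import requests
from requests.auth import HTTPBasicAuth
from requests.exceptions import RequestException

@retry(times=3, exceptions=(RequestException, ValueError))
def check_status():
    # requests raises RequestException subclasses on connection problems;
    # .json() raises a ValueError subclass on an invalid body
    url = "http://something/something"
    return requests.get(url, auth=HTTPBasicAuth(COMMON_USERNAME, COMMON_PASSWORD)).json()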
If you do not mind installing a library, you could use the tenacity (github.com/jd/tenacity) module. One of their examples:
import random
from tenacity import retry, stop_after_attempt

# @retry  # retry forever
@retry(stop=stop_after_attempt(3))
def do_something_unreliable():
    if random.randint(0, 10) > 1:
        raise IOError("Broken sauce, everything is hosed!!!111one")
    else:
        return "Awesome sauce!"

print(do_something_unreliable())
This also allows you to specify the number of attempts or the number of seconds you want to keep retrying. For your case, this might look something like this (not tested!):
from requests.exceptions import RequestException

@retry(stop=stop_after_attempt(3))
def retry_get():
    result = requests.get(
        url, auth=HTTPBasicAuth(COMMON_USERNAME, COMMON_PASSWORD)).json()
    if 'error' in result:  # raise so that tenacity retries (the original had this condition inverted)
        raise RequestException(result)
    return result
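tenacity also lets you combine stopping and waiting strategies, for example a fixed two-second pause between attempts (a minimal sketch using tenacity's wait_fixed helper; the function name is made up):

from tenacity import retry, stop_after_attempt, wait_fixed

@retry(stop=stop_after_attempt(3), wait=wait_fixed(2))
def retry_get_with_pause():
    ...  # same body as retry_get above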
The third-party retry module is now widely accepted for this. You can also pass the list of exceptions to retry for, number of retries, delays, maximum delay, exponential back-off, etc.
$ pip install retry
Example usage:
from retry import retry

@retry(ZeroDivisionError, tries=3, delay=2)
def make_trouble():
    '''Retry on ZeroDivisionError, raise error after 3 attempts, sleep 2 seconds between attempts.'''
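If the body actually raises, for example `return 1 / 0`, calling make_trouble() sleeps 2 seconds after each of the first two failures and lets the ZeroDivisionError propagate after the third attempt.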
Production-level example:
import logging
import time
import functools
import traceback

LOG_FORMAT = "%(asctime)s - %(levelname)s - %(pathname)s - %(funcName)s - %(lineno)d -msg: %(message)s"
logging.basicConfig(level=logging.INFO, format=LOG_FORMAT)

def retry(retry_num, retry_sleep_sec):
    """
    Retry help decorator.
    :param retry_num: the number of retries
    :param retry_sleep_sec: seconds to sleep between retries
    :return: decorator
    """
    def decorator(func):
        """decorator"""
        # preserve information about the original function, or the func name will be "wrapper" not "func"
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            """wrapper"""
            for attempt in range(retry_num):
                try:
                    return func(*args, **kwargs)  # should return the raw function's return value
                except Exception as err:  # pylint: disable=broad-except
                    logging.error(err)
                    logging.error(traceback.format_exc())
                    time.sleep(retry_sleep_sec)
                    logging.error("Trying attempt %s of %s.", attempt + 1, retry_num)
            logging.error("func %s retry failed", func)
            raise Exception('Exceed max retry num: {} failed'.format(retry_num))
        return wrapper
    return decorator
Usage:
# this means try your function 5 times, each time sleep 60 seconds
@retry(5, 60)
def your_func():
    pass
Formal reference: https://peps.python.org/pep-0318/
I would recommend using the retry library like @MohitC mentioned. However, if you're restricted from importing 3rd-party libraries like I was, you're welcome to try my version:
import time

def retry(tries=-1, delay=0, max_delay=None, backoff=1, exceptions=Exception, log=False):
    """Retry Decorator with arguments
    Args:
        tries (int): The maximum number of attempts. Defaults to -1 (infinite)
        delay (int, optional): Delay between attempts (seconds). Defaults to 0
        max_delay (int, optional): The maximum value of delay (seconds). Defaults to None (unlimited)
        backoff (int, optional): Multiplier applied to delay between attempts (seconds). Defaults to 1 (no backoff)
        exceptions (tuple, optional): Types of exceptions to catch. Defaults to Exception (all)
        log (bool, optional): Print debug logs. Defaults to False
    """
    def retry_decorator(func):
        def retry_wrapper(*args, **kwargs):
            # Work on local copies so each call gets a fresh retry budget.
            # (The original used `nonlocal`, which permanently consumed `tries`
            # and inflated `delay` across calls of the decorated function.)
            remaining, current_delay = tries, delay
            while remaining:
                try:
                    return func(*args, **kwargs)
                except exceptions:
                    remaining -= 1
                    # Reached the maximum number of tries
                    if not remaining:
                        raise
                    # Log the retry for the given function
                    if log:
                        print(f"Retrying {func.__name__} in {current_delay} seconds")
                    # Apply delay between attempts
                    time.sleep(current_delay)
                    # Adjust the next delay according to backoff
                    current_delay *= backoff
                    # Cap at the maximum delay duration
                    if max_delay is not None:
                        current_delay = min(current_delay, max_delay)
        return retry_wrapper
    return retry_decorator
Example uses:
Simple:
@retry(10, delay=5)
def do_something(params):
    # Example func to retry
    pass
Advanced:
@retry(10, delay=1, backoff=2, max_delay=10, exceptions=(TimeoutError,), log=True)
def do_something(params):
    # Example func to retry only for TimeoutErrors
    pass
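With the advanced settings above, the sleep between attempts grows as 1, 2, 4, 8, ... seconds and is then capped at 10 by max_delay, and only TimeoutError triggers a retry.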
Instead of using decorators, a probably better solution is to move the request to its own function, arriving at a structure similar to this:
no_of_retries = 3

def make_request(url):
    for i in range(0, no_of_retries):
        try:
            result = requests.get(url, auth=HTTPBasicAuth(COMMON_USERNAME, COMMON_PASSWORD)).json()
            if 'error' not in result:
                return result
            else:
                continue
        except Exception as e:
            continue
    return None  # all attempts failed (the original returned `result`, which may be unbound here)

def check_status():
    result = make_request("http://something/status")

def load_file():
    result = make_request("http://something/file")
This way, you avoid duplicate code while encapsulating the request. If you were to use a decorator, you would need to wrap the whole load_file() method which would prevent you from further processing the request's result within this function.
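For example, the status check might continue like this (a hypothetical sketch; the 'status' key is made up):

def check_status():
    result = make_request("http://something/status")
    if result is None:
        return None  # all retries failed
    return result.get("status")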
I have created a custom retry function. The function will retry if the first number is less than the second.
CODE:
import time

# CUSTOM EXCEPTION
class custom_error(Exception):
    pass

# RETRY FUNCTION
def retry(func, retries=3):
    print(func)
    def retry_wrapper(*args, **kwargs):
        print(args)
        n = args[0]
        u = args[1]
        print(n, u)
        attempts = 0
        while attempts < retries:
            try:
                if n > u:
                    return func(*args, **kwargs)
                else:
                    raise custom_error
            except custom_error:
                print("error")
                time.sleep(2)
                attempts += 1
    return retry_wrapper

@retry
def akash(a, b):
    c = a / b
    return c

# CALLING THE FUNCTION
a = akash(1, 2)
print(a)
OUTPUT:
<function akash at 0x00000187C3A66B00>
(1, 2)
1 2
error
error
error
None
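(The final None is what print(a) shows, since retry_wrapper returns nothing once the retries are exhausted. With akash(2, 1) instead, n > u holds, so the wrapped function runs on the first attempt and the call prints 2.0.)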
Using functools on top of mrkiril's answer:
from functools import wraps, partial

def retry(f=None, times=10):
    if f is None:
        return partial(retry, times=times)
    @wraps(f)
    def wrap(*args, **kwargs):
        attempt = 0
        while attempt < times:
            try:
                return f(*args, **kwargs)
            except Exception:
                print(f"{f.__name__}, attempt {attempt} of {times}")
                attempt += 1
        return f(*args, **kwargs)
    return wrap
Then, wrap your function like the following:
import random

@retry
def foo():
    if random.randint(0, 5) != 0:
        raise Exception
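Because of @wraps(f), the decorated function keeps its metadata, which the except branch relies on for f.__name__:

print(foo.__name__)  # prints 'foo', not 'wrap'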
I have written a generator as follows:
def my_generator():
    i = 0
    while i < 1000000:
        i += 1
        yield i
Assume the generator cannot finish within one second; in the test function, I use a timeout decorator to guarantee that the function does not run for more than 1 second.
@timeout(1)
def test():
    for i in my_generator():
        print(i)
Unfortunately, the timeout doesn't work as I wanted: the function prints all the numbers from 1 to 1000000 and takes more than 1 second. In the decorator, I have tried gevent and KThread, but neither of them works.
Decorator using KThread:
import sys
import threading

class KThread(threading.Thread):
    """Subclass of threading.Thread, with a kill() method."""
    def __init__(self, *args, **kwargs):
        threading.Thread.__init__(self, *args, **kwargs)
        self.killed = False

    def start(self):
        """Start the thread."""
        self.__run_backup = self.run
        """Force the Thread to install our trace."""
        self.run = self.__run
        threading.Thread.start(self)

    def __run(self):
        """Hacked run function, which installs the trace."""
        sys.settrace(self.globaltrace)
        self.__run_backup()
        self.run = self.__run_backup

    def globaltrace(self, frame, why, arg):
        if why == 'call':
            return self.localtrace
        else:
            return None

    def localtrace(self, frame, why, arg):
        if self.killed:
            if why == 'line':
                raise SystemExit()
        return self.localtrace

    def kill(self):
        self.killed = True

def timeout(seconds):
    def timeout_decorator(func):
        def _new_func(oldfunc, result, oldfunc_args, oldfunc_kwargs):
            result.append(oldfunc(*oldfunc_args, **oldfunc_kwargs))

        def _(*args, **kwargs):
            result = []
            '''create new kwargs for _new_func because
            we want to get the func return value into the result list
            '''
            new_kwargs = {
                'oldfunc': func,
                'result': result,
                'oldfunc_args': args,
                'oldfunc_kwargs': kwargs
            }
            thd = KThread(target=_new_func, args=(), kwargs=new_kwargs)
            thd.start()
            thd.join(seconds)
            alive = thd.is_alive()
            '''kill the child thread'''
            thd.kill()
            if alive:
                alert_exce = u'function timeout for [%d s].' % seconds
                raise Timeout(alert_exce)
            else:
                return result[0]
        _.__name__ = func.__name__
        _.__doc__ = func.__doc__
        return _
    return timeout_decorator
Decorator using gevent:
import time

def g_timer(timeout_seconds=None, timeout_exception=TimeoutError, exception_message=None, module_name=None):
    import gevent
    from gevent import monkey
    monkey.patch_all()

    def decorate(func):
        def wrapper(*args, **kwargs):
            try:
                t0 = time.time()
                gevent.with_timeout(timeout_seconds, func, *args, **kwargs)
                elapsed = time.time() - t0
            except gevent.timeout.Timeout as e:
                print("exception")
        return wrapper
    return decorate
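For comparison (not from the original question), a common alternative on Unix is a SIGALRM-based timeout, which interrupts the main thread even inside a tight loop. A minimal sketch, assuming the decorated function runs in the main thread and seconds is an integer:

import signal
import functools

def timeout(seconds):
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            def handler(signum, frame):
                raise TimeoutError('function timed out after %d s' % seconds)
            old = signal.signal(signal.SIGALRM, handler)  # install our handler
            signal.alarm(seconds)                          # schedule the alarm
            try:
                return func(*args, **kwargs)
            finally:
                signal.alarm(0)                  # cancel any pending alarm
                signal.signal(signal.SIGALRM, old)  # restore the previous handler
        return wrapper
    return decorator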
I am using the following approach to pass in an optional argument to a decorator:
import time

def wait(func=None, delay=1.0):
    def decorator_wait(func):
        def wrapper_wait(*args, **kwargs):
            time.sleep(delay)
            return func(*args, **kwargs)
        return wrapper_wait
    return decorator_wait(func) if func is not None else decorator_wait

@wait
def print_something(something):
    print(something)

@wait(delay=0.2)
def print_something_else(something):
    print(something)
The above code looks pretty difficult to follow though with all the nesting. Is there another approach to do the above, or is this the only method available for something like this?
You can avoid having to remember "do I need to call this or not?" by removing the func argument from the wait function, and remembering to always call your decorator-returner.
It would look like this:
def wait(delay=1.0):
    def decorator_wait(func):
        def wrapper_wait(*args, **kwargs):
            time.sleep(delay)
            return func(*args, **kwargs)
        return wrapper_wait
    return decorator_wait

@wait()
def print_something(something):
    print(something)

@wait(delay=0.2)
def print_something_else(something):
    print(something)

print_something("hello")
# 1 second delay, then 'hello'
print_something_else("there")
# 0.2 second delay, then 'there'
You just have to remember that wait will always return the decorator, so you have to use () when decorating your functions.
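Note the failure mode if you do forget the parentheses: @wait would bind the function as delay, the decorated name would silently become the inner decorator_wait, and calls would return wrapper functions instead of running your code, with no immediate error.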
I think this is a little bit better:
import functools
import time

def wait(func=None, delay=1.0):
    if func is None:
        return lambda func: wait(func=func, delay=delay)

    @functools.wraps(func)  # good practice; see: https://stackoverflow.com/questions/308999/what-does-functools-wraps-do
    def _wrapper(*args, **kwargs):
        time.sleep(delay)
        return func(*args, **kwargs)
    return _wrapper

@wait
def test():
    return

@wait(delay=3)
def test2():
    return
You can write classes having a __call__ method, instead of writing a bunch of nested defs.
It sounds like you want a decorator Wait which halts program execution for a few seconds. If you don't pass in a wait time, then the default value is 1 second. Use-cases are shown below.
##################################################
@Wait
def print_something(something):
    print(something)

##################################################
@Wait(3)
def print_something_else(something_else):
    print(something_else)

##################################################
@Wait(delay=3)
def print_something_else(something_else):
    print(something_else)
When Wait has an argument, such as @Wait(3), then the call Wait(3) is executed before anything else happens. That is, the following two pieces of code are equivalent:

@Wait(3)
def print_something_else(something_else):
    print(something_else)

###############################################

return_value = Wait(3)

@return_value
def print_something_else(something_else):
    print(something_else)
This is a problem:

if `Wait` has no arguments:
    `Wait` is the decorator.
else: # `Wait` receives arguments
    `Wait` is not the decorator itself.
    Instead, `Wait` ***returns*** the decorator.
One solution is shown below:
Let us begin by creating the following class, DelayedDecorator:
import io

class DelayedDecorator:
    def __init__(i, cls, *args, **kwargs):
        print("Delayed Decorator __init__", cls, args, kwargs)
        i._cls = cls
        i._args = args
        i._kwargs = kwargs

    def __call__(i, func):
        print("Delayed Decorator __call__", func)
        if not callable(func):
            with io.StringIO() as ss:
                print(
                    "If only one input, input must be callable",
                    "Instead, received:",
                    repr(func),
                    sep="\n",
                    file=ss
                )
                msg = ss.getvalue()
            raise TypeError(msg)
        return i._cls(func, *i._args, **i._kwargs)
Now we can write things like:
dec = DelayedDecorator(Wait, delay=4)

@dec
def delayed_print(something):
    print(something)
Note that dec does not accept multiple arguments; dec only accepts the function to be wrapped.
import inspect

class PolyArgDecoratorMeta(type):
    def __call__(Wait, *args, **kwargs):
        try:
            arg_count = len(args)
            if (arg_count == 1):
                if callable(args[0]):
                    SuperClass = inspect.getmro(PolyArgDecoratorMeta)[1]
                    r = SuperClass.__call__(Wait, args[0])
                else:
                    r = DelayedDecorator(Wait, *args, **kwargs)
            else:
                r = DelayedDecorator(Wait, *args, **kwargs)
        finally:
            pass
        return r

import time

class Wait(metaclass=PolyArgDecoratorMeta):
    def __init__(i, func, delay=2):
        i._func = func
        i._delay = delay

    def __call__(i, *args, **kwargs):
        time.sleep(i._delay)
        r = i._func(*args, **kwargs)
        return r
The following two pieces of code are equivalent:
@Wait
def print_something(something):
    print(something)

##################################################

def print_something(something):
    print(something)
print_something = Wait(print_something)
We can print "something" to the console very slowly, as follows:
print_something("something")
#################################################
#Wait(delay=1)
def print_something_else(something_else):
print(something_else)
##################################################
def print_something_else(something_else):
print(something_else)
dd = DelayedDecorator(Wait, delay=1)
print_something_else = dd(print_something_else)
##################################################
print_something_else("something")
Final Notes
It may look like a lot of code, but you don't have to write the classes DelayedDecorator and PolyArgDecoratorMeta every time. The only code you have to write personally is something like the following, which is fairly short:
from PolyArgDecoratorMeta import PolyArgDecoratorMeta
import time

class Wait(metaclass=PolyArgDecoratorMeta):
    def __init__(i, func, delay=2):
        i._func = func
        i._delay = delay

    def __call__(i, *args, **kwargs):
        time.sleep(i._delay)
        r = i._func(*args, **kwargs)
        return r
I have implemented the following retry decorator.
def retry(delay=10, retries=4):
    def retry_decorator(f):
        @wraps(f)
        def f_retry(*args, **kwargs):
            while retries > 1:
                try:
                    return f(*args, **kwargs)
                except Exception as e:
                    msg = "Exception: {}, Retrying in {} seconds...".format(e, delay)
                    print(msg)
                    time.sleep(delay)
                    retries -= 1
            return f(*args, **kwargs)
        return f_retry
    return retry_decorator
I get the error that retries is not defined. However, retries is mentioned in the function definition. I am unable to figure out what went wrong here. Any help will be appreciated.
I made it work by collecting the variables retries and delay in a dictionary and then using that inside the function.
def retry(delay=10, retries=4):
    def retry_decorator(f):
        @wraps(f)
        def f_retry(*args, **kwargs):
            opt_dict = {'retries': retries, 'delay': delay}
            while opt_dict['retries'] > 1:
                try:
                    return f(*args, **kwargs)
                except Exception as e:
                    msg = "Exception: {}, Retrying in {} seconds...".format(e, delay)
                    print(msg)
                    time.sleep(opt_dict['delay'])
                    opt_dict['retries'] -= 1
            return f(*args, **kwargs)
        return f_retry
    return retry_decorator
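For completeness (not part of the original answer): the error happens because the assignment retries -= 1 makes retries a local variable of f_retry, so the earlier while retries > 1 check reads an unbound local. Besides the dictionary trick, you can simply copy the closure variable into a local, which also gives every call a fresh retry budget:

from functools import wraps
import time

def retry(delay=10, retries=4):
    def retry_decorator(f):
        @wraps(f)
        def f_retry(*args, **kwargs):
            attempts = retries  # read the closure variable once; assign only to the local
            while attempts > 1:
                try:
                    return f(*args, **kwargs)
                except Exception as e:
                    print("Exception: {}, Retrying in {} seconds...".format(e, delay))
                    time.sleep(delay)
                    attempts -= 1
            return f(*args, **kwargs)
        return f_retry
    return retry_decorator

(A nonlocal retries declaration would also silence the error, but then the counter would be shared across all calls of the decorated function.)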
Summary
What I am after is a way of determining if the generator is "currently running".
Details
My definition of "currently running" is:
1. The generator code has begun execution (i.e., next, .send(None), or some other iteration operation, such as a for loop, has previously been executed on the generator), AND
2. the generator was not previously exhausted/closed (again note that for the above generator, .throw(some_error) is caught and will not exhaust the generator since a new loop iteration is started; without a loop or `yield from`, a handled error will usually exhaust the generator).
I understand that there is not a way (or at least, a trivially easy way) to "peek" ahead in a generator and find out if it will raise StopIteration at the next iteration. This is antithetical to the nature of the generator and is NOT what I am after.
Example
Say I have some generator g, created by some generator function f:
def f():
    '''Lots of 1s.'''
    print('I am now running.')
    while True:
        try:
            signal = yield 1
            if signal:
                break
        except GeneratorExit:
            raise  # important so that closing the generator doesn't cause a RuntimeError
        except Exception:
            print('caught exception!')
We can "initialize" the generator this way:
>>> g = f()
>>> next(g)
I am now running.
1
# OR:
>>> g.send(None) # equivalent to next(g)
I am now running.
1
If I send anything truthy into the generator, the StopIteration error is raised (e.g., g.send('foo')). Alternatively, if I call g.close(), and then do next(g), I will also get StopIteration. If I throw any errors with g.throw(e), the generator will continue. This is all as expected.
What I'd like to be able to do is something like the following, which shows whether or not this generator is running:
>>> g=f()
>>> g.running
False
>>> g.send(None)
I am now running.
1
>>> g.running
True
>>> g.close()
>>> g.running
False
I have provided one possible approach in an answer. However, I think there has to be a better way.
inspect.getgeneratorstate tells you the state of your generator:
>>> import inspect
...
... def gen():
... yield 1
...
... g = gen()
...
>>> inspect.getgeneratorstate(g)
'GEN_CREATED'
>>> next(g)
1
>>> inspect.getgeneratorstate(g)
'GEN_SUSPENDED'
>>> next(g)
>>> inspect.getgeneratorstate(g)
'GEN_CLOSED'
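The fourth state, 'GEN_RUNNING', is only observable while the generator body is executing, for example from inside the generator itself:

>>> def gen():
...     yield inspect.getgeneratorstate(g)
...
>>> g = gen()
>>> next(g)
'GEN_RUNNING'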
My only idea is some kind of generator wrapper with methods that delegate to a stored generator, but this is a bit unwieldy. It might look something like this (haven't tested this):
class MyGen():
    '''Generator wrapper with 'exhausted' and 'running' flags.'''
    def __new__(cls, some_func):
        cls._func = some_func
        return super().__new__(cls)

    def __call__(self, *args, **kwargs):
        self.exhausted = False
        self.running = False
        self.start(*args, **kwargs)
        return self

    def start(self, *args, **kwargs):
        self._gen = self._func(*args, **kwargs)

    def __next__(self):
        try:
            self.running = True
            return next(self._gen)  # return the yielded value, not None
        except StopIteration:
            self.exhausted = True
            self.running = False
            raise

    def send(self, *args, **kwargs):
        try:
            self.running = True
            return self._gen.send(*args, **kwargs)
        except StopIteration:
            self.exhausted = True
            self.running = False
            raise

    def throw(self, *args, **kwargs):
        try:
            return self._gen.throw(*args, **kwargs)
        except StopIteration:
            self.exhausted = True
            self.running = False
            raise

    def close(self):
        self._gen.close()
        self.exhausted = True
        self.running = False
You could even use this as a decorator:
@MyGen
def f():
    yield 1
I really don't like this because it seems like there really should be an existing place to look for a "generator exhausted" or "generator running" flag.
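Given inspect.getgeneratorstate from the other answer, one lighter option (a sketch, not from the original answer) is to derive the flags on demand instead of wrapping every method:

import inspect

def is_running(g):
    '''Started and not yet exhausted/closed.'''
    return inspect.getgeneratorstate(g) == inspect.GEN_SUSPENDED

def is_exhausted(g):
    return inspect.getgeneratorstate(g) == inspect.GEN_CLOSED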