Trap exception, try again decorator in Python - python

I have little experience with decorators in Python, but I'd like to write a function decorator that runs the function, catches a specific exception, and if the exception is caught then re-tries the function a certain number of times. That is, I'd like to do this:
@retry_if_exception(BadStatusLine, max_retries=2)
def thing_that_sometimes_fails(self, foo):
    # Delegates to foo; the delegate is what raises BadStatusLine.
    foo.do_something_that_sometimes_raises_BadStatusLine()
I assume this kind of thing is easy with decorators, but I'm not clear about how exactly to go about it.

from functools import wraps

def retry_if_exception(ex, max_retries):
    """Decorator factory: retry the wrapped callable while ``ex`` is raised.

    The wrapped function is attempted up to ``max_retries`` times.  If every
    attempt raises ``ex``, the exception is swallowed and the wrapper
    returns None (callers cannot distinguish that from a function that
    legitimately returns None).

    :param ex: exception class (or tuple of classes) to catch and retry on.
    :param max_retries: total number of attempts; must be positive.
    """
    def outer(func):
        # Validate configuration once, at decoration time, rather than on
        # every call as the original did.
        assert max_retries > 0

        @wraps(func)  # keep func's name/docstring on the wrapper
        def wrapper(*args, **kwargs):
            attempts_left = max_retries
            while attempts_left:
                try:
                    return func(*args, **kwargs)
                except ex:
                    attempts_left -= 1
            # All attempts raised: fall through to None, matching the
            # original's (documented above) swallow-on-exhaustion behavior.
            return None
        return wrapper
    return outer
See why you'd better use @wraps:

I think you're basically wanting something like this:
def retry_if_exception(exception_type=Exception, max_retries=1):
    """Decorator factory: call the function up to ``max_retries + 1`` times,
    printing each attempt; if every attempt raises ``exception_type`` the
    error is swallowed and the wrapper returns None."""
    def decorator(fn):
        def wrapper(*args, **kwargs):
            for i in range(max_retries + 1):
                print('Try #', i + 1)
                try:
                    return fn(*args, **kwargs)
                except exception_type as e:
                    print('wrapper exception:', i + 1, e)
        return wrapper
    return decorator

@retry_if_exception()
def foo1():
    raise Exception('foo1')

@retry_if_exception(ArithmeticError)
def foo2():
    x = 1 / 0

@retry_if_exception(Exception, 2)
def foo3():
    raise Exception('foo3')

The following seems to do what you've described:
def retry_if_exception(exception, max_retries=2):
    """Decorator factory for methods with signature (self, foo).

    Calls the wrapped method; when ``exception`` is raised, retries up to
    ``max_retries`` additional times, then re-raises the last error.
    """
    def _retry_if_exception(method_fn):
        # method_fn is the function that gives rise to the method that
        # you've decorated, with signature (slf, foo).
        from functools import wraps

        def method_deco(slf, foo):
            tries = 0
            while True:
                try:
                    return method_fn(slf, foo)
                except exception:
                    tries += 1
                    if tries > max_retries:
                        raise
        return wraps(method_fn)(method_deco)
    return _retry_if_exception
Here's an example of it in use:
d = {}

class Foo():
    def usually_raise_KeyError(self):
        # d has no key 17, so this raises KeyError every time.
        print("d[17] = %s" % d[17])

foo1 = Foo()

class A():
    @retry_if_exception(KeyError, max_retries=2)
    def something_that_sometimes_fails(self, foo):
        print("About to call foo.usually_raise_KeyError()")
        foo.usually_raise_KeyError()

a = A()
# Demo: after three failed attempts the KeyError propagates uncaught,
# producing the traceback shown below.
a.something_that_sometimes_fails(foo1)
This gives:
About to call foo.usually_raise_KeyError()
About to call foo.usually_raise_KeyError()
About to call foo.usually_raise_KeyError()
Traceback (most recent call last):
File " ......... TrapRetryDeco.py", line 39, in <module>
a.something_that_sometimes_fails( foo1)
File " ......... TrapRetryDeco.py", line 15, in method_deco
return method_fn( slf, foo)
File " ......... TrapRetryDeco.py", line 36, in something_that_sometimes_fails
foo.usually_raise_KeyError()
File " ......... TrapRetryDeco.py", line 28, in usually_raise_KeyError
print("d[17] = %s" % d[17])
KeyError: 17
I assume that by "2 retries" you mean the operation will be attempted 3x all told. Your example has a couple of complications which may obscure the basic setup:
It seems you want a method decorator, as your function/method's first parameter is "self"; however, that method immediately delegates to some bad method of its foo parameter. I preserved these complications :)

As outline, you would do something along these lines:
import random

def shaky():
    """Raise ZeroDivisionError roughly half the time (when randint picks 0)."""
    1 / random.randint(0, 1)
def retry_if_exception(f):
    """Decorate f so that calling the wrapper retries on ZeroDivisionError.

    The wrapper takes an optional ``retries`` count (default 2); each failed
    attempt is printed, and once all attempts fail the last error is
    re-raised to the caller.
    """
    def inner(retries=2):
        last_error = None
        for attempt in range(retries):
            try:
                return f()
            except ZeroDivisionError as exc:
                print('try {}'.format(attempt))
                last_error = exc
        # Every attempt failed: propagate the final error.  (The original
        # used a bare `raise` here, which re-raised in Python 2 but raises
        # RuntimeError in Python 3 once the except block has exited.)
        # NOTE(review): with retries=0 there is no error to re-raise.
        raise last_error
    return inner
@retry_if_exception
def thing_that_may_fail():
    shaky()

# Demo call: fails (with the printed retries) about half the time.
thing_that_may_fail()
As written, that will fail about 1/2 the time.
When it does fail, prints:
try 0
try 1
Traceback (most recent call last):
File "Untitled 2.py", line 23, in <module>
thing_that_may_fail()
File "Untitled 2.py", line 10, in inner
return f()
File "Untitled 2.py", line 21, in thing_that_may_fail
shaky()
File "Untitled 2.py", line 4, in shaky
1/random.randint(0,1)
ZeroDivisionError: integer division or modulo by zero
You could adapt this structure to many different types of errors.

Related

How to decorate iterables with an error handler?

Suppose we have two kinds of methods: one returns a list, the other returns an iterator. So they are very comparable in the sense that both return values are iterable.
I'd like to write a decorator that catches errors inside the iteration. The problem is that the iterator is returned without iteration and so no errors will be caught.
In the below code, the wrapped_properly decorator works around the issue by providing two separate wrappers, a default one (wrapper) and one specifically for generator functions (generatorfunctionwrapper). The approach feels quite complicated and verbose.
from inspect import isgeneratorfunction
from functools import wraps
def failing_generator():
    """Yield 1..4, but raise ValueError as soon as an even number comes up."""
    for value in range(1, 5):
        if value % 2 == 0:
            print('I dont like even numbers.')
            raise ValueError(value)
        yield value
def wrapped_badly(fn):
    """Naive error-swallowing wrapper.

    Catches ValueError raised while *calling* fn, but when fn is a generator
    function the call merely builds the generator, so errors raised during
    iteration escape uncaught — hence "badly".
    """
    @wraps(fn)
    def wrapper(*args, **kwargs):
        try:
            return fn(*args, **kwargs)
        except ValueError as err:
            print('Not to worry.')
    return wrapper
def wrapped_properly(fn):
    """Swallow ValueError from fn, whether fn is a plain function or a
    generator function.

    A generator-aware wrapper is selected when fn is a generator function,
    so errors raised mid-iteration are also caught.
    """
    @wraps(fn)
    def wrapper(*args, **kwargs):
        try:
            return fn(*args, **kwargs)
        except ValueError as err:
            print('Not to worry.')

    @wraps(fn)
    def generatorfunctionwrapper(*args, **kwargs):
        try:
            yield from fn(*args, **kwargs)
        except ValueError as err:
            print('Not to worry.')

    if isgeneratorfunction(fn):
        return generatorfunctionwrapper
    return wrapper
for x in wrapped_properly(failing_generator)():
    print(x)
# Prints:
# 1
# I dont like even numbers.
# Not to worry.

for x in wrapped_badly(failing_generator)():
    print(x)
# Prints:
# 1
# I dont like even numbers.
# Traceback (most recent call last):
# ...
# ValueError: 2
Is there a better/more pythonic way to do this?
I would suggest returning an iterator no matter what iterable the original function returns.
def wrapped(fn):
    """Always return a generator: iterate whatever iterable fn returns,
    swallowing ValueError raised during iteration (a note is printed
    instead of propagating the error)."""
    def wrapper(*args, **kwargs):
        try:
            yield from iter(fn(*args, **kwargs))
        except ValueError as err:
            print('Not to worry')
    return wrapper

How to use decorator in my package

I want to use original decorator in my original package (ex.'with_error_handler' in 'mypackage') and execute some function.
But it returns a function object, or an error message saying that too many arguments were given.
In my package:
def with_error_handler(func):
    """Question's (broken) version: combines the third-party @decorator with
    @wraps, which is what produces the reported errors."""
    import traceback
    from functools import wraps
    from decorator import decorator

    @decorator
    @wraps(func)
    def error_handler(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except Exception as e:
            strError = traceback.format_exc() + __file__ + '(' + str(args) + str(kwargs) + ')'
            print(strError)
    return error_handler
And I want to execute below code.
import mypackage

@mypackage.with_error_handler
def divnum(num):
    print(1/num)

@mypackage.with_error_handler
def divone():
    print(1/1)

if __name__ == '__main__':
    divnum(2)
    divone()
These result is here
>>>divnum(2)
・・・with_error_handler() takes 1 positional argument but 2 were given
>>>divone()
・・・<function __main__.divone>
Why do these errors occur?
How can I fix them?
There is no need to from decorator import decorator. Doing
import functools as ft
import traceback
def with_error_handler(func):
    """Catch any exception from func, print the traceback plus the call's
    arguments, and return None instead of propagating."""
    @ft.wraps(func)
    def error_handler(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except Exception as e:
            strError = traceback.format_exc() + __file__ + '(' + str(args) + str(kwargs) + ')'
            print(strError)
    return error_handler
is okay.
>>> divone()
1.0
>>> divnum(2)
0.5
and as expected
>>> divnum(0)
Traceback (most recent call last):
File "/home/user/mypackage.py", line <X>, in error_handler
return func(*args, **kwargs)
File "/home/user/mypackagetest.py", line <Y>, in divnum
print(1/num)
ZeroDivisionError: division by zero
/home/user/mypackage.py((0,){})

how to make a time-out deco to a function?

I'm trying to write a decorator for some specific functions. If the function runs for more than, say, five seconds, the decorator will raise an error or interrupt the function.
def time_limit(func):
    """Decorator: start a 5-second watchdog timer around each call.

    If func is still running after 5 seconds, the timer thread raises
    AssertionError.  NOTE(review): the error is raised *in the timer
    thread*, so it cannot actually interrupt func — this is exactly the
    symptom the question describes; a signal/process-based timeout is
    needed to truly abort the call.
    """
    def time_out():
        raise AssertionError

    @wraps(func)
    def deco(*args, **kwargs):
        timer = Timer(5, time_out)
        timer.start()
        try:
            return func(*args, **kwargs)
        finally:
            # Fix: cancel the watchdog once func finishes; otherwise it
            # fires 5 s later and raises in the timer thread even when the
            # call succeeded.
            timer.cancel()
    return deco
however even though the deco works, the function still works without interruptions:
In [69]: #time_limit
...: def f():
...: time.sleep(6)
...: print 'aaa'
...:
In [70]: f()
Exception in thread Thread-2764:
Traceback (most recent call last):
File "/usr/local/Cellar/python/2.7.12/Frameworks/Python.framework/Versions/2.7/lib/python2.7/threading.py", line 801, in __bootstrap_inner
self.run()
File "/usr/local/Cellar/python/2.7.12/Frameworks/Python.framework/Versions/2.7/lib/python2.7/threading.py", line 1073, in run
self.function(*self.args, **self.kwargs)
File "<ipython-input-68-cc74c901d8b8>", line 4, in time_out
raise AssertionError
AssertionError
aaa
How to fix the problem?
P.S. I'm using apscheduler to run the function periodically, so a time-out decorator is better.

How to mock a tornado coroutine function using mock framework for unit testing?

The title simply describes my problem. I would like to mock "_func_inner_1" with a specific return value. Thanks for any advice :)
code under test:
from tornado.gen import coroutine, Return
from tornado.testing import gen_test
from tornado.testing import AsyncTestCase
import mock
@coroutine
def _func_inner_1():
    raise Return(1)

@coroutine
def _func_under_test_1():
    # Awaits the inner coroutine, then returns its value plus one.
    temp = yield _func_inner_1()
    raise Return(temp + 1)
But this intuitive solution does not work:
class Test123(AsyncTestCase):
    # Broken attempt from the question: @mock.patch sits *inside* @gen_test,
    # so the patch is undone at the first yield and the real coroutine runs.
    @gen_test
    @mock.patch(__name__ + '._func_inner_1')
    def test_1(self, mock_func_inner_1):
        mock_func_inner_1.side_effect = Return(9)
        result_1 = yield _func_inner_1()
        print('result_1', result_1)  # parenthesized for Python 3 compatibility
        result = yield _func_under_test_1()
        self.assertEqual(10, result, result)
With the error below; it seems _func_inner_1 is not patched due to its coroutine nature.
AssertionError: 2
if I add coroutine to patch returned mock function
@gen_test
@mock.patch(__name__ + '._func_inner_1')
def test_1(self, mock_func_inner_1):
    mock_func_inner_1.side_effect = Return(9)
    # Wrapping a Mock in coroutine() fails: functools.wraps needs __name__,
    # which Mock does not provide — this is the AttributeError shown below.
    mock_func_inner_1 = coroutine(mock_func_inner_1)
    result_1 = yield _func_inner_1()
    print('result_1', result_1)
    result = yield _func_under_test_1()
    self.assertEqual(10, result, result)
the error becomes:
Traceback (most recent call last):
File "tornado/testing.py", line 118, in __call__
result = self.orig_method(*args, **kwargs)
File "tornado/testing.py", line 494, in post_coroutine
timeout=timeout)
File "tornado/ioloop.py", line 418, in run_sync
return future_cell[0].result()
File "tornado/concurrent.py", line 109, in result
raise_exc_info(self._exc_info)
File "tornado/gen.py", line 175, in wrapper
yielded = next(result)
File "coroutine_unit_test.py", line 39, in test_1
mock_func_inner_1 = coroutine(mock_func_inner_1)
File "tornado/gen.py", line 140, in coroutine
return _make_coroutine_wrapper(func, replace_callback=True)
File "tornado/gen.py", line 150, in _make_coroutine_wrapper
#functools.wraps(func)
File "functools.py", line 33, in update_wrapper
setattr(wrapper, attr, getattr(wrapped, attr))
File "mock.py", line 660, in __getattr__
raise AttributeError(name)
AttributeError: __name__
This is the closest solution I can find, but the mocking function will NOT be reset after test case execution, unlike what patch does
@gen_test
def test_4(self):
    # Works, but replaces the module global by hand, so the original
    # function is NOT restored after the test (unlike mock.patch).
    global _func_inner_1
    mock_func_inner_1 = mock.create_autospec(_func_inner_1)
    mock_func_inner_1.side_effect = Return(100)
    mock_func_inner_1 = coroutine(mock_func_inner_1)
    _func_inner_1 = mock_func_inner_1
    result = yield _func_under_test_1()
    self.assertEqual(101, result, result)
There are two issues here:
First is the interaction between @mock.patch and @gen_test. gen_test works by converting a generator into a "normal" function; mock.patch only works on normal functions (as far as the decorator can tell, the generator returns as soon as it reaches the first yield, so mock.patch undoes all its work). To avoid this problem, you can either reorder the decorators (always put @mock.patch before @gen_test), or use the with form of mock.patch instead of the decorator form.
Second, coroutines should never raise an exception. Instead, they return a Future which will contain a result or an exception. The special Return exception is encapsulated by the coroutine system; you would never raise it from a Future. When you create your mocks, you must create the appropriate Future and set it as the return value instead of using side_effect to raise an exception.
The complete solution is:
from tornado.concurrent import Future
from tornado.gen import coroutine, Return
from tornado.testing import gen_test
from tornado.testing import AsyncTestCase
import mock

@coroutine
def _func_inner_1():
    raise Return(1)

@coroutine
def _func_under_test_1():
    temp = yield _func_inner_1()
    raise Return(temp + 1)

class Test123(AsyncTestCase):
    # @mock.patch must be OUTSIDE @gen_test, and the mock must return a
    # resolved Future rather than raising Return via side_effect.
    @mock.patch(__name__ + '._func_inner_1')
    @gen_test
    def test_1(self, mock_func_inner_1):
        future_1 = Future()
        future_1.set_result(9)
        mock_func_inner_1.return_value = future_1
        result_1 = yield _func_inner_1()
        print('result_1', result_1)  # parenthesized for Python 3 compatibility
        result = yield _func_under_test_1()
        self.assertEqual(10, result, result)

import unittest
unittest.main()

Get full traceback

How can i get full traceback in the following case, including the calls of func2 and func functions?
import traceback

def func():
    """Raise a dummy exception and print its (short) traceback in place."""
    try:
        raise Exception('Dummy')
    except:
        traceback.print_exc()

def func2():
    func()

func2()
When i run this, i get:
Traceback (most recent call last):
File "test.py", line 5, in func
raise Exception('Dummy')
Exception: Dummy
traceback.format_stack() is not what i want, as need traceback object to be passed to a third party module.
I am particularly interested in this case:
import logging

def func():
    """Raise a dummy exception and log it; the logged traceback only spans
    from the try block to the raise site."""
    try:
        raise Exception('Dummy')
    except:
        logging.exception("Something awful happened!")

def func2():
    func()

func2()
In this case i am getting:
ERROR:root:Something awful happened!
Traceback (most recent call last):
File "test.py", line 9, in func
raise Exception('Dummy')
Exception: Dummy
As mechmind answered, the stack trace consists only of frames between the site where the exception was raised and the site of the try block. If you need the full stack trace, apparently you're out of luck.
Except that it's obviously possible to extract the stack entries from top-level to the current frame—traceback.extract_stack manages it just fine. The problem is that the information obtained by traceback.extract_stack comes from direct inspection of stack frames without creating a traceback object at any point, and the logging API requires a traceback object to affect traceback output.
Fortunately, logging doesn't require an actual traceback object, it requires an object that it can pass to the formatting routines of the traceback module. traceback doesn't care either—it only uses two attributes of the traceback, the frame and the line number. So, it should be possible to create a linked list of duck-typed faux-traceback objects and pass it off as the traceback.
import sys
class FauxTb(object):
    """Duck-typed stand-in for a traceback object.

    Exposes the three attributes the traceback module reads (tb_frame,
    tb_lineno, tb_next), so a chain of these can be passed wherever a real
    traceback is expected.
    """
    def __init__(self, tb_frame, tb_lineno, tb_next):
        self.tb_frame = tb_frame
        self.tb_lineno = tb_lineno
        self.tb_next = tb_next
def current_stack(skip=0):
    """Return [(frame, lineno), ...] from the caller outward to the top of
    the stack, skipping ``skip`` extra frames nearest the caller.

    A dummy ZeroDivisionError is raised purely to obtain the current frame
    portably via the exception's traceback.
    """
    try:
        1/0
    except ZeroDivisionError:
        f = sys.exc_info()[2].tb_frame
        # Fix: xrange is Python 2 only; range works identically here.
        # skip + 2 steps past this function's own frame and its caller-count.
        for i in range(skip + 2):
            f = f.f_back
        lst = []
        while f is not None:
            lst.append((f, f.f_lineno))
            f = f.f_back
        return lst
def extend_traceback(tb, stack):
    """Extend traceback with stack info.

    Prepends one FauxTb node per (frame, lineno) pair in ``stack`` onto the
    real traceback ``tb`` and returns the new head of the chain.
    """
    head = tb
    for tb_frame, tb_lineno in stack:
        head = FauxTb(tb_frame, tb_lineno, head)
    return head
def full_exc_info():
    """Like sys.exc_info, but includes the full traceback."""
    # Splice the caller-to-top stack (skipping this helper) onto the real
    # traceback of the exception currently being handled.
    t, v, tb = sys.exc_info()
    full_tb = extend_traceback(tb, current_stack(1))
    return t, v, full_tb
With these functions in place, your code only requires a trivial modification:
import logging

def func():
    """Raise a dummy exception and log it with the extended traceback
    produced by full_exc_info()."""
    try:
        raise Exception('Dummy')
    except:
        logging.error("Something awful happened!", exc_info=full_exc_info())

def func2():
    func()

func2()
...to give the expected output:
ERROR:root:Something awful happened!
Traceback (most recent call last):
File "a.py", line 52, in <module>
func2()
File "a.py", line 49, in func2
func()
File "a.py", line 43, in func
raise Exception('Dummy')
Exception: Dummy
Note that the faux-traceback objects are fully usable for introspection—displaying local variables or as argument to pdb.post_mortem()—because they contain references to real stack frames.
This is based on user4815162342's answer, but a bit more minimalistic:
import sys
import collections

# Lightweight duck-typed traceback node: just the three attributes the
# traceback module reads.
FauxTb = collections.namedtuple("FauxTb", ["tb_frame", "tb_lineno", "tb_next"])

def full_exc_info():
    """Like sys.exc_info, but includes the full traceback."""
    t, v, tb = sys.exc_info()
    # Skip two frames: this function and the except clause that called it
    # (that caller is already part of the real traceback).
    f = sys._getframe(2)
    while f is not None:
        tb = FauxTb(f, f.f_lineno, tb)
        f = f.f_back
    return t, v, tb
It avoids throwing the dummy exception, at the cost of requiring the usage of sys._getframe(). It assumes being used in the except clause where the exception was caught, as it goes up two stack frames (full_exc_info and the function that calls full_exc_info – that would be the function that calls the raising code, and as such is already included in the original traceback).
This gives the same output as the code in user4815162342's answer.
If you don't mind the slight differences in formatting, you can also use
import logging

def func():
    """Raise a dummy exception and log it with stack_info=True, which adds
    a second, root-to-here stack listing after the exception traceback."""
    try:
        raise Exception('Dummy')
    except:
        logging.exception("Something awful happened!", stack_info=True)

def func2():
    func()

func2()
which results in
ERROR:root:Something awful happened!
Traceback (most recent call last):
File "test.py", line 5, in func
raise Exception('Dummy')
Exception: Dummy
Stack (most recent call last):
File "test.py", line 12, in <module>
func2()
File "test.py", line 10, in func2
func()
File "test.py", line 7, in func
logging.exception("Something awful happened!", stack_info=True)
In this case, you'll get a trace from the try to the exception, and a second from the root call to the location of the logging call.
Stack trace is collected when exception bubbles up. So you should print traceback on top of desired stack:
import traceback

def func():
    raise Exception('Dummy')

def func2():
    func()

# Catch at the top of the desired stack so the printed traceback includes
# every frame from here down to the raise site.
try:
    func2()
except:
    traceback.print_exc()
i have written a module that writes a more complete traceback
The module is here documentation is docs
(also you can get the module from pypi
sudo pip install pd
)
To catch and print exceptions, do the following:
import pd

try:
    ...  # <python code>
except BaseException:
    pd.print_exception_ex(follow_objects=1)
The stack trace looks like this one here:
Exception: got it
#1 def kuku2(self = {'a': 42, 'b': [1, 2, 3, 4]}, depth = 1) at t test_pd.py:29
Calls next frame at:
raise Exception('got it') at: test_pd.py:29
#2 def kuku2(self = {'a': 42, 'b': [1, 2, 3, 4]}, depth = 2) at test_pd.py:28
Calls next frame at:
self.kuku2( depth - 1 ) at: test_pd.py:28
#3 def kuku2(self = {'a': 42, 'b': [1, 2, 3, 4]}, depth = 3) at test_pd.py:28
Calls next frame at:
self.kuku2( depth - 1 ) at: test_pd.py:28
#4 def kuku2(self = {'a': 42, 'b': [1, 2, 3, 4]}, depth = 4) at test_pd.py:28
Calls next frame at:
self.kuku2( depth - 1 ) at: test_pd.py:28
#5 def kuku2(self = {'a': 42, 'b': [1, 2, 3, 4]}, depth = 5) at test_pd.py:28
Calls next frame at:
self.kuku2( depth - 1 ) at: test_pd.py:28
#6 def kuku2(self = {'a': 42, 'b': [1, 2, 3, 4]}, depth = 6) at test_pd.py:28
Calls next frame at:
self.kuku2( depth - 1 ) at: test_pd.py:28
#7 def main() at test_pd.py:44
Local variables:
n = {'a': 42, 'b': [1, 2, 3, 4]}
Calls next frame at:
pd.print_exception_ex( follow_objects = 1 ) at: test_pd.py:44
follow_objects = 0 will not print out object content (with complex data structures follow_objects can take a lot of time).
There's some more information that could be extracted from the traceback, and I sometimes prefer neater, more 'logical' information instead of the multi-line blob with files, line numbers and code snippets given by traceback. Preferably one line should say all the essentials.
To achieve this I use following function:
def raising_code_info():
    """Summarize the raise site of the exception currently being handled.

    Returns a one-line string of the form
    ``<module>.<class>.<method>(...), line N`` (class part omitted when the
    call was not made on an object), or '' when no usable trace exists.
    Must be called from inside an ``except`` block.
    """
    code_info = ''
    try:
        frames = inspect.trace()
        # Fix: need at least two entries — frames[0] is the try site,
        # frames[1] the frame where the exception was raised.  The original
        # indexed frames[1] after only checking len(frames) > 0.
        if len(frames) > 1:
            # Source line at the try site, e.g. "my_obj.method2()".
            full_method_name = frames[0][4][0].rstrip('\n\r').strip()
            line_number = frames[1][2]
            module_name = frames[0][0].f_globals['__name__']
            if module_name == '__main__':
                module_name = os.path.basename(sys.argv[0]).replace('.py', '')
            class_name = ''
            obj_name_dot_method = full_method_name.split('.', 1)
            if len(obj_name_dot_method) > 1:
                # Looks like an attribute call: resolve the receiver's class
                # from the try-site frame's locals, best-effort.
                obj_name, full_method_name = obj_name_dot_method
                try:
                    class_name = frames[0][0].f_locals[obj_name].__class__.__name__
                except:
                    pass
            method_name = module_name + '.'
            if len(class_name) > 0:
                method_name += class_name + '.'
            method_name += full_method_name
            code_info = '%s, line %d' % (method_name, line_number)
    finally:
        # Break the frames/traceback reference cycle.
        del frames
        # Fix: sys.exc_clear() exists only in Python 2; guard it so this
        # function also works on Python 3.
        if hasattr(sys, 'exc_clear'):
            sys.exc_clear()
    return code_info
It gives the <module>.<class>.<method> name and the line number, e.g.:
(example module name: test.py):
(line 73:)
def function1():
    # Always raises ZeroDivisionError.  (Modernized from the original
    # Python 2 `print 1/0` statement.)
    print(1 / 0)

class AClass(object):
    def method2(self):
        # Always raises IndexError: assigning past the end of an empty list.
        a = []
        a[3] = 1

def try_it_out():
    # try it with a function
    try:
        function1()
    except Exception as what:  # Py3 syntax; original used `except Exception, what`
        print('%s: "%s"' % (raising_code_info(), what))
    # try it with a method
    try:
        my_obj_name = AClass()
        my_obj_name.method2()
    except Exception as what:
        print('%s: "%s"' % (raising_code_info(), what))

if __name__ == '__main__':
    try_it_out()
test.function1(), line 75: "integer division or modulo by zero"
test.AClass.method2(), line 80: "list assignment index out of range"
Which might be slightly neater in some use-cases.

Categories