How to use a decorator in my package - Python

I want to use a custom decorator from my own package (e.g. 'with_error_handler' in 'mypackage') and apply it to some functions. But calling the decorated functions either returns a bare function object or raises an error complaining about too many arguments.
In my package:
def with_error_handler(func):
    import traceback
    from functools import wraps
    from decorator import decorator
    @decorator
    @wraps(func)
    def error_handler(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except Exception as e:
            strError = traceback.format_exc() + __file__ + '(' + str(args) + str(kwargs) + ')'
            print(strError)
    return error_handler
And I want to execute the code below.
import mypackage

@mypackage.with_error_handler
def divnum(num):
    print(1/num)

@mypackage.with_error_handler
def divone():
    print(1/1)

if __name__ == '__main__':
    divnum(2)
    divone()
The results are:
>>> divnum(2)
...with_error_handler() takes 1 positional argument but 2 were given
>>> divone()
...<function __main__.divone>
Why do these errors occur, and how can I fix them?

There is no need for from decorator import decorator. Doing

import functools as ft
import traceback

def with_error_handler(func):
    @ft.wraps(func)
    def error_handler(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except Exception as e:
            strError = traceback.format_exc() + __file__ + '(' + str(args) + str(kwargs) + ')'
            print(strError)
    return error_handler

is okay.
>>> divone()
1.0
>>> divnum(2)
0.5
and as expected
>>> divnum(0)
Traceback (most recent call last):
  File "/home/user/mypackage.py", line <X>, in error_handler
    return func(*args, **kwargs)
  File "/home/user/mypackagetest.py", line <Y>, in divnum
    print(1/num)
ZeroDivisionError: division by zero
/home/user/mypackage.py((0,){})
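As an aside, if you do want the decorator package, its decorator.decorator expects a caller function that receives the wrapped function as its first argument. A minimal sketch of that pattern (my rewrite, not the asker's original code):

import traceback
from decorator import decorator

@decorator
def with_error_handler(func, *args, **kwargs):
    # decorator.decorator passes the wrapped function in as 'func'
    try:
        return func(*args, **kwargs)
    except Exception:
        print(traceback.format_exc())

With this form there is no inner error_handler to return, and @wraps is unnecessary because the decorator package preserves the wrapped function's signature itself.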

Related

How to decorate iterables with an error handler?

Suppose we have two kinds of methods: one returns a list, the other returns an iterator. They are comparable in the sense that both return values are iterable.
I'd like to write a decorator that catches errors raised during iteration. The problem is that an iterator is returned without being iterated, so no errors are caught at call time.
In the code below, the wrapped_properly decorator works around the issue by providing two separate wrappers: a default one (wrapper) and one specifically for generator functions (generatorfunctionwrapper). The approach feels quite complicated and verbose.
from inspect import isgeneratorfunction
from functools import wraps

def failing_generator():
    for i in range(1, 5):
        if i % 2 == 0:
            print('I dont like even numbers.')
            raise ValueError(i)
        yield i

def wrapped_badly(fn):
    @wraps(fn)
    def wrapper(*args, **kwargs):
        try:
            return fn(*args, **kwargs)
        except ValueError as err:
            print('Not to worry.')
    return wrapper

def wrapped_properly(fn):
    @wraps(fn)
    def wrapper(*args, **kwargs):
        try:
            return fn(*args, **kwargs)
        except ValueError as err:
            print('Not to worry.')
    @wraps(fn)
    def generatorfunctionwrapper(*args, **kwargs):
        try:
            yield from fn(*args, **kwargs)
        except ValueError as err:
            print('Not to worry.')
    if isgeneratorfunction(fn):
        return generatorfunctionwrapper
    else:
        return wrapper

for x in wrapped_properly(failing_generator)():
    print(x)
# Prints:
# 1
# I dont like even numbers.
# Not to worry.

for x in wrapped_badly(failing_generator)():
    print(x)
# Prints:
# 1
# I dont like even numbers.
# Traceback (most recent call last):
# ...
# ValueError: 2
Is there a better/more pythonic way to do this?
I would suggest returning an iterator no matter what iterable the original function returns.
def wrapped(fn):
    def wrapper(*args, **kwargs):
        try:
            yield from iter(fn(*args, **kwargs))
        except ValueError as err:
            print('Not to worry')
    return wrapper
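One caveat worth spelling out, using my own hypothetical functions rather than code from the question: every decorated function now returns a generator, even when the original returned a list, so callers relying on len() or indexing need a list(...) around the call:

@wrapped
def returns_list():
    return [1, 2, 3]

@wrapped
def returns_generator():
    yield 1
    raise ValueError(2)

print(list(returns_list()))   # [1, 2, 3], but only after wrapping in list()
for x in returns_generator():
    print(x)                  # prints 1, then 'Not to worry'

The upside of this trade-off is a single code path: errors raised lazily inside either kind of iterable are caught in the same place.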

deep-copied subclass of threading.local object throws TypeError: __init__() takes exactly 2 arguments (1 given) when accessed in thread pool

In the code below, an object is a subclass of threading.local. This object is deep-copied before being used in another thread.
import copy
import functools
import sys
import threading
import traceback
from multiprocessing import dummy

# This decorator is purely for debugging purpose.
def get_traceback(f):
    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        try:
            return f(*args, **kwargs)
        except Exception, ex:
            ret = '#' * 60
            ret += "\nException caught:"
            ret += "\n" + '-' * 60
            ret += "\n" + traceback.format_exc()
            ret += "\n" + '-' * 60
            ret += "\n" + "#" * 60
            print >> sys.stderr, ret
            sys.stderr.flush()
            raise ex
    return wrapper

@get_traceback
def func(o):
    print("Addr %s" % o.address)

class Server(threading.local):
    def __init__(self, address):
        self.address = address

# Initialize object.
server = Server("10.10.10.10")

# Make a deepcopy.
# Things work fine when this statement is removed, otherwise
# throws exception as described.
server = copy.deepcopy(server)

# Create pool, execute the function with deep-copied object.
thread_pool = dummy.Pool(1)
try:
    thread_pool.map(func, [server])
except Exception as e:
    print('Caught exception in '
          'test: %s: %s' % (e.__class__, e))
    print(traceback.format_exc())
    raise e
finally:
    thread_pool.close()
    thread_pool.join()
Running it throws the exception below; it only happens when the copy.deepcopy(...) statement is present. Note that the get_traceback(...) decorator is purely for debugging purposes.
Traceback (most recent call last):
  File "/Users/hcho/PycharmProjects/test/file.py", line 13, in wrapper
    return f(*args, **kwargs)
  File "/Users/hcho/PycharmProjects/test/file.py", line 30, in func
    print("Addr %s" % o.address)
TypeError: __init__() takes exactly 2 arguments (1 given)
The code works if I remove the server = copy.deepcopy(server) statement.
My guess
It seems that copy.deepcopy(...) builds the new object using __dict__.update, so threading.local's __setattr__(...) is never invoked, and something there causes the copied object to behave oddly when accessed in another thread. Any thoughts?
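A minimal sketch that may help test this guess (my code, not from the question). threading.local saves the constructor arguments so it can re-run __init__ the first time each new thread touches the object, and deepcopy appears to rebuild the object without those saved arguments, so the re-run fails:

import copy
import threading

class Server(threading.local):
    def __init__(self, address):
        self.address = address

server = copy.deepcopy(Server("10.10.10.10"))

def read():
    # first access from a new thread re-runs __init__; after deepcopy
    # the saved 'address' argument seems to be gone -> TypeError
    print server.address

t = threading.Thread(target=read)
t.start()
t.join()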

Timeout a function inside a class with a decorator

I'm trying to put a timeout on a function send.
I have found some elements in these posts :
https://stackoverflow.com/a/494273/3824723
https://stackoverflow.com/a/2282656/3824723
https://stackoverflow.com/a/11731208/3824723
The first one seems to apply to every function rather than a specific one, which is why I chose a decorator implementation like the second one.
I tried to mix them together and came up with this:
from functools import wraps
import os
import signal

class TimeoutError(Exception):
    pass

def timeout_func(error_message="Timeout in send pipe!"):
    def decorator(func):
        def _handle_timeout(signum, frame):
            if args[0].action=="warn":
                print "WARNING : ",error_message
            elif args[0].action=="kill":
                raise TimeoutError(error_message)
        def wrapper(*args, **kwargs):
            print args
            signal.signal(signal.SIGALRM, _handle_timeout,args[0].action)
            signal.alarm(args[0].seconds)
            print str(args)
            try:
                result = func(*args, **kwargs)
            finally:
                signal.alarm(0)
            return result
        return wraps(func)(wrapper)
    return decorator

class Link(object):
    def __init__(self,timeout=1,action="warn"):
        self.timeout=timeout
        self.action=action
    @timeout_func
    def send(self,value):
        print "working : ", value
It gives me this:
In [6]: l=Link()
In [7]: l.send(1)
---------------------------------------------------------------------------
TypeError                                 Traceback (most recent call last)
<ipython-input> in <module>()
----> 1 l.send(1)
TypeError: decorator() takes exactly 1 argument (2 given)
My issue is that I would like to pass the timeout value in seconds to the decorator through Link's self. I don't fully understand the decorator mechanism here and can't figure out what is wrong.
Can someone explain how this decorator works and what I should modify to fix it? Or, if you can think of one, suggest a simpler/more explicit way to implement it?
So I've been debugging my issue, and I found a working solution:
from functools import wraps
import signal

class TimeoutError(Exception):
    pass

def timeout_func(f):
    def _handle_timeout(signum, frame):
        raise TimeoutError("timeout error")
    def wrapper(*args, **kwargs):
        signal.signal(signal.SIGALRM, _handle_timeout)
        signal.setitimer(signal.ITIMER_REAL, args[0].timeout)  # args[0] is self of the Link class here
        try:
            result = f(*args, **kwargs)
        finally:
            signal.alarm(0)
        return result
    return wrapper
class Link(object):
    def __init__(self, timeout=0.1, action="warn"):
        self.timeout = timeout
        self.action = action
    def send(self, value):  # I use this func to handle the exceptions
        try:
            self.send_timeout(value)  # call the timeout function
        except TimeoutError as e:  # handle Timeout exception
            if self.action == "warn":
                print "WARNING : Timeout error in pipe send!"
            elif self.action == "kill":
                print "Killing Link : ", e
                raise
        except (Exception, KeyboardInterrupt) as e:
            print "Exception in link : ", e
            raise
    @timeout_func  # apply timeout decorator to this function
    def send_timeout(self, value):
        # DO STUFF HERE
        pass
To call it:
l=Link()
l.send("any value")
I use signal.setitimer(signal.ITIMER_REAL, args[0].timeout) because it allows setting a timeout below one second, which was not possible with signal.alarm(), which only accepts an integer number of seconds.
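For reference, a variant closer to the original goal, reading both timeout and action from self inside a single decorator, might look like the sketch below. This is my rearrangement of the code above, not part of the answer; it assumes a Unix platform and the main thread (SIGALRM-based timeouts only work there), and TimeoutError is the exception class defined above:

from functools import wraps
import signal

def timeout_method(f):
    # assumes the instance has .timeout (seconds, may be fractional) and .action
    @wraps(f)
    def wrapper(self, *args, **kwargs):
        def _handle_timeout(signum, frame):
            raise TimeoutError("Timeout in send pipe!")
        old_handler = signal.signal(signal.SIGALRM, _handle_timeout)
        signal.setitimer(signal.ITIMER_REAL, self.timeout)
        try:
            return f(self, *args, **kwargs)
        except TimeoutError:
            if self.action == "warn":
                print "WARNING : Timeout error in pipe send!"
            else:
                raise
        finally:
            signal.setitimer(signal.ITIMER_REAL, 0)   # cancel the timer
            signal.signal(signal.SIGALRM, old_handler)  # restore previous handler
    return wrapper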

How to mock a tornado coroutine function using mock framework for unit testing?

The title simply describes my problem: I would like to mock _func_inner_1 with a specific return value. Thanks for any advice :)
code under test:
from tornado.gen import coroutine, Return
from tornado.testing import gen_test
from tornado.testing import AsyncTestCase
import mock

@coroutine
def _func_inner_1():
    raise Return(1)

@coroutine
def _func_under_test_1():
    temp = yield _func_inner_1()
    raise Return(temp + 1)
But this intuitive solution does not work:
class Test123(AsyncTestCase):
    @gen_test
    @mock.patch(__name__ + '._func_inner_1')
    def test_1(self, mock_func_inner_1):
        mock_func_inner_1.side_effect = Return(9)
        result_1 = yield _func_inner_1()
        print 'result_1', result_1
        result = yield _func_under_test_1()
        self.assertEqual(10, result, result)
It fails with the error below; it seems _func_inner_1 is not patched, due to its coroutine nature:
AssertionError: 2
If I wrap the mock returned by patch in coroutine:
@gen_test
@mock.patch(__name__ + '._func_inner_1')
def test_1(self, mock_func_inner_1):
    mock_func_inner_1.side_effect = Return(9)
    mock_func_inner_1 = coroutine(mock_func_inner_1)
    result_1 = yield _func_inner_1()
    print 'result_1', result_1
    result = yield _func_under_test_1()
    self.assertEqual(10, result, result)
the error becomes:
Traceback (most recent call last):
  File "tornado/testing.py", line 118, in __call__
    result = self.orig_method(*args, **kwargs)
  File "tornado/testing.py", line 494, in post_coroutine
    timeout=timeout)
  File "tornado/ioloop.py", line 418, in run_sync
    return future_cell[0].result()
  File "tornado/concurrent.py", line 109, in result
    raise_exc_info(self._exc_info)
  File "tornado/gen.py", line 175, in wrapper
    yielded = next(result)
  File "coroutine_unit_test.py", line 39, in test_1
    mock_func_inner_1 = coroutine(mock_func_inner_1)
  File "tornado/gen.py", line 140, in coroutine
    return _make_coroutine_wrapper(func, replace_callback=True)
  File "tornado/gen.py", line 150, in _make_coroutine_wrapper
    @functools.wraps(func)
  File "functools.py", line 33, in update_wrapper
    setattr(wrapper, attr, getattr(wrapped, attr))
  File "mock.py", line 660, in __getattr__
    raise AttributeError(name)
AttributeError: __name__
This is the closest solution I can find, but the mocked function will NOT be reset after the test case executes, unlike what patch does:
@gen_test
def test_4(self):
    global _func_inner_1
    mock_func_inner_1 = mock.create_autospec(_func_inner_1)
    mock_func_inner_1.side_effect = Return(100)
    mock_func_inner_1 = coroutine(mock_func_inner_1)
    _func_inner_1 = mock_func_inner_1
    result = yield _func_under_test_1()
    self.assertEqual(101, result, result)
There are two issues here:
First is the interaction between @mock.patch and @gen_test. gen_test works by converting a generator into a "normal" function; mock.patch only works on normal functions (as far as the decorator can tell, the generator returns as soon as it reaches the first yield, so mock.patch undoes all its work). To avoid this problem, you can either reorder the decorators (always put @mock.patch before @gen_test) or use the with form of mock.patch instead of the decorator form.
Second, coroutines should never raise an exception. Instead, they return a Future which will contain a result or an exception. The special Return exception is encapsulated by the coroutine system; you would never raise it from a Future. When you create your mocks, you must create the appropriate Future and set it as the return value instead of using side_effect to raise an exception.
The complete solution is:
from tornado.concurrent import Future
from tornado.gen import coroutine, Return
from tornado.testing import gen_test
from tornado.testing import AsyncTestCase
import mock

@coroutine
def _func_inner_1():
    raise Return(1)

@coroutine
def _func_under_test_1():
    temp = yield _func_inner_1()
    raise Return(temp + 1)

class Test123(AsyncTestCase):
    @mock.patch(__name__ + '._func_inner_1')
    @gen_test
    def test_1(self, mock_func_inner_1):
        future_1 = Future()
        future_1.set_result(9)
        mock_func_inner_1.return_value = future_1
        result_1 = yield _func_inner_1()
        print 'result_1', result_1
        result = yield _func_under_test_1()
        self.assertEqual(10, result, result)

import unittest
unittest.main()
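For completeness, here is roughly what the with form of mock.patch mentioned above might look like; it sidesteps the decorator-ordering issue entirely because the patch is applied inside the already-running test body (my sketch, reusing the same names as the solution):

class Test123(AsyncTestCase):
    @gen_test
    def test_2(self):
        future_1 = Future()
        future_1.set_result(9)
        # patching inside the test body, so gen_test's generator
        # machinery never interferes with mock.patch
        with mock.patch(__name__ + '._func_inner_1') as mock_func_inner_1:
            mock_func_inner_1.return_value = future_1
            result = yield _func_under_test_1()
        self.assertEqual(10, result, result)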

Trap exception, try again decorator in Python

I have little experience with decorators in Python, but I'd like to write a function decorator that runs the function, catches a specific exception, and if the exception is caught then re-tries the function a certain number of times. That is, I'd like to do this:
@retry_if_exception(BadStatusLine, max_retries=2)
def thing_that_sometimes_fails(self, foo):
    foo.do_something_that_sometimes_raises_BadStatusLine()
I assume this kind of thing is easy with decorators, but I'm not clear about how exactly to go about it.
from functools import wraps

def retry_if_exception(ex, max_retries):
    def outer(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            assert max_retries > 0
            x = max_retries
            while x:
                try:
                    return func(*args, **kwargs)
                except ex:
                    x -= 1
            # note: if every attempt raises, this falls through and returns None
        return wrapper
    return outer
See why you should use @wraps.
I think you're basically wanting something like this:
def retry_if_exception(exception_type=Exception, max_retries=1):
    def decorator(fn):
        def wrapper(*args, **kwargs):
            for i in range(max_retries+1):
                print('Try #', i+1)
                try:
                    return fn(*args, **kwargs)
                except exception_type as e:
                    print('wrapper exception:', i+1, e)
        return wrapper
    return decorator

@retry_if_exception()
def foo1():
    raise Exception('foo1')

@retry_if_exception(ArithmeticError)
def foo2():
    x=1/0

@retry_if_exception(Exception, 2)
def foo3():
    raise Exception('foo3')
The following seems to do what you've described:
def retry_if_exception( exception, max_retries=2 ):
    def _retry_if_exception( method_fn ):
        # method_fn is the function that gives rise
        # to the method that you've decorated,
        # with signature (slf, foo)
        from functools import wraps
        def method_deco( slf, foo ):
            tries = 0
            while True:
                try:
                    return method_fn(slf, foo)
                except exception:
                    tries += 1
                    if tries > max_retries:
                        raise
        return wraps(method_fn)(method_deco)
    return _retry_if_exception
Here's an example of it in use:
d = {}

class Foo():
    def usually_raise_KeyError(self):
        print("d[17] = %s" % d[17])
foo1 = Foo()

class A():
    @retry_if_exception(KeyError, max_retries=2)
    def something_that_sometimes_fails( self, foo ):
        print("About to call foo.usually_raise_KeyError()")
        foo.usually_raise_KeyError()

a = A()
a.something_that_sometimes_fails(foo1)
This gives:
About to call foo.usually_raise_KeyError()
About to call foo.usually_raise_KeyError()
About to call foo.usually_raise_KeyError()
Traceback (most recent call last):
  File " ......... TrapRetryDeco.py", line 39, in <module>
    a.something_that_sometimes_fails(foo1)
  File " ......... TrapRetryDeco.py", line 15, in method_deco
    return method_fn(slf, foo)
  File " ......... TrapRetryDeco.py", line 36, in something_that_sometimes_fails
    foo.usually_raise_KeyError()
  File " ......... TrapRetryDeco.py", line 28, in usually_raise_KeyError
    print("d[17] = %s" % d[17])
KeyError: 17
I assume that by "2 retries" you mean the operation will be attempted 3x all told. Your example has a couple of complications which may obscure the basic setup:
It seems you want a method decorator, as your function/method's first parameter is "self"; however, that method immediately delegates to some bad method of its foo parameter. I preserved these complications :)
As an outline, you would do something along these lines:
import random

def shaky():
    1/random.randint(0,1)

def retry_if_exception(f):
    def inner(retries=2):
        for retry in range(retries):
            try:
                return f()
            except ZeroDivisionError:
                print 'try {}'.format(retry)
        raise
    return inner

@retry_if_exception
def thing_that_may_fail():
    shaky()

thing_that_may_fail()
As written, each attempt fails about half the time, so with two tries the whole call still fails about a quarter of the time. When it does fail, it prints:
try 0
try 1
Traceback (most recent call last):
  File "Untitled 2.py", line 23, in <module>
    thing_that_may_fail()
  File "Untitled 2.py", line 10, in inner
    return f()
  File "Untitled 2.py", line 21, in thing_that_may_fail
    shaky()
  File "Untitled 2.py", line 4, in shaky
    1/random.randint(0,1)
ZeroDivisionError: integer division or modulo by zero
You could adapt this structure to many different types of errors.
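Pulling the answers together, a parameterized variant that re-raises once the retries are exhausted might look like the sketch below (my consolidation, not any single answer above; the optional delay between attempts is my addition):

import time
from functools import wraps

def retry_if_exception(exception_type=Exception, max_retries=2, delay=0):
    def decorator(fn):
        @wraps(fn)
        def wrapper(*args, **kwargs):
            for attempt in range(max_retries + 1):
                try:
                    return fn(*args, **kwargs)
                except exception_type:
                    if attempt == max_retries:
                        raise  # out of retries: let the original error propagate
                    if delay:
                        time.sleep(delay)
        return wrapper
    return decorator

@retry_if_exception(KeyError, max_retries=2)
def flaky(d):
    return d[17]   # flaky({}) raises KeyError after 3 attempts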
