How to make a time-out decorator for a function? - Python

I'm trying to write a decorator for some specific functions. If a function runs for more than, say, five seconds, the decorator should raise an error or interrupt the function.
from functools import wraps
from threading import Timer

def time_limit(func):
    def time_out():
        raise AssertionError
    @wraps(func)
    def deco(*args, **kwargs):
        timer = Timer(5, time_out)   # fire time_out after 5 seconds
        timer.start()
        res = func(*args, **kwargs)
        return res
    return deco
However, even though the timer fires, the decorated function still runs to completion without being interrupted:
In [69]: @time_limit
    ...: def f():
    ...:     time.sleep(6)
    ...:     print 'aaa'
    ...:

In [70]: f()
Exception in thread Thread-2764:
Traceback (most recent call last):
  File "/usr/local/Cellar/python/2.7.12/Frameworks/Python.framework/Versions/2.7/lib/python2.7/threading.py", line 801, in __bootstrap_inner
    self.run()
  File "/usr/local/Cellar/python/2.7.12/Frameworks/Python.framework/Versions/2.7/lib/python2.7/threading.py", line 1073, in run
    self.function(*self.args, **self.kwargs)
  File "<ipython-input-68-cc74c901d8b8>", line 4, in time_out
    raise AssertionError
AssertionError

aaa
How to fix the problem?
P.S. I'm using apscheduler to run the function periodically, so a time-out decorator is preferable.
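(Not part of the original post: a minimal sketch of one common fix, assuming a Unix system and that the decorated function runs in the main thread.) The Timer callback raises AssertionError in the timer's own thread, so it never interrupts the decorated call; signal.alarm instead raises in the thread that is running the function:

import signal
import time
from functools import wraps

def time_limit(seconds):
    def decorator(func):
        def _timed_out(signum, frame):
            raise AssertionError('timed out after %d seconds' % seconds)
        @wraps(func)
        def deco(*args, **kwargs):
            old_handler = signal.signal(signal.SIGALRM, _timed_out)
            signal.alarm(seconds)            # deliver SIGALRM after `seconds`
            try:
                return func(*args, **kwargs)
            finally:
                signal.alarm(0)              # cancel any pending alarm
                signal.signal(signal.SIGALRM, old_handler)
        return deco
    return decorator

@time_limit(5)
def f():
    time.sleep(6)
    print 'aaa'   # never reached: AssertionError interrupts the sleep

Note that signal.alarm only works in the main thread, which may not hold under apscheduler; in that case the usual alternative is to run func in a worker thread and join(timeout) on it, raising if it is still alive (this abandons, rather than kills, the worker).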

Related

Understanding nested decorators missing required positional argument error

I wrote two decorators: verbose controls whether or not the decorated function prints its output, and announcer announces when the function is called.
import datetime as dt
import functools
import os
import sys

def verbose(func):
    '''Sets verbose mode on function'''
    @functools.wraps(func)
    def wrapper_func(verbboo=True, *args, **kwargs):
        # disabling print
        if not verbboo:
            sys.stdout = open(os.devnull, 'w')
        # running func
        ret = func(*args, **kwargs)
        # enabling print again
        if not verbboo:
            sys.stdout = sys.__stdout__
        return ret
    return wrapper_func
def announcer(func, endboo=True):
    '''Announces when the function is called and when it finishes, if specified'''
    @functools.wraps(func)
    def wrapper_func(*args, **kwargs):
        print('run {}.{}#{:%Y%m%d%H%M}'.format(
            func.__module__, func.__name__,
            dt.datetime.now())
        )
        ret = func(*args, **kwargs)
        if endboo:
            print('end {}.{}#{:%Y%m%d%H%M}'.format(
                func.__module__, func.__name__,
                dt.datetime.now())
            )
        return ret
    return wrapper_func
I then wrap the following function with both decorators:
@verbose
@announcer
def f(boo, opboo=True):
    if boo:
        print('This is True')
    if opboo:
        print('This is also True')
    return f

# testing
f(True)
But I receive the following error:
run __main__.f#202006021152
Traceback (most recent call last):
  File "/home/user/anaconda3/envs/mpl/lib/python3.8/runpy.py", line 193, in _run_module_as_main
    return _run_code(code, main_globals, None,
  File "/home/user/anaconda3/envs/mpl/lib/python3.8/runpy.py", line 86, in _run_code
    exec(code, run_globals)
  File "/home/user/project/test.py", line 17, in <module>
    f(True)
  File "/home/user/project/decorators.py", line 18, in wrapper_func
    ret = func(*args, **kwargs)
  File "/home/user/project/decorators.py", line 47, in wrapper_func
    ret = func(*args, **kwargs)
TypeError: f() missing 1 required positional argument: 'boo'
The error only occurs when I nest verbose on top of announcer. announcer by itself works fine. What is going on?
I think the problem is in this line:
def wrapper_func(verbboo=True, *args, **kwargs):
When you call f(True), the True is bound to verbboo (the first parameter of wrapper_func) instead of being passed along in *args, so boo never receives a value. The keyword argument verbboo interferes with the positional argument boo getting passed through.
When I put the verbboo parameter at the end, it still didn't run, but when I put it between *args and **kwargs (making it keyword-only), it did run, as sketched below.
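Here is a hedged sketch of that fix: making verbboo keyword-only (Python 3 syntax, matching the Python 3.8 traceback above), so that a positional True can no longer be captured by it:

import functools, os, sys

def verbose(func):
    '''Sets verbose mode on function'''
    @functools.wraps(func)
    def wrapper_func(*args, verbboo=True, **kwargs):
        # keyword-only: a positional call like f(True) can no longer
        # bind True to verbboo, so it is passed through to func
        if not verbboo:
            sys.stdout = open(os.devnull, 'w')
        ret = func(*args, **kwargs)
        if not verbboo:
            sys.stdout.close()
            sys.stdout = sys.__stdout__
        return ret
    return wrapper_func

f(True) then reaches boo through *args, and verbosity is controlled explicitly with f(True, verbboo=False).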

dictionary changed size during iteration in multithreading app

I do not know how to solve this problem:
Traceback (most recent call last):
  File "/usr/local/cabinet_dev/cabinet/lib/python3.4/site-packages/eventlet/hubs/hub.py", line 458, in fire_timers
    timer()
  File "/usr/local/cabinet_dev/cabinet/lib/python3.4/site-packages/eventlet/hubs/timer.py", line 58, in __call__
    cb(*args, **kw)
  File "/usr/local/cabinet_dev/cabinet/lib/python3.4/site-packages/eventlet/greenthread.py", line 218, in main
    result = function(*args, **kwargs)
  File "./monitor.py", line 148, in caughtBridge
    for call in self.active.keys():
RuntimeError: dictionary changed size during iteration
In the code below:
def caughtBridge(self):
    while True:
        event = self.bridgeQueue.get()
        uniqueid1 = str(event.headers.get('Uniqueid1'))
        uniqueid2 = str(event.headers.get('Uniqueid2'))
        for call in self.active.keys():
            if self.active[call]['uniqueid'] == uniqueid1:
                self.active[call]['uniqueid2'] = uniqueid2
            if self.active[call]['uniqueid'] == uniqueid1:
                for listener in self.listeners:
                    for number in listener.getNumbers():
                        if number == self.active[call]['exten']:
                            if not self.active[call]['answered']:
                                self.sendEvent({"status": "bridge", "id": self.active[call]['uniqueid'],
                                                "number": self.active[call]['exten']},
                                               listener.getRoom())
                                self.__callInfo(self.active[call], listener.getRoom())
                            self.active[call]['answered'] = True
        self.bridgeQueue.task_done()
Use a copy of self.active.keys(), for example:
for call in list(self.active.keys()):
It isn't clear whether you add or remove dict entries. In case of adding, the iterating thread will not see entries added after the snapshot was taken. In case of removing, the iterating thread can fail with a KeyError, which you have to guard against, as the snippet below illustrates.
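A self-contained illustration of both failure modes and the snapshot fix (hypothetical, not from the original code):

d = {1: 'a', 2: 'b', 3: 'c'}

# Iterating the live view while the dict is mutated fails:
#   for k in d.keys():
#       del d[k]      # RuntimeError: dictionary changed size during iteration

# A snapshot avoids the RuntimeError...
for k in list(d.keys()):
    # ...but another thread may still remove k after the snapshot was
    # taken, so guard the lookup to avoid a KeyError
    if k in d:
        del d[k]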
For example, with locking:
for call in list(self.active.keys()):
    <lock that call to prevent removal>
    if call in self.active:
        ...
        self.active[call]['answered'] = True
    else:
        # call removed, do nothing
        pass
    <unlock that call so other threads can proceed>
self.bridgeQueue.task_done()
Read the Python 3 documentation: threading.html#lock-objects.
Basically, implement a pair of methods self.lock(call) and self.unlock(call), for instance:
Untested code; to prevent deadlocks you have to guarantee that self.unlock(call) is always reached!
import threading

class XXX:
    def __init__(self):
        # One Lock instance shared by all threads
        self._lock = threading.Lock()
        # Initialise all self.active[call]['lock'] = False

    def lock(self, call):
        # self._lock is a threading.Lock instance
        # self._lock has to be the same object for all threads
        with self._lock:
            if call in self.active and not self.active[call]['lock']:
                self.active[call]['lock'] = True
                return True
            else:
                return False

    def unlock(self, call):
        with self._lock:
            self.active[call]['lock'] = False

# Usage:
for call in list(self.active.keys()):
    if self.lock(call):
        ...
        self.active[call]['answered'] = True
        self.unlock(call)
    else:
        # call removed, do nothing
        pass
self.bridgeQueue.task_done()

How to mock a tornado coroutine function using mock framework for unit testing?

The title simply describes my problem. I would like to mock _func_inner_1 with a specific return value. Thanks for any advice :)
code under test:
from tornado.gen import coroutine, Return
from tornado.testing import gen_test
from tornado.testing import AsyncTestCase
import mock

@coroutine
def _func_inner_1():
    raise Return(1)

@coroutine
def _func_under_test_1():
    temp = yield _func_inner_1()
    raise Return(temp + 1)
But this intuitive solution does not work:
class Test123(AsyncTestCase):
    @gen_test
    @mock.patch(__name__ + '._func_inner_1')
    def test_1(self, mock_func_inner_1):
        mock_func_inner_1.side_effect = Return(9)
        result_1 = yield _func_inner_1()
        print 'result_1', result_1
        result = yield _func_under_test_1()
        self.assertEqual(10, result, result)
It fails with the error below; it seems _func_inner_1 is not patched, due to its coroutine nature:
AssertionError: 2
If I wrap the mock function returned by patch with coroutine:
@gen_test
@mock.patch(__name__ + '._func_inner_1')
def test_1(self, mock_func_inner_1):
    mock_func_inner_1.side_effect = Return(9)
    mock_func_inner_1 = coroutine(mock_func_inner_1)
    result_1 = yield _func_inner_1()
    print 'result_1', result_1
    result = yield _func_under_test_1()
    self.assertEqual(10, result, result)
the error becomes:
Traceback (most recent call last):
  File "tornado/testing.py", line 118, in __call__
    result = self.orig_method(*args, **kwargs)
  File "tornado/testing.py", line 494, in post_coroutine
    timeout=timeout)
  File "tornado/ioloop.py", line 418, in run_sync
    return future_cell[0].result()
  File "tornado/concurrent.py", line 109, in result
    raise_exc_info(self._exc_info)
  File "tornado/gen.py", line 175, in wrapper
    yielded = next(result)
  File "coroutine_unit_test.py", line 39, in test_1
    mock_func_inner_1 = coroutine(mock_func_inner_1)
  File "tornado/gen.py", line 140, in coroutine
    return _make_coroutine_wrapper(func, replace_callback=True)
  File "tornado/gen.py", line 150, in _make_coroutine_wrapper
    @functools.wraps(func)
  File "functools.py", line 33, in update_wrapper
    setattr(wrapper, attr, getattr(wrapped, attr))
  File "mock.py", line 660, in __getattr__
    raise AttributeError(name)
AttributeError: __name__
This is the closest solution I can find, but the mocked function will NOT be reset after the test case executes, unlike what mock.patch does:
@gen_test
def test_4(self):
    global _func_inner_1
    mock_func_inner_1 = mock.create_autospec(_func_inner_1)
    mock_func_inner_1.side_effect = Return(100)
    mock_func_inner_1 = coroutine(mock_func_inner_1)
    _func_inner_1 = mock_func_inner_1
    result = yield _func_under_test_1()
    self.assertEqual(101, result, result)
There are two issues here:
First is the interaction between @mock.patch and @gen_test. gen_test works by converting a generator into a "normal" function; mock.patch only works on normal functions (as far as the decorator can tell, the generator returns as soon as it reaches the first yield, so mock.patch undoes all its work). To avoid this problem, you can either reorder the decorators (always put @mock.patch before @gen_test), or use the with form of mock.patch instead of the decorator form.
Second, calling a coroutine never raises an exception directly. Instead, it returns a Future which will contain either a result or an exception. The special Return exception is encapsulated by the coroutine system; you would never see it raised from outside a coroutine. When you create your mocks, you must create the appropriate Future and set it as the return value, instead of using side_effect to raise an exception.
The complete solution is:
from tornado.concurrent import Future
from tornado.gen import coroutine, Return
from tornado.testing import gen_test
from tornado.testing import AsyncTestCase
import mock

@coroutine
def _func_inner_1():
    raise Return(1)

@coroutine
def _func_under_test_1():
    temp = yield _func_inner_1()
    raise Return(temp + 1)

class Test123(AsyncTestCase):
    @mock.patch(__name__ + '._func_inner_1')
    @gen_test
    def test_1(self, mock_func_inner_1):
        future_1 = Future()
        future_1.set_result(9)
        mock_func_inner_1.return_value = future_1
        result_1 = yield _func_inner_1()
        print 'result_1', result_1
        result = yield _func_under_test_1()
        self.assertEqual(10, result, result)

import unittest
unittest.main()
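For completeness, a hedged sketch (not from the original answer; the test name is hypothetical) of the with form of mock.patch mentioned in the first point, which sidesteps the decorator-ordering issue entirely:

class Test456(AsyncTestCase):
    @gen_test
    def test_2(self):
        # Build the Future the mocked coroutine should return
        future = Future()
        future.set_result(9)
        # Patch only for the duration of the block; undone automatically
        with mock.patch(__name__ + '._func_inner_1', return_value=future):
            result = yield _func_under_test_1()
        self.assertEqual(10, result, result)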

Why is multiprocessing.Process able to pickle decorated functions?

So I read here that decorated functions cannot be pickled. Indeed:
import multiprocessing as mp

def deco(f):
    def wrapper(*args, **kwargs):
        try:
            f(*args, **kwargs)
        except:
            print 'Exception caught!'
    return wrapper

@deco
def f(x):
    print x
    raise OverflowError

if __name__ == '__main__':
    pool = mp.Pool(processes=1)
    for _ in pool.imap_unordered(f, range(10)):
        pass
    pool.close()
    pool.join()
    print 'All done'
Out:
Traceback (most recent call last):
  File "deco0.py", line 19, in <module>
    for _ in pool.imap_unordered(f, range(10)):
  File "/Users/usualme/anaconda/lib/python2.7/multiprocessing/pool.py", line 659, in next
    raise value
cPickle.PicklingError: Can't pickle <type 'function'>: attribute lookup __builtin__.function failed
But now, if I replace the pool map with a Process:
import multiprocessing as mp

def deco(f):
    def wrapper(*args, **kwargs):
        try:
            f(*args, **kwargs)
        except:
            print 'Exception caught!'
    return wrapper

@deco
def f(x):
    print x
    raise OverflowError

if __name__ == '__main__':
    p = mp.Process(target=f, args=(1,))
    p.start()
    p.join()
    print 'All done'
Out:
1
Exception caught!
All done
Why does this work? Doesn't Process need to pickle the decorated function as well?
It works because you're running on Linux, which doesn't need to pickle f to call it in a child process via Process.__init__. This is because f gets inherited by the child via os.fork. If you run the same code on Windows (which lacks fork), or try to pass f to Pool.apply or Pool.map (both of which need to pickle f to call it in a subprocess), you'll get an error.
This example will fail no matter what platform you use:
import multiprocessing as mp

def deco(f):
    def wrapper(*args, **kwargs):
        try:
            f(*args, **kwargs)
        except:
            print 'Exception caught!'
    return wrapper

@deco
def f(x):
    print x
    raise OverflowError

if __name__ == '__main__':
    p = mp.Pool()
    p.apply(f, args=(1,))  # f needs to be pickled here.
    print 'All done'
Output:
1
Exception caught!
Exception in thread Thread-2:
Traceback (most recent call last):
  File "/usr/lib/python2.7/threading.py", line 551, in __bootstrap_inner
    self.run()
  File "/usr/lib/python2.7/threading.py", line 504, in run
    self.__target(*self.__args, **self.__kwargs)
  File "/usr/lib/python2.7/multiprocessing/pool.py", line 319, in _handle_tasks
    put(task)
PicklingError: Can't pickle <type 'function'>: attribute lookup __builtin__.function failed
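As an aside (a workaround not in the original answer, sketched under the assumption that the wrapper is rebound to the module-level name f): applying functools.wraps makes the wrapper picklable by reference, because pickle looks functions up by module and __name__, and with wraps, __main__.f is the wrapper itself:

import multiprocessing as mp
from functools import wraps

def deco(f):
    @wraps(f)  # wrapper now reports __name__ == 'f', so it pickles as __main__.f
    def wrapper(*args, **kwargs):
        try:
            f(*args, **kwargs)
        except:
            print 'Exception caught!'
    return wrapper

@deco
def f(x):
    print x
    raise OverflowError

if __name__ == '__main__':
    pool = mp.Pool(processes=1)
    for _ in pool.imap_unordered(f, range(10)):  # now picklable
        pass
    pool.close()
    pool.join()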

Trap exception, try again decorator in Python

I have little experience with decorators in Python, but I'd like to write a function decorator that runs the function, catches a specific exception, and, if the exception is caught, retries the function a certain number of times. That is, I'd like to do this:
@retry_if_exception(BadStatusLine, max_retries=2)
def thing_that_sometimes_fails(self, foo):
    foo.do_something_that_sometimes_raises_BadStatusLine()
I assume this kind of thing is easy with decorators, but I'm not clear about how exactly to go about it.
from functools import wraps

def retry_if_exception(ex, max_retries):
    def outer(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            assert max_retries > 0
            x = max_retries
            while x:
                try:
                    return func(*args, **kwargs)
                except ex:
                    x -= 1
                    if not x:
                        raise  # out of retries: re-raise instead of returning None
        return wrapper
    return outer
See why you'd better use @wraps.
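For illustration, a small hypothetical demo of what @wraps preserves:

from functools import wraps

def no_wraps(func):
    def wrapper(*args, **kwargs):
        return func(*args, **kwargs)
    return wrapper

def with_wraps(func):
    @wraps(func)
    def wrapper(*args, **kwargs):
        return func(*args, **kwargs)
    return wrapper

@no_wraps
def a():
    """a's docstring"""

@with_wraps
def b():
    """b's docstring"""

print(a.__name__)  # 'wrapper' -- the original identity is lost
print(b.__name__)  # 'b'       -- @wraps copied __name__, __doc__, etc.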
I think you're basically wanting something like this:
def retry_if_exception(exception_type=Exception, max_retries=1):
    def decorator(fn):
        def wrapper(*args, **kwargs):
            for i in range(max_retries + 1):
                print('Try #', i + 1)
                try:
                    return fn(*args, **kwargs)
                except exception_type as e:
                    print('wrapper exception:', i + 1, e)
            # note: falls through and returns None once retries are exhausted
        return wrapper
    return decorator

@retry_if_exception()
def foo1():
    raise Exception('foo1')

@retry_if_exception(ArithmeticError)
def foo2():
    x = 1/0

@retry_if_exception(Exception, 2)
def foo3():
    raise Exception('foo3')
The following seems to do what you've described:
def retry_if_exception( exception, max_retries=2 ):
def _retry_if_exception( method_fn ):
# method_fn is the function that gives rise
# to the method that you've decorated,
# with signature (slf, foo)
from functools import wraps
def method_deco( slf, foo ):
tries = 0
while True:
try:
return method_fn(slf, foo)
except exception:
tries += 1
if tries > max_retries:
raise
return wraps(method_fn)(method_deco)
return _retry_if_exception
Here's an example of it in use:
d = {}

class Foo():
    def usually_raise_KeyError(self):
        print("d[17] = %s" % d[17])

foo1 = Foo()

class A():
    @retry_if_exception(KeyError, max_retries=2)
    def something_that_sometimes_fails(self, foo):
        print("About to call foo.usually_raise_KeyError()")
        foo.usually_raise_KeyError()

a = A()
a.something_that_sometimes_fails(foo1)
This gives:
About to call foo.usually_raise_KeyError()
About to call foo.usually_raise_KeyError()
About to call foo.usually_raise_KeyError()
Traceback (most recent call last):
  File " ......... TrapRetryDeco.py", line 39, in <module>
    a.something_that_sometimes_fails(foo1)
  File " ......... TrapRetryDeco.py", line 15, in method_deco
    return method_fn(slf, foo)
  File " ......... TrapRetryDeco.py", line 36, in something_that_sometimes_fails
    foo.usually_raise_KeyError()
  File " ......... TrapRetryDeco.py", line 28, in usually_raise_KeyError
    print("d[17] = %s" % d[17])
KeyError: 17
I assume that by "2 retries" you mean the operation will be attempted three times in all. Your example has a couple of complications which may obscure the basic setup: it seems you want a method decorator, since your function/method's first parameter is "self"; however, that method immediately delegates to some bad method of its foo parameter. I preserved these complications :)
In outline, you would do something along these lines:
import random

def shaky():
    1 / random.randint(0, 1)  # ZeroDivisionError about half the time

def retry_if_exception(f):
    def inner(retries=2):
        for retry in range(retries):
            try:
                return f()
            except ZeroDivisionError:
                print 'try {}'.format(retry)
        raise  # Python 2: bare raise re-raises the last exception
    return inner

@retry_if_exception
def thing_that_may_fail():
    shaky()

thing_that_may_fail()
As written, this will fail about half the time.
When it does fail, it prints:
try 0
try 1
Traceback (most recent call last):
  File "Untitled 2.py", line 23, in <module>
    thing_that_may_fail()
  File "Untitled 2.py", line 10, in inner
    return f()
  File "Untitled 2.py", line 21, in thing_that_may_fail
    shaky()
  File "Untitled 2.py", line 4, in shaky
    1/random.randint(0,1)
ZeroDivisionError: integer division or modulo by zero
You could adapt this structure to many different types of errors.
