So I read here that decorated functions cannot be pickled. Indeed:
import multiprocessing as mp

def deco(f):
    def wrapper(*args, **kwargs):
        try:
            f(*args, **kwargs)
        except:
            print 'Exception caught!'
    return wrapper

@deco
def f(x):
    print x
    raise OverflowError

if __name__ == '__main__':
    pool = mp.Pool(processes=1)
    for _ in pool.imap_unordered(f, range(10)):
        pass
    pool.close()
    pool.join()
    print 'All done'
Out:
Traceback (most recent call last):
  File "deco0.py", line 19, in <module>
    for _ in pool.imap_unordered(f, range(10)):
  File "/Users/usualme/anaconda/lib/python2.7/multiprocessing/pool.py", line 659, in next
    raise value
cPickle.PicklingError: Can't pickle <type 'function'>: attribute lookup __builtin__.function failed
But now if I replace the pool map with a Process:
import multiprocessing as mp

def deco(f):
    def wrapper(*args, **kwargs):
        try:
            f(*args, **kwargs)
        except:
            print 'Exception caught!'
    return wrapper

@deco
def f(x):
    print x
    raise OverflowError

if __name__ == '__main__':
    p = mp.Process(target=f, args=(1,))
    p.start()
    p.join()
    print 'All done'
Out:
1
Exception caught!
All done
Why does it work? Doesn't Process need to pickle the decorated function as well?
It's working because you're running on a Unix-like platform (the traceback shows OS X; the same holds on Linux), which doesn't need to pickle f to call it in a child process via Process.__init__: f gets inherited by the child via os.fork. If you run the same code on Windows (which lacks fork), or try to pass f to Pool.apply/Pool.map (both of which need to pickle f to send it to a worker process), you'll get an error.
This example will fail no matter what platform you use:
import multiprocessing as mp

def deco(f):
    def wrapper(*args, **kwargs):
        try:
            f(*args, **kwargs)
        except:
            print 'Exception caught!'
    return wrapper

@deco
def f(x):
    print x
    raise OverflowError

if __name__ == '__main__':
    p = mp.Pool()
    p.apply(f, args=(1,))  # f needs to be pickled here.
    print 'All done'
Output:
1
Exception caught!
Exception in thread Thread-2:
Traceback (most recent call last):
  File "/usr/lib/python2.7/threading.py", line 551, in __bootstrap_inner
    self.run()
  File "/usr/lib/python2.7/threading.py", line 504, in run
    self.__target(*self.__args, **self.__kwargs)
  File "/usr/lib/python2.7/multiprocessing/pool.py", line 319, in _handle_tasks
    put(task)
PicklingError: Can't pickle <type 'function'>: attribute lookup __builtin__.function failed
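A minimal sketch (assuming Python 3, where multiprocessing.set_start_method is available) that forces the spawn start method; with it, even the plain Process example has to pickle f and fails on any platform:

import multiprocessing as mp

def deco(f):
    def wrapper(*args, **kwargs):
        try:
            f(*args, **kwargs)
        except Exception:
            print('Exception caught!')
    return wrapper

@deco
def f(x):
    print(x)
    raise OverflowError

if __name__ == '__main__':
    # 'spawn' starts a fresh interpreter, so the Process target must be pickled,
    # and the closure returned by deco() cannot be pickled by reference.
    mp.set_start_method('spawn')
    p = mp.Process(target=f, args=(1,))
    p.start()   # fails with a pickling error before the child ever runs f
    p.join()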
Related
This is an old issue whose suggested workaround does not work.
Below is a complete example showing how the suggested approach fails; uncomment the marked line in get() to trigger the error.
import multiprocessing
import os
import time
from multiprocessing import get_context
from multiprocessing.queues import Queue


class SharedCounter(object):
    def __init__(self, n=0):
        self.count = multiprocessing.Value('i', n)

    def increment(self, n=1):
        with self.count.get_lock():
            self.count.value += n

    @property
    def value(self):
        return self.count.value


class MyQueue(Queue):
    def __init__(self, *args, **kwargs):
        super(MyQueue, self).__init__(*args, ctx=get_context(), **kwargs)
        self.size = SharedCounter(0)

    def put(self, *args, **kwargs):
        self.size.increment(1)
        super(MyQueue, self).put(*args, **kwargs)

    def get(self, *args, **kwargs):
        # self.size.increment(-1)  # uncomment this for error
        return super(MyQueue, self).get(*args, **kwargs)

    def qsize(self):
        return self.size.value

    def empty(self):
        return not self.qsize()

    def clear(self):
        while not self.empty():
            self.get()


def worker(queue):
    while True:
        item = queue.get()
        if item is None:
            break
        print(f'[{os.getpid()}]: got {item}')
        time.sleep(1)


if __name__ == '__main__':
    num_processes = 4
    q = MyQueue()
    pool = multiprocessing.Pool(num_processes, worker, (q,))
    for i in range(10):
        q.put("hello")
        q.put("world")
    for i in range(num_processes):
        q.put(None)
    q.close()
    q.join_thread()
    pool.close()
    pool.join()
For some reason, the newly defined MyQueue forgets about the size attribute.
Process SpawnPoolWorker-1:
Traceback (most recent call last):
  File "/usr/local/Cellar/python@3.11/3.11.0/Frameworks/Python.framework/Versions/3.11/lib/python3.11/multiprocessing/process.py", line 314, in _bootstrap
    self.run()
  File "/usr/local/Cellar/python@3.11/3.11.0/Frameworks/Python.framework/Versions/3.11/lib/python3.11/multiprocessing/process.py", line 108, in run
    self._target(*self._args, **self._kwargs)
  File "/usr/local/Cellar/python@3.11/3.11.0/Frameworks/Python.framework/Versions/3.11/lib/python3.11/multiprocessing/pool.py", line 109, in worker
    initializer(*initargs)
  File "/Users/user/Library/Application Support/JetBrains/PyCharm2022.3/scratches/scratch.py", line 47, in worker
    item = queue.get()
           ^^^^^^^^^^^
  File "/Users/user/Library/Application Support/JetBrains/PyCharm2022.3/scratches/scratch.py", line 31, in get
    self.size.increment(-1)  # uncomment this for error
    ^^^^^^^^^
AttributeError: 'MyQueue' object has no attribute 'size'
Process SpawnPoolWorker-2:
Traceback (most recent call last):
  File "/usr/local/Cellar/python@3.11/3.11.0/Frameworks/Python.framework/Versions/3.11/lib/python3.11/multiprocessing/process.py", line 314, in _bootstrap
    self.run()
  File "/usr/local/Cellar/python@3.11/3.11.0/Frameworks/Python.framework/Versions/3.11/lib/python3.11/multiprocessing/process.py", line 108, in run
    self._target(*self._args, **self._kwargs)
  File "/usr/local/Cellar/python@3.11/3.11.0/Frameworks/Python.framework/Versions/3.11/lib/python3.11/multiprocessing/pool.py", line 109, in worker
    initializer(*initargs)
  File "/Users/user/Library/Application Support/JetBrains/PyCharm2022.3/scratches/scratch.py", line 47, in worker
    item = queue.get()
           ^^^^^^^^^^^
  File "/Users/user/Library/Application Support/JetBrains/PyCharm2022.3/scratches/scratch.py", line 31, in get
    self.size.increment(-1)  # uncomment this for error
    ^^^^^^^^^
AttributeError: 'MyQueue' object has no attribute 'size'
Process SpawnPoolWorker-4:
Process SpawnPoolWorker-3:
Traceback (most recent call last):
Traceback (most recent call last):
  File "/usr/local/Cellar/python@3.11/3.11.0/Frameworks/Python.framework/Versions/3.11/lib/python3.11/multiprocessing/process.py", line 314, in _bootstrap
    self.run()
  File "/usr/local/Cellar/python@3.11/3.11.0/Frameworks/Python.framework/Versions/3.11/lib/python3.11/multiprocessing/process.py", line 108, in run
    self._target(*self._args, **self._kwargs)
  File "/usr/local/Cellar/python@3.11/3.11.0/Frameworks/Python.framework/Versions/3.11/lib/python3.11/multiprocessing/pool.py", line 109, in worker
    initializer(*initargs)
  File "/Users/user/Library/Application Support/JetBrains/PyCharm2022.3/scratches/scratch.py", line 47, in worker
    item = queue.get()
           ^^^^^^^^^^^
  File "/Users/user/Library/Application Support/JetBrains/PyCharm2022.3/scratches/scratch.py", line 31, in get
    self.size.increment(-1)  # uncomment this for error
    ^^^^^^^^^
AttributeError: 'MyQueue' object has no attribute 'size'
  File "/usr/local/Cellar/python@3.11/3.11.0/Frameworks/Python.framework/Versions/3.11/lib/python3.11/multiprocessing/process.py", line 314, in _bootstrap
    self.run()
  File "/usr/local/Cellar/python@3.11/3.11.0/Frameworks/Python.framework/Versions/3.11/lib/python3.11/multiprocessing/process.py", line 108, in run
    self._target(*self._args, **self._kwargs)
  File "/usr/local/Cellar/python@3.11/3.11.0/Frameworks/Python.framework/Versions/3.11/lib/python3.11/multiprocessing/pool.py", line 109, in worker
    initializer(*initargs)
  File "/Users/user/Library/Application Support/JetBrains/PyCharm2022.3/scratches/scratch.py", line 47, in worker
    item = queue.get()
           ^^^^^^^^^^^
  File "/Users/user/Library/Application Support/JetBrains/PyCharm2022.3/scratches/scratch.py", line 31, in get
    self.size.increment(-1)  # uncomment this for error
    ^^^^^^^^^
AttributeError: 'MyQueue' object has no attribute 'size'
Well, you didn't override __setstate__ and __getstate__ to include your variable. They are used by pickle to control serialization (see "Handling Stateful Objects" in the pickle docs), so you should override them to add your variable to what gets serialized.
import multiprocessing
import os
import time
from multiprocessing import get_context
from multiprocessing.queues import Queue


class SharedCounter(object):
    def __init__(self, n=0):
        self.count = multiprocessing.Value('i', n)

    def increment(self, n=1):
        with self.count.get_lock():
            self.count.value += n

    @property
    def value(self):
        return self.count.value


class MyQueue(Queue):
    def __init__(self, *args, **kwargs):
        super(MyQueue, self).__init__(*args, ctx=get_context(), **kwargs)
        self.size = SharedCounter(0)

    def __getstate__(self):
        return (super(MyQueue, self).__getstate__(), self.size)

    def __setstate__(self, state):
        super(MyQueue, self).__setstate__(state[0])
        self.size = state[1]

    def put(self, *args, **kwargs):
        self.size.increment(1)
        super(MyQueue, self).put(*args, **kwargs)

    def get(self, *args, **kwargs):
        self.size.increment(-1)
        return super(MyQueue, self).get(*args, **kwargs)

    def qsize(self):
        return self.size.value

    def empty(self):
        return not self.qsize()

    def clear(self):
        while not self.empty():
            self.get()


def worker(queue):
    while True:
        item = queue.get()
        if item is None:
            break
        print(f'[{os.getpid()}]: got {item}')
        time.sleep(1)


if __name__ == '__main__':
    num_processes = 4
    q = MyQueue()
    pool = multiprocessing.Pool(num_processes, initializer=worker, initargs=(q,))
    for i in range(10):
        q.put("hello")
        q.put("world")
    for i in range(num_processes):
        q.put(None)
    q.close()
    q.join_thread()
    pool.close()
    pool.join()
Note that in Python 3 we don't need to use super(MyQueue, self); a bare super() suffices, makes it easier to rename the class later, and brings other portability and refactoring benefits, so consider swapping every super(x, y) for just super().
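For example, the MyQueue overrides above could be written with the zero-argument form like this (same behaviour, just the shorter super() calls):

class MyQueue(Queue):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, ctx=get_context(), **kwargs)
        self.size = SharedCounter(0)

    def __getstate__(self):
        # ship the shared counter alongside the base queue's state
        return (super().__getstate__(), self.size)

    def __setstate__(self, state):
        super().__setstate__(state[0])
        self.size = state[1]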
I have a Python service which may be ended by an exception - I'm OK with that. The problem is that it spawns a child process which continues to run even after its parent has failed.
import multiprocessing as mp
from time import sleep
import os
import atexit
import psutil


@atexit.register  # Doesn't get fired at exception
def goodbye():
    current_process = psutil.Process()
    children = current_process.children(recursive=True)
    for child in children:
        print('Kill child with pid {}'.format(child.pid))
        try:
            child.terminate()
        except:
            pass
    print("You are now leaving the Python sector.")


def func():  # Child process
    while True:
        ppid = os.getppid()
        print("Parent process id:", ppid)
        if ppid == 1:
            print("Parent process has terminated")
            break
        sleep(1)


t = mp.Process(target=func, args=())
t.start()

print(9 + "0")  # Exception here
print("I'm ok")
And the service (formally) keeps running until it gets a kick from outside:
Parent process id: 29118
Traceback (most recent call last):
  File "stestcp.py", line 32, in <module>
    print(9 + "0")  # Exception here
TypeError: unsupported operand type(s) for +: 'int' and 'str'
Parent process id: 29118
Parent process id: 29118
Parent process id: 29118
Parent process id: 29118
Parent process id: 29118
^CError in atexit._run_exitfuncs:
Traceback (most recent call last):
  File "/usr/lib/python3.6/multiprocessing/popen_fork.py", line 28, in poll
Process Process-1:
    pid, sts = os.waitpid(self.pid, flag)
KeyboardInterrupt
Traceback (most recent call last):
  File "/usr/lib/python3.6/multiprocessing/process.py", line 258, in _bootstrap
    self.run()
  File "/usr/lib/python3.6/multiprocessing/process.py", line 93, in run
    self._target(*self._args, **self._kwargs)
  File "stestcp.py", line 27, in func
    sleep(1)
KeyboardInterrupt
Kill child with pid 29119
You are now leaving the Python sector.
The question is: is there any way to call some global fallback function (like atexit) when the program fails with an exception?
Thanks to Andrej Kesely, a solution was found. The working example now looks like this:
import multiprocessing as mp
from time import sleep
import sys
import os
import atexit
import psutil


class ExitHooks(object):
    def __init__(self):
        self.exit_code = None
        self.exception = None

    def hook(self):
        self._orig_exit = sys.exit
        sys.exit = self.exit
        sys.excepthook = self.exc_handler

    def exit(self, code=0):
        self.exit_code = code
        self._orig_exit(code)

    def exc_handler(self, exc_type, exc, *args):  # Called at exception
        self.exception = exc
        goodbye()


hooks = ExitHooks()
hooks.hook()


@atexit.register  # Doesn't get fired at exception
def goodbye():
    if hooks.exit_code is not None:
        print("death by sys.exit(%d)" % hooks.exit_code)
    elif hooks.exception is not None:
        print("death by exception: %s" % hooks.exception)
    else:
        print("natural death")
    current_process = psutil.Process()
    children = current_process.children(recursive=True)
    for child in children:
        print('Kill child with pid {}'.format(child.pid))
        try:
            child.terminate()
        except:
            pass
    print("You are now leaving the Python sector.")


def func():  # Child process
    while True:
        ppid = os.getppid()
        print("Parent process id:", ppid)
        if ppid == 1:
            print("Parent process has terminated")
            break
        sleep(1)


t = mp.Process(target=func, args=())
t.start()
sleep(2)

print(9 + "0")  # Exception here
I'm trying to write a decorator for some specific functions. If a function runs for more than, say, five seconds, the decorator should raise an error or interrupt the function.
from functools import wraps
from threading import Timer


def time_limit(func):
    def time_out():
        raise AssertionError

    @wraps(func)
    def deco(*args, **kwargs):
        timer = Timer(5, time_out)
        timer.start()
        res = func(*args, **kwargs)
        return res
    return deco
However, even though the decorator fires, the function still runs to completion without interruption:
In [69]: @time_limit
    ...: def f():
    ...:     time.sleep(6)
    ...:     print 'aaa'
    ...:

In [70]: f()
Exception in thread Thread-2764:
Traceback (most recent call last):
  File "/usr/local/Cellar/python/2.7.12/Frameworks/Python.framework/Versions/2.7/lib/python2.7/threading.py", line 801, in __bootstrap_inner
    self.run()
  File "/usr/local/Cellar/python/2.7.12/Frameworks/Python.framework/Versions/2.7/lib/python2.7/threading.py", line 1073, in run
    self.function(*self.args, **self.kwargs)
  File "<ipython-input-68-cc74c901d8b8>", line 4, in time_out
    raise AssertionError
AssertionError

aaa
How do I fix the problem?
P.S. I'm using apscheduler to run the function periodically, so a time-out decorator is preferable.
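The Timer raises the AssertionError in its own worker thread, so the main thread running f() is never interrupted. A minimal sketch of one possible fix, assuming a Unix platform and that the decorated function runs in the main thread (SIGALRM is only delivered there, so this won't help if apscheduler runs the job in a worker thread):

import signal
from functools import wraps


def time_limit(seconds=5):
    def decorator(func):
        def _timeout(signum, frame):
            # raised in the main thread, so it really interrupts func()
            raise AssertionError('time limit exceeded')

        @wraps(func)
        def wrapper(*args, **kwargs):
            old_handler = signal.signal(signal.SIGALRM, _timeout)
            signal.alarm(seconds)              # deliver SIGALRM after `seconds`
            try:
                return func(*args, **kwargs)
            finally:
                signal.alarm(0)                # cancel any pending alarm
                signal.signal(signal.SIGALRM, old_handler)
        return wrapper
    return decorator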
I am trying to use this example as a template for a queuing system in my CherryPy app.
I was able to convert it from Python 2 to Python 3 (changing from Queue import Empty to from queue import Empty) and to run it on Ubuntu. But when I run it on Windows I get the following error:
F:\workspace\test>python test.py
Traceback (most recent call last):
  File "test.py", line 112, in <module>
    broker.start()
  File "C:\Anaconda3\lib\multiprocessing\process.py", line 105, in start
    self._popen = self._Popen(self)
  File "C:\Anaconda3\lib\multiprocessing\context.py", line 212, in _Popen
    return _default_context.get_context().Process._Popen(process_obj)
  File "C:\Anaconda3\lib\multiprocessing\context.py", line 313, in _Popen
    return Popen(process_obj)
  File "C:\Anaconda3\lib\multiprocessing\popen_spawn_win32.py", line 66, in __init__
    reduction.dump(process_obj, to_child)
  File "C:\Anaconda3\lib\multiprocessing\reduction.py", line 59, in dump
    ForkingPickler(file, protocol).dump(obj)
TypeError: cannot serialize '_io.TextIOWrapper' object

F:\workspace\test>Traceback (most recent call last):
  File "<string>", line 1, in <module>
  File "C:\Anaconda3\lib\multiprocessing\spawn.py", line 100, in spawn_main
    new_handle = steal_handle(parent_pid, pipe_handle)
  File "C:\Anaconda3\lib\multiprocessing\reduction.py", line 81, in steal_handle
    _winapi.PROCESS_DUP_HANDLE, False, source_pid)
OSError: [WinError 87] The parameter is incorrect
Here is the full code:
# from http://www.defuze.org/archives/198-managing-your-process-with-the-cherrypy-bus.html

import sys
import logging
from logging import handlers

from cherrypy.process import wspbus


class MyBus(wspbus.Bus):
    def __init__(self, name=""):
        wspbus.Bus.__init__(self)
        self.open_logger(name)
        self.subscribe("log", self._log)

    def exit(self):
        wspbus.Bus.exit(self)
        self.close_logger()

    def open_logger(self, name=""):
        logger = logging.getLogger(name)
        logger.setLevel(logging.INFO)
        h = logging.StreamHandler(sys.stdout)
        h.setLevel(logging.INFO)
        h.setFormatter(logging.Formatter("[%(asctime)s] %(name)s - %(levelname)s - %(message)s"))
        logger.addHandler(h)
        self.logger = logger

    def close_logger(self):
        for handler in self.logger.handlers:
            handler.flush()
            handler.close()

    def _log(self, msg="", level=logging.INFO):
        self.logger.log(level, msg)


import random
import string
from multiprocessing import Process


class Bank(object):
    def __init__(self, queue):
        self.bus = MyBus(Bank.__name__)
        self.queue = queue
        self.bus.subscribe("main", self.randomly_place_order)
        self.bus.subscribe("exit", self.terminate)

    def randomly_place_order(self):
        order = random.sample(['BUY', 'SELL'], 1)[0]
        code = random.sample(string.ascii_uppercase, 4)
        amount = random.randint(0, 100)
        message = "%s %s %d" % (order, ''.join(code), amount)
        self.bus.log("Placing order: %s" % message)
        self.queue.put(message)

    def run(self):
        self.bus.start()
        self.bus.block(interval=0.01)

    def terminate(self):
        self.bus.unsubscribe("main", self.randomly_place_order)
        self.bus.unsubscribe("exit", self.terminate)


from queue import Empty


class Broker(Process):
    def __init__(self, queue):
        Process.__init__(self)
        self.queue = queue
        self.bus = MyBus(Broker.__name__)
        self.bus.subscribe("main", self.check)

    def check(self):
        try:
            message = self.queue.get_nowait()
        except Empty:
            return

        if message == "stop":
            self.bus.unsubscribe("main", self.check)
            self.bus.exit()
        elif message.startswith("BUY"):
            self.buy(*message.split(' ', 2)[1:])
        elif message.startswith("SELL"):
            self.sell(*message.split(' ', 2)[1:])

    def run(self):
        self.bus.start()
        self.bus.block(interval=0.01)

    def stop(self):
        self.queue.put("stop")

    def buy(self, code, amount):
        self.bus.log("BUY order placed for %s %s" % (amount, code))

    def sell(self, code, amount):
        self.bus.log("SELL order placed for %s %s" % (amount, code))


if __name__ == '__main__':
    from multiprocessing import Queue
    queue = Queue()

    broker = Broker(queue)
    broker.start()

    bank = Bank(queue)
    bank.run()
The problem is that parts of the MyBus object are not picklable, and you're saving an instance of MyBus to your Broker instance. Because Windows lacks fork() support, when you call broker.start(), the entire state of broker must be pickled and recreated in the child process that multiprocessing spawns to execute broker.run. It works on Linux because Linux supports fork; it doesn't need to pickle anything in this case - the child process contains the complete state of the parent as soon as it is forked.
There are two ways to solve this problem. The first, and more difficult, way is to make your broker instance picklable. To do that, you need to make MyBus picklable. The error you're getting right now refers to the logger attribute on MyBus, which is not picklable. That one is easy to fix; just add __getstate__/__setstate__ methods to MyBus, which are used to control how the object is pickled/unpickled. If we remove the logger when we pickle, and recreate it when we unpickle, we'll work around the issue:
class MyBus(wspbus.Bus):
    ...
    def __getstate__(self):
        self_dict = self.__dict__
        del self_dict['logger']
        return self_dict

    def __setstate__(self, d):
        self.__dict__.update(d)
        self.open_logger()
This works, but then we hit another pickling error:
Traceback (most recent call last):
  File "async2.py", line 121, in <module>
    broker.start()
  File "C:\python34\lib\multiprocessing\process.py", line 105, in start
    self._popen = self._Popen(self)
  File "C:\python34\lib\multiprocessing\context.py", line 212, in _Popen
    return _default_context.get_context().Process._Popen(process_obj)
  File "C:\python34\lib\multiprocessing\context.py", line 313, in _Popen
    return Popen(process_obj)
  File "C:\python34\lib\multiprocessing\popen_spawn_win32.py", line 66, in __init__
    reduction.dump(process_obj, to_child)
  File "C:\python34\lib\multiprocessing\reduction.py", line 60, in dump
    ForkingPickler(file, protocol).dump(obj)
_pickle.PicklingError: Can't pickle <class 'cherrypy.process.wspbus._StateEnum.State'>: attribute lookup State on cherrypy.process.wspbus failed
It turns out cherrypy.process.wspbus._StateEnum.State, which is an attribute on the wspbus.Bus class inherited by MyBus, is a nested class, and nested classes can't be pickled:
class _StateEnum(object):
    class State(object):
        name = None

        def __repr__(self):
            return "states.%s" % self.name
The State object (surprise) is used to track the Bus instance's state. Since we're doing the pickling before we start up the bus, we could just remove the state attribute from the object when we pickle, and set it to States.STOPPED when we unpickle.
class MyBus(wspbus.Bus):
    def __init__(self, name=""):
        wspbus.Bus.__init__(self)
        self.open_logger(name)
        self.subscribe("log", self._log)

    def __getstate__(self):
        self_dict = self.__dict__
        del self_dict['logger']
        del self_dict['state']
        return self_dict

    def __setstate__(self, d):
        self.__dict__.update(d)
        self.open_logger()
        self.state = wspbus.states.STOPPED  # Initialize to STOPPED
With these changes, the code works as expected! The only limitation is that it's only safe to pickle MyBus if the bus hasn't started yet, which is fine for your use case.
Again, this is the hard way. The easy way is to just remove the need to pickle the MyBus instance altogether. You can just create the MyBus instance in the child process, rather than the parent:
class Broker(Process):
    def __init__(self, queue):
        Process.__init__(self)
        self.queue = queue

    ...

    def run(self):
        self.bus = MyBus(Broker.__name__)  # Create the instance here, in the child
        self.bus.subscribe("main", self.check)
        self.bus.start()
        self.bus.block(interval=0.01)
As long as you don't need to access broker.bus in the parent, this is the simpler option.
I have little experience with decorators in Python, but I'd like to write a function decorator that runs the function, catches a specific exception, and if the exception is caught then re-tries the function a certain number of times. That is, I'd like to do this:
@retry_if_exception(BadStatusLine, max_retries=2)
def thing_that_sometimes_fails(self, foo):
    foo.do_something_that_sometimes_raises_BadStatusLine()
I assume this kind of thing is easy with decorators, but I'm not clear about how exactly to go about it.
from functools import wraps

def retry_if_exception(ex, max_retries):
    def outer(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            assert max_retries > 0
            x = max_retries
            while x:
                try:
                    return func(*args, **kwargs)
                except ex:
                    x -= 1
            # note: if every attempt fails, this falls through and returns None
        return wrapper
    return outer
See why you'd better use @wraps.
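A quick sketch (with made-up names) of what @wraps buys you: the decorated function keeps its own metadata instead of the wrapper's.

@retry_if_exception(ValueError, max_retries=3)
def parse(value):
    """Parse a value, retrying on ValueError."""
    return int(value)

print(parse.__name__)  # 'parse' rather than 'wrapper', thanks to @wraps
print(parse.__doc__)   # the original docstring is preserved as well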
I think you're basically wanting something like this:
def retry_if_exception(exception_type=Exception, max_retries=1):
    def decorator(fn):
        def wrapper(*args, **kwargs):
            for i in range(max_retries + 1):
                print('Try #', i + 1)
                try:
                    return fn(*args, **kwargs)
                except exception_type as e:
                    print('wrapper exception:', i + 1, e)
        return wrapper
    return decorator


@retry_if_exception()
def foo1():
    raise Exception('foo1')


@retry_if_exception(ArithmeticError)
def foo2():
    x = 1/0


@retry_if_exception(Exception, 2)
def foo3():
    raise Exception('foo3')
The following seems to do what you've described:
def retry_if_exception(exception, max_retries=2):
    def _retry_if_exception(method_fn):
        # method_fn is the function that gives rise
        # to the method that you've decorated,
        # with signature (slf, foo)
        from functools import wraps

        def method_deco(slf, foo):
            tries = 0
            while True:
                try:
                    return method_fn(slf, foo)
                except exception:
                    tries += 1
                    if tries > max_retries:
                        raise
        return wraps(method_fn)(method_deco)
    return _retry_if_exception
Here's an example of it in use:
d = {}

class Foo():
    def usually_raise_KeyError(self):
        print("d[17] = %s" % d[17])

foo1 = Foo()

class A():
    @retry_if_exception(KeyError, max_retries=2)
    def something_that_sometimes_fails(self, foo):
        print("About to call foo.usually_raise_KeyError()")
        foo.usually_raise_KeyError()

a = A()
a.something_that_sometimes_fails(foo1)
This gives:
About to call foo.usually_raise_KeyError()
About to call foo.usually_raise_KeyError()
About to call foo.usually_raise_KeyError()
Traceback (most recent call last):
  File " ......... TrapRetryDeco.py", line 39, in <module>
    a.something_that_sometimes_fails(foo1)
  File " ......... TrapRetryDeco.py", line 15, in method_deco
    return method_fn(slf, foo)
  File " ......... TrapRetryDeco.py", line 36, in something_that_sometimes_fails
    foo.usually_raise_KeyError()
  File " ......... TrapRetryDeco.py", line 28, in usually_raise_KeyError
    print("d[17] = %s" % d[17])
KeyError: 17
I assume that by "2 retries" you mean the operation will be attempted 3x all told. Your example has a couple of complications which may obscure the basic setup:
It seems you want a method decorator, as your function/method's first parameter is "self"; however, that method immediately delegates to some bad method of its foo parameter. I preserved these complications :)
As an outline, you would do something along these lines:
import random

def shaky():
    1/random.randint(0, 1)

def retry_if_exception(f):
    def inner(retries=2):
        for retry in range(retries):
            try:
                return f()
            except ZeroDivisionError:
                print 'try {}'.format(retry)
        raise
    return inner

@retry_if_exception
def thing_that_may_fail():
    shaky()

thing_that_may_fail()
As written, each attempt fails about half the time. When all the retries fail, it prints:
try 0
try 1
Traceback (most recent call last):
  File "Untitled 2.py", line 23, in <module>
    thing_that_may_fail()
  File "Untitled 2.py", line 10, in inner
    return f()
  File "Untitled 2.py", line 21, in thing_that_may_fail
    shaky()
  File "Untitled 2.py", line 4, in shaky
    1/random.randint(0,1)
ZeroDivisionError: integer division or modulo by zero
You could adapt this structure to many different types of errors.
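For instance, with any of the variants above that take the exception type as an argument, you can retry on several error types at once by passing a tuple, since an except clause accepts a tuple of exception classes (hypothetical function below):

@retry_if_exception((ZeroDivisionError, KeyError), max_retries=3)
def flaky(data):
    # retried on either a missing key or a zero division
    return data['numerator'] / data['denominator']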