Understanding class variable behavior - Python

We came across the need for a dynamic class variable in the following Python 2 code.
from datetime import datetime
from retrying import retry

class TestClass(object):
    SOME_VARIABLE = None

    def __init__(self, some_arg=None):
        self.some_arg = some_arg

    @retry(retry_on_exception=lambda e: isinstance(e, EnvironmentError),
           wait_fixed=3000 if SOME_VARIABLE == "NEEDED" else 1000,
           stop_max_attempt_number=3)
    def some_func(self):
        print("Running {} at {}".format(self.some_arg, datetime.now()))
        if self.some_arg != "something needed":
            raise EnvironmentError("Unexpected value")

TestClass.SOME_VARIABLE = "NEEDED"
x = TestClass()
x.some_func()
Output:
Running None at 2021-07-26 19:40:22.374736
Running None at 2021-07-26 19:40:23.376027
Running None at 2021-07-26 19:40:24.377523
Traceback (most recent call last):
File "/home/raj/tmp/test_test.py", line 19, in <module>
x.some_func()
File "/home/raj/.local/share/virtualenvs/test-DzpjW1fZ/lib/python2.7/site-packages/retrying.py", line 49, in wrapped_f
return Retrying(*dargs, **dkw).call(f, *args, **kw)
File "/home/raj/.local/share/virtualenvs/test-DzpjW1fZ/lib/python2.7/site-packages/retrying.py", line 212, in call
raise attempt.get()
File "/home/raj/.local/share/virtualenvs/test-DzpjW1fZ/lib/python2.7/site-packages/retrying.py", line 247, in get
six.reraise(self.value[0], self.value[1], self.value[2])
File "/home/raj/.local/share/virtualenvs/test-DzpjW1fZ/lib/python2.7/site-packages/retrying.py", line 200, in call
attempt = Attempt(fn(*args, **kwargs), attempt_number, False)
File "/home/raj/tmp/test_test.py", line 14, in some_func
raise EnvironmentError("Unexpected value")
EnvironmentError: Unexpected value
We can see that the updated value of SOME_VARIABLE is not being picked up by the decorator (the retries still wait 1000 ms rather than 3000 ms).
We are trying to understand whether there is a way to have SOME_VARIABLE read dynamically. The use case is to have dynamic wait times in the retry decorator based on the value of SOME_VARIABLE at runtime.

Your class definition is equivalent, based on the definition of decorator syntax, to
class TestClass(object):
    SOME_VARIABLE = None

    def __init__(self, some_arg=None):
        self.some_arg = some_arg

    decorator = retry(retry_on_exception=lambda e: isinstance(e, EnvironmentError),
                      wait_fixed=3000 if SOME_VARIABLE == "NEEDED" else 1000,
                      stop_max_attempt_number=3)
    def some_func(self):
        ...
    some_func = decorator(some_func)
Note that retry is called long before you change the value of TestClass.SOME_VARIABLE (indeed, before the class object that will be bound to TestClass even exists), so the comparison SOME_VARIABLE == "NEEDED" is evaluated when SOME_VARIABLE still equals None.
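A minimal sketch (mine, not from the question) that makes the timing visible: the expression passed as a decorator argument runs while the class body is being executed, long before any later assignment to the class attribute.

def show(value):
    # runs immediately, while the class body is being executed
    print("decorator argument evaluated with: {}".format(value))
    return lambda func: func  # no-op decorator

class Demo(object):
    FLAG = None

    @show(FLAG)  # prints "decorator argument evaluated with: None"
    def method(self):
        pass

Demo.FLAG = "NEEDED"  # too late; the decorator arguments were already evaluated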
To have the retry behavior configured at run-time, try something like
class TestClass(object):
    SOME_VARIABLE = None

    def __init__(self, some_arg=None):
        self.some_arg = some_arg

    def _some_func_impl(self):
        print("Running {} at {}".format(self.some_arg, datetime.now()))
        if self.some_arg != "something needed":
            raise EnvironmentError("Unexpected value")

    def some_func(self):
        wait = 3000 if self.SOME_VARIABLE == "NEEDED" else 1000
        impl = retry(retry_on_exception=lambda e: isinstance(e, EnvironmentError),
                     wait_fixed=wait,
                     stop_max_attempt_number=3)(self._some_func_impl)
        return impl()
some_func becomes a function that, at runtime, builds a retry-wrapped version of the private _some_func_impl with the appropriate wait time, then calls it.
(Not tested; I may have gotten the interaction between the bound method self._some_func_impl and retry wrong.)
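A quick usage sketch under the same assumptions (untested, names as above): because the wait time is computed inside some_func, the class attribute is read at call time rather than at class-definition time.

TestClass.SOME_VARIABLE = "NEEDED"   # read on every call to some_func
x = TestClass("something needed")
x.some_func()                        # any retries now wait 3000 ms instead of 1000 ms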

Related

Using a decorator in a class in Python

To understand decorators in Python, I created an example in a class. But when I run it, I receive an error.
class Operation:
    def __init__(self, groupe):
        self.__groupe = groupe

    @property
    def groupe(self):
        return self.__groupe

    @groupe.setter
    def groupe(self, value):
        self.__groupe = value

    def addition(self, func_goodbye):
        ln_house = len('house')
        ln_school = len('school')
        add = ln_house + ln_school
        print('The result is :' + str(add))
        return func_goodbye

    @addition
    def goodbye(self):
        print('Goodbye people !!')

if __name__ == '__main__':
    p1 = Operation('Student')
    p1.goodbye()
I receive this error :
Traceback (most recent call last):
File "Operation.py", line 1, in
class Operation:
File "Operation.py", line 21, in Operation
@addition
TypeError: addition() missing 1 required positional argument: 'func_goodbye'
You can have a class-scoped decorator; however, there won't be a self when the decorator is called.
a decorator:
@foo
def bar(): ...
is roughly equivalent to
def bar(): ...
bar = foo(bar)
In your particular example, if you remove the self parameter, it should function as you expect:
def addition(func_goodbye):
    ln_house = len('house')
    ln_school = len('school')
    add = ln_house + ln_school
    print('The result is :' + str(add))
    return func_goodbye

@addition
def goodbye(self):
    print('Goodbye people !!')
For good measure, I might del addition after that, just to ensure it isn't accidentally called later (see the sketch below).
(An aside: one unfortunate side effect of this is that many linters and type checkers will consider it "odd", so I've yet to find a way to appease them (for example, mypy).)
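Putting it together, a sketch of the corrected class (my arrangement of the answer's suggestion, not the original poster's exact code):

class Operation:
    def __init__(self, groupe):
        self.groupe = groupe

    def addition(func_goodbye):
        # runs once, at class-definition time; there is no self here
        add = len('house') + len('school')
        print('The result is :' + str(add))
        return func_goodbye

    @addition
    def goodbye(self):
        print('Goodbye people !!')

    del addition  # optional: remove the helper so it can't be called later

if __name__ == '__main__':
    p1 = Operation('Student')  # the addition message was printed when the class was defined
    p1.goodbye()               # prints: Goodbye people !!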

Why does the Python pickling library complain about a class member that doesn't exist?

I have the following simple class definition:
def apmSimUp(i):
    return APMSim(i)

def simDown(sim):
    sim.close()

class APMSimFixture(TestCase):
    def setUp(self):
        self.pool = multiprocessing.Pool()
        self.sims = self.pool.map(
            apmSimUp,
            range(numCores)
        )

    def tearDown(self):
        self.pool.map(
            simDown,
            self.sims
        )
where the class APMSim is defined purely in terms of plain Python primitive types (string, list, etc.); the only exception is a static member, which is a multiprocessing manager list.
However, when I try to execute this, I get the following error:
Error
Traceback (most recent call last):
File "/home/peng/git/datapassport/spookystuff/mav/pyspookystuff_test/mav/__init__.py", line 77, in setUp
range(numCores)
File "/usr/lib/python2.7/multiprocessing/pool.py", line 251, in map
return self.map_async(func, iterable, chunksize).get()
File "/usr/lib/python2.7/multiprocessing/pool.py", line 567, in get
raise self._value
MaybeEncodingError: Error sending result: '[<pyspookystuff.mav.sim.APMSim object at 0x7f643c4ca8d0>]'. Reason: 'TypeError("can't pickle thread.lock objects",)'
This is strange, as thread.lock cannot be found anywhere: I strictly avoid any multithreading components (as you can see, only multiprocessing is used), and none of these components exist in my class, except possibly as a static member. What should I do to make this class picklable?
BTW, is there a way to exclude a black-sheep member from pickling, like Java's @transient annotation?
Thanks a lot for any help!
UPDATE: The following is my full APMSim class; please see if you can find anything that violates its picklability:
usedINums = mav.manager.list()

class APMSim(object):
    global usedINums

    @staticmethod
    def nextINum():
        port = mav.nextUnused(usedINums, range(0, 254))
        return port

    def __init__(self, iNum):
        # type: (int) -> None
        self.iNum = iNum
        self.args = sitl_args + ['-I' + str(iNum)]

    @staticmethod
    def create():
        index = APMSim.nextINum()
        try:
            result = APMSim(index)
            return result
        except Exception as ee:
            usedINums.remove(index)
            raise

    @lazy
    def _sitl(self):
        sitl = SITL()
        sitl.download('copter', '3.3')
        sitl.launch(self.args, await_ready=True, restart=True)
        print("launching .... ", sitl.p.pid)
        return sitl

    @lazy
    def sitl(self):
        self.setParamAndRelaunch('SYSID_THISMAV', self.iNum + 1)
        return self._sitl

    def _getConnStr(self):
        return tcp_master(self.iNum)

    @lazy
    def connStr(self):
        self.sitl
        return self._getConnStr()

    def setParamAndRelaunch(self, key, value):
        wd = self._sitl.wd
        print("relaunching .... ", self._sitl.p.pid)
        v = connect(self._getConnStr(), wait_ready=True)  # if use connStr will trigger cyclic invocation
        v.parameters.set(key, value, wait_ready=True)
        v.close()
        self._sitl.stop()
        self._sitl.launch(self.args, await_ready=True, restart=True, wd=wd, use_saved_data=True)
        v = connect(self._getConnStr(), wait_ready=True)
        # This fn actually rate limits itself to every 2s.
        # Just retry with persistence to get our first param stream.
        v._master.param_fetch_all()
        v.wait_ready()
        actualValue = v._params_map[key]
        assert actualValue == value
        v.close()

    def close(self):
        self._sitl.stop()
        usedINums.remove(self.iNum)
The lazy decorator is from this library:
https://docs.python.org/2/tutorial/classes.html#generator-expressions
It would help to see how your class looks, but if it has methods from multiprocessing, you may have issues just pickling it by default. Multiprocessing objects can use locks as well, and these are (obviously) unpicklable.
You can customize pickling with the __getstate__ method, or __reduce__ (documented in the same place).
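A minimal sketch of the __getstate__/__setstate__ approach (the class, its attributes, and the _lock name are placeholders of mine, standing in for whichever member holds the unpicklable object):

import pickle
import threading

class Holder(object):
    def __init__(self):
        self.data = [1, 2, 3]
        self._lock = threading.Lock()  # not picklable

    def __getstate__(self):
        # copy the instance dict and drop the offending member before pickling
        state = self.__dict__.copy()
        del state['_lock']
        return state

    def __setstate__(self, state):
        self.__dict__.update(state)
        self._lock = threading.Lock()  # recreate it on unpickling

h = pickle.loads(pickle.dumps(Holder()))
print(h.data)  # [1, 2, 3]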

decorator that adds a variable to a closure

I want to write a decorator that injects a custom local variable into a function.
The interface may look like this:
def enclose(name, value):
    ...
    def decorator(func):
        def wrapper(*args, **kwargs):
            return func(*args, **kwargs)
        return wrapper
    return decorator
Expectation:
@enclose('param1', 1)
def f():
    param1 += 1
    print param1
f() will compile and run without error
output:
2
Is it possible to do this in Python? Why or why not?
I thought I'd try this out just to see how hard it would be. Pretty hard, as it turns out.
The first question is how you implement this: is the extra parameter an injected local variable, an additional argument to the function, or a nonlocal variable? An injected local variable would be a fresh object each time, but it's unclear how to create more complicated objects that way. An additional argument would record mutations to the object, but assignments to the name would be forgotten between function invocations; additionally, this requires either parsing the source to find where to place the argument, or directly manipulating code objects. Finally, declaring the variables nonlocal records both mutations to the object and assignments to the name. Effectively, a nonlocal is a global, but one reachable only by the decorated function. Again, using a nonlocal requires parsing the source to find where to place the nonlocal declaration, or direct manipulation of a code object.
In the end I decided on using a nonlocal variable and parsing the function source. Originally I was going to manipulate code objects, but it seemed too complicated.
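For orientation, a hand-written sketch (mine, far simpler than the decorator below) of the effect the nonlocal approach is aiming for:

def make_f(param1=1):
    def f():
        nonlocal param1
        param1 += 1
        print(param1)
    return f

f = make_f()
f()  # 2
f()  # 3 -- assignments to the name persist between calls, like a private global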
Here is the code for the decorator:
import re
import types
import inspect


class DummyInject:
    def __call__(self, **kwargs):
        return lambda func: func

    def __getattr__(self, name):
        return self


class Inject:
    function_end = re.compile(r"\)\s*:\s*\n")
    indent = re.compile("\s+")
    decorator = re.compile("@([a-zA-Z0-9_]+)[.a-zA-Z0-9_]*")
    exec_source = """
def create_new_func({closure_names}):
{func_source}
{indent}return {func_name}"""
    nonlocal_declaration = "{indent}nonlocal {closure_names};"

    def __init__(self, **closure_vars):
        self.closure_vars = closure_vars

    def __call__(self, func):
        lines, line_number = inspect.getsourcelines(func)
        self.inject_nonlocal_declaration(lines)
        new_func = self.create_new_function(lines, func)
        return new_func

    def inject_nonlocal_declaration(self, lines):
        """hides nonlocal declaration in first line of function."""
        function_body_start = self.get_function_body_start(lines)
        nonlocals = self.nonlocal_declaration.format(
            indent=self.indent.match(lines[function_body_start]).group(),
            closure_names=", ".join(self.closure_vars)
        )
        lines[function_body_start] = nonlocals + lines[function_body_start]
        return lines

    def get_function_body_start(self, lines):
        line_iter = enumerate(lines)
        found_function_header = False
        for i, line in line_iter:
            if self.function_end.search(line):
                found_function_header = True
                break
        assert found_function_header
        for i, line in line_iter:
            if not line.strip().startswith("#"):
                break
        return i

    def create_new_function(self, lines, func):
        # prepares source -- eg. making sure indenting is correct
        declaration_indent, body_indent = self.get_indent(lines)
        if not declaration_indent:
            lines = [body_indent + line for line in lines]
        exec_code = self.exec_source.format(
            closure_names=", ".join(self.closure_vars),
            func_source="".join(lines),
            indent=declaration_indent if declaration_indent else body_indent,
            func_name=func.__name__
        )
        # create new func -- mainly only want code object contained by new func
        lvars = {"closure_vars": self.closure_vars}
        gvars = self.get_decorators(exec_code, func.__globals__)
        exec(exec_code, gvars, lvars)
        new_func = eval("create_new_func(**closure_vars)", gvars, lvars)
        # add back bits that enable function to work well
        # includes original global references and
        new_func = self.readd_old_references(new_func, func)
        return new_func

    def readd_old_references(self, new_func, old_func):
        """Adds back globals, function name and source reference."""
        func = types.FunctionType(
            code=self.add_src_ref(new_func.__code__, old_func.__code__),
            globals=old_func.__globals__,
            name=old_func.__name__,
            argdefs=old_func.__defaults__,
            closure=new_func.__closure__
        )
        func.__doc__ = old_func.__doc__
        return func

    def add_src_ref(self, new_code, old_code):
        return types.CodeType(
            new_code.co_argcount,
            new_code.co_kwonlyargcount,
            new_code.co_nlocals,
            new_code.co_stacksize,
            new_code.co_flags,
            new_code.co_code,
            new_code.co_consts,
            new_code.co_names,
            new_code.co_varnames,
            old_code.co_filename,  # reuse filename
            new_code.co_name,
            old_code.co_firstlineno,  # reuse line number
            new_code.co_lnotab,
            new_code.co_freevars,
            new_code.co_cellvars
        )

    def get_decorators(self, source, global_vars):
        """Creates a namespace for exec function creation in. Must remove
        any reference to Inject decorator to prevent infinite recursion."""
        namespace = {}
        for match in self.decorator.finditer(source):
            decorator = eval(match.group()[1:], global_vars)
            basename = match.group(1)
            if decorator is Inject:
                namespace[basename] = DummyInject()
            else:
                namespace[basename] = global_vars[basename]
        return namespace

    def get_indent(self, lines):
        """Takes a set of lines used to create a function and returns the
        outer indentation that the function is declared in and the inner
        indentation of the body of the function."""
        body_indent = None
        function_body_start = self.get_function_body_start(lines)
        for line in lines[function_body_start:]:
            match = self.indent.match(line)
            if match:
                body_indent = match.group()
                break
        assert body_indent
        match = self.indent.match(lines[0])
        if not match:
            declaration_indent = ""
        else:
            declaration_indent = match.group()
        return declaration_indent, body_indent
if __name__ == "__main__":
    a = 1

    @Inject(b=10)
    def f(c, d=1000):
        "f uses injected variables"
        return a + b + c + d

    @Inject(var=None)
    def g():
        """Purposefully generate exception to show stacktraces are still
        meaningful."""
        create_name_error  # line number 164

    print(f(100))  # prints 1111
    assert f(100) == 1111
    assert f.__doc__ == "f uses injected variables"  # show doc is retained
    try:
        g()
    except NameError:
        raise
    else:
        assert False
    # stack trace shows NameError on line 164
Which outputs the following:
1111
Traceback (most recent call last):
File "inject.py", line 171, in <module>
g()
File "inject.py", line 164, in g
create_name_error # line number 164
NameError: name 'create_name_error' is not defined
The whole thing is hideously ugly, but it works. It's also worth noting that if Inject is used on a method, then any injected values are shared between all instances of the class.
You can do it using globals but I don't recommend this approach.
def enclose(name, value):
    globals()[name] = value
    def decorator(func):
        def wrapper(*args, **kwargs):
            return func(*args, **kwargs)
        return wrapper
    return decorator

@enclose('param1', 1)
def f():
    global param1
    param1 += 1
    print(param1)

f()

Faking a traceback in Python

I'm writing a test runner. I have an object that can catch and store exceptions, which will be formatted as a string later as part of the test failure report. I'm trying to unit-test the procedure that formats the exception.
In my test setup, I don't want to actually throw an exception for my object to catch, mainly because it means that the traceback won't be predictable. (If the file changes length, the line numbers in the traceback will change.)
How can I attach a fake traceback to an exception, so that I can make assertions about the way it's formatted? Is this even possible? I'm using Python 3.3.
Simplified example:
class ExceptionCatcher(object):
    def __init__(self, function_to_try):
        self.f = function_to_try
        self.exception = None

    def try_run(self):
        try:
            self.f()
        except Exception as e:
            self.exception = e


def format_exception_catcher(catcher):
    pass
    # No implementation yet - I'm doing TDD.
    # This'll probably use the 'traceback' module to stringify catcher.exception


class TestFormattingExceptions(unittest.TestCase):
    def test_formatting(self):
        catcher = ExceptionCatcher(None)
        catcher.exception = ValueError("Oh no")
        # do something to catcher.exception so that it has a traceback?
        output_str = format_exception_catcher(catcher)
        self.assertEquals(output_str,
"""Traceback (most recent call last):
  File "nonexistent_file.py", line 100, in nonexistent_function
    raise ValueError("Oh no")
ValueError: Oh no
""")
Reading the source of traceback.py pointed me in the right direction. Here's my hacky solution, which involves faking the frame and code objects which the traceback would normally hold references to.
import traceback

class FakeCode(object):
    def __init__(self, co_filename, co_name):
        self.co_filename = co_filename
        self.co_name = co_name

class FakeFrame(object):
    def __init__(self, f_code, f_globals):
        self.f_code = f_code
        self.f_globals = f_globals

class FakeTraceback(object):
    def __init__(self, frames, line_nums):
        if len(frames) != len(line_nums):
            raise ValueError("Ya messed up!")
        self._frames = frames
        self._line_nums = line_nums
        self.tb_frame = frames[0]
        self.tb_lineno = line_nums[0]

    @property
    def tb_next(self):
        if len(self._frames) > 1:
            return FakeTraceback(self._frames[1:], self._line_nums[1:])

class FakeException(Exception):
    def __init__(self, *args, **kwargs):
        self._tb = None
        super().__init__(*args, **kwargs)

    @property
    def __traceback__(self):
        return self._tb

    @__traceback__.setter
    def __traceback__(self, value):
        self._tb = value

    def with_traceback(self, value):
        self._tb = value
        return self

code1 = FakeCode("made_up_filename.py", "non_existent_function")
code2 = FakeCode("another_non_existent_file.py", "another_non_existent_method")
frame1 = FakeFrame(code1, {})
frame2 = FakeFrame(code2, {})
tb = FakeTraceback([frame1, frame2], [1, 3])
exc = FakeException("yo").with_traceback(tb)

print(''.join(traceback.format_exception(FakeException, exc, tb)))
# Traceback (most recent call last):
#   File "made_up_filename.py", line 1, in non_existent_function
#   File "another_non_existent_file.py", line 3, in another_non_existent_method
# FakeException: yo
Thanks to @User for providing FakeException, which is necessary because real exceptions type-check the argument to with_traceback().
This version does have a few limitations:
- It doesn't print the lines of code for each stack frame, as a real traceback would, because format_exception goes off to look for the real file that the code came from (which doesn't exist in our case). If you want to make this work, you need to insert fake data into linecache's cache (because traceback uses linecache to get hold of the source code), per @User's answer below.
- You also can't actually raise exc and expect the fake traceback to survive.
- More generally, if you have client code that traverses tracebacks in a different manner than traceback does (such as much of the inspect module), these fakes probably won't work. You'd need to add whatever extra attributes the client code expects.
These limitations are fine for my purposes - I'm just using it as a test double for code that calls traceback - but if you want to do more involved traceback manipulation, it looks like you might have to go down to the C level.
EDIT2:
This is the code of linecache; I will comment on it.
def updatecache(filename, module_globals=None):  # module_globals is a dict
    # ...
    if module_globals and '__loader__' in module_globals:
        name = module_globals.get('__name__')
        loader = module_globals['__loader__']
        # module_globals = dict(__name__ = 'somename', __loader__ = loader)
        get_source = getattr(loader, 'get_source', None)
        # loader must have a 'get_source' function that returns the source
        if name and get_source:
            try:
                data = get_source(name)
            except (ImportError, IOError):
                pass
            else:
                if data is None:
                    # No luck, the PEP302 loader cannot find the source
                    # for this module.
                    return []
                cache[filename] = (
                    len(data), None,
                    [line + '\n' for line in data.splitlines()], fullname
                )
                return cache[filename][2]
That means, before your test run, just do:
class Loader:
    def get_source(self, name):
        # called by linecache as loader.get_source(name); must return the module source
        return 'source of the module'

import linecache
linecache.updatecache(filename,
                      dict(__name__='modulename without <> around',
                           __loader__=Loader()))
and 'source of the module' is the source of the module you test.
EDIT1:
My solution so far:
class MyException(Exception):
    _traceback = None

    @property
    def __traceback__(self):
        return self._traceback

    @__traceback__.setter
    def __traceback__(self, value):
        self._traceback = value

    def with_traceback(self, tb_or_none):
        self.__traceback__ = tb_or_none
        return self
Now you can set the custom tracebacks of the exception:
e = MyException().with_traceback(1)
What you usually do if you reraise an exception:
raise e.with_traceback(fake_tb)
All exception prints walk through this function:
import traceback
traceback.print_exception(_type, _error, _traceback)
Hope it helps somehow.
You should be able to simply raise whatever fake exception you want, where you want it, in your test runs. The Python exception docs suggest you create a class and raise that as your exception. It's section 8.5 of the docs.
http://docs.python.org/2/tutorial/errors.html
Should be pretty straightforward once you've got the class created.
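A minimal sketch of that suggestion (the class and function names here are mine): define a small exception class and raise it at a known, controlled point in the test.

class FakeTestError(Exception):
    """A user-defined exception, as suggested in the tutorial's errors chapter."""
    pass

def failing_step():
    # raised at a known location, so the test controls where it comes from
    raise FakeTestError("simulated failure")

try:
    failing_step()
except FakeTestError as e:
    print("caught:", e)  # caught: simulated failure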

Wrapping exceptions in Python

I'm working on a mail-sending library, and I want to be able to catch exceptions produced by the senders (SMTP, Google AppEngine, etc.) and wrap them in easily catchable exceptions specific to my library (ConnectionError, MessageSendError, etc.), with the original traceback intact so it can be debugged. What is the best way to do this in Python 2?
The simplest way would be to reraise with the old trace object. The following example shows this:
import sys

def a():
    def b():
        raise AssertionError("1")
    b()

try:
    a()
except AssertionError:  # some specific exception you want to wrap
    trace = sys.exc_info()[2]
    raise Exception("error description"), None, trace
Check the documentation of the raise statement for details of the three parameters. My example would print:
Traceback (most recent call last):
File "C:\...\test.py", line 9, in <module>
a()
File "C:\...\test.py", line 6, in a
b()
File "C:\...\test.py", line 5, in b
raise AssertionError("1")
Exception: error description
For completeness, in Python 3 you'd use the raise MyException(...) from e syntax.
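A minimal Python 3 sketch of that syntax (the exception names here are placeholders of mine, not part of any particular library):

class MessageSendError(Exception):
    """Library-specific wrapper exception."""

def send():
    # stand-in for whatever the underlying sender raises
    raise OSError("SMTP connection refused")

try:
    send()
except OSError as e:
    # chains the original exception; its traceback is preserved as __cause__
    raise MessageSendError("failed to send message") from e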
This answer is probably a little bit late, but you can wrap the function in a Python decorator.
Here is a simple cheatsheet on how the different kinds of decorators work.
Here is some sample code showing how to do this. Just change the decorator to catch different errors in the different ways that you need.
def decorator(wrapped_function):
    def _wrapper(*args, **kwargs):
        try:
            # do something before the function call
            result = wrapped_function(*args, **kwargs)
            # do something after the function call
        except TypeError:
            print("TypeError")
        except IndexError:
            print("IndexError")
        # return result
    return _wrapper

@decorator
def type_error():
    return 1 / 'a'

@decorator
def index_error():
    return ['foo', 'bar'][5]

type_error()
index_error()
Use raise_from from the future.utils package.
Relevant example copied below:
from future.utils import raise_from

class FileDatabase:
    def __init__(self, filename):
        try:
            self.file = open(filename)
        except IOError as exc:
            raise_from(DatabaseError('failed to open'), exc)
Within that package, raise_from is implemented as follows:
def raise_from(exc, cause):
    """
    Equivalent to:
        raise EXCEPTION from CAUSE
    on Python 3. (See PEP 3134).
    """
    # Is either arg an exception class (e.g. IndexError) rather than
    # instance (e.g. IndexError('my message here')? If so, pass the
    # name of the class undisturbed through to "raise ... from ...".
    if isinstance(exc, type) and issubclass(exc, Exception):
        e = exc()
        # exc = exc.__name__
        # execstr = "e = " + _repr_strip(exc) + "()"
        # myglobals, mylocals = _get_caller_globals_and_locals()
        # exec(execstr, myglobals, mylocals)
    else:
        e = exc
        e.__suppress_context__ = False
    if isinstance(cause, type) and issubclass(cause, Exception):
        e.__cause__ = cause()
        e.__suppress_context__ = True
    elif cause is None:
        e.__cause__ = None
        e.__suppress_context__ = True
    elif isinstance(cause, BaseException):
        e.__cause__ = cause
        e.__suppress_context__ = True
    else:
        raise TypeError("exception causes must derive from BaseException")
    e.__context__ = sys.exc_info()[1]
    raise e
