How to unit test a decorated method in Python

Assume I have to unit test methodA, defined in the following class:
class SomeClass(object):
    def wrapper(fun):
        def _fun(self, *args, **kwargs):
            self.b = 'Original'
            fun(self, *args, **kwargs)
        return _fun

    @wrapper
    def methodA(self):
        pass
My test class is as follows:
from mock import patch

class TestSomeClass(object):
    def testMethodA(self):
        def mockDecorator(f):
            def _f(self, *args, **kwargs):
                self.b = 'Mocked'
                f(self, *args, **kwargs)
            return _f

        with patch('some_class.SomeClass.wrapper', mockDecorator):
            from some_class import SomeClass
            s = SomeClass()
            s.methodA()
            assert s.b == 'Mocked', 's.b is equal to %s' % s.b
If I run the test, I hit the assertion:
File "/home/klinden/workinprogress/mockdecorators/test_some_class.py", line 17, in testMethodA
assert s.b == 'Mocked', 's.b is equal to %s' % s.b
AssertionError: s.b is equal to Original
If I stick a breakpoint in the test, after patching, I can see that wrapper has been mocked out just fine, but that methodA still references the old wrapper:
(Pdb) p s.wrapper
<bound method SomeClass.mockDecorator of <some_class.SomeClass object at 0x7f9ed1bf60d0>>
(Pdb) p s.methodA
<bound method SomeClass._fun of <some_class.SomeClass object at 0x7f9ed1bf60d0>>
Any idea of what the problem is here?

After mulling it over, I've found a solution.
Since monkey patching seems not to be effective (and I've also tried a few
other solutions), I dug into the function internals and that proved to be fruitful.
Python 3
You're lucky - just use the functools.wraps decorator, which creates a __wrapped__ attribute that contains the wrapped function.
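For instance, a minimal Python 3 sketch of the class from the question, assuming the wrapper applies functools.wraps (names match the question, but the wraps call is my addition):

import functools

class SomeClass(object):
    def wrapper(fun):
        @functools.wraps(fun)        # records the original function under _fun.__wrapped__
        def _fun(self, *args, **kwargs):
            self.b = 'Original'
            return fun(self, *args, **kwargs)
        return _fun

    @wrapper
    def methodA(self):
        pass

# In a test you can then reach the undecorated function directly:
original = SomeClass.methodA.__wrapped__
original(SomeClass())                # runs methodA's body; the wrapper never sets self.b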
Python 2
Even if you use @wraps, no fancy attribute is created.
However, you just need to realise that the wrapper method does nothing but create a closure: so you'll be able to find your wrapped function in its func_closure attribute.
In the original example, the wrapped function would be at: s.methodA.im_func.func_closure[0].cell_contents
Wrapping up (ha!)
I created a getWrappedFunction helper along these lines, to ease my testing:
@staticmethod
def getWrappedFunction(wrapper):
    return wrapper.im_func.func_closure[0].cell_contents
YMMV, especially if you do fancy stuff and include other objects in the closure.
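For instance, a test using that helper might look something along these lines (Python 2; putting the helper on the test class as a staticmethod is just one possible arrangement):

class TestSomeClass(object):
    @staticmethod
    def getWrappedFunction(wrapper):
        return wrapper.im_func.func_closure[0].cell_contents

    def testMethodA(self):
        from some_class import SomeClass
        s = SomeClass()
        unwrapped = self.getWrappedFunction(SomeClass.methodA)
        unwrapped(s)                 # calls the original methodA body directly
        # the wrapper was skipped entirely, so s.b was never set
        assert not hasattr(s, 'b')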

Related

Python patching __new__ method

I am trying to patch __new__ method of a class, and it is not working as I expect.
from contextlib import contextmanager

class A:
    def __init__(self, arg):
        print('A init', arg)

@contextmanager
def patch_a():
    new = A.__new__

    def fake_new(cls, *args, **kwargs):
        print('call fake_new')
        return new(cls, *args, **kwargs)

    # here I get error: TypeError: object.__new__() takes exactly one argument (the type to instantiate)
    A.__new__ = fake_new
    try:
        yield
    finally:
        A.__new__ = new

if __name__ == '__main__':
    A('foo')
    with patch_a():
        A('bar')
    A('baz')
I expect the following output:
A init foo
call fake_new
A init bar
A init baz
But after call fake_new is printed, I get an error (see the comment in the code).
To me it seems like I just decorate the __new__ method and propagate all args unchanged.
It doesn't work, and the reason is obscure to me.
Also, if I write return new(cls), the call A('bar') works fine. But then A('baz') breaks.
Can someone explain what is going on?
Python version is 3.8
You've run into a complicated part of Python object instantiation - in which the language opted for a design that would allow one to create a custom __init__ method with parameters, without having to touch __new__.
However, at the base of the class hierarchy, object, both __new__ and __init__ take a single parameter each.
IIRC, it goes this way: if your class has a custom __init__ and you did not touch __new__, any extra parameters passed at instantiation are stripped from the call to __new__, so you don't have to customize it just to swallow the parameters you consume in __init__. The converse is also true: if your class has a custom __new__ with extra parameters, and no custom __init__, these are not passed to object.__init__.
With your design, Python sees a custom __new__ and passes it the same extra arguments that are passed to __init__ - and by using *args, **kw, you forward those to object.__new__ which accepts a single parameter - and you get the error you presented us.
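A tiny standalone demo of that rule (my own illustration, not from the question): the same extra argument is harmless while __new__ is untouched, and is rejected as soon as a custom __new__ forwards it to object.__new__:

class OnlyInit:
    def __init__(self, arg):
        pass

OnlyInit('x')        # fine: object.__new__ never sees 'x', since __new__ was not overridden

class BothDefined(OnlyInit):
    def __new__(cls, *args, **kwargs):
        # now the extra argument *is* forwarded here, and object.__new__ rejects it
        return super().__new__(cls, *args, **kwargs)

try:
    BothDefined('x')
except TypeError as exc:
    print(exc)       # object.__new__() takes exactly one argument (the type to instantiate)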
The fix is to not pass those extra parameters to the original __new__ method - unless they are needed there - so you have to make the same check Python's type does when instantiating an object.
And an interesting surprise to top it off: while making the example work, I found out that even if A.__new__ is deleted when restoring the patch, it is still considered "touched" by CPython's type instantiation, and the arguments are passed through.
In order to get your code working, I needed to leave a permanent stub A.__new__ that forwards only the cls argument:
from contextlib import contextmanager

class A:
    def __init__(self, arg):
        print('A init', arg)

@contextmanager
def patch_a():
    new = A.__new__

    def fake_new(cls, *args, **kwargs):
        print('call fake_new')
        if new is object.__new__:
            return new(cls)
        return new(cls, *args, **kwargs)

    A.__new__ = fake_new
    try:
        yield
    finally:
        del A.__new__
        if new is not object.__new__:
            A.__new__ = new
        else:
            A.__new__ = lambda cls, *args, **kw: object.__new__(cls)
        print(A.__new__)

if __name__ == '__main__':
    A('foo')
    with patch_a():
        A('bar')
    A('baz')
(I tried inspecting the original __new__ signature instead of the new is object.__new__ comparison - to no avail: object.__new__ signature is *args, **kwargs - possibly made so that it will never fail on static checking)

How can I limit Python class instance method calls to one at a time? [duplicate]

I'm trying to create a @synchronized wrapper that creates one Lock per object and makes method calls thread safe. I can only do this if I can access the method's im_self inside the wrapper.
import inspect

class B:
    def f(self): pass

assert inspect.ismethod( B.f )    # OK
assert inspect.ismethod( B().f )  # OK
print B.f    # <unbound method B.f>
print B().f  # <bound method B.f of <__main__.B instance at 0x7fa2055e67e8>>

def synchronized(func):
    # func is not bound or unbound!
    print func   # <function f at 0x7fa20561b9b0> !!!!
    assert inspect.ismethod(func)  # FAIL
    # ... allocate one lock per C instance
    return func

class C:
    @synchronized
    def f(self): pass
(1) What's confusing is that the func parameter passed to my decorator changes type before it gets passed into the wrapper-generator. This seems rude and unnecessary. Why does this happen?
(2) Is there some decorator magic by which I can make method calls to an object mutex-ed (i.e. one lock per object, not one per class)?
UPDATE: There are many examples of @synchronized(lock) wrappers. However, what I really want is @synchronized(self). I can solve it like this:
def synchronizedMethod(func):
    def _synchronized(*args, **kw):
        self = args[0]
        lock = oneLockPerObject(self)
        with lock: return func(*args, **kw)
    return _synchronized
However, because it's much more efficient, I'd prefer:
def synchronizedMethod(func):
    lock = oneLockPerObject(func.im_self)
    def _synchronized(*args, **kw):
        with lock: return func(*args, **kw)
    return _synchronized
Is this possible?
Go read:
https://github.com/GrahamDumpleton/wrapt/tree/develop/blog
and in particular:
https://github.com/GrahamDumpleton/wrapt/blob/develop/blog/07-the-missing-synchronized-decorator.md
https://github.com/GrahamDumpleton/wrapt/blob/develop/blog/08-the-synchronized-decorator-as-context-manager.md
The wrapt module then contains the @synchronized decorator described there.
https://pypi.python.org/pypi/wrapt
The full implementation is flexible enough to do:
@synchronized  # lock bound to function1
def function1():
    pass

@synchronized  # lock bound to function2
def function2():
    pass

@synchronized  # lock bound to Class
class Class(object):

    @synchronized  # lock bound to instance of Class
    def function_im(self):
        pass

    @synchronized  # lock bound to Class
    @classmethod
    def function_cm(cls):
        pass

    @synchronized  # lock bound to function_sm
    @staticmethod
    def function_sm():
        pass
Along with context manager like usage as well:
class Object(object):

    @synchronized
    def function_im_1(self):
        pass

    def function_im_2(self):
        with synchronized(self):
            pass
Further information and examples can also be found in:
http://wrapt.readthedocs.org/en/latest/examples.html
There is also a conference talk you can watch on how this is implemented at:
https://www.youtube.com/watch?v=EB6AH-85zfY&t=1s
You can't get self at decoration time because the decorator is applied at function definition time. No self exists yet; in fact, the class doesn't exist yet.
If you're willing to store your lock on the instance (which is arguably where a per-instance value should go) then this might do ya:
def synchronized_method(func):
    def _synchronized(self, *args, **kw):
        if not hasattr(self, "_lock"): self._lock = oneLockPerObject(self)
        with self._lock: return func(self, *args, **kw)
    return _synchronized
You could also generate the lock in your __init__() method on a base class of some sort, and store it on the instance in the same way. That simplifies your decorator because you don't have to check for the existence of the self._lock attribute.
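A rough sketch of that base-class variant, using a threading.RLock in place of the hypothetical oneLockPerObject helper:

import threading

class SynchronizedBase(object):
    def __init__(self, *args, **kw):
        super(SynchronizedBase, self).__init__(*args, **kw)
        self._lock = threading.RLock()   # one lock per instance, created up front

def synchronized_method(func):
    def _synchronized(self, *args, **kw):
        with self._lock:                 # no hasattr() check needed any more
            return func(self, *args, **kw)
    return _synchronized

class C(SynchronizedBase):
    @synchronized_method
    def f(self):
        pass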
(1) What's confusing is that the func parameter passed to my decorator changes type before it gets passed into the wrapper-generator. This seems rude and unnecessary. Why does this happen?
It doesn't! Rather, function objects (and other descriptors) produce their __get__'s results when that method of theirs is called -- and that result is the method object!
But what lives in the class's __dict__ is always the descriptor -- specifically, the function object! Check it out...:
>>> class X(object):
... def x(self): pass
...
>>> X.__dict__['x']
<function x at 0x10fe04e60>
>>> type(X.__dict__['x'])
<type 'function'>
See? No method objects around anywhere at all!-)
Therefore, no im_self around either, at decoration time -- and you'll need to go with your introspection-based alternative idea.

Using classes as decorators to decorate a method from another class [duplicate]

Consider this small example:
import datetime as dt

class Timed(object):
    def __init__(self, f):
        self.func = f

    def __call__(self, *args, **kwargs):
        start = dt.datetime.now()
        ret = self.func(*args, **kwargs)
        time = dt.datetime.now() - start
        ret["time"] = time
        return ret

class Test(object):
    def __init__(self):
        super(Test, self).__init__()

    @Timed
    def decorated(self, *args, **kwargs):
        print(self)
        print(args)
        print(kwargs)
        return dict()

    def call_deco(self):
        self.decorated("Hello", world="World")

if __name__ == "__main__":
    t = Test()
    ret = t.call_deco()
which prints
Hello
()
{'world': 'World'}
Why is the self parameter (which should be the Test obj instance) not passed as first argument to the decorated function decorated?
If I do it manually, like :
def call_deco(self):
    self.decorated(self, "Hello", world="World")
it works as expected. But if I must know in advance whether a function is decorated or not, it defeats the whole purpose of decorators. What is the pattern to use here, or did I misunderstand something?
tl;dr
You can fix this problem by making the Timed class a descriptor and returning a partially applied function from __get__ which applies the Test object as one of the arguments, like this
class Timed(object):
    def __init__(self, f):
        self.func = f

    def __call__(self, *args, **kwargs):
        print(self)
        start = dt.datetime.now()
        ret = self.func(*args, **kwargs)
        time = dt.datetime.now() - start
        ret["time"] = time
        return ret

    def __get__(self, instance, owner):
        from functools import partial
        return partial(self.__call__, instance)
The actual problem
Quoting Python documentation for decorator,
The decorator syntax is merely syntactic sugar, the following two function definitions are semantically equivalent:
def f(...):
    ...
f = staticmethod(f)

@staticmethod
def f(...):
    ...
So, when you say,
@Timed
def decorated(self, *args, **kwargs):
it is actually
decorated = Timed(decorated)
only the function object is passed to Timed; the object to which it is actually bound is not passed along with it. So, when you invoke it like this
ret = self.func(*args, **kwargs)
self.func will refer to the unbound function object and it is invoked with Hello as the first argument. That is why self prints as Hello.
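A quick way to see this (my own check, run against the code in the question before any __get__ is added): the attribute stored on the class is the Timed instance itself, so no binding ever happens:

t = Test()
print(type(Test.__dict__['decorated']))   # <class '__main__.Timed'>
print(type(t.decorated))                  # <class '__main__.Timed'> too: no binding happens
t.decorated("Hello", world="World")       # so self.func receives "Hello" as `self`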
How can I fix this?
Since you have no reference to the Test instance in the Timed object, the only way to do this would be to convert Timed into a descriptor class. Quoting the documentation, Invoking descriptors section,
In general, a descriptor is an object attribute with “binding behavior”, one whose attribute access has been overridden by methods in the descriptor protocol: __get__(), __set__(), and __delete__(). If any of those methods are defined for an object, it is said to be a descriptor.
The default behavior for attribute access is to get, set, or delete the attribute from an object’s dictionary. For instance, a.x has a lookup chain starting with a.__dict__['x'], then type(a).__dict__['x'], and continuing through the base classes of type(a) excluding metaclasses.
However, if the looked-up value is an object defining one of the descriptor methods, then Python may override the default behavior and invoke the descriptor method instead.
We can make Timed a descriptor, by simply defining a method like this
def __get__(self, instance, owner):
    ...
Here, self refers to the Timed object itself, instance refers to the actual object on which the attribute lookup is happening and owner refers to the class corresponding to the instance.
Now, when the decorated attribute is accessed on a Test instance, the __get__ method of Timed will be invoked. Now, somehow, we need to pass the first argument as the instance of the Test class (even before Hello). So, we create another partially applied function, whose first parameter will be the Test instance, like this
def __get__(self, instance, owner):
    from functools import partial
    return partial(self.__call__, instance)
Now, self.__call__ is a bound method (bound to Timed instance) and the second parameter to partial is the first argument to the self.__call__ call.
So, all these effectively translate like this
t.call_deco()
self.decorated("Hello", world="World")
Now self.decorated is actually a Timed(decorated) object (this will be referred to as TimedObject from now on). Whenever we access it, the __get__ method defined in it will be invoked and it returns a partial function. You can confirm that like this
def call_deco(self):
    print(self.decorated)
    self.decorated("Hello", world="World")
would print
<functools.partial object at 0x7fecbc59ad60>
...
So,
self.decorated("Hello", world="World")
gets translated to
Timed.__get__(TimedObject, <Test obj>, Test.__class__)("Hello", world="World")
Since we return a partial function,
partial(TimedObject.__call__, <Test obj>)("Hello", world="World"))
which is actually
TimedObject.__call__(<Test obj>, 'Hello', world="World")
So, <Test obj> also becomes a part of *args, and when self.func is invoked, the first argument will be the <Test obj>.
You first have to understand how functions become methods and how self is "automagically" injected.
Once you know that, the "problem" is obvious: you are decorating the decorated function with a Timed instance - IOW, Test.decorated is a Timed instance, not a function instance - and your Timed class does not mimic the function type's implementation of the descriptor protocol. What you want looks like this:
import datetime as dt
import types

class Timed(object):
    def __init__(self, f):
        self.func = f

    def __call__(self, *args, **kwargs):
        start = dt.datetime.now()
        ret = self.func(*args, **kwargs)
        time = dt.datetime.now() - start
        ret["time"] = time
        return ret

    def __get__(self, instance, cls):
        return types.MethodType(self, instance, cls)
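Note that the three-argument form of types.MethodType only exists on Python 2; on Python 3, where unbound methods are gone, a minimal sketch of the equivalent __get__ would be:

def __get__(self, instance, cls):
    if instance is None:
        return self                        # accessed on the class: return the Timed object itself
    return types.MethodType(self, instance)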

Class Decorator when Inheriting from another class

I've been on a tear writing some decorators recently.
One of the ones I just wrote allows you to put the decorator just before a class definition, and it will cause every method of the class to print some logging info when it's run (more for debugging/initial super basic speed tests during a build).
import time
import inspect

def class_logit(cls):
    class NCls(object):
        def __init__(self, *args, **kwargs):
            self.instance = cls(*args, **kwargs)

        @staticmethod
        def _class_logit(original_function):
            def arg_catch(*args, **kwargs):
                start = time.time()
                result = original_function(*args, **kwargs)
                print('Called: {0} | From: {1} | Args: {2} | Kwargs: {3} | Run Time: {4}'
                      ''.format(original_function.__name__, str(inspect.getmodule(original_function)),
                                args, kwargs, time.time() - start))
                return result
            return arg_catch

        def __getattribute__(self, s):
            try:
                x = super(NCls, self).__getattribute__(s)
            except AttributeError:
                pass
            else:
                return x
            x = self.instance.__getattribute__(s)
            if type(x) == type(self.__init__):
                return self._class_logit(x)
            else:
                return x
    return NCls
This works great when applied to a very basic class I create.
Where I start to encounter issues is when I apply it to a class that inherits from another - for instance, using Qt:
@scld.class_logit
class TestWindow(QtGui.QDialog):
    def __init__(self):
        print self
        super(TestWindow, self).__init__()

a = TestWindow()
I'm getting the following error... and I'm not entirely sure what to do about it!
    self.instance = cls(*args, **kwargs)
  File "<string>", line 15, in __init__
TypeError: super(type, obj): obj must be an instance or subtype of type
Any help would be appreciated!
(Apologies in advance, no matter WHAT I do SO is breaking the formatting on my first bit of code... I'm even manually spending 10 minutes adding spaces but it's coming out incorrectly... sorry!)
You are being a bit too intrusive with your decorator.
While a somewhat aggressive approach is needed if you want to profile methods defined on the Qt framework itself, your decorator replaces the entire class with a proxy.
Qt bindings are somewhat complicated indeed, and it is hard to tell why it is erroring when being instantiated in this case.
So - first things first - if your intent is to apply the decorator to a class hierarchy defined by yourself, or at least one defined in pure Python, a good approach could be using metaclasses: with a metaclass you could decorate each method when the class is created, and not mess with anything at runtime, when methods are retrieved from each class.
But Qt, like some other libraries, has its methods and classes defined in native code, and that will prevent you from wrapping existing methods in a new class. So, wrapping the methods on attribute retrieval in __getattribute__ could work.
Here is a simpler approach that, instead of using a proxy, just plugs in a foreign __getattribute__ that does the wrap-with-logger thing you want.
Your mileage may vary with it. In particular, it won't be triggered if one method of the class is called by another method in native code - as this won't go through Python's attribute retrieval mechanism (instead, it will use C++ method retrieval directly).
from PyQt5 import QtWidgets, QtGui

def log_dec(func):
    def wrapper(*args, **kwargs):
        print(func.__name__, args, kwargs)
        return func(*args, **kwargs)
    return wrapper

def decorate(cls):
    def __getattribute__(self, attr):
        attr = super(cls, self).__getattribute__(attr)
        if callable(attr):
            return log_dec(attr)
        return attr
    cls.__getattribute__ = __getattribute__
    return cls

@decorate
class Example(QtGui.QWindow):
    pass

app = QtWidgets.QApplication([])
w = Example()
w.show()
(Of course, just replace the basic logger by your fancy logger above)

Better solution to access class that owns the method from a method decorator

Recently, I faced a problem which was similar to this question:
Accessing the class that owns a decorated method from the decorator
My rep was not high enough to comment there, so I am starting a new question to address some improvements to the answer to that problem.
This is what I needed:
def original_decorator(func):
    # need to access class here
    # for eg, to append the func itself to class variable "a", to register func
    # or say, append func's default arg values to class variable "a"
    return func

class A(object):
    a = []

    @classmethod
    @original_decorator
    def some_method(self, a=5):
        ''' hello'''
        print "Calling some_method"

    @original_decorator
    def some_method_2(self):
        ''' hello again'''
        print "Calling some_method_2"
The solution would need to work both with class methods and instance methods, and the method returned from the decorator should work and behave just the same way as if it were undecorated, i.e. the method signature should be preserved.
The accepted answer for that question returned a Class from the decorator and the metaclass identified that specific Class, and did the "class-accessing" operations.
The answer did describe itself as a rough solution, but clearly it had a few caveats:
The decorator returned a class and it was not callable. Obviously, it can be made callable easily, but the returned value is still a class - it just behaves the same way when called, but its properties and behaviors would be different. Essentially, it would not work the same way as the undecorated method.
It forced the decorator to return a custom-type class and all the "class-accessing" code was put inside the metaclass directly. It is simply not nice; writing the decorator should not require touching the metaclass directly.
I have tried to come up with a better solution, documented in the answer.
Here is the solution.
It uses a decorator (which would work on "class-accessing" decorators) and a metaclass, which would fulfill all my requirements and address the problems of that answer. Probably the best advantage is that the "class-accessing" decorators can just access the class, without even touching the metaclass.
# Using metaclass and decorator to allow class access during class creation time
# No method defined within the class should have "_process_meta" as arg
# Potential problems: Using closures, function.func_globals is read-only

from functools import partial
import inspect

class meta(type):
    def __new__(cls, name, base, clsdict):
        temp_cls = type.__new__(cls, name, base, clsdict)
        methods = inspect.getmembers(temp_cls, inspect.ismethod)
        for (method_name, method_obj) in methods:
            tmp_spec = inspect.getargspec(method_obj)
            if "__process_meta" in tmp_spec.args:
                what_to_do, main_func = tmp_spec.defaults[:-1]
                f = method_obj.im_func
                f.func_code, f.func_defaults, f.func_dict, f.func_doc, f.func_name = main_func.func_code, main_func.func_defaults, main_func.func_dict, main_func.func_doc, main_func.func_name
                mod_func = what_to_do(temp_cls, f)
                f.func_code, f.func_defaults, f.func_dict, f.func_doc, f.func_name = mod_func.func_code, mod_func.func_defaults, mod_func.func_dict, mod_func.func_doc, mod_func.func_name
        return temp_cls

def do_it(what_to_do, main_func=None):
    if main_func is None:
        return partial(do_it, what_to_do)
    def whatever(what_to_do=what_to_do, main_func=main_func, __process_meta=True):
        pass
    return whatever

def original_classmethod_decorator(cls, func):
    # cls => class of the method
    # appends default arg values to class variable "a"
    func_defaults = inspect.getargspec(func).defaults
    cls.a.append(func_defaults)
    func.__doc__ = "This is a class method"
    print "Calling original classmethod decorator"
    return func

def original_method_decorator(cls, func):
    func_defaults = inspect.getargspec(func).defaults
    cls.a.append(func_defaults)
    func.__doc__ = "This is a instance method"  # Can change func properties
    print "Calling original method decorator"
    return func

class A(object):
    __metaclass__ = meta
    a = []

    @classmethod
    @do_it(original_classmethod_decorator)
    def some_method(cls, x=1):
        ''' hello'''
        print "Calling original class method"

    @do_it(original_method_decorator)
    def some_method_2(self, y=2):
        ''' hello again'''
        print "Calling original method"

# signature preserved
print(inspect.getargspec(A.some_method))
print(inspect.getargspec(A.some_method_2))
Open to suggestions on whether this approach has any caveats.
