I'm trying to create a partial function but with dynamic arguments that are stored as class attributes and changed accordingly. Something like the following code:
from functools import partial
def foo(*args, msg):
print(msg)
class Bar:
def __init__(self, msg):
self.msg = msg
self.functions = dict()
self.functions['foo'] = partial(foo, msg=self.msg)
def foo_method(self, *args):
return self.functions['foo'](*args)
b = Bar('1')
b.foo_method()
b.msg = '2'
b.foo_method()
Of course, both calls print '1', because the partial object fixes its arguments when it is created. The only alternative I found was turning the attribute into a property and manually updating the partial's keywords in the setter:
class Bar:
def __init__(self, msg):
self._msg = None
self.functions = dict()
self.functions['foo'] = partial(foo)
self.msg = msg
def foo_method(self, *args):
return self.functions['foo'](*args)
    @property
def msg(self):
return self._msg
    @msg.setter
def msg(self, msg):
self._msg = msg
self.functions['foo'].keywords['msg'] = msg
I would like to know if there is a more "pythonic" / efficient way to do this, since I really don't need to use properties except for this workaround.
You can use lambda instead of partial for deferred (often referred to as "lazy") evaluation of the arguments, so that self.msg is not evaluated until the function is called:
class Bar:
def __init__(self, msg):
self.msg = msg
self.functions = dict()
self.functions['foo'] = lambda *args: foo(*args, msg=self.msg)
def foo_method(self, *args):
return self.functions['foo'](*args)
What's wrong with just storing a reference to the passed function and constructing the call on the spot? i.e.:
class Bar:
def __init__(self, msg):
self.msg = msg
self.foo = foo # a reference to foo, not needed here but used as an example
def foo_method(self, *args):
return self.foo(*args, msg=self.msg) # or just: foo(*args, msg=self.msg)
Another thing that seems to work is defining the function to read its value from a class attribute. You can then build the partial with the class itself as one of the arguments:
from functools import partial

class myContext:
    a = 5

def my_fun(context, b, c):
    print(context.a, b, c)

my_fun_partial = partial(my_fun, myContext)
my_fun_partial(4, 7)
# Output: 5 4 7

myContext.a = 50
my_fun_partial = partial(my_fun, myContext)
my_fun_partial(4, 7)
# Output: 50 4 7
The simplest possible way I can think of would be just constructing a dict and passing it double-starred to the function to unpack.
Something like:
def some_func(msg, some_arg=None):
print("Hello world") # ignore the msg for now
call_args = {}
call_args['some_arg'] = 2 # single field
call_args.update({'msg': 1, 'stuff': [2,3,4]}) # multiple at once
some_func(**call_args)
Right now, some_func will throw a TypeError because we've passed more keyword arguments than the function accepts. You could work around this by having the function accept **kwargs in its signature (a minimal sketch follows), by trimming out the arguments the function does not expect, or by some other approach.
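For example, with **kwargs in the signature the extra keys are simply absorbed and ignored (this is just a sketch of that variant, not the only way to do it):

def some_func(msg, some_arg=None, **kwargs):
    # unexpected keyword arguments land in kwargs instead of raising TypeError
    print("Hello world")

call_args = {'some_arg': 2, 'msg': 1, 'stuff': [2, 3, 4]}
some_func(**call_args)  # => prints 'Hello world'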
For now, continuing the last session:
call_args = {'msg': 'abc'} # let's get rid of those extra args
some_func(**call_args) # => prints 'Hello world'
Related
I have a class that has multiple methods, and I want to store all of the available methods so that they are easily accessible. For example, something like this:
class Methods:
def foo(self, a):
return f'hello {a}'
def bar(self, b):
return f'hello {b}'
def methods_dict(self, var, **kwargs):
dic = {
'me' : self.foo(var),
'be': self.bar(var)
}
return dic
But at runtime my methods_dict() method will execute both of the methods inside its dictionary.
On one hand I'm planning to store only strings in there and it's really easily accessible; on the other hand I probably would not need to access all of the available methods at once.
Any suggestions ?
I am planning to use those methods as follows
class InheritMethods(Methods):
def __init__(self, method_name):
self.method_name = method_name
def add_to_list(self, input):
arr = []
        arr.append(self.methods_dict(input)[self.method_name])
return arr
To clear things up: I am going to call a specific method based on the input name, so basically input == method_name.
I could write conditional statements like if input == 'foo': do something..., but if I end up having a lot of methods my code is going to be a mess, so I assume(!) that would not be a great idea.
I think you can get what you want with the following. Your exact use case is still not clear; please respond if I am heading in the wrong direction.
Using getattr() (which goes through self.__getattribute__() under the hood) you can look a method up by name. Of course, you would have to catch exceptions, etc.
class Methods:
def foo(self, a):
return f'hello {a}'
def bar(self, b):
return f'hello {b}'
class InheritMethods(Methods):
def __init__(self, method_name):
self.method_name = method_name
def add_to_list(self, method_name, input):
method = getattr(self, method_name)
result = method(input)
return [result]
class InheritSingleMethod(Methods):
def __init__(self, method_name):
        self.add_to_list = getattr(self, method_name)
Output
# Any method version
inherit_methods = InheritMethods('a') # < no use for that argument right?
inherit_methods.add_to_list('foo', 'laurens')
> ['hello laurens']
# Single method version
inherit_single_method = InheritSingleMethod('foo')
inherit_single_method.add_to_list('laurens')
> 'hello laurens'
If all you want to do is access a method of Methods given the name in a str, use getattr:
name = input()
m = Methods()
getattr(m, name)("bob")
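If the name might not correspond to a method, you can guard the lookup; a minimal sketch (the fallback message is just illustrative):

method = getattr(m, name, None)
if callable(method):
    print(method("bob"))
else:
    print(f"no method named {name!r}")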
I'm learning about python at the moment and came across this code:
class Simple:
def __init__(self, str):
print("Inside the Simple constructor")
self.s = str
# Two methods:
def show(self):
print(self.s)
def showMsg(self, msg):
print(msg + ':',
self.show())
I'm playing around with it in the python shell and did the following:
x = Simple("A constructor argument")
x.show()
which outputs:
A constructor argument
This makes sense to me; however, I then input:
x.showMsg("A message")
Which outputs:
A constructor argument
A message: None
This is where I'm confused. Why does the call to self.show() in showMsg() result in "None", when x.show() results in "A constructor argument"? I thought that self was a placeholder for the instance of the class, which would mean that self.show() in this case would be equivalent to x.show()?
Any help on this would be very appreciated.
Get in the habit of having your functions return values. Functions with no return statement return None by default.
Here is an example of how you might rewrite your program:
class Simple:
def __init__(self, str):
self.s = str
# Two methods:
def show(self):
return self.s
def showMsg(self, msg):
return msg + ': ' + self.show()
x = Simple("A constructor argument")
print(x.show())
# A constructor argument
print(x.showMsg('A message'))
# A message: A constructor argument
While you can have print inside your class, it's a better idea to have your class handle the logic and keep the flexibility of deciding what to do with the results (print them, store them in a list, pass them on to another function, etc.).
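With the rewritten class above, for example, the caller decides what happens to the returned string (the list and the len() call are purely illustrative):

x = Simple("A constructor argument")
result = x.showMsg('A message')  # nothing is printed yet

print(result)        # print it ...
messages = [result]  # ... or store it in a list
len(result)          # ... or pass it on to another function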
What the show() method in your class does is just print the stored message. What showMsg tries to do is combine some msg with the stored one by calling the show method; however, since show() returns nothing, i.e. None, that None is what ends up in the output. You will have to change your method to either:
def show(self):
return self.s
or
def show(self):
print(self.s)
return self.s
In the second case you retain the behaviour of both (it still prints and also returns the value), but it is bad practice.
Suppose I have this decorator:
def decorator(f):
def f_wrap(*args):
for item in args:
print(args)
return f(*args)
return f_wrap
When used as "permanent" decorators with the # syntax, args retrieves the arguments of the wrapped function. For example, when used with the class below, I receive the instance of MyObject.
Class MyObject(object):
def __init__(self):
pass
    @decorator
def function(self):
return
How can I achieve the same result using a "fluid" decorator, i.e. a decorator that is not permanently bound to the function it is decorating? For example:
def decorator(f):
def f_wrap(*args):
if (not args):
print("Nothing in args")
return f(*args)
return f_wrap
class MyClass(object):
def __init__(self):
pass
def function(self):
return
if __name__ == "__main__":
myobj = MyClass()
myobj.function = decorator(myobj.function)
myobj.function()
In this case, the args tuple is always empty (I always get "Nothing in args"), even though I anticipated that it would contain the instance myobj.
EDIT:
In case it was not clear from @AChampion's post, the solution is to simply call the fluid-decorated method as an "unbound" method. E.g.,
from types import MethodType
def decorator(f):
def f_wrap(*args):
# I replaced this with an iteration through
# args. It's a bit more demonstrative.
for item in args:
print(item)
return f(*args)
return f_wrap
class MyClass(object):
def __init__(self):
pass
def function(self):
return
if __name__ == "__main__":
myobj = MyClass()
myobj.function = MethodType(decorator(MyClass.function), myobj)
myobj.function()
The reason for the difference is that you are wrapping different things, an unbound method vs. a bound method:
class MyObject(object):
    @decorator
def function(self):
pass
Is equivalent to:
import types
class MyClass(object):
def function(self):
pass
m = MyClass()
m.function = types.MethodType(decorator(MyClass.function), m)
Not:
m.function = decorator(m.function)
The first being an unbound method, the second being a bound method.
You aren't using all properly. all() returns a bool indicating whether every element of the iterable you pass it is truthy. In your case you aren't really checking anything; with how you are using all, the condition will always evaluate to True.
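For reference, here is roughly how all() evaluates an iterable:

all([1, 2, 3])   # True: every element is truthy
all([1, 0, 3])   # False: 0 is falsy
all([])          # True: vacuously true for an empty iterable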
I believe what you are looking for is simply this:
if not args:
Now, what this ultimately checks is whether any positional arguments were passed to the wrapped method. For the function you have, you aren't passing any arguments; therefore, with the if not args check, you will actually get:
"Nothing in args"
However, if you add an argument to your method as such:
def function(self, x):
return
Then call: myobj.function(1)
You will not get "Nothing in args".
To answer your last question about not getting your instance: if you print out f when you apply the decorator like this:
myobj.function = decorator(myobj.function)
myobj.function()
You will get a bound method:
<bound method MyClass.function of <__main__.MyClass object at 0x102002390>>
Now, set up your decorator as such:
@decorator
def function(self):
return
You will see you get a function attached to your class object:
<function MyClass.function at 0x102001620>
Hence showing that they aren't doing the exact same thing you would expect. Hope this helps clarify a bit.
I have a set of arrays that are very large and expensive to compute, and not all will necessarily be needed by my code on any given run. I would like to make their declaration optional, but ideally without having to rewrite my whole code.
Example of how it is now:
x = function_that_generates_huge_array_slowly(0)
y = function_that_generates_huge_array_slowly(1)
Example of what I'd like to do:
x = lambda: function_that_generates_huge_array_slowly(0)
y = lambda: function_that_generates_huge_array_slowly(1)
z = x * 5 # this doesn't work because lambda is a function
# is there something that would make this line behave like
# z = x() * 5?
g = x * 6
While using lambda as above achieves one of the desired effects - computation of the array is delayed until it is needed - if you use the variable "x" more than once, it has to be computed each time. I'd like to compute it only once.
EDIT:
After some additional searching, it looks like it is possible to do what I want (approximately) with "lazy" attributes in a class (e.g. http://code.activestate.com/recipes/131495-lazy-attributes/). I don't suppose there's any way to do something similar without making a separate class?
EDIT2: I'm trying to implement some of the solutions, but I'm running into an issue because I don't understand the difference between:
class sample(object):
def __init__(self):
class one(object):
def __get__(self, obj, type=None):
print "computing ..."
obj.one = 1
return 1
self.one = one()
and
class sample(object):
class one(object):
def __get__(self, obj, type=None):
print "computing ... "
obj.one = 1
return 1
one = one()
I think some variation on these is what I'm looking for, since the expensive variables are intended to be part of a class.
The first half of your problem (reusing the value) is easily solved:
class LazyWrapper(object):
def __init__(self, func):
self.func = func
self.value = None
def __call__(self):
if self.value is None:
self.value = self.func()
return self.value
lazy_wrapper = LazyWrapper(lambda: function_that_generates_huge_array_slowly(0))
But you still have to use it as lazy_wrapper() not lazy_wrapper.
If you're going to be accessing some of the variables many times, it may be faster to use:
class LazyWrapper(object):
def __init__(self, func):
self.func = func
def __call__(self):
try:
return self.value
except AttributeError:
self.value = self.func()
return self.value
Which will make the first call slower and subsequent uses faster.
Edit: I see you found a similar solution that requires you to use attributes on a class. Either way requires you to rewrite every lazy variable access, so just pick whichever you like.
Edit 2: You can also do:
class YourClass(object):
def __init__(self, func):
self.func = func
    @property
def x(self):
try:
return self.value
except AttributeError:
self.value = self.func()
return self.value
If you want to access x as an instance attribute, no additional class is needed. If you don't want to change the class signature (by making it require func), you can hard code the function call into the property; a rough sketch follows.
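Here the call is hard-coded (reusing function_that_generates_huge_array_slowly from the question; _x is just the attribute used to cache the result):

class YourClass(object):
    @property
    def x(self):
        try:
            return self._x
        except AttributeError:
            # computed only on first access, then cached on the instance
            self._x = function_that_generates_huge_array_slowly(0)
            return self._x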
Writing a class is more robust, but optimizing for simplicity (which I think you are asking for), I came up with the following solution:
cache = {}
def expensive_calc(factor):
print 'calculating...'
return [1, 2, 3] * factor
def lookup(name):
return ( cache[name] if name in cache
else cache.setdefault(name, expensive_calc(2)) )
print 'run one'
print lookup('x') * 2
print 'run two'
print lookup('x') * 2
Python 3.2 and greater implement an LRU algorithm in the functools module to handle simple cases of caching/memoization:
import functools
@functools.lru_cache(maxsize=128)  # cache at most 128 items
def f(x):
print("I'm being called with %r" % x)
return x + 1
z = f(9) + f(9)**2
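The wrapper created by lru_cache also exposes its bookkeeping, which is handy for checking that repeated calls really come from the cache (the numbers below are what this particular example should report):

f(9)                   # no print this time; the result comes from the cache
print(f.cache_info())  # CacheInfo(hits=2, misses=1, maxsize=128, currsize=1)
f.cache_clear()        # empty the cache if needed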
You can't make a simple name, like x, really evaluate lazily. A name is just an entry in a hash table (e.g. the one that locals() or globals() returns). Unless you patch the access methods of these system tables, you cannot attach execution of your code to simple name resolution.
But you can wrap functions in caching wrappers in different ways.
This is an OO way:
class CachedSlowCalculation(object):
cache = {} # our results
def __init__(self, func):
self.func = func
def __call__(self, param):
        already_known = self.cache.get(param, None)
        if already_known is not None:
return already_known
value = self.func(param)
self.cache[param] = value
return value
calc = CachedSlowCalculation(function_that_generates_huge_array_slowly)
z = calc(1) + calc(1)**2 # only calculates things once
This is a classless way:
def cached(func):
func.__cache = {} # we can attach attrs to objects, functions are objects
def wrapped(param):
cache = func.__cache
        already_known = cache.get(param, None)
        if already_known is not None:
return already_known
value = func(param)
cache[param] = value
return value
return wrapped
#cached
def f(x):
print "I'm being called with %r" % x
return x + 1
z = f(9) + f(9)**2 # see f called only once
In the real world you'll add some logic to keep the cache to a reasonable size, possibly using an LRU algorithm; a rough sketch follows.
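functools.lru_cache (shown in another answer above) already does this for you; if you prefer the hand-rolled decorator, capping it with an OrderedDict could look roughly like this (the maxsize parameter and the eviction step are illustrative choices):

from collections import OrderedDict

def cached(func, maxsize=128):
    func.__cache = OrderedDict()
    def wrapped(param):
        cache = func.__cache
        if param in cache:
            cache[param] = cache.pop(param)  # re-insert as most recently used
            return cache[param]
        value = func(param)
        cache[param] = value
        if len(cache) > maxsize:
            cache.popitem(last=False)  # evict the least recently used entry
        return value
    return wrapped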
To me, it seems that the proper solution for your problem is subclassing a dict and using it.
class LazyDict(dict):
def __init__(self, lazy_variables):
self.lazy_vars = lazy_variables
def __getitem__(self, key):
if key not in self and key in self.lazy_vars:
self[key] = self.lazy_vars[key]()
return super().__getitem__(key)
def generate_a():
print("generate var a lazily..")
return "<a_large_array>"
# You can add as many variables as you want here
lazy_vars = {'a': generate_a}
lazy = LazyDict(lazy_vars)
# retrieve the variable you need from `lazy`
a = lazy['a']
print("Got a:", a)
And you can actually evaluate a variable lazily if you use exec to run your code; the trick is to pass a custom globals mapping.
your_code = "print('inside exec');print(a)"
exec(your_code, lazy)
If you did your_code = open(your_file).read(), you could actually run your code and achieve what you want. But I think the more practical approach would be the former one.
I need to generate code for a method at runtime. It's important to be able to run arbitrary code and have a docstring.
I came up with a solution combining exec and setattr, here's a dummy example:
class Viking(object):
def __init__(self):
code = '''
def dynamo(self, arg):
""" dynamo's a dynamic method!
"""
self.weight += 1
return arg * self.weight
'''
self.weight = 50
d = {}
exec code.strip() in d
setattr(self.__class__, 'dynamo', d['dynamo'])
if __name__ == "__main__":
v = Viking()
print v.dynamo(10)
print v.dynamo(10)
print v.dynamo.__doc__
Is there a better / safer / more idiomatic way of achieving the same result?
Based on Theran's code, but extending it to methods on classes:
class Dynamo(object):
pass
def add_dynamo(cls,i):
def innerdynamo(self):
print "in dynamo %d" % i
innerdynamo.__doc__ = "docstring for dynamo%d" % i
innerdynamo.__name__ = "dynamo%d" % i
setattr(cls,innerdynamo.__name__,innerdynamo)
for i in range(2):
add_dynamo(Dynamo, i)
d=Dynamo()
d.dynamo0()
d.dynamo1()
Which should print:
in dynamo 0
in dynamo 1
Function docstrings and names are mutable properties. You can do anything you want in the inner function, or even have multiple versions of the inner function that makedynamo() chooses between (a sketch of that follows the snippet below). No need to build any code out of strings.
Here's a snippet out of the interpreter:
>>> def makedynamo(i):
... def innerdynamo():
... print "in dynamo %d" % i
... innerdynamo.__doc__ = "docstring for dynamo%d" % i
... innerdynamo.__name__ = "dynamo%d" % i
... return innerdynamo
>>> dynamo10 = makedynamo(10)
>>> help(dynamo10)
Help on function dynamo10 in module __main__:
dynamo10()
docstring for dynamo10
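For instance, a minimal sketch of makedynamo() picking between several inner functions (the verbose flag and the two variants are purely illustrative):

def makedynamo(i, verbose=False):
    def quiet():
        return i
    def chatty():
        print("in dynamo %d" % i)
        return i
    inner = chatty if verbose else quiet  # choose which inner function to return
    inner.__doc__ = "docstring for dynamo%d" % i
    inner.__name__ = "dynamo%d" % i
    return inner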
Python will let you declare a function in a function, so you don't have to do the exec trickery.
def __init__(self):
def dynamo(self, arg):
""" dynamo's a dynamic method!
"""
self.weight += 1
return arg * self.weight
self.weight = 50
setattr(self.__class__, 'dynamo', dynamo)
If you want to have several versions of the function, you can put all of this in a loop and vary what you name them in the setattr function:
def __init__(self):
for i in range(0,10):
def dynamo(self, arg, i=i):
""" dynamo's a dynamic method!
"""
self.weight += i
return arg * self.weight
        setattr(self.__class__, 'dynamo_' + str(i), dynamo)
self.weight = 50
(I know this isn't great code, but it gets the point across). As far as setting the docstring, I know that's possible but I'd have to look it up in the documentation.
Edit: You can set the docstring via dynamo.__doc__, so you could do something like this in your loop body:
dynamo.__doc__ = "Adds %s to the weight" % i
Another Edit: With help from @eliben and @bobince, the closure problem should be solved.
class Dynamo(object):
def __init__(self):
pass
    @staticmethod
def init(initData=None):
if initData is not None:
dynamo= Dynamo()
for name, value in initData.items():
code = '''
def %s(self, *args, **kwargs):
    %s
''' % (name, value)
result = {}
exec code.strip() in result
setattr(dynamo.__class__, name, result[name])
return dynamo
return None
service = Dynamo.init({'fnc1':'pass'})
service.fnc1()
A bit more general solution:
You can call any method of an instance of class Dummy.
The docstring is generated based on the method's name.
The handling of any input arguments is demonstrated by just returning them.
Code
class Dummy(object):
def _mirror(self, method, *args, **kwargs):
"""doc _mirror"""
return args, kwargs
def __getattr__(self, method):
"doc __getattr__"
def tmp(*args, **kwargs):
"""doc tmp"""
return self._mirror(method, *args, **kwargs)
tmp.__doc__ = (
'generated docstring, access by {:}.__doc__'
.format(method))
return tmp
d = Dummy()
print(d.test2('asd', level=0), d.test.__doc__)
print(d.whatever_method(7, 99, par=None), d.whatever_method.__doc__)
Output
(('asd',), {'level': 0}) generated docstring, access by test.__doc__
((7, 99), {'par': None}) generated docstring, access by whatever_method.__doc__
Pardon me for my bad English.
I recently needed to generate dynamic functions to bind each menu item to open a particular frame in wxPython. Here is what I do.
First, I create a list of mappings between menu items and frames:
menus = [(self.menuItemFile, FileFrame), (self.menuItemEdit, EditFrame)]
The first item of each mapping is the menu item and the second is the frame to be opened. Next, I bind the wx.EVT_MENU event of each menu item to its frame:
for menu in menus:
f = genfunc(self, menu[1])
self.Bind(wx.EVT_MENU, f, menu[0])
The genfunc function is the dynamic function builder; here is the code:
def genfunc(parent, form):
def OnClick(event):
f = form(parent)
f.Maximize()
f.Show()
return OnClick