I'm writing a feature generation class that is meant to be extensible. For example, in the following snippet, any method whose name starts with generate is a feature generation method:
class FeatureGenerator:
    def __init__(self):
        self.generate_a()
        self.generate_b()

    def method_a(self): pass
    def generate_a(self): ...  # does stuff
    def generate_b(self): ...  # does stuff
I want to execute all methods with the generate prefix within __init__, but I don't want to add each one manually every time I write a new method. One solution could be a decorator that adds each method to a list and then executes every element of that list within __init__, but I am not sure that's a good idea. Is there a pythonic way to do this?
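For reference, the decorator/registry idea would look roughly like this (a rough sketch; the registry and decorator names are illustrative):

_feature_methods = []

def feature(func):
    # registers the feature generation method at class-definition time
    _feature_methods.append(func)
    return func

class FeatureGenerator:
    def __init__(self):
        for func in _feature_methods:
            func(self)

    @feature
    def generate_a(self):
        print("gen a")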
Using dir() on the instance:
class FeatureGenerator:
    def __init__(self):
        for name in dir(self):
            attr = getattr(self, name)
            if callable(attr) and name.startswith("generate"):
                attr()

    def method_a(self): pass
    def generate_a(self): print("gen a")
    def generate_b(self): print("gen b")
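Instantiating the class is then enough to run every generator. Note that dir() returns names sorted alphabetically, so the generators run in alphabetical order rather than definition order:

fg = FeatureGenerator()
# gen a
# gen b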
I have a scenario where I am given a file name and need to check whether any class in it has a constructor argument named start; if it does, I have to create an instance of that class.
Consider an example where I have a file named test.py containing three classes, A, B and C. Only class A has a start parameter; the others have different or additional parameters.
# test.py
class A:
    def __init__(self, start=""):
        pass

class B:
    def __init__(self, randomKeyword, start=""):
        pass

class C:
    def __init__(self):
        pass
Now I want to write a script which takes test.py as an argument and creates an instance of A. My progress so far is:
spec = importlib.util.spec_from_file_location('test', '/path/to/test.py')
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
Basically, I need to write a program which inspects the __init__ arguments of every class in the file and creates an instance of each class that takes start as an __init__ argument.
As mentioned by @deceze, it's not a good idea to instantiate a class on the basis of its __init__ parameters, since we can't be sure what is there. But it is possible, so I am posting this answer just so that you know how it can be done.
# test.py
class A:
    def __init__(self, start=""):
        pass

class B:
    def __init__(self, randomKeyword, start=""):
        pass

class C:
    def __init__(self):
        pass
One possibility is:
# init.py
import importlib.util
from inspect import getmembers, isclass, signature

spec = importlib.util.spec_from_file_location('test', '/path/to/test.py')
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)

for name, cls in getmembers(module, isclass):
    parameters = signature(cls.__init__).parameters.keys()
    # __init__ takes exactly self and start
    if len(parameters) == 2 and 'start' in parameters:
        instance = cls(start="Whatever you want")
Of course it's not the best approach, so more answers are welcome; if you find yourself in this scenario, consider @deceze's comment and define a builder instead.
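For illustration, the builder idea from that comment might look something like this (a sketch; the build function is an assumption, not part of the original code):

# test.py -- the module states its entry point explicitly,
# instead of the caller guessing from __init__ signatures
class A:
    def __init__(self, start=""):
        self.start = start

def build(start=""):
    # factory chosen by the module author
    return A(start=start)

The loading script can then simply call module.build(start="Whatever you want") after executing the module.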
I have a basic class Framework with 3 methods that can be set by the user: initialize, handle_event and finalize.
These methods are executed by the method run:
class Framework(object):
    def initialize(self):
        pass

    def handle_event(self, event):
        pass

    def finalize(self):
        pass

    def run(self):
        self.initialize()
        for event in range(10):
            self.handle_event(event)
        self.finalize()
I would like to create 3 decorators: on_initialize, on_event and on_finalize so that I could write such a class:
class MyFramework(Framework):
    # The following methods will be executed once, in this order
    @on_initialize(precedence=-1)
    def say_hi(self):
        print('say_hi')

    @on_initialize(precedence=0)
    def initialize(self):
        print('initialize')

    @on_initialize(precedence=1)
    def start_processing_events(self):
        print('start_processing_events')

    # The following methods will be executed for each event, in this order
    @on_event(precedence=-1)
    def before_handle_event(self, event):
        print('before_handle_event:', event)

    @on_event(precedence=0)
    def handle_event(self, event):
        print('handle_event:', event)

    @on_event(precedence=1)
    def after_handle_event(self, event):
        print('after_handle_event:', event)

    # The following methods will be executed once at the end, in this order
    @on_finalize(precedence=-1)
    def before_finalize(self):
        print('before_finalize')

    @on_finalize(precedence=0)
    def finalize(self):
        print('finalize')

    @on_finalize(precedence=1)
    def after_finalize(self):
        print('after_finalize')


if __name__ == '__main__':
    f = MyFramework()
    f.run()
These decorators determine the order of execution of the optional methods the user may add to the class. I think that by default, initialize, handle_event and finalize should take precedence=0. The user could then add any method with the right decorator and know exactly when it gets executed in the simulation run.
I honestly have no idea how to get started with this problem. Any help to push me in the right direction will be very welcome! Many thanks.
If you are using Python 3.6, this is a case that can take advantage of the new __init_subclass__ method. It is called on the superclass whenever a subclass of it is created.
Without Python 3.6 you have to resort to a metaclass.
The decorator itself can just mark each method with the needed data.
def on_initialize(precedence=0):
    def marker(func):
        func._initializer = precedence
        return func
    return marker

def on_event(precedence=0):
    def marker(func):
        func._event_handler = precedence
        return func
    return marker

def on_finalize(precedence=0):
    def marker(func):
        func._finalizer = precedence
        return func
    return marker
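Note that these decorators don't wrap anything; they just annotate the function object with its precedence:

@on_initialize(precedence=2)
def setup(self):
    pass

print(setup._initializer)   # 2

The base class then collects these markers when each subclass is created: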
class Framework:
    def __init_subclass__(cls, *args, **kw):
        super().__init_subclass__(*args, **kw)
        handlers = dict(_initializer=[], _event_handler=[], _finalizer=[])
        for name, method in cls.__dict__.items():
            for handler_type in handlers:
                if hasattr(method, handler_type):
                    handlers[handler_type].append((getattr(method, handler_type), name))
        for handler_type in handlers:
            setattr(cls, handler_type,
                    [handler[1] for handler in sorted(handlers[handler_type])])

    def initialize(self):
        for method_name in self._initializer:
            getattr(self, method_name)()

    def handle_event(self, event):
        for method_name in self._event_handler:
            getattr(self, method_name)(event)

    def finalize(self):
        for method_name in self._finalizer:
            getattr(self, method_name)()

    def run(self):
        self.initialize()
        for event in range(10):
            self.handle_event(event)
        self.finalize()
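A quick check with a small subclass (using names that don't shadow the dispatching initialize/handle_event/finalize):

class Demo(Framework):
    @on_initialize(precedence=-1)
    def say_hi(self):
        print('say_hi')

    @on_event(precedence=1)
    def after_handle_event(self, event):
        print('after_handle_event:', event)

Demo().run()   # prints 'say_hi', then 'after_handle_event: 0' ... 'after_handle_event: 9'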
If you have a complex class hierarchy that should inherit the action methods properly, you will have to merge the lists in the handlers dictionary with the ones in the superclass (get the superclass as cls.__mro__[1]) before applying them as class attributes.
Also, if you are using any Python < 3.6, you will need to move the logic of __init_subclass__ to a metaclass. Just put the code as it is in the __init__ method of a metaclass (and adjust the incoming parameters and the super call as appropriate), and it should work just the same.
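For reference, a minimal sketch of that metaclass variant (an untested transcription of the __init_subclass__ body above, using Python 3 metaclass syntax):

class FrameworkMeta(type):
    def __init__(cls, name, bases, namespace):
        super(FrameworkMeta, cls).__init__(name, bases, namespace)
        handlers = dict(_initializer=[], _event_handler=[], _finalizer=[])
        for attr_name, method in namespace.items():
            for handler_type in handlers:
                if hasattr(method, handler_type):
                    handlers[handler_type].append(
                        (getattr(method, handler_type), attr_name))
        for handler_type in handlers:
            setattr(cls, handler_type,
                    [handler[1] for handler in sorted(handlers[handler_type])])

class Framework(metaclass=FrameworkMeta):
    ...  # initialize/handle_event/finalize/run exactly as above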
My idea is to use class-based decorators, which are simple and give intermediate context to share between decorated functions. The decorator would look like this (I am using Python 3.5):
class OnInitialize:
    methods = {}

    def __init__(self, precedence):
        self.precedence = precedence

    def __call__(self, func):
        self.methods[self.precedence] = func

        def wrapper(*a, **kw):
            for precedence in sorted(self.methods.keys()):
                self.methods[precedence](*a, **kw)
        return wrapper
On decoration, __init__ is executed first and stores the precedence value for later use. Then __call__ is executed, which just adds the target function to the methods dictionary. (Note that __call__ and the methods structure could be customized to allow calling multiple methods with the same precedence.)
The target class and methods would look like this:
class Target:
    @OnInitialize(precedence=-1)
    def say_hi(self):
        print("say_hi")

    @OnInitialize(precedence=0)
    def initialize(self):
        print("initialize")

    @OnInitialize(precedence=1)
    def start_processing_events(self):
        print("start_processing_events")
This ensures that if any one of these methods is called, all the decorated methods are called, in the given order.
target = Target()
target.initialize()
Hope it helps; please comment below if you are interested in something else.
I created the following class:
import loader
import pandas as pd

class SavTool(pd.DataFrame):
    def __init__(self, path):
        pd.DataFrame.__init__(self, data=loader.Loader(path).data)

    @property
    def path(self):
        return path

    @property
    def meta_dict(self):
        return loader.Loader(path).dict
When the class is instantiated, the instance becomes a pandas DataFrame, which I wanted to extend with other attributes such as the path to the file and a dictionary containing meta information (called meta_dict).
What I want is the following: the dictionary meta_dict shall be mutable. Namely, the following should work:
df = SavTool("somepath")
df.meta_dict["new_key"] = "new_value"
print df.meta_dict["new_key"]
But what happens is that every time I use the syntax df.meta_dict, the method meta_dict is called and the original meta_dict from loader.Loader is returned, so df.meta_dict cannot be changed. The syntax therefore leads to "KeyError: 'new_key'". meta_dict should be computed only once; on every subsequent use it should just be an attribute, in this case a dictionary.
How can I fix this? Maybe the whole design of the class is bad and should be changed (I'm new to using classes)? Thanks for your answers!
Every time you call loader.Loader you create a new instance of the dictionary. The @property doesn't cache anything for you; it just provides a convenient way to wrap a complicated getter behind a clean interface for the caller.
Something like this should work. I also updated the path variable so it's bound correctly on the instance and returned correctly by the path property.
import loader
import pandas as pd

class SavTool(pd.DataFrame):
    def __init__(self, path):
        pd.DataFrame.__init__(self, data=loader.Loader(path).data)
        self._path = path
        self._meta_dict = loader.Loader(path).dict

    @property
    def path(self):
        return self._path

    @property
    def meta_dict(self):
        return self._meta_dict

    def update_meta_dict(self, **kwargs):
        self._meta_dict.update(kwargs)
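With this version the snippet from the question behaves as intended, because every access returns the same stored dictionary:

df = SavTool("somepath")
df.meta_dict["new_key"] = "new_value"
print df.meta_dict["new_key"]   # prints "new_value"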
Another way to just cache the variable is by using hasattr:
@property
def meta_dict(self):
    if not hasattr(self, "_meta_dict"):
        self._meta_dict = loader.Loader(self._path).dict
    return self._meta_dict
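For what it's worth, Python 3.8+ bakes exactly this compute-once-then-store pattern into the standard library as functools.cached_property (a sketch under the same assumed loader interface):

from functools import cached_property

class SavTool(pd.DataFrame):
    ...  # __init__ as above

    @cached_property
    def meta_dict(self):
        # computed on first access, then stored on the instance
        return loader.Loader(self._path).dict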
I'm writing a GUI library, and I'd like to let the programmer provide meta-information about their program which I can use to fine-tune the GUI. I was planning to use function decorators for this purpose, for example like this:
class App:
    @Useraction(description='close the program', hotkey='ctrl+q')
    def quit(self):
        sys.exit()
The problem is that this information needs to be bound to the respective class. For example, if the program is an image editor, it might have an Image class which provides some more Useractions:
class Image:
    @Useraction(description='invert the colors')
    def invert_colors(self):
        ...
However, since the concept of unbound methods was removed in Python 3, there doesn't seem to be a way to find a function's defining class. (I found this old answer, but that doesn't work in a decorator.)
So, since it looks like decorators aren't going to work, what would be the best way to do this? I'd like to avoid having code like
class App:
    def quit(self):
        sys.exit()

Useraction(App.quit, description='close the program', hotkey='ctrl+q')
if at all possible.
For completeness' sake, the @Useraction decorator would look somewhat like this:
from collections import defaultdict

class_metadata = defaultdict(dict)

def Useraction(**meta):
    def wrap(f):
        cls = get_defining_class(f)  # <- the missing piece
        class_metadata[cls][f] = meta
        return f
    return wrap
You are using decorators to add metadata to methods. That is fine. It can be done, e.g., this way:
def user_action(description):
    def decorate(func):
        func.user_action = {'description': description}
        return func
    return decorate
Now you want to collect that data and store it in a global dictionary in the form class_metadata[cls][f] = meta. For that, you need to find all decorated methods and their classes.
The simplest way to do that is probably using metaclasses. In a metaclass, you can define what happens when a class is created. In this case, go through all methods of the class, find the decorated ones and store them in the dictionary:
import collections

class UserActionMeta(type):
    user_action_meta_data = collections.defaultdict(dict)

    def __new__(cls, name, bases, attrs):
        rtn = type.__new__(cls, name, bases, attrs)
        for attr in attrs.values():
            if hasattr(attr, 'user_action'):
                UserActionMeta.user_action_meta_data[rtn][attr] = attr.user_action
        return rtn
I have put the global dictionary user_action_meta_data in the metaclass just because it felt logical; it can live anywhere.
Now, just use that in any class:
class X(metaclass=UserActionMeta):
    @user_action('Exit the application')
    def exit(self):
        pass
Static UserActionMeta.user_action_meta_data now contains the data you want:
defaultdict(<class 'dict'>, {<class '__main__.X'>: {<function exit at 0x00000000029F36C8>: {'description': 'Exit the application'}}})
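A consumer (the GUI layer, say) could then walk the collected metadata along these lines:

for cls, actions in UserActionMeta.user_action_meta_data.items():
    for func, meta in actions.items():
        print(cls.__name__, func.__name__, meta['description'])
# X exit Exit the application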
I've found a way to make decorators work with the inspect module, but it's not a great solution, so I'm still open to better suggestions.
Basically, what I'm doing is traversing the interpreter stack until I find the class body that is currently being executed. Since no class object exists at that time, I extract the class's qualname and module instead.
import inspect

def get_current_class():
    """
    Returns the name of the current module and the name of the class that is
    currently being created. Has to be called in class-level code, for example:

    def deco(f):
        print(get_current_class())
        return f

    def deco2(arg):
        def wrap(f):
            print(get_current_class())
            return f
        return wrap

    class Foo:
        print(get_current_class())

        @deco
        def f(self):
            pass

        @deco2('foobar')
        def f2(self):
            pass
    """
    frame = inspect.currentframe()
    while True:
        frame = frame.f_back
        if '__module__' in frame.f_locals:
            break
    dict_ = frame.f_locals
    cls = (dict_['__module__'], dict_['__qualname__'])
    return cls
Then in a sort of post-processing step, I use the module and class names to find the actual class object.
import sys

def postprocess():
    global class_metadata

    def findclass(module, qualname):
        scope = sys.modules[module]
        for name in qualname.split('.'):
            scope = getattr(scope, name)
        return scope

    class_metadata = {findclass(cls[0], cls[1]): meta
                      for cls, meta in class_metadata.items()}
The problem with this solution is the delayed class lookup. If classes are overwritten or deleted, the post-processing step will find the wrong class or fail altogether. Example:
class C:
    @Useraction(hotkey='ctrl+f')
    def f(self):
        print('f')

class C:
    pass

postprocess()
I make use of PyCLIPS to integrate CLIPS into Python. Python methods are registered in CLIPS using clips.RegisterPythonFunction(method, optional-name). Since I have to register several functions and want to keep the code clear, I am looking for a decorator to do the registration.
This is how it is done now:
class CLIPS(object):
    ...
    def __init__(self, data):
        self.data = data
        clips.RegisterPythonFunction(self.pyprint, "pyprint")

    def pyprint(self, value):
        print self.data, "".join(map(str, value))
and this is how I would like to do it:
class CLIPS(object):
    ...
    def __init__(self, data):
        self.data = data
        # clips.RegisterPythonFunction(self.pyprint, "pyprint")

    @clips_callable
    def pyprint(self, value):
        print self.data, "".join(map(str, value))
It keeps defining the methods and registering them in one place.
NB: I use this in a multiprocessing set-up in which the CLIPS process runs in a separate process, like this:
import clips
import multiprocessing

class CLIPS(object):
    def __init__(self, data):
        self.environment = clips.Environment()
        self.data = data
        clips.RegisterPythonFunction(self.pyprint, "pyprint")
        self.environment.Load("test.clp")

    def Run(self, cycles=None):
        self.environment.Reset()
        self.environment.Run()

    def pyprint(self, value):
        print self.data, "".join(map(str, value))

class CLIPSProcess(multiprocessing.Process):
    def run(self):
        p = multiprocessing.current_process()
        self.c = CLIPS("%s %s" % (p.name, p.pid))
        self.c.Run()

if __name__ == "__main__":
    p = multiprocessing.current_process()
    c = CLIPS("%s %s" % (p.name, p.pid))
    c.Run()
    # Now run CLIPS from another process
    cp = CLIPSProcess()
    cp.start()
It should be fairly simple to do it like this:
# mock clips for testing
class clips:
    @staticmethod
    def RegisterPythonFunction(func, name):
        print "register: ", func, name

def clips_callable(fnc):
    clips.RegisterPythonFunction(fnc, fnc.__name__)
    return fnc

@clips_callable
def test(self):
    print "test"

test()
Edit: if used on a method of a class, it will register the unbound method only, so calling it will fail unless an instance of the class is passed as the first argument. This is therefore usable for registering module-level functions, but not methods. To register methods, you'll have to do it in __init__.
It seems that the elegant solution proposed by mata wouldn't work, because the CLIPS environment has to be initialized before methods can be registered to it.
I'm not a Python expert, but from some searching it seems that a combination of inspect.getmembers() and hasattr() will do the trick: loop over all members of your class and register the ones that have the clips_callable attribute with CLIPS.
Got it working now by using a decorator to set an attribute on each method to be registered in CLIPS, and using inspect in __init__ to fetch those methods and register them. I could have used a naming convention instead, but I prefer a decorator to make the registration more explicit. It turns out Python functions can be registered before a CLIPS environment is initialized. This is what I have done:
import inspect
from functools import wraps

def clips_callable(func):
    @wraps(func)
    def wrapper(*__args, **__kw):
        return func(*__args, **__kw)
    setattr(wrapper, "clips_callable", True)
    return wrapper

class CLIPS(object):
    def __init__(self, data):
        members = inspect.getmembers(self, inspect.ismethod)
        for name, method in members:
            try:
                if method.clips_callable:
                    clips.RegisterPythonFunction(method, name)
            except AttributeError:
                pass  # method is not decorated with @clips_callable

    ...

    @clips_callable
    def pyprint(self, value):
        print self.data, "".join(map(str, value))
For completeness, the CLIPS code in test.clp is included below.
(defrule MAIN::start-me-up
  =>
  (python-call pyprint "Hello world")
)
If somebody knows a more elegant approach, please let me know.