Create object instances at runtime in Python

How do I create object instances at runtime in Python?
Say I have two classes:
class MyClassA(object):
    def __init__(self, prop):
        self.prop = prop
        self.name = "CLASS A"

    def println(self):
        print(self.name)

class MyClassB(object):
    def __init__(self, prop):
        self.prop = prop
        self.name = "CLASS B"

    def println(self):
        print(self.name)
and a dict:
{'a': MyClassA, 'b': MyClassB}
How can I dynamically create an instance of one of my two classes, depending on whether I choose 'a' or 'b'?
Something like this:
def somefunc(key):
    if key == 'a': return MyClassA(...)
    if key == 'b': return MyClassB(...)
so that calling somefunc('b').println() prints "CLASS B", but in a more elegant and dynamic way (say I add more classes to the dict at runtime).

You might create a dispatcher, which is a dictionary with your keys mapping to classes.
dispatch = {
    "a": MyClassA,
    "b": MyClassB,
}
instance = dispatch[which_one]()  # Notice the second pair of parens here!
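Since both constructors in the question take a prop argument, any constructor arguments can simply be forwarded through the same lookup. A minimal sketch along the lines of the question's pseudocode (somefunc is just an illustrative name):
def somefunc(key, *args, **kwargs):
    # look the class up in the dispatch dict and call it with the given arguments
    return dispatch[key](*args, **kwargs)

somefunc('b', 'some prop').println()  # prints: CLASS B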

You create a class instance by calling the class. Your dict {'a': MyClassA, 'b': MyClassB} maps keys to classes, so you need only look up the class and call it:
classes['a']()
But I get the sense you want something more specific. Here's a subclass of dict that, when called with a key, looks up the associated item and calls it:
>>> class ClassMap(dict):
...     def __call__(self, key, *args, **kwargs):
...         return self.__getitem__(key)(*args, **kwargs)
...
>>> c = ClassMap()
>>> c['a'] = A
>>> c['b'] = B
>>> c('a')
<__main__.A object at 0x1004cc7d0>
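Applied to the classes from the question (assuming the MyClassA/MyClassB definitions above), the same idea would look roughly like this:
>>> c = ClassMap()
>>> c['a'] = MyClassA
>>> c['b'] = MyClassB
>>> c('b', 'some prop').println()
CLASS B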


Serialize and deserialize objects from user-defined classes

Suppose I have a class hierarchy like this:
class SerializableWidget(object):
    # some code

class WidgetA(SerializableWidget):
    # some code

class WidgetB(SerializableWidget):
    # some code
I want to be able to serialize instances of WidgetA and WidgetB (and potentially other widgets) to text files as JSON. Then I want to be able to deserialize those, without knowing their specific class beforehand:
some_widget = deserialize_from_file(file_path)  # pseudocode, doesn't have to be exactly a method like this
and some_widget needs to be constructed from the precise subclass of SerializableWidget. How do I do this? What methods exactly do I need to override/implement in each of the classes of my hierarchy?
Assume all fields of the above classes are primitive types. How do I override some __to_json__ and __from_json__ methods, something like that?
You can solve this in many ways. One example is to use the object_hook and default parameters of json.load and json.dump respectively.
All you need is to store the class name together with the serialized version of the object; when loading, you then use a mapping of which class goes with which name.
The example below uses a dispatcher class decorator to store the class name and object when serializing, and look it up later when deserializing. All you need is an _as_dict method on each class to convert the data to a dict:
import json

@dispatcher
class Parent(object):
    def __init__(self, name):
        self.name = name

    def _as_dict(self):
        return {'name': self.name}

@dispatcher
class Child1(Parent):
    def __init__(self, name, n=0):
        super().__init__(name)
        self.n = n

    def _as_dict(self):
        d = super()._as_dict()
        d['n'] = self.n
        return d

@dispatcher
class Child2(Parent):
    def __init__(self, name, k='ok'):
        super().__init__(name)
        self.k = k

    def _as_dict(self):
        d = super()._as_dict()
        d['k'] = self.k
        return d
Now for the tests. First, let's create a list with three objects of different types.
>>> obj = [Parent('foo'), Child1('bar', 15), Child2('baz', 'works')]
Serializing it will yield the data with the class name in each object:
>>> s = json.dumps(obj, default=dispatcher.encoder_default)
>>> print(s)
[
{"__class__": "Parent", "name": "foo"},
{"__class__": "Child1", "name": "bar", "n": 15},
{"__class__": "Child2", "name": "baz", "k": "works"}
]
And loading it back generates the correct objects:
obj2 = json.loads(s, object_hook=dispatcher.decoder_hook)
print(obj2)
[
<__main__.Parent object at 0x7fb6cd561cf8>,
<__main__.Child1 object at 0x7fb6cd561d68>,
<__main__.Child2 object at 0x7fb6cd561e10>
]
Finally, here's the implementation of dispatcher:
class _Dispatcher:
def __init__(self, classname_key='__class__'):
self._key = classname_key
self._classes = {} # to keep a reference to the classes used
def __call__(self, class_): # decorate a class
self._classes[class_.__name__] = class_
return class_
def decoder_hook(self, d):
classname = d.pop(self._key, None)
if classname:
return self._classes[classname](**d)
return d
def encoder_default(self, obj):
d = obj._as_dict()
d[self._key] = type(obj).__name__
return d
dispatcher = _Dispatcher()
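Since the question mentions text files: the same hooks also work with json.dump and json.load when writing to or reading from a file. A minimal sketch (widgets.json is just an example path):
with open('widgets.json', 'w') as f:
    json.dump(obj, f, default=dispatcher.encoder_default)

with open('widgets.json') as f:
    obj2 = json.load(f, object_hook=dispatcher.decoder_hook)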
I really liked @nosklo's answer, but I wanted to customize the property value under which the model type gets saved, so I extended his code a little to add a sub-annotation.
(I know this isn't directly related to the question, but you can use this to serialize to JSON too, since it produces dict objects. Note that your base class must use the @dataclass decorator to serialize correctly; otherwise you could adjust this code to define an _as_dict method like in @nosklo's answer.)
data.csv:
model_type, prop1
sub1, testfor1
sub2, testfor2
test.py:
import csv
from abc import ABC
from dataclasses import dataclass

from polymorphic import polymorphic

@polymorphic(keyname="model_type")
@dataclass
class BaseModel(ABC):
    prop1: str

@polymorphic.subtype_when_(keyval="sub1")
class SubModel1(BaseModel):
    pass

@polymorphic.subtype_when_(keyval="sub2")
class SubModel2(BaseModel):
    pass

with open('data.csv') as csvfile:
    reader = csv.DictReader(csvfile, skipinitialspace=True)
    for row_data_dict in reader:
        price_req = BaseModel.deserialize(row_data_dict)
        print(price_req, '\n\tre-serialized: ', price_req.serialize())
polymorphic.py:
import dataclasses
import functools
from abc import ABC
from typing import Type

# https://stackoverflow.com/a/51976115
class _Polymorphic:
    def __init__(self, keyname='__class__'):
        self._key = keyname
        self._class_mapping = {}

    def __call__(self, abc: Type[ABC]):
        functools.update_wrapper(self, abc)
        setattr(abc, '_register_subtype', self._register_subtype)
        setattr(abc, 'serialize', lambda self_subclass: self.serialize(self_subclass))
        setattr(abc, 'deserialize', self.deserialize)
        return abc

    def _register_subtype(self, keyval, cls):
        self._class_mapping[keyval] = cls

    def serialize(self, self_subclass) -> dict:
        my_dict = dataclasses.asdict(self_subclass)
        my_dict[self._key] = next(keyval for keyval, cls in self._class_mapping.items() if cls == type(self_subclass))
        return my_dict

    def deserialize(self, data: dict):
        classname = data.pop(self._key, None)
        if classname:
            return self._class_mapping[classname](**data)
        raise ValueError(f'Invalid data: {self._key} was not found or it referred to an unrecognized class')

    @staticmethod
    def subtype_when_(*, keyval: str):
        def register_subtype_for(_cls):
            nonlocal keyval
            if not keyval:
                keyval = _cls.__name__
            _cls._register_subtype(keyval, _cls)

            @functools.wraps(_cls)
            def construct_original_subclass(*args, **kwargs):
                return _cls(*args, **kwargs)

            return construct_original_subclass
        return register_subtype_for

polymorphic = _Polymorphic
Sample console output:
SubModel1(prop1='testfor1')
    re-serialized:  {'prop1': 'testfor1', 'model_type': 'sub1'}
SubModel2(prop1='testfor2')
    re-serialized:  {'prop1': 'testfor2', 'model_type': 'sub2'}
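One note on the design: subtype_when_ falls back to the class name when keyval is falsy, so a hypothetical extra subtype could be registered under its own class name (its CSV rows would then need model_type set to "SubModel3"):
@polymorphic.subtype_when_(keyval='')
class SubModel3(BaseModel):
    pass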

multiple python class inheritance

I am trying to understand Python's class inheritance and I am having some trouble figuring out how to do the following:
How can I inherit a method from a class conditional on the child's input?
I have tried the following code below without much success.
class A(object):
    def __init__(self, path):
        self.path = path

    def something(self):
        print("Function %s" % self.path)

class B(object):
    def __init__(self, path):
        self.path = path
        self.c = 'something'

    def something(self):
        print('%s function with %s' % (self.path, self.c))

class C(A, B):
    def __init__(self, path):
        # super(C, self).__init__(path)
        if path == 'A':
            A.__init__(self, path)
        if path == 'B':
            B.__init__(self, path)
        print('class: %s' % self.path)

if __name__ == '__main__':
    C('A')
    out = C('B')
    out.something()
I get the following output:
class: A
class: B
Function B
While I would like to see:
class: A
class: B
B function with something
I guess the reason why A.something() is used (instead of B.something()) has to do with Python's MRO.
Calling __init__ on either parent class does not change the inheritance structure of your classes, no. You are only changing what initialiser method is run in addition to C.__init__ when an instance is created. C inherits from both A and B, and all methods of B are shadowed by those on A due to the order of inheritance.
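You can see that lookup order directly; for class C(A, B) the MRO is:
>>> C.__mro__
(<class '__main__.C'>, <class '__main__.A'>, <class '__main__.B'>, <class 'object'>)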
If you need to alter class inheritance based on a value in the constructor, create two separate classes, with different structures. Then provide a different callable as the API to create an instance:
class CA(A):
    # just inherit __init__, no need to override
    pass

class CB(B):
    # just inherit __init__, no need to override
    pass

def C(path):
    # create an instance of a class based on the value of path
    class_map = {'A': CA, 'B': CB}
    return class_map[path](path)
The user of your API still has the name C() to call; C('A') produces an instance of a different class from C('B'), but they both implement the same interface, so this doesn't matter to the caller.
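A quick check of this first option, using the A and B classes from the question:
>>> C('A').something()
Function A
>>> C('B').something()
B function with something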
If you have to have a common 'C' class to use in isinstance() or issubclass() tests, you could mix one in, and use the __new__ method to override what subclass is returned:
class C:
    def __new__(cls, path):
        if cls is not C:
            # for inherited classes, not C itself
            return super().__new__(cls)
        class_map = {'A': CA, 'B': CB}
        cls = class_map[path]
        # this is a subclass of C, so __init__ will be called on it
        return cls.__new__(cls, path)

class CA(C, A):
    # just inherit __init__, no need to override
    pass

class CB(C, B):
    # just inherit __init__, no need to override
    pass
__new__ is called to construct the new instance object; if the __new__ method returns an instance of the class (or a subclass thereof) then __init__ will automatically be called on that new instance object. This is why C.__new__() returns the result of CA.__new__() or CB.__new__(); __init__ is going to be called for you.
Demo of the latter:
>>> C('A').something()
Function A
>>> C('B').something()
B function with something
>>> isinstance(C('A'), C)
True
>>> isinstance(C('B'), C)
True
>>> isinstance(C('A'), A)
True
>>> isinstance(C('A'), B)
False
If neither of these options is workable for your specific use case, you'd have to add more routing in a new something() implementation on C, which then calls either A.something(self) or B.something(self) based on self.path. This becomes cumbersome really quickly when you have to do it for every single method, but a decorator could help there:
from functools import wraps

def pathrouted(f):
    @wraps(f)
    def wrapped(self, *args, **kwargs):
        # call the wrapped version first, ignore return value, in case this
        # sets self.path or has other side effects
        f(self, *args, **kwargs)
        # then pick the class from the MRO as named by path, and call the
        # original version
        cls = next(c for c in type(self).__mro__ if c.__name__ == self.path)
        return getattr(cls, f.__name__)(self, *args, **kwargs)
    return wrapped
then use that on empty methods on your class:
class C(A, B):
    @pathrouted
    def __init__(self, path):
        self.path = path
        # either A.__init__ or B.__init__ will be called next

    @pathrouted
    def something(self):
        pass  # doesn't matter, A.something or B.something is called too
This is, however, becoming very unpythonic and ugly.
While Martijn's answer is (as usual) close to perfect, I'd just like to point out that from a design POV, inheritance is the wrong tool here.
Remember that implementation inheritance is actually a static and somewhat restricted kind of composition/delegation, so as soon as you want something more dynamic, the proper design is to eschew inheritance and go for full composition/delegation, canonical examples being the State and the Strategy patterns. Applied to your example, this might look something like:
class C(object):
    def __init__(self, strategy):
        self.strategy = strategy

    def something(self):
        return self.strategy.something(self)

class AStrategy(object):
    def something(self, owner):
        print("Function A")

class BStrategy(object):
    def __init__(self):
        self.c = "something"

    def something(self, owner):
        print("B function with %s" % self.c)

if __name__ == '__main__':
    a = C(AStrategy())
    a.something()
    b = C(BStrategy())
    b.something()
Then if you need to allow the user to specify the strategy by name (as a string), you can add the factory pattern to the solution:
STRATEGIES = {
    "A": AStrategy,
    "B": BStrategy,
}

def cfactory(strategy_name):
    try:
        strategy_class = STRATEGIES[strategy_name]
    except KeyError:
        raise ValueError("'%s' is not a valid strategy" % strategy_name)
    return C(strategy_class())

if __name__ == '__main__':
    a = cfactory("A")
    a.something()
    b = cfactory("B")
    b.something()
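Either way, running the example should print:
Function A
B function with something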
Martijn's answer explained how to choose an object inheriting from one of two classes. Python also makes it easy to forward a method to a different class:
>>> class C:
...     parents = {'A': A, 'B': B}
...     def __init__(self, path):
...         self.parent = C.parents[path]
...         self.parent.__init__(self, path)  # forward object initialization
...     def something(self):
...         self.parent.something(self)  # forward something method
...
>>> ca = C('A')
>>> cb = C('B')
>>> ca.something()
Function A
>>> cb.something()
B function with something
>>> ca.path
'A'
>>> cb.path
'B'
>>> cb.c
'something'
>>> ca.c
Traceback (most recent call last):
File "<pyshell#46>", line 1, in <module>
ca.c
AttributeError: 'C' object has no attribute 'c'
>>>
But here class C does not inherit from A or B:
>>> C.__mro__
(<class '__main__.C'>, <class 'object'>)
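This also means that isinstance checks against A or B fail:
>>> isinstance(ca, A)
False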
Below is my original solution using monkey patching:
>>> class C:
...     parents = {'A': A, 'B': B}
...     def __init__(self, path):
...         parent = C.parents[path]
...         parent.__init__(self, path)  # forward object initialization
...         self.something = lambda: parent.something(self)  # "borrow" something method
...
It avoids the parent attribute in the C class, but is less readable...

Call constructor of cls object in Python

I am trying to call the constructor of a class object in Python. I managed to get it to work using the following few lines:
obj = cls.__new__(cls)
n = [...]  # list of attribute names
v = [...]  # list of attribute values
for name, value in zip(n, v):
    setattr(obj, name, value)
I was wondering if there is a way to directly insert the attribute value + name pairs into the constructor, because the arguments are just ignored if I call the following:
obj = cls.__new__(cls, v)
P.S.: I am using Python 3.
The class looks similar to this:
class BaseClass:
    def __init__(self, basic_attribute=1):
        self.basic_attribute = basic_attribute

class InheritingClass(BaseClass):
    def __init__(self, basic_attribute, another_attribute=None):
        super().__init__(basic_attribute=basic_attribute)
        self.another_attribute = another_attribute
So nothing special there
I was wondering if there is a way to directly insert the attribute value + name pairs into the constructor
Please don't do that; it would be an anti-pattern. Instead, use the __init__ method to set the values. The __new__ method is what allocates and returns the object instance, obj in your case.
So you would do better to set the attributes inside your __init__:
k = ['a', 'b', 'c']
v = [1, 2, 3]
d = dict(zip(k, v))

class C:
    def __init__(self, d):
        for key in d:
            setattr(self, key, d[key])

ci = C(d)
print(ci.a)  # 1
I used a dict as the __init__ parameter, created with zip.
__init__, not __new__, is the constructor of a Python class. Refer to "Python's use of __new__ and __init__" for more information.
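To illustrate the split between the two methods, here is a minimal, self-contained sketch (the Demo class and the prints are purely illustrative):
class Demo:
    def __new__(cls, *args, **kwargs):
        print('__new__: allocating the instance')
        return super().__new__(cls)

    def __init__(self, x):
        print('__init__: initializing the instance')
        self.x = x

d = Demo(42)  # prints the __new__ line, then the __init__ line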
To add: if you want to store arbitrary attributes on your instances, you can use dict.update like so:
class BaseClass:
    def __init__(self, basic_attribute=1, **kw):
        self.basic_attribute = basic_attribute
        self.__dict__.update(**kw)

class InheritingClass(BaseClass):
    def __init__(self, basic_attribute, another_attribute=None, **kw):
        super().__init__(basic_attribute=basic_attribute, **kw)
        self.another_attribute = another_attribute
Then:
ic = InheritingClass('hi', a=1, b=20)
print(ic.a, ic.b) # prints 1, 20
To answer the question "How do you call the constructor on a class object?" you need to look at the comments from Amadan way back on Aug 24, 2016 at 6:41.
The answer:
new_obj = cls()
Here's some example code that illustrates the point:
class C:
    @classmethod
    def c(cls):
        return cls()

c = C.c()
print(c)  # displays <__main__.C object at 0x10ef16a90>

class D(C):
    pass

d = D.c()
print(d)  # displays <__main__.D object at 0x10ef16370>
And so we see that you can instantiate an object from the cls object.
Now if we combine Amadan's comment with prosti's cool code for setting attributes, we get this:
class ObjectFactory:
    @classmethod
    def new(cls, **kwargs):
        return cls(**kwargs)

    def __init__(self, **kwargs):
        for name in kwargs:
            setattr(self, name, kwargs[name])

class Person(ObjectFactory):
    pass

person = Person.new(first="John", last="Doe")
print(person)           # <__main__.Person object at 0x10fe49ff0>
print(person.__dict__)  # {'first': 'John', 'last': 'Doe'}

Dynamically adding attributes to __init__ method in python

Is there any way to define a function that can add, at some later point in the program, new attributes to an already existing __init__ method? For example, below I created a class for a family tree. Each instance would create a root.
class FamilyTree:
    def __init__(self, rootObj):
        self.key = rootObj
I want each new root instance to be able to have a specific number of child attributes. For example:
self.child1 = 'B'
self.child2 = 'C'
self.child3 = 'D'
Since every new root instance can have a varying number of children, how can I add new attributes through the __init__ method dynamically?
A possible way to automate this is as follows:
class FamilyTree:
    def __init__(self, rootObj, child_names=None):
        self.key = rootObj
        self.child_nbr = 0
        if child_names is not None:
            # child_names is a list of child attribute values
            for i, child in enumerate(child_names, 1):
                setattr(self, 'child{0}'.format(i), child)
            self.child_nbr = len(child_names)

    def add_child(self, *child_names):
        for name in child_names:
            self.child_nbr += 1
            setattr(self, 'child{0}'.format(self.child_nbr), name)
usage:
>>> f = FamilyTree('Family1', ['B', 'C', 'D'])
>>> print(f.child1, f.child2, f.child3)
>>> i = 0
>>> while i < f.child_nbr:
...     print(getattr(f, 'child{0}'.format(i + 1)))
...     i += 1
>>> f.add_child('E')
>>> print(f.child4)
>>> f.add_child('F', 'G')
>>> print(f.child5, f.child6)
I guess what you really (should) want is a list of children:
class FamilyTree:
    def __init__(self, rootObj):
        self.key = rootObj
        self.children = []

    def add_children(self, *new_children):
        self.children.extend(new_children)
Now you can use the add_children method to add any number of children to the list at once, or you could simply directly access the children instance member list as well:
tree = FamilyTree("whatever a rootObj is...")
tree.add_children("Alice", "Bob", "Claudia", "Dave")
tree.children.append("Eva")
tree.children += ["Fred", "Gina", "Herbert"]
print(tree.children)
# Output: ["Alice", "Bob", "Claudia", "Dave", "Eva", "Fred", "Gina", "Herbert"]
class FamilyTree:
    def __init__(self, rootObj):
        self.key = rootObj

child1 = 'B'
child2 = 'C'
child3 = 'D'

objold = FamilyTree('keyold')

FamilyTree.child1 = child1
FamilyTree.child2 = child2
FamilyTree.child3 = child3

objnew = FamilyTree('keynew')

print objnew.key, objnew.child1, objnew.child2, objnew.child3
print objold.key, objold.child1, objold.child2, objold.child3
'''
keynew B C D
keyold B C D
'''
Maybe I am understanding this wrong, but you don't add attributes to the __init__ method; you add attributes to the instance of the class. 'self' refers to the instance of the class.
To add attributes to a class at runtime, look at Abhra's example.
To add attributes to a specific instance of a class, look here (sorry Abhra, stole your example):
class FamilyTree:
    def __init__(self, rootObj):
        self.key = rootObj

child1 = 'B'
child2 = 'C'
child3 = 'D'

objold = FamilyTree('keyold')

objold.child1 = child1
objold.child2 = child2
objold.child3 = child3

objnew = FamilyTree('keynew')

print objnew.key, objnew.child1, objnew.child2, objnew.child3
print objold.key, objold.child1, objold.child2, objold.child3
'''
keynew AttributeError: 'FamilyTree' object has no attribute 'child1' (same error for the others)
keyold B C D
'''

Create a subclass object with initialized parent object

I have a BaseEntity class, which defines a bunch (a lot) of non-required properties and has most of the functionality. I extend this class in two others, which add some extra methods and initialize one required property.
class BaseEntity(object):
    def __init__(self, request_url):
        self.clearAllFilters()
        super(BaseEntity, self).__init__(request_url=request_url)

    @property
    def filter1(self):
        return self._filter1

    @filter1.setter
    def filter1(self, some_value):
        self._filter1 = some_value
    ...

    def clearAllFilters(self):
        self.filter1 = None
        self.filter2 = None
        ...

    def someCommonAction1(self):
        ...

class DefinedEntity1(BaseEntity):
    def __init__(self):
        super(DefinedEntity1, self).__init__(request_url="someUrl1")

    def foo(self):
        ...

class DefinedEntity2(BaseEntity):
    def __init__(self):
        super(DefinedEntity2, self).__init__(request_url="someUrl2")

    def bar(self):
        ...
What I would like is to initialize a BaseEntity object once, with all the filters specified, and then use it to create each of the DefinedEntities, i.e.
baseObject = BaseEntity(None)
baseObject.filter1 = "boo"
baseObject.filter2 = "moo"
entity1 = baseObject.create(DefinedEntity1)
Looking for Pythonic ideas, since I've just switched from a statically typed language and am still trying to grasp the power of Python.
One way to do it:
import copy

class A(object):
    def __init__(self, sth, blah):
        self.sth = sth
        self.blah = blah

    def do_sth(self):
        print(self.sth, self.blah)

class B(A):
    def __init__(self, param):
        self.param = param

    def do_sth(self):
        print(self.param, self.sth, self.blah)

a = A("one", "two")
almost_b = copy.deepcopy(a)
almost_b.__class__ = B
B.__init__(almost_b, "three")
almost_b.do_sth()  # it would print "three one two"
Keep in mind that Python is an extremely open language with a lot of dynamic modification possibilities, and it is better not to abuse them. From a clean code point of view I would just use a plain old call to the superconstructor.
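For comparison, the plain approach would construct each subclass directly and set the filters on the new instance afterwards (a sketch using the classes from the question):
entity1 = DefinedEntity1()  # runs BaseEntity.__init__ through the normal super() chain
entity1.filter1 = "boo"
entity1.filter2 = "moo"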
I had the same problem as the OP and was able to use the idea from Radosław Łazarz above of explicitly setting the __class__ attribute of the object to the subclass, but without the deep copy:
class A:
    def __init__(a): pass
    def amethod(a): return 'aresult'

class B(A):
    def __init__(b): pass
    def bmethod(self): return 'bresult'

a = A()
print(f"{a} of class {a.__class__} is{'' if isinstance(a, B) else ' not'} an instance of B")
a.__class__ = B  # here is where the magic happens!
print(f"{a} of class {a.__class__} is{'' if isinstance(a, B) else ' not'} an instance of B")
print(f"a.amethod()={a.amethod()} a.bmethod()={a.bmethod()}")
Output:
<__main__.A object at 0x00000169F74DBE88> of class <class '__main__.A'> is not an instance of B
<__main__.B object at 0x00000169F74DBE88> of class <class '__main__.B'> is an instance of B
a.amethod()=aresult a.bmethod()=bresult
