Avoid specifying all arguments in a subclass - python

I have a class:
class A(object):
    def __init__(self, a, b, c, d, e, f, g, ..........., x, y, z):
        # do some init stuff
And I have a subclass which needs one extra arg (the last W)
class B(A):
    def __init__(self, a, b, c, d, e, f, g, ..........., x, y, z, W):
        A.__init__(self, a, b, c, d, e, f, g, ..........., x, y, z)
        self.__W = W
It seems dumb to write all this boilerplate, e.g. passing every argument from B's constructor through to the inner call to A's constructor, since then every change to A's constructor must be applied in two other places in B's code.
I am guessing python has some idiom to handle such cases which I am unaware of. Can you point me in the right direction?
My best hunch is to have a sort of copy constructor for A and then change B's code to:
class B(A):
    def __init__(self, instanceOfA, W):
        A.__copy_ctor__(self, instanceOfA)
        self.__W = W
This would suit my needs since I always create the subclass when given an instance of the parent class, though I am not sure whether it's possible...
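For what it's worth, the copy-constructor hunch can be approximated without any special method: an instance's state normally lives in its __dict__, so B can copy it wholesale. A minimal sketch of that idea (note it bypasses whatever logic A.__init__ performs):

class B(A):
    def __init__(self, instanceOfA, W):
        # copy A's instance state wholesale instead of re-passing every argument
        self.__dict__.update(instanceOfA.__dict__)
        self.__W = W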

Considering that arguments could be passed either by name or by position, I'd code:
class B(A):
    def __init__(self, *a, **k):
        if 'W' in k:
            w = k.pop('W')
        else:
            a = list(a)    # args arrive as a tuple, so copy to a list first
            w = a.pop()    # take W as the last positional argument
        A.__init__(self, *a, **k)
        self._W = w
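Both call styles then work; a quick check with a two-parameter stand-in for A (an illustrative assumption, since the question's A takes many more arguments):

class A(object):
    def __init__(self, x, y):   # stand-in for the question's many-argument A
        self.x, self.y = x, y

class B(A):
    def __init__(self, *a, **k):
        if 'W' in k:
            w = k.pop('W')
        else:
            a = list(a)
            w = a.pop()
        A.__init__(self, *a, **k)
        self._W = w

b1 = B(1, 2, 3)     # W passed positionally, as the last argument
b2 = B(1, 2, W=3)   # W passed by name
print b1._W, b2._W  # -> 3 3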

Edit: based on Matt's suggestion, and to address gnibbler's concern about a positional-argument approach, you might check that the additional subclass-specific argument is actually being specified, similar to Alex's answer:
class B(A):
    def __init__(self, *args, **kwargs):
        try:
            self._w = kwargs.pop('w')
        except KeyError:
            pass    # 'w' is optional here; re-raise instead to make it required
        super(B, self).__init__(*args, **kwargs)
>>> b = B(1,2,w=3)
>>> b.a
1
>>> b.b
2
>>> b._w
3
Original answer:
Same idea as Matt's answer, using super() instead.
Use super() to call superclass's __init__() method, then continue initialising the subclass:
class A(object):
    def __init__(self, a, b):
        self.a = a
        self.b = b

class B(A):
    def __init__(self, w, *args):
        super(B, self).__init__(*args)
        self.w = w
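With this signature the subclass-specific argument comes first; a quick usage check with the classes above:

b = B(3, 1, 2)          # w=3; the remaining 1, 2 are forwarded to A.__init__
print b.a, b.b, b.w     # -> 1 2 3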

In situations where some or all of the arguments passed to __init__ have default values, it can be useful to avoid repeating the __init__ method signature in subclasses.
In these cases, __init__ can pass any extra arguments to another method, which subclasses can override:
class A(object):
    def __init__(self, a=1, b=2, c=3, d=4, *args, **kwargs):
        self.a = a
        self.b = b
        # …
        self._init_extra(*args, **kwargs)

    def _init_extra(self):
        """
        Subclasses can override this method to support extra
        __init__ arguments.
        """
        pass

class B(A):
    def _init_extra(self, w):
        self.w = w
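B inherits A.__init__ unchanged; any argument beyond A's own parameters flows through to the subclass hook. A quick usage sketch:

b = B(a=10, w=5)   # a, b, c, d handled by A.__init__; w routed to B._init_extra
print b.a, b.w     # -> 10 5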

Are you wanting something like this?
class A(object):
    def __init__(self, a, b, c, d, e, f, g):
        # do stuff
        print a, d, g

class B(A):
    def __init__(self, *args):
        args = list(args)
        self.__W = args.pop()
        A.__init__(self, *args)
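For example, with seven arguments for A plus one extra for B:

b = B(1, 2, 3, 4, 5, 6, 7, 'w')   # prints: 1 4 7
# the trailing 'w' is stored as B's private attribute (name-mangled to _B__W)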

Related

How to avoid designing classes whose member functions depend on each other?

Let's say I have a class called Adder:
class adder(object):
    def __init__(self, a, b):
        self.a = a
        self.b = b
        self.result = None

    def perform_addition(self):
        self.result = self.a + self.b
        return self.result
If I instantiate this class:
myAdder = adder(1,2)
Then the value of myAdder.result depends on calling perform_addition() first; otherwise it will always remain None. In other words, there's a dependency on perform_addition() for the value of self.result. And if we extrapolate, a more complex class can have a chain of dependencies: i.e., you have to call functions A, B, and C before D, because they in turn populate the variables that the next function needs.
Is this bad class design? What is the remedy for it?
I think the above is an example of: https://en.wikipedia.org/wiki/Sequential_coupling
I think it all depends on what you want to do and how you want to go about it. The code you have is not necessarily bad if you want a static dependency on perform_addition() for the value of self.result. But if you want a dynamic dependency, then the code below is a good and simple approach: this way, when an object is created by instantiating the class with values a and b, self.result is computed automatically. You could also use more advanced tools like properties, decorators, and descriptors. Like I said, it all depends on what you want.
class adder(object):
    def __init__(self, a, b):
        self.a = a
        self.b = b
        self.result = self.perform_addition()

    def perform_addition(self):
        self.result = self.a + self.b
        return self.result
This would be a good case to make result a property instead, so that the addition is only performed when the result attribute is accessed:
class adder(object):
    def __init__(self, a, b):
        self.a = a
        self.b = b

    @property
    def result(self):
        return self.a + self.b

myAdder = adder(1, 2)
print(myAdder.result)
This outputs: 3
In case the result attribute is expected to be accessed multiple times and the calculation involved is expensive, you can save the result in an instance variable to avoid re-calculations:
class adder(object):
    def __init__(self, a, b):
        self.a = a
        self.b = b
        self._result = None

    @property
    def result(self):
        if self._result is None:
            self._result = self.a + self.b
        return self._result
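For what it's worth, on Python 3.8+ the same cache-on-first-access pattern is available ready-made as functools.cached_property; a minimal sketch of the equivalent class:

from functools import cached_property  # Python 3.8+

class adder(object):
    def __init__(self, a, b):
        self.a = a
        self.b = b

    @cached_property
    def result(self):
        # computed on first access, then cached on the instance
        return self.a + self.b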

Get arguments that an object's __init__ was called with

Is there a way to get an object's __init__ argument values in Python 2.7? I'm able to get the defaults through getargspec, but I would like to access the passed-in values.
import inspect

class AnObject(object):
    def __init__(self, kw='', *args, **kwargs):
        print 'Hello'

anobj = AnObject(kw='a keyword arg')
print inspect.getargspec(anobj.__init__)
Returns
Hello
ArgSpec(args=['self', 'kw'], varargs='args', keywords='kwargs', defaults=('',))
__init__ is treated no differently than any other function. So, like with any other function, its arguments are discarded once it returns -- unless you save them somewhere before that.
The standard approach is to save what you need later in attributes of the instance:
class Foo:
    def __init__(self, a, b, *args, **kwargs):
        self.a = a
        self.b = b
        <etc>
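If you want to capture everything __init__ was called with, rather than hand-picking attributes, one option is a decorator that records the fully bound call via inspect.getcallargs (available since Python 2.7). This is only a sketch; record_init_args and _init_args are made-up names, not a standard API:

import functools
import inspect

def record_init_args(cls):
    # hypothetical helper: wrap __init__ so each instance remembers
    # the exact arguments it was constructed with
    orig_init = cls.__init__

    @functools.wraps(orig_init)
    def wrapped(self, *args, **kwargs):
        # map argument names to the values actually passed (or defaulted)
        self._init_args = inspect.getcallargs(orig_init, self, *args, **kwargs)
        orig_init(self, *args, **kwargs)

    cls.__init__ = wrapped
    return cls

@record_init_args
class Foo(object):
    def __init__(self, a, b=2):
        self.a = a
        self.b = b

print(Foo(1)._init_args)  # {'self': <Foo object ...>, 'a': 1, 'b': 2}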
"Dataclasses" introduced in 3.7 streamline this process but require data annotations:
import dataclasses

@dataclasses.dataclass
class Foo:
    a: int
    b: str
is equivalent to:
class Foo:
    def __init__(self, a: int, b: str):
        self.a = a
        self.b = b
Though see Python decorator to automatically define __init__ variables for why this streamlining is not very useful in practice.
You can store them as attributes.
class AnObject(object):
    def __init__(self, kw='', *args, **kwargs):
        self.kw = kw
        self.args = args
        self.kwargs = kwargs
then just print them:
anobj = AnObject(kw='a keyword arg')
print anobj.kw
print anobj.args
print anobj.kwargs
If you want to see them all, you could take a look into its __dict__ attribute.
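For example, with the instance created above:

print anobj.__dict__
# -> {'kw': 'a keyword arg', 'args': (), 'kwargs': {}} (key order may vary)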

Python: why must the last class in the MRO call super's __init__ with zero parameters to avoid a runtime exception

Suppose the class hierarchy is like below, and the parameters inside args are passed along the inheritance chain so that all classes get the parameter they need:
class A(object):
    def __init__(self, **args):
        print('A.__init__')
        self.a = args['a']
        super(A, self).__init__(**args)

class B(object):
    def __init__(self, **args):
        print('B.__init__')
        self.b = args['b']
        # super(B, self).__init__(**args)

class C(A, B):
    def __init__(self, **args):
        print('C.__init__')
        self.c = args['c']
        super(C, self).__init__(**args)

c = C(a=1, b=2, c=3)
print(c.__dict__)
If class B calls super's __init__ with args, that is, if we uncomment the line

    super(B, self).__init__(**args)

an exception is raised:
..., in __init__
    super(B, self).__init__(**args)
TypeError: object.__init__() takes no parameters
It seems that the last class in the MRO cannot call super's __init__, or can only call it with zero parameters. Would anyone explain why?
Suppose coder 0 writes class A and B, and then coder 1 writes class C to extend A & B. Since coder 0 doesn't know the actual MRO, how could he decide which class should call super's __init__ with args and which one shouldn't?
Or would anyone share the best practice of multiple inheritance together with parameter passing?
The base class of A and B is object. object is a class that takes no parameters, because it doesn't do anything with them. (You can't write object(1), can you?)
The best practice for solving this is to remove the consumed args before passing them along.
class A(object):
    def __init__(self, **args):
        print('A.__init__')
        self.a = args.pop("a")
        super(A, self).__init__(**args)

class B(object):
    def __init__(self, **args):
        print('B.__init__')
        self.b = args.pop("b")
        super(B, self).__init__(**args)

class C(A, B):
    def __init__(self, **args):
        print('C.__init__')
        self.c = args.pop("c")  # permanently removes 'c' from args
        super(C, self).__init__(**args)

c = C(a=1, b=2, c=3)
print(c.__dict__)
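With the pops in place, each level consumes its own keyword and forwards the rest, so args is empty by the time object.__init__ is reached. Running this prints the chain in MRO order (the dict's key order may vary):

C.__init__
A.__init__
B.__init__
{'a': 1, 'c': 3, 'b': 2}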

Preventing a class's function attributes from being passed self as the first arg

Okay, so I've got a class where one of the attributes is a callback function. Problem is, whenever I call it from within the class (e.g. as self.function_attr()), it gets passed self as the first argument. Here's an idea of what I'm working with:
def callback(a, b):
    # do something with a, b
    pass

class A:
    def __init__(self, callback):
        self.callback = callback
        self.callback(1, 2)  # Raises a TypeError: takes exactly 2 arguments (3 given)
I'm not willing to write each callback function to take self as a first argument. I wrote a decorator that works around the issue:
def callback_decorator(func):
    def newfunc(self, *args, **kw):
        return func(*args, **kw)
    return newfunc
but I'm wondering if there's anything better.
Basically, my question is, how can I call instance attributes of my class which are functions without them being passed self as the first argument?
You just need to make it a staticmethod when you bind it to the class.
def callback(a, b):
    # do something with a, b
    pass

class A:
    def __init__(self, callback):
        # now it won't get passed self; note, though, that a staticmethod
        # object stored as an *instance* attribute is only directly callable
        # on Python 3.10+; on earlier versions, bind it on the class instead
        # (as in the next example)
        self.callback = staticmethod(callback)
        self.callback(1, 2)
or
class A:
    def __init__(self, callback):
        self.callback(1, 2)

    # now it won't get passed self
    callback = staticmethod(callback)
As far as I know, a wrapper (like your decorator) is the simplest way to go. Since you already have an object in which to store the function, I wouldn't bother with a decorator. (Note I've inherited from object, which is something you should probably be doing unless you specifically want old-style class behaviour.)
class A(object):
    def __init__(self, callback):
        self._callback = callback
        self.callback(1, 2)

    def callback(self, *args, **kwargs):
        return self._callback(*args, **kwargs)
This behaves as you'd expect:
>>> def f(x, y):
...     print "X: %s, Y: %s" % (x, y)
...
>>> mya = A(f)
X: 1, Y: 2

Initialize class but with different parents in Python

In Python, is there a way, when initializing a class, to change the superclass depending on the value of a class attribute? Here's an example of what I want to do. First I have these classes:
class A(object):
    pass

class B(A):
    # extend and override class A
    pass

class C(A or B):   # pseudocode: C should inherit from either A or B
    # extend and override class A
    pass
Secondly, I want to create other classes that inherit from class C, but in some cases I want C to inherit from A, and in other cases from B:
class D(C):
    # C inherits only from A
    from_B = False

class E(C):
    # C inherits from B because attribute from_B = True
    from_B = True
I tried with a metaclass, but that was setting the base class of C (to A or B) for all subclasses (D, E, ...) at the initialization of the first subclass. So, if the first subclass to be initialized had from_B = True, all subclasses of C had C(B) as parent, whatever from_B was set to. My code was something like this:
class MetaC(type):
    def __new__(cls, name, bases, attrs):
        # note: attrs is a dict, so this getattr() always returns False and
        # should really be attrs.get('from_B', False); also, __bases__ is a
        # tuple, so item assignment on it raises a TypeError
        if C in bases and getattr(attrs, 'from_B', False):
            C.__bases__[C.__bases__.index(A)] = B
        return super(MetaC, cls).__new__(cls, name, bases, attrs)

class C(A):
    __metaclass__ = MetaC
My goal is to avoid duplicating the code of the C class while keeping the option of having, or not having, the added functionality of the B class. I should mention that I don't have control over the A and B classes.
UPDATE
I think I got it with this metaclass (code is a bit rough at the moment):
class MetaC(type):
    def __new__(cls, name, bases, attrs):
        for base in bases:
            if base.__name__ == 'C':
                if attrs.has_key('from_B'):
                    list_bases = list(base.__bases__)
                    list_bases[list_bases.index(A)] = B
                    base.__bases__ = tuple(list_bases)
                elif B in base.__bases__:
                    list_bases = list(base.__bases__)
                    list_bases[list_bases.index(B)] = A
                    base.__bases__ = tuple(list_bases)
                break
        return super(MetaC, cls).__new__(cls, name, bases, attrs)
UPDATE 2
This solution doesn't work because I'm always modifying the base class C. So, when a subclass is instantiated, it will use the C class in its current state.
I ended up using cooperative multiple inheritance. It works fine. The only drawback is that we need to be sure that for methods that need to be called on several parent classes (like methods that are present in A, B, and C), there's a super() call in each method definition of each class, and that they have the same calling signature in every case. Fortunately for me, my B classes respect this.
Example:
class A(object):
    def some_method(self, arg1, arg2, karg1=None):
        do_some_stuff(arg1, arg2, karg1)

class B(A):
    # extend and override class A
    def some_method(self, arg1, arg2, karg1=None):
        super(B, self).some_method(arg1, arg2, karg1)
        do_more_stuff(arg1, arg2, karg1)

class C(B, A):
    # extend and override; the subclass B must come before its base A here,
    # or Python cannot build a consistent MRO
    def some_method(self, arg1, arg2, karg1=None):
        do_other_stuff(arg1, arg2, karg1)
        super(C, self).some_method(arg1, arg2, karg1)
This way, when some_method is called from C or C's children, the methods are entered in MRO order (C, then B, then A), but because B calls super() before doing its own work, the actual work runs in this order:
do_other_stuff (in C.some_method)
do_some_stuff (in A.some_method)
do_more_stuff (in B.some_method)
Check The wonders of cooperative inheritance for more info on the subject.
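A self-contained sketch (with print statements standing in for the undefined do_* helpers) confirms that order:

class A(object):
    def some_method(self):
        print 'do_some_stuff (A)'

class B(A):
    def some_method(self):
        super(B, self).some_method()
        print 'do_more_stuff (B)'

class C(B, A):
    def some_method(self):
        print 'do_other_stuff (C)'
        super(C, self).some_method()

C().some_method()
# do_other_stuff (C)
# do_some_stuff (A)
# do_more_stuff (B)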
This looks so painful, you may want to consider composition/delegation instead of contorting inheritance this way. What do you think of something like this?
class A(object):
    def from_B(self):
        return False

class B(object):
    def from_B(self):
        return True

class C(object):
    pass

class PolyClass(object):
    def __init__(self, *args):
        self.delegates = [c() for c in args[::-1]]

    def __getattr__(self, attr):
        for d in self.delegates:
            if hasattr(d, attr):
                return getattr(d, attr)
        raise AttributeError(attr + "? what the heck is that?")

    def __repr__(self):
        return "<instance of (%s)>" % ','.join(
            d.__class__.__name__ for d in self.delegates[::-1])
pc1 = PolyClass(A, B)
pc2 = PolyClass(A, C)
pc3 = PolyClass(B, C)

for p in (pc1, pc2, pc3):
    print p, p.from_B()
print pc1.from_C()
Prints:
<instance of (A,B)> True
<instance of (A,C)> False
<instance of (B,C)> True
Traceback (most recent call last):
  File "varying_delegation.py", line 33, in <module>
    print pc1.from_C()
  File "varying_delegation.py", line 21, in __getattr__
    raise AttributeError(attr + "? what the heck is that?")
AttributeError: from_C? what the heck is that?
EDIT:
Here's how to take the not-in-your-control classes A and B, and create custom C classes that look like they extend either an A or a B:
# Django admin classes
class A(object):
    def from_B(self):
        return False

class B(A):
    def from_B(self):
        return True

# Your own class, which might get created with an A or B instance
class C(object):
    def __init__(self, obj):
        self.obj = obj

    def __getattr__(self, attr):
        return getattr(self.obj, attr)

# these are instantiated some way, not in your control
a, b = A(), B()

# now create different C's
c1 = C(a)
c2 = C(b)
print c1.from_B()
print c2.from_B()
prints:
False
True
And to create your subclasses D and E, create an interim subclass of C (I called it SubC because I lack imagination), which will auto-init the C superclass with a specific global object, either a or b.
# a subclass of C for subclasses pre-wired to delegate to a specific
# global object
class SubC(C):
    c_init_obj = None
    def __init__(self):
        super(SubC, self).__init__(self.c_init_obj)

class D(SubC): pass
class E(SubC): pass

# assign globals to C subclasses so they build with the correct contained
# global object
D.c_init_obj = a
E.c_init_obj = b

d = D()
e = E()
print d.from_B()
print e.from_B()
Again, prints:
False
True
