Understanding Python's `property` and `__init__`

Consider the following class:
class HotDog():
    def __init__(self):
        self.v_th = 10

    def _new_property(obj_hierarchy, attr_name):
        def set(self, value):
            obj = reduce(getattr, [self] + obj_hierarchy.split('.'))
            setattr(obj, attr_name, value)
        def get(self):
            obj = reduce(getattr, [self] + obj_hierarchy.split('.'))
            return getattr(obj, attr_name)
        return property(fset=set, fget=get)

    x.vthresh = 77
    v_th = _new_property('x', 'vthresh')
If I were to create an instance of this class -- say, x = HotDog() -- I would find that x.v_th == 10. Why is this the case? It seems to me that the value should be set to 10 initially, but then overwritten when v_th is redefined to be _new_property('x', 'vthresh'). Is the code in __init__ executed after this other code when x is initialized?

All code at class scope is executed when the class is created. The __init__() method is called when the object is created. Therefore all class scope code is run before the __init__() method is.
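A minimal illustration of that ordering (hypothetical class name, just to make the sequence visible): the class body runs once, when the class statement is executed; __init__ runs later, once per instance.

print("before class statement")

class Demo:
    print("class body running")      # runs now, at class creation time

    def __init__(self):
        print("__init__ running")    # runs only when an instance is made

print("after class statement, before any instance")
d = Demo()
# Output order:
#   before class statement
#   class body running
#   after class statement, before any instance
#   __init__ running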

Related

how to access outer class properties inside the inner classes?

class Remote:
    aa = 7
    def __init__(self):
        self.name = "Lenovo"
        self.b = self.Battery()
        print("this is outer", self.b.t)

    class Battery:
        def __init__(self):
            self.name = "Hp"
            self.t = "df"
            self.c = self.Cover()

        class Cover:
            def __init__(self):
                self.name = "Arplastic"

c1 = Remote()
I learned about inner classes today, but I don't know how to access the properties and methods of the outer class from inside the inner classes. Could anyone let me know?
Change the constructor(s) of the inner class(es) to accept a parent argument and have the creating instance pass itself to it:
class Remote:
    aa = 7
    def __init__(self):
        self.name = "Lenovo"
        self.b = self.Battery(self)
        print("this is outer", self.b.t)

    class Battery:
        def __init__(self, parent):
            self.name = "Hp"
            self.t = "df"
            self.c = self.Cover(self)
            self.parent = parent

        class Cover:
            def __init__(self, parent):
                self.name = "Arplastic"
                self.parent = parent

c1 = Remote()
print(c1.b.c.parent.parent.name)  # prints 'Lenovo'
One approach is to make a metaclass that automatically creates self.parent attributes for nested classes. Note that there is a trade-off between readability and boilerplate here - many programmers would rather you just manually pass parents as arguments and add them to __init__ methods. This is more fun though, and there is something to be said for having less cluttered code.
Here is the code:
import inspect

def inner_class(cls):
    cls.__is_inner_class__ = True
    return cls

class NestedClass(type):
    def __new__(metacls, name, bases, attrs, parent=None):
        attrs = dict(attrs.items())
        super_getattribute = attrs.get('__getattribute__', object.__getattribute__)
        inner_class_cache = {}

        def __getattribute__(self, attr):
            val = super_getattribute(self, attr)
            if inspect.isclass(val) and getattr(val, '__is_inner_class__', False):
                if (self, val) not in inner_class_cache:
                    inner_class_cache[self, val] = NestedClass(val.__name__, val.__bases__, val.__dict__, parent=self)
                return inner_class_cache[self, val]
            else:
                return val

        attrs['__getattribute__'] = __getattribute__
        attrs['parent'] = parent
        return type(name, bases, attrs)

class Remote(metaclass=NestedClass):
    aa = 7

    def __init__(self):
        self.name = "Lenovo"
        self.b = self.Battery()
        print("this is outer", self.b.t)

    @inner_class
    class Battery:
        def __init__(self):
            self.name = "Hp"
            self.t = "df"
            self.c = self.Cover()

        @inner_class
        class Cover:
            def __init__(self):
                self.name = "Arplastic"
                print(f'{self.parent=}, {self.parent.parent=}')

c1 = Remote()
print(f'{c1.b.c.parent.parent is c1=}')
print(f'{isinstance(c1.b, c1.Battery)=}')
Output:
self.parent=<__main__.Battery object at 0x7f11e74936a0>, self.parent.parent=<__main__.Remote object at 0x7f11e7493730>
this is outer df
c1.b.c.parent.parent is c1=True
isinstance(c1.b, c1.Battery)=True
The way this works is by storing the parent as a class attribute (which is None by default), and replacing the __getattribute__ method so that all inner classes are replaced with NestedClasses with the parent attribute correctly filled in.
The inner_class decorator is used to mark a class as an inner class by setting the __is_inner_class__ attribute.
def inner_class(cls):
    cls.__is_inner_class__ = True
    return cls
This is not strictly necessary if all attributes that are classes should be treated as inner classes, but it's good practice to do something like this to prevent Bar.foo being treated as an inner class in this example:
class Foo:
    pass

class Bar(metaclass=NestedClass):
    foo = Foo
All the NestedClass metaclass does is take the description of the class and modify it, adding the parent attribute:
class NestedClass(type):
    def __new__(metacls, name, bases, attrs, parent=None):
        attrs = dict(attrs.items())
        ...
        attrs['parent'] = parent
        return type(name, bases, attrs)
...and modifying the __getattribute__ method. The __getattribute__ method is a special method that gets called every time an attribute is accessed. For example:
class Foo:
    def __init__(self):
        self.bar = "baz"
    def __getattribute__(self, item):
        return 1

foo = Foo()
# These assert statements pass because even though `foo.bar` is set to "baz"
# and `foo.remote` doesn't exist, accessing either of them is the same as
# calling `Foo.__getattribute__(foo, ...)`.
assert foo.bar == 1
assert foo.remote == 1
So, by modifying the __getattribute__ method, you can make accessing self.Battery return a class that has its parent attribute equal to self, and also make it into a nested class:
class NestedClass(type):
    def __new__(metacls, name, bases, attrs, parent=None):
        attrs = dict(attrs.items())
        # get the previous __getattribute__ in case it was not the default one
        super_getattribute = attrs.get('__getattribute__', object.__getattribute__)
        inner_class_cache = {}

        def __getattribute__(self, attr):
            # get the attribute
            val = super_getattribute(self, attr)
            if inspect.isclass(val) and getattr(val, '__is_inner_class__', False):
                # if it is an inner class, then make a new version of it using
                # the NestedClass metaclass, setting the parent attribute
                if (self, val) not in inner_class_cache:
                    inner_class_cache[self, val] = NestedClass(val.__name__, val.__bases__, val.__dict__, parent=self)
                return inner_class_cache[self, val]
            else:
                return val

        attrs['__getattribute__'] = __getattribute__
        attrs['parent'] = parent
        return type(name, bases, attrs)
Note that a cache is used to ensure that self.Battery always returns the same object, rather than re-making the class on every access. This ensures that checks like isinstance(c1.b, c1.Battery) work correctly; otherwise c1.Battery would return a different object from the one used to create c1.b, and the check would return False when it should return True.
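A small standalone illustration (not part of the answer's code) of why rebuilding the class on every access would break such checks: creating the "same" class twice with type() produces two distinct class objects.

A1 = type('A', (), {})
A2 = type('A', (), {})
obj = A1()
print(A1 is A2)             # False: two separate class objects
print(isinstance(obj, A2))  # False, even though the definitions are identical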
And that's it! You can now enjoy nested classes without boilerplate!

Python class constructor (static)

Does Python have a mechanism for class constructors, i.e. a function that is called whenever the class is first referenced (as opposed to when an instance of that object is created)? I know this exists in some other languages, but I haven't come across it in Python.
Basically, I would like to initialise some static attributes in that function. I put an example below of what I would expect. Of course, the example returns None, but I would like it to return 'foo'.
class T:
    arg = None
    def __class_constructor__():
        T.arg = 'foo'

print(T.arg)  # returns None
To avoid confusion: I am well aware of the object constructor, but that's not what I want, because it is only called once the first object is created, not before:
class T:
    arg = None
    def __init__(self):
        type(self).arg = 'foo'

print(T.arg)  # returns None
obj = T()
print(T.arg)  # returns 'foo'
You can use a class decorator:
def add_arg(cls):
    if not hasattr(cls, "arg"):
        cls.arg = 'foo'
    return cls

@add_arg
class T(object):
    pass
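Because the decorator runs as soon as the class statement finishes, the attribute exists before any instance is created; a quick check:

print(T.arg)  # 'foo' - set at class definition time, no instance needed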
Or a custom metaclass:
class WithArg(type):
    def __new__(meta, name, bases, attrs):
        cls = type.__new__(meta, name, bases, attrs)
        if not hasattr(cls, "arg"):
            cls.arg = "foo"
        return cls

# python 2
class T(object):
    __metaclass__ = WithArg

# python 3
class T(metaclass=WithArg):
    pass
But as others already mention, this won't give you much more than plainly setting the class attribute in the class statement.
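For comparison, the plain version this is roughly equivalent to:

class T(object):
    arg = 'foo'  # evaluated once, when the class statement is executed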
NB: if you want a computed attribute on the class itself, you'll have to either set it as a property on a custom metaclass:
class WithProp(type):
    @property
    def arg(cls):
        return "foo"

class T(object):
    __metaclass__ = WithProp

T.arg
=> 'foo'
But arg will only be available on the class object itself, not on its instances:
T().arg
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
AttributeError: 'T' object has no attribute 'arg'
or write your own custom descriptor:
class ArgDescriptor(object):
    def __get__(self, obj, cls=None):
        return 42

class T(object):
    arg = ArgDescriptor()

T.arg
=> 42
T().arg
=> 42
You simply have to initialise the class variable when declaring it within the class:

class T:
    arg = 'foo'  # this initialises the class attribute
    def __init__(self):
        self.arg = 'bar'  # this initialises the instance attribute

print(T.arg)    # class attribute, prints 'foo'
obj = T()
print(T.arg)    # class attribute still prints 'foo'
print(obj.arg)  # instance attribute, prints 'bar'
I create a static_init decorator which calls a static_init class method if it exists. This static_init class method will run when the class decorator is evaluated which is when the class is defined - so not quite when the class is first referenced - but it is analogous to static initialization in other languages like Java.
Here is the decorator and example of how to use it to initialize a class variable on an enum class:
# pylint: disable=missing-docstring,no-member
import enum

def static_init(cls):
    if getattr(cls, "static_init", None):
        cls.static_init()
    return cls

@static_init
class SomeEnum(enum.Enum):
    VAL_A = enum.auto()
    VAL_B = enum.auto()
    VAL_C = enum.auto()
    VAL_D = enum.auto()

    @classmethod
    def static_init(cls):
        text_dict = {}
        setattr(cls, 'text_dict', text_dict)
        for value in cls:
            text_dict[value.name.lower().replace("_", " ").title()] = value

def test_static_init():
    assert SomeEnum.text_dict["Val A"] == SomeEnum.VAL_A
    assert SomeEnum.text_dict["Val B"] == SomeEnum.VAL_B
    assert SomeEnum.text_dict["Val C"] == SomeEnum.VAL_C
    assert SomeEnum.text_dict["Val D"] == SomeEnum.VAL_D

Setting a functools.partial as an instance method in Python

I'm using functools.partial to create a closure, and using setattr to make it callable from a class instance. The idea here is to create a set of methods at runtime.
#!/usr/bin/python
from functools import partial

class MyClass(object):
    def __init__(self, val):
        self.val = val

    @classmethod
    def generateMethods(self):
        def dummy(conf1, self):
            print "conf1:", conf1
            print "self.val:", self.val
            print

        for s in ('dynamic_1', 'dynamic_2'):
            closed = partial(dummy, s)
            setattr(self, "test_{0}".format(s), closed)
It seems to me that partial would bind the current value of s to dummy's first arg, which would free up self to be passed when this is called from an instance.
It's not working how I'd expect:
if __name__ == '__main__':
    # Dynamically create some methods
    MyClass.generateMethods()

    # Create an instance
    x = MyClass('FOO')

    # The dynamically created methods aren't callable from the instance :(
    #x.test_dynamic_1()
    # TypeError: dummy() takes exactly 2 arguments (1 given)

    # .. but these work just fine
    MyClass.test_dynamic_1(x)
    MyClass.test_dynamic_2(x)
Is it possible to dynamically create methods which are closures, but callable from instances of the class?
I think the new functools.partialmethod is for this exact use case.
Straight from the docs:
>>> class Cell(object):
...     def __init__(self):
...         self._alive = False
...     @property
...     def alive(self):
...         return self._alive
...     def set_state(self, state):
...         self._alive = bool(state)
...     set_alive = partialmethod(set_state, True)
...     set_dead = partialmethod(set_state, False)
...
>>> c = Cell()
>>> c.alive
False
>>> c.set_alive()
>>> c.alive
True
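Applied to the question's code, a rough sketch (note that dummy's arguments are reordered so self comes first, since partialmethod always passes the instance as the first argument):

from functools import partialmethod

class MyClass(object):
    def __init__(self, val):
        self.val = val

    def dummy(self, conf1):          # self first; partialmethod supplies it
        print("conf1:", conf1)
        print("self.val:", self.val)

# attach the pre-configured variants as real methods
for s in ('dynamic_1', 'dynamic_2'):
    setattr(MyClass, "test_{0}".format(s), partialmethod(MyClass.dummy, s))

x = MyClass('FOO')
x.test_dynamic_1()   # conf1: dynamic_1 / self.val: FOO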
The issue is that when you're calling them using the instances, they are actually not bound methods, i.e. they have no knowledge about the instance. Bound methods insert self into the arguments of the underlying function automatically when called; it is stored in the __self__ attribute of the bound method.
So, override __getattribute__ and check whether the object being fetched is an instance of partial; if it is, convert it to a bound method using types.MethodType.
Code:
#!/usr/bin/python
from functools import partial
import types

class MyClass(object):
    def __init__(self, val):
        self.val = val

    @classmethod
    def generateMethods(self):
        def dummy(conf1, self):
            print "conf1:", conf1
            print "self.val:", self.val
            print

        for s in ('dynamic_1', 'dynamic_2'):
            closed = partial(dummy, s)
            setattr(self, "test_{0}".format(s), closed)

    def __getattribute__(self, attr):
        # Here we do have access to the much needed instance (self)
        obj = object.__getattribute__(self, attr)
        if isinstance(obj, partial):
            return types.MethodType(obj, self, type(self))
        else:
            return obj
if __name__ == '__main__':
    MyClass.generateMethods()
    x = MyClass('FOO')
    x.test_dynamic_1()
    x.test_dynamic_2()
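The code above is Python 2 (print statements, three-argument MethodType). For reference, a hedged Python 3 sketch of the same __getattribute__ idea, using the two-argument types.MethodType:

import types
from functools import partial

class MyClass(object):
    def __init__(self, val):
        self.val = val

    @classmethod
    def generateMethods(cls):
        def dummy(conf1, self):
            print("conf1:", conf1)
            print("self.val:", self.val)
        for s in ('dynamic_1', 'dynamic_2'):
            setattr(cls, "test_{0}".format(s), partial(dummy, s))

    def __getattribute__(self, attr):
        obj = object.__getattribute__(self, attr)
        if isinstance(obj, partial):
            # bind the partial to this instance so it behaves like a method
            return types.MethodType(obj, self)
        return obj

MyClass.generateMethods()
x = MyClass('FOO')
x.test_dynamic_1()   # conf1: dynamic_1 / self.val: FOO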

dynamically adding callable to class as instance "method"

I implemented a metaclass that tears down the class attributes for classes created with it and builds methods from the data in those attributes, then attaches those dynamically created methods directly to the class object (the class in question allows for easy definition of web form objects for use in a web testing framework). It has been working just fine, but now I have a need to add a more complex type of method, which, to try to keep things clean, I implemented as a callable class.
Unfortunately, when I try to call the callable class on an instance, it is treated as a class attribute instead of an instance method, and when called it only receives its own self. I can see why this happens, but I was hoping someone might have a better solution than the ones I've come up with. Simplified illustration of the problem:
class Foo(object):
    def __init__(self, name, val):
        self.name = name
        self.val = val
        self.__name__ = name + '_foo'
        self.name = name

    # This doesn't work as I'd wish
    def __call__(self, instance):
        return self.name + str(self.val + instance.val)

def get_methods(name, foo_val):
    foo = Foo(name, foo_val)

    def bar(self):
        return name + str(self.val + 2)

    bar.__name__ = name + '_bar'
    return foo, bar

class Baz(object):
    def __init__(self, val):
        self.val = val

for method in get_methods('biff', 1):
    setattr(Baz, method.__name__, method)

baz = Baz(10)
# baz.val == 10
# baz.biff_foo() == 'biff11'
# baz.biff_bar() == 'biff12'
I've thought of:
1. Using a descriptor, but that seems way more complex than is necessary here
2. Using a closure inside of a factory for foo, but nested closures are ugly and messy replacements for objects most of the time, imo
3. Wrapping the Foo instance in a method that passes its self down to the Foo instance as instance (basically a decorator, which is what I actually add to Baz), but that seems superfluous and basically just a more complicated way of doing the same thing as (2)
Is there a better way than any of these to accomplish what I want, or should I just bite the bullet and use some closure factory type pattern?
One way to do this is to attach the callable objects to the class as unbound methods. The method constructor will work with arbitrary callables (i.e. instances of classes with a __call__() method)—not just functions.
from types import MethodType

class Foo(object):
    def __init__(self, name, val):
        self.name = name
        self.val = val
        self.__name__ = name + '_foo'
        self.name = name

    def __call__(self, instance):
        return self.name + str(self.val + instance.val)

class Baz(object):
    def __init__(self, val):
        self.val = val

Baz.biff = MethodType(Foo("biff", 42), None, Baz)
b = Baz(13)
print b.biff()
>>> biff55
In Python 3, there's no such thing as an unbound instance method (classes just have regular functions attached) so you might instead make your Foo class a descriptor that returns a bound instance method by giving it a __get__() method. (Actually, that approach will work in Python 2.x as well, but the above will perform a little better.)
from types import MethodType

class Foo(object):
    def __init__(self, name, val):
        self.name = name
        self.val = val
        self.__name__ = name + '_foo'
        self.name = name

    def __call__(self, instance):
        return self.name + str(self.val + instance.val)

    def __get__(self, instance, owner):
        return MethodType(self, instance) if instance else self
        # Python 2: MethodType(self, instance, owner)

class Baz(object):
    def __init__(self, val):
        self.val = val

Baz.biff = Foo("biff", 42)
b = Baz(13)
print(b.biff())
>>> biff55
The trouble you're running into is that your object is not being bound as a method of the Baz class you're putting it in. This is because it is not a descriptor, which regular functions are!
You can fix this by adding a simple __get__ method to your Foo class that makes it into a method when it's accessed as a descriptor:
import types

class Foo(object):
    # your other stuff here

    def __get__(self, obj, objtype=None):
        if obj is None:
            return self                       # unbound
        else:
            return types.MethodType(self, obj)  # bound to obj
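A quick usage sketch, assuming Foo keeps the __init__ and __call__ shown in the question: the question's original setattr loop then behaves as hoped.

baz = Baz(10)
print(baz.biff_foo())  # 'biff11' - Foo is bound to baz when accessed
print(baz.biff_bar())  # 'biff12'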

How to make a class property? [duplicate]

This question already has answers here: Using property() on classmethods (19 answers). Closed 3 years ago.
In Python I can add a method to a class with the @classmethod decorator. Is there a similar decorator to add a property to a class? I can better show what I'm talking about.
class Example(object):
    the_I = 10

    def __init__(self):
        self.an_i = 20

    @property
    def i(self):
        return self.an_i

    def inc_i(self):
        self.an_i += 1

    # is this even possible?
    @classproperty
    def I(cls):
        return cls.the_I

    @classmethod
    def inc_I(cls):
        cls.the_I += 1

e = Example()
assert e.i == 20
e.inc_i()
assert e.i == 21

assert Example.I == 10
Example.inc_I()
assert Example.I == 11
Is the syntax I've used above possible or would it require something more?
The reason I want class properties is so I can lazy load class attributes, which seems reasonable enough.
Here's how I would do this:
class ClassPropertyDescriptor(object):

    def __init__(self, fget, fset=None):
        self.fget = fget
        self.fset = fset

    def __get__(self, obj, klass=None):
        if klass is None:
            klass = type(obj)
        return self.fget.__get__(obj, klass)()

    def __set__(self, obj, value):
        if not self.fset:
            raise AttributeError("can't set attribute")
        type_ = type(obj)
        return self.fset.__get__(obj, type_)(value)

    def setter(self, func):
        if not isinstance(func, (classmethod, staticmethod)):
            func = classmethod(func)
        self.fset = func
        return self

def classproperty(func):
    if not isinstance(func, (classmethod, staticmethod)):
        func = classmethod(func)
    return ClassPropertyDescriptor(func)

class Bar(object):

    _bar = 1

    @classproperty
    def bar(cls):
        return cls._bar

    @bar.setter
    def bar(cls, value):
        cls._bar = value

# test instance instantiation
foo = Bar()
assert foo.bar == 1

baz = Bar()
assert baz.bar == 1

# test static variable
baz.bar = 5
assert foo.bar == 5

# test setting variable on the class
Bar.bar = 50
assert baz.bar == 50
assert foo.bar == 50
The setter doesn't work when we assign to Bar.bar itself, because that assignment goes through type(Bar).bar.__set__ (i.e. a descriptor on the metaclass), not through Bar.bar.__set__; with a plain type metaclass the assignment simply overwrites the descriptor.
Adding a metaclass definition solves this:
class ClassPropertyMetaClass(type):
    def __setattr__(self, key, value):
        if key in self.__dict__:
            obj = self.__dict__.get(key)
            if obj and type(obj) is ClassPropertyDescriptor:
                return obj.__set__(self, value)
        return super(ClassPropertyMetaClass, self).__setattr__(key, value)

# and update the class definition:
# class Bar(object):
#     __metaclass__ = ClassPropertyMetaClass
#     _bar = 1

# and update ClassPropertyDescriptor.__set__:
# def __set__(self, obj, value):
#     if not self.fset:
#         raise AttributeError("can't set attribute")
#     if inspect.isclass(obj):
#         type_ = obj
#         obj = None
#     else:
#         type_ = type(obj)
#     return self.fset.__get__(obj, type_)(value)
Now all will be fine.
If you define classproperty as follows, then your example works exactly as you requested.
class classproperty(object):
    def __init__(self, f):
        self.f = f
    def __get__(self, obj, owner):
        return self.f(owner)
The caveat is that you can't use this for writable properties: since the descriptor defines no __set__, e.I = 20 will quietly create an instance attribute that shadows it, and Example.I = 20 will overwrite the property object itself.
[answer written based on python 3.4; the metaclass syntax differs in 2 but I think the technique will still work]
You can do this with a metaclass...mostly. Dappawit's answer almost works, but I think it has a flaw:
class MetaFoo(type):
    @property
    def thingy(cls):
        return cls._thingy

class Foo(object, metaclass=MetaFoo):
    _thingy = 23
This gets you a classproperty on Foo, but there's a problem...
print("Foo.thingy is {}".format(Foo.thingy))
# Foo.thingy is 23
# Yay, the classmethod-property is working as intended!
foo = Foo()
if hasattr(foo, "thingy"):
print("Foo().thingy is {}".format(foo.thingy))
else:
print("Foo instance has no attribute 'thingy'")
# Foo instance has no attribute 'thingy'
# Wha....?
What the hell is going on here? Why can't I reach the class property from an instance?
I was beating my head on this for quite a while before finding what I believe is the answer. Python @properties are a subset of descriptors, and, from the descriptor documentation (emphasis mine):
The default behavior for attribute access is to get, set, or delete the attribute from an object's dictionary. For instance, a.x has a lookup chain starting with a.__dict__['x'], then type(a).__dict__['x'], and continuing through the base classes of type(a) excluding metaclasses.
So the method resolution order doesn't include our class properties (or anything else defined in the metaclass). It is possible to make a subclass of the built-in property decorator that behaves differently, but (citation needed) I've gotten the impression googling that the developers had a good reason (which I do not understand) for doing it that way.
That doesn't mean we're out of luck; we can access the properties on the class itself just fine...and we can get the class from type(self) within the instance, which we can use to make @property dispatchers:
class Foo(object, metaclass=MetaFoo):
    _thingy = 23

    @property
    def thingy(self):
        return type(self).thingy
Now Foo().thingy works as intended for both the class and the instances! It will also continue to do the right thing if a derived class replaces its underlying _thingy (which is the use case that got me on this hunt originally).
This isn't 100% satisfying to me -- having to do setup in both the metaclass and object class feels like it violates the DRY principle. But the latter is just a one-line dispatcher; I'm mostly okay with it existing, and you could probably compact it down to a lambda or something if you really wanted.
If you use Django, it has a built-in @classproperty decorator.
from django.utils.decorators import classproperty
For Django 4, use:
from django.utils.functional import classproperty
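A minimal usage sketch, assuming Django is installed; it behaves like the simple read-only classproperty implementations above:

from django.utils.functional import classproperty

class Example:
    _the_I = 10

    @classproperty
    def I(cls):
        return cls._the_I

print(Example.I)    # 10
print(Example().I)  # 10 - Django's classproperty also works via instances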
I think you may be able to do this with the metaclass. Since the metaclass can be like a class for the class (if that makes sense). I know you can assign a __call__() method to the metaclass to override calling the class, MyClass(). I wonder if using the property decorator on the metaclass operates similarly.
Wow, it works:
class MetaClass(type):
    def getfoo(self):
        return self._foo
    foo = property(getfoo)

    @property
    def bar(self):
        return self._bar

class MyClass(object):
    __metaclass__ = MetaClass
    _foo = 'abc'
    _bar = 'def'

print MyClass.foo
print MyClass.bar
Note: This is in Python 2.7. Python 3+ uses a different technique to declare a metaclass. Use: class MyClass(metaclass=MetaClass):, remove __metaclass__, and the rest is the same.
As far as I can tell, there is no way to write a setter for a class property without creating a new metaclass.
I have found that the following method works. Define a metaclass with all of the class properties and setters you want. IE, I wanted a class with a title property with a setter. Here's what I wrote:
class TitleMeta(type):
    @property
    def title(self):
        return getattr(self, '_title', 'Default Title')

    @title.setter
    def title(self, title):
        self._title = title
        # Do whatever else you want when the title is set...
Now make the actual class you want as normal, except have it use the metaclass you created above.
# Python 2 style:
class ClassWithTitle(object):
    __metaclass__ = TitleMeta
    # The rest of your class definition...

# Python 3 style:
class ClassWithTitle(object, metaclass=TitleMeta):
    ...  # Your class definition...
It's a bit weird to define this metaclass as we did above if we'll only ever use it on the single class. In that case, if you're using the Python 2 style, you can actually define the metaclass inside the class body. That way it's not defined in the module scope.
def _create_type(meta, name, attrs):
    type_name = f'{name}Type'
    type_attrs = {}
    for k, v in attrs.items():
        if type(v) is _ClassPropertyDescriptor:
            type_attrs[k] = v
    return type(type_name, (meta,), type_attrs)

class ClassPropertyType(type):
    def __new__(meta, name, bases, attrs):
        Type = _create_type(meta, name, attrs)
        cls = super().__new__(meta, name, bases, attrs)
        cls.__class__ = Type
        return cls

class _ClassPropertyDescriptor(object):
    def __init__(self, fget, fset=None):
        self.fget = fget
        self.fset = fset

    def __get__(self, obj, owner):
        if self in obj.__dict__.values():
            return self.fget(obj)
        return self.fget(owner)

    def __set__(self, obj, value):
        if not self.fset:
            raise AttributeError("can't set attribute")
        return self.fset(obj, value)

    def setter(self, func):
        self.fset = func
        return self

def classproperty(func):
    return _ClassPropertyDescriptor(func)

class Bar(metaclass=ClassPropertyType):
    __bar = 1

    @classproperty
    def bar(cls):
        return cls.__bar

    @bar.setter
    def bar(cls, value):
        cls.__bar = value

bar = Bar()
assert Bar.bar == 1
Bar.bar = 2
assert bar.bar == 2
nbar = Bar()
assert nbar.bar == 2
I happened to come up with a solution very similar to @Andrew's, only DRY:
import inspect

class MetaFoo(type):
    def __new__(mc1, name, bases, nmspc):
        nmspc.update({'thingy': MetaFoo.thingy})
        return super(MetaFoo, mc1).__new__(mc1, name, bases, nmspc)

    @property
    def thingy(cls):
        if not inspect.isclass(cls):
            cls = type(cls)
        return cls._thingy

    @thingy.setter
    def thingy(cls, value):
        if not inspect.isclass(cls):
            cls = type(cls)
        cls._thingy = value

class Foo(metaclass=MetaFoo):
    _thingy = 23

class Bar(Foo):
    _thingy = 12
This has the best of all answers:
1. The "metaproperty" is added to the class, so that it will still be a property of the instance
2. You don't need to redefine thingy in any of the classes
3. The property works as a "class property" for both the instance and the class
4. You have the flexibility to customize how _thingy is inherited
In my case, I actually customized _thingy to be different for every child, without defining it in each class (and without a default value), by:

def __new__(mc1, name, bases, nmspc):
    nmspc.update({'thingy': MetaFoo.thingy, '_thingy': None})
    return super(MetaFoo, mc1).__new__(mc1, name, bases, nmspc)
If you only need lazy loading, then you could just have a class initialisation method.
EXAMPLE_SET = False

class Example(object):

    @classmethod
    def initclass(cls):
        global EXAMPLE_SET
        if EXAMPLE_SET:
            return
        cls.the_I = 'ok'
        EXAMPLE_SET = True

    def __init__(self):
        Example.initclass()
        self.an_i = 20

try:
    print Example.the_I
except AttributeError:
    print 'ok class not "loaded"'

foo = Example()
print foo.the_I
print Example.the_I
But the metaclass approach seems cleaner, and with more predictable behavior.
Perhaps what you're looking for is the Singleton design pattern. There's a nice SO QA about implementing shared state in Python.
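One classic way to share state in Python is the "Borg" idiom; a minimal sketch (hypothetical, not quoted from that thread):

class Borg:
    _shared_state = {}

    def __init__(self):
        # every instance reuses the same attribute dictionary
        self.__dict__ = self._shared_state

a = Borg()
b = Borg()
a.x = 1
print(b.x)  # 1 - the state is shared, even though a is not b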
