python getattr and setattr with self.__dict__

I have been reading about overriding __getattr__ and __setattr__, and I can't figure out whether I still need the overriding assignments if I use self.__dict__ = self in the constructor.
Once I make an instance of the class
a = OPT(foo='bar')
a.foo and a['foo'] work with and without the __getattr__ and __setattr__ declarations.
Can someone explain whether I need both, and if so, why? Thanks!
class OPT(dict):
    __getattr__ = dict.__getitem__
    __setattr__ = dict.__setitem__

    def __init__(self, *args, **kwargs):
        super(OPT, self).__init__(*args, **kwargs)
        self.__dict__ = self

You override __getattr__ and __setattr__ when you want to do something extra when the class user gets or sets an attribute. For example:
1) you might want to avoid raising an exception when the user accesses an invalid attribute, so you just return None for an unknown attribute.
2) attribute access is actually forwarded/delegated, so the valid attributes are not known in advance, e.g. a class that represents a database row where the user manipulates columns as attributes. You need to check at run time whether the given attribute name matches a column name, and perhaps you'd like to forgive upper-case/lower-case differences, etc.
Another thing: containment is sometimes preferred to subclassing. Instead of inheriting from dict, you could create a class that contains a dict and delegates to it, as sketched below.
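For illustration only (not from the original answer), here is a minimal sketch of the containment approach, using a hypothetical Row class that forwards attribute access to a wrapped dict of columns and forgives case differences:
class Row:
    """Wraps a dict of column values instead of inheriting from dict."""
    def __init__(self, columns):
        # store the wrapped dict under a name that won't clash with columns
        object.__setattr__(self, '_columns', dict(columns))

    def __getattr__(self, name):
        # called only when normal attribute lookup fails
        try:
            return self._columns[name.lower()]  # forgive case differences
        except KeyError:
            raise AttributeError(name)

    def __setattr__(self, name, value):
        self._columns[name.lower()] = value

row = Row({'id': 1, 'name': 'Alice'})
print(row.Name)  # 'Alice' -- case-insensitive delegation to the wrapped dict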

Try this:
class DotDict(dict):
    def __init__(self, d: dict = {}):
        super().__init__()
        for key, value in d.items():
            self[key] = DotDict(value) if type(value) is dict else value

    def __getattr__(self, key):
        if key in self:
            return self[key]
        raise AttributeError(key)  # raise the proper exception, not KeyError

    __setattr__ = dict.__setitem__
    __delattr__ = dict.__delitem__
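A quick usage example (illustrative only, not part of the original answer):
cfg = DotDict({'db': {'host': 'localhost', 'port': 5432}})
print(cfg.db.host)        # 'localhost' -- nested dicts become DotDicts
cfg.db.port = 5433        # attribute assignment writes the key
print(cfg['db']['port'])  # 5433
del cfg.db.host           # __delattr__ removes the key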

Here's a more elaborate fork of Marquinho Peli's answer. Changes: support for dictionaries with nested lists.
class DotDict(dict):
    """A dictionary that's recursively navigable with dots, not brackets."""
    def __init__(self, data: dict = None):
        super().__init__()
        if data is None or not isinstance(data, dict):
            raise AttributeError(f"{type(self).__name__} must be instantiated with a dictionary, not a {type(data).__name__}.")
        for key, value in data.items():
            if isinstance(value, list):
                self[key] = [DotDict(item) for item in value]
            elif isinstance(value, dict):
                self[key] = DotDict(value)
            else:
                self[key] = value

    __setattr__ = dict.__setitem__
    __delattr__ = dict.__delitem__

    def __getattr__(self, key):
        try:
            return self[key]
        except KeyError:
            raise AttributeError(f"attribute .{key} not found")
Usage examples:
d = {'key1': 'value1',
     'key2': 'value2',
     'key3': {'key3a': 'value3a'},
     'key4': {'key4a': [{'key4aa': 'value4aa',
                         'key4ab': 'value4ab',
                         'key4ac': 'value4ac'}],
              'key4b': 'value4b'}}

dd = DotDict(d)
print(dd.key4.key4a[0].key4aa)   # value4aa
dd.key4.key4a[0].key4aa = 'newval'
print(dd.key4.key4a[0].key4aa)   # newval
print(dd.key4.key4a[0].key4aaa)  # AttributeError: attribute .key4aaa not found
DotDict({})  # OK
DotDict()    # AttributeError: DotDict must be instantiated with a dictionary, not a NoneType.


Set and Get attributes in Bunch type object

To make it simple to parse and create JSON, machine learning applications often use a Bunch object, e.g. https://github.com/dsc/bunch/blob/master/bunch/__init__.py
When getting an attribute, there's a nested EAFP idiom that first tries object.__getattribute__ and then falls back to dictionary square-bracket access, i.e.
class Bunch(dict):
    def __getattr__(self, k):
        try:
            return object.__getattribute__(self, k)
        except AttributeError:
            try:
                return self[k]
            except KeyError:
                raise AttributeError(k)
And when trying to set an attribute,
    def __setattr__(self, k, v):
        try:
            # Throws exception if not in prototype chain
            object.__getattribute__(self, k)
        except AttributeError:
            try:
                self[k] = v
            except:
                raise AttributeError(k)
        else:
            object.__setattr__(self, k, v)
The sklearn implementation seems to follow the same train of thought but has fewer checks: https://github.com/scikit-learn/scikit-learn/blob/2beed5584/sklearn/utils/__init__.py#L61
class Bunch(dict):
    def __init__(self, **kwargs):
        super().__init__(kwargs)

    def __setattr__(self, key, value):
        self[key] = value

    def __dir__(self):
        return self.keys()

    def __getattr__(self, key):
        try:
            return self[key]
        except KeyError:
            raise AttributeError(key)

    def __setstate__(self, state):
        # Bunch pickles generated with scikit-learn 0.16.* have a non-empty
        # __dict__. This causes surprising behaviour when loading these
        # pickles in scikit-learn 0.17: reading bunch.key uses __dict__, but
        # assigning to bunch.key uses __setattr__ and only changes
        # bunch['key']. More details can be found at:
        # https://github.com/scikit-learn/scikit-learn/issues/6196.
        # Overriding __setstate__ to be a noop has the effect of
        # ignoring the pickled __dict__.
        pass
The nested EAFP seems a little hard to maintain; my questions here are:
Is there a simpler way to handle get and set functions for Bunch data objects?
Are there any other dict-like objects that allow mutability between attributes and keys?
How should the Bunch object's .update() function work, shallow or deep copying? Or just let the default dict.update() do what it does? (See: Understanding dict.copy() - shallow or deep?)
Lucky for you, all objects have an internal dict-like object that manages the attributes of the object (this is in the __dict__ attribute). To do what you're asking, you just need to make the class use itself as the __dict__ object:
class Bunch(dict):
    def __init__(self, *args, **kwargs):
        self.__dict__ = self
        super().__init__(*args, **kwargs)
Usage:
>>> b = Bunch()
>>> b.foo = 3
>>> b["foo"]
3
>>> b["foo"] = 5
>>> b.foo
5
>>> b["bar"] = 1
>>> b.bar
1
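Keys passed to the constructor are attribute-accessible too; a small illustrative addition (not from the original answer):
>>> b = Bunch(spam=1, eggs=2)
>>> b.spam
1
>>> sorted(b.keys())
['eggs', 'spam']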

Python object.__getattribute__ not behaving correctly after override

I'm trying to replicate mongoengine functionality that lets you define field objects that can be used like normal python objects in the code.
My idea is to create a FieldHolder class that contains the value and (de)serialization logic, and a Document object with overridden __setattr__ and __getattribute__ methods.
In my code draft, if I set x.h to some value, this value gets correctly assigned to x.h._value. When I get x.h, I correctly get x.h._value.
However, I would also like to get h as a FieldHolder object and not as its value. I have tried using object.__getattribute__ (inside the serialize method), but I'm still getting h._value (object.__getattribute__(self, 'h') returns "abc"). What am I doing wrong? Thanks
class FieldHolder:
    _value = None
    # Some serialization and deserialization methods

class Document(object):
    h = FieldHolder()

    def __setattr__(self, key, value):
        attr = getattr(self, key, None)
        if attr is not None and isinstance(attr, FieldHolder):
            attr._value = value
        else:
            super().__setattr__(key, value)

    def __getattribute__(self, key):
        val = super().__getattribute__(key)
        if isinstance(val, FieldHolder):
            return val._value
        else:
            return val

    def serialize(self):
        res = {}
        for name, value in vars(self).items():
            obj = object.__getattribute__(self, name)  # not working as expected
            if isinstance(obj, FieldHolder):
                res[name] = value
        return res
x = Document()
x.h = "abc" # h._value is now "abc"
print(x.h) # prints "abc"
s = x.serialize() # should return {'h': 'abc'} but returns {}
print(s)

Python inheritance: Raising an error when non-overridden methods are called

I am writing a subclass of dict that maps from string keys to values of arbitrary types. If a key is a regex, it is stored and queried separately.
import re

class RegexDict(dict):
    def __init__(self):
        super().__init__()    # non-regex keys in the parent class
        self.regex_dict = {}  # regex keys in the child class

    def __getitem__(self, key):
        try:
            return super().__getitem__(key)
        except KeyError:
            for x in self.regex_dict:
                if re.fullmatch(x, key):
                    return self.regex_dict[x]
            raise KeyError(key)

    def __setitem__(self, key, value):
        key, is_regex = key
        if is_regex:
            self.regex_dict[key] = value
        else:
            super().__setitem__(key, value)
Because this class will be used by other libraries (which is why I have to use inheritance), I want to make sure that an error is raised when non-overridden methods in the base class are called. How should I do this?
You should inherit from collections.abc.MutableMapping instead of dict.
It will fill in the gaps automatically, and also let you know which things you have to implement.
In addition to __getitem__ and __setitem__, you'll also have to implement __delitem__, __iter__ and __len__. If you can't implement those reasonably, you can raise e.g. NotImplementedError from them (even though that will limit the use of your class a lot).
This will have the advantage that all dict methods which only need __getitem__ and __setitem__ (+ what you implement) internally will work out of the box.
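A minimal sketch of that approach (my illustration, assuming the same (key, is_regex) tuple convention as the question; not the answerer's code):
import re
from collections.abc import MutableMapping

class RegexDict(MutableMapping):
    def __init__(self):
        self.plain = {}       # ordinary keys
        self.regex_dict = {}  # regex keys

    def __getitem__(self, key):
        try:
            return self.plain[key]
        except KeyError:
            for pattern, value in self.regex_dict.items():
                if re.fullmatch(pattern, key):
                    return value
            raise KeyError(key)

    def __setitem__(self, key, value):
        key, is_regex = key
        (self.regex_dict if is_regex else self.plain)[key] = value

    def __delitem__(self, key):
        del self.plain[key]  # this sketch only deletes exact keys

    def __iter__(self):
        yield from self.plain
        yield from self.regex_dict

    def __len__(self):
        return len(self.plain) + len(self.regex_dict)

rd = RegexDict()
rd[('answer', False)] = 42
rd[(r'colou?r', True)] = 'red'
print(rd['color'], rd.get('answer'))  # red 42 -- .get() comes free from MutableMapping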
Don't subclass. To convince third-party libs that your object is a dict, set the __class__ attribute.
import re

class RegexDict():
    __class__ = dict

    def __init__(self):
        self.non_regex_dict = {}
        self.regex_dict = {}

    def __getitem__(self, key):
        try:
            return self.non_regex_dict[key]
        except KeyError:
            for x in self.regex_dict:
                if re.fullmatch(x, key):
                    return self.regex_dict[x]
            raise KeyError(key)

    def __setitem__(self, key, value):
        key, is_regex = key
        if is_regex:
            self.regex_dict[key] = value
        else:
            self.non_regex_dict[key] = value
rd = RegexDict()
print(isinstance(rd, dict))
rd.clear()
output:
True
Traceback (most recent call last):
  File "libo.py", line 30, in <module>
    rd.clear()
AttributeError: 'RegexDict' object has no attribute 'clear'

Immutable dictionary, only use as a key for another dictionary

I had the need to implement a hashable dict so I could use a dictionary as a key for another dictionary.
A few months ago I used this implementation: Python hashable dicts
However, I got a notice from a colleague saying 'it is not really immutable, thus it is not safe. You can use it, but it does make me feel like a sad Panda'.
So I started looking around to create one that is immutable. I have no need to compare the 'key-dict' to another 'key-dict'. Its only use is as a key for another dictionary.
I have come up with the following:
class HashableDict(dict):
    """Hashable dict that can be used as a key in other dictionaries"""
    def __new__(self, *args, **kwargs):
        # create a new local dict, that will be used by the HashableDictBase closure class
        immutableDict = dict(*args, **kwargs)

        class HashableDictBase(object):
            """Hashable dict that can be used as a key in other dictionaries. This is now immutable"""
            def __key(self):
                """Return a tuple of the current keys"""
                return tuple((k, immutableDict[k]) for k in sorted(immutableDict))

            def __hash__(self):
                """Return a hash of __key"""
                return hash(self.__key())

            def __eq__(self, other):
                """Compare two __keys"""
                return self.__key() == other.__key()  # pylint: disable-msg=W0212

            def __repr__(self):
                """#see: dict.__repr__"""
                return immutableDict.__repr__()

            def __str__(self):
                """#see: dict.__str__"""
                return immutableDict.__str__()

            def __setattr__(self, *args):
                raise TypeError("can't modify immutable instance")
            __delattr__ = __setattr__

        return HashableDictBase()
I used the following to test the functionality:
d = {"a" : 1}
a = HashableDict(d)
b = HashableDict({"b" : 2})
print a
d["b"] = 2
print a
c = HashableDict({"a" : 1})
test = {a : "value with a dict as key (key a)",
b : "value with a dict as key (key b)"}
print test[a]
print test[b]
print test[c]
which gives:
{'a': 1}
{'a': 1}
value with a dict as key (key a)
value with a dict as key (key b)
value with a dict as key (key a)
as output
Is this the 'best possible' immutable dictionary that I can use that satisfies my requirements? If not, what would be a better solution?
If you are only using it as a key for another dict, you could go for frozenset(mutabledict.items()). If you need to access the underlying mapping, you could then use the frozenset as the parameter to dict.
mutabledict = dict(zip('abc', range(3)))
immutable = frozenset(mutabledict.items())
read_frozen = dict(immutable)
read_frozen['a'] # => 1
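For instance, the frozen set can itself be used directly as a dictionary key (illustrative addition):
lookup = {immutable: 'payload'}
print(lookup[frozenset(mutabledict.items())])  # 'payload'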
Note that you could also combine this with a class derived from dict, and use the frozenset as the source of the hash, while disabling __setitem__, as suggested in another answer (see @RaymondHettinger's answer for code which does just that).
The Mapping abstract base class makes this easy to implement:
import collections.abc

class ImmutableDict(collections.abc.Mapping):
    def __init__(self, somedict):
        self._dict = dict(somedict)  # make a copy
        self._hash = None

    def __getitem__(self, key):
        return self._dict[key]

    def __len__(self):
        return len(self._dict)

    def __iter__(self):
        return iter(self._dict)

    def __hash__(self):
        if self._hash is None:
            self._hash = hash(frozenset(self._dict.items()))
        return self._hash

    def __eq__(self, other):
        return self._dict == other._dict
I realize this has already been answered, but types.MappingProxyType is an analogous implementation as of Python 3.3. Regarding the original question of safety, there is a discussion in PEP 416 (Add a frozendict builtin type) of why the idea of a frozendict was rejected.
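A quick illustration of MappingProxyType; note it is a read-only view, not a hashable dict, so it cannot itself be used as a dictionary key:
from types import MappingProxyType

data = {'a': 1}
proxy = MappingProxyType(data)
print(proxy['a'])  # 1
proxy['a'] = 2     # TypeError: 'mappingproxy' object does not support item assignment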
In order for your immutable dictionary to be safe, all it needs to do is never change its hash. Why don't you just disable __setitem__ as follows:
class ImmutableDict(dict):
    def __setitem__(self, key, value):
        raise Exception("Can't touch this")

    def __hash__(self):
        return hash(tuple(sorted(self.items())))
a = ImmutableDict({'a':1})
b = {a:1}
print b
print b[a]
a['a'] = 0
The output of the script is:
{{'a': 1}: 1}
1
Traceback (most recent call last):
  File "ex.py", line 11, in <module>
    a['a'] = 0
  File "ex.py", line 3, in __setitem__
    raise Exception("Can't touch this")
Exception: Can't touch this
Here is a link to a pip-installable implementation of @RaymondHettinger's answer: https://github.com/pcattori/icicle
Simply pip install icicle and you can from icicle import FrozenDict!
Update: icicle has been deprecated in favor of maps: https://github.com/pcattori/maps (documentation, PyPI).
It appears I am late to post. Not sure if anyone else has come up with ideas, but here is my take on it. The dict is immutable and hashable. I made it immutable by overriding all the mutating methods, magic and otherwise, with a custom _readonly function that raises an exception. This is done when the object is instantiated. To get around the problem of not being able to set the values, I set the hash under __new__. I then override the __hash__ function. That's it!
class ImmutableDict(dict):
    _HASH = None

    def __new__(cls, *args, **kwargs):
        ImmutableDict._HASH = hash(frozenset(args[0].items()))
        return super(ImmutableDict, cls).__new__(cls, args)

    def __hash__(self):
        return self._HASH

    def _readonly(self, *args, **kwargs):
        raise TypeError("Cannot modify Immutable Instance")

    __delattr__ = __setattr__ = __setitem__ = pop = update = setdefault = clear = popitem = _readonly
Test:
>>> immutabled1 = ImmutableDict({"This": "That", "Cheese": "Blarg"})
>>> dict1 = {immutabled1: "Yay"}
>>> dict1[immutabled1]
'Yay'
>>> dict1
{{'Cheese': 'Blarg', 'This': 'That'}: 'Yay'}
Variation of Raymond Hettinger's answer by wrapping the self._dict with types.MappingProxyType.
import collections.abc
from types import MappingProxyType

class ImmutableDict(collections.abc.Mapping):
    """
    Copies a dict and proxies it via types.MappingProxyType to make it immutable.
    """
    def __init__(self, somedict):
        dictcopy = dict(somedict)                # make a copy
        self._dict = MappingProxyType(dictcopy)  # lock it
        self._hash = None

    def __getitem__(self, key):
        return self._dict[key]

    def __len__(self):
        return len(self._dict)

    def __iter__(self):
        return iter(self._dict)

    def __hash__(self):
        if self._hash is None:
            self._hash = hash(frozenset(self._dict.items()))
        return self._hash

    def __eq__(self, other):
        return self._dict == other._dict

    def __repr__(self):
        return str(self._dict)
You can use an enum:
import enum

KeyDict1 = enum.Enum('KeyDict1', {'InnerDictKey1': 'bla', 'InnerDictKey2': 2})

d = {KeyDict1: 'whatever', KeyDict2: 1, ...}
You can access the enums like you would a dictionary:
KeyDict1['InnerDictKey2'].value # This is 2
You can iterate over the names, and get their values... It does everything you'd expect.
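For example, iterating over the members (illustrative):
for member in KeyDict1:
    print(member.name, member.value)
# InnerDictKey1 bla
# InnerDictKey2 2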
You can try using https://github.com/Lightricks/freeze
It provides recursively immutable and hashable dictionaries
from freeze import FDict

a_mutable_dict = {
    "list": [1, 2],
    "set": {3, 4},
}

a_frozen_dict = FDict(a_mutable_dict)

print(a_frozen_dict)
print(hash(a_frozen_dict))

# FDict: {'list': FList: (1, 2), 'set': FSet: {3, 4}}
# -4855611361973338606

Prevent creating new attributes outside __init__

I want to be able to create a class (in Python) that, once initialized with __init__, does not accept new attributes, but accepts modifications of existing attributes. There are several hack-ish ways I can see to do this, for example having a __setattr__ method such as
def __setattr__(self, attribute, value):
    if not attribute in self.__dict__:
        print "Cannot set %s" % attribute
    else:
        self.__dict__[attribute] = value
and then editing __dict__ directly inside __init__, but I was wondering if there is a 'proper' way to do this?
I wouldn't use __dict__ directly, but you can add a function to explicitly "freeze" an instance:
class FrozenClass(object):
    __isfrozen = False

    def __setattr__(self, key, value):
        if self.__isfrozen and not hasattr(self, key):
            raise TypeError("%r is a frozen class" % self)
        object.__setattr__(self, key, value)

    def _freeze(self):
        self.__isfrozen = True

class Test(FrozenClass):
    def __init__(self):
        self.x = 42
        self.y = 2**3
        self._freeze()  # no new attributes after this point.

a, b = Test(), Test()
a.x = 10
b.z = 10  # fails
Slots are the way to go:
The Pythonic way is to use __slots__ instead of playing around with __setattr__. While the __setattr__ approach may solve the problem, it does not give any performance improvement. The attributes of objects are stored in a dictionary, __dict__; this is why you can dynamically add attributes to objects of the classes we have created so far. Using a dictionary for attribute storage is very convenient, but it can mean a waste of space for objects which have only a small number of instance variables.
Slots are a nice way to work around this space consumption problem. Instead of having a dynamic dict that allows adding attributes to objects dynamically, slots provide a static structure which prohibits additions after the creation of an instance.
When we design a class, we can use slots to prevent the dynamic creation of attributes. To define slots, you have to define a list with the name __slots__. The list has to contain all the attributes you want to use. We demonstrate this in the following class, in which the slots list contains only the name of one attribute, "val".
class S(object):
    __slots__ = ['val']

    def __init__(self, v):
        self.val = v

x = S(42)
print(x.val)
x.new = "not possible"
=> It fails to create an attribute "new":
42
Traceback (most recent call last):
  File "slots_ex.py", line 12, in <module>
    x.new = "not possible"
AttributeError: 'S' object has no attribute 'new'
Notes:
Since Python 3.3 the advantage of optimizing space consumption is not as impressive any more. With Python 3.3, key-sharing dictionaries are used for the storage of objects. The attributes of instances are capable of sharing part of their internal storage between each other, i.e. the part which stores the keys and their corresponding hashes. This helps to reduce the memory consumption of programs which create many instances of non-builtin types. But slots are still the way to go to avoid dynamically created attributes.
Using slots also comes with its own cost. It will break serialization (e.g. pickle). It will also break multiple inheritance: a class can't inherit from more than one class that either defines slots or has an instance layout defined in C code (like list, tuple or int). An example of the multiple-inheritance limitation is sketched below.
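A minimal illustration of that multiple-inheritance limitation (not from the original answer):
class A:
    __slots__ = ('a',)

class B:
    __slots__ = ('b',)

class C(A, B):  # TypeError: multiple bases have instance lay-out conflict
    pass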
If someone is interested in doing that with a decorator, here is a working solution:
from functools import wraps

def froze_it(cls):
    cls.__frozen = False

    def frozensetattr(self, key, value):
        if self.__frozen and not hasattr(self, key):
            print("Class {} is frozen. Cannot set {} = {}"
                  .format(cls.__name__, key, value))
        else:
            object.__setattr__(self, key, value)

    def init_decorator(func):
        @wraps(func)
        def wrapper(self, *args, **kwargs):
            func(self, *args, **kwargs)
            self.__frozen = True
        return wrapper

    cls.__setattr__ = frozensetattr
    cls.__init__ = init_decorator(cls.__init__)
    return cls
Pretty straightforward to use:
@froze_it
class Foo(object):
    def __init__(self):
        self.bar = 10

foo = Foo()
foo.bar = 42
foo.foobar = "no way"
Result:
>>> Class Foo is frozen. Cannot set foobar = no way
Actually, you don't want __setattr__, you want __slots__. Add __slots__ = ('foo', 'bar', 'baz') to the class body, and Python will make sure that there's only foo, bar and baz on any instance. But read the caveats the documentation lists!
The proper way is to override __setattr__. That's what it's there for.
I very much like the solution that uses a decorator, because it's easy to use for many classes across a project, with minimal additions for each class. But it doesn't work well with inheritance.
So here is my version: it only overrides the __setattr__ function. If the attribute doesn't exist and the caller function is not __init__, it prints an error message.
import inspect

def froze_it(cls):
    def frozensetattr(self, key, value):
        if not hasattr(self, key) and inspect.stack()[1][3] != "__init__":
            print("Class {} is frozen. Cannot set {} = {}"
                  .format(cls.__name__, key, value))
        else:
            self.__dict__[key] = value

    cls.__setattr__ = frozensetattr
    return cls

@froze_it
class A:
    def __init__(self):
        self._a = 0

a = A()
a._a = 1
a._b = 2  # error
What about this:
class A():
    __allowed_attr = ('_x', '_y')

    def __init__(self, x=0, y=0):
        self._x = x
        self._y = y

    def __setattr__(self, attribute, value):
        if not attribute in self.__class__.__allowed_attr:
            raise AttributeError
        else:
            super().__setattr__(attribute, value)
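A quick check of how it behaves (illustrative):
a = A(1, 2)
a._x = 10  # OK, '_x' is in the allowed tuple
a._z = 3   # raises AttributeError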
Here is an approach I came up with that doesn't need a _frozen attribute or a call to freeze() in __init__.
During __init__ I just add all class attributes to the instance.
I like this because there is no _frozen and no freeze(), and _frozen also does not show up in the vars(instance) output.
class MetaModel(type):
    def __setattr__(self, name, value):
        raise AttributeError("Model classes do not accept arbitrary attributes")

class Model(object):
    __metaclass__ = MetaModel

    # init will take all CLASS attributes, and add them as SELF/INSTANCE attributes
    def __init__(self):
        for k, v in self.__class__.__dict__.iteritems():
            if not k.startswith("_"):
                self.__setattr__(k, v)

    # setattr won't allow any attributes to be set on the SELF/INSTANCE that don't already exist
    def __setattr__(self, name, value):
        if not hasattr(self, name):
            raise AttributeError("Model instances do not accept arbitrary attributes")
        else:
            object.__setattr__(self, name, value)

# Example usage
class Dog(Model):
    name = ''
    kind = 'canine'

d, e = Dog(), Dog()
print vars(d)
print vars(e)
e.junk = 'stuff'  # fails
I like the "Frozen" class of Jochen Ritzel. The inconvenience is that the __isfrozen variable then appears when printing an instance's __dict__.
I went around this problem by creating a list of authorized attributes (similar to __slots__):
class Frozen(object):
    __List = []

    def __setattr__(self, key, value):
        setIsOK = False
        for item in self.__List:
            if key == item:
                setIsOK = True
        if setIsOK == True:
            object.__setattr__(self, key, value)
        else:
            raise TypeError("%r has no attributes %r" % (self, key))

class Test(Frozen):
    _Frozen__List = ["attr1", "attr2"]

    def __init__(self):
        self.attr1 = 1
        self.attr2 = 1
The FrozenClass by Jochen Ritzel is cool, but calling _freeze() when initializing a class every time is not so cool (and you run the risk of forgetting it). I added an __init_slots__ function:
class FrozenClass(object):
    __isfrozen = False

    def _freeze(self):
        self.__isfrozen = True

    def __init_slots__(self, slots):
        for key in slots:
            object.__setattr__(self, key, None)
        self._freeze()

    def __setattr__(self, key, value):
        if self.__isfrozen and not hasattr(self, key):
            raise TypeError("%r is a frozen class" % self)
        object.__setattr__(self, key, value)

class Test(FrozenClass):
    def __init__(self):
        self.__init_slots__(["x", "y"])
        self.x = 42
        self.y = 2**3

a, b = Test(), Test()
a.x = 10
b.z = 10  # fails
None of the answers mention the performance impact of overriding __setattr__, which can be an issue when creating many small objects. (__slots__ would be the performant solution, but it limits pickling and inheritance.)
So I came up with this variant, which installs our slower setattr after init:
class FrozenClass:
    def freeze(self):
        def frozen_setattr(self, key, value):
            if not hasattr(self, key):
                raise TypeError("Cannot set {}: {} is a frozen class".format(key, self))
            object.__setattr__(self, key, value)
        self.__setattr__ = frozen_setattr
class Foo(FrozenClass): ...
If you don't want to call freeze at the end of __init__, if inheritance is an issue, or if you don't want it in vars(), it can also be adapted: for example here is a decorator version based on the pystrict answer:
import functools

def strict(cls):
    cls._x_setter = getattr(cls, "__setattr__", object.__setattr__)
    cls._x_init = cls.__init__

    @functools.wraps(cls.__init__)
    def wrapper(self, *args, **kwargs):
        cls._x_init(self, *args, **kwargs)

        def frozen_setattr(self, key, value):
            if not hasattr(self, key):
                raise TypeError("Class %s is frozen. Cannot set '%s'." % (cls.__name__, key))
            cls._x_setter(self, key, value)
        cls.__setattr__ = frozen_setattr

    cls.__init__ = wrapper
    return cls

@strict
class Foo: ...
I wrote pystrict as a solution to this problem. It's too large to paste all of the code in stackoverflow.
pystrict is a pypi installable decorator that can be used with classes to freeze them. Many solutions here don't properly support inheritance.
If __slots__ doesn't work for you (because of inheritance issues), this is a good alternative.
There is an example in the README that shows why a decorator like this is needed even if you have mypy and pylint running on your project:
pip install pystrict
Then just use the @strict decorator:
from pystrict import strict

@strict
class Blah:
    def __init__(self):
        self.attr = 1
@dataclass(slots=True) Nirvana (Python 3.10)
I'm in love with this @dataclass thing:
main.py
from dataclasses import dataclass

@dataclass(slots=True)
class C:
    n: int
    s: str

c = C(n=1, s='one')
assert c.n == 1
assert c.s == 'one'
c.n = 2
c.s = 'two'
c.asdf = 2
Outcome:
Traceback (most recent call last):
  File "/home/ciro/main.py", line 15, in <module>
    c.asdf = 2
AttributeError: 'C' object has no attribute 'asdf'
Note how @dataclass only requires us to define our attributes once, with type annotations:
n: int
s: str
and then, without any repetition we get for free:
def __init__(self, n, s):
    self.n = n
    self.s = s

__slots__ = ['n', 's']
Other free things not shown in this example:
__str__
__eq__: Compare object instances for equality by their attributes
__hash__ if you also use frozen=True: Object of custom type as dictionary key
Tested on Python 3.10.7, Ubuntu 22.10.
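For completeness, a small sketch of the frozen=True variant mentioned above (my illustration, not from the original answer):
from dataclasses import dataclass

@dataclass(frozen=True, slots=True)
class Point:
    x: int
    y: int

p = Point(1, 2)
lookup = {p: 'origin-ish'}  # frozen=True makes instances hashable
print(lookup[Point(1, 2)])  # origin-ish -- __eq__ and __hash__ come for free
# p.x = 3 would raise dataclasses.FrozenInstanceError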
