Controlling the setting of indexes of attributes with __setattr__ in classes - python

If I have:
class myclass():
    def __init__(self, x, list):
        self.x = x
        self.list = list

    def __setattr__(self, name, value):
        self.__dict__[name] = value
        # some stuff
and you do
instance = myclass(3, ['a', 'b'])
instance.x = 5
instance.list[1] = 'c'
The __setattr__ method will be called for the first assignment but not for the second, since the second only sets an index within an attribute rather than the attribute itself.
Is there any way to control what happens when an index of an attribute list is changed or is it just bad practice to have a list as an attribute in this way?
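One way to get that control (a minimal sketch, not taken from the answer below; NotifyingList and the _on_item_set hook are names made up for illustration) is to store a small list subclass that calls back into its owner whenever an item is assigned:

class NotifyingList(list):
    # A list that tells its owner when one of its items is replaced.
    def __init__(self, iterable=(), owner=None, name=None):
        super().__init__(iterable)
        self._owner = owner
        self._name = name

    def __setitem__(self, index, value):
        super().__setitem__(index, value)
        if self._owner is not None:
            self._owner._on_item_set(self._name, index, value)  # hypothetical hook

class MyClass:
    def __init__(self, x, items):
        self.x = x
        self.items = NotifyingList(items, owner=self, name='items')

    def _on_item_set(self, name, index, value):
        print("%s[%r] set to %r" % (name, index, value))

instance = MyClass(3, ['a', 'b'])
instance.items[1] = 'c'   # prints: items[1] set to 'c'

The fuller example below takes the same idea further, using small proxy objects plus descriptors so that both item access and attribute access go through code you control.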

Here's an example that might do what you want:
import re

def c_set_port_state(portn, ports, values):
    print(portn, ports, values)

class Out:
    def __init__(self, obj):
        self.obj = obj
    def __getitem__(self, i):
        return self.obj._out[i]
    def __setitem__(self, i, value):
        self.obj._out[i] = value
        self.obj.set_out({i: value})

class In:
    def __init__(self, obj):
        self.obj = obj
    def __getitem__(self, i):
        return self.obj.get_in(i)

def _outidx0(key):
    idx = re.match(r'Out(\d)', key).group(1)
    idx0 = int(idx) - 1
    return idx0

class _port_descriptor:
    def __init__(self, name):
        self.name = name
        self.idx0 = _outidx0(name)
    def __get__(self, obj, objtype):
        return obj.Out[self.idx0]
    def __set__(self, obj, value):
        obj.Out[self.idx0] = value
class Ports:
    """
    This is an example for dynamic attributes
    - that figure in dir() and
    - that IPython can auto-complete
    >>> ports = Ports()
    >>> ports.Out3=0
    1 dict_keys([2]) dict_values([0])
    >>> ports.Out4=1
    1 dict_keys([3]) dict_values([1])
    >>> dir(ports) # doctest: +ELLIPSIS
    ['In', 'Out', 'Out1', 'Out2', 'Out3', 'Out4', 'Out5', 'Out6', 'Out7', 'Out8', ...
    >>> ports.set_out(Out7=3) # doctest: +ELLIPSIS
    Traceback (most recent call last):
    ...
    ValueError: Output ...
    >>> ports.Out7=3 # doctest: +ELLIPSIS
    Traceback (most recent call last):
    ...
    ValueError: Output ...
    >>> ports.set_out(Out7=0)
    1 dict_keys([6]) dict_values([0])
    >>> ports.set_out({0:0,1:0,2:1})
    3 dict_keys([0, 1, 2]) dict_values([0, 0, 1])
    >>> ports.Out[2]
    1
    >>> ports.Out[2] = 0
    1 dict_keys([2]) dict_values([0])
    >>> ports.In[1]
    0
    """
    def __init__(self):
        self._out = dict((i, 0) for i in range(8))
        self.Out = Out(self)
        self.In = In(self)
        for i in range(8):
            name = 'Out' + str(i + 1)
            pd = _port_descriptor(name)
            setattr(self.__class__, name, pd)

    def set_out(self, out=None, **kwargs):
        if out:
            self._out.update(out)
        if kwargs:
            out = {_outidx0(k): v for k, v in kwargs.items()}
        if not out:
            out = self._out
        if any((value not in [0, 1]) for value in out.values()):
            raise ValueError('Output values can only be 0 or 1, but you provided ' + str(out))
        c_set_port_state(len(out), out.keys(), out.values())

    def get_in(self, i):
        return 0  # would call an according c function, too

How do I use the python __get__ descriptor to output values of list elements?

How can I use the get descriptor to output values of list elements?
class X(object):
    def __init__(self, value):
        self.value = value
    def __get__(self, obj, objtype):
        return self.value

class Y(object):
    a = X(1)
    b = X(2)
    c = [X(3), X(4)]

y = Y()
print(y.a)
print(y.b)
print(y.c[0])
Output:
1
2
<__main__.X object at ...>
Desired Output:
1
2
3
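The underlying issue: __get__ is only invoked by the descriptor protocol, i.e. when the X instance is found as a class attribute during attribute lookup. y.c[0] is plain list indexing, so X.__get__ never runs. A minimal demonstration of the difference (an illustrative sketch, not part of the original question):

class X(object):
    def __init__(self, value):
        self.value = value
    def __get__(self, obj, objtype=None):
        return self.value

class Y(object):
    a = X(1)      # class attribute: attribute lookup triggers X.__get__
    c = [X(3)]    # element of a plain list: indexing does not

y = Y()
print(y.a)            # 1, the descriptor was invoked
print(type(y.c[0]))   # <class '__main__.X'>, the descriptor was not invoked
print(y.c[0].value)   # 3, reached manually

The answer below works around this by making the container itself (Z) unwrap its items.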
This snippet could bring you closer, but it's not quite the same: Z subclasses list and defines __get__ so that it can act as a descriptor.
class X(object):
    def __init__(self, value):
        self.value = value
    def __get__(self, obj, objtype):
        return self.value
    def __repr__(self):
        return "X(%r)" % self.value

class Z(list):
    def __get__(self, obj, objtype):
        return self
    def __getitem__(self, index):
        """override brackets operator, suggested by Azat Ibrakov"""
        list_item = super(Z, self).__getitem__(index)
        try:
            return list_item.value
        except AttributeError:
            return list_item

class _LiteralForContainerDescriptorZ(object):
    def __getitem__(self, keys):
        """override brackets operator, based on https://stackoverflow.com/a/37259917/2823074"""
        if not isinstance(keys, tuple):
            keys = (keys,)
        assert not any(isinstance(key, slice) for key in keys)  # avoid e.g. ZL[11:value, key:23, key2:value2]
        return Z(keys)
ZL = _LiteralForContainerDescriptorZ()
Using _LiteralForContainerDescriptorZ is optional; it just gives slightly nicer syntax.
class Y(object):
    a = X(1)
    b = X(2)
    c = Z([X(3.14), X(4)])  # define 'c' using constructor of Z class inherited from list
    d = ZL[X(3.14), X(4)]   # define 'd' using custom literal

y = Y()
for statement_to_print in [
        "y.a", "y.b", "y.c", "y.d", "y.c[0]", "y.c[1]", "y.d[0]",
]:
    value = eval(statement_to_print)
    print("{st:9} = {ev:<16} # type: {tp}".format(
        st=statement_to_print, ev=value, tp=type(value).__name__))
Calling it, the prints are:
y.a       = 1                # type: int
y.b       = 2                # type: int
y.c       = [X(3.14), X(4)]  # type: Z
y.d       = [X(3.14), X(4)]  # type: Z
y.c[0]    = 3.14             # type: float
y.c[1]    = 4                # type: int
y.d[0]    = 3.14             # type: float

Is there a way to support weakrefs with collections.namedtuple?

I want to use a weak reference to a namedtuple, but it fails:
>>> import collections
>>> import weakref
>>>
>>> Foo = collections.namedtuple('Foo','a b c')
>>> weakref.ref(Foo(1,2,3))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
TypeError: cannot create weak reference to 'Foo' object
I tried to fix this with __slots__ = ('__weakref__',) but this fails also:
>>> class Foo(collections.namedtuple('Foo','a b c')):
... __slots__ = ('__weakref__',)
...
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
TypeError: Error when calling the metaclass bases
nonempty __slots__ not supported for subtype of 'Foo'
Is there a workaround?
Short answer
Unfortunately, non-empty __slots__ flat-out don't work with any kind of tuple, not just namedtuple().
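For instance, the same failure shows up for a plain tuple subclass (a quick CPython session; the exact wording may vary between versions):

>>> class T(tuple):
...     __slots__ = ('__weakref__',)
...
Traceback (most recent call last):
  ...
TypeError: nonempty __slots__ not supported for subtype of 'tuple'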
Workaround
The best you can do is to have a class that doesn't inherit from tuple.
I wrote one for you (see below). It is used like this:
from __future__ import print_function
from altnamedtuple import AltNamedTuple
from weakref import proxy
class Point(AltNamedTuple):
    __slots__ = ('x', 'y', '__weakref__')
    def __init__(self, x, y):
        self.x = x
        self.y = y
# Now, exercise the capabilities of the named tuple alternative
p = Point(10, 20)
r = proxy(p)
print(len(r)) # sizeable
print(r[0]) # indexed access
print(r.y) # attribute access
print(list(r)) # iterable
x, y = r # unpackable
print(x, y)
print(20 in r) # membership testing
print(tuple(reversed(p))) # reversible
print(r == (10, 20)) # equality
print(r != (30, 40)) # inequality
print(hash(p)) # hashable
print(r) # nice repr
print(r._asdict()) # conversion to a dict
print(r._replace(y=2))
t = (11, 22)
print(r.count(10))
print(r.index(20))
print(Point._make(t)) # alternative constructor
Note: weakref proxies don't pass through calls to __hash__ or __reversed__. That's an intrinsic limitation that no class can work around.
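For example, hashing the proxy itself fails even though Point defines __hash__ (behaviour observed in CPython, which documents weakref proxies as unhashable; the exact message may vary):

>>> p = Point(10, 20)
>>> r = proxy(p)
>>> hash(r)
Traceback (most recent call last):
  ...
TypeError: unhashable type: 'weakproxy'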
Source code for AltNamedTuple
Here's the class that does all the work:
class AltNamedTuple(object):
    "Subclasser needs to define: __slots__ and __init__"
    __slots__ = ()
    def __getattr__(self, attr):
        if attr != '_fields':
            raise AttributeError(attr)
        if '__weakref__' in self.__slots__:
            return self.__slots__[:-1]
        return self.__slots__
    def __len__(self):
        return len(self._fields)
    def __getitem__(self, index):
        attr = self._fields[index]
        return getattr(self, attr)
    def __iter__(self):
        for attr in self._fields:
            yield getattr(self, attr)
    def __reversed__(self):
        return iter(reversed(tuple(self)))
    def __eq__(self, other):
        return tuple(self) == tuple(other)
    def __ne__(self, other):
        return tuple(self) != tuple(other)
    def __hash__(self):
        return hash(tuple(self))
    def __repr__(self):
        pairs = ['%s=%r' % (a, getattr(self, a)) for a in self._fields]
        return ('%s(%s)' % (self.__class__.__name__, ', '.join(pairs)))
    @classmethod
    def _make(cls, iterable):
        return cls(*iterable)
    def _asdict(self):
        return dict(zip(self._fields, self))
    def _replace(_self, **kwds):
        d = _self._asdict()
        d.update(kwds)
        return _self.__class__(**d)
    def count(self, value):
        return tuple(self).count(value)
    def index(self, value):
        return tuple(self).index(value)

Python set attribute equal to another attribute

Is it possible to have a class attribute targeting another attribute from the same object and have a function to update the value of the target?
class MyObject(object):
    def __init__(self):
        self.var_1 = 1
        self.var_2 = 2
        self.var_3 = 3
        self.current_var = self.var_1

    def update_var(self, value):
        self.current_var = ...
Expected outcome:
>>> x = MyObject()
>>> x.update_var(10)
>>> x.var_1
10
>>> x.current_var = x.var_2
>>> x.update_var(5)
>>> x.var_2
5
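The core difficulty is that self.current_var = self.var_1 merely copies the value 1; Python has no pointers to attributes. The simplest fix (a bare-bones sketch of the idea the answers below elaborate on) is to store the name of the target attribute and go through setattr:

class MyObject(object):
    def __init__(self):
        self.var_1 = 1
        self.var_2 = 2
        self.var_3 = 3
        self.current_var = 'var_1'   # name of the attribute to update

    def update_var(self, value):
        setattr(self, self.current_var, value)

x = MyObject()
x.update_var(10)
print(x.var_1)            # 10
x.current_var = 'var_2'
x.update_var(5)
print(x.var_2)            # 5

The answers below offer variations on this: an Enum instead of raw strings, a property that hides the indirection, and a wrapper object that both names can share.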
You can use the object's __dict__ or, as @bla said, setattr, together with an Enum so you don't have to use a raw string to specify the attribute:
from enum import Enum

class MyObject(object):
    def __init__(self):
        self.var_1 = 1
        self.var_2 = 2
        self.var_3 = 3
        self.current_var = None

    def update_var(self, value):
        if self.current_var is None:
            raise Exception('Current var is not set')
        self.__dict__[self.current_var.name] = value
        setattr(self, self.current_var.name, value)  # Same result
m = MyObject()
attrs = vars(m)
attrs_enum = Enum("attrs_enum", attrs)
m.var_1 # 1
m.current_var = attrs_enum.var_1
m.update_var(10)
m.var_1 # 10
m.current_var = attrs_enum.var_2
m.var_2 # 2
m.update_var(20)
m.var_2 # 20
I don't like using a string to specify the attribute, but this is a workable solution.
I suggest making current_var a property that acts as a proxy for a given instance attribute. You can use set_current_var to update the proxy target.
Code
class MyObject(object):
    def __init__(self):
        self.var_1 = 1
        self.var_2 = 2
        self.var_3 = 3
        self._current_var = 'var_1'  # default proxy target

    def set_current_var(self, name):
        self._current_var = name

    @property
    def current_var(self):
        return getattr(self, self._current_var)

    @current_var.setter
    def current_var(self, value):
        setattr(self, self._current_var, value)
Example
x = MyObject()
print(x.var_1) # 1
x.set_current_var('var_1')
print(x.current_var) # 1
x.current_var = 4
print(x.var_1) # 4
You can create a wrapper class for your MyObject attribute values. That way, current_var and the attribute stored in __dict__ both hold a reference to the same wrapper object:
class _int:
    def __init__(self, _val):
        self.value = _val
    def __repr__(self):
        return str(self.value)

class MyObject(object):
    def __init__(self):
        self.var_1 = _int(1)
        self.var_2 = _int(2)
        self.var_3 = _int(3)
        self.current_var = self.var_1

    def update_var(self, value):
        self.current_var.value = value

x = MyObject()
x.update_var(10)
print(x.var_1)
x.current_var = x.var_2
x.update_var(5)
print(x.var_2)
Output:
10
5
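One trade-off with this approach (an observation, not part of the original answer): x.var_1 is now an _int wrapper rather than a plain int, so code that computes with it has to go through .value unless the wrapper also implements the numeric special methods:

x = MyObject()
print(x.var_1.value + 1)   # 2; x.var_1 + 1 would raise TypeError because _int defines no __add__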

How should I deal with this recursive behaviour?

Suppose I had a class 'A' and nested within 'A' was class 'B' and both objects were for use publicly...
class A(object):
    class B(object):
        def __init__(self):
            self._b = dict()
            self.a = A()
        def __setitem__(self, key, value):
            # set self._b
            self.a[key] = value  # recursion ??

    def __init__(self):
        self._a = dict()
        self.b = A.B()

    def __setitem__(self, key, value):
        # set self._a
        self.b[key] = value  # recursion ??
Now when doing...
d = A()
d['sap'] = 'nin'
A.__setitem__ will call B.__setitem__, which will call A.__setitem__ and so on...
If this is the desired interface, what would be the best way to deal with this recursive behaviour?
for more context, I've posted on codereview here...
https://codereview.stackexchange.com/questions/85842/a-dictionary-that-allows-multiple-keys-for-one-value
I've tried using another nested class to pass to other internal functions so that they can determine whether it was called from the user, or internally from other functions...
class A(object):
    class _Container(object):
        def __init__(self, _object):
            self.object = _object

    class B(object):
        def __init__(self, d, **kwargs):
            if isinstance(d, A._Container):
                self.a = d.object
            else:
                self.a = A(A._Container(self))

    def __init__(self, d, **kwargs):
        if isinstance(d, A._Container):
            self.b = d.object
        else:
            self.b = A.B(A._Container(self))
This works for init, but I'm not sure I'd like to do that with setitem and all other methods.
I've also tried delegating all or most of the work to class 'A' so essentially class 'B''s methods act as hooks...
class A(object):
    class B(object):
        def __setitem__(self, key, value):
            # some functionality
            self.a[key] = value

    def __setitem__(self, key, value):
        self._a[key] = value
        self._b[key] = value
If it helps, here is the code I'm working on atm.
# -*- coding: utf-8 -*-
class mkdict(object):
""" A dictionary that allows multiple keys for one value """
class _Container(object):
""" This is used to wrap an object to avoid infinite
recursion when calling my own methods from the inside.
If a method sees this container, it assumes it has been
called from the inside and not the user.
"""
def __init__(self, _object):
self.object = _object
class dict(object):
""" Interface for mkdict.dict for dict-like behaviour """
def __init__(self, d={}, **kwargs):
""" Using an mkdict._Container to avoid infinite
recursion when allowing:
>>> d = mkdict({'what': 'ever'})
>>> d = mkdict.dict({'what': 'ever'})
"""
if isinstance(d, mkdict._Container):
self.mkdict = d.object
else:
self.mkdict = mkdict(mkdict._Container(self))
self.update(d, **kwargs)
def __str__(self):
return str(self.mkdict._dict)
def __repr__(self):
return str(self)
def __len__(self):
return len(self.mkdict._dict)
def __setitem__(self, key, value):
""" Desired behaviour:
>>> d = mkdict()
>>>
>>> d['what', 'ever'] = 'testing'
>>> d
{'what': 'testing', 'ever': 'testing'}
>>> d.dict
{('what', 'ever'): 'testing'}
>>> d['what'] is d['ever']
True
>>>
>>> d.dict['what'] = 'new value'
>>> d
{'what': 'new value', 'ever': 'testing'}
>>> d.dict
{'what': 'new value', 'ever': 'testing'}
>>> d['what'] is d['ever']
False
"""
if key not in self and key in self.mkdict:
self.mkdict._key_already_set(key)
self.mkdict[key] = value
def __getitem__(self, key):
return self.mkdict._dict[key]
def __contains__(self, key):
return key in self.mkdict._dict
def __delitem__(self, key):
if key not in self:
raise KeyError(key)
if isinstance(key, tuple):
key = key[0]
del self.mkdict[key]
def clear(self):
self.mkdict.clear()
def update(self, d, **kwargs):
if isinstance(d, mkdict.dict):
d = d.mkdict._dict
elif isinstance(d, mkdict):
d = d._dict
d.update(kwargs)
for k, v in d.items():
self[k] = v
class _FullKeyPtr(object):
""" Desired behaviour:
full_key_ptr1 = _FullKeyPtr()
mkdict._key_map -> {'key1', full_key_ptr1,
'key2', full_key_ptr1}
>>> d = mkdict()
>>> d['what', 'ever'] = 'testing'
>>> d._key_map
>>>
>>> # d._key_map:
>>> # {'what': full_key_ptr1, 'ever': full_key_ptr1}
>>> d._key_map
>>> {'what': ('what', 'ever'), 'ever': ('what', 'ever')}
>>>
>>> d['what']
>>> 'testing'
>>>
>>> # full_key = _key_map['ever'].full_key
>>> # i.e. full_key = ('what', 'ever')
>>> # _dict[full_key] = 'test'
>>> d['ever'] = 'test'
>>>
>>>
>>> d['what']
>>> 'test'
"""
def __init__(self, full_key):
self.full_key = full_key
def __str__(self):
return str(self.full_key)
def __repr__(self):
return str(self)
def __init__(self, d={}, **kwargs):
self._dict = dict()
self._key_map = dict()
self._dict_backup = None
self._key_map_backup = None
if isinstance(d, mkdict._Container):
self.dict = d.object
else:
self.dict = mkdict.dict(mkdict._Container(self))
self.update(d, **kwargs)
def __str__(self):
return str(dict(self.items()))
def __repr__(self):
return str(self)
def __len__(self):
return len(self._key_map)
def __iter__(self):
return iter(self.keys())
def __getitem__(self, key):
full_key = self.full_key(key)
return self.dict[full_key]
def __setitem__(self, key, value):
""" Desired behaviour:
>>> d = mkdict()
>>> d['what', 'ever'] = 'testing'
>>>
>>> d
{'what': 'testing', 'ever': 'testing'}
>>>
>>> d.dict
{('what', 'ever'): 'testing'}
>>> d['what'] is d['ever']
True
>>>
>>> d['what'] = 'new value'
>>> d
{'what': 'new value', 'ever': 'new value'}
>>>
>>> d.dict
{('what', 'ever'): 'new value'}
>>> d['what'] is d['ever']
True
"""
if key in self:
key = self.full_key(key)
if key not in self._dict:
if isinstance(key, tuple):
full_key_ptr = self._FullKeyPtr(key)
for k in key:
if k in self:
self._key_already_set(k)
self._key_map[k] = full_key_ptr
else:
self._key_map[key] = self._FullKeyPtr(key)
self._dict[key] = value
def __delitem__(self, key):
full_key = self.full_key(key)
if isinstance(full_key, tuple):
for k in full_key:
del self._key_map[k]
else:
del self._key_map[full_key]
del self._dict[full_key]
def __contains__(self, key):
return key in self._key_map
def items(self):
return [(k, self[k]) for k, v in self._key_map.items()]
def iteritems(self):
return iter(self.items())
def update(self, d={}, **kwargs):
if isinstance(d, mkdict.dict):
d = d.mkdict._dict
elif isinstance(d, mkdict):
d = d._dict
d.update(kwargs)
for k, v in d.items():
self[k] = v
def clear(self):
self._dict.clear()
self._key_map.clear()
def keys(self):
return self._key_map.keys()
def full_key(self, key):
return self._key_map[key].full_key
def has_key(self, key):
return key in self
def append(self, key, otherkey):
pass
def remove(self, key):
full_key = self.full_key(key)
if not isinstance(full_key, tuple):
del self._dict[full_key]
del self._key_map[full_key]
return
new_full_key = list(full_key)
new_full_key.remove(key)
if len(new_full_key) == 1:
new_full_key = new_full_key[0]
else:
new_full_key = tuple(new_full_key)
self._dict[new_full_key] = self.dict[full_key]
del self._dict[full_key]
self._key_map[key].full_key = new_full_key
del self._key_map[key]
def aliases(self, key):
full_key = self.full_key(key)
if isinstance(full_key, tuple):
aliases = list(full_key)
aliases.remove(key)
return aliases
return list()
def backup(self):
pass
def revert(self):
pass
def _key_already_set(self, key):
self.remove(key)
Desired behaviour for above code:
>>> d = mkdict()
>>>
>>> d['-p', '--port'] = 1234
>>> d
{'-p': 1234, '--port': 1234}
>>> d.dict
{('-p', '--port'): 1234}
>>>
>>> d['-p'] = 5678
>>> d
{'-p': 5678, '--port': 5678}
>>> d['--port'] is d['-p']
True
>>> d.aliases('-p')
['--port']
>>>
>>> d.dict['-p'] = 1234
>>> d
{'-p': 1234, '--port': 5678}
>>> d.dict
{'-p': 1234, '--port': 5678}
>>>
>>> d['-p'] is d['--port']
False
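A common way to break this kind of mutual recursion (a sketch of the general pattern, not code from the answer below) is to make exactly one private method responsible for actually storing data, share a single A instance instead of constructing new ones, and have both public __setitem__ methods delegate to that private method:

class A(object):
    class B(object):
        def __init__(self, owner):
            self._owner = owner             # share the existing A, don't build a new one
        def __setitem__(self, key, value):
            # B-specific bookkeeping would go here
            self._owner._store(key, value)  # call the private helper, not A.__setitem__

    def __init__(self):
        self._a = dict()
        self.b = A.B(self)

    def _store(self, key, value):           # the only place that mutates the data
        self._a[key] = value

    def __setitem__(self, key, value):
        # A-specific bookkeeping would go here
        self._store(key, value)

d = A()
d['sap'] = 'nin'
d.b['foo'] = 'bar'
print(d._a)   # {'sap': 'nin', 'foo': 'bar'}

Because only _store writes, and neither public method calls the other public method, the call chain always terminates.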
I've come up with an alternative solution, which is not so elegant (it's a hack), but it should work as you've described. It overrides __getitem__ and __setitem__, and adds a minimal_get method to recover the tuple-keyed representation.
from operator import itemgetter
from itertools import groupby
import logging

class mydict(dict):
    def __init__(self, **kwargs):
        super(mydict, self).__init__(**kwargs)
        # self.d = {}

    def __getitem__(self, item):
        if isinstance(item, tuple):
            d = {}
            for an_item in item:
                d[an_item] = self.__getitem__(an_item)
            return d
        else:
            return super(mydict, self).__getitem__(item)

    def __setitem__(self, keys, value, _depth=0):
        if isinstance(keys, tuple) and _depth == 0:
            if isinstance(value, tuple):
                if len(keys) == len(value):
                    pass
                else:
                    value = len(keys) * (value,)
            else:
                value = len(keys) * (value,)
            for an_item in zip(keys, value):
                self.__setitem__(an_item[0], an_item[1], _depth=1)
        else:
            super(mydict, self).__setitem__(keys, value)

    def minimal_get(self):
        x = {}
        for item in groupby(sorted(self.items(), key=itemgetter(1)), key=itemgetter(1)):
            keys = []
            try:
                while item[1]:
                    keys.append(item[1].next()[0])
            except StopIteration:
                logging.info("StopIteration")
            x[tuple(keys)] = item[0]
        return x
dic = mydict()
dic["one"] = 1
dic["seven"] = 2
print "dic is", dic
dic["two", "three", "four"] = [1, 2, 3]
print "dic.minimal_get() is ", dic.minimal_get()
print "dic is", dic
dic["two", "ten"] = "lol"
print "dic[\"one\", \"two\"] is ", dic["one", "two"]
print "dic[\"three\"] is", dic["three"]
print "dic.minimal_get() is ", dic.minimal_get()
dic[("x", "z",), "h"] = (1, 2, 3)
print "dic is", dic
print "dic.minimal_get() is ", dic.minimal_get()

Default constructor parameters in pyyaml

I haven't been able to find out how to do this in the PyYAML documentation. I want to represent python classes I've defined in YAML, and have a default value given to a parameter in the constructor if it's not specified in the YAML. For example:
>>> class Test(yaml.YAMLObject):
... yaml_tag = u"!Test"
... def __init__(self, foo, bar=3):
... self.foo = foo
... self.bar = bar
... def __repr__(self):
... return "%s(foo=%r, bar=%r)" % (self.__class__.__name__, self.foo, self.bar)
...
>>> yaml.load("""
... --- !Test
... foo: 5
... """)
Traceback (most recent call last):
File "<stdin>", line 4, in <module>
File "<stdin>", line 7, in __repr__
AttributeError: 'Test' object has no attribute 'bar'
I expected that it would create a Test object with bar=3, but I guess it bypasses my constructor when it creates the object. If I include a mapping for bar in the YAML, everything works as expected:
>>> yaml.load("""
... --- !Test
... foo: 5
... bar: 42
... """)
Test(foo=5, bar=42)
Does anyone know how I can have it use a default value?
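The reason the constructor is bypassed: yaml.YAMLObject's default from_yaml builds the instance through the loader's generic object construction (essentially cls.__new__ plus setting the instance state), so __init__ and its defaults never run. Overriding from_yaml to call the constructor restores the defaults. A minimal sketch (assuming a reasonably recent PyYAML; the explicit Loader argument is only needed on newer versions):

import yaml

class Test(yaml.YAMLObject):
    yaml_tag = u"!Test"

    def __init__(self, foo, bar=3):
        self.foo = foo
        self.bar = bar

    def __repr__(self):
        return "%s(foo=%r, bar=%r)" % (self.__class__.__name__, self.foo, self.bar)

    @classmethod
    def from_yaml(cls, loader, node):
        # Build keyword arguments from the mapping and go through __init__,
        # so missing keys pick up the constructor defaults.
        return cls(**loader.construct_mapping(node))

print(yaml.load("--- !Test\nfoo: 5\n", Loader=yaml.Loader))   # Test(foo=5, bar=3)

The answers below show the same idea with a plain constructor function, a registering metaclass, and an introspection-based from_yaml.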
I encountered the same problem: yaml_tag doesn't work for some reason, so I used an alternative approach:
import yaml

def constructor(loader, node):
    fields = loader.construct_mapping(node)
    return Test(**fields)

yaml.add_constructor('!Test', constructor)

class Test(object):
    def __init__(self, foo, bar=3):
        self.foo = foo
        self.bar = bar
    def __repr__(self):
        return "%s(foo=%r, bar=%r)" % (self.__class__.__name__, self.foo, self.bar)

print yaml.load("""
- !Test { foo: 1 }
- !Test { foo: 10, bar: 20 }""")
Output:
[Test(foo=1, bar=3), Test(foo=10, bar=20)]
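One caveat worth adding (not part of the original answer): if the YAML mapping itself contains nested tagged objects, loader.construct_mapping(node) can hand you not-yet-populated placeholders; passing deep=True makes PyYAML construct the nested nodes first:

def constructor(loader, node):
    fields = loader.construct_mapping(node, deep=True)  # deep=True: build nested objects before this one
    return Test(**fields)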
Based on alexanderlukanin13's answer. Here's my cut.
import yaml

YAMLObjectTypeRegistry = {}

def register_type(target):
    if target.__name__ in YAMLObjectTypeRegistry:
        print "{0} already in registry.".format(target.__name__)
    elif 'yaml_tag' not in target.__dict__.keys():
        print target.__dict__
        raise TypeError("{0} must have yaml_tag attribute".format(
            target.__name__))
    elif target.__dict__['yaml_tag'] is None:
        pass
    else:
        YAMLObjectTypeRegistry[target.__name__] = target
        yaml.add_constructor(
            target.__dict__['yaml_tag'],
            lambda loader, node: target(**loader.construct_mapping(node)))
        print "{0} added to registry.".format(target.__name__)

class RegisteredYAMLObjectType(type):
    def __new__(meta, name, bases, class_dict):
        cls = type.__new__(meta, name, bases, class_dict)
        register_type(cls)
        return cls

class RegisteredYAMLObject(object):
    __metaclass__ = RegisteredYAMLObjectType
    yaml_tag = None
You can then use it like this:
class MyType(registry.RegisteredYAMLObject):
    yaml_tag = u'!mytype'
    def __init__(self, name, attr1='default1', attr2='default2'):
        super(MyType, self).__init__()
        self.name = name
        self.attr1 = attr1
        self.attr2 = attr2
The answers above work well, but here is a way to make initialisation work fully with a class based approach:
import inspect
from itertools import zip_longest

import yaml

UNSPECIFIED = object()

class SomeYAMLObject(yaml.YAMLObject):
    @classmethod
    def from_yaml(cls, loader, node):
        arg_spec = inspect.getfullargspec(cls.__init__)
        arg_spec.args.remove("self")
        # pair positional argument names with their defaults (defaults align to the tail)
        arg_defaults = reversed(list(
            zip_longest(
                reversed(arg_spec.args),
                reversed(arg_spec.defaults or []),
                fillvalue=UNSPECIFIED)))
        # keyword-only defaults come in a dict keyed by argument name
        kwarg_defaults = [
            (a, (arg_spec.kwonlydefaults or {}).get(a, UNSPECIFIED))
            for a in arg_spec.kwonlyargs]
        node_mapping = loader.construct_mapping(node)
        used_nodes = set()
        # fill args first
        args = []
        for a, d in arg_defaults:
            if a in node_mapping:
                args.append(node_mapping[a])
                used_nodes.add(a)
            elif d is not UNSPECIFIED:
                args.append(d)
            else:
                raise Exception(f"Tag {cls.yaml_tag} is missing '{a}' argument")
        # then kwargs
        kwargs = {}
        for a, d in kwarg_defaults:
            if a in node_mapping:
                kwargs[a] = node_mapping[a]
                used_nodes.add(a)
            elif d is not UNSPECIFIED:
                kwargs[a] = d
        # if it accepts additional kwargs, fill with leftover mapping entries
        if arg_spec.varkw and len(used_nodes) != len(node_mapping):
            for k, v in node_mapping.items():
                if k not in used_nodes:
                    kwargs[k] = v
        return cls(*args, **kwargs)
It's a bit lengthy, but gives a nice error if required positional arguments without a default value are missing.
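A quick usage sketch for the class above (assuming the imports shown with it and PyYAML's default Loader; the Endpoint class is made up for illustration):

class Endpoint(SomeYAMLObject):
    yaml_tag = u'!Endpoint'

    def __init__(self, host, port=8080):
        self.host = host
        self.port = port

    def __repr__(self):
        return "Endpoint(host=%r, port=%r)" % (self.host, self.port)

print(yaml.load("!Endpoint {host: example.com}", Loader=yaml.Loader))
# Endpoint(host='example.com', port=8080)

Because port is absent from the YAML but has a default in __init__, from_yaml fills it in instead of raising.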
