I'm implementing, for fun and profit, a data structure that allows fast additive range updates:
class RAUQ:
""" Allow 'l[s:e] += v' update and 'a[i]' query in O(log n)
>>> l = RAUQ([0, 10, 20]) ; l
[0, 10, 20]
>>> l[1]
10
>>> l[2] += 10 ; l
[0, 10, 30]
>>> l[0:2] += 3 ; l
[3, 13, 30]
    >>> l[1:10] -= 4 ; l # Supports the usual out-of-bounds slices
[3, 9, 26]
"""
According to the disassembled bytecode, the l[i] += v expression is translated to:
l.__setitem__(i, l.__getitem__(i).__iadd__(v))
which I find pretty weird (in-place add, and then a set anyway?).
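A quick way to see that both calls really happen is a small probe class that logs the dunder calls (my own illustration, not part of the original disassembly):
class Probe(dict):
    def __getitem__(self, key):
        print('__getitem__', key)
        return super().__getitem__(key)
    def __setitem__(self, key, value):
        print('__setitem__', key, value)
        super().__setitem__(key, value)
p = Probe(x=1)
p['x'] += 1
# __getitem__ x
# __setitem__ x 2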
So, S.O., what would be a nice and pythonic way to implement this?
Here is what I came up with. Does the job, but feels hackish.
class RAUQ:
def __init__(self, iterable):
        # Stripped-down example;
        # the actual implementation uses a segment tree.
self.l = list(iterable)
def __getitem__(self, i):
if isinstance(i, slice):
return _view(self, i)
return self.l[i]
def __setitem__(self, i, val):
if isinstance(i, slice):
""" No-op: work already done in view"""
return self
self.l[i] = val
return self
def __str__(self):
return str(_view(self, slice(None)))
__repr__ = __str__
class _view:
def __init__(self, parent, i):
        # Generic implementation, not designed for a single index.
assert isinstance(i, slice)
self.l = parent.l
self.i = i
def __iter__(self):
return iter(self.l[self.i])
def update(self, val):
""" Add val to all element of the view """
self.l[self.i] = [x+val for x in self]
def __iadd__(self, val):
self.update(val)
return self
def __isub__(self, val):
self.update(-val)
return self
def __str__(self):
return str(list(self))
__repr__ = __str__
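For what it's worth, the slice case only needs the no-op __setitem__ because the augmented assignment expands roughly like this (my own paraphrase of the translation quoted above):
l = RAUQ([0, 10, 20])
# l[0:2] += 3  is roughly equivalent to:
view = l.__getitem__(slice(0, 2))  # returns a _view over l
view = view.__iadd__(3)            # the view mutates l in place and returns itself
l.__setitem__(slice(0, 2), view)   # gets the same view back and ignores it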
Related
In the following code the class gets a list as an argument. The object has a "length" attribute (the length of that list).
The __len__() method must return the value of "length":
class Gen:
def __init__(self, lst, length ):
self.lst = lst # list
self.length = length
def show(self):
print("List:", self.lst)
def __len__(self):
# will return 'length's value
pass
x = Gen([1, 2, 3, 4])
x.show()
You can access the length of your attribute 'lst' through "self". Also, because length is derived from that attribute, you could define it as a property (or not even declare it at all):
class Gen:
def __init__(self, lst):
self.lst = lst # list
def show(self):
print("List:", self.lst)
def __len__(self):
return len(self.lst)
x = Gen([1, 2, 3, 4])
x.show()
print(len(x)) # prints 4
If you still want to use the length attribute, you can do this:
class Gen:
def __init__(self, lst):
self.lst = lst # list
self.length = len(lst)
def show(self):
print("List:", self.lst)
def __len__(self):
        return self.length
x = Gen([1, 2, 3, 4])
x.show()
print(len(x)) # prints 4
Note that when you later modify lst, the length attribute is not updated.
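For example, with the class just above:
x = Gen([1, 2, 3, 4])
x.lst.append(5)
print(len(x.lst))  # 5
print(x.length)    # still 4
print(len(x))      # still 4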
If you still want to have a length property (which feels rather Java-like, since in Python you would normally just use len()), you can do this:
class Gen:
def __init__(self, lst):
self.lst = lst # list
    @property
def length(self):
return len(self.lst)
def show(self):
print("List:", self.lst)
def __len__(self):
        return self.length
x = Gen([1, 2, 3, 4])
x.show()
print(len(x)) # prints 4
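Because length is now computed on demand, it stays in sync when lst changes:
x = Gen([1, 2, 3, 4])
x.lst.append(5)
print(x.length)  # 5
print(len(x))    # 5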
Anyway, there are many possible variants for your question.
I think you want something like this:
class Gen:
def __init__(self, lst):
self.lst = lst # list
self.length = len(self.lst)
def show(self):
print("List:", self.lst)
print("List Length:", self.length)
def __len__(self):
# will return 'length's value
        return self.length
x = Gen([1, 2, 3, 4])
x.show()
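For completeness, len() then returns the stored length:
print(len(x))  # 4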
Let's take as an example this classic solution to the problem of updating dependent object attributes:
class SomeClass(object):
    def __init__(self, n):
        self.list = list(range(0, n))
    @property
    def list(self):
        return self._list
    @list.setter
    def list(self, val):
        self._list = list(val)
        self._listsquare = [x**2 for x in self._list]
    @property
    def listsquare(self):
        return self._listsquare
    @listsquare.setter
    def listsquare(self, val):
        self.list = [int(pow(x, 0.5)) for x in val]
It works as required: when a new value is set for one attribute, the other attribute is updated:
>>> c = SomeClass(5)
>>> c.listsquare
[0, 1, 4, 9, 16]
>>> c.list
[0, 1, 2, 3, 4]
>>> c.list = range(0,6)
>>> c.list
[0, 1, 2, 3, 4, 5]
>>> c.listsquare
[0, 1, 4, 9, 16, 25]
>>> c.listsquare = [x**2 for x in range(0,10)]
>>> c.list
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
But what if we mutate the list attribute instead of assigning it a new value?
>>> c.list[0] = 10
>>> c.list
[10, 1, 2, 3, 4, 5, 6, 7, 8, 9] # this is ok
>>> c.listsquare
[0, 1, 4, 9, 16, 25, 36, 49, 64, 81] # we would like 100 as first element
We would like the listsquare attribute to be updated accordingly, but that doesn't happen because the setter is not invoked when we mutate the list attribute.
Of course we could force the update by explicitly invoking the setter after we modify the attribute, for example by doing:
>>> c.list[0] = 10
>>> c.list = c.list  # invoke setter
>>> c.listsquare
[100, 1, 4, 9, 16, 25, 36, 49, 64, 81]
but it looks somewhat cumbersome and error-prone for the user; we would prefer that it happen implicitly.
What would be the most Pythonic way to have an attribute updated when another, mutable attribute is modified? How can the object know that one of its attributes has been mutated?
So, as Davis Herring was saying in the comments, this is eminently possible but not nearly as clean. You essentially have to build your own custom data structure that maintains two lists in parallel, each one aware of the other, so that if one is updated, the other is also updated. Below is my shot at doing that, which took, um, a little longer than expected. Seems to work, but I haven't comprehensively tested it.
I've chosen to inherit from collections.UserList here. The other option would be to inherit from collections.abc.MutableSequence, which has various pros and cons compared to UserList.
from __future__ import annotations
from collections import UserList
from abc import abstractmethod
from typing import (
Sequence,
TypeVar,
Generic,
Optional,
Union,
Any,
Iterable,
overload,
cast
)
### ABSTRACT CLASSES ###
# Initial type
I = TypeVar('I')
# Transformed type
T = TypeVar('T')
# Return type for methods that return self
C = TypeVar('C', bound="AbstractListPairItem[Any, Any]")
class AbstractListPairItem(UserList[I], Generic[I, T]):
"""Base class for AbstractListPairParent and AbstractListPairChild"""
__slots__ = '_other_list'
_other_list: AbstractListPairItem[T, I]
# UserList inherits from `collections.abc.MutableSequence`,
# which has `abc.ABCMeta` as its metaclass,
    # so the @abstractmethod decorator works fine.
    @abstractmethod
def __init__(self, initlist: Optional[Iterable[I]] = None) -> None:
# We inherit from UserList, which stores the sequence as a `list`
# in a `data` instance attribute
super().__init__(initlist)
    @staticmethod
    @abstractmethod
def transform(value: I) -> T: ...
    @overload
    def __setitem__(self, index: int, value: I) -> None: ...
    @overload
    def __setitem__(self, index: slice, value: Iterable[I]) -> None: ...
def __setitem__(
self,
index: Union[int, slice],
value: Union[I, Iterable[I]]
) -> None:
super().__setitem__(index, value) # type: ignore[index, assignment]
if isinstance(index, int):
value = cast(I, value)
self._other_list.data[index] = self.transform(value)
        elif isinstance(index, slice):
            value = cast(Iterable[I], value)
            # Normalise the slice (its attributes may be None); this assumes
            # the assignment does not change the length of the list.
            for i, val in zip(range(*index.indices(len(self.data))), value):
                self._other_list.data[i] = self.transform(val)
else:
raise NotImplementedError
# __getitem__ doesn't need to be altered
def __delitem__(self, index: Union[int, slice]) -> None:
super().__delitem__(index)
del self._other_list.data[index]
def __add__(self, other: Iterable[I]) -> list[I]: # type: ignore[override]
# Return a normal list rather than an instance of this class
return self.data + list(other)
def __radd__(self, other: Iterable[I]) -> list[I]:
# Return a normal list rather than an instance of this class
return list(other) + self.data
def __iadd__(self: C, other: Union[C, Iterable[I]]) -> C:
if isinstance(other, type(self)):
self.data += other.data
self._other_list.data += other._other_list.data
else:
new = list(other)
self.data += new
self._other_list.data += [self.transform(x) for x in new]
return self
def __mul__(self, n: int) -> list[I]: # type: ignore[override]
# Return a normal list rather than an instance of this class
return self.data * n
__rmul__ = __mul__
def __imul__(self: C, n: int) -> C:
self.data *= n
self._other_list.data *= n
return self
def append(self, item: I) -> None:
super().append(item)
self._other_list.data.append(self.transform(item))
def insert(self, i: int, item: I) -> None:
super().insert(i, item)
self._other_list.data.insert(i, self.transform(item))
def pop(self, i: int = -1) -> I:
del self._other_list.data[i]
return self.data.pop(i)
def remove(self, item: I) -> None:
i = self.data.index(item)
del self.data[i]
del self._other_list.data[i]
def clear(self) -> None:
super().clear()
self._other_list.data.clear()
def copy(self) -> list[I]: # type: ignore[override]
# Return a copy of the underlying data, NOT a new instance of this class
return self.data.copy()
def reverse(self) -> None:
super().reverse()
        self._other_list.data.reverse()
def sort(self, /, *args: Any, **kwds: Any) -> None:
super().sort(*args, **kwds)
for i, elem in enumerate(self):
self._other_list.data[i] = self.transform(elem)
def extend(self: C, other: Union[C, Iterable[I]]) -> None:
self.__iadd__(other)
# Initial type for the parent, transformed type for the child.
X = TypeVar('X')
# Transformed type for the parent, initial type for the child.
Y = TypeVar('Y')
# Return type for methods returning self
P = TypeVar('P', bound='AbstractListPairParent[Any, Any]')
class AbstractListPairParent(AbstractListPairItem[X, Y]):
__slots__: Sequence[str] = tuple()
child_cls: type[AbstractListPairChild[Y, X]] = NotImplemented
    def __new__(cls: type[P], initlist: Optional[Iterable[X]] = None) -> P:
        if cls.child_cls is NotImplemented:
            raise NotImplementedError(
                "'AbstractListPairParent' subclasses must define a 'child_cls' attribute"
            )
        return super().__new__(cls)  # type: ignore[no-any-return]
def __init__(self, initlist: Optional[Iterable[X]] = None) -> None:
super().__init__(initlist)
self._other_list = self.child_cls(
self,
[self.transform(x) for x in self.data]
)
class AbstractListPairChild(AbstractListPairItem[Y, X]):
__slots__: Sequence[str] = tuple()
def __init__(
self,
parent: AbstractListPairParent[X, Y],
initlist: Optional[Iterable[Y]] = None
) -> None:
super().__init__(initlist)
self._other_list = parent
### CONCRETE IMPLEMENTATION ###
# Return type for methods returning self
L = TypeVar('L', bound='ListKeepingTrackOfSquares')
# We have to define the child before we define the parent,
# since the parent creates the child
class SquaresList(AbstractListPairChild[int, int]):
__slots__: Sequence[str] = tuple()
_other_list: ListKeepingTrackOfSquares
    @staticmethod
    def transform(value: int) -> int:
        return int(pow(value, 0.5))
    @property
def sqrt(self) -> ListKeepingTrackOfSquares:
return self._other_list
class ListKeepingTrackOfSquares(AbstractListPairParent[int, int]):
__slots__: Sequence[str] = tuple()
_other_list: SquaresList
child_cls = SquaresList
    @classmethod
    def from_squares(cls: type[L], child_list: Iterable[int]) -> L:
        return cls([cls.child_cls.transform(x) for x in child_list])
    @staticmethod
    def transform(value: int) -> int:
        return value ** 2
    @property
def squared(self) -> SquaresList:
return self._other_list
class SomeClass:
def __init__(self, n: int) -> None:
self.list = range(0, n) # type: ignore[assignment]
    @property
    def list(self) -> ListKeepingTrackOfSquares:
        return self._list
    @list.setter
    def list(self, val: Iterable[int]) -> None:
        self._list = ListKeepingTrackOfSquares(val)
    @property
    def listsquare(self) -> SquaresList:
        return self.list.squared
    @listsquare.setter
    def listsquare(self, val: Iterable[int]) -> None:
        self.list = ListKeepingTrackOfSquares.from_squares(val)
s = SomeClass(10)
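Continuing from the s created above, a quick sanity check (my own addition) that mutation now propagates in both directions:
s.list[0] = 10
print(s.listsquare[0])  # 100
s.listsquare[1] = 49
print(s.list[1])        # 7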
I would like to be able to unpack an object of a dict-like class.
current:
f(**m.to_dict())
preferred
f(**m)
This would work if something like __starstarprepare__ existed:
class M:
#... __getitem__, __setitem__
def __starstarprepare__(self):
md = self.to_dict()
return md
You can use collections.abc.Mapping.
from collections.abc import Mapping
class M(Mapping):
def __iter__(self):
return iter(self.to_dict())
def __getitem__(self, item):
return self.to_dict()[item]
def __len__(self):
return len(self.to_dict())
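For instance, a minimal self-contained sketch (the to_dict body here is just a stand-in for whatever your class already returns):
from collections.abc import Mapping
class M(Mapping):
    def to_dict(self):
        return {'a': 3, 'b': 5}  # hypothetical payload
    def __iter__(self):
        return iter(self.to_dict())
    def __getitem__(self, item):
        return self.to_dict()[item]
    def __len__(self):
        return len(self.to_dict())
def f(**kwargs):
    print(kwargs)
f(**M())  # {'a': 3, 'b': 5}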
** works with any mapping type. One way to make M a mapping type is to subclass collections.abc.Mapping and implement __getitem__, __iter__, and __len__:
from collections.abc import Mapping
class M(Mapping):
def __init__(self):
self.a = 3
self.b = 5
def __getitem__(self, key):
return getattr(self, key)
def __iter__(self):
yield 'a'
yield 'b'
def __len__(self):
return 2
def foo(**kwargs):
for k, v in kwargs.items():
print(k, v)
m = M()
foo(**m)
If you already have a to_dict method, all three of the magic methods can be wrappers around the corresponding dict methods.
class M(Mapping):
def __init__(self):
self.a = 3
self.b = 5
def to_dict(self):
return {'a': self.a, 'b': self.b}
def __getitem__(self, key):
return self.to_dict()[key]
def __iter__(self):
return iter(self.to_dict())
def __len__(self):
return len(self.to_dict())
Solution due to @peter:
class M:
# ... __getitem__ and other functions
def keys(self):
k = self.to_dict().keys()
return k
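This works because, in a call, ** unpacking only needs keys() and __getitem__, which is exactly what this solution relies on. A minimal self-contained sketch (again, the to_dict body is just a stand-in):
class M:
    def to_dict(self):
        return {'a': 3, 'b': 5}  # hypothetical payload
    def __getitem__(self, key):
        return self.to_dict()[key]
    def keys(self):
        return self.to_dict().keys()
def f(**kwargs):
    print(kwargs)
f(**M())  # {'a': 3, 'b': 5}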
I'm wondering whether there is a way to create a list whose elements can be replaced, but only by values of the same type.
for instance
a=[0, 1.0, 'blabla']
a[0] = 0 # is possible
a[1] = 2. # is possible
a[2] = 'albalb' # is possible
a[0] = 1.2 # is not possible
a[1] = 'no' # is not possible
a[2] = 1 # is not possible
I cannot use a tuple for this because it is immutable.
My goal is to create a list whose number of values can vary, so append, insert and pop will be useful. I also want slicing available in order to select part of the list.
In the end, the list will contain instances of my own classes, which describe neuronal models. Different models are possible, hence different classes.
With this list, I would like to do everything we can do with regular lists, but I don't want the type of an element to change, except when I insert a neuron at a position; in that case, every element after that position is shifted to the right.
for instance:
class A():
def __init__(self):
self.A = 0
class B():
def __init__(self):
self.A = 1
class C():
def __init__(self):
self.A = 2
class D():
def __init__(self):
self.A = 3
MyList = [A(),B(),C()]
print([M.A for M in MyList])
#insert
MyList.insert(1,D())
print([M.A for M in MyList])
#slicing
MyList2 = MyList[1:3]
print([M.A for M in MyList2])
# replace, allowed only if the new value has the same type as the element it replaces
MyList[0] = A()
print([M.A for M in MyList])
#So this should not be possible
MyList[0] = B()
print([M.A for M in MyList])
I would like something really close to the built-in list, so I expected that something like this might already exist.
Solution 1: wrap all methods which modify the list in place, and override __setitem__()
class RewritingLockedTypeList(list):
def __init__(self, original_list):
super().__init__(original_list)
self.types = [type(n) for n in original_list]
def __setitem__(self, key, value):
if self.types[key] != type(value):
raise TypeError(f"Value at index {key} should be {self.types[key]}!")
super().__setitem__(key, value)
def wrap_method(method_name):
orig_method = getattr(RewritingLockedTypeList, method_name)
def new_method(self, *args, **kwargs):
result = orig_method(self, *args, **kwargs)
self.types = [type(n) for n in self]
return result
setattr(RewritingLockedTypeList, method_name, new_method)
for method in ["append", "clear", "extend", "insert", "pop", "remove", "reverse", "sort"]:
wrap_method(method)
Solution 2: override all methods which modify the list in place, and override __setitem__() too
class LockedTypeList(list):
def __init__(self, original_list):
super().__init__(original_list)
self.types = [type(n) for n in original_list]
def __setitem__(self, key, value):
if self.types[key] != type(value):
raise TypeError(f"Value at index {key} should be {self.types[key]}!")
super().__setitem__(key, value)
def __delitem__(self, key):
del self.types[key]
super().__delitem__(key)
def append(self, thing):
self.types.append(type(thing))
super().append(thing)
def clear(self):
self.types.clear()
super().clear()
def extend(self, objects):
self.types.extend(type(o) for o in objects)
super().extend(objects)
def insert(self, idx, obj):
self.types.insert(idx, type(obj))
super().insert(idx, obj)
    def pop(self, index=-1):
        self.types.pop(index)
        return super().pop(index)
def remove(self, value):
idx = self.index(value)
self.pop(idx)
def reverse(self):
self.types.reverse()
super().reverse()
def sort(self, key=lambda n: n, reverse=False):
super().sort(key=key, reverse=reverse)
self.types = [type(n) for n in self]
The second solution is longer, but faster for long lists.
Usage
a=LockedTypeList([0, 1.0, 'blabla'])
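and then, for example:
a[1] = 2.5        # fine: a float replaces a float
a.append('end')   # growing the list is allowed, with any type
a[0] = 1.2        # TypeError: Value at index 0 should be <class 'int'>!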
But you should perhaps think about using a class with type-checked properties instead of this ugly list.
I want to write a program that accepts as input a number p and produces as output a type-constructor for a number that obeys integer arithmetic modulo p.
So far I have
def IntegersModP(p):
N = type('IntegersMod%d' % p, (), {})
def __init__(self, x): self.val = x % p
def __add__(a, b): return N(a.val + b.val)
... (more functions) ...
attrs = {'__init__': __init__, '__add__': __add__, ... }
for name, f in attrs.items():
setattr(N, name, f)
return N
This works fine, but I'd like to know what the Pythonic way to do this is, which I understand would use metaclasses.
Like this:
def IntegerModP(p): # class factory function
class IntegerModP(object):
def __init__(self, x):
self.val = x % p
def __add__(a, b):
return IntegerModP(a.val + b.val)
def __str__(self):
return str(self.val)
def __repr__(self):
return '{}({})'.format(self.__class__.__name__, self.val)
IntegerModP.__name__ = 'IntegerMod%s' % p # rename created class
return IntegerModP
IntegerMod4 = IntegerModP(4)
i = IntegerMod4(3)
j = IntegerMod4(2)
print(i + j)        # 1
print(repr(i + j))  # IntegerMod4(1)
Metaclasses are for when your class needs to behave differently from a normal class or when you want to alter the behavior of the class statement. Neither of those apply here, so there's really no need to use a metaclass. In fact, you could just have one ModularInteger class with instances that record their value and modulus, but assuming you don't want to do that, it's still easy to do this with an ordinary class statement:
def integers_mod_p(p):
class IntegerModP(object):
def __init__(self, n):
self.n = n % IntegerModP.p
def typecheck(self, other):
try:
if self.p != other.p:
raise TypeError
except AttributeError:
raise TypeError
def __add__(self, other):
self.typecheck(other)
return IntegerModP(self.n + other.n)
def __sub__(self, other):
...
IntegerModP.p = p
IntegerModP.__name__ = 'IntegerMod{}'.format(p)
return IntegerModP
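For completeness, a quick usage sketch (using only what is defined above, since __sub__ is elided):
IntegerMod5 = integers_mod_p(5)
a = IntegerMod5(7)
b = IntegerMod5(4)
c = a + b
print(c.n, type(c).__name__)  # 1 IntegerMod5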