I want to use a weak reference to a namedtuple, but it fails:
>>> import collections
>>> import weakref
>>>
>>> Foo = collections.namedtuple('Foo','a b c')
>>> weakref.ref(Foo(1,2,3))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
TypeError: cannot create weak reference to 'Foo' object
I tried to fix this with __slots__ = ('__weakref__',) but this fails also:
>>> class Foo(collections.namedtuple('Foo','a b c')):
... __slots__ = ('__weakref__',)
...
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
TypeError: Error when calling the metaclass bases
nonempty __slots__ not supported for subtype of 'Foo'
Is there a workaround?
Short answer
Unfortunately, non-empty __slots__ flat-out don't work with any kind of tuple, not just namedtuple().
Workaround
The best you can do is to have a class that doesn't inherit from tuple.
I wrote one for you (see below). It is used like this:
from __future__ import print_function
from altnamedtuple import AltNamedTuple
from weakref import proxy
class Point(AltNamedTuple):
    # Field names plus '__weakref__' so instances can be weakly referenced.
    __slots__ = ('x', 'y', '__weakref__')

    def __init__(self, x, y):
        # Subclasses of AltNamedTuple must assign every declared field.
        self.x = x
        self.y = y
# Now, exercise the capabilities of the named tuple alternative
p = Point(10, 20)
r = proxy(p)
print(len(r)) # sizeable
print(r[0]) # indexed access
print(r.y) # attribute access
print(list(r)) # iterable
x, y = r # unpackable
print(x, y)
print(20 in r) # membership testing
print(tuple(reversed(p))) # reversible
print(r == (10, 20)) # equality
print(r != (30, 40)) # inequality
print(hash(p)) # hashable
print(r) # nice repr
print(r._asdict()) # conversion to a dict
print(r._replace(y=2))
t = (11, 22)
print(r.count(10))
print(r.index(20))
print(Point._make(t)) # alternative constructor
Note: weakref proxies don't pass through calls to __hash__ or __reversed__. That's an intrinsic limitation that no class can work around.
Source code for AltNamedTuple
Here's the class that does all the work:
class AltNamedTuple(object):
    """Namedtuple-like base class that supports weak references.

    Subclasses must define __slots__ (the field names, optionally
    including '__weakref__') and an __init__ that assigns every field.
    """
    __slots__ = ()

    def __getattr__(self, attr):
        # Only synthesize the _fields pseudo-attribute; anything else
        # really is missing, so raise AttributeError as usual.
        if attr != '_fields':
            raise AttributeError(attr)
        # '__weakref__' is storage, not data. The original sliced off the
        # last slot, silently breaking if '__weakref__' was not listed
        # last; filtering works regardless of position.
        return tuple(s for s in self.__slots__ if s != '__weakref__')

    def __len__(self):
        return len(self._fields)

    def __getitem__(self, index):
        attr = self._fields[index]
        return getattr(self, attr)

    def __iter__(self):
        for attr in self._fields:
            yield getattr(self, attr)

    def __reversed__(self):
        return iter(reversed(tuple(self)))

    def __eq__(self, other):
        return tuple(self) == tuple(other)

    def __ne__(self, other):
        return tuple(self) != tuple(other)

    def __hash__(self):
        return hash(tuple(self))

    def __repr__(self):
        pairs = ['%s=%r' % (a, getattr(self, a)) for a in self._fields]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(pairs))

    @classmethod  # was '#classmethod': the decorator had lost its '@'
    def _make(cls, iterable):
        "Alternative constructor: build an instance from any iterable."
        return cls(*iterable)

    def _asdict(self):
        "Return the fields as a {name: value} dict."
        return dict(zip(self._fields, self))

    def _replace(_self, **kwds):
        "Return a new instance with the given fields replaced."
        d = _self._asdict()
        d.update(kwds)
        return _self.__class__(**d)

    def count(self, value):
        return tuple(self).count(value)

    def index(self, value):
        return tuple(self).index(value)
Related
This question already has answers here:
Overriding special methods on an instance
(5 answers)
Closed 3 years ago.
I have a couple classes and a function:
from functools import partial
def fn(other, self, name):
    """Look up the magic method *name* on self.a and invoke it with *other*."""
    print(f"calling {name} with {other}")
    method = getattr(self.a, name)
    return method(other)
class A:
    """Toy operand class: each operator returns a fixed sentinel value."""

    def __add__(self, other):
        return 9

    def __mul__(self, other):
        return 7

    def __sub__(self, other):
        return 8
class B:
    """Wrapper that installs delegating __add__/__sub__ on each *instance*.

    Note: since special methods are looked up on the type, these instance
    attributes are found by explicit calls like b.__add__(3), but not by
    the ``b + 3`` operator syntax -- which is the behavior the question
    demonstrates.
    """

    def __init__(self, a):
        self.a = a
        for op in ['add', 'sub']:
            dunder = f"__{op}__"
            bound = partial(fn, self=self, name=dunder)
            bound.__name__ = dunder
            setattr(self, dunder, bound)
I want to be able to forward the magic methods to an existing property. I don't want to inherit the class because I don't want all the builtins, just a couple. For instance I might want to use multiply from a different class. I'm trying to avoid coding like this:
def __add__(self, other):
self.a.__add__(other)
using the above code I receive the following:
>>> b = B(A())
>>> b + 3
TypeError Traceback (most recent call last)
<ipython-input-40-fa904b7bb783> in <module>
----> 1 b + 3
2
TypeError: unsupported operand type(s) for +: 'B' and 'int'
>>> b.__add__(3)
calling __add__ with 3
9
Maybe I'm missing something simple but I can't find a way to dynamically add the builtin function.
The main problem to get around is that magic methods like __add__ are looked up on the class, not on the object itself; otherwise you could just write self.__add__ = a.__add__ in the __init__ method. To get around this, we need to declare methods on the class B, not on individual instances of it.
The function delegate defined below works by adding a method to the B class. This method takes self which will be a B instance, so it has to dynamically load the a attribute and then its __add__ method.
class A:
    # Each operator returns a distinct constant so delegation is easy
    # to observe in the examples below.
    def __add__(self, other):
        return 9

    def __mul__(self, other):
        return 7

    def __sub__(self, other):
        return 8
class B:
    """Thin wrapper holding the delegation target as attribute ``a``."""

    def __init__(self, a):
        self.a = a
def delegate(cls, attr_name, method_name):
    """Install *method_name* on *cls*, forwarding calls to the method of
    the same name on ``getattr(self, attr_name)``."""
    def delegated(self, *args, **kwargs):
        target = getattr(self, attr_name)
        forwarded = getattr(target, method_name)
        return forwarded(*args, **kwargs)
    setattr(cls, method_name, delegated)
delegate(B, 'a', '__add__')
delegate(B, 'a', '__sub__')
Example:
>>> b = B(A())
>>> b + 3
9
>>> b - 4
8
Proxying __dunder__ methods is tricky. I would use a descriptor object, which will work much more cleanly with the potential arcana of attribute access than other approaches.
class Proxy:
    """Descriptor forwarding attribute access to the owner's ``_proxy``."""

    def __set_name__(self, owner, name):
        # Remember which attribute name this descriptor was assigned to.
        self.attr = name

    def __get__(self, obj, objtype=None):
        # Class-level access returns the descriptor itself.
        if obj is None:
            return self
        try:
            target = obj._proxy
        except AttributeError:
            raise AttributeError('tried to access proxy field on object with no _proxy')
        return getattr(target, self.attr)
class A:
    """Operand stub: +, * and - return 9, 7 and 8 respectively."""

    def __add__(self, other):
        return 9
    def __mul__(self, other):
        return 7
    def __sub__(self, other):
        return 8
class B:
    """Holds the delegate in ``a`` and mirrors it in ``_proxy`` so the
    Proxy descriptors can reach it."""

    __add__ = Proxy()
    __sub__ = Proxy()

    def __init__(self, a):
        self.a = a
        self._proxy = self.a
b = B(A())
An example in the ipython repl:
In [6]: b = B(A())
In [7]: b + b
Out[7]: 9
In [8]: b - b
Out[8]: 8
In [9]: b * b
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-19-cb34cccc83f5> in <module>
----> 1 b * b
TypeError: unsupported operand type(s) for *: 'B' and 'B'
If you wanted to expand on this approach, Proxy could take a fieldname from which it proxies off of, so you could have something like:
class Proxy:
    """Descriptor that forwards an attribute lookup through a named field
    of the owner instance (e.g. proxy __add__ through ``self.foo``).

    Args:
        proxy_field: name of the instance attribute holding the target.
    """

    def __init__(self, proxy_field):
        # Bug fix: the original stored this as 'prox_field' but read it
        # back as 'proxy_field' in __get__, so every access raised
        # AttributeError.
        self.proxy_field = proxy_field

    def __set_name__(self, owner, name):
        # Remember which attribute name this descriptor serves.
        self.attr = name

    def __get__(self, obj, objtype=None):
        # Class-level access returns the descriptor itself.
        if obj is None:
            return self
        try:
            proxy = getattr(obj, self.proxy_field)
        except AttributeError:
            raise AttributeError(f'tried to access proxy field on object with no {self.proxy_field} attribute')
        return getattr(proxy, self.attr)
class B:
    """Routes __add__ through ``foo`` and __sub__ through ``bar``."""

    def __init__(self, foo, bar):
        self.foo = foo
        self.bar = bar

    __add__ = Proxy('foo')
    __sub__ = Proxy('bar')
I have a few classes each of which has a number of attributes. What all of the attributes have in common is that they should be numeric properties. This seems to be an ideal place to use python's decorators, but I can't seem to wrap my mind around what the correct implementation would be. Here is a simple example:
class Junk(object):
    """Example class with a validated integer property.

    Fixes to the original: the decorators had lost their '@'
    ('#property', '#var.setter', '#var.deleter'), and the setter/deleter
    functions were named 'size', which would have registered the
    accessors under the wrong attribute name.
    """

    def __init__(self, var):
        self._var = var

    @property
    def var(self):
        """A numeric variable"""
        return self._var

    @var.setter
    def var(self, value):
        # need to make sure var is an integer
        if not isinstance(value, int):
            raise ValueError("var must be an integer, var = {}".format(value))
        self._var = value

    @var.deleter
    def var(self):
        raise RuntimeError("You can't delete var")
It seems to me that it should be possible to write a decorator that does everything so that the above can be transformed into:
def numeric_property(*args, **kwargs):
    """Placeholder for the desired decorator: wrap a getter function into
    a property that validates assignments as numeric (implementation
    intentionally elided in the question)."""
    ...


class Junk(object):
    """Sketch of how the decorator would be used."""

    def __init__(self, var):
        self._var = var

    @numeric_property  # was '#numeric_property': decorator lost its '@'
    def var(self):
        """A numeric variable"""
        return self._var
That way the new numeric_property decorator can be used in many classes.
A @property is just a special case of Python's descriptor protocol, so you can certainly build your own custom versions. For your case:
class NumericProperty:
    """A property that must be numeric.

    Args:
        attr (str): The name of the backing attribute.
    """

    def __init__(self, attr):
        self.attr = attr

    def __get__(self, obj, type=None):
        return getattr(obj, self.attr)

    def __set__(self, obj, value):
        # Accept integers only; store on the backing attribute.
        if isinstance(value, int):
            setattr(obj, self.attr, value)
        else:
            raise ValueError("{} must be an integer, var = {!r}".format(self.attr, value))

    def __delete__(self, obj):
        raise RuntimeError("You can't delete {}".format(self.attr))
class Junk:
    """Example: ``var`` is validated by NumericProperty, backed by ``_var``."""

    var = NumericProperty('_var')

    def __init__(self, var):
        # Goes through the descriptor, so the initial value is validated.
        self.var = var
In use:
>>> j = Junk('hi')
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/Users/jonrsharpe/test.py", line 29, in __init__
self.var = var
File "/Users/jonrsharpe/test.py", line 17, in __set__
raise ValueError("{} must be an integer, var = {!r}".format(self.attr, value))
ValueError: _var must be an integer, var = 'hi'
>>> j = Junk(1)
>>> del j.var
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/Users/jonrsharpe/test.py", line 21, in __delete__
raise RuntimeError("You can't delete {}".format(self.attr))
RuntimeError: You can't delete _var
>>> j.var = 'hello'
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/Users/jonrsharpe/test.py", line 17, in __set__
raise ValueError("{} must be an integer, var = {!r}".format(self.attr, value))
ValueError: _var must be an integer, var = 'hello'
>>> j.var = 2
>>> j.var
2
Option 1: inherit from property
property is a descriptor. See Descriptor HowTo on python.org.
So, can inherit from property and override the relevant methods.
For example, to enforce int on setter:
class numeric_property(property):
    """property subclass whose setter insists on int values."""

    def __set__(self, obj, value):
        # Validate, then hand off to the normal property machinery.
        assert isinstance(value, int), "numeric_property requires an int"
        super(numeric_property, self).__set__(obj, value)
class A(object):
    """Example using numeric_property; the original had lost the '@' on
    both decorators ('#numeric_property', '#x.setter')."""

    @numeric_property
    def x(self):
        return self._x

    @x.setter
    def x(self, value):
        self._x = value
And now you have integers enforced:
>>> a = A()
>>> a.x = 'aaa'
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "<stdin>", line 3, in __set__
AssertionError: numeric_property requires an int
Option 2: Create a better descriptor
On the other hand, it may be even better to implement a brand new descriptor which does not inherit from property, which would enable you to define the property in one go.
It would be nicer to have this kind of interface:
class A(object):
x = numeric_property('_x')
For that you would implement a descriptor which takes the attribute name:
class numeric_property(object):
    """Descriptor enforcing int values, stored on a named private attribute.

    Args:
        private_attribute_name: instance attribute used as backing store.
        default: value returned before any assignment.
    """

    def __init__(self, private_attribute_name, default=0):
        self.private_attribute_name = private_attribute_name
        self.default = default

    def __get__(self, obj, typ):
        # Bug fix: the original tested 'if not obj', which also triggered
        # for *falsy instances* (e.g. ones whose __len__ returns 0).
        # Only class-level access (obj is None) should return the
        # descriptor itself.
        if obj is None:
            return self
        return getattr(obj, self.private_attribute_name, self.default)

    def __set__(self, obj, value):
        assert isinstance(value, int), "numeric_property requires an int"
        setattr(obj, self.private_attribute_name, value)
Disclaimer :)
I would rather not enforce strict typing in Python, because Python is much more powerful without it.
You may just create a function that does it for you . As simple as it can get, no need to create a custom descriptor:
def numprop(name, privname):
    """Build a validated-integer property.

    Args:
        name: public name, used in error messages.
        privname: backing instance attribute name.

    Fixes to the original: all three decorators had lost their '@', and
    the deleter message hard-coded 'var' instead of using *name*.
    """
    @property
    def _numprop(self):
        return getattr(self, privname)

    @_numprop.setter
    def _numprop(self, value):
        if not isinstance(value, int):
            raise ValueError("{name} must be an integer, {name} = {}".format(value, name=name))
        setattr(self, privname, value)

    @_numprop.deleter
    def _numprop(self):
        raise RuntimeError("You can't delete {}".format(name))

    return _numprop


class Junk(object):
    def __init__(self, var):
        # Assigns the backing attribute directly, bypassing validation
        # (matching the original question's __init__).
        self._var = var

    var = numprop("var", "_var")
If I have:
class myclass():
    """Demo class whose __setattr__ writes straight into __dict__."""

    def __init__(self, x, list):
        self.x = x
        self.list = list

    def __setattr__(self, name, value):
        # Runs for attribute assignment only -- not for item assignment
        # on a contained list (e.g. instance.list[1] = 'c').
        self.__dict__[name] = value
        # some stuff
and you do
instance = myclass(3, ['a', 'b'])
instance.x = 5
instance.list[1] = 'c'
The __setattr__ method will be called for the first one but not the second, as only an index of an attribute is being set instead of an attribute.
Is there any way to control what happens when an index of an attribute list is changed or is it just bad practice to have a list as an attribute in this way?
Here's an example that might do what you want:
import re
def c_set_port_state(portn, ports, values):
    """Stand-in for the real C binding: just echo the arguments."""
    print(portn, ports, values)
class Out:
    """Indexable view over ``obj._out`` that pushes writes to the device."""

    def __init__(self, obj):
        self.obj = obj

    def __getitem__(self, i):
        return self.obj._out[i]

    def __setitem__(self, i, value):
        self.obj._out[i] = value
        # Mirror the change to the hardware via set_out.
        self.obj.set_out({i: value})
class In:
    """Read-only indexable view delegating to ``obj.get_in``."""

    def __init__(self, obj):
        self.obj = obj

    def __getitem__(self, i):
        return self.obj.get_in(i)
def _outidx0(key):
    """Map a port name like 'Out3' to its zero-based index (2)."""
    digit = re.match(r'Out(\d)', key).group(1)
    return int(digit) - 1
class _port_descriptor:
    """Descriptor exposing one output port (e.g. 'Out3') as an attribute."""

    def __init__(self, name):
        self.name = name
        # Cache the zero-based index derived from the name.
        self.idx0 = _outidx0(name)

    def __get__(self, obj, objtype):
        return obj.Out[self.idx0]

    def __set__(self, obj, value):
        obj.Out[self.idx0] = value
class Ports:
    """
    This is an example for dynamic attributes
    - that figure in dir() and
    - that IPython can auto-complete
    >>> ports = Ports()
    >>> ports.Out3=0
    1 dict_keys([2]) dict_values([0])
    >>> ports.Out4=1
    1 dict_keys([3]) dict_values([1])
    >>> dir(ports) # doctest: +ELLIPSIS
    ['In', 'Out', 'Out1', 'Out2', 'Out3', 'Out4', 'Out5', 'Out6', 'Out7', 'Out8', ...
    >>> ports.set_out(Out7=3) # doctest: +ELLIPSIS
    Traceback (most recent call last):
    ...
    ValueError: Output ...
    >>> ports.Out7=3 # doctest: +ELLIPSIS
    Traceback (most recent call last):
    ...
    ValueError: Output ...
    >>> ports.set_out(Out7=0)
    1 dict_keys([6]) dict_values([0])
    >>> ports.set_out({0:0,1:0,2:1})
    3 dict_keys([0, 1, 2]) dict_values([0, 0, 1])
    >>> ports.Out[2]
    1
    >>> ports.Out[2] = 0
    1 dict_keys([2]) dict_values([0])
    >>> ports.In[1]
    0
    """
    def __init__(self):
        # Eight output ports, all initially low (0), keyed by 0-based index.
        self._out = dict((i,0) for i in range(8))
        self.Out = Out(self)
        self.In = In(self)
        # Install Out1..Out8 descriptors on the *class* so they appear in
        # dir() and can be auto-completed.
        for i in range(8):
            name = 'Out'+str(i+1)
            pd = _port_descriptor(name)
            setattr(self.__class__, name, pd)

    def set_out(self, out=None, **kwargs):
        # 'out' is a {index: value} dict; OutN=value keyword arguments
        # are translated to indices below.  Values must be 0 or 1.
        if out:
            self._out.update(out)
        if kwargs:
            out = {_outidx0(k):v for k,v in kwargs.items()}
        if not out:
            # No arguments at all: resend the whole cached state.
            out = self._out
        if any((value not in [0,1]) for value in out.values()):
            raise ValueError('Output values can only be 0 or 1, but you provided ' + str(out))
        c_set_port_state(len(out),out.keys(),out.values())

    def get_in(self, i):
        # would call an according C function, too
        return 0
I haven't been able to find out how to do this in the PyYAML documentation. I want to represent python classes I've defined in YAML, and have a default value given to a parameter in the constructor if it's not specified in the YAML. For example:
>>> class Test(yaml.YAMLObject):
... yaml_tag = u"!Test"
... def __init__(self, foo, bar=3):
... self.foo = foo
... self.bar = bar
... def __repr__(self):
... return "%s(foo=%r, bar=%r)" % (self.__class__.__name__, self.foo, self.bar)
...
>>> yaml.load("""
... --- !Test
... foo: 5
... """)
Traceback (most recent call last):
File "<stdin>", line 4, in <module>
File "<stdin>", line 7, in __repr__
AttributeError: 'Test' object has no attribute 'bar'
I expected that it would create a Test object with bar=3, but I guess it bypasses my constructor when it creates the object. If I include a mapping for bar in the YAML, everything works as expected:
>>> yaml.load("""
... --- !Test
... foo: 5
... bar: 42
... """)
Test(foo=5, bar=42)
Does anyone know how I can have it use a default value?
I encountered the same problem: yaml_tag doesn't work for some reason. So I used alternative approach:
import yaml
def constructor(loader, node):
    """PyYAML constructor for !Test: build a Test from the node mapping,
    letting __init__ supply defaults for missing keys."""
    fields = loader.construct_mapping(node)
    return Test(**fields)
yaml.add_constructor('!Test', constructor)
class Test(object):
    """Simple object with a required ``foo`` and a defaulted ``bar``."""

    def __init__(self, foo, bar=3):
        self.foo = foo
        self.bar = bar

    def __repr__(self):
        return "%s(foo=%r, bar=%r)" % (self.__class__.__name__, self.foo, self.bar)
print yaml.load("""
- !Test { foo: 1 }
- !Test { foo: 10, bar: 20 }""")
Output:
[Test(foo=1, bar=3), Test(foo=10, bar=20)]
Based on alexanderlukanin13's answer. Here's my cut.
import yaml
YAMLObjectTypeRegistry = {}
def register_type(target):
    # Register *target* for YAML construction, keyed by class name.
    # NOTE(review): this is Python 2 code (print statements; the caller
    # uses __metaclass__) -- it will not run unchanged on Python 3.
    if target.__name__ in YAMLObjectTypeRegistry:
        print "{0} already in registry.".format(target.__name__)
    elif 'yaml_tag' not in target.__dict__.keys():
        # The class itself (not a base) must declare yaml_tag.
        print target.__dict__
        raise TypeError("{0} must have yaml_tag attribute".format(
            target.__name__))
    elif target.__dict__['yaml_tag'] is None:
        # Base classes opt out of registration by setting yaml_tag = None.
        pass
    else:
        YAMLObjectTypeRegistry[target.__name__] = target
        # Build kwargs from the node mapping so __init__ defaults apply.
        yaml.add_constructor(
            target.__dict__['yaml_tag'],
            lambda loader, node: target(**loader.construct_mapping(node)))
        print "{0} added to registry.".format(target.__name__)
class RegisteredYAMLObjectType(type):
    """Metaclass that auto-registers each new class via register_type."""

    def __new__(meta, name, bases, class_dict):
        new_cls = type.__new__(meta, name, bases, class_dict)
        register_type(new_cls)
        return new_cls
class RegisteredYAMLObject(object):
    # Python 2 style metaclass hookup.
    __metaclass__ = RegisteredYAMLObjectType
    # None opts this base class out of registration (see register_type).
    yaml_tag = None
You can then use it like this:
class MyType(registry.RegisteredYAMLObject):
    # NOTE(review): 'registry' is presumably the module holding
    # RegisteredYAMLObject -- confirm the import path.
    yaml_tag = u'!mytype'

    def __init__(self, name, attr1='default1', attr2='default2'):
        super(MyType, self).__init__()
        self.name = name
        self.attr1 = attr1
        self.attr2 = attr2
The answers above work well, but here is a way to make initialisation work fully with a class based approach:
UNSPECIFIED = object()  # sentinel: distinguishes "no default" from None


class SomeYAMLObject(yaml.YAMLObject):
    """YAMLObject base whose from_yaml respects __init__ defaults.

    Fixes to the original: '@' restored on the classmethod decorator;
    keyword-only defaults are looked up in the kwonlydefaults *dict*
    (zipping two reversed sequences paired names with the dict's keys,
    not its values); 'args[a] = d' indexed a list with a string (should
    fill kwargs); and iterating a mapping for leftovers needs .items().
    """

    @classmethod
    def from_yaml(cls, loader, node):
        arg_spec = inspect.getfullargspec(cls.__init__)
        arg_spec.args.remove("self")
        # Pair each positional argument with its default, right-aligned,
        # since defaults apply only to the trailing parameters.
        arg_defaults = reversed(list(
            zip_longest(
                reversed(arg_spec.args),
                reversed(arg_spec.defaults or ()),
                fillvalue=UNSPECIFIED)))
        # kwonlydefaults is a dict (or None): look defaults up by name.
        kwonly_map = arg_spec.kwonlydefaults or {}
        kwarg_defaults = [
            (a, kwonly_map.get(a, UNSPECIFIED))
            for a in arg_spec.kwonlyargs]
        node_mapping = loader.construct_mapping(node)
        used_nodes = set()
        # Fill positional args first.
        args = []
        for a, d in arg_defaults:
            if a in node_mapping:
                args.append(node_mapping[a])
                used_nodes.add(a)
            elif d is not UNSPECIFIED:
                args.append(d)
            else:
                raise Exception(f"Tag {cls.yaml_tag} is missing '{a}' argument")
        # Then keyword-only args.
        kwargs = {}
        for a, d in kwarg_defaults:
            if a in node_mapping:
                kwargs[a] = node_mapping[a]
                used_nodes.add(a)
            elif d is not UNSPECIFIED:
                kwargs[a] = d
        # If __init__ accepts **kwargs, forward any leftover mapping items.
        if arg_spec.varkw and len(used_nodes) != len(node_mapping):
            for k, v in node_mapping.items():
                if k not in used_nodes:
                    kwargs[k] = v
        return cls(*args, **kwargs)
It's a bit lengthy, but gives a nice error if required positional arguments without a default value are missing.
Is there any way to avoid calling __init__ on a class while initializing it, such as from a class method?
I am trying to create a case and punctuation insensitive string class in Python used for efficient comparison purposes but am having trouble creating a new instance without calling __init__.
>>> class String:
def __init__(self, string):
self.__string = tuple(string.split())
self.__simple = tuple(self.__simple())
def __simple(self):
letter = lambda s: ''.join(filter(lambda s: 'a' <= s <= 'z', s))
return filter(bool, map(letter, map(str.lower, self.__string)))
def __eq__(self, other):
assert isinstance(other, String)
return self.__simple == other.__simple
def __getitem__(self, key):
assert isinstance(key, slice)
string = String()
string.__string = self.__string[key]
string.__simple = self.__simple[key]
return string
def __iter__(self):
return iter(self.__string)
>>> String('Hello, world!')[1:]
Traceback (most recent call last):
File "<pyshell#2>", line 1, in <module>
String('Hello, world!')[1:]
File "<pyshell#1>", line 17, in __getitem__
string = String()
TypeError: __init__() takes exactly 2 positional arguments (1 given)
>>>
What should I replace string = String(); string.__string = self.__string[key]; string.__simple = self.__simple[key] with to initialize the new object with the slices?
EDIT:
As inspired by the answer written below, the initializer has been edited to quickly check for no arguments.
def __init__(self, string=None):
    # Method of the String class (shown here without its class).  With no
    # argument, build an empty instance so String() is usable internally
    # (e.g. by __getitem__) without the old TypeError.
    if string is None:
        self.__string = self.__simple = ()
    else:
        self.__string = tuple(string.split())
        self.__simple = tuple(self.__simple())
When feasible, letting __init__ get called (and make the call innocuous by suitable arguments) is preferable. However, should that require too much of a contortion, you do have an alternative, as long as you avoid the disastrous choice of using old-style classes (there is no good reason to use old-style classes in new code, and several good reasons not to)...:
class String(object):
...
bare_s = String.__new__(String)
This idiom is generally used in classmethods which are meant to work as "alternative constructors", so you'll usually see it used in ways such as...:
@classmethod  # was '#classmethod': the decorator had lost its '@'
def makeit(cls):
    """Alternative constructor that bypasses __init__."""
    self = cls.__new__(cls)
    # etc etc, then
    return self
(this way the classmethod will properly be inherited and generate subclass instances when called on a subclass rather than on the base class).
A trick the standard pickle and copy modules use is to create an empty class, instantiate the object using that, and then assign that instance's __class__ to the "real" class. e.g.
>>> class MyClass(object):
... init = False
... def __init__(self):
... print 'init called!'
... self.init = True
... def hello(self):
... print 'hello world!'
...
>>> class Empty(object):
... pass
...
>>> a = MyClass()
init called!
>>> a.hello()
hello world!
>>> print a.init
True
>>> b = Empty()
>>> b.__class__ = MyClass
>>> b.hello()
hello world!
>>> print b.init
False
But note, this approach is very rarely necessary. Bypassing the __init__ can have some unexpected side effects, especially if you're not familiar with the original class, so make sure you know what you're doing.
Using a metaclass provides a nice solution in this example. The metaclass has limited use but works fine.
>>> class MetaInit(type):
def __call__(cls, *args, **kwargs):
if args or kwargs:
return super().__call__(*args, **kwargs)
return cls.__new__(cls)
>>> class String(metaclass=MetaInit):
def __init__(self, string):
self.__string = tuple(string.split())
self.__simple = tuple(self.__simple())
def __simple(self):
letter = lambda s: ''.join(filter(lambda s: 'a' <= s <= 'z', s))
return filter(bool, map(letter, map(str.lower, self.__string)))
def __eq__(self, other):
assert isinstance(other, String)
return self.__simple == other.__simple
def __getitem__(self, key):
assert isinstance(key, slice)
string = String()
string.__string = self.__string[key]
string.__simple = self.__simple[key]
return string
def __iter__(self):
return iter(self.__string)
>>> String('Hello, world!')[1:]
<__main__.String object at 0x02E78830>
>>> _._String__string, _._String__simple
(('world!',), ('world',))
>>>
Addendum:
After six years, my opinion favors Alex Martelli's answer more than my own approach. With meta-classes still on the mind, the following answer shows how the problem can be solved both with and without them:
#! /usr/bin/env python3
METHOD = 'metaclass'
class NoInitMeta(type):
    """Metaclass adding ``new``: create an instance without running __init__."""

    def new(cls):
        return cls.__new__(cls)
class String(metaclass=NoInitMeta if METHOD == 'metaclass' else type):
    """Case- and punctuation-insensitive string wrapper.

    The module-level METHOD constant selects which technique __getitem__
    uses to create an instance without running __init__.  Fix to the
    original: the classmethod decorator had lost its '@' ('#classmethod').
    """

    def __init__(self, value):
        self.__value = tuple(value.split())
        # Lower-cased, letters-only version of each word, used for
        # comparisons; words that become empty are dropped.
        self.__alpha = tuple(filter(None, (
            ''.join(c for c in word.casefold() if 'a' <= c <= 'z') for word in
            self.__value)))

    def __str__(self):
        return ' '.join(self.__value)

    def __eq__(self, other):
        if not isinstance(other, type(self)):
            return NotImplemented
        return self.__alpha == other.__alpha

    if METHOD == 'metaclass':
        def __getitem__(self, key):
            if not isinstance(key, slice):
                raise NotImplementedError
            instance = type(self).new()
            instance.__value = self.__value[key]
            instance.__alpha = self.__alpha[key]
            return instance
    elif METHOD == 'classmethod':
        def __getitem__(self, key):
            if not isinstance(key, slice):
                raise NotImplementedError
            instance = self.new()
            instance.__value = self.__value[key]
            instance.__alpha = self.__alpha[key]
            return instance

        @classmethod  # was '#classmethod'
        def new(cls):
            return cls.__new__(cls)
    elif METHOD == 'inline':
        def __getitem__(self, key):
            if not isinstance(key, slice):
                raise NotImplementedError
            cls = type(self)
            instance = cls.__new__(cls)
            instance.__value = self.__value[key]
            instance.__alpha = self.__alpha[key]
            return instance
    else:
        raise ValueError('METHOD did not have an appropriate value')

    def __iter__(self):
        return iter(self.__value)
def main():
    """Demo: slice a String without re-running __init__."""
    original = String('Hello, world!')
    sliced = original[1:]
    print(sliced)


if __name__ == '__main__':
    main()
Pass another argument to the constructor, like so:
def __init__(self, string, simple = None):
    # When 'simple' is given, both arguments are taken as precomputed
    # tuples (used by __getitem__ below); otherwise derive them from
    # the text as before.
    if simple is None:
        self.__string = tuple(string.split())
        self.__simple = tuple(self.__simple())
    else:
        self.__string = string
        self.__simple = simple
You can then call it like this:
def __getitem__(self, key):
    # Slice both tuples and hand them to the two-argument constructor,
    # avoiding any re-parsing of the text.
    assert isinstance(key, slice)
    return String(self.__string[key], self.__simple[key])
Also, I'm not sure it's allowed to name both the field and the method __simple. If only for readability, you should change that.