I'm trying to get the following behavior with pydantic.BaseModel:
class MyClass:
    def __init__(self, value: T) -> None:
        self._value = value

    # Maybe:
    @property
    def value(self) -> T:
        return self._value

    # Maybe:
    @value.setter
    def value(self, value: T) -> None:
        # ...
        self._value = value
If T is also a pydantic model, then recursive initialization using dictionaries should work:
# Initialize `x._value` with `T(foo="bar", spam="ham")`:
x = MyClass(value={"foo": "bar", "spam": "ham"})
Note that _value is initialized from the value keyword argument. Validation must also be available for private fields.
The pydantic docs (PrivateAttr, etc.) seem to imply that pydantic will never expose private attributes. I'm sure there is some hack for this. But is there an idiomatic way to achieve the behavior in pydantic? Or should I just use a custom class?
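For reference, a minimal sketch of what plain pydantic already gives when the field is public (no private attribute involved): dict values are validated and coerced into the nested model automatically. The field names foo/spam are just the placeholders from the question above.

from pydantic import BaseModel

class T(BaseModel):
    foo: str
    spam: str

class MyClass(BaseModel):
    # Public field: pydantic validates and coerces dicts into T.
    value: T

x = MyClass(value={"foo": "bar", "spam": "ham"})
print(type(x.value))  # <class '__main__.T'>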
Not sure if this solution is advisable, based on:
https://github.com/samuelcolvin/pydantic/issues/655
import inspect
from typing import Dict

from pydantic import BaseModel, PrivateAttr
from pydantic.main import no_type_check


class PatchedModel(BaseModel):
    @no_type_check
    def __setattr__(self, name, value):
        """
        To be able to use properties with setters
        """
        try:
            super().__setattr__(name, value)
        except ValueError as e:
            setters = inspect.getmembers(
                self.__class__,
                predicate=lambda x: isinstance(x, property) and x.fset is not None
            )
            for setter_name, func in setters:
                if setter_name == name:
                    object.__setattr__(self, name, value)
                    break
            else:
                raise e
class T(BaseModel):
    value1: str
    value2: int


class MyClassPydantic(PatchedModel):
    _value: T = PrivateAttr()

    def __init__(self, value: Dict, **kwargs):
        super().__init__(**kwargs)
        object.__setattr__(self, "_value", T(**value))

    @property
    def value(self) -> T:
        return self._value

    @value.setter
    def value(self, value: T) -> None:
        self._value: T = value

    # To avoid the PatchedModel(BaseModel), use instead:
    # def set_value(self, value: T) -> None:
    #     self._value: T = value
if __name__ == "__main__":
    my_pydantic_class = MyClassPydantic({"value1": "test1", "value2": 1})
    print(my_pydantic_class.value)
    my_pydantic_class.value = T(value1="test2", value2=2)
    # my_pydantic_class.set_value(T(value1="test2", value2=2))
    print(my_pydantic_class.value)
I ended up with something like this: it acts like a private field, but I can change it through public methods:
import inspect
from typing import Optional
from uuid import UUID

from pydantic import BaseModel, Field


class Entity(BaseModel):
    """Base entity class."""

    def __setattr__(self, name, value):
        if "self" not in inspect.currentframe().f_back.f_locals:
            raise Exception("set attr is protected")
        super().__setattr__(name, value)


class PostId(UUID):
    """Post unique id."""


class Post(Entity):
    """Post."""

    post_id: PostId = Field(description='unique post id')
    title: Optional[str] = Field(None, description='title')

    def change_title(self, new_title: str) -> None:
        """Changes title."""
        self.title = new_title
I just look at inspect.currentframe().f_back.f_locals and check for a self key (note this only verifies that the caller has a local named self, so it is a deterrent rather than real protection).
Inspired by accessify.
Tested with this little test:
from uuid import uuid4

import pytest

import post_pydantic


def test_pydantic():
    """Test pydantic variant."""
    post_id = uuid4()
    post = post_pydantic.Post(post_id=post_id)

    with pytest.raises(Exception) as e:
        post.post_id = uuid4()

    assert post.post_id == post_id
    assert e.value.args[0] == "set attr is protected"

    new_title = "New title"
    post.change_title(new_title)
    assert post.title == new_title
Let's say I have a class like this:
class C:
    def __init__(self, stuff: int):
        self._stuff = stuff

    @property
    def stuff(self) -> int:
        return self._stuff
then stuff is read-only:
c = C(stuff=10)
print(c.stuff) # prints 10
and
c.stuff = 2
fails as expected
AttributeError: can't set attribute
How can I get the identical behavior using a dataclass? If I wanted to also have a setter, I could do:
@dataclass
class DC:
    stuff: int
    _stuff: int = field(init=False, repr=False)

    @property
    def stuff(self) -> int:
        return self._stuff

    @stuff.setter
    def stuff(self, stuff: int):
        self._stuff = stuff
But how could I do it without the @stuff.setter part?
This answer extends directly from my other post on using descriptor classes, which is a convenient and handy way to define properties, more or less.
Since dataclasses does not offer a per-field field(frozen=True) approach (only the class-wide @dataclass(frozen=True)), I think this one can instead work for you.
Here is a straightforward example of usage below:
from dataclasses import dataclass, MISSING
from typing import Generic, TypeVar

_T = TypeVar('_T')


class Frozen(Generic[_T]):
    __slots__ = (
        '_default',
        '_private_name',
    )

    def __init__(self, default: _T = MISSING):
        self._default = default

    def __set_name__(self, owner, name):
        self._private_name = '_' + name

    def __get__(self, obj, objtype=None):
        value = getattr(obj, self._private_name, self._default)
        return value

    def __set__(self, obj, value):
        if hasattr(obj, self._private_name):
            msg = f'Attribute `{self._private_name[1:]}` is immutable!'
            raise TypeError(msg) from None
        setattr(obj, self._private_name, value)


@dataclass
class DC:
    stuff: int = Frozen()
    other_stuff: str = Frozen(default='test')


dc = DC(stuff=10)

# raises a TypeError: Attribute `stuff` is immutable!
# dc.stuff = 2

# raises a TypeError: Attribute `other_stuff` is immutable!
# dc.other_stuff = 'hello'

print(dc)

# raises a TypeError: __init__() missing 1 required positional argument: 'stuff'
# dc = DC()
Another option is to use a metaclass which automatically applies the @dataclass decorator. This has a few advantages, such as being able to use dataclasses.field(...), for example to set a default value if desired, or to set repr=False for instance.
Note that once @dataclass_transform arrives in Python 3.11, this could potentially be a good use case to apply it here, so that it plays more nicely with IDEs in general.
In any case, here's a working example of this that I was able to put together:
from dataclasses import dataclass, field, fields


class Frozen:
    __slots__ = ('private_name', )

    def __init__(self, name):
        self.private_name = '_' + name

    def __get__(self, obj, objtype=None):
        value = getattr(obj, self.private_name)
        return value

    def __set__(self, obj, value):
        if hasattr(obj, self.private_name):
            msg = f'Attribute `{self.private_name[1:]}` is immutable!'
            raise TypeError(msg) from None
        setattr(obj, self.private_name, value)


def frozen_field(**kwargs):
    return field(**kwargs, metadata={'frozen': True})


def my_meta(name, bases, cls_dict):
    cls = dataclass(type(name, bases, cls_dict))
    for f in fields(cls):
        # if a dataclass field is supposed to be frozen, then set
        # the value to a descriptor object accordingly.
        if 'frozen' in f.metadata:
            setattr(cls, f.name, Frozen(f.name))
    return cls


class DC(metaclass=my_meta):
    other_stuff: str
    stuff: int = frozen_field(default=2)
    # DC.stuff = property(lambda self: self._stuff)


dc = DC(other_stuff='test')
print(dc)

# raises TypeError: Attribute `stuff` is immutable!
# dc.stuff = 41

dc.other_stuff = 'hello'
print(dc)
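For Python 3.11+, a rough sketch of the @dataclass_transform idea mentioned above, using a class-based metaclass (this variant is my assumption; the my_meta above is a plain function, which type checkers handle less well):

from dataclasses import dataclass, field
from typing import dataclass_transform


@dataclass_transform(field_specifiers=(field,))
class DataclassMeta(type):
    # Every class created through this metaclass gets @dataclass applied,
    # and type checkers are told about the synthesized __init__.
    def __new__(mcls, name, bases, ns):
        cls = super().__new__(mcls, name, bases, ns)
        return dataclass(cls)


class DC2(metaclass=DataclassMeta):
    stuff: int = 2


print(DC2(stuff=5))  # DC2(stuff=5)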
To get the boilerplate reduction that dataclass provides, I found the only way to do this is with a descriptor.
In [236]: from dataclasses import dataclass, field

In [237]: class SetOnce:
     ...:     def __init__(self):
     ...:         self.block_set = False
     ...:     def __set_name__(self, owner, attr):
     ...:         self.owner = owner.__name__
     ...:         self.attr = attr
     ...:     def __get__(self, instance, owner):
     ...:         return getattr(instance, f"_{self.attr}")
     ...:     def __set__(self, instance, value):
     ...:         if not self.block_set:
     ...:             self.block_set = True
     ...:             setattr(instance, f"_{self.attr}", value)
     ...:         else:
     ...:             raise AttributeError(f"{self.owner}.{self.attr} cannot be set.")

In [239]: @dataclass
     ...: class Foo:
     ...:     bar: str = field(default=SetOnce())

In [240]: test = Foo("bar")

In [241]: test.bar
Out[241]: 'bar'

In [242]: test.bar = 1
---------------------------------------------------------------------------
AttributeError                            Traceback (most recent call last)
<ipython-input-242-9cc7975cd08b> in <module>
----> 1 test.bar = 1

<ipython-input-237-bddce9441c9a> in __set__(self, instance, value)
     12             setattr(instance, f"_{self.attr}", value)
     13         else:
---> 14             raise AttributeError(f"{self.owner}.{self.attr} cannot be set.")

AttributeError: Foo.bar cannot be set.

In [243]: test
Out[243]: Foo(bar='bar')
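One caveat worth noting (my observation, not part of the original answer): block_set lives on the descriptor, which is shared by every instance of the owning class, so constructing a second Foo would raise. A sketch that keys the write-once guard on the instance instead:

class SetOnce:
    def __set_name__(self, owner, attr):
        self.owner = owner.__name__
        self.attr = attr

    def __get__(self, instance, owner):
        return getattr(instance, f"_{self.attr}")

    def __set__(self, instance, value):
        # Allow only the first assignment per instance.
        if hasattr(instance, f"_{self.attr}"):
            raise AttributeError(f"{self.owner}.{self.attr} cannot be set.")
        setattr(instance, f"_{self.attr}", value)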
from dataclasses import dataclass

@dataclass(frozen=True)
class YourClass:
    """class definition"""
https://docs.python.org/3/library/dataclasses.html#frozen-instances
After instantiation of the class, when trying to change any of its properties, the exception is raised.
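A minimal sketch of that behavior (FrozenInstanceError is what dataclasses raises on assignment to a frozen instance):

from dataclasses import dataclass, FrozenInstanceError

@dataclass(frozen=True)
class Point:
    x: int

p = Point(x=1)
try:
    p.x = 2
except FrozenInstanceError as e:
    print(e)  # cannot assign to field 'x'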
Because using the decorator in the class definition essentially causes the @dataclass decorator to treat the property object as a default field, it doesn't play nice. You can set the property outside the class instead:
>>> from dataclasses import dataclass, field
>>> @dataclass
... class DC:
...     _stuff: int = field(repr=False)
...     stuff: int = field(init=False)
...
>>> DC.stuff = property(lambda self: self._stuff)  # dataclass decorator can't see this
>>> dc = DC(42)
>>> dc
DC(stuff=42)
>>> dc.stuff
42
>>> dc.stuff = 99
Traceback (most recent call last):
  File "<stdin>", line 1, in <module>
AttributeError: can't set attribute
import operator
from dataclasses import dataclass

@dataclass
class Enum:
    name: str = property(operator.attrgetter("_name"))

    def __init__(self, name):
        self._name = name
You can do it by combining three things:
Set frozen to False (the default);
Use __post_init__, which is called after the auto-generated __init__ finishes, to mark when the initial assignment of values is done and the read-only behavior has to start;
Create your own version of __setattr__ to enforce the read-only behavior after the initial assignment.
Example Person class with a read-only ID field and a read-write name field:
from dataclasses import dataclass


@dataclass
class Person(object):
    id: str
    name: str

    def __post_init__(self):
        self._initialized = True

    def __setattr__(self, key, value):
        if "_initialized" not in self.__dict__:
            # we are still inside __init__, assign all values
            super().__setattr__(key, value)
        elif key == 'id':
            # __init__ has finished, enforce read-only attributes
            raise AttributeError('Attribute id is read-only')
        else:
            # set read-write attributes normally
            super().__setattr__(key, value)


p = Person(id="1234", name="John Doe")
p.name = "John Wick"  # succeeds
p.id = "3456"  # fails
I haven't implemented __delattr__ in this example, but it could follow the same logic we used on __setattr__.
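For completeness, a minimal sketch of that __delattr__, added inside Person and mirroring the __setattr__ logic above:

    def __delattr__(self, key):
        # Deletes are allowed during __init__, blocked for read-only fields after.
        if "_initialized" in self.__dict__ and key == 'id':
            raise AttributeError('Attribute id is read-only')
        super().__delattr__(key)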
Using a decorator so you don't need to write this much code for each class:
from typing import Optional, Iterable, Callable, Union
from dataclasses import dataclass


def readonlyattr(attrs: Optional[Union[str, Iterable[str]]] = None):
    # ensure attrs is a set of strings
    if isinstance(attrs, str):
        attrs = set([attrs])
    elif not isinstance(attrs, set):
        attrs = set(attrs)

    # return decorator
    def wrap_readonly_attributes(cls: type):
        # update post_init method
        def make_post_init(cls: type, method: Callable):
            def post_init(self, *args, **kwargs):
                self._initialized = True
                if method:
                    method(self, *args, **kwargs)
                else:
                    for base in cls.__bases__:
                        try:
                            getattr(base, "__post_init__")(self, *args, **kwargs)
                        except AttributeError:
                            pass
            return post_init
        setattr(cls, "__post_init__", make_post_init(cls, getattr(cls, "__post_init__", None)))

        # update setattr method
        def make_setattr(cls: type, method: Callable):
            def new_setattr(self, key, value):
                if "_initialized" not in self.__dict__:
                    if method:
                        method(self, key, value)
                    else:
                        # zero-argument super() is unavailable outside a class
                        # body, so fall back to object.__setattr__ here
                        object.__setattr__(self, key, value)
                elif key in attrs:
                    raise AttributeError(f'Attribute {key} is read-only')
                else:
                    if method:
                        method(self, key, value)
                    else:
                        object.__setattr__(self, key, value)
            return new_setattr
        setattr(cls, "__setattr__", make_setattr(cls, getattr(cls, "__setattr__", None)))

        return cls
    return wrap_readonly_attributes


@dataclass
@readonlyattr(["id", "passport_no"])
class Person(object):
    id: str
    passport_no: str
    name: str


p = Person(id="1234", passport_no="AB12345", name="John Doe")
print(p)
p.name = "John Wick"  # succeeds
p.id = "3456"  # fails
I'm trying to understand __new__ and I'm fairly new to OOP in Python. I have this code:
import csv
from csv import DictReader
import logging
import typing as T


class MaxLength(str):
    def __new__(cls, field: str):
        # how do I pass this in dynamically from the PCW object?
        maximum = 4
        if len(field) > maximum:
            raise ValueError('invalid length of fields at: ' + field)
        return super().__new__(cls, field)


class PCW(T.NamedTuple):
    length_checker: MaxLength

    @classmethod
    def from_row(cls, row: dict):
        return cls(**{key: type_(row[key]) for key, type_ in cls._field_types.items()})


def validate_csv(reader: DictReader) -> bool:
    for row in reader:
        try:
            PCW.from_row(row)
        except Exception as e:
            logging.error('type: {} msg: {}'.format(type(e), e))
            return False
    return True


input_file = validate_csv(csv.DictReader(open("test.csv")))
This works, but I want to be able to pass maximum in as an argument, as this will change, i.e.:

class PCW(T.NamedTuple):
    length_checker: MaxLength(maximum=4)
    ...

input_file = validate_csv(csv.DictReader(open("test.csv")))
I think I have gone down the rabbit hole. Is this possible or what am I overlooking/misunderstanding?
One way to do it would be to use the object.__init_subclass__() classmethod that was added in Python 3.6. Making use of it requires subclassing your MaxLength class.
Here's what I mean:
import csv
from csv import DictReader
import logging
import typing as T


class MaxLength(str):
    maximum = 8

    @classmethod
    def __init_subclass__(cls, **kwargs):
        maximum = kwargs.pop('maximum', cls.maximum)
        super().__init_subclass__(**kwargs)
        cls.maximum = maximum

    def __new__(cls, field: str):
        if len(field) > cls.maximum:
            raise ValueError('invalid length of fields at: ' + field)
        return super().__new__(cls, field)


class PCW(T.NamedTuple):
    # class PCWMaxLength(MaxLength):
    class PCWMaxLength(MaxLength, maximum=4):  # Override default maximum.
        pass

    length_checker: PCWMaxLength

    @classmethod
    def from_row(cls, row: dict):
        return cls(**{key: type_(row[key]) for key, type_ in cls._field_types.items()})


# Display value assigned to nested class' constant.
print(f'PCWMaxLength.maximum: {PCW.PCWMaxLength.maximum}')  # -> PCWMaxLength.maximum: 4


def validate_csv(reader: DictReader) -> bool:
    for row in reader:
        try:
            PCW.from_row(row)
        except Exception as e:
            logging.error('type: {} msg: {}'.format(type(e), e))
            return False
    return True
Otherwise, I think you will need to do some actual metaclass programming…
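Short of full metaclass programming, a lighter-weight sketch is to build parameterized subclasses with type(); max_length below is an invented helper, not part of the question's code, and it reuses the class-level maximum from the answer above:

def max_length(maximum: int):
    # Build a MaxLength subclass with the given class-level maximum.
    return type(f'MaxLength{maximum}', (MaxLength,), {'maximum': maximum})

PCWMaxLength = max_length(4)
PCWMaxLength('abcd')   # ok
PCWMaxLength('abcde')  # raises ValueError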
You could make it a default argument:
class MaxLength(str):
    def __new__(cls, field: str, maximum: int = 4):
        if len(field) > maximum:
            raise ValueError('invalid length of fields at: ' + field)
        return super().__new__(cls, field)
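With the default argument in place, a per-use maximum can be bound with functools.partial when a one-argument callable is needed (e.g. for the from_row pattern above):

from functools import partial

Length10 = partial(MaxLength, maximum=10)
Length10("abcdefgh")               # ok, 8 <= 10
MaxLength("too long?", maximum=4)  # raises ValueError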
I am trying to be all fancy with sub element attribute access in a custom class hierarchy.
My fanciness works in that I can successfully use descriptors to do this.
I want to be even more fancy and make the class RefHolder (shown below in the testcase) use slots to save space.
When I try to use slots though, I get RuntimeError: maximum recursion depth exceeded
Note that I have already tried looking at existing solutions for this, the most closely matching I could find being this one:
https://stackoverflow.com/a/19566973/1671693
I have tried this in the testcase below but I still get the RuntimeError.
Note that in the testcase, if the commented lines are used instead of the ones directly beneath them and __slots__ is removed from RefHolder, the testcase passes.
Any suggestions?
Additionally, I am creating an object for every attribute access, which seems expensive; are there any suggestions on a more efficient way of achieving the same behavior? Thanks!
import unittest


class RefHolder():
    __slots__ = ['__obj', 'get_value']

    def __init__(self, obj, get_value=False):
        self.__dict__['__obj'] = obj
        self.__dict__['get_value'] = get_value

    def get_sub(self, name):
        #attr = self.__dict__['__obj'].find_by_name(name)
        attr = self.__dict__['__obj'].__get__(self, RefHolder).find_by_name(name)
        if attr is None:
            raise AttributeError("Can't find field {}".format(name))
        return attr

    def __getattr__(self, name):
        attr = self.get_sub(name)
        #if self.__dict__['get_value']:
        if self.__dict__['get_value'].__get__(self, RefHolder):
            return attr.Value
        else:
            return attr

    def __setattr__(self, name, value):
        attr = self.get_sub(name)
        #if self.__dict__['get_value']:
        if self.__dict__['get_value'].__get__(self, RefHolder):
            attr.Value = value
        else:
            raise AttributeError("{} is read only in this context".format(name))


class ContainerAccess():
    __slots__ = ['get_value']

    def __init__(self, get_value=False):
        self.get_value = get_value

    def __get__(self, obj, objtype=None):
        if obj is None:
            return self
        return RefHolder(obj, self.get_value)

    def __set__(self, obj, value):
        raise AttributeError("Read Only attribute".format(value))


class PropVal():
    def __init__(self, val):
        self.Value = val

    @property
    def Value(self):
        return self._value

    @Value.setter
    def Value(self, value):
        self._value = value


class T():
    get = ContainerAccess()
    getv = ContainerAccess(get_value=True)

    def __init__(self):
        self.store = {}
        self._value = 0

    def find_by_name(self, name):
        return self.store.get(name)


class T2(T):
    pass


class TestDesc(unittest.TestCase):
    def test_it(self):
        t = T()
        t2 = T2()
        t.store['my_val'] = PropVal(5)
        t.store['my_val2'] = PropVal(6)
        t2.store['my_val'] = PropVal(1)
        self.assertEqual(t.get.my_val.Value, 5)
        self.assertEqual(t.get.my_val2.Value, 6)
        self.assertEqual(t2.get.my_val.Value, 1)
        t.get.my_val.Value = 6
        self.assertEqual(t.get.my_val.Value, 6)
        with self.assertRaises(AttributeError):
            t.get.blah.Value = 6
        #self.assertEqual(t.get.my_other_val.Value, None)
        self.assertEqual(t.getv.my_val, 6)
        t.getv.my_val = 7
        self.assertEqual(t.getv.my_val, 7)
        with self.assertRaises(AttributeError):
            t.get.my_val = 7
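For what it's worth, a minimal sketch of why the recursion happens and a slots-compatible variant (my reading, not an accepted answer): with __slots__ there is no instance __dict__, so self.__dict__ itself fails, re-enters __getattr__, and loops forever. Setting the slots with object.__setattr__ and reading them as plain attributes avoids that:

class RefHolder:
    __slots__ = ('_obj', '_get_value')

    def __init__(self, obj, get_value=False):
        # Bypass our own __setattr__, which is reserved for sub-attributes.
        object.__setattr__(self, '_obj', obj)
        object.__setattr__(self, '_get_value', get_value)

    def __getattr__(self, name):
        # Only called when normal lookup fails, i.e. never for the slots.
        attr = self._obj.find_by_name(name)
        if attr is None:
            raise AttributeError("Can't find field {}".format(name))
        return attr.Value if self._get_value else attr

    def __setattr__(self, name, value):
        attr = self._obj.find_by_name(name)
        if attr is None or not self._get_value:
            raise AttributeError("{} is read only in this context".format(name))
        attr.Value = value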
I have a container class that transmutes itself into another class.
For example I have some types such as MyFloat, MyStr or MyInt that offer additional methods or attributes. I would like to encapsulate the decision to build any of these types in another class:
My first attempt was to write this:
class MyFloat(float):
    def foo_float(self):
        pass

class MyStr(str):
    def foo_str(self):
        pass

class MyInt(int):
    def foo_int(self):
        pass

# Does not work
class Polymorph(object):
    def __init__(self, value):
        if isinstance(value, float):
            self = MyFloat(value)
        elif isinstance(value, int):
            self = MyInt(value)
        elif isinstance(value, str):
            self = MyStr(value)
        else:
            raise TypeError('Unknown type')
Unfortunately I did not get the expected instance at the end:
>>> a = Polymorph(42.42)  # Should get an instance of MyFloat
>>> type(a)
<class '__main__.Polymorph'>
I then tried to use __new__ instead
class Polymorph(object):
    def __new__(cls, value):
        if isinstance(value, float):
            return super(MyFloat, cls).__new__(cls, value)
        elif isinstance(value, int):
            return super(MyInt, cls).__new__(cls, value)
        elif isinstance(value, str):
            return super(MyStr, cls).__new__(cls, value)
        else:
            raise TypeError('Unknown type')
But this time I get a TypeError: super(type, obj): obj must be an instance or subtype of type
Is it possible to achieve this?
So I found this solution that works. However, I don't know if it is Pythonic/acceptable to do this.
class Polymorph(object):
    def __new__(cls, value):
        if isinstance(value, float):
            return MyFloat(value)
        elif isinstance(value, int):
            return MyInt(value)
        elif isinstance(value, str):
            return MyStr(value)
        else:
            raise TypeError('Unknown type')
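A quick check of that solution's behavior; note that because __new__ returns an instance of a different class, Polymorph.__init__ is never invoked on the result:

a = Polymorph(42.42)
print(type(a))  # <class '__main__.MyFloat'>
a = Polymorph("text")
print(type(a))  # <class '__main__.MyStr'>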
I'm new to Python and need some advice implementing the scenario below.
I have two classes for managing domains at two different registrars. Both have the same interface, e.g.
class RegistrarA(object):
    def __init__(self, domain):
        self.domain = domain

    def lookup(self):
        ...

    def register(self, info):
        ...
and
class RegistrarB(object):
    def __init__(self, domain):
        self.domain = domain

    def lookup(self):
        ...

    def register(self, info):
        ...
I would like to create a Domain class that, given a domain name, loads the correct registrar class based on the extension, e.g.
com = Domain('test.com') #load RegistrarA
com.lookup()
biz = Domain('test.biz') #load RegistrarB
biz.lookup()
I know this can be accomplished using a factory function (see below), but is this the best way of doing it or is there a better way using OOP features?
def factory(domain):
    if ...:
        return RegistrarA(domain)
    else:
        return RegistrarB(domain)
I think using a function is fine.
The more interesting question is how do you determine which registrar to load? One option is to have an abstract base Registrar class which concrete implementations subclass, then iterate over its __subclasses__() calling an is_registrar_for() class method:
class Registrar(object):
    def __init__(self, domain):
        self.domain = domain

class RegistrarA(Registrar):
    @classmethod
    def is_registrar_for(cls, domain):
        return domain == 'foo.com'

class RegistrarB(Registrar):
    @classmethod
    def is_registrar_for(cls, domain):
        return domain == 'bar.com'

def Domain(domain):
    for cls in Registrar.__subclasses__():
        if cls.is_registrar_for(domain):
            return cls(domain)
    raise ValueError

print(Domain('foo.com'))
print(Domain('bar.com'))
This will let you transparently add new Registrars and delegate the decision of which domains each supports, to them.
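For example, adding a third registrar is just a new subclass; here a hypothetical RegistrarC matches on the extension rather than an exact name:

class RegistrarC(Registrar):
    @classmethod
    def is_registrar_for(cls, domain):
        # Claim every .org domain instead of one exact match.
        return domain.endswith('.org')

print(Domain('example.org'))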
Assuming you need separate classes for different registrars (though it's not obvious in your example) your solution looks okay, though RegistrarA and RegistrarB probably share functionality and could be derived from an Abstract Base Class.
As an alternative to your factory function, you could specify a dict, mapping to your registrar classes:
Registrar = {'test.com': RegistrarA, 'test.biz': RegistrarB}
Then:
registrar = Registrar['test.com'](domain)
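If the dict should key on the TLD rather than the full domain, a small sketch:

registrars = {'com': RegistrarA, 'biz': RegistrarB}

def make_registrar(domain):
    # 'test.com' -> 'com' -> RegistrarA('test.com')
    return registrars[domain.rsplit('.', 1)[-1]](domain)

com = make_registrar('test.com')  # RegistrarA instance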
One quibble: You're not really doing a Class Factory here as you're returning instances rather than classes.
In Python you can change the actual class directly:
class Domain(object):
    def __init__(self, domain):
        self.domain = domain
        if ...:
            self.__class__ = RegistrarA
        else:
            self.__class__ = RegistrarB
And then the following will work.
com = Domain('test.com') #load RegistrarA
com.lookup()
I'm using this approach successfully.
You can create a 'wrapper' class and overload its __new__() method to return instances of the specialized sub-classes, e.g.:
class Registrar(object):
    def __new__(cls, domain):
        if ...:
            return RegistrarA(domain)
        elif ...:
            return RegistrarB(domain)
        else:
            raise Exception()
Additionally, in order to deal with non-mutually-exclusive conditions, an issue raised in other answers, the first question to ask yourself is whether you want the wrapper class, which plays the role of a dispatcher, to govern the conditions, or whether it will delegate that to the specialized classes. I can suggest a shared mechanism, where the specialized classes define their own conditions but the wrapper does the validation, like this (provided that each specialized class exposes a class method, is_registrar_for(...) as suggested in other answers, that verifies whether it is a registrar for a particular domain):
class Registrar(object):
    registrars = [RegistrarA, RegistrarB]

    def __new__(cls, domain):
        matched_registrars = [r for r in cls.registrars if r.is_registrar_for(domain)]
        if len(matched_registrars) > 1:
            raise Exception('More than one registrar matched!')
        elif len(matched_registrars) < 1:
            raise Exception('No registrar was matched!')
        else:
            return matched_registrars[0](domain)
I have this problem all the time. If you have the classes embedded in your application (and its modules) then you can use a function; but if you load plugins dynamically, you need something more dynamic -- registering the classes with a factory via metaclasses automatically.
Here is a pattern I'm sure I lifted from StackOverflow originally, but I no longer have the link to the original post:
import imp
import os

_registry = {}

class PluginType(type):
    def __init__(cls, name, bases, attrs):
        _registry[name] = cls
        return super(PluginType, cls).__init__(name, bases, attrs)

class Plugin(object):
    __metaclass__ = PluginType  # Python 2 only; see the Python 3 note below

    def __init__(self, *args):
        pass

def load_class(plugin_name, plugin_dir):
    plugin_file = plugin_name + ".py"
    for root, dirs, files in os.walk(plugin_dir):
        if plugin_file in (s for s in files if s.endswith('.py')):
            fp, pathname, description = imp.find_module(plugin_name, [root])
            try:
                mod = imp.load_module(plugin_name, fp, pathname, description)
            finally:
                if fp:
                    fp.close()
            return

def get_class(plugin_name):
    t = None
    if plugin_name in _registry:
        t = _registry[plugin_name]
    return t

def get_instance(plugin_name, *args):
    return get_class(plugin_name)(*args)
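On Python 3 the metaclass is declared in the class header, and the deprecated imp module (removed in 3.12) is replaced by importlib; a minimal sketch of both changes:

import importlib.util

class Plugin(metaclass=PluginType):
    def __init__(self, *args):
        pass

def load_class(plugin_name, plugin_path):
    # Load one module from an explicit file path; registration still
    # happens as a side effect of PluginType.__init__.
    spec = importlib.util.spec_from_file_location(plugin_name, plugin_path)
    mod = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(mod)
    return mod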
How about something like:

class Domain(object):
    registrars = []

    @classmethod
    def add_registrar(cls, reg):
        cls.registrars.append(reg)

    def __init__(self, domain):
        self.domain = domain
        for reg in self.__class__.registrars:
            if reg.is_registrar_for(domain):
                # store an instance so lookup() works
                self.registrar = reg(domain)
                break

    def lookup(self):
        return self.registrar.lookup()

Domain.add_registrar(RegistrarA)
Domain.add_registrar(RegistrarB)

com = Domain('test.com')
com.lookup()
Since the methods are probably shared, using some base class would make sense.
getattr can be used in the factory function to dynamically look up the registrar class.
The logic to figure out the registrar type should not be part of these classes; it should live in some helper function.
import sys

class RegistrarBase():
    """Registrar Base Class"""

    def __init__(self, domain):
        self.name = domain

    def register(self, info):
        pass

    def lookup(self):
        pass

    def __repr__(self):
        return "empty domain"

class RegistrarA(RegistrarBase):
    def __repr__(self):
        return ".com domain"

class RegistrarB(RegistrarBase):
    def __repr__(self):
        return ".biz domain"

def create_registrar(domainname, registrartype):
    try:
        registrar = getattr(sys.modules[__name__], registrartype)
        return registrar(domainname)
    except AttributeError:
        return RegistrarBase(domainname)

domain = create_registrar(domainname='test.com', registrartype='RegistrarA')
print(domain)
print(domain.name)
# .com domain
# test.com
Okay, here is an answer based on the answer of Alec Thomas, modified and extended to take care of multi-level inheritance and ambiguity. If _resolve needs to be something more complicated than a simple uniqueness check and is likely to change, it may be supplied as an argument instead of being a class method.
base class module bbb.py:
from __future__ import annotations

from abc import ABC, abstractmethod
from typing import Sequence, Type

class Base(ABC):
    def __init__(self, *args, **kwargs):
        ...

    @classmethod
    def isit(cls, _s: str) -> bool:
        return False

    @classmethod
    def from_str(cls, s: str, *args, **kwargs) -> Base:
        subs = cls._findit(s)
        sc = cls._resolve(s, subs)
        return sc(*args, **kwargs)

    @classmethod
    def _findit(cls, s: str) -> Sequence[Type[Base]]:
        subs = [cls] if cls.isit(s) else []
        subs += [ssc for sc in cls.__subclasses__() for ssc in sc._findit(s)]
        return subs

    @classmethod
    def _resolve(cls, s: str, subs: Sequence[Type[Base]]) -> Type[Base]:
        if len(subs) == 0:
            raise Exception(f'Cannot find subclass for {s}')
        if len(subs) > 1:
            raise Exception(
                f'Cannot choose unique subclass for {s}: {subs}')
        sc = subs[0]
        return sc

class B(Base):
    @classmethod
    def isit(cls, s: str) -> bool:
        res = s == 'b class'
        return res
derived class module ccc.py:
from bbb import Base

class C(Base):
    @classmethod
    def isit(cls, s: str) -> bool:
        res = s == 'c class'
        return res

class CC(Base):
    @classmethod
    def isit(cls, s: str) -> bool:
        res = s == 'cc class'
        return res
How to use:
In [4]: from bbb import Base
In [5]: import ccc
In [6]: Base.from_str('b class')
Out[6]: <bbb.B at 0x1adf2665288>
In [7]: Base.from_str('c class')
Out[7]: <ccc.C at 0x1adf266a908>
In [8]: Base.from_str('cc class')
Out[8]: <ccc.CC at 0x1adf2665608>
Here a metaclass implicitly collects Registrar classes in an ENTITIES dict:
class DomainMeta(type):
    ENTITIES = {}

    def __new__(cls, name, bases, attrs):
        cls = type.__new__(cls, name, bases, attrs)
        try:
            entity = attrs['domain']
            cls.ENTITIES[entity] = cls
        except KeyError:
            pass
        return cls

class Domain(metaclass=DomainMeta):
    @classmethod
    def factory(cls, domain):
        return DomainMeta.ENTITIES[domain]()

class RegistrarA(Domain):
    domain = 'test.com'

    def lookup(self):
        return 'Custom command for .com TLD'

class RegistrarB(Domain):
    domain = 'test.biz'

    def lookup(self):
        return 'Custom command for .biz TLD'

com = Domain.factory('test.com')
type(com)     # <class '__main__.RegistrarA'>
com.lookup()  # 'Custom command for .com TLD'

com = Domain.factory('test.biz')
type(com)     # <class '__main__.RegistrarB'>
com.lookup()  # 'Custom command for .biz TLD'