I created a class 'Stage' and want to instantiate it only if the argument passed to __init__(arg) is a string:
# example code
class Stage:
    def __init__(self, arg):
        if type(arg) == str:
            # create object
            pass
        else:
            # do not create object
            pass

# main:
entry = input()
obj = Stage(entry)
if obj:
    print("created")      # if entry is a string
else:
    print("not created")  # if entry is a float
Raise an exception:
def __init__(self, arg):
    if not isinstance(arg, str):
        raise TypeError("Stage.__init__ called with a non-str value: %r" % (arg,))
    # continue initializing the object
However, consider whether the value really needs to be a str, or just something that can be turned into a str:
def __init__(self, arg):
    arg = str(arg)
    # ...
If you want to avoid creating the instance altogether, you need to override __new__, not __init__ (with some of the previous advice folded in):
class Stage:
    def __new__(cls, arg):
        try:
            arg = str(arg)
        except ValueError:
            raise TypeError("Could not convert arg to str: %r" % (arg,))
        # object.__new__() accepts no extra arguments, so don't forward arg here;
        # __init__ (if defined) still receives the original arg.
        return super().__new__(cls)
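For example, a quick check of the behaviour (a minimal sketch, assuming the class above):

s = Stage("hello")  # already a str, passes straight through
f = Stage(3.14)     # not a str, but str(3.14) succeeds, so an instance is created
print(type(s) is Stage, type(f) is Stage)  # True True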
Check the type of the argument before instantiating your object. Also consider using isinstance to check for a type, instead of type():
class Stage:
    def __init__(self, arg):
        pass

if isinstance(entry, str):
    obj = Stage(entry)
else:
    raise TypeError('A str-type argument is required by the constructor')
You cannot make __init__ refuse to create the object under that condition, but you can throw an error:
class Stage:
    def __init__(self, arg):
        if not isinstance(arg, str):
            raise TypeError("non-str value: %r was passed, str-type argument required" % (arg,))
You can also use a classmethod to create an instance only if the passed value is a string:
class Stage:
    def __init__(self, val):
        self.val = val

    @classmethod
    def stage(cls, arg):
        return None if not isinstance(arg, str) else cls(arg)
s = Stage.stage("name")
Now, s will either be an instance of Stage if arg is a string, or None if arg is any other type.
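A quick demonstration of both branches (sketch):

print(Stage.stage("name").val)  # name
print(Stage.stage(3.5))         # None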
I have a class Stuff that has several methods, some of which take an argument, let's call it argument_x. For example:
class Stuff:
    def method_1(self, argument_x, **other_args):
        pass

    def method_2(self, argument_x, **other_args):
        pass

    def method_3(self, I_dont_have_argument_x):
        pass
Now I want to subclass this class, wrapping all methods that have argument_x in the same way. For example, if I were to proceed by hand I would do:
class StuffWithConstantX(Stuff):
    def __init__(self, argument_x_value):
        super().__init__()
        self._argument_x_value = argument_x_value

    def method_1(self, **other_args):
        super().method_1(argument_x=self._argument_x_value, **other_args)

    def method_2(self, **other_args):
        super().method_2(argument_x=self._argument_x_value, **other_args)
As method_3 does not have argument_x, I leave it unchanged.
Is it possible to automate this? How?
Here's how you might define this as a wrapper, rather than a subclass:
class Stuff:
    def method_1(self, argument_x, **other_args):
        print("method 1:", argument_x)

    def method_2(self, argument_x, **other_args):
        print("method 2:", argument_x)

    def method_3(self, i_dont_have_argument_x):
        print("method 3:", i_dont_have_argument_x)

class StuffWithConstantX:
    def __init__(self, argument_x_value) -> None:
        self._stuff = Stuff()
        self._argument_x = argument_x_value

    def __getattr__(self, __name: str):
        attr = getattr(self._stuff, __name)
        if not callable(attr):
            return attr

        def wrapped(*args, **kwargs):
            try:
                return attr(argument_x=self._argument_x, *args, **kwargs)
            except TypeError:
                # Beware -- if there's a TypeError raised from attr itself,
                # it will get run twice before the caller sees the exception.
                # You can potentially work around this by closely inspecting
                # either the exception or the attr object itself.
                return attr(*args, **kwargs)

        return wrapped

stuff = StuffWithConstantX("foo")
stuff.method_1()
stuff.method_2()
stuff.method_3("bar")
method 1: foo
method 2: foo
method 3: bar
As noted in the comments, this code is more or less impossible to statically typecheck, and I would not recommend actually using this pattern unless you have a really good reason.
Here's another way you could do it.
import inspect
import functools

class StuffWithConstantX(Stuff):
    def __init__(self, argument_x_value):
        super().__init__()
        self._argument_x_value = argument_x_value
        for func_name, func in inspect.getmembers(Stuff, inspect.isfunction):
            arg_names = inspect.getfullargspec(func).args
            if 'argument_x' in arg_names:
                setattr(self, func_name,
                        functools.partial(func, self=self, argument_x=self._argument_x_value))
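Using the printing version of Stuff from the previous answer, a quick check of this approach (a sketch; expected output shown in comments):

stuff = StuffWithConstantX("foo")
stuff.method_1()       # method 1: foo
stuff.method_2()       # method 2: foo
stuff.method_3("bar")  # method 3: bar -- not wrapped, so it is called normally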
Let's imagine that I make use of a library whose source code was written following this shape:
class SuperLibraryDemo:
    def __init__(self, test) -> None:
        self.test = test

    def demo(self) -> str:
        return "Returning from demo method with name: %s" % self.test

class LibraryDemo(SuperLibraryDemo):
    def __init__(self, test: str = "something") -> None:
        super(LibraryDemo, self).__init__(test)
        print("At LibraryDemo __init__ method: This should have been skipped")

    def demo(self) -> None:
        super().demo()
Remember that this is a library: I am not supposed to tweak its source code for my needs. However, I need to switch the code that LibraryDemo's __init__ method runs, for reasons beyond the scope of this question.
With this goal in mind, I decide to code a CustomLibraryDemo with the help of a metaclass, as this:
import types
from typing import Any, Dict, Optional, Tuple, Type

class MetaDemo(type):
    def __new__(mcs, class_name: str, bases: Tuple[Type, ...], class_dict: Dict[str, Any]):
        basis = bases[0]
        c_attrs = dict(basis.__dict__)
        prior_c_process_bases = basis.__base__
        c_attrs["__init__"] = lambda self, settings: prior_c_process_bases.__init__(self, settings)
        new_bases = types.new_class(basis.__qualname__, basis.__bases__,
                                    exec_body=lambda np: MetaDemo.populate_class_dict(np, c_attrs))
        return super(MetaDemo, mcs).__new__(mcs, class_name, (new_bases,), class_dict)

    @staticmethod
    def populate_class_dict(namespace: Dict[str, Any], attr: Dict[str, Any]) -> None:
        for key, value in attr.items():
            namespace[key] = value

class CustomLibraryDemo(LibraryDemo, metaclass=MetaDemo):
    def __init__(self, test: Optional[str] = None) -> None:
        super(CustomLibraryDemo, self).__init__(test)
        print("At CustomDemo __init__ method: This message should appear")

    def test(self) -> None:
        print("In test method at CustomLibraryDemo class: %s" % self.test)
Though this approach seems to work at first sight, I get an error when I call CustomLibraryDemo().demo() saying:
TypeError: super(type, obj): obj must be an instance or subtype of type
Why is that?
You may not need a custom metaclass; instead, just adjust the arguments to super.
class CustomLibraryDemo(LibraryDemo):
    def __init__(self, test: Optional[str] = None) -> None:
        super(LibraryDemo, self).__init__(test)
        print("At CustomDemo __init__ method: This message should appear")

    def test(self) -> None:
        print("In test method at CustomLibraryDemo class: %s" % self.test)
Using LibraryDemo instead of CustomLibraryDemo causes super to start further along the MRO when deciding which class to use next.
% python3 tmp.py
At CustomDemo __init__ method: This message should appear
This question solves my issue.
For my case, swapping basis.__bases__ for the bases argument from the __new__ method signature fixes the issue. This way, the new_bases assignment turns out to be:
new_bases = types.new_class(basis.__qualname__, bases,
                            exec_body=lambda np: MetaDemo.populate_class_dict(np, c_attrs))
By the way, this code can be simplified to:

new_bases = type(basis.__qualname__, bases, c_attrs)

dropping the populate_class_dict metaclass method entirely.
I'm trying to understand __new__ and I'm fairly new to OOP in Python. I have this code...
import csv
from csv import DictReader
import logging
import typing as T

class MaxLength(str):
    def __new__(cls, field: str):
        # how do I pass this in dynamically in the PCW obj?
        maximum = 4
        if len(field) > maximum:
            raise ValueError('invalid length of fields at: ' + field)
        return super().__new__(cls, field)

class PCW(T.NamedTuple):
    length_checker: MaxLength

    @classmethod
    def from_row(cls, row: dict):
        # note: _field_types was removed in Python 3.9; newer code can use cls.__annotations__
        return cls(**{key: type_(row[key]) for key, type_ in cls._field_types.items()})

def validate_csv(reader: DictReader) -> bool:
    for row in reader:
        try:
            PCW.from_row(row)
        except Exception as e:
            logging.error('type: {} msg: {}'.format(type(e), e))
            return False
    return True

input_file = validate_csv(csv.DictReader(open("test.csv")))
This works, but I want to be able to pass maximum in as an argument, since this value will change, i.e.:
class PCW(T.NamedTuple):
    length_checker: MaxLength(maximum=4)
    ...

input_file = validate_csv(csv.DictReader(open("test.csv")))
I think I have gone down the rabbit hole. Is this possible or what am I overlooking/misunderstanding?
One way to do it would be to use the object.__init_subclass__() classmethod that was added in Python 3.6. Making use of it requires subclassing your MaxLength subclass.
Here's what I mean:
import csv
from csv import DictReader
import logging
import typing as T

class MaxLength(str):
    maximum = 8  # default maximum

    @classmethod
    def __init_subclass__(cls, **kwargs):
        maximum = kwargs.pop('maximum', cls.maximum)
        super().__init_subclass__(**kwargs)
        cls.maximum = maximum

    def __new__(cls, field: str):
        if len(field) > cls.maximum:
            raise ValueError('invalid length of fields at: ' + field)
        return super().__new__(cls, field)

class PCW(T.NamedTuple):
    # class PCWMaxLength(MaxLength):
    class PCWMaxLength(MaxLength, maximum=4):  # Override default maximum.
        pass

    length_checker: PCWMaxLength

    @classmethod
    def from_row(cls, row: dict):
        return cls(**{key: type_(row[key]) for key, type_ in cls._field_types.items()})

    # Display value assigned to nested class' constant.
    print(f'PCWMaxLength.maximum: {PCWMaxLength.maximum}')  # -> PCWMaxLength.maximum: 4

def validate_csv(reader: DictReader) -> bool:
    for row in reader:
        try:
            PCW.from_row(row)
        except Exception as e:
            logging.error('type: {} msg: {}'.format(type(e), e))
            return False
    return True
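As a quick sanity check of the overridden limit (a sketch; it assumes the nested class is reachable as PCW.PCWMaxLength, and the strings are made up):

print(PCW.PCWMaxLength('abcd'))  # abcd -- length 4 is allowed
try:
    PCW.PCWMaxLength('abcde')    # length 5 exceeds maximum=4
except ValueError as err:
    print(err)                   # invalid length of fields at: abcde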
Otherwise, I think you will need to do some actual metaclass programming…
You could make it a default argument:
class MaxLength(str):
    def __new__(cls, field: str, maximum: int = 4):
        if len(field) > maximum:
            raise ValueError('invalid length of fields at: ' + field)
        return super().__new__(cls, field)
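For example (a sketch with made-up values):

print(MaxLength('abc'))                   # abc -- within the default maximum of 4
print(MaxLength('abcdefgh', maximum=10))  # abcdefgh -- explicit, looser limit
try:
    MaxLength('abcde')                    # too long for the default limit
except ValueError as err:
    print(err)                            # invalid length of fields at: abcde

Note that from_row calls the annotation with a single argument, so only the default maximum applies there unless you pass the parameter explicitly elsewhere.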
I am trying to expose the class's dictionary, making it both subscriptable and iterable. Here is the class:
class ExampleClass():
    def __init__(self, *args, **kwargs):
        for key, value in kwargs.items():
            setattr(self, key, value)
        for arg in args:
            setattr(self, arg, arg) if isinstance(arg, str) else setattr(self, str(arg), arg)

    def __str__(self):
        return 'This is the example class'

    def __getitem__(self, obj):
        return self.__dict__[obj]

    def __len__(self):
        return len(self.__dict__.items())
If we create an instance and pass in these values :
cls = ExampleClass(123456, 'cash', name='newexample', id=1)
This will store all of the args and kwargs as instance attributes, and using the syntax cls['id'] will return 1 as expected. But when I use the syntax for i in cls: print(i) I get a KeyError: KeyError: 0
How can I make this object's dict both subscriptable and iterable?
You need to implement the __iter__ method.
class ExampleClass():
    def __init__(self, *args, **kwargs):
        for key, value in kwargs.items():
            setattr(self, key, value)
        for arg in args:
            setattr(self, arg, arg) if isinstance(arg, str) else setattr(self, str(arg), arg)

    def __str__(self):
        return 'This is the example class'

    def __getitem__(self, obj):
        return self.__dict__[obj]

    def __len__(self):
        return len(self.__dict__.items())

    def __iter__(self):
        return iter(self.__dict__)

cls = ExampleClass(123456, 'cash', name='newexample', id=1)
print(cls['cash'])
print(cls['name'])
for i in cls:
    print(i)
This is the method which is called to create an iterator for your type so that it can be iterated. Your underlying dict already implements it, so you're sort of just proxying it here.
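Run as written, the snippet should print the two lookups followed by the attribute keys in insertion order (the keyword arguments are set first, assuming Python 3.7+ insertion-ordered dicts):

cash
newexample
name
id
123456
cash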
To make a class subscriptable, it must implement __getitem__(); it may or may not implement __iter__(). An iterable, on the other hand, must implement __iter__(). To check whether your class has the required method, run print(dir(your_class)) and look for the respective dunder method. If you don't have one, create it.
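For instance, checking the class from the previous answer (sketch):

print('__getitem__' in dir(ExampleClass))  # True -> instances are subscriptable
print('__iter__' in dir(ExampleClass))     # True once __iter__ has been added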
I have a container class that transmutes itself into another class. For example, I have some types such as MyFloat, MyStr or MyInt that offer additional methods or attributes. I would like to encapsulate the decision to build any of these types in another class:
My first attempt was to write this:
class MyFloat(float):
    def foo_float(self):
        pass

class MyStr(str):
    def foo_str(self):
        pass

class MyInt(int):
    def foo_int(self):
        pass

# Does not work
class Polymorph(object):
    def __init__(self, value):
        if isinstance(value, float):
            self = MyFloat(value)
        elif isinstance(value, int):
            self = MyInt(value)
        elif isinstance(value, str):
            self = MyStr(value)
        else:
            raise TypeError('Unknown type')
Unfortunately, I did not get the expected instance at the end:
>>> a = Polymorph(42.42)  # Should get an instance of MyFloat
>>> type(a)
<class '__main__.Polymorph'>
I then tried to use __new__ instead
class Polymorph(object):
    def __new__(cls, value):
        if isinstance(value, float):
            return super(MyFloat, cls).__new__(cls, value)
        elif isinstance(value, int):
            return super(MyInt, cls).__new__(cls, value)
        elif isinstance(value, str):
            return super(MyStr, cls).__new__(cls, value)
        else:
            raise TypeError('Unknown type')
But this time I get a TypeError: super(type, obj): obj must be an instance or subtype of type
Is it possible to achieve this?
So I found this solution that works. However, I don't know if it is acceptable (Pythonic) to do this.
class Polymorph(object):
    def __new__(cls, value):
        if isinstance(value, float):
            return MyFloat(value)
        elif isinstance(value, int):
            return MyInt(value)
        elif isinstance(value, str):
            return MyStr(value)
        else:
            raise TypeError('Unknown type')
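A quick check (sketch). One caveat worth knowing: because __new__ returns an object that is not a Polymorph instance, Python skips Polymorph.__init__ entirely, so Polymorph acts purely as a factory:

a = Polymorph(42.42)
print(type(a))                   # <class '__main__.MyFloat'>
print(isinstance(a, Polymorph))  # False
a.foo_float()                    # the extra method is available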