Let's say I have a python function whose single argument is a non-trivial type:
from typing import List, Dict
ArgType = List[Dict[str, int]] # this could be any non-trivial type
def myfun(a: ArgType) -> None:
    ...
... and then I have a data structure that I have unpacked from a JSON source:
import json
data = json.loads(...)
My question is: How can I check at runtime that data has the correct type to be used as an argument to myfun() before using it as an argument for myfun()?
if not isCorrectType(data, ArgType):
    raise TypeError("data is not correct type")
else:
    myfun(data)
Validating a type annotation is a non-trivial task. Python does not do it automatically, and writing your own validator is difficult because the typing module doesn't offer much of a useful interface. (In fact the internals of the typing module have changed so much since its introduction in python 3.5 that it's honestly a nightmare to work with.)
Here's a type validator function taken from one of my personal projects (wall of code warning):
import inspect
import typing


__all__ = ['is_instance', 'is_subtype', 'python_type', 'is_generic', 'is_base_generic', 'is_qualified_generic']


if hasattr(typing, '_GenericAlias'):
    # python 3.7
    def _is_generic(cls):
        if isinstance(cls, typing._GenericAlias):
            return True

        if isinstance(cls, typing._SpecialForm):
            return cls not in {typing.Any}

        return False

    def _is_base_generic(cls):
        if isinstance(cls, typing._GenericAlias):
            if cls.__origin__ in {typing.Generic, typing._Protocol}:
                return False

            if isinstance(cls, typing._VariadicGenericAlias):
                return True

            return len(cls.__parameters__) > 0

        if isinstance(cls, typing._SpecialForm):
            return cls._name in {'ClassVar', 'Union', 'Optional'}

        return False

    def _get_base_generic(cls):
        # subclasses of Generic will have their _name set to None, but
        # their __origin__ will point to the base generic
        if cls._name is None:
            return cls.__origin__
        else:
            return getattr(typing, cls._name)

    def _get_python_type(cls):
        """
        Like `python_type`, but only works with `typing` classes.
        """
        return cls.__origin__

    def _get_name(cls):
        return cls._name
else:
    # python <3.7
    if hasattr(typing, '_Union'):
        # python 3.6
        def _is_generic(cls):
            if isinstance(cls, (typing.GenericMeta, typing._Union, typing._Optional, typing._ClassVar)):
                return True

            return False

        def _is_base_generic(cls):
            if isinstance(cls, (typing.GenericMeta, typing._Union)):
                return cls.__args__ in {None, ()}

            if isinstance(cls, typing._Optional):
                return True

            return False
    else:
        # python 3.5
        def _is_generic(cls):
            if isinstance(cls, (typing.GenericMeta, typing.UnionMeta, typing.OptionalMeta, typing.CallableMeta, typing.TupleMeta)):
                return True

            return False

        def _is_base_generic(cls):
            if isinstance(cls, typing.GenericMeta):
                return all(isinstance(arg, typing.TypeVar) for arg in cls.__parameters__)

            if isinstance(cls, typing.UnionMeta):
                return cls.__union_params__ is None

            if isinstance(cls, typing.TupleMeta):
                return cls.__tuple_params__ is None

            if isinstance(cls, typing.CallableMeta):
                return cls.__args__ is None

            if isinstance(cls, typing.OptionalMeta):
                return True

            return False

    def _get_base_generic(cls):
        try:
            return cls.__origin__
        except AttributeError:
            pass

        name = type(cls).__name__
        if not name.endswith('Meta'):
            raise NotImplementedError("Cannot determine base of {}".format(cls))

        name = name[:-4]
        return getattr(typing, name)

    def _get_python_type(cls):
        """
        Like `python_type`, but only works with `typing` classes.
        """
        # Many classes actually reference their corresponding abstract base class from the abc module
        # instead of their builtin variant (i.e. typing.List references MutableSequence instead of list).
        # We're interested in the builtin class (if any), so we'll traverse the MRO and look for it there.
        for typ in cls.mro():
            if typ.__module__ == 'builtins' and typ is not object:
                return typ

        try:
            return cls.__extra__
        except AttributeError:
            pass

        if is_qualified_generic(cls):
            cls = get_base_generic(cls)

        if cls is typing.Tuple:
            return tuple

        raise NotImplementedError("Cannot determine python type of {}".format(cls))

    def _get_name(cls):
        try:
            return cls.__name__
        except AttributeError:
            return type(cls).__name__[1:]


if hasattr(typing.List, '__args__'):
    # python 3.6+
    def _get_subtypes(cls):
        subtypes = cls.__args__

        if get_base_generic(cls) is typing.Callable:
            if len(subtypes) != 2 or subtypes[0] is not ...:
                subtypes = (subtypes[:-1], subtypes[-1])

        return subtypes
else:
    # python 3.5
    def _get_subtypes(cls):
        if isinstance(cls, typing.CallableMeta):
            if cls.__args__ is None:
                return ()

            return cls.__args__, cls.__result__

        for name in ['__parameters__', '__union_params__', '__tuple_params__']:
            try:
                subtypes = getattr(cls, name)
                break
            except AttributeError:
                pass
        else:
            raise NotImplementedError("Cannot extract subtypes from {}".format(cls))

        subtypes = [typ for typ in subtypes if not isinstance(typ, typing.TypeVar)]
        return subtypes


def is_generic(cls):
    """
    Detects any kind of generic, for example `List` or `List[int]`. This includes "special" types like
    Union and Tuple - anything that's subscriptable, basically.
    """
    return _is_generic(cls)


def is_base_generic(cls):
    """
    Detects generic base classes, for example `List` (but not `List[int]`)
    """
    return _is_base_generic(cls)


def is_qualified_generic(cls):
    """
    Detects generics with arguments, for example `List[int]` (but not `List`)
    """
    return is_generic(cls) and not is_base_generic(cls)


def get_base_generic(cls):
    if not is_qualified_generic(cls):
        raise TypeError('{} is not a qualified Generic and thus has no base'.format(cls))

    return _get_base_generic(cls)


def get_subtypes(cls):
    return _get_subtypes(cls)


def _instancecheck_iterable(iterable, type_args):
    if len(type_args) != 1:
        raise TypeError("Generic iterables must have exactly 1 type argument; found {}".format(type_args))

    type_ = type_args[0]
    return all(is_instance(val, type_) for val in iterable)


def _instancecheck_mapping(mapping, type_args):
    return _instancecheck_itemsview(mapping.items(), type_args)


def _instancecheck_itemsview(itemsview, type_args):
    if len(type_args) != 2:
        raise TypeError("Generic mappings must have exactly 2 type arguments; found {}".format(type_args))

    key_type, value_type = type_args
    return all(is_instance(key, key_type) and is_instance(val, value_type) for key, val in itemsview)


def _instancecheck_tuple(tup, type_args):
    if len(tup) != len(type_args):
        return False

    return all(is_instance(val, type_) for val, type_ in zip(tup, type_args))


_ORIGIN_TYPE_CHECKERS = {}
for class_path, check_func in {
    # iterables
    'typing.Container': _instancecheck_iterable,
    'typing.Collection': _instancecheck_iterable,
    'typing.AbstractSet': _instancecheck_iterable,
    'typing.MutableSet': _instancecheck_iterable,
    'typing.Sequence': _instancecheck_iterable,
    'typing.MutableSequence': _instancecheck_iterable,
    'typing.ByteString': _instancecheck_iterable,
    'typing.Deque': _instancecheck_iterable,
    'typing.List': _instancecheck_iterable,
    'typing.Set': _instancecheck_iterable,
    'typing.FrozenSet': _instancecheck_iterable,
    'typing.KeysView': _instancecheck_iterable,
    'typing.ValuesView': _instancecheck_iterable,
    'typing.AsyncIterable': _instancecheck_iterable,

    # mappings
    'typing.Mapping': _instancecheck_mapping,
    'typing.MutableMapping': _instancecheck_mapping,
    'typing.MappingView': _instancecheck_mapping,
    'typing.ItemsView': _instancecheck_itemsview,
    'typing.Dict': _instancecheck_mapping,
    'typing.DefaultDict': _instancecheck_mapping,
    'typing.Counter': _instancecheck_mapping,
    'typing.ChainMap': _instancecheck_mapping,

    # other
    'typing.Tuple': _instancecheck_tuple,
}.items():
    try:
        cls = eval(class_path)
    except AttributeError:
        continue

    _ORIGIN_TYPE_CHECKERS[cls] = check_func


def _instancecheck_callable(value, type_):
    if not callable(value):
        return False

    if is_base_generic(type_):
        return True

    param_types, ret_type = get_subtypes(type_)
    sig = inspect.signature(value)

    missing_annotations = []

    if param_types is not ...:
        if len(param_types) != len(sig.parameters):
            return False

        # FIXME: add support for TypeVars

        # if any of the existing annotations don't match the type, we'll return False.
        # Then, if any annotations are missing, we'll throw an exception.
        for param, expected_type in zip(sig.parameters.values(), param_types):
            param_type = param.annotation
            if param_type is inspect.Parameter.empty:
                missing_annotations.append(param)
                continue

            if not is_subtype(param_type, expected_type):
                return False

    if sig.return_annotation is inspect.Signature.empty:
        missing_annotations.append('return')
    else:
        if not is_subtype(sig.return_annotation, ret_type):
            return False

    if missing_annotations:
        raise ValueError("Missing annotations: {}".format(missing_annotations))

    return True


def _instancecheck_union(value, type_):
    types = get_subtypes(type_)
    return any(is_instance(value, typ) for typ in types)


def _instancecheck_type(value, type_):
    # if it's not a class, return False
    if not isinstance(value, type):
        return False

    if is_base_generic(type_):
        return True

    type_args = get_subtypes(type_)
    if len(type_args) != 1:
        raise TypeError("Type must have exactly 1 type argument; found {}".format(type_args))

    return is_subtype(value, type_args[0])


_SPECIAL_INSTANCE_CHECKERS = {
    'Union': _instancecheck_union,
    'Callable': _instancecheck_callable,
    'Type': _instancecheck_type,
    'Any': lambda v, t: True,
}


def is_instance(obj, type_):
    if type_.__module__ == 'typing':
        if is_qualified_generic(type_):
            base_generic = get_base_generic(type_)
        else:
            base_generic = type_
        name = _get_name(base_generic)

        try:
            validator = _SPECIAL_INSTANCE_CHECKERS[name]
        except KeyError:
            pass
        else:
            return validator(obj, type_)

    if is_base_generic(type_):
        python_type = _get_python_type(type_)
        return isinstance(obj, python_type)

    if is_qualified_generic(type_):
        python_type = _get_python_type(type_)
        if not isinstance(obj, python_type):
            return False

        base = get_base_generic(type_)
        try:
            validator = _ORIGIN_TYPE_CHECKERS[base]
        except KeyError:
            raise NotImplementedError("Cannot perform isinstance check for type {}".format(type_))

        type_args = get_subtypes(type_)
        return validator(obj, type_args)

    return isinstance(obj, type_)


def is_subtype(sub_type, super_type):
    if not is_generic(sub_type):
        python_super = python_type(super_type)
        return issubclass(sub_type, python_super)

    # at this point we know `sub_type` is a generic
    python_sub = python_type(sub_type)
    python_super = python_type(super_type)
    if not issubclass(python_sub, python_super):
        return False

    # at this point we know that `sub_type`'s base type is a subtype of `super_type`'s base type.
    # If `super_type` isn't qualified, then there's nothing more to do.
    if not is_generic(super_type) or is_base_generic(super_type):
        return True

    # at this point we know that `super_type` is a qualified generic... so if `sub_type` isn't
    # qualified, it can't be a subtype.
    if is_base_generic(sub_type):
        return False

    # at this point we know that both types are qualified generics, so we just have to
    # compare their sub-types.
    sub_args = get_subtypes(sub_type)
    super_args = get_subtypes(super_type)
    return all(is_subtype(sub_arg, super_arg) for sub_arg, super_arg in zip(sub_args, super_args))


def python_type(annotation):
    """
    Given a type annotation or a class as input, returns the corresponding python class.

    Examples:

    ::
        >>> python_type(typing.Dict)
        <class 'dict'>
        >>> python_type(typing.List[int])
        <class 'list'>
        >>> python_type(int)
        <class 'int'>
    """
    try:
        mro = annotation.mro()
    except AttributeError:
        # if it doesn't have an mro method, it must be a weird typing object
        return _get_python_type(annotation)

    if hasattr(annotation, 'python_type'):
        # a project-specific class may carry its own `python_type` attribute
        # (the original code checked for a custom `Type` base class here)
        return annotation.python_type
    elif annotation.__module__ == 'typing':
        return _get_python_type(annotation)
    else:
        return annotation
Demonstration:
>>> is_instance([{'x': 3}], List[Dict[str, int]])
True
>>> is_instance([{'x': 3}, {'y': 7.5}], List[Dict[str, int]])
False
(As far as I'm aware, this supports all python versions, even the ones <3.5 using the typing module backport.)
It's awkward that there's no built-in function for this, but typeguard comes with a convenient check_type() function:
>>> from typeguard import check_type
>>> from typing import List
>>> check_type("foo", [1,2,"3"], List[int])
Traceback (most recent call last):
...
TypeError: type of foo[2] must be int; got str instead
For more see: https://typeguard.readthedocs.io/en/latest/api.html#typeguard.check_type
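For instance, a hedged sketch for the question's scenario, assuming the typeguard 2.x signature shown above (where check_type takes the argument name first; later typeguard versions changed this API):

import json
from typing import Dict, List
from typeguard import check_type

ArgType = List[Dict[str, int]]

def myfun(a: ArgType) -> None:
    ...

data = json.loads('[{"x": 3}]')
check_type("data", data, ArgType)  # raises TypeError if data doesn't match
myfun(data)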
First of all, even though I think you are aware of it, for the sake of completeness: the typing library contains types for type hints. These type hints are used by IDEs to check whether your code is somewhat sane, and they also serve as documentation of what types a developer expects.
To check whether a variable is an instance of some type, we have to use the isinstance function. Perhaps surprisingly, isinstance accepts types from the typing library directly, e.g.
from typing import List
value = []
isinstance(value, List)
However, for parameterized nested structures such as List[Dict[str, int]] we cannot use this directly, because, funnily enough, you get a TypeError. What you have to do instead is (see the sketch after this list):
Check if the initial value is a list
Check if each item of the list is of type dict
Check if each key of each dict is in fact a string and if each value is in fact an int
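A minimal sketch of those three checks, assuming the data came from json.loads:

def matches_arg_type(data):
    return (
        isinstance(data, list)                            # step 1
        and all(isinstance(item, dict) for item in data)  # step 2
        and all(                                          # step 3
            isinstance(key, str) and isinstance(value, int)
            for item in data
            for key, value in item.items()
        )
    )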
Unfortunately, Python is a bit cumbersome for strict checking like this. Do be aware, however, that Python makes use of duck typing: if it is like a duck and behaves like a duck, then it definitely is a duck.
The common way to handle this is by making use of the fact that if whatever object you pass to myfun doesn't have the required functionality a corresponding exception will be raised (usually TypeError or AttributeError). So you would do the following:
try:
    myfun(data)
except (TypeError, AttributeError) as err:
    ...  # fallback for invalid types goes here
You indicate in your question that you would raise a TypeError if the passed object does not have the appropriate structure but Python does this already for you. The critical question is how you would handle this case. You could also move the try / except block into myfun, if appropriate. When it comes to typing in Python you usually rely on duck typing: if the object has the required functionality then you don't care much about what type it is, as long as it serves the purpose.
Consider the following example. We just pass the data into the function and then get the AttributeError for free (which we can then except); no need for manual type checking:
>>> def myfun(data):
...     for x in data:
...         print(x.items())
...
>>> data = json.loads('[[["a", 1], ["b", 2]], [["c", 3], ["d", 4]]]')
>>> myfun(data)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "<stdin>", line 3, in myfun
AttributeError: 'list' object has no attribute 'items'
In case you are concerned about the usefulness of the resulting error, you could still except and then re-raise a custom exception (or even change the exception's message):
try:
    myfun(data)
except (TypeError, AttributeError) as err:
    raise TypeError('Data has incorrect structure') from err
try:
    myfun(data)
except (TypeError, AttributeError) as err:
    err.args = ('Data has incorrect structure',)
    raise
When using third-party code one should always check the documentation for exceptions that will be raised. For example numpy.inner reports that it will raise a ValueError under certain circumstances. When using that function we don't need to perform any checks ourselves but can rely on the fact that it will raise the error if needed. When using third-party code for which it is not clear how it will behave in some corner cases, IMO it is easier and clearer to just hardcode a corresponding type checker (see below) instead of using a generic solution that works for any type. These cases should be rare anyway, and leaving a corresponding comment makes your fellow developers aware of the situation.
The typing library is for type-hinting and as such it won't be checking the types at runtime. Sure you could do this manually but it is rather cumbersome:
def type_checker(data):
    return (
        isinstance(data, list)
        and all(isinstance(x, dict) for x in data)
        and all(isinstance(k, str) and isinstance(v, int) for x in data for k, v in x.items())
    )
This together with an appropriate comment is still an acceptable solution and it is reusable where a similar data structure is expected. The intent is clear and the code is easily verifiable.
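For instance, a few quick spot checks of the type_checker above (the sample values are ad hoc):

assert type_checker([{'x': 3}])            # matches List[Dict[str, int]]
assert not type_checker([{'x': 'three'}])  # value is not an int
assert not type_checker('not a list')      # not a list at all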
You would have to check your nested type structure manually - the type hints are not enforced.
Checking like this is best done using ABCs (abstract base classes) - so users can provide derived classes that support the same access patterns as the default dict/list:
import collections.abc

def isCorrectType(data):
    if not isinstance(data, collections.abc.Collection):
        return False
    for d in data:
        if not isinstance(d, collections.abc.MutableMapping):
            return False
        for key in d:
            if not (isinstance(key, str) and isinstance(d[key], int)):
                return False
    return True
Output:
print ( isCorrectType( [ {"a":2} ] )) # True
print ( isCorrectType( [ {2:2} ] )) # False
print ( isCorrectType( [ {"a":"a"} ] )) # False
print ( isCorrectType( [ {"a":2},1 ] )) # False
Docs:
ABC - abstract base classes
Related:
What is duck typing?
The other way round would be to follow the "Ask forgiveness not permission" paradigm and simply use your data in the form you want, with a try:/except: around it in case it does not conform to what you wanted. This fits better with What is duck typing? - and allows (similar to ABC checking) the consumer to provide you with classes derived from list/dict while everything still works...
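For example, a minimal EAFP sketch of that idea (the consuming operation and the error message are made up for illustration):

def consume(data):
    # just use the data in the expected shape; anything that quacks like
    # List[Dict[str, int]] will work, including derived classes
    try:
        return {key: value + 1 for entry in data for key, value in entry.items()}
    except (TypeError, AttributeError) as err:
        raise TypeError('data does not behave like List[Dict[str, int]]') from err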
If all you want to do is json-parsing, you should just use pydantic.
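A minimal sketch of that route, assuming the pydantic v1 API (pydantic v2 replaces parse_obj_as with TypeAdapter):

from typing import Dict, List
from pydantic import ValidationError, parse_obj_as

try:
    parse_obj_as(List[Dict[str, int]], [{"x": 3}, {"y": "oops"}])
except ValidationError as err:
    print(err)  # reports that "oops" is not a valid integer

Note that pydantic coerces by default, so e.g. the string "3" would be accepted for an int field.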
But, I encountered the same problem where I wanted to check the type of python objects, so I created a simpler solution than in other answers that handles at least complex types with nested lists and dictionaries.
I created a gist with this method at https://gist.github.com/ramraj07/f537bf9f80b4133c65dd76c958d4c461
Some example uses of this method include:
from typing import List, Dict, Union, Type, Optional
check_type('a', str)
check_type({'a': 1}, Dict[str, int])
check_type([{'a': [1.0]}, 'ten'], List[Union[Dict[str, List[float]], str]])
check_type(None, Optional[str])
check_type('abc', Optional[str])
Here's the code below for reference:
import typing


def check_type(obj: typing.Any, type_to_check: typing.Any, _external=True) -> None:
    try:
        if not hasattr(type_to_check, "_name"):
            # base-case
            if not isinstance(obj, type_to_check):
                raise TypeError
            return

        # type_to_check is from typing library
        type_name = type_to_check._name

        if type_to_check is typing.Any:
            pass
        elif type_name in ("List", "Tuple"):
            if (type_name == "List" and not isinstance(obj, list)) or (
                type_name == "Tuple" and not isinstance(obj, tuple)
            ):
                raise TypeError
            element_type = type_to_check.__args__[0]
            for element in obj:
                check_type(element, element_type, _external=False)
        elif type_name == "Dict":
            if not isinstance(obj, dict):
                raise TypeError
            if len(type_to_check.__args__) != 2:
                raise NotImplementedError(
                    "check_type can only accept Dict typing with separate annotations for key and values"
                )
            key_type, value_type = type_to_check.__args__
            for key, value in obj.items():
                check_type(key, key_type, _external=False)
                check_type(value, value_type, _external=False)
        elif type_name is None and type_to_check.__origin__ is typing.Union:
            type_options = type_to_check.__args__
            no_option_matched = True
            for type_option in type_options:
                try:
                    check_type(obj, type_option, _external=False)
                    no_option_matched = False
                    break
                except TypeError:
                    pass
            if no_option_matched:
                raise TypeError
        else:
            raise NotImplementedError(
                f"check_type method currently does not support checking typing of form '{type_name}'"
            )
    except TypeError:
        if _external:
            raise TypeError(
                f"Object {repr(obj)} is of type {_construct_type_description(obj)} "
                f"when {type_to_check} was expected"
            )
        raise TypeError()


def _construct_type_description(obj) -> str:
    def get_types_in_iterable(iterable) -> str:
        types = {_construct_type_description(element) for element in iterable}
        return types.pop() if len(types) == 1 else f"Union[{','.join(types)}]"

    if isinstance(obj, list):
        return f"List[{get_types_in_iterable(obj)}]"
    elif isinstance(obj, dict):
        key_types = get_types_in_iterable(obj.keys())
        val_types = get_types_in_iterable(obj.values())
        return f"Dict[{key_types}, {val_types}]"
    else:
        return type(obj).__name__
I've set up a metaclass and base class pair for creating the line specifications of several different file types I have to parse.
I have decided to go with using enumerations because many of the individual parts of the different lines in the same file often have the same name. Enums make it easy to tell them apart. Additionally, the specification is rigid and there will be no need to add more members, or extend the line specifications later.
The specification classes work as expected. However, I am having some trouble dynamically creating them:
>>> C1 = LineMakerMeta('C1', (LineMakerBase,), dict(a = 0))
AttributeError: 'dict' object has no attribute '_member_names'
Is there a way around this? The example below works just fine:
class A1(LineMakerBase):
    Mode = 0, dict(fill=' ', align='>', type='s')
    Level = 8, dict(fill=' ', align='>', type='d')
    Method = 10, dict(fill=' ', align='>', type='d')
    _dummy = 20  # so that Method has a known length
A1.format(**dict(Mode='DESIGN', Level=3, Method=1))
# produces ' DESIGN 3 1'
The metaclass is based on enum.EnumMeta, and looks like this:
import enum

class LineMakerMeta(enum.EnumMeta):
    "Metaclass to produce formattable LineMaker child classes."

    def _iter_format(cls):
        "Iteratively generate formatters for the class members."
        for member in cls:
            yield member.formatter

    def __str__(cls):
        "Returns string line with all default values."
        return cls.format()

    def format(cls, **kwargs):
        "Create formatted version of the line populated by the kwargs members."
        # build resulting string by iterating through members
        result = ''
        for member in cls:
            # determine value to be injected into member
            try:
                try:
                    value = kwargs[member]
                except KeyError:
                    value = kwargs[member.name]
            except KeyError:
                value = member.default
            value_str = member.populate(value)
            result = result + value_str
        return result
And the base class is as follows:
class LineMakerBase(enum.Enum, metaclass=LineMakerMeta):
    """A base class for creating Enum subclasses used for populating lines of a file.

    Usage:

        class LineMaker(LineMakerBase):
            a = 0,  dict(align='>', fill=' ', type='f'), 3.14
            b = 10, dict(align='>', fill=' ', type='d'), 1
            c = 15, dict(align='>', fill=' ', type='s'), 'foo'
            #   ^-start  ^---spec dictionary             ^--default
    """
    def __init__(member, start, spec={}, default=None):
        member.start = start
        member.spec = spec
        if default is not None:
            member.default = default
        else:
            # assume value is numerical for all provided types other than 's' (string)
            default_or_set_type = member.spec.get('type', 's')
            default = {'s': ''}.get(default_or_set_type, 0)
            member.default = default

    @property
    def formatter(member):
        """Produces a formatter in form of '{0:<format>}' based on the member.spec
        dictionary. The member.spec dictionary makes use of these keys ONLY (see
        the string.format docs):

            fill align sign width grouping_option precision type"""
        try:
            # get cached value
            return '{{0:{}}}'.format(member._formatter)
        except AttributeError:
            # add width to format spec if not there
            member.spec.setdefault('width', member.length if member.length != 0 else '')
            # build formatter using the available parts in the member.spec dictionary
            # any missing parts will simply not be present in the formatter
            formatter = ''
            for part in 'fill align sign width grouping_option precision type'.split():
                try:
                    spec_value = member.spec[part]
                except KeyError:
                    # missing part
                    continue
                else:
                    # add part
                    sub_formatter = '{!s}'.format(spec_value)
                    formatter = formatter + sub_formatter
            member._formatter = formatter
            return '{{0:{}}}'.format(formatter)

    def populate(member, value=None):
        "Injects the value into the member's formatter and returns the formatted string."
        formatter = member.formatter
        if value is not None:
            value_str = formatter.format(value)
        else:
            value_str = formatter.format(member.default)
        if len(value_str) > len(member) and len(member) != 0:
            raise ValueError(
                'Length of object string {} ({}) exceeds available'
                ' field length for {} ({}).'
                .format(value_str, len(value_str), member.name, len(member)))
        return value_str

    @property
    def length(member):
        return len(member)

    def __len__(member):
        """Returns the length of the member field. The last member has no length.
        Lengths are based on simple subtraction of starting positions."""
        # get cached value
        try:
            return member._length
        # calculate member length
        except AttributeError:
            # compare by member values because member could be an alias
            members = list(type(member))
            try:
                next_index = next(
                    i+1
                    for i, m in enumerate(type(member))
                    if m.value == member.value
                )
            except StopIteration:
                raise TypeError(
                    'The member value {} was not located in the {}.'
                    .format(member.value, type(member).__name__)
                )
            try:
                next_member = members[next_index]
            except IndexError:
                # last member defaults to no length
                length = 0
            else:
                length = next_member.start - member.start
            member._length = length
            return length
This line:
C1 = enum.EnumMeta('C1', (), dict(a = 0))
fails with exactly the same error message. The __new__ method of EnumMeta expects an instance of enum._EnumDict as its last argument. _EnumDict is a subclass of dict and provides an instance variable named _member_names, which of course a regular dict doesn't have. When you go through the standard mechanism of enum creation, this all happens correctly behind the scenes. That's why your other example works just fine.
This line:
C1 = enum.EnumMeta('C1', (), enum._EnumDict())
runs with no error. Unfortunately, the constructor of _EnumDict is defined as taking no arguments, so you can't initialize it with keywords as you apparently want to do.
In the implementation of enum that's backported to Python3.3, the following block of code appears in the constructor of EnumMeta. You could do something similar in your LineMakerMeta class:
def __new__(metacls, cls, bases, classdict):
    if type(classdict) is dict:
        original_dict = classdict
        classdict = _EnumDict()
        for k, v in original_dict.items():
            classdict[k] = v
In the official implementation, in Python 3.5, the if statement and the subsequent block of code are gone for some reason. Therefore classdict must be an honest-to-god _EnumDict, and I don't see why this was done. In any case, the implementation of Enum is extremely complicated and handles a lot of corner cases.
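A hedged sketch of porting that block into LineMakerMeta (it relies on the private enum._EnumDict API, so it may break across Python versions):

import enum

class LineMakerMeta(enum.EnumMeta):
    def __new__(metacls, cls, bases, classdict):
        if type(classdict) is dict:
            original_dict = classdict
            classdict = enum._EnumDict()  # private API; behaviour varies by version
            for k, v in original_dict.items():
                classdict[k] = v
        return super().__new__(metacls, cls, bases, classdict)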
I realize this is not a cut-and-dried answer to your question but I hope it will point you to a solution.
Create your LineMakerBase class, and then use it like so:
C1 = LineMakerBase('C1', dict(a=0))
The metaclass was not meant to be used the way you are trying to use it. Check out this answer for advice on when metaclass subclasses are needed.
Some suggestions for your code:
the double try/except in format seems clearer as:
for member in cls:
    if member in kwargs:
        value = kwargs[member]
    elif member.name in kwargs:
        value = kwargs[member.name]
    else:
        value = member.default
this code:
# compare by member values because member could be an alias
members = list(type(member))
would be clearer with list(member.__class__)
has a false comment: listing an Enum class will never include the aliases (unless you have overridden that part of EnumMeta)
instead of the complicated __len__ code you have now, and as long as you are subclassing EnumMeta you should extend __new__ to automatically calculate the lengths once:
# untested
def __new__(metacls, cls, bases, clsdict):
    # let the main EnumMeta code do the heavy lifting
    enum_cls = super(LineMakerMeta, metacls).__new__(metacls, cls, bases, clsdict)
    # go through the members and calculate the lengths
    canonical_members = [
        member
        for name, member in enum_cls.__members__.items()
        if name == member.name
    ]
    last_member = None
    for next_member in canonical_members:
        next_member.length = 0
        if last_member is not None:
            last_member.length = next_member.start - last_member.start
        last_member = next_member  # advance so each length is actually computed
The simplest way to create Enum subclasses on the fly is using Enum itself:
>>> from enum import Enum
>>> MyEnum = Enum('MyEnum', {'a': 0})
>>> MyEnum
<enum 'MyEnum'>
>>> MyEnum.a
<MyEnum.a: 0>
>>> type(MyEnum)
<class 'enum.EnumMeta'>
As for your custom methods, it might be simpler if you used regular functions, precisely because Enum implementation is so special.
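For example, a hedged sketch of that suggestion: keep format as a plain module-level function instead of a metaclass method (this assumes the name, default and populate members from the question's base class):

def format_line(line_enum, **kwargs):
    # iterate over the Enum members and substitute the provided values
    result = ''
    for member in line_enum:
        value = kwargs.get(member.name, member.default)
        result += member.populate(value)
    return result

format_line(A1, Mode='DESIGN', Level=3, Method=1)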
def pnamedtuple(type_name, field_names, mutable=False):
    pass

class type_name:
    def __init__(self, x, y):
        self.x = x
        self.y = y
        self._fields = ['x', 'y']
        self._mutable = False

    def get_x(self):
        return self.x

    def get_y(self):
        return self.y

    def _getitem_(self, i):
        if i > 1 or i < 0:
            raise IndexError
        if i == 0 or i == 'x':
            return self.get_x()
        if i == 1 or i == 'y':
            return self.get_y()
The _getitem_ method is supposed to overload the [] (indexing) operator for this class: an index of 0 returns the value of the first field name in the field_names list; an index of 1 returns the value of the second field name in the list, etc. The index can also be a string with the name of a field. So, for p = Point(1,2), writing p.get_x(), or p[0], or p['x'] returns a result of 1. Raise an IndexError with an appropriate message if the index is an out-of-bounds int or a string that does not name a field.
I am not sure how to fix the _getitem_ function. Below is bsc.txt:
c-->t1 = Triple1(1,2,3)
c-->t2 = Triple2(1,2,3)
c-->t3 = Triple3(1,2,3)
# Test __getitem__ functions
e-->t1[0]-->1
e-->t1[1]-->2
e-->t1[2]-->3
e-->t1['a']-->1
e-->t1['b']-->2
e-->t1['c']-->3
^-->t1[4]-->IndexError
^-->t1['d']-->IndexError
^-->t1[3.2]-->IndexError
Can someone tell me how to fix my _getitem_ function to get the output in bsc.txt? Many thanks.
You've spelled __getitem__ incorrectly. Magic methods require two __ underscores before and after them.
So you haven't overloaded the original __getitem__ method, you've simply created a new method named _getitem_.
Python 3 does not allow strings and integers to be compared with > or <; it's best to stick with == if you don't yet know the type of i. You could use isinstance, but here you can easily convert the only two valid integer values to strings (or vice versa), then work only on strings.
def __getitem__(self, i):
    if i == 0:
        i = "x"
    elif i == 1:
        i = "y"

    if i == "x":
        return self.get_x()
    elif i == "y":
        return self.get_y()
    else:
        raise IndexError("Invalid key: {}".format(i))
Your function is interesting, but it has some issues:
In Python 3 you can't compare strings with numbers, so you should first check with == against known values and/or types. For example:
def __getitem__(self, i):
    if i in {0, "x"}:
        return self.x
    elif i in {1, "y"}:
        return self.y
    else:
        raise IndexError(repr(i))
But defined like that (in your code or in the example above), for an instance t1, t1[X] will always fail for any string X other than "x" or "y", because you don't adjust it for other values. And that is because:
pnamedtuple looks like you want it to be a factory like collections.namedtuple, but it fails to be general enough because you don't use any of the arguments of your function at all. And no, type_name is not used either; whatever value it has is thrown away by the class declaration.
How to fix it?
You need another way to store the values of the fields and their respective names, for example a dictionary; let's call it self._data.
To remember what your fields are called, use the argument of your function, for instance self._fields = field_names.
To accept an unknown number of arguments use * as in __init__(self, *values), then verify that you have the same number of values as fields, and build the data structure from point 1 (the dictionary).
Once those are ready, __getitem__ becomes something like:
def __getitem__(self, key):
    if key in self._data:
        return self._data[key]
    elif isinstance(key, int) and 0 <= key < len(self._fields):
        return self._data[self._fields[key]]
    else:
        raise IndexError(repr(key))
Or you can simply inherit from an appropriate namedtuple, in which case the only thing you need to do is overwrite its __getitem__:
def __getitem__(self, key):
    if key in self._fields:
        return getattr(self, key)
    return super().__getitem__(key)
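A self-contained sketch of that inheritance route (the Point class and its fields are just for illustration):

from collections import namedtuple

class Point(namedtuple('PointBase', ['x', 'y'])):
    __slots__ = ()

    def __getitem__(self, key):
        # string keys go through attribute access; everything else
        # (including plain int indexes) falls back to the tuple implementation
        if key in self._fields:
            return getattr(self, key)
        return super().__getitem__(key)

p = Point(1, 2)
assert p[0] == p['x'] == p.x == 1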
I'm using a namedtuple to hold sets of strings and their corresponding values.
I'm not using a dictionary, because I want the strings accessible as attributes.
Here's my code:
from collections import namedtuple
# Shortened for readability :-)
strings = namedtuple("strings", ['a0', 'a1', 'a2', ..., 'a400'])
my_strings = strings(value0, value1, value2, ..., value400)
Ideally, once my_strings is initialized, I should be able to do this:
print(my_strings.a1)
and get value1 printed back.
However, I get the following error instead:
strings(value0, value1, value2, ...value400)
SyntaxError: more than 255 arguments
It seems Python functions (including namedtuple's __init__()) do not accept more than 255 arguments when called.
Is there any way to bypass this issue and have named tuples with more than 255 items? Why is there a 255 arguments limit anyway?
This is a limit to CPython function definitions; in versions before Python 3.7, you cannot specify more than 255 explicit arguments to a callable. This applies to any function definition, not just named tuples.
Note that this limit has been lifted in Python 3.7 and newer, where the new limit is sys.maxsize. See What is a maximum number of arguments in a Python function?
It is the generated code for the class that is hitting this limit. You cannot define a function with more than 255 arguments, so the __new__ class method of the resulting class cannot be defined in the CPython implementation.
You'll have to ask yourself, however, if you really should be using a different structure instead. It looks like you have a list-like piece of data to me; 400 numbered names is a sure sign of your data bleeding into your names.
You can work around this by creating your own subclass, manually:
from operator import itemgetter
from collections import OrderedDict

class strings(tuple):
    __slots__ = ()
    _fields = tuple('a{}'.format(i) for i in range(400))

    def __new__(cls, *args, **kwargs):
        req = len(cls._fields)
        if len(args) + len(kwargs) > req:
            raise TypeError(
                '__new__() takes {} positional arguments but {} were given'.format(
                    req, len(args) + len(kwargs)))
        if kwargs.keys() > set(cls._fields):
            raise TypeError(
                '__new__() got an unexpected keyword argument {!r}'.format(
                    (kwargs.keys() - set(cls._fields)).pop()))
        missing = req - len(args)
        if kwargs.keys() & set(cls._fields[:-missing]):
            raise TypeError(
                '__new__() got multiple values for argument {!r}'.format(
                    (kwargs.keys() & set(cls._fields[:-missing])).pop()))
        try:
            for field in cls._fields[-missing:]:
                args += (kwargs[field],)
                missing -= 1
        except KeyError:
            pass
        if len(args) < req:
            raise TypeError('__new__() missing {} positional argument{}: {}'.format(
                missing, 's' if missing > 1 else '',
                ' and '.join(filter(None, [', '.join(map(repr, cls._fields[-missing:-1])), repr(cls._fields[-1])]))))
        return tuple.__new__(cls, args)

    @classmethod
    def _make(cls, iterable, new=tuple.__new__, len=len):
        'Make a new strings object from a sequence or iterable'
        result = new(cls, iterable)
        if len(result) != len(cls._fields):
            raise TypeError('Expected %d arguments, got %d' % (len(cls._fields), len(result)))
        return result

    def __repr__(self):
        'Return a nicely formatted representation string'
        format = '{}({})'.format(self.__class__.__name__, ', '.join('{}=%r'.format(n) for n in self._fields))
        return format % self

    def _asdict(self):
        'Return a new OrderedDict which maps field names to their values'
        return OrderedDict(zip(self._fields, self))

    __dict__ = property(_asdict)

    def _replace(self, **kwds):
        'Return a new strings object replacing specified fields with new values'
        result = self._make(map(kwds.pop, self._fields, self))
        if kwds:
            raise ValueError('Got unexpected field names: %r' % list(kwds))
        return result

    def __getnewargs__(self):
        'Return self as a plain tuple. Used by copy and pickle.'
        return tuple(self)

    def __getstate__(self):
        'Exclude the OrderedDict from pickling'
        return None

for i, name in enumerate(strings._fields):
    setattr(strings, name,
            property(itemgetter(i), doc='Alias for field number {}'.format(i)))
This version of the named tuple avoids the long argument lists altogether, but otherwise behaves exactly like the original. The somewhat verbose __new__ method is not strictly needed but does closely emulate the original behaviour when arguments are incomplete. Note the construction of the _fields attribute; replace this with your own to name your tuple fields.
Pass in a generator expression to set your arguments:
s = strings(i for i in range(400))
or if you have a list of values:
s = strings(iter(list_of_values))
Either technique bypasses the limits on function signatures and function call argument counts.
Demo:
>>> s = strings(i for i in range(400))
>>> s
strings(a0=0, a1=1, a2=2, a3=3, a4=4, a5=5, a6=6, a7=7, a8=8, a9=9, a10=10, a11=11, a12=12, a13=13, a14=14, a15=15, a16=16, a17=17, a18=18, a19=19, a20=20, a21=21, a22=22, a23=23, a24=24, a25=25, a26=26, a27=27, a28=28, a29=29, a30=30, a31=31, a32=32, a33=33, a34=34, a35=35, a36=36, a37=37, a38=38, a39=39, a40=40, a41=41, a42=42, a43=43, a44=44, a45=45, a46=46, a47=47, a48=48, a49=49, a50=50, a51=51, a52=52, a53=53, a54=54, a55=55, a56=56, a57=57, a58=58, a59=59, a60=60, a61=61, a62=62, a63=63, a64=64, a65=65, a66=66, a67=67, a68=68, a69=69, a70=70, a71=71, a72=72, a73=73, a74=74, a75=75, a76=76, a77=77, a78=78, a79=79, a80=80, a81=81, a82=82, a83=83, a84=84, a85=85, a86=86, a87=87, a88=88, a89=89, a90=90, a91=91, a92=92, a93=93, a94=94, a95=95, a96=96, a97=97, a98=98, a99=99, a100=100, a101=101, a102=102, a103=103, a104=104, a105=105, a106=106, a107=107, a108=108, a109=109, a110=110, a111=111, a112=112, a113=113, a114=114, a115=115, a116=116, a117=117, a118=118, a119=119, a120=120, a121=121, a122=122, a123=123, a124=124, a125=125, a126=126, a127=127, a128=128, a129=129, a130=130, a131=131, a132=132, a133=133, a134=134, a135=135, a136=136, a137=137, a138=138, a139=139, a140=140, a141=141, a142=142, a143=143, a144=144, a145=145, a146=146, a147=147, a148=148, a149=149, a150=150, a151=151, a152=152, a153=153, a154=154, a155=155, a156=156, a157=157, a158=158, a159=159, a160=160, a161=161, a162=162, a163=163, a164=164, a165=165, a166=166, a167=167, a168=168, a169=169, a170=170, a171=171, a172=172, a173=173, a174=174, a175=175, a176=176, a177=177, a178=178, a179=179, a180=180, a181=181, a182=182, a183=183, a184=184, a185=185, a186=186, a187=187, a188=188, a189=189, a190=190, a191=191, a192=192, a193=193, a194=194, a195=195, a196=196, a197=197, a198=198, a199=199, a200=200, a201=201, a202=202, a203=203, a204=204, a205=205, a206=206, a207=207, a208=208, a209=209, a210=210, a211=211, a212=212, a213=213, a214=214, a215=215, a216=216, a217=217, a218=218, a219=219, a220=220, a221=221, a222=222, a223=223, a224=224, a225=225, a226=226, a227=227, a228=228, a229=229, a230=230, a231=231, a232=232, a233=233, a234=234, a235=235, a236=236, a237=237, a238=238, a239=239, a240=240, a241=241, a242=242, a243=243, a244=244, a245=245, a246=246, a247=247, a248=248, a249=249, a250=250, a251=251, a252=252, a253=253, a254=254, a255=255, a256=256, a257=257, a258=258, a259=259, a260=260, a261=261, a262=262, a263=263, a264=264, a265=265, a266=266, a267=267, a268=268, a269=269, a270=270, a271=271, a272=272, a273=273, a274=274, a275=275, a276=276, a277=277, a278=278, a279=279, a280=280, a281=281, a282=282, a283=283, a284=284, a285=285, a286=286, a287=287, a288=288, a289=289, a290=290, a291=291, a292=292, a293=293, a294=294, a295=295, a296=296, a297=297, a298=298, a299=299, a300=300, a301=301, a302=302, a303=303, a304=304, a305=305, a306=306, a307=307, a308=308, a309=309, a310=310, a311=311, a312=312, a313=313, a314=314, a315=315, a316=316, a317=317, a318=318, a319=319, a320=320, a321=321, a322=322, a323=323, a324=324, a325=325, a326=326, a327=327, a328=328, a329=329, a330=330, a331=331, a332=332, a333=333, a334=334, a335=335, a336=336, a337=337, a338=338, a339=339, a340=340, a341=341, a342=342, a343=343, a344=344, a345=345, a346=346, a347=347, a348=348, a349=349, a350=350, a351=351, a352=352, a353=353, a354=354, a355=355, a356=356, a357=357, a358=358, a359=359, a360=360, a361=361, a362=362, a363=363, a364=364, a365=365, a366=366, a367=367, a368=368, a369=369, a370=370, a371=371, a372=372, a373=373, a374=374, a375=375, 
a376=376, a377=377, a378=378, a379=379, a380=380, a381=381, a382=382, a383=383, a384=384, a385=385, a386=386, a387=387, a388=388, a389=389, a390=390, a391=391, a392=392, a393=393, a394=394, a395=395, a396=396, a397=397, a398=398, a399=399)
>>> s.a391
391
namedtuple out of the box doesn't support what you are trying to do.
So the following might achieve the goal, whether the count later changes from 400 to 450 arguments, or to something smaller and saner.
def customtuple(*keys):
    class string:
        _keys = keys
        _dict = {}  # note: class-level, shared by all instances of this generated class

        def __init__(self, *args):
            args = list(args)
            if len(args) != len(self._keys):
                raise Exception("No go forward")
            for key in range(len(args)):
                self._dict[self._keys[key]] = args[key]

        def __setattr__(self, *args):
            raise BaseException("Not allowed")

        def __getattr__(self, arg):
            try:
                return self._dict[arg]
            except:
                raise BaseException("Name not defined")

        def __repr__(self):
            return ("string(%s)"
                    % (", ".join(["%s=%r"
                                  % (self._keys[key],
                                     self._dict[self._keys[key]])
                                  for key in range(len(self._dict))])))
    return string
>>> strings = customtuple(*['a'+str(x) for x in range(1, 401)])
>>> s = strings(*['a'+str(x) for x in range(2, 402)])
>>> s.a1
'a2'
>>> s.a1 = 1
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/hus787/p.py", line 15, in __setattr__
def __setattr__(self, *args):
BaseException: Not allowed
For more light on the subject.
Here is my version of a replacement for namedtuple that supports more than 255 arguments. The idea was not to be functionally equivalent but rather to improve on some aspects (in my opinion). This is for Python 3.4+ only:
class SequenceAttrReader(object):
    """ Class to function similar to collections.namedtuple but allowing more than 255 keys.
    Initialize with an attribute string (space separated), then load in data via a sequence, then access the list keys as properties

    i.e.

    csv_line = SequenceAttrReader('a b c')
    csv_line.load_data([1, 2, 3])
    print(csv_line.b)
    >> 2
    """

    _attr_string = None
    _attr_list = []
    _data_list = []

    def __init__(self, attr_string):
        if not attr_string:
            raise AttributeError('SequenceAttrReader not properly initialized, please use a non-empty string')
        self._attr_string = attr_string
        self._attr_list = attr_string.split(' ')

    def __getattr__(self, name):
        if not self._attr_string or not self._attr_list or not self._data_list:
            raise AttributeError('SequenceAttrReader not properly initialized or loaded')
        try:
            index = self._attr_list.index(name)
        except ValueError:
            raise AttributeError("'{name}' not in attribute string".format(name=name)) from None
        try:
            value = self._data_list[index]
        except IndexError:
            raise AttributeError("No attribute named '{name}'".format(name=name)) from None
        return value

    def __str__(self):
        return str(self._data_list)

    def __repr__(self):
        return 'SequenceAttrReader("{attr_string}")'.format(attr_string=self._attr_string)

    def load_data(self, data_list):
        if not self._attr_list:
            raise AttributeError('SequenceAttrReader not properly initialized')
        if not data_list:
            raise AttributeError('SequenceAttrReader needs to load a non-empty sequence')
        self._data_list = data_list
This is probably not the most efficient way if you are doing a lot of individual lookups, converting it internally to a dict may be better. I'll work on an optimized version once I have more time or at least see what the performance difference is.
Sometimes in my code I have a function which can take an argument in one of two ways. Something like:
def func(objname=None, objtype=None):
    if objname is not None and objtype is not None:
        raise ValueError("only 1 of the ways at a time")
    if objname is not None:
        obj = getObjByName(objname)
    elif objtype is not None:
        obj = getObjByType(objtype)
    else:
        raise ValueError("not given any of the ways")

    doStuffWithObj(obj)
Is there any more elegant way to do this? What if the arg could come in one of three ways? If the types are distinct I could do:
def func(objnameOrType):
    if type(objnameOrType) is str:
        getObjByName(objnameOrType)
    elif type(objnameOrType) is type:
        getObjByType(objnameOrType)
    else:
        raise ValueError("unk arg type: %s" % type(objnameOrType))
But what if they are not? This alternative seems silly:
def func(objnameOrType, isName=True):
    if isName:
        getObjByName(objnameOrType)
    else:
        getObjByType(objnameOrType)
because then you have to call it like func(mytype, isName=False), which is weird.
How about using something like a command dispatch pattern:
def func(objnameOrType):
    dispatcher = {str: getObjByName,
                  type1: getObjByType1,
                  type2: getObjByType2}

    t = type(objnameOrType)
    obj = dispatcher[t](objnameOrType)
    doStuffWithObj(obj)
where type1, type2, etc. are actual Python types (e.g. int, float, etc.).
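A self-contained version of that idea (the lookup helpers are stand-ins):

def getObjByName(name):
    return {'name': name}  # stand-in lookup

def getObjByType(typ):
    return {'type': typ}  # stand-in lookup

def func(objnameOrType):
    dispatcher = {str: getObjByName, type: getObjByType}
    try:
        handler = dispatcher[type(objnameOrType)]
    except KeyError:
        raise TypeError('unknown arg type: %s' % type(objnameOrType))
    return handler(objnameOrType)

assert func('foo') == {'name': 'foo'}
assert func(int) == {'type': int}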
Sounds like it should go to https://codereview.stackexchange.com/
Anyway, keeping the same interface, I may try
arg_parsers = {
    'objname': getObjByName,
    'objtype': getObjByType,
    ...
}

def func(**kwargs):
    assert len(kwargs) == 1  # replace this with your favorite exception
    (argtypename, argval) = next(iter(kwargs.items()))
    obj = arg_parsers[argtypename](argval)
    doStuffWithObj(obj)
or simply create 2 functions?
def funcByName(name): ...
def funcByType(type_): ...
One way to make it slightly shorter is
def func(objname=None, objtype=None):
    if [objname, objtype].count(None) != 1:
        raise TypeError("Exactly 1 of the ways must be used.")
    if objname is not None:
        obj = getObjByName(objname)
    else:
        obj = getObjByType(objtype)
I have not yet decided if I would call this "elegant".
Note that you should raise a TypeError if the wrong number of arguments was given, not a ValueError.
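The same trick extends to three ways: with three keyword arguments, exactly two of them must remain None (objid is a hypothetical third name):

def func(objname=None, objtype=None, objid=None):
    if [objname, objtype, objid].count(None) != 2:
        raise TypeError("Exactly 1 of the ways must be used.")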
For whatever it's worth, similar kinds of things happen in the Standard Libraries; see, for example, the beginning of GzipFile in gzip.py (shown here with docstrings removed):
class GzipFile:
    myfileobj = None
    max_read_chunk = 10 * 1024 * 1024  # 10Mb

    def __init__(self, filename=None, mode=None,
                 compresslevel=9, fileobj=None):
        if mode and 'b' not in mode:
            mode += 'b'
        if fileobj is None:
            fileobj = self.myfileobj = __builtin__.open(filename, mode or 'rb')
        if filename is None:
            if hasattr(fileobj, 'name'): filename = fileobj.name
            else: filename = ''
        if mode is None:
            if hasattr(fileobj, 'mode'): mode = fileobj.mode
            else: mode = 'rb'
Of course this accepts both filename and fileobj keywords and defines a particular behavior in the case that it receives both; but the general approach seems pretty much identical.
I use a decorator:
from functools import wraps

def one_of(kwarg_names):
    # assert that one and only one of the given kwarg names are passed to the decorated function
    def inner(f):
        @wraps(f)
        def wrapped(*args, **kwargs):
            count = 0
            for kw in kwargs:
                if kw in kwarg_names and kwargs[kw] is not None:
                    count += 1
            assert count == 1, f'exactly one of {kwarg_names} required, got {kwargs}'
            return f(*args, **kwargs)
        return wrapped
    return inner
Used as:
@one_of(['kwarg1', 'kwarg2'])
def my_func(kwarg1='default', kwarg2='default'):
    pass
Note that this only accounts for non-None values that are passed as keyword arguments. E.g. multiple of the kwarg_names may still be passed if all but one of them have a value of None.
To allow for passing none of the kwargs simply assert that the count is <= 1.
It sounds like you're looking for function overloading, which isn't implemented in Python 2. In Python 2, your solution is nearly as good as you can expect to get.
You could probably bypass the extra argument problem by allowing your function to process multiple objects and return a generator:
import types

all_types = set([getattr(types, t) for t in dir(types) if t.endswith('Type')])

def func(*args):
    for arg in args:
        if arg in all_types:
            yield getObjByType(arg)
        else:
            yield getObjByName(arg)
Test:
>>> getObjByName = lambda a: {'Name': a}
>>> getObjByType = lambda a: {'Type': a}
>>> list(func('IntType'))
[{'Name': 'IntType'}]
>>> list(func(types.IntType))
[{'Type': <type 'int'>}]
The built-in sum() can be used on a list of boolean expressions. In Python, bool is a subclass of int, and in arithmetic operations, True behaves as 1, and False behaves as 0.
This means that this rather short code will test mutual exclusivity for any number of arguments:
def do_something(a=None, b=None, c=None):
    if sum([a is not None, b is not None, c is not None]) != 1:
        raise TypeError("specify exactly one of 'a', 'b', or 'c'")
Variations are also possible:
def do_something(a=None, b=None, c=None):
    if sum([a is not None, b is not None, c is not None]) > 1:
        raise TypeError("specify at most one of 'a', 'b', or 'c'")
I occasionally run into this problem as well, and it is hard to find an easily generalisable solution. Say I have more complex combinations of arguments that are delineated by a set of mutually exclusive arguments and want to support additional arguments for each (some of which may be required and some optional), as in the following signatures:
def func(mutex1: str, arg1: bool): ...
def func(mutex2: str): ...
def func(mutex3: int, arg1: Optional[bool] = None): ...
I would use object orientation to wrap the arguments in a set of descriptors (with names depending on the business meaning of the arguments), which can then be validated by something like pydantic:
from typing import Optional
from pydantic import BaseModel, Extra

# Extra.forbid ensures validation error if superfluous arguments are provided
class BaseDescription(BaseModel, extra=Extra.forbid):
    pass  # Arguments common to all descriptions go here

class Description1(BaseDescription):
    mutex1: str
    arg1: bool

class Description2(BaseDescription):
    mutex2: str

class Description3(BaseDescription):
    mutex3: int
    arg1: Optional[bool]
You could instantiate these descriptions with a factory:
class DescriptionFactory:
    _class_map = {
        'mutex1': Description1,
        'mutex2': Description2,
        'mutex3': Description3,
    }

    @classmethod
    def from_kwargs(cls, **kwargs) -> BaseDescription:
        kwargs = {k: v for k, v in kwargs.items() if v is not None}
        set_fields = kwargs.keys() & cls._class_map.keys()
        try:
            [set_field] = set_fields
        except ValueError:
            raise ValueError(f"exactly one of {list(cls._class_map.keys())} must be provided")
        return cls._class_map[set_field](**kwargs)

    @classmethod
    def validate_kwargs(cls, func):
        def wrapped(**kwargs):
            return func(cls.from_kwargs(**kwargs))
        return wrapped
Then you can wrap your actual function implementation like this and use type checking to see which arguments were provided:
@DescriptionFactory.validate_kwargs
def func(desc: BaseDescription):
    if isinstance(desc, Description1):
        ...  # use desc.mutex1 and desc.arg1
    elif isinstance(desc, Description2):
        ...  # use desc.mutex2
    ...  # etc.
and call as func(mutex1='', arg1=True), func(mutex2=''), func(mutex3=123) and so on.
This is not overall shorter code, but it performs argument validation in a very descriptive way according to your specification, raises useful pydantic errors when validation fails, and results in accurate static types in each branch of the function implementation.
Note that if you're using Python 3.10+, structural pattern matching could simplify some parts of this.
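For illustration, a hedged sketch of the match-based variant (Python 3.10+), reusing the Description classes above:

@DescriptionFactory.validate_kwargs
def func(desc: BaseDescription):
    match desc:
        case Description1(mutex1=m, arg1=a):
            ...  # use m and a
        case Description2(mutex2=m):
            ...  # use m
        case Description3(mutex3=m, arg1=a):
            ...  # use m and the optional a

Class patterns match via isinstance and attribute access, so they work with pydantic models out of the box.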