Since we are raising, not catching, the CustomException, I have to
learn some new things about handling a stack trace that exists not as a
raised except but as the exception that will be raised, if that makes
sense. I just want to get rid of the CustomException's internal and
the handler raiser information and only show information relevant
to the caller that called the handler that raised the exception.
I'm struggling a little with cleaning up my Custom Exception's stack
trace. Because this Custom exception will help catch typos
and incorrect coding early, I want to clean up its message and stack trace
to not include references to internal module paths and function / method
levels. E.g., rather than showing "variable expects types.List[int]",
I want it to show "variable expects List[int]". But that particular
enhancement is not what I am struggling with.
The cleanup enhancement I am struggling with and asking for help with is
this: rather than showing:
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "<cwd>/fibonacci.py", line 67, in fib
raise ArgumentError("index", (int, List[int], Tuple[int,int]),
my_custom_modules.my_custom_exceptions.argumenterror.ArgumentError: index expects (<class 'int'>, typing.List[int],
typing.Tuple[int, int]) but found (0, 1, 2)
I wish it to more elegantly show:
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "<cwd>/fibonacci.py", line 67, in fib
raise ArgumentError("index", (int, List[int], Tuple[int,int]),
ArgumentError: index expects (int, list[int], tuple[int, int]) but found (0, 1, 2)
Notice the module structure is reduced to only the Exception class name only.
So I have reduced and simplified the code to make it easier to weed through but to illustrate the problem I still have to keep a directory structure.
Here are links for 3 files, 1 is this text and the other 2 are the code sections shown below.
https://gist.github.com/ismaelharunid/88dd8a246ac42203312b14fe1874f60f/raw/6af13d6c798506c99cbeb68ef457a80da5e153a2/ArgumentError_readme.MD
https://gist.github.com/ismaelharunid/7ef52774d887a4aadc328bb8d08a9fb5/raw/3f3dde00cbe170bf96146964ca0b73d7355d0128/ArgumentError_argumenterror.py
https://gist.githubusercontent.com/ismaelharunid/6a19968b737f360a80bf9a0fb1b8f060/raw/b7bad77c261f9ce5d17b13d6d53f8a409dc08cde/ArgumentError_fibonacci.py
The custom exception code:
#./my_custom_modules/my_custom_exceptions/argumenterror.py
from types import GenericAlias
class ArgumentError(ValueError):
    '''
    A drop-in replacement for ValueError aimed at function and method
    argument validation: it assembles the complaint text from the
    argument name, the expected annotation and the offending value,
    removing the need to hand-write the same message repeatedly.

    Parameters:
    ===========
    name (:str)
        Name of the offending argument.
    expects (:type, Generic, [type, Generic])
        Annotation(s) describing the values that would be accepted.
    found (:Any)
        The value actually received.
    *specs (:*Any)
        Optional extra format strings, rendered as additional lines.
    **variables (:**Any)
        Extra named values made available to the format strings.
    '''

    MessageSpec = "{name} expects {expects!r} but found {found!r}"

    def __new__(cls, name, expects, found, *specs, **variables):
        "see help(ArgumentError) for correct annotations."
        return super().__new__(cls)

    def __init__(self, name, expects, found, *specs, **variables):
        "see help(ArgumentError) for correct annotations."
        cleaned = self.__expects__(expects)
        message = self.__message__(name=name, expects=cleaned,
                                   found=found, **variables)
        if not specs:
            self.__tbinit__(message)
            return
        details = tuple(self.__detail__(spec, name=name, expects=cleaned,
                                        found=found, **variables)
                        for spec in specs)
        self.__tbinit__(message, details)

    def __expects__(self, expects, _depth=0):
        '''
        Internal hook for pretty-printing the expected annotations.
        Currently a pass-through; stripping "typing." / "<class ...>"
        noise is deliberately left for a later version.
        '''
        return expects

    def __message__(self, **variables):
        "Internal: render MessageSpec with the given fields."
        return self.MessageSpec.format(**variables)

    def __detail__(self, spec, **variables):
        "Internal: render one extra message line from a format spec."
        return spec.format(**variables)

    def __tbinit__(self, *lines):
        "Internal: forward the rendered lines to ValueError.__init__."
        super().__init__(*lines)
The usage module code:
'''
./fibonacci.py
A fibonacci sequence generator, mostly for annotation demonstration
purposes. Includes a single function fib. See function fib for usage
documentation.
Examples:
=========
from fibonacci import fib
fib(3) # -> 2
fib(-4) # -> -3
fib(-5) # -> 5
fib((-6, 6)) # -> (-8, 5, -3, 2, -1, 1, 0, 1, 1, 2, 3, 5, 8)
fib([-7])          # -> (13,)
fib([-8, 8]) # -> (-21, 21)
fib([9, -10, 11]) # -> (34, -55, 89)
raises ArgumentError:
=====================
fib(9, -10)
#ArgumentError: cache expects list[int] but found -10
fib(())
#ArgumentError: index expects (int, list[int], tuple[int, int]) but found ()
fib((0,))
#ArgumentError: index expects (int, list[int], tuple[int, int]) but found (0,)
fib((0,1,2))
#ArgumentError: index expects (int, list[int], tuple[int, int]) but found (0, 1, 2)
'''
from typing import List, Tuple
from my_custom_modules.my_custom_exceptions.argumenterror \
import ArgumentError
def fib(index:[int, Tuple[int,int, List[int]]],
        cache:List[int]=[0, 1]):
    '''
    Return the fibonacci number at *index*, or a tuple of numbers.

    Parameters:
    ===========
    index :(int | tuple[int, int] | list[*int])
        A single index, an explicit list of indexes, or an inclusive
        (start, stop) range.  Negative indexes follow the
        negafibonacci convention.
    cache :(list[int])
        Memo table deliberately shared across calls (the default list
        is mutated on purpose).  Pass [0, 1] to force regeneration;
        other seeds produce a custom sequence and may mishandle
        negative indexes.
    '''
    def _all_ints(values):
        # True when every element is an int.
        return all(isinstance(v, int) for v in values)

    valid = (isinstance(index, int)
             or (isinstance(index, list) and _all_ints(index))
             or (isinstance(index, tuple) and len(index) == 2
                 and _all_ints(index)))
    if not valid:
        raise ArgumentError("index", (int, List[int], Tuple[int,int]),
                            index)
    if not (isinstance(cache, list) and len(cache) >= 2
            and _all_ints(cache)):
        raise ArgumentError("cache", list, cache)

    scalar = isinstance(index, int)
    top = abs(index) if scalar else max(abs(i) for i in index)
    # Grow the memo table up to the largest index requested.
    while len(cache) <= top:
        cache.append(cache[-2] + cache[-1])

    def _at(i):
        # Negafibonacci: F(-n) == -F(n) for even n, +F(n) for odd n.
        return cache[abs(i)] if i >= 0 or i % 2 else -cache[abs(i)]

    if scalar:
        return _at(index)
    if isinstance(index, list):
        return tuple(_at(i) for i in index)
    return tuple(_at(i) for i in range(index[0], index[1] + 1))
And finally the testcase code:
from fibonacci import fib
fib(3) # -> 2
fib(-4) # -> -3
fib(-5) # -> 5
fib((-6, 6)) # -> (-8, 5, -3, 2, -1, 1, 0, 1, 1, 2, 3, 5, 8)
fib([-7]) # -> (13,)
fib([-8, 8]) # -> (-21, 21)
fib([9, -10, 11]) # -> (34, -55, 89)
fib(9, -10)
#ArgumentError: cache expects list[int] but found -10
fib(())
#ArgumentError: index expects (int, list[int], tuple[int, int]) but found ()
fib((0,))
#ArgumentError: index expects (int, list[int], tuple[int, int]) but found (0,)
fib((0,1,2))
#ArgumentError: index expects (int, list[int], tuple[int, int])
but found (0, 1, 2)
You would not be able to hide the complete name of a custom exception; either use the builtin TypeError for that or you will have to stay with the long name. Unfortunately nothing can be done except using the builtin TypeError:
def raise_wrong_type_exception(name, expects, found):
raise TypeError(f"{name} expects {repr(expects)} but found {repr(found)}")
You can use the above for any builtin exception by passing the error message as a string to the exception. Now use the above function at places where the custom exception was to be raised
Well, I guess I was over ambitious and it was just not even a good idea. So I scaled back to the minimum requirements for what I wanted to accomplish. Basically I find myself spending too much time writing argument checks and it slows me down and even sometimes causes me to loose focus. So, I rethought it and came up with this simple solution.
# ./expects.py
from typing import *
from collections import abc as cabc
NoneType = type(None)
def _expects(typing, depth=None, _depth=0):
    """
    Render a type annotation as a short human-readable string.

    typing: the annotation to render -- a plain class, a typing generic
        such as Tuple[int, int], a sequence of alternatives, or any
        other object (rendered via repr as a fallback).
    depth: maximum nesting depth to expand; deeper levels render as
        "...".  None means unlimited.
    _depth: internal recursion counter; callers should not pass it.
    """
    if depth is not None and _depth >= depth:
        return "..."
    if typing is type(None):
        # special-cased so NoneType prints as plain "None"
        return "None"
    if isinstance(typing, type):
        # plain classes render as their bare name (int -> "int")
        return typing.__name__
    origin = get_origin(typing)
    sep, args = ",", None
    if origin:
        # a parameterized generic, e.g. List[int] or Tuple[int, int]
        args = get_args(typing)
        # NOTE(review): origin._name and typing._inst are private typing
        # internals and vary between Python versions -- verify on upgrade.
        name = origin.__name__ if isinstance(origin, type) else \
            origin._name
        if typing._inst:
            sep = '|'
    elif isinstance(typing, cabc.Sequence):
        # a bare sequence of alternatives renders as "[a|b|c]"
        name, sep, args = "", "|", typing
    elif callable(typing):
        name = typing.__name__
    else:
        name = repr(typing)
    if args:
        # expand the parameters unless the next level exceeds the depth cap
        items = sep.join(_expects(e, depth, _depth+1) for e in args) \
            if depth is None or _depth+1 < depth else \
            "..."
        return "{:}[{:}]".format(name, items)
    return name
__EXPECTS_CACHE__ = {}
def expects(method, name, found, depth=None, cache=True):
    """
    Build a "<name> expects <types> but found <value>" message from
    *method*'s type hint for parameter *name*.

    depth caps how deeply nested annotations are expanded.
    cache=True reuses previously rendered annotations; cache=None
    clears the cache first; any other falsy value bypasses it.
    """
    typing = get_type_hints(method)[name]
    # lists are unhashable, so freeze list annotations for the cache key
    hashkey = (tuple(typing) if isinstance(typing, list) else
               typing, depth)
    expects = None
    if cache:
        # cached values are always non-empty strings, so .get is safe here
        expects = __EXPECTS_CACHE__.get(hashkey)
    elif cache is None:
        __EXPECTS_CACHE__.clear()
    if expects is None:
        expects = _expects(typing, depth)
        if cache:
            __EXPECTS_CACHE__[hashkey] = expects
    return "{name} expects {expects} but found {found!r}" \
        .format(name=name, expects=expects, found=found)
class ArgumentError(ValueError):
    """
    ValueError whose message is produced by expects(): a compact
    "<name> expects <annotation> but found <value>" description derived
    from *method*'s type hints for parameter *name*.
    """
    def __new__(cls, method, name, found, depth=None):
        # see expects() for the meaning of the parameters
        return super().__new__(cls)
    def __init__(self, method, name, found, depth=None):
        # delegate all message formatting to the module-level expects()
        super().__init__(expects(method, name, found, depth))
The usage is simple and I will doc out the functions after I apply a little polish and testing. But basically you just pass 3 arguments to ArgumentError, which are the method, the argument name, and the offending value, and it creates a nice short informative exception. Or alternatively you can pass expects the same arguments to get the message only. Short, sweet and fairly light. Here is an example usage:
>>> from expects import *
>>> def foo(n:[int,Tuple[int,int]]):
... if not (isinstance(n, int) or (isinstance(n, tuple) and len(n) == 2)):
... raise ArgumentError(foo, "n", n)
...
>>> foo(None)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "<stdin>", line 3, in foo
expects.ArgumentError: n expects [int|tuple[int,int]] but found None
>>>
Alternatively I could write a code generator to turn the type hinting into argument checking / validation, which would be sort of cool. But doing dynamic hinting-to-argument checking is just going to be a drain and slow down the code, especially for functions and methods that get called often or in loops. So that is now off the board. But yeah, a code generator to write custom checks would run once and either make a .py file or cache it. Maybe I will try implementing that at some future time using some of the stuff I learned on my earlier implementation.
If I declare two functions a and b:
# Two functionally identical callables: one declared with `def`, one with
# `lambda` -- used below to show that type() cannot tell them apart.
def a(x):
    return x**2
b = lambda x: x**2
I can not use type to differentiate them, since they're both of the same type.
assert type(a) == type(b)
Also, types.LambdaType doesn't help:
>>> import types
>>> isinstance(a, types.LambdaType)
True
>>> isinstance(b, types.LambdaType)
True
One could use __name__ like:
def is_lambda_function(function):
    # Heuristic: lambdas are named "<lambda>" at creation time, but
    # __name__ is writable, so this check can be fooled (demonstrated below).
    return function.__name__ == "<lambda>"
>>> is_lambda_function(a)
False
>>> is_lambda_function(b)
True
However, since __name__ could have been modified, is_lambda_function is not guaranteed to return the correct result:
>>> a.__name__ = '<lambda>'
>>> is_lambda_function(a)
True
Is there a way which produces a more reliable result than the __name__ attribute?
AFAIK, you cannot do this reliably in Python 3.
Python 2 used to define a bunch of function types. For that reason, methods, lambdas and plain functions have each their own type.
Python 3 has only one type which is function. There are indeed different side effects where declaring a regular function with def and a lambda: def sets the name to the name (and qualified name) of the function and can set a docstring, while lambda sets the name (and qualified name) to be <lambda>, and sets the docstring to None. But as this can be changed...
If the functions are loaded from a regular Python source (and not typed in an interactive environment), the inspect module allows to access the original Python code:
import inspect
# A def-declared function and a lambda with identical behavior, used to
# exercise is_lambda_func below.
def f(x):
    return x**2
g = lambda x: x**2
def is_lambda_func(f):
    """Tests whether f was declared as a lambda.

    Returns: True for a lambda, False for a function or method declared
        with def
    Raises:
        TypeError if f is not a function
        OSError('could not get source code') if f was not declared in a
        Python module but (for example) in an interactive session
    """
    if not inspect.isfunction(f):
        raise TypeError('not a function')
    # lstrip so that methods/nested functions (whose source is indented)
    # are handled, and treat a leading decorator or comment as evidence of
    # a def-style declaration -- the bare startswith('def') check
    # misclassified both of those cases as lambdas.
    src = inspect.getsource(f).lstrip()
    return not src.startswith(('def', '@', '#'))
# Give the lambda a def-like name/qualname to show that the source-based
# test above is not fooled by renaming.
g.__name__ = 'g'
g.__qualname__ = 'g'
print(f, is_lambda_func(f))
print(g, is_lambda_func(g))
This will print:
<function f at 0x00000253957B7840> False
<function g at 0x00000253957B78C8> True
By the way, if the problem was serialization of function, a function declared as a lambda can successfully be pickled, provided you give it a unique qualified name:
>>> g = lambda x: 3*x
>>> g.__qualname__ = "g"
>>> pickle.dumps(g)
b'\x80\x03c__main__\ng\nq\x00.'
You can check __code__.co_name. It contains what the name was at the time the function/lambda was compiled:
# Same pair as in the question: a def-declared function and a lambda that
# both compute x**2.
def a(x):
    return x**2
b = lambda x: x**2
def is_lambda_function(f):
    """True when *f*'s code object was compiled from a lambda expression.

    Unlike __name__, __code__.co_name is read-only, so this cannot be
    spoofed after the fact.
    """
    compiled_name = f.__code__.co_name
    return compiled_name == "<lambda>"
>>> is_lambda_function(a)
False
>>> is_lambda_function(b)
True
And, contrary to __name__, __code__.co_name is read-only...
>>> a.__name__ = "<lambda>"
>>> b.__name__ = "b"
>>> a.__code__.co_name = "<lambda>"
Traceback (most recent call last):
File "<console>", line 1, in <module>
AttributeError: readonly attribute
>>> b.__code__.co_name = "b"
Traceback (most recent call last):
File "<console>", line 1, in <module>
AttributeError: readonly attribute
... so the results will stay the same:
>>> is_lambda_function(a)
False
>>> is_lambda_function(b)
True
I took the chance to dive in cpython's source to see if I could find anything, and I am afraid I have to second Serge's answer: you cannot.
Briefly, this is a lambda's journey in the interpreter:
During parsing, lambdas, just like every other expression, are read into an expr_ty, which is a huge union containing data of every expression.
This expr_ty is then converted to the appropriate type (Lambda, in our case)
After some time we land into the function that compiles lambdas
This function calls assemble, which calls makecode, which initializes a PyCodeObject (functions, methods, as well as lambdas, all end up here).
From this, I don't see anything particular that is specific to lambdas. This, combined with the fact that Python lets you modify pretty much every attribute of objects makes me/us believe what you want to do is not possible.
What is the significance of doctest in Sphinx? Can someone help me understand its use with a simple example?
Sphinx's doctest is for testing the documentation itself. In other words, it allows for the automatic verification of the documentation's sample code. While it might also verify whether the Python code works as expected, Sphinx is unnecessary for that purpose alone (you could more easily use the standard library's doctest module).
So, a real-world scenario (one I find myself in on a regular basis) goes something like this: a new feature is nearing completion, so I write some documentation to introduce the new feature. The new docs contain one or more code samples. Before publishing the documentation, I run make doctest in my Sphinx documentation directory to verify that the code samples I've written for the audience will actually work.
I haven't used it myself but it is my understanding that it extends the functionality of doctest. For example it adds testsetup and testcleanup directives which you can put your set-up and tear-down logic in. Making it possible for Sphinx to exclude that in the documentation.
Here is a simple example (from the doctest module):
"""
This is the "example" module.
The example module supplies one function, factorial(). For example,
>>> factorial(5)
120
"""
def factorial(n):
    """Return the factorial of n, an exact integer >= 0.

    The doctests below are Python 3 versions: the original examples used
    Python 2's ``long`` type and ``L``-suffixed literals, which are a
    NameError / SyntaxError under Python 3 and made every example fail.

    >>> [factorial(n) for n in range(6)]
    [1, 1, 2, 6, 24, 120]
    >>> factorial(30)
    265252859812191058636308480000000
    >>> factorial(-1)
    Traceback (most recent call last):
        ...
    ValueError: n must be >= 0

    Factorials of floats are OK, but the float must be an exact integer:

    >>> factorial(30.1)
    Traceback (most recent call last):
        ...
    ValueError: n must be exact integer
    >>> factorial(30.0)
    265252859812191058636308480000000

    It must also not be ridiculously large:

    >>> factorial(1e100)
    Traceback (most recent call last):
        ...
    OverflowError: n too large
    """
    import math
    # Validate before computing: negative and non-integral inputs are
    # rejected up front.
    if not n >= 0:
        raise ValueError("n must be >= 0")
    if math.floor(n) != n:
        raise ValueError("n must be exact integer")
    if n+1 == n:  # catch a float like 1e300, so large that n+1 == n
        raise OverflowError("n too large")
    result = 1
    factor = 2
    while factor <= n:
        result *= factor
        factor += 1
    return result
if __name__ == "__main__":
    # Run the doctests embedded in this module's docstrings.
    import doctest
    doctest.testmod()
I'm aware of mutuable vs immutable arguments in Python, and which is which, but here is a weird issue I ran into with mutable arguments. The simplified version is as follows:
def fun1a(tmp):
    """Append 3, 2, 1 (in that order) to *tmp* in place; always return True."""
    for value in (3, 2, 1):
        tmp.append(value)
    return True
def fun1(a):
    # Delegates to fun1a, which mutates the caller's list in place.
    # NOTE(review): `print a` is Python 2 syntax; this snippet is quoted
    # question code and does not parse under Python 3.
    b = fun1a(a)
    print a #prints [3,2,1]
    return b
def fun2a():
    """Build the list [3, 2, 1] and return it paired with a success flag,
    as the two-element list [True, <that list>]."""
    tmp = [3, 2, 1]
    return [True, tmp]
def fun2(a):
    # Rebinds the local name `a` to the fresh list built by fun2a(); the
    # caller's argument is never touched, which is the crux of the question.
    # NOTE(review): `print a` is Python 2 syntax.
    [b, a] = fun2a()
    print a #prints [3,2,1]
    return b
def main():
    a=[]
    if fun1(a):
        print a #prints [3,2,1]
    # NOTE(review): `b` is never defined in this scope, so `fun2(b)` raises
    # NameError unless a global `b` happens to exist -- the "garbage" the
    # question reports is presumably that unrelated global.  (Python 2 print
    # syntax throughout.)
    if fun2(b):
        print b #prints garbage, e.g. (0,1)
As you can see the only difference is that fun2 points the passed in argument to reference a list created inside fun2a, while fun1 simply appends to the list created in main. In the end, fun1 returns the correct result, while fun2 returns random garbage rather than the result I'd expect. What's the problem here?
Thanks
This isn't so much of a mutable/immutable issue as one of scope.
"b" exists only in fun1 and fun2 bodies. It is not present in the main or global scope (at least intentionally)
--EDIT--
>>> def fun1(b):
... b = b + 1
... return b
...
>>> def fun2(a):
... b = 1
... return b
...
>>> fun1(5)
6
>>> fun2(b)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
NameError: name 'b' is not defined
(From my interpreter in terminal)
I'm guessing your 'b' was initialized somewhere else. What happened in the other function is of has no effect on this.
This is me running your exact code:
>>> main()
[3, 2, 1]
[3, 2, 1]
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "<stdin>", line 5, in main
NameError: global name 'b' is not defined
>>> b = 'whatever'
>>> main()
[3, 2, 1]
[3, 2, 1]
[3, 2, 1]
whatever
As others have pointed out, there is no name 'b' in your main() function.
A better way of asserting how your code is behaving is to unit test it. Unit-testing is very easy in Python and a great habit to get into. When I first started writing Python a few years back the guy I paired with insisted on testing everything. Since that day I have continued and have never had to use the Python debugger as a result! I digress...
Consider:
import unittest
class Test(unittest.TestCase):
    """Characterization tests for fun1a/fun1/fun2a/fun2.

    Uses assertEqual throughout: the assertEquals alias used originally
    was deprecated since Python 3.2 and removed in Python 3.12, so those
    tests now raise AttributeError on current interpreters.
    """

    def test_fun1a_populates_tmp(self):
        some_list = []
        fun1a(tmp=some_list)
        self.assertEqual([3, 2, 1], some_list)

    def test_fun1a_returns_true(self):
        some_list = []
        ret = fun1a(tmp=some_list)
        self.assertTrue(ret)

    def test_fun1_populates_a(self):
        some_list = []
        fun1(a=some_list)
        self.assertEqual([3, 2, 1], some_list)

    def test_fun1_returns_true(self):
        some_list = []
        ret = fun1(a=some_list)
        self.assertTrue(ret)

    def test_fun2a_populates_returned_list(self):
        ret = fun2a()
        self.assertEqual([True, [3, 2, 1]], ret)

    def test_fun2_returns_true(self):
        some_list = []
        ret = fun2(some_list)
        self.assertTrue(ret)

    def test_fun2_des_not_populate_passed_list(self):
        # (method name kept as-is, typo included, so existing test
        # selections by name still match)
        some_list = []
        fun2(some_list)
        self.assertEqual(0, len(some_list))

if __name__ == '__main__':
    unittest.main()
Each of these unit tests pass and document how your functions behave (save for the printing, you can add the tests for those if they are needed). They also provide a harness for when you edit your code, because they should continue to pass or start failing if you break something.
I haven't unit-tested main(), since it is clearly broken.
The problem may be related to the difference between lists and tuples. In fun2, don't put brackets around a,b.
In fun2a, return a tuple of the two objects and not a list. Python should unpack the variables correctly, if that's the problem that you're trying to solve.
Also, you called fun2 with argument b when b was never defined. Of course, the parameter for fun2 is never actually used, because it is rewritten before it is read.
In the end, your code should look like this:
def fun1a(tmp):
    """Mutate *tmp* by appending 3, 2 and 1, then report success."""
    tmp.extend((3, 2, 1))
    return True
def fun1(a):
    # fun1a mutates the caller's list in place, so `a` shows [3, 2, 1].
    # NOTE(review): `print a` is Python 2 syntax (quoted answer code).
    b = fun1a(a)
    print a #prints [3,2,1]
    return b
def fun2a():
    """Build [3, 2, 1] and return it alongside a success flag, as a tuple."""
    tmp = []
    for value in (3, 2, 1):
        tmp.append(value)
    return (True, tmp)
def fun2():
    # Unpacks the (flag, list) tuple returned by fun2a(); no parameter is
    # taken anymore, so no caller state can be shadowed.
    # NOTE(review): `print a` is Python 2 syntax.
    b, a = fun2a()
    print a #prints [3,2,1]
    return b
def main():
    a=[]
    if fun1(a):
        print a #prints [3,2,1]
    # NOTE(review): `b` is still undefined in this scope; despite the
    # answer's claim that this "should print [3,2,1] both times", this line
    # raises NameError (or prints an unrelated global).  Python 2 syntax.
    if fun2():
        print b #prints garbage, e.g. (0,1)
which should print [3,2,1] both times.