When unit testing argument validation in argparse, the following works:
mymodule:
import argparse
import re

def validate_mac_addr(mac_addr):
    regex = re.compile(r'^((([a-f0-9]{2}:){5})|(([a-f0-9]{2}-){5}))[a-f0-9]{2}$', re.IGNORECASE)
    if re.match(regex, mac_addr) is not None:
        return mac_addr
    msg = f"[-] Invalid MAC address: '{mac_addr}'"
    raise argparse.ArgumentTypeError(msg)
test:
import mymodule
import unittest

def test_mac_address_false(self):
    self.assertRaises(Exception, mymodule.validate_mac_addr, "n0:ma:ca:dd:re:ss:here")
But I wanted to catch the more specific ArgumentTypeError, and this is apparently not possible with assertRaises() in this example!? What is going on with the general usage of Exception in assertRaises()?
BTW
isinstance(argparse.ArgumentTypeError, Exception)
Returns False?!
Ref.: class ArgumentTypeError(Exception):
argparse.ArgumentTypeError is a subclass, not an instance, of Exception, and is the type of exception you should be asserting gets raised.
import argparse

def test_mac_address_false(self):
    self.assertRaises(argparse.ArgumentTypeError, mymodule.validate_mac_addr, "n0:ma:ca:dd:re:ss:here")
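To see why the isinstance() check above returns False, compare it with issubclass() (a quick illustrative snippet):

import argparse

# ArgumentTypeError is itself a class, so it is a subclass of Exception ...
issubclass(argparse.ArgumentTypeError, Exception)                  # True

# ... but the class object is not an *instance* of Exception
isinstance(argparse.ArgumentTypeError, Exception)                  # False

# an instance of ArgumentTypeError, however, is an Exception instance
isinstance(argparse.ArgumentTypeError("bad value"), Exception)     # True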
Related
I'm doing TDD tests for an argparse parser. How can I test arguments with the required option? I need to test all options like:
too many arguments,
no arguments are given,
the wrong argument is given.
I can raise SystemExit, but this is not really what I need:
def test_no_arguments(self):
    with patch.object(sys, 'exit') as mock_method:
        self.parser.parse_arguments()
    self.assertTrue(mock_method.called)
However, without raising SystemExit I always get errors like this:
zbx-check-mount.py
class CommandLine:
    def __init__(self):
        self.args_parser = argparse.ArgumentParser(description="Monitoring mounted filesystems",
                                                   formatter_class=argparse.RawTextHelpFormatter)
        self.parsed_args = None
        self.add_arguments()

    def add_arguments(self):
        """
        Add arguments to parser.
        """
        try:
            self.args_parser._action_groups.pop()  # pylint: disable=protected-access
            required = self.args_parser.add_argument_group('required arguments')
            required.add_argument('--fs_name', required=True, help='Given filesystem')
        except argparse.ArgumentError as err:
            log.error('argparse.ArgumentError: %s', err)
            sys.exit(1)

    def parse_arguments(self, args=None):
        """
        Parse added arguments. Then run private method to return values
        """
        self.parsed_args = self.args_parser.parse_args()
        return self.parsed_args.fs_name,
tests
from pyfakefs.fake_filesystem_unittest import TestCase
import os
import sys

try:
    from StringIO import StringIO
except ImportError:
    from io import StringIO

if sys.version_info[0] == 3:
    from unittest.mock import MagicMock, patch
else:
    from mock import MagicMock, patch

sys.path.extend([os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..', "bin")])
module_name = __import__('zbx-check-mount')


class TestCommandLine(TestCase):

    def setUp(self):
        """
        Method called to prepare the test fixture. This is called immediately before calling the test method
        """
        self.parser = module_name.CommandLine()

    def test_no_arguments(self):
        opts = self.parser.parse_arguments([])
        assert opts.fs_name

    def tearDown(self):
        """
        Method called immediately after the test method has been called and the result recorded.
        """
        pass
How to avoid this situation and test other options?
In def parse_arguments(self, args=None):, you should pass args on to the parser, as in:
self.args_parser.parse_args(args)
parse_args() parses sys.argv[1:] if the given argument is None; otherwise it parses the provided list.
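Applied to the class above, only the parse_args() call changes (a sketch of the corrected method):

def parse_arguments(self, args=None):
    """
    Parse added arguments. Then run private method to return values
    """
    # passing args through lets tests supply their own argv-style list,
    # while the default None still makes argparse fall back to sys.argv[1:]
    self.parsed_args = self.args_parser.parse_args(args)
    return self.parsed_args.fs_name,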
In a full distribution of Python there's a unittest file for argparse (test_argparse.py). It's somewhat complex, defining a subclass of ArgumentParser that captures errors and redirects error messages.
Testing argparse is tricky because it looks at sys.argv, which the unittest scripts also use, and it usually tries to exit on errors. This has been discussed in a number of SO questions already.
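The core idea of that subclass is to override ArgumentParser.error() so it raises instead of exiting; a minimal sketch (not the actual code from test_argparse.py, which additionally captures stdout/stderr on the exception) looks roughly like this:

import argparse

class ArgumentParserError(Exception):
    """Raised by the test parser instead of exiting the process."""

class ErrorRaisingArgumentParser(argparse.ArgumentParser):
    def error(self, message):
        # ArgumentParser.error() normally prints usage and calls sys.exit(2);
        # raising keeps the test process alive and exposes the message to asserts
        raise ArgumentParserError(message)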
If I'm interpreting your symptoms correctly, you are having problems in the test harness because your monkey patched implementation of sys.exit actually returns, which the argparse library is not expecting.
Introducing a side_effect that raises an exception, which you can then trap and verify in the unit test, may be sufficient to get around the problem.
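For example, something along these lines, reusing the patch from the question (a hedged sketch, not a drop-in fix):

def test_no_arguments(self):
    # raising SystemExit from the mock mimics what the real sys.exit() does,
    # so argparse does not fall through into code that assumes exit never returns
    with patch.object(sys, 'exit', side_effect=SystemExit) as mock_method:
        with self.assertRaises(SystemExit):
            self.parser.parse_arguments()
    self.assertTrue(mock_method.called)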
I want to use pytest to check if the argparse.ArgumentTypeError exception is raised for an incorrect argument:
import argparse
import os

import pytest


def main(argsIn):

    def configFile_validation(configFile):
        if not os.path.exists(configFile):
            msg = 'Configuration file "{}" not found!'.format(configFile)
            raise argparse.ArgumentTypeError(msg)
        return configFile

    parser = argparse.ArgumentParser()
    parser.add_argument('-c', '--configFile', help='Path to configuration file', dest='configFile', required=True, type=configFile_validation)
    args = parser.parse_args(argsIn)


def test_non_existing_config_file():
    with pytest.raises(argparse.ArgumentTypeError):
        main(['--configFile', 'non_existing_config_file.json'])
However, running pytest says "During handling of the above exception, another exception occurred:" and consequently the test fails. What am I doing wrong?
The problem is that if an argument's type converter raises ArgumentTypeError, argparse exits with error code 2, and exiting means raising the builtin exception SystemExit. So you have to catch that exception and verify that the original exception is of the proper type:
def test_non_existing_config_file():
    try:
        main(['--configFile', 'non_existing_config_file.json'])
    except SystemExit as e:
        assert isinstance(e.__context__, argparse.ArgumentError)
    else:
        raise ValueError("Exception not raised")
Here's the ArgumentTypeError test in the test_argparse.py file (found in the development repository).
ErrorRaisingArgumentParser is a subclass defined at the start of the file, which redefines the parser.error method so it doesn't exit, and puts the error message on stderr. That part's a bit complicated.
Because of the redirection described in the comment above, it can't directly test for ArgumentTypeError. Instead it has to test for its message.
# =======================
# ArgumentTypeError tests
# =======================

class TestArgumentTypeError(TestCase):

    def test_argument_type_error(self):

        def spam(string):
            raise argparse.ArgumentTypeError('spam!')

        parser = ErrorRaisingArgumentParser(prog='PROG', add_help=False)
        parser.add_argument('x', type=spam)
        with self.assertRaises(ArgumentParserError) as cm:
            parser.parse_args(['XXX'])
        self.assertEqual('usage: PROG x\nPROG: error: argument x: spam!\n',
                         cm.exception.stderr)
Using pytest you can do the following in order to check that argparse.ArgumentError is raised. Additionally, you can check the error message.
with pytest.raises(SystemExit) as e:
    main(['--configFile', 'non_existing_config_file.json'])
assert isinstance(e.value.__context__, argparse.ArgumentError)
assert 'expected err msg' in e.value.__context__.message
Inspired by @Giorgos's answer, here is a small context manager that makes the message extraction a bit more re-usable. I'm defining the following in a common place:
import argparse
from contextlib import contextmanager
from typing import Generator, Optional

import pytest


class ArgparseErrorWrapper:

    def __init__(self):
        self._error: Optional[argparse.ArgumentError] = None

    @property
    def error(self):
        assert self._error is not None
        return self._error

    @error.setter
    def error(self, value: object):
        assert isinstance(value, argparse.ArgumentError)
        self._error = value


@contextmanager
def argparse_error() -> Generator[ArgparseErrorWrapper, None, None]:
    wrapper = ArgparseErrorWrapper()
    with pytest.raises(SystemExit) as e:
        yield wrapper
    wrapper.error = e.value.__context__
This allows testing for parser errors concisely:
def test_something():
    with argparse_error() as e:
        # some parse_args call here
        ...
    assert "Expected error message" == str(e.error)
Struggling to succinctly describe this in the title...
I have a module I want to test:
mod.py:
import subprocess


class MyStuff(object):

    def my_fun(self):
        try:
            print subprocess
            out = subprocess.check_output(["echo", "pirates"])
        except subprocess.CalledProcessError:
            print "caught exception"
And the test module test_mod.py:
import unittest
import mock
from mod import MyStuff
import subprocess


class Tests(unittest.TestCase):

    def setUp(self):
        self.patched_subprocess = mock.patch(
            'mod.subprocess', autospec=True)
        self.mock_subprocess = self.patched_subprocess.start()
        self.my_stuff = MyStuff()

    def tearDown(self):
        self.patched_subprocess.stop()

    def test_my_fun(self):
        self.mock_subprocess.check_output = mock.Mock(
            side_effect=subprocess.CalledProcessError(0, "hi", "no"))
        with self.assertRaises(subprocess.CalledProcessError):
            out = self.my_stuff.my_fun()


if __name__ == '__main__':
    unittest.main()
I then run python test_mod.py and I see the following output:
<NonCallableMagicMock name='subprocess' spec='module' id='140654009377872'>
.
----------------------------------------------------------------------
Ran 1 test in 0.007s
OK
I'm pleased that the subprocess object has been mocked, but why is the print "caught exception" statement not executed? I'm guessing it's because the exception actually being thrown is test_mod.subprocess.CalledProcessError and not subprocess.CalledProcessError as I intend, but I'm not sure how to resolve that. Any suggestion? Thanks for your time.
I solved this eventually...
The problem was I was mocking the entire subprocess module, which included the CalledProcessError exception! That's why it didn't seem to match the exception I was raising in my test module, because it was a completely different object.
The fix is to mock just subprocess.check_output, D'oh!
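In practice that means patching only the function in setUp(), for example (a sketch; only the target passed to mock.patch changes):

def setUp(self):
    # patch just check_output: mod.subprocess.CalledProcessError stays the real
    # exception class, so the except clause in my_fun() can still match it
    self.patched_check_output = mock.patch(
        'mod.subprocess.check_output', autospec=True)
    self.mock_check_output = self.patched_check_output.start()
    self.my_stuff = MyStuff()

def tearDown(self):
    self.patched_check_output.stop()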
class AppError(Exception): pass

class MissingInputError(AppError):
    em = {1101: "Date input is missing. Please verify.",
          1102: "Key input is missing. Please verify.",
          1103: "Stn input is missing. Please verify."}

# and so on ...

...

def validate(self):
    """ Method of Input class to validate input and save it """
    params = self.__params
    if 'dt' in params:
        self.__validateKey(escape(params['dt'][0]))
    else:
        raise MissingInputError(1101)

    if 'key' in params:
        self.__validateService(escape(params['key'][0]))
    else:
        raise MissingInputError(1102)

    # and so on ...
Unit testing the above, I know that the following tests in the MissingInput test class:
def testMissingKeyInput(self):
    """ Missing key should raise error """
    ip = controller.Input(MissingInput.missInputKey)
    self.assertRaises(errors.MissingInputError, ip.validate)

def testMissingDtInput(self):
    """ Missing dt should raise error """
    ip = controller.Input(MissingInput.missInputDt)
    self.assertRaises(errors.MissingInputError, ip.validate)

# and so on ...
will correctly detect if a MissingInputError exception was raised.
Is there any way to determine in the test what error number was passed to the exception while calling it, so that I can be sure that the error is being raised for that particular missing input, and not for any other missing inputs?
(P.S: Python 2.4.3).
Tip: If you are stuck with 2.4 to 2.6, use the unittest2 library.
In Python 2.7 and 3.2 a whole bunch of improvements to unittest will arrive. unittest2 is a backport of the new features (and tests) to work with Python 2.4, 2.5 & 2.6.
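One of the backported features is using assertRaises as a context manager, which lets you inspect the raised exception directly. A sketch (assuming unittest2 is installed and the errors, controller and MissingInput fixtures from the question are importable; the class name here is just illustrative):

import unittest2 as unittest

class MissingInputCodeTests(unittest.TestCase):

    def testMissingKeyInputCode(self):
        with self.assertRaises(errors.MissingInputError) as cm:
            controller.Input(MissingInput.missInputKey).validate()
        # the error number passed when raising MissingInputError(1102) ends up in args
        self.assertEqual(cm.exception.args[0], 1102)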
You can pass a regular expression that runs against the message:
import unittest


class MyError(Exception):
    pass


def raiseError():
    raise MyError(100)


class TestStuff(unittest.TestCase):

    def testError(self):
        self.assertRaisesRegexp(MyError, '100', raiseError)


unittest.main()
Does that make sense to you? If you were raising MyError('foo') or MyError(101), the test would fail because those wouldn't match the regular expression of '100'. Fortunately, this method will work against numbers and anything else that you can cast to a string.
See the unittest documentation for details on assertRaisesRegexp.
Alternatively, if you're on Python 2.6 or older, assertRaisesRegexp is not there and you'll have to do something like this:
try:
    <code>
except MyError, message:
    self.failUnlessEqual(message.args, <expected args>)
else:
    self.fail('MyError not raised')
The parameters are found in the args attribute:
>>> class CustomException(Exception):
...     pass
...
>>> e = CustomException(42)
>>> e.args
(42,)
I'd bet it is available for Python 2.4 as well.
HTH
Edit: since unit tests are ordinary code, you can check the args attribute in them as well:
>>> import unittest
>>> class Test(unittest.TestCase):
...     def testA(self):
...         try:
...             raise CustomException(42)
...         except CustomException, e:
...             self.assertEquals(e.args[0], 42)
...
>>>
Is there a way to automatically start the debugger at the point at which a unittest fails?
Right now I am just using pdb.set_trace() manually, but this is very tedious as I need to add it each time and take it out at the end.
For Example:
import unittest


class tests(unittest.TestCase):

    def setUp(self):
        pass

    def test_trigger_pdb(self):
        # this is the way I do it now
        try:
            assert 1 == 0
        except AssertionError:
            import pdb
            pdb.set_trace()

    def test_no_trigger(self):
        # this is the way I would like to do it:
        a = 1
        b = 2
        assert a == b
        # magically, pdb would start here
        # so that I could inspect the values of a and b


if __name__ == '__main__':
    # In the documentation the unittest.TestCase has a debug() method
    # but I don't understand how to use it
    # A = tests()
    # A.debug(A)
    unittest.main()
I think what you are looking for is nose. It works like a test runner for unittest.
You can drop into the debugger on errors, with the following command:
nosetests --pdb
import unittest
import sys
import pdb
import functools
import traceback


def debug_on(*exceptions):
    if not exceptions:
        exceptions = (AssertionError, )

    def decorator(f):
        @functools.wraps(f)
        def wrapper(*args, **kwargs):
            try:
                return f(*args, **kwargs)
            except exceptions:
                info = sys.exc_info()
                traceback.print_exception(*info)
                pdb.post_mortem(info[2])
        return wrapper
    return decorator


class tests(unittest.TestCase):

    @debug_on()
    def test_trigger_pdb(self):
        assert 1 == 0
I corrected the code to call post_mortem on the exception instead of set_trace.
Third party test framework enhancements generally seem to include the feature (nose and nose2 were already mentioned in other answers). Some more:
pytest supports it.
pytest --pdb
Or if you use absl-py's absltest instead of unittest module:
name_of_test.py --pdb_post_mortem
A simple option is to just run the tests without result collection, letting the first exception propagate up the stack (for arbitrary post-mortem handling), e.g.
try:
    unittest.findTestCases(__main__).debug()
except:
    pdb.post_mortem(sys.exc_info()[2])
Another option: Override unittest.TextTestResult's addError and addFailure in a debug test runner for immediate post_mortem debugging (before tearDown()) - or for collecting and handling errors & tracebacks in an advanced way.
(Doesn't require extra frameworks or an extra decorator for test methods)
Basic example:
import unittest, pdb, traceback


class TC(unittest.TestCase):
    def testZeroDiv(self):
        1 / 0


def debugTestRunner(post_mortem=None):
    """unittest runner doing post mortem debugging on failing tests"""
    if post_mortem is None:
        post_mortem = pdb.post_mortem

    class DebugTestResult(unittest.TextTestResult):

        def addError(self, test, err):
            # called before tearDown()
            traceback.print_exception(*err)
            post_mortem(err[2])
            super(DebugTestResult, self).addError(test, err)

        def addFailure(self, test, err):
            traceback.print_exception(*err)
            post_mortem(err[2])
            super(DebugTestResult, self).addFailure(test, err)

    return unittest.TextTestRunner(resultclass=DebugTestResult)


if __name__ == '__main__':
    ##unittest.main()
    unittest.main(testRunner=debugTestRunner())
    ##unittest.main(testRunner=debugTestRunner(pywin.debugger.post_mortem))
    ##unittest.findTestCases(__main__).debug()
To apply @cmcginty's answer to the successor nose 2 (recommended by nose, and available on Debian-based systems via apt-get install nose2), you can drop into the debugger on failures and errors by calling
nose2
in your test directory.
For this, you need to have a suitable .unittest.cfg in your home directory or unittest.cfg in the project directory; it needs to contain the lines
[debugger]
always-on = True
errors-only = False
To address the comment in your code "In the documentation the unittest.TestCase has a debug() method but I don't understand how to use it", you can do something like this:
suite = unittest.defaultTestLoader.loadTestsFromModule(sys.modules[__name__])
suite.debug()
Individual test cases are created like testCase = tests('test_trigger_pdb') (where tests is a subclass of TestCase as per your example), and then you can call testCase.debug() to debug one case.
Here's a built-in, no extra modules, solution:
import unittest
import sys
import pdb

####################################


def ppdb(e=None):
    """conditional debugging
       use with: `if ppdb(): pdb.set_trace()`
    """
    return ppdb.enabled

ppdb.enabled = False

###################################


class SomeTest(unittest.TestCase):

    def test_success(self):
        try:
            pass
        except Exception, e:
            if ppdb(): pdb.set_trace()
            raise

    def test_fail(self):
        try:
            res = 1 / 0
            # note: a `nosetests --pdb` run will stop after any exception
            # even one without try/except, and ppdb() does not modify that.
        except Exception, e:
            if ppdb(): pdb.set_trace()
            raise


if __name__ == '__main__':
    # conditional debugging, but not in nosetests
    if "--pdb" in sys.argv:
        print "pdb requested"
        ppdb.enabled = not sys.argv[0].endswith("nosetests")
        sys.argv.remove("--pdb")
    unittest.main()
call it with python myunittest.py --pdb and it will halt. Otherwise it won't.
Some solutions above modify the business logic:
try:                        # <-- new code
    original_code()         # <-- changed (indented)
except Exception as e:      # <-- new code
    pdb.post_mortem(...)    # <-- new code
To minimize changes to the original code, we can define a function decorator, and simply decorate the function that's throwing:
def pm(func):
    import functools, pdb

    @functools.wraps(func)
    def func2(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except Exception as e:
            pdb.post_mortem(e.__traceback__)
            raise
    return func2
Use:

@pm
def test_xxx(...):
    ...
Built a module with a decorator that drops into post-mortem debugging for every type of error except AssertionError. The decorator can be triggered by the logging root level:
#!/usr/bin/env python3
'''
Decorator for getting post mortem on errors of a unittest TestCase
'''
import sys
import pdb
import functools
import traceback
import logging
import unittest

logging.basicConfig(format='%(asctime)s %(message)s', level=logging.DEBUG)


def debug_on(log_level):
    '''
    Function decorator for post mortem debugging unittest functions.

    Args:
        log_level (int): logging level corresponding to the standard logging module

    Usecase:
        class tests(unittest.TestCase):
            @debug_on(logging.root.level)
            def test_trigger_pdb(self):
                assert 1 == 0
    '''
    def decorator(f):
        @functools.wraps(f)
        def wrapper(*args, **kwargs):
            try:
                return f(*args, **kwargs)
            except BaseException as err:
                info = sys.exc_info()
                traceback.print_exception(*info)
                if log_level < logging.INFO and type(err) != AssertionError:
                    pdb.post_mortem(info[2])
        return wrapper
    return decorator


class Debug_onTester(unittest.TestCase):

    @debug_on(logging.root.level)
    def test_trigger_pdb(self):
        assert 1 == 0


if __name__ == '__main__':
    unittest.main()