Python Unit Testing: Automatically Running the Debugger when a test fails - python

Is there a way to automatically start the debugger at the point at which a unittest fails?
Right now I am just using pdb.set_trace() manually, but this is very tedious as I need to add it each time and take it out at the end.
For Example:
import unittest
class tests(unittest.TestCase):
def setUp(self):
pass
def test_trigger_pdb(self):
#this is the way I do it now
try:
assert 1==0
except AssertionError:
import pdb
pdb.set_trace()
def test_no_trigger(self):
#this is the way I would like to do it:
a=1
b=2
assert a==b
#magically, pdb would start here
#so that I could inspect the values of a and b
if __name__=='__main__':
#In the documentation the unittest.TestCase has a debug() method
#but I don't understand how to use it
#A=tests()
#A.debug(A)
unittest.main()

I think what you are looking for is nose. It works like a test runner for unittest.
You can drop into the debugger on errors, with the following command:
nosetests --pdb

import unittest
import sys
import pdb
import functools
import traceback
def debug_on(*exceptions):
    """Decorator factory: open pdb post-mortem when the wrapped function
    raises one of *exceptions* (defaults to AssertionError).

    Usage:
        class tests(unittest.TestCase):
            @debug_on()
            def test_trigger_pdb(self):
                assert 1 == 0
    """
    if not exceptions:
        exceptions = (AssertionError, )

    def decorator(f):
        @functools.wraps(f)  # was '#functools.wraps(f)': markdown ate the '@'
        def wrapper(*args, **kwargs):
            try:
                return f(*args, **kwargs)
            except exceptions:
                info = sys.exc_info()
                traceback.print_exception(*info)
                pdb.post_mortem(info[2])
                # Re-raise so unittest still records the failure after the
                # debugging session ends (the original swallowed it).
                raise
        return wrapper
    return decorator
class tests(unittest.TestCase):
    """Example: the debug_on decorator applied to a deliberately failing test."""

    @debug_on()  # rendered as '#debug_on()' by SO markdown; must be '@'
    def test_trigger_pdb(self):
        assert 1 == 0
I corrected the code to call post_mortem on the exception instead of set_trace.

Third party test framework enhancements generally seem to include the feature (nose and nose2 were already mentioned in other answers). Some more:
pytest supports it.
pytest --pdb
Or if you use absl-py's absltest instead of unittest module:
name_of_test.py --pdb_post_mortem

A simple option is to just run the tests without result collection and letting the first exception crash down the stack (for arbitrary post mortem handling) by e.g.
# Run the module's tests without result collection; the first exception
# crashes down the stack and is handed to an arbitrary post-mortem handler.
try: unittest.findTestCases(__main__).debug()
except:
    # bare except is deliberate: post-mortem ANY failure or error
    pdb.post_mortem(sys.exc_info()[2])
Another option: Override unittest.TextTestResult's addError and addFailure in a debug test runner for immediate post_mortem debugging (before tearDown()) - or for collecting and handling errors & tracebacks in an advanced way.
(Doesn't require extra frameworks or an extra decorator for test methods)
Basic example:
import unittest, pdb
class TC(unittest.TestCase):
def testZeroDiv(self):
1 / 0
def debugTestRunner(post_mortem=None):
    """unittest runner doing post mortem debugging on failing tests.

    Args:
        post_mortem: callable taking a traceback object; defaults to
            pdb.post_mortem (could also be e.g. pywin.debugger.post_mortem).

    Returns:
        A unittest.TextTestRunner whose result class drops into the debugger
        on every error/failure - before tearDown() runs, so fixture state is
        still inspectable.
    """
    import traceback  # used by the result class; missing from the original snippet
    if post_mortem is None:
        post_mortem = pdb.post_mortem

    class DebugTestResult(unittest.TextTestResult):

        def addError(self, test, err):
            # called before tearDown()
            traceback.print_exception(*err)
            post_mortem(err[2])
            super(DebugTestResult, self).addError(test, err)

        def addFailure(self, test, err):
            traceback.print_exception(*err)
            post_mortem(err[2])
            super(DebugTestResult, self).addFailure(test, err)

    return unittest.TextTestRunner(resultclass=DebugTestResult)
if __name__ == '__main__':
    ##unittest.main()
    # Run with the post-mortem-enabled runner defined above.
    unittest.main(testRunner=debugTestRunner())
    # Alternatives: a different debugger, or debug() without any runner.
    ##unittest.main(testRunner=debugTestRunner(pywin.debugger.post_mortem))
    ##unittest.findTestCases(__main__).debug()

To apply @cmcginty's answer to the successor nose2 (recommended by nose; available on Debian-based systems via apt-get install nose2), you can drop into the debugger on failures and errors by calling
nose2
in your test directory.
For this, you need to have a suitable .unittest.cfg in your home directory or unittest.cfg in the project directory; it needs to contain the lines
[debugger]
always-on = True
errors-only = False

To address the comment in your code "In the documentation the unittest.TestCase has a debug() method but I don't understand how to use it", you can do something like this:
# Load every TestCase in the current module into a suite, then run it in
# debug mode: no result collection, the first exception simply propagates.
suite = unittest.defaultTestLoader.loadTestsFromModule(sys.modules[__name__])
suite.debug()
Individual test cases are created like:
testCase = tests('test_trigger_pdb') (where tests is a sub-class of TestCase as per your example). And then you can do testCase.debug() to debug one case.

Here's a built-in, no extra modules, solution:
import unittest
import sys
import pdb
####################################
def ppdb(e=None):
    """Conditional-debugging gate.

    Use with: `if ppdb(): pdb.set_trace()`

    The *e* argument (an exception) is accepted but ignored; the gate is
    driven purely by the module-wide `ppdb.enabled` flag.
    """
    return ppdb.enabled

# Off by default; the __main__ section flips it on when --pdb is passed.
ppdb.enabled = False
###################################
class SomeTest(unittest.TestCase):
def test_success(self):
try:
pass
except Exception, e:
if ppdb(): pdb.set_trace()
raise
def test_fail(self):
try:
res = 1/0
#note: a `nosetests --pdb` run will stop after any exception
#even one without try/except and ppdb() does not not modify that.
except Exception, e:
if ppdb(): pdb.set_trace()
raise
if __name__ == '__main__':
    # Conditional debugging flag -- deliberately disabled under nosetests,
    # whose own --pdb handling would conflict.
    if "--pdb" in sys.argv:
        print("pdb requested")  # Python 3 print() (original was Python 2)
        ppdb.enabled = not sys.argv[0].endswith("nosetests")
        sys.argv.remove("--pdb")
    unittest.main()
call it with python myunittest.py --pdb and it will halt. Otherwise it won't.

Some solution above modifies business logic:
try: # <-- new code
original_code() # <-- changed (indented)
except Exception as e: # <-- new code
pdb.post_mortem(...) # <-- new code
To minimize changes to the original code, we can define a function decorator, and simply decorate the function that's throwing:
def pm(func):
    """Decorator: on any exception from *func*, open pdb post-mortem on the
    traceback, then re-raise so callers/test runners still see the failure.
    """
    import functools, pdb

    @functools.wraps(func)  # was '#functools.wraps(func)': markdown ate the '@'
    def func2(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except Exception as e:
            pdb.post_mortem(e.__traceback__)
            raise
    return func2
Use:
@pm
def test_xxx(...):
...

Built a module with a decorator which post-mortems on every type of error except AssertionError. The decorator can be triggered by the logging root level:
#!/usr/bin/env python3
'''
Decorator for getting post mortem on errors of a unittest TestCase
'''
import sys
import pdb
import functools
import traceback
import logging
import unittest
logging.basicConfig(format='%(asctime)s %(message)s', level=logging.DEBUG)
def debug_on(log_level):
    '''
    Function decorator for post mortem debugging unittest functions.

    Args:
        log_level (int): logging level corresponding to the stdlib logging module

    Usecase:
        class tests(unittest.TestCase):
            @debug_on(logging.root.level)
            def test_trigger_pdb(self):
                assert 1 == 0
    '''
    def decorator(f):
        @functools.wraps(f)  # was '#functools.wraps(f)': markdown ate the '@'
        def wrapper(*args, **kwargs):
            try:
                return f(*args, **kwargs)
            except BaseException as err:
                info = sys.exc_info()
                traceback.print_exception(*info)
                # Only debug when verbose logging is enabled, and never for
                # plain assertion failures (ordinary test failures).
                if log_level < logging.INFO and not isinstance(err, AssertionError):
                    pdb.post_mortem(info[2])
                # Re-raise so unittest still records the failure/error
                # (the original swallowed it, making failing tests "pass").
                raise
        return wrapper
    return decorator
class Debug_onTester(unittest.TestCase):
    """Demonstrates the debug_on decorator on a deliberately failing test."""

    @debug_on(logging.root.level)  # rendered as '#debug_on(...)' by SO markdown
    def test_trigger_pdb(self):
        assert 1 == 0

if __name__ == '__main__':
    unittest.main()

Related

How do I remove a handler from a loguru.logger when using pytest?

I wrote a Thing class that does logging using loguru. At the bottom of the class file I add a handler to the logger. This is what thing.py looks like.
from loguru import logger
class Thing:
    """Question code: a context manager that logs lifecycle events via loguru."""

    def __init__(self):
        logger.info("Thing created")

    def __enter__(self):
        logger.info("Thing entered")
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        logger.info("Thing exited")

# NOTE(review): this handler is added at *import* time, which is exactly why
# the tests still write to thing.log; the accepted fix moves it under the
# __main__ guard below (or removes it entirely).
logger.add("thing.log")
if __name__ == "__main__":
    with Thing() as thing:
        logger.info("In with block")
This works fine and it logs to thing.log as expected. What I would like to achieve is that it does not add the handler to thing.log when running tests.
This is my test file:
import pytest
from loguru import logger
from thing import Thing
@pytest.fixture  # rendered as '#pytest.fixture' by SO markdown; must be '@'
def thing(mocker):
    """Yield a Thing instance with thing.logger patched to the bare loguru logger."""
    mocker.patch("thing.logger", logger)
    with Thing() as thing:
        yield thing

def test_thing(thing, mocker):
    # Patching here as well is redundant with the fixture, but harmless.
    mocker.patch("thing.logger", logger)
    logger.info("In test")
    assert isinstance(thing, Thing)
Now this test passes, but the logs are still written to thing.log (instead to only stdout, which is the default in for a loguru.logger).
How do I make sure that it only logs to the basic loguru.logger when running pytest?
What I tried:
Using monkeypatch instead of using mocker: monkeypatch.setattr("thing.logger", logger)
Patching in only one place (either in the fixture or in the test function)
Patching without replacement: mocker.patch("thing.logger") (so without a replacement logger)
Remove logger.add("thing.log") from thing.py!
You can either specify (as said in the docs) that you want to log to stdout: logger.add(sys.stdout) or just leave it out because the default for loguru.logger is in fact stdout!
The example provided in their docs:
logger.add(sys.stdout, format="{time} - {level} - {message}", filter="sub.module")
EDIT:
if __name__ == "__main__":
logger.add("thing.log")
if __name__ == "__main__":
with Thing() as thing:
#...
Now the logger will log to thing.log when the module is executed directly, but it will NOT add the file handler when the module is imported by another module (e.g. a test file).
Or you can use logger.remove(0) to stop logging when calling thing(mocker)!

How to test required argparse arguments during unittests?

I'm doing TDD tests for argparser. How can I test arguments with the option required?I need to test all options like:
too many arguments,
no arguments are given,
the wrong argument is given.
I can raise SystemExit, but this is not really what I need:
def test_no_arguments(self):
with patch.object(sys, 'exit') as mock_method:
self.parser.parse_arguments()
self.assertTrue(mock_method.called)
However, without raising system exit I have always errors like this:
zbx-check-mount.py
class CommandLine:
    """Builds and parses the command-line arguments for the mount monitor."""

    def __init__(self):
        self.args_parser = argparse.ArgumentParser(description="Monitoring mounted filesystems",
                                                   formatter_class=argparse.RawTextHelpFormatter)
        self.parsed_args = None
        self.add_arguments()

    def add_arguments(self):
        """
        Add arguments to parser.
        """
        try:
            self.args_parser._action_groups.pop()  # pylint: disable=protected-access
            required = self.args_parser.add_argument_group('required arguments')
            required.add_argument('--fs_name', required=True, help='Given filesystem')
        except argparse.ArgumentError as err:
            # NOTE(review): `log` must be a module-level logger defined
            # elsewhere in zbx-check-mount.py -- not visible in this snippet.
            log.error('argparse.ArgumentError: %s', err)
            sys.exit(1)

    def parse_arguments(self, args=None):
        """
        Parse added arguments, then return the selected values.

        Args:
            args: list of argument strings, or None to parse sys.argv[1:].

        Returns:
            A 1-tuple containing the parsed --fs_name value (note the
            deliberate trailing comma below).
        """
        # BUG FIX: the original called parse_args() with no arguments, so
        # `args` was silently ignored and sys.argv was always parsed --
        # which made the class untestable (the problem in the question).
        self.parsed_args = self.args_parser.parse_args(args)
        return self.parsed_args.fs_name,
tests
from pyfakefs.fake_filesystem_unittest import TestCase
import os
import sys
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
if sys.version_info[0] == 3:
from unittest.mock import MagicMock, patch
else:
from mock import MagicMock, patch
sys.path.extend([os.path.join(os.path.dirname(os.path.abspath(__file__)),'..','..', "bin")])
module_name = __import__('zbx-check-mount')
class TestCommandLine(TestCase):

    def setUp(self):
        """
        Method called to prepare the test fixture. This is called immediately before calling the test method
        """
        # module_name is the dynamically __import__'ed zbx-check-mount module
        self.parser = module_name.CommandLine()

    def test_no_arguments(self):
        # NOTE(review): parse_arguments() returns a tuple (fs_name,), so
        # `opts.fs_name` cannot work as written; and with an empty argument
        # list the required --fs_name makes argparse call sys.exit().
        # See the answer about forwarding `args` to parse_args().
        opts = self.parser.parse_arguments([])
        assert opts.fs_name

    def tearDown(self):
        """
        Method called immediately after the test method has been called and the result recorded.
        """
        pass
How to avoid this situation and test other options?
In def parse_arguments(self, args=None):, you should pass args on to the parser, as in:
self.args_parser.parse_args(args)
parse_args() parses sys.argv[1:] when the given argument is None; otherwise it parses the provided list.
In a full distribution of python there's a unittest file for argparse (test_argparse.py). It's somewhat complex, defining a subclass of ArgumentParser that captures errors and redirects error messages.
Testing argparse is tricky because it looks at sys.argv, which the unittest scripts also use. And it usually tries to exit on errors. This has been discussed in a number of SO questions already.
If I'm interpreting your symptoms correctly, you are having problems in the test harness because your monkey patched implementation of sys.exit actually returns, which the argparse library is not expecting.
Introducing a side_effect that raises an exception, which you can then trap and verify in the unit test, may be sufficient to get around the problem.

how to write Unittets for a method which is returning a method

# NOTE(review): pseudocode from the question -- "some complex code" and
# "some code is here" are placeholders, not runnable Python. The decorator
# returns a *new* function (wrapped_method), which is what any test of it
# must account for.
def acquisition_required(method):
    def wrapped_method(self, *args, **kwargs):
        result=some complex code
        if not result:
            some code is here
        else:
            return method(self, *args, **kwargs)
    return wrapped_method
I would like to write a Unittest for this
eg.
assertEqual, assertTrue..
But i don't know how to test it i have done unittesting for function returning some values or True/False.
I don't want any code just concept
If your function will be success then the return value will be the reference of getting method. It means you can use the assertEqual method of unittest module. Like below:
import unittest
import your_module
class ConfigParserTestCases(unittest.TestCase):

    def test_return_method(self):
        # NOTE(review): `method` is a placeholder name here. Also, since
        # acquisition_required returns a *wrapper* function, this equality
        # only holds if the decorator returns the original callable --
        # verify against the real implementation before relying on it.
        self.assertEqual(your_module.acquisition_required(method), method)

if __name__ == "__main__":
    unittest.main()
Note: If your implementation contains Exceptions (Eg.: try-except for error handling), you can test the Exception case with "assertRaises" method of unittest module. From the Official Python documentation.
Eg. in your case (if you want to test ValueError exception):
with self.assertRaises(ValueError):
self.assertRaises(your_module.acquisition_required(method))
I manged to do this as below:
with self.assertRaises(ValueError):
self.assertRaises(your_module.acquisition_required(method))

Python mocked exception not being caught

Struggling to succinctly describe this in the title...
I have a module I want to test:
mod.py:
import subprocess
# NOTE: Python 2 code (print statements) -- quoted from the question.
class MyStuff(object):

    def my_fun(self):
        try:
            print subprocess
            out = subprocess.check_output(["echo", "pirates"])
        except subprocess.CalledProcessError:
            # only reached when check_output reports a non-zero exit status
            print "caught exception"
And the test module test_mod.py:
import unittest
import mock
from mod import MyStuff
import subprocess
class Tests(unittest.TestCase):

    def setUp(self):
        # NOTE(review): patching *all* of mod.subprocess also replaces
        # subprocess.CalledProcessError with a mock class, so the `except`
        # clause in my_fun can never match the real exception raised below.
        # This is the root cause the asker later found; the fix is to patch
        # only 'mod.subprocess.check_output'.
        self.patched_subprocess = mock.patch(
            'mod.subprocess', autospec=True)
        self.mock_subprocess = self.patched_subprocess.start()
        self.my_stuff = MyStuff()

    def tearDown(self):
        self.patched_subprocess.stop()

    def test_my_fun(self):
        self.mock_subprocess.check_output = mock.Mock(
            side_effect=subprocess.CalledProcessError(0, "hi", "no"))
        with self.assertRaises(subprocess.CalledProcessError):
            out = self.my_stuff.my_fun()

if __name__ == '__main__':
    unittest.main()
I then run python test_mod.py and I see the following output:
<NonCallableMagicMock name='subprocess' spec='module' id='140654009377872'>
.
----------------------------------------------------------------------
Ran 1 test in 0.007s
OK
I'm pleased that the subprocess object has been mocked, but why is the print "caught exception" statement not executed? I'm guessing it's because the real exception being thrown is test_mod.subprocess.CalledProcessError and not subprocess.CalledProcessError as I intend, but I'm not sure how to resolve that. Any suggestion? Thanks for your time.
I solved this eventually...
The problem was I was mocking the entire subprocess module, which included the CalledProcessError exception! That's why it didn't seem to match the exception I was raising in my test module, because it was a completely different object.
The fix is to mock just subprocess.check_output, D'oh!

Run Python unittest so that nothing is printed if successful, only AssertionError() if fails

I have a test module in the standard unittest format
class my_test(unittest.TestCase):
def test_1(self):
[tests]
def test_2(self):
[tests]
etc....
My company has a proprietary test harness that will execute my module as a command line script, and which will catch any errors raised by my module, but requires that my module be mute if successful.
So, I am trying to find a way to run my test module naked, so that if all my tests pass then nothing is printed to the screen, and if a test fails with an AssertionError, that error gets piped through the standard Python error stack (just like any other error would in a normal Python script.)
The docs advocate using the unittest.main() function to run all the tests in a given module like
if __name__ == "__main__":
unittest.main()
The problem is that this wraps the test results in unittest's harness, so that even if all tests are successful, it still prints some fluff to the screen, and if there is an error, it's not simply dumped as a usual python error, but also dressed in the harness.
I've tried redirecting the output to an alternate stream using
# Send the runner's report to a log file instead of the console -- but note
# this also hides failures from the calling harness, which is exactly the
# problem described in the surrounding text.
with open('.LOG','a') as logf:
    suite = unittest.TestLoader().loadTestsFromTestCase(my_test)
    unittest.TextTestRunner(stream = logf).run(suite)
The problem here is that EVERYTHING gets piped to the log file (including all notice of errors). So when my companies harness runs the module, it complete's successfully because, as far as it can tell, no errors were raised (because they were all piped to the log file).
Any suggestions on how I can construct a test runner that suppresses all the fluff, and pipes errors through the normal Python error stack? As always, if you think there is a better way to approach this problem, please let me know.
EDIT:
Here is what I ended up using to resolve this. First, I added a "get_test_names()" method to my test class:
class my_test(unittest.TestCase):
    # ... the actual test_* methods live here ...

    @staticmethod  # rendered as '#staticmethod' by SO markdown; must be '@'
    def get_test_names():
        """Return the names of all the test methods for this class."""
        # Fixes vs. the original snippet: the decorator, the 'memeber' typo
        # (a NameError), the missing return, and matching on the 'test_'
        # *prefix* -- a bare substring test would also pick up
        # get_test_names itself.
        return [member[0] for member in inspect.getmembers(my_test)
                if member[0].startswith('test_')]
Then I replaced my call to unittest.main() with the following:
# Unittest catches all errors raised by the test cases, and returns them as
# formatted strings inside a TestResult object. In order for the test
# harness to catch these errors they need to be re-raised, and so I am defining
# this CompareError class to do that.
# For each code error, a CompareError will be raised, with the original error
# stack as the argument. For test failures (i.e. assertion errors) an
# AssertionError is raised.
class CompareError(Exception):
    """Re-raisable wrapper for an error collected by unittest.

    unittest returns errors as formatted strings inside a TestResult; this
    exception carries one of those strings back up the normal Python error
    stack so the external harness can catch it.
    """

    def __init__(self, err):
        # the original formatted traceback/message, kept for inspection
        self.err = err

    def __str__(self):
        return repr(self.err)
# NOTE(review): 'ut' is presumably 'import unittest as ut' -- the import is
# not shown in this snippet; confirm before reuse.
# Collect all tests into a TestSuite()
all_tests = ut.TestSuite()
for test in my_test.get_test_names():
    all_tests.addTest(my_test(test))
# Define a TestResult object and run tests
results = ut.TestResult()
all_tests.run(results)
# Re-raise any script errors
# (results.errors / results.failures hold (test, formatted-traceback) pairs)
for error in results.errors:
    raise CompareError(error[1])
# Re-raise any test failures
for failure in results.failures:
    raise AssertionError(failure[1])
I came up with this. If you are able to change the command line you might remove the internal io redirection.
import sys, inspect, traceback
# redirect stdout,
# can be replaced by testharness.py > /dev/null at console
class devnull():
    """Minimal file-like sink: anything written to it is discarded.

    Only write() is needed here because it only ever replaces sys.stdout
    for plain print output.
    """

    def write(self, data):
        # Swallow the payload; mimic a text stream's write().
        return None
# Redirect stdout so the tests' print output is discarded; the original
# stream is saved so it can be restored at the end of the run.
f = devnull()
orig_stdout = sys.stdout
sys.stdout = f
class TestCase():
def test_1(self):
print 'test_1'
def test_2(self):
raise AssertionError, 'test_2'
def test_3(self):
print 'test_3'
if __name__ == "__main__":
    testcase = TestCase()
    # discover the test_* methods by reflection
    testnames = [t[0] for t in inspect.getmembers(TestCase)
                 if t[0].startswith('test_')]
    for testname in testnames:
        try:
            getattr(testcase, testname)()
        except AssertionError:
            # Failures go to stderr (stdout is redirected to devnull above),
            # so only errors reach the calling harness. Python 3 syntax;
            # the original was Python 2.
            print(traceback.format_exc(), file=sys.stderr)
    # restore
    sys.stdout = orig_stdout

Categories