I have a function to test, under_test, and a set of expected input/output pairs:
[
    (2, 332),
    (234, 99213),
    (9, 3),
    # ...
]
I would like each one of these input/output pairs to be tested in its own test_* method. Is that possible?
This is sort of what I want, but forcing every single input/output pair into a single test:
class TestPreReqs(unittest.TestCase):
    def setUp(self):
        self.expected_pairs = [(23, 55), (4, 32)]

    def test_expected(self):
        for exp in self.expected_pairs:
            self.assertEqual(under_test(exp[0]), exp[1])

if __name__ == '__main__':
    unittest.main()
(Also, do I really want to be putting that definition of self.expected_pairs in setUp?)
UPDATE: Trying doublep's advice:
class TestPreReqs(unittest.TestCase):
    def setUp(self):
        expected_pairs = [
            (2, 3),
            (42, 11),
            (3, None),
            (31, 99),
        ]
        for k, pair in expected_pairs:
            setattr(TestPreReqs, 'test_expected_%d' % k, create_test(pair))

def create_test(pair):
    def do_test_expected(self):
        self.assertEqual(get_pre_reqs(pair[0]), pair[1])
    return do_test_expected

if __name__ == '__main__':
    unittest.main()
This does not work. 0 tests are run. Did I adapt the example incorrectly?
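For reference, a sketch of a corrected adaptation (my assumption about what was intended, reusing get_pre_reqs from the update above): the loop needs enumerate(), and the methods must be attached at import time, because unittest discovers test_* methods before setUp ever runs.
import unittest

def create_test(pair):
    def do_test_expected(self):
        self.assertEqual(get_pre_reqs(pair[0]), pair[1])
    return do_test_expected

class TestPreReqs(unittest.TestCase):
    pass

# Attach one test per pair at module level, before unittest.main() runs.
for k, pair in enumerate([(2, 3), (42, 11), (3, None), (31, 99)]):
    setattr(TestPreReqs, 'test_expected_%d' % k, create_test(pair))

if __name__ == '__main__':
    unittest.main()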
I had to do something similar. I created simple TestCase subclasses that took a value in their __init__, like this:
class KnownGood(unittest.TestCase):
    def __init__(self, input, output):
        super(KnownGood, self).__init__()
        self.input = input
        self.output = output

    def runTest(self):
        self.assertEqual(function_to_test(self.input), self.output)
I then made a test suite with these values:
def suite():
    suite = unittest.TestSuite()
    suite.addTests(KnownGood(input, output) for input, output in known_values)
    return suite
You can then run it from your main method:
if __name__ == '__main__':
    unittest.TextTestRunner().run(suite())
The advantages of this are:
As you add more values, the number of reported tests increases, which makes you feel like you are doing more.
Each individual test case can fail individually
It's conceptually simple, since each input/output value is converted into one TestCase
Not tested:
class TestPreReqs(unittest.TestCase):
    ...

def create_test(pair):
    def do_test_expected(self):
        self.assertEqual(under_test(pair[0]), pair[1])
    return do_test_expected

for k, pair in enumerate([(23, 55), (4, 32)]):
    test_method = create_test(pair)
    test_method.__name__ = 'test_expected_%d' % k
    setattr(TestPreReqs, test_method.__name__, test_method)
If you use this often, you could prettify this by using utility functions and/or decorators, I guess. Note that pairs are not an attribute of TestPreReqs object in this example (and so setUp is gone). Rather, they are "hardwired" in a sense to the TestPreReqs class.
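For instance, a minimal sketch of such a prettification (my illustration, not part of the original answer): a class decorator that hardwires the pairs while keeping the test class body empty.
import unittest

def with_expected_pairs(pairs):
    """Class decorator: add one test_expected_N method per (input, output) pair."""
    def decorate(cls):
        for k, pair in enumerate(pairs):
            def do_test_expected(self, pair=pair):
                self.assertEqual(under_test(pair[0]), pair[1])
            do_test_expected.__name__ = 'test_expected_%d' % k
            setattr(cls, do_test_expected.__name__, do_test_expected)
        return cls
    return decorate

@with_expected_pairs([(23, 55), (4, 32)])
class TestPreReqs(unittest.TestCase):
    pass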
As often with Python, there is a complicated way to provide a simple solution.
In that case, we can use metaprogramming, decorators, and various nifty Python tricks to achieve a nice result. Here is what the final test will look like:
import unittest

# Some magic code will be added here later

class DummyTest(unittest.TestCase):

    @for_examples(1, 2)
    @for_examples(3, 4)
    def test_is_smaller_than_four(self, value):
        self.assertTrue(value < 4)

    @for_examples((1, 2), (2, 4), (3, 7))
    def test_double_of_X_is_Y(self, x, y):
        self.assertEqual(2 * x, y)

if __name__ == "__main__":
    unittest.main()
When executing this script, the result is:
..F...F
======================================================================
FAIL: test_double_of_X_is_Y(3,7)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/Users/xdecoret/Documents/foo.py", line 22, in method_for_example
method(self, *example)
File "/Users/xdecoret/Documents/foo.py", line 41, in test_double_of_X_is_Y
self.assertEqual(2 * x, y)
AssertionError: 6 != 7
======================================================================
FAIL: test_is_smaller_than_four(4)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/Users/xdecoret/Documents/foo.py", line 22, in method_for_example
method(self, *example)
File "/Users/xdecoret/Documents/foo.py", line 37, in test_is_smaller_than_four
self.assertTrue(value < 4)
AssertionError
----------------------------------------------------------------------
Ran 7 tests in 0.001s
FAILED (failures=2)
which achieves our goal:
it is unobtrusive: we derive from TestCase as usual
we write parametrized tests only once
each example value is considered an individual test
the decorator can be stacked, so it is easy to use sets of examples (e.g., using a function to build the list of values from example files or directories)
The icing on the cake is that it works for an arbitrary arity of the test signature
So how does it work? Basically, the decorator stores the examples in an attribute of the function, a metaclass replaces every decorated function with a list of functions, and we swap in our enhanced TestCase. The magic code (to be pasted at the "magic" comment above) is:
__examples__ = "__examples__"

def for_examples(*examples):
    def decorator(f, examples=examples):
        setattr(f, __examples__, getattr(f, __examples__, ()) + examples)
        return f
    return decorator

class TestCaseWithExamplesMetaclass(type):
    def __new__(meta, name, bases, dict):
        def tuplify(x):
            if not isinstance(x, tuple):
                return (x,)
            return x
        for methodname, method in dict.items():
            if hasattr(method, __examples__):
                dict.pop(methodname)
                examples = getattr(method, __examples__)
                delattr(method, __examples__)
                for example in (tuplify(x) for x in examples):
                    def method_for_example(self, method=method, example=example):
                        method(self, *example)
                    methodname_for_example = methodname + "(" + ", ".join(str(v) for v in example) + ")"
                    dict[methodname_for_example] = method_for_example
        return type.__new__(meta, name, bases, dict)

class TestCaseWithExamples(unittest.TestCase):
    __metaclass__ = TestCaseWithExamplesMetaclass
    pass

unittest.TestCase = TestCaseWithExamples
If someone wants to package this nicely, or propose a patch for unittest, feel free! A quote of my name will be appreciated.
The code can be made much simpler and fully encapsulated in the decorator if you are ready to use frame introspection via the sys module:
def for_examples(*parameters):
    def tuplify(x):
        if not isinstance(x, tuple):
            return (x,)
        return x

    def decorator(method, parameters=parameters):
        for parameter in (tuplify(x) for x in parameters):
            def method_for_parameter(self, method=method, parameter=parameter):
                method(self, *parameter)
            args_for_parameter = ",".join(repr(v) for v in parameter)
            name_for_parameter = method.__name__ + "(" + args_for_parameter + ")"
            frame = sys._getframe(1)  # pylint: disable-msg=W0212
            frame.f_locals[name_for_parameter] = method_for_parameter
        return None
    return decorator
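A hedged usage sketch (my illustration, not from the original answer): because this version injects the generated methods into the enclosing class body via the caller's frame, a plain unittest.TestCase suffices and no metaclass is needed. The decorator returns None, and unittest ignores the resulting non-callable attribute.
import sys       # required by for_examples above
import unittest

# for_examples is assumed to be defined as in the decorator above

class DummyTest(unittest.TestCase):
    @for_examples((1, 2), (2, 4), (3, 6))
    def test_double_of_X_is_Y(self, x, y):
        self.assertEqual(2 * x, y)

if __name__ == "__main__":
    unittest.main()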
nose (suggested by @Paul Hankin)
#!/usr/bin/env python
# file: test_pairs_nose.py
from nose.tools import eq_ as eq

from mymodule import f

def test_pairs():
    for input, output in [(2, 332), (234, 99213), (9, 3)]:
        yield _test_f, input, output

def _test_f(input, output):
    try:
        eq(f(input), output)
    except AssertionError:
        if input == 9:  # expected failure
            from nose.exc import SkipTest
            raise SkipTest("expected failure")
        else:
            raise

if __name__ == "__main__":
    import nose
    nose.main()
Example:
$ nosetests test_pairs_nose -v
test_pairs_nose.test_pairs(2, 332) ... ok
test_pairs_nose.test_pairs(234, 99213) ... ok
test_pairs_nose.test_pairs(9, 3) ... SKIP: expected failure
----------------------------------------------------------------------
Ran 3 tests in 0.001s
OK (SKIP=1)
unittest (an approach similar to @doublep's)
#!/usr/bin/env python
import unittest2 as unittest

from mymodule import f

def add_tests(generator):
    def class_decorator(cls):
        """Add tests to `cls` generated by `generator()`."""
        for f, input, output in generator():
            test = lambda self, i=input, o=output, f=f: f(self, i, o)
            test.__name__ = "test_%s(%r, %r)" % (f.__name__, input, output)
            setattr(cls, test.__name__, test)
        return cls
    return class_decorator

def _test_pairs():
    def t(self, input, output):
        self.assertEqual(f(input), output)
    for input, output in [(2, 332), (234, 99213), (9, 3)]:
        tt = t if input != 9 else unittest.expectedFailure(t)
        yield tt, input, output

class TestCase(unittest.TestCase):
    pass
TestCase = add_tests(_test_pairs)(TestCase)

if __name__ == "__main__":
    unittest.main()
Example:
$ python test_pairs_unit2.py -v
test_t(2, 332) (__main__.TestCase) ... ok
test_t(234, 99213) (__main__.TestCase) ... ok
test_t(9, 3) (__main__.TestCase) ... expected failure
----------------------------------------------------------------------
Ran 3 tests in 0.000s
OK (expected failures=1)
If you don't want to install unittest2 then add:
try:
    import unittest2 as unittest
except ImportError:
    import unittest

    if not hasattr(unittest, 'expectedFailure'):
        import functools

        def _expectedFailure(func):
            @functools.wraps(func)
            def wrapper(*args, **kwargs):
                try:
                    func(*args, **kwargs)
                except AssertionError:
                    pass
                else:
                    raise AssertionError("UnexpectedSuccess")
            return wrapper
        unittest.expectedFailure = _expectedFailure
Some of the tools available for doing parametrized tests in Python are:
Nose test generators (only for function tests, not TestCase classes)
nose-parameterized by David Wolever (also for TestCase classes)
Unittest template by Boris Feld
Parametrized tests in py.test (a minimal sketch follows after this list)
parametrized-testcase by Austin Bingham
See also question 1676269 for more answers to this question.
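For completeness, a minimal sketch of the py.test option listed above, applied to the question's pairs (assuming mymodule.f as in the earlier examples):
import pytest

from mymodule import f  # the function under test, as in the examples above

@pytest.mark.parametrize("input, output", [(2, 332), (234, 99213), (9, 3)])
def test_f(input, output):
    # Each pair is collected and reported as an individual test.
    assert f(input) == output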
I think Rory's solution is the cleanest and shortest. However, this variation of doublep's "create synthetic functions in a TestCase" also works:
import sys
import unittest
from functools import partial

class TestAllReports(unittest.TestCase):
    pass

def test_spamreport(name):
    assert classify(getSample(name)) == 'spamreport', name

for rep in REPORTS:
    testname = 'test_' + rep
    testfunc = partial(test_spamreport, rep)
    testfunc.__doc__ = testname
    setattr(TestAllReports, testname, testfunc)

if __name__ == '__main__':
    unittest.main(argv=sys.argv + ['--verbose'])
I want to test a file called ninja.py, written in Python 3.6.
# File ninja.py

def what_to_do_result(result):
    # Send a mail, write something in a file, play a song or whatever
    ...

def my_function(a, b):
    # Step 1
    result = a + b
    # Step 2
    if result == 3:
        what_to_do_result(result)
    elif result == 5:
        what_to_do_result(result + 1)
    else:
        return True
I have started writing a test file called test_ninja.py with some unit tests. I am using pytest.
import pytest

class MyTestException(Exception):
    pass

def run_side_effect(*args, **kwargs):
    raise MyTestException(kwargs["result"])

@pytest.fixture(name="resource")
def setup_fixture():
    # Some code here
    ...

class TestNinja:

    @staticmethod
    def setup_method():
        # Function called before each test
        ...

    @staticmethod
    def teardown_method():
        # Function called after each test
        ...

    @staticmethod
    def test_my_function(mocker, resource):
        # How to do this ???
        mocker.patch("ninja.what_to_do_result", return_value=None, side_effect=run_side_effect)
        # Then the test
        assert 1 == 1  # -> This works
        with pytest.raises(MyTestException):
            ninja_function(a=1, b=2)
        assert ninja_function(a=5, b=10)
The point is that I want to mock the function ninja.what_to_do_result and apply a side effect (= run a function).
I want the side effect to use the parameter (kwargs) of the function what_to_do_result.
But I don't know how to do this.
This matters because there are multiple possibilities: in step 2, what_to_do_result is called when the result is 3 or 5, and those are linked to two different use cases I want to test.
Can you help me?
I did not find a related section in the documentation linked below.
Link to the documentation: https://github.com/pytest-dev/pytest-mock
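For reference, one way a side effect can see the mocked call's argument (a hedged sketch using the question's names; note that ninja.py calls what_to_do_result(result) positionally, so the value arrives in *args rather than in kwargs):
import pytest

import ninja

class MyTestException(Exception):
    pass

def run_side_effect(*args, **kwargs):
    # what_to_do_result(result) is called positionally in ninja.py,
    # so the value is in args[0], not in kwargs["result"].
    raise MyTestException(args[0])

def test_my_function(mocker):
    mocker.patch("ninja.what_to_do_result", side_effect=run_side_effect)
    with pytest.raises(MyTestException):
        ninja.my_function(a=1, b=2)      # result == 3 -> mocked call raises
    assert ninja.my_function(a=5, b=10)  # result == 15 -> returns True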
How can I create multiple TestCases and run them programmatically? I'm trying to test multiple implementations of a collection on a common TestCase.
I'd prefer to stick with plain unittest and avoid dependencies.
Here are some resources that I looked at that didn't quite meet what I wanted:
Writing a re-usable parametrized unittest.TestCase method - The accepted answer proposes four different external libraries.
http://eli.thegreenplace.net/2011/08/02/python-unit-testing-parametrized-test-cases - This approach uses a static parametrize method. I don't understand why you can't pass a parameter directly into TestSubClass.__init__.
How to generate dynamic (parametrized) unit tests in python? - A little bit too black magic.
Here's a minimal (non)working example.
import unittest

MyCollection = set
AnotherCollection = set
# ... many more collections

def maximise(collection, array):
    return 2

class TestSubClass(unittest.TestCase):
    def __init__(self, collection_class):
        unittest.TestCase.__init__(self)
        self.collection_class = collection_class
        self.maximise_fn = lambda array: maximise(collection_class, array)

    def test_single(self):
        self.assertEqual(self.maximise_fn([1]), 1)

    def test_overflow(self):
        self.assertEqual(self.maximise_fn([3]), 1)

    # ... many more tests

def run_suite():
    suite = unittest.defaultTestLoader
    for collection in [MyCollection, AnotherCollection]:
        suite.loadTestsFromTestCase(TestSubClass(collection))
    unittest.TextTestRunner().run(suite)

def main():
    run_suite()

if __name__ == '__main__':
    main()
The above approach errors in loadTestsFromTestCase with:
TypeError: issubclass() arg 1 must be a class
How about using pytest with a parametrized fixture:
import pytest

MyCollection = set
AnotherCollection = set

def maximise(collection, array):
    return 1

@pytest.fixture(scope='module', params=[MyCollection, AnotherCollection])
def maximise_fn(request):
    return lambda array: maximise(request.param, array)

def test_single(maximise_fn):
    assert maximise_fn([1]) == 1

def test_overflow(maximise_fn):
    assert maximise_fn([3]) == 1
If that's not an option, you can make a mixin to contain the test functions, and subclasses to provide maximise_fn:
import unittest

MyCollection = set
AnotherCollection = set

def maximise(collection, array):
    return 1

class TestCollectionMixin:
    def test_single(self):
        self.assertEqual(self.maximise_fn([1]), 1)

    def test_overflow(self):
        self.assertEqual(self.maximise_fn([3]), 1)

class TestMyCollection(TestCollectionMixin, unittest.TestCase):
    maximise_fn = lambda self, array: maximise(MyCollection, array)

class TestAnotherCollection(TestCollectionMixin, unittest.TestCase):
    maximise_fn = lambda self, array: maximise(AnotherCollection, array)

if __name__ == '__main__':
    unittest.main()
I have the following test that does not fail when running an especially long-running fib assertion.
Tests that don't fail properly
#!/usr/bin/env python2.7
import unittest
from fib import fib
from nose.tools import timed

def test_gen(expected, actual):
    @timed(.001)
    def test_method(self):
        return self.assertEqual(expected, actual)
    return test_method

if __name__ == '__main__':
    all_cases = {
        'user': ((fib(40), 102334155), (fib(2), 1), (fib(5), 5)),
    }
    fails = {}
    for username, cases in all_cases.items():
        class FibTests(unittest.TestCase):
            pass

        for index, case in enumerate(cases):
            test_name = 'test_{0}_{1}'.format(username, index)
            test = test_gen(case[1], case[0])
            setattr(FibTests, test_name, test)

        suite = unittest.TestLoader().loadTestsFromTestCase(FibTests)
        result = unittest.TextTestRunner(verbosity=2).run(suite)
        fails[username] = len(result.failures)

    print fails
(Slow) Fib.py Implementation
def fib(x):
    if x == 0:
        return 0
    elif x == 1:
        return 1
    return fib(x - 2) + fib(x - 1)
Tests that fail properly
import time
import unittest
from fib import fib
from nose.tools import timed

def test_gen(expected, actual):
    @timed(.001)
    def test_method(self):
        time.sleep(.2)
        return self.assertEqual(expected, actual)
    return test_method
You are timing the wrong thing, and never actually calling your test method.
You are also going to an awful lot of effort to dynamically create and add your cases to a class that does nothing but act as a container for tests, when nose supports generator test cases, which would be much easier to read and follow than what you have here.
Also, is this a test file or a piece of product code? If it's a test file, then having all of that code in if __name__ == '__main__' is kind of odd, and if it's a product code file, then having the test_gen function and the unittest and nose import statements in the unconditionally run part doesn't make much sense.
I'd recommend doing it the following way, and not trying to make the test script self-runnable; just launch it with nose.
from fib import fib
from nose.tools import timed

fib = timed(.001)(fib)

def execute(username, fib_arg, expected_output):
    result = fib(fib_arg)
    assert result == expected_output, ('%s fib(%d) got %d, expected %d'
                                       % (username, fib_arg, result, expected_output))

def test_fib():
    for name, datasets in (('user', ((40, 102334155), (2, 1), (5, 5))),):
        for arg, expected in datasets:
            yield execute, name, arg, expected
I want to be able to get the result of a particular test method and output it inside the teardown method, while using the nose test runner.
There is a very good example here.
But unfortunately, running nosetests example.py does not work, since nose doesn't seem to like the fact that the run method in the superclass is being overridden:
AttributeError: 'ResultProxy' object has no attribute 'wasSuccessful'
Caveat: the following doesn't actually access the test during the tearDown, but it does access each result.
You might want to write a nose plugin (see the API documentation here). The method that you are probably interested in is afterTest(), which is run... after the test. :) Though, depending on your exact application, handleError()/handleFailure() or finalize() might actually be more useful.
Here is an example plugin that accesses the result of a test immediately after it is executed.
from nose.plugins import Plugin
import logging

log = logging.getLogger('nose.plugins.testnamer')

class ReportResults(Plugin):
    def __init__(self, *args, **kwargs):
        super(ReportResults, self).__init__(*args, **kwargs)
        self.passes = 0
        self.failures = 0

    def afterTest(self, test):
        if test.passed:
            self.passes += 1
        else:
            self.failures += 1

    def finalize(self, result):
        print "%d successes, %d failures" % (self.passes, self.failures)
This trivial example merely reports the number of passes and failures (like the link you included), but I'm sure you can extend it to do something more interesting (here's another fun idea). To use this, make sure that it is installed in Nose (or load it into a custom runner), and then activate it with --with-reportresults.
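A minimal sketch of the "custom runner" route (my assumption of how you would wire it up; nose derives the plugin name 'reportresults' from the class name above, which is what --with-reportresults refers to):
import nose

from reportresults_plugin import ReportResults  # hypothetical module holding the plugin above

if __name__ == '__main__':
    nose.main(addplugins=[ReportResults()],
              argv=['nosetests', '--with-reportresults'])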
If you are OK with adding some boilerplate code to the tests, something like the following might work.
In MyTest1, tearDown is called at the end of each test, and the value of self.result has been set to a tuple containing the method name and a dictionary (but you could set that to whatever you like). The inspect module is used to get the method name, so tearDown knows which test just ran.
In MyTest2, all the results are saved in a dictionary (results), which you can do with what you like in the tearDownClass method.
import inspect
import unittest

class MyTest1(unittest.TestCase):
    result = None

    def tearDown(self):
        print "tearDown:", self.result

    def test_aaa(self):
        frame = inspect.currentframe()
        name = inspect.getframeinfo(frame).function
        del frame
        self.result = (name, None)
        x = 1 + 1
        self.assertEqual(x, 2)
        self.result = (name, dict(x=x))

    def test_bbb(self):
        frame = inspect.currentframe()
        name = inspect.getframeinfo(frame).function
        del frame
        self.result = (name, None)
        # Intentional fail.
        x = -1
        self.assertEqual(x, 0)
        self.result = (name, dict(x=x))

class MyTest2(unittest.TestCase):
    results = {}

    @classmethod
    def tearDownClass(cls):
        print "tearDownClass:", cls.results

    def test_aaa(self):
        frame = inspect.currentframe()
        name = inspect.getframeinfo(frame).function
        del frame
        self.results[name] = None
        x = 1 + 1
        self.assertEqual(x, 2)
        self.results[name] = dict(x=x)

    def test_bbb(self):
        frame = inspect.currentframe()
        name = inspect.getframeinfo(frame).function
        del frame
        self.results[name] = None
        x = -1
        self.assertEqual(x, 0)
        self.results[name] = dict(x=x)

if __name__ == '__main__':
    unittest.main()
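As a side note, unittest already knows the running test's name, so the frame introspection above can arguably be replaced with self.id(), a standard TestCase method that returns something like '__main__.MyTest1.test_aaa':
def test_aaa(self):
    # self.id() -> '__main__.MyTest1.test_aaa'; keep only the method name.
    name = self.id().split('.')[-1]
    self.result = (name, None)
    x = 1 + 1
    self.assertEqual(x, 2)
    self.result = (name, dict(x=x))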
I have the following function in Python, and I want to test with unittest that if the function gets 0 as an argument, it throws a warning. I already tried assertRaises, but since I don't raise the warning, that doesn't work.
def isZero(i):
    if i != 0:
        print "OK"
    else:
        warning = Warning("the input is 0!")
        print warning
    return i
Starting with Python 3.2, you can simply use the assertWarns() method.
with self.assertWarns(Warning):
    do_something()
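A fuller sketch tied to the question's function (hedged: this assumes isZero is rewritten to actually issue the warning via warnings.warn, since printing a Warning object, as in the original, emits nothing that assertWarns can detect):
import unittest
import warnings

def isZero(i):
    if i != 0:
        print("OK")
    else:
        warnings.warn("the input is 0!")  # actually issue the warning
    return i

class TestIsZero(unittest.TestCase):
    def test_zero_warns(self):
        with self.assertWarns(UserWarning):
            isZero(0)

if __name__ == '__main__':
    unittest.main()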
You can use the catch_warnings context manager. Essentially this allows you to mock the warnings handler, so that you can verify details of the warning. See the official docs for a fuller explanation and sample test code.
import warnings

def fxn():
    warnings.warn("deprecated", DeprecationWarning)

with warnings.catch_warnings(record=True) as w:
    # Cause all warnings to always be triggered.
    warnings.simplefilter("always")
    # Trigger a warning.
    fxn()
    # Verify some things
    assert len(w) == 1
    assert issubclass(w[-1].category, DeprecationWarning)
    assert "deprecated" in str(w[-1].message)
You can write your own assertWarns function to encapsulate the catch_warnings context. I've just implemented it the following way, with a mixin:
import warnings

class WarningTestMixin(object):
    'A test which checks if the specified warning was raised'

    def assertWarns(self, warning, callable, *args, **kwds):
        with warnings.catch_warnings(record=True) as warning_list:
            warnings.simplefilter('always')
            result = callable(*args, **kwds)
            self.assertTrue(any(item.category == warning for item in warning_list))
A usage example:
class SomeTest(WarningTestMixin, TestCase):
    'Your testcase'

    def test_something(self):
        self.assertWarns(
            UserWarning,
            your_function_which_issues_a_warning,
            5, 10, 'john',  # args
            foo='bar'       # kwargs
        )
The test will pass if at least one of the warnings issued by your_function is of type UserWarning.
@ire_and_curses' answer is quite useful and, I think, canonical. Here is another way to do the same thing. This one requires Michael Foord's excellent Mock library.
import unittest, warnings
from mock import patch_object

def isZero(i):
    if i != 0:
        print "OK"
    else:
        warnings.warn("the input is 0!")
    return i

class Foo(unittest.TestCase):
    @patch_object(warnings, 'warn')
    def test_is_zero_raises_warning(self, mock_warn):
        isZero(0)
        self.assertTrue(mock_warn.called)

if __name__ == '__main__':
    unittest.main()
The nifty patch_object lets you mock out the warn method.
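For readers on current Python: patch_object was later removed from mock; today's equivalent is unittest.mock.patch.object. A sketch under that assumption, with isZero as defined in the question:
import unittest
import warnings
from unittest.mock import patch

class Foo(unittest.TestCase):
    @patch.object(warnings, 'warn')
    def test_is_zero_raises_warning(self, mock_warn):
        isZero(0)  # isZero as defined in the question
        self.assertTrue(mock_warn.called)

if __name__ == '__main__':
    unittest.main()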
One problem with the warnings.catch_warnings approach is that warnings produced in different tests can interact in strange ways through global state kept in __warningregistry__ attributes.
To address this, we should clear the __warningregistry__ attribute of every module before every test that checks warnings.
import sys
import unittest
import warnings

class MyTest(unittest.TestCase):
    def setUp(self):
        # The __warningregistry__'s need to be in a pristine state for tests
        # to work properly.
        for v in sys.modules.values():
            if getattr(v, '__warningregistry__', None):
                v.__warningregistry__ = {}

    def test_something(self):
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always", MySpecialWarning)
            ...
            self.assertEqual(len(w), 1)
            self.assertIsInstance(w[0].message, MySpecialWarning)
This is how Python 3's assertWarns() method is implemented.
Building off the answer from @ire_and_curses,
import warnings

class AssertWarns(warnings.catch_warnings):
    """A Python 2 compatible version of `unittest.TestCase.assertWarns`."""

    def __init__(self, test_case, warning_type):
        self.test_case = test_case
        self.warning_type = warning_type
        self.log = None
        super(AssertWarns, self).__init__(record=True, module=None)

    def __enter__(self):
        self.log = super(AssertWarns, self).__enter__()
        return self.log

    def __exit__(self, *exc_info):
        super(AssertWarns, self).__exit__(*exc_info)
        self.test_case.assertEqual(type(self.log[0]), self.warning_type)
This can be called similarly to unittest.TestCase.assertWarns:
with AssertWarns(self, warnings.WarningMessage):
    warnings.warn('test warning!')
where self is a unittest.TestCase.
Per Melebius' answer, you can use self.assertWarns().
Additionally, if you want to check the warning message as well, you can use self.assertWarnsRegex() for greater specificity:
import warnings
from unittest import TestCase

class MyCustomWarning(Warning):
    ...

def is_zero(i: int) -> int:
    if i != 0:
        print("OK")
    else:
        warnings.warn("the input is 0!", MyCustomWarning)
    return i

class TestIsZero(TestCase):
    def test_when_then_input_is_zero(self):
        regex = "the input is 0"
        with self.assertWarnsRegex(MyCustomWarning, expected_regex=regex):
            _ = is_zero(0)
This test will fail if the regex is not found in the warning message.