How can I automatically generate test case numbers in unittest? - python

How can I automatically generate the numbers in unittest test case names? I mean something like test_01, test_02, test_{generated number}.
import unittest

class TestSum(unittest.TestCase):
    def test_01_sum(self):
        self.assertEqual(sum([1, 2, 3]), 6, "Should be 6")

    def test_02_sum_tuple(self):
        self.assertEqual(sum((1, 2, 2)), 6, "Should be 6")

if __name__ == '__main__':
    unittest.main()

To achieve this at runtime, you can actually rename the test methods for your test class:
def generate_test_numbers(test_class):
    counter = 1
    for method_name in dir(test_class):
        if not method_name.startswith('test_N_'):
            continue
        method = getattr(test_class, method_name)
        if not callable(method):
            continue
        new_method_name = method_name.replace('_N_', '_{:02d}_'.format(counter))
        counter += 1
        setattr(test_class, new_method_name, method)
        delattr(test_class, method_name)
    return test_class
You can either simply call this function from main:
generate_test_numbers(TestSum)
or, as @VPfB suggested, use it as a class decorator:
@generate_test_numbers
class TestSum(unittest.TestCase):
    def test_N_sum(self):
        self.assertEqual(sum([1, 2, 3]), 6, "Should be 6")

    def test_N_sum_tuple(self):
        self.assertEqual(sum((1, 2, 2)), 6, "Should be 6")
Either will output (with -v)
test_01_sum (__main__.TestSum) ... ok
test_02_sum_tuple (__main__.TestSum) ... FAIL
======================================================================
FAIL: test_02_sum_tuple (__main__.TestSum)
----------------------------------------------------------------------
Traceback (most recent call last):
File "q.py", line 8, in test_N_sum_tuple
self.assertEqual(sum((1, 2, 2)), 6, "Should be 6")
AssertionError: 5 != 6 : Should be 6
----------------------------------------------------------------------
Ran 2 tests in 0.001s
FAILED (failures=1)
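One caveat worth noting: dir() returns attribute names alphabetically, so the counter follows the alphabetical order of the original test_N_... names rather than the order in which they were defined. If definition order matters, a hypothetical variant (my own naming) can iterate the class __dict__ instead, which preserves definition order on Python 3.6+:
def number_tests_in_definition_order(test_class):
    # Hypothetical variant of generate_test_numbers: numbers the test_N_*
    # methods in definition order, relying on the class __dict__ keeping
    # that order (guaranteed on Python 3.6+).
    counter = 1
    for method_name in list(vars(test_class)):
        if not method_name.startswith('test_N_'):
            continue
        method = getattr(test_class, method_name)
        if not callable(method):
            continue
        new_name = method_name.replace('_N_', '_{:02d}_'.format(counter))
        counter += 1
        setattr(test_class, new_name, method)
        delattr(test_class, method_name)
    return test_class
Keep in mind that unittest still runs the tests in alphabetical order of the final names, so the generated numbers are what ultimately determine execution order.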

Related

Error generated when running unit test 'None'

I am new to Python and trying to run a unit test. My function works well when I print its output, but while trying to develop a test module I keep getting errors.
My function in the Python file (work.py):
def lee(n):
    for i in range(1, n+1):
        print(i)
My unit test module:
import unittest
import work

class TestWork(unittest.TestCase):
    def test_lee(self):
        result = work.lee(3)
        self.assertEqual(result, [1, 2, 3])

if __name__ == '__main__':
    unittest.main()
Errors generated:
======================================================================
FAIL: test_lee (__main__.TestWork)
----------------------------------------------------------------------
Traceback (most recent call last):
File "C:\Users\test_work.py", line 12, in test_lee
self.assertEqual(result, [1,2,3])
AssertionError: None != [1, 2, 3]
Here, lee() only prints the numbers and implicitly returns None, which is why the assertion sees None != [1, 2, 3]. Build and return a list instead:
def lee(n):
    result = []  # avoid shadowing the built-in name `list`
    for i in range(1, n+1):
        result.append(i)
    return result
As for printing, do print(work.lee(3))
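If the goal really were to check what lee() prints rather than what it returns, one option is to capture stdout in the test; a minimal sketch using only the standard library, against the original print-based work.lee (the class and method names here are mine):
import io
import unittest
from contextlib import redirect_stdout

import work

class TestLeeOutput(unittest.TestCase):
    def test_lee_prints_numbers(self):
        buffer = io.StringIO()
        with redirect_stdout(buffer):  # collect everything printed inside the block
            work.lee(3)
        self.assertEqual(buffer.getvalue(), "1\n2\n3\n")

if __name__ == '__main__':
    unittest.main()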

Insufficient output from unittest subTest elements under pytest

I'm interested in using unittest's subTest for looping through some very similar tests. I found that, when I run tests written in this way under pytest (or nosetests), the output does not contain information about the individual failures. Taking the example from the docs:
import unittest

class NumbersTest(unittest.TestCase):
    def test_even(self):
        """
        Test that numbers between 0 and 5 are all even.
        """
        for i in range(0, 6):
            with self.subTest(i=i):
                self.assertEqual(i % 2, 0)

if __name__ == '__main__':
    unittest.main()
If I run python test_even.py, it clearly shows three failures, as expected:
======================================================================
FAIL: test_even (__main__.NumbersTest) (i=1)
----------------------------------------------------------------------
Traceback (most recent call last):
File "test_even.py", line 10, in test_even
self.assertEqual(i % 2, 0)
AssertionError: 1 != 0
======================================================================
FAIL: test_even (__main__.NumbersTest) (i=3)
----------------------------------------------------------------------
Traceback (most recent call last):
File "test_even.py", line 10, in test_even
self.assertEqual(i % 2, 0)
AssertionError: 1 != 0
======================================================================
FAIL: test_even (__main__.NumbersTest) (i=5)
----------------------------------------------------------------------
Traceback (most recent call last):
File "test_even.py", line 10, in test_even
self.assertEqual(i % 2, 0)
AssertionError: 1 != 0
----------------------------------------------------------------------
Ran 1 test in 0.002s
FAILED (failures=3)
However, if I run pytest -v test_even.py, it only tells me there was a failure in this test. I can't see which elements failed:
test_even.py::NumbersTest::test_even FAILED [100%]
======================================================= FAILURES =======================================================
________________________________________________ NumbersTest.test_even _________________________________________________
self = <test_even.NumbersTest testMethod=test_even>

    def test_even(self):
        """
        Test that numbers between 0 and 5 are all even.
        """
        for i in range(0, 6):
            with self.subTest(i=i):
>               self.assertEqual(i % 2, 0)
E               AssertionError: 1 != 0

test_even.py:10: AssertionError
=============================================== 1 failed in 0.15 seconds ===============================================
Is there a way to show up the individual failures? Ideally, I'd also like some sort of output for the ones that passed, just to reassure myself that the test discovery is working properly!
It seems that pytest does not yet support subTest. One solution might be to ditch unittest altogether and write native pytest tests:
import pytest

@pytest.mark.parametrize("test_input", range(0, 6))
def test_even(test_input):
    assert test_input % 2 == 0

if __name__ == '__main__':
    pytest.main([__file__])
Once pytest-subtests is added to the environment, this just works with the script from the original question:
$ pytest test_even.py
============================= test session starts ==============================
platform linux -- Python 3.10.4, pytest-7.1.2, pluggy-1.0.0
rootdir: /net/home/h04/hadru/python
plugins: subtests-0.7.0
collected 1 item
test_even.py . [100%]
=================================== FAILURES ===================================
_________________________ NumbersTest.test_even (i=1) __________________________

self = <test_even.NumbersTest testMethod=test_even>

    def test_even(self):
        """
        Test that numbers between 0 and 5 are all even.
        """
        for i in range(0, 6):
            with self.subTest(i=i):
>               self.assertEqual(i % 2, 0)
E               AssertionError: 1 != 0

test_even.py:10: AssertionError
_________________________ NumbersTest.test_even (i=3) __________________________

self = <test_even.NumbersTest testMethod=test_even>

    def test_even(self):
        """
        Test that numbers between 0 and 5 are all even.
        """
        for i in range(0, 6):
            with self.subTest(i=i):
>               self.assertEqual(i % 2, 0)
E               AssertionError: 1 != 0

test_even.py:10: AssertionError
_________________________ NumbersTest.test_even (i=5) __________________________

self = <test_even.NumbersTest testMethod=test_even>

    def test_even(self):
        """
        Test that numbers between 0 and 5 are all even.
        """
        for i in range(0, 6):
            with self.subTest(i=i):
>               self.assertEqual(i % 2, 0)
E               AssertionError: 1 != 0

test_even.py:10: AssertionError
=========================== short test summary info ============================
SUBFAIL test_even.py::NumbersTest::test_even - AssertionError: 1 != 0
SUBFAIL test_even.py::NumbersTest::test_even - AssertionError: 1 != 0
SUBFAIL test_even.py::NumbersTest::test_even - AssertionError: 1 != 0
========================= 3 failed, 1 passed in 0.10s ==========================
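For completeness, pytest-subtests also provides a subtests fixture for plain pytest-style tests, so the same check can be written without unittest.TestCase at all; a minimal sketch, assuming the fixture API the plugin documents:
def test_even(subtests):
    for i in range(0, 6):
        with subtests.test(i=i):  # each value of i is reported as its own sub-test
            assert i % 2 == 0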

Python function decorator example returning errors

The code below is supposed to log the function name, the number of positional arguments, and the number of keyword arguments:
def log(original_function):
    def new_function(*args, **kwargs):
        with open("log.txt", "w") as logfile:
            logfile.write("%s,%s,%s" % (log, len(args), len(kwargs.keys())))
        return original_function(*args, **kwargs)
    return new_function

def my_function(message):
    print(message)

>>> my_function = log(my_function)
>>> A = (2,3,4,5)
>>> B = {'Anthony' : 6, 'James' : 7}
>>> my_function(*A, **B)
I get this error:
return original_function(*args, **kwargs)
TypeError: my_function() got an unexpected keyword argument 'Anthony'
I have gone through the previous pages of the textbook I am learning from and I have followed all the syntax... or so I think.
OK, I have edited your code a bit to fix the indentation problems and the number of arguments you pass to my_function (you pass 6 instead of one):
def log(original_function):
    def new_function(*args, **kwargs):
        with open("log.txt", "w") as logfile:
            logfile.write("%s,%s,%s" % (log, len(args), len(kwargs)))
        return original_function(*args, **kwargs)
    return new_function

def my_function(message):
    print(message)

my_function = log(my_function)

#A = (2,3,4,5)
#B = {'Anthony' : 6, 'James' : 7}
#my_function(*A, **B)
my_function("hello there")
produces
hello there
and
cat log.txt
produces
<function log at 0x7faf64be85f0>,1,0
If you were to define my_function as
def my_function(a, b, c, d, Anthony, James):
    print(a, b, c, d, Anthony, James)
then
my_function = log(my_function)
A = (2,3,4,5)
B = {'Anthony' : 6, 'James' : 7}
my_function(*A, **B)
would produce:
(2, 3, 4, 5, 6, 7)  # in Python 2.7; Python 3 prints: 2 3 4 5 6 7
Side note: avoid CamelCase variable names; use lowercase instead and leave CamelCase for class names.
You are passing the following arguments to your function my_function(message):
my_function(2, 3, 4, 5, Anthony=6, James=7)
Your decorator looks like this
def decorator(func):
    def wrapper(*args, **kwargs):
        # Some code
        return func(*args, **kwargs)
    return wrapper
And then you decorate your function:
new_func = decorator(my_function)
So new_func is a wrapper around my_function. But you are calling the function like this:
A = (2, 3, 4, 5)
B = {'Anthony' : 6, 'James' : 7}
new_func(*A, **B)
Which is equivalent to
new_func(2, 3, 4, 5, Anthony=6, James=7)
The call stack would look like this:
new_func(2, 3, 4, 5, Anthony=6, James=7)
> wrapper(2, 3, 4, 5, Anthony=6, James=7)
> my_function(2, 3, 4, 5, Anthony=6, James=7)
And my_function only accepts one parameter: message.
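For reference, here is a hedged rework of the original decorator that does what the question set out to do: it logs the wrapped function's name (the original wrote the log function object itself), appends to the file instead of truncating it on every call, and preserves the wrapped function's metadata with functools.wraps:
import functools

def log(original_function):
    @functools.wraps(original_function)  # keep the wrapped function's name and docstring
    def new_function(*args, **kwargs):
        with open("log.txt", "a") as logfile:  # append rather than overwrite
            logfile.write("%s,%s,%s\n" % (
                original_function.__name__, len(args), len(kwargs)))
        return original_function(*args, **kwargs)
    return new_function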

Exception ItemNotString not caught with assertRaises

import unittest
import filterList

class TestFilterList(unittest.TestCase):
    """ docstring for TestFilterList
    """
    def setUp(self):
        self._filterby = 'B'

    def test_checkListItem(self):
        self.flObj = filterList.FilterList(['hello', 'Boy', 1], self._filterby)
        self.assertRaises(filterList.ItemNotString, self.flObj.checkListItem)

    def test_filterList(self):
        self.flObj = filterList.FilterList(['hello', 'Boy'], self._filterby)
        self.assertEquals(['Boy'], self.flObj.filterList())

if __name__ == '__main__':
    unittest.main()
My test test_checkListItem() above fails for the filterList module below:
import sys
import ast

class ItemNotString(Exception):
    pass

class FilterList(object):
    """docstring for FilterList
    """
    def __init__(self, lst, filterby):
        super(FilterList, self).__init__()
        self.lst = lst
        self._filter = filterby
        self.checkListItem()

    def checkListItem(self):
        for index, item in enumerate(self.lst):
            if type(item) == str:
                continue
            else:
                raise ItemNotString("%i item '%s' is not of type string" % (index+1, item))
        print self.filterList()
        return True

    def filterList(self):
        filteredList = []
        for eachItem in self.lst:
            if eachItem.startswith(self._filter):
                filteredList.append(eachItem)
        return filteredList

if __name__ == "__main__":
    try:
        filterby = sys.argv[2]
    except IndexError:
        filterby = 'H'
    flObj = FilterList(ast.literal_eval(sys.argv[1]), filterby)
    #flObj.checkListItem()
Why does the test fail with the error:
======================================================================
ERROR: test_checkListItem (__main__.TestFilterList)
----------------------------------------------------------------------
Traceback (most recent call last):
File "test_filterList.py", line 13, in test_checkListItem
self.flObj = filterList.FilterList(['hello', 'Boy', 1], self._filterby)
File "/Users/sanjeevkumar/Development/python/filterList.py", line 16, in __init__
self.checkListItem()
File "/Users/sanjeevkumar/Development/python/filterList.py", line 23, in checkListItem
raise ItemNotString("%i item '%s' is not of type string" % (index+1, item))
ItemNotString: 3 item '1' is not of type string
----------------------------------------------------------------------
Ran 2 tests in 0.000s
FAILED (errors=1)
Also, is the approach of the filterList module correct?
The exception is not being caught by your assertRaises call because it is raised on the previous line. If you look carefully at the traceback, you'll see that checkListItem was called by the FilterList class's __init__ method, which in turn ran when you created self.flObj in your test.
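One way to make the test pass, then, is to assert on the constructor call itself, since that is where ItemNotString is actually raised; a sketch of the adjusted test method, using assertRaises as a context manager:
    def test_checkListItem(self):
        with self.assertRaises(filterList.ItemNotString):
            filterList.FilterList(['hello', 'Boy', 1], self._filterby)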

Python library 'unittest': Generate multiple tests programmatically [duplicate]

Possible Duplicate:
How do you generate dynamic (parameterized) unit tests in Python?
I have a function to test, under_test, and a set of expected input/output pairs:
[
    (2, 332),
    (234, 99213),
    (9, 3),
    # ...
]
I would like each one of these input/output pairs to be tested in its own test_* method. Is that possible?
This is sort of what I want, but forcing every single input/output pair into a single test:
class TestPreReqs(unittest.TestCase):
    def setUp(self):
        self.expected_pairs = [(23, 55), (4, 32)]

    def test_expected(self):
        for exp in self.expected_pairs:
            self.assertEqual(under_test(exp[0]), exp[1])

if __name__ == '__main__':
    unittest.main()
(Also, do I really want to be putting that definition of self.expected_pairs in setUp?)
UPDATE: Trying doublep's advice:
class TestPreReqs(unittest.TestCase):
    def setUp(self):
        expected_pairs = [
            (2, 3),
            (42, 11),
            (3, None),
            (31, 99),
        ]
        for k, pair in expected_pairs:
            setattr(TestPreReqs, 'test_expected_%d' % k, create_test(pair))

def create_test(pair):
    def do_test_expected(self):
        self.assertEqual(get_pre_reqs(pair[0]), pair[1])
    return do_test_expected

if __name__ == '__main__':
    unittest.main()
This does not work. 0 tests are run. Did I adapt the example incorrectly?
I had to do something similar. I created simple TestCase subclasses that took a value in their __init__, like this:
class KnownGood(unittest.TestCase):
    def __init__(self, input, output):
        super(KnownGood, self).__init__()
        self.input = input
        self.output = output

    def runTest(self):
        self.assertEqual(function_to_test(self.input), self.output)
I then made a test suite with these values:
def suite():
    suite = unittest.TestSuite()
    suite.addTests(KnownGood(input, output) for input, output in known_values)
    return suite
You can then run it from your main method:
if __name__ == '__main__':
    unittest.TextTestRunner().run(suite())
The advantages of this are:
As you add more values, the number of reported tests increases, which makes you feel like you are doing more.
Each individual test case can fail individually
It's conceptually simple, since each input/output value is converted into one TestCase
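If you would rather keep the plain unittest.main() entry point (or rely on test discovery), the standard load_tests protocol can return the same suite; a minimal sketch, assuming the KnownGood class and known_values list from above:
def load_tests(loader, standard_tests, pattern):
    # unittest.main() and test discovery call this module-level hook if it exists
    suite = unittest.TestSuite()
    suite.addTests(KnownGood(input, output) for input, output in known_values)
    return suite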
Not tested:
class TestPreReqs(unittest.TestCase):
    ...

def create_test(pair):
    def do_test_expected(self):
        self.assertEqual(under_test(pair[0]), pair[1])
    return do_test_expected

for k, pair in enumerate([(23, 55), (4, 32)]):
    test_method = create_test(pair)
    test_method.__name__ = 'test_expected_%d' % k
    setattr(TestPreReqs, test_method.__name__, test_method)
If you use this often, you could prettify this with utility functions and/or decorators, I guess. Note that the pairs are not an attribute of a TestPreReqs instance in this example (so setUp is gone); rather, they are "hardwired", in a sense, to the TestPreReqs class. The loop also runs at import time, which matters: in the question's update the methods are added inside setUp, but setUp only runs after the test methods have been collected (and never runs at all if the class has no test methods to begin with), which is why 0 tests are run. The update's loop would also need enumerate(expected_pairs).
As often with Python, there is a complicated way to provide a simple solution.
In that case, we can use metaprogramming, decorators, and various nifty Python tricks to achieve a nice result. Here is what the final test will look like:
import unittest

# Some magic code will be added here later

class DummyTest(unittest.TestCase):
    @for_examples(1, 2)
    @for_examples(3, 4)
    def test_is_smaller_than_four(self, value):
        self.assertTrue(value < 4)

    @for_examples((1, 2), (2, 4), (3, 7))
    def test_double_of_X_is_Y(self, x, y):
        self.assertEqual(2 * x, y)

if __name__ == "__main__":
    unittest.main()
When executing this script, the result is:
..F...F
======================================================================
FAIL: test_double_of_X_is_Y(3,7)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/Users/xdecoret/Documents/foo.py", line 22, in method_for_example
method(self, *example)
File "/Users/xdecoret/Documents/foo.py", line 41, in test_double_of_X_is_Y
self.assertEqual(2 * x, y)
AssertionError: 6 != 7
======================================================================
FAIL: test_is_smaller_than_four(4)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/Users/xdecoret/Documents/foo.py", line 22, in method_for_example
method(self, *example)
File "/Users/xdecoret/Documents/foo.py", line 37, in test_is_smaller_than_four
self.assertTrue(value < 4)
AssertionError
----------------------------------------------------------------------
Ran 7 tests in 0.001s
FAILED (failures=2)
which achieves our goal:
it is unobtrusive: we derive from TestCase as usual
we write parametrized tests only once
each example value is considered an individual test
the decorator can be stacked, so it is easy to use sets of examples (e.g., using a function to build the list of values from example files or directories)
The icing on the cake is it works for arbitrary arity of the signature
So how does it work? Basically, the decorator stores the examples in an attribute of the function, and a metaclass replaces each decorated function with a set of generated functions, one per example. Finally, unittest.TestCase is swapped for our subclass. The magic code (to be pasted in place of the "magic" comment above) is:
__examples__ = "__examples__"

def for_examples(*examples):
    def decorator(f, examples=examples):
        setattr(f, __examples__, getattr(f, __examples__, ()) + examples)
        return f
    return decorator

class TestCaseWithExamplesMetaclass(type):
    def __new__(meta, name, bases, dict):
        def tuplify(x):
            if not isinstance(x, tuple):
                return (x,)
            return x
        for methodname, method in dict.items():
            if hasattr(method, __examples__):
                dict.pop(methodname)
                examples = getattr(method, __examples__)
                delattr(method, __examples__)
                for example in (tuplify(x) for x in examples):
                    def method_for_example(self, method=method, example=example):
                        method(self, *example)
                    methodname_for_example = methodname + "(" + ", ".join(str(v) for v in example) + ")"
                    dict[methodname_for_example] = method_for_example
        return type.__new__(meta, name, bases, dict)

class TestCaseWithExamples(unittest.TestCase):
    __metaclass__ = TestCaseWithExamplesMetaclass
    pass

unittest.TestCase = TestCaseWithExamples
If someone wants to package this nicely, or propose a patch for unittest, feel free! A credit to my name will be appreciated.
The code can be made much simpler and fully encapsulated in the decorator if you are ready to use frame introspection (import the sys module):
import sys

def for_examples(*parameters):
    def tuplify(x):
        if not isinstance(x, tuple):
            return (x,)
        return x

    def decorator(method, parameters=parameters):
        for parameter in (tuplify(x) for x in parameters):
            def method_for_parameter(self, method=method, parameter=parameter):
                method(self, *parameter)
            args_for_parameter = ",".join(repr(v) for v in parameter)
            name_for_parameter = method.__name__ + "(" + args_for_parameter + ")"
            frame = sys._getframe(1)  # pylint: disable-msg=W0212
            frame.f_locals[name_for_parameter] = method_for_parameter
        return None
    return decorator
nose (suggested by @Paul Hankin)
#!/usr/bin/env python
# file: test_pairs_nose.py
from nose.tools import eq_ as eq
from mymodule import f

def test_pairs():
    for input, output in [(2, 332), (234, 99213), (9, 3)]:
        yield _test_f, input, output

def _test_f(input, output):
    try:
        eq(f(input), output)
    except AssertionError:
        if input == 9:  # expected failure
            from nose.exc import SkipTest
            raise SkipTest("expected failure")
        else:
            raise

if __name__ == "__main__":
    import nose; nose.main()
Example:
$ nosetests test_pairs_nose -v
test_pairs_nose.test_pairs(2, 332) ... ok
test_pairs_nose.test_pairs(234, 99213) ... ok
test_pairs_nose.test_pairs(9, 3) ... SKIP: expected failure
----------------------------------------------------------------------
Ran 3 tests in 0.001s
OK (SKIP=1)
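nose itself has long been unmaintained; a rough pytest equivalent of the generator test above, using pytest.mark.parametrize with an xfail-marked case for the expected failure (same hypothetical mymodule.f as in the answer):
import pytest
from mymodule import f

@pytest.mark.parametrize("input, output", [
    (2, 332),
    (234, 99213),
    pytest.param(9, 3, marks=pytest.mark.xfail(reason="expected failure")),
])
def test_f(input, output):
    assert f(input) == output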
unittest (an approach similar to @doublep's one)
#!/usr/bin/env python
import unittest2 as unittest
from mymodule import f

def add_tests(generator):
    def class_decorator(cls):
        """Add tests to `cls` generated by `generator()`."""
        for f, input, output in generator():
            test = lambda self, i=input, o=output, f=f: f(self, i, o)
            test.__name__ = "test_%s(%r, %r)" % (f.__name__, input, output)
            setattr(cls, test.__name__, test)
        return cls
    return class_decorator

def _test_pairs():
    def t(self, input, output):
        self.assertEqual(f(input), output)
    for input, output in [(2, 332), (234, 99213), (9, 3)]:
        tt = t if input != 9 else unittest.expectedFailure(t)
        yield tt, input, output

class TestCase(unittest.TestCase):
    pass
TestCase = add_tests(_test_pairs)(TestCase)

if __name__ == "__main__":
    unittest.main()
Example:
$ python test_pairs_unit2.py -v
test_t(2, 332) (__main__.TestCase) ... ok
test_t(234, 99213) (__main__.TestCase) ... ok
test_t(9, 3) (__main__.TestCase) ... expected failure
----------------------------------------------------------------------
Ran 3 tests in 0.000s
OK (expected failures=1)
If you don't want to install unittest2 then add:
try:
    import unittest2 as unittest
except ImportError:
    import unittest

if not hasattr(unittest, 'expectedFailure'):
    import functools
    def _expectedFailure(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            try:
                func(*args, **kwargs)
            except AssertionError:
                pass
            else:
                raise AssertionError("UnexpectedSuccess")
        return wrapper
    unittest.expectedFailure = _expectedFailure
Some of the tools available for doing parametrized tests in Python are:
Nose test generators (only for function tests, not TestCase classes)
nose-parametrized by David Wolever (also for TestCase classes)
Unittest template by Boris Feld
Parametrized tests in py.test
parametrized-testcase by Austin Bingham
See also question 1676269 for more answers to this question.
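Since Python 3.4, unittest's own subTest also covers this use case with no third-party package: each failing pair is reported separately, although everything still shows up under a single test_* method. A minimal sketch against the question's hypothetical under_test:
import unittest

class TestPreReqs(unittest.TestCase):
    def test_expected(self):
        for inp, expected in [(2, 332), (234, 99213), (9, 3)]:
            with self.subTest(inp=inp):  # each failing pair gets its own report
                self.assertEqual(under_test(inp), expected)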
I think Rory's solution is the cleanest and shortest. However, this variation of doublep's "create synthetic functions in a TestCase" also works:
import sys
import unittest
from functools import partial

class TestAllReports(unittest.TestCase):
    pass

def test_spamreport(name):
    # classify, getSample and REPORTS come from the surrounding project in this answer
    assert classify(getSample(name)) == 'spamreport', name

for rep in REPORTS:
    testname = 'test_' + rep
    testfunc = partial(test_spamreport, rep)
    testfunc.__doc__ = testname
    setattr(TestAllReports, testname, testfunc)

if __name__ == '__main__':
    unittest.main(argv=sys.argv + ['--verbose'])
