Timed nose tests not failing properly - python

I have the following test that does not fail when running an especially long fib assert.
Tests that don't fail properly
#!/usr/bin/env python2.7
import unittest

from fib import fib
from nose.tools import timed


def test_gen(expected, actual):
    @timed(.001)
    def test_method(self):
        return self.assertEqual(expected, actual)
    return test_method
if __name__ == '__main__':
    all_cases = {
        'user': ((fib(40), 102334155), (fib(2), 1), (fib(5), 5)),
    }
    fails = {}
    for username, cases in all_cases.items():
        class FibTests(unittest.TestCase):
            pass

        for index, case in enumerate(cases):
            test_name = 'test_{0}_{1}'.format(username, index)
            test = test_gen(case[1], case[0])
            setattr(FibTests, test_name, test)

        suite = unittest.TestLoader().loadTestsFromTestCase(FibTests)
        result = unittest.TextTestRunner(verbosity=2).run(suite)
        fails[username] = len(result.failures)

    print fails
(Slow) fib.py Implementation
def fib(x):
    if x == 0:
        return 0
    elif x == 1:
        return 1
    return fib(x - 2) + fib(x - 1)
Tests that fail properly
import time
import unittest

from fib import fib
from nose.tools import timed


def test_gen(expected, actual):
    @timed(.001)
    def test_method(self):
        time.sleep(.2)
        return self.assertEqual(expected, actual)
    return test_method

You are timing the wrong thing, and never actually calling your test method. You are also going to an awful lot of effort to dynamically create your cases and add them to a class that does nothing but act as a container for tests, when nose supports generator test cases, which would be much easier to read and follow than what you have here. Also, is this a test file or a piece of product code? If it's a test file, then having all of that code under if __name__ == '__main__' is kind of odd; and if it's a product code file, then having the test_gen function and the unittest and nose import statements in the unconditionally run part doesn't make much sense. I'd recommend doing it the following way, and not trying to make the test script self-runnable; just launch it with nose.
from fib import fib
from nose.tools import timed

fib = timed(.001)(fib)


def execute(username, fib_arg, expected_output):
    result = fib(fib_arg)
    assert result == expected_output, ('%s fib(%d) got %d, expected %d'
                                       % (username, fib_arg, result, expected_output))


def test_fib():
    for name, datasets in (('user', ((40, 102334155), (2, 1), (5, 5))),):
        for arg, expected in datasets:
            yield execute, name, arg, expected
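To run it, save the module under a name nose will collect (test_fib.py is an assumed name matching nose's default test pattern) and launch it with the nosetests command:

$ nosetests -v test_fib.py

Because timed(.001) now wraps fib itself, the expensive fib(40) call is the thing being timed, and it should fail with nose's TimeExpired error instead of silently passing.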

Related

How to control the incremental test case in Pytest

@pytest.mark.incremental
class Test_aws():

    def test_case1(self):
        # ----- some code here ----
        result = someMethodTogetResult
        assert result[0] == True
        orderID = result[1]

    def test_case2(self):
        result = someMethodTogetResult  # can only be performed once test case 1 has run successfully
        assert result == True

    def test_deleteOrder_R53HostZonePrivate(self):
        result = someMethodTogetResult
        assert result[0] == True
The current behavior is: if test 1 passes then test 2 runs, and if test 2 passes then test 3 runs.
What I need is:
test_case 3 should run if test_case 1 passed; test_case 2 should not change that behavior. Any thoughts here?
I guess you are looking for pytest-dependency, which allows setting conditional run dependencies between tests. Example:
import random

import pytest


class TestAWS:

    @pytest.mark.dependency
    def test_instance_start(self):
        assert random.choice((True, False))

    @pytest.mark.dependency(depends=['TestAWS::test_instance_start'])
    def test_instance_stop(self):
        assert random.choice((True, False))

    @pytest.mark.dependency(depends=['TestAWS::test_instance_start'])
    def test_instance_delete(self):
        assert random.choice((True, False))
test_instance_stop and test_instance_delete will run only if test_instance_start succeeds and skip otherwise. However, since test_instance_delete does not depend on test_instance_stop, the former will execute no matter what the result of the latter test is. Run the example test class several times to verify the desired behaviour.
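If you did want deletion to run only when both earlier tests pass, pytest-dependency accepts several names in the depends list; a minimal sketch based on the class above:

@pytest.mark.dependency(depends=['TestAWS::test_instance_start',
                                 'TestAWS::test_instance_stop'])
def test_instance_delete(self):
    assert random.choice((True, False))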
To complement hoefling's answer, another option is to use pytest-steps to perform incremental testing. This can help you in particular if you wish to share some kind of incremental state/intermediate results between the steps.
However, it does not implement advanced dependency mechanisms like pytest-dependency does, so use the package that better suits your goal.
With pytest-steps, hoefling's example would look like this:
import random

from pytest_steps import test_steps, depends_on


def step_instance_start():
    assert random.choice((True, False))


@depends_on(step_instance_start)
def step_instance_stop():
    assert random.choice((True, False))


@depends_on(step_instance_start)
def step_instance_delete():
    assert random.choice((True, False))


@test_steps(step_instance_start, step_instance_stop, step_instance_delete)
def test_suite(test_step):
    # Execute the step
    test_step()
EDIT: there is a new 'generator' mode to make it even easier:
import random

from pytest_steps import test_steps, optional_step


@test_steps('step_instance_start', 'step_instance_stop', 'step_instance_delete')
def test_suite():
    # First step (Start)
    assert random.choice((True, False))
    yield

    # Second step (Stop)
    with optional_step('step_instance_stop') as stop_step:
        assert random.choice((True, False))
    yield stop_step

    # Third step (Delete)
    with optional_step('step_instance_delete') as delete_step:
        assert random.choice((True, False))
    yield delete_step
Check the documentation for details. (I'm the author of this package by the way ;) )
You can use the pytest-ordering package to order your tests using pytest marks. The author of the package explains the usage here.
Example:
@pytest.mark.first
def test_first():
    pass


@pytest.mark.second
def test_2():
    pass


@pytest.mark.order5
def test_5():
    pass
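pytest-ordering also documents an explicit numeric form, pytest.mark.run(order=N), which reads more clearly once there are more than a couple of ordered tests; a short sketch:

import pytest


@pytest.mark.run(order=2)
def test_runs_second():
    pass


@pytest.mark.run(order=1)
def test_runs_first():
    pass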

mocking API calls in unit testing

I am fairly new to Python and having a hard time wrapping my head around how to mock patch API calls in unit testing.
FYI, I am using Python 2.7 and using nosetest for my unit testing needs.
I have the following module (myRedis.py) which I want to unit test:
import logging

import redis

LOGGER = logging.getLogger(__name__)  # assumed; LOGGER is used below but was not defined in the snippet

redispool = None


class myRedis(object):

    def __init__(self, redisHost, redisPort, redisDBNum):
        if not redisPort.isdigit():
            raise TypeError('Exception: Expected int for redisPort')
        if not redisDBNum.isdigit():
            raise TypeError('Exception: Expected int for redisDBNum')
        self._redis_instance = None
        self._redishost = redisHost
        self._redisport = redisPort
        self._redisdb = redisDBNum
        global redispool
        redispool = redis.ConnectionPool(host=self._redishost,
                                         port=self._redisport,
                                         db=self._redisdb)

    def redis_connect(self):
        LOGGER.info('Connecting Redis client to %s:%s:%s', self._redishost,
                    self._redisport, self._redisdb)
        self._redis_instance = redis.StrictRedis(connection_pool=redispool)

    def write_redis(self, key, value):
        retval = self._redis_instance.set(key, value)
        LOGGER.info('Writing data to redis (%s, %s). Retval=%s', key, value, retval)
        return retval

    def read_redis(self, key):
        retval = self._redis_instance.get(key)
        LOGGER.info('Reading data from redis: key=%s. Retval=%s', key, retval)
        return retval
As far as unit testing goes, I have the following so far.
from nose.tools import *
import mock

from myRedis import myRedis


def setup():
    pass


def teardown():
    pass


@mock.patch('redis.StrictRedis')
def test_mock_redis_StrictRedis(mock_conn_pool):
    mock_conn_pool.return_value = True
    assert(True)


def test_myRedis():
    assert_raises(TypeError, myRedis,
                  'test', '1234', 'a11')
    assert_raises(TypeError, myRedis,
                  'test', 'a1234', '11')
    myRedisObj = myRedis('localhost', '8888', '11')
    assert_equal(myRedisObj._redishost, 'localhost')
    assert_equal(myRedisObj._redisport, '8888')
    assert_equal(myRedisObj._redisdb, '11')
    myRedisObj.redis_connect()
    #oclRedis.read_redis('test')
    #oclRedis.write_redis('test', 'test')
I am able to patch the redis.StrictRedis() call without any problems. But how do I patch the redis get() and set() calls, since they are invoked on an object (_redis_instance in myRedis.py)? I tried a few different versions of @mock.patch.object, but that didn't work for me. Looking for some guidance.
Thanks in advance.
What you should patch is not the calls made from your object, but the object those calls are invoked on.
In your code it would be something along the lines of:
from nose.tools import *
import mock
import unittest

from myRedis import myRedis


def setup():
    pass


def teardown():
    pass


@mock.patch('redis.StrictRedis')
def test_mock_redis_StrictRedis(mock_conn_pool):
    mock_conn_pool.return_value = True
    assert(True)


def test_myRedis_wrong_args():
    assert_raises(TypeError, myRedis,
                  'test', '1234', 'a11')
    assert_raises(TypeError, myRedis,
                  'test', 'a1234', '11')


def test_myRedis_ok():
    myRedisObj = myRedis('localhost', '8888', '11')
    assert_equal(myRedisObj._redishost, 'localhost')
    assert_equal(myRedisObj._redisport, '8888')
    assert_equal(myRedisObj._redisdb, '11')


@mock.patch('redis.StrictRedis.set')
def test_myRedis_write(mock_strict_redis_set):
    mock_strict_redis_set.return_value = {}
    myRedisObj = myRedis('localhost', '8888', '11')
    myRedisObj.redis_connect()
    connect = myRedisObj.write_redis('1', '2')
    assert connect == {}
As you can see I modified your tests to test one thing at a time. This is something that you generally want to do to avoid side-effects and guarantee test isolation.
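In the same spirit, the read path can be tested on its own by patching redis.StrictRedis.get; a sketch under the same assumptions as the write test above (the return value 'value' is arbitrary):

@mock.patch('redis.StrictRedis.get')
def test_myRedis_read(mock_strict_redis_get):
    mock_strict_redis_get.return_value = 'value'
    myRedisObj = myRedis('localhost', '8888', '11')
    myRedisObj.redis_connect()
    result = myRedisObj.read_redis('1')
    assert result == 'value'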
Consider taking a look at the docs: https://docs.python.org/dev/library/unittest.mock.html
Finally, indentation is key in Python; use proper indentation in your code snippets going forward.

Python parameterized unittest by subclassing TestCase

How can I create multiple TestCases and run them programmatically? I'm trying to test multiple implementations of a collection on a common TestCase.
I'd prefer to stick with plain unittest and avoid dependencies.
Here are some resources I looked at that didn't quite meet what I wanted:
Writing a re-usable parametrized unittest.TestCase method - The accepted answer proposes four different external libraries.
http://eli.thegreenplace.net/2011/08/02/python-unit-testing-parametrized-test-cases - This approach uses a static method parametrize. I don't understand why you can't pass a parameter directly into TestSubClass.__init__.
How to generate dynamic (parametrized) unit tests in python? - A little bit too black magic.
Here's a minimal (non)working example.
import unittest

MyCollection = set
AnotherCollection = set
# ... many more collections


def maximise(collection, array):
    return 2


class TestSubClass(unittest.TestCase):

    def __init__(self, collection_class):
        unittest.TestCase.__init__(self)
        self.collection_class = collection_class
        self.maximise_fn = lambda array: maximise(collection_class, array)

    def test_single(self):
        self.assertEqual(self.maximise_fn([1]), 1)

    def test_overflow(self):
        self.assertEqual(self.maximise_fn([3]), 1)

    # ... many more tests


def run_suite():
    suite = unittest.defaultTestLoader
    for collection in [MyCollection, AnotherCollection]:
        suite.loadTestsFromTestCase(TestSubClass(collection))
    unittest.TextTestRunner().run(suite)


def main():
    run_suite()


if __name__ == '__main__':
    main()
The above approach errors in loadTestsFromTestCase with:
TypeError: issubclass() arg 1 must be a class
How about using pytest with a parametrized fixture:
import pytest

MyCollection = set
AnotherCollection = set


def maximise(collection, array):
    return 1


@pytest.fixture(scope='module', params=[MyCollection, AnotherCollection])
def maximise_fn(request):
    return lambda array: maximise(request.param, array)


def test_single(maximise_fn):
    assert maximise_fn([1]) == 1


def test_overflow(maximise_fn):
    assert maximise_fn([3]) == 1
If that's not an option, you can make a mixin to contain the test functions, and subclasses to provide the maximise_fns. Note that the mixin deliberately does not inherit from unittest.TestCase, so the loader won't try to run its tests on their own:
import unittest

MyCollection = set
AnotherCollection = set


def maximise(collection, array):
    return 1


class TestCollectionMixin:

    def test_single(self):
        self.assertEqual(self.maximise_fn([1]), 1)

    def test_overflow(self):
        self.assertEqual(self.maximise_fn([3]), 1)


class TestMyCollection(TestCollectionMixin, unittest.TestCase):
    maximise_fn = lambda self, array: maximise(MyCollection, array)


class TestAnotherCollection(TestCollectionMixin, unittest.TestCase):
    maximise_fn = lambda self, array: maximise(AnotherCollection, array)


if __name__ == '__main__':
    unittest.main()

Python injecting random number into tests

I have written code like this:
import random


def choice(states):
    states = list(states)
    rnd = random.random()
    for state, p in states:
        rnd -= p
        if rnd <= 0:
            return state
And I need to create some tests:
import unittest


class Tests(unittest.TestCase):

    def test_choice(self):
        self.assertEqual(choice(states), something_equl)

How am I supposed to inject my own random number into the test to get deterministic results?
Mock the random.random() function, example:
import random
import unittest

import mock


def choice(states):
    states = list(states)
    rnd = random.random()
    for state, p in states:
        rnd -= p
        if rnd <= 0:
            return state


class Tests(unittest.TestCase):

    @mock.patch('random.random')
    def test_first_state_fires(self, random_call):
        random_call.return_value = 1
        self.assertEqual(choice([(1, 1)]), 1)

    @mock.patch('random.random')
    def test_returns_none(self, random_call):
        random_call.return_value = 2
        self.assertIsNone(choice([(1, 1)]))
You can use the unittest.mock library to patch out the random() function. The library is part of Python 3.3 and up; for older versions you can install it separately as mock:
try:
    from unittest import mock
except ImportError:
    import mock


class Tests(unittest.TestCase):

    @mock.patch('random.random')
    def test_choice(self, mock_random):
        mock_random.return_value = 0.42
        self.assertEqual(choice(states), something_equl)
I would like to expand on these answers with a full script, to make the approach easier to understand and adapt to other cases.
import random
from unittest import TestCase, mock


def get_random_words():  # Simple function using random.random
    l = []
    for _ in range(3):
        l.append(random.random())
    return "".join([str(n) for n in l])


class TestRandom(TestCase):

    @mock.patch('random.random')  # *(1)
    def test_get_random_words(self, mock_random):
        mock_random.side_effect = [1, 7, 3, 6]  # *(2)
        result = get_random_words()
        self.assertEqual(result, '173', 'Does not generate correct numbers')
Considerations
*(1) For this example the function lives in the same file. If the module under test does import random, patching 'random.random' works from anywhere; but if it does from random import random, you must patch the name where it is used, e.g. @mock.patch('your_package.your_file.random').
*(2) Here get_random_words calls random.random 3 times, so mock_random.side_effect must hold at least as many items as there are calls; with fewer items, the mock raises StopIteration when the list is exhausted.
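A quick standalone illustration of that exhaustion behavior:

from unittest import mock  # or 'import mock' on Python 2

m = mock.Mock(side_effect=[1, 7, 3])
m()  # returns 1
m()  # returns 7
m()  # returns 3
m()  # raises StopIteration: the side_effect list is exhausted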

Python library 'unittest': Generate multiple tests programmatically [duplicate]

Possible Duplicate:
How do you generate dynamic (parameterized) unit tests in Python?
I have a function to test, under_test, and a set of expected input/output pairs:
[
    (2, 332),
    (234, 99213),
    (9, 3),
    # ...
]
I would like each one of these input/output pairs to be tested in its own test_* method. Is that possible?
This is sort of what I want, but forcing every single input/output pair into a single test:
class TestPreReqs(unittest.TestCase):

    def setUp(self):
        self.expected_pairs = [(23, 55), (4, 32)]

    def test_expected(self):
        for exp in self.expected_pairs:
            self.assertEqual(under_test(exp[0]), exp[1])


if __name__ == '__main__':
    unittest.main()
(Also, do I really want to be putting that definition of self.expected_pairs in setUp?)
UPDATE: Trying doublep's advice:
class TestPreReqs(unittest.TestCase):

    def setUp(self):
        expected_pairs = [
            (2, 3),
            (42, 11),
            (3, None),
            (31, 99),
        ]
        for k, pair in expected_pairs:
            setattr(TestPreReqs, 'test_expected_%d' % k, create_test(pair))


def create_test(pair):
    def do_test_expected(self):
        self.assertEqual(get_pre_reqs(pair[0]), pair[1])
    return do_test_expected


if __name__ == '__main__':
    unittest.main()
This does not work. 0 tests are run. Did I adapt the example incorrectly?
I had to do something similar. I created simple TestCase subclasses that took a value in their __init__, like this:
class KnownGood(unittest.TestCase):

    def __init__(self, input, output):
        super(KnownGood, self).__init__()
        self.input = input
        self.output = output

    def runTest(self):
        self.assertEqual(function_to_test(self.input), self.output)
I then made a test suite with these values:
def suite():
    suite = unittest.TestSuite()
    suite.addTests(KnownGood(input, output) for input, output in known_values)
    return suite
You can then run it from your main method:
if __name__ == '__main__':
    unittest.TextTestRunner().run(suite())
The advantages of this are:
As you add more values, the number of reported tests increases, which makes you feel like you are doing more.
Each individual test case can fail individually
It's conceptually simple, since each input/output value is converted into one TestCase
Not tested:
class TestPreReqs(unittest.TestCase):
    ...


def create_test(pair):
    def do_test_expected(self):
        self.assertEqual(under_test(pair[0]), pair[1])
    return do_test_expected


for k, pair in enumerate([(23, 55), (4, 32)]):
    test_method = create_test(pair)
    test_method.__name__ = 'test_expected_%d' % k
    setattr(TestPreReqs, test_method.__name__, test_method)
If you use this often, you could prettify this by using utility functions and/or decorators, I guess. Note that pairs are not an attribute of TestPreReqs object in this example (and so setUp is gone). Rather, they are "hardwired" in a sense to the TestPreReqs class.
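For instance, the loop could be wrapped in a small class decorator (a sketch, not from the original answer; with_pairs is a made-up name, and it reuses create_test from above):

def with_pairs(pairs):
    def decorate(cls):
        for k, pair in enumerate(pairs):
            test_method = create_test(pair)
            test_method.__name__ = 'test_expected_%d' % k
            setattr(cls, test_method.__name__, test_method)
        return cls
    return decorate


@with_pairs([(23, 55), (4, 32)])  # class decorators require Python 2.6+
class TestPreReqs(unittest.TestCase):
    pass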
As often with Python, there is a complicated way to provide a simple solution.
In this case, we can use metaprogramming, decorators, and various nifty Python tricks to achieve a nice result. Here is what the final test will look like:
import unittest

# Some magic code will be added here later


class DummyTest(unittest.TestCase):

    @for_examples(1, 2)
    @for_examples(3, 4)
    def test_is_smaller_than_four(self, value):
        self.assertTrue(value < 4)

    @for_examples((1, 2), (2, 4), (3, 7))
    def test_double_of_X_is_Y(self, x, y):
        self.assertEqual(2 * x, y)


if __name__ == "__main__":
    unittest.main()
When executing this script, the result is:
..F...F
======================================================================
FAIL: test_double_of_X_is_Y(3,7)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/Users/xdecoret/Documents/foo.py", line 22, in method_for_example
method(self, *example)
File "/Users/xdecoret/Documents/foo.py", line 41, in test_double_of_X_is_Y
self.assertEqual(2 * x, y)
AssertionError: 6 != 7
======================================================================
FAIL: test_is_smaller_than_four(4)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/Users/xdecoret/Documents/foo.py", line 22, in method_for_example
method(self, *example)
File "/Users/xdecoret/Documents/foo.py", line 37, in test_is_smaller_than_four
self.assertTrue(value < 4)
AssertionError
----------------------------------------------------------------------
Ran 7 tests in 0.001s
FAILED (failures=2)
which achieves our goal:
it is unobtrusive: we derive from TestCase as usual
we write parametrized tests only once
each example value is considered an individual test
the decorator can be stacked, so it is easy to use sets of examples (e.g., using a function to build the list of values from example files or directories)
The icing on the cake: it works for signatures of arbitrary arity.
So how does it work? Basically, the decorator stores the examples in an attribute of the function, a metaclass replaces every decorated function with a list of generated functions, and unittest.TestCase is replaced with our subclass. The magic code (to be pasted at the "magic" comment above) is:
__examples__ = "__examples__"


def for_examples(*examples):
    def decorator(f, examples=examples):
        setattr(f, __examples__, getattr(f, __examples__, ()) + examples)
        return f
    return decorator


class TestCaseWithExamplesMetaclass(type):

    def __new__(meta, name, bases, dict):

        def tuplify(x):
            if not isinstance(x, tuple):
                return (x,)
            return x

        for methodname, method in dict.items():
            if hasattr(method, __examples__):
                dict.pop(methodname)
                examples = getattr(method, __examples__)
                delattr(method, __examples__)
                for example in (tuplify(x) for x in examples):
                    def method_for_example(self, method=method, example=example):
                        method(self, *example)
                    methodname_for_example = methodname + "(" + ", ".join(str(v) for v in example) + ")"
                    dict[methodname_for_example] = method_for_example
        return type.__new__(meta, name, bases, dict)


class TestCaseWithExamples(unittest.TestCase):
    __metaclass__ = TestCaseWithExamplesMetaclass
    pass


unittest.TestCase = TestCaseWithExamples
If someone wants to package this nicely, or propose a patch for unittest, feel free! A quote of my name will be appreciated.
The code can be made much simpler and fully encapsulated in the decorator if you are ready to use frame introspection (import the sys module):
def for_examples(*parameters):

    def tuplify(x):
        if not isinstance(x, tuple):
            return (x,)
        return x

    def decorator(method, parameters=parameters):
        for parameter in (tuplify(x) for x in parameters):
            def method_for_parameter(self, method=method, parameter=parameter):
                method(self, *parameter)
            args_for_parameter = ",".join(repr(v) for v in parameter)
            name_for_parameter = method.__name__ + "(" + args_for_parameter + ")"
            frame = sys._getframe(1)  # pylint: disable-msg=W0212
            frame.f_locals[name_for_parameter] = method_for_parameter
        return None
    return decorator
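Since this version plants the generated methods directly into the enclosing class body's namespace (and binds the original name to None, which the test loader ignores), there is no metaclass to install, and usage presumably reduces to:

import sys
import unittest


class DummyTest(unittest.TestCase):

    @for_examples(1, 2, 3)
    def test_is_smaller_than_four(self, value):
        self.assertTrue(value < 4)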
nose (suggested by @Paul Hankin)
#!/usr/bin/env python
# file: test_pairs_nose.py
from nose.tools import eq_ as eq

from mymodule import f


def test_pairs():
    for input, output in [(2, 332), (234, 99213), (9, 3)]:
        yield _test_f, input, output


def _test_f(input, output):
    try:
        eq(f(input), output)
    except AssertionError:
        if input == 9:  # expected failure
            from nose.exc import SkipTest
            raise SkipTest("expected failure")
        else:
            raise


if __name__ == "__main__":
    import nose
    nose.main()
Example:
$ nosetests test_pairs_nose -v
test_pairs_nose.test_pairs(2, 332) ... ok
test_pairs_nose.test_pairs(234, 99213) ... ok
test_pairs_nose.test_pairs(9, 3) ... SKIP: expected failure
----------------------------------------------------------------------
Ran 3 tests in 0.001s
OK (SKIP=1)
unittest (an approach similar to @doublep's one)
#!/usr/bin/env python
import unittest2 as unittest

from mymodule import f


def add_tests(generator):
    def class_decorator(cls):
        """Add tests to `cls` generated by `generator()`."""
        for f, input, output in generator():
            test = lambda self, i=input, o=output, f=f: f(self, i, o)
            test.__name__ = "test_%s(%r, %r)" % (f.__name__, input, output)
            setattr(cls, test.__name__, test)
        return cls
    return class_decorator


def _test_pairs():
    def t(self, input, output):
        self.assertEqual(f(input), output)

    for input, output in [(2, 332), (234, 99213), (9, 3)]:
        tt = t if input != 9 else unittest.expectedFailure(t)
        yield tt, input, output


class TestCase(unittest.TestCase):
    pass


TestCase = add_tests(_test_pairs)(TestCase)


if __name__ == "__main__":
    unittest.main()
Example:
$ python test_pairs_unit2.py -v
test_t(2, 332) (__main__.TestCase) ... ok
test_t(234, 99213) (__main__.TestCase) ... ok
test_t(9, 3) (__main__.TestCase) ... expected failure
----------------------------------------------------------------------
Ran 3 tests in 0.000s
OK (expected failures=1)
If you don't want to install unittest2 then add:
try:
    import unittest2 as unittest
except ImportError:
    import unittest

if not hasattr(unittest, 'expectedFailure'):
    import functools

    def _expectedFailure(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            try:
                func(*args, **kwargs)
            except AssertionError:
                pass
            else:
                raise AssertionError("UnexpectedSuccess")
        return wrapper

    unittest.expectedFailure = _expectedFailure
Some of the tools available for doing parametrized tests in Python are:
Nose test generators (only for function tests, not TestCase classes)
nose-parameterized by David Wolever (also for TestCase classes)
Unittest template by Boris Feld
Parametrized tests in py.test
parametrized-testcase by Austin Bingham
See also question 1676269 for more answers to this question.
I think Rory's solution is the cleanest and shortest. However, this variation of doublep's "create synthetic functions in a TestCase" also works:
import sys
import unittest
from functools import partial

# classify, getSample and REPORTS are assumed to come from the code under test


class TestAllReports(unittest.TestCase):
    pass


def test_spamreport(name):
    assert classify(getSample(name)) == 'spamreport', name


for rep in REPORTS:
    testname = 'test_' + rep
    testfunc = partial(test_spamreport, rep)
    testfunc.__doc__ = testname
    setattr(TestAllReports, testname, testfunc)


if __name__ == '__main__':
    unittest.main(argv=sys.argv + ['--verbose'])
