How to control incremental test cases in pytest - python

@pytest.mark.incremental
class Test_aws():
    def test_case1(self):
        # ----- some code here -----
        result = someMethodTogetResult
        assert result[0] == True
        orderID = result[1]

    def test_case2(self):
        result = someMethodTogetResult  # can only be performed once test case 1 has run successfully
        assert result == True

    def test_deleteOrder_R53HostZonePrivate(self):
        result = someMethodTogetResult
        assert result[0] == True
The current behavior is: if test 1 passes then test 2 runs, and if test 2 passes then test 3 runs.
What I need is:
test_case3 should run if test_case1 passed; test_case2 should not change that behavior in any way. Any thoughts here?

I guess you are looking for pytest-dependency which allows setting conditional run dependencies between tests. Example:
import random
import pytest

class TestAWS:
    @pytest.mark.dependency()
    def test_instance_start(self):
        assert random.choice((True, False))

    @pytest.mark.dependency(depends=['TestAWS::test_instance_start'])
    def test_instance_stop(self):
        assert random.choice((True, False))

    @pytest.mark.dependency(depends=['TestAWS::test_instance_start'])
    def test_instance_delete(self):
        assert random.choice((True, False))
test_instance_stop and test_instance_delete will run only if test_instance_start succeeds and skip otherwise. However, since test_instance_delete does not depend on test_instance_stop, the former will execute no matter what the result of the latter test is. Run the example test class several times to verify the desired behaviour.
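Applied to the original question, a minimal sketch along the same lines (with random results standing in for the real AWS calls, and pytest-dependency installed) marks only test_case1 as a dependency of the delete test and leaves test_case2 independent:

import random
import pytest

class TestAws:
    @pytest.mark.dependency()
    def test_case1(self):
        # stand-in for the real AWS call from the question
        assert random.choice((True, False))

    def test_case2(self):
        # no dependency mark: its outcome affects nothing else
        assert random.choice((True, False))

    @pytest.mark.dependency(depends=['TestAws::test_case1'])
    def test_case3(self):
        # runs only if test_case1 passed; the result of test_case2 is ignored
        assert random.choice((True, False))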

To complement hoefling's answer, another option is to use pytest-steps to perform incremental testing. This can help you in particular if you wish to share some kind of incremental state/intermediate results between the steps.
However, it does not implement advanced dependency mechanisms like pytest-dependency does, so use the package that better suits your goal.
With pytest-steps, hoefling's example would be written as:
import random
from pytest_steps import test_steps, depends_on

def step_instance_start():
    assert random.choice((True, False))

@depends_on(step_instance_start)
def step_instance_stop():
    assert random.choice((True, False))

@depends_on(step_instance_start)
def step_instance_delete():
    assert random.choice((True, False))

@test_steps(step_instance_start, step_instance_stop, step_instance_delete)
def test_suite(test_step):
    # Execute the step
    test_step()
EDIT: there is a new 'generator' mode to make it even easier:
import random
from pytest_steps import test_steps, optional_step

@test_steps('step_instance_start', 'step_instance_stop', 'step_instance_delete')
def test_suite():
    # First step (Start)
    assert random.choice((True, False))
    yield

    # Second step (Stop)
    with optional_step('step_instance_stop') as stop_step:
        assert random.choice((True, False))
    yield stop_step

    # Third step (Delete)
    with optional_step('step_instance_delete') as delete_step:
        assert random.choice((True, False))
    yield delete_step
Check the documentation for details. (I'm the author of this package by the way ;) )

You can use the pytest-ordering package to order your tests using pytest marks. The author of the package explains the usage here.
Example:
@pytest.mark.first
def test_first():
    pass

@pytest.mark.second
def test_2():
    pass

@pytest.mark.order5
def test_5():
    pass

Related

Pytest mocking: pass kwargs through side_effect function

I want to test a file called ninja.py, written in Python 3.6.
# File ninja.py
def what_to_do_result(result):
    # Send a mail, write something in a file, play a song or whatever
    ...

def my_function(a, b):
    # Step 1
    result = a + b
    # Step 2
    if result == 3:
        what_to_do_result(result)
    elif result == 5:
        what_to_do_result(result + 1)
    else:
        return True
I have started writing a test file called test_ninja.py and wrote some unit tests. I use pytest.
import pytest

class MyTestException(Exception):
    pass

def run_side_effect(*args, **kwargs):
    raise MyTestException(kwargs["result"])

@pytest.fixture(name="resource")
def setup_fixture():
    # Some code here
    ...

class TestNinja:
    @staticmethod
    def setup_method():
        # Function called before each test
        ...

    @staticmethod
    def teardown_method():
        # Function called after each test
        ...

    @staticmethod
    def test_my_function(mocker, resource):
        # How to do this ???
        mocker.patch("ninja.what_to_do_result", return_value=None, side_effect=run_side_effect)
        # Then the test
        assert 1 == 1  # -> This works
        with pytest.raises(MyTestException):
            ninja_function(a=1, b=2)
        assert ninja_function(a=5, b=10)
The point is that I want to mock the function ninja.what_to_do_result and apply a side effect (= run a function).
I want the side effect to use the parameter (kwargs) of the function what_to_do_result.
But I don't know how to do this, because there are multiple possibilities: in step 2, what_to_do_result is called when the result is 3 or 5, which correspond to two different use cases I want to test.
Can you help me?
I did not find the related section in the documentation below.
Link to the documentation: https://github.com/pytest-dev/pytest-mock
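As a hint: with unittest.mock (and pytest-mock's mocker.patch), a side_effect function receives exactly the arguments the mocked function was called with. ninja.my_function calls what_to_do_result(result) positionally, so the value arrives in *args rather than kwargs. A minimal sketch, assuming ninja.py is importable and pytest-mock is installed:

import pytest
import ninja

class MyTestException(Exception):
    pass

def run_side_effect(*args, **kwargs):
    # the mocked call is positional, so read the value from args
    raise MyTestException(args[0] if args else kwargs.get("result"))

def test_my_function_calls_what_to_do_result(mocker):
    mocker.patch("ninja.what_to_do_result", side_effect=run_side_effect)
    with pytest.raises(MyTestException) as excinfo:
        ninja.my_function(a=1, b=2)      # result == 3 -> what_to_do_result(3)
    assert excinfo.value.args[0] == 3
    assert ninja.my_function(a=5, b=10)  # result == 15 -> no call, returns True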

unittest - How to test internal parameter in a function?

I'm having some issues creating a unit test for an internal variable.
My structure is:
[1] my_animal.py contains Myclass and method: do_bite()
my_animal.py
class Myclass():
    def do_bite(self):
        return 1
[2] my_module.py contains jobMain("") which is using the method from my_animal.py
my_module.py
import sys
from my_animal import Myclass

def jobMain(directoryPath):
    flag = -1
    result = Myclass()
    if result.do_bite() is None:
        flag = 0
    if result.do_bite() is 1:
        flag = 1
    if result.do_bite() is 2:
        flag = 2
[3] my_test.py contains the unittest to test jobMain in my_module.py
my_test.py
import pytest
from unittest import mock

from my_module import jobMain

# Mock Myclass.do_bite to return None
@pytest.fixture
def mock_dobite0():
    with mock.patch('my_module.Myclass') as mocked_animal:
        mocked_animal.return_value.do_bite.return_value = None
        yield

# Mock Myclass.do_bite to return 1
@pytest.fixture
def mock_dobite1():
    with mock.patch('my_module.Myclass') as mocked_animal:
        mocked_animal.return_value.do_bite.return_value = 1
        yield

# Mock Myclass.do_bite to return 2
@pytest.fixture
def mock_dobite2():
    with mock.patch('my_module.Myclass') as mocked_animal:
        mocked_animal.return_value.do_bite.return_value = 2
        yield

# My unit tests for the do_bite() method
def test_dobite0(mock_Myclass, mock_dobite0):
    jobMain("")

def test_dobite1(mock_Myclass, mock_dobite1):
    jobMain("")

def test_dobite2(mock_Myclass, mock_dobite2):
    jobMain("")
My question is: how do I test the 'flag' variable inside jobMain?
'flag' must be assigned the correct value (e.g. do_bite() == 1 => flag == 1).
The variable flag only exists in the scope of jobMain. If you want to use the variable outside jobMain, the most common ways are:
1) return the value
This is quite obvious. Since jobMain is a function, it returns a value. Without an explicit return statement you return None. You could just
def jobmain(pth):
    # do stuff and assign flag
    return flag

# and inside tests
assert jobmain("") == 1
2) Use a class instead
If you want jobMain to remember some state, it is common practice to use objects. Then flag would be an attribute of the object and could be accessed from outside, after you call any method (function) of JobMain. For example:
class JobMain:
    def __init__(self):
        self.flag = -1

    def run(self, pth):
        result = Myclass()
        if result.do_bite() is None:
            self.flag = 0
        if result.do_bite() is 1:
            self.flag = 1
        if result.do_bite() is 2:
            self.flag = 2

# and inside the test
job = JobMain()
job.run("")
assert job.flag == 1
Note
I just copy-pasted your code for setting the flag. Note that you call do_bite() many times, if the resulting value is None or 1. Also, when testing against a number, one should use == instead of is.
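For illustration, applying that note (and reusing the question's Myclass), jobMain could call do_bite() only once, compare with ==, and return the flag:

def jobMain(directoryPath):
    bite = Myclass().do_bite()   # call the method only once
    if bite is None:
        flag = 0
    elif bite == 1:              # == rather than 'is' for numbers
        flag = 1
    elif bite == 2:
        flag = 2
    else:
        flag = -1
    return flag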
How to test 'flag' parameter inside JobMain?
You don't. It's an internal variable. Testing it would be glass-box testing; the test will break if the implementation changes.
Instead, test the effect of flag. This is black-box testing. Only the interface is tested. If the implementation changes the test still works allowing the code to be aggressively refactored.
Note: If you don't hard code result = Myclass() you don't need to mock. Pass it in as an argument with the default being Myclass().
def jobMain(directoryPath, result=Myclass()):
Then you don't need to patch Myclass(). Instead, pass in a mock object.
# I don't know unittest.mock very well, but something like this:
from unittest.mock import Mock

mock = Mock(Myclass)
mock.do_bite.return_value = 2
jobMain('', result=mock)
This also makes the code more flexible outside of testing.

Skip specified Python unit-test from command line

I have a bunch of unit tests in my unit-test file. However, there is one test I would like to skip only when running the tests from the command line. I know how to always skip it (@unittest.skip), but I want to somehow skip it only when running the unit-test file from the command line. Is this possible?
Something like this:
test_all_my_tests.py -exclude test_number_five()
Thanks
Great question.
One idea could be to run the command with arguments that specify which tests to skip.
Then in your script you would parse the passed arguments and call your tests accordingly.
Your input would look like:
test_all_my_tests.py -exclude 5
and in the Python script you would check for a "-exclude" argument and read the value that follows it.
Good luck!
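A rough sketch of that idea (the test names here are made up for illustration; the argument is pulled out of sys.argv before unittest.main() sees it):

import sys
import unittest

# collect "-exclude <test name>" before unittest parses the command line
EXCLUDED = set()
if "-exclude" in sys.argv:
    i = sys.argv.index("-exclude")
    EXCLUDED.add(sys.argv[i + 1])
    del sys.argv[i:i + 2]

class TestAllMyTests(unittest.TestCase):
    @unittest.skipIf("test_number_five" in EXCLUDED, "excluded on the command line")
    def test_number_five(self):
        self.assertTrue(True)

    def test_number_six(self):
        self.assertTrue(True)

if __name__ == "__main__":
    unittest.main()

Running python test_all_my_tests.py -exclude test_number_five then skips only that test; running the file without the flag runs everything.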
You can have a look at @unittest.skipIf() or even implement your own skip decorator.
Example
Here is a working example where I implemented a custom decorator, skipIfOverCounter, which is attached to all tests like this:
@skipIfOverCounter
def test_upper(self):
    ...
The decorator increments a counter and compares it to the command-line argument.
Output
Implemented 3 unit tests:
test_upper()
test_isupper()
test_split()
Then I called python .\unittests.py 0:
Skipped test 0
Ran 'test_isupper'
Ran 'test_split'
With param = 1: python .\unittests.py 1
Skipped test 1
Ran 'test_split'
Ran 'test_upper'
Skip the last test: python .\unittests.py 2
Skipped test 2
Ran 'test_isupper'
Ran 'test_upper'
Full working sample
import sys
import unittest

SKIP_INDEX = 0
COUNTER = 0
if len(sys.argv) > 1:
    SKIP_INDEX = int(sys.argv.pop())

def skipIfOverCounter(obj):
    global COUNTER
    if SKIP_INDEX == COUNTER:
        print(f"Skipped test {COUNTER}")
        COUNTER = COUNTER + 1
        return unittest.skip("Skipped test")(obj)
    COUNTER = COUNTER + 1
    return obj

class TestStringMethods(unittest.TestCase):
    @skipIfOverCounter
    def test_upper(self):
        print("Ran 'test_upper'")
        self.assertEqual('foo'.upper(), 'FOO')

    @skipIfOverCounter
    def test_isupper(self):
        print("Ran 'test_isupper'")
        self.assertTrue('FOO'.isupper())
        self.assertFalse('Foo'.isupper())

    @skipIfOverCounter
    def test_split(self):
        print("Ran 'test_split'")
        s = 'hello world'
        self.assertEqual(s.split(), ['hello', 'world'])
        # check that s.split fails when the separator is not a string
        with self.assertRaises(TypeError):
            s.split(2)

if __name__ == '__main__':
    unittest.main()
You could even extend this by adapting the decorator to only execute the first two tests, or something like that.
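For example, a hypothetical drop-in variant of the decorator from the sample above, which skips every test whose definition index is at or above the given number (so python .\unittests.py 2 would run only the first two tests defined):

def skipIfAtOrOverCounter(obj):
    # skip every test whose definition index >= SKIP_INDEX
    global COUNTER
    index = COUNTER
    COUNTER = COUNTER + 1
    if index >= SKIP_INDEX:
        print(f"Skipped test {index}")
        return unittest.skip("Skipped test")(obj)
    return obj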

Split a test in different functions with pytest

I'm using pytest and have multiple tests to run to check an issue.
I would like to split all tests into different functions like this:
# test_myTestSuite.py
@pytest.mark.issue(123)
class MyTestSuite():
    def test_part_1(self):
        result = do_something()
        assert result == True

    def test_part_2(self):
        result = do_an_other_something()
        assert result == 'ok'
Of course, I implemented issue in conftest.py:
# conftest.py
def pytest_addoption(parser):
    group = parser.getgroup('Issues')
    group.addoption('--issue', action='store',
                    dest='issue', default=0,
                    help='')
but I don't know how to hook once after testing MyTestSuite and check that all tests of MyTestSuite correctly passed.
Does anyone have any ideas?
PS: this is my first post on StackOverflow.
Try using the return value as the simplest form of positive debug confirmation, as shown below.
@pytest.mark.issue(123)
class MyTestSuite():
    def test_part_1(self):
        result = do_something()
        assert result == True
        return 'tp1', True

    def test_part_2(self):
        result = do_an_other_something()
        assert result == 'ok'
        return 'tp2', True
..and then where you run your tests from:
x = MyTestSuite().test_part_1()
if x[1] == True:
    print('Test %s completed correctly' % x[0])
The result after running test 1 is either:
Test tp1 completed correctly, or...
AssertionError.
Collecting assertion errors:
collected_errors = []

def test_part_1():
    testname = 'tp1'
    try:
        result = do_something()
        assert result == True
        return testname, True
    except Exception as error:
        info = (testname, error)
        collected_errors.append(info)
You can find more assertion flavours here on SO.
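Coming back to the original question of hooking once after MyTestSuite has run: one possibility is to record each test's outcome in conftest.py via pytest's reporting hooks and evaluate them at the end of the session. A rough sketch, where the per-class bookkeeping is only an illustration:

# conftest.py
RESULTS = {}

def pytest_runtest_logreport(report):
    # record the outcome of the 'call' phase, grouped by test class
    if report.when == "call" and "::" in report.nodeid:
        cls_name = report.nodeid.split("::")[1]
        RESULTS.setdefault(cls_name, []).append(report.outcome == "passed")

def pytest_sessionfinish(session, exitstatus):
    outcomes = RESULTS.get("MyTestSuite", [])
    if outcomes and all(outcomes):
        print("\nAll tests of MyTestSuite passed")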

Timed nose tests not failing properly

I have the following test that does not fail when running an especially long fib assert.
Tests that don't fail properly
#!/usr/bin/env python2.7
import unittest
from fib import fib
from nose.tools import timed

def test_gen(expected, actual):
    @timed(.001)
    def test_method(self):
        return self.assertEqual(expected, actual)
    return test_method

if __name__ == '__main__':
    all_cases = {
        'user': ((fib(40), 102334155), (fib(2), 1), (fib(5), 5)),
    }
    fails = {}
    for username, cases in all_cases.items():
        class FibTests(unittest.TestCase):
            pass

        for index, case in enumerate(cases):
            test_name = 'test_{0}_{1}'.format(username, index)
            test = test_gen(case[1], case[0])
            setattr(FibTests, test_name, test)

        suite = unittest.TestLoader().loadTestsFromTestCase(FibTests)
        result = unittest.TextTestRunner(verbosity=2).run(suite)
        fails[username] = len(result.failures)

    print fails
(Slow) Fib.py Implementation
def fib(x):
    if x == 0:
        return 0
    elif x == 1:
        return 1
    return fib(x - 2) + fib(x - 1)
Tests that fail properly
import time
import unittest
from fib import fib
from nose.tools import timed

def test_gen(expected, actual):
    @timed(.001)
    def test_method(self):
        time.sleep(.2)
        return self.assertEqual(expected, actual)
    return test_method
You are timing the wrong thing and never actually calling your test method. You are also going to an awful lot of effort to dynamically create your cases and add them to a class that does nothing but act as a container for tests, when nose supports generator test cases, which would be much easier to read and follow than what you have here.
Also, is this a test file or a piece of product code? If it is a test file, then having all of that code under if __name__ == '__main__' is kind of odd, and if it is a product code file, then having the test_gen function and the unittest and nose import statements in the unconditionally run part doesn't make much sense.
I'd recommend doing it the following way, and not trying to make the test script self-runnable; just launch it with nose.
from fib import fib
from nose.tools import timed

fib = timed(.001)(fib)

def execute(username, fib_arg, expected_output):
    result = fib(fib_arg)
    assert result == expected_output, ('%s fib(%d) got %d, expected %d'
                                       % (username, fib_arg, result, expected_output))

def test_fib():
    for name, datasets in (('user', ((40, 102334155), (2, 1), (5, 5))),):
        for arg, expected in datasets:
            yield execute, name, arg, expected
