pytest never fails regardless of the assertions in the first method. The same behavior is observed when I change the order of the methods. It only fails if I change something in the last method's assertions.
import pytest
from cpu import CPU

@pytest.fixture
def cpu():
    return CPU()

def test_00e0(cpu):
    cpu.sp = 0xa
    cpu.stack[cpu.sp] = 0x220
    cpu.pc = 0x200
    cpu.i_00ee()
    assert cpu.sp == 0x9
    assert cpu.pc == 0x220

def test_00e0(cpu):
    cpu.display[0][0] = 1
    cpu.i_00e0()
    assert sum([sum(x) for x in cpu.display]) == 0
    assert cpu.draw_flag == True
Both your test methods have the same name, so the 2nd is overwriting the 1st. Give them different names and you should be OK.
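For example, a minimal sketch of the renamed tests, assuming the first one is meant to exercise i_00ee and the second i_00e0 (the fixture stays unchanged):

def test_00ee(cpu):
    # pops the stack and restores pc
    cpu.sp = 0xa
    cpu.stack[cpu.sp] = 0x220
    cpu.pc = 0x200
    cpu.i_00ee()
    assert cpu.sp == 0x9
    assert cpu.pc == 0x220

def test_00e0(cpu):
    # clears the display and sets the draw flag
    cpu.display[0][0] = 1
    cpu.i_00e0()
    assert sum(sum(row) for row in cpu.display) == 0
    assert cpu.draw_flag == True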
I'm having some issues while creating a unit test for an internal parameter.
My structure is:
[1] my_animal.py contains Myclass and method: do_bite()
my_animal.py
class Myclass():
    def do_bite(self):
        return 1
[2] my_module.py contains jobMain("") which is using the method from my_animal.py
my_module.py
import sys
from my_animal import Myclass

def jobMain(directoryPath):
    flag = -1
    result = Myclass()
    if result.do_bite() is None:
        flag = 0
    if result.do_bite() is 1:
        flag = 1
    if result.do_bite() is 2:
        flag = 2
[3] my_test.py contains the unittest to test jobMain in my_module.py
my_test.py
import pytest
from unittest import mock
from my_module import jobMain

# Mock Myclass.do_bite to return None
@pytest.fixture
def mock_dobite0():
    with mock.patch('my_module.Myclass') as mocked_animal:
        mocked_animal.return_value.do_bite.return_value = None
        yield

# Mock Myclass.do_bite to return 1
@pytest.fixture
def mock_dobite1():
    with mock.patch('my_module.Myclass') as mocked_animal:
        mocked_animal.return_value.do_bite.return_value = 1
        yield

# Mock Myclass.do_bite to return 2
@pytest.fixture
def mock_dobite2():
    with mock.patch('my_module.Myclass') as mocked_animal:
        mocked_animal.return_value.do_bite.return_value = 2
        yield

# My unit tests for the do_bite() method
def test_dobite0(mock_Myclass, mock_dobite0):
    jobMain("")

def test_dobite1(mock_Myclass, mock_dobite1):
    jobMain("")

def test_dobite2(mock_Myclass, mock_dobite2):
    jobMain("")
My question is: how do I test the 'flag' variable inside jobMain?
'flag' must be assigned the correct value (e.g. do_bite() == 1 => flag == 1).
The variable flag only exists in the scope of jobMain. If you want to use the variable outside jobMain, the most common ways are:
1) return the value
This is quite obvious. Since jobMain is a function, it returns a value. Without an explicit return statement you return None. You could just
def jobmain(pth):
    # do stuff and assign flag
    return flag

# and inside tests
assert jobmain("") == 1
2) Use a class instead
If you want jobMain to remember some state, it is common practice to use objects. Then flag would be an attribute of the object and could be accessed from outside after you call any method of JobMain. For example:
class JobMain:
    def __init__(self):
        self.flag = -1

    def run(self, pth):
        result = Myclass()
        if result.do_bite() is None:
            self.flag = 0
        if result.do_bite() is 1:
            self.flag = 1
        if result.do_bite() is 2:
            self.flag = 2

# and inside the test
job = JobMain()
job.run("")
assert job.flag == 1
Note
I just copy-pasted your code for setting the flag. Note that do_bite() gets called several times because the if statements are not chained with elif. Also, when testing against a number, you should use == instead of is.
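For illustration, a minimal sketch of what the cleaned-up logic could look like if you also return the flag as in option 1 (the job_main name is mine, chosen only to avoid clashing with the original function):

from my_animal import Myclass

def job_main(directory_path):
    flag = -1
    bite = Myclass().do_bite()   # call do_bite() only once
    if bite is None:
        flag = 0
    elif bite == 1:              # compare numbers with ==, not is
        flag = 1
    elif bite == 2:
        flag = 2
    return flag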
How to test 'flag' parameter inside JobMain?
You don't. It's an internal variable. Testing it would be glass-box testing; the test will break if the implementation changes.
Instead, test the effect of flag. This is black-box testing. Only the interface is tested. If the implementation changes the test still works allowing the code to be aggressively refactored.
Note: If you don't hard code result = Myclass() you don't need to mock. Pass it in as an argument with the default being Myclass().
def jobMain(directoryPath, result=Myclass()):
Then you don't need to patch Myclass(). Instead, pass in a mock object.
# I don't know unittest.mock very well, but something like this.
mock = Mock(Myclass)
mock.do_bite.return_value = 2
jobMain('', result=mock)
This also makes the code more flexible outside of testing.
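Putting the two ideas together, a minimal sketch of what a black-box test could look like, assuming jobMain both accepts the collaborator as a result= argument and returns the flag (both of those are assumptions layered on top of the original code):

from unittest.mock import Mock

from my_animal import Myclass
from my_module import jobMain   # assumed to accept result=... and return flag

def test_flag_is_one_when_bite_returns_one():
    fake = Mock(Myclass)                 # mock specced to Myclass
    fake.do_bite.return_value = 1
    assert jobMain('', result=fake) == 1

def test_flag_is_two_when_bite_returns_two():
    fake = Mock(Myclass)
    fake.do_bite.return_value = 2
    assert jobMain('', result=fake) == 2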
I'm having trouble with pytest-mock and mocking open.
The code I wish to test looks like:
import re
import os

def get_uid():
    regex = re.compile('Serial\s+:\s*(\w+)')
    uid = "NOT_DEFINED"
    exists = os.path.isfile('/proc/cpuinfo')
    if exists:
        with open('/proc/cpuinfo', 'r') as file:
            cpu_details = file.read()
            uid = regex.search(cpu_details).group(1)
    return uid
So the test file is:
import os
import pytest
from cpu_info import uid

@pytest.mark.usefixtures("mocker")
class TestCPUInfo(object):
    def test_no_proc_cpuinfo_file(self):
        mocker.patch(os.path.isfile).return_value(False)
        result = uid.get_uid()
        assert result == "NOT_FOUND"

    def test_no_cpu_info_in_file(self):
        file_data = """
        Hardware : BCM2835
        Revision : a020d3
        """
        mocker.patch('__builtin__.open', mock_open(read_data=file_data))
        result = uid.get_uid()
        assert result == "NOT_DEFINED"

    def test_cpu_info(self):
        file_data = """
        Hardware : BCM2835
        Revision : a020d3
        Serial : 00000000e54cf3fa
        """
        mocker.patch('__builtin__.open', mock_open(read_data=file_data))
        result = uid.get_uid()
        assert result == "00000000e54cf3fa"
The test run gives:
pytest
======================================= test session starts ========================================
platform linux -- Python 3.5.3, pytest-4.4.1, py-1.8.0, pluggy-0.9.0
rootdir: /home/robertpostill/software/gateway
plugins: mock-1.10.4
collected 3 items
cpu_info/test_cpu_info.py FFF [100%]
============================================= FAILURES =============================================
______________________________ TestCPUInfo.test_no_proc_cpuingo_file _______________________________
self = <test_cpu_info.TestCPUInfo object at 0x75e6eaf0>
def test_no_proc_cpuingo_file(self):
> mocker.patch(os.path.isfile).return_value(False)
E NameError: name 'mocker' is not defined
cpu_info/test_cpu_info.py:9: NameError
___________________________________ TestCPUInfo.test_no_cpu_info ___________________________________
self = <test_cpu_info.TestCPUInfo object at 0x75e69d70>
def test_no_cpu_info(self):
file_data = """
Hardware : BCM2835
Revision : a020d3
"""
> mocker.patch('__builtin__.open', mock_open(read_data=file_data))
E NameError: name 'mocker' is not defined
cpu_info/test_cpu_info.py:18: NameError
____________________________________ TestCPUInfo.test_cpu_info _____________________________________
self = <test_cpu_info.TestCPUInfo object at 0x75e694f0>
def test_cpu_info(self):
file_data = """
Hardware : BCM2835
Revision : a020d3
Serial : 00000000e54cf3fa
"""
> mocker.patch('__builtin__.open', mock_open(read_data=file_data))
E NameError: name 'mocker' is not defined
cpu_info/test_cpu_info.py:28: NameError
===================================== 3 failed in 0.36 seconds =====================================
I think I've declared the mocker fixture correctly but it would seem not... What am I doing wrong?
There are not that many issues with mock usage in your tests. In fact, there are only two:
Accessing mocker fixture
If you need to access the return value of a fixture, include its name in the test function arguments, for example:
class TestCPUInfo:
    def test_no_proc_cpuinfo_file(self, mocker):
        mocker.patch(...)
pytest will automatically map the test argument value to fixture value when running the tests.
Using mocker.patch
mocker.patch is just a shim to unittest.mock.patch, nothing more; it's there merely for convenience so that you don't have to import unittest.mock.patch everywhere. This means that mocker.patch has the same signature as unittest.mock.patch and you can always consult the stdlib's docs when in doubt of using it correctly.
In your case, mocker.patch(os.path.isfile).return_value(False) is not a correct usage of the patch method. From the docs:
target should be a string in the form 'package.module.ClassName'.
...
patch() takes arbitrary keyword arguments. These will be passed to the Mock (or new_callable) on construction.
This means that the line
mocker.patch(os.path.isfile).return_value(False)
should be
mocker.patch('os.path.isfile', return_value=False)
Discrepancies between tested behaviour and real implementation logic
All that is left now are errors that have something to do with your implementation; you have to either adapt the tests to test the correct behaviour or fix the implementation errors.
Examples:
assert result == "NOT_FOUND"
will always raise because "NOT_FOUND" isn't even present in the code.
assert result == "NOT_DEFINED"
will always raise because uid = "NOT_DEFINED" will always be overwritten with regex search result and thus never returned.
Working example
Assuming your tests are the single source of truth, I fixed two errors with mock usage described above and adapted the implementation of get_uid() to make the tests pass:
import os
import re

def get_uid():
    regex = re.compile(r'Serial\s+:\s*(\w+)')
    exists = os.path.isfile('/proc/cpuinfo')
    if not exists:
        return 'NOT_FOUND'
    with open('/proc/cpuinfo', 'r') as file:
        cpu_details = file.read()
    match = regex.search(cpu_details)
    if match is None:
        return 'NOT_DEFINED'
    return match.group(1)
Tests:
import pytest
import uid

class TestCPUInfo:
    def test_no_proc_cpuinfo_file(self, mocker):
        mocker.patch('os.path.isfile', return_value=False)
        result = uid.get_uid()
        assert result == "NOT_FOUND"

    def test_no_cpu_info_in_file(self, mocker):
        file_data = """
        Hardware : BCM2835
        Revision : a020d3
        """
        mocker.patch('builtins.open', mocker.mock_open(read_data=file_data))
        result = uid.get_uid()
        assert result == "NOT_DEFINED"

    def test_cpu_info(self, mocker):
        file_data = """
        Hardware : BCM2835
        Revision : a020d3
        Serial : 00000000e54cf3fa
        """
        mocker.patch('builtins.open', mocker.mock_open(read_data=file_data))
        result = uid.get_uid()
        assert result == "00000000e54cf3fa"
Note that I'm using Python 3, so I can't patch __builtin__ and resort to patching builtins; aside from that, the code should be identical to Python 2 variant. Also, since mocker is used anyway, I used mocker.mock_open, thus saving me the additional import of unittest.mock.mock_open.
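If you need the same test module to run under both Python 2 and Python 3, here is a minimal sketch of picking the patch target by interpreter version (my own addition, not part of the answer above):

import sys

import uid

# open lives in 'builtins' on Python 3 and '__builtin__' on Python 2
OPEN_TARGET = 'builtins.open' if sys.version_info[0] >= 3 else '__builtin__.open'

def test_cpu_info(mocker):
    file_data = "Serial   : 00000000e54cf3fa\n"
    mocker.patch('os.path.isfile', return_value=True)
    mocker.patch(OPEN_TARGET, mocker.mock_open(read_data=file_data))
    assert uid.get_uid() == "00000000e54cf3fa"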
@pytest.mark.incremental
class Test_aws():
    def test_case1(self):
        # ----- some code here ----
        result = someMethodTogetResult
        assert result[0] == True
        orderID = result[1]

    def test_case2(self):
        result = someMethodTogetResult  # can only be performed once test case 1 has run successfully
        assert result == True

    def test_deleteOrder_R53HostZonePrivate(self):
        result = someMethodTogetResult
        assert result[0] == True
The current behavior is: if test 1 passes, then test 2 runs, and if test 2 passes, then test 3 runs.
What I need is:
test case 3 should run if test case 1 passed; test case 2 should not affect this in any way. Any thoughts here?
I guess you are looking for pytest-dependency which allows setting conditional run dependencies between tests. Example:
import random
import pytest

class TestAWS:
    @pytest.mark.dependency
    def test_instance_start(self):
        assert random.choice((True, False))

    @pytest.mark.dependency(depends=['TestAWS::test_instance_start'])
    def test_instance_stop(self):
        assert random.choice((True, False))

    @pytest.mark.dependency(depends=['TestAWS::test_instance_start'])
    def test_instance_delete(self):
        assert random.choice((True, False))
test_instance_stop and test_instance_delete will run only if test_instance_start succeeds and skip otherwise. However, since test_instance_delete does not depend on test_instance_stop, the former will execute no matter what the result of the latter test is. Run the example test class several times to verify the desired behaviour.
To complement hoefling's answer, another option is to use pytest-steps to perform incremental testing. This can help you in particular if you wish to share some kind of incremental state/intermediate results between the steps.
However it does not implement advanced dependency mechanisms like pytest-dependency, so use the package that better suits your goal.
With pytest-steps, hoefling's example would write:
import random
from pytest_steps import test_steps, depends_on

def step_instance_start():
    assert random.choice((True, False))

@depends_on(step_instance_start)
def step_instance_stop():
    assert random.choice((True, False))

@depends_on(step_instance_start)
def step_instance_delete():
    assert random.choice((True, False))

@test_steps(step_instance_start, step_instance_stop, step_instance_delete)
def test_suite(test_step):
    # Execute the step
    test_step()
EDIT: there is a new 'generator' mode to make it even easier:
import random
from pytest_steps import test_steps, optional_step

@test_steps('step_instance_start', 'step_instance_stop', 'step_instance_delete')
def test_suite():
    # First step (Start)
    assert random.choice((True, False))
    yield

    # Second step (Stop)
    with optional_step('step_instance_stop') as stop_step:
        assert random.choice((True, False))
    yield stop_step

    # Third step (Delete)
    with optional_step('step_instance_delete') as delete_step:
        assert random.choice((True, False))
    yield delete_step
Check the documentation for details. (I'm the author of this package by the way ;) )
You can use pytest-ordering package to order your tests using pytest mark. The author of the package explains the usage here
Example:
import pytest

@pytest.mark.first
def test_first():
    pass

@pytest.mark.second
def test_2():
    pass

@pytest.mark.order5
def test_5():
    pass
I'm using pytest and have multiple tests to run to check an issue.
I would like to split all tests into different functions like this:
# test_myTestSuite.py
@pytest.mark.issue(123)
class MyTestSuite():
    def test_part_1(self):
        result = do_something()
        assert result == True

    def test_part_2(self):
        result = do_an_other_something()
        assert result == 'ok'
of course, I implemented issue in conftest.py
# conftest.py
def pytest_addoption(parser):
    group = parser.getgroup('Issues')
    group.addoption('--issue', action='store',
                    dest='issue', default=0,
                    help='')
but I don't know how to hook once after MyTestSuite has finished and check that all of its tests passed correctly.
Does anyone have any ideas?
PS: this is my first post on StackOverflow.
Try using a return value as the simplest form of positive debug confirmation, as shown below.
@pytest.mark.issue(123)
class MyTestSuite():
    def test_part_1(self):
        result = do_something()
        assert result == True
        return 'tp1', True

    def test_part_2(self):
        result = do_an_other_something()
        assert result == 'ok'
        return 'tp2', True
..and then where you run your tests from:
x = MyTestSuite().test_part_1()
if x[1] == True:
    print('Test %s completed correctly' % x[0])
The result after running test1:
Test tp1 completed correctly, or...
AssertionError.
Collecting assertion errors:
collected_errors = []

def test_part_1():
    testname = 'tp1'
    try:
        result = do_something()
        assert result == True
        return testname, True
    except Exception as error:
        info = (testname, error)
        collected_errors.append(info)
More assertion flavours you can find here on SO.
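Building on that idea, a final check defined at the end of the module can report everything collected so far; this relies on pytest's default behaviour of running tests in definition order within a file, and is my own addition rather than part of the answer:

# Defined last in the module, so it runs after the tests above
def test_report_collected_errors():
    assert not collected_errors, 'Failed steps: %s' % collected_errors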
Pytest allows you to hook into the teardown phase for each test by implementing a function called pytest_runtest_teardown in a plugin:
def pytest_runtest_teardown(item, nextitem):
    pass
Is there an attribute or method on item that I can use to determine whether the test that just finished running passed or failed? I couldn't find any documentation for pytest.Item and hunting through the source code and playing around in ipdb didn't reveal anything obvious.
You may also consider call.excinfo in pytest_runtest_makereport:
def pytest_runtest_makereport(item, call):
    if call.when == 'setup':
        print('Called after setup for test case is executed.')
    if call.when == 'call':
        print('Called after test case is executed.')
        print('-->{}<--'.format(call.excinfo))
    if call.when == 'teardown':
        print('Called after teardown for test case is executed.')
The call object contains a whole bunch of additional information (test start time, stop time, etc.).
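For example, a minimal sketch of using call.start and call.stop in your own conftest.py to log how long the test body took (pytest_runtest_makereport is the real pytest hook; the print format is just illustrative):

# conftest.py
def pytest_runtest_makereport(item, call):
    if call.when == 'call':
        duration = call.stop - call.start   # both are seconds since the epoch
        print('{} took {:.3f}s'.format(item.nodeid, duration))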
Refer:
http://doc.pytest.org/en/latest/_modules/_pytest/runner.html
def pytest_runtest_makereport(item, call):
    when = call.when
    duration = call.stop - call.start
    keywords = dict([(x, 1) for x in item.keywords])
    excinfo = call.excinfo
    sections = []
    if not call.excinfo:
        outcome = "passed"
        longrepr = None
    else:
        if not isinstance(excinfo, ExceptionInfo):
            outcome = "failed"
            longrepr = excinfo
        elif excinfo.errisinstance(pytest.skip.Exception):
            outcome = "skipped"
            r = excinfo._getreprcrash()
            longrepr = (str(r.path), r.lineno, r.message)
        else:
            outcome = "failed"
            if call.when == "call":
                longrepr = item.repr_failure(excinfo)
            else:  # exception in setup or teardown
                longrepr = item._repr_failure_py(excinfo,
                                                 style=item.config.option.tbstyle)
    for rwhen, key, content in item._report_sections:
        sections.append(("Captured %s %s" % (key, rwhen), content))
    return TestReport(item.nodeid, item.location,
                      keywords, outcome, longrepr, when,
                      sections, duration)
The Node class doesn't have any information regarding the status of the last test; however, we do have the total number of failed tests (in item.session.testsfailed), and we can use it:
We can add a new member to the item.session object (not so nice, but you gotta love python!). This member will save the status of the last testsfailed - item.session.last_testsfailed_status.
If testsfailed > last_testsfailed_status, the test that just ran failed.
import pytest
import logging

logging.basicConfig(
    level='INFO',
    handlers=(
        logging.StreamHandler(),
        logging.FileHandler('log.txt')
    )
)

@pytest.mark.hookwrapper
def pytest_runtest_teardown(item, nextitem):
    outcome = yield
    if not hasattr(item.session, 'last_testsfailed_status'):
        item.session.last_testsfailed_status = 0
    if item.session.testsfailed and item.session.testsfailed > item.session.last_testsfailed_status:
        logging.info('Last test failed')
    item.session.last_testsfailed_status = item.session.testsfailed
Initially, I was also struggling to get the test status so that I could use it to build a custom report.
But after further analysis of the pytest_runtest_makereport hook function, I was able to see the various attributes of its 3 params (item, call, and report).
Let me list some of them:
Call:
excinfo (this further drills down to carry traceback if any)
start (start time of the test in float value since epoch time)
stop (stop time of the test in float value since epoch time)
when (can take values - setup, call, teardown)
item:
_fixtureinfo (contains info about any fixtures you have used)
nodeid (the test name as assumed by pytest)
cls (contains the class info of the test; by info I mean the variables declared and accessed in the test class)
funcargs (the parameters you have passed to your test, along with their values)
report:
outcome (this carries the test status)
longrepr (contains the failure info including the traceback)
when (can take values setup, call, teardown; note that the report's contents depend on this value)
FYI: there are other attributes for all of the above 3 params; I have mentioned only a few.
Below is a code snippet showing how I have hooked the function and used it.
def pytest_runtest_makereport(item, call, __multicall__):
    report = __multicall__.execute()
    if (call.when == "call") and hasattr(item, '_failed_expect'):
        report.outcome = "failed"
        summary = 'Failed Expectations:%s' % len(item._failed_expect)
        item._failed_expect.append(summary)
        report.longrepr = str(report.longrepr) + '\n' + ('\n'.join(item._failed_expect))
    if call.when == "call":
        ExTest.name = item.nodeid
        func_args = item.funcargs
        ExTest.parameters_used = dict((k, v) for k, v in func_args.items() if v and not hasattr(v, '__dict__'))
        # [(k, v) for k, v in func_args.items() if v and not hasattr(v, '__dict__')]
        t = datetime.fromtimestamp(call.start)
        ExTest.start_timestamp = t.strftime('%Y-%m-%d::%I:%M:%S %p')
        ExTest.test_status = report.outcome
        # TODO Get traceback info (call.excinfo.traceback)
    return report
Hook wrappers are the way to go - allow all the default hooks to run & then look at their results.
The example below shows 2 methods for detecting whether a test has failed (add it to your conftest.py):
import logging

import pytest

@pytest.hookimpl(hookwrapper=True)
def pytest_runtest_makereport(item, call):
    # Because this is a hookwrapper, calling `yield` lets the actual hooks run & returns a `_Result`
    result = yield
    # Get the actual `TestReport` which the hook(s) returned, having done the hard work for you
    report = result.get_result()

    # Method 1: `report.longrepr` is either None or a failure representation
    if report.longrepr:
        logging.error('FAILED: %s', report.longrepr)
    else:
        logging.info('Did not fail...')

    # Method 2: `report.outcome` is always one of ['passed', 'failed', 'skipped']
    if report.outcome == 'failed':
        logging.error('FAILED: %s', report.longrepr)
    elif report.outcome == 'skipped':
        logging.info('Skipped')
    else:  # report.outcome == 'passed'
        logging.info('Passed')
See TestReport documentation for details of longrepr and outcome
(It doesn't use pytest_runtest_teardown as the OP requested but it does easily let you check for failure)