py.test boolean switch between parameterized fixtures

How can you switch between parameterizations without doing every combination in between? For example, the test below will run 24 tests (4*3*2 for asdf, jkl and BOOLEAN respectively).
But I just want to switch between asdf and jkl as outputs, so there should only be 7 tests total, because I just want asdf_or_jkl to return one of asdf or jkl's parameterizations, not the product of the three arguments. Is it possible to do this using py.test's fixtures, or do you need to do something else?
import pytest

@pytest.fixture(params=list('asdf'))
def asdf(request):
    return request.param

@pytest.fixture(params=list('jkl'))
def jkl(request):
    return request.param

@pytest.fixture(params=[True, False])
def BOOLEAN(request):
    return request.param

@pytest.fixture
def asdf_or_jkl(BOOLEAN, asdf, jkl):
    if BOOLEAN:
        return asdf
    else:
        return jkl

def test_2(asdf_or_jkl):
    assert asdf_or_jkl == 0
Thanks!

What about this:
import pytest

list1 = list('ASDF')
list2 = list('jkl')

def alternate(list1, list2):
    num = min(len(list1), len(list2))
    result = [None] * (num * 2)
    result[::2] = list1[:num]
    result[1::2] = list2[:num]
    result.extend(list1[num:])
    result.extend(list2[num:])
    return result

@pytest.fixture(params=alternate(list1, list2))
def asdf_or_jkl(request):
    return request.param

def test_2(asdf_or_jkl):
    assert asdf_or_jkl == 0
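If the interleaved order doesn't matter, a simpler sketch is to concatenate the two parameter lists directly, which also gives the desired 4 + 3 = 7 runs:

import pytest

# One parametrized fixture whose params are simply the union of the
# two lists, so each value produces exactly one test run.
@pytest.fixture(params=list('asdf') + list('jkl'))
def asdf_or_jkl(request):
    return request.param

def test_2(asdf_or_jkl):
    assert asdf_or_jkl == 0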

Related

How to provide default values to single parameterized pytest fixture with arguments?

I am using pytest parametrized fixtures, which have variable return values.
This is a simplified example:
import pytest
@pytest.fixture
def mocked_value(mock_param):
    if mock_param:
        mocked_value = "true"
    else:
        mocked_value = "false"
    yield mocked_value

@pytest.mark.parametrize(("mock_param"), [True, False])
def test_both_mocked_parameters(mocked_value, mock_param):
    assert mocked_value == str(mock_param).lower()

@pytest.mark.parametrize(("mock_param"), [True])
def test_just_one_mocked_param(mocked_value, mock_param):
    assert mocked_value == "true"
Is there a way to make the pytest fixture have a default param given to it? Something like this, except built into the single fixture definition:
def _mocked_function(mock_param):
    if mock_param:
        mocked_value = "true"
    else:
        mocked_value = "false"
    return mocked_value

@pytest.fixture
def mocked_value_with_default():
    yield _mocked_function(True)

@pytest.fixture
def mocked_value(mock_param):
    yield _mocked_function(mock_param)

@pytest.mark.parametrize(("mock_param"), [True, False])
def test_both_mocked_parameters(mocked_value, mock_param):
    assert mocked_value == str(mock_param).lower()

def test_just_one_mocked_param(mocked_value_with_default):
    assert mocked_value_with_default == "true"
The above works, however it would be much cleaner to have just one fixture definition handling both use cases. How do I do this?
You could use fixture parametrization:
https://docs.pytest.org/en/stable/example/parametrize.html#indirect-parametrization
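A minimal sketch of that approach: the fixture reads its parameter from request.param when a test parametrizes it indirectly, and falls back to a default otherwise (the getattr default of True is an assumption for illustration):

import pytest

@pytest.fixture
def mocked_value(request):
    # With indirect parametrization the value arrives as request.param;
    # when the test doesn't parametrize the fixture, fall back to True.
    mock_param = getattr(request, "param", True)
    yield "true" if mock_param else "false"

@pytest.mark.parametrize("mocked_value", [True, False], indirect=True)
def test_both_mocked_parameters(mocked_value):
    assert mocked_value in ("true", "false")

def test_just_one_mocked_param(mocked_value):
    # No parametrization here, so the default applies.
    assert mocked_value == "true"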

Is there a way to nest fixture parametization in pytest?

I have an example where I essentially have a nested list of fixtures, i.e. fn2 below depends on the value of the fixture fn1 and is not known ahead of time. Each of these two functions needs to call a server to retrieve a list of parameters.
@pytest.fixture(params=generated_list())
def fn1(request):
    return request.param

@pytest.fixture(params=generates_list_2(fn1))
def fn2(request):
    return request.param

def test_fn(fn2):
    assert fn2 == 0
While it's not a great answer, this will work. It exploits a hook in the test generation procedure to manually generate the combinations of params.
def generated_list():
    return x

def generates_list_2(fn1):
    return x

def pytest_generate_tests(metafunc):
    if 'fn1' in metafunc.fixturenames and 'fn2' in metafunc.fixturenames:
        metafunc.parametrize(
            "fn1, fn2",
            [(i, j) for i in generated_list() for j in generates_list_2(i)]
        )

@pytest.fixture()
def fn1(request):
    return request.param

@pytest.fixture()
def fn2(request):
    return request.param

def test_fn(fn1, fn2):
    assert fn2 == 0
Yeah, you can nest fixtures. Just use one fixture as one of the arguments of another fixture, the same way you would use a fixture in a test.
Your code would look like this:
@pytest.fixture(params=generated_list())
def fn1(request):
    return request.param

@pytest.fixture(params=generates_list_2(fn1))
def fn2(request, fn1):
    return request.param

def test_fn(fn2):
    assert fn2 == 0

Split a test in different functions with pytest

I'm using pytest and have multiple tests to run to check an issue.
I would like to split all tests into different functions like this:
# test_myTestSuite.py
@pytest.mark.issue(123)
class MyTestSuite():
    def test_part_1(self):
        result = do_something()
        assert result == True

    def test_part_2(self):
        result = do_an_other_something()
        assert result == 'ok'
Of course, I implemented issue in conftest.py:
# conftest.py
def pytest_addoption(parser):
    group = parser.getgroup('Issues')
    group.addoption('--issue', action='store',
                    dest='issue', default=0,
                    help='')
but I don't know how to hook once after testing MyTestSuite and check that all tests of MyTestSuite correctly passed.
Does anyone have any ideas?
PS: this is my first post on StackOverflow.
Try using the return value as the simplest form of positive debug confirmation, as shown below.
@pytest.mark.issue(123)
class MyTestSuite():
    def test_part_1(self):
        result = do_something()
        assert result == True
        return 'tp1', True

    def test_part_2(self):
        result = do_an_other_something()
        assert result == 'ok'
        return 'tp2', True
...and then where you run your tests from:
x = MyTestSuite().test_part_1()
if x[1] == True:
    print('Test %s completed correctly' % x[0])
The result after running test1:
Test tp1 completed correctly, or...
AssertionError.
Collecting assertion errors:
collected_errors = []

def test_part_1():
    testname = 'tp1'
    try:
        result = do_something()
        assert result == True
        return testname, True
    except Exception as error:
        info = (testname, error)
        collected_errors.append(info)
You can find more assertion flavours here on SO.
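If the goal is to check, once the whole run has finished, that every test of MyTestSuite passed, one sketch (the results dict and the class-name check are illustrative assumptions, not part of the original question) is to record outcomes in conftest.py hooks:

# conftest.py
results = {}

def pytest_runtest_logreport(report):
    # Record the outcome of the call phase for tests of MyTestSuite.
    if report.when == "call" and "MyTestSuite" in report.nodeid:
        results[report.nodeid] = report.passed

def pytest_sessionfinish(session, exitstatus):
    if results and all(results.values()):
        print("All MyTestSuite tests passed")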

In pytest, how to skip or xfail certain fixtures?

I have a heavily-fixtured test function which fails (as it should) with certain fixture inputs. How can I indicate this? This is what I'm doing now, and maybe there's a better way. I'm pretty new to py.test so I'd appreciate any tips.
The next part is all the input fixtures. FYI, example_datapackage_path is defined in conftest.py.
@pytest.fixture(params=[None, 'pooled_col', 'phenotype_col'])
def metadata_key(self, request):
    return request.param

@pytest.fixture(params=[None, 'feature_rename_col'])
def expression_key(self, request):
    return request.param

@pytest.fixture(params=[None, 'feature_rename_col'])
def splicing_key(self, request):
    return request.param

@pytest.fixture
def datapackage(self, example_datapackage_path, metadata_key,
                expression_key, splicing_key):
    with open(example_datapackage_path) as f:
        datapackage = json.load(f)
    datatype_to_key = {'metadata': metadata_key,
                       'expression': expression_key,
                       'splicing': splicing_key}
    for datatype, key in datatype_to_key.items():
        if key is not None:
            resource = name_to_resource(datapackage, datatype)
            if key in resource:
                resource.pop(key)
    return datapackage

@pytest.fixture
def datapackage_dir(self, example_datapackage_path):
    return os.path.dirname(example_datapackage_path)
And here's the test itself.
def test_from_datapackage(self, datapackage, datapackage_dir):
    import flotilla
    from flotilla.external import get_resource_from_name

    study = flotilla.Study.from_datapackage(datapackage, datapackage_dir,
                                            load_species_data=False)
    metadata_resource = get_resource_from_name(datapackage, 'metadata')
    expression_resource = get_resource_from_name(datapackage, 'expression')
    splicing_resource = get_resource_from_name(datapackage, 'splicing')

    phenotype_col = 'phenotype' if 'phenotype_col' \
        not in metadata_resource else metadata_resource['phenotype_col']
    pooled_col = None if 'pooled_col' not in metadata_resource else \
        metadata_resource['pooled_col']
    expression_feature_rename_col = 'gene_name' if \
        'feature_rename_col' not in expression_resource \
        else expression_resource['feature_rename_col']
    splicing_feature_rename_col = 'gene_name' if \
        'feature_rename_col' not in splicing_resource \
        else splicing_resource['feature_rename_col']

    assert study.metadata.phenotype_col == phenotype_col
    assert study.metadata.pooled_col == pooled_col
    assert study.expression.feature_rename_col \
        == expression_feature_rename_col
    assert study.splicing.feature_rename_col == splicing_feature_rename_col
What I would like to do is say, in metadata_key, that when the parameter is pooled_col or phenotype_col the test will fail. I looked at pytest: Skip and xfail: dealing with tests that cannot succeed, but it only talks about skip and xfail for parametrized tests, not fixtures.
In your datapackage or expression_key fixtures you can use pytest.xfail and pytest.skip as described here. For example:
@pytest.fixture
def datapackage(self, example_datapackage_path, metadata_key,
                expression_key, splicing_key):
    if metadata_key == 'pooled_col':
        pytest.skip('metadata key is "pooled_col"')
    ...
You can also use pytest.mark.xfail in fixture parameters as in this example:
@pytest.fixture(params=['a', pytest.mark.xfail('b'), 'c'])
def fx1(request):
    return request.param

def test_spam(fx1):
    assert fx1
If you prefer to skip these tests this seems to work:
@pytest.fixture(
    params=['a', pytest.mark.skipif(True, reason='reason')('b'), 'c'])
def fx1(request):
    return request.param

def test_spam(fx1):
    assert fx1
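Note that applying marks inside the params list like this was removed in later pytest versions; on current pytest the equivalent sketch uses pytest.param to attach the marks:

import pytest

# pytest.param wraps an individual parameter value and attaches marks to it.
@pytest.fixture(params=[
    'a',
    pytest.param('b', marks=pytest.mark.xfail),
    pytest.param('c', marks=pytest.mark.skip(reason='reason')),
])
def fx1(request):
    return request.param

def test_spam(fx1):
    assert fx1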

py.test: Get expected values for test functions for different fixtures from configuration

I want to set up tests using py.test with two (or more) fixtures for different levels in my project.
For each fixture I want to execute different test functions, which shall get the expected values as parameters depending on the fixture used for the test.
This is what I want to do:
def getGroups(boardstr, xlen, ylen):
    board = Board(boardstr, xlen, ylen)
    groups = MyClass.findGroups(board.get_field())
    return groups

@pytest.fixture(scope='module')
def groups_3():
    # ... setup for level 3
    return getGroups(boardstr, xlen, ylen)

@pytest.fixture(scope='module')
def groups_10():
    # ... setup for level 10
    return getGroups(boardstr, xlen, ylen)

# This is my test data, which I want to use.
expected_values = {
    "groups_3": {
        "test_total_groups": 9,
        "test_total_clickable_groups": 5,
        "test_total_colors": 3
    },
    "groups_10": {
        "test_total_groups": 22,
        "test_total_clickable_groups": 7,
        "test_total_colors": 3
    },
}

# "groups" shall be the fixture for the following test functions,
# and the test methods shall be executed with groups_3 and groups_10 as fixture.
def test_total_groups(groups, expected):
    assert len(groups) == expected

def test_total_clickable_groups(groups, expected):
    assert len([grp for grp in groups if grp.clickable is True]) == expected

def test_total_colors(groups, expected):
    assert len(np.unique([grp.color for grp in groups])) == expected
Is there a way to achieve this using py.test? Is this possible by using the parametrization feature?
I tried some variants like:
@pytest.mark.parametrize("groups, expected", [
    (groups_3(), 5),
    (groups_10(), 7),
])
def test_total_clickable_groups(groups, expected):
    assert len([grp for grp in groups if grp.clickable is True]) == expected
but I didn't succeed.
Perhaps it is useful for somebody. I found a way to do it (inspired by holgerkrekel.net/2009/05/13/parametrizing-python-tests-generalized/):
def getGroups(boardstr, xlen, ylen):
    board = Board(boardstr, xlen, ylen)
    groups = MyClass.findGroups(board.get_field())
    return groups

def groups_3():
    # ... setup for level 3
    return getGroups(boardstr, xlen, ylen)

def groups_10():
    # ... setup for level 10
    return getGroups(boardstr, xlen, ylen)

# decorator function
def params(funcarglist):
    def wrapper(function):
        function.funcarglist = funcarglist
        return function
    return wrapper

def pytest_generate_tests(metafunc):
    # called once per test function
    for funcargs in getattr(metafunc.function, 'funcarglist', ()):
        # schedule a new test function run with applied **funcargs
        metafunc.addcall(funcargs=funcargs)

class TestClass:
    groups_3 = groups_3()
    groups_10 = groups_10()

    @params([dict(g=groups_3, e=9),
             dict(g=groups_10, e=22)])
    def test_total_groups(self, g, e):
        assert len(g) == e

    @params([dict(g=groups_3, e=5),
             dict(g=groups_10, e=7)])
    def test_total_clickable_groups(self, g, e):
        assert len([grp for grp in g if grp.clickable is True]) == e

    @params([dict(g=groups_3, e=3),
             dict(g=groups_10, e=3)])
    def test_total_colors(self, g, e):
        assert len(np.unique([grp.color for grp in g])) == e
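Note that metafunc.addcall has since been removed from pytest; on current versions the same decorator-driven approach can be expressed with metafunc.parametrize (a sketch, keeping the params decorator and TestClass from above unchanged):

def pytest_generate_tests(metafunc):
    # Same idea as addcall, but using metafunc.parametrize, which is the
    # supported API in newer pytest versions.
    funcarglist = getattr(metafunc.function, 'funcarglist', None)
    if funcarglist:
        argnames = sorted(funcarglist[0])
        metafunc.parametrize(
            argnames,
            [[funcargs[name] for name in argnames] for funcargs in funcarglist]
        )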
