I have a heavily-fixtured test function which fails (as it should) with certain fixture inputs. How can I indicate this? This is what I'm doing now, and maybe there's a better way. I'm pretty new to py.test so I'd appreciate any tips.
The next part is all the input fixtures. FYI, example_datapackage_path is defined in conftest.py.
@pytest.fixture(params=[None, 'pooled_col', 'phenotype_col'])
def metadata_key(self, request):
    return request.param

@pytest.fixture(params=[None, 'feature_rename_col'])
def expression_key(self, request):
    return request.param

@pytest.fixture(params=[None, 'feature_rename_col'])
def splicing_key(self, request):
    return request.param

@pytest.fixture
def datapackage(self, example_datapackage_path, metadata_key,
                expression_key, splicing_key):
    with open(example_datapackage_path) as f:
        datapackage = json.load(f)
    datatype_to_key = {'metadata': metadata_key,
                       'expression': expression_key,
                       'splicing': splicing_key}
    for datatype, key in datatype_to_key.items():
        if key is not None:
            resource = name_to_resource(datapackage, datatype)
            if key in resource:
                resource.pop(key)
    return datapackage

@pytest.fixture
def datapackage_dir(self, example_datapackage_path):
    return os.path.dirname(example_datapackage_path)
And here's the test itself.
def test_from_datapackage(self, datapackage, datapackage_dir):
    import flotilla
    from flotilla.external import get_resource_from_name

    study = flotilla.Study.from_datapackage(datapackage, datapackage_dir,
                                            load_species_data=False)

    metadata_resource = get_resource_from_name(datapackage, 'metadata')
    expression_resource = get_resource_from_name(datapackage, 'expression')
    splicing_resource = get_resource_from_name(datapackage, 'splicing')

    phenotype_col = metadata_resource.get('phenotype_col', 'phenotype')
    pooled_col = metadata_resource.get('pooled_col')
    expression_feature_rename_col = expression_resource.get(
        'feature_rename_col', 'gene_name')
    splicing_feature_rename_col = splicing_resource.get(
        'feature_rename_col', 'gene_name')

    assert study.metadata.phenotype_col == phenotype_col
    assert study.metadata.pooled_col == pooled_col
    assert study.expression.feature_rename_col == expression_feature_rename_col
    assert study.splicing.feature_rename_col == splicing_feature_rename_col
What I would like to do is, in metadata_key, indicate that when the parameter is pooled_col or phenotype_col, the test will fail. I looked at pytest: Skip and xfail: dealing with tests that cannot succeed, but it only talked about skip and xfail for parametrized tests, not fixtures.
In your datapackage or expression_key fixtures you can use pytest.xfail and pytest.skip as described here. For example:
@pytest.fixture
def datapackage(self, example_datapackage_path, metadata_key,
                expression_key, splicing_key):
    if metadata_key == 'pooled_col':
        pytest.skip('metadata key is "pooled_col"')
    ...
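If you'd rather have the test reported as an expected failure instead of skipped, the same pattern works with pytest.xfail. A minimal sketch of the same fixture; everything besides the pytest.xfail call is unchanged:
@pytest.fixture
def datapackage(self, example_datapackage_path, metadata_key,
                expression_key, splicing_key):
    if metadata_key == 'phenotype_col':
        # imperatively mark any test using this fixture as an expected failure
        pytest.xfail('metadata key is "phenotype_col"')
    ...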
You can also mark individual fixture parameters with pytest.mark.xfail (applied via pytest.param) as in this example:
@pytest.fixture(params=['a', pytest.param('b', marks=pytest.mark.xfail), 'c'])
def fx1(request):
    return request.param

def test_spam(fx1):
    assert fx1
If you prefer to skip these tests, this seems to work:
@pytest.fixture(
    params=['a', pytest.param('b', marks=pytest.mark.skipif(True, reason='reason')), 'c'])
def fx1(request):
    return request.param

def test_spam(fx1):
    assert fx1
I am using pytest parametrized fixtures, which have variable return values.
This is a simplified example:
import pytest

@pytest.fixture
def mocked_value(mock_param):
    if mock_param:
        mocked_value = "true"
    else:
        mocked_value = "false"
    yield mocked_value

@pytest.mark.parametrize("mock_param", [True, False])
def test_both_mocked_parameters(mocked_value, mock_param):
    assert mocked_value == str(mock_param).lower()

@pytest.mark.parametrize("mock_param", [True])
def test_just_one_mocked_param(mocked_value, mock_param):
    assert mocked_value == "true"
Is there a way to make the pytest fixture have a default param given to it? Something like this, except built into the single fixture definition:
def _mocked_function(mock_param):
    if mock_param:
        mocked_value = "true"
    else:
        mocked_value = "false"
    return mocked_value

@pytest.fixture
def mocked_value_with_default():
    yield _mocked_function(True)

@pytest.fixture
def mocked_value(mock_param):
    yield _mocked_function(mock_param)

@pytest.mark.parametrize("mock_param", [True, False])
def test_both_mocked_parameters(mocked_value, mock_param):
    assert mocked_value == str(mock_param).lower()

def test_just_one_mocked_param(mocked_value_with_default):
    assert mocked_value_with_default == "true"
The above works; however, it would be much cleaner to have just one fixture definition handle both use cases. How do I do this?
You could use fixture parametrization:
https://docs.pytest.org/en/stable/example/parametrize.html#indirect-parametrization
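For example, with indirect parametrization the fixture reads its parameter from request.param and can fall back to a default when the test doesn't parametrize it. A minimal sketch, using a condensed version of the question's _mocked_function helper:
import pytest

def _mocked_function(mock_param):
    return "true" if mock_param else "false"

@pytest.fixture
def mocked_value(request):
    # use the indirect parameter when one was supplied, else default to True
    yield _mocked_function(getattr(request, 'param', True))

@pytest.mark.parametrize('mocked_value, expected',
                         [(True, 'true'), (False, 'false')],
                         indirect=['mocked_value'])
def test_both_mocked_parameters(mocked_value, expected):
    assert mocked_value == expected

def test_just_one_mocked_param(mocked_value):
    assert mocked_value == 'true'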
I have an example where I have essentially a nested list of fixtures, i.e. fn2 below depends on the value of the fixture fn1 and is not known ahead of time. Each of these two functions needs to call a server to retrieve a list of parameters.
@pytest.fixture(params=generated_list())
def fn1(request):
    return request.param

@pytest.fixture(params=generates_list_2(fn1))
def fn2(request):
    return request.param

def test_fn(fn2):
    assert fn2 == 0
While it's not a great answer, this will work. It exploits a hook in the test generation procedure to manually generate the combinations of params.
def generated_list():
    return x

def generates_list_2(fn1):
    return x

def pytest_generate_tests(metafunc):
    if 'fn1' in metafunc.fixturenames and 'fn2' in metafunc.fixturenames:
        metafunc.parametrize(
            "fn1, fn2",
            [(i, j) for i in generated_list() for j in generates_list_2(i)]
        )

@pytest.fixture()
def fn1(request):
    return request.param

@pytest.fixture()
def fn2(request):
    return request.param

def test_fn(fn1, fn2):
    assert fn2 == 0
Yeah, you can nest fixtures. Just use one fixture as one of the arguments of another fixture, as you would use a fixture in a test.
A fixed-up version of your code would look like this:
@pytest.fixture(params=generated_list())
def fn1(request):
    return request.param

@pytest.fixture
def fn2(fn1):
    # params= is evaluated at collection time, so it cannot see fn1's value;
    # derive fn2's value from fn1 inside the fixture body instead
    return generates_list_2(fn1)

def test_fn(fn2):
    assert fn2 == 0
I'm using pytest and have multiple tests to run to check an issue.
I would like to split all tests into different functions like this:
# test_myTestSuite.py

@pytest.mark.issue(123)
class MyTestSuite():
    def test_part_1(self):
        result = do_something()
        assert result == True

    def test_part_2(self):
        result = do_an_other_something()
        assert result == 'ok'
Of course, I implemented issue in conftest.py:
# conftest.py

def pytest_addoption(parser):
    group = parser.getgroup('Issues')
    group.addoption('--issue', action='store',
                    dest='issue', default=0,
                    help='')
but I don't know how to hook in once after MyTestSuite has run and check that all of its tests passed correctly.
Does anyone have any ideas?
PS: this is my first post on StackOverflow.
Try using return values as the simplest kind of positive debug confirmation, as shown below.
@pytest.mark.issue(123)
class MyTestSuite():
    def test_part_1(self):
        result = do_something()
        assert result == True
        return 'tp1', True

    def test_part_2(self):
        result = do_an_other_something()
        assert result == 'ok'
        return 'tp2', True
...and then where you run your tests from:
x = MyTestSuite().test_part_1()
if x[1] == True:
    print('Test %s completed correctly' % x[0])
The result after running test_part_1 is either Test tp1 completed correctly or an AssertionError.
Collecting assertion errors:
collected_errors = []

def test_part_1():
    testname = 'tp1'
    try:
        result = do_something()
        assert result == True
        return testname, True
    except Exception as error:
        info = (testname, error)
        collected_errors.append(info)
You can find more assertion flavours here on SO.
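That said, the original goal of hooking in once after all of MyTestSuite's tests and checking that they passed is better served by pytest's reporting hooks than by return values. A minimal sketch for conftest.py; the hook names and report attributes are standard pytest, while the filtering on the issue marker is only illustrative:
# conftest.py
issue_results = {}

def pytest_runtest_logreport(report):
    # record the outcome of each test's call phase, keyed by node id;
    # 'issue' appears in report.keywords for tests carrying the issue marker
    if report.when == 'call' and 'issue' in report.keywords:
        issue_results[report.nodeid] = report.passed

def pytest_sessionfinish(session, exitstatus):
    # runs once after the whole session; check that every marked test passed
    if issue_results and all(issue_results.values()):
        print('all tests for the issue passed')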
Is it possible to access request.param from the test function?
Setup Fixture with params
@pytest.fixture(params=[0, 10, 20, 30])
def wallet(request):
    return Wallet(request.param)
Test
def test_init_balance(wallet):
    assert wallet.balance == 10
EDIT: Added a collections.namedtuple solution
What I've got working so far:
@pytest.fixture(params=[20, 30, 40])
def wallet(request):
    FixtureHelper = collections.namedtuple('fixtureHelper', ['wallet', 'request'])
    fh = FixtureHelper(Wallet(request.param), request)
    return fh
Then accessing it in the test
def test_spend_cash(wallet):
    wallet.wallet.spend_cash(wallet.request.param)
I would still appreciate a better solution!
Good answers in this duplicate question: In pytest, how can I access the parameters passed to a test?
You can access it with request.node.callspec.params:
def test_init_balance(request, wallet):
    assert wallet.balance == request.node.callspec.params.get("wallet")
Or you can refactor the fixtures slightly:
@pytest.fixture(params=[0, 10, 20, 30])
def balance(request):
    return request.param

@pytest.fixture
def wallet(balance):
    return Wallet(balance)

def test_init_balance(wallet, balance):
    assert wallet.balance == balance
For example:
@pytest.fixture()
def connection():
    return make_connection()

@pytest.fixture()
def database(request):
    connection = request.fixtures['connection']
    return create_database(connection)

@pytest.fixture()
def table(request):
    database = request.fixtures['database']
    return create_table(database)

@pytest.mark.usefixtures('database')
def test_whatever(connection, table):
    insert_some_data(table)
    connection.execute(...)
    ...
Can I do that with the current version of pytest, i.e. make fixtures depend on other fixtures in that non-hierarchical way?
You can do it this way:
@pytest.fixture()
def fixture1():
    return 'fixture1'

@pytest.fixture()
def fixture2(request):
    if 'fixture1' in request._funcargs:
        # fixture1 was requested earlier,
        # but most likely you don't need this check because getfixturevalue always works
        fixture1_instance = request.getfixturevalue('fixture1')
        return do_stuff(fixture1_instance)

def test_whatever(fixture1, fixture2):
    result = do_some_stuff(fixture1)
    assert result.supermethod(fixture2)
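Note also that for the use case in the question, plain nested fixture dependencies may already be enough: pytest caches each fixture once per test, so the same connection instance is shared by everything that requests it. A sketch, assuming the question's hypothetical make_connection/create_database/create_table helpers:
@pytest.fixture()
def connection():
    return make_connection()

@pytest.fixture()
def database(connection):
    # receives the very same connection instance the test function sees
    return create_database(connection)

@pytest.fixture()
def table(database):
    return create_table(database)

def test_whatever(connection, table):
    # connection is identical to the one database and table were built from
    insert_some_data(table)
    connection.execute(...)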