Access Pytest Fixture Params In Test - python

Is there a possibility to access the request.param from the test function?
Setup Fixture with params
@pytest.fixture(params=[0,10,20,30])
def wallet(request):
    return Wallet(request.param)
Test
def test_init_balance(wallet):
    assert wallet.balance == 10
EDIT: Added a collections.namedtuple solution
What I got working until now
@pytest.fixture(params=[20,30,40])
def wallet(request):
    FixtureHelper = collections.namedtuple('fixtureHelper', ['wallet', 'request'])
    fh = FixtureHelper(Wallet(request.param), request)
    return fh
Then accessing it in the test
def test_spend_cash(wallet):
    wallet.wallet.spend_cash(wallet.request.param)
I would still appreciate a better solution!

Good answers in this duplicate question: In pytest, how can I access the parameters passed to a test?
You can access it with request.node.callspec.params:
def test_init_balance(request, wallet):
    assert wallet.balance == request.node.callspec.params.get("wallet")
Or you can refactor the fixtures slightly:
@pytest.fixture(params=[0, 10, 20, 30])
def balance(request):
    return request.param

@pytest.fixture
def wallet(balance):
    return Wallet(balance)

def test_init_balance(wallet, balance):
    assert wallet.balance == balance

Related

How to provide default values to single parameterized pytest fixture with arguments?

I am using pytest parametrized fixtures, which have variable return values.
This is a simplified example:
import pytest

@pytest.fixture
def mocked_value(mock_param):
    if mock_param:
        mocked_value = "true"
    else:
        mocked_value = "false"
    yield mocked_value

@pytest.mark.parametrize(("mock_param"), [True, False])
def test_both_mocked_parameters(mocked_value, mock_param):
    assert mocked_value == str(mock_param).lower()

@pytest.mark.parametrize(("mock_param"), [True])
def test_just_one_mocked_param(mocked_value, mock_param):
    assert mocked_value == "true"
Is there a way to make the pytest fixture have a default param given to it? Something like this, except built into the single fixture definition:
def _mocked_function(mock_param):
    if mock_param:
        mocked_value = "true"
    else:
        mocked_value = "false"
    return mocked_value

@pytest.fixture
def mocked_value_with_default():
    yield _mocked_function(True)

@pytest.fixture
def mocked_value(mock_param):
    yield _mocked_function(mock_param)

@pytest.mark.parametrize(("mock_param"), [True, False])
def test_both_mocked_parameters(mocked_value, mock_param):
    assert mocked_value == str(mock_param).lower()

def test_just_one_mocked_param(mocked_value_with_default):
    assert mocked_value_with_default == "true"
The above works, however it would be much cleaner to have just one fixture definition handling both use cases. How do I do this?
You could use fixture parametrization:
https://docs.pytest.org/en/stable/example/parametrize.html#indirect-parametrization

pytest fixture for mocked object setup?

I'm using pytest and I have 5+ tests that have the exactly same first five lines. Is it possible to create a setup function for the repeated code?
@mock.patch('abcde.xyz')
def test_1(mocked_function):
    x_mock = mock.Mock(X)
    x_mock.producer = mock.Mock()
    x_mock.producer.func2 = lambda : None
    mocked_function.return_value = x_mock  # xyz() returns x_mock
    ......

@mock.patch('abcde.xyz')
def test_2(mocked_function):
    x_mock = mock.Mock(X)
    x_mock.producer = mock.Mock()
    x_mock.producer.func2 = lambda : None
    mocked_function.return_value = x_mock
    ......

@mock.....  # more
You should consider using a fixture as it recommended over classic setup/teardown methods.
From pytest documentation:
While these setup/teardown methods are simple and familiar to those coming from a unittest or nose background, you may also consider using pytest’s more powerful fixture mechanism which leverages the concept of dependency injection, allowing for a more modular and more scalable approach for managing test state, especially for larger projects and for functional testing.
For your example - considering that mocked_function is itself a fixture - it would be:
@pytest.fixture()
def mock_abcde_xyz(mocker):
    mocker.patch("abcde.xyz")

@pytest.fixture()
@pytest.mark.usefixtures("mock_abcde_xyz")
def patched_mocked_function(mocker, mocked_function):
    x_mock = mocker.Mock(X)
    x_mock.producer = mocker.Mock()
    x_mock.producer.func2 = lambda : None
    mocked_function.return_value = x_mock
    return mocked_function

def test_1(patched_mocked_function):
    ......

def test_2(patched_mocked_function):
    ......
Note that I used pytest-mock instead of mock so that you can use 'mocker' fixture.
If you don't want pytest-mock, just do:
@pytest.fixture()
@mock.patch('abcde.xyz')
def patched_mocked_function(mocked_function):
    x_mock = mock.Mock(X)
    x_mock.producer = mock.Mock()
    x_mock.producer.func2 = lambda : None
    mocked_function.return_value = x_mock
    return mocked_function

def test_1(patched_mocked_function):
    ......

def test_2(patched_mocked_function):
    ......
Yes, you can implement a setUp() (or setUpClass()) method.
Another way would be to implement a helper function, as you would in any other Python code.

Is there a way to nest fixture parametrization in pytest?

I have an example where I have essentially a nested list of fixtures i.e fn2 below depends on the value of the fixture fn1 and is not know ahead of time. Each of these two functions needs to call a server to retrieve a list of parameters.
@pytest.fixture(params=generated_list())
def fn1(request):
    return request.param

@pytest.fixture(params=generates_list_2(fn1))
def fn2(request):
    return request.param

def test_fn(fn2):
    assert fn2 == 0
While it's not a great answer this will work. It exploits a hook in the test generation procedure to manually generate the combinations of params.
def generated_list():
    return x

def generates_list_2(fn1):
    return x

def pytest_generate_tests(metafunc):
    if 'fn1' in metafunc.fixturenames and 'fn2' in metafunc.fixturenames:
        metafunc.parametrize(
            "fn1, fn2",
            [(i, j) for i in generated_list() for j in generates_list_2(i)]
        )

@pytest.fixture()
def fn1(request):
    return request.param

@pytest.fixture()
def fn2(request):
    return request.param

def test_fn(fn1, fn2):
    assert fn2 == 0
Yeah, you can nest fixtures. Just use one fixture as one of the arguments of other fixture as you would use a fixture in a test.
Your code would look like this:
@pytest.fixture(params=generated_list())
def fn1(request):
    return request.param

@pytest.fixture(params=generates_list_2(fn1))
def fn2(request, fn1):
    return request.param

def test_fn(fn2):
    assert fn2 == 0

Is it possible to retrieve other requested fixtures from fixture request?

For example:
@pytest.fixture()
def connection():
    return make_connection()

@pytest.fixture()
def database(request):
    connection = request.fixtures['connection']
    return create_database(connection)

@pytest.fixture()
def table(request):
    database = request.fixtures['database']
    return create_table(database)

@pytest.mark.usefixtures('database')
def test_whatever(connection, table):
    insert_some_data(table)
    connection.execute(...)
    ...
Can I do that with current version of Pytest - make fixtures depending on other fixtures in that non-hierarchical way?
You can do it this way:
@pytest.fixture()
def fixture1():
    return 'fixture1'

@pytest.fixture()
def fixture2(request):
    if 'fixture1' in request._funcargs:
        # fixture1 was requested earlier
        # but most likely you don't need that check because getfuncargvalue always works
        fixture1_instance = request.getfuncargvalue('fixture1')
    return do_stuff(fixture1_instance)

def test_whatever(fixture1, fixture2):
    result = do_some_stuff(fixture1)
    assert result.supermethod(fixture2)

In pytest, how to skip or xfail certain fixtures?

I have a heavily-fixtured test function which fails (as it should) with certain fixture inputs. How can I indicate this? This is what I'm doing now, and maybe there's a better way. I'm pretty new to py.test so I'd appreciate any tips.
The next part is all the input fixtures. FYI, example_datapackage_path is defined in conftest.py
@pytest.fixture(params=[None, 'pooled_col', 'phenotype_col'])
def metadata_key(self, request):
    return request.param

@pytest.fixture(params=[None, 'feature_rename_col'])
def expression_key(self, request):
    return request.param

@pytest.fixture(params=[None, 'feature_rename_col'])
def splicing_key(self, request):
    return request.param

@pytest.fixture
def datapackage(self, example_datapackage_path, metadata_key,
                expression_key, splicing_key):
    with open(example_datapackage_path) as f:
        datapackage = json.load(f)
    datatype_to_key = {'metadata': metadata_key,
                       'expression': expression_key,
                       'splicing': splicing_key}
    for datatype, key in datatype_to_key.iteritems():
        if key is not None:
            resource = name_to_resource(datapackage, datatype)
            if key in resource:
                resource.pop(key)
    return datapackage

@pytest.fixture
def datapackage_dir(self, example_datapackage_path):
    return os.path.dirname(example_datapackage_path)
And here's the test itself.
def test_from_datapackage(self, datapackage, datapackage_dir):
    import flotilla
    from flotilla.external import get_resource_from_name
    study = flotilla.Study.from_datapackage(datapackage, datapackage_dir,
                                            load_species_data=False)
    metadata_resource = get_resource_from_name(datapackage, 'metadata')
    expression_resource = get_resource_from_name(datapackage,
                                                 'expression')
    splicing_resource = get_resource_from_name(datapackage, 'splicing')
    phenotype_col = 'phenotype' if 'phenotype_col' \
        not in metadata_resource else metadata_resource['phenotype_col']
    pooled_col = None if 'pooled_col' not in metadata_resource else \
        metadata_resource['pooled_col']
    expression_feature_rename_col = 'gene_name' if \
        'feature_rename_col' not in expression_resource \
        else expression_resource['feature_rename_col']
    splicing_feature_rename_col = 'gene_name' if \
        'feature_rename_col' not in splicing_resource \
        else splicing_resource['feature_rename_col']
    assert study.metadata.phenotype_col == phenotype_col
    assert study.metadata.pooled_col == pooled_col
    assert study.expression.feature_rename_col \
        == expression_feature_rename_col
    assert study.splicing.feature_rename_col == splicing_feature_rename_col
What I would like to do is in metadata_key, say that when the parameter is pooled_col or phenotype_col, that it will fail. I looked in pytest: Skip and xfail: dealing with tests that can not succeed, but it only talked about skip and xfail for parametrized test, but not fixtures.
In your datapackage or expression_key fixtures you can use pytest.xfail and pytest.skip as described here. For example:
@pytest.fixture
def datapackage(self, example_datapackage_path, metadata_key,
                expression_key, splicing_key):
    if metadata_key == 'pooled_col':
        pytest.skip('metadata key is "pooled_col"')
    ...
You can also use pytest.mark.xfail in fixture parameters as in this example:
@pytest.fixture(params=['a', pytest.mark.xfail('b'), 'c'])
def fx1(request):
    return request.param

def test_spam(fx1):
    assert fx1
If you prefer to skip these tests this seems to work:
@pytest.fixture(
    params=['a', pytest.mark.skipif(True, reason='reason')('b'), 'c'])
def fx1(request):
    return request.param

def test_spam(fx1):
    assert fx1

Categories