Parametrize session fixture based on config option - python

I'm using pytest and have something like the following in conftest.py:
def pytest_addoption(parser):
    parser.addoption('--foo', required=True, help='Foo name.')

@pytest.fixture(scope='session')
def foo(pytestconfig):
    with Foo(pytestconfig.getoption('foo')) as foo_obj:
        yield foo_obj
I'd like to change the --foo option to
parser.addoption('--foo', action='append', help='Foo names.')
and have a separate Foo object with session scope generated for each provided name. Ordinarily, I'd use pytest_generate_tests to parametrize a fixture in this way. That is,
def pytest_generate_tests(metafunc):
    if 'foo' in metafunc.fixturenames:
        metafunc.parametrize('foo', map(Foo, metafunc.config.getoption('foo')))
However, if I'm understanding correctly how pytest_generate_tests works, Foo objects will be created separately for each test function, thus defeating the whole point of a session fixture.

Seems like the easiest way to solve this is to make the fixture indirect. That is, you pass the configuration values to the fixture, but then let it manage its own setup. That way pytest will honour the fixture's scope setting. For example:
def pytest_generate_tests(metafunc):
    # assuming --foo has been set as a possible parameter for pytest
    if "foo" in metafunc.fixturenames and metafunc.config.option.foo is not None:
        metafunc.parametrize("foo", metafunc.config.option.foo, indirect=True)

@pytest.fixture(scope='session')
def foo(request):
    if not hasattr(request, 'param'):
        pytest.skip('no --foo option set')
    elif isinstance(request.param, str):
        return Foo(request.param)
    else:
        raise ValueError("invalid internal test config")
Altogether this looks like:
conftest.py
def pytest_addoption(parser):
    parser.addoption('--foo', action='append', help='Foo value')
test_something.py
import pytest

# keep track of how many Foos have been instantiated
FOO_COUNTER = 0

class Foo:
    def __init__(self, value):
        global FOO_COUNTER
        self.value = value
        self.count = FOO_COUNTER
        FOO_COUNTER += 1
        print(f'creating {self} number {self.count}')

    def __repr__(self):
        return f'Foo({self.value})'

def pytest_generate_tests(metafunc):
    if "foo" in metafunc.fixturenames and metafunc.config.option.foo is not None:
        metafunc.parametrize("foo", metafunc.config.option.foo, indirect=True)

@pytest.fixture(scope='session')
def foo(request):
    if not hasattr(request, 'param'):
        pytest.skip('no --foo option set')
    elif isinstance(request.param, str):
        return Foo(request.param)
    else:
        raise ValueError("invalid internal test config")

def test_bar(foo):
    assert isinstance(foo, Foo)
    assert foo.value in list('abx')

def test_baz(foo):
    assert isinstance(foo, Foo)
    assert foo.value in list('aby')

# test name is to encourage pytest to run this test last (because of the side
# effects the Foo class has on FOO_COUNTER)
def test_zzz_last():
    # only passes when exactly two --foo options are set
    assert FOO_COUNTER == 2
If run with exactly two --foo options, test_zzz_last passes. With one or three --foo options this test fails. This demonstrates that exactly one instance of Foo is created per --foo option, and each instance is shared between tests. Zero --foo options will cause any test requiring the foo fixture to be skipped.
If a --foo option is given a value that is not a, b, x or y then both test_bar and test_baz will fail. Thus we can see that our configuration options are making it into the foo fixture.
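For example, running the files above with two --foo values might look like this (-s is only there so the print output from Foo.__init__ is visible):
pytest -s test_something.py --foo a --foo b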


How to get caller name inside pytest fixture?

Assume we have:
@pytest.fixture()
def setup():
    print('All set up!')
    return True

def foo(setup):
    print('I am using a fixture to set things up')
    setup_done = setup
I'm looking for a way to get to know caller function name (in this case: foo) from within setup fixture.
So far I have tried:
import inspect

@pytest.fixture()
def setup():
    daddy_function_name = inspect.stack()[1][3]
    print(daddy_function_name)
    print('All set up!')
    return True
But what gets printed is: call_fixture_func
How do I get foo from printing daddy_function_name?
You can use the built-in request fixture in your own fixture:
The request fixture is a special fixture providing information of the requesting test function.
Its node attribute is the underlying collection node (depends on current request scope).
import pytest

@pytest.fixture()
def setup(request):
    return request.node.name

def test_foo(setup):
    assert setup == "test_foo"
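Applied to the setup fixture from the question, a minimal sketch could look like this (the fixture prints the requesting test's name instead of inspecting the call stack):
import pytest

@pytest.fixture()
def setup(request):
    # request.node.name is the name of the requesting test function
    print(f'All set up for {request.node.name}!')
    return True

def test_foo(setup):
    assert setup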

Pytest - How to override fixture parameter list from command line?

Consider the following fixture
@pytest.fixture(params=['current', 'legacy'])
def baseline(request):
    return request.param
I wonder if there is a way to launch pytest so it overrides the fixture parameter list with the value(s) given on the command line, i.e.:
pytest --baseline legacy tests/
The above should effectively result in params=['legacy'].
Go with dynamic parametrization via Metafunc.parametrize:
# conftest.py
import pytest

@pytest.fixture
def baseline(request):
    return request.param

def pytest_addoption(parser):
    parser.addoption('--baseline', action='append', default=[],
                     help='baseline (one or more possible)')

def pytest_generate_tests(metafunc):
    default_opts = ['current', 'legacy']
    baseline_opts = metafunc.config.getoption('baseline') or default_opts
    if 'baseline' in metafunc.fixturenames:
        metafunc.parametrize('baseline', baseline_opts, indirect=True)
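The output below assumes a minimal test module along these lines (the answer only refers to it by name, so this file is an assumption):
# test_spam.py (assumed; not shown above)
def test_eggs(baseline):
    assert baseline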
Usage without parameters yields two default tests:
$ pytest test_spam.py -sv
...
test_spam.py::test_eggs[current] PASSED
test_spam.py::test_eggs[legacy] PASSED
Passing --baseline overwrites the defaults:
$ pytest test_spam.py -sv --baseline=foo --baseline=bar --baseline=baz
...
test_spam.py::test_eggs[foo] PASSED
test_spam.py::test_eggs[bar] PASSED
test_spam.py::test_eggs[baz] PASSED
You can also implement "always-in-use" defaults, so additional params are always added to them:
def pytest_addoption(parser):
    parser.addoption('--baseline', action='append', default=['current', 'legacy'],
                     help='baseline (one or more possible)')

def pytest_generate_tests(metafunc):
    baseline_opts = metafunc.config.getoption('baseline')
    if 'baseline' in metafunc.fixturenames and baseline_opts:
        metafunc.parametrize('baseline', baseline_opts, indirect=True)
Now the test invocation will always include current and legacy params:
$ pytest test_spam.py -sv --baseline=foo --baseline=bar --baseline=baz
...
test_spam.py::test_eggs[current] PASSED
test_spam.py::test_eggs[legacy] PASSED
test_spam.py::test_eggs[foo] PASSED
test_spam.py::test_eggs[bar] PASSED
test_spam.py::test_eggs[baz] PASSED

How can I test if a pytest fixture raises an exception?

Use case: In a pytest test suite I have a fixture (declared with @pytest.fixture) which raises exceptions if command line options for its configuration are missing. I've written a test for this fixture using xfail:
import pytest
from <module> import <exception>

@pytest.mark.xfail(raises=<exception>)
def test_fixture_with_missing_options_raises_exception(rc_visard):
    pass
However, the output after running the tests does not report the test as passed but as "xfailed" instead:
============================== 1 xfailed in 0.15 seconds ========================
In addition to that I am not able to test if the fixture raises the exception for specific missing command line options.
Is there a better approach to do this? Can I mock the pytest command line options somehow, so that I do not need to call specific tests via pytest --<commandline-option-a> <test-file-name>::<test-name>?
initial setup
Suppose you have a simplified project with conftest.py containing the following code:
import pytest

def pytest_addoption(parser):
    parser.addoption('--foo', action='store', dest='foo', default='bar',
                     help='--foo should be always bar!')

@pytest.fixture
def foo(request):
    fooval = request.config.getoption('foo')
    if fooval != 'bar':
        raise ValueError('expected foo to be "bar"; "{}" provided'.format(fooval))
    return fooval
It adds a new command line arg --foo and a fixture foo returning the passed arg, or bar if not specified. If anything else besides bar is passed via --foo, the fixture raises a ValueError.
You use the fixture as usual, for example
def test_something(foo):
    assert foo == 'bar'
Now let's test that fixture.
preparations
In this example, we need to do some simple refactoring first. Move the fixture and related code to a file named something other than conftest.py, for example my_plugin.py:
# my_plugin.py
import pytest

def pytest_addoption(parser):
    parser.addoption('--foo', action='store', dest='foo', default='bar',
                     help='--foo should be always bar!')

@pytest.fixture
def foo(request):
    fooval = request.config.getoption('foo')
    if fooval != 'bar':
        raise ValueError('expected foo to be "bar"; "{}" provided'.format(fooval))
    return fooval
In conftest.py, ensure the new plugin is loaded:
# conftest.py
pytest_plugins = ['my_plugin']
Run the existing test suite to ensure we didn't break anything; all tests should still pass.
activate pytester
pytest provides an extra plugin for writing plugin tests, called pytester. It is not activated by default, so you should do that manually. In conftest.py, extend the plugins list with pytester:
# conftest.py
pytest_plugins = ['my_plugin', 'pytester']
writing the tests
Once pytester is active, you get a new fixture available called testdir. It can generate and run pytest test suites from code. Here's what our first test will look like:
# test_foo_fixture.py
def test_all_ok(testdir):
    testdata = '''
    def test_sample(foo):
        assert True
    '''
    testconftest = '''
    pytest_plugins = ['my_plugin']
    '''
    testdir.makeconftest(testconftest)
    testdir.makepyfile(testdata)
    result = testdir.runpytest()
    result.assert_outcomes(passed=1)
It should be pretty obvious what happens here: we provide the test code as a string and testdir will generate a pytest project from it in a temporary directory. To ensure our foo fixture is available in the generated test project, we pass it in the generated conftest the same way as we do in the real one. testdir.runpytest() starts the test run, producing a result that we can inspect.
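If you want to assert more about the run, the returned result object also exposes the exit status and the captured output; a small sketch using the testdir result API:
    # e.g. inside test_all_ok, after runpytest():
    assert result.ret == 0                        # the pytest run exited successfully
    result.stdout.fnmatch_lines(['*1 passed*'])   # summary line appears in the captured output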
Let's add another test that checks whether foo will raise a ValueError:
def test_foo_valueerror_raised(testdir):
    testdata = '''
    def test_sample(foo):
        assert True
    '''
    testconftest = '''
    pytest_plugins = ['my_plugin']
    '''
    testdir.makeconftest(testconftest)
    testdir.makepyfile(testdata)
    result = testdir.runpytest('--foo', 'baz')
    result.assert_outcomes(error=1)
    result.stdout.fnmatch_lines([
        '*ValueError: expected foo to be "bar"; "baz" provided'
    ])
Here we execute the generated tests with --foo baz and verify afterwards that one test ended with an error and that the error output contains the expected error message.
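For orientation, the project layout assumed by this answer (file names taken from the snippets above) is roughly:
.
├── conftest.py          # pytest_plugins = ['my_plugin', 'pytester']
├── my_plugin.py         # the --foo option and the foo fixture
├── test_something.py    # the regular tests that use foo
└── test_foo_fixture.py  # the tests for the fixture itself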

Define a pytest fixture providing multiple arguments to test function

With pytest, I can define a fixture like so:
@pytest.fixture
def foo():
    return "blah"
And use it in a test like so:
def test_blah(foo):
    assert foo == "blah"
That's all very well. But what I want to do is define a single fixture function that "expands" to provide multiple arguments to a test function. Something like this:
@pytest.multifixture("foo,bar")
def foobar():
    return "blah", "whatever"

def test_stuff(foo, bar):
    assert foo == "blah" and bar == "whatever"
I want to define the two objects foo and bar together (not as separate fixtures) because they are related in some fashion. I may sometimes also want to define a fixture that depends on another fixture, but have the second fixture incorporate the result of the first and return it along with its own addition:
@pytest.fixture
def foo():
    return "blah"

@pytest.multifixture("foo,bar")
def foobar():
    f = foo()
    return f, some_info_related_to(f)
This example may seem silly, but in some cases foo is something like a Request object, and the bar object needs to be linked to that same request object. (That is, I can't define foo and bar as independent fixtures because I need both to be derived from a single request.)
In essence, what I want to do is decouple the name of the fixture function from the name of the test-function argument, so that I can define a fixture which is "triggered" by a particular set of argument names in a test function signature, not just a single argument whose name is the same as that of the fixture function.
Of course, I can always just return a tuple as the result of the fixture and then unpack it myself inside the test function. But given that pytest provides various magical tricks for automatically matching names to arguments, it seems like it's not unthinkable that it could magically handle this as well. Is such a thing possible with pytest?
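For reference, a minimal sketch of the tuple workaround mentioned above (returning both values from one fixture and unpacking them manually) might look like:
import pytest

@pytest.fixture
def foobar():
    f = "blah"
    # both values are created together, so they stay related
    return f, "whatever"

def test_stuff(foobar):
    foo, bar = foobar  # unpack manually inside the test
    assert foo == "blah" and bar == "whatever"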
You can now do this using pytest-cases:
from pytest_cases import fixture

@fixture(unpack_into="foo,bar")
def foobar():
    return "blah", "whatever"

def test_stuff(foo, bar):
    assert foo == "blah" and bar == "whatever"
See the documentation for more details (I'm the author, by the way).
Note: this solution does not work if your fixture depends on other fixtures with parameters.
I don't really know if there is a default solution in the pytest package, but you can make a custom one:
import pytest
from _pytest.mark import MarkInfo

def pytest_generate_tests(metafunc):
    test_func = metafunc.function
    if 'use_multifixture' in [name for name, ob in vars(test_func).items() if isinstance(ob, MarkInfo)]:
        result, func = test_func.use_multifixture.args
        params_names = result.split(',')
        params_values = list(func())
        metafunc.parametrize(params_names, [params_values])

def foobar():
    return "blah", "whatever"

@pytest.mark.use_multifixture("foo,bar", foobar)
def test_stuff(foo, bar):
    assert foo == "blah" and bar == "whatever"

def test_stuff2():
    assert 'blah' == "blah"
So we defined a pytest_generate_tests hook. This function
checks if the use_multifixture mark is on the test;
if the mark is on, it takes the variable names "foo,bar" and the function foobar that will be executed during generation:
@pytest.mark.use_multifixture("foo,bar", foobar)
You can do this with two pytest fixtures, like so:
import pytest

@pytest.fixture
def foo():
    return [object()]

# value derived from foo
@pytest.fixture
def bar(foo):
    return foo[0]

# totally independent fixture
@pytest.fixture
def baz():
    return object()

def test_fixtures(foo, bar, baz):
    assert foo[0] is bar
    assert foo[0] is not baz
    # both assertions will pass
Here the foo and bar fixtures have a specific relation between their values (they reference the same object). This is the same result as you wanted from your multi-fixture. (The baz fixture is included for comparison, and uses an unrelated instance of object().)
If both values are derived from some shared context you can put the shared context in a fixture, and then derive the final results independently.
@pytest.fixture
def shared():
    return [object()]

@pytest.fixture
def derived_1(shared):
    return shared[0]

@pytest.fixture
def derived_2(shared):
    return shared[-1]

def test_derived(derived_1, derived_2):
    assert derived_1 is derived_2
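Applied to the question's scenario, where bar must be linked to the same request object as foo, a hypothetical sketch of this shared-context pattern (request_obj and the dict contents are made-up placeholders) could look like:
import pytest

@pytest.fixture
def request_obj():
    # hypothetical stand-in for the shared Request object
    return {'id': 42}

@pytest.fixture
def foo(request_obj):
    return request_obj

@pytest.fixture
def bar(request_obj):
    # bar is derived from the very same request object
    return {'related_to': request_obj['id']}

def test_linked(foo, bar):
    assert bar['related_to'] == foo['id']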

Why does the pytest_generate_tests hook give a 'fixture not found' error, when it defines a fixture?

I want to implement the following using external data (arguments) via pytest_generate_tests. This example works:
@pytest.mark.parametrize('case', [1, 2, 3, 4])
def test_regression(case):
    print(case)
    assert True
Imagine I retrieve test data via an argv option. So I've created conftest.py, added an option --data, added a data fixture, and added the pytest_generate_tests hook. Please note that if I do not declare the data fixture, this will not work (even though the linked example has no fixture declaration): http://pytest.org/latest/example/parametrize.html#generating-parameters-combinations-depending-on-command-line
import pytest

def pytest_addoption(parser):
    parser.addoption('--data', action='store', default='', help='Specify testing data')

@pytest.fixture
def data(request):
    return request.config.getoption('--data')

def pytest_generate_tests(metafunc):
    if 'data' in metafunc.funcargnames:
        # imagine data.cases = [1, 2, 3, 4, 5]
        metafunc.parametrize('case', [1, 2, 3, 4, 5])
For example, I have an argument data, which itself contains some test data and some test cases. So, I define conftest.py the following way:
# conftest.py
import pytest

def pytest_addoption(parser):
    parser.addoption('--data', action='store', default='', help='Specify testing data')

@pytest.fixture
def data(request):
    return request.config.getoption('--data')

def pytest_generate_tests(metafunc):
    if 'data' in metafunc.fixturenames:
        # let's imagine data.cases = [1, 2, 3, 4, 5]
        metafunc.parametrize('case', [1, 2, 3, 4, 5])

# test.py (just removed the @pytest.mark.parametrize line)
def test_regression(case):
    print(case)
    assert True
The example above will give an error: fixture 'case' not found. But if I substitute case with data it will work:
# conftest.py
import pytest

def pytest_addoption(parser):
    parser.addoption('--data', action='store', default='', help='Specify testing data')

@pytest.fixture
def data(request):
    return request.config.getoption('--data')

def pytest_generate_tests(metafunc):
    if 'data' in metafunc.fixturenames:
        # let's imagine data.cases = [1, 2, 3, 4, 5]
        metafunc.parametrize('data', [1, 2, 3, 4, 5])

# test.py (just removed the @pytest.mark.parametrize line)
def test_regression(data):
    print(data)
    assert True
But I need the test parameter to be named case. What am I doing wrong?
I faced nearly the same problem today.
I cannot give you the actual root cause, but the problem seems to be that the function parameter which gets passed to the test function is expected to be a fixture. So if you are using data it works, because you are using a fixture.
If you are using case, there is no fixture found for case.
I solved this by doing the following:
def pytest_generate_tests(metafunc):
    if 'func' in metafunc.fixturenames:
        # do some stuff
        metafunc.parametrize('func', all_combinations)

def test_function_prototypes(func):
    assert func
This will throw the fixture 'func' not found error. I solved it by adding the following lines:
@pytest.fixture
def func(request):
    return request.param  # pass the param to the test function
I didn't find anything in the docs concerning the need to supply this fixture.
I also observed that commenting the fixture out again still lets the code work. I guess it's related to caching...
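Putting the workaround together for the original case parameter, a minimal sketch (mirroring the answer's dummy-fixture approach, with the option and values from the question) could look like:
# conftest.py
import pytest

def pytest_addoption(parser):
    parser.addoption('--data', action='store', default='', help='Specify testing data')

def pytest_generate_tests(metafunc):
    if 'case' in metafunc.fixturenames:
        # imagine the cases come from the --data argument
        metafunc.parametrize('case', [1, 2, 3, 4, 5])

# dummy fixture so that 'case' is recognised as a fixture name
@pytest.fixture
def case(request):
    return request.param  # hand the generated value to the test

# test.py
def test_regression(case):
    print(case)
    assert case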
