Define a pytest fixture providing multiple arguments to test function - python

With pytest, I can define a fixture like so:
@pytest.fixture
def foo():
    """Fixture returning a constant value used by the examples below."""
    return "blah"
And use it in a test like so:
def test_blah(foo):
    # `foo` is injected by pytest from the fixture of the same name.
    assert foo == "blah"
That's all very well. But what I want to do is define a single fixture function that "expands" to provide multiple arguments to a test function. Something like this:
# Hypothetical API sketched by the asker — `pytest.multifixture` does not exist:
@pytest.multifixture("foo,bar")
def foobar():
    return "blah", "whatever"


def test_stuff(foo, bar):
    assert foo == "blah" and bar == "whatever"
I want to define the two objects foo and bar together (not as separate fixtures) because they are related in some fashion. I may sometimes also want to define a fixture that depends on another fixture, but have the second fixture incorporate the result of the first and return it along with its own addition:
@pytest.fixture
def foo():
    return "blah"


# Hypothetical API — `bar` is derived from the same value as `foo`, so the
# two stay consistent within a single test:
@pytest.multifixture("foo,bar")
def foobar():
    f = foo()
    return f, some_info_related_to(f)
This example may seem silly, but in some cases foo is something like a Request object, and the bar object needs to be linked to that same request object. (That is, I can't define foo and bar as independent fixtures because I need both to be derived from a single request.)
In essence, what I want to do is decouple the name of the fixture function from the name of the test-function argument, so that I can define a fixture which is "triggered" by a particular set of argument names in a test function signature, not just a single argument whose name is the same as that of the fixture function.
Of course, I can always just return a tuple as the result of the fixture and then unpack it myself inside the test function. But given that pytest provides various magical tricks for automatically matching names to arguments, it seems like it's not unthinkable that it could magically handle this as well. Is such a thing possible with pytest?

You can now do this using pytest-cases:
from pytest_cases import fixture


@fixture(unpack_into="foo,bar")
def foobar():
    """One fixture whose tuple result is unpacked into two test arguments."""
    return "blah", "whatever"


def test_stuff(foo, bar):
    assert foo == "blah" and bar == "whatever"
See the documentation for more details (I'm the author by the way)

Note: this solution does not work if your fixture depends on other fixtures that have parameters.
Don't really know if there are any default solution in pytest package, but you can make a custom one:
import pytest
# NOTE(review): MarkInfo was removed in pytest 4.0, so this recipe only
# works on pytest 3.x.
from _pytest.mark import MarkInfo


def pytest_generate_tests(metafunc):
    """Parametrize tests marked with `use_multifixture`.

    The mark carries a comma-separated list of argument names and a factory
    function; the factory's returned tuple supplies the argument values.
    """
    test_func = metafunc.function
    # Detect the mark by scanning the function's attributes for MarkInfo objects.
    if 'use_multifixture' in [name for name, ob in vars(test_func).items() if isinstance(ob, MarkInfo)]:
        result, func = test_func.use_multifixture.args
        params_names = result.split(',')
        params_values = list(func())
        # A single parameter set: the tuple produced by the factory.
        metafunc.parametrize(params_names, [params_values])


def foobar():
    return "blah", "whatever"


@pytest.mark.use_multifixture("foo,bar", foobar)
def test_stuff(foo, bar):
    assert foo == "blah" and bar == "whatever"


def test_stuff2():
    assert 'blah' == "blah"
So we defined pytest_generate_tests metafunction. This function
checks if multifixture mark is on the test
if the mark is on, it takes the variable names "foo,bar" and the function foobar that will be executed during generation
@pytest.mark.multifixture("foo,bar", foobar)

You can do this with two pytest fixtures, like so:
import pytest


@pytest.fixture
def foo():
    return [object()]


# value derived from foo
@pytest.fixture
def bar(foo):
    return foo[0]


# totally independent fixture
@pytest.fixture
def baz():
    return object()


def test_fixtures(foo, bar, baz):
    assert foo[0] is bar
    assert foo[0] is not baz
    # both assertions will pass
Here the foo and bar fixtures have a specific relation between their values (referencing the same object). This is the same result as you wanted from your multi fixture. (The baz fixture is included for comparison, and uses an unrelated instance of object().)
If both values are derived from some shared context you can put the shared context in a fixture, and then derive the final results independently.
@pytest.fixture
def shared():
    # Single shared context object, wrapped in a list.
    return [object()]


@pytest.fixture
def derived_1(shared):
    return shared[0]


@pytest.fixture
def derived_2(shared):
    return shared[-1]


def test_derived(derived_1, derived_2):
    # Both derivations resolve to the single-element list's only item.
    assert derived_1 is derived_2

Related

Parametrize session fixture based on config option

I'm using pytest and have something like the following in conftest.py:
def pytest_addoption(parser):
    parser.addoption('--foo', required=True, help='Foo name.')


@pytest.fixture(scope='session')
def foo(pytestconfig):
    # Yield from inside the context manager so the Foo is torn down when
    # the session ends.
    with Foo(pytestconfig.getoption('foo')) as foo_obj:
        yield foo_obj
I'd like to change the --foo option to
parser.addoption('--foo', action='append', help='Foo names.')
and have a separate Foo object with session scope generated for each provided name. Ordinarily, I'd use pytest_generate_tests to parametrize a fixture in this way. That is,
def pytest_generate_tests(metafunc):
    # NOTE: this constructs a Foo per collected test, which defeats the
    # session scope of the fixture — exactly the asker's concern.
    if 'foo' in metafunc.fixturenames:
        metafunc.parametrize('foo', map(Foo, metafunc.config.getoption('foo')))
However, if I'm understanding correctly how pytest_generate_tests works, Foo objects will be created separately for each test function, thus defeating the whole point of a session fixture.
Seems like the easiest way to solve this is to make the fixture indirect. That is, you pass the configuration values to the fixture, but then let it manage its own setup. That way pytest will honour the fixture's scope setting. For example:
def pytest_generate_tests(metafunc):
    # assuming --foo has been set as a possible parameter for pytest
    if "foo" in metafunc.fixturenames and metafunc.config.option.foo is not None:
        # indirect=True passes the raw option strings to the fixture, which
        # does its own setup — so pytest honours the fixture's scope.
        metafunc.parametrize("foo", metafunc.config.option.foo, indirect=True)


@pytest.fixture(scope='session')
def foo(request):
    if not hasattr(request, 'param'):
        pytest.skip('no --foo option set')
    elif isinstance(request.param, str):
        return Foo(request.param)
    else:
        raise ValueError("invalid internal test config")
Altogether this looks like:
conftest.py
def pytest_addoption(parser):
    """Register a repeatable --foo option (action='append' collects a list)."""
    parser.addoption('--foo', action='append', help='Foo value')
test_something.py
import pytest

# keep track of how many Foos have been instantiated
FOO_COUNTER = 0


class Foo:
    def __init__(self, value):
        global FOO_COUNTER
        self.value = value
        self.count = FOO_COUNTER
        FOO_COUNTER += 1
        print(f'creating {self} number {self.count}')

    def __repr__(self):
        return f'Foo({self.value})'


def pytest_generate_tests(metafunc):
    if "foo" in metafunc.fixturenames and metafunc.config.option.foo is not None:
        # indirect=True: the fixture constructs the Foo, so session scope holds.
        metafunc.parametrize("foo", metafunc.config.option.foo, indirect=True)


@pytest.fixture(scope='session')
def foo(request):
    if not hasattr(request, 'param'):
        pytest.skip('no --foo option set')
    elif isinstance(request.param, str):
        return Foo(request.param)
    else:
        raise ValueError("invalid internal test config")


def test_bar(foo):
    assert isinstance(foo, Foo)
    assert foo.value in list('abx')


def test_baz(foo):
    assert isinstance(foo, Foo)
    assert foo.value in list('aby')


# test name is to encourage pytest to run this test last (because of side
# effects of Foo class has on FOO_COUNTER)
def test_zzz_last():
    # only passes when exactly two foo options set
    assert FOO_COUNTER == 2
If run with exactly two --foo options then the test_zzz_last passes. One or three --foo options and this test fails. This demonstrates that exactly one instance of Foo is created per --foo option, and each instance is shared between tests. Zero --foo options will cause any test requiring the foo fixture to be skipped.
If a --foo option is given a value that is not a, b, x or y then both test_bar and test_baz will fail. Thus we can see that our configuration options are making it into the foo fixture.

Shorter option names for pytest fixture variants with long names

I am testing a function with several incoming datasets defined as fixtures, but the fixture names get quite cumbersome to distinguish them from one another.
@pytest.fixture()
def dataset_with_foo():
    pass


@pytest.fixture()
def dataset_with_bar():
    pass


@pytest.fixture()
def dataset_with_foo_and_bar():
    pass


def test_something(dataset_with_foo_and_bar):
    pass
Is there a way to define some kind of alias for the option name to be shorter? For instance, something like:
# Hypothetical API sketched by the asker — no such decorator exists:
@usesfixture("dataset_with_foo_and_bar", option_name="dataset")
def test_something(dataset):
    pass
Create a super fixture and helper function to get desired fixture with one fixture.
import pytest


@pytest.fixture
def super_fixture(fixture1, fixture2, fixture3):
    """Aggregate several fixtures and return a by-name lookup helper."""
    # Snapshot the fixture arguments before defining the closure.
    local_vars = locals()

    def helper(fixture_name):
        return local_vars.get(fixture_name)

    return helper


def test_a(super_fixture):
    # getting fixture1
    assert super_fixture("fixture1")
Ok, the best way I could find to do it is by using deferred parametrized fixtures:
@pytest.fixture()
def dataset(request):
    # Map ids to the factory callables (not their results) so only the
    # requested variant is actually built — the original called all three
    # factories on every lookup, defeating the "deferred" intent.
    mapping = {
        "with-foo": create_dataset_with_foo,
        "with-bar": create_dataset_with_bar,
        "with-foo-and-bar": create_dataset_with_foo_and_bar,
    }
    return mapping[request.param]()


def create_dataset_with_foo():
    pass


def create_dataset_with_bar():
    pass


def create_dataset_with_foo_and_bar():
    pass


@pytest.mark.parametrize("dataset", ["with-foo"], indirect=True)
def test_something(dataset):
    pass


# Renamed: the original defined two `test_something` functions, the second
# silently shadowing the first so it never ran.
@pytest.mark.parametrize("dataset", ["with-foo-and-bar"], indirect=True)
def test_something_foo_and_bar(dataset):
    pass
There have been other attempts using pytest-lazy-fixture or a specialized decorator, but I find them a bit too hacky.
https://gist.github.com/wcooley/7472b8de6edb1e8ceda560843c0519a8

Pytest: Setting up mocks with functions without side effects

I have a question related the answer of this question:
pytest: setup a mock for every test function
I like the idea of using functions that receive the mock objects via arguments. In this way, the setting up of mocks can be reused. I conclude from the answer that mock objects are mutable in Python and changing them inside the function will have the side effect that they are changed outside. However, I consider it dangerous to have side effects. So, I suggest the following:
def test(self, mock1):
    """Sketch from the question: rebind the injected mock to a configured copy."""
    mock1 = setup_mock1_to_always_xxx(mock1)
    ...
with
def setup_mock1_to_always_xxx(mock1):
    """Return a configured "copy" of *mock1* whose call result is fixed.

    NOTE(review): Mock has no real ``.copy()`` method — attribute access on a
    Mock auto-creates a child mock, so ``mock1.copy()`` happens to return a
    fresh Mock rather than a true copy.
    """
    # create a copy to avoid side effects
    mock1 = mock1.copy()  # how to copy a Mock?
    mock1.return_value = "my return value"
    return mock1
Would this be possible?
I would suggest injecting mocks using pytest fixtures instead of manual mock copying. The function-scoped fixtures (the default ones) reevaluate for each test. Example: assume you have a module
# mod.py
def spam():
    """Return whatever eggs() currently produces (eggs is the patch target)."""
    return eggs()


def eggs():
    return "eggs"
and a test
from unittest.mock import patch

from mod import spam


@patch("mod.eggs")
def test_bacon(mock1):
    # mod.eggs is replaced for the duration of this test only.
    mock1.return_value = "bacon"
    assert spam() == "bacon"
and now you want to refactor it into testing against bacon and bacon with eggs. Move out the patching inside a fixture:
import pytest
from unittest.mock import patch

from mod import spam


@pytest.fixture
def eggs_mock():
    # The patch is active for the duration of each test, then undone;
    # each test gets its own fresh mock.
    with patch("mod.eggs") as mock1:
        yield mock1


def test_bacon(eggs_mock):
    eggs_mock.return_value = "bacon"
    assert spam() == "bacon"


def test_bacon_with_eggs(eggs_mock):
    eggs_mock.return_value = "bacon with eggs"
    assert spam() == "bacon with eggs"
You now have two different mocks of the mod.eggs function, one unique mock in each test.
unittest-style tests
This approach also works with unittest test classes, although the injection is a bit more verbose since unittest.TestCases don't accept arguments in test methods. This is the same approach as described in this answer of mine. In the example below, I store the eggs_mock fixture return value in a Tests instance attribute via using an additional autouse fixture:
from unittest import TestCase
from unittest.mock import patch

import pytest

from mod import spam


@pytest.fixture
def eggs_mock():
    with patch("mod.eggs") as mock1:
        yield mock1


class Tests(TestCase):
    # TestCase methods cannot receive fixture arguments, so inject the mock
    # through an autouse fixture that stores it on the instance.
    @pytest.fixture(autouse=True)
    def inject_eggs_mock(self, eggs_mock):
        self._eggs_mock = eggs_mock

    def test_bacon(self):
        self._eggs_mock.return_value = "bacon"
        assert spam() == "bacon"

    def test_bacon_with_eggs(self):
        self._eggs_mock.return_value = "bacon with eggs"
        assert spam() == "bacon with eggs"

How to share object from fixture to all tests using pytest?

What is the best way to define an object in a fixture with session scope and autouse=True, so it will be available to all tests?
@pytest.fixture(scope='session', autouse=True)
def setup_func(request):
    # Note: `obj` is never returned, so tests cannot see it — that is the
    # asker's problem statement.
    obj = SomeObj()
Next thing, I want some magic that previously created obj will appear in each test context without the need of each test to define the setup_func fixture.
def test_one():
    # Desired (magical) behavior: `obj` from the autouse fixture is in scope.
    obj.do_something_fancy()
My recommendation would to add the fixture to conftest.py and make sure to return the object you want to produce from the fixture.
As noted, this makes "autouse" kind of useless.
In the root directory for your tests, add the fixture to a file named conftest.py:
@pytest.fixture(scope='session', autouse=True)
def someobj(request):
    """Session-wide fixture; the returned object is shared by all tests."""
    return SomeObj()
Any test file beneath the root file will have access to this fixture (for example test_foo.py):
def test_foo(someobj):
    assert isinstance(someobj, SomeObj)
Another approach, would be to use a global variable defined in the same test or imported from a module.
For example in conftest.py:
someobj = None


@pytest.fixture(scope='session', autouse=True)
def prep_someobj(request):
    # Without `global`, this assignment would create a local and leave the
    # module-level `someobj` as None for the tests that read it.
    global someobj
    someobj = SomeObj()
Then in your test:
from . import conftest


def test_foo():
    # Read through the module so the fixture's rebinding is visible.
    assert isinstance(conftest.someobj, SomeObj)
In my opinion this is less readable and more cumbersome than the first method.
A more general pattern for this is to return locals() at the end of your conftest and you'll be able to easily reference anything created in the fixture.
conftest.py
@pytest.fixture(scope='session')
def setup_func(request):
    obj1 = SomeObj()
    obj2 = SomeObj()
    # NOTE: locals() also contains `request`, not just the created objects.
    return locals()
test_stuff.py
def test_one(setup_func):
    setup_func['obj1'].do_something_fancy()


def test_two(setup_func):
    setup_func['obj2'].do_something_fancy()
Another possibility is to wrap your tests in a class and use class variables to only define the object instance once. This assumes you are able to wrap all tests in a single class and so this answer may address a less general, but similar use case. For example,
class SomeObj():
    """This object definition may exist in another module and be imported."""

    def __init__(self):
        self.x = 5

    def do_something_fancy(self, y):
        return self.x * y


class TestX():
    # Object instance to share across tests
    someobj = SomeObj()

    def test_x(self):
        assert TestX.someobj.x == 5

    def test_fancy(self):
        fancy_factor = 10
        result = TestX.someobj.do_something_fancy(fancy_factor)
        assert result == 50

py.test: how to get the current test's name from the setup method?

I am using py.test and wonder if/how it is possible to retrieve the name of the currently executed test within the setup method that is invoked before running each test. Consider this code:
class TestSomething(object):
    def setup(self):
        # Question: how to obtain the currently running test's name here?
        test_name = ...

    def teardown(self):
        pass

    def test_the_power(self):
        assert "foo" != "bar"

    def test_something_else(self):
        assert True
Right before TestSomething.test_the_power becomes executed, I would like to have access to this name in setup as outlined in the code via test_name = ... so that test_name == "TestSomething.test_the_power".
Actually, in setup, I allocate some resource for each test. In the end, looking at the resources that have been created by various unit tests, I would like to be able to see which one was created by which test. Best thing would be to just use the test name upon creation of the resource.
You can also do this using the Request Fixture like this:
def test_name1(request):
    # The built-in `request` fixture exposes the current test's node.
    testname = request.node.name
    assert testname == 'test_name1'
You can also use the PYTEST_CURRENT_TEST environment variable set by pytest for each test case.
PYTEST_CURRENT_TEST environment variable
To get just the test name:
os.environ.get('PYTEST_CURRENT_TEST').split(':')[-1].split(' ')[0]
The setup and teardown methods seem to be legacy methods for supporting tests written for other frameworks, e.g. nose. The native pytest methods are called setup_method as well as teardown_method which receive the currently executed test method as an argument. Hence, what I want to achieve, can be written like so:
class TestSomething(object):
    def setup_method(self, method):
        # `method` is the test method about to run; print "Class:method".
        # (Converted from the original Python-2 print statement.)
        print("\n%s:%s" % (type(self).__name__, method.__name__))

    def teardown_method(self, method):
        pass

    def test_the_power(self):
        assert "foo" != "bar"

    def test_something_else(self):
        assert True
The output of py.test -s then is:
============================= test session starts ==============================
platform linux2 -- Python 2.7.3 -- pytest-2.3.3
plugins: cov
collected 2 items
test_pytest.py
TestSomething:test_the_power
.
TestSomething:test_something_else
.
=========================== 2 passed in 0.03 seconds ===========================
Short answer:
Use fixture called request
This fixture has the following interesting attributes:
request.node.originalname = the name of the function/method
request.node.name = name of the function/method and ids of the parameters
request.node.nodeid = relative path to the test file, name of the test class (if in a class), name of the function/method and ids of the parameters
Long answer:
I inspected the content of request.node. Here are the most interesting attributes I found:
class TestClass:
    @pytest.mark.parametrize("arg", ["a"])
    def test_stuff(self, request, arg):
        # request.node exposes several identifying attributes:
        print("originalname:", request.node.originalname)
        print("name:", request.node.name)
        print("nodeid:", request.node.nodeid)
Prints the following:
originalname: test_stuff
name: test_stuff[a]
nodeid: relative/path/to/test_things.py::TestClass::test_stuff[a]
NodeID is the most promising if you want to completely identify the test (including the parameters). Note that if the test is as a function (instead of in a class), the class name (::TestClass) is simply missing.
You can parse nodeid as you wish, for example:
# Fragment: split a pytest nodeid of the form
# "path/to/file.py::TestClass::test_func[params]" into its parts.
# Assumes `request` (the pytest `request` fixture) is in scope.
components = request.node.nodeid.split("::")
filename = components[0]
# The class component is present only for tests defined inside a class.
test_class = components[1] if len(components) == 3 else None
test_func_with_params = components[-1]
test_func = test_func_with_params.split('[')[0]
# The bracketed suffix holds the parameter ids, separated by "-".
test_params = test_func_with_params.split('[')[1][:-1].split('-')
In my example this results to:
filename = 'relative/path/to/test_things.py'
test_class = 'TestClass'
test_func = 'test_stuff'
test_params = ['a']
# content of conftest.py
@pytest.fixture(scope='function', autouse=True)
def test_log(request):
    """Log the start and completion of every test."""
    # Here logging is used, you can use whatever you want to use for logs
    log.info("STARTED Test '{}'".format(request.node.name))

    def fin():
        log.info("COMPLETED Test '{}' \n".format(request.node.name))
    request.addfinalizer(fin)
Try my little wrapper function which returns the full name of the test, the file and the test name. You can use whichever you like later.
I used it within conftest.py where fixtures do not work as far as I know.
import os


def get_current_test():
    """Return (full_name, test_file, test_name) parsed from PYTEST_CURRENT_TEST.

    Useful in conftest.py hooks where fixtures are unavailable. The env var
    has the form "path/to/test_file.py::test_name (phase)".
    """
    full_name = os.environ.get('PYTEST_CURRENT_TEST').split(' ')[0]
    test_file = full_name.split("::")[0].split('/')[-1].split('.py')[0]
    test_name = full_name.split("::")[1]
    return full_name, test_file, test_name
You might have multiple tests, in which case...
test_names = [n for n in dir(self) if n.startswith('test_')]
...will give you all the functions and instance variables that begin with "test_" in self. As long as you don't have any variables named "test_something" this will work.
You can also define a method setup_method(self, method) instead of setup(self) and that will be called before each test method invocation. Using this, you're simply given each method as a parameter. See: http://pytest.org/latest/xunit_setup.html
You could give the inspect module a try.
import inspect


def foo():
    # inspect.stack()[0][3] is the name of the currently executing function.
    # (Converted from the original Python-2 print statement.)
    print("My name is:", inspect.stack()[0][3])


foo()
Output: My name is: foo
Try type(self).__name__ perhaps?

Categories