I have inherited some code that implements pytest.mark.skipif for a few tests. Reading through the pytest docs, I am aware that I can add conditions, possibly check for environment variables, or use more advanced features of pytest.mark to control groups of tests together. Unfortunately nothing in the docs so far seems to solve my problem.
I'm looking to simply turn off any test skipping, but without modifying any source code of the tests. I just want to run pytest in a mode where it does not honor any indicators for test skipping. Does such a solution exist with pytest?
Create a conftest.py with the following contents:
import pytest
import _pytest.skipping
def pytest_addoption(parser):
parser.addoption(
"--no-skips",
action="store_true",
default=False, help="disable skip marks")
@pytest.hookimpl(tryfirst=True)
def pytest_cmdline_preparse(config, args):
if "--no-skips" not in args:
return
def no_skip(*args, **kwargs):
return
_pytest.skipping.skip = no_skip
Then use --no-skips on the command line to run all test cases, even those marked with the @pytest.mark.skip decorator.
A workaround to ignore skip marks is to remove them programmatically. Create a conftest.py with the following contents:
def pytest_collection_modifyitems(items):
for item in items:
for node in reversed(item.listchain()):
node.own_markers = [m for m in node.own_markers if m.name not in ('skip', 'skipif')]
However, this messes with pytest internals and can easily break on pytest updates; the proper way of ignoring skips should be defining your custom skipping mechanism, for example:
import os
import pytest

@pytest.hookimpl(tryfirst=True)
def pytest_runtest_setup(item):
mark = item.get_closest_marker(name='myskip')
if mark:
condition = next(iter(mark.args), True)
reason = mark.kwargs.get('reason', 'custom skipping mechanism')
item.add_marker(pytest.mark.skipif(not os.getenv('PYTEST_RUN_FORCE_SKIPS', False) and condition, reason=reason), append=False)
Annotate the tests with @pytest.mark.myskip instead of @pytest.mark.skip and @pytest.mark.myskip(condition, reason) instead of @pytest.mark.skipif(condition, reason):
@pytest.mark.myskip
def test_skip():
assert True
@pytest.mark.myskip(1 == 1, reason='my skip')
def test_skipif():
assert True
On a regular run, myskip will behave the same way as pytest.mark.skip/pytest.mark.skipif. Setting PYTEST_RUN_FORCE_SKIPS will disable it:
$ PYTEST_RUN_FORCE_SKIPS=1 pytest -v
...
test_spam.py::test_skip PASSED
test_spam.py::test_skipif PASSED
...
Of course, you shouldn't use pytest.mark.skip/pytest.mark.skipif anymore, as they won't be affected by the PYTEST_RUN_FORCE_SKIPS env var.
Here is a short working solution based on the answer from hoefling:
Add in your conftest.py:
from typing import Any, List
from typing_extensions import Final
NO_SKIP_OPTION: Final[str] = "--no-skip"
def pytest_addoption(parser):
parser.addoption(NO_SKIP_OPTION, action="store_true", default=False, help="also run skipped tests")
def pytest_collection_modifyitems(config,
items: List[Any]):
if config.getoption(NO_SKIP_OPTION):
for test in items:
test.own_markers = [marker for marker in test.own_markers if marker.name not in ('skip', 'skipif')]
OK, the implementation does not allow for this with zero modifications; you'll need a custom marker. Add the following to your conftest.py, then change all skipif marks to custom_skipif and run pytest --no-skips.
import pytest
from _pytest.mark.evaluate import MarkEvaluator
# check_xfail_no_run lives in _pytest.skipping in the older pytest versions this snippet targets
from _pytest.skipping import check_xfail_no_run
def pytest_addoption(parser):
parser.addoption(
"--no-skips", action="store_true", default=False, help="disable custom_skip marks"
)
@pytest.hookimpl(tryfirst=True)
def pytest_runtest_setup(item):
if item.config.getoption('--no-skips'):
return
# Check if skip or skipif are specified as pytest marks
item._skipped_by_mark = False
eval_skipif = MarkEvaluator(item, "custom_skipif")
if eval_skipif.istrue():
item._skipped_by_mark = True
pytest.skip(eval_skipif.getexplanation())
for skip_info in item.iter_markers(name="custom_skip"):
item._skipped_by_mark = True
if "reason" in skip_info.kwargs:
pytest.skip(skip_info.kwargs["reason"])
elif skip_info.args:
pytest.skip(skip_info.args[0])
else:
pytest.skip("unconditional skip")
item._evalxfail = MarkEvaluator(item, "xfail")
check_xfail_no_run(item)
The implementation is copied and modified from pytest itself in skipping.py.
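For illustration, a hypothetical test module using the custom marks could look like this; running pytest --no-skips then executes both tests despite the marks (the condition and reasons here are just examples):
import sys
import pytest

@pytest.mark.custom_skipif(sys.platform == 'win32', reason='not supported on Windows')
def test_feature():
    assert True

@pytest.mark.custom_skip(reason='flaky on CI')
def test_other_feature():
    assert True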
An easy workaround is to monkeypatch pytest.mark.skipif in your conftest.py:
import pytest
old_skipif = pytest.mark.skipif
def custom_skipif(*args, **kwargs):
return old_skipif(False, reason='disabling skipif')
pytest.mark.skipif = custom_skipif
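With that in place, a test like the following hypothetical one is collected and run instead of being skipped, because the condition is rewritten to False at the time the decorator is applied:
import pytest

@pytest.mark.skipif(True, reason='would normally always be skipped')
def test_runs_anyway():
    assert True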
Long story short, I want to be able to skip some tests if the session is being run against our production API. The environment that the tests are run against is set with a command-line option.
I came across the idea of using the pytest_namespace to track global variables, so I set that up in my conftest.py file.
def pytest_namespace():
return {'global_env': ''}
I take in the command line option and set various API urls (from a config.ini file) in a fixture in conftest.py.
@pytest.fixture(scope='session', autouse=True)
def configInfo(pytestconfig):
global data
environment = pytestconfig.getoption('--ENV')
print(environment)
environment = str.lower(environment)
pytest.global_env = environment
config = configparser.ConfigParser()
config.read('config.ini') # local config file
configData = config['QA-CONFIG']
if environment == 'qa':
configData = config['QA-CONFIG']
if environment == 'prod':
configData = config['PROD-CONFIG']
(...)
Then I've got the test I want to skip, and it's decorated like so:
@pytest.mark.skipif(pytest.global_env in 'prod',
reason="feature not in Prod yet")
However, whenever I run the tests against prod, they don't get skipped. I did some fiddling around, and found that:
a) the global_env variable is accessible through another fixture
#pytest.fixture(scope="session", autouse=True)
def mod_header(request):
log.info('\n-----\n| '+pytest.global_env+' |\n-----\n')
displays correctly in my logs
b) the global_env variable is accessible in a test, correctly logging the env.
c) pytest_namespace is deprecated
So I'm assuming this has to do with when the skipif accesses that global_env vs. when the fixtures do in the test session. I also find it non-ideal to rely on deprecated functionality.
My question is:
how do I get a value from the pytest command line option into a skipif?
Is there a better way to be trying this than the pytest_namespace?
It looks like the proper way to control skipping of tests according to a command-line option is to mark the tests as skipped dynamically:
Add an option using the pytest_addoption hook like this:
def pytest_addoption(parser):
parser.addoption(
"--runslow", action="store_true", default=False, help="run slow tests"
)
Use the pytest_collection_modifyitems hook to add a marker like this:
def pytest_collection_modifyitems(config, items):
if config.getoption("--runslow"):
# --runslow given in cli: do not skip slow tests
return
skip_slow = pytest.mark.skip(reason="need --runslow option to run")
for item in items:
if "slow" in item.keywords:
item.add_marker(skip_slow)
Add the mark to your test:
@pytest.mark.slow
def test_func_slow():
pass
If you want to use data from the CLI in a test, for example credentials, it is enough to specify the skip option when retrieving them from pytestconfig:
Add an option using the pytest_addoption hook like this:
def pytest_addoption(parser):
parser.addoption(
"--credentials",
action="store",
default=None,
help="credentials to ..."
)
Use the skip option when getting it from pytestconfig:
#pytest.fixture(scope="session")
def super_secret_fixture(pytestconfig):
credentials = pytestconfig.getoption('--credentials', skip=True)
...
Use the fixture as usual in your test:
def test_with_fixture(super_secret_fixture):
...
In this case you will get something like this if you do not pass the --credentials option on the CLI:
Skipped: no 'credentials' option found
It is better to use _pytest.config.get_config() instead of the deprecated pytest.config if you still want to use pytest.mark.skipif, like this:
@pytest.mark.skipif(not _pytest.config.get_config().getoption('--credentials'), reason="--credentials was not specified")
The problem with putting global code in fixtures is that markers are evaluated before fixtures, so when skipif is evaluated, configInfo hasn't run yet and pytest.global_env will be empty. I'd suggest moving the configuration code from the fixture to the pytest_configure hook:
# conftest.py
import configparser
import pytest
def pytest_addoption(parser):
parser.addoption('--ENV')
def pytest_configure(config):
environment = config.getoption('--ENV')
pytest.global_env = environment
...
The configuration hook is guaranteed to execute before the tests are collected and the markers are evaluated.
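With that in place, the marker from the question should see the configured value, because the test module is imported (and the skipif condition evaluated) during collection, after pytest_configure has run. A minimal sketch:
# test module (sketch; '==' replaces the original `in 'prod'` check)
import pytest

@pytest.mark.skipif(pytest.global_env == 'prod',
                    reason="feature not in Prod yet")
def test_not_on_prod():
    ...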
Is there a better way to be trying this than the pytest_namespace?
Some ways I know of:
1. Simply assign a module variable in pytest_configure (pytest.foo = 'bar', like I did in the example above).
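A minimal sketch of that approach (pytest.foo is just an illustrative name):
# conftest.py
import pytest

def pytest_configure(config):
    pytest.foo = 'bar'  # your own attribute on the pytest module

# usable anywhere afterwards, e.g. in a test
def test_foo():
    assert pytest.foo == 'bar'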
2. Use the config object as it is shared throughout the test session:
def pytest_configure(config):
config.foo = 'bar'
@pytest.fixture
def somefixture(pytestconfig):
assert pytestconfig.foo == 'bar'
def test_foo(pytestconfig):
assert pytestconfig.foo == 'bar'
Outside of the fixtures/tests, you can access the config via pytest.config, for example:
@pytest.mark.skipif(pytest.config.foo == 'bar', reason='foo is bar')
def test_baz():
...
3. Use caching; this has an additional feature of persisting data between test runs:
def pytest_configure(config):
config.cache.set('foo', 'bar')
@pytest.fixture
def somefixture(pytestconfig):
assert pytestconfig.cache.get('foo', None)
def test_foo(pytestconfig):
assert pytestconfig.cache.get('foo', None)
@pytest.mark.skipif(pytest.config.cache.get('foo', None) == 'bar', reason='foo is bar')
def test_baz():
assert True
When using 1. or 2., make sure you don't unintentionally overwrite pytest stuff with your own data; prefixing your own variables with a unique name is a good idea. When using caching, you don't have this problem.
What I would like to achieve is to mock functions in various modules automatically with pytest. So I defined this in my conftest.py:
import sys
import __builtin__
from itertools import chain

import pytest
from mock import Mock
# Fixture factory magic START
NORMAL_MOCKS = [
"logger", "error", "logging", "base_error", "partial"]
BUILTIN_MOCKS = ["exit"]
def _mock_factory(name, builtin):
def _mock(monkeypatch, request):
module = __builtin__ if builtin else request.node.module.MODULE
ret = Mock()
monkeypatch.setattr(module, name, ret)
return ret
return _mock
iterable = chain(
((el, False) for el in NORMAL_MOCKS),
((el, True) for el in BUILTIN_MOCKS))
for name, builtin in iterable:
fname = "mock_{name}".format(name=name)
_tmp_fn = pytest.fixture(name=fname)(_mock_factory(name, builtin))
_tmp_fn.__name__ = fname
setattr(
sys.modules[__name__],
"mock_{name}".format(name=name), _tmp_fn)
# Fixture normal factory magic END
This works and all, but I would like to omit the usage of the NORMAL_MOCKS and BUILTIN_MOCKS lists. So basically, in a pytest hook, I should be able to see that there is, say, a mock_foo fixture that is not registered yet, create a mock for it with the factory, and register it. I just couldn't figure out how to do this. Basically I was looking into the pytest_runtest_setup function, but could not figure out how to do the actual fixture registration. So basically I would like to know with which hook/call I can register new fixture functions programmatically from this hook.
One of the ways is to parameterize the tests at the collection/generation stage, i.e. before the test execution begins: https://docs.pytest.org/en/latest/example/parametrize.html
# conftest.py
import pytest
def mock_factory(name):
return name
def pytest_generate_tests(metafunc):
for name in metafunc.fixturenames:
if name.startswith('mock_'):
metafunc.parametrize(name, [mock_factory(name[5:])])
# test_me.py
def test_me(request, mock_it):
print(mock_it)
A very simple solution. But the downside is that the test is reported as parametrized when it actually is not:
$ pytest -s -v -ra
====== test session starts ======
test_me.py::test_me[it] PASSED
====== 1 passed in 0.01 seconds ======
To fully simulate the function args without the parametrization, you can use a less obvious trick:
# conftest.py
import pytest
def mock_factory(name):
return name
@pytest.hookimpl(hookwrapper=True)
def pytest_runtest_protocol(item, nextitem):
for name in item.fixturenames:
if name.startswith('mock_') and name not in item.funcargs:
item.funcargs[name] = mock_factory(name[5:])
yield
The pytest_runtest_setup hook is also a good place for this, as far as I can tell from a quick try.
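A minimal sketch of the same trick in that hook, reusing the mock_factory from above:
# conftest.py (alternative to the hookwrapper above)
def pytest_runtest_setup(item):
    for name in item.fixturenames:
        if name.startswith('mock_') and name not in item.funcargs:
            item.funcargs[name] = mock_factory(name[5:])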
Note that you do not register the fixture in that case. It is too late for the fixture registration, as all the fixtures are gathered and prepared much earlier at the collection/parametrization stages. In this stage, you can only execute the tests and provide the values. It is your responsibility to calculate the fixture values and to destroy them afterward.
The snippet below is a pragmatic solution to "how to dynamically add fixtures".
Disclaimer: I don't have expertise on pytest. I'm not saying this is what pytest was designed for, I just looked at the source code and came up with this and it seems to work. The fact that I use "private" attributes means it might not work with all versions (currently I'm on pytest 7.1.3)
from _pytest.fixtures import FixtureDef
from _pytest.fixtures import SubRequest
import pytest
@pytest.fixture(autouse=True)  # autouse is relevant, as then the fixture registration happens in time. It's too late if requiring the fixture without autouse, e.g. like @pytest.mark.usefixtures("add_fixture_dynamically")
def add_fixture_dynamically(request: SubRequest):
"""
Conditionally and dynamically adds another fixture. It's conditional on the presence of:
@pytest.mark.my_mark()
"""
marker = request.node.get_closest_marker("my_mark")
# don't register fixture if marker is not present:
if marker is None:
return
def your_fixture(): # the name of the fixture must match the parameter name, like other fixtures
return "hello"
# register the fixture just-in-time
request._fixturemanager._arg2fixturedefs[your_fixture.__name__] = [
FixtureDef(
argname=your_fixture.__name__,
func=your_fixture,
scope="function",
fixturemanager=request._fixturemanager,
baseid=None,
params=None,
),
]
yield # runs the test. Could be wrapped in try/except/finally
# suppress warning (works if this and `add_fixture_dynamically` are in `conftest.py`)
def pytest_configure(config):
"""Prevents printing of the warning 'PytestUnknownMarkWarning: Unknown pytest.mark.<fixture_name>'"""
config.addinivalue_line("markers", "my_mark")
@pytest.mark.my_mark()
def test_adding_fixture_dynamically(your_fixture):
assert your_fixture == "hello"
Is there a way to save the value of a parameter provided by a pytest fixture?
Here is an example of conftest.py
# content of conftest.py
import pytest
def pytest_addoption(parser):
parser.addoption("--parameter", action="store", default="default",
help="configuration file path")
@pytest.fixture
def param(request):
parameter = request.config.getoption("--parameter")
return parameter
Here is an example of pytest module:
# content of my_test.py
def test_parameters(param):
assert param == "yes"
OK - everything works fine, but is there a way to get the value of param outside the test - for example with some built-in pytest function like pytest.get_fixture_value["parameter"]?
EDITED - DETAILED EXPLANATION OF WHAT I WANT TO ACHIEVE
I am writing a module that deploys and then provides parameters to tests written in pytest. My idea is that if someone's test looks like this:
class TestApproachI:
@load_params_as_kwargs(parameters_A)
def setup_class(cls, param_1, param_2, ... , param_n):
# code of setup_class
def teardown_class(cls):
# some code
def test_01(self):
# test code
And this someone gives me a configuration file that explains which parameters to run his code with. I will analyze those parameters (in some other script) and run his tests with the command pytest --parameters=path_to_serialized_python_tuple test_to_run, where this tuple will contain the provided values for his parameters in the right order. And I will tell that guy (with the tests) to add this decorator to all the tests he wants me to provide parameters for. This decorator would look like this:
class TestApproachI:
# this path_to_serialized_tuple should be provided by 'pytest --parameters=path_to_serialized_python_tuple test_to_run'
@load_params(path_to_serialized_tuple)
def setup_class(cls, param_1, param_2, ... , param_n):
# code of setup_class
def teardown_class(cls):
# some code
def test_01(self):
# test code
The decorator function should look like this:
def load_params(parameters):
def decorator(func_to_decorate):
@wraps(func_to_decorate)
def wrapper(self):
# deserialize the tuple and replace the values of the test parameters
return func_to_decorate(self, *parameters)
return wrapper
return decorator
Set that parameter as an OS environment variable, and then use it anywhere in your test through os.getenv('parameter').
So you can use it like this:
import os

@pytest.fixture
def param(request):
parameter = request.config.getoption("--parameter")
os.environ["parameter"]=parameter
return parameter
@pytest.mark.usefixtures('param')
def test_parameters(param):
assert os.getenv('parameter') == "yes"
I am using pytest-lazy-fixture to get the value of any fixture:
First install it using pip install pytest-lazy-fixture or pipenv install pytest-lazy-fixture.
Then simply assign the fixture to a variable like this if you want:
fixture_value = pytest.lazy_fixture('fixture')
The fixture name has to be wrapped in quotes.
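The lazy fixture is typically consumed inside parametrization; a minimal sketch:
import pytest

@pytest.fixture
def one():
    return 1

@pytest.mark.parametrize('arg', [pytest.lazy_fixture('one')])
def test_lazy(arg):
    assert arg == 1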
You can use pytest's config.cache, like this:
def function_1(request):
request.config.cache.set("user_data", "name")
...
def function_2(request):
request.config.cache.get("user_data", None)
...
Here is more info about it
https://docs.pytest.org/en/latest/reference/reference.html#std-fixture-cache
https://docs.pytest.org/en/6.2.x/cache.html
Admittedly it is not the best way to do it to start with, and more importantly the fixture parameters are resolved (i.e. Options.get_option() is called) before everything else.
Recommendations and suggestions would be appreciated.
From config.py
class Options(object):
option = None
@classmethod
def get_option(cls):
return cls.option
From conftest.py
@pytest.yield_fixture(scope='session', autouse=True)
def session_setup():
Options.option = pytest.config.getoption('--remote')
def pytest_addoption(parser):
parser.addoption("--remote", action="store_true", default=False, help="Runs tests on a remote service.")
@pytest.yield_fixture(scope='function', params=Options.get_option())
def setup(request):
if request.param is None:
raise Exception("option is none")
Don't use a custom Options class; ask for the option directly from the config.
pytest_generate_tests may be used for parametrizing a fixture-like argument for tests.
conftest.py
def pytest_addoption(parser):
parser.addoption("--pg_tag", action="append", default=[],
help=("Postgres server versions. "
"May be used several times. "
"Available values: 9.3, 9.4, 9.5, all"))
def pytest_generate_tests(metafunc):
if 'pg_tag' in metafunc.fixturenames:
tags = set(metafunc.config.option.pg_tag)
if not tags:
tags = ['9.5']
elif 'all' in tags:
tags = ['9.3', '9.4', '9.5']
else:
tags = list(tags)
metafunc.parametrize("pg_tag", tags, scope='session')
@pytest.yield_fixture(scope='session')
def pg_server(pg_tag):
# pg_tag is parametrized parameter
# the fixture is called 1-3 times depending on --pg_tag cmdline
Edit: Replaced old example with metafunc.parametrize usage.
There is an example in the latest docs on how to do this. It's a little buried and honestly I glazed over it the first time reading through the documentation: https://docs.pytest.org/en/latest/parametrize.html#basic-pytest-generate-tests-example.
Basic pytest_generate_tests example
Sometimes you may want to implement your own parametrization scheme or
implement some dynamism for determining the parameters or scope of a
fixture. For this, you can use the pytest_generate_tests hook which is
called when collecting a test function. Through the passed in metafunc
object you can inspect the requesting test context and, most
importantly, you can call metafunc.parametrize() to cause
parametrization.
For example, let’s say we want to run a test taking string inputs
which we want to set via a new pytest command line option. Let’s first
write a simple test accepting a stringinput fixture function argument:
# content of test_strings.py
def test_valid_string(stringinput):
assert stringinput.isalpha()
Now we add a conftest.py file containing the addition of a command
line option and the parametrization of our test function:
# content of conftest.py
def pytest_addoption(parser):
parser.addoption("--stringinput", action="append", default=[],
help="list of stringinputs to pass to test functions")
def pytest_generate_tests(metafunc):
if 'stringinput' in metafunc.fixturenames:
metafunc.parametrize("stringinput",
metafunc.config.getoption('stringinput'))
If we now pass two stringinput values, our test will run twice:
$ pytest -q --stringinput="hello" --stringinput="world" test_strings.py
..
2 passed in 0.12 seconds
I want to implement the following using external data (arguments) via pytest_generate_tests. This example works:
@pytest.mark.parametrize('case', [1,2,3,4])
def test_regression(case):
print(case)
assert True
Imagine I retrieve test data via an argv option. So I've created conftest.py, added the option --data, added a data fixture, and added the pytest_generate_tests hook. Please pay attention that if I do not declare the data fixture this will not work (but in the example there is no fixture declaration): http://pytest.org/latest/example/parametrize.html#generating-parameters-combinations-depending-on-command-line
import pytest
def pytest_addoption(parser):
parser.addoption('--data', action='store', default='', help='Specify testing data')
@pytest.fixture
def data(request):
return request.config.getoption('--data')
def pytest_generate_tests(metafunc):
if 'data' in metafunc.funcargnames:
# imagine data.cases = [1,2,3,4,5]
metafunc.parametrize('case', [1,2,3,4,5])
For example, I have an argument data that contains some test data & some test cases. So I define conftest.py the following way:
# conftest.py
import pytest
def pytest_addoption(parser):
parser.addoption('--data', action='store', default='', help='Specify testing data')
@pytest.fixture
def data(request):
return request.config.getoption('--data')
def pytest_generate_tests(metafunc):
if 'data' in metafunc.fixturenames:
# lets imagine data.cases = [1,2,3,4,5]
metafunc.parametrize('case', [1,2,3,4,5])
# test.py (just removed the @pytest.mark.parametrize line)
def test_regression(case):
print(case)
assert True
The example above will give an error: fixture 'case' not found. But if I substitute case with data it will work:
# conftest.py
import pytest
def pytest_addoption(parser):
parser.addoption('--data', action='store', default='', help='Specify testing data')
@pytest.fixture
def data(request):
return request.config.getoption('--data')
def pytest_generate_tests(metafunc):
if 'data' in metafunc.fixturenames:
# lets imagine data.cases = [1,2,3,4,5]
metafunc.parametrize('data', [1,2,3,4,5])
# test.py (just removed the @pytest.mark.parametrize line)
def test_regression(data):
print(data)
assert True
But I need the test parameter to be named case. What am I doing wrong?
I faced nearly the same problem today:
I can't give you the actual root cause, but the problem seems to be that the function parameter which gets passed to the test function is expected to be a fixture. So if you are using data it works, because you are using a fixture.
If you are using case, there is no fixture found for case.
I solved this by doing the following:
def pytest_generate_tests(metafunc):
if 'func' in metafunc.fixturenames:
# do some stuff
metafunc.parametrize('func', all_combinations)
def test_function_prototypes(func):
assert func
This will throw the fixture 'func' not found error. I solved it by adding the following lines:
@pytest.fixture
def func(request):
return request.param # pass the param to the test function
I didn't find anything in the docs concerning the need to supply this function.
I also observed that commenting the fixture out again still lets the code work. I guess it's related to caching...