How to access a fixture's values as input to a parametrized test - python

In pytest I need to dynamically define tests, depending on test cases defined in a file.
So what I was thinking is to define a fixture in conftest.py that reads the file and returns a dictionary with the tests.
File tests.json:
{
    "test1": "text",
    "test2": "42",
    "test3": 1
}
I then define a fixture in conftest.py to return the dictionary with the tests:
import json
import pytest

def pytest_addoption(parser):
    parser.addoption(
        "--tests",
        default="tests.json",
    )

@pytest.fixture
def mytests(request):
    testfile = request.config.getoption("--tests")
    with open(testfile) as f:
        tests = json.load(f)
    return tests
and then I can use a parametrized test as follows in test_pytest.py:
@pytest.mark.parametrize("test_name", [(key) for key, value in mytests.items()])
def test1(test_name):
    print(test_name)
which does not work because, at this point, pytest does not seem to 'know' that mytests is a fixture. I get an error
E NameError: name 'mytests' is not defined
How do I handle this correctly? I just want to be able to either run all the tests that are defined in the json file, or to select a single test from it with the -k option of pytest.
Based on some comments given below I tried to implement something as follows:
@pytest.hookimpl
def pytest_generate_tests(metafunc):
    if "myparam" in metafunc.fixturenames:
        with open(metafunc.config.option.tests) as f:
            tests = json.load(f)
        # add parametrization for each fixture name
        for name, value in tests.items():
            print(name, value)
            metafunc.parametrize("mparam", (name, value))

def test1(myparam):
    print(myparam)
But with this I got an error
ERROR test_pytest.py - ValueError: duplicate 'myparam'

As mentioned in the comments, you cannot use a fixture in mark.parametrize. Fixtures can only be used in test functions and other fixtures.
To have dynamic parametrization like in this case, you can implement the hook function pytest_generate_tests:
import json
import pytest

@pytest.hookimpl
def pytest_generate_tests(metafunc):
    if "test_name" in metafunc.fixturenames:
        testfile = metafunc.config.getoption("--tests")
        with open(testfile) as f:
            tests = json.load(f)
        metafunc.parametrize("test_name", tests.items())

def test1(test_name):
    print(test_name)
This will parametrize every test that has a "test_name" argument (i.e. fixture) with the items from the config file.
Running this with the given json file will result in something like:
$ python -m pytest -s
...
collected 3 items
test_pytest.py ('test1', 'text')
.('test2', '42')
.('test3', 1)
.
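Since the question also asks about selecting a single entry with -k, the parametrize call can be given readable ids built from the json keys; a minimal sketch that mirrors the hook above (the ids argument is standard pytest, the rest is unchanged):
import json
import pytest

@pytest.hookimpl
def pytest_generate_tests(metafunc):
    if "test_name" in metafunc.fixturenames:
        with open(metafunc.config.getoption("--tests")) as f:
            tests = json.load(f)
        # the json keys become the test ids, e.g. test1[test2]
        metafunc.parametrize("test_name", tests.items(), ids=list(tests))
Running python -m pytest -k test2 would then collect only the test2 entry.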

Related

pytest: Run general test with dynamic input from files

I'm currently trying to write a Python test module with pytest which reads several different pieces of information from a bunch of files. This information shall be used as input (fixtures) for two general tests, which shall be run for each fixture (so for each file).
What I want are dynamic fixtures generated from the content of an undefined number of text files (jsons).
Then I have two pytest "test_*" functions which are fed with this undefined number of fixtures.
Is such a thing possible with pytest? I already tried the lib pytest-cases, but I was unable to achieve my desired solution.
This is my current code base, where I want the yield to return a fixture for each *.json file:
from pytest_cases import parametrize_with_cases, get_case_id

class Foo:
    def matching_events(self):
        # - glob for *.json files
        # - read the rule files and events
        # generate the fixture objects and return them for each *.json file
        yield "test_rule_name", {}, {}

    def mismatching_events(self):
        # - glob for *.json files
        # - read the rule files and events
        # generate the fixture objects and return them for each *.json file
        yield "test_rule_name", {}, {}

def case_id_generator(case_fun):
    """Custom test case id"""
    return "#%s#" % case_fun

@parametrize_with_cases("rule_name, rule_definition, events", cases=Foo, prefix="matching_", ids=case_id_generator)
def test_rule_match(rule_name, rule_definition, events):
    assert isinstance(rule_name, str)
    assert isinstance(rule_definition, dict)
    assert isinstance(events, dict)
    # do some more things with the rule definition and events

@parametrize_with_cases("rule_name, rule_definition, events", cases=Foo, prefix="mismatching_", ids=case_id_generator)
def test_rule_mismatch(rule_name, rule_definition, events):
    assert isinstance(rule_name, str)
    assert isinstance(rule_definition, dict)
    assert isinstance(events, dict)
    # do some more things with the rule definition and events
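For comparison, plain pytest can achieve much the same effect without pytest-cases by building the parameter list from a glob. A minimal sketch, where the cases/matching_*.json pattern and the keys rule_name, rule_definition and events are assumptions about the file layout:
import glob
import json
import pytest

def _load_cases(pattern):
    # hypothetical layout: one json file per case, with assumed keys
    for path in sorted(glob.glob(pattern)):
        with open(path) as f:
            data = json.load(f)
        yield data["rule_name"], data["rule_definition"], data["events"]

@pytest.mark.parametrize("rule_name, rule_definition, events",
                         list(_load_cases("cases/matching_*.json")))
def test_rule_match(rule_name, rule_definition, events):
    assert isinstance(rule_name, str)
    assert isinstance(rule_definition, dict)
    assert isinstance(events, dict)
Each matching file then becomes one collected test, so the number of tests follows the number of json files.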

pytest: How to access tmp_path in pytest_sessionstart()?

I'm using pytest to write some unit tests.
I know I can access the tmp_path temporary directory in any test or fixture, but is it possible to access it in the pytest_sessionstart() method too?
Essentially, this is a sample of what I'm trying to achieve
def pytest_sessionstart(session, tmp_path):
    """Create hello.txt before any test is run and make it available to all tests"""
    p = tmp_path.join("hello.txt")
    p.write("content")
Thanks
The recommended way to create a temp file for all the tests is to use a session-scoped fixture together with the built-in tmp_path_factory fixture.
From the pytest docs:
# contents of conftest.py
import pytest

@pytest.fixture(scope="session")
def image_file(tmp_path_factory):
    img = compute_expensive_image()
    fn = tmp_path_factory.mktemp("data") / "img.png"
    img.save(str(fn))
    return fn

# contents of test_image.py
def test_histogram(image_file):
    img = load_image(image_file)
    # compute and test histogram
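Adapted to the hello.txt example from the question, the same pattern could look like this; the fixture name hello_file is arbitrary:
# contents of conftest.py
import pytest

@pytest.fixture(scope="session")
def hello_file(tmp_path_factory):
    # runs once per session, before the first test that requests it
    p = tmp_path_factory.mktemp("data") / "hello.txt"
    p.write_text("content")
    return p
Any test that takes hello_file as an argument then receives the path to the shared file.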

Python unit test for a method that contains a request

I have a method which contains external REST API calls, for example:
import requests

def get_dataset():
    url = requests.get("http://api:5001/get_trainingdata")
    filename = url.text[0]
    return filename
When I use @patch for this function, I can unit test it, but coverage does not cover the whole function.
How can I write a unit test case for this method with full coverage?
My test case:
@mock.patch('api.get_dataset')
def test_using_decorator1(self, mocked_get_dataset):
    file = [{"file": "ddddd"}]
    mocked_get_dataset.return_value = Mock()
    mocked_get_dataset.return_value.json.return_value = file
    filename = file[0]
    self.assertEqual(filename, file[0])
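One way to get the body of get_dataset covered is to patch requests.get itself rather than get_dataset, so the real function still runs. A minimal sketch, assuming get_dataset lives in a module named api (the module name and the sample filename are assumptions):
from unittest import TestCase, mock

import api  # hypothetical module that defines get_dataset

class TestGetDataset(TestCase):
    @mock.patch("api.requests.get")
    def test_get_dataset(self, mocked_get):
        # fake only what the function touches: the .text attribute
        mocked_get.return_value = mock.Mock(text=["train.csv"])
        filename = api.get_dataset()
        mocked_get.assert_called_once_with("http://api:5001/get_trainingdata")
        self.assertEqual(filename, "train.csv")
Because only requests.get is replaced, every line of get_dataset executes and shows up as covered.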

Database read-only settings are not loaded from a conftest.py in a subfolder

The thing is that I need to have a conftest with a special setting for a set of tests: to use a live database in read-only mode only for these tests. The other tests in the unit folder work with an empty db created by pytest. If I run pytest tests or pytest tests/unit, the conftest from tests_readonly_db is not recognized. It is recognized when I put the conftest file in the unit folder and run either of these two commands.
But it works perfectly when I run pytest tests/unit/tests_readonly_db with the conftest in the tests_readonly_db folder. I also tried creating another subfolder in tests_readonly_db and putting the tests in there, with conftest.py staying one level up. That doesn't work either.
I wonder if there is a way to implement the desired behaviour. I found some related answers, but couldn't fully understand how they help in my case. For example:
https://stackoverflow.com/a/13686206/7744657
But if, as that answer says,
conftest.py files from sub directories are by default not loaded at tool startup.
why was it loaded from the unit folder?
conftest.py
import pytest

@pytest.fixture(scope='session')
def django_db_setup():
    """Avoid creating/setting up the test database"""
    pass

@pytest.fixture
def db_access_without_rollback_and_truncate(request, django_db_setup, django_db_blocker):
    django_db_blocker.unblock()
    request.addfinalizer(django_db_blocker.restore)
App structure: (project layout image not reproduced here)
EDIT
I've just tested this with a simple and stupid fixture, and it works. So it's actually a problem with these specific database settings not being discovered in some cases. This simple fixture seems to work in any case.
import pytest

@pytest.fixture(scope='session')
def django_db_setup():
    """Avoid creating/setting up the test database"""
    pass

@pytest.fixture
def db_access_without_rollback_and_truncate(request, django_db_setup, django_db_blocker):
    django_db_blocker.unblock()
    request.addfinalizer(django_db_blocker.restore)

@pytest.fixture
def msg():
    return 'AAAAAAAAAAA----------------------------------------AAAAAAAAAA'
tests_views.py
import pytest
from django.urls import reverse

@pytest.mark.django_db()
@pytest.mark.parametrize('uri, args', [
    # some parameters.....
])
def test_views(uri, args, client, msg):
    print(msg)
    username = "john doe"
    password = "123456"
    client.login(username=username, password=password)
    if args:
        response = client.get(reverse(uri, args=args))
    else:
        response = client.get(reverse(uri))
    assert response.status_code == 200
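For reference, a conftest.py applies to every test collected at or below the directory it lives in, so fixtures meant only for the read-only tests should sit in a conftest.py inside tests_readonly_db. A generic layout (the file names below are illustrative, not the asker's actual structure):
tests/
    conftest.py                # fixtures shared by all tests
    unit/
        test_plain.py          # uses the empty test database
        tests_readonly_db/
            conftest.py        # read-only django_db_setup override
            tests_views.py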

Proper way to organize testcases that involve a data file for each testcase?

I'm writing a module that involves parsing html for data and creating an object from it. Basically, I want to create a set of testcases where each case is an html file paired with a golden/expected pickled object file.
As I make changes to the parser, I would like to run this test suite to ensure that each html page is parsed to equal the 'golden' file (essentially a regression suite)
I can see how to code this as a single test case, where I would load all file pairs from some directory and then iterate through them. But I believe this would end up being reported as a single test case, pass or fail, whereas I want a report that says, for example, 45/47 pages parsed successfully.
How do I arrange this?
I've done similar things with the unittest framework by writing a function which creates and returns a test class. This function can then take in whatever parameters you want and customise the test class accordingly. You can also customise the __doc__ attribute of the test function(s) to get customised messages when running the tests.
I quickly knocked up the following example code to illustrate this. Instead of doing any actual testing, it uses the random module to fail some tests for demonstration purposes. When created, the classes are inserted into the global namespace so that a call to unittest.main() will pick them up. Depending on how you run your tests, you may wish to do something different with the generated classes.
import os
import unittest

# Generate a test class for an individual file.
def make_test(filename):
    class TestClass(unittest.TestCase):
        def test_file(self):
            # Do the actual testing here.
            # parsed = do_my_parsing(filename)
            # golden = load_golden(filename)
            # self.assertEquals(parsed, golden, 'Parsing failed.')

            # Randomly fail some tests.
            import random
            if not random.randint(0, 10):
                self.assertEquals(0, 1, 'Parsing failed.')

        # Set the docstring so we get nice test messages.
        test_file.__doc__ = 'Test parsing of %s' % filename

    return TestClass

# Create a single file test.
Test1 = make_test('file1.html')

# Create several tests from a list.
for i in range(2, 5):
    globals()['Test%d' % i] = make_test('file%d.html' % i)

# Create them from a directory listing.
for dirname, subdirs, filenames in os.walk('tests'):
    for f in filenames:
        globals()['Test%s' % f] = make_test('%s/%s' % (dirname, f))

# If this file is being run, run all the tests.
if __name__ == '__main__':
    unittest.main()
A sample run:
$ python tests.py -v
Test parsing of file1.html ... ok
Test parsing of file2.html ... ok
Test parsing of file3.html ... ok
Test parsing of file4.html ... ok
Test parsing of tests/file5.html ... ok
Test parsing of tests/file6.html ... FAIL
Test parsing of tests/file7.html ... ok
Test parsing of tests/file8.html ... ok
======================================================================
FAIL: Test parsing of tests/file6.html
----------------------------------------------------------------------
Traceback (most recent call last):
File "generic.py", line 16, in test_file
self.assertEquals(0, 1, 'Parsing failed.')
AssertionError: Parsing failed.
----------------------------------------------------------------------
Ran 8 tests in 0.004s
FAILED (failures=1)
The nose testing framework supports this. http://www.somethingaboutorange.com/mrl/projects/nose/
Also see here: How to generate dynamic (parametrized) unit tests in python?
Here's what I would do (untested):
import os
import unittest

files = os.listdir("/path/to/dir")

class SomeTests(unittest.TestCase):
    def _compare_files(self, file_name):
        with open('/path/to/dir/%s-golden' % file_name, 'r') as golden:
            with open('/path/to/dir/%s-trial' % file_name, 'r') as trial:
                assert golden.read() == trial.read()

def test_generator(file_name):
    def test(self):
        self._compare_files(file_name)
    return test

if __name__ == '__main__':
    for file_name in files:
        test_name = 'test_%s' % file_name
        test = test_generator(file_name)
        setattr(SomeTests, test_name, test)
    unittest.main()
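With pytest, which the rest of this page uses, the same per-file reporting comes from a single parametrized test. A minimal sketch, reusing the /path/to/dir placeholder and the -golden/-trial naming from the answer above:
import os
import pytest

DATA_DIR = "/path/to/dir"  # placeholder directory with the file pairs
FILE_NAMES = sorted(os.listdir(DATA_DIR)) if os.path.isdir(DATA_DIR) else []

@pytest.mark.parametrize("file_name", FILE_NAMES)
def test_compare_files(file_name):
    with open(os.path.join(DATA_DIR, '%s-golden' % file_name)) as golden:
        with open(os.path.join(DATA_DIR, '%s-trial' % file_name)) as trial:
            assert golden.read() == trial.read()
Each file appears as its own test in the report, so a run ends with something like "45 passed, 2 failed" instead of a single pass/fail.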
