I made a pytest test which tests all files in a given directory.
@pytest.mark.dir
def test_dir(target_dir):
    for filename in os.listdir(target_dir):
        test_single(filename)

def test_single(filename):
    ...
    ...
    assert good or bad
The target_dir is supplied from the command line:
pytest -m dir --target_dir=/path/to/my_dir
pytest_addoption() is used to parse the command line (code is omitted for clarity).
The output from the test gives a single pass/fail mark even though test_single() runs hundreds of times. Would it be possible to get a pass/fail mark for each file?
I think the way to go is to parametrize your test function so that target_dir is effectively split into individual files exposed through a filename fixture:
# conftest.py
import os

def pytest_addoption(parser):
    parser.addoption("--target_dir", action="store")

def pytest_generate_tests(metafunc):
    option_value = metafunc.config.option.target_dir
    if "filename" in metafunc.fixturenames and option_value is not None:
        metafunc.parametrize("filename", os.listdir(option_value))

# test.py
import pytest

@pytest.mark.dir
def test_file(filename):
    # insert your assertions
    pass
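Run it the same way as before; with the parametrization in place each file shows up as its own test case in the report, roughly like this (hypothetical filenames, verbose output):

pytest -m dir --target_dir=/path/to/my_dir -v
# test.py::test_file[file_a.txt] PASSED
# test.py::test_file[file_b.txt] FAILED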
I'm writing a test for this function:
def create_folder_if_not_exists(
        sdk: looker_sdk,
        folder_name: str,
        parent_folder_name: str) -> dict:
    folder = sdk.search_folders(name=folder_name)[0]
    try:
        parent_id = sdk.search_folders(name=parent_folder_name)[0].id
        logger.info(f'Creating folder "{folder_name}"')
        folder = sdk.create_folder(
            body=models.CreateFolder(
                name=folder_name,
                parent_id=parent_id
            )
        )
        return folder
    except looker_sdk.error.SDKError as err:
        logger.error(err.args[0])
        return folder
This is my current test, using the Python pytest library, but I keep getting this failure: Failed: DID NOT RAISE <class 'looker_sdk.error.SDKError'>
def test_create_folder_if_not_exists_parent1(mocker):
    # Tests that if a folder has a parent id of 1, we raise an exception
    sdk = fake_methods_data.MockSDK()
    sf_data = fake_methods_data.MockSearchFolder(
        name='goog', parent_id=1, id=3)
    mocker.patch.object(sdk, "search_folders")
    mocker.patch.object(sdk, "create_folder",
                        side_effect=[looker_sdk.error.SDKError])
    sdk.search_folders.return_value = [sf_data]
    with pytest.raises(looker_sdk.error.SDKError) as err:
        test = fc.create_folder_if_not_exists(
            sdk=sdk, folder_name='googn', parent_folder_name='1')
    assert str(err.value) == 'test'
    assert test.parent_id == 1
    assert test.name == 'googn'
Does anyone know how to force a function to raise an error class using pytest? I've been looking at this Stack Overflow question (Mocking a function to raise an Exception to test an except block) but am struggling to get it to work. Hoping for some other thoughts.
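(As background, the side_effect mechanism from that linked question, reduced to a self-contained sketch with hypothetical names rather than the code above, looks like this:)

from unittest import mock

class BoomError(Exception):
    """Hypothetical stand-in for an SDK error class."""

def create_or_fall_back(client):
    try:
        return client.create_folder()
    except BoomError:
        return "fallback"

def test_create_or_fall_back_handles_error():
    client = mock.Mock()
    # side_effect makes the mocked method raise instead of returning a value
    client.create_folder.side_effect = BoomError("boom")
    assert create_or_fall_back(client) == "fallback"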
This sounds like something I have done for work (open-source software dev stuff). In my case, I needed to test an except block raised when an executable file could not be run on a particular OS version. In our testing framework we use pytest and monkeypatch to test things. I've included the relevant bits of code below, along with some explanation about what is happening. I think this is probably what you mean by 'patch the sdk error', and I believe that is probably what you need to do. If anything is unclear, or you have more questions, let me know.
In conftest.py I define pytest fixtures that get used for tests in more than one test file. Here, I mock the scenario I want to test, using monkeypatch to fake the results I want from the parts of the get_version() function I'm not trying to test.
# conftest.py
import subprocess
import shutil
import os
import re
import platform
from pathlib import Path
import pytest
@pytest.fixture
def executable_incompatible_with_os(monkeypatch):
    """
    Mocks an executable file that is incompatible with the OS.
    (This situation likely only applies to blastall.)
    """
    def mock_which(*args, **kwargs):
        """Mock an absolute file path."""
        return args[0]

    def mock_isfile(*args, **kwargs):
        """Mock a call to `os.path.isfile()`."""
        return True

    def mock_access(*args, **kwargs):
        """Mock a call to `os.access()`."""
        return True

    def mock_subprocess(*args, **kwargs):
        """Mock a call to `subprocess.run()` with an incompatible program."""
        raise OSError

    # Replace calls to existing methods with my mocked versions
    monkeypatch.setattr(shutil, "which", mock_which)
    monkeypatch.setattr(Path, "is_file", mock_isfile)
    monkeypatch.setattr(os.path, "isfile", mock_isfile)
    monkeypatch.setattr(os, "access", mock_access)
    monkeypatch.setattr(subprocess, "run", mock_subprocess)
In test_aniblastall.py I test parts of aniblastall.py. In this case, I'm testing the behaviour when an OSError is raised; the code that raises the error in the test is in conftest.py. The entire pytest fixture I defined there is passed as a parameter to the test.
# test_aniblastall.py
from pathlib import Path
import unittest

import aniblastall  # module under test (import omitted in the original excerpt)

# Test case 4: there is an executable file, but it will not run on the OS
def test_get_version_os_incompatible(executable_incompatible_with_os):
    """Test behaviour when the program can't run on the operating system.
    This will happen with newer versions of MacOS."""
    test_file_4 = Path("/os/incompatible/blastall")
    assert (
        aniblastall.get_version(test_file_4)
        == f"blastall exists at {test_file_4} but could not be executed"
    )
aniblastall.py contains the function the error should be raised from.
# aniblastall.py
import logging
import os
import platform
import re
import shutil
import subprocess
from pathlib import Path

def get_version(blast_exe: Path = pyani_config.BLASTALL_DEFAULT) -> str:
    """
    The following circumstances are explicitly reported as strings
    - no executable at passed path
    - non-executable file at passed path (this includes cases where the user doesn't have execute permissions on the file)
    - no version info returned
    - executable cannot be run on this OS
    """
    logger = logging.getLogger(__name__)
    try:
        blastall_path = Path(shutil.which(blast_exe))  # type:ignore
    except TypeError:
        return f"{blast_exe} is not found in $PATH"
    if not blastall_path.is_file():  # no executable
        return f"No blastall at {blastall_path}"
    # This should catch cases when the file can't be executed by the user
    if not os.access(blastall_path, os.X_OK):  # file exists but not executable
        return f"blastall exists at {blastall_path} but not executable"
    if platform.system() == "Darwin":
        cmdline = [blast_exe, "-version"]
    else:
        cmdline = [blast_exe]
    try:
        result = subprocess.run(
            cmdline,  # type: ignore
            shell=False,
            stdout=subprocess.PIPE,  # type: ignore
            stderr=subprocess.PIPE,
            check=False,  # blastall doesn't return 0
        )
    except OSError:
        logger.warning("blastall executable will not run", exc_info=True)
        return f"blastall exists at {blastall_path} but could not be executed"
    version = re.search(  # type: ignore
        r"(?<=blastall\s)[0-9\.]*", str(result.stderr, "utf-8")
    ).group()
    if 0 == len(version.strip()):
        return f"blastall exists at {blastall_path} but could not retrieve version"
    return f"{platform.system()}_{version} ({blastall_path})"
This is super valuable @baileythegreen, however my problem was far simpler. I had an if/else, and the else held the try/except error handling piece. I was so focused on that, I didn't check the simple part: whether it was even getting to the else. :(
How do I create a new pytest command line flag that takes an argument?
For example, I have the following:

test_A.py::test_a
@pytest.mark.lvl1
def test_a(): ...
.
.
.
test_B.py::test_b
@pytest.mark.lvl10
def test_b(): ...

test_C.py::test_c
@pytest.mark.lvl20
def test_c(): ...
On the command line I only want to run tests with marker levels less than or equal to lvl10 (so lvl1 to lvl10 tests).
How can I do this without having to manually type pytest -m 'lvl1 or lvl2 or lvl3 ...' on the command line?
I want to create a new command line pytest arg like:
pytest --lte="lvl10" (lte meaning less than or equal)
I was thinking along the lines of defining the --lte flag to do the following:
markers = []
Do a pytest collect to find all tests that have a marker containing 'lvl', and add that marker to the markers list only if the integer after 'lvl' is less than or equal to 10 (lvl10). Then call pytest -m on that list of markers ('lvl1 or lvl2 or lvl3 ...').
If you modify your marker to accept the level as an argument, you can then run all tests at or below the specified level by adding a custom pytest_runtest_setup to your conftest.py.
Sample Test
@pytest.mark.lvl(1)
def test_a():
    ...
conftest.py
import pytest

def pytest_addoption(parser):
    parser.addoption(
        "--level", type=int, action="store", metavar="num",
        help="only run tests matching the specified level or lower",
    )

def pytest_configure(config):
    # register the "lvl" marker
    config.addinivalue_line(
        "markers", "lvl(num): mark test with the level it belongs to"
    )

def pytest_runtest_setup(item):
    req_level = item.config.getoption("--level")
    if req_level is None:
        return  # no --level given: run everything
    marker = next(item.iter_markers(name="lvl"), None)
    if marker is None:
        return  # unmarked tests always run
    test_level = marker.args[0]
    if test_level > req_level:
        pytest.skip(f"only tests with level <= {req_level} were requested")
Sample invocation
pytest --level 10
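If you would rather have out-of-range tests deselected at collection time (so they do not show up as skipped), here is a sketch of the same idea using pytest_collection_modifyitems, with the same --level option and lvl marker as above:

def pytest_collection_modifyitems(config, items):
    req_level = config.getoption("--level")
    if req_level is None:
        return
    selected, deselected = [], []
    for item in items:
        marker = next(item.iter_markers(name="lvl"), None)
        if marker is not None and marker.args[0] > req_level:
            deselected.append(item)
        else:
            selected.append(item)
    if deselected:
        # report the deselection and trim the collected test list
        config.hook.pytest_deselected(items=deselected)
        items[:] = selected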
I'm trying to test file parsing with pytest. I have a directory tree that looks something like this for my project:
project
    project/
        cool_code.py
    setup.py
    setup.cfg
    test/
        test_read_files.py
        test_files/
            data_file1.txt
            data_file2.txt
My setup.py file looks something like this:
from setuptools import setup

setup(
    name = 'project',
    description = 'The coolest project ever!',
    setup_requires = ['pytest-runner'],
    tests_require = ['pytest'],
)
My setup.cfg file looks something like this:
[aliases]
test=pytest
I've written several unit tests with pytest to verify that files are properly read. They work fine when I run pytest from within the "test" directory. However, if I execute any of the following from my project directory, the tests fail because they cannot find data files in test_files:
>> py.test
>> python setup.py pytest
The test seems to be sensitive to the directory from which pytest is executed.
How can I get pytest unit tests to discover the files in "test_files" for parsing when I call it from either the test directory or the project root directory?
One solution is to define a rootdir fixture with the path to the test directory, and reference all data files relative to this. This can be done by creating a test/conftest.py (if not already created) with some code like this:
import os
import pytest
@pytest.fixture
def rootdir():
    return os.path.dirname(os.path.abspath(__file__))
Then use os.path.join in your tests to get absolute paths to test files:
import os
def test_read_favorite_color(rootdir):
    test_file = os.path.join(rootdir, 'test_files/favorite_color.csv')
    data = read_favorite_color(test_file)
    # ...
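The same idea can be written with pathlib if you prefer path objects (a sketch; it keeps the rootdir fixture name from above):

# test/conftest.py
from pathlib import Path
import pytest

@pytest.fixture
def rootdir():
    # directory containing this conftest.py, regardless of where pytest is invoked
    return Path(__file__).resolve().parent

# in a test module
def test_read_favorite_color(rootdir):
    test_file = rootdir / "test_files" / "favorite_color.csv"
    data = read_favorite_color(test_file)
    # ...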
One solution is to try multiple paths to find the files.
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from coolprogram import *
import os

def test_file_locations():
    """Possible locations where test data could be found."""
    return(['./test_files',
            './tests/test_files',
            ])

def find_file(filename):
    """ Searches for a data file to use in tests """
    for location in test_file_locations():
        filepath = os.path.join(location, filename)
        if os.path.exists(filepath):
            return(filepath)
    raise IOError('Could not find test file.')

def test_read_favorite_color():
    """ Test that favorite color is read properly """
    filename = 'favorite_color.csv'
    test_file = find_file(filename)
    data = read_favorite_color(test_file)
    assert(data['first_name'][1] == 'King')
    assert(data['last_name'][1] == 'Arthur')
    assert(data['correct_answers'][1] == 2)
    assert(data['cross_bridge'][1] == True)
    assert(data['favorite_color'][1] == 'green')
One way is to pass a dictionary mapping the command name to a custom command class via the cmdclass argument of the setup function, as sketched below.
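A rough sketch of that cmdclass approach (assuming pytest is installed; setuptools' built-in test command is deprecated in recent versions, so treat this as illustrative rather than recommended):

# setup.py
import sys
from setuptools import setup
from setuptools.command.test import test as TestCommand

class PyTest(TestCommand):
    """Run the test suite with pytest when `python setup.py test` is invoked."""
    def run_tests(self):
        import pytest
        errno = pytest.main([])  # pass extra pytest args here if needed
        sys.exit(errno)

setup(
    name='project',
    cmdclass={'test': PyTest},
    tests_require=['pytest'],
)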
Another way is described in the pytest-runner documentation; I've posted it here for quick reference.
pytest-runner will install itself on every invocation of setup.py. In some cases, this causes delays for invocations of setup.py that will never invoke pytest-runner. To help avoid this contingency, consider requiring pytest-runner only when pytest is invoked:
import sys
from setuptools import setup

needs_pytest = {'pytest', 'test', 'ptr'}.intersection(sys.argv)
pytest_runner = ['pytest-runner'] if needs_pytest else []
# ...
setup(
    #...
    setup_requires=[
        #... (other setup requirements)
    ] + pytest_runner,
)
Make sure all the data you read in your test module is specified relative to the location of the setup.py directory.
In the OP's case the data file path would be test/test_files/data_file1.txt.
I made a project with the same structure, read data_file1.txt with some text in it, and it works for me, as in the sketch below.
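A minimal sketch of that (hypothetical test; it assumes pytest is invoked from the project root):

# test/test_read_files.py
import os

def test_data_file_has_content():
    # path relative to the directory pytest is run from (the project root)
    data_file = os.path.join('test', 'test_files', 'data_file1.txt')
    with open(data_file) as fh:
        assert fh.read().strip() != ''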
I am taking the value of setup from a command line argument while running tests with the py.test framework.
group.addoption("--setup", "--sC=", action="store", dest="setup", help="setup.")

def pytest_configure(config):
    print "config.option.setup: ", config.option.setup
Here I am able to get the setup file name with config.option.setup, but I want to fetch the same file name that I pass here from within my test script.
If I put the same line in my test script, I get the error below:
> print "config.option.setup_config: ", config.option.setup_config
E NameError: global name 'config' is not defined
Can someone please let me know how I can access config.option.setup in my test script?
pytest_configure must be in the file conftest.py. See example:
option = None

def pytest_addoption(parser):
    parser.addoption("--setup", "--sC=", action="store", dest="setup", help="setup.")

def pytest_configure(config):
    global option
    option = config.option
    print "config.option.setup: ", config.option.setup
You have to create a fixture that extracts this value from pytest's request.
# content of conftest.py
import pytest
def pytest_addoption(parser):
    parser.addoption("--setup", action="store", help="setup.")

@pytest.fixture
def setup_option(request):
    return request.config.getoption("--setup")

# basic usage:
# content of test_anything.py
def test_that(setup_option):
    print("setup_option: %s" % setup_option)
Where and how does py.test look for fixtures? I have the same code in two files in the same folder. When I delete conftest.py, cmdopt cannot be found when running test_conf.py (also in the same folder). Why is sonoftest.py not searched?
# content of test_sample.py
def test_answer(cmdopt):
    if cmdopt == "type1":
        print("first")
    elif cmdopt == "type2":
        print("second")
    assert 0  # to see what was printed
content of conftest.py
import pytest
def pytest_addoption(parser):
    parser.addoption("--cmdopt", action="store", default="type1",
                     help="my option: type1 or type2")

@pytest.fixture
def cmdopt(request):
    return request.config.getoption("--cmdopt")
content of sonoftest.py
import pytest
def pytest_addoption(parser):
    parser.addoption("--cmdopt", action="store", default="type1",
                     help="my option: type1 or type2")

@pytest.fixture
def cmdopt(request):
    return request.config.getoption("--cmdopt")
The docs say (http://pytest.org/latest/fixture.html#fixture-function):

1. pytest finds the test_ehlo because of the test_ prefix. The test function needs a function argument named smtp. A matching fixture function is discovered by looking for a fixture-marked function named smtp.
2. smtp() is called to create an instance.
3. test_ehlo() is called and fails in the last line of the test function.
py.test will import conftest.py and all Python files that match the python_files pattern, by default test_*.py. If you have a test fixture, you need to include or import it from conftest.py or from the test files that depend on it:
from sonoftest import pytest_addoption, cmdopt
Here is the order in which py.test looks for fixtures (and tests) (taken from here):

py.test loads plugin modules at tool startup in the following way:

- by loading all builtin plugins
- by loading all plugins registered through setuptools entry points
- by pre-scanning the command line for the -p name option and loading the specified plugin before actual command line parsing
- by loading all conftest.py files as inferred by the command line invocation (test files and all of their parent directories); note that conftest.py files from sub directories are by default not loaded at tool startup
- by recursively loading all plugins specified by the pytest_plugins variable in conftest.py files
I had the same issue and spent a lot of time finding a simple solution; this example is for others in a similar situation.
conftest.py:
import pytest
pytest_plugins = [
    "some_package.sonoftest"
]

def pytest_addoption(parser):
    parser.addoption("--cmdopt", action="store", default="type1",
                     help="my option: type1 or type2")

@pytest.fixture
def cmdopt(request):
    return request.config.getoption("--cmdopt")
some_package/sonoftest.py:
import pytest

@pytest.fixture
def sono_cmdopt(request):
    return request.config.getoption("--cmdopt")
some_package/test_sample.py
def test_answer1(cmdopt):
    if cmdopt == "type1":
        print("first")
    elif cmdopt == "type2":
        print("second")
    assert 0  # to see what was printed

def test_answer2(sono_cmdopt):
    if sono_cmdopt == "type1":
        print("first")
    elif sono_cmdopt == "type2":
        print("second")
    assert 0  # to see what was printed
You can find a similar example here: https://github.com/pytest-dev/pytest/issues/3039#issuecomment-464489204
and another here: https://stackoverflow.com/a/54736376/6655459
Description from official pytest documentation: https://docs.pytest.org/en/latest/reference.html?highlight=pytest_plugins#pytest-plugins
Note that the respective directories referred to in some_package.test_sample need to have __init__.py files for the plugins to be loaded by pytest.