I've written a small module for work and I'm adding some unit testing. The module writes files that need to be read back in for testing, so I'm using tempfile.TemporaryDirectory(). I create the temporary directory in the setUp() method of my test class and tear it down in the tearDown() method, because I obviously don't want to trash the file structure when the tests run in production. My understanding is that setUp() and tearDown() run before and after each test; while this adds some overhead (each test takes about 0.5 seconds to run), I'm not all that worried about the time.
My test class
import os
import tempfile
import unittest

import numpy as np
from pptx import Presentation

class MyUtilsTest(unittest.TestCase):
    def setUp(self) -> None:
        self.dir_name = tempfile.TemporaryDirectory()
        file_name = "file_used_for_testing.pptx"
        self.presentation_path = self.dir_name.name + "/" + file_name
        # Do some things, including writing some stuff to the temporary directory

    def tearDown(self) -> None:
        self.dir_name.cleanup()

    def test1(self) -> None:
        self.assertTrue(
            os.path.isfile(self.presentation_path), f"{self.presentation_path} exists."
        )

    def test2(self) -> None:
        prs = Presentation(self.presentation_path)
        self.assertEqual(len(prs.slides), 4)

    def test3(self) -> None:
        prs = Presentation(pptx=self.presentation_path)
        titles = [prs.slides[s].placeholders[0].text for s in np.arange(0, 4)]
        self.assertEqual(
            titles, ["Test title", "Image title", "Table title", "Bullet title"]
        )

    # More tests that rely on files in self.dir_name here
When I have my test class set up like this, all of my tests run and pass without issue, but I get a linter warning.
I'm new-ish to writing software-like code in Python, and in researching this linter warning I ran into several questions on SO (e.g., Python: How do I make temporary files in my test suite?). I've tried editing my test class to use with tempfile.TemporaryDirectory() as dir_name:, but when I do, my tests fail. For example, when I edit the setUp() method (and remove tearDown()) to
def setUp(self) -> None:
    with tempfile.TemporaryDirectory() as dir_name:
        file_name = "file_used_for_testing.pptx"
        self.presentation_path = dir_name + "/" + file_name
        # Do some things, including writing some stuff to the temporary directory
the later tests can't find the temp directory; I suspect this is because the temp dir is destroyed when the with block exits at the end of setUp(), before the test itself runs.
How can I keep the context manager open for subsequent tests that rely on the files that are produced in the setUp() method?
Assuming you don't need to recreate the directory for each test case, you can override unittest.TestCase.run() to customize how tests execute (see the docs).
For example:
import tempfile
import unittest

class MyTest(unittest.TestCase):
    def run(self, result=None):
        with tempfile.TemporaryDirectory() as dir_name:
            self.dir_name = dir_name
            # Pass result through so outcomes are recorded on the
            # caller's TestResult rather than a fresh one.
            super().run(result)

    def test(self):
        print(self.dir_name)
        self.assertIsNotNone(self.dir_name)
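Alternatively, if the files only need to be created once for the whole class, setUpClass() with addClassCleanup() (available since Python 3.8) does the same job without overriding run(). A minimal sketch:

import os
import tempfile
import unittest

class MyUtilsTest(unittest.TestCase):
    @classmethod
    def setUpClass(cls) -> None:
        # One directory for the whole class; cleanup is registered
        # immediately, so it runs even if later setup steps fail.
        cls.tmp_dir = tempfile.TemporaryDirectory()
        cls.addClassCleanup(cls.tmp_dir.cleanup)
        # Write the files the tests read back in under cls.tmp_dir.name here

    def test_dir_exists(self) -> None:
        self.assertTrue(os.path.isdir(self.tmp_dir.name))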
This might be a dup, but I couldn't find exactly what I was looking for; feel free to link any previous answer.
I need to write a Python script (bash would also be OK) that continuously watches a directory. When the content of this directory changes (because another program generates a new directory inside it), I want it to automatically run a command line that takes the name of the newly created directory as an argument.
Example:
I need to watch directory /home/tmp/:
the current content of the directory is:
$ ls /home/tmp
Patient Patient2 Patient3
Suddenly, Patient4 dir arrives in /home/tmp.
I want a code that runs automatically
$ my_command --target_dir /home/tmp/Patient4/
I hope I'm clear in explaining what I need.
Thanks
The answer I found works on Linux only, and it makes use of the pyinotify wrapper. Below is the working code:
import os

import pyinotify

# MODEL, OUTPUT and HISTORY are configuration constants defined elsewhere

class EventProcessor(pyinotify.ProcessEvent):
    _methods = ["IN_CREATE",
                # "IN_OPEN",
                # "IN_ACCESS",
                ]

def process_generator(cls, method):
    def _method_name(self, event):
        if event.maskname == "IN_CREATE|IN_ISDIR":
            print(f"Starting pipeline for {event.pathname}")
            os.system(f"clearlung --single --automatic --base_dir {event.pathname} --target_dir CT " +
                      f"--model {MODEL} --subroi --output_dir {OUTPUT} --tag 0 --history_path {HISTORY}")

    _method_name.__name__ = "process_{}".format(method)
    setattr(cls, _method_name.__name__, _method_name)

for method in EventProcessor._methods:
    process_generator(EventProcessor, method)

class PathWatcher:
    """Class to watch for changes."""

    def __init__(self, path_to_watch) -> None:
        """Base constructor."""
        self.path = path_to_watch
        if not os.path.isdir(self.path):
            raise FileNotFoundError()

    def watch(self):
        """Main method of the PathWatcher class."""
        print(f"Waiting for changes in {self.path}...")
        watch_manager = pyinotify.WatchManager()
        event_notifier = pyinotify.Notifier(watch_manager, EventProcessor())
        watch_this = os.path.abspath(self.path)
        watch_manager.add_watch(watch_this, pyinotify.ALL_EVENTS)
        event_notifier.loop()
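Usage is then just (a small sketch, pointed at the /home/tmp directory from the question):

if __name__ == "__main__":
    watcher = PathWatcher("/home/tmp")
    watcher.watch()  # blocks, dispatching events as directories appear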
I'm writing a test for this function:
def create_folder_if_not_exists(
        sdk: looker_sdk,
        folder_name: str,
        parent_folder_name: str) -> dict:
    folder = sdk.search_folders(name=folder_name)[0]
    try:
        parent_id = sdk.search_folders(name=parent_folder_name)[0].id
        logger.info(f'Creating folder "{folder_name}"')
        folder = sdk.create_folder(
            body=models.CreateFolder(
                name=folder_name,
                parent_id=parent_id
            )
        )
        return folder
    except looker_sdk.error.SDKError as err:
        logger.error(err.args[0])
        return folder
This is my current test, using pytest, but I keep getting Failed: DID NOT RAISE <class 'looker_sdk.error.SDKError'>:
def test_create_folder_if_not_exists_parent1(mocker):
    # Tests that if a folder has a parent id of 1 we raise an exception
    sdk = fake_methods_data.MockSDK()
    sf_data = fake_methods_data.MockSearchFolder(
        name='goog', parent_id=1, id=3)
    mocker.patch.object(sdk, "search_folders")
    mocker.patch.object(sdk, "create_folder",
                        side_effect=[looker_sdk.error.SDKError])
    sdk.search_folders.return_value = [sf_data]
    with pytest.raises(looker_sdk.error.SDKError) as err:
        test = fc.create_folder_if_not_exists(
            sdk=sdk, folder_name='googn', parent_folder_name='1')
    assert str(err.value) == 'test'
    assert test.parent_id == 1
    assert test.name == 'googn'
Does anyone know how to force a function to raise a given error class using pytest? I've been looking at this Stack Overflow question (Mocking a function to raise an Exception to test an except block) but am struggling to get it to work. Hoping for some other thoughts.
This sounds like something I have done for work (open-source software dev stuff). In my case, I needed to test an except block raised when an executable file could not be run on a particular OS version. In our testing framework we use pytest and monkeypatch to test things. I've included the relevant bits of code below, along with some explanation about what is happening. I think this is probably what you mean by 'patch the sdk error', and I believe that is probably what you need to do. If anything is unclear, or you have more questions, let me know.
In conftest.py I define pytest fixtures that get used for tests in more than one test file. Here, I mock the scenario I want to test, using monkeypatch to fake the results I want from the parts of the get_version() function I'm not trying to test.
# conftest.py
import subprocess
import shutil
import os
import re
import platform
from pathlib import Path

import pytest

@pytest.fixture
def executable_incompatible_with_os(monkeypatch):
    """
    Mocks an executable file that is incompatible with the OS.
    (This situation likely only applies to blastall.)
    """
    def mock_which(*args, **kwargs):
        """Mock an absolute file path."""
        return args[0]

    def mock_isfile(*args, **kwargs):
        """Mock a call to `os.path.isfile()`."""
        return True

    def mock_access(*args, **kwargs):
        """Mock a call to `os.access()`."""
        return True

    def mock_subprocess(*args, **kwargs):
        """Mock a call to `subprocess.run()` with an incompatible program."""
        raise OSError

    # Replace calls to existing methods with my mocked versions
    monkeypatch.setattr(shutil, "which", mock_which)
    monkeypatch.setattr(Path, "is_file", mock_isfile)
    monkeypatch.setattr(os.path, "isfile", mock_isfile)
    monkeypatch.setattr(os, "access", mock_access)
    monkeypatch.setattr(subprocess, "run", mock_subprocess)
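One detail worth noting: monkeypatch automatically undoes all of these setattr() replacements when the test that requested the fixture finishes, so the real shutil.which, subprocess.run, and friends are restored without any explicit teardown.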
In test_aniblastall.py I test parts of aniblastall.py. In this case, I'm testing the behaviour when an OSError is raised; the code that raises the error in the test is in conftest.py. The entire pytest fixture I defined there is passed as a parameter to the test.
# test_aniblastall.py
from pathlib import Path
import unittest

import aniblastall

# Test case 4: there is an executable file, but it will not run on the OS
def test_get_version_os_incompatible(executable_incompatible_with_os):
    """Test behaviour when the program can't run on the operating system.

    This will happen with newer versions of MacOS."""
    test_file_4 = Path("/os/incompatible/blastall")
    assert (
        aniblastall.get_version(test_file_4)
        == f"blastall exists at {test_file_4} but could not be executed"
    )
aniblastall.py contains the function the error should be raised from.
# aniblastall.py
import logging
import os
import platform
import re
import shutil
import subprocess
from pathlib import Path

def get_version(blast_exe: Path = pyani_config.BLASTALL_DEFAULT) -> str:
    """
    The following circumstances are explicitly reported as strings:

    - no executable at passed path
    - non-executable file at passed path (this includes cases where the user
      doesn't have execute permissions on the file)
    - no version info returned
    - executable cannot be run on this OS
    """
    logger = logging.getLogger(__name__)

    try:
        blastall_path = Path(shutil.which(blast_exe))  # type:ignore
    except TypeError:
        return f"{blast_exe} is not found in $PATH"

    if not blastall_path.is_file():  # no executable
        return f"No blastall at {blastall_path}"

    # This should catch cases when the file can't be executed by the user
    if not os.access(blastall_path, os.X_OK):  # file exists but not executable
        return f"blastall exists at {blastall_path} but not executable"

    if platform.system() == "Darwin":
        cmdline = [blast_exe, "-version"]
    else:
        cmdline = [blast_exe]

    try:
        result = subprocess.run(
            cmdline,  # type: ignore
            shell=False,
            stdout=subprocess.PIPE,  # type: ignore
            stderr=subprocess.PIPE,
            check=False,  # blastall doesn't return 0
        )
    except OSError:
        logger.warning("blastall executable will not run", exc_info=True)
        return f"blastall exists at {blastall_path} but could not be executed"

    version = re.search(  # type: ignore
        r"(?<=blastall\s)[0-9\.]*", str(result.stderr, "utf-8")
    ).group()

    if 0 == len(version.strip()):
        return f"blastall exists at {blastall_path} but could not retrieve version"

    return f"{platform.system()}_{version} ({blastall_path})"
This is super valuable @baileythegreen; however, my problem was far simpler. I had an if/else, and the else had the try/except error-handling piece. I was so focused on that, I didn't check the simple part of whether it was even getting to the else. :(
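For reference, the core of what the original test attempts, forcing a patched method to raise, can be boiled down to a very small sketch (this uses pytest-mock's mocker fixture; ValueError stands in for the SDK error class):

import pytest

def test_patched_method_raises(mocker):
    sdk = mocker.Mock()
    # An exception class or instance assigned to side_effect is raised on call
    sdk.create_folder.side_effect = ValueError("boom")
    with pytest.raises(ValueError, match="boom"):
        sdk.create_folder()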
I made a pytest test which checks all files in a given directory.
@pytest.mark.dir
def test_dir(target_dir):
    for filename in os.listdir(target_dir):
        test_single(filename)

def test_single(filename):
    ...
    ...
    assert (good or bad)
The target_dir is supplied from the command line:
pytest -m dir --target_dir=/path/to/my_dir
pytest_addoption() is used to parse the command line (code omitted for clarity).
The output from the test gives a single pass/fail mark even though test_single() runs hundreds of times. Would it be possible to get a pass/fail mark for each file?
I think the way to go is to parametrize your test function so that target_dir is effectively split into individual files in a fixture filename:
# conftest.py
import os

def pytest_addoption(parser):
    parser.addoption("--target_dir", action="store")

def pytest_generate_tests(metafunc):
    option_value = metafunc.config.option.target_dir
    if "filename" in metafunc.fixturenames and option_value is not None:
        metafunc.parametrize("filename", os.listdir(option_value))

# test.py
import pytest

@pytest.mark.dir
def test_file(filename):
    # insert your assertions
    pass
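Each file then appears as its own test item (e.g. test_file[data_file1.txt]), so pytest -m dir --target_dir=/path/to/my_dir reports a separate pass/fail per file.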
I'm trying to test file parsing with pytest. I have a directory tree that looks something like this for my project:
project
    project/
        cool_code.py
    setup.py
    setup.cfg
    test/
        test_read_files.py
        test_files/
            data_file1.txt
            data_file2.txt
My setup.py file looks something like this:
from setuptools import setup

setup(
    name='project',
    description='The coolest project ever!',
    setup_requires=['pytest-runner'],
    tests_require=['pytest'],
)
My setup.cfg file looks something like this:
[aliases]
test=pytest
I've written several unit tests with pytest to verify that files are properly read. They work fine when I run pytest from within the "test" directory. However, if I execute any of the following from my project directory, the tests fail because they cannot find data files in test_files:
>> py.test
>> python setup.py pytest
The test seems to be sensitive to the directory from which pytest is executed.
How can I get pytest unit tests to discover the files in "data_files" for parsing when I call it from either the test directory or the project root directory?
One solution is to define a rootdir fixture with the path to the test directory, and reference all data files relative to this. This can be done by creating a test/conftest.py (if not already created) with some code like this:
import os
import pytest

@pytest.fixture
def rootdir():
    return os.path.dirname(os.path.abspath(__file__))
Then use os.path.join in your tests to get absolute paths to test files:
import os

def test_read_favorite_color(rootdir):
    test_file = os.path.join(rootdir, 'test_files/favorite_color.csv')
    data = read_favorite_color(test_file)
    # ...
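The same idea reads a little more naturally with pathlib (an equivalent sketch; read_favorite_color is the function under test from the question):

# conftest.py
from pathlib import Path
import pytest

@pytest.fixture
def rootdir() -> Path:
    # Directory containing this conftest.py, i.e. the test directory
    return Path(__file__).resolve().parent

# test_read_files.py
def test_read_favorite_color(rootdir):
    test_file = rootdir / "test_files" / "favorite_color.csv"
    data = read_favorite_color(test_file)
    # ...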
One solution is to try multiple paths to find the files.
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from coolprogram import *
import os

def test_file_locations():
    """Possible locations where test data could be found."""
    return ['./test_files',
            './tests/test_files',
            ]

def find_file(filename):
    """Search for a data file to use in tests."""
    for location in test_file_locations():
        filepath = os.path.join(location, filename)
        if os.path.exists(filepath):
            return filepath
    raise IOError('Could not find test file.')

def test_read_favorite_color():
    """Test that favorite color is read properly."""
    filename = 'favorite_color.csv'
    test_file = find_file(filename)
    data = read_favorite_color(test_file)
    assert data['first_name'][1] == 'King'
    assert data['last_name'][1] == 'Arthur'
    assert data['correct_answers'][1] == 2
    assert data['cross_bridge'][1] == True
    assert data['favorite_color'][1] == 'green'
One way is to pass a dictionary mapping command names to custom command classes as the cmdclass argument of the setup() function.
Another way is the approach below, from the pytest-runner documentation, posted here for quick reference.
pytest-runner will install itself on every invocation of setup.py. In some cases, this causes delays for invocations of setup.py that will never invoke pytest-runner. To help avoid this contingency, consider requiring pytest-runner only when pytest is invoked:
import sys

needs_pytest = {'pytest', 'test', 'ptr'}.intersection(sys.argv)
pytest_runner = ['pytest-runner'] if needs_pytest else []

# ...

setup(
    # ...
    setup_requires=[
        # ... (other setup requirements)
    ] + pytest_runner,
)
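For example, for python setup.py test the intersection is {'test'}, which is truthy, so pytest-runner is added to setup_requires; for python setup.py sdist the intersection is empty and pytest-runner is left out.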
Make sure all the data you read in your test module is relative to the location of the setup.py directory.
In the OP's case the data file path would be test/test_files/data_file1.txt.
I made a project with the same structure, put some text in data_file1.txt, read it back, and it works for me.
I have test files written in Python: test1.py, test2.py, ...
Before executing any of them I need to initialize them with a file called initialize.py that takes arguments.
The testing files must stay as light and easy to write as they can be.
I want to create a script that:
Takes input arguments
Starts the initialize.py file with those arguments
Starts the test file using the variables created by initialize.py
I thought about a few ways:
Import the two files: this doesn't work, because importing lets you use the imported file's variables in your main script, but it gives you no way to pass input arguments to it.
Transform both files into functions: that's not an issue for initialize.py, but as I said, I want to keep the test files as easy and light as possible, so if I can avoid it, all the better.
The perfect solution would be to simply "copy" the code from initialize.py and put it at the beginning of the test file (or the opposite). Maybe create a temporary file containing both pieces of code, but I don't think that's very clean.
To sum up: it's as if I had 100 files that start with the same 25 lines, and I want to put those 25 lines in one file and import them each time.
Another way to see things is with 3 files:
# File1.py
var1 = sys.argv[1]

# File2.py
var2 = var1 + "second"

# File3.py
var3 = var1 + var2 + "third"
print var3
I want to start ./File1.py first
And get "first second third"
I succeeded with:
#! /usr/bin/python
import sys
import subprocess

source_content = "#! /usr/bin/python\n" + "import sys\n"

sourcefile = "file2.py"
txt_file = open(sourcefile)
source_content += txt_file.read()

sourcefile = "file3.py"
txt_file = open(sourcefile)
source_content += txt_file.read()

destinationfile = "Copyfile2.py"
target = open(destinationfile, 'w')
target.write(source_content)
target.close()

chmodFile = "chmod 777 " + destinationfile
chmod = subprocess.Popen(chmodFile, shell=True)
chmod.wait()

arguments = str("./" + destinationfile + " ")
arguments += " ".join(map(str, sys.argv[1:len(sys.argv)]))
startTest = subprocess.Popen(arguments, shell=True)
startTest.wait()
But I had to delete the "#! /usr/bin/python" line from file2.py and file3.py, and rename var1 to sys.argv[1] in those same files.
And I don't think it's a nice solution...
How about you use the unittest module?
import unittest

class BaseTest(unittest.TestCase):
    def initialisation_script_stuff(blah, etc):
        foo

    def setUp(self):
        common_setup_stuff()

    def tearDown(self):
        whatever
Now you can just inherit from BaseTest in each of your test files.
from moo import BaseTest

class CoolTest(BaseTest):
    def setUp(self):
        BaseTest.setUp(self)
        args = BaseTest.initialisation_script_stuff()
        do_stuff(args)

    def testNumberOne(self):
        self.assertEqual(1, 1)
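Each test file then stays tiny. To make one runnable on its own, the usual unittest entry point applies (a sketch; requires import unittest at the top of the file):

if __name__ == "__main__":
    unittest.main()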
Alternatively if you want to stay away from standard unit testing methods...
Assuming the directory structure:
all_tests\
__init__.py
some_tests\
__init__.py
test1.py
test2.py
other _tests\
__init__.py
etc
Some naming conventions:
each test py file has a function named run that runs the test
each test py file has a name starting with 'test'
each of the test grouping folders has a name ending in '_tests'
Make a single script called run_tests.py (or something like that...)
def run_tests():
    import os
    import importlib
    import re

    dTests = {}
    lFolders = [s for s in os.listdir('all_tests') if re.match('.*_tests$', s)]
    for sFolder in lFolders:
        sFolderPath = os.path.join('all_tests', sFolder)
        lTestFileNames = [s for s in os.listdir(sFolderPath) if re.match('^test.*py$', s)]
        for sFileName in lTestFileNames:
            sSubPath = '{0}.{1}'.format(sFolder, sFileName.split('.')[0])
            dTests[sSubPath] = importlib.import_module('all_tests.{0}'.format(sSubPath))

    # now you have all the tests...
    for k in dTests:
        stuff = initialisation_stuff()
        test_result = dTests[k].run(stuff)
        do_whatever_you_want(test_result)

if __name__ == "__main__":
    run_tests()
Now you need absolutely no boilerplate code in your test files, as long as you follow the formula.