Check that a function raises a warning with nose tests

I'm writing unit tests using nose, and I'd like to check whether a function raises a warning (the function uses warnings.warn). Is this something that can easily be done?

def your_code():
    # ...
    warnings.warn("deprecated", DeprecationWarning)
    # ...

def your_test():
    with warnings.catch_warnings(record=True) as w:
        your_code()
        assert len(w) == 1
Instead of just checking the length, you can inspect the recorded warning in depth, of course. In Python 2.7 or later, each recorded entry is a warnings.WarningMessage, so the message check looks like this:
assert str(w[0].message) == "deprecated"
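Each recorded entry also exposes a category attribute, so you can verify the warning class as well; a minimal extra assertion (my addition, not part of the original answer):
assert issubclass(w[0].category, DeprecationWarning)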

There are (at least) two ways of doing this: you can catch the warnings as a list of warnings.WarningMessage objects in the test, or use mock to patch the warnings module imported by your module.
I think the patched version is more general.
raise_warning.py:
import warnings

def should_warn():
    warnings.warn('message', RuntimeWarning)
    print('didn\'t I warn you?')
raise_warning_tests.py:
import unittest
from mock import patch
import raise_warning

class TestWarnings(unittest.TestCase):

    @patch('raise_warning.warnings.warn')
    def test_patched(self, mock_warnings):
        """test with patched warnings"""
        raise_warning.should_warn()
        self.assertTrue(mock_warnings.called)

    def test_that_catches_warning(self):
        """test by catching warning"""
        with raise_warning.warnings.catch_warnings(record=True) as wrn:
            raise_warning.should_warn()
            # per PEP 8, check for empty sequences by their truthiness
            self.assertTrue(wrn)
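If you also need to assert on exactly what was warned, the patched mock records its call arguments; a small extension of TestWarnings (a sketch reusing the names above):

    @patch('raise_warning.warnings.warn')
    def test_patched_args(self, mock_warnings):
        """check the exact message and category passed to warn()"""
        raise_warning.should_warn()
        mock_warnings.assert_called_once_with('message', RuntimeWarning)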


How can I rewrite this fixture call so it won't be called directly?

I defined the following fixture in a test file:
import os

import pytest
from dotenv import load_dotenv, find_dotenv
from packaging import version  # for comparing version numbers

load_dotenv(find_dotenv())
VERSION = os.environ.get("VERSION")
API_URL = os.environ.get("API_URL")

@pytest.fixture()
def skip_before_version():
    """
    Creates a fixture that takes parameters;
    skips a test if it depends on features implemented in a certain version
    :parameter target_version:
    :parameter type: string
    """
    def _skip_before(target_version):
        less_than = version.parse(target_version) < version.parse(VERSION)
        return pytest.mark.skipif(less_than)
    return _skip_before

skip_before = skip_before_version()("0.0.1")
I want to use skip_before as a fixture in certain tests. I call it like this:
# @skip_before_version("0.0.1")  # tried this before and got the same error, so tried reworking it...
@when(parsers.cfparse("{categories} are added as categories"))
def add_categories(skip_before, create_tree, categories):  # now putting the fixture alongside parameters
    pass
When I run this, I get the following error:
Fixture "skip_before_version" called directly. Fixtures are not meant to be called directly,
but are created automatically when test functions request them as parameters.
See https://docs.pytest.org/en/stable/fixture.html for more information about fixtures, and
https://docs.pytest.org/en/stable/deprecations.html#calling-fixtures-directly about how to update your code.
How is this still being called directly? How can I fix this?
If I understand your goal correctly, you want to be able to skip tests based on a version restriction specifier. There are many ways to do that; I can suggest an autouse fixture that will skip the test based on a custom marker condition. Example:
import os

import pytest
from packaging.specifiers import SpecifierSet

VERSION = "1.2.3"  # read from environment etc.

@pytest.fixture(autouse=True)
def skip_based_on_version_compat(request):
    # get the version_compat marker
    version_compat = request.node.get_closest_marker("version_compat")
    if version_compat is None:  # test is not marked
        return
    if not version_compat.args:  # no specifier passed to marker
        return
    spec_arg = version_compat.args[0]
    spec = SpecifierSet(spec_arg)
    if VERSION not in spec:
        pytest.skip(f"Current version {VERSION} doesn't match test specifiers {spec_arg!r}.")
The fixture skip_based_on_version_compat will be invoked for each test, but will only do something if the test is marked with @pytest.mark.version_compat. Example tests:
@pytest.mark.version_compat(">=1.0.0")
def test_current_gen():
    assert True

@pytest.mark.version_compat(">=2.0.0")
def test_next_gen():
    raise NotImplementedError()
With VERSION = "1.2.3", the first test will be executed and the second one will be skipped. Notice the invocation of pytest.skip to immediately skip the test; returning pytest.mark.skip from the fixture would get you nothing, since the markers are evaluated long before the fixture runs.
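One housekeeping note (my addition, not part of the original answer): recent pytest versions warn about unregistered custom markers, so you may also want to declare version_compat in pytest.ini:

[pytest]
markers =
    version_compat(spec): skip the test unless the current version matches the specifier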
Also, I noticed you are writing gherkin tests (using pytest-bdd, presumably). With the above approach, skipping whole scenarios should also be possible:
@pytest.mark.version_compat(">=1.0.0")
@scenario("my.feature", "my scenario")
def test_scenario():
    pass
Alternatively, you can mark the scenarios in feature files:
Feature: Foo
    Lorem ipsum dolor sit amet.

    @version_compat(">=1.0.0")
    Scenario: doing future stuff
        Given foo is implemented
        When I see foo
        Then I do bar
and use pytest-bdd's own hooks, e.g. in conftest.py:
import re

import pytest
from packaging.specifiers import SpecifierSet

def pytest_bdd_apply_tag(tag, function):
    matcher = re.match(r'^version_compat\("(?P<spec_arg>.*)"\)$', tag)
    if matcher is None:
        return None  # not our tag; fall back to pytest-bdd's default behavior
    spec_arg = matcher.groupdict()["spec_arg"]
    spec = SpecifierSet(spec_arg)
    if VERSION not in spec:
        marker = pytest.mark.skip(
            reason=f"Current version {VERSION} doesn't match restriction {spec_arg!r}."
        )
        marker(function)
    return True
Unfortunately, neither custom fixtures nor markers will work for skipping single steps (and you would still be skipping the whole scenario anyway, since a scenario is an atomic test unit in gherkin). I didn't find a reliable way to befriend pytest-bdd steps with pytest stuff; they look like they are simply ignored. Nevertheless, you can easily write a custom decorator serving the same purpose:
import functools

def version_compat(spec_arg):
    def deco(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            spec = SpecifierSet(spec_arg)
            if VERSION not in spec:
                pytest.skip(f"Current version {VERSION} doesn't match test specifiers {spec_arg!r}.")
            return func(*args, **kwargs)
        return wrapper
    return deco
Using the version_compat decorator in a step:

@when('I am foo')
@version_compat(">=2.0.0")
def i_am_foo():
    ...
Pay attention to the ordering: placing decorators outside of pytest-bdd's own decorators will not trigger them (I guess it's worth opening an issue, but meh).

Python 3 how to write unit tests for try except outside functions in modules

I would like to know how to write Python 3 unittest code for try/except blocks that are defined outside of function definitions in a Python module.
Imagine that in package/module.py I have a block of code like:
import os

try:
    CONSTANT = os.environ['NOT_EXISTING_KEY']
except KeyError:
    CONSTANT = False
finally:
    del os
(Please don't mind the actual code; I know I could have used os.getenv('NOT_EXISTING_KEY', False) in this specific case. What I am interested in is really testing that the try/except block in a module, outside of a function, behaves as expected.)
How can I write a unit test that checks that package.module.CONSTANT is set to the expected value?
In the unittest file (I use pytest) I have something like:
from package.module import CONSTANT

def test_constant_true():
    assert CONSTANT == 'expected_value'
to test that if the try block executed correctly then CONSTANT is as expected.
I don't know, however, how to mock the import machinery so that the os.environ in the try block raises an exception and I can test that CONSTANT is set to False.
How can I do that?
You can use monkeypatch to set the environment variable, but you have to reload the module for the change to take effect:
from importlib import reload

from package import module

def test_constant_true(monkeypatch):
    monkeypatch.setenv('MY_KEY', '42')
    reload(module)
    assert module.CONSTANT == '42'

def test_constant_false(monkeypatch):
    monkeypatch.delenv('MY_KEY', raising=False)  # make sure the key is really unset
    reload(module)
    assert not module.CONSTANT
Given this content of package/module.py:
import os

try:
    CONSTANT = os.environ['MY_KEY']
except KeyError:
    CONSTANT = False
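Since reload() mutates shared module state, tests that run later in the session will see whatever the last reload produced. A defensive sketch (my addition, not part of the answer, reusing module and reload from the snippet above) restores the module after each test:

import pytest

@pytest.fixture(autouse=True)
def _restore_module():
    yield
    reload(module)  # re-evaluate CONSTANT against the real environment afterwards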
You could mock the environment using mock.patch.dict and import the value inside your unit test method. Like so:
from unittest import TestCase, mock

class YourTest(TestCase):

    @mock.patch.dict('os.environ', {'NOT_EXISTING_KEY': 'value'})
    def test_constant_key_defined(self):
        """ Tests when the key is defined """
        from package.module import CONSTANT
        self.assertEqual(CONSTANT, 'value')

    def test_constant_key_not_defined(self):
        """ Tests when the key is not defined """
        from package.module import CONSTANT
        self.assertFalse(CONSTANT)
(Note that mock.patch.dict does not pass a mock object into the decorated test method, and with the key undefined CONSTANT falls back to False.)
You may also use importlib.reload, as in @mrbean-bremen's answer; I am not familiar with it myself.
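One caveat with importing inside the test method: if package.module was already imported earlier in the test run, its body will not execute again, so the patched environment has no effect. A sketch combining both answers (my combination, not from either answer):

from importlib import reload
from unittest import mock

import package.module

def test_constant_with_patched_env():
    with mock.patch.dict('os.environ', {'NOT_EXISTING_KEY': 'value'}):
        # re-run the module-level try/except under the patched environment
        reload(package.module)
        assert package.module.CONSTANT == 'value'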

How does #pytest.mark.filterwarnings work?

According to the docs you can ignore warnings like this:
@pytest.mark.filterwarnings("ignore:api v1")
def test_foo():
    ...
But there doesn't seem to be any documentation on this mini-language (is it even a mini-language?).
How is the match done?
I'm asking this because the following test doesn't ignore the DeprecationWarning raised by importing boto3:
@pytest.mark.filterwarnings("ignore:DeprecationWarning")
def test_ignore_warnings():
    import boto3
Pytest outputs:
============================================================================================================================== warnings summary ===============================================================================================================================
/home/rob/dev/time-series/.venv/lib/python3.7/site-packages/botocore/awsrequest.py:624
/home/rob/dev/time-series/.venv/lib/python3.7/site-packages/botocore/awsrequest.py:624: DeprecationWarning: Using or importing the ABCs from 'collections' instead of from 'collections.abc' is deprecated, and in 3.8 it will stop working
class HeadersDict(collections.MutableMapping):
-- Docs: https://docs.pytest.org/en/latest/warnings.html
==================================================================================================================== 1 passed, 1 warnings in 0.36 seconds =====================================================================================================================
The filters work the same way as with the -W option of the python command (see python --help). The format is described in the documentation of the warnings module. In short, it's action:message:category:module:line, where action is probably mandatory but the other parts can be omitted.
"ignore:api v1" would try to match the message, i.e. "a string containing a regular expression that the start of the warning message must match". Since you actually want to match the category, you can skip the message part. This means you are just missing one colon after ignore, so this is the correct format:
@pytest.mark.filterwarnings("ignore::DeprecationWarning")
def test_ignore_warnings():
    import boto3
However, you might still get the warning if it is raised during the import of a package, outside of any test function. In that case you might need to specify the filter globally as a pytest argument:
pytest -W "ignore::DeprecationWarning" ./tests/
...or add it to pytest.ini:
[pytest]
filterwarnings =
    ignore::DeprecationWarning
If such a global exclusion is undesirable, you can try to limit it to a particular module via the fourth filter field:
ignore::DeprecationWarning:botocore.awsrequest
Note that the module field must match the fully qualified name of the module that actually triggers the warning; in the output above that is botocore.awsrequest, not boto3 itself.
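The same module-scoped filter also works in pytest.ini (a sketch following the ini example above):

[pytest]
filterwarnings =
    ignore::DeprecationWarning:botocore.awsrequest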
Testing
For testing purposes, you can use the following code:
import warnings

import pytest

def something():
    warnings.warn("Test", DeprecationWarning)

@pytest.mark.filterwarnings("ignore::DeprecationWarning")
def test_ignore_warnings():
    something()

In pytest how do I temporarily disable capture in a class method?

In pytest (3.0.4; Python 3.4) I'm trying to disable output capture under certain circumstances, following the example on this doc page. However, I'm unable to specify capsys as a funcarg. As a follow-up, I'd like to accept both funcarg and non-funcarg arguments so that I can use a class method that takes one or more normal arguments. How can I do that?
Ideally, this class method would work:
def always_print(self, message, capsys):
    with capsys.disabled():
        print(message)
But, I can't even get this to work:
def always_print(capsys):
    with capsys.disabled():
        print('FIXME')
Getting the error:
...
>       always_print()
E       TypeError: always_print() missing 1 required positional argument: 'capsys'
Edit 1:
Piotr's answer solved my specific issue. However, I also discovered two important caveats that I hadn't picked up from the documentation or other posts, so I'm sharing them here for others' benefit:
it appears that capsys.disabled() only applies to stdout and not stderr, which is where I was originally sending my debug messages, per *nix best practice.
If you bind a file handle to sys.stdout before calling capsys.disabled(), then, due to the magic file descriptor mangling that pytest does, this will not work.
So, for example, you'll need to do it this way (say, if your kwargs may contain an optional "file" keyword, like the built-in print() does):

fhandle = kwargs.get('file', sys.stdout)  # will not work!
with capsys.disabled():
    fhandle = kwargs.get('file', sys.stdout)  # must be grabbed inside the context
    print(message, file=fhandle)
Well, capsys is a built-in fixture for tests. You should request it as a test argument and pass it on:

def always_print(capsys):
    with capsys.disabled():
        print('FIXME')

def test_always_print(capsys):
    always_print(capsys)

This will work if you run it with the pytest command.
Edit:
To avoid verbosity, you can prepare a global capsys variable for all tests (based on the answer on how to share a variable across modules for all tests in py.test):

# globals.py
capsys = None

# conftest.py
import pytest
import globals as gbl
from _pytest.capture import capsys

@pytest.fixture(autouse=True)
def populate_globals(request):
    gbl.capsys = capsys(request)

# my_tests.py
import globals as gbl

def test_foo():
    with gbl.capsys.disabled():
        print('You can see me')

def test_bar():
    with gbl.capsys.disabled():
        print('You can see me too')

Note that this reaches into pytest internals (_pytest.capture), so it may break across pytest versions.
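For the class-method case from the original question, one common pattern (my sketch, not part of either answer above) is an autouse fixture that stashes capsys on the test instance, so ordinary methods can take normal arguments:

import pytest

class TestMessages:

    @pytest.fixture(autouse=True)
    def _capsys(self, capsys):
        self.capsys = capsys  # make the fixture available to plain methods

    def always_print(self, message):
        with self.capsys.disabled():
            print(message)

    def test_always_print(self):
        self.always_print('FIXME')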

How to patch a module's internal functions with mock?

By "internal function", I mean a function that is called from within the same module it is defined in.
I am using the mock library, specifically the patch decorators, in my unit tests. They're Django unit tests, but this should apply to any python tests.
I have one module with several functions, many of which call each other. For example (fictitious code; ignore the lack of decimal.Decimal):
TAX_LOCATION = 'StateName, United States'

def add_tax(price, user):
    tax = 0
    if TAX_LOCATION == 'StateName, United States':
        tax = price * .75
    return (tax, price + tax)

def build_cart(...):
    # build a cart object for `user`
    tax, price = add_tax(cart.total, cart.user)
    return cart
These are part of a deeper calling chain (func1 -> func2 -> build_cart -> add_tax), all of which are in the same module.
In my unit tests, I'd like to disable taxes to get consistent results. As I see it, my two options are 1) patch out TAX_LOCATION (with an empty string, say) so that add_tax doesn't actually do anything or 2) patch out add_tax to simply return (0, price).
However, when I try to patch either of these the patch seems to work externally (I can import the patched part inside the test and print it out, getting expected values), but seems to have no effect internally (the results I get from the code behave as if the patch were not applied).
My tests are like this (again, fictitious code):
from mock import patch
from django.test import TestCase

class MyTests(TestCase):

    @patch('mymodule.TAX_LOCATION', '')
    def test_tax_location(self):
        import mymodule
        print mymodule.TAX_LOCATION  # ''
        mymodule.func1()
        self.assertEqual(cart.total, original_price)  # fails, tax applied

    @patch('mymodule.add_tax', lambda p, u: (0, p))
    def test_add_tax(self):
        import mymodule
        print mymodule.add_tax(50, None)  # (0, 50)
        mymodule.func1()
        self.assertEqual(cart.total, original_price)  # fails, tax applied
Does anyone know if it's possible for mock to patch out functions used internally like this, or am I out of luck?
The answer: Clean up your darned imports
@patch('mymodule.TAX_LOCATION', '') did indeed patch things appropriately, but since our imports at the time were very haphazard (sometimes we imported mymodule.build_cart, sometimes project.mymodule.build_cart), instances of the "full" import were not patched at all. Mock couldn't be expected to know about the two separate import paths without being told explicitly, anyway.
We've since standardized all our imports on the longer path, and things behave much more nicely now.
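A minimal illustration of that pitfall (my sketch, assuming a layout where the same file is reachable both as mymodule and as project.mymodule):

from mock import patch

import mymodule                     # one entry in sys.modules
from project import mymodule as m   # a second, separate entry in sys.modules

with patch('mymodule.add_tax', lambda p, u: (0, p)):
    print(mymodule.add_tax(50, None))  # (0, 50) - patched
    print(m.add_tax(50, None))         # real result - NOT patched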
Another option is to explicitly call patch on the function:
mock.patch('function_name')
and, to support running both directly and from py.test etc.:
mock.patch(__name__ + '.' + 'function_name')
I'd like to add a solution other than the accepted one: you can also patch the module before it's been imported by any other modules, and remove the patch at the end of your test case.
# import some modules that don't use the module you are going to patch
import unittest
from mock import patch
import json
import logging
...

patcher = patch('some.module.path.function', lambda x: x)
patcher.start()

import some.module.path

class ViewGetTests(unittest.TestCase):

    @classmethod
    def tearDownClass(cls):
        patcher.stop()
I'm pretty sure your problem is that you are importing 'mymodule' inside your test functions, and therefore the patch decorator has no chance of actually patching. Do the import at the top of the module, like any other import.
If your module is in a folder with an __init__.py file that does from [module_file] import *, make sure your patch target includes both the folder and file name (module_folder.module_file); otherwise the patch will appear to succeed (no 'module does not have this attribute' error) but will not take effect (calls go to the real function, not the mock), no matter how the function under test is imported.
