How can I monkeypatch the built-in input and print functions using pytest, so that I can capture the output of someone else's code and test it with pytest before refactoring?
For example, I have acquired some code similar to this:
class QueryProcessor:
    def __init__(self ...):
        ...

    def write_search_result(self, was_found):
        print('yes' if was_found else 'no')

    def read_query(self):
        return Query(input().split())
I don't want to read dozens of input parameters from stdin, and I don't want to print the outputs. I want to use the functions I've written that sift through a directory full of mytest.in and mytest.out files and pass the inputs to pytest using @pytest.mark.parametrize(...).
But I can't figure out how to patch the awkward read… and write… functions in this class.
I suspect it's something along the lines of:
@pytest.mark.parametrize("inputs…, expected outputs…", data_reading_func())
def test_QueryProcessor(monkeypatch, inputs…, expected outputs…):
    """Docstring
    """
    q = QueryProcessor()

    def my_replacement_read():
        ...
        return [...]

    def my_replacement_write():
        ...
        return [...]

    monkeypatch.???
    assert ...
Can you help?
Many thanks
While awaiting a response, I came up with the following myself. I think the ideal answer will be what I've done, implemented in the way @hoefling suggests: using patch.
@pytest.mark.parametrize("m, n, commands, expec", helpers.get_external_inputs_outputs('spampath', helpers.read_spam_input_output))
def test_QueryProcessor(monkeypatch, m, n, commands, expec):

    def mock_process_queries(cls):
        for cmd in commands:
            cls.process_query(Query(cmd.split()))  # also mocks read_query()

    def mock_write_search_result(cls, was_found):
        outputs.append('yes' if was_found else 'no')

    monkeypatch.setattr('test.QueryProcessor.process_queries', mock_process_queries)
    monkeypatch.setattr('test.QueryProcessor.write_search_result', mock_write_search_result)

    outputs = []
    proc = QueryProcessor(m)
    proc.process_queries()
    assert outputs == expec
UPDATE:
@pytest.mark.parametrize("m, n, commands, expec",
                         helpers.get_external_inputs_outputs(
                             'spampath',
                             helpers.read_input_output))
def test_QueryProcessor_mockpatch(m, n, commands, expec):
    commands.insert(0, n)
    mock_stdout = io.StringIO()
    with patch('spammodule.input', side_effect=commands):
        with patch('sys.stdout', mock_stdout):
            proc = hash_chains.QueryProcessor(m)
            proc.process_queries()
    assert mock_stdout.getvalue().split('\n')[:-1] == expec
Hey there, I guess it is too late for you, but for other people asking themselves how to monkeypatch input(), I did it like this:
monkeypatch.setattr(builtins, 'input', lambda *args, **kwargs: 'Yes, I like monkeypatching')
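For illustration, here is a minimal, self-contained pytest test using that one-liner (the greet function is a made-up example, not from the question):
import builtins

def greet():
    # reads a name from stdin and builds a greeting
    return "Hello, " + input("name? ")

def test_greet(monkeypatch):
    # replace the built-in input() so no real stdin is needed
    monkeypatch.setattr(builtins, 'input', lambda *args, **kwargs: 'Monty')
    assert greet() == "Hello, Monty"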
So I would refactor the update from your own answer to the following (assuming that commands is callable, since you specified it as a side_effect):
# don't forget the imports
import builtins
import io
import sys

@pytest.mark.parametrize("m, n, commands, expec",
                         helpers.get_external_inputs_outputs('spampath', helpers.read_input_output))
def test_QueryProcessor_mockpatch(monkeypatch, m, n, commands, expec):
    commands.insert(0, n)
    mock_stdout = io.StringIO()
    monkeypatch.setattr(builtins, 'input', lambda description: commands())
    monkeypatch.setattr(sys, 'stdout', mock_stdout)
    proc = hash_chains.QueryProcessor(m)
    proc.process_queries()
    assert mock_stdout.getvalue().split('\n')[:-1] == expec
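As a related aside (not part of the original answer): for capturing print output specifically, pytest's built-in capsys fixture can be used instead of patching sys.stdout by hand. A minimal sketch with a made-up function:
def shout(word):
    print(word.upper())

def test_shout(capsys):
    shout("hello")
    captured = capsys.readouterr()   # captured.out holds everything printed so far
    assert captured.out == "HELLO\n"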
I wrote a generic framework that helps me benchmark critical sections of code.
Here is an explanation of the framework; at the end is the problem I am facing and a few ideas I have for solutions.
Basically, I am looking for a more elegant solution.
Suppose I have a function that does this (in pseudo code):
#Pseudo Code - Don't expect it to run
def foo():
    do_begin()
    do_critical()
    some_value = do_end()
    return some_value
I want to run the "do_critical" section many times in a loop and measure the time, but still get the return value.
So I wrote a BenchMarker class whose API is something like this:
#Pseudo Code - Don't expect it to run
bm = BenchMarker(first=do_begin, critical=do_critical, end=do_end)
bm.start_benchmarking()
returned_value = bm.returned_value
benchmark_result = bm.time
Internally, the BenchMarker performs the following:
#Pseudo Code - Don't expect it to run
class BenchMarker:
    def __init__(self):
        .....

    def start_benchmarking(self):
        first()
        t0 = take_time
        for i in range(n_loops):
            critical()
        t1 = take_time
        self.time = (t1-t0)/n_loops
        value = end()
        self.returned_value = value
It is important to mention that I am also able to pass context between the first, critical and end functions, but I omitted it for simplicity as it is not the gist of my question.
This framework works like a charm until the following use case.
I have the following code:
#Pseudo Code - Don't expect it to run
def bar():
    do_begin()
    with some_context_manager() as ctx:
        do_critical()
    some_value = do_end()
    return some_value
Now, after this long introduction (sorry ...), I am getting to the real question.
I don't want to run the "with statement" in the time-measuring loop, but the critical code needs the context manager.
So what I basically want is equivalent to the following decomposition of bar:
first -> do_begin() + "what happens in the with before the with body"
critical -> do_critical()
end -> "what happens after the with body" + do_end()
Two solutions I thought of (but I don't like):
Solution 1
Mimic what with does under the hood (see the sketch after Solution 2):
At the end of first(), create the context manager object and call its __enter__() method.
At the start of end(), call the context manager's __exit__() method.
Solution 2
Framework enhancement to handle the context manager
Add a "context work mode" (a flag, whatever ...) to the framework, in which the start_benchmarking flow will look like this:
#Pseudo Code - Don't expect it to run
def start_benchmarking(self):
    first()  # including instantiating the context manager
    ctx = get_the_context_manager_created_in_first()
    with ctx ...:
        t0 = take_time
        for i in range(n_loops):
            critical()
        t1 = take_time
        self.time = (t1-t0)/n_loops
    value = end()
    self.returned_value = value
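For concreteness, here is a rough sketch of what Solution 1 would look like (pseudo code in the same spirit as above, reusing this question's function names):
#Pseudo Code - Don't expect it to run
def first():
    do_begin()
    ctx = some_context_manager()
    ctx.__enter__()                  # what the with statement does before its body
    return ctx                       # handed to end() via the framework's context passing

def critical():
    do_critical()

def end(ctx):
    ctx.__exit__(None, None, None)   # what the with statement does after its body
    return do_end()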
Any other, more elegant, solutions?
This is way over-complicated, and I cannot quite figure out why you'd actually want to do this, but assuming that you have reasons, just create a function that does your timing for you:
import time

def run_func_n_times(n_times, func, *args, **kwargs):
    start = time.time()
    for _ in range(n_times):
        res = func(*args, **kwargs)
    return res, (time.time() - start) / n_times
No need for a class, just a simple function:
def example():
    do_begin()
    print('look, i am here')
    with ctx() as blah:
        res, timed = run_func_n_times(27, f, foo, bar)
    do_end()
I am trying to follow a very simple multiprocessing example:
import multiprocessing as mp

def cube(x):
    return x**3

pool = mp.Pool(processes=2)
results = [pool.apply_async(cube, args=x) for x in range(1,7)]
However, on my Windows machine I am not able to get the result (on Ubuntu 12.04 LTS it runs perfectly).
If I inspect results, I see the following:
[<multiprocessing.pool.ApplyResult object at 0x01FF0910>,
<multiprocessing.pool.ApplyResult object at 0x01FF0950>,
<multiprocessing.pool.ApplyResult object at 0x01FF0990>,
<multiprocessing.pool.ApplyResult object at 0x01FF09D0>,
<multiprocessing.pool.ApplyResult object at 0x01FF0A10>,
<multiprocessing.pool.ApplyResult object at 0x01FF0A50>]
If I run results[0].ready() I always get False.
If I run results[0].get(), the Python interpreter freezes, waiting for the result that never comes.
The example is as simple as it gets, so I am thinking this is a low-level bug relating to the OS (I am on Windows 7). But perhaps someone else has a better idea?
There are a couple of mistakes here. First, you must declare the Pool inside an if __name__ == "__main__": guard when running on Windows. Second, you have to pass a sequence to the args keyword argument, even if you're only passing one argument. So putting that together:
import multiprocessing as mp

def cube(x):
    return x**3

if __name__ == "__main__":
    pool = mp.Pool(processes=2)
    results = [pool.apply_async(cube, args=(x,)) for x in range(1,7)]
    print([result.get() for result in results])
Output:
[1, 8, 27, 64, 125, 216]
Edit:
Oh, as moarningsun mentions, multiprocessing does not work well in the interactive interpreter:
Note
Functionality within this package requires that the __main__ module be
importable by the children. This is covered in Programming guidelines
however it is worth pointing out here. This means that some examples,
such as the multiprocessing.Pool examples will not work in the
interactive interpreter.
So you'll need to actually execute the code as a script to test it properly.
I was running Python 3 with the Spyder IDE in Anaconda (Windows), so this trick doesn't work for me. I tried a lot but couldn't make any difference. The reason for my problem is the same one listed by dano in his note. But after a long day of searching I found a solution that helped me run the same code on my Windows machine. This website helped me get to the solution:
http://python.6.x6.nabble.com/Multiprocessing-Pool-woes-td5047050.html
Since I was using Python 3, I changed the program a little, like this:
from types import FunctionType
import marshal

def _applicable(*args, **kwargs):
    name = kwargs['__pw_name']
    code = marshal.loads(kwargs['__pw_code'])
    gbls = globals()  # gbls = marshal.loads(kwargs['__pw_gbls'])
    defs = marshal.loads(kwargs['__pw_defs'])
    clsr = marshal.loads(kwargs['__pw_clsr'])
    fdct = marshal.loads(kwargs['__pw_fdct'])
    func = FunctionType(code, gbls, name, defs, clsr)
    func.fdct = fdct
    del kwargs['__pw_name']
    del kwargs['__pw_code']
    del kwargs['__pw_defs']
    del kwargs['__pw_clsr']
    del kwargs['__pw_fdct']
    return func(*args, **kwargs)

def make_applicable(f, *args, **kwargs):
    if not isinstance(f, FunctionType): raise ValueError('argument must be a function')
    kwargs['__pw_name'] = f.__name__  # edited
    kwargs['__pw_code'] = marshal.dumps(f.__code__)  # edited
    kwargs['__pw_defs'] = marshal.dumps(f.__defaults__)  # edited
    kwargs['__pw_clsr'] = marshal.dumps(f.__closure__)  # edited
    kwargs['__pw_fdct'] = marshal.dumps(f.__dict__)  # edited
    return _applicable, args, kwargs

def _mappable(x):
    x, name, code, defs, clsr, fdct = x
    code = marshal.loads(code)
    gbls = globals()  # gbls = marshal.loads(gbls)
    defs = marshal.loads(defs)
    clsr = marshal.loads(clsr)
    fdct = marshal.loads(fdct)
    func = FunctionType(code, gbls, name, defs, clsr)
    func.fdct = fdct
    return func(x)

def make_mappable(f, iterable):
    if not isinstance(f, FunctionType): raise ValueError('argument must be a function')
    name = f.__name__  # edited
    code = marshal.dumps(f.__code__)  # edited
    defs = marshal.dumps(f.__defaults__)  # edited
    clsr = marshal.dumps(f.__closure__)  # edited
    fdct = marshal.dumps(f.__dict__)  # edited
    return _mappable, ((i, name, code, defs, clsr, fdct) for i in iterable)
With these functions in place, the problem code above also changes a little, like this:
from multiprocessing import Pool
from poolable import make_applicable, make_mappable

def cube(x):
    return x**3

if __name__ == "__main__":
    pool = Pool(processes=2)
    results = [pool.apply_async(*make_applicable(cube, x)) for x in range(1,7)]
    print([result.get(timeout=10) for result in results])
And I got the output as:
[1, 8, 27, 64, 125, 216]
I think this post may be useful for some Windows users.
I would like to use a decorator on a function that I will subsequently pass to a multiprocessing pool. However, the code fails with "PicklingError: Can't pickle <type 'function'>: attribute lookup __builtin__.function failed". I don't quite see why it fails here. I feel certain that it's something simple, but I can't find it. Below is a minimal "working" example. I thought that using functools would be enough to let this work.
If I comment out the function decoration, it works without an issue. What is it about multiprocessing that I'm misunderstanding here? Is there any way to make this work?
Edit: After adding both a callable class decorator and a function decorator, it turns out that the function decorator works as expected. The callable class decorator continues to fail. What is it about the callable class version that keeps it from being pickled?
import random
import multiprocessing
import functools

class my_decorator_class(object):
    def __init__(self, target):
        self.target = target
        try:
            functools.update_wrapper(self, target)
        except:
            pass

    def __call__(self, elements):
        f = []
        for element in elements:
            f.append(self.target([element])[0])
        return f

def my_decorator_function(target):
    @functools.wraps(target)
    def inner(elements):
        f = []
        for element in elements:
            f.append(target([element])[0])
        return f
    return inner

@my_decorator_function
def my_func(elements):
    f = []
    for element in elements:
        f.append(sum(element))
    return f

if __name__ == '__main__':
    elements = [[random.randint(0, 9) for _ in range(5)] for _ in range(10)]
    pool = multiprocessing.Pool(processes=4)
    results = [pool.apply_async(my_func, ([e],)) for e in elements]
    pool.close()
    f = [r.get()[0] for r in results]
    print(f)
The problem is that pickle needs to have some way to reassemble everything that you pickle. See here for a list of what can be pickled:
http://docs.python.org/library/pickle.html#what-can-be-pickled-and-unpickled
When pickling my_func, the following components need to be pickled:
An instance of my_decorator_class, called my_func.
This is fine. Pickle will store the name of the class and pickle its __dict__ contents. When unpickling, it uses the name to find the class, then creates an instance and fills in the __dict__ contents. However, the __dict__ contents present a problem...
The instance of the original my_func that's stored in my_func.target.
This isn't so good. It's a function at the top level, and normally these can be pickled. Pickle will store the name of the function. The problem, however, is that the name "my_func" is no longer bound to the undecorated function; it's bound to the decorated function. This means that pickle won't be able to look up the undecorated function to recreate the object. Sadly, pickle doesn't have any way to know that the object it's trying to pickle can always be found under the name __main__.my_func.
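To make that concrete, here is a small, self-contained illustration of the name-binding problem (the names are invented for this example):
import pickle

class identity_decorator(object):
    def __init__(self, target):
        self.target = target
    def __call__(self, *args, **kwargs):
        return self.target(*args, **kwargs)

@identity_decorator
def greet():
    return "hi"

# Pickling the decorator instance requires pickling its stored target function,
# but the name "greet" now refers to the instance rather than the function, so
# pickle's lookup check fails.
try:
    pickle.dumps(greet)
except pickle.PicklingError as exc:
    print(exc)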
You can change it like this and it will work:
import random
import multiprocessing
import functools

class my_decorator(object):
    def __init__(self, target):
        self.target = target
        try:
            functools.update_wrapper(self, target)
        except:
            pass

    def __call__(self, candidates, args):
        f = []
        for candidate in candidates:
            f.append(self.target([candidate], args)[0])
        return f

def old_my_func(candidates, args):
    f = []
    for c in candidates:
        f.append(sum(c))
    return f

my_func = my_decorator(old_my_func)

if __name__ == '__main__':
    candidates = [[random.randint(0, 9) for _ in range(5)] for _ in range(10)]
    pool = multiprocessing.Pool(processes=4)
    results = [pool.apply_async(my_func, ([c], {})) for c in candidates]
    pool.close()
    f = [r.get()[0] for r in results]
    print(f)
You have observed that the decorator function works when the class does not. I believe this is because functools.wraps modifies the decorated function so that it has the name and other properties of the function it wraps. As far as the pickle module can tell, it is indistinguishable from a normal top-level function, so it pickles it by storing its name. Upon unpickling, the name is bound to the decorated function so everything works out.
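As a small illustration of that point (invented names, Python 3), a wraps-decorated function survives a pickle round trip because the copied name resolves back to the very object being pickled:
import functools
import pickle

def deco(f):
    @functools.wraps(f)          # copies __name__, __qualname__, __module__, ...
    def inner(*args, **kwargs):
        return f(*args, **kwargs)
    return inner

@deco
def square(x):
    return x * x

# pickle stores the function by name; "square" resolves to the decorated
# function itself, so both dumping and loading work.
clone = pickle.loads(pickle.dumps(square))
print(clone(3))   # 9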
I also had some problem using decorators in multiprocessing. I'm not sure if it's the same problem as yours:
My code looked like this:
from multiprocessing import Pool

def decorate_func(f):
    def _decorate_func(*args, **kwargs):
        print "I'm decorating"
        return f(*args, **kwargs)
    return _decorate_func

@decorate_func
def actual_func(x):
    return x ** 2

my_swimming_pool = Pool()
result = my_swimming_pool.apply_async(actual_func, (2,))
print result.get()
and when I run the code I get this:
Traceback (most recent call last):
  File "test.py", line 15, in <module>
    print result.get()
  File "somedirectory_too_lengthy_to_put_here/lib/python2.7/multiprocessing/pool.py", line 572, in get
    raise self._value
cPickle.PicklingError: Can't pickle <type 'function'>: attribute lookup __builtin__.function failed
I fixed it by defining a new function to wrap the function in the decorator function, instead of using the decorator syntax
from multiprocessing import Pool

def decorate_func(f):
    def _decorate_func(*args, **kwargs):
        print "I'm decorating"
        return f(*args, **kwargs)
    return _decorate_func

def actual_func(x):
    return x ** 2

def wrapped_func(*args, **kwargs):
    return decorate_func(actual_func)(*args, **kwargs)

my_swimming_pool = Pool()
result = my_swimming_pool.apply_async(wrapped_func, (2,))
print result.get()
The code ran perfectly and I got:
I'm decorating
4
I'm not very experienced with Python, but this solution solved the problem for me.
If you want the decorators too badly (like me), you can also use exec() on the function's source string to circumvent the pickling mentioned above.
I wanted to be able to pass all the arguments to an original function and then use them successively. The following is my code for it.
First, I made a make_functext() function to convert the target function object to a string. For that, I used the getsource() function from the inspect module (see the documentation, and note that it can't retrieve source code from compiled code etc.). Here it is:
from inspect import getsource

def make_functext(func):
    ft = '\n'.join(getsource(func).split('\n')[1:])  # Removing the decorator, of course
    ft = ft.replace(func.__name__, 'func')           # Making function callable with 'func'
    ft = ft.replace('#§ ', '').replace('#§', '')     # For using commented code starting with '#§'
    ft = ft.strip()                                  # In case the function code was indented
    return ft
It is used in the following _worker() function that will be the target of the processes:
def _worker(functext, args):
    scope = {}               # This is needed to keep executed definitions
    exec(functext, scope)
    scope['func'](args)      # Using func from scope
And finally, here's my decorator:
from multiprocessing import Process

def parallel(num_processes, **kwargs):
    def parallel_decorator(func, num_processes=num_processes):
        functext = make_functext(func)
        print('This is the parallelized function:\n', functext)
        def function_wrapper(funcargs, num_processes=num_processes):
            workers = []
            print('Launching processes...')
            for k in range(num_processes):
                p = Process(target=_worker, args=(functext, funcargs[k]))  # use args here
                p.start()
                workers.append(p)
        return function_wrapper
    return parallel_decorator
The code can finally be used by defining a function like this:
@parallel(4)
def hello(args):
    #§ from time import sleep  # use '#§' to avoid unnecessary (re)imports in main program
    name, seconds = tuple(args)  # unpack args-list here
    sleep(seconds)
    print('Hi', name)
... which can now be called like this:
hello([['Marty', 0.5],
['Catherine', 0.9],
['Tyler', 0.7],
['Pavel', 0.3]])
... which outputs:
This is the parallelized function:
def func(args):
    from time import sleep
    name, seconds = tuple(args)
    sleep(seconds)
    print('Hi', name)
Launching processes...
Hi Pavel
Hi Marty
Hi Tyler
Hi Catherine
Thanks for reading, this is my very first post. If you find any mistakes or bad practices, feel free to leave a comment. I know that these string conversions are quite dirty, though...
If you use this code for your decorator:
import multiprocessing
from types import MethodType

DEFAULT_POOL = []

def run_parallel(_func=None, *, name: str = None, context_pool: list = DEFAULT_POOL):

    class RunParallel:
        def __init__(self, func):
            self.func = func

        def __call__(self, *args, **kwargs):
            process = multiprocessing.Process(target=self.func, name=name, args=args, kwargs=kwargs)
            context_pool.append(process)
            process.start()

        def __get__(self, instance, owner):
            return self if instance is None else MethodType(self, instance)

    if _func is None:
        return RunParallel
    else:
        return RunParallel(_func)

def wait_context(context_pool: list = DEFAULT_POOL, kill_others_if_one_fails: bool = False):
    finished = []
    for process in context_pool:
        process.join()
        finished.append(process)
        if kill_others_if_one_fails and process.exitcode != 0:
            break

    if kill_others_if_one_fails:
        # kill unfinished processes
        for process in context_pool:
            if process not in finished:
                process.kill()
        # wait for every process to be dead
        for process in context_pool:
            process.join()
Then you can use it like this, in these 4 examples:
@run_parallel
def m1(a, b="b"):
    print(f"m1 -- {a=} {b=}")

@run_parallel(name="mym2", context_pool=DEFAULT_POOL)
def m2(d, cc="cc"):
    print(f"m2 -- {d} {cc=}")
    a = 1/0

class M:
    @run_parallel
    def c3(self, k, n="n"):
        print(f"c3 -- {k=} {n=}")

    @run_parallel(name="Mc4", context_pool=DEFAULT_POOL)
    def c4(self, x, y="y"):
        print(f"c4 -- {x=} {y=}")

if __name__ == "__main__":
    m1(11)
    m2(22)
    M().c3(33)
    M().c4(44)
    wait_context(kill_others_if_one_fails=True)
The output will be:
m1 -- a=11 b='b'
m2 -- 22 cc='cc'
c3 -- k=33 n='n'
(followed by the exception raised in method m2)
I have five or six resources that have nice 'with' handlers, and normally I'd do this:
with res1, res2, res3, res4, res5, res6:
    do1
    do2
However, sometimes one or more of these resources should not be activated. Which leads to very ugly repetitive code:
with res1, res3, res4, res6:  # these always acquired
    if res2_enabled:
        with res2:
            if res5_enabled:
                with res5:
                    do1
                    do2
            else:
                do1
                do2
    elif res5_enabled:
        with res5:
            ...
There must be clean easy ways to do this surely?
You could create a wrapper object that supports the with statement, and do the checking in there. Something like:
with wrapper(res1), wrapper(res2), wrapper(res3):
    ...
or a wrapper that handles all of them:
with wrapper(res1, res2, res3):
    ...
The definition for your wrapper would be:
class wrapper(object):
    def __init__(self, *objs):
        ...
    def __enter__(self):
        # initialize objs here
        ...
    def __exit__(self, *args):
        # release objects here
        ...
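A minimal sketch of one way such a wrapper could be written in modern Python, using contextlib.ExitStack (the resource/flag pairs here are hypothetical):
from contextlib import ExitStack

class wrapper(object):
    def __init__(self, *pairs):
        # each item is a (context manager, enabled) pair
        self.pairs = pairs
        self.stack = ExitStack()

    def __enter__(self):
        for resource, enabled in self.pairs:
            if enabled:
                self.stack.enter_context(resource)
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # unwinds every resource that was actually entered
        return self.stack.__exit__(exc_type, exc_value, traceback)

# usage (hypothetical resources and flags):
# with wrapper((res1, True), (res2, res2_enabled), (res5, res5_enabled)):
#     do1()
#     do2()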
If I understand you correctly you can do this:
from contextlib import contextmanager, nested

def enabled_resources(*resources):
    return nested(*(res for res, enabled in resources if enabled))
# just for testing
@contextmanager
def test(n):
    print n, "entered"
    yield

resources = [(test(n), n%2) for n in range(10)]
# you want
# resources = [(res1, res1_enabled), ... ]

with enabled_resources(*resources):
    # do1, do2
    pass
Original Poster here; here is my approach refined so far:
I can add (or monkey-patch) the bool operator __nonzero__ onto the with objects, returning whether they are enabled. Then, when objects are mutually exclusive, I can have:
with res1 or res2 or res3 or res4:
    ...
When a resource is togglable, I can create an empty withable that is a nop; wither seems a nice name for it:
class wither:
    @classmethod
    def __enter__(cls): pass
    @classmethod
    def __exit__(cls, *args): pass
...
with res1 or wither, res2 or wither:
    ...
I can also use this keeping the toggling out of the withable objects:
with res1 if res1enabled else wither, res2 if res2enabled else wither:
..
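As a related aside not in the original post: contextlib.nullcontext (added in Python 3.7) can serve as exactly this kind of no-op withable, so the hand-rolled class can be dropped. A tiny sketch with a made-up resource and toggle:
from contextlib import nullcontext
import threading

lock = threading.Lock()   # stands in for a real resource
lock_enabled = False      # hypothetical toggle

with (lock if lock_enabled else nullcontext()):
    pass                  # body runs whether or not the lock is held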
Finally, those I have most control over, I can integrate the enabled checking into the class itself such that when used and not enabled, they are nop:
with res1, res2, res3:
...
The with statement is absolutely adorable, it just seems a bit unentrenched yet. It will be interesting to see what finesse others come up with in this regard...
I have the following function in Python, and I want to test with unittest that if the function gets 0 as an argument, it throws a warning. I already tried assertRaises, but since I don't raise the warning, that doesn't work.
def isZero(i):
    if i != 0:
        print "OK"
    else:
        warning = Warning("the input is 0!")
        print warning
    return i
Starting with Python 3.2, you can simply use assertWarns() method.
with self.assertWarns(Warning):
    do_something()
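For a self-contained example of that approach (adapting the question's function to issue a real warning via warnings.warn, since a printed Warning object is not caught by assertWarns):
import unittest
import warnings

def is_zero(i):
    if i != 0:
        print("OK")
    else:
        warnings.warn("the input is 0!")   # a real warning, not just a printed one
    return i

class TestIsZero(unittest.TestCase):
    def test_warns_on_zero(self):
        with self.assertWarns(Warning):
            is_zero(0)

if __name__ == '__main__':
    unittest.main()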
You can use the catch_warnings context manager. Essentially this allows you to mock the warnings handler, so that you can verify details of the warning. See the official docs for a fuller explanation and sample test code.
import warnings

def fxn():
    warnings.warn("deprecated", DeprecationWarning)

with warnings.catch_warnings(record=True) as w:
    # Cause all warnings to always be triggered.
    warnings.simplefilter("always")
    # Trigger a warning.
    fxn()
    # Verify some things
    assert len(w) == 1
    assert issubclass(w[-1].category, DeprecationWarning)
    assert "deprecated" in str(w[-1].message)
You can write your own assertWarns function to encapsulate the catch_warnings context. I've just implemented it the following way, with a mixin:
import warnings

class WarningTestMixin(object):
    'A test which checks if the specified warning was raised'

    def assertWarns(self, warning, callable, *args, **kwds):
        with warnings.catch_warnings(record=True) as warning_list:
            warnings.simplefilter('always')
            result = callable(*args, **kwds)
            self.assertTrue(any(item.category == warning for item in warning_list))
A usage example:
class SomeTest(WarningTestMixin, TestCase):
    'Your testcase'

    def test_something(self):
        self.assertWarns(
            UserWarning,
            your_function_which_issues_a_warning,
            5, 10, 'john',  # args
            foo='bar'       # kwargs
        )
The test will pass if at least one of the warnings issued by your_function is of type UserWarning.
@ire_and_curses' answer is quite useful and, I think, canonical. Here is another way to do the same thing. This one requires Michael Foord's excellent Mock library.
import unittest, warnings
from mock import patch_object

def isZero(i):
    if i != 0:
        print "OK"
    else:
        warnings.warn("the input is 0!")
    return i

class Foo(unittest.TestCase):
    @patch_object(warnings, 'warn')
    def test_is_zero_raises_warning(self, mock_warn):
        isZero(0)
        self.assertTrue(mock_warn.called)

if __name__ == '__main__':
    unittest.main()
The nifty patch_object lets you mock out the warn method.
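For reference, a similar sketch with today's standard library, where the old patch_object helper has become unittest.mock.patch.object (function adapted to Python 3):
import unittest
import warnings
from unittest.mock import patch

def is_zero(i):
    if i != 0:
        print("OK")
    else:
        warnings.warn("the input is 0!")
    return i

class FooPy3(unittest.TestCase):
    @patch.object(warnings, 'warn')
    def test_is_zero_raises_warning(self, mock_warn):
        is_zero(0)
        self.assertTrue(mock_warn.called)

if __name__ == '__main__':
    unittest.main()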
One problem with the warnings.catch_warnings approach is that warnings produced in different tests can interact in strange ways through global state kept in __warningregistry__ attributes.
To address this, we should clear the __warningregistry__ attribute of every module before every test that checks warnings.
import sys
import unittest
import warnings

class MyTest(unittest.TestCase):

    def setUp(self):
        # The __warningregistry__'s need to be in a pristine state for tests
        # to work properly.
        for v in sys.modules.values():
            if getattr(v, '__warningregistry__', None):
                v.__warningregistry__ = {}

    def test_something(self):
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always", MySpecialWarning)
            ...
            self.assertEqual(len(w), 1)
            self.assertIsInstance(w[0].message, MySpecialWarning)
This is how Python 3's assertWarns() method is implemented.
Building off the answer from @ire_and_curses,
import warnings

class AssertWarns(warnings.catch_warnings):
    """A Python 2 compatible version of `unittest.TestCase.assertWarns`."""

    def __init__(self, test_case, warning_type):
        self.test_case = test_case
        self.warning_type = warning_type
        self.log = None
        super(AssertWarns, self).__init__(record=True, module=None)

    def __enter__(self):
        self.log = super(AssertWarns, self).__enter__()
        return self.log

    def __exit__(self, *exc_info):
        super(AssertWarns, self).__exit__(*exc_info)
        self.test_case.assertEqual(type(self.log[0]), self.warning_type)
This can be called similarly to unittest.TestCase.assertWarns:
with AssertWarns(self, warnings.WarningMessage):
    warnings.warn('test warning!')
where self is a unittest.TestCase.
Per Melebius' answer, you can use self.assertWarns().
Additionally, if you want to check the warning message as well, you can use self.assertWarnsRegex() for that greater specificity:
import warnings
from unittest import TestCase

class MyCustomWarning(Warning):
    ...

def is_zero(i: int) -> int:
    if i != 0:
        print("OK")
    else:
        warnings.warn("the input is 0!", MyCustomWarning)
    return i

class TestIsZero(TestCase):
    def test_when_then_input_is_zero(self):
        regex = "the input is 0"
        with self.assertWarnsRegex(MyCustomWarning, expected_regex=regex):
            _ = is_zero(0)
This test will fail if the regex is not found in the warning message.