I'm trying to write a unittest for a piece of code, which runs a function twice.
## in [module.py]
def check_my_documents(t: str, v: str, r: str):
    """Classify three filesystem paths.

    Returns a tuple of booleans:
    (t is an existing directory, v is an existing file, r is an existing file).
    """
    # NOTE(review): relies on `import os` at the top of module.py — confirm.
    is_texture_data = os.path.isdir(t)
    is_zip_data = os.path.isfile(v)
    is_csv_data = os.path.isfile(r)
    return (is_texture_data, is_zip_data, is_csv_data)
# NOTE(review): the leading '#' on the patch lines is a Markdown-rendering
# artifact for '@'; restored here. Stacked decorators apply bottom-up, so the
# bottom @patch supplies the FIRST mock parameter — decorator order swapped
# accordingly so the parameter names stay correct.
@patch('module.os.path.isdir')
@patch('module.os.path.isfile')
def test_check_my_documents(mocked_file, mocked_dir):
    """isdir(t) -> True; isfile(v) -> True; isfile(r) -> False."""
    expected_result = (True, True, False)
    mocked_file.side_effect = [True, False]  # consumed by isfile(v), then isfile(r)
    mocked_dir.side_effect = [True]          # consumed by isdir(t)
    # Fixed: '===' is not Python ('==' intended); the callee was misnamed
    # check_print_files; and t/v/r were undefined — pass concrete strings.
    # (check_my_documents must be imported from module at the top of the file.)
    assert check_my_documents('t', 'v', 'r') == expected_result
I thought this would work, but it returns /Users/.asdf/installs/python/3.9.2/lib/python3.9/unittest/mock.py:1154: StopIteration
Edit: I am using Pytest
It seems that you have not fully understood how to write unittest tests...
The unittest module is intended to allow you to use classes derived from unittest.TestCase. You'd better stick to that...
When you use more than one #patch decorator, the mocks are added as new parameters in reverse order, because the last #patch is the innermost one and therefore provides the first parameter.
Once this is observed, you should end with a code close to:
import unittest
from unittest.mock import patch
import module
class MyTest(unittest.TestCase):
    """Demonstrates stacked @patch decorators: the bottom decorator
    (os.path.isdir) is innermost and therefore supplies the first mock
    argument after self."""

    @patch('os.path.isfile')
    @patch('os.path.isdir')
    def test_check_my_documents(self, mocked_dir, mocked_file):
        expected_result = (True, True, False)
        mocked_file.side_effect = [True, False]  # isfile(v) -> True, isfile(r) -> False
        mocked_dir.side_effect = [True]          # isdir(t) -> True
        # Fixed: the original answer called the misnamed check_print_files;
        # the function under test is module.check_my_documents.
        self.assertEqual(expected_result, module.check_my_documents('t', 'v', 'r'))


if __name__ == "__main__":  # guard so importing this file doesn't run the tests
    unittest.main(verbosity=2)
Related
End goal: I want to be able to quickly mock the input() built-in function in pytest, and replace it with an iterator that generates a (variable) list of strings. This is my current version, which works:
from typing import Callable
import pytest
def _create_patched_input(str_list: list[str]) -> Callable:
str_iter = iter(str_list.copy())
def patched_input(prompt: str) -> str: # has the same signature as input
val = next(str_iter)
print(prompt + val, end="\n"),
return val
return patched_input
# NOTE(review): '#pytest.fixture' is presumably a garbled '@pytest.fixture'
# (Markdown artifact), and the snippet's indentation was lost in the paste.
#pytest.fixture
# Fixture: patches builtins.input for the duration of the test; the
# 'input_string_list' argument must come from a parametrization of that name.
def _mock_input(monkeypatch, input_string_list: list[str]):
patched_input = _create_patched_input(input_string_list)
monkeypatch.setattr("builtins.input", patched_input)
# Convenience decorator so a test only needs one import to opt in.
def mock_input(f):
return pytest.mark.usefixtures("_mock_input")(f)
# Beginning of test code
def get_name(prompt: str) -> str:
    """Ask the user for a name via input() and return it verbatim."""
    return input(prompt)
# NOTE(review): the leading '#' on the two decorator lines is presumably a
# garbled '@' (Markdown artifact); with '#' this snippet is not valid Python.
#mock_input
#pytest.mark.parametrize(
"input_string_list",
(["Alice", "Bob", "Carol"], ["Dale", "Evie", "Frank", "George"]),
)
# Each parametrized run replays its list through the patched input().
def test_get_name(input_string_list):
for name in input_string_list:
assert get_name("What is your name?") == name
However, this feels incomplete for a few reasons:
It requires the parameter name in the parameterize call to be input_string_list, which feels brittle.
If I move the fixtures into another function, I need to import both mock_input and _mock_input.
What would feel correct to me is to have a decorator (factory) that can be used like #mock_input(strings), such that you could use it like
#mock_input(["Alice", "Bob", "Carol"])
def test_get_name():
....
or, more in line with my use case,
#pytest.mark.parametrize(
"input_list", # can be named whatever
(["Alice", "Bob", "Carol"], ["Dale", "Evie", "Frank", "George"]),
)
#mock_input(input_list)
def test_get_name():
....
The latter I don't think you can do, as pytest won't recognize it as a fixture. What's the best way to do this?
I'd use indirect parametrization for mock_input, since it cannot work without receiving parameters. Also, I would refactor mock_input into a fixture that passes through the arguments it receives, performing the mocking along the way. For example, when using unittest.mock.patch():
import pytest
from unittest.mock import patch
#pytest.fixture  -- NOTE(review): '#' is presumably a garbled '@'
def inputs(request):
    """Indirectly-parametrized fixture: patch builtins.input so it replays
    request.param (a list of strings), and yield that list to the test."""
    # Fixed: was `requests.param` (NameError) — the pytest fixture-request
    # object is spelled `request`.
    texts = request.param  # ["Alice", "Bob", "Carol"] etc
    with patch('builtins.input', side_effect=texts):
        yield texts
Or, if you want to use monkeypatch, the code gets a bit more complex:
#pytest.fixture  -- NOTE(review): '#' is presumably a garbled '@'
def inputs(monkeypatch, request):
    """Patch builtins.input via monkeypatch to replay request.param, and
    yield the list of canned answers to the test."""
    # Fixed: was `requests.param` (NameError) — should be `request.param`.
    texts = request.param
    it = iter(texts)

    def fake_input(prefix):
        # Mirrors input()'s signature; the prompt is accepted but ignored.
        return next(it)

    monkeypatch.setattr('builtins.input', fake_input)
    yield texts
Now use inputs as test argument and parametrize it indirectly:
# Indirect parametrization: each list is delivered to the 'inputs' fixture via
# request.param instead of to the test directly.
# NOTE(review): '#pytest.mark.parametrize(' is presumably a garbled
# '@pytest.mark.parametrize(' — with '#' this snippet is not valid Python.
#pytest.mark.parametrize(
'inputs',
(["Alice", "Bob", "Carol"], ["Dale", "Evie", "Frank", "George"]),
indirect=True
)
def test_get_name(inputs):
for name in inputs:
assert get_name("What is your name?") == name
I am trying to test my method that simply checks if a letter is in either of the lists of letters. If it is in list a, then I assign myletter to an imported object called yes; Otherwise, I assign myletter to an imported object called no.
I tried to write a testing file using MagicMock for it, but since I am new to the concept of testing using mock and magic mock, I am not sure how to further continue. I would appreciate any tips or any solutions on how to go with it and further continue.
Should I create a mock object for each of the lists and another mock object for myletter to check it against those two mock objects, or how else should I continue?
My code:
from mydir.newdir import yes
from mydir.newdir import no
def yes_no_det(check_letter: str):
    """Return yes.att if check_letter is in the first group, no.att if it is
    in the second group, and None otherwise (including for empty input)."""
    first_list = ['a', 'b', 'c']
    second_list = ['d', 'e', 'f']
    myletter = None
    if check_letter:  # empty string / None falls straight through -> None
        if check_letter in first_list:
            myletter = yes.att
        elif check_letter in second_list:
            myletter = no.att
    return myletter
My testing so far:
import unittest
from mock import MagicMock, patch
# NOTE(review): TestCase is not imported above (only MagicMock/patch are) —
# this needs `from unittest import TestCase`. Patching in the class body runs
# once at import time, not per test, so the patches are already undone by the
# time test_check_letter executes — confirm this is intended.
class letter_check_test(TestCase):
with patch('mydir.newdir.check_letter_py.yes') as mock_yes,\
patch('mydir.newdir.check_letter_py.no') as mock_no:
from mydir.newdir.check_letter_py import yes_no_det
mock_check_letter = MagicMock()
mock_yes = mock_yes.att
mock_no = mock_no.att
def test_check_letter(self):
"""Placeholder test body — assertions still to be written."""
# I am not sure how further to continue or what to check here
Updated possibly right testing code:
# NOTE(review): these test methods are missing the `self` parameter required
# for TestCase methods, and yes_no_det returns yes.att / no.att — not the
# imported objects themselves — so the expected values likely need '.att'.
class letter_check_test(TestCase):
def test_yes_det():
assert yes_no_det('a') == mydir.newdir.yes
def test_no_det():
assert yes_no_det('f') == mydir.newdir.no
def test_neither_det():
assert yes_no_det('x') == None
Thank you for any answers. Please let me know if I need to change/delete this post or anything without downgrading or reporting it. I would really appreciate that!
In this case, it was simply a matter of using assert or assertEqual. For example:
# NOTE(review): the base classes `Testing, TestingsCases` are undefined —
# presumably unittest.TestCase was meant. Also, yes_no_det('a') returns
# yes.att (an attribute of the patched mock), so the comparison should
# likely be against YES.att rather than YES — confirm.
class letter_check_testYES(Testing, TestingsCases):
def test_letter_check(self):
with patch('mydir.newdir.check_letter_py.yes') as YES:
from mydir.newdir.check_letter_py import yes_no_det
assert yes_no_det('a') == YES
class letter_check_testNO(Testing, TestingsCases):
    """Exercises the 'no' branch of yes_no_det with the no object patched.

    NOTE(review): the base classes Testing/TestingsCases are undefined in the
    original answer — presumably unittest.TestCase was meant.
    """

    def test_letter_check(self):
        with patch('mydir.newdir.check_letter_py.no') as NO:
            from mydir.newdir.check_letter_py import yes_no_det
            # Fixed: the original compared against YES, which is not defined
            # in this test — the patched object here is NO. yes_no_det('f')
            # returns no.att, hence the comparison against NO.att.
            assert yes_no_det('f') == NO.att
class letter_check_test(Testing, TestingsCases):
    """Exercises the fall-through branch (letter in neither list).

    NOTE(review): the base classes Testing/TestingsCases are undefined in the
    original answer — presumably unittest.TestCase was meant.
    """

    def test_letter_check(self):
        # Fixed: the original assert ran at class-definition time, used the
        # undefined name NONE (None intended), and tested 'f', which actually
        # hits the 'no' branch; 'x' is the letter in neither list.
        from mydir.newdir.check_letter_py import yes_no_det
        assert yes_no_det('x') is None
I am using pytest parametrized fixtures, which have variable return values.
This is a simplified example:
import pytest
#pytest.fixture  -- NOTE(review): '#' is presumably a garbled '@'
def mocked_value(mock_param):
    """Yield "true" if mock_param is truthy, "false" otherwise."""
    # Conditional expression also avoids shadowing the fixture's own name,
    # which the original if/else assignment did.
    yield "true" if mock_param else "false"
#pytest.mark.parametrize(("mock_param"), [True, False])
def test_both_mocked_parameters(mocked_value, mock_param):
assert mocked_value == str(mock_param).lower()
#pytest.mark.parametrize(("mock_param"), [True])
def test_just_one_mocked_param(mocked_value, mock_param):
assert mocked_value == "true"
Is there a way to make the pytest fixture have a default param given to it? Something like this, except built into the single fixture definition:
def _mocked_function(mock_param):
if mock_param:
mocked_value = "true"
else:
mocked_value = "false"
return mocked_value
# NOTE(review): '#pytest.fixture' / '#pytest.mark...' lines are presumably
# garbled '@' decorators, and the snippet's indentation was lost in the paste.
#pytest.fixture
# Default-param variant: always yields the True case.
def mocked_value_with_default():
yield _mocked_function(True)
#pytest.fixture
# Parametrized variant: result depends on the mock_param fixture parameter.
def mocked_value(mock_param):
yield _mocked_function(mock_param)
#pytest.mark.parametrize(("mock_param"), [True, False])
def test_both_mocked_parameters(mocked_value, mock_param):
assert mocked_value == str(mock_param).lower()
def test_just_one_mocked_param(mocked_value_with_default):
assert mocked_value_with_default == "true"
The above works, however it would be much cleaner to have just one fixture definition handling both use cases. How do I do this?
You could use fixture parametrization:
https://docs.pytest.org/en/stable/example/parametrize.html#indirect-parametrization
# NOTE(review): '#pytest.mark.incremental' is presumably a garbled
# '@pytest.mark.incremental' (a custom marker that needs conftest.py support).
# The '----- some code here ----' line is a placeholder, so this snippet is
# illustrative, not runnable as-is.
#pytest.mark.incremental
class Test_aws():
def test_case1(self):
----- some code here ----
result = someMethodTogetResult
assert result[0] == True
orderID = result[1]
def test_case2(self):
result = someMethodTogetResult # can be only perform once test case 1 run successfully.
assert result == True
def test_deleteOrder_R53HostZonePrivate(self):
result = someMethodTogetResult
assert result[0] == True
The current behavior is if test 1 passes then test 2 runs and if test 2 passes then test 3 runs.
What I need is:
test_case 3 should run only if test_case 1 passed; test_case 2 should not affect this behavior. Any thoughts here?
I guess you are looking for pytest-dependency which allows setting conditional run dependencies between tests. Example:
import random
import pytest
# pytest-dependency example. NOTE(review): '#pytest.mark.dependency' lines are
# presumably garbled '@pytest.mark.dependency' decorators. random.choice makes
# each step pass or fail at random so the skip behaviour can be observed
# across repeated runs.
class TestAWS:
#pytest.mark.dependency
def test_instance_start(self):
assert random.choice((True, False))
#pytest.mark.dependency(depends=['TestAWS::test_instance_start'])
def test_instance_stop(self):
assert random.choice((True, False))
#pytest.mark.dependency(depends=['TestAWS::test_instance_start'])
def test_instance_delete(self):
assert random.choice((True, False))
test_instance_stop and test_instance_delete will run only if test_instance_start succeeds and skip otherwise. However, since test_instance_delete does not depend on test_instance_stop, the former will execute no matter what the result of the latter test is. Run the example test class several times to verify the desired behaviour.
To complement hoefling's answer, another option is to use pytest-steps to perform incremental testing. This can help you in particular if you wish to share some kind of incremental state/intermediate results between the steps.
However it does not implement advanced dependency mechanisms like pytest-dependency, so use the package that better suits your goal.
With pytest-steps, hoefling's example would write:
import random
from pytest_steps import test_steps, depends_on
# pytest-steps example. NOTE(review): '#depends_on(...)' / '#test_steps(...)'
# are presumably garbled '@' decorators from the pytest_steps API.
def step_instance_start():
assert random.choice((True, False))
#depends_on(step_instance_start)
def step_instance_stop():
assert random.choice((True, False))
#depends_on(step_instance_start)
def step_instance_delete():
assert random.choice((True, False))
# The suite runs each declared step in order; test_step is the current one.
#test_steps(step_instance_start, step_instance_stop, step_instance_delete)
def test_suite(test_step):
# Execute the step
test_step()
EDIT: there is a new 'generator' mode to make it even easier:
import random
from pytest_steps import test_steps, optional_step
# Generator mode of pytest-steps: each `yield` ends one step; optional_step
# lets later steps run even if an earlier optional one failed.
# NOTE(review): '#test_steps(...)' is presumably a garbled '@test_steps(...)'.
#test_steps('step_instance_start', 'step_instance_stop', 'step_instance_delete')
def test_suite():
# First step (Start)
assert random.choice((True, False))
yield
# Second step (Stop)
with optional_step('step_instance_stop') as stop_step:
assert random.choice((True, False))
yield stop_step
# Third step (Delete)
with optional_step('step_instance_delete') as delete_step:
assert random.choice((True, False))
yield delete_step
Check the documentation for details. (I'm the author of this package by the way ;) )
You can use pytest-ordering package to order your tests using pytest mark. The author of the package explains the usage here
Example:
# pytest-ordering example. NOTE(review): '#pytest.mark.first' etc. are
# presumably garbled '@pytest.mark...' decorators controlling execution order.
#pytest.mark.first
def test_first():
pass
#pytest.mark.second
def test_2():
pass
#pytest.mark.order5
def test_5():
pass
I'm trying to test a function that I made that iterates through a list, and calls os.path.exists for each item in the list. My test is passing the function a list of 2 objects. I need os.path.exists to return True for one of them and False for the other. I have tried this:
import mock
import os
import unittest
class TestClass(unittest.TestCase):
# NOTE(review): side_effect reads `values` as a bare name, but inside a method
# body class attributes are not in scope — hence the NameError described
# below; TestClass.values / TestClass.side_effect (or module level) is needed.
# The snippet's indentation was lost in the paste.
values = {1 : True, 2 : False}
def side_effect(arg):
return values[arg]
def testFunction(self):
with mock.patch('os.path.exists') as m:
# Only the side_effect assignment (#2) is needed; return_value (#1) would
# hand back the function object itself instead of calling it.
m.return_value = side_effect # 1
m.side_effect = side_effect # 2
arglist = [1, 2]
# NOTE(review): `argList` vs `arglist` — name-case mismatch; `test` is
# presumably the function under test, imported elsewhere.
ret = test(argList)
Using either but not both of line #1 and #2 give NameError: global name 'side_effect' is not defined
I found this question and modified my code like so:
import mock
import os
class TestClass(unittest.TestCase):
# NOTE(review): the TypeError reported below arises because `mock` is the
# module — the intended callable is mock.MagicMock(spec=os.path.exists)
# or mock.Mock(...). Indentation was lost in the paste.
values = {1 : True, 2 : False}
def side_effect(arg):
return values[arg]
def testFunction(self):
mockobj = mock(spec=os.path.exists)
mockobj.side_effect = side_effect
arglist = [1, 2]
# NOTE(review): `argList` vs `arglist` name-case mismatch, as in the
# first attempt.
ret = test(argList)
And this produces TypeError: 'module' object is not callable.
I also tried switching these lines:
mockobj = mock(spec=os.path.exists)
mockobj.side_effect = side_effect
for this
mockobj = mock(spec=os.path)
mockobj.exists.side_effect = side_effect
and this
mockobj = mock(spec=os)
mockobj.path.exists.side_effect = side_effect
with the same error being produced. Can anyone point out what it is that I am doing wrong and what I can do to get this to work?
EDIT:
After posting my answer below I realised that my first bit of code actually works as well, I just needed m.side_effect = TestClass.side_effect instead of m.side_effect = side_effect.
So after a bit more research and trial and error, with most of the examples here: http://www.voidspace.org.uk/python/mock/patch.html, I solved my problem.
import mock
import os
def side_effect(arg):
    """Stand-in for os.path.exists: report only path 1 as existing."""
    # The if/else returning literal True/False collapses to the comparison.
    return arg == 1
class TestClass(unittest.TestCase):
# NOTE(review): this code sits directly in the class body, so it executes once
# at class-definition time rather than inside a test method, and `self` on the
# final line is undefined there. assertItemsEqual is Python 2-only
# (assertCountEqual in Python 3), and patcher.stop() is never called, so the
# patch leaks. Indentation was lost in the paste.
patcher = mock.patch('os.path.exists')
mock_thing = patcher.start()
mock_thing.side_effect = side_effect
arg_list = [1, 2]
ret = test(arg_list)
self.assertItemsEqual([1], ret)
test calls os.path.exist for each item in arg_list, and returns a list of all items that os.path.exist returned True for. This test now passes how I want it.
You could have used self.side_effect, I believe. Since the initial definition was not global, a bare reference to side_effect looks it up in the global scope.