I have this function:
import requests

def is_car_exist(make, model):
    url = ('https://vpic.nhtsa.dot.gov/api/vehicles/GetModelsForMake/'
           f'{make.capitalize()}?format=json')
    data = requests.get(url).json()['Results']
    return any(model.capitalize() == car['Model_Name'] for car in data)
How do I write a test that mocks the requests call inside this function?
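A minimal sketch of such a test, assuming the function lives in a module named cars (a hypothetical name) so that requests can be patched where it is used:
from unittest import mock

from cars import is_car_exist  # hypothetical module name

@mock.patch('cars.requests.get')
def test_is_car_exist(mock_get):
    # The mocked response's .json() returns a fake API payload.
    mock_get.return_value.json.return_value = {
        'Results': [{'Model_Name': 'Civic'}],
    }
    assert is_car_exist('honda', 'civic') is True
    mock_get.assert_called_once()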
I would like to use a list (converted into a generator) to serve as a mock for my API calls (using unittest.mock). My function is:
def monitor_order(order_id):
    order_info = client.get_order_status(order_id)
    order_status = order_info['status']
    while order_status != 'filled':
        print('order_status: ', order_status)
        time.sleep(5)
        order_info = client.get_order_status(order_id)
        order_status = order_info['status']
    return order_info
My test function is:
@patch('my_package.client.get_order_status')
def test_monitor_order(mocked_get_order_status):
    order_states = [
        dict(status='open'),
        dict(status='open'),
        dict(status='filled'),
    ]
    # Make into a generator
    status_changes = (status for status in order_states)
    mocked_get_order_status.return_value = next(status_changes)
    # Execute function to test
    monitor_order("dummy_order")
However, I can see that the status is always 'open' when executing the test:
order_status: open
order_status: open
order_status: open
I think I understand why it's wrong, but how could I implement it correctly?
To achieve what you want, assign the list to side_effect instead; the mock then returns the next element on each call:
@patch('my_package.client.get_order_status')
def test_monitor_order(mocked_get_order_status):
    order_states = [
        dict(status='open'),
        dict(status='open'),
        dict(status='filled'),
    ]
    mocked_get_order_status.side_effect = order_states
    # Execute function to test
    monitor_order("dummy_order")
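Because side_effect hands out one element per call, you can also close the test with a couple of sanity assertions (a sketch; the function polls three times before seeing 'filled'):
    assert mocked_get_order_status.call_count == 3
    mocked_get_order_status.assert_called_with("dummy_order")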
I am testing a function that calls "requests.post" twice, so I stub that call. However, I want the first stubbed call to return one piece of fake data and the second to return another. How can I do that?
Currently, I have:
@mock.patch('requests.post', side_effect=mocked_object)
def test_function_ok(self, mock_post):
    ...
And I want something like this:
@mock.patch_once('requests.post', side_effect=mocked_1)
@mock.patch_once('requests.post', side_effect=mocked_2)
def test_function_ok(self, mock_post):
    ...
Thanks to MrBean Bremen. However, I must call the elements in the list, like this:
@mock.patch('requests.post', side_effect=[mocked_1(), mocked_2()])
def test_function_ok(self, mock_post):
    ...
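That is expected: elements of a side_effect list are returned as-is rather than invoked, so factory functions like mocked_1 and mocked_2 have to be called when building the list. A tiny sketch of the behaviour:
from unittest import mock

m = mock.Mock(side_effect=['first', 'second'])
m()  # returns 'first'
m()  # returns 'second'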
You can reassign the mocked data between the two requests; each call returns whatever return_value holds at that moment:
@patch('the_function_path_you_want_to_mock')
def test_data(self, mock_obj):
    mock_obj.return_value.function_to_be_mocked.return_value = [first_data_to_mock]
    response = self.client.post('/request', data=json.dumps(self.input_data),
                                content_type='application/vnd.api+json')
    mock_obj.return_value.function_to_be_mocked.return_value = [second_data_to_mock]
    response = self.client.post('/request', data=json.dumps(self.input_data),
                                content_type='application/vnd.api+json')
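The same setup can be expressed in one step with side_effect, which queues one return value per call (a sketch reusing the hypothetical names above):
mock_obj.return_value.function_to_be_mocked.side_effect = [
    [first_data_to_mock],   # returned by the first POST
    [second_data_to_mock],  # returned by the second POST
]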
I am trying to get the names of failed test cases from an output.xml using the Robot Framework API in Python. I am able to get the counts of failed/passed tests using the code below, but could not find any method to get the test case names.
Thanks in advance.
from robot.api import ExecutionResult

result = ExecutionResult('output.xml')
result.configure(stat_config={'suite_stat_level': 2,
                              'tag_stat_combine': 'tagANDanother'})
stats = result.statistics
print(stats.total.critical.failed)
print(stats.total.critical.passed)
print(stats.tags.combined[0].total)
Probably you need a ResultVisitor. Something like this should help:
from robot.api import ExecutionResult, ResultVisitor

class Visitor(ResultVisitor):
    def __init__(self):
        self.failed = []

    def end_test(self, test):
        if test.status == "FAIL":
            self.failed.append(test)

visitor = Visitor()
result = ExecutionResult('output.xml')
result.visit(visitor)
print(visitor.failed)
Documentation can be found at https://robot-framework.readthedocs.io/en/v3.1.2/autodoc/robot.result.html#module-robot.result.visitor
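The visitor collects result objects; to print just the names (the asker's goal), each collected test exposes a name attribute:
for test in visitor.failed:
    print(test.name)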
I am brand new to this library.
Here is the call list recorded on my mocked object:
[call(),
call('test'),
call().instance('test'),
call().instance().database('test'),
call().instance().database().snapshot(),
call().instance().database().snapshot().__enter__(),
call().instance().database().snapshot().__enter__().execute_sql('SELECT * FROM users'),
call().instance().database().snapshot().__exit__(None, None, None),
call().instance().database().snapshot().__enter__().execute_sql().__iter__()]
Here is the code I have used:
@mock.patch('testmodule.Client')
def test_read_with_query(self, mock_client):
    mock = mock_client()
    pipeline = TestPipeline()
    records = pipeline | ReadFromSpanner(TEST_PROJECT_ID, TEST_INSTANCE_ID,
                                         self.database_id).with_query('SELECT * FROM users')
    pipeline.run()
    print(mock_client.mock_calls)
    exit()
I want to mock this whole stack so that it eventually gives me some fake data, which I will provide as a return value.
The code being tested is:
spanner_client = Client(self.project_id)
instance = spanner_client.instance(self.instance_id)
database = instance.database(self.database_id)
with database.snapshot() as snapshot:
    results = snapshot.execute_sql(self.query)
So my requirement is that the results variable should contain the data I provide.
How can I provide a return value to such nested calls?
Thanks
Create separate MagicMock instances for the instance, database and snapshot objects in the code under test. Use return_value to configure the return values of each method. Here is an example. I simplified the method under test to just be a free-standing function called mut.
# test_module.py : the module under test
class Client:
    pass

def mut(project_id, instance_id, database_id, query):
    spanner_client = Client(project_id)
    instance = spanner_client.instance(instance_id)
    database = instance.database(database_id)
    with database.snapshot() as snapshot:
        results = snapshot.execute_sql(query)
    return results

# test code (pytest)
from unittest.mock import MagicMock
from unittest import mock

from test_module import mut

@mock.patch('test_module.Client')
def test_read_with_query(mock_client_class):
    mock_client = MagicMock()
    mock_instance = MagicMock()
    mock_database = MagicMock()
    mock_snapshot = MagicMock()
    expected = 'fake query results'
    mock_client_class.return_value = mock_client
    mock_client.instance.return_value = mock_instance
    mock_instance.database.return_value = mock_database
    mock_database.snapshot.return_value = mock_snapshot
    mock_snapshot.execute_sql.return_value = expected
    # Entering the context manager should hand back the snapshot itself.
    mock_snapshot.__enter__.return_value = mock_snapshot
    observed = mut(29, 42, 77, 'select *')
    mock_client_class.assert_called_once_with(29)
    mock_client.instance.assert_called_once_with(42)
    mock_instance.database.assert_called_once_with(77)
    mock_database.snapshot.assert_called_once_with()
    mock_snapshot.__enter__.assert_called_once_with()
    mock_snapshot.execute_sql.assert_called_once_with('select *')
    assert observed == expected
This test is kind of portly. Consider breaking it apart by using a fixture and a before function that sets up the mocks.
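A sketch of that suggested refactor, assuming pytest: a fixture builds the mock chain once and hands the interesting pieces to each test.
import pytest
from unittest import mock

from test_module import mut

@pytest.fixture
def spanner_mocks():
    with mock.patch('test_module.Client') as mock_client_class:
        mock_snapshot = (mock_client_class.return_value.instance.return_value
                         .database.return_value.snapshot.return_value)
        # Entering the context manager hands back the snapshot itself.
        mock_snapshot.__enter__.return_value = mock_snapshot
        yield mock_client_class, mock_snapshot

def test_read_with_query(spanner_mocks):
    mock_client_class, mock_snapshot = spanner_mocks
    mock_snapshot.execute_sql.return_value = 'fake query results'
    assert mut(29, 42, 77, 'select *') == 'fake query results'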
Either set the value directly on your mock instance. Note that because the snapshot is used as a context manager, the chain has to go through __enter__ (the __enter__/__exit__ entries in the call list above are the hint):
mock.return_value.instance.return_value.database.return_value.snapshot.return_value.__enter__.return_value.execute_sql.return_value = MY_MOCKED_DATA
Or patch the target method and set return_value on it, something like:
@mock.patch('database_engine.execute_sql', return_value=MY_MOCKED_DATA)
Basically, I realize that I am writing the same test case (test_update_with_only_1_field) against a similar URL for multiple models.
from django.test import RequestFactory, TestCase

class BaseApiTest(TestCase):
    def setUp(self):
        superuser = User.objects.create_superuser('test', 'test@api.com', 'testpassword')
        self.factory = RequestFactory()
        self.user = superuser
        self.client.login(username=superuser.username, password='testpassword')

class SomeModelApiTests(base_tests.BaseApiTest):
    def test_update_with_only_1_field(self):
        """
        Tests for updating only 1 field

        GIVEN the following shape and related are valid
        WHEN we update with just 1 field
        THEN we expect the update to be successful
        """
        shape_data = {
            'name': 'test shape',
            'name_en': 'test shape en',
            'name_zh_hans': 'test shape zh hans',
            'serial_number': 'test shape serial number',
            'model_name': {
                'some_field': '123'
            }
        }
        data = json.dumps(shape_data)
        response = self.client.post(reverse('shape-list-create'), data, 'application/json')
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        some_model = response.data['some_model']
        new_some_field = '12345'
        data = json.dumps({'some_field': new_some_field, 'id': response.data['some_model']['id']})
        response = self.client.put(reverse('some-model', args=[some_model['id']]), data, 'application/json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(new_some_field, response.data['some_field'])
I need to do this more than 10 times, which I have already done.
The only difference each time is the strings "some_model", "some-model", and "some_field".
I was wondering if there's a faster way to do this.
I can think of two ways:
Create a template in a text editor that can generate the final test case, which I then copy and paste. I am using Sublime Text 3, though I am okay with switching to another editor.
Write slightly more code by converting this test case into a behavior class that the individual test classes can call, i.e. composition.
Which one makes more sense, or is there a different way to do this?
Please note that the BaseApiTest class is also inherited by other test classes that do NOT have that repetitive test case method.
I guess what you want is "parameterized tests"; standard unittest can do this with the parameterized package:
import unittest

from parameterized import parameterized

class SomeModelApiTests(unittest.TestCase):
    @parameterized.expand([
        ('case1', 'm1', 'f1', 'nf1'),
        ('case1', 'm2', 'f2', 'nf2'),
    ])
    def test_update_with_only_1_field(self, dummy_subtest_name, model_name, field_name, new_field_value):
        print(model_name, field_name, new_field_value)
which yields:
test_update_with_only_1_field_0_case1 (t.SomeModelApiTests) ... m1 f1 nf1
ok
test_update_with_only_1_field_1_case1 (t.SomeModelApiTests) ... m2 f2 nf2
ok
The pytest testing framework has better built-in support for parameterized tests and is worth looking at.
You could create a list/dict of "some_model" items to test and use subTest (https://docs.python.org/3/library/unittest.html#distinguishing-test-iterations-using-subtests) for each of them:
my_list_of_model = [FirstModel, SecondModel]
for my_model in my_list_of_model:
    with self.subTest(model=my_model):
        ...  # Test the model here
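A sketch of how that loop sits inside an actual test method (FirstModel and SecondModel are placeholders):
class ModelsApiTests(BaseApiTest):
    def test_update_with_only_1_field(self):
        for my_model in [FirstModel, SecondModel]:
            with self.subTest(model=my_model):
                ...  # run the shared update logic against my_model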
If you want a different TestCase for each of your models, I think multiple inheritance is the way to go:
class BaseApiTestCase(TestCase):
    def setUp(self):
        # Setup stuff
        ...

class RepetitiveTestCaseMixin:
    # Class to hold the repetitive stuff
    def test_update_should_work(self):
        # Do something with self.model and self.field here
        ...

class ModelTestCase(BaseApiTestCase, RepetitiveTestCaseMixin):
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls.model = MyModel
        cls.field = 'some_field'
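Each additional model then only needs a small subclass of its own (a sketch; OtherModel and 'other_field' are hypothetical):
class OtherModelTestCase(BaseApiTestCase, RepetitiveTestCaseMixin):
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls.model = OtherModel
        cls.field = 'other_field'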
On projects I work on, we sometimes use a mixin plus "customization hooks" when a test needs to be repeated (and endpoints like "shape-list-create" are subject to change/refactoring).
Example for the question:
class TestUpdateWithOnly1FieldMixin(object):
    some_model = None
    some_field = None
    some_model2 = None

    def get_some_model(self):
        return self.some_model

    def get_some_field(self):
        return self.some_field

    def get_some_model2(self):
        return self.some_model2

    def test_update_with_only_1_field(self):
        some_model = self.get_some_model()
        # represents some-model in the example
        some_model2 = self.get_some_model2()
        some_field = self.get_some_field()
        shape_data = {
            'name': 'test shape',
            'name_en': 'test shape en',
            'name_zh_hans': 'test shape zh hans',
            'serial_number': 'test shape serial number',
            'model_name': {
                some_field: '123'
            }
        }
        data = json.dumps(shape_data)
        response = self.client.post(reverse('shape-list-create'), data, 'application/json')
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        some_model_data = response.data[some_model]

class SomeModelApiTests(base_tests.BaseApiTest, TestUpdateWithOnly1FieldMixin):
    some_model = 'choose your model'
    some_field = 'some_field'
    some_model2 = 'some-model'

    def get_some_field(self):
        # Do customization
        return 'some-field after customize'
How to split the customization hooks and what to put in the mixin etc. depends on the situation.
In my opinion the goal is to keep the actual test case easy to follow. (Maybe move the "post shape-list-create" step into a separate function, as it might not really be relevant to that test case.)
Another example, going a bit overboard with customizations, but just to give an idea:
class TestWithGoodNameMixin(object):
    some_model = None
    some_field = None

    # "Customization hooks"
    def get_shape_data(self):
        return {self.some_field: 'x'}

    def create_model(self, shape_data):
        response = self.client.post(reverse('shape-list-create'), shape_data,
                                    'application/json')
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        return response.data[self.some_model]

    def create_put_data(self, some_model_data):
        # Add default implementation
        pass

    # .....
    def test_update_with_only_1_field(self):
        shape_data = self.get_shape_data()
        some_model_data = self.create_model(shape_data)
        data = self.create_put_data(some_model_data)
        response = self.put_data(data)
        self.assert_put_response(response)
You can use the pytest package for unit testing.
It is very simple and easy to use.
The @pytest.mark.parametrize() decorator can be used to achieve that functionality.
An example of parametrized test cases is as follows:
import pytest

class TestSampleTesting(object):
    data_for_test = [
        ('{inputdata1:value1}', 'output1'),
        ('{inputdata1:value2}', 'output2'),
    ]

    @pytest.mark.parametrize('input_data, expected_output', data_for_test)
    def test_sample_function(self, input_data, expected_output):
        response = function_to_be_tested(input_data)
        assert response == expected_output
You can read more about this decorator in the docs.
You can also use the @pytest.fixture() decorator to set up the test function.
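A minimal sketch of such a fixture (function_to_be_tested and the payload are placeholders carried over from the example above):
import pytest

@pytest.fixture
def input_data():
    # Shared setup: build the payload once for each test that requests it.
    return '{inputdata1:value1}'

def test_sample_function_with_fixture(input_data):
    assert function_to_be_tested(input_data) == 'output1'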