I would like to use a list (converted into a generator) to serve as a mock for my API calls (using unittest.mock). My function is:
def monitor_order(order_id):
    order_info = client.get_order_status(order_id)
    order_status = order_info['status']
    while order_status != 'filled':
        print('order_status: ', order_status)
        time.sleep(5)
        order_info = client.get_order_status(order_id)
        order_status = order_info['status']
    return order_info
My test function is:
@patch('my_package.client.get_order_status')
def test_monitor_order(mocked_get_order_status):
    order_states = [
        dict(status='open'),
        dict(status='open'),
        dict(status='filled'),
    ]
    # Make into a generator
    status_changes = (status for status in order_states)
    mocked_get_order_status.return_value = next(status_changes)
    # Execute function to test
    monitor_order("dummy_order")
However, I can see that the status is always 'open' when executing the test:
order_status: open
order_status: open
order_status: open
I think I understand why it's wrong, but how could I implement it correctly?
To achieve what you want, you can rewrite your test as follows:
@patch('my_package.client.get_order_status')
def test_monitor_order(mocked_get_order_status):
    order_states = [
        dict(status='open'),
        dict(status='open'),
        dict(status='filled'),
    ]
    mocked_get_order_status.side_effect = order_states
    # Execute function to test
    monitor_order("dummy_order")
I have a function:
import requests

def is_car_exist(make, model):
    url = ('https://vpic.nhtsa.dot.gov/api/vehicles/GetModelsForMake/'
           f'{make.capitalize()}?format=json')
    data = requests.get(url).json()['Results']
    return any(model.capitalize() == car['Model_Name'] for car in data)
How do I write a test for the requests call inside this function?
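For reference, a common approach is to patch requests.get where the function sees it and give the mock a canned .json() payload. A sketch, assuming is_car_exist lives in a module named cars.py (the module name is an assumption):

from unittest import mock

from cars import is_car_exist  # hypothetical module name

@mock.patch('cars.requests.get')
def test_is_car_exist(mock_get):
    # Canned payload in the shape the real API returns
    mock_get.return_value.json.return_value = {
        'Results': [{'Model_Name': 'Civic'}]
    }
    assert is_car_exist('honda', 'civic')
    mock_get.assert_called_once()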
I need to get a list of the model's fields, like:
@instance.register
class Todo(Document):
    title = fields.StringField(required=True, default='Name')
    description = fields.StringField()
    created_at = fields.DateTimeField()
    created_by = fields.StringField()
    priority = fields.IntegerField()
to
[
'title',
'description',
'created_at',
'created_by',
'priority'
]
So I have a function that returns a list of fields:
import inspect

def get_class_properties(cls):
    attributes = inspect.getmembers(cls, lambda a: not inspect.isroutine(a))
    return [attr for attr in attributes
            if not (attr[0].startswith('__') and attr[0].endswith('__'))][1]
But using it gives me this error:
umongo.exceptions.NoDBDefinedError: init must be called to define a db
Usage:
properties=get_class_properties(Todo)
UPD
Here is my mongo initialization code:
async def mongo_client(app):
conf = app["config"]["mongo"]
client = AsyncIOMotorClient(host=conf["host"], port=conf["port"])
db = client[conf["db"]]
instance.init(db)
await Todo.ensure_indexes()
app["db_client"]: AsyncIOMotorClient = client
app["db"] = db
yield
await app["db_client"].close()
This is a copy/paste of this answer from the author of this library:
As far as I remember, this exception is raised when you're trying to use
lazy clients without initializing them properly. Any lazy class of
uMongo expects that the used database will be specified before
usage. All you need is to specify the used database and
invoke the init method of your lazy instance, like this:
from motor.motor_asyncio import AsyncIOMotorClient
from umongo import MotorAsyncIOInstance
client = AsyncIOMotorClient("mongodb://user:password@host:port/")
client = client["test_database"]
lazy_umongo = MotorAsyncIOInstance()
lazy_umongo.init(client)
As an example you can look into the Auth/Auth microservice code, where
documents are defined and stored in files separate from the actual
usage. Also these example files (documents.py and
prepare_mongodb.py) will help you to find a solution.
The trick was that
properties = get_class_properties(Todo)
is invoked earlier than
async def mongo_client(app):
The solution is to do things in the right order (see the comments in the code):
async def init_app(argv=None):
    app = web.Application(middlewares=[deserializer_middleware], logger=logger)
    app["config"] = config
    conf = app["config"]["mongo"]
    client = AsyncIOMotorClient(host=conf["host"], port=conf["port"])
    db = client[conf["db"]]
    instance.init(db)
    # Remove this line:
    # app.cleanup_ctx.append(mongo_client)
    app.cleanup_ctx.append(api_client)
    register_routes(app)
    return app

def register_routes(app: web.Application):
    # Use here:
    todo_resource = RestResource(
        entity='todo',
        factory=Todo,
        properties=get_class_properties(Todo)
    )
    todo_resource.register(app.router)
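As an aside, the trailing [1] in get_class_properties picks out a single (name, value) pair from the filtered list rather than returning the field names. A version that returns just the names (a sketch built only on inspect, independent of umongo) could look like:

import inspect

def get_class_properties(cls):
    # getmembers returns (name, value) pairs; keep the non-routines
    attributes = inspect.getmembers(cls, lambda a: not inspect.isroutine(a))
    # Return only the names, skipping dunder attributes
    return [name for name, _ in attributes
            if not (name.startswith('__') and name.endswith('__'))]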
I'm brand new to this library. Here is the call stack of my mocked object:
[call(),
call('test'),
call().instance('test'),
call().instance().database('test'),
call().instance().database().snapshot(),
call().instance().database().snapshot().__enter__(),
call().instance().database().snapshot().__enter__().execute_sql('SELECT * FROM users'),
call().instance().database().snapshot().__exit__(None, None, None),
call().instance().database().snapshot().__enter__().execute_sql().__iter__()]
Here is the code I have used:
@mock.patch('testmodule.Client')
def test_read_with_query(self, mock_client):
    mock = mock_client()
    pipeline = TestPipeline()
    records = pipeline | ReadFromSpanner(
        TEST_PROJECT_ID, TEST_INSTANCE_ID, self.database_id
    ).with_query('SELECT * FROM users')
    pipeline.run()
    print(mock_client.mock_calls)
    exit()
I want to mock this whole stack so that it eventually gives me some fake data, which I will provide as a return value.
The code being tested is:
spanner_client = Client(self.project_id)
instance = spanner_client.instance(self.instance_id)
database = instance.database(self.database_id)
with database.snapshot() as snapshot:
    results = snapshot.execute_sql(self.query)
So my requirement is that the results variable should contain the data I provide.
How can I provide a return value to such nested calls?
Thanks
Create separate MagicMock instances for the instance, database and snapshot objects used in the code under test. Use return_value to configure the return value of each method. Here is an example. I simplified the method under test to just be a free-standing function called mut.
# test_module.py : the module under test
class Client:
    pass

def mut(project_id, instance_id, database_id, query):
    spanner_client = Client(project_id)
    instance = spanner_client.instance(instance_id)
    database = instance.database(database_id)
    with database.snapshot() as snapshot:
        results = snapshot.execute_sql(query)
    return results
# test code (pytest)
from unittest.mock import MagicMock
from unittest import mock
from test_module import mut

@mock.patch('test_module.Client')
def test_read_with_query(mock_client_class):
    mock_client = MagicMock()
    mock_instance = MagicMock()
    mock_database = MagicMock()
    mock_snapshot = MagicMock()
    expected = 'fake query results'

    mock_client_class.return_value = mock_client
    mock_client.instance.return_value = mock_instance
    mock_instance.database.return_value = mock_database
    mock_database.snapshot.return_value = mock_snapshot
    mock_snapshot.execute_sql.return_value = expected
    mock_snapshot.__enter__.return_value = mock_snapshot

    observed = mut(29, 42, 77, 'select *')

    mock_client_class.assert_called_once_with(29)
    mock_client.instance.assert_called_once_with(42)
    mock_instance.database.assert_called_once_with(77)
    mock_database.snapshot.assert_called_once_with()
    mock_snapshot.__enter__.assert_called_once_with()
    mock_snapshot.execute_sql.assert_called_once_with('select *')
    assert observed == expected
This test is kind of portly. Consider breaking it apart by using a fixture and a before function that sets up the mocks.
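For instance, the mock wiring could move into a pytest fixture (a sketch; the wiring mirrors the test above):

import pytest
from unittest import mock
from unittest.mock import MagicMock

from test_module import mut

@pytest.fixture
def mock_client_class():
    with mock.patch('test_module.Client') as client_class:
        # Wire the chain Client() -> instance() -> database() -> snapshot()
        snapshot = MagicMock()
        snapshot.__enter__.return_value = snapshot
        client_class.return_value.instance.return_value \
            .database.return_value.snapshot.return_value = snapshot
        yield client_class

def test_read_with_query(mock_client_class):
    snapshot = (mock_client_class.return_value.instance.return_value
                .database.return_value.snapshot.return_value)
    snapshot.execute_sql.return_value = 'fake query results'
    assert mut(29, 42, 77, 'select *') == 'fake query results'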
Either set the value directly on your Mock instance (without spelling out the __enter__/__exit__ calls seen in the call stack) with:
mock.return_value.instance.return_value.database.return_value.snapshot.return_value.execute_sql.return_value = MY_MOCKED_DATA
or patch the target method and set its return_value, something like:
@mock.patch('database_engine.execute_sql', return_value=MY_MOCKED_DATA)
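One caveat with the chained form: because database.snapshot() is used as a context manager in the code under test, execute_sql is called on whatever __enter__ returns, so the chain needs to route through __enter__ as well (a sketch using the same names as the snippet above):

mock.return_value.instance.return_value.database.return_value \
    .snapshot.return_value.__enter__.return_value \
    .execute_sql.return_value = MY_MOCKED_DATA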
I'm trying to write unittests for my own Elasticsearch client. It uses the client from elasticsearch-py.
Most of my tests are fine, but when running a test on my own search() function (which uses the search() function from the Elasticsearch client) I get very random behaviour. This is the way my test is implemented:
def setUp(self) -> None:
    self.es = ESClient(host="localhost")
    self.es_acc = ESClient()
    self.connection_res = (False, {})
    self.t = self.es_acc.get_connection_status(self._callback)
    self.t.join()

    # Create test index and index some documents
    self.es.create_index(self.TEST_INDEX)
    names = ["Gregor", "Alice", "Per Svensson", "Mats Hermelin", "Mamma Mia",
             "Eva Dahlgren", "Per Morberg", "Maja Larsson", "Ola Salo",
             "Magrecievic Holagrostokovic"]
    self.num_docs = len(names)
    self.payload = []
    random.seed(123)
    for i, name in enumerate(names):
        n = name.split(" ")
        fname = n[0]
        lname = n[1] if len(n) > 1 else n[0]
        self.payload.append({"name": {"first": fname, "last": lname},
                             "age": random.randint(-100, 100),
                             "timestamp": datetime.utcnow() - timedelta(days=1 * i)})
    self.es.upload(self.TEST_INDEX, self.payload, ids=list(range(len(names))))
def test_search(self):
    # Test getting docs based on ids
    ids = ["1", "4", "9"]
    status, hits = self.es.search(self.TEST_INDEX, ids=ids)  # Breakpoint
    docs = hits["hits"]["hits"]
    self.assertTrue(status, "Status not correct for search!")
    returned_ids = [d["_id"] for d in docs]
    names = [d["_source"]["name"] for d in docs]
    self.assertListEqual(sorted(returned_ids), ids, "Returned ids from search not correct!")
    self.assertListEqual(names, [self.payload[i]["name"] for i in [1, 4, 9]],
                         "Returned source from search not correct!")
In setUp() I'm just uploading a few documents, so there should always be 10 documents to test on. Below is an excerpt from my search() function:
if ids:
    try:
        q = Query().ids(ids).compile_and_get()
        res = self.es.search(index=index, body=q)
        print(res)
        return True, res
    except exceptions.ElasticsearchException as e:
        self._handle_elastic_exceptions("search", e, index=index)
        return False, {}
I've implemented Query myself. Anyway, when I just run the test, I ALMOST always get 0 hits. But if I debug the application, with a breakpoint in test_search() on the line where I call search(), and step from there, everything works fine. If I put the breakpoint just one line below, I get 0 hits again. What is going on? Why is it not blocking correctly?
It seems like I found my solution!
I did not understand that setUp is called before every test method. This was actually not the problem, however.
The problem is that for some tests, uploading the documents (done in setUp) simply took too much time, so when the test started the documents did not exist yet! Solution: add sleep(1) to the end of setUp.
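For context, the underlying reason is that Elasticsearch is only near-real-time: newly indexed documents become searchable after the next index refresh, which happens roughly once per second by default, so a one-second sleep usually covers it. A more deterministic alternative is to force a refresh at the end of setUp. A sketch, assuming the ESClient wrapper stores the raw elasticsearch-py client as self.es (which the search() excerpt above suggests):

# Force a refresh so the uploaded documents are immediately searchable
self.es.es.indices.refresh(index=self.TEST_INDEX)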
I made a custom Airflow operator; this operator takes an input, and its output is pushed to XCom.
What I want to achieve is to call the operator with some defined input, parse the output in a Python callable inside a BranchPythonOperator, and then pass the parsed output to another task that calls the same operator tree:
CustomOperator_Task1 = CustomOperator(
    data={
        'type': 'custom',
        'date': '2017-11-12'
    },
    task_id='CustomOperator_Task1',
    dag=dag)
data = {}

def checkOutput(**kwargs):
    result = kwargs['ti'].xcom_pull(task_ids='CustomOperator_Task1')
    if result.success == True:
        data = result.data
        return "CustomOperator_Task2"
    return "Failure"
BranchOperator_Task = BranchPythonOperator(
    task_id='BranchOperator_Task',
    dag=dag,
    python_callable=checkOutput,
    provide_context=True,
    trigger_rule="all_done")

CustomOperator_Task2 = CustomOperator(
    data=data,
    task_id='CustomOperator_Task2',
    dag=dag)

CustomOperator_Task1 >> BranchOperator_Task >> CustomOperator_Task2
In CustomOperator_Task2 I want to use the parsed data from BranchOperator_Task, but right now it is always the empty {}.
What is the best way to do that?
I see your issue now. Setting the data variable like you are won't work because of how Airflow works. An entirely different process will be running the next task, so it won't have the context of what data was set to.
Instead, BranchOperator_Task has to push the parsed output into another XCom so CustomOperator_Task2 can explicitly fetch it.
def checkOutput(**kwargs):
    ti = kwargs['ti']
    result = ti.xcom_pull(task_ids='CustomOperator_Task1')
    if result.success:
        ti.xcom_push(key='data', value=result.data)
        return "CustomOperator_Task2"
    return "Failure"
BranchOperator_Task = BranchPythonOperator(
    ...)

CustomOperator_Task2 = CustomOperator(
    data_xcom_task_id=BranchOperator_Task.task_id,
    data_xcom_key='data',
    task_id='CustomOperator_Task2',
    dag=dag)
Then your operator might look something like this.
class CustomOperator(BaseOperator):
    @apply_defaults
    def __init__(self, data_xcom_task_id, data_xcom_key, *args, **kwargs):
        # Forward task_id, dag, etc. to BaseOperator
        super().__init__(*args, **kwargs)
        self.data_xcom_task_id = data_xcom_task_id
        self.data_xcom_key = data_xcom_key

    def execute(self, context):
        data = context['ti'].xcom_pull(task_ids=self.data_xcom_task_id,
                                       key=self.data_xcom_key)
        ...
Parameters may not be required if you just want to hardcode them. It depends on your use case.
As your comment suggests, the return value from your custom operator is None, so your xcom_pull should be expected to come back empty.
Please use xcom_push explicitly, as the default behavior of Airflow could change over time.
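In other words, push the result from inside execute() instead of relying on the implicit return-value XCom. A sketch (do_work and the key name are illustrative):

def execute(self, context):
    result = do_work()  # placeholder for the operator's real logic
    # Push explicitly; the implicit push of the return value is
    # behavior that could change between Airflow versions.
    context['ti'].xcom_push(key='result', value=result)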