I have a signal handler connected through a decorator, something like this very simple one:
@receiver(post_save, sender=User, dispatch_uid='myfile.signal_handler_post_save_user')
def signal_handler_post_save_user(sender, *args, **kwargs):
    # do stuff
What I want to do is to mock it with the mock library http://www.voidspace.org.uk/python/mock/ in a test, to check how many times django calls it. My code at the moment is something like:
def test_cache():
    with mock.patch('myapp.myfile.signal_handler_post_save_user') as mocked_handler:
        # do stuff that will call the post_save of User
        self.assert_equal(mocked_handler.call_count, 1)
The problem here is that the original signal handler is called even if mocked, most likely because the @receiver decorator is storing a copy of the signal handler somewhere, so I'm mocking the wrong code.
So the question: how do I mock my signal handler to make my test work?
Note that if I change my signal handler to:
def _support_function(*args, **kwargs):
    # do stuff

@receiver(post_save, sender=User, dispatch_uid='myfile.signal_handler_post_save_user')
def signal_handler_post_save_user(sender, *args, **kwargs):
    _support_function(*args, **kwargs)
and I mock _support_function instead, everything works as expected.
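For reference, a minimal sketch of that working variant (my own illustration, assuming the test runs in a Django TestCase and that creating a User fires the post_save signal):

from unittest import mock

from django.contrib.auth.models import User
from django.test import TestCase


class SignalTest(TestCase):
    def test_cache(self):
        with mock.patch('myapp.myfile._support_function') as mocked_support:
            User.objects.create(username='test')  # triggers post_save for User
        self.assertEqual(mocked_support.call_count, 1)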
Possibly a better idea is to mock out the functionality inside the signal handler rather than the handler itself. Using the OP's code:
@receiver(post_save, sender=User, dispatch_uid='myfile.signal_handler_post_save_user')
def signal_handler_post_save_user(sender, *args, **kwargs):
    do_stuff()  # <-- mock this

def do_stuff():
    ...  # do stuff in here
Then mock do_stuff:
with mock.patch('myapp.myfile.do_stuff') as mocked_handler:
    self.assert_equal(mocked_handler.call_count, 1)
So, I ended up with a kind-of solution: mocking a signal handler simply means connecting the mock itself to the signal, so that is exactly what I did:
def test_cache():
    with mock.patch('myapp.myfile.signal_handler_post_save_user', autospec=True) as mocked_handler:
        post_save.connect(mocked_handler, sender=User, dispatch_uid='test_cache_mocked_handler')
        # do stuff that will call the post_save of User
        self.assertEquals(mocked_handler.call_count, 1)  # standard django
        # self.assert_equal(mocked_handler.call_count, 1)  # when using django-nose
Notice that autospec=True in mock.patch is required to make post_save.connect work correctly on a MagicMock; otherwise Django will raise some exceptions and the connection will fail.
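One caveat worth adding (my own note, not part of the answer above): the mock stays connected to post_save after the with block, so it is safer to disconnect it again with the same dispatch_uid, for example:

def test_cache():
    with mock.patch('myapp.myfile.signal_handler_post_save_user', autospec=True) as mocked_handler:
        post_save.connect(mocked_handler, sender=User, dispatch_uid='test_cache_mocked_handler')
        try:
            # do stuff that will call the post_save of User
            User.objects.create(username='test')
        finally:
            post_save.disconnect(sender=User, dispatch_uid='test_cache_mocked_handler')
    self.assertEquals(mocked_handler.call_count, 1)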
You can mock a Django signal by mocking the ModelSignal class in django.db.models.signals like this:
#patch("django.db.models.signals.ModelSignal.send")
def test_overwhelming(self, mocker_signal):
obj = Object()
That should do the trick. Note that this will mock ALL signals no matter which object you are using.
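For example, you could then assert against the patched send directly (a sketch; Object stands in for whatever model you are saving, as in the answer above):

@patch("django.db.models.signals.ModelSignal.send")
def test_overwhelming(self, mocked_send):
    obj = Object()
    obj.save()
    # pre_save and post_save both go through ModelSignal.send
    self.assertTrue(mocked_send.called)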
If by any chance you use the mocker library instead, it can be done like this:
from mocker import Mocker, ARGS, KWARGS

def test_overwhelming(self):
    mocker = Mocker()
    # mock the post save signal
    msave = mocker.replace("django.db.models.signals")
    msave.post_save.send(KWARGS)
    mocker.count(0, None)

    with mocker:
        obj = Object()
It's more lines but it works pretty well too :)
Take a look at mock_django. It has support for signals:
https://github.com/dcramer/mock-django/blob/master/tests/mock_django/signals/tests.py
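For example, mock_django provides a mock_signal_receiver context manager that connects a mock receiver for you (a sketch; double-check the exact API against the linked tests):

from django.contrib.auth.models import User
from django.db.models.signals import post_save
from django.test import TestCase
from mock_django.signals import mock_signal_receiver


class ReceiverTest(TestCase):
    def test_post_save_fires(self):
        with mock_signal_receiver(post_save, sender=User) as receiver:
            User.objects.create(username='test')
            self.assertEqual(receiver.call_count, 1)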
In Django 1.9 you can mock all receivers with something like this:
# replace actual receivers with mocks
mocked_receivers = []
for i, receiver in enumerate(your_signal.receivers):
    mock_receiver = Mock()
    your_signal.receivers[i] = (receiver[0], mock_receiver)
    mocked_receivers.append(mock_receiver)

...  # whatever your test does

# ensure that mocked receivers have been called as expected
for mocked_receiver in mocked_receivers:
    assert mocked_receiver.call_count == 1
    mocked_receiver.assert_called_with(*your_args, sender="your_sender", signal=your_signal, **your_kwargs)
This replaces all receivers with mocks, e.g. ones you've registered, ones pluggable apps have registered, and ones that Django itself has registered. Don't be surprised if you use this on post_save and things start breaking.
You may want to inspect the receiver to determine if you actually want to mock it.
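For example, you could resolve each receiver back to its original function and only replace the ones that belong to your own app (a sketch; in Django 1.9 each entry in signal.receivers is a (lookup_key, receiver) tuple, and the receiver may be stored as a weak reference; 'myapp.' is a placeholder for your own package):

import weakref
from unittest.mock import Mock  # or: from mock import Mock

for i, (lookup_key, target) in enumerate(your_signal.receivers):
    func = target() if isinstance(target, weakref.ReferenceType) else target
    if func is not None and func.__module__.startswith('myapp.'):
        your_signal.receivers[i] = (lookup_key, Mock())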
There is a way to mock Django signals with a small class.
You should keep in mind that this only mocks the function as a Django signal handler and not the original function; for example, if an m2m_changed signal triggers a call to a function that calls your handler directly, mock.call_count would not be incremented. You would need a separate mock to keep track of those calls.
Here is the class in question:
import weakref

from django.db.models import signals
from mock import MagicMock  # Python 3: from unittest.mock import MagicMock


class LocalDjangoSignalsMock():
    def __init__(self, to_mock):
        """
        Replaces registered django signals with MagicMocks

        :param to_mock: list of signal handlers to mock
        """
        self.mocks = {handler: MagicMock() for handler in to_mock}
        self.reverse_mocks = {magicmock: mocked
                              for mocked, magicmock in self.mocks.items()}
        django_signals = [signals.post_save, signals.m2m_changed]
        self.registered_receivers = [signal.receivers
                                     for signal in django_signals]

    def _apply_mocks(self):
        for receivers in self.registered_receivers:
            for receiver_index in xrange(len(receivers)):
                handler = receivers[receiver_index]
                handler_function = handler[1]()
                if handler_function in self.mocks:
                    receivers[receiver_index] = (
                        handler[0], self.mocks[handler_function])

    def _reverse_mocks(self):
        for receivers in self.registered_receivers:
            for receiver_index in xrange(len(receivers)):
                handler = receivers[receiver_index]
                handler_function = handler[1]
                if not isinstance(handler_function, MagicMock):
                    continue
                receivers[receiver_index] = (
                    handler[0], weakref.ref(self.reverse_mocks[handler_function]))

    def __enter__(self):
        self._apply_mocks()
        return self.mocks

    def __exit__(self, *args):
        self._reverse_mocks()
Example usage
to_mock = [my_handler]
with LocalDjangoSignalsMock(to_mock) as mocks:
    my_trigger()
    for mocked in to_mock:
        assert(mocks[mocked].call_count)
        # 'function {0} was called {1}'.format(
        #     mocked, mocked.call_count)
As you mentioned,
mock.patch('myapp.myfile._support_function') is correct but mock.patch('myapp.myfile.signal_handler_post_save_user') is wrong.
I think the reason is:
When the test initializes, some module imports the file where the signal handler is defined, and at that point the @receiver decorator connects the original signal_handler_post_save_user to the signal.
In the test, mock.patch('myapp.myfile.signal_handler_post_save_user') only replaces the module attribute; the signal connection still points at the original function, so the original signal handler is called even if mocked (patching _support_function works because the real handler calls it by name).
Try disconnecting the original signal connection before patching, like:
post_save.disconnect(signal_handler_post_save_user)
with mock.patch("review.signals.signal_handler_post_save_user", autospec=True) as handler:
    # do stuff
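A fuller sketch of that idea (my own reading of the answer, assuming it runs inside a TestCase method and that creating a User fires the signal; after disconnecting the original handler you have to connect the mock explicitly, otherwise nothing will call it):

post_save.disconnect(sender=User, dispatch_uid='myfile.signal_handler_post_save_user')
with mock.patch("review.signals.signal_handler_post_save_user", autospec=True) as handler:
    post_save.connect(handler, sender=User, dispatch_uid='test.mocked_handler')
    # do stuff that triggers the post_save of User
    User.objects.create(username='test')
    self.assertEqual(handler.call_count, 1)
    post_save.disconnect(sender=User, dispatch_uid='test.mocked_handler')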
I have a database handler that utilizes the SQLAlchemy ORM to communicate with a database. As part of SQLAlchemy's recommended practices, I interact with the session by using it as a context manager. How can I test what a function has done inside that context manager?
EDIT: I realized the file structure mattered due to the complexity it introduced. I re-structured the code below to more closely mirror what the end file structure will be like, and what a common production repo in my environment would look like, with code being defined in one file and tests in a completely separate file.
For example:
Code File (delete_things_from_table.py):
from db_handler import delete, SomeTable


def delete_stuff(handler):
    stmt = delete(SomeTable)
    with handler.Session.begin() as session:
        session.execute(stmt)
        session.commit()
Test File:
import pytest
import delete_things_from_table as dlt
from db_handler import Handler


def test_delete_stuff():
    handler = Handler()
    dlt.delete_stuff(handler)
    # Test that session.execute was called
    # Test the value of 'stmt'
    # Test that session.commit was called
I am not looking for a solution specific to SQLAlchemy; I am only utilizing this to highlight what I want to test within a context manager, and any strategies for testing context managers are welcome.
After sleeping on it, I came up with a solution. I'd love additional/less complex solutions if there are any available, but this works:
import pytest
import delete_things_from_table as dlt
from db_handler import Handler


class MockSession:
    def __init__(self):
        self.execute_params = []
        self.commit_called = False

    def execute(self, *args, **kwargs):
        self.execute_params.append(["call", args, kwargs])
        return self

    def commit(self):
        self.commit_called = True
        return self

    def begin(self):
        return self

    def __enter__(self):
        return self

    def __exit__(self, type, value, traceback):
        pass


def test_delete_stuff(monkeypatch):
    handler = Handler()
    # Parens in 'MockSession' below are important: pass an instance, not the class
    monkeypatch.setattr(handler, "Session", MockSession())
    dlt.delete_stuff(handler)
    # Test that session.execute was called
    assert len(handler.Session.execute_params)
    # Test the value of 'stmt'
    assert str(handler.Session.execute_params[0][1][0]) == "DELETE FROM some_table"
    # Test that session.commit was called
    assert handler.Session.commit_called
Some key things to note:
I created a static mock instead of a MagicMock as it's easier to control the methods/data flow with a custom mock class
Since the SQLAlchemy session context manager requires begin() to start the context, my mock class needed a begin() method. Returning self from begin() allows us to inspect the values later.
Context managers rely on the magic methods __enter__ and __exit__, with the argument signatures you see above.
The mock class contains mocked methods which record calls in instance variables, allowing us to make assertions later.
This relies on monkeypatch (there are other ways I'm sure), but what's important to note is that when you pass your mock class you want to patch in an instance of the class and not the class itself. The parentheses make a world of difference.
I don't think it's an elegant solution, but it's working. I'll happily take any suggestions for improvement.
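For what it's worth, a possibly simpler variant (my own sketch, not part of the solution above) leans on MagicMock, which already implements the context-manager protocol, instead of a hand-written mock class:

from unittest import mock

import delete_things_from_table as dlt
from db_handler import Handler


def test_delete_stuff_with_magicmock():
    handler = Handler()
    handler.Session = mock.MagicMock()
    # the code does `with handler.Session.begin() as session`, so the session it
    # sees is the __enter__ return value of begin()'s return value
    session = handler.Session.begin.return_value.__enter__.return_value
    dlt.delete_stuff(handler)
    session.execute.assert_called_once()
    session.commit.assert_called_once()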
Using django-cacheops, I want to test that my views are getting cached as I intend them to be. In my test case I'm connecting cacheops cache_read signal to a handler that should increment a value in the cache for hits or misses. However, the signal is never fired. Does anyone know the correct way to connect a django signal handler in a testcase, purely for use in that testcase?
Here's what I have so far:
from cacheops.signals import cache_read

cache.set('test_cache_hits', 0)
cache.set('test_cache_misses', 0)


def cache_log(sender, func, hit, **kwargs):
    # never called
    if hit:
        cache.incr('test_cache_hits')
    else:
        cache.incr('test_cache_misses')


class BootstrapTests(TestCase):

    @classmethod
    def setUpClass(cls):
        super(BootstrapTests, cls).setUpClass()
        cache_read.connect(cache_log)
        assert cache_read.has_listeners()

    def test_something_that_should_fill_and_retrieve_cache(self):
        ...
        hits = cache.get('test_cache_hits')  # always 0
I've also tried connecting the signal handler at the module level, and in the regular testcase setUp method, all with the same result.
EDIT:
Here's my actual test code, plus the object I'm testing. I'm using the cached_as decorator to cache a function. This test is currently failing.
boostrap.py
class BootstrapData(object):
    def __init__(self, app, person=None):
        self.app = app

    def get_homepage_dict(self, context={}):
        url_name = self.app.url_name

        @cached_as(App.objects.filter(url_name=url_name), extra=context)
        def _get_homepage_dict():
            if self.app.homepage is None:
                return None
            concrete_module_class = MODULE_MAPPING[self.app.homepage.type]
            serializer_class_name = f'{concrete_module_class.__name__}Serializer'
            serializer_class = getattr(api.serializers, serializer_class_name)
            concrete_module = concrete_module_class.objects.get(module=self.app.homepage)
            serializer = serializer_class(context=context)
            key = concrete_module_class.__name__
            return {
                key: serializer.to_representation(instance=concrete_module)
            }

        return _get_homepage_dict()
test_bootstrap.py
class BootstrapDataTest(TestCase):
    def setUp(self):
        super(BootstrapDataTest, self).setUp()

        def set_signal(signal=None, **kwargs):
            self.signal_calls.append(kwargs)

        self.signal_calls = []
        cache_read.connect(set_signal, dispatch_uid=1, weak=False)
        self.app = self.setup_basic_app()  # creates an 'App' model and saves it

    def tearDown(self):
        cache_read.disconnect(dispatch_uid=1)

    def test_boostrap_data_is_cached(self):
        obj = BootstrapData(self.app)
        obj.get_homepage_dict()
        # fails, self.signal_calls == []
        self.assertEqual(self.signal_calls, [{'sender': App, 'func': None, 'hit': False}])
        self.signal_calls = []
        obj.get_homepage_dict()
        self.assertEqual(self.signal_calls, [{'sender': App, 'func': None, 'hit': True}])
I can't see why this is happening but I will try to make a useful answer anyway.
First, if you want to test whether the cache works, you shouldn't rely on its side effects to check that; signals are a side effect of its primary function, which is preventing db calls. Try testing that instead:
def test_it_works(self):
    obj = BootstrapData(self.app)
    with self.assertNumQueries(1):
        obj.get_homepage_dict()
    with self.assertNumQueries(0):
        obj.get_homepage_dict()
Second, if you want to know what's going on, you can dig in by adding prints everywhere, including in the cacheops code, and see where it stops. Alternatively, you can write a test for me to look at; the instructions are here: https://github.com/Suor/django-cacheops#writing-a-test.
Last, your test is a bit wrong. For @cached_as() the sender would be None and func would be the decorated function.
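Following that hint, the assertions would presumably need to look more like this (a sketch; mock.ANY stands in for the inner _get_homepage_dict function, which the test cannot reference directly):

from unittest import mock

def test_boostrap_data_is_cached(self):
    obj = BootstrapData(self.app)
    obj.get_homepage_dict()
    self.assertEqual(self.signal_calls, [{'sender': None, 'func': mock.ANY, 'hit': False}])
    self.signal_calls = []
    obj.get_homepage_dict()
    self.assertEqual(self.signal_calls, [{'sender': None, 'func': mock.ANY, 'hit': True}])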
In this specific case, it turned out that my test cases subclassed Django REST framework's APITestCase, which in turn subclasses Django's SimpleTestCase.
Looking in the cacheops sources, I found that those tests subclass TransactionTestCase, and switching out the test case fixed this issue.
I'd be interested to know why this is the case, but the issue is solved for now.
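The likely explanation (my guess, not confirmed in the thread): Django's TestCase wraps every test in a transaction, and cacheops skips queryset caching for queries made inside a transaction because the data could still be rolled back, so cache_read never fires; TransactionTestCase doesn't wrap tests that way, which would explain why switching the base class fixed it.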
I'm trying to assert that a post_save signal receiver is called when an instance of my Client model is saved.
The signal receiver looks as follow:
# reports/signals.py

@receiver(post_save, sender=Client)
def create_client_draft(sender, instance=None, created=False, **kwargs):
    """Guarantees a DraftSchedule exists for each Client post save"""
    print('called')  # Log to stdout when called
    if created and not kwargs.get('raw', False):
        DraftSchedule.objects.get_or_create(client=instance)
I've set up a test that looks like this
@pytest.mark.django_db
@patch('reports.signals.create_client_draft')
def test_auto_create_draftschedule_on_client_creation(mock_signal):
    client = mixer.blend(Client)  # Creates a Client with random data
    assert mock_signal.call_count == 1
I would expect this test to pass, since the 'called' print statement appears in captured stdout when the test is run.
However, the test runner seems to think my mock function was never called at all.
mock_signal = <MagicMock name='create_client_draft' id='139903470431088'>

    @pytest.mark.django_db
    @patch('reports.signals.create_client_draft')
    def test_auto_create_draftschedule_on_client_creation(mock_signal):
        client = mixer.blend(Client)
>       assert mock_signal.call_count == 1
E       AssertionError: assert 0 == 1
E        +  where 0 = <MagicMock name='create_client_draft' id='139903470431088'>.call_count

reports/tests/test_signals.py:36: AssertionError
------------------------------ Captured stdout call ------------------------------
called
The print statement seems to suggest that the function was called during the test, whereas the test assertion suggests otherwise. Am I missing something obvious here with the mocking library?
Patching with mock objects only works for callers that look up the function by name at run time. Signal handlers are held in a table, so the dispatcher doesn't look up your mocked version.
It's a bit hacky, but you could have your signal handler call a helper function. Then the helper function could be mocked.
# reports/signals.py

@receiver(post_save, sender=Client)
def create_client_draft_handler(sender, instance=None, created=False, **kwargs):
    create_client_draft(sender, instance, created, **kwargs)


def create_client_draft(sender, instance=None, created=False, **kwargs):
    """Guarantees a DraftSchedule exists for each Client post save

    This function can be mocked, because it's called by name.
    """
    print('called')  # Log to stdout when called
    if created and not kwargs.get('raw', False):
        DraftSchedule.objects.get_or_create(client=instance)
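With that split, the original test should pass unchanged, because the patch target is now a function that the handler looks up by name (a sketch reusing the question's test, assuming the same imports):

@pytest.mark.django_db
@patch('reports.signals.create_client_draft')
def test_auto_create_draftschedule_on_client_creation(mock_create):
    mixer.blend(Client)  # saving a Client fires post_save
    assert mock_create.call_count == 1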
If I send a Signal from a module function (a django view function as it happens), that is not inside a Class, it's not obvious (to me) what the sender should be - if anything? Is sender=None appropriate in this case?
Alternatively, the function is invoked by an HTTP request, which I currently pass in as a separate argument - should I pass that instead?
Option A:
from django.dispatch import Signal

my_signal = Signal(providing_args=['my_arg', 'request'])


# this is a view function
def do_something(request):
    # ... do useful stuff
    my_signal.send(
        sender=None,
        my_arg="Hello",
        request=request
    )
Option B:
from django.dispatch import Signal

my_signal = Signal(providing_args=['my_arg'])


# this is a view function
def do_something(request):
    # ... do useful stuff
    my_signal.send(
        sender=request,
        my_arg="Hello",
    )
[UPDATE]
Option A has it. There's nothing useful that the receiver can do with the sender in this case (i.e. there's no meaningful object to pass), so set it to None.
The django.dispatch.Dispatcher source simply says it should be
"...[t]he sender of the signal. Either a specific object or None."
which then ties in with the receiver via connect(), for which the sender's significance is:
"The sender to which the receiver should respond. Must either be
of type Signal, or None to receive events from any sender"
which, I admit, isn't particularly clear, but in your case, I would say to use sender=None because there's nothing concrete to hook to, as the request is transient.
A function is an object in Python, so you can just set the sender to be a reference to your function, like this:

def my_func():
    my_signal.send(sender=my_func, my_arg="Hello")
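The nice side effect (my own note) is that receivers can then subscribe to sends from that specific function only:

def my_receiver(sender, my_arg=None, **kwargs):
    print(my_arg)

# only fires for signals sent with sender=my_func
my_signal.connect(my_receiver, sender=my_func)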
I've been using testbed, webtest, and nose to test my Python GAE app, and it is a great setup. I'm now implementing something similar to Nick's great example of using the deferred library, but I can't figure out a good way to test the parts of the code triggered by DeadlineExceededError.
Since this is in the context of a taskqueue, it would be painful to construct a test that took more than 10 minutes to run. Is there a way to temporarily set the taskqueue time limit to a few seconds for the purpose of testing? Or perhaps some other way to elegantly test the execution of code in the except DeadlineExceededError block?
Abstract the "GAE context" for your code. in production provide real "GAE implementation" for testing provide a mock own that will raise the DeadlineExceededError. The test should not depend on any timeout, should be fast.
Sample abstraction (just glue):
class AbstractGAETaskContext(object):
    def task_expired(self):
        pass  # this will throw an exception in the mock implementation

    # here you define any method that you call into GAE, to be mocked
    def deferred(self, *args, **kwargs):
        pass
If you don't like the abstraction, you can monkey-patch for testing only; you still need to define the task_expired function as your hook for testing.
task_expired should be called during your task implementation function.
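To make that concrete, here is a minimal sketch of the mock side of the abstraction (my own illustration; the class name is made up):

from google.appengine.runtime import DeadlineExceededError


class MockGAETaskContext(AbstractGAETaskContext):
    """Test double: pretends the task deadline has been reached."""

    def task_expired(self):
        # simulate the runtime cutting the task off
        raise DeadlineExceededError()

    def deferred(self, *args, **kwargs):
        # record the deferred call instead of enqueueing a real task
        self.deferred_calls = getattr(self, 'deferred_calls', []) + [(args, kwargs)]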
*UPDATED* This is the 3rd solution:
First I want to mention that Nick's sample implementation is not so great: the Mapper class has too many responsibilities (deferring, querying data, updating in batches), and this makes the test hard to write because a lot of mocks need to be defined. So I extracted the deferring responsibility into a separate class. You only want to test the deferring mechanism; what actually happens (the update, the query, etc.) should be handled in another test.
Here is the deferring class; note that it no longer depends on GAE:
class DeferredCall(object):
    def __init__(self, deferred):
        self.deferred = deferred

    def run(self, long_execution_call, context, *args, **kwargs):
        '''long_execution_call should return a tuple telling us how the operation
        terminated: whether it timed out, and the context where it was abandoned'''
        next_context, timeouted = long_execution_call(context, *args, **kwargs)
        if timeouted:
            self.deferred(self.run, long_execution_call, next_context, *args, **kwargs)
Here is the test module:
class Test(unittest.TestCase):
    def test_defer(self):
        calls = []

        def mock_deferrer(callback, *args, **kwargs):
            calls.append((callback, args, kwargs))

        def interrupted(context):
            return "new_context", True

        d = DeferredCall(mock_deferrer)
        d.run(interrupted, "init_context")
        self.assertEquals(1, len(calls), 'a deferred call should be made')

    def test_no_defer(self):
        calls = []

        def mock_deferrer(callback, *args, **kwargs):
            calls.append((callback, args, kwargs))

        def completed(context):
            return None, False

        d = DeferredCall(mock_deferrer)
        d.run(completed, "init_context")
        self.assertEquals(0, len(calls), 'no deferred call should be made')
Here is how Nick's Mapper implementation would look:
class Mapper:
    ...
    def _continue(self, start_key, batch_size):
        ...  # here is the same code, nothing was changed
        except DeadlineExceededError:
            # Write any unfinished updates to the datastore.
            self._batch_write()
            # Queue a new task to pick up where we left off.
            # deferred.defer(self._continue, start_key, batch_size)
            return start_key, True  # make it compatible with DeferredCall
        self.finish()
        return None, False  # make it compatible with DeferredCall

    runner = _continue
Code where you register the long-running task; this only depends on the GAE deferred lib:

import DeferredCall
import PersonMapper  # this inherits the Mapper
from google.appengine.ext import deferred

mapper = PersonMapper()
# start from the beginning (no cursor) with a batch size of 100
DeferredCall(deferred.defer).run(mapper.runner, None, 100)