How to test that tornado read_message got nothing to read - python

I have a Tornado chat application and I'm writing some tests. Most client messages generate a reply from the server, but others must not generate any reply.
I managed to test this with the code below, waiting for the read timeout to occur. Is there a better way to do it?
import json
import tornado
from tornado.httpclient import HTTPRequest
from tornado.web import Application
from tornado.websocket import websocket_connect
from tornado.testing import AsyncHTTPTestCase, gen_test

class RealtimeHandler(tornado.websocket.WebSocketHandler):
    def on_message(self, message):
        if message != 'Hi':
            self.write_message('Hi there')
        return

class ChatTestCase(AsyncHTTPTestCase):
    def get_app(self):
        return Application([
            ('/rt', RealtimeHandler),
        ])

    @gen_test
    def test_no_reply(self):
        request = HTTPRequest('ws://127.0.0.1:%d/rt' % self.get_http_port())
        ws = yield websocket_connect(request)
        ws.write_message('Hi')
        with self.assertRaises(tornado.ioloop.TimeoutError):
            response = yield ws.read_message()
There is also a problem when the test ends:
======================================================================
ERROR: test_no_reply (myproj.tests.ChatTestCase)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/home/ubuntu/my_env/local/lib/python2.7/site-packages/tornado/testing.py", line 120, in __call__
result = self.orig_method(*args, **kwargs)
File "/home/ubuntu/my_env/local/lib/python2.7/site-packages/tornado/testing.py", line 506, in post_coroutine
self._test_generator.throw(e)
StopIteration

In general, it's difficult to test for a negative: how long do you wait before you conclude that the thing you're testing for will never happen? It's better to rearrange things so that the test can be expressed in positive terms. That's difficult to do in this toy example, but consider the following handler:
class RealtimeHandler(tornado.websocket.WebSocketHandler):
    def on_message(self, message):
        if int(message) % 2 == 1:
            self.write_message('%s is odd' % message)
In this case you could test it by sending the messages 1, 2, and 3, and asserting that you get two responses, "1 is odd" and "3 is odd".
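For instance, a minimal sketch of such a positive test against the odd/even handler above (assuming the same AsyncHTTPTestCase setup as in the question) might look like this:

from tornado.httpclient import HTTPRequest
from tornado.testing import AsyncHTTPTestCase, gen_test
from tornado.web import Application
from tornado.websocket import websocket_connect

class OddEvenChatTestCase(AsyncHTTPTestCase):
    def get_app(self):
        return Application([
            ('/rt', RealtimeHandler),
        ])

    @gen_test
    def test_only_odd_messages_get_replies(self):
        request = HTTPRequest('ws://127.0.0.1:%d/rt' % self.get_http_port())
        ws = yield websocket_connect(request)
        # Send 1, 2 and 3; only the odd numbers should produce a reply.
        for msg in ('1', '2', '3'):
            ws.write_message(msg)
        # Exactly two replies are expected; if '2' wrongly produced one,
        # the second assertion would fail on the message contents.
        first = yield ws.read_message()
        second = yield ws.read_message()
        self.assertEqual(first, '1 is odd')
        self.assertEqual(second, '3 is odd')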
The StopIteration failure you see is slightly surprising to me: I would not expect a timeout to be catchable within the @gen_test method, so doing so may have unexpected results, but I wouldn't have expected it to turn into a StopIteration. In any case, it's better to restructure the test so that you don't have to rely on timeouts. And if you do need a timeout, use gen.with_timeout so you can control the timeout from inside the test instead of relying on the one from outside in @gen_test.

Just to illustrate @Ben Darnell's answer.
import datetime

from tornado import gen

class ChatTestCase(AsyncHTTPTestCase):
    def get_app(self):
        return Application([
            ('/rt', RealtimeHandler),
        ])

    @gen_test
    def test_no_reply(self):
        request = HTTPRequest('ws://127.0.0.1:%d/rt' % self.get_http_port())
        ws = yield websocket_connect(request)
        ws.write_message('Hi')
        with self.assertRaises(gen.TimeoutError):
            response = yield gen.with_timeout(datetime.timedelta(seconds=4), ws.read_message())

Related

pytest: How to force raising Exceptions during unit-testing?

In my Python code, I expect that exceptions could be raised after calling requests.Session.request(), for example these:
requests.exceptions.ConnectTimeout
requests.exceptions.ReadTimeout
requests.exceptions.Timeout
When any of these expected exceptions is raised, I handle it appropriately, for example as a retry situation.
My question: I am using py.test for unit testing, and I purposely want to inject exceptions raised from specific parts of my code. For example, the function that calls requests.Session.request() should, instead of returning a valid requests.Response, raise one of the requests exceptions.
I want to make sure that my code successfully handles expected and unexpected exceptions coming from other packages, including those from requests.
Maybe... Is there a decorator that I could add to the aforementioned function to raise exceptions on demand during unit testing?
Any suggestions for doing exception injection for unit testing? (A proper phrasing of my question would be greatly appreciated.)
Thanks for the responses!!!
Here is the entire singleton class that creates requests.Session and calls requests.Session.request():
class MyRequest(metaclass=Singleton):

    def __init__(self, retry_tries=3, retry_backoff=0.1, retry_codes=None):
        self.session = requests.session()
        if retry_codes is None:
            retry_codes = set(REQUEST_RETRY_HTTP_STATUS_CODES)
        self.session.mount(
            'http',
            HTTPAdapter(
                max_retries=Retry(
                    total=retry_tries,
                    backoff_factor=retry_backoff,
                    status_forcelist=retry_codes,
                ),
            ),
        )

    def request(self, request_method, request_url, **kwargs):
        try:
            return self.session.request(method=request_method, url=request_url, **kwargs)
        except Exception as ex:
            log.warning(
                "Session Request: Failed: {}".format(get_exception_message(ex)),
                extra={
                    'request_method': request_method,
                    'request_url': request_url
                }
            )
            raise
You can make use of py.test raises, check it here: http://doc.pytest.org/en/latest/assert.html#assertions-about-expected-exceptions
Taking into account your code you could do something along the lines of the following:
from unittest.mock import Mock, patch

import pytest
from requests.exceptions import ConnectTimeout, ReadTimeout, Timeout

class TestRequestService:

    @patch('path_to_module.MyRequest')
    def test_custom_request(self, my_request_mock):
        my_request_mock.request.side_effect = ConnectTimeout
        with pytest.raises(ConnectTimeout):
            my_request_mock.request(Mock(), Mock())
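If you want to exercise the real MyRequest.request() error handling (the logging and re-raise) rather than the mock itself, a variation is to patch only the underlying session call. This is just a sketch; path_to_module stands in for wherever MyRequest actually lives:

from unittest.mock import patch

import pytest
from requests.exceptions import ConnectTimeout

from path_to_module import MyRequest

def test_request_reraises_connect_timeout():
    my_request = MyRequest()
    # Replace only the session-level call so MyRequest.request() itself runs.
    with patch.object(my_request.session, 'request', side_effect=ConnectTimeout):
        with pytest.raises(ConnectTimeout):
            my_request.request('GET', 'http://example.com')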
Moreover, you could make use of pytest.parametrize (http://doc.pytest.org/en/latest/parametrize.html) as well:
from unittest.mock import Mock, patch

import pytest
from requests.exceptions import ConnectTimeout, ReadTimeout, Timeout

class TestRequestService:

    @pytest.mark.parametrize("expected_exception", [ConnectTimeout, ReadTimeout, Timeout])
    @patch('path_to_module.MyRequest')
    def test_custom_request(self, my_request_mock, expected_exception):
        my_request_mock.request.side_effect = expected_exception
        with pytest.raises(expected_exception):
            my_request_mock.request(Mock(), Mock())
Here you can find some more examples about parametrize: http://layer0.authentise.com/pytest-and-parametrization.html
In my application I am catching the exception requests.exceptions.ConnectionError
and returning a message, which is in the expected variable below.
So the test looks like this:
import pytest
import requests

expected = {'error': 'cant connect to given url'}

class MockConnectionError:
    def __init__(self, *args, **kwargs):
        raise requests.exceptions.ConnectionError

def test_project_method(monkeypatch):
    monkeypatch.setattr("requests.get", MockConnectionError)
    response = project_method('http://some.url.com/')
    assert response == expected
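For completeness, a hypothetical project_method that would satisfy this test could look like the following (the question does not show it; the name and the error payload are taken from the test above):

import requests

def project_method(url):
    # Hypothetical application function: fetch the URL and translate
    # connection failures into an error payload the caller can handle.
    try:
        return requests.get(url).json()
    except requests.exceptions.ConnectionError:
        return {'error': 'cant connect to given url'}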
Patching, mocking and dependency injection are techniques to inject fake objects. Patching is sometimes hard to do right; on the other hand, dependency injection requires you to change the code you want to test.
This is just a simple example of how to use dependency injection. First, the code we want to test:
import requests
...

def fetch_data(url, get=requests.get):
    return get(url).json()

# this is how we use fetch_data in productive code:
answer = fetch_data("www.google.com?" + term)
And this is the test:
import pytest
from requests.exceptions import ConnectTimeout

def test_fetch():
    def get_with_timeout(url):
        raise ConnectTimeout("message")

    with pytest.raises(ConnectTimeout) as e:
        # and now we inject the fake get method:
        fetch_data("https://google.com", get=get_with_timeout)

    assert str(e.value) == "message"
In your example above, the mocking technique would be as follows:
def test_exception():
    class TimeoutSessionMock:
        # MyRequest.request() calls self.session.request(), so that is
        # the method the fake session needs to provide.
        def request(self, *args, **kwargs):
            raise ConnectTimeout("message")

    mr = MyRequest()
    mr.session = TimeoutSessionMock()
    with pytest.raises(ConnectTimeout) as e:
        mr.request("get", "http://google.com")

    assert str(e.value) == "message"

How to do an HTTP GET request with a deferred inside another (cascading deferreds)?

I want to do a GET request to check whether the return code is what I expect. This request occurs inside a function called by an addCallback of a general deferred chain, as shown in the code below.
My specific question is: how do I make the return value of line D arrive at line E?
It seems that the callback function cbResponse (line D) is never called. My first attempt was to do the request and return the result of the request to the callback chain (line A). It fails, because the deferred object has no result attribute yet.
The second attempt (line B) was to return the deferred object itself. It doesn't return the result either.
The third attempt (line C) was to return nothing, but that obviously doesn't carry the response code of the request.
Thanks a lot!
from twisted.web.client import Agent
from twisted.web.http_headers import Headers
from twisted.internet import reactor, defer

class Test (object):

    @classmethod
    def getRequest (self, result):
        print "Function getRequest"
        agent = Agent(reactor)
        d2 = agent.request('GET',
                           'http://www.google.com',
                           Headers({'User-Agent': ['Twisted Web Client Example']}),
                           None)
        d2.addCallback(Test.cbResponse)
        # 1st attempt: return the result of d2. Fail: exceptions.AttributeError: Deferred instance has no attribute 'result'
        return d2.result  # --> line A
        # 2nd attempt: return only the deferred object d2. Doesn't fail, but I can't get the result of the above request
        ### return d2  # --> line B
        # 3rd attempt: return None (without return).
        # --> line C

    @classmethod
    def cbResponse(response):
        print 'Function cbResponse %s', response.code
        # This is the return value I want to pass back to the deferredChain function (called at line E)
        return response.code  # --> line D

    @classmethod
    def deferredChain(self):
        d = defer.Deferred()
        d.addCallback(Test.getRequest)  # --> line E
        d.callback("success")
        return d.result  # --> line F

if __name__ == '__main__':
    tst = Test()
    rtn = tst.deferredChain()
    print "RTN: %s " % rtn
You're using Twisted Agent, which requires a running reactor to work properly; see the linked examples in the docs. Your code sample will work just fine if you start the Twisted reactor.
if __name__ == '__main__':
    tst = Test()
    rtn = tst.deferredChain()
    reactor.run()
    print "RTN: %s " % rtn
Twisted Treq is an interesting framework built on top of Agent; it promises a python-requests-like API for Twisted's async HTTP client.
You are calling tst.deferredChain() synchronously and trying to read d.result within it, which is not correct. The correct solution is to let deferredChain return a deferred as well and attach a callback to it.
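A minimal sketch of that idea, using plain functions instead of the question's classmethods and the line B variant (return d2) so that the response code propagates down the chain to a final callback:

from twisted.internet import reactor, defer
from twisted.web.client import Agent
from twisted.web.http_headers import Headers

def getRequest(result):
    agent = Agent(reactor)
    d2 = agent.request('GET',
                       'http://www.google.com',
                       Headers({'User-Agent': ['Twisted Web Client Example']}),
                       None)
    d2.addCallback(cbResponse)
    return d2  # returning the inner deferred chains it into the outer one

def cbResponse(response):
    return response.code  # passed on to the next callback in the chain

def printResult(code):
    print "RTN: %s" % code
    reactor.stop()

if __name__ == '__main__':
    d = defer.Deferred()
    d.addCallback(getRequest)    # line E
    d.addCallback(printResult)   # receives cbResponse's return value
    d.callback("success")
    reactor.run()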

Python Tornado Async Fetching of URLs

In the following code example I have a function do_async_thing which appears to return a Future, though I'm not sure why.
import tornado.gen
import tornado.httpclient
import tornado.ioloop
import tornado.web

@tornado.gen.coroutine
def do_async_thing():
    http = tornado.httpclient.AsyncHTTPClient()
    response = yield http.fetch("http://www.google.com/")
    return response.body

class MainHandler(tornado.web.RequestHandler):
    def get(self):
        x = do_async_thing()
        print(x)  # <tornado.concurrent.Future object at 0x10753a6a0>

        self.set_header("Content-Type", "application/json")
        self.write('{"foo":"bar"}')
        self.finish()

if __name__ == "__main__":
    app = tornado.web.Application([
        (r"/foo/?", MainHandler),
    ])
    app.listen(8888)
    tornado.ioloop.IOLoop.current().start()
You'll see that I yield the call to fetch and in doing so I should have forced the value to be realised (and subsequently been able to access the body field of the response).
What's more interesting is how I can even access the body field on a Future and not have it error (as far as I know a Future has no such field/property/method)
So does anyone know how I can:
Resolve the Future so I get the actual value
Modify this example so the function do_async_thing makes multiple async url fetches
Now it's worth noting that because I was still getting a Future back I thought I would try adding a yield to prefix the call to do_async_thing() (e.g. x = yield do_async_thing()) but that gave me back the following error:
tornado.gen.BadYieldError: yielded unknown object <generator object get at 0x1023bc308>
I also looked at doing something like this for the second point:
def do_another_async_thing():
    http = tornado.httpclient.AsyncHTTPClient()
    a = http.fetch("http://www.google.com/")
    b = http.fetch("http://www.github.com/")
    return a, b

class MainHandler(tornado.web.RequestHandler):
    def get(self):
        y = do_another_async_thing()
        print(y)
But again this returns:
<tornado.concurrent.Future object at 0x102b966d8>
Whereas I would've expected a tuple of Futures, at least. At this point I'm unable to resolve these Futures without getting an error such as:
tornado.gen.BadYieldError: yielded unknown object <generator object get at 0x1091ac360>
Update
Below is an example that works (as per the answer by A. Jesse Jiryu Davis).
But I've also added another example whereby I have a new function do_another_async_thing which makes two async HTTP requests (evaluating their values is a little more involved, as you'll see):
def do_another_async_thing():
    http = tornado.httpclient.AsyncHTTPClient()
    a = http.fetch("http://www.google.com/")
    b = http.fetch("http://www.github.com/")
    return a, b

@tornado.gen.coroutine
def do_async_thing():
    http = tornado.httpclient.AsyncHTTPClient()
    response = yield http.fetch("http://www.google.com/")
    return response.body

class MainHandler(tornado.web.RequestHandler):
    @tornado.gen.coroutine
    def get(self):
        x = yield do_async_thing()
        print(x)  # displays HTML response

        fa, fb = do_another_async_thing()
        fa = yield fa
        fb = yield fb
        print(fa.body, fb.body)  # displays HTML response for each
It's worth clarifying: you might expect the two yield statements for do_another_async_thing to block one after the other, but here is a breakdown of the steps that are happening (see also the sketch after this list):
do_another_async_thing immediately returns a tuple of two Futures
we yield the first Future, which suspends the coroutine until its value is realised
the value is realised, so we move to the next line
we yield again, suspending until the second value is realised
but as both futures were created at the same time and run concurrently, the second yield returns practically instantly
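As a side note, a coroutine can also yield a list of Futures and wait for all of them at once; a minimal sketch of the handler using that form (with the same do_another_async_thing as above):

class MainHandler(tornado.web.RequestHandler):
    @tornado.gen.coroutine
    def get(self):
        fa, fb = do_another_async_thing()
        # Yielding a list of Futures waits for all of them concurrently
        # and returns their results in the same order.
        ra, rb = yield [fa, fb]
        print(ra.body, rb.body)
        self.write('{"foo":"bar"}')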
Coroutines return futures. To wait for the coroutine to complete, the caller must also be a coroutine, and must yield the future. So:
#gen.coroutine
def get(self):
x = yield do_async_thing()
For more info see Refactoring Tornado Coroutines.

Django Testing: See traceback where wrong Response gets created

This pattern is from the django docs:
class SimpleTest(unittest.TestCase):
    def test_details(self):
        client = Client()
        response = client.get('/customer/details/')
        self.assertEqual(response.status_code, 200)
From: https://docs.djangoproject.com/en/1.8/topics/testing/tools/#default-test-client
If the test fails, the error message does not help very much. For example if the status_code is 302, then I see 302 != 200.
The question is now: Where does the wrong HTTPResponse get created?
I would like to see the stack trace of the interpreter at the point where the wrong HTTPResponse object gets created.
I read the docs for the assertions of django but found no matching method.
Update
This is a general question: how can I see the relevant information immediately when the assertion fails? Since these assertions (self.assertEqual(response.status_code, 200)) are common, I don't want to start debugging every time.
Update 2016
I had the same idea again and found the current answers not 100% easy to use. I wrote a new answer, which has a simple-to-use solution (a subclass of the Django test client): Django: assertEqual(response.status_code, 200): I want to see useful stack of functions calls
I think it could be achieved by creating a TestCase subclass that monkeypatches django.http.response.HttpResponseBase.__init__() to record a stack trace and store it on the Response object, then writing an assertResponseCodeEquals(response, status_code=200) method that prints the stored stack trace on failure to show where the Response was created.
I could actually really use a solution for this myself, and might look at implementing it.
Update:
Here's a v1 implementation, which could use some refinement (e.g. only printing relevant lines of the stack trace).
import mock
from traceback import extract_stack, format_list
from django.test.testcases import TestCase
from django.http.response import HttpResponseBase

orig_response_init = HttpResponseBase.__init__

def new_response_init(self, *args, **kwargs):
    orig_response_init(self, *args, **kwargs)
    self._init_stack = extract_stack()

class ResponseTracebackTestCase(TestCase):

    @classmethod
    def setUpClass(cls):
        cls.patcher = mock.patch.object(HttpResponseBase, '__init__', new_response_init)
        cls.patcher.start()

    @classmethod
    def tearDownClass(cls):
        cls.patcher.stop()

    def assertResponseCodeEquals(self, response, status_code=200):
        self.assertEqual(response.status_code, status_code,
            "Response code was '%s', expected '%s'" % (
                response.status_code, status_code,
            ) + '\n' + ''.join(format_list(response._init_stack))
        )

class MyTestCase(ResponseTracebackTestCase):

    def test_index_page_returns_200(self):
        response = self.client.get('/')
        self.assertResponseCodeEquals(response, 200)
How do I see the traceback if the assertion fails without debugging
If the assertion fails, there isn't a traceback. The client.get() hasn't failed, it just returned a different response than you were expecting.
You could use pdb to step through the client.get() call and see why it is returning the unexpected response.
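For example, a breakpoint placed just before the request lets you step into the test client and see where the 302 comes from (a sketch):

import pdb

class SimpleTest(unittest.TestCase):
    def test_details(self):
        client = Client()
        pdb.set_trace()  # step into client.get() with 's' to find where the redirect is produced
        response = client.get('/customer/details/')
        self.assertEqual(response.status_code, 200)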
Maybe this could work for you:
class SimpleTest(unittest.TestCase):

    @override_settings(DEBUG=True)
    def test_details(self):
        client = Client()
        response = client.get('/customer/details/')
        self.assertEqual(response.status_code, 200, response.content)
Using @override_settings to set DEBUG=True means the response will contain the stack trace, just as if you were running an instance in DEBUG mode.
Secondly, in order to see the content of the response, you need to either print it, log it using the logging module, or add it as the message for the assert method. Without a debugger, once the assertion has failed it is too late to print anything useful (usually).
You can also configure logging and add a handler to save messages in memory and print all of that, either in a custom assert method or in a custom test runner.
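A rough sketch of that in-memory logging idea (the MemoryLogHandler/MemoryLogTestCase names are made up for illustration):

import logging
import unittest

class MemoryLogHandler(logging.Handler):
    # Keeps formatted log records in a list so a test can dump them later.
    def __init__(self):
        super(MemoryLogHandler, self).__init__()
        self.records = []

    def emit(self, record):
        self.records.append(self.format(record))

class MemoryLogTestCase(unittest.TestCase):
    def setUp(self):
        self.log_handler = MemoryLogHandler()
        logging.getLogger().addHandler(self.log_handler)
        self.addCleanup(logging.getLogger().removeHandler, self.log_handler)

    def assertStatus(self, response, status_code=200):
        self.assertEqual(
            response.status_code, status_code,
            "Expected %s, got %s. Captured logs:\n%s" % (
                status_code, response.status_code,
                '\n'.join(self.log_handler.records),
            )
        )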
I was inspired by the solution that @Fush proposed, but my code was using assertRedirects, which is a longer method and was a bit too much code to duplicate without feeling bad about myself.
I spent a bit of time figuring out how I could just call super() for each assert and came up with this. I've included 2 example assert methods - they would all basically be the same. Maybe some clever soul can think of some metaclass magic that does this for all methods that take 'response' as their first argument (one non-metaclass sketch follows the code below).
from bs4 import BeautifulSoup
from django.test.testcases import TestCase

class ResponseTracebackTestCase(TestCase):

    def _display_response_traceback(self, e, content):
        soup = BeautifulSoup(content)
        assert False, u'\n\nOriginal Traceback:\n\n{}'.format(
            soup.find("textarea", {"id": "traceback_area"}).text
        )

    def assertRedirects(self, response, *args, **kwargs):
        try:
            super(ResponseTracebackTestCase, self).assertRedirects(response, *args, **kwargs)
        except Exception as e:
            self._display_response_traceback(e, response.content)

    def assertContains(self, response, *args, **kwargs):
        try:
            super(ResponseTracebackTestCase, self).assertContains(response, *args, **kwargs)
        except Exception as e:
            self._display_response_traceback(e, response.content)
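For what it's worth, one non-metaclass way to avoid duplicating each wrapper is a small class-level loop that patches the assert methods after the class definition. This is only a sketch of the idea and would replace the hand-written overrides above:

from functools import wraps

def _wrap_assertion(method_name):
    original = getattr(TestCase, method_name)

    @wraps(original)
    def wrapper(self, response, *args, **kwargs):
        try:
            return original(self, response, *args, **kwargs)
        except Exception as e:
            self._display_response_traceback(e, response.content)
    return wrapper

# Wrap every assert method that takes a response as its first argument.
for _name in ('assertRedirects', 'assertContains', 'assertNotContains'):
    setattr(ResponseTracebackTestCase, _name, _wrap_assertion(_name))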
I subclassed the Django test client to get this:
Usage
def test_foo(self):
    ...
    MyClient().get(url, assert_status=200)
Implementation
from django.test import Client

# mock, HttpResponseBase, orig_response_init and HTTPResponseStatusCodeAssertionError
# come from the earlier answers and the surrounding project code.

class MyClient(Client):

    def generic(self, method, path, data='',
                content_type='application/octet-stream', secure=False,
                assert_status=None,
                **extra):
        if assert_status:
            return self.assert_status(assert_status, super(MyClient, self).generic,
                                      method, path, data, content_type, secure, **extra)
        return super(MyClient, self).generic(method, path, data, content_type, secure, **extra)

    @classmethod
    def assert_status(cls, status_code, method_pointer, *args, **kwargs):
        assert hasattr(method_pointer, '__call__'), \
            'Method pointer needed, looks like the result of a method call: %r' % (method_pointer)

        def new_init(self, *args, **kwargs):
            orig_response_init(self, *args, **kwargs)
            if not status_code == self.status_code:
                raise HTTPResponseStatusCodeAssertionError('should=%s is=%s' % (status_code, self.status_code))

        def reraise_exception(*args, **kwargs):
            raise

        with mock.patch('django.core.handlers.base.BaseHandler.handle_uncaught_exception', reraise_exception):
            with mock.patch.object(HttpResponseBase, '__init__', new_init):
                return method_pointer(*args, **kwargs)
Conclusion
This results in a long exception if an HTTP response with a wrong status code was created. If you are not afraid of long exceptions, you see the root of the problem very quickly. That's what I want; I am happy.
Credits
This was based on other answers to this question.

Using Nose to test txmongo dependent code

I want to use nose to test an application that I am writing using twisted and txmongo. I can't even get simple use cases like the following working:
from nose.twistedtools import reactor, deferred, threaded_reactor
import logging
from twisted.internet import defer
import txmongo

log = logging.getLogger("common.test.test_db")

conn = txmongo.lazyMongoConnectionPool('localhost', 27017, 4)

@deferred()
def test_mongo():
    tdb = conn.test

    @defer.inlineCallbacks
    def cb(oid):
        assert oid
        obj = yield tdb.test.find({"_id": oid})
        log.error("In callback")
        assert obj

    d = tdb.test.save({"s": 1, "b": 2})
    d.addCallback(cb)
    return d
However, this always returns the following:
E
======================================================================
ERROR: common.test.test_db.test_mongo
----------------------------------------------------------------------
Traceback (most recent call last):
File "/Volumes/Users/jce/.pyenv/celery/lib/python2.6/site-packages/nose/case.py", line 186, in runTest
self.test(*self.arg)
File "/Volumes/Users/jce/.pyenv/celery/lib/python2.6/site-packages/nose/twistedtools.py", line 138, in errback
failure.raiseException()
File "/Volumes/Users/jce/.pyenv/celery/lib/python2.6/site-packages/twisted/python/failure.py", line 326, in raiseException
raise self.type, self.value, self.tb
RuntimeWarning: not connected
----------------------------------------------------------------------
Ran 1 test in 0.006s
FAILED (errors=1)
I tried manually adding a threaded_reactor() call, but it didn't help.
edit
I removed the "lazy" connections, and modified the code, and now it works... I'm still curious as to why the "lazy" didn't work. The working code is as follows:
dbconn = txmongo.MongoConnectionPool('localhost', 27017, 4)

@deferred()
def test_mongo():

    @defer.inlineCallbacks
    def cb(conn):
        tdb = conn.test
        oid = yield tdb.test.save({"s": 1, "b": 2})
        assert oid
        log.error(str(oid))
        obj = yield tdb.test.find({"_id": oid})
        assert obj
        log.error(str(obj))

    dbconn.addCallback(cb)
    return dbconn
MongoConnectionPool returns a deferred, which is fired when the connection is established, passing the connection handler as an argument to the callback. You should do conn = yield MongoConnectionPool(...).
lazyMongoConnectionPool returns the connection handler directly, without waiting for the connection to be established.
Lazy is usually used by web servers and other services that don't require an immediate connection when your service starts. If you need the connection to be established up front, don't use the lazy method.
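Putting the answer together with the original test, a minimal (untested) sketch that yields the connection deferred before issuing any queries, using the same nose.twistedtools setup as in the question:

from nose.twistedtools import deferred
from twisted.internet import defer
import txmongo

@deferred()
@defer.inlineCallbacks
def test_mongo():
    # Yield the connection deferred so the connection is established
    # before any queries are issued.
    conn = yield txmongo.MongoConnectionPool('localhost', 27017, 4)
    tdb = conn.test
    oid = yield tdb.test.save({"s": 1, "b": 2})
    assert oid
    obj = yield tdb.test.find({"_id": oid})
    assert obj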
