I'm using google app engine with python and want to run some tests using nosetest.
I want each test to run the same setup function. I already have a lot of tests, so I don't want to go through them all and copy & paste the same function. Can I define one setup function somewhere so that each test runs it first?
thanks.
You can write your setup function and apply it using the with_setup decorator:
from nose.tools import with_setup
def my_setup():
...
@with_setup(my_setup)
def test_one():
...
@with_setup(my_setup)
def test_two():
...
If you want to use the same setup for several test-cases you can use a similar method.
First you create the setup function, then you apply it to all the TestCases with a decorator:
def my_setup(self):
#do the setup for the test-case
def apply_setup(setup_func):
def wrap(cls):
cls.setup = setup_func
return cls
return wrap
@apply_setup(my_setup)
class MyTestCaseOne(unittest.TestCase):
def test_one(self):
...
def test_two(self):
...
@apply_setup(my_setup)
class MyTestCaseTwo(unittest.TestCase):
def test_one(self):
...
Or another way could be to simply assign your setup:
class MyTestCaseOne(unittest.TestCase):
setup = my_setup
Related
I have a python method like
import external_object
from external_lib1 import ExternalClass1
from external_lib2 import Hook
class MyClass(object):
def my_method(self):
ExternalClass1.get('arg1') # should be mocked and return a specific value with this arg1
ExternalClass1.get('arg2') # should be mocked and return a specific value with this arg2
def get_hook(self):
return Hook() # return a mock object with mocked method on it
def my_method(self):
object_1 = external_object.instance_type_1('args') # these are two different objects instantiated from the same lib.
object_2 = external_object.instance_type_2('args')
object_1.method_1('arg') # should return what I want when object_1 mocked
object_2.method_2('arg') # should return what I want when object_2 is mocked
In my test I would like to realise what I put in comments.
I could manage to do it, but every time it gets really messy.
I used to use flexmock for some of this (for example, ExternalClass.get('arg1') would be mocked with flexmock(ExternalClass).should_receive('get').with_args('arg1').and_return('value') # etc...), but I'm tired of using different test libs to mock.
I would like to use only the mock library but I struggle to find a consistent way of doing it.
I like to use python's unittest lib. Concretely the unittest.mock which is a great lib to customize side effects and return value in unit tested functions.
They can be used as follows:
class Some(object):
"""
You want to test this class
external_lib is an external component we cannot test
"""
def __init__(self, external_lib):
self.lib = external_lib
def create_index(self, unique_index):
"""
Create an index.
"""
try:
self.lib.create(index=unique_index) # mock this
return True
except MyException as e:
self.logger.error(e.__dict__, color="red")
return False
class MockLib():
pass
class TestSome(unittest.TestCase):
def setUp(self):
self.lib = MockLib()
self.some = Some(self.lib)
def test_create_index(self):
# This will test the method returns True if everything went fine
self.lib.create = MagicMock(return_value={})
self.assertTrue(self.some.create_index("test-index"))
def test_create_index_fail(self):
# This will test the exception is handled and return False
self.lib.create = MagicMock(side_effect=MyException("error create"))
self.assertFalse(self.some.create_index("test-index"))
Put the TestSome() class file somewhere like your-codebase-path/tests and run:
python -m unittest -v
I hope it's useful.
I have a set of python/selenium/unittest tests contained in a single class:
class TestProject(unittest.TestCase):
def test001_create_project(self):
...
def test002_project_wizard_page1(self, projectName, projectDescription):
....
def test003_project_wizard_page2(self):
....
def test004_project_wizard_finish(self):
....
I need to run the test methods in a block in the above order because they walk through a wizard on my web application. However, I want to be able to pass a variety of parameters to the test methods like projectName, projectDescription, etc.
Using unittest, how can I run a set of maybe 10 iterations of the tests passing in different parameters to test002_project_wizard_page1?
Your numbered tests are really just parts of a single test. What you should define as separate tests are functions that use your parameter sets.
class TestProject(unittest.TestCase):
def _create_project(self):
...
def _page1(self, projectName, projectDescription):
...
def _page2(self):
...
def _finish(self):
...
def _run(self, name, descr):
self._create_project()
self._page1(name, descr)
self._page2()
self._finish()
def test001(self):
self._run("foo", "do foo")
def test002(self):
self._run("bar", "do bar")
# etc
An interesting module that can reduce some of the boilerplate is the ddt project.
import ddt
@ddt.ddt
class TestProject(unittest.TestCase):
def _create_project(self):
...
def _page1(self, projectName, projectDescription):
...
def _page2(self):
...
def _finish(self):
...
@ddt.data(
("foo", "do foo"),
("bar", "do bar"),
# etc
)
@ddt.unpack
def test_run(self, name, descr):
self._create_project()
self._page1(name, descr)
self._page2()
self._finish()
Any reason that you cannot use pytest? It provides this functionality out of the box.
For me, it looks like you want to test one template with different parameters. How about code like the following:
import unittest
class TestProject(unittest.TestCase):
def mytest001_create_project(self):
...
def mytest002_project_wizard_page1(self, projectName, projectDescription):
....
def mytest003_project_wizard_page2(self):
....
def mytest004_project_wizard_finish(self):
....
def mytest_in_order(self, project_name, project_description):
self.mytest001_create_project()
self.mytest002_project_wizard_page1(project_name, project_description)
self.mytest003_project_wizard_page2()
self.mytest004_project_wizard_finish()
def test_in_batch(self):
project_names = ['a', 'b']
project_descriptions = ['aa', 'bb']
for project_name, project_description in zip(project_names, project_descriptions):
self.mytest_in_order(project_name, project_description)
Take a look at Python unit testing: parametrized test cases post on Eli Bendersky's blog. It states that
You can't easily pass arguments into a unittest.TestCase from outside.
However, it provides an implementation for class ParametrizedTestCase(unittest.TestCase) which can be used to add parameters to the unittest.TestCase class. That will solve your parameters problem and I believe, each individual test methods are already being run in order.
I have a situation where I'm trying to modify the arguments passed to a decorator on one of my class methods. The code looks something like this:
class MyClass(object):
@tryagain(retries=3)
def mymethod(self, arg):
... do stuff ...
My problem is I'd like to alter the "retries" variable to something less than 3 when running my unit tests, but keep it at "3" for the production code. Unfortunately, it doesn't look like I can do something like this:
@tryagain(retries=self.retries)
def mymethod(self, arg):
... do stuff ...
or
@tryagain(retries=MyClass.retries)
def mymethod(self, arg):
... do stuff ...
because the class isn't defined at the point the arguments are passed to the decorator (as near as I can tell).
I also tried to add the variable within the module like so:
retries = 1
class MyClass(object):
@tryagain(retries=retries)
def mymethod(self, arg):
... do stuff ...
but then I can't seem to modify the value of "retries" from within my unit tests. Is there another way to accomplish what I'm trying to do?
I assume you are trying to reduce the number of retries to increase test speed.
If so, modifying the retries variable doesn't seem to be the best approach. Instead, you could unit test the function mymethod without the decorator first, and then create a mock function of mymethod. Let's call it mock_mymethod, decorate it with @tryagain and test whether the logic of `tryagain` actually works.
Check the mock module to see how to create a mock instance, this article about mock is also worth reading.
You could use an environment variable, set from your calling code (it might be good to provide a default here):
import os
# ...
class MyClass(object):
@tryagain(retries=int(os.environ['project_num_retries']))
def mymethod(self, arg):
print("mymethod")
Or use a "globals"-type module, for example: project_settings.py containing:
num_retries = 3
Then
import project_settings
class MyClass(object):
@tryagain(retries=project_settings.num_retries)
def mymethod(self, arg):
print("mymethod")
But I'm not sure decorating your code with test information is how you really should go about it -- what about:
class MyClass(object):
def mymethod(self, arg):
print("mymethod")
Then in something like unittests.py:
DEV_TESTS = True # Change to False for production
num_retries = 3 if not DEV_TESTS else 1
import <your class>
class UnitTests():
def __init__(self):
self.c = <your_class>.MyClass()
@tryagain(retries=num_retries)
def test_mymethod(self):
self.c.mymethod("Foo")
t = UnitTests()
t.test_mymethod()
If you were so inclined, this unittests.py could be used with something like python's unittest package with:
DEV_TESTS = True # Change to False for production
num_retries = 3 if not DEV_TESTS else 1
import unittest
import <your class>
class UnitTests(unittest.TestCase):
def setUp(self):
self.c = <your class>.MyClass()
@tryagain(retries=num_retries)
def test_mymethod(self):
self.c.mymethod("Foo")
Note, I used the following simple example of a #tryagain decorator, yours may be more complicated and require some tuning of the examples:
def tryagain(retries):
def wrap(f):
def wrapped_f(*args,**kwargs):
for _ in xrange(retries):
f(*args,**kwargs)
return wrapped_f
return wrap
I'm mocking my RpcClient class for all of my unit tests like this:
import unittest2
from mock import patch
@patch('listeners.RpcClient')
class SomeTestCase(unittest2.TestCase):
def test_something(self, mock_api):
...
def test_something_else(self, mock_api):
...
For most of my tests I don't want to do any assertions using the mock object, all I want to do is patch the class so the RpcClient doesn't attempt to connect and fire requests for each of my tests (I have it hooked up to a post save event on one of my models).
Can I avoid passing in mock_api into every single one of my tests?
I ended up doing the mocking in setUp using patcher.start():
def setUp(self):
self.rpc_patcher = patch('listeners.RpcClient')
self.MockClass = self.rpc_patcher.start()
def tearDown(self):
self.rpc_patcher.stop()
So I don't have to decorate any of my test cases and don't have to add any extra arguments to my tests.
More info:
http://docs.python.org/dev/library/unittest.mock#patch-methods-start-and-stop
Can you just set a default parameter to mock_api?
def test_something(self, mock_api=None):
...
def test_something_else(self, mock_api=None):
You could use patch as a context manager for when you call the SUT. Something like:
import unittest2
from mock import patch
class SomeTestCase(unittest2.TestCase):
def call_sut(self, *args, **kwargs):
with patch('listeners.RpcClient'):
# call SUT
def test_something(self):
self.call_sut()
# make assertions
def test_something_else(self):
self.call_sut()
# make assertions
I'm researching new version of pytest (2.3) and getting very excited about the new functionality where you
"can precisely control teardown by registering one or multiple
teardown functions as soon as they have performed some actions which
need undoing, eliminating the need for a separate “teardown”
decorator"
from here
It's all pretty clear when it's used as function, but how to use it in the class?
class Test(object):
@pytest.setup(scope='class')
def stp(self):
self.propty = "something"
def test_something(self):
... # some code
# need to add something to the teardown
def test_something_else(self):
... # some code
# need to add even more to the teardown
Ok, I got it working by having a 'session'-wide funcarg finalizer:
@pytest.fixture(scope = "session")
def finalizer():
return Finalizer()
class Finalizer(object):
def __init__(self):
self.fin_funcs = []
def add_fin_func(self, func):
self.fin_funcs.append(func)
def remove_fin_func(self, func):
try:
self.fin_funcs.remove(func)
except ValueError:
pass
def execute(self):
for func in reversed(self.fin_funcs):
func()
self.fin_funcs = []
class TestSomething(object):
@classmethod
@pytest.fixture(scope = "class", autouse = True)
def setup(self, request, finalizer):
self.finalizer = finalizer
request.addfinalizer(self.finalizer.execute)
self.finalizer.add_fin_func(lambda: some_teardown())
def test_with_teardown(self):
#some test
self.finalizer.add_fin_func(self.additional_teardown)
def additional_teardown(self):
#additional teardown
Thanks #hpk42 for answering e-mails and helping me get the final version.
NOTE: together with xfailing the rest of the steps and improved scenarios this now makes a pretty good Test-Step structure
Indeed, there are no good examples for teardown yet. The request object has a addfinalizer method. Here is an example usage:
@pytest.setup(scope=...)
def mysetup(request):
...
request.addfinalizer(finalizerfunction)
...
The finalizerfunction will be called when all tests within the scope have finished execution.