I'm trying to create a pytest plugin that runs a test multiple times to see if it ever passes, skipping on failure unless it is the last attempt. I use a custom marker for those tests, then pytest_generate_tests to parametrize the test, and finally modify the result in pytest_pyfunc_call. However, with this approach the test function has to declare the parameter by name. Is there some way to hide this parameter from the actual function?
Here's the code for the plugin:
from typing import Optional
import pytest
from _pytest.python import Metafunc, Function
from pluggy.callers import _Result
def pytest_configure(config):
config.addinivalue_line(
"markers", "brute_force: Brute force the test multiple times."
)
class Memory:
def __init__(self, attempts: int):
self.attempts = attempts
self.attempt = 0
self.has_passed = False
def pytest_generate_tests(metafunc: Metafunc):
marker = metafunc.definition.get_closest_marker("brute_force")
if not marker:
return
    num_attempts = marker.args[0] if marker.args else 3
memory = Memory(num_attempts)
metafunc.parametrize("memory", [memory for _ in range(num_attempts)])
@pytest.hookimpl(hookwrapper=True)
def pytest_pyfunc_call(pyfuncitem: Function):
memory: Optional[Memory] = pyfuncitem.callspec.params.pop('memory', None)
if memory is not None:
memory.attempt +=1
if memory.has_passed:
raise pytest.skip("already passed")
outcome: _Result = yield
if memory is not None:
if outcome.excinfo is None:
memory.has_passed = True
elif memory.attempt != memory.attempts:
pytest.skip("may pass later")
And here is an example of such a test; I don't want the function to have to declare memory as an argument:
import pytest
num = -1
@pytest.mark.brute_force(3)
def test_simple(memory):
global num
num += 1
assert 10 / num
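One way to keep the parameter out of the test signature is to append a private name to metafunc.fixturenames before parametrizing; pytest then tracks the parameter in the callspec without the test function having to declare it. A minimal sketch of the adjusted hook (the name __brute_force_memory is arbitrary, and this relies on pytest accepting a parametrized name that no fixture or function argument declares):
def pytest_generate_tests(metafunc: Metafunc):
    marker = metafunc.definition.get_closest_marker("brute_force")
    if not marker:
        return
    num_attempts = marker.args[0] if marker.args else 3
    memory = Memory(num_attempts)
    # appending the name here means the test function itself never has
    # to list it as an argument; the value still ends up in callspec.params
    metafunc.fixturenames.append("__brute_force_memory")
    metafunc.parametrize("__brute_force_memory", [memory] * num_attempts)
pytest_pyfunc_call would then pop "__brute_force_memory" from pyfuncitem.callspec.params instead of "memory".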
Related
Most Python profilers are made for Python programs or scripts; in my case I'm working with a Python plugin for a third-party app (Blender 3D), so the profiling needs to be sampled in real time while the user is interacting with the plugin.
I'm currently trying an injection strategy, which consists of procedurally searching through all plugin modules and injecting a profiler wrapper into every single function.
See below; this is what my current profiler looks like.
I'm wondering if there are other profilers out there that can be used for run-time scenarios such as plugins.
class ModuleProfiler:
#profiler is running?
allow = False #must be True in order to start the profiler
activated = False #read only indication if profiler has been activated
#please define your plugin main module here
plugin_main_module = "MyBlenderPlugin"
#function calls registry
registry = {}
#ignore parameters, typically ui functions/modules
ignore_fcts = [
"draw",
"foo",
]
ignore_module = [
"interface_drawing",
]
    event_prints = True #print all events?
    @classmethod
    def print_registry(cls):
        """print all registered benchmarks"""
#generate total
for k,v in cls.registry.copy().items():
cls.registry[k]["averagetime"] = v["runtime"]/v["calls"]
print("")
print("PROFILER: PRINTING OUTCOME")
sorted_registry = dict(sorted(cls.registry.items(), key=lambda item:item[1]["runtime"], reverse=False))
for k,v in sorted_registry.items():
print("\n",k,":")
for a,val in v.items():
print(" "*6,a,":",val)
return None
    @classmethod
def update_registry(cls, fct, exec_time=0):
"""update internal benchmark with new data"""
key = f"{fct.__module__}.{fct.__name__}"
r = cls.registry.get(key)
if (r is None):
cls.registry[key] = {}
cls.registry[key]["calls"] = 0
cls.registry[key]["runtime"] = 0
r = cls.registry[key]
r["calls"] +=1
r["runtime"] += exec_time
return None
    @classmethod
    def profile_wrap(cls, fct):
        """wrap any function with our benchmark & call-counter"""
#ignore some function?
if (fct.__name__ in cls.ignore_fcts):
return fct
import functools
import time
        @functools.wraps(fct)
def inner(*args,**kwargs):
t = time.time()
r = fct(*args,**kwargs)
cls.update_registry(fct, exec_time=time.time()-t)
if cls.event_prints:
print(f"PROFILER : {fct.__module__}.{fct.__name__} : {time.time()-t}")
return r
return inner
    @classmethod
    def start(cls):
        """inject the wrapper into every function of every sub-module of our plugin;
        used for benchmarking or debugging purposes only"""
if (not cls.allow):
return None
cls.activated = True
import types
import sys
def is_function(obj):
"""check if given object is a function"""
return isinstance(obj, types.FunctionType)
print("")
#for all modules in sys.modules
for mod_k,mod in sys.modules.copy().items():
            #separate module component names
mod_list = mod_k.split('.')
            #filter out what isn't ours
if (mod_list[0]!=cls.plugin_main_module):
continue
#ignore some modules?
if any([m in cls.ignore_module for m in mod_list]):
continue
print("PROFILER_SEARCH : ",mod_k)
            #for each object found in the module
for ele_k,ele in mod.__dict__.items():
#if it does not have a name, skip
if (not hasattr(ele,"__name__")):
continue
#we have a global function
elif is_function(ele):
print(f" INJECT LOCAL_FUNCTION: {mod_k}.{ele_k}")
mod.__dict__[ele_k] = cls.profile_wrap(ele)
#then we have a homebrewed class? search for class.fcts
#class.fcts implementation is not flawless, need to investigate issue(s)
elif repr(ele).startswith(f"<class '{cls.plugin_main_module}."):
for class_k,class_e in ele.__dict__.items():
if is_function(class_e):
print(f" INJECT CLASS_FUNCTION: {mod_k}.{ele_k}.{class_k}")
setattr( mod.__dict__[ele_k], class_k, cls.profile_wrap(class_e),) #class.__dict__ are mapping proxies, need to assign this way,
continue
print("")
return None
ModuleProfiler.allow = True
ModuleProfiler.plugin_main_module = "MyModule"
ModuleProfiler.start()
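Regarding ready-made alternatives: sampling profilers such as py-spy can attach to an already running process from the outside, which fits the "profile while the user keeps clicking" scenario, and the standard library's cProfile can be enabled and disabled programmatically from inside the plugin. A minimal sketch of the latter (the start/stop entry points are hypothetical helpers, not Blender API):
import cProfile
import io
import pstats

_profiler = cProfile.Profile()

def start_profiling():
    #start collecting deterministic timing data from now on
    _profiler.enable()

def stop_profiling(top=25):
    #stop collecting and print the hottest functions by cumulative time
    _profiler.disable()
    stream = io.StringIO()
    pstats.Stats(_profiler, stream=stream).sort_stats("cumulative").print_stats(top)
    print(stream.getvalue())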
I'm having some issues creating a unit test for an internal variable.
My structure is:
[1] my_animal.py contains Myclass and method: do_bite()
my_animal.py
class Myclass():
def do_bite(self):
return 1
[2] my_module.py contains jobMain(""), which uses the method from my_animal.py
my_module.py
import sys
from my_animal import Myclass
def jobMain(directoryPath):
flag = -1
result = Myclass()
if result.do_bite() is None:
flag = 0
if result.do_bite() is 1:
flag = 1
if result.do_bite() is 2:
flag = 2
[3] my_test.py contains the unittest to test jobMain in my_module.py
my_test.py
import pytest
from unittest import mock
from my_module import jobMain

# Mock Myclass.dobite to None
@pytest.fixture
def mock_dobite0():
with mock.patch('my_module.Myclass') as mocked_animal:
mocked_animal.return_value.do_bite.return_value = None
yield
# Mock Myclass.dobite to 1
@pytest.fixture
def mock_dobite1():
with mock.patch('my_module.Myclass') as mocked_animal:
mocked_animal.return_value.do_bite.return_value = 1
yield
# Mock Myclass.dobite to 2
@pytest.fixture
def mock_dobite2():
with mock.patch('my_module.Myclass') as mocked_animal:
mocked_animal.return_value.do_bite.return_value = 2
yield
# My unittest to test dobite() method
def test_dobite0(mock_Myclass, mock_dobite0):
jobMain("")
def test_dobite1(mock_Myclass, mock_dobite1):
jobMain("")
def test_dobite2(mock_Myclass, mock_dobite2):
jobMain("")
My question is: how do I test the 'flag' variable inside jobMain?
'flag' must be assigned the correct value (e.g. do_bite() == 1 => flag == 1).
The variable flag only exists in the scope of jobMain. If you want to use the variable outside jobMain, the most common ways are:
1) return the value
This is quite obvious. Since jobMain is a function, it returns a value; without an explicit return statement it returns None. You could just do:
def jobmain(pth):
# do stuff and assign flag
return flag
# and inside tests
assert jobmain("") == 1
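Applied to the code in the question, a sketch of option 1 could look like this (calling do_bite() only once and comparing with ==, as noted at the end of this answer; the test reuses the mock_dobite1 fixture from the question):
# my_module.py
def jobMain(directoryPath):
    flag = -1
    bite = Myclass().do_bite()
    if bite is None:
        flag = 0
    elif bite == 1:
        flag = 1
    elif bite == 2:
        flag = 2
    return flag

# my_test.py
def test_dobite1(mock_dobite1):
    assert jobMain("") == 1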
2) Use a class instead
If you want jobMain to remember some state, it is common practice to use an object. Then flag would be an attribute of the object and could be accessed from outside after you call any method (function) of JobMain. For example:
class JobMain:
def __init__(self):
self.flag = -1
def run(self, pth):
result = Myclass()
if result.do_bite() is None:
self.flag = 0
if result.do_bite() is 1:
self.flag = 1
if result.do_bite() is 2:
self.flag = 2
# and inside test
job = JobMain()
job.run("")
assert job.flag == 1
Note
I just copy-pasted your code for setting the flag. Note that do_bite() is called again even after an earlier check has matched, because the if statements are not chained with elif. Also, when testing against a number, one should use == instead of is.
How to test 'flag' parameter inside JobMain?
You don't. It's an internal variable. Testing it would be glass-box testing; the test will break if the implementation changes.
Instead, test the effect of flag. This is black-box testing. Only the interface is tested. If the implementation changes the test still works allowing the code to be aggressively refactored.
Note: If you don't hard code result = Myclass() you don't need to mock. Pass it in as an argument with the default being Myclass().
def jobMain(directoryPath, result=Myclass()):
Then you don't need to patch Myclass(). Instead, pass in a mock object.
# I don't know unittest.mock very well, but something like this:
from unittest.mock import Mock

mock = Mock(Myclass)
mock.do_bite.return_value = 2
jobMain('', result=mock)
This also makes the code more flexible outside of testing.
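Putting that together, a minimal self-contained sketch (Myclass is inlined here for brevity, and returning flag borrows from the first answer so there is an observable effect to assert on):
from unittest.mock import Mock

class Myclass:
    def do_bite(self):
        return 1

def jobMain(directoryPath, result=Myclass()):
    # the collaborator is injectable; tests can pass in a mock instead
    flag = -1
    bite = result.do_bite()
    if bite is None:
        flag = 0
    elif bite == 1:
        flag = 1
    elif bite == 2:
        flag = 2
    return flag

def test_dobite2():
    mocked = Mock(Myclass)  # spec'd against Myclass
    mocked.do_bite.return_value = 2
    assert jobMain("", result=mocked) == 2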
This question already has an answer here:
How can I access the overall test result of a pytest test run during runtime?
After running all the tests I would like to receive a count of all tests run, failed tests, and successful tests. This information will later be used by another tool that parses the data.
My answer here can be modified to include a counter. Once again, modify or create conftest.py.
Add
from collections import OrderedDict
test_results = OrderedDict()
def pytest_configure(config):
"""Intial setup of test_results counters"""
global test_results
test_results['amount_of_failed_tests'] = 0
test_results['amount_of_tests'] = 0
test_results['amount_of_successful_tests'] = 0
And modify pytest_runtest_makereport as follows:
@pytest.hookimpl(tryfirst=True, hookwrapper=True)
def pytest_runtest_makereport(item, call):
"""The actuall wrapper that gets called before and after every test"""
global test_results
outcome = yield
rep = outcome.get_result()
if rep.when == "call":
full_name, test_file, test_name = get_current_test()
test_results['amount_of_tests'] += 1
if rep.failed:
test_results['amount_of_failed_tests'] += 1
# return the last error msg either by pytest.fail or from any exception raised
test_results[test_name] = "Failure"
            test_results[f"{test_name}_msg"] = f"{call.excinfo.typename} - {call.excinfo.value}"
else:
test_results['amount_of_successful_tests'] += 1
test_results[test_name] = "Success"
            test_results[f"{test_name}_msg"] = ''
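get_current_test() comes from the linked answer; if you don't have it handy, a minimal stand-in based on the PYTEST_CURRENT_TEST environment variable (set by pytest 3.2+) could look like this:
import os

def get_current_test():
    """Return (full_name, test_file, test_name) for the test currently running."""
    # PYTEST_CURRENT_TEST looks like "tests/test_foo.py::test_bar (call)"
    full_name = os.environ.get("PYTEST_CURRENT_TEST", "").split(" ")[0]
    test_file = full_name.split("::")[0].split("/")[-1]
    test_name = full_name.split("::")[-1]
    return full_name, test_file, test_name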
The result can once again be accessed in the end:
def pytest_unconfigure(config):
"""Called when pytest is about to end. Can be used to print the result dict or
to pipe the data into a file"""
print(test_results)
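Since another tool is supposed to parse the data, pytest_unconfigure is also a natural place to dump the dictionary to disk instead of just printing it; a small sketch (the file name test_results.json is arbitrary):
import json

def pytest_unconfigure(config):
    """Write the collected counters to a file for the external tool to parse"""
    with open("test_results.json", "w") as fh:
        json.dump(test_results, fh, indent=2)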
How can I create multiple TestCases and run them programmatically? I'm trying to test multiple implementations of a collection on a common TestCase.
I'd prefer to stick with plain unittest and avoid dependencies.
Here's some resources that I looked at that didn't quite meet what I wanted:
Writing a re-usable parametrized unittest.TestCase method - The accepted answer proposes four different external libraries.
http://eli.thegreenplace.net/2011/08/02/python-unit-testing-parametrized-test-cases - This approach uses a static method parametrize. I don't understand why you can't pass a parameter directly into TestSubClass.__init__.
How to generate dynamic (parametrized) unit tests in python? - A little bit too black magic.
Here's a minimal (non)working example.
import unittest
MyCollection = set
AnotherCollection = set
# ... many more collections
def maximise(collection, array):
return 2
class TestSubClass(unittest.TestCase):
def __init__(self, collection_class):
unittest.TestCase.__init__(self)
self.collection_class = collection_class
self.maximise_fn = lambda array: maximise(collection_class, array)
def test_single(self):
self.assertEqual(self.maximise_fn([1]), 1)
def test_overflow(self):
self.assertEqual(self.maximise_fn([3]), 1)
# ... many more tests
def run_suite():
suite = unittest.defaultTestLoader
for collection in [MyCollection, AnotherCollection]:
suite.loadTestsFromTestCase(TestSubClass(collection))
unittest.TextTestRunner().run(suite)
def main():
run_suite()
if __name__ == '__main__':
main()
The above approach errors out in loadTestsFromTestCase:
TypeError: issubclass() arg 1 must be a class
How about using pytest with a parametrized fixture:
import pytest
MyCollection = set
AnotherCollection = set
def maximise(collection, array):
return 1
@pytest.fixture(scope='module', params=[MyCollection, AnotherCollection])
def maximise_fn(request):
return lambda array: maximise(request.param, array)
def test_single(maximise_fn):
assert maximise_fn([1]) == 1
def test_overflow(maximise_fn):
assert maximise_fn([3]) == 1
If that's not an option, you can make a mixin to contain the test functions, and subclasses to provide maximise_fn:
import unittest
MyCollection = set
AnotherCollection = set
def maximise(collection, array):
return 1
class TestCollectionMixin:
def test_single(self):
self.assertEqual(self.maximise_fn([1]), 1)
def test_overflow(self):
self.assertEqual(self.maximise_fn([3]), 1)
class TestMyCollection(TestCollectionMixin, unittest.TestCase):
maximise_fn = lambda self, array: maximise(MyCollection, array)
class TestAnotherCollection(TestCollectionMixin, unittest.TestCase):
maximise_fn = lambda self, array: maximise(AnotherCollection, array)
if __name__ == '__main__':
unittest.main()
I want to be able to get the result of a particular test method and output it inside the teardown method, while using the nose test runner.
There is a very good example here.
But unfortunately, running nosetests example.py does not work, since nose doesn't seem to like the fact that the run method in the superclass is being overridden:
AttributeError: 'ResultProxy' object has no attribute 'wasSuccessful'
Caveat: the following doesn't actually access the test during the tearDown, but it does access each result.
You might want to write a nose plugin (see the API documentation here). The method that you are probably interested in is afterTest(), which is run... after the test. :) Though, depending on your exact application, handleError()/handleFailure() or finalize() might actually be more useful.
Here is an example plugin that accesses the result of a test immediately after it is executed.
from nose.plugins import Plugin
import logging
log = logging.getLogger('nose.plugins.testnamer')
class ReportResults(Plugin):
def __init__(self, *args, **kwargs):
super(ReportResults, self).__init__(*args, **kwargs)
self.passes = 0
self.failures = 0
def afterTest(self, test):
if test.passed:
self.passes += 1
else:
self.failures += 1
def finalize(self, result):
print "%d successes, %d failures" % (self.passes, self.failures)
This trivial example merely reports the number of passes and failures (like the link you included), but I'm sure you can extend it to do something more interesting (here's another fun idea). To use this, make sure that it is installed in Nose (or load it into a custom runner), and then activate it with --with-reportresults.
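For the "custom runner" route, nose accepts plugin instances via the addplugins argument of nose.main/nose.run, after which the plugin is switched on with the --with-reportresults option mentioned above. A sketch (the module name report_results is hypothetical):
import nose
from report_results import ReportResults  # hypothetical module containing the plugin

if __name__ == "__main__":
    # equivalent to: nosetests --with-reportresults example.py
    nose.main(addplugins=[ReportResults()],
              argv=["nosetests", "--with-reportresults", "example.py"])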
If you are OK with adding some boilerplate code to the tests, something like the following might work.
In MyTest1, tearDown is called at the end of each test, and the value of self.result has been set to a tuple containing the method name and a dictionary (but you could set that to whatever you like). The inspect module is used to get the method name, so tearDown knows which test just ran.
In MyTest2, all the results are saved in a dictionary (results), which you can do with what you like in the tearDownClass method.
import inspect
import unittest
class MyTest1(unittest.TestCase):
result = None
def tearDown(self):
print "tearDown:", self.result
def test_aaa(self):
frame = inspect.currentframe()
name = inspect.getframeinfo(frame).function
del frame
self.result = (name, None)
x = 1 + 1
self.assertEqual(x, 2)
self.result = (name, dict(x=x))
def test_bbb(self):
frame = inspect.currentframe()
name = inspect.getframeinfo(frame).function
del frame
self.result = (name, None)
# Intentional fail.
x = -1
self.assertEqual(x, 0)
self.result = (name, dict(x=x))
class MyTest2(unittest.TestCase):
results = {}
    @classmethod
def tearDownClass(cls):
print "tearDownClass:", cls.results
def test_aaa(self):
frame = inspect.currentframe()
name = inspect.getframeinfo(frame).function
del frame
self.results[name] = None
x = 1 + 1
self.assertEqual(x, 2)
self.results[name] = dict(x=x)
def test_bbb(self):
frame = inspect.currentframe()
name = inspect.getframeinfo(frame).function
del frame
self.results[name] = None
x = -1
self.assertEqual(x, 0)
self.results[name] = dict(x=x)
if __name__ == '__main__':
unittest.main()