I'm pretty new to Twisted and I'm attempting to write some unit tests using the trial test framework. My tests run and pass as expected, but for some reason trial hangs between tests. I have to hit CTRL+C after each test to get it to move on to the next one. I'm guessing I have something configured incorrectly, or I'm not calling some method I should be to tell trial the test is done.
Here is the class under test:
from twisted.internet import reactor, defer
import threading
import time

class SomeClass:
    def doSomething(self):
        return self.asyncMethod()

    def asyncMethod(self):
        d = defer.Deferred()
        t = SomeThread(d)
        t.start()
        return d

class SomeThread(threading.Thread):
    def __init__(self, d):
        super(SomeThread, self).__init__()
        self.d = d

    def run(self):
        time.sleep(2)  # pretend to do something
        retVal = 123
        self.d.callback(retVal)
Here is the unit test class:
from twisted.trial import unittest
import tested

class SomeTest(unittest.TestCase):
    def testOne(self):
        sc = tested.SomeClass()
        d = sc.doSomething()
        return d.addCallback(self.allDone)

    def allDone(self, retVal):
        self.assertEquals(retVal, 123)

    def testTwo(self):
        sc = tested.SomeClass()
        d = sc.doSomething()
        return d.addCallback(self.allDone2)

    def allDone2(self, retVal):
        self.assertEquals(retVal, 123)
This is what the command line output looks like:
me$ trial test.py
test
SomeTest
testOne ... ^C [OK]
testTwo ... ^C [OK]
-------------------------------------------------------------------------------
Ran 2 tests in 8.499s
PASSED (successes=2)
I guess your problem has to do with your threads. Twisted is not thread-safe: almost all of its APIs, including Deferred.callback, must only be called from the reactor thread. If you need to interface with threads, you should let the reactor handle things by using deferToThread, callInThread, or callFromThread.
See here for info on how to be thread-safe with Twisted.
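For example, asyncMethod could let the reactor manage the worker thread. A minimal sketch, assuming the two-second sleep stands in for the real blocking work (_blockingWork is a hypothetical helper name, not from the question):

import time
from twisted.internet.threads import deferToThread

class SomeClass:
    def doSomething(self):
        return self.asyncMethod()

    def asyncMethod(self):
        # deferToThread runs _blockingWork in the reactor's thread pool and
        # fires the returned Deferred from the reactor thread, which is safe.
        return deferToThread(self._blockingWork)

    def _blockingWork(self):
        time.sleep(2)  # pretend to do something
        return 123

Alternatively, keep SomeThread but replace self.d.callback(retVal) with reactor.callFromThread(self.d.callback, retVal), so the Deferred fires in the reactor thread instead of the worker thread.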
Sentry can track performance for celery tasks and API endpoints
https://docs.sentry.io/product/performance/
I have a custom script that is launched by cron and does a set of similar tasks.
I want to incorporate sentry_sdk into my script to get performance tracing of my tasks.
Any advice on how to do it with
https://getsentry.github.io/sentry-python/api.html#sentry_sdk.capture_event
You don't need to use capture_event.
I would suggest using sentry_sdk.start_transaction instead; it also lets you trace your function's performance.
Look at my example:
from time import sleep
from sentry_sdk import Hub, init, start_transaction

init(
    dsn="dsn",
    traces_sample_rate=1.0,
)

def sentry_trace(func):
    def wrapper(*args, **kwargs):
        transaction = Hub.current.scope.transaction
        if transaction:
            with transaction.start_child(op=func.__name__):
                return func(*args, **kwargs)
        else:
            with start_transaction(op=func.__name__, name=func.__name__):
                return func(*args, **kwargs)
    return wrapper

@sentry_trace
def b():
    for i in range(1000):
        print(i)

@sentry_trace
def c():
    sleep(2)
    print(1)

@sentry_trace
def a():
    sleep(1)
    b()
    c()

if __name__ == '__main__':
    a()
After running this code you can see basic info for transaction a with children b and c.
I just got thrown into the deep end with my new contract. The current system uses the Python logging module to do timed log-file rotation. The problem is that the log-file of the process running as a daemon gets rotated correctly, while the log-file of the process instances that get created and destroyed when done never rotates. I now have to find a solution to this problem. After 2 days of research on the internet and in the Python documentation I'm only halfway out of the dark. Since I'm new to the logging module I can't see the answer to the problem; I'm probably looking with my eyes closed!
The process is started with:
python /admin/bin/fmlog.py -l 10 -f /tmp/fmlog/fmapp_log.log -d
where:
-l 10 => DEBUG logging-level
-f ... => Filename to log to for app-instance
-d => run as daemon
The following shows a heavily edited version of my code:
#!/usr/bin/env python
from comp.app import app, yamlapp
...
from comp.utils.log4_new import *

# Exceptions handling class
class fmlogException(compException): pass

class fmlog(app):
    # Fmlog application class
    def __init__(self, key, config, **kwargs):
        # Initialise the required variables
        app.__init__(self, key, config, **kwargs)
        self._data = {'sid': self._id}
        ...

    def process(self, tid=None):
        if tid is not None:
            self.logd("Using thread '%d'." % (tid), data=self._data)
        # Run the fmlog process
        self.logi("Processing this '%s'" % (filename), data=self._data)
        ...

    def __doDone__(self, success='Failure', msg='', exception=None):
        ...
        self.logd("Process done!")

if __name__ == '__main__':
    def main():
        with yamlapp(filename=config, cls=fmlog, configcls=fmlogcfg, sections=sections, loglevel=loglevel, \
                     logfile=logfile, excludekey='_dontrun', sortkey='_priority', usethreads=threads, maxthreads=max, \
                     daemon=daemon, sleep=sleep) as a:
            a.run()
    main()
The yamlapp process (a sub-class of app) is instantiated and runs as a daemon until manually stopped. This process creates one or more instances of the fmlog class and calls the process() function when needed (when certain conditions are met). Up to x instances can be created per thread if the yamlapp process runs in thread-mode.
The app process code:
#!/usr/bin/env python
...
from comp.utils.log4_new import *

class app(comp.base.comp, logconfig, log):
    def __init__(self, cls, **kwargs):
        self.__setdefault__('_configcls', configitem)
        self.__setdefault__('_daemon', True)
        self.__setdefault__('_maxthreads', 5)
        self.__setdefault__('_usethreads', False)
        ...
        comp.base.comp.__init__(self, **kwargs)
        logconfig.__init__(self, prog(), **getlogkwargs(**kwargs))
        log.__init__(self, logid=prog())

    def __enter__(self):
        self.logi(msg="Starting application '%s:%s' '%d'..." % (self._cls.__name__, \
            self.__class__.__name__, os.getpid()))
        return self

    def ...

    def run(self):
        ...
        if self._usethreads:
            ...
        while True:
            self.logd(msg="Start of run iteration...")
            if not self._usethreads:
                while not self._q.empty():
                    item = self._q.get()
                    try:
                        item.process()
                    ...
            self.logd(msg="End of run iteration...")
            time.sleep(self._sleep)
The logging config and setup is done via the log4_new.py classes:
#!/usr/bin/env python
import logging
import logging.handlers
import re
from logging import DEBUG, FATAL  # level names used bare below

class logconfig(comp):
    def __init__(self, logid, **kwargs):
        comp.__init__(self, **kwargs)
        self.__setdefault__('_logcount', 20)
        self.__setdefault__('_logdtformat', None)
        self.__setdefault__('_loglevel', DEBUG)
        self.__setdefault__('_logfile', None)
        self.__setdefault__('_logformat', '[%(asctime)-15s][%(levelname)5s] %(message)s')
        self.__setdefault__('_loginterval', 'S')
        self.__setdefault__('_logintervalnum', 30)
        self.__setdefault__('_logsuffix', '%Y%m%d%H%M%S')
        self._logid = logid
        self.__loginit__()

    def __loginit__(self):
        format = logging.Formatter(self._logformat, self._logdtformat)
        if self._logfile:
            hnd = logging.handlers.TimedRotatingFileHandler(self._logfile, when=self._loginterval, interval=self._logintervalnum, backupCount=self._logcount)
            hnd.suffix = self._logsuffix
            hnd.extMatch = re.compile(strftoregex(self._logsuffix))
        else:
            hnd = logging.StreamHandler()
        hnd.setFormatter(format)
        l = logging.getLogger(self._logid)
        for h in l.handlers:
            l.removeHandler(h)
        l.setLevel(self._loglevel)
        l.addHandler(hnd)

class log():
    def __init__(self, logid):
        self._logid = logid

    def __log__(self, msg, level=DEBUG, data=None):
        l = logging.getLogger(self._logid)
        l.log(level, msg, extra=data)

    def logd(self, msg, **kwargs):
        self.__log__(level=DEBUG, msg=msg, **kwargs)

    def ...

    def logf(self, msg, **kwargs):
        self.__log__(level=FATAL, msg=msg, **kwargs)

def getlogkwargs(**kwargs):
    logdict = {}
    for key, value in kwargs.iteritems():
        if key.startswith('log'): logdict[key] = value
    return logdict
Logging works as expected: logs from yamlapp (the sub-class of app) are written to fmapp_log.log, and logs from fmlog are written to fmlog.log.
The problem is that fmapp_log.log is rotated as expected, but fmlog.log is never rotated. How do I solve this? I know the process must run continuously for the rotation to happen, which is why only one logger is used. I suspect another handler must be created for the fmlog process, one which is never destroyed when the process exits.
Requirements:
The app (framework or main) log and the fmlog (process) log must be to different files.
Both log-files must be time-rotated.
Hopefully someone will understand the above and be able to give me a couple of pointers.
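For reference, a minimal standalone sketch of two independently time-rotated loggers in one long-running process, using only the stdlib (names and intervals here are illustrative, not taken from the code above):

import logging
import logging.handlers

def make_rotating_logger(name, filename):
    # One TimedRotatingFileHandler per file; rotation is driven by the
    # handler, so it only happens while the owning process stays alive.
    handler = logging.handlers.TimedRotatingFileHandler(
        filename, when='S', interval=30, backupCount=20)
    handler.setFormatter(logging.Formatter('[%(asctime)-15s][%(levelname)5s] %(message)s'))
    logger = logging.getLogger(name)
    logger.setLevel(logging.DEBUG)
    logger.addHandler(handler)
    return logger

app_log = make_rotating_logger('fmapp', '/tmp/fmlog/fmapp_log.log')
proc_log = make_rotating_logger('fmlog', '/tmp/fmlog/fmlog.log')

If the handler writing fmlog.log only lives inside short-lived fmlog instances, it never survives long enough to reach a rollover, which is consistent with the behaviour described above; creating that handler once in the long-running daemon and reusing it would avoid that.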
I'm testing a class, with many test methods. However, each method has a unique context. I then write my code as following:
class TestSomeClass(unittest.TestCase):
    def test_a(self):
        with a_context() as c:
            pass

    def test_b(self):
        with b_context() as c:
            pass

    def test_c(self):
        with c_context() as c:
            pass
However, the context managers are irrelevant to the test case, and produce temporary files. So as to not pollute the file system when the test fails, I would like to use each context manager in a setup/teardown scenario.
I've looked at nose's with_setup, but the docs say that is meant for functions only, not methods. Another way is to move the test methods to separate classes each with a setup/teardown function. What's a good way to do this?
First of all, I'm not sure why what you have isn't working. I wrote some test code, and it shows that the exit code always gets called, under the unittest.main() execution environment. (Note, I did not test nose, so maybe that's why I couldn't replicate your failure.) Maybe your context manager is broken?
Here's my test:
import unittest
import contextlib
import sys

@contextlib.contextmanager
def context_mgr():
    print "setting up context"
    try:
        yield
    finally:
        print "tearing down context"

class TestSomeClass(unittest.TestCase):
    def test_normal(self):
        with context_mgr() as c:
            print "normal task"

    def test_raise(self):
        with context_mgr() as c:
            print "raise task"
            raise RuntimeError

    def test_exit(self):
        with context_mgr() as c:
            print "exit task"
            sys.exit(1)

if __name__ == '__main__':
    unittest.main()
By running that with $ python test_test.py I see tearing down context for all 3 tests.
Anyway, to answer your question, if you want a separate setup and teardown for each test, then you need to put each test in its own class. You can set up a parent class to do most of the work for you, so there isn't too much extra boilerplate:
class TestClassParent(unittest.TestCase):
    context_guard = context_mgr()

    def setUp(self):
        # do common setup tasks here
        self.c = self.context_guard.__enter__()

    def tearDown(self):
        # do common teardown tasks here
        self.context_guard.__exit__(None, None, None)

class TestA(TestClassParent):
    context_guard = context_mgr('A')

    def test_normal(self):
        print "task A"

class TestB(TestClassParent):
    context_guard = context_mgr('B')

    def test_normal(self):
        print "task B"
This produces the output:
$ python test_test.py
setting up context: A
task A
tearing down context: A
.setting up context: B
task B
tearing down context: B
.
----------------------------------------------------------------------
Ran 2 tests in 0.000s
OK
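If one class per test feels heavy, another option (my addition, not from the original answer) is to drive each context manager with unittest's addCleanup, which runs whether the test passes or fails; enter_context below is a hypothetical helper (Python 3.11 later added TestCase.enterContext, which does exactly this):

import unittest

class TestSomeClass(unittest.TestCase):
    def enter_context(self, cm):
        # Enter the context manager and register its exit as a cleanup,
        # so teardown runs per test even when the test body raises.
        value = cm.__enter__()
        self.addCleanup(cm.__exit__, None, None, None)
        return value

    def test_a(self):
        c = self.enter_context(a_context())  # a_context as in the question
        # ... test body using c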
I want to make a stub to prevent time.sleep() from actually sleeping, to improve the unit test execution time.
What I have is:
import time as orgtime

class time(orgtime):
    '''Stub for time.'''
    _sleep_speed_factor = 1.0

    @staticmethod
    def _set_sleep_speed_factor(sleep_speed_factor):
        '''Sets sleep speed.'''
        time._sleep_speed_factor = sleep_speed_factor

    @staticmethod
    def sleep(duration):
        '''Sleeps or not.'''
        print duration * time._sleep_speed_factor
        super.sleep(duration * time._sleep_speed_factor)
However, I get the following error on the second code line above (class definition):
TypeError: Error when calling the metaclass bases
module.__init__() takes at most 2 arguments (3 given).
How to fix the error?
You can use the mock library in your tests:
import time
from mock import patch

class MyTestCase(...):
    @patch('time.sleep', return_value=None)
    def my_test(self, patched_time_sleep):
        time.sleep(666)  # Should be instant
The accepted answer is still valid. However, unittest.mock has been an official part of the Python standard library since Python 3.3.
import time
from unittest import TestCase
from unittest.mock import patch

class TestMyCase(TestCase):
    @patch('time.sleep', return_value=None)
    def test_my_method(self, patched_time_sleep):
        time.sleep(60)  # Should be instant

        # the mock should only be called once
        self.assertEqual(1, patched_time_sleep.call_count)
        # or
        patched_time_sleep.assert_called_once()

    # alternative version using a context manager
    def test_my_method_alternative(self):
        with patch('time.sleep', return_value=None) as patched_time_sleep:
            time.sleep(60)  # Should be instant

            # the mock should only be called once
            self.assertEqual(1, patched_time_sleep.call_count)
            # or
            patched_time_sleep.assert_called_once()
I'm using pytest and have the following fixture to monkeypatch time.sleep:
import time  # needed so the fixture can patch time.sleep
import pytest

@pytest.fixture
def sleepless(monkeypatch):
    def sleep(seconds):
        pass
    monkeypatch.setattr(time, 'sleep', sleep)
Then, in a test where I need to "speed up" the sleep, I just use this fixture:
import time

def test_sleep(sleepless):
    time.sleep(60)
So when you run this test, you will see that it completes in much shorter time:
= 1 passed in 0.02 seconds =
What about:
import time
from time import sleep as originalsleep

def newsleep(seconds):
    sleep_speed_factor = 10.0
    originalsleep(seconds / sleep_speed_factor)

time.sleep = newsleep
This is working for me. I am including it at the beginning of the test I want to speed up, and at the end I set back the original sleep, just in case. Hope it helps.
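The "set back the original sleep" step could look like this; a small sketch with try/finally added (my addition) so the real sleep is restored even if the test fails:

import time
from time import sleep as originalsleep

def newsleep(seconds):
    sleep_speed_factor = 10.0  # same illustrative factor as above
    originalsleep(seconds / sleep_speed_factor)

def test_something_slow():
    time.sleep = newsleep  # speed up every sleep in this test
    try:
        pass  # ... the actual test body
    finally:
        time.sleep = originalsleep  # always restore the real sleep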
Here's what I did to prevent the test from sleeping:
If I have a module mymodule.py that imports and uses sleep in a function that I want to test:
from time import sleep

def some_func():
    sleep(5)
    # ...do some things
I then have my test patch sleep in the module that uses it, like this:

from unittest import mock  # or: import mock (the standalone package)

@mock.patch('mymodule.sleep')
def test_some_func(mock_sleep):
    mock_sleep.return_value = None
    # ...continue my test
Using the freezegun package can help you do this.
# fake.py
import functools
from datetime import datetime, timedelta
from unittest import mock

from freezegun import freeze_time

def fake_sleep(func):
    freezegun_control = None

    def fake_sleep(seconds):
        nonlocal freezegun_control
        utcnow = datetime.utcnow()
        if freezegun_control is not None:
            freezegun_control.stop()
        freezegun_control = freeze_time(utcnow + timedelta(seconds=seconds))
        freezegun_control.start()

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        with mock.patch('time.sleep', fake_sleep):
            rv = func(*args, **kwargs)
        if freezegun_control is not None:
            freezegun_control.stop()
        return rv
    return wrapper

# test.py
import time
from datetime import datetime, timedelta  # added: used in the assertions below

from fake import fake_sleep

@fake_sleep
def test_sleep():
    now = datetime.utcnow()
    for sleep_seconds in range(10):
        for i in range(1, 10):
            time.sleep(sleep_seconds)
            assert datetime.utcnow() - now >= timedelta(
                seconds=i * sleep_seconds)
common demo: please see the freezegun README
pytest demo: Gist fake sleep function fixture
I'm researching the new version of pytest (2.3) and getting very excited about the new functionality, where you
"can precisely control teardown by registering one or multiple
teardown functions as soon as they have performed some actions which
need undoing, eliminating the need for a separate “teardown”
decorator"
from here
It's all pretty clear when it's used as function, but how to use it in the class?
class Test(object):
    @pytest.setup(scope='class')
    def stp(self):
        self.propty = "something"

    def test_something(self):
        ...  # some code
        # need to add something to the teardown

    def test_something_else(self):
        ...  # some code
        # need to add even more to the teardown
Ok, I got it working by having a 'session'-wide funcarg finalizer:
@pytest.fixture(scope="session")
def finalizer():
    return Finalizer()

class Finalizer(object):
    def __init__(self):
        self.fin_funcs = []

    def add_fin_func(self, func):
        self.fin_funcs.append(func)

    def remove_fin_func(self, func):
        try:
            self.fin_funcs.remove(func)
        except:
            pass

    def execute(self):
        for func in reversed(self.fin_funcs):
            func()
        self.fin_funcs = []

class TestSomething(object):
    @classmethod
    @pytest.fixture(scope="class", autouse=True)
    def setup(self, request, finalizer):
        self.finalizer = finalizer
        request.addfinalizer(self.finalizer.execute)
        self.finalizer.add_fin_func(lambda: some_teardown())

    def test_with_teardown(self):
        # some test
        self.finalizer.add_fin_func(self.additional_teardown)

    def additional_teardown(self):
        # additional teardown
        pass
Thanks @hpk42 for answering e-mails and helping me get the final version.
NOTE: together with xfailing the rest of the steps and improved scenarios, this now makes a pretty good test-step structure.
Indeed, there are no good examples for teardown yet. The request object has an addfinalizer method. Here is an example usage:
@pytest.setup(scope=...)
def mysetup(request):
    ...
    request.addfinalizer(finalizerfunction)
    ...
The finalizerfunction will be called when all tests within the scope have finished execution.
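For readers on current pytest versions: the experimental pytest.setup decorator never became the stable API. The same setup-plus-teardown pairing is normally written as a yield fixture today; a rough sketch, not from the original answer:

import pytest

@pytest.fixture(scope="class", autouse=True)
def mysetup():
    # setup actions run before the first test in the scope
    yield
    # teardown actions run after the last test in the scope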