Change default_retry_delay for a celery task at runtime? - python

Is there a way to modify the retry delay for celery tasks at runtime? Or is there a global config value that can be changed to override the 180s default value?
I have set up tasks with exponential back-off (as described here: Retry Celery tasks with exponential back off) but I want to override this value when integration testing.
The reason is that I often end up triggering the 180s default value if exceptions are raised within an exception handler, which seems to bypass and ignore the countdown argument.
class BaseTask(celery.Task):

    def on_retry(self, exc, task_id, args, kwargs, einfo):
        """Log the exceptions at retry."""
        logger.exception(exc)
        logger.warning('Retry: {}.'.format(self.request))
        super().on_retry(exc, task_id, args, kwargs, einfo)

    def on_failure(self, exc, task_id, args, kwargs, einfo):
        """Log the exceptions on failure."""
        logger.exception(exc)
        logger.error('Failure: {}.'.format(self.request))
        super().on_failure(exc, task_id, args, kwargs, einfo)

    @property
    def backoff_countdown(self):
        return int(random.uniform(2, 4) ** self.request.retries)


@celery.task(bind=True, base=BaseTask)
def process(self, data):
    try:
        return some_task(data)
    except Exception as exc:
        raise self.retry(exc=exc, coundown=self.backoff_countdown)
Regardless of what I set for self.backoff_countdown (even just returning 1) I end up with tasks being retried in 180s, which makes it really hard to run integration tests with reasonable timeouts.

Refer to the documentation at http://docs.celeryproject.org/en/latest/userguide/tasks.html#using-a-custom-retry-delay: you can either set default_retry_delay on the task or pass a countdown value to retry().
@celery.task(bind=True, default_retry_delay=30 * 60)  # retry in 30 minutes.
def process(self, data):
    try:
        return some_task(data)
    except Exception as exc:
        # Retry in 5 minutes
        raise self.retry(exc=exc, countdown=5 * 60)
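For the integration-testing use case in the question, one option (my own sketch, not part of the answer; the setting name is hypothetical) is to let a config value short-circuit the exponential back-off, so tests can force a tiny delay:

import random

from django.conf import settings  # assumes a Django-style settings object


class BaseTask(celery.Task):

    @property
    def backoff_countdown(self):
        # Hypothetical setting: integration tests set it to 1 so retries
        # fire almost immediately; production leaves it unset.
        override = getattr(settings, 'TASK_RETRY_COUNTDOWN_OVERRIDE', None)
        if override is not None:
            return override
        return int(random.uniform(2, 4) ** self.request.retries)

Whatever countdown is computed, it only takes effect if retry() actually receives it as the countdown keyword argument; otherwise Celery falls back to default_retry_delay.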

Related

Timeout a C++ function from Python

I have python-cpp bindings implemented (using boost-python) such that calling foop() from Python runs a C++ function fooc(). I would like to set a timeout from Python such that foop returns after t seconds. The solutions here work fine for Python functions, but not with foop because I'm unable to interrupt the C++ code -- example below for calling run_for_timeout(foop). Is there a way to do this from Python (i.e. without implementing the timer functionality in C++)?
import signal


class CallbackValueError(ValueError):
    """Raise for improper data values with callback functions and their utils."""
    pass


class TimeoutError(RuntimeError):
    pass


def run_for_timeout(func, args=(), kwargs=None, timeout=5):
    """Run a function until it times out.

    Note that ``timeout`` = 0 does not imply no timeout, but rather that there
    is no time for the function to run, and this function raises an error.

    Parameters
    ----------
    func : function
    args : tuple
    kwargs : dict | None
    timeout : int
        (seconds)

    Returns
    -------
    result : object | None
        Return object from the function, or None if it timed out

    Raises
    ------
    CallbackValueError
    """
    if timeout <= 0:
        raise CallbackValueError("{}s is a nonsensical value for the "
                                 "timeout function".format(timeout))

    def handler(signum, frame):
        raise TimeoutError()

    # Set the timeout handler
    signal.signal(signal.SIGALRM, handler)
    signal.alarm(timeout)
    if kwargs is None:
        kwargs = {}
    try:
        result = func(*args, **kwargs)
    except TimeoutError:
        result = None
    finally:
        # Cancel the alarm whether or not the function returned in time
        signal.alarm(0)
    return result
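No answer is included in this excerpt; one standard-library workaround (my own sketch, not from the original post) is to run the call in a worker process, since SIGALRM cannot interrupt code that is busy inside a C++ extension, but a whole process can be killed:

import multiprocessing


def run_in_process_with_timeout(func, args=(), kwargs=None, timeout=5):
    """Run func in a worker process and kill the worker if it exceeds timeout.

    Sketch only: func, its arguments and its return value must be picklable,
    because they cross a process boundary.
    """
    kwargs = kwargs or {}
    with multiprocessing.Pool(processes=1) as pool:
        async_result = pool.apply_async(func, args, kwargs)
        try:
            return async_result.get(timeout)
        except multiprocessing.TimeoutError:
            # Terminates the worker even while it is deep inside C++ code.
            pool.terminate()
            return None

For example, run_in_process_with_timeout(foop, timeout=3) would return None if the C++ call has not finished after 3 seconds; the trade-off is the overhead of spawning a process and serialising arguments.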

How can I test on_failure in celery

My celery task has a base class where an on_failure method is implemented.
In my test, I patched one of the methods that the task calls to raise an exception, but on_failure is never called.
Base class
class BaseTask(celery.Task):
    abstract = True

    def on_failure(self, exc, task_id, args, kwargs, einfo):
        print("error")
Task
@celery.task(bind=True, base=BaseTask)
def multiple(self, a, b):
    logic.method(a, b)
Test
@patch('tasks.logic.method')
def test_something(self, mock):
    # arrange
    mock.side_effect = NotImplementedError
    # act
    with self.assertRaises(NotImplementedError):
        multiple(1, 2)
When running celery and an exception is raised, everything works fine.
CELERY_ALWAYS_EAGER is activated.
How can I make on_failure run?
From a discussion on an issue in the celery GitHub repo: the on_failure test is "already done on the Celery level (verifying if on_failure is called)", so "write a test to test whatever your on_failure does instead". You could define a function inside the on_failure method and test it, or call on_failure directly:
from unittest import TestCase

from billiard.einfo import ExceptionInfo


class TestTask(TestCase):

    def test_on_failure(self):
        """Testing the on_failure method."""
        exc = Exception("Test")
        task_id = "test_task_id"
        args = ["argument 1"]
        kwargs = {"key": "value"}
        einfo = ExceptionInfo
        # call the on_failure method
        multiple.on_failure(exc, task_id, args, kwargs, einfo)
        # assert something happened
ExceptionInfo is the same type of object celery uses; multiple is your task as you defined it in your question.
Hope this helps
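Following that advice, a minimal sketch of testing what this particular on_failure does (it just prints "error"), assuming multiple is importable from your tasks module and you are on Python 3:

from unittest import TestCase
from unittest.mock import patch

from billiard.einfo import ExceptionInfo

from tasks import multiple  # assumed import path


class TestOnFailureSideEffects(TestCase):

    @patch('builtins.print')
    def test_on_failure_prints_error(self, mock_print):
        exc = NotImplementedError("boom")
        multiple.on_failure(exc, "task-id", (1, 2), {}, ExceptionInfo)
        # BaseTask.on_failure prints "error", so assert exactly that side effect.
        mock_print.assert_called_once_with("error")

If your real handler logs or notifies instead of printing, patch and assert on that collaborator instead.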

celery: get function name by task id?

I am using the celery on_failure handler to log all failed tasks for debugging and analysis, and I want to know the task name (function name) of the failed task. How can I get that?
from celery import Task


class DebugTask(Task):
    abstract = True

    def after_return(self, *args, **kwargs):
        print('Task returned: {0!r}'.format(self.request))

    def on_failure(self, exc, task_id, args, kwargs, einfo):
        func_name = get_func_name_by_task_id(task_id)  # how do I do this?
        print("{} failed".format(func_name))  # expected output: add failed.


@app.task(base=DebugTask)
def add(x, y):
    return x + y
PS: I know the handler receives task_id, but querying the function name by task_id every time is not fun.
A quick look at the documentation shows Task.name.
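In other words (a sketch on my side, not the answerer's code), the bound task instance already knows its own registered name inside the handler:

from celery import Task


class DebugTask(Task):
    abstract = True

    def on_failure(self, exc, task_id, args, kwargs, einfo):
        # self.name is the registered task name, e.g. 'myapp.tasks.add'
        print("{} failed (task_id={})".format(self.name, task_id))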

Time out decorator on a multiprocessing function

I have this decorator taken directly from an example I found on the net:
import signal


class TimedOutExc(Exception):
    pass


def timeout(timeout):
    def decorate(f):

        def handler(signum, frame):
            raise TimedOutExc()

        def new_f(*args, **kwargs):
            old = signal.signal(signal.SIGALRM, handler)
            signal.alarm(timeout)
            try:
                result = f(*args, **kwargs)
            except TimedOutExc:
                return None
            finally:
                signal.signal(signal.SIGALRM, old)
                signal.alarm(0)
            return result

        new_f.func_name = f.func_name
        return new_f
    return decorate
It raises an exception if the function f times out.
Well, it works, but when I use this decorator on a multiprocessing function and it stops due to a timeout, it doesn't terminate the processes involved in the computation. How can I do that?
I don't want to raise an exception and stop the program. Basically, what I want is for f to return None when it times out and then to terminate the processes involved.
While I agree with the main point of Aaron's answer, I would like to elaborate a bit.
The processes launched by multiprocessing must be stopped in the function to be decorated; I don't think that this can be done generally and simply from the decorator itself (the decorated function is the only entity that knows what calculations it launched).
Instead of having the decorator catch the SIGALRM-triggered exception, you can have the decorated function catch your custom TimedOutExc exception; this might be more flexible. Your example would then become:
import signal
import functools


class TimedOutExc(Exception):
    """
    Raised when a timeout happens
    """


def timeout(timeout):
    """
    Return a decorator that raises a TimedOutExc exception
    after timeout seconds, if the decorated function did not return.
    """
    def decorate(f):

        def handler(signum, frame):
            raise TimedOutExc()

        @functools.wraps(f)  # Preserves the documentation, name, etc.
        def new_f(*args, **kwargs):
            old_handler = signal.signal(signal.SIGALRM, handler)
            signal.alarm(timeout)
            result = f(*args, **kwargs)  # f() always returns, in this scheme
            signal.signal(signal.SIGALRM, old_handler)  # Old signal handler is restored
            signal.alarm(0)  # Alarm removed
            return result

        return new_f
    return decorate


@timeout(10)
def function_that_takes_a_long_time():
    try:
        ...  # long, parallel calculation
    except TimedOutExc:
        # Code that shuts down the processes
        # ...
        return None  # Or an exception is raised, which means that the calculation is not complete
I doubt that can be done with a decorator: A decorator is a wrapper for a function; the function is a black box. There is no communication between the decorator and the function it wraps.
What you need to do is rewrite your function's code to use the SIGALRM handler to terminate any processes it has started.
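As a concrete illustration of that point (my own sketch, not code from either answer), the decorated function can own the worker pool and shut it down itself when the alarm fires; some_expensive_step is a placeholder here:

import multiprocessing


@timeout(10)
def function_that_takes_a_long_time():
    pool = multiprocessing.Pool(processes=4)
    try:
        # Placeholder for the long, parallel calculation.
        results = pool.map(some_expensive_step, range(100))
    except TimedOutExc:
        pool.terminate()  # stop the workers this function started
        pool.join()
        return None
    pool.close()
    pool.join()
    return results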

celery task and customize decorator

I'm working on a project using django and celery (django-celery). Our team decided to wrap all data access code within (app-name)/manager.py (NOT wrapped into Managers the django way), and to let the code in (app-name)/task.py deal only with assembling and performing tasks with celery (so we don't have a django ORM dependency in this layer).
In my manager.py, I have something like this:
def get_tag(tag_name):
    ctype = ContentType.objects.get_for_model(Photo)
    try:
        tag = Tag.objects.get(name=tag_name)
    except ObjectDoesNotExist:
        return Tag.objects.none()
    return tag


def get_tagged_photos(tag):
    ctype = ContentType.objects.get_for_model(Photo)
    return TaggedItem.objects.filter(content_type__pk=ctype.pk, tag__pk=tag.pk)


def get_tagged_photos_count(tag):
    return get_tagged_photos(tag).count()
In my task.py, I'd like to wrap them into tasks (and then maybe use these tasks to build more complicated ones), so I wrote this decorator:
import manager  # the module within the same app containing the data access functions


class mfunc_to_task(object):

    def __init__(self, mfunc_type='get'):
        self.mfunc_type = mfunc_type

    def __call__(self, f):
        def wrapper_f(*args, **kwargs):
            callback = kwargs.pop('callback', None)
            mfunc = getattr(manager, f.__name__)
            result = mfunc(*args, **kwargs)
            if callback:
                if self.mfunc_type == 'get':
                    subtask(callback).delay(result)
                elif self.mfunc_type == 'get_or_create':
                    subtask(callback).delay(result[0])
                else:
                    subtask(callback).delay()
            return result
        return wrapper_f
then (still in task.py):
@task
@mfunc_to_task()
def get_tag():
    pass


@task
@mfunc_to_task()
def get_tagged_photos():
    pass


@task
@mfunc_to_task()
def get_tagged_photos_count():
    pass
Things work fine without @task.
But after applying the @task decorator (at the top, as the celery documentation instructs), things just start to fall apart. Apparently, every time mfunc_to_task.__call__ gets called, the same task.get_tag function gets passed as f, so I end up with the same wrapper_f every time, and now the only thing I can do is get a single tag.
I'm new to decorators. Can anyone help me understand what went wrong here, or point out another way to achieve this? I really hate writing the same task-wrapping code for every one of my data access functions.
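One thing worth checking first (my own suggestion and only a hypothesis, not part of the answer below): wrapper_f hides the wrapped function's name, so every decorated function presents the same __name__ to celery's registry; preserving the metadata with functools.wraps may be enough to keep the generated tasks distinct:

import functools

import manager


class mfunc_to_task(object):

    def __init__(self, mfunc_type='get'):
        self.mfunc_type = mfunc_type

    def __call__(self, f):
        @functools.wraps(f)  # keep f's __name__ / __module__ on the wrapper
        def wrapper_f(*args, **kwargs):
            callback = kwargs.pop('callback', None)
            mfunc = getattr(manager, f.__name__)
            result = mfunc(*args, **kwargs)
            if callback:
                subtask(callback).delay(result)  # callback dispatch trimmed for brevity
            return result
        return wrapper_f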
I'm not quite sure why passing arguments won't work. If you use this example:
@task()
def add(x, y):
    return x + y
let's add some logging to MyCoolTask:
from celery import task
from celery.registry import tasks
import logging
import celery

logger = logging.getLogger(__name__)


class MyCoolTask(celery.Task):

    def __call__(self, *args, **kwargs):
        """In a celery task this function calls the run method; here you can
        set some environment variables before the run of the task."""
        logger.info("Starting to run")
        return self.run(*args, **kwargs)

    def after_return(self, status, retval, task_id, args, kwargs, einfo):
        # exit point of the task, whatever the state
        logger.info("Ending run")
and create an extended class (extending MyCoolTask, but now with arguments):
class AddTask(MyCoolTask):

    def run(self, x, y):
        if x and y:
            result = add(x, y)
            logger.info('result = %d' % result)
            return result
        else:
            logger.error('No x or y in arguments')


tasks.register(AddTask)
and make sure you pass the kwargs as json data:
{"x":8,"y":9}
I get the result:
[2013-03-05 17:30:25,853: INFO/MainProcess] Starting to run
[2013-03-05 17:30:25,855: INFO/MainProcess] result = 17
[2013-03-05 17:30:26,739: INFO/MainProcess] Ending run
[2013-03-05 17:30:26,741: INFO/MainProcess] Task iamscheduler.tasks.AddTask[6a62641d-16a6-44b6-a1cf-7d4bdc8ea9e0] succeeded in 0.888684988022s: 17
Instead of using a decorator, why don't you create a base class that extends celery.Task?
That way all your tasks can extend your customized task class, where you can implement your own behaviour by using the __call__ and after_return methods.
You can also define common methods and objects for all your tasks.
class MyCoolTask(celery.Task):

    def __call__(self, *args, **kwargs):
        """In a celery task this function calls the run method; here you can
        set some environment variables before the run of the task."""
        return self.run(*args, **kwargs)

    def after_return(self, status, retval, task_id, args, kwargs, einfo):
        # exit point of the task, whatever the state
        pass
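For completeness (my own addition, assuming a Celery app instance named app), the same base class also works with decorator-style tasks, so you don't have to register classes by hand:

@app.task(base=MyCoolTask, bind=True)
def add(self, x, y):
    # self is a MyCoolTask instance, so the __call__/after_return hooks above apply
    return x + y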
