Celery uses the wrong broker - Python

I have a setup from the docs.
celery.py file:
from __future__ import absolute_import
import os
from celery import Celery
from django.conf import settings
# set the default Django settings module for the 'celery' program.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'cars.settings')
app = Celery('cars')
# Using a string here means the worker will not have to
# pickle the object when using Windows.
app.config_from_object('django.conf:settings')
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
supervisord config
[program:bbay-celery]
command = /opt/webapps/bbay/env/bin/celery worker -A cars.celery:app ; Command to start app
directory = /opt/webapps/bbay/
user = bbay ; User to run as
numprocs = 1
stdout_logfile = /opt/webapps/bbay/logs/celery.log ; Where to write log messages
redirect_stderr = true ; Save stderr in the same log
environment=LANG='en_US.UTF-8',LC_ALL='en_US.UTF-8',DJANGO_SETTINGS_MODULE='cars.settings',CELERYD_CHDIR='/opt/webapps/bbay/'
autostart = true
autorestart = true
startsecs = 10
Django settings:
BROKER_URL = 'redis://localhost:6379/0'
When I start the worker everything seems fine and the correct broker URL is used:
(env)bbay@djproj:/opt/webapps/bbay$ celery -A cars.celery:app beat
celery beat v3.1.17 (Cipater) is starting.
Configuration ->
. broker -> redis://localhost:6379/0
. loader -> celery.loaders.app.AppLoader
. scheduler -> celery.beat.PersistentScheduler
. db -> celerybeat-schedule
. logfile -> [stderr]@%INFO
. maxinterval -> now (0s)
Here is my task:
from celery import shared_task

@shared_task
def send_mail_task(template, context, send_to):
    ....
Here is how I use it:
send_mail_task.delay('email/confirmation_message.html', context, [user.email, ])
But when the task is called, it tries to connect to the default broker (host
'127.0.0.1:5672'). Here is the stack trace:
Stacktrace (most recent call last):
File "django/core/handlers/base.py", line 111, in get_response
response = wrapped_callback(request, *callback_args, **callback_kwargs)
File "django/views/decorators/csrf.py", line 57, in wrapped_view
return view_func(*args, **kwargs)
File "django/views/generic/base.py", line 69, in view
return self.dispatch(request, *args, **kwargs)
File "rest_framework/views.py", line 452, in dispatch
response = self.handle_exception(exc)
File "rest_framework/views.py", line 449, in dispatch
response = handler(request, *args, **kwargs)
File "accounts/api/views.py", line 132, in post
send_mail_task.delay('email/contact_seller.html', context, [profile.user.email, ])
File "celery/app/task.py", line 453, in delay
return self.apply_async(args, kwargs)
File "celery/app/task.py", line 555, in apply_async
**dict(self._get_exec_options(), **options)
File "celery/app/base.py", line 355, in send_task
reply_to=reply_to or self.oid, **options
File "celery/app/amqp.py", line 305, in publish_task
**kwargs
File "kombu/messaging.py", line 168, in publish
routing_key, mandatory, immediate, exchange, declare)
File "kombu/connection.py", line 457, in _ensured
interval_max)
File "kombu/connection.py", line 369, in ensure_connection
interval_start, interval_step, interval_max, callback)
File "kombu/utils/__init__.py", line 243, in retry_over_time
return fun(*args, **kwargs)
File "kombu/connection.py", line 237, in connect
return self.connection
File "kombu/connection.py", line 741, in connection
self._connection = self._establish_connection()
File "kombu/connection.py", line 696, in _establish_connection
conn = self.transport.establish_connection()
File "kombu/transport/pyamqp.py", line 112, in establish_connection
conn = self.Connection(**opts)
File "amqp/connection.py", line 165, in __init__
self.transport = self.Transport(host, connect_timeout, ssl)
File "amqp/connection.py", line 186, in Transport
return create_transport(host, connect_timeout, ssl)
File "amqp/transport.py", line 299, in create_transport
return TCPTransport(host, connect_timeout)
File "amqp/transport.py", line 95, in __init__
raise socket.error(last_err)
So what is wrong, how do I make Celery connect to the specified broker, and where is this covered in the Celery docs?

The problem was that I had missed the Celery import in __init__.py. __init__.py should contain the following (from the docs):
from __future__ import absolute_import
# This will make sure the app is always imported when
# Django starts so that shared_task will use this app.
from .celery import app as celery_app

Ensure the broker URL in your config or settings file carries the prefix specified by the namespace in app.config_from_object('django.conf:settings', namespace='CELERY'),
so I changed my BROKER_URL to CELERY_BROKER_URL.
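A minimal sketch of the matching pair, assuming the Django settings layout from the question:
# celery.py: with a namespace, Celery reads only settings carrying that prefix
app.config_from_object('django.conf:settings', namespace='CELERY')
# settings.py: every Celery setting must then be prefixed accordingly
CELERY_BROKER_URL = 'redis://localhost:6379/0'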

In my case the wrong-broker issue was due to an incorrect Celery start command.
I had used 'celery beat -A=myapp', and the broker was wrong. Then I changed it to 'celery -A myapp beat' and it used the correct broker from settings.
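For clarity, the two invocations side by side (myapp stands for your project module):
# wrong (per this answer): the broker fell back to the default
celery beat -A=myapp
# right: -A given to the top-level celery command, broker read from settings
celery -A myapp beat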

It may be caused by kombu.
At first my environment was:
kombu-4.6.11
celery-4.4.7
and Celery could not get the settings via config_from_object!
After changing to:
kombu-4.0.2
celery-4.0.2
the problem was solved!
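If you hit the same thing, a minimal requirements pin with the versions this answer reports as working:
# requirements.txt -- versions taken from the answer above
celery==4.0.2
kombu==4.0.2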

Related

Celery error “Received 0x00 while expecting 0xce”

I use Celery 4.3.0 and RabbitMQ 3.6.6. There is also an HAProxy for balancing RabbitMQ, but right now it only uses one RabbitMQ node.
The client/server timeout in HAProxy is 24 hours.
Sometimes I get an error and the task execution status is not returned, while the tasks continue to run:
Received 0x00 while expecting 0xce
Traceback (most recent call last):
File "/var/www/serv/app/mod_axmsg/dispatch.py", line 376, in execute
result_value = func_meta.func(*task.args_list, **task.kwargs_dict)
File "/var/www/serv/app/mod_rmc/libs/health.py", line 91, in update_all_health
health.update_all_health()
File "/var/www/serv/app/mod_core/libs/health.py", line 515, in update_all_health
result.join()
File "/usr/local/serv/lib/python3.5/site-packages/celery/result.py", line 765, in join
interval=interval, no_ack=no_ack, on_interval=on_interval,
File "/usr/local/serv/lib/python3.5/site-packages/celery/result.py", line 226, in get
on_message=on_message,
File "/usr/local/serv/lib/python3.5/site-packages/celery/backends/asynchronous.py", line 188, in wait_for_pending
for _ in self._wait_for_pending(result, **kwargs):
File "/usr/local/serv/lib/python3.5/site-packages/celery/backends/asynchronous.py", line 255, in _wait_for_pending
on_interval=on_interval):
File "/usr/local/serv/lib/python3.5/site-packages/celery/backends/asynchronous.py", line 56, in drain_events_until
yield self.wait_for(p, wait, timeout=1)
File "/usr/local/serv/lib/python3.5/site-packages/celery/backends/asynchronous.py", line 65, in wait_for
wait(timeout=timeout)
File "/usr/local/serv/lib/python3.5/site-packages/celery/backends/rpc.py", line 63, in drain_events
return self._connection.drain_events(timeout=timeout)
File "/usr/local/serv/lib/python3.5/site-packages/kombu/connection.py", line 323, in drain_events
return self.transport.drain_events(self.connection, **kwargs)
File "/usr/local/serv/lib/python3.5/site-packages/kombu/transport/pyamqp.py", line 103, in drain_events
return connection.drain_events(**kwargs)
File "/usr/local/serv/lib/python3.5/site-packages/amqp/connection.py", line 505, in drain_events
while not self.blocking_read(timeout):
File "/usr/local/serv/lib/python3.5/site-packages/amqp/connection.py", line 510, in blocking_read
frame = self.transport.read_frame()
File "/usr/local/serv/lib/python3.5/site-packages/amqp/transport.py", line 280, in read_frame
'Received {0:#04x} while expecting 0xce'.format(ch))
amqp.exceptions.UnexpectedFrame: Received 0x00 while expecting 0xce
My code: celery.py
import logging
from app import db
from celery import Celery

LOG = logging.getLogger(__name__)

def make_celery(flask_app):
    broker = flask_app.config['AMQP_URL']
    celery = Celery('mod_celery', broker=broker, backend="rpc://")
    celery.conf.update(flask_app.config)

    class ContextTask(celery.Task):
        def __call__(self, *args, **kwargs):
            with flask_app.app_context():
                return self.run(*args, **kwargs)

    celery.Task = ContextTask
    return celery

def in_tx(task_function):
    def tx_wrapper(*args):
        try:
            task_function(*args)
            db.session.commit()
        except Exception as ex:
            msg = 'Could not execute task {}. Reason: {}.' \
                .format(task_function.__name__, str(ex))
            LOG.error(msg)
            db.session.rollback()
    return tx_wrapper
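For completeness, tasks.py below imports celery from the app package, so presumably the instance is created there with make_celery at import time; a rough sketch of that wiring (the Flask setup, broker URL, module paths, and use of Flask-SQLAlchemy are assumptions, not shown in the question):
# app/__init__.py -- hypothetical wiring, inferred from the imports in celery.py and tasks.py
from flask import Flask
from flask_sqlalchemy import SQLAlchemy

flask_app = Flask(__name__)
flask_app.config['AMQP_URL'] = 'amqp://guest:guest@localhost:5672//'  # assumed broker URL
db = SQLAlchemy(flask_app)  # the db that celery.py imports

from .celery import make_celery  # imported after db exists to avoid a circular import; path assumed
celery = make_celery(flask_app)  # the instance that tasks.py imports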
And file tasks.py
from app import celery
from .celery import in_tx

@celery.task(name='app.mod_celery.update_client_health_task')
@in_tx
def update_client_health_task(client_id):
    from app.mod_core.libs.health import update_client_health
    update_client_health(client_id)
What could be the problem? Please don't suggest Redis.
I'm not a developer and I didn't write this code, but I really need to figure it out. I'm new to Python.
Thanks.

Integrating Celery with Flask using the application factory pattern: maximum recursion depth error

I am working from the cookiecutter Flask template, which uses the application factory pattern. I had Celery working for tasks that did not use the application context, but one of my tasks does need it; it makes a database query and updates a database object. Right now I don't have a circular import error (though I've had those with other attempts) but a maximum recursion depth error.
I consulted this blog post about how to use Celery with the application factory pattern, and I'm trying to follow this Stack Overflow answer closely, since it has a structure apparently also derived from cookiecutter Flask.
Relevant portions of my project structure:
cookiecutter_mbam
│ celeryconfig.py
│
└───cookiecutter_mbam
| __init__.py
│ app.py
│ run_celery.py
│
└───utility
| celery_utils.py
|
└───derivation
| tasks.py
|
└───storage
| tasks.py
|
└───xnat
tasks.py
__init__.py:
"""Main application package."""
from celery import Celery
celery = Celery('cookiecutter_mbam', config_source='cookiecutter_mbam.celeryconfig')
Relevant portion of app.py:
from cookiecutter_mbam import celery
def create_app(config_object='cookiecutter_mbam.settings'):
"""An application factory, as explained here: http://flask.pocoo.org/docs/patterns/appfactories/.
:param config_object: The configuration object to use.
"""
app = Flask(__name__.split('.')[0])
app.config.from_object(config_object)
init_celery(app, celery=celery)
register_extensions(app)
# ...
return app
run_celery.py:
from cookiecutter_mbam.app import create_app
from cookiecutter_mbam import celery
from cookiecutter_mbam.utility.celery_utils import init_celery
app = create_app(config_object='cookiecutter_mbam.settings')
init_celery(app, celery)
celeryconfig.py:
broker_url = 'redis://localhost:6379'
result_backend = 'redis://localhost:6379'
task_serializer = 'json'
result_serializer = 'json'
accept_content = ['json']
enable_utc = True
imports = {'cookiecutter_mbam.xnat.tasks', 'cookiecutter_mbam.storage.tasks', 'cookiecutter_mbam.derivation.tasks'}
Relevant portion of celery_utils.py:
def init_celery(app, celery):
    """Add flask app context to celery.Task"""
    class ContextTask(celery.Task):
        def __call__(self, *args, **kwargs):
            with app.app_context():
                return self.run(*args, **kwargs)
    celery.Task = ContextTask
    return celery
When I try to start the worker using celery -A cookiecutter_mbam.run_celery:celery worker I get a RecursionError: maximum recursion depth exceeded while calling a Python object error. (I also have tried several other ways to invoke the worker, all with the same error.) Here's an excerpt from the stack trace:
Traceback (most recent call last):
File "/Users/katie/anaconda/bin/celery", line 11, in <module>
sys.exit(main())
File "/Users/katie/anaconda/lib/python3.6/site-packages/celery/__main__.py", line 16, in main
_main()
File "/Users/katie/anaconda/lib/python3.6/site-packages/celery/bin/celery.py", line 322, in main
cmd.execute_from_commandline(argv)
File "/Users/katie/anaconda/lib/python3.6/site-packages/celery/bin/celery.py", line 496, in execute_from_commandline
super(CeleryCommand, self).execute_from_commandline(argv)))
File "/Users/katie/anaconda/lib/python3.6/site-packages/celery/bin/base.py", line 275, in execute_from_commandline
return self.handle_argv(self.prog_name, argv[1:])
File "/Users/katie/anaconda/lib/python3.6/site-packages/celery/bin/celery.py", line 488, in handle_argv
return self.execute(command, argv)
File "/Users/katie/anaconda/lib/python3.6/site-packages/celery/bin/celery.py", line 420, in execute
).run_from_argv(self.prog_name, argv[1:], command=argv[0])
File "/Users/katie/anaconda/lib/python3.6/site-packages/celery/bin/worker.py", line 221, in run_from_argv
*self.parse_options(prog_name, argv, command))
File "/Users/katie/anaconda/lib/python3.6/site-packages/celery/bin/base.py", line 398, in parse_options
self.parser = self.create_parser(prog_name, command)
File "/Users/katie/anaconda/lib/python3.6/site-packages/celery/bin/base.py", line 414, in create_parser
self.add_arguments(parser)
File "/Users/katie/anaconda/lib/python3.6/site-packages/celery/bin/worker.py", line 277, in add_arguments
default=conf.worker_state_db,
File "/Users/katie/anaconda/lib/python3.6/site-packages/celery/utils/collections.py", line 126, in __getattr__
return self[k]
File "/Users/katie/anaconda/lib/python3.6/site-packages/celery/utils/collections.py", line 429, in __getitem__
return getitem(k)
File "/Users/katie/anaconda/lib/python3.6/site-packages/celery/utils/collections.py", line 278, in __getitem__
return mapping[_key]
File "/Users/katie/anaconda/lib/python3.6/collections/__init__.py", line 989, in __getitem__
if key in self.data:
File "/Users/katie/anaconda/lib/python3.6/site-packages/celery/utils/collections.py", line 126, in __getattr__
return self[k]
File "/Users/katie/anaconda/lib/python3.6/collections/__init__.py", line 989, in __getitem__
if key in self.data:
File "/Users/katie/anaconda/lib/python3.6/site-packages/celery/utils/collections.py", line 126, in __getattr__
return self[k]
I understand the basic sense of this error -- something is calling itself infinitely. Maybe create_app. But I can't see why, and I don't know how to go about debugging this.
I'm also getting this when I try to load my site:
File "~/cookiecutter_mbam/cookiecutter_mbam/xnat/tasks.py", line 14, in <module>
@celery.task
AttributeError: module 'cookiecutter_mbam.celery' has no attribute 'task'
I did not have this problem when I was using the make_celery method described here, but that method creates circular import problems when you need your tasks to access the application context. Pointers on how to do this correctly with the Cookiecutter Flask template would be much appreciated.
I'm suspicious of that bit of code that's making the Flask app available to celery. It's skipping over some essential code by going directly to run(). (See https://github.com/celery/celery/blob/master/celery/app/task.py#L387)
Try calling the inherited __call__. Here's a snippet from one of my (working) apps.
# Arrange for tasks to have access to the Flask app
TaskBase = celery.Task
class ContextTask(TaskBase):
    def __call__(self, *args, **kwargs):
        with app.app_context():
            return TaskBase.__call__(self, *args, **kwargs)  ## << here
celery.Task = ContextTask
I also don't see where you're creating an instance of Celery and configuring it. I assume you have
celery = Celery(__name__)
and then need to
celery.config_from_object(...)
from somewhere within init_celery()
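Putting the two suggestions together, a rough sketch of init_celery (the config_source path is taken from the question's celeryconfig.py):
def init_celery(app, celery):
    """Configure Celery and add the Flask app context to celery.Task."""
    # apply the config module explicitly, as suggested above
    celery.config_from_object('cookiecutter_mbam.celeryconfig')
    TaskBase = celery.Task
    class ContextTask(TaskBase):
        def __call__(self, *args, **kwargs):
            # call the inherited __call__ instead of going straight to run()
            with app.app_context():
                return TaskBase.__call__(self, *args, **kwargs)
    celery.Task = ContextTask
    return celery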
This is solved. I had my celeryconfig.py in the wrong place. I needed to move it to the package directory, not the parent repo directory. It is incredibly unintuitive/uninformative that a misplaced config file, rather than causing an "I can't find that file"-type error, causes an infinite recursion. But at least I finally saw it and corrected it.
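For the record, the corrected layout, with celeryconfig.py inside the package next to app.py:
cookiecutter_mbam
└───cookiecutter_mbam
    │ __init__.py
    │ app.py
    │ celeryconfig.py
    │ run_celery.py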

Celery Beat unable to find database models (django.db.utils.OperationalError)

I have to add a few periodic tasks. I'm using Celery with Redis on the Django platform.
When I execute the method from shell_plus all is well. However, Celery Beat is unable to find the database instance properly.
Celery version = 4.1.0. I had previously installed django-celery-beat etc.
Database = MySQL
Where am I wrong?
Thanks in advance.
Celery Command
(venv)$:/data/project/(sesh/dev)$ celery -A freightquotes worker -B -E -l INFO --autoscale=2,1
settings.py
CELERY_BROKER_URL = 'redis://127.0.0.1:6379'
CELERY_BROKER_TRANSPORT = 'redis'
CELERY_BROKER_TRANSPORT_OPTIONS = {'visibility_timeout': 604800}
CELERY_RESULT_BACKEND = BROKER_URL
CELERY_TASK_RESULT_EXPIRES = datetime.timedelta(days=1) # Take note of the CleanUp task in middleware/tasks.py
CELERY_MAX_CACHED_RESULTS = 1000
CELERYBEAT_SCHEDULER = "djcelery.schedulers.DatabaseScheduler"
CELERY_TRACK_STARTED = True
CELERY_SEND_EVENTS = True
CELERY_ACCEPT_CONTENT = ['pickle', 'json', 'msgpack', 'yaml']
REDIS_CONNECT_RETRY = True
REDIS_DB = 0
BROKER_POOL_LIMIT = 2
CELERYD_CONCURRENCY = 1
CELERYD_TASK_TIME_LIMIT = 600
CELERY_BEAT_SCHEDULE = {
    'test': {
        'task': 'loads.tasks.test',
        'schedule': crontab(minute='*/1'),
    },
}
__init__.py
from __future__ import absolute_import, unicode_literals
from .celery import app as celery_app
__all__ = ['celery_app']
celery.py
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'project.settings.base')
app = Celery('project')
app.config_from_object('django.conf:settings', namespace='CELERY')
app.autodiscover_tasks()
@app.task(bind=True)
def debug_task(self):
    print('Request: {0!r}'.format(self.request))
loads/tasks.py
from celery import task
from loads.models import Load  # assuming the Load model lives in the loads app

@task()
def test():
    x = [i.id for i in Load.objects.all()]
    print(x)
Error
[2017-11-30 03:52:00,032: ERROR/ForkPoolWorker-2] Task loads.tasks.test[0020e4ae-5e52-49d8-863f-e51c2acfd7a7] raised unexpected: OperationalError('no such table: loads_load',)
Traceback (most recent call last):
File "/data/project/venv/lib/python3.4/site-packages/django/db/backends/utils.py", line 65, in execute
return self.cursor.execute(sql, params)
File "/data/project/venv/lib/python3.4/site-packages/django/db/backends/sqlite3/base.py", line 328, in execute
return Database.Cursor.execute(self, query, params)
sqlite3.OperationalError: no such table: loads_load
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/data/project/venv/lib/python3.4/site-packages/celery/app/trace.py", line 374, in trace_task
R = retval = fun(*args, **kwargs)
File "/data/project/venv/lib/python3.4/site-packages/celery/app/trace.py", line 629, in __protected_call__
return self.run(*args, **kwargs)
File "/data/project/loads/tasks.py", line 146, in test
x = [i.id for i in Load.objects.all()]
File "/data/project/venv/lib/python3.4/site-packages/django/db/models/query.py", line 250, in __iter__
self._fetch_all()
File "/data/project/venv/lib/python3.4/site-packages/django/db/models/query.py", line 1103, in _fetch_all
self._result_cache = list(self._iterable_class(self))
File "/data/project/venv/lib/python3.4/site-packages/django/db/models/query.py", line 53, in __iter__
results = compiler.execute_sql(chunked_fetch=self.chunked_fetch)
File "/data/project/venv/lib/python3.4/site-packages/django/db/models/sql/compiler.py", line 886, in execute_sql
raise original_exception
File "/data/project/venv/lib/python3.4/site-packages/django/db/models/sql/compiler.py", line 876, in execute_sql
cursor.execute(sql, params)
File "/data/project/venv/lib/python3.4/site-packages/django/db/backends/utils.py", line 65, in execute
return self.cursor.execute(sql, params)
File "/data/project/venv/lib/python3.4/site-packages/django/db/utils.py", line 94, in __exit__
six.reraise(dj_exc_type, dj_exc_value, traceback)
File "/data/project/venv/lib/python3.4/site-packages/django/utils/six.py", line 685, in reraise
raise value.with_traceback(tb)
File "/data/project/venv/lib/python3.4/site-packages/django/db/backends/utils.py", line 65, in execute
return self.cursor.execute(sql, params)
File "/data/project/venv/lib/python3.4/site-packages/django/db/backends/sqlite3/base.py", line 328, in execute
return Database.Cursor.execute(self, query, params)
django.db.utils.OperationalError: no such table: loads_load
I found the answer.
We have a few settings files, like base, dev, prod and local, and the database settings differ in each of them.
It works when I point the Celery app to the local settings, which have the full database config. In that case I had to copy all the Celery config from base to local.
I tried passing django.conf.settings to os.environ.setdefault, but that didn't work.
So the answer is incorrect configuration: if we have everything in one file we are fine; if we split it, we have to find a workaround.
Edit
Since the issue was finding the right settings file, I now start Celery with the settings module set explicitly:
DJANGO_SETTINGS_MODULE='project.settings.dev' celery -A project worker -B -E -l INFO --autoscale=2,1
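The same setting can live in the supervisord entry instead, along the lines of the configs shown elsewhere on this page (program name and paths are placeholders):
[program:project-celery]
command=/path/to/venv/bin/celery -A project worker -B -E -l INFO --autoscale=2,1
environment=DJANGO_SETTINGS_MODULE='project.settings.dev'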

Django + Celery + Supervisord + Redis error when setting up

I am setting up the following components on a CentOS server. I got the supervisord task for the web site up and running, but I am blocked on the supervisor setup for Celery. It seems that it recognizes the tasks, but when I try to execute them, it won't connect. My Redis is up and running on port 6380.
Django==1.10.3
amqp==1.4.9
billiard==3.3.0.23
celery==3.1.25
kombu==3.0.37
pytz==2016.10
my celeryd.ini
[program:celeryd]
command=/root/myproject/myprojectenv/bin/celery worker -A mb --loglevel=INFO
environment=PATH="/root/myproject/myprojectenv/bin/",VIRTUAL_ENV="/root/myproject/myprojectenv",PYTHONPATH="/root/myproject/myprojectenv/lib/python2.7:/root/myproject/myprojectenv/lib/python2.7/site-packages"
directory=/home/.../myapp/
user=nobody
numprocs=1
stdout_logfile=/home/.../myapp/log_celery/worker.log
stderr_logfile=/home/.../myapp/log_celery/worker.log
autostart=true
autorestart=true
startsecs=10
; Need to wait for currently executing tasks to finish at shutdown.
; Increase this if you have very long running tasks.
stopwaitsecs = 1200
; When resorting to send SIGKILL to the program to terminate it
; send SIGKILL to its whole process group instead,
; taking care of its children as well.
killasgroup=true
; Set Celery priority higher than default (999)
; so, if rabbitmq(redis) is supervised, it will start first.
priority=1000
The process starts and when I go to the project folder and do:
>python manage.py celery status
celery@ssd-1v: OK
1 node online.
When I open the log file of celery I see that the tasks are loaded.
[tasks]
. mb.tasks.add
. mb.tasks.update_search_index
. orders.tasks.order_created
my mb/tasks.py
from mb.celeryapp import app
import django
django.setup()

@app.task
def add(x, y):
    print(x + y)
    return x + y
my mb/celeryapp.py
from __future__ import absolute_import, unicode_literals
import os
from celery import Celery
from django.conf import settings
# set the default Django settings module for the 'celery' program.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "mb.settings")
app = Celery('mb', broker='redis://localhost:6380/', backend='redis://localhost:6380/')
app.conf.broker_url = 'redis://localhost:6380/0'
app.conf.result_backend = 'redis://localhost:6380/'
app.conf.timezone = 'Europe/Sofia'
app.config_from_object('django.conf:settings')
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
my mb/settings.py:
...
WSGI_APPLICATION = 'mb.wsgi.application'
BROKER_URL = 'redis://localhost:6380/0'
CELERYBEAT_SCHEDULER = 'djcelery.schedulers.DatabaseScheduler'
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
...
when I run:
python manage.py shell
>>> from mb.tasks import add
>>> add.name
'mb.tasks.add'
>>> result=add.delay(1,1)
>>> result.ready()
False
>>> result.status
'PENDING'
And as mentioned earlier I do not see any change in the log anymore.
If I try to run from the command line:
/root/myproject/myprojectenv/bin/celery worker -A mb --loglevel=INFO
Running a worker with superuser privileges when the
worker accepts messages serialized with pickle is a very bad idea!
If you really want to continue then you have to set the C_FORCE_ROOT
environment variable (but please think about this before you do).
User information: uid=0 euid=0 gid=0 egid=0
But I suppose that's normal, since I otherwise run it as user nobody. The interesting thing is that the bare command celery status (without python manage.py celery status) gives a connection error, probably because it is looking for a different port for Redis, yet the supervisord process starts normally... and when I call 'celery worker -A mb' it says it's OK. Any ideas?
(myprojectenv) [root@ssd-1v]# celery status
Traceback (most recent call last):
File "/root/myproject/myprojectenv/bin/celery", line 11, in <module>
sys.exit(main())
File "/root/myproject/myprojectenv/lib/python2.7/site-packages/celery/__main__.py", line 30, in main
main()
File "/root/myproject/myprojectenv/lib/python2.7/site-packages/celery/bin/celery.py", line 81, in main
cmd.execute_from_commandline(argv)
File "/root/myproject/myprojectenv/lib/python2.7/site-packages/celery/bin/celery.py", line 793, in execute_from_commandline
super(CeleryCommand, self).execute_from_commandline(argv)))
File "/root/myproject/myprojectenv/lib/python2.7/site-packages/celery/bin/base.py", line 311, in execute_from_commandline
return self.handle_argv(self.prog_name, argv[1:])
File "/root/myproject/myprojectenv/lib/python2.7/site-packages/celery/bin/celery.py", line 785, in handle_argv
return self.execute(command, argv)
File "/root/myproject/myprojectenv/lib/python2.7/site-packages/celery/bin/celery.py", line 717, in execute
).run_from_argv(self.prog_name, argv[1:], command=argv[0])
File "/root/myproject/myprojectenv/lib/python2.7/site-packages/celery/bin/base.py", line 315, in run_from_argv
sys.argv if argv is None else argv, command)
File "/root/myproject/myprojectenv/lib/python2.7/site-packages/celery/bin/base.py", line 377, in handle_argv
return self(*args, **options)
File "/root/myproject/myprojectenv/lib/python2.7/site-packages/celery/bin/base.py", line 274, in __call__
ret = self.run(*args, **kwargs)
File "/root/myproject/myprojectenv/lib/python2.7/site-packages/celery/bin/celery.py", line 473, in run
replies = I.run('ping', **kwargs)
File "/root/myproject/myprojectenv/lib/python2.7/site-packages/celery/bin/celery.py", line 325, in run
return self.do_call_method(args, **kwargs)
File "/root/myproject/myprojectenv/lib/python2.7/site-packages/celery/bin/celery.py", line 347, in do_call_method
return getattr(i, method)(*args)
return getattr(i, method)(*args)
File "/root/myproject/myprojectenv/lib/python2.7/site-packages/celery/app/control.py", line 100, in ping
return self._request('ping')
File "/root/myproject/myprojectenv/lib/python2.7/site-packages/celery/app/control.py", line 71, in _request
timeout=self.timeout, reply=True,
File "/root/myproject/myprojectenv/lib/python2.7/site-packages/celery/app/control.py", line 316, in broadcast
limit, callback, channel=channel,
File "/root/myproject/myprojectenv/lib/python2.7/site-packages/kombu/pidbox.py", line 283, in _broadcast
chan = channel or self.connection.default_channel
File "/root/myproject/myprojectenv/lib/python2.7/site-packages/kombu/connection.py", line 771, in default_channel
self.connection
File "/root/myproject/myprojectenv/lib/python2.7/site-packages/kombu/connection.py", line 756, in connection
self._connection = self._establish_connection()
File "/root/myproject/myprojectenv/lib/python2.7/site-packages/kombu/connection.py", line 711, in _establish_connection
conn = self.transport.establish_connection()
File "/root/myproject/myprojectenv/lib/python2.7/site-packages/kombu/transport/pyamqp.py", line 116, in establish_connection
conn = self.Connection(**opts)
File "/root/myproject/myprojectenv/lib/python2.7/site-packages/amqp/connection.py", line 165, in __init__
self.transport = self.Transport(host, connect_timeout, ssl)
File "/root/myproject/myprojectenv/lib/python2.7/site-packages/amqp/connection.py", line 186, in Transport
return create_transport(host, connect_timeout, ssl)
File "/root/myproject/myprojectenv/lib/python2.7/site-packages/amqp/transport.py", line 299, in create_transport
return TCPTransport(host, connect_timeout)
File "/root/myproject/myprojectenv/lib/python2.7/site-packages/amqp/transport.py", line 95, in __init__
raise socket.error(last_err)
socket.error: [Errno 111] Connection refused
Any help will be highly appreciated.
UPDATE:
when I run
$:python manage.py shell
>>> from mb.tasks import add
>>> add
<@task: mb.tasks.add of mb:0x2b3f6d0>
the 0x2b3f6d0 is different from what celery claims to be its memory space in its log, namely:
[config]
- ** ---------- .> app: mb:0x3495bd0
- ** ---------- .> transport: redis://localhost:6380/0
- ** ---------- .> results: disabled://
- *** --- * --- .> concurrency: 1 (prefork)
OK, the answer in this case was that the gunicorn file was actually starting the project from the system-wide Python installation instead of the virtualenv.
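In supervisor/gunicorn terms that means invoking the virtualenv's own binary explicitly, e.g. (a sketch; the app module is assumed from this question):
[program:mb-gunicorn]
; use the virtualenv's gunicorn, not the system-wide one
command=/root/myproject/myprojectenv/bin/gunicorn mb.wsgi:application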

celery 3.0.12 TypeError: run() got multiple values for keyword argument 'detach'

I want to run periodic tasks using celery and celerybeat. I am using
celery 3.0.12, django-celery 3.0.11, kombu 2.5.3, billiard 2.7.3.19, Python 2.6, and Django 1.4.2. Can anyone please tell me where it's going wrong, or whether I'm missing any configuration?
Traceback (most recent call last):
File "manage.py", line 10, in <module>
execute_from_command_line(sys.argv)
File "/usr/lib/python2.6/site-packages/django/core/management/__init__.py", line 443, in execute_from_command_line
utility.execute()
File "/usr/lib/python2.6/site-packages/django/core/management/__init__.py", line 382, in execute
self.fetch_command(subcommand).run_from_argv(self.argv)
File "/usr/lib/python2.6/site-packages/django_celery-3.0.11-py2.6.egg/djcelery/management/base.py", line 74, in run_from_argv
return super(CeleryCommand, self).run_from_argv(argv)
File "/usr/lib/python2.6/site-packages/django/core/management/base.py", line 196, in run_from_argv
self.execute(*args, **options.__dict__)
File "/usr/lib/python2.6/site-packages/django_celery-3.0.11-py2.6.egg/djcelery/management/base.py", line 67, in execute
super(CeleryCommand, self).execute(*args, **options)
File "/usr/lib/python2.6/site-packages/django/core/management/base.py", line 232, in execute
output = self.handle(*args, **options)
File "/usr/lib/python2.6/site-packages/django_celery-3.0.11-py2.6.egg/djcelery/management/commands/celerybeat.py", line 24, in handle
beat.run(*args, **options)
TypeError: run() got multiple values for keyword argument 'detach'
TIA:)
update:
task.py
from celery.utils.log import get_task_logger
import celery
from celery.task.schedules import crontab
from celery.task import periodic_task

@periodic_task(run_every=crontab(hour='*', minute='10', day_of_week='*'))
def spam_task():
    """
    Fetch the users' spam and send mail to the spam users
    using django EmailMultiAlternatives.
    """

@periodic_task(run_every=crontab(hour='*', minute='10', day_of_week='*'))
def notify():
    """
    Fetch the activities and notify users by sending mail.
    """
settings.py
CELERY_IMPORTS = ("myapp.tasks", )
CELERY_ENABLE_UTC = True
CELERY_TIMEZONE = 'America/Chicago'
import djcelery
djcelery.setup_loader()
BROKER_URL = 'django://'
BROKER_HOST = 'localhost'
BROKER_PORT = 5672
BROKER_USER = 'guest'
BROKER_PASSWORD = 'guest'
BROKER_VHOST = '/'
CELERY_EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
CELERY_EMAIL_TASK_CONFIG = {
    'queue': 'email',
    'rate_limit': '10/m',
    'name': 'djcelery_email_send',
    'ignore_result': True,
}
