Celery error “Received 0x00 while expecting 0xce” - python

I use Celery 4.3.0 and RabbitMQ 3.6.6. There is also an HAProxy load-balancing RabbitMQ, but right now it only uses one RabbitMQ node. The HAProxy client/server timeouts are 24 hours.
Sometimes I get an error and the task execution status is not returned, while the tasks continue to run:
Received 0x00 while expecting 0xce
Traceback (most recent call last):
  File "/var/www/serv/app/mod_axmsg/dispatch.py", line 376, in execute
    result_value = func_meta.func(*task.args_list, **task.kwargs_dict)
  File "/var/www/serv/app/mod_rmc/libs/health.py", line 91, in update_all_health
    health.update_all_health()
  File "/var/www/serv/app/mod_core/libs/health.py", line 515, in update_all_health
    result.join()
  File "/usr/local/serv/lib/python3.5/site-packages/celery/result.py", line 765, in join
    interval=interval, no_ack=no_ack, on_interval=on_interval,
  File "/usr/local/serv/lib/python3.5/site-packages/celery/result.py", line 226, in get
    on_message=on_message,
  File "/usr/local/serv/lib/python3.5/site-packages/celery/backends/asynchronous.py", line 188, in wait_for_pending
    for _ in self._wait_for_pending(result, **kwargs):
  File "/usr/local/serv/lib/python3.5/site-packages/celery/backends/asynchronous.py", line 255, in _wait_for_pending
    on_interval=on_interval):
  File "/usr/local/serv/lib/python3.5/site-packages/celery/backends/asynchronous.py", line 56, in drain_events_until
    yield self.wait_for(p, wait, timeout=1)
  File "/usr/local/serv/lib/python3.5/site-packages/celery/backends/asynchronous.py", line 65, in wait_for
    wait(timeout=timeout)
  File "/usr/local/serv/lib/python3.5/site-packages/celery/backends/rpc.py", line 63, in drain_events
    return self._connection.drain_events(timeout=timeout)
  File "/usr/local/serv/lib/python3.5/site-packages/kombu/connection.py", line 323, in drain_events
    return self.transport.drain_events(self.connection, **kwargs)
  File "/usr/local/serv/lib/python3.5/site-packages/kombu/transport/pyamqp.py", line 103, in drain_events
    return connection.drain_events(**kwargs)
  File "/usr/local/serv/lib/python3.5/site-packages/amqp/connection.py", line 505, in drain_events
    while not self.blocking_read(timeout):
  File "/usr/local/serv/lib/python3.5/site-packages/amqp/connection.py", line 510, in blocking_read
    frame = self.transport.read_frame()
  File "/usr/local/serv/lib/python3.5/site-packages/amqp/transport.py", line 280, in read_frame
    'Received {0:#04x} while expecting 0xce'.format(ch))
amqp.exceptions.UnexpectedFrame: Received 0x00 while expecting 0xce
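(For context: in AMQP 0-9-1 every frame ends with the frame-end octet 0xce, so this exception means the client lost frame synchronization on the socket. That typically points to the TCP stream being corrupted or cut mid-frame, for example by a proxy or load balancer silently dropping an idle connection.)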
My code: celery.py
import logging
from app import db
from celery import Celery

LOG = logging.getLogger(__name__)

def make_celery(flask_app):
    broker = flask_app.config['AMQP_URL']
    celery = Celery('mod_celery', broker=broker, backend="rpc://")
    celery.conf.update(flask_app.config)

    class ContextTask(celery.Task):
        def __call__(self, *args, **kwargs):
            with flask_app.app_context():
                return self.run(*args, **kwargs)

    celery.Task = ContextTask
    return celery

def in_tx(task_function):
    def tx_wrapper(*args):
        try:
            task_function(*args)
            db.session.commit()
        except Exception as ex:
            msg = 'Could not execute task {}. Reason: {}.' \
                .format(task_function.__name__, str(ex))
            LOG.error(msg)
            db.session.rollback()
    return tx_wrapper
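(For context, a factory like this is typically called once from the app package so that tasks.py can do from app import celery. A minimal sketch, where the Flask wiring, broker URL, and use of Flask-SQLAlchemy are assumptions:

# app/__init__.py (hypothetical sketch of how make_celery is wired up)
from flask import Flask
from flask_sqlalchemy import SQLAlchemy

db = SQLAlchemy()                       # the db that celery.py's in_tx imports
app = Flask(__name__)
app.config['AMQP_URL'] = 'amqp://guest:guest@localhost:5672//'   # assumed broker URL
db.init_app(app)

from .celery import make_celery         # imported late so app and db already exist
celery = make_celery(app)               # the object tasks.py imports via "from app import celery"
)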
And the file tasks.py:
from app import celery
from .celery import in_tx

@celery.task(name='app.mod_celery.update_client_health_task')
@in_tx
def update_client_health_task(client_id):
    from app.mod_core.libs.health import update_client_health
    update_client_health(client_id)
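(The result.join() frames at the top of the traceback suggest the caller fans these tasks out as a Celery group and then blocks on the results, along the lines of this sketch; the source of client_ids is an assumption:

# sketch of the dispatch pattern implied by the traceback's result.join()
from celery import group

def update_all_health(client_ids):
    # fan out one task per client and wait for every result
    result = group(update_client_health_task.s(cid) for cid in client_ids).apply_async()
    result.join()   # drains the rpc:// backend; this is where UnexpectedFrame surfaces
)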
What could be the problem? Please don't offer me Redis.
I'm not a developer and I didn't write this code, but I really need to figure it out. I'm new to Python.
Thanks.

Related

Why am I getting pymongo.errors.AutoReconnect: connection pool paused

I'm getting this AutoReconnect error, and there are around 100 open connections in the logs during this call to .objects. Here is the document:
class NotificationDoc(Document):
    patient_id = StringField(max_length=32)
    type = StringField(max_length=32)
    sms_sent = BooleanField(default=False)
    email_sent = BooleanField(default=False)
    sending_time = DateTimeField(default=datetime.utcnow)

    def __unicode__(self):
        return "{}_{}_{}".format(self.patient_id, self.type, self.sending_time.strftime('%Y-%m-%d'))
and this is the query call that causes the issue:
docs = NotificationDoc.objects(sending_time__lte=from_date)
This is the complete stacktrace. How can I understand what is going on?
pymongo.errors.AutoReconnect: connection pool paused
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
  File "/usr/local/lib/python3.8/site-packages/celery/app/trace.py", line 451, in trace_task
    R = retval = fun(*args, **kwargs)
  File "/usr/local/lib/python3.8/site-packages/celery/app/trace.py", line 734, in __protected_call__
    return self.run(*args, **kwargs)
  File "/app/src/reminder/configurations/app/worker.py", line 106, in schedule_recall
    schedule_recall_use_case.execute()
  File "/app/src/reminder/domain/notification/recall/use_cases/schedule_recall.py", line 24, in execute
    notifications_sent = self.notifications_provider.find_recalled_notifications_for_date(no_recalls_before_date)
  File "/app/src/reminder/data_providers/database/odm/repositories.py", line 42, in find_recalled_notifications_for_date
    if NotificationDoc.objects(sending_time__lte=from_date).count() > 0:
  File "/usr/local/lib/python3.8/site-packages/mongoengine/queryset/queryset.py", line 144, in count
    return super().count(with_limit_and_skip)
  File "/usr/local/lib/python3.8/site-packages/mongoengine/queryset/base.py", line 423, in count
    count = count_documents(
  File "/usr/local/lib/python3.8/site-packages/mongoengine/pymongo_support.py", line 38, in count_documents
    return collection.count_documents(filter=filter, **kwargs)
  File "/usr/local/lib/python3.8/site-packages/pymongo/collection.py", line 1502, in count_documents
    return self.__database.client._retryable_read(
  File "/usr/local/lib/python3.8/site-packages/pymongo/mongo_client.py", line 1307, in _retryable_read
    with self._secondaryok_for_server(read_pref, server, session) as (
  File "/usr/local/lib/python3.8/contextlib.py", line 113, in __enter__
    return next(self.gen)
  File "/usr/local/lib/python3.8/site-packages/pymongo/mongo_client.py", line 1162, in _secondaryok_for_server
    with self._get_socket(server, session) as sock_info:
  File "/usr/local/lib/python3.8/contextlib.py", line 113, in __enter__
    return next(self.gen)
  File "/usr/local/lib/python3.8/site-packages/pymongo/mongo_client.py", line 1099, in _get_socket
    with server.get_socket(
  File "/usr/local/lib/python3.8/contextlib.py", line 113, in __enter__
    return next(self.gen)
  File "/usr/local/lib/python3.8/site-packages/pymongo/pool.py", line 1371, in get_socket
    sock_info = self._get_socket(all_credentials)
  File "/usr/local/lib/python3.8/site-packages/pymongo/pool.py", line 1436, in _get_socket
    self._raise_if_not_ready(emit_event=True)
  File "/usr/local/lib/python3.8/site-packages/pymongo/pool.py", line 1407, in _raise_if_not_ready
    _raise_connection_failure(
  File "/usr/local/lib/python3.8/site-packages/pymongo/pool.py", line 250, in _raise_connection_failure
    raise AutoReconnect(msg) from error
pymongo.errors.AutoReconnect: mongo:27017: connection pool paused
It turned out that Celery was leaving connections open, so here is the solution:
from mongoengine import connect, disconnect
from celery.signals import task_prerun

@task_prerun.connect
def on_task_init(*args, **kwargs):
    # tear down the stale connection and rebuild it before every task
    # (db/host/port come from your own settings)
    disconnect(alias='default')
    connect(db, host=host, port=port, maxPoolSize=400, minPoolSize=200, alias='default')
This could be related to the fact that PyMongo is not fork-safe, which matters if you're using a process pool in any way; that includes server software like uWSGI and even some configurations of popular ASGI servers.
Once you run a query in PyMongo, that MongoClient object becomes fork-unsafe; a MongoClient that has never run any queries is still fork-safe. Database and Collection objects created from it reference back to their parent MongoClient without checking whether it is still valid.
I do see you are using MongoEngine, which uses PyMongo underneath. I don't know the semantics of that particular library, but I would assume they are the same.
In short: you must re-create your connections as part of your forking process.
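A minimal sketch of that re-connect-on-fork idea for this MongoEngine setup, using Celery's worker_process_init signal; the database name, host, and pool size are assumptions:

from celery.signals import worker_process_init
from mongoengine import connect, disconnect

@worker_process_init.connect
def reset_mongo_connection(**kwargs):
    # drop any client state inherited from the parent process
    disconnect(alias='default')
    # build a fresh client owned by this worker process (parameters assumed)
    connect('reminders', host='mongo', port=27017, maxPoolSize=400, alias='default')

Compared with the task_prerun hook above, this reconnects once per worker process rather than once per task, which keeps the pool churn down.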
pip install -U pymongo; this is already fixed upstream; see https://github.com/mongodb/mongo-python-driver/pull/944

Celery not accepting pickle even after allowing it

I'm trying to write a celery application that passes numpy arrays (or any arbitrary objects) to the workers. As far as I can tell, this requires serialization to occur via pickle (NB: I'm aware of the security implications but this isn't a concern in this case).
However, even after trying every possible way I could find to allow pickle as a serializer, I keep getting the following kombu exception:
kombu.exceptions.ContentDisallowed: Refusing to deserialize untrusted content of type pickle (application/x-python-serialize)
My current files are:
# tasks.py
from celery import Celery

app = Celery(
    'tasks',
    broker='redis://localhost',
    accept_content=['pickle'],
    task_serializer='pickle'
)

@app.task
def adding(x, y):
    return x + y

if __name__ == '__main__':
    import numpy as np
    adding.apply_async((np.array([1]), np.array([1])), serializer='pickle')
In addition I have a config file:
# celeryconfig.py
print('configuring...')
accept_content = ['pickle', 'application/x-python-serialize']
task_serializer = 'pickle'
result_serializer = 'pickle'
from kombu import serialization
serialization.register_pickle()
serialization.enable_insecure_serializers()
However, if I run the worker (celery -A tasks worker --loglevel=info) and then execute the code that makes an async call (python tasks.py), I get the following traceback. Am I missing something?
[2018-06-16 11:46:23,617: CRITICAL/MainProcess] Unrecoverable error: ContentDisallowed('Refusing to deserialize untrusted content of type pickle (application/x-python-serialize)',)
Traceback (most recent call last):
  File "/opt/anaconda/envs/Python3/lib/python3.6/site-packages/celery/worker/worker.py", line 205, in start
    self.blueprint.start(self)
  File "/opt/anaconda/envs/Python3/lib/python3.6/site-packages/celery/bootsteps.py", line 119, in start
    step.start(parent)
  File "/opt/anaconda/envs/Python3/lib/python3.6/site-packages/celery/bootsteps.py", line 369, in start
    return self.obj.start()
  File "/opt/anaconda/envs/Python3/lib/python3.6/site-packages/celery/worker/consumer/consumer.py", line 322, in start
    blueprint.start(self)
  File "/opt/anaconda/envs/Python3/lib/python3.6/site-packages/celery/bootsteps.py", line 119, in start
    step.start(parent)
  File "/opt/anaconda/envs/Python3/lib/python3.6/site-packages/celery/worker/consumer/consumer.py", line 598, in start
    c.loop(*c.loop_args())
  File "/opt/anaconda/envs/Python3/lib/python3.6/site-packages/celery/worker/loops.py", line 91, in asynloop
    next(loop)
  File "/opt/anaconda/envs/Python3/lib/python3.6/site-packages/kombu/asynchronous/hub.py", line 354, in create_loop
    cb(*cbargs)
  File "/opt/anaconda/envs/Python3/lib/python3.6/site-packages/kombu/transport/redis.py", line 1040, in on_readable
    self.cycle.on_readable(fileno)
  File "/opt/anaconda/envs/Python3/lib/python3.6/site-packages/kombu/transport/redis.py", line 337, in on_readable
    chan.handlers[type]()
  File "/opt/anaconda/envs/Python3/lib/python3.6/site-packages/kombu/transport/redis.py", line 724, in _brpop_read
    self.connection._deliver(loads(bytes_to_str(item)), dest)
  File "/opt/anaconda/envs/Python3/lib/python3.6/site-packages/kombu/transport/virtual/base.py", line 983, in _deliver
    callback(message)
  File "/opt/anaconda/envs/Python3/lib/python3.6/site-packages/kombu/transport/virtual/base.py", line 633, in _callback
    return callback(message)
  File "/opt/anaconda/envs/Python3/lib/python3.6/site-packages/kombu/messaging.py", line 624, in _receive_callback
    return on_m(message) if on_m else self.receive(decoded, message)
  File "/opt/anaconda/envs/Python3/lib/python3.6/site-packages/celery/worker/consumer/consumer.py", line 572, in on_task_received
    callbacks,
  File "/opt/anaconda/envs/Python3/lib/python3.6/site-packages/celery/worker/strategy.py", line 136, in task_message_handler
    if body is None and 'args' not in message.payload:
  File "/opt/anaconda/envs/Python3/lib/python3.6/site-packages/kombu/message.py", line 207, in payload
    return self._decoded_cache if self._decoded_cache else self.decode()
  File "/opt/anaconda/envs/Python3/lib/python3.6/site-packages/kombu/message.py", line 192, in decode
    self._decoded_cache = self._decode()
  File "/opt/anaconda/envs/Python3/lib/python3.6/site-packages/kombu/message.py", line 197, in _decode
    self.content_encoding, accept=self.accept)
  File "/opt/anaconda/envs/Python3/lib/python3.6/site-packages/kombu/serialization.py", line 253, in loads
    raise self._for_untrusted_content(content_type, 'untrusted')
kombu.exceptions.ContentDisallowed: Refusing to deserialize untrusted content of type pickle (application/x-python-serialize)
For anyone coming to this question:
The answer was to use the app.config_from_object method:
import celeryconfig
app.config_from_object(celeryconfig)
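Put together with the question's files, the working version looks roughly like this (a sketch; only the config_from_object line is new relative to the question):

# tasks.py (sketch)
import celeryconfig
from celery import Celery

app = Celery('tasks', broker='redis://localhost')
app.config_from_object(celeryconfig)   # worker and client both load the pickle settings

@app.task
def adding(x, y):
    return x + y

The important part is that the worker process started with celery -A tasks worker now also sees accept_content=['pickle'], which is what it checks before deserializing the message.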

Can't initiate GET request in web.py

I'm trying to run two servers using web.py and initiating calls from one to another. Both servers start normally, but when I try to call a URL, the stack trace below is thrown.
import web

urls = (
    '/ping', 'Ping',
    '/acqlock/+(.*)', 'Acquire',
)

class MSA(web.application):
    def run(self, port=8081, *middleware):
        func = self.wsgifunc(*middleware)
        return web.httpserver.runsimple(func, ('127.0.0.1', port))

app = MSA(urls, globals())

if __name__ == "__main__":
    app.run(port=8081)

class Acquire:
    def GET(self, resource_name):
        print resource_name
        response = app.request('http://127.0.0.1:8080/acqlock/' + resource_name, method='GET')
        return response
But I keep getting this error after calling /acqlock.
Traceback (most recent call last):
  File "C:\Python27\lib\site-packages\web\wsgiserver\__init__.py", line 1245, in communicate
    req.respond()
  File "C:\Python27\lib\site-packages\web\wsgiserver\__init__.py", line 775, in respond
    self.server.gateway(self).respond()
  File "C:\Python27\lib\site-packages\web\wsgiserver\__init__.py", line 2018, in respond
    response = self.req.server.wsgi_app(self.env, self.start_response)
  File "C:\Python27\lib\site-packages\web\httpserver.py", line 306, in __call__
    return self.app(environ, xstart_response)
  File "C:\Python27\lib\site-packages\web\httpserver.py", line 274, in __call__
    return self.app(environ, start_response)
  File "C:\Python27\lib\site-packages\web\application.py", line 279, in wsgi
    result = self.handle_with_processors()
  File "C:\Python27\lib\site-packages\web\application.py", line 249, in handle_with_processors
    return process(self.processors)
  File "C:\Python27\lib\site-packages\web\application.py", line 246, in process
    raise self.internalerror()
  File "C:\Python27\lib\site-packages\web\application.py", line 515, in internalerror
    parent = self.get_parent_app()
  File "C:\Python27\lib\site-packages\web\application.py", line 500, in get_parent_app
    if self in web.ctx.app_stack:
AttributeError: 'ThreadedDict' object has no attribute 'app_stack'
Use the requests library for this.
import requests

response = requests.request(method='GET', url='http://127.0.0.1:8080/acqlock/' + resource_name)
Note: you have used port 8080 in the URL even though you have hosted the web.py app on port 8081.

I wrote a project with tornado, but this exception is always in my log file

This is the error log:
[I 160308 11:09:59 web:1908] 200 GET /admin/realtime (117.93.180.216) 107.13ms
[E 160308 11:09:59 http1connection:54] Uncaught exception
Traceback (most recent call last):
  File "/usr/local/lib/python3.4/dist-packages/tornado/http1connection.py", line 238, in _read_message
    delegate.finish()
  File "/usr/local/lib/python3.4/dist-packages/tornado/httpserver.py", line 290, in finish
    self.delegate.finish()
  File "/usr/local/lib/python3.4/dist-packages/tornado/web.py", line 1984, in finish
    self.execute()
  File "/usr/local/lib/python3.4/dist-packages/blueware-1.0.10/blueware/hooks/framework_tornado/web.py", line 480, in _bw_wrapper__RequestDispatcher_execute
    future = wrapped(*args, **kwargs)
  File "/usr/local/lib/python3.4/dist-packages/tornado/web.py", line 2004, in execute
    **self.handler_kwargs)
  File "/usr/local/lib/python3.4/dist-packages/blueware-1.0.10/blueware/hooks/framework_tornado/web.py", line 448, in _bw_wrapper_RequestHandler___init___
    return wrapped(*args, **kwargs)
  File "/usr/local/lib/python3.4/dist-packages/tornado/web.py", line 185, in __init__
    self.initialize(**kwargs)
  File "/usr/local/lib/python3.4/dist-packages/tornado/web.py", line 2714, in wrapper
    self.redirect(url)
  File "/usr/local/lib/python3.4/dist-packages/tornado/web.py", line 671, in redirect
    self.finish()
  File "/usr/local/lib/python3.4/dist-packages/blueware-1.0.10/blueware/hooks/framework_tornado/web.py", line 309, in _bw_wrapper_RequestHandler_finish_
    return wrapped(*args, **kwargs)
  File "/usr/local/lib/python3.4/dist-packages/tornado/web.py", line 934, in finish
    self.flush(include_footers=True)
  File "/usr/local/lib/python3.4/dist-packages/tornado/web.py", line 870, in flush
    for transform in self._transforms:
TypeError: 'NoneType' object is not iterable
[I 160308 11:10:00 web:1908] 200 GET /admin/order?order_type=1&order_status=1&page=0&action=allreal (49.89.27.173) 134.53ms
Can anyone tell me how to solve this problem? Thank you very much.
I assume that OneAPM (the blueware agent) is compatible with your Python and Tornado versions; however, it can be tricky.
Solution
Move self.redirect(url) from your handler's initialize method to the get method, like this:
class MyHandler(tornado.web.RequestHandler):
    def get(self):
        self.redirect('/some_url')
or use RedirectHandler.
Every action that could finish the request needs to be called in the context of an HTTP-verb method (get, post, put, and so on). The common mistake is doing authentication/authorization in __init__ or initialize.
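For the RedirectHandler alternative, the redirect is declared in the routing table instead of inside a handler (a sketch using Tornado's documented RedirectHandler arguments; the source path is made up):

app = tornado.web.Application([
    (r"/old-path", tornado.web.RedirectHandler, {"url": "/some_url"}),
])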
More detail
In Tornado's source there is a note about _transforms: it is initialized in the constructor with None and set in _execute (oversimplifying: after headers_received).
A transform modifies the result of an HTTP request (e.g., GZip encoding).
Applications are not expected to create their own OutputTransforms
or interact with them directly; the framework chooses which transforms
(if any) to apply.
Reproduce
Sample that triggers this error. I'm including this only as a cross-check that blueware is not the cause:
import tornado.ioloop
import tornado.web

class SomeHandler(tornado.web.RequestHandler):
    def initialize(self, *args, **kwargs):
        url = '/some'
        self.redirect(url)
        # ^ this is wrong

    def get(self):
        # redirect should be here
        self.write("Hello")

def make_app():
    return tornado.web.Application([
        (r"/", SomeHandler),
    ])

if __name__ == "__main__":
    app = make_app()
    app.listen(8888)
    tornado.ioloop.IOLoop.current().start()
And stacktrace:
ERROR:tornado.application:Uncaught exception
Traceback (most recent call last):
  File "/tmp/py3/lib/python3.4/site-packages/tornado/http1connection.py", line 238, in _read_message
    delegate.finish()
  File "/tmp/py3/lib/python3.4/site-packages/tornado/httpserver.py", line 289, in finish
    self.delegate.finish()
  File "/tmp/py3/lib/python3.4/site-packages/tornado/web.py", line 2022, in finish
    self.execute()
  File "/tmp/py3/lib/python3.4/site-packages/tornado/web.py", line 2042, in execute
    **self.handler_kwargs)
  File "/tmp/py3/lib/python3.4/site-packages/tornado/web.py", line 183, in __init__
    self.initialize(**kwargs)
  File "test.py", line 8, in initialize
    self.redirect(url)
  File "/tmp/py3/lib/python3.4/site-packages/tornado/web.py", line 666, in redirect
    self.finish()
  File "/tmp/py3/lib/python3.4/site-packages/tornado/web.py", line 932, in finish
    self.flush(include_footers=True)
  File "/tmp/py3/lib/python3.4/site-packages/tornado/web.py", line 868, in flush
    for transform in self._transforms:
TypeError: 'NoneType' object is not iterable

Celery uses the wrong broker

I have this setup from the docs.
celery.py file:
from __future__ import absolute_import
import os
from celery import Celery
from django.conf import settings
# set the default Django settings module for the 'celery' program.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'cars.settings')
app = Celery('cars')
# Using a string here means the worker will not have to
# pickle the object when using Windows.
app.config_from_object('django.conf:settings')
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
supervisord config
[program:bbay-celery]
command = /opt/webapps/bbay/env/bin/celery worker -A cars.celery:app ; Command to start app
directory = /opt/webapps/bbay/
user = bbay ; User to run as
numprocs = 1
stdout_logfile = /opt/webapps/bbay/logs/celery.log ; Where to write log messages
redirect_stderr = true ; Save stderr in the same log
environment=LANG='en_US.UTF-8',LC_ALL='en_US.UTF-8',DJANGO_SETTINGS_MODULE='cars.settings',CELERYD_CHDIR='/opt/webapps/bbay/'
autostart = true
autorestart = true
startsecs = 10
Django settings:
BROKER_URL = 'redis://localhost:6379/0'
When I start the worker, everything seems fine and the correct broker URL is used:
(env)bbay@djproj:/opt/webapps/bbay$ celery -A cars.celery:app beat
celery beat v3.1.17 (Cipater) is starting.
Configuration ->
. broker -> redis://localhost:6379/0
. loader -> celery.loaders.app.AppLoader
. scheduler -> celery.beat.PersistentScheduler
. db -> celerybeat-schedule
. logfile -> [stderr]#%INFO
. maxinterval -> now (0s)
Here is my task:
@shared_task
def send_mail_task(template, context, send_to):
    ....
Here is how I use it:
send_mail_task.delay('email/confirmation_message.html', context, [user.email, ])
But when the task is called, it tries to connect to the default broker (host '127.0.0.1:5672'). Here is the stacktrace:
Stacktrace (most recent call last):
  File "django/core/handlers/base.py", line 111, in get_response
    response = wrapped_callback(request, *callback_args, **callback_kwargs)
  File "django/views/decorators/csrf.py", line 57, in wrapped_view
    return view_func(*args, **kwargs)
  File "django/views/generic/base.py", line 69, in view
    return self.dispatch(request, *args, **kwargs)
  File "rest_framework/views.py", line 452, in dispatch
    response = self.handle_exception(exc)
  File "rest_framework/views.py", line 449, in dispatch
    response = handler(request, *args, **kwargs)
  File "accounts/api/views.py", line 132, in post
    send_mail_task.delay('email/contact_seller.html', context, [profile.user.email, ])
  File "celery/app/task.py", line 453, in delay
    return self.apply_async(args, kwargs)
  File "celery/app/task.py", line 555, in apply_async
    **dict(self._get_exec_options(), **options)
  File "celery/app/base.py", line 355, in send_task
    reply_to=reply_to or self.oid, **options
  File "celery/app/amqp.py", line 305, in publish_task
    **kwargs
  File "kombu/messaging.py", line 168, in publish
    routing_key, mandatory, immediate, exchange, declare)
  File "kombu/connection.py", line 457, in _ensured
    interval_max)
  File "kombu/connection.py", line 369, in ensure_connection
    interval_start, interval_step, interval_max, callback)
  File "kombu/utils/__init__.py", line 243, in retry_over_time
    return fun(*args, **kwargs)
  File "kombu/connection.py", line 237, in connect
    return self.connection
  File "kombu/connection.py", line 741, in connection
    self._connection = self._establish_connection()
  File "kombu/connection.py", line 696, in _establish_connection
    conn = self.transport.establish_connection()
  File "kombu/transport/pyamqp.py", line 112, in establish_connection
    conn = self.Connection(**opts)
  File "amqp/connection.py", line 165, in __init__
    self.transport = self.Transport(host, connect_timeout, ssl)
  File "amqp/connection.py", line 186, in Transport
    return create_transport(host, connect_timeout, ssl)
  File "amqp/transport.py", line 299, in create_transport
    return TCPTransport(host, connect_timeout)
  File "amqp/transport.py", line 95, in __init__
    raise socket.error(last_err)
So what is wrong, how do I make Celery connect to the specified broker, and where is this in the Celery docs?
The problem was that I missed celery in __init__.py. __init__.py should contain the following (from the docs):
from __future__ import absolute_import
# This will make sure the app is always imported when
# Django starts so that shared_task will use this app.
from .celery import app as celery_app
Ensure the broker URL in your config or settings file has the prefix specified by the namespace in app.config_from_object('django.conf:settings', namespace='CELERY').
So I changed my BROKER_URL to CELERY_BROKER_URL.
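With that namespace, the Django settings and the Celery app line up like this (a sketch; the Redis URL is the one from the question):

# settings.py
CELERY_BROKER_URL = 'redis://localhost:6379/0'   # the CELERY_ prefix matches the namespace

# celery.py
app.config_from_object('django.conf:settings', namespace='CELERY')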
In my case the wrong-broker issue was due to an incorrect celery start command.
I had used 'celery beat -A=myapp', and the broker was incorrect. Then I changed it to 'celery -A myapp beat' and it used the correct broker from the settings.
It may also be caused by kombu.
At first my environment had:
kombu 4.6.11
celery 4.4.7
and Celery could not pick up settings via config_from_object.
After downgrading to:
kombu 4.0.2
celery 4.0.2
the problem was solved!
