Airflow PythonVirtualenvOperator, No such file or directory: 'virtualenv' - python

I am trying to run the Apache Airflow PythonVirtualenvOperator in one of my DAGs but Airflow is throwing the following error:
[2020-12-14 20:06:32,291] {python_operator.py:316} INFO - Executing cmd
['virtualenv', '/tmp/venvwtqb3rki', '--python=python3.8']
[2020-12-14 20:06:32,301] {taskinstance.py:1150} ERROR - [Errno 2] No such file or directory: 'virtualenv'
Traceback (most recent call last):
File "/opt/airflow/airflow_env/lib/python3.8/site-packages/airflow/models/taskinstance.py", line 984, in _run_raw_task
result = task_copy.execute(context=context)
File "/opt/airflow/airflow_env/lib/python3.8/site-packages/airflow/operators/python_operator.py", line 113, in execute
return_value = self.execute_callable()
File "/opt/airflow/airflow_env/lib/python3.8/site-packages/airflow/operators/python_operator.py", line 292, in execute_callable
self._execute_in_subprocess(self._generate_virtualenv_cmd(tmp_dir))
File "/opt/airflow/airflow_env/lib/python3.8/site-packages/airflow/operators/python_operator.py", line 317, in _execute_in_subprocess
output = subprocess.check_output(cmd,
File "/usr/lib/python3.8/subprocess.py", line 411, in check_output
return run(*popenargs, stdout=PIPE, timeout=timeout, check=True,
File "/usr/lib/python3.8/subprocess.py", line 489, in run
with Popen(*popenargs, **kwargs) as process:
File "/usr/lib/python3.8/subprocess.py", line 854, in __init__
self._execute_child(args, executable, preexec_fn, close_fds,
File "/usr/lib/python3.8/subprocess.py", line 1702, in _execute_child
raise child_exception_type(errno_num, err_msg, err_filename)
I have Airflow and all my DAGs running as an airflow user. I thought maybe Airflow can't find the virtualenv command in its PATH during task setup/execution.
Here is the code I currently have in place to test:
import logging
import datetime
from airflow import DAG
import airflow
from airflow.hooks.S3_hook import S3Hook
from airflow.contrib.hooks import aws_hook
from airflow.models import Variable
from airflow.operators.python_operator import PythonOperator, PythonVirtualenvOperator
from airflow.utils.dates import days_ago
import time

default_args = {
    'owner': 'airflow',
    'depends_on_past': False,
    'start_date': days_ago(2),
    'retries': 0
}

dag = DAG(
    dag_id='list_reqestor',
    default_args=default_args,
    catchup=False,
    schedule_interval=None
)

def setup_driver(ti):
    from libcloud.compute.types import Provider
    from libcloud.compute.providers import get_driver
    """
    Sets up Apache libcloud AWS ec2 node driver.
    Args:
        region: AWS region to perform credential check.
    """
    try:
        shopper_logger.info("Setting up node deployment driver.")
        region = Variable.get('REGION')
        cls = get_driver(Provider.EC2)
        a_hook = aws_hook.AwsHook()
        driver = cls(creds, region=region)
        ti.xcom_push(XCOM_REQUESTOR_LIB_CLOUD_DRIVER, driver)
        time.sleep(30)

setup_driver_task = PythonVirtualenvOperator(
    task_id='setup_driver_task',
    python_callable=setup_driver,
    retries=0,
    requirements=['apache-libcloud'],
    python_version="3.8",
    system_site_packages=False,
    provide_context=True,
    xcom_push=True,
    dag=dag
)

setup_driver
I am not sure what I am missing.

Most likely it is due to the lack of virtualenv in your Airflow environment.
You can check (while in the same environment as Airflow) with virtualenv --version in your terminal.
If the virtualenv command is not found, just install it with:
pip install virtualenv
and it should work.
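If you want to confirm what the worker actually sees, here is a minimal sketch (standard library only) that checks whether the virtualenv executable is visible on the PATH of the user running Airflow, since the operator shells out to that command according to the log above:

import shutil

# PythonVirtualenvOperator builds the temporary env by running the 'virtualenv'
# command, so it must be resolvable on the PATH of the process executing the task.
path = shutil.which("virtualenv")
print(path or "virtualenv not found on PATH - install it with: pip install virtualenv")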

Related

'S3Hook' object has no attribute 'download_file' in AIRFLOW DAG

[2023-02-05 11:32:43,293] {{taskinstance.py:887}} INFO - Executing <Task(PythonOperator): download_from_s3> on 2023-02-05T11:32:34.016335+00:00
[2023-02-05 11:32:43,299] {{standard_task_runner.py:53}} INFO - Started process 87503 to run task
[2023-02-05 11:32:43,474] {{logging_mixin.py:112}} INFO - Running %s on host %s <TaskInstance: s3_download.download_from_s3 2023-02-05T11:32:34.016335+00:00 [running]> 67c7842be21b
[2023-02-05 11:32:43,555] {{taskinstance.py:1128}} ERROR - 'S3Hook' object has no attribute 'download_file'
Traceback (most recent call last):
File "/usr/local/lib/python3.7/site-packages/airflow/models/taskinstance.py", line 966, in _run_raw_task
result = task_copy.execute(context=context)
File "/usr/local/lib/python3.7/site-packages/airflow/operators/python_operator.py", line 113, in execute
return_value = self.execute_callable()
File "/usr/local/lib/python3.7/site-packages/airflow/operators/python_operator.py", line 118, in execute_callable
return self.python_callable(*self.op_args, **self.op_kwargs)
File "/usr/local/airflow/dags/dwnld_frm_awss3.py", line 12, in download_from_s3
file_name = hook.download_file(key=key, bucket_name=bucket_name, local_path=local_path)
AttributeError: 'S3Hook' object has no attribute 'download_file'
[2023-02-05 11:32:43,570] {{taskinstance.py:1185}} INFO - Marking task as FAILED.dag_id=s3_download, task_id=download_from_s3, execution_date=20230205T113234, start_date=20230205T113243, end_date=20230205T113243
I am getting an error from download_file.
My code is:
import os
from datetime import datetime
from airflow.models import DAG
from airflow.operators.python_operator import PythonOperator
from airflow.hooks.S3_hook import S3Hook
from airflow.contrib.hooks.aws_hook import AwsHook

# Function of the DAG
def download_from_s3(key: str, bucket_name: str, local_path: str) -> str:
    hook = S3Hook('my_conn_S3')
    file_name = hook.download_file(key=key, bucket_name=bucket_name, local_path=local_path)
    return file_name

with DAG(
    dag_id='s3_download',
    schedule_interval='@daily',
    start_date=datetime(2023, 2, 4),
    catchup=False
) as dag:
    task_download_from_s3 = PythonOperator(
        task_id='download_from_s3',
        python_callable=download_from_s3,
        op_kwargs={
            'key': 'sample.txt',
            'bucket_name': 'airflow-sample-s3-bucket',
            'local_path': '/usr/local/airflow/'
        }
    )
The imports suggest that you are using an older version of Airflow.
You should install the Amazon backport provider and then import the hook as from airflow.providers.amazon.aws.hooks.s3 import S3Hook.
Note that Airflow 1.10 has been end-of-life for over two years; you should upgrade your Airflow version as soon as possible. To upgrade Airflow you can follow this guide.
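A minimal sketch of the updated function, assuming the backport provider package (apache-airflow-backport-providers-amazon) is installed and the my_conn_S3 connection exists:

from airflow.providers.amazon.aws.hooks.s3 import S3Hook

def download_from_s3(key: str, bucket_name: str, local_path: str) -> str:
    # The provider version of S3Hook exposes download_file, which fetches the
    # key into local_path and returns the name of the downloaded file.
    hook = S3Hook('my_conn_S3')
    return hook.download_file(key=key, bucket_name=bucket_name, local_path=local_path)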

Getting Errno 13: Permission Denied as a root user

So basically, I just want to load a chess engine that is in the same folder as the script:
_, engine = await chess.engine.popen_uci('./engine')
What is wrong with this? I am running the script as the root user.
Also, if I load it as just 'engine', it says the file doesn't exist.
Update: I am getting this error:
_, self.engine = await chess.engine.popen_uci(r'' + os.path.join(os.getcwd(), 'engine'))
File "/usr/local/lib/python3.8/dist-packages/chess/engine.py", line 2642, in popen_uci
transport, protocol = await UciProtocol.popen(command, setpgrp=setpgrp, **popen_args)
File "/usr/local/lib/python3.8/dist-packages/chess/engine.py", line 1214, in popen
return await asyncio.get_running_loop().subprocess_exec(cls, *command, **popen_args) # type: ignore
File "/usr/lib/python3.8/asyncio/base_events.py", line 1630, in subprocess_exec
transport = await self._make_subprocess_transport(
File "/usr/lib/python3.8/asyncio/unix_events.py", line 197, in _make_subprocess_transport
transp = _UnixSubprocessTransport(self, protocol, args, shell,
File "/usr/lib/python3.8/asyncio/base_subprocess.py", line 36, in __init__
self._start(args=args, shell=shell, stdin=stdin, stdout=stdout,
File "/usr/lib/python3.8/asyncio/unix_events.py", line 789, in _start
self._proc = subprocess.Popen(
File "/usr/lib/python3.8/subprocess.py", line 858, in __init__
self._execute_child(args, executable, preexec_fn, close_fds,
File "/usr/lib/python3.8/subprocess.py", line 1704, in _execute_child
raise child_exception_type(errno_num, err_msg, err_filename)
PermissionError: [Errno 13] Permission denied: '/root/ChessBot/engine'
If we check the documentation, in the playing section, you can see that the paths are given as absolute paths, like this:
r"C:\Users\xxxxx\Downloads\stockfish_14_win_x64\stockfish_14_win_x64_avx2.exe"
or "/usr/bin/stockfish".
Although the engine you want to use is in the same folder as your script, a relative path is resolved against the current working directory of the Python process, which may not be that folder, so I would recommend using _, engine = await chess.engine.popen_uci(r'' + os.path.join(os.getcwd(), 'engine')) instead.
Resolved this by myself now. You need to execute this on the engine file:
$ chmod 755 engine
Basically, it changes the file's permissions to the required ones (it adds the execute bit).
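The same fix can also be applied from Python before launching the engine; a minimal sketch, assuming the engine binary sits in the current working directory as in the question:

import os
import stat

import chess.engine

async def open_engine():
    engine_path = os.path.join(os.getcwd(), 'engine')
    # Add execute permission (roughly what chmod 755 does) if it is missing.
    if not os.access(engine_path, os.X_OK):
        mode = os.stat(engine_path).st_mode
        os.chmod(engine_path, mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
    transport, engine = await chess.engine.popen_uci(engine_path)
    return transport, engine

# e.g. transport, engine = asyncio.run(open_engine())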

Airflow triggers Sagemaker job in test mode

I have an Airflow (v1.10.12) DAG that triggers a SageMaker Processor job as part of one of its tasks. I've written a few tests (pytest 6.2.2) to check the basic sanity of the DAG.
It seems like just fetching the DAG by id from DagBag triggers a SageMaker job, i.e. when I run pytest test_file_name.py, a job is triggered, which isn't ideal.
from airflow.models import DagBag

class TestSagemakerDAG:
    @classmethod
    def setup(cls):
        cls.dagbag = DagBag()
        cls.dag = DagBag().get_dag(dag_id='sagemaker-processor')

    def test_dag_loaded(self):
        """
        To verify if dags are loaded onto the dagbag
        :return:
        """
        assert self.dagbag.import_errors == {}
        assert self.dag is not None
        assert len(self.dag.tasks) == 2
For more clarity, this is how the SageMaker Processor job (sagemaker 2.24.1) definition looks:
def initiate_sage_maker_job(self, session):
    return Processor(
        image_uri=self.ecr_uri,
        role=self.iam_role,
        instance_count=self.instance_count,
        instance_type=self.instance_type,
        base_job_name=self.processor_name,
        sagemaker_session=session,
    ).run()
And the boto3 (v1.16.63) session is generated as:
def get_session(self):
    boto_session = boto3.session.Session()
    client = boto_session.client('sagemaker', region_name=self.region)
    session = sagemaker.session.Session(boto_session=boto_session, sagemaker_client=client)
    return session
Finally, the DAG itself:
from airflow import DAG
from airflow.operators.bash_operator import BashOperator
from airflow.operators.python_operator import PythonOperator

with DAG('sagemaker-processor',
         default_args=default_args,
         schedule_interval='@hourly',
         ) as dag:
    t1 = BashOperator(
        task_id='print_current_date',
        bash_command='date'
    )
    t2 = PythonOperator(
        task_id='sagemaker_trigger', python_callable=initiate_sage_maker_job()
    )
    t1 >> t2
I'm just trying to import DAGs from a folder, check for import errors, and check the upstream and downstream lists.
On a side note, I've made sure the DAG is turned off in the Airflow UI and that the Airflow scheduler isn't running to queue up tasks. It's really just a standard test I want to execute using pytest.
The issue pops up as follows:
Job Name: airflow-ecr-test-2021-02-26-21-10-39-935
Inputs: []
Outputs: []
[2021-02-26 16:10:39,935] {session.py:854} INFO - Creating processing-job with name airflow-ecr-test-2021-02-26-21-10-39-935
Traceback (most recent call last):
File "<input>", line 1, in <module>
File "/Applications/PyCharm.app/Contents/plugins/python/helpers/pydev/_pydev_bundle/pydev_umd.py", line 197, in runfile
pydev_imports.execfile(filename, global_vars, local_vars) # execute the script
File "/Applications/PyCharm.app/Contents/plugins/python/helpers/pydev/_pydev_imps/_pydev_execfile.py", line 18, in execfile
exec(compile(contents+"\n", file, 'exec'), glob, loc)
File "/Users/Ajeya.Kempegowda/dags/sample_mwaa.py", line 31, in <module>
initiate_sage_maker_job()
File "/Users/Ajeya.Kempegowda/anaconda3/envs/airflow/lib/python3.7/site-packages/sagemaker/processing.py", line 180, in run
experiment_config=experiment_config,
File "/Users/Ajeya.Kempegowda/anaconda3/envs/airflow/lib/python3.7/site-packages/sagemaker/processing.py", line 695, in start_new
processor.sagemaker_session.process(**process_args)
File "/Users/Ajeya.Kempegowda/anaconda3/envs/airflow/lib/python3.7/site-packages/sagemaker/session.py", line 856, in process
self.sagemaker_client.create_processing_job(**process_request)
File "/Users/Ajeya.Kempegowda/anaconda3/envs/airflow/lib/python3.7/site-packages/botocore/client.py", line 357, in _api_call
return self._make_api_call(operation_name, kwargs)
File "/Users/Ajeya.Kempegowda/anaconda3/envs/airflow/lib/python3.7/site-packages/botocore/client.py", line 676, in _make_api_call
raise error_class(parsed_response, operation_name)
botocore.exceptions.ClientError: An error occurred (ExpiredTokenException) when calling the CreateProcessingJob operation: The security token included in the request is expired
The error displayed says the token is expired, but the real problem is that a processing job is being created at all.
Is there something obvious I'm missing while testing Airflow? My understanding is that the Airflow scheduler should queue up DAGs, and tasks should only be triggered when explicitly told to execute (by turning the DAG on in the Airflow UI/CLI).
Any help would be appreciated. Thanks!
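One thing that stands out in the traceback is that the call happens at module level ("File .../sample_mwaa.py, line 31, in <module>: initiate_sage_maker_job()"): python_callable=initiate_sage_maker_job() calls the function while the DAG file is being parsed, so merely loading it through DagBag() starts the processing job. Below is a minimal sketch of deferring the call until the task actually runs, assuming get_session and initiate_sage_maker_job are reachable as plain callables from the DAG file (in the question they look like methods, so adapt as needed):

from airflow.operators.python_operator import PythonOperator

def trigger_processor_job(**context):
    # Executed only when the task instance runs, not when the scheduler,
    # the webserver, or a pytest DagBag() parses the DAG file.
    session = get_session()
    return initiate_sage_maker_job(session)

t2 = PythonOperator(
    task_id='sagemaker_trigger',
    python_callable=trigger_processor_job,  # pass the callable, do not call it
    dag=dag,
)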

Celery Beat unable to find database models (django.db.utils.OperationalError)

I have to add a few periodic tasks. I'm using Celery with Redis in a Django project.
When I execute the method from shell_plus all is well. However, Celery Beat is unable to find the database instance properly.
Celery version = 4.1.0. I had previously installed django-celery-beat, etc.
Database = MySQL
Where am I wrong?
Thanks in advance.
Celery Command
(venv)$:/data/project/(sesh/dev)$ celery -A freightquotes worker -B -E -l INFO --autoscale=2,1
settings.py
CELERY_BROKER_URL = 'redis://127.0.0.1:6379'
CELERY_BROKER_TRANSPORT = 'redis'
CELERY_BROKER_TRANSPORT_OPTIONS = {'visibility_timeout': 604800}
CELERY_RESULT_BACKEND = BROKER_URL
CELERY_TASK_RESULT_EXPIRES = datetime.timedelta(days=1) # Take note of the CleanUp task in middleware/tasks.py
CELERY_MAX_CACHED_RESULTS = 1000
CELERYBEAT_SCHEDULER = "djcelery.schedulers.DatabaseScheduler"
CELERY_TRACK_STARTED = True
CELERY_SEND_EVENTS = True
CELERY_ACCEPT_CONTENT = ['pickle', 'json', 'msgpack', 'yaml']
REDIS_CONNECT_RETRY = True
REDIS_DB = 0
BROKER_POOL_LIMIT = 2
CELERYD_CONCURRENCY = 1
CELERYD_TASK_TIME_LIMIT = 600
CELERY_BEAT_SCHEDULE = {
    'test': {
        'task': 'loads.tasks.test',
        'schedule': crontab(minute='*/1'),
    },
}
__init__.py
from __future__ import absolute_import, unicode_literals
from .celery import app as celery_app
__all__ = ['celery_app']
celery.py
import os
from celery import Celery

os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'project.settings.base')
app = Celery('project')
app.config_from_object('django.conf:settings', namespace='CELERY')
app.autodiscover_tasks()

@app.task(bind=True)
def debug_task(self):
    print('Request: {0!r}'.format(self.request))
loads/tasks.py
@task()
def test():
    x = [i.id for i in Load.objects.all()]
    print(x)
Error
[2017-11-30 03:52:00,032: ERROR/ForkPoolWorker-2] Task loads.tasks.test[0020e4ae-5e52-49d8-863f-e51c2acfd7a7] raised unexpected: OperationalError('no such table: loads_load',)
Traceback (most recent call last):
File "/data/project/venv/lib/python3.4/site-packages/django/db/backends/utils.py", line 65, in execute
return self.cursor.execute(sql, params)
File "/data/project/venv/lib/python3.4/site-packages/django/db/backends/sqlite3/base.py", line 328, in execute
return Database.Cursor.execute(self, query, params)
sqlite3.OperationalError: no such table: loads_load
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/data/project/venv/lib/python3.4/site-packages/celery/app/trace.py", line 374, in trace_task
R = retval = fun(*args, **kwargs)
File "/data/project/venv/lib/python3.4/site-packages/celery/app/trace.py", line 629, in __protected_call__
return self.run(*args, **kwargs)
File "/data/project/loads/tasks.py", line 146, in test
x = [i.id for i in Load.objects.all()]
File "/data/project/venv/lib/python3.4/site-packages/django/db/models/query.py", line 250, in __iter__
self._fetch_all()
File "/data/project/venv/lib/python3.4/site-packages/django/db/models/query.py", line 1103, in _fetch_all
self._result_cache = list(self._iterable_class(self))
File "/data/project/venv/lib/python3.4/site-packages/django/db/models/query.py", line 53, in __iter__
results = compiler.execute_sql(chunked_fetch=self.chunked_fetch)
File "/data/project/venv/lib/python3.4/site-packages/django/db/models/sql/compiler.py", line 886, in execute_sql
raise original_exception
File "/data/project/venv/lib/python3.4/site-packages/django/db/models/sql/compiler.py", line 876, in execute_sql
cursor.execute(sql, params)
File "/data/project/venv/lib/python3.4/site-packages/django/db/backends/utils.py", line 65, in execute
return self.cursor.execute(sql, params)
File "/data/project/venv/lib/python3.4/site-packages/django/db/utils.py", line 94, in __exit__
six.reraise(dj_exc_type, dj_exc_value, traceback)
File "/data/project/venv/lib/python3.4/site-packages/django/utils/six.py", line 685, in reraise
raise value.with_traceback(tb)
File "/data/project/venv/lib/python3.4/site-packages/django/db/backends/utils.py", line 65, in execute
return self.cursor.execute(sql, params)
File "/data/project/venv/lib/python3.4/site-packages/django/db/backends/sqlite3/base.py", line 328, in execute
return Database.Cursor.execute(self, query, params)
django.db.utils.OperationalError: no such table: loads_load
I found the answer.
We have a few settings files, such as base, dev, prod and local, and the database settings are different in each one of them.
It works when I point the Celery app to the local settings, which contain the full database config. In this case I had to copy all the Celery config from base to local.
I tried to pass django.conf.settings to os.environ.setdefault, but that didn't work.
So the answer is incorrect configuration: if we have everything in one settings file we are fine; if we split the settings, we have to find a workaround.
Edit
Since the issue was finding the right settings file, I now start Celery by setting the settings module explicitly:
DJANGO_SETTINGS_MODULE='project.settings.dev' celery -A project worker -B -E -l INFO --autoscale=2,1
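For anyone hitting the same thing, the telltale sign in the traceback above is the sqlite3 backend even though the project uses MySQL: the worker loaded a settings module that lacks the real database config. A minimal diagnostic sketch of a throwaway task to check what the worker actually sees (assumes the app layout from the question; call show_settings.delay() from shell_plus and read the worker log):

# loads/tasks.py (diagnostic sketch)
import os

from celery import shared_task
from django.conf import settings

@shared_task
def show_settings():
    # Prints into the worker log, so it reflects the worker's own environment,
    # not the shell you called it from.
    print('DJANGO_SETTINGS_MODULE:', os.environ.get('DJANGO_SETTINGS_MODULE'))
    print('default DB engine:', settings.DATABASES['default']['ENGINE'])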

Django + Celery + Supervisord + Redis error when setting

I am going through the setup of the following components on a CentOS server. I got the supervisord task for the web site up and running, but I am blocked on setting up supervisor for Celery. It seems that it recognizes the tasks, but when I try to execute them, it won't connect. My Redis is up and running on port 6380.
Django==1.10.3
amqp==1.4.9
billiard==3.3.0.23
celery==3.1.25
kombu==3.0.37
pytz==2016.10
my celeryd.ini
[program:celeryd]
command=/root/myproject/myprojectenv/bin/celery worker -A mb --loglevel=INFO
environment=PATH="/root/myproject/myprojectenv/bin/",VIRTUAL_ENV="/root/myproject/myprojectenv",PYTHONPATH="/root/myproject/myprojectenv/lib/python2.7:/root/myproject/myprojectenv/lib/python2.7/site-packages"
directory=/home/.../myapp/
user=nobody
numprocs=1
stdout_logfile=/home/.../myapp/log_celery/worker.log
stderr_logfile=/home/.../myapp/log_celery/worker.log
autostart=true
autorestart=true
startsecs=10
; Need to wait for currently executing tasks to finish at shutdown.
; Increase this if you have very long running tasks.
stopwaitsecs = 1200
; When resorting to send SIGKILL to the program to terminate it
; send SIGKILL to its whole process group instead,
; taking care of its children as well.
killasgroup=true
; Set Celery priority higher than default (999)
; so, if rabbitmq(redis) is supervised, it will start first.
priority=1000
The process starts and when I go to the project folder and do:
>python manage.py celery status
celery@ssd-1v: OK
1 node online.
When I open the log file of celery I see that the tasks are loaded.
[tasks]
. mb.tasks.add
. mb.tasks.update_search_index
. orders.tasks.order_created
my mb/tasks.py
from mb.celeryapp import app
import django
django.setup()

@app.task
def add(x, y):
    print(x + y)
    return x + y
my mb/celeryapp.py
from __future__ import absolute_import, unicode_literals
import os
from celery import Celery
from django.conf import settings
# set the default Django settings module for the 'celery' program.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "mb.settings")
app = Celery('mb', broker='redis://localhost:6380/', backend='redis://localhost:6380/')
app.conf.broker_url = 'redis://localhost:6380/0'
app.conf.result_backend = 'redis://localhost:6380/'
app.conf.timezone = 'Europe/Sofia'
app.config_from_object('django.conf:settings')
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
my mb/settings.py:
...
WSGI_APPLICATION = 'mb.wsgi.application'
BROKER_URL = 'redis://localhost:6380/0'
CELERYBEAT_SCHEDULER = 'djcelery.schedulers.DatabaseScheduler'
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
...
when I run:
python manage.py shell
>>> from mb.tasks import add
>>> add.name
'mb.tasks.add'
>>> result=add.delay(1,1)
>>> result.ready()
False
>>> result.status
'PENDING'
And as mentioned earlier I do not see any change in the log anymore.
If I try to run from the command line:
/root/myproject/myprojectenv/bin/celery worker -A mb --loglevel=INFO
Running a worker with superuser privileges when the
worker accepts messages serialized with pickle is a very bad idea!
If you really want to continue then you have to set the C_FORCE_ROOT
environment variable (but please think about this before you do).
User information: uid=0 euid=0 gid=0 egid=0
But I suppose that's normal, since I then run it as the user nobody. The interesting thing is that the bare command celery status (without python manage.py celery status) gives a connection error, probably because it is looking for a different port for Redis, yet the supervisord process starts normally... and when I call 'celery worker -A mb' it says it's OK. Any ideas?
(myprojectenv) [root@ssd-1v]# celery status
Traceback (most recent call last):
File "/root/myproject/myprojectenv/bin/celery", line 11, in <module>
sys.exit(main())
File "/root/myproject/myprojectenv/lib/python2.7/site-packages/celery/__main__.py", line 3
0, in main
main()
File "/root/myproject/myprojectenv/lib/python2.7/site-packages/celery/bin/celery.py", line
81, in main
cmd.execute_from_commandline(argv)
File "/root/myproject/myprojectenv/lib/python2.7/site-packages/celery/bin/celery.py", line
793, in execute_from_commandline
super(CeleryCommand, self).execute_from_commandline(argv)))
File "/root/myproject/myprojectenv/lib/python2.7/site-packages/celery/bin/base.py", line 3
11, in execute_from_commandline
return self.handle_argv(self.prog_name, argv[1:])
File "/root/myproject/myprojectenv/lib/python2.7/site-packages/celery/bin/celery.py", line
785, in handle_argv
return self.execute(command, argv)
File "/root/myproject/myprojectenv/lib/python2.7/site-packages/celery/bin/celery.py", line
717, in execute
).run_from_argv(self.prog_name, argv[1:], command=argv[0])
File "/root/myproject/myprojectenv/lib/python2.7/site-packages/celery/bin/base.py", line 3
15, in run_from_argv
sys.argv if argv is None else argv, command)
File "/root/myproject/myprojectenv/lib/python2.7/site-packages/celery/bin/base.py", line 3
77, in handle_argv
return self(*args, **options)
File "/root/myproject/myprojectenv/lib/python2.7/site-packages/celery/bin/base.py", line 2
74, in __call__
ret = self.run(*args, **kwargs)
File "/root/myproject/myprojectenv/lib/python2.7/site-packages/celery/bin/celery.py", line
473, in run
replies = I.run('ping', **kwargs)
File "/root/myproject/myprojectenv/lib/python2.7/site-packages/celery/bin/celery.py", line
325, in run
return self.do_call_method(args, **kwargs)
File "/root/myproject/myprojectenv/lib/python2.7/site-packages/celery/bin/celery.py", line
347, in do_call_method
return getattr(i, method)(*args)
File "/root/myproject/myprojectenv/lib/python2.7/site-packages/celery/app/control.py", line 100, in ping
return self._request('ping')
File "/root/myproject/myprojectenv/lib/python2.7/site-packages/celery/app/control.py", line 71, in _request
timeout=self.timeout, reply=True,
File "/root/myproject/myprojectenv/lib/python2.7/site-packages/celery/app/control.py", line 316, in broadcast
limit, callback, channel=channel,
File "/root/myproject/myprojectenv/lib/python2.7/site-packages/kombu/pidbox.py", line 283, in _broadcast
chan = channel or self.connection.default_channel
File "/root/myproject/myprojectenv/lib/python2.7/site-packages/kombu/connection.py", line 771, in default_channel
self.connection
File "/root/myproject/myprojectenv/lib/python2.7/site-packages/kombu/connection.py", line 756, in connection
self._connection = self._establish_connection()
File "/root/myproject/myprojectenv/lib/python2.7/site-packages/kombu/connection.py", line 711, in _establish_connection
conn = self.transport.establish_connection()
File "/root/myproject/myprojectenv/lib/python2.7/site-packages/kombu/transport/pyamqp.py", line 116, in establish_connection
conn = self.Connection(**opts)
File "/root/myproject/myprojectenv/lib/python2.7/site-packages/amqp/connection.py", line 165, in __init__
self.transport = self.Transport(host, connect_timeout, ssl)
File "/root/myproject/myprojectenv/lib/python2.7/site-packages/amqp/connection.py", line 186, in Transport
return create_transport(host, connect_timeout, ssl)
File "/root/myproject/myprojectenv/lib/python2.7/site-packages/amqp/transport.py", line 299, in create_transport
return TCPTransport(host, connect_timeout)
File "/root/myproject/myprojectenv/lib/python2.7/site-packages/amqp/transport.py", line 95, in __init__
raise socket.error(last_err)
socket.error: [Errno 111] Connection refused
Any help will be highly appreciated.
UPDATE:
when I run
$:python manage.py shell
>>> from mb.tasks import add
>>> add
<@task: mb.tasks.add of mb:0x2b3f6d0>
the 0x2b3f6d0 is different from what Celery claims to be its memory space in its log, namely:
[config]
- ** ---------- .> app: mb:0x3495bd0
- ** ---------- .> transport: redis://localhost:6380/0
- ** ---------- .> results: disabled://
- *** --- * --- .> concurrency: 1 (prefork)
OK, the answer in this case was that the gunicorn config was actually starting the project from the system-wide Python installation instead of the virtualenv.
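For reference, a minimal diagnostic sketch (a hypothetical helper, not from the original post) that you can run with the interpreter each supervisord program uses, to confirm whether the process really lives inside the virtualenv and which celery/gunicorn binaries it would pick up:

import sys
from distutils.spawn import find_executable

# Expect the paths below to point into /root/myproject/myprojectenv if the
# service was really started from the virtualenv.
print("interpreter: {0}".format(sys.executable))
print("celery:      {0}".format(find_executable("celery")))
print("gunicorn:    {0}".format(find_executable("gunicorn")))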
