I'm following one of the various tutorials out on the internet and set up a Flask/RabbitMQ/Celery app using Docker/Docker Compose. The containers all appear to run successfully, but when I hit the endpoint, the app stalls: the task appears to be stuck in PENDING and never actually completes. There are no errors in the Docker output, so I'm really confused about why this isn't working. The only output I see when I hit my endpoint is this:
rabbit_1 | 2021-05-13 01:38:07.942 [info] <0.760.0> accepting AMQP connection <0.760.0> (172.19.0.4:45414 -> 172.19.0.2:5672)
rabbit_1 | 2021-05-13 01:38:07.943 [info] <0.760.0> connection <0.760.0> (172.19.0.4:45414 -> 172.19.0.2:5672): user 'rabbitmq' authenticated and granted access to vhost '/'
rabbit_1 | 2021-05-13 01:38:07.952 [info] <0.776.0> accepting AMQP connection <0.776.0> (172.19.0.4:45416 -> 172.19.0.2:5672)
rabbit_1 | 2021-05-13 01:38:07.953 [info] <0.776.0> connection <0.776.0> (172.19.0.4:45416 -> 172.19.0.2:5672): user 'rabbitmq' authenticated and granted access to vhost '/'
I'm really not sure what I'm doing wrong, as the documentation hasn't been much help.
Dockerfile
FROM python:3
COPY ./requirements.txt /app/requirements.txt
WORKDIR /app
RUN pip install -r requirements.txt
COPY . /app
ENTRYPOINT [ "python" ]
CMD ["app.py","--host=0.0.0.0"]
Flask app.py
from workerA import add_nums

from flask import (
    Flask,
    request,
    jsonify,
)

app = Flask(__name__)

@app.route("/add")
def add():
    first_num = request.args.get('f')
    second_num = request.args.get('s')
    result = add_nums.delay(first_num, second_num)
    return jsonify({'result': result.get()}), 200

if __name__ == '__main__':
    app.run(debug=True, host='0.0.0.0')
Celery workerA.py
from celery import Celery

# Celery configuration
CELERY_BROKER_URL = 'amqp://rabbitmq:rabbitmq@rabbit:5672/'
CELERY_RESULT_BACKEND = 'rpc://'

# Initialize Celery
celery = Celery('workerA', broker=CELERY_BROKER_URL, backend=CELERY_RESULT_BACKEND)

@celery.task()
def add_nums(a, b):
    return a + b
docker-compose.yml
version: "3"
services:
web:
build:
context: .
dockerfile: Dockerfile
restart: always
ports:
- "5000:5000"
depends_on:
- rabbit
volumes:
- .:/app
rabbit:
hostname: rabbit
image: rabbitmq:management
environment:
- RABBITMQ_DEFAULT_USER=rabbitmq
- RABBITMQ_DEFAULT_PASS=rabbitmq
ports:
- "5673:5672"
- "15672:15672"
worker_1:
build:
context: .
hostname: worker_1
entrypoint: celery
command: -A workerA worker --loglevel=info -Q workerA
volumes:
- .:/app
links:
- rabbit
depends_on:
- rabbit
Alright, after much research I determined that the issue was the queue name for the task: the task was being published to Celery's default celery queue, while the worker was started with -Q workerA and therefore only consumed from workerA. I adjusted my decorator like so:
@celery.task(queue='workerA')
def add_nums(a, b):
    return a + b
And now it works!
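An equivalent fix, if you'd rather keep routing out of the decorator, is Celery's task_routes setting (a sketch of the same idea; workerA.add_nums is the task's fully qualified name given the module above):

celery.conf.task_routes = {
    # Route add_nums to the queue the worker watches via -Q workerA
    'workerA.add_nums': {'queue': 'workerA'},
}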
FastAPI app:
import fastapi as _fastapi
from celery import Celery
from celery.result import AsyncResult

app = _fastapi.FastAPI()

celery_app = Celery(
    "worker",
    broker_url="amqp://guest:guest@rabbit:5672//",
    result_backend="rpc://",
)
celery_app.conf.task_routes = {"celery_worker.test_celery": "test-queue"}
celery_app.conf.update(task_track_started=True)

@app.get("/{word}")
async def root(word: str):
    task = celery_app.send_task("celery_worker.test_celery", args=[word])
    return {"message": "Word received", "id": f"{task}"}

@app.get("/api/result/{task_id}")
async def result(task_id: str):
    task = AsyncResult(task_id)
    # Task not ready
    if not task.ready():
        return {"status": task.status}
    # Task done: return the value
    task_result = task.get()
    result = task_result.get("result")
    return {"task_id": str(task_id),
            "status": task_result.get("status"),
            "result": result,
            }
Dockerfile:
FROM python:3.10-slim
WORKDIR /app
ENV PYTHONDONTWRITEBYTECODE 1
ENV PYTHONUNBUFFERED 1
COPY ./requirements.txt .
RUN pip install --upgrade pip && pip install -r requirements.txt --no-cache-dir
COPY . .
docker-compose.yml:
version: '3.8'
services:
  ylab:
    container_name: ylab
    build:
      context: .
    command: "uvicorn main:app --reload --host 0.0.0.0"
    ports:
      - "8000:8000"
    networks:
      - api_network
  rabbit:
    container_name: rabbit
    image: rabbitmq:3.10.7-management
    ports:
      - "15672:15672"
      - "5672:5672"
    networks:
      - api_network
  celery_worker:
    container_name: celery_worker
    build:
      context: .
    command: celery -A main.celery_app worker --loglevel=INFO
    networks:
      - api_network
networks:
  api_network:
    name: api_network
The root() function works well: I can send messages, get a task id back, and see all the messages in the RabbitMQ queue. But the result() function returns task.ready() == False for every task id.
Can anyone tell me what the error in this code is?
Services info:
RabbitMQ 3.10.7
Celery:

celery@415bde516932 v5.2.3 (dawn-chorus)
Linux-5.10.0-18-amd64-x86_64-with-glibc2.31 2023-02-05 12:02:49
app: worker:0x7f3679306c20
transport: amqp://guest:**@rabbit:5672//
results: rpc://
concurrency: 8 (prefork)
task events: OFF (enable -E to monitor tasks in this worker)

[queues]
.> celery exchange=celery(direct) key=celery
According to the documentation for task_track_started:
If True the task will report its status as ‘started’ when the task is
executed by a worker.
But in your code, you don't seem to have anything consuming the tasks that you're placing on the queue. They will stay in PENDING state forever.
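Before changing any code, you can confirm this from a running worker: Celery's inspect command lists the queues each worker consumes from (run inside the worker container; the -A target matches your compose file):

celery -A main.celery_app inspect active_queues

With your configuration it reports only the default celery queue (as the [queues] section of your worker banner above also shows), while your tasks are being routed to test-queue.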
I started by writing your code to use automatic task routing, using <func>.delay to call a task rather than the lower-level send_task method:
import time

import fastapi as _fastapi
from celery import Celery
from celery.result import AsyncResult

app = _fastapi.FastAPI()

celery_app = Celery(
    "worker",
    broker_url="amqp://guest:guest@rabbit:5672//",
    result_backend="rpc://",
)
celery_app.conf.update(task_track_started=True)

@celery_app.task
def test_celery(word):
    time.sleep(10)
    return word.upper()

@app.get("/{word}")
async def root(word: str):
    task = test_celery.delay(word)
    return {"message": "Word received", "id": f"{task}"}

@app.get("/api/result/{task_id}")
async def result(task_id: str):
    task = AsyncResult(task_id)
    # Task not ready
    if not task.ready():
        return {"status": task.status}
    # Task done: return the value
    task_result = task.get()
    return {"task_id": str(task_id),
            "result": task_result,
            }
When running the above code, a connection to /foo results in:
{"message":"Word received","id":"34bfe48d-6ab3-4dec-ad7d-aa567315a609"}
A subsequent call to /api/result/34bfe48d-6ab3-4dec-ad7d-aa567315a609 yields:
{"status":"STARTED"}
And if we wait for 10 seconds, the same request results in:
{"task_id":"34bfe48d-6ab3-4dec-ad7d-aa567315a609","result":"FOO"}
We've demonstrated that things work correctly when using automatic task routing. So why isn't your original code working? There are three problems:
You don't have anything watching test-queue.
You're delivering tasks into test-queue, but your Celery worker is watching the default celery queue. You need to use the -Q argument to have it watch test-queue instead:
celery_worker:
  container_name: celery_worker
  build:
    context: .
  command: celery -A main.celery_app worker --loglevel=INFO -Q test-queue
  networks:
    - api_network
You don't have any tasks defined.
If you add the -Q test-queue argument from the previous step and restart the environment, attempts to connect to /foo will result in the following traceback in your Celery worker:
celery_worker | [2023-02-05 14:12:40,864: ERROR/MainProcess] Received unregistered task of type 'celery_worker.test_celery'.
celery_worker | The message has been ignored and discarded.
[...]
celery_worker | Traceback (most recent call last):
celery_worker | File "/usr/local/lib/python3.10/site-packages/celery/worker/consumer/consumer.py", line 591, in on_task_received
celery_worker | strategy = strategies[type_]
celery_worker | KeyError: 'celery_worker.test_celery'
We can fix that by registering the appropriate task with Celery:
@celery_app.task(name="celery_worker.test_celery")
def test_celery(word):
    time.sleep(10)
    return word.upper()
With the previous two changes, your code will successfully submit the task to Celery and Celery will pass it to the test_celery function. However, calls to /api/result/<id> will fail with:
File "/app/./main.py", line 39, in result
result = task_result.get("result")
AttributeError: 'str' object has no attribute 'get'
You need to modify your result function so that it looks more like:
#app.get("/api/result/{task_id}")
async def result(task_id: str):
task = AsyncResult(task_id)
# Task Not Ready
if not task.ready():
return {"status": task.status}
# Task done: return the value
task_result = task.get()
return {
"task_id": str(task_id),
"result": task_result,
}
With these three changes, your original code works as intended. The complete modified code looks like:
import time

import fastapi
from celery import Celery
from celery.result import AsyncResult

app = fastapi.FastAPI()

celery_app = Celery(
    "worker",
    broker_url="amqp://guest:guest@rabbit:5672//",
    result_backend="rpc://",
)
celery_app.conf.task_routes = {"celery_worker.test_celery": "test-queue"}
celery_app.conf.update(task_track_started=True)

@celery_app.task(name="celery_worker.test_celery")
def test_celery(word):
    time.sleep(10)
    return word.upper()

@app.get("/{word}")
async def root(word: str):
    task = celery_app.send_task("celery_worker.test_celery", args=[word])
    return {"message": "Word received", "id": f"{task}"}

@app.get("/api/result/{task_id}")
async def result(task_id: str):
    task = AsyncResult(task_id)
    # Task not ready
    if not task.ready():
        return {"status": task.status}
    # Task done: return the value
    task_result = task.get()
    return {
        "task_id": str(task_id),
        "result": task_result,
    }
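For reference, the same round trip can be exercised from the host with curl (assuming the 8000:8000 port mapping from your compose file; the task id will differ on each run):

$> curl http://localhost:8000/foo
{"message":"Word received","id":"34bfe48d-6ab3-4dec-ad7d-aa567315a609"}
$> curl http://localhost:8000/api/result/34bfe48d-6ab3-4dec-ad7d-aa567315a609
{"status":"STARTED"}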
I have a dockerized Flask API app that runs on localhost:5000. The API works with no problem. But another app that needs to use it, and which I cannot change, calls localhost:5000/some_path.
I'd like to redirect from localhost:5000/some_path to localhost:5000.
I have read that I can use a prefix in my Flask app, but I'd prefer another approach; I don't want to mess with the code.
Is there a redirect, a middleware, or another way to redirect this traffic?
docker-compose.yml:
# Use root/example as user/password credentials
version: "3.1"
services:
  my-db:
    image: mariadb
    restart: always
    environment:
      MARIADB_ROOT_PASSWORD: example
    ports:
      - 3306:3306
    volumes:
      - ./0_schema.sql:/docker-entrypoint-initdb.d/0_schema.sql
      - ./1_data.sql:/docker-entrypoint-initdb.d/1_data.sql
  adminer:
    image: adminer
    restart: always
    environment:
      ADMINER_DEFAULT_SERVER: my-db
    ports:
      - 8080:8080
  my-api:
    build: ../my-awesome-api/
    ports:
      - 5000:5000
If you serve your application behind a web server, you can handle the redirect there. For example, with nginx:
location = /some_path {
    return 301 /;
}
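Note that location = /some_path matches only that exact path. If the other app also requests sub-paths such as /some_path/users, a prefix rewrite that strips the prefix is closer to what you want (a sketch; adjust the capture to your needs):

location /some_path/ {
    # Strip the prefix and redirect the client to the bare path
    rewrite ^/some_path/(.*)$ /$1 permanent;
}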
Or you can use a WSGI middleware:

class PrefixMiddleware(object):
    def __init__(self, app, prefix=""):
        self.app = app
        self.prefix = prefix

    def __call__(self, environ, start_response):
        if environ["PATH_INFO"].startswith(self.prefix):
            environ["PATH_INFO"] = environ["PATH_INFO"][len(self.prefix):]
            environ["SCRIPT_NAME"] = self.prefix
            return self.app(environ, start_response)
        else:
            # Anything outside the prefix is not found
            start_response("404 Not Found", [("Content-Type", "text/plain")])
            return [b"Not Found"]
Then register the middleware, passing it the prefix to strip:

app = Flask(__name__)
app.wsgi_app = PrefixMiddleware(app.wsgi_app, prefix="/some_path")
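A quick way to verify the behaviour is Flask's built-in test client (a sketch; /health stands in for whatever route your API actually serves):

with app.test_client() as client:
    # With the middleware registered, the prefixed path reaches the /health view
    response = client.get("/some_path/health")
    print(response.status_code)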
I have a Flask app that needs to make a request to a gRPC server when a request comes in to a Flask endpoint.
@main.route("/someroute", methods=["POST"])
def some_function():
    # Do something here
    make_grpc_request(somedata)
    return create_response(data=None, message="Something happened")

def make_grpc_request(somedata):
    channel = grpc.insecure_channel('localhost:30001')
    stub = some_proto_pb2_grpc.SomeServiceStub(channel)
    request = some_proto_pb2.SomeRequest(id=1)
    response = stub.SomeFunction(request)
    logger.info(response)
But I keep getting the error: InactiveRpcError of RPC that terminated with: StatusCode.UNAVAILABLE failed to connect to all addresses.
Just putting the client code in a normal .py file works fine, and making the request from BloomRPC works fine too, so it can't be a server issue.
Is this something to do with how Flask works, and am I just missing something?
I have also tried using https://github.com/public/sonora without any success like this:
with sonora.client.insecure_web_channel("localhost:30001") as channel:
    stub = some_proto_pb2_grpc.SomeServiceStub(channel)
    request = some_proto_pb2.SomeRequest(id=1)
    response = stub.SomeFunction(request)
docker-compose.yml
version: "3.7"
services:
core-profile: #This is where the grpc requests are sent to
container_name: core-profile
build:
context: ./app/profile/
target: local
volumes:
- ./app/profile/:/usr/src/app/
env_file:
- ./app/profile/database.env
- ./app/profile/jwt.env
- ./app/profile/oauth2-dev.env
environment:
- APP_PORT=50051
- PYTHONUNBUFFERED=1
- POSTGRES_HOST=core-profile-db
ports:
- 30001:50051
expose:
- 50051
depends_on:
- core-profile-db
core-profile-db:
image: postgres:10-alpine
expose:
- 5432
ports:
- 54321:5432
env_file:
- ./app/profile/database.env
app-flask-server-db:
image: postgres:10-alpine
expose:
- 5433
ports:
- 54333:5433
env_file:
- ./app/flask-server/.env
flask-server:
build:
context: ./app/flask-server/
dockerfile: Dockerfile-dev
volumes:
- ./app/flask-server:/usr/src/app/
env_file:
- ./app/flask-server/.env
environment:
- FLASK_ENV=docker
ports:
- 5000:5000
depends_on:
- app-flask-server-db
volumes:
app-flask-server-db:
name: app-flask-server-db
Your Python app (service) should reference the gRPC service as core-profile:50051.
The host name is the Compose service name core-profile and, because the Python service is also on the Compose network, it must use the container port 50051.
localhost:30001 is how you'd access it from the Compose host.
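Applied to the make_grpc_request helper from the question, that looks like this (same stubs as in your snippet; only the target changes):

def make_grpc_request(somedata):
    # Compose service name + container port, not the published host port
    channel = grpc.insecure_channel('core-profile:50051')
    stub = some_proto_pb2_grpc.SomeServiceStub(channel)
    request = some_proto_pb2.SomeRequest(id=1)
    response = stub.SomeFunction(request)
    logger.info(response)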
I'm trying a FastAPI-based API with Celery, Redis, and RabbitMQ for the background tasks.
When doing docker-compose up, the Redis, RabbitMQ, and Flower parts work; I'm able to access the Flower dashboard. But it then gets stuck at the Celery part.
The error:
rabbitmq_1 | 2020-09-08 06:32:38.552 [info] <0.716.0> connection <0.716.0> (172.22.0.6:49290 -> 172.22.0.2:5672): user 'user' authenticated and granted access to vhost '/'
celery-flower_1 | [W 200908 06:32:41 control:44] 'stats' inspect method failed
celery-flower_1 | [W 200908 06:32:41 control:44] 'active_queues' inspect method failed
celery-flower_1 | [W 200908 06:32:41 control:44] 'registered' inspect method failed
celery-flower_1 | [W 200908 06:32:41 control:44] 'scheduled' inspect method failed
celery-flower_1 | [W 200908 06:32:41 control:44] 'active' inspect method failed
celery-flower_1 | [W 200908 06:32:41 control:44] 'reserved' inspect method failed
celery-flower_1 | [W 200908 06:32:41 control:44] 'revoked' inspect method failed
celery-flower_1 | [W 200908 06:32:41 control:44] 'conf' inspect method failed
My docker-compose file:
version: "3.7"
services:
rabbitmq:
image: "bitnami/rabbitmq:3.7"
ports:
- "4000:4000"
- "5672:5672"
volumes:
- "rabbitmq_data:/bitnami"
redis:
image: "bitnami/redis:5.0.4"
environment:
- REDIS_PASSWORD=password123
ports:
- "5000:5000"
volumes:
- "redis_data:/bitnami/redis/data"
celery-flower:
image: gregsi/latest-celery-flower-docker:latest
environment:
- AMQP_USERNAME=user
- AMQP_PASSWORD=bitnami
- AMQP_ADMIN_USERNAME=user
- AMQP_ADMIN_PASSWORD=bitnami
- AMQP_HOST=rabbitmq
- AMQP_PORT=5672
- AMQP_ADMIN_HOST=rabbitmq
- AMQP_ADMIN_PORT=15672
- FLOWER_BASIC_AUTH=user:test
ports:
- "5555:5555"
depends_on:
- rabbitmq
- redis
fastapi:
build: .
ports:
- "8000:8000"
depends_on:
- rabbitmq
- redis
volumes:
- "./:/app"
command: "poetry run uvicorn app/app/main:app --bind 0.0.0.0:8000"
worker:
build: .
depends_on:
- rabbitmq
- redis
volumes:
- "./:/app"
command: "poetry run celery worker -A app.app.worker.celery_worker -l info -Q test-queue -c 1"
volumes:
rabbitmq_data:
driver: local
redis_data:
driver: local
My celery app:
celery_app = Celery(
    "worker",
    backend="redis://:password123@redis:6379/0",
    broker="amqp://user:bitnami@rabbitmq:5672//"
)
celery_app.conf.task_routes = {
    "app.app.worker.celery_worker.compute_stock_indicators": "stocks-queue"
}
celery_app.conf.update(task_track_started=True)
celery worker:
@celery_app.task(acks_late=True)
def compute_stock_indicators(stocks: list, background_task):
    stocks_with_indicators = {}
    for stock in stocks:
        current_task.update_state(state=Actions.STARTED,
                                  meta={f"starting to fetch {stock}'s indicators"})
        stock_indicators = fetch_stock_indicators(stock)  # Fetch the stock's most recent indicators
        current_task.update_state(state=Actions.FINISHED,
                                  meta={f"{stock}'s indicators fetched"})
        stocks_with_indicators.update({stock: stock_indicators})
    current_task.update_state(state=Actions.PROGRESS,
                              meta={f"predicting {stocks}s..."})
The FastAPI function:
log = logging.getLogger(__name__)
rabbit = RabbitMQHandler(host='localhost', port=5672, level="DEBUG")
log.addHandler(rabbit)

def celery_on_message(body):
    """
    Logs the initiation of the function
    """
    log.warning(body)

def background_on_message(task):
    """
    Logs the function when it is added to the queue
    """
    log.warning(task.get(on_message=celery_on_message, propagate=False))

app = FastAPI(debug=True)

@app.post("/")
async def initiator(stocks: FrozenSet, background_task: BackgroundTasks):
    """
    :param stocks: stocks to be analyzed
    :type stocks: set
    :param background_task: initiate the tasks queue
    :type background_task: starlette.background.BackgroundTasks
    """
    log.warning(msg=f'beginning analysis on: {stocks}')
    task_name = "app.app.worker.celery_worker.compute_stock_indicators"
    task = celery_app.send_task(task_name, args=[stocks, background_task])
    background_task.add_task(background_on_message, task)
    return {"message": "Stocks indicators successfully calculated, stocks sent to prediction"}
On the docker-compose, on the worker section, the command reads:
command: "poetry run celery worker -A app.app.worker.celery_worker -l info -Q test-queue -c 1"
So essentially you are asking the worker to "watch" a queue named test-queue.
But on the celery_app, on the following section:
celery_app.conf.task_routes = {
    "app.app.worker.celery_worker.compute_stock_indicators": "stocks-queue"
}
you are defining a queue named stocks-queue.
Either change the docker-compose's or the celery_app's queue name to match the other.
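For example, keeping stocks-queue as the route and pointing the worker at it instead (one of the two equally valid fixes):

command: "poetry run celery worker -A app.app.worker.celery_worker -l info -Q stocks-queue -c 1"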
If you use Docker Toolbox on Windows, you need to forward port 5555 in the VirtualBox VM's network settings.
First, run the following command in cmd:
docker-machine stop default
Then open VirtualBox, go to Settings > Network > Advanced > Port Forwarding, add a row with port 5555 (the name field can be left empty), and click OK.
Back in cmd, run:
docker-machine start default
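If you prefer the command line over the GUI, the same forwarding rule can be added with VBoxManage while the VM is stopped (a sketch; "flower" is just an arbitrary rule name):

VBoxManage modifyvm "default" --natpf1 "flower,tcp,,5555,,5555"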
I created a simple library project in microservices to study and implement FastAPI.
Docker starts 5 main services:
books
db-book
author
db-author
nginx
Everything works as expected, making requests with postman I have no problem.
Structure
Problem description
I added a test directory where I test endpoints.
Example of (incomplete) author test
from starlette.testclient import TestClient
from app.main import app
from app.api.author import authors
import logging
log = logging.getLogger('__name__')
import requests

client = TestClient(app)

def test_get_authors():
    response = client.get("/")
    assert response.status_code == 200

def test_get_author():
    response = client.get("/1")
    assert response.status_code == 200
$> docker-compose exec author_service pytest .
returns this
============================================================================================================= test session starts =============================================================================================================
platform linux -- Python 3.8.3, pytest-5.3.2, py-1.9.0, pluggy-0.13.1
rootdir: /app
collected 2 items
tests/test_author.py FF [100%]
================================================================================================================== FAILURES ===================================================================================================================
______________________________________________________________________________________________________________ test_get_authors _______________________________________________________________________________________________________________
def test_get_authors():
response = client.get("/")
> assert response.status_code == 200
E assert 404 == 200
E + where 404 = <Response [404]>.status_code
tests/test_author.py:12: AssertionError
_______________________________________________________________________________________________________________ test_get_author _______________________________________________________________________________________________________________
def test_get_author():
response = client.get("/1")
> assert response.status_code == 200
E assert 404 == 200
E + where 404 = <Response [404]>.status_code
tests/test_author.py:16: AssertionError
============================================================================================================== 2 failed in 0.35s ==============================================================================================================
I tried to start the tests directly from the container shell, but got the same result.
This problem occurs only with tests written following the documentation (using the starlette/fastapi TestClient) and with requests.
You can find the complete project here:
Library Microservices example
Environment
OS: [Linux Fedora 32]
FastAPI version: [0.55.1]
Python: [Python 3.8.3]
docker-compose file
version: '3.7'
services:
  book_service:
    build: ./book-service
    command: uvicorn app.main:app --reload --host 0.0.0.0 --port 8000
    volumes:
      - ./book-service/:/app/
    ports:
      - 8001:8000
    environment:
      - DATABASE_URI=postgresql://book_db_username:book_db_password@book_db/book_db_dev
      - AUTHOR_SERVICE_HOST_URL=http://author_service:8000/api/v1/authors/
    depends_on:
      - book_db
  book_db:
    image: postgres:12.1-alpine
    volumes:
      - postgres_data_book:/var/lib/postgresql/data/
    environment:
      - POSTGRES_USER=book_db_username
      - POSTGRES_PASSWORD=book_db_password
      - POSTGRES_DB=book_db_dev
  author_service:
    build: ./author-service
    command: uvicorn app.main:app --reload --host 0.0.0.0 --port 8000
    volumes:
      - ./author-service/:/app/
    ports:
      - 8002:8000
    environment:
      - DATABASE_URI=postgresql://author_db_username:author_db_password@author_db/author_db_dev
    depends_on:
      - author_db
  author_db:
    image: postgres:12.1-alpine
    volumes:
      - postgres_data_author:/var/lib/postgres/data
    environment:
      - POSTGRES_USER=author_db_username
      - POSTGRES_PASSWORD=author_db_password
      - POSTGRES_DB=author_db_dev
  nginx:
    image: nginx:latest
    ports:
      - "8080:8080"
    volumes:
      - ./nginx_config.conf:/etc/nginx/conf.d/default.conf
    depends_on:
      - author_service
      - book_service
volumes:
  postgres_data_book:
  postgres_data_author:
Fixed using the docker0 network IP address and requests. Tests can now be started against 172.13.0.1 on port 8080.
The main problem here is the endpoints in your test file.
Test example fixed:
from starlette.testclient import TestClient
from app.main import app
from app.api.author import authors
import logging
log = logging.getLogger('__name__')
import requests

client = TestClient(app)

def test_get_authors():
    response = client.get("/authors")  # this must be your API endpoint to test
    assert response.status_code == 200

def test_get_author():
    response = client.get("/authors/1")  # this must be your API endpoint to test
    assert response.status_code == 200
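If you're ever unsure which paths the app actually serves, you can print them from the app object itself (a small sketch using FastAPI's route table; run it anywhere app is importable, e.g. in a test):

from app.main import app

# Each registered route exposes its path template
for route in app.routes:
    print(route.path)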