I am trying to run my Flask app in two ways: by using the docker run command and by using a Compose file. When I use the following commands, everything works fine:
docker container run --name flask-database -d --network flask_network \
  -e POSTGRES_USER=admin -e POSTGRES_PASSWORD=admin -e POSTGRES_DB=flask_db \
  -v postgres_data:/var/lib/postgresql/data -p 5432:5432 postgres:13
docker container run -p 5000:5000 --network flask_network flask_app
But when I try to use my Compose file (with docker compose up), I see this error:
File "/app/main_python_files/routes.py", line 11, in home
web | cur.execute('SELECT * FROM books;')
web | psycopg2.errors.UndefinedTable: relation "books" does not exist
web | LINE 1: SELECT * FROM books;
What do I have to change in my compose file? I would be very grateful for a response! Here is my compose file:
version: '3.7'
services:
  flask-database:
    container_name: flask-database
    image: postgres:13
    restart: always
    ports:
      - 5432:5432
    networks:
      - flask_network
    environment:
      - POSTGRES_USER=admin
      - POSTGRES_PASSWORD=admin
      - POSTGRES_DB=flask_db
    volumes:
      - postgres_data:/var/lib/postgresql/data
  web:
    container_name: web
    #build: web
    image: flask_app
    restart: always
    ports:
      - 5000:5000
    networks:
      - flask_network
    depends_on:
      - flask-database
    links:
      - flask-database
networks:
  flask_network: {}
volumes:
  postgres_data: {}
I'm building an API that fetches data from a MySQL database using Docker. I've tried everything and I always get this error: 2005 (HY000): Unknown MySQL server host 'db' (-3). Here is my docker compose file:
version: '3'
services:
web:
container_name: nginx
image: nginx
volumes:
- ./nginx/nginx.conf:/tmp/nginx.conf
environment:
- FLASK_SERVER_ADDR=backend:9091
- DB_PASSWORD=password
- DB_USER=user
- DB_HOST=db
command: /bin/bash -c "envsubst < /tmp/nginx.conf > /etc/nginx/conf.d/default.conf && nginx -g 'daemon off;'"
ports:
- 80:80
networks:
- local
depends_on:
- backend
backend:
container_name: app
build: flask
environment:
- FLASK_SERVER_PORT=9091
- DB_PASSWORD=password
volumes:
- flask:/tmp/app_data
restart: unless-stopped
networks:
- local
depends_on:
- db
links:
- db
db:
container_name: db
image: mysql
restart: unless-stopped
volumes:
- ./mysql:/docker-entrypoint-initdb.d
environment:
- MYSQL_ROOT_PASSWORD=password
- MYSQL_DATABASE=database
- MYSQL_USER=user
- MYSQL_PASSWORD=password
ports:
- 3306:3306
networks:
local:
volumes:
flask:
driver: local
db:
driver: local
Inside the flask directory I have my Dockerfile like so:
FROM ubuntu:latest
WORKDIR /src
RUN apt -y update
RUN apt -y upgrade
RUN apt install -y python3
RUN apt install -y python3-pip
COPY . .
RUN chmod +x -R .
RUN pip install -r requirements.txt --no-cache-dir
CMD ["python3","app.py"]
Finally, in my app.py file I try to connect to the database using the name of the Docker container. I have tried using localhost and it still gives me the same error. This is the part of the code I use to access it:
import mysql.connector

conn = mysql.connector.connect(
    host="db",
    port=3306,
    user="user",
    password="password",
    database="database")
What is it that I'm doing wrong?
The containers aren't all on the same networks: (the db service is never attached to local), which could be why you're having trouble.
I'd recommend deleting all of the networks: blocks in the file, both the blocks at the top level and the blocks in the web and backend containers. Compose will create a network named default for you and attach all of the containers to that network. Networking in Compose in the Docker documentation has more details on this setup.
The links: block is related to an obsolete Docker networking mode, and I've seen it implicated in problems in other SO questions. You should remove it as well.
You also do not need to manually specify container_name: in most cases. For the Nginx container, the Docker Hub nginx image already knows how to do the envsubst processing (it renders any *.template file in /etc/nginx/templates/ into /etc/nginx/conf.d/ at startup), so you do not need to override its command:.
This should leave you with:
version: '3.8'
services:
  web:
    image: nginx
    volumes:
      - ./nginx/nginx.conf:/etc/nginx/templates/default.conf.template
    environment: { ... }
    ports:
      - 80:80
    depends_on:
      - backend
  backend:
    build: flask
    environment: { ... }
    volumes:
      - flask:/tmp/app_data
    restart: unless-stopped
    depends_on:
      - db
  db:
    image: mysql
    restart: unless-stopped
    volumes:
      - ./mysql:/docker-entrypoint-initdb.d
      - db:/var/lib/mysql
    environment: { ... }
    ports:
      - 3306:3306
volumes:
  flask:
  db:
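One more caveat, not strictly part of the networking fix: depends_on: only orders container startup; it does not wait for the MySQL server inside the db container to be ready to accept connections. A minimal retry sketch, assuming mysql.connector as in the question:

import time
import mysql.connector

# Retry until the MySQL server inside the db container accepts connections.
conn = None
for attempt in range(10):
    try:
        conn = mysql.connector.connect(
            host="db",  # the Compose service name resolves on the default network
            port=3306,
            user="user",
            password="password",
            database="database")
        break
    except mysql.connector.Error:
        time.sleep(2)  # server not ready yet; back off and try again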
docker build -f project/Dockerfile.prod -t registry.heroku.com/mighty-savannah-85236/web ./project
Successfully built 3df1e0c4eea4
Successfully tagged registry.heroku.com/mighty-savannah-85236/web:latest
docker run --name fastapi-tdd -e PORT=8765 -e DATABASE_URL=sqlite://sqlite.db -p 5003:8765 registry.heroku.com/mighty-savannah-85236/web:latest
nc: getaddrinfo for host "web-db" port 5432: Name or service not known
My docker-compose file:
services:
  web:
    build: ./project
    command: uvicorn app.main:app --reload --workers 1 --host 0.0.0.0 --port 8000
    volumes:
      - ./project:/usr/src/app
    ports:
      - 8004:8000
    environment:
      - ENVIRONMENT=dev
      - TESTING=0
      - DATABASE_URL=postgres://postgres:postgres@web-db:5432/web_dev
      - DATABASE_TEST_URL=postgres://postgres:postgres@web-db:5432/web_test
    depends_on:
      - web-db
  web-db:
    build:
      context: ./project/db
      dockerfile: Dockerfile
    expose:
      - 5432
    environment:
      - POSTGRES_USER=postgres
      - POSTGRES_PASSWORD=postgres
It seems your container is trying to connect to web-db:5432, which, judging by the port, is likely a Postgres database server. As web-db is not a real domain name, there should most likely be another container called web-db, presumably running a Postgres database, that your container wants to connect to.
This connection will only work, though, if both containers (the one you are starting and the Postgres database container) are in the same user-defined Docker network, as only then does Docker's service discovery work. You might want to have a look at the Docker documentation on this.
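A quick, purely illustrative way to check whether discovery is working (assuming Python is available in the image) is to resolve the name from inside the running application container:

# dns_probe.py, illustrative only; run it inside the app container,
# e.g. docker exec fastapi-tdd python3 dns_probe.py
import socket

try:
    print("web-db resolves to", socket.gethostbyname("web-db"))
except socket.gaierror as exc:
    print("web-db does not resolve:", exc)  # the same failure the app is seeing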
But essentially you need to create a Docker network using
docker network create my-network
and then attach both containers - again, your container and the Postgres database - to that network using the --network option.
Additionally, your Postgres container must be named web-db so that service discovery will work.
So the skeleton of the command to start the DB would be the following:
docker run --name web-db --network my-network -p 5432:5432 your-database-image
The command to start your application would be
docker run --name fastapi-tdd --network my-network -e PORT=8765 -e DATABASE_URL=sqlite://sqlite.db -p 5003:8765 registry.heroku.com/mighty-savannah-85236/web:latest
It might also be worth exploring Docker Compose to simplify this whole process.
Edit
Now with your docker-compose.yaml file the same rule applies: both containers need to be in the same user-defined bridge network, which can be declared using a top-level networks: key (be aware: don't put it inside services:).
services:
  web:
    build: ./project
    command: uvicorn app.main:app --reload --workers 1 --host 0.0.0.0 --port 8000
    volumes:
      - ./project:/usr/src/app
    # attach this container to the network
    networks:
      - my-network
    ports:
      - 8004:8000
    environment:
      - ENVIRONMENT=dev
      - TESTING=0
      - DATABASE_URL=postgres://postgres:postgres@web-db:5432/web_dev
      - DATABASE_TEST_URL=postgres://postgres:postgres@web-db:5432/web_test
    depends_on:
      - web-db
  web-db:
    # attach this container to the network
    networks:
      - my-network
    # name this container web-db
    container_name: web-db
    build:
      context: ./project/db
      dockerfile: Dockerfile
    expose:
      - 5432
    environment:
      - POSTGRES_USER=postgres
      - POSTGRES_PASSWORD=postgres
# declare the network resource
networks:
  my-network:
Now a connection should be possible. Be aware that you may also need to configure PostgreSQL to accept remote connections by setting listen_addresses = '*' in postgresql.conf (the official postgres image already ships with this default).
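If the name resolves but the app still races ahead of the database at startup, a small readiness probe can help, since depends_on: does not wait for Postgres to accept connections. A sketch assuming psycopg2 (the app itself may use a different driver):

# wait_for_db.py, hypothetical helper; assumes psycopg2 is installed
# and DATABASE_URL is set as in the compose file above.
import os
import time
import psycopg2

url = os.environ.get("DATABASE_URL", "postgres://postgres:postgres@web-db:5432/web_dev")

for attempt in range(10):
    try:
        psycopg2.connect(url).close()
        print("database is ready")
        break
    except psycopg2.OperationalError as exc:
        print("waiting for database:", exc)
        time.sleep(2)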
I am using docker to run 3 tests written in Python to check if an API is working correctly.
The structure of the folder is the following :
requirements.txt
one.Dockerfile + two.Dockerfile + three.Dockerfile
docker-compose.yml
test1.py + test2.py + test3.py
I want each test.py file to write its output to a unique result.log file in a dedicated volume.
Thing is, I can find my way around by locating all three result.log files with find plus a regex and some commands in a .sh file, but that is probably not the best way to do it.
I know I am doing something wrong with the volumes but can't figure out what, despite looking at dozens of webpages. Could someone point out the obvious?
one.Dockerfile extract:
FROM ubuntu:latest
COPY requirements.txt /data/
RUN apt-get update && apt-get install python3-pip -y
WORKDIR /data/
RUN pip install -r requirements.txt
ADD test1.py /data/test1.py
EXPOSE 8000
CMD python3 test1.py >> log.txt
docker-compose.yml extract:
version: "3.9"
services:
fastapi:
image: fastapi:homemade
container_name: fastapi
environment:
- LOG=1
volumes:
- /var/lib/docker/containers/fastapi:/crucial
networks:
- ntw
ports:
- "8000:8000"
test1:
build:
context: .
dockerfile: ./one.Dockerfile
image: test_build1
container_name: test1
environment:
- LOG=1
depends_on:
- fastapi
volumes:
- /var/lib/docker/containers/test1:/crucial
networks:
- ntw
test2:
build:
context: .
dockerfile: ./two.Dockerfile
image: test_build2
container_name: test2
environment:
- LOG=1
depends_on:
- test1
volumes:
- /var/lib/docker/containers/test2:/crucial
networks:
- ntw
test3:
build:
context: .
dockerfile: ./three.Dockerfile
image: test_build3
container_name: test3
environment:
- LOG=1
depends_on:
- test2
volumes:
- /var/lib/docker/containers/test3:/crucial
networks:
- ntw
networks:
ntw:
volumes:
crucial:
driver: local
driver_opts:
o: bind
type: none
device: crucial
I'm trying to create a django/nginx/gunicorn/postgres docker-compose configuration.
Every time I call docker-compose down, I noticed that my postgres db was getting wiped. I did a little digging, and when I call docker-compose up, my named volume is not being created like I've seen in other tutorials.
What am I doing wrong?
Here is my yml file (if it helps, I'm using macOS to run my project):
version: "3"
volumes:
postgres:
driver: local
services:
database:
image: "postgres:latest" # use latest postgres
container_name: database
environment:
- POSTGRES_USER=REDACTED
- POSTGRES_PASSWORD=REDACTED
- POSTGRES_DB=REDACTED
volumes:
- postgres:/postgres
ports:
- 5432:5432
nginx:
image: nginx:latest
container_name: nginx
ports:
- "8000:8000"
volumes:
- ./src:/src
- ./config/nginx:/etc/nginx/conf.d
- ./src/static:/static
depends_on:
- web
migrate:
build: .
container_name: migrate
depends_on:
- database
command: bash -c "python manage.py makemigrations && python manage.py migrate"
volumes:
- ./src:/src
web:
build: .
container_name: django
command: gunicorn Project.wsgi:application --bind 0.0.0.0:8000
depends_on:
- migrate
- database
volumes:
- ./src:/src
- ./src/static:/static
expose:
- "8000"
You need to mount the named volume at Postgres's data directory, /var/lib/postgresql/data (the image's default PGDATA); mounted at /postgres it persists nothing the server actually uses:
volumes:
  - postgres:/var/lib/postgresql/data
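To confirm that data now survives a docker-compose down/up cycle, a quick check like this can be run from the host (illustrative only; it assumes psycopg2 on the host and the redacted credentials exported as environment variables):

# persistence_probe.py, hypothetical check, not part of the project
import os
import psycopg2

conn = psycopg2.connect(
    host="localhost",  # port 5432 is published by the database service
    user=os.environ["POSTGRES_USER"],
    password=os.environ["POSTGRES_PASSWORD"],
    dbname=os.environ["POSTGRES_DB"],
)
conn.autocommit = True
with conn.cursor() as cur:
    # create a marker once, then re-run after down/up: it should still exist
    cur.execute("CREATE TABLE IF NOT EXISTS persistence_marker (id int)")
    cur.execute("SELECT count(*) FROM pg_tables WHERE tablename = 'persistence_marker'")
    print("marker table present:", bool(cur.fetchone()[0]))
conn.close()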
I am unable to run multiple commands for the scripts (.py) in the rulsmalldata service. Can you please suggest a solution?
version: "3"
networks:
mlflow:
external: true
services:
redis:
restart: always
image: redis:latest
command:
- --loglevel warning
container_name: "redis"
rulsmalldata:
image: rulsmalldata:mlflow-project-latest
command: bash -c "python mlflow_model_run.py && python mlflow_model_serve.py && python mlflow_model_output.py"
networks:
- mlflow
ports:
- "80:80"
environment:
MLFLOW_TRACKING_URI: <<TRACKING_URI>>
REDIS_ADDRESS: redis
AZURE_STORAGE_CONNECTION_STRING: 'DefaultEndpointsProtocol=https;AccountName=<<Name>>;AccountKey=<<KEY>>EndpointSuffix=core.windows.net'