I am attempting to deploy my server with Gunicorn over HTTPS. However, no matter what nginx configuration I use, I always get an AttributeError in Gunicorn. I don't think the problem lies with nginx, though, but with Gunicorn, and I don't know how to fix it. Here is the command I'm using to start my server:
gunicorn -b 0.0.0.0:8000 --certfile=/etc/ssl/cert_chain.crt --keyfile=/etc/ssl/server.key pyhub2.wsgi
And here is my nginx configuration file:
server {
# port to listen on. Can also be set to an IP:PORT
listen 80;
server_name www.xxxxx.co;
rewrite ^ https://$server_name$request_uri? permanent;
include "/opt/bitnami/nginx/conf/vhosts/*.conf";
}
server {
# port to listen on. Can also be set to an IP:PORT
listen 443;
ssl on;
ssl_certificate /etc/ssl/cert_chain.crt;
ssl_certificate_key /etc/ssl/server.key;
server_name www.xxxx.co;
access_log /opt/bitnami/nginx/logs/access.log;
error_log /opt/bitnami/nginx/logs/error.log;
location /xxxx.txt {
root /home/bitnami;
}
location / {
proxy_set_header X-Forwarded-For $scheme;
proxy_buffering off;
proxy_pass https://0.0.0.0:8000;
}
location /status {
stub_status on;
access_log off;
allow 127.0.0.1;
deny all;
}
# PageSpeed
#pagespeed on;
#pagespeed FileCachePath /opt/bitnami/nginx/var/ngx_pagespeed_cache;
# Ensure requests for pagespeed optimized resources go to the pagespeed
# handler and no extraneous headers get set.
#location ~ "\.pagespeed\.([a-z]\.)?[a-z]{2}\.[^.]{10}\.[^.]+" { add_header "" ""; }
#location ~ "^/ngx_pagespeed_static/" { }
#location ~ "^/ngx_pagespeed_beacon$" { }
#location /ngx_pagespeed_statistics { allow 127.0.0.1; deny all; }
#location /ngx_pagespeed_message { allow 127.0.0.1; deny all; }
location /static/ {
autoindex on;
alias /opt/bitnami/apps/django/django_projects/PyHub2/static/;
}
location /admin {
proxy_pass https://127.0.0.1:8000;
allow 96.241.66.109;
deny all;
}
location /robots.txt {
root /opt/bitnami/apps/django/django_projects/PyHub2;
}
include "/opt/bitnami/nginx/conf/vhosts/*.conf";
}
The following is the error I get whenever I attempt to connect:
Traceback (most recent call last):
File "/opt/bitnami/python/lib/python2.7/site-packages/gunicorn/arbiter.py", line 515, in spawn_worker
worker.init_process()
File "/opt/bitnami/python/lib/python2.7/site-packages/gunicorn/workers/base.py", line 126, in init_process
self.run()
File "/opt/bitnami/python/lib/python2.7/site-packages/gunicorn/workers/sync.py", line 119, in run
self.run_for_one(timeout)
File "/opt/bitnami/python/lib/python2.7/site-packages/gunicorn/workers/sync.py", line 66, in run_for_one
self.accept(listener)
File "/opt/bitnami/python/lib/python2.7/site-packages/gunicorn/workers/sync.py", line 30, in accept
self.handle(listener, client, addr)
File "/opt/bitnami/python/lib/python2.7/site-packages/gunicorn/workers/sync.py", line 141, in handle
self.handle_error(req, client, addr, e)
File "/opt/bitnami/python/lib/python2.7/site-packages/gunicorn/workers/base.py", line 213, in handle_error
self.log.exception("Error handling request %s", req.uri)
AttributeError: 'NoneType' object has no attribute 'uri'
[2015-12-29 22:12:26 +0000] [1887] [INFO] Worker exiting (pid: 1887)
[2015-12-30 03:12:26 +0000] [1921] [INFO] Booting worker with pid: 1921
And here is my wsgi.py, at the request of Klaus D.:
"""
WSGI config for pyhub2 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "pyhub2.settings")
application = get_wsgi_application()
If nginx is handling the SSL negotiation and Gunicorn is running as the upstream, you shouldn't need to pass --certfile=/etc/ssl/cert_chain.crt --keyfile=/etc/ssl/server.key when launching Gunicorn.
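For example, the launch command could drop the certificate flags and bind to localhost only (a sketch based on the command above, since nginx terminates SSL and proxies to gunicorn):
gunicorn -b 127.0.0.1:8000 pyhub2.wsgi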
You might try an Nginx configuration along these lines:
upstream app_server {
server 127.0.0.1:8000;
}
server {
listen 80;
listen [::]:80 default_server ipv6only=on;
server_name www.xxxxx.co;
# Redirect to SSL
rewrite ^ https://$server_name$request_uri? permanent;
include "/opt/bitnami/nginx/conf/vhosts/*.conf";
}
server {
# Listen for SSL requests
listen 443;
server_name www.xxxx.co;
ssl on;
ssl_certificate /etc/ssl/cert_chain.crt;
ssl_certificate_key /etc/ssl/server.key;
client_max_body_size 4G;
keepalive_timeout 5;
location = /favicon.ico { access_log off; log_not_found off; }
access_log /opt/bitnami/nginx/logs/access.log;
error_log /opt/bitnami/nginx/logs/error.log;
location /xxxx.txt {
root /home/bitnami;
}
location /status {
stub_status on;
access_log off;
allow 127.0.0.1;
deny all;
}
location /static {
autoindex on;
alias /opt/bitnami/apps/django/django_projects/PyHub2/static;
}
location /admin {
include proxy_params;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
# Proxy to upstream app_server
proxy_pass http://app_server;
allow 96.241.66.109;
deny all;
}
location /robots.txt {
root /opt/bitnami/apps/django/django_projects/PyHub2;
}
location / {
try_files $uri @app_proxy;
}
location @app_proxy {
# Handle requests, proxy to SSL
include proxy_params;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
# Proxy to upstream app_server
proxy_pass http://app_server;
}
include "/opt/bitnami/nginx/conf/vhosts/*.conf";
}
Also, you might try launching gunicorn with the --check-config flag to check for configuration errors unrelated to SSL, and ensure that you're able to reach :8000 locally without SSL.
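For instance, from the server itself (reusing the module name from your startup command; the port is the one gunicorn binds to):
gunicorn --check-config pyhub2.wsgi   # loads the WSGI app and reports configuration errors
curl -I http://127.0.0.1:8000/        # with gunicorn running, check that it answers over plain HTTP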
Is it secure to set SECURE_SSL_REDIRECT=False if I have nginx set up as a reverse proxy serving the site over HTTPS?
I can access the site over SSL if this is set to False, whereas if it is True, I receive a "too many redirects" response.
NOTE: nginx and Django run from within Docker containers.
My nginx.conf looks like:
upstream config {
server web:8000;
}
server {
listen 80;
server_name _;
return 301 https://$host$request_uri;
}
server {
listen 443 ssl;
server_name _;
ssl_certificate /etc/ssl/certs/cert.com.chained.crt;
ssl_certificate_key /etc/ssl/certs/cert.com.key;
location / {
proxy_pass http://config;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Host $host;
proxy_redirect off;
}
location /staticfiles/ {
alias /home/app/web/staticfiles/;
}
}
EDIT: Added http to https redirect in nginx.conf.
You need to add the following to your "location /" block:
location / {
...
proxy_set_header X-Forwarded-Proto $scheme;
...
}
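With that header forwarded, Django can be told to trust it, which is what lets SECURE_SSL_REDIRECT=True stop looping. A minimal sketch of the relevant production settings (standard Django settings, shown for completeness):
# settings.py (production)
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')  # trust nginx's X-Forwarded-Proto header
SECURE_SSL_REDIRECT = True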
I know this isn't the ideal place to ask about nginx, but I've been stuck on an issue for a few days and still have no idea how to solve it.
I would like to use nginx to redirect users from domain.com:3001 to sub.domain.com. The application on port 3001 is running in a Docker container; I didn't add any files to the sites-available/sites-enabled directories. I have added two server blocks (vhosts) in my conf.d directory. In the server block I set $upstream and a resolver according to the record in my /etc/resolv.conf file. The problem is that when I open sub.domain.com in a browser, I either get a message that the IP address could not be associated with any server (DNS_PROBE_FINISHED_NXDOMAIN) or 50x errors.
However, when I run curl sub.domain.com from the server I receive a 200 with the index.html response; this doesn't work when I run the same command from my local PC. The server's domain is on a private network. Do you have any idea what my configuration files are missing? Maybe there is some issue with the listen port when the app is running in Docker, or maybe something is wrong with the nginx version? When I installed nginx, the conf.d directory was empty, with no default.conf. I am lost...
Any help will be highly appreciated.
Here are my configuration files:
server.conf:
server
{
listen 80;
listen 443 ssl;
server_name sub.domain.net;
#charset koi8-r;
#access_log /var/log/nginx/host.access.log main;
ssl_certificate /etc/nginx/ssl/cer.crt;
ssl_certificate_key /etc/nginx/ssl/private.key;
#set_real_ip_from 127.0.0.1;
#real_ip_header X-Real-IP;
#real_ip_recursive on;
# location / {
# root /usr/share/nginx/html;
# index index.html index.htm;
# }
location / {
resolver 10.257.10.4;
set $upstream https://127.0.0.1:3000;
proxy_pass $upstream;
proxy_set_header X-Forwarded-Host $host;
proxy_set_header X-Forwarded-Server $host;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Host $host;
#error_page 404 /404.html;
# redirect server error pages to the static page /50x.html
#
error_page 500 502 503 504 /50x.html;
location = /50x.html {
root /usr/share/nginx/html;
}
# proxy the PHP scripts to Apache listening on 127.0.0.1:80
#
#location ~ \.php$ {
# proxy_pass http://127.0.0.1;
#}
# pass the PHP scripts to FastCGI server listening on 127.0.0.1:9000
#
#location ~ \.php$ {
# root html;
# fastcgi_pass 127.0.0.1:9000;
# fastcgi_index index.php;
# fastcgi_param SCRIPT_FILENAME /scripts$fastcgi_script_name;
# include fastcgi_params;
#}
# deny access to .htaccess files, if Apache's document root
# concurs with nginx's one
#
#location ~ /\.ht {
# deny all;
#}
}
}
nginx.conf
#user nginx;
worker_processes 1;
#error_log /var/log/nginx/error.log;
#error_log /var/log/nginx/error.log notice;
#error_log /var/log/nginx/error.log info;
#pid /var/run/nginx.pid;
include /etc/nginx/modules.conf.d/*.conf;
events {
worker_connections 1024;
}
http {
include mime.types;
default_type application/octet-stream;
#log_format main '$remote_addr - $remote_user [$time_local]
# '$status $body_bytes_sent "$http_referer" '
# '"$http_user_agent" "$http_x_forwarded_for"';
#access_log /var/log/nginx/access.log main;
sendfile on;
#tcp_nopush on;
#keepalive_timeout 0;
keepalive_timeout 65;
#tcp_nodelay on;
#gzip on;
#gzip_disable "MSIE [1-6]\.(?!.*SV1)";
server_tokens off;
include /etc/nginx/conf.d/*.conf;
}
# override global parameters e.g. worker_rlimit_nofile
include /etc/nginx/*global_params;
I'm using a Docker image based on https://github.com/tiangolo/uwsgi-nginx-flask-docker/tree/master/python3.6. Inside it I'm running a Python app that accepts a POST, does some processing on the JSON body, and returns a simple JSON response. A POST like this:
curl -H "Content-Type: application/json" -X POST http://10.4.5.168:5002/test -d '{"test": "test"}'
works fine. If, however, I POST a larger JSON file, I get a 504 Gateway Timeout:
curl -H "Content-Type: application/json" -X POST http://10.4.5.168:5002/test -d #some_6mb_file.json
I have a feeling that there is an issue with the communication between Nginx and Uwsgi, but I'm not sure how to fix it.
EDIT: I jumped inside the docker container and restarted nginx manually to get better logging. I'm receiving the following error:
2018/12/21 20:47:45 [error] 611#611: *1 upstream timed out (110: Connection timed out) while reading response header from upstream, client: 10.4.3.168, server: , request: "POST /model/refuel_verification_model/predict HTTP/1.1", upstream: "uwsgi://unix:///tmp/uwsgi.sock", host: "10.4.3.168:5002"
From inside the container, I started a second instance of my Flask app, running without Nginx and uWSGI, and it worked fine. The response took approximately 5 seconds to be returned (due to the processing time of the data).
Configurations:
/etc/nginx/nginx.conf:
user nginx;
worker_processes 1;
error_log /var/log/nginx/error.log warn;
pid /var/run/nginx.pid;
events {
worker_connections 1024;
}
http {
include /etc/nginx/mime.types;
default_type application/octet-stream;
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';
access_log /var/log/nginx/access.log main;
sendfile on;
#tcp_nopush on;
keepalive_timeout 65;
#gzip on;
include /etc/nginx/conf.d/*.conf;
}
daemon off;
/etc/nginx/conf.d/nginx.conf:
server {
listen 80;
location / {
try_files $uri @app;
}
location @app {
include uwsgi_params;
uwsgi_pass unix:///tmp/uwsgi.sock;
}
location /static {
alias /app/static;
}
}
/etc/nginx/conf.d/upload.conf:
client_max_body_size 128m;
client_body_buffer_size 128m;
I encountered this behavior when proxying to aiohttp (Python) apps.
In my case, I needed to disable caching in the location block for the proxy.
These lines were removed from the block:
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection 'upgrade';
proxy_set_header Host $host;
proxy_cache_bypass $http_upgrade;
As a result, the working config is something like this:
server {
listen 80;
location / {
try_files $uri @app;
}
location @app {
proxy_set_header Host $http_host;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_redirect off;
proxy_buffering off;
proxy_pass http://myapp;
}
location /static {
alias /app/static;
}
}
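After editing, the configuration can be checked and reloaded in place, for example (run as root or with sudo, depending on the setup):
nginx -t          # check the configuration syntax
nginx -s reload   # apply the change without dropping connections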
There was an issue with TensorFlow. I had loaded a TensorFlow model during app initialization and then tried to use it later. Because of the threading done by the web server and the non-thread-safe nature of TensorFlow, the processing hung, leading to the timeout.
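In case it helps others who hit the same hang: a commonly cited workaround for TF1-era APIs is to capture the default graph when the model is loaded and re-enter it inside the request handler. This is only a rough sketch under that assumption; the model path and route are illustrative, not taken from the original app:
import numpy as np
import tensorflow as tf
from tensorflow import keras
from flask import Flask, jsonify, request

app = Flask(__name__)

# Load the model once at startup and remember the graph it was loaded into.
model = keras.models.load_model("model.h5")   # illustrative path
graph = tf.get_default_graph()

@app.route("/test", methods=["POST"])
def predict():
    payload = request.get_json()
    # Re-enter the load-time graph; worker threads otherwise see an empty default graph and hang or fail.
    with graph.as_default():
        result = model.predict(np.array(payload["inputs"]))
    return jsonify(result.tolist())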
It has been slightly over 4 hours now, and I cannot get my nginx server to work with my SSL certificates for my Django application.
Here is my nginx.conf:
user nginx;
worker_processes 1;
error_log /var/log/nginx/error.log warn;
pid /var/run/nginx.pid;
events {
worker_connections 1024;
}
http {
include /etc/nginx/mime.types;
default_type application/octet-stream;
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';
access_log /var/log/nginx/access.log main;
sendfile on;
#tcp_nopush on;
keepalive_timeout 65;
#gzip on;
upstream app {
server django:5000;
}
server {
listen 80;
server_name www.example.com;
rewrite ^/(.*) https://www.example.com/$1 permanent;
}
server {
listen 443 ssl;
server_name www.example.com;
charset utf-8;
ssl on;
ssl_certificate /etc/nginx/ssl/1_www.example.com_bundle.crt;
ssl_certificate_key /etc/nginx/ssl/example_rsa.key;
location / {
try_files $uri #proxy_to_app;
}
location #proxy_to_app {
proxy_pass http://app;
proxy_redirect off;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header Host $http_host;
}
}
}
I did manage to redirect all http traffic to https, but when I visit https://www.example.com I get a beautiful ERR_CONNECTION_REFUSED.
The only related part I can think of in my Django application is:
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
but I do think the problem is at the nginx level. I am using Django Cookiecutter by pydanny. The unmodified nginx.conf file can be found here. I did edit the Dockerfile to ADD my certificates.
Thanks!
This is a conf file I use for that case. You might want to compare or follow the same approach:
# nginx config file for example.com
upstream django {
server unix:/srv/www/app/run/app.sock;
}
server {
listen 80 default_server;
server_name example.com www.example.com;
return 301 https://example.com$request_uri;
}
server {
listen 443 ssl;
server_name www.example.com;
ssl_certificate /etc/nginx/ssl/app.crt;
ssl_certificate_key /etc/nginx/ssl/app.key;
return 301 https://example.com$request_uri;
}
server {
listen 443 ssl;
server_name example.com;
ssl_certificate /etc/nginx/ssl/app.crt;
ssl_certificate_key /etc/nginx/ssl/app.key;
proxy_set_header X-Forwarded-Protocol $scheme;
add_header Strict-Transport-Security "max-age=31536000";
charset utf-8;
gzip on;
gzip_disable "msie6";
gzip_vary on;
gzip_proxied any;
gzip_comp_level 6;
gzip_buffers 16 8k;
gzip_http_version 1.1;
gzip_min_length 256;
gzip_types text/plain text/css application/json application/x-javascript text/xml application/xml application/xml+rss text/javascript application/vnd.ms-fontobject application/x-font-ttf font/opentype image/svg+xml image/x-icon;
location /media {
alias /srv/www/app/app/media;
}
location /static {
alias /srv/www/app/app/collected_static;
}
location / {
uwsgi_pass django;
include /srv/www/app/run/uwsgi_params;
}
location /api/v1/app/upload {
uwsgi_max_temp_file_size 1M;
}
location /favicon.ico {
alias /srv/www/app/app/collected_static/img/favicon.ico;
}
}
On the Django side, only for the production environment, I do:
# SSL
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTOCOL', 'https')
SESSION_COOKIE_SECURE = True
CSRF_COOKIE_SECURE = True
That's it!
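To sanity-check a setup like this after deploying, a few quick requests can confirm the redirect chain (the hostnames are the same placeholders used above):
curl -sI http://example.com/ | head -n 3        # expect a 301 to https://example.com/
curl -skI https://www.example.com/ | head -n 3  # expect a 301 to https://example.com/
curl -skI https://example.com/ | head -n 3      # expect the app's normal response via uwsgi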
I'm trying to get this supervisord+nginx+tornado setup to work. Since the Tornado app is reachable at IP:8000 while IP:80 shows me the "Welcome to nginx" page, I suspect my nginx.conf contains errors. My goal is for this setup to serve my Tornado site to users on port 80.
nginx.conf looks like this:
user nginx;
worker_processes 1;
error_log /var/log/nginx/error.log;
pid /var/run/nginx.pid;
events {
worker_connections 1024;
use epoll;
}
http {
# Enumerate all the Tornado servers here
upstream frontends {
server 127.0.0.1:8000;
server 127.0.0.1:8001;
server 127.0.0.1:8002;
server 127.0.0.1:8003;
}
include /etc/nginx/mime.types;
default_type application/octet-stream;
access_log /var/log/nginx/access.log;
keepalive_timeout 65;
proxy_read_timeout 200;
sendfile on;
tcp_nopush on;
tcp_nodelay on;
gzip on;
gzip_min_length 1000;
gzip_proxied any;
gzip_types text/plain text/css text/xml
application/x-javascript application/xml
application/atom+xml text/javascript;
# Only retry if there was a communication error, not a timeout
# on the Tornado server (to avoid propagating "queries of death"
# to all frontends)
proxy_next_upstream error;
server {
listen 80;
# Allow file uploads
client_max_body_size 50M;
location static/ {
root /srv/www/url/tornado/;
if ($query_string) {
expires max;
}
}
location = /favicon.ico {
rewrite (.*) /static/favicon.ico;
}
location = /robots.txt {
rewrite (.*) /static/robots.txt;
}
location / {
proxy_pass_header Server;
proxy_set_header Host $http_host;
proxy_redirect off;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Scheme $scheme;
proxy_pass http://frontends;
}
}
}
Help is much appreciated.
This is the supervisord.conf:
[include]
files = *.supervisor
[supervisord]
[supervisorctl]
serverurl = unix://supervisord.sock
[unix_http_server]
file = supervisord.sock
[rpcinterface:supervisor]
supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface
[program:main]
process_name = main-%(process_num)s
command = python /srv/www/url/tornado/main.py
--port=%(process_num)s
--log_file_prefix=%(here)s/logs/%(program_name)s-%(process_num)s.log
numprocs = 4
numprocs_start = 8000
Wild guess: add a server_name directive to your nginx config.
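Something along these lines, with your real host name (or the server's public IP) substituted in:
server {
listen 80;
server_name example.com;   # replace with your domain or the server's IP
# ... the existing locations from your config ...
}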
Also, what does supervisor say when you start your app?
If it's okay (started), take a look at tornado and nginx logs.
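For example (log paths follow the configs above; the tornado log is relative to the directory holding supervisord.conf):
supervisorctl status                  # the four main-800x processes should show RUNNING
tail -n 50 logs/main-8000.log         # tornado log, per log_file_prefix
tail -n 50 /var/log/nginx/error.log   # nginx error log, per nginx.conf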
If you provide them here, figuring out the answer would be much easier. :)