I built a React app using the create-react-app and npm run build commands and connected it to Node with a server.js file in the directory created by create-react-app.
When running the command node server locally it works perfectly fine however when I pushed the changes to my nginx server I started to get a 502 bad gateway status. Why is this happening? Node is running when I get this error.
Here is the server.js code
// server.js — serves the production React build through Express.
const express = require('express');
const path = require('path');
const app = express();

// Bundled JS/CSS assets are served out of src/.
app.use('/js', express.static(path.join(__dirname, 'src/')));
app.use('/css', express.static(path.join(__dirname, 'src/')));
// The CRA production build (index.html plus hashed static assets).
app.use(express.static(path.join(__dirname, 'public/build')));

// SPA fallback: handles any requests that don't match the ones above,
// so client-side routes all resolve to index.html.
// (The original used app.get('/'), which only matches the root URL.)
app.get('*', (req, res) => {
  res.sendFile(path.join(__dirname, 'public/build/index.html'));
});

const port = process.env.PORT || 5000;
// Log from the listen callback so the message appears once the port is bound.
app.listen(port, () => {
  console.log('App is listening on port ' + port);
});
the error log
[error] 7422#7422: *4477 connect() failed (111: Connection refused) while connecting to upstream, client: 162.84.158.175, server: anthonyjimenez.me, request: "GET / HTTP/2.0", upstream: "http://127.0.0.1:3000/", host: "anthonyjimenez.me"
and the config file
http {
##
# Basic Settings
##
sendfile on;
tcp_nopush on;
tcp_nodelay on;
keepalive_timeout 65;
types_hash_max_size 2048;
# server_tokens off;
# server_names_hash_bucket_size 64;
# server_name_in_redirect off;
include /etc/nginx/mime.types;
default_type application/octet-stream;
##
# SSL Settings
##
ssl_protocols TLSv1 TLSv1.1 TLSv1.2; # Dropping SSLv3, ref: POODLE
ssl_prefer_server_ciphers on;
##
# Logging Settings
##
access_log /var/log/nginx/access.log;
error_log /var/log/nginx/error.log;
##
# Gzip Settings
##
gzip on;
# gzip_vary on;
# gzip_proxied any;
# gzip_comp_level 6;
# gzip_buffers 16 8k;
# gzip_http_version 1.1;
# gzip_types text/plain text/css application/json application/javascript text/xml application/xml application/xml+rss text/javascript;
##
# Virtual Host Configs
##
include /etc/nginx/conf.d/*.conf;
include /etc/nginx/sites-enabled/*;
}
go to /etc/nginx/sites-enabled/default and add the following blocks. You will have to tell Nginx to forward requests to your Node app this way
server {
listen 443 ssl ;
ssl_certificate /etc/letsencrypt/live/example.com/fullchain.pem; # ssl
ssl_certificate_key /etc/letsencrypt/live/example.com/privkey.pem; # ssl
index index.html index.htm index.nginx-debian.html;
server_name example.com;
location / {
proxy_pass http://localhost:3001; # Your port goes here
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection 'upgrade';
proxy_set_header Host $host;
proxy_cache_bypass $http_upgrade;
}
}
Related
I am running a NodeJS app behind nginx reverse proxy. When I POST large requests, I get HTTP 413 entity too large error. However, I've tried setting the client_max_body_size to 1000M at every level of my nginx config and I'm still getting the error. Yes, I restarted nginx several times and tried setting the max size in several locations, but it didn't help.
I only have 2 nginx configs - the main one, and the virtual host one, both of which I included below.
Here is the error I receive:
{'message': 'request entity too large', 'error': {'message': 'request entity too large', 'expected': 107707, 'length': 107707, 'limit': 102400, 'type': 'entity.too.large'}, 'title': 'Error'}
Here is my main config:
user www-data;
worker_processes auto;
pid /run/nginx.pid;
include /etc/nginx/modules-enabled/*.conf;
events {
worker_connections 768;
# multi_accept on;
}
http {
##
# Basic Settings
##
sendfile on;
tcp_nopush on;
tcp_nodelay on;
keepalive_timeout 65;
types_hash_max_size 2048;
# server_tokens off;
# server_names_hash_bucket_size 64;
# server_name_in_redirect off;
client_max_body_size 1000M;
include /etc/nginx/mime.types;
default_type application/octet-stream;
##
# SSL Settings
##
ssl_session_timeout 1d;
ssl_session_cache shared:SSL:1m;
ssl_session_tickets off;
ssl_protocols TLSv1.2 TLSv1.3;
ssl_ciphers ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384;
ssl_prefer_server_ciphers off;
# OCSP stapling
ssl_stapling on;
ssl_stapling_verify on;
# Cloudflare OCSP DNS resolvers
resolver 1.1.1.1 1.0.0.1;
##
# Logging Settings
##
access_log /var/log/nginx/access.log;
error_log /var/log/nginx/error.log;
##
# Gzip Settings
##
gzip on;
# gzip_vary on;
# gzip_proxied any;
# gzip_comp_level 6;
# gzip_buffers 16 8k;
# gzip_http_version 1.1;
# gzip_types text/plain text/css application/json application/javascript text/xml application/xml application/xml+rss text/javascript;
##
# Virtual Host Configs
##
include /etc/nginx/conf.d/*.conf;
include /etc/nginx/sites-enabled/*;
}
Here is my virtual host config:
server {
listen 443 ssl http2;
server_name example.com;
client_max_body_size 1000M;
# ssl certificates
ssl_certificate /etc/letsencrypt/live/example.com/fullchain.pem;
ssl_certificate_key /etc/letsencrypt/live/example.com/privkey.pem;
ssl_trusted_certificate /etc/letsencrypt/live/example.com/chain.pem;
# Strict Transport Security (HSTS)
add_header Strict-Transport-Security "max-age=63072000" always;
location / {
root /var/www/example;
try_files $uri $uri/ /index.html;
}
location /api {
client_max_body_size 1000M;
proxy_pass http://localhost:3000;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection 'upgrade';
proxy_set_header Host $host;
proxy_cache_bypass $http_upgrade;
}
}
Turns out this was actually related to NodeJS Express settings. I updated the following lines in my app.js file to include the limit and this fixed the issue:
app.use(express.json({ limit: "1000mb", extended: true }));
app.use(express.urlencoded({ limit: "1000mb", extended: true }));
I had a Node JS server running with Express, that is being used as a web server. It connects to my database to run queries for the end user.
I have a VPS set up on Digital Ocean, with a Node App running on port 3000. When I access the Node app on ip:3000 it runs fine and as fast as to be expected. If I set up a reverse proxy with nginx, or a firewall rule that forwards traffic from port 80 to port 3000, parts of the page seem to run extremely slowly, or not at all. I can't seem to find a link as to why, as some of the database queries run fine, but some don't load at all and cause the page to hang. If I access the site using port 3000, the site still continues to run fine, even with nginx running. It's only the access from port 80 that is slow.
My NGINX conf is:
user www-data;
worker_processes auto;
pid /run/nginx.pid;
include /etc/nginx/modules-enabled/*.conf;
events {
worker_connections 768;
# multi_accept on;
}
http {
##
# Basic Settings
##
sendfile on;
tcp_nopush on;
tcp_nodelay on;
keepalive_timeout 65;
types_hash_max_size 2048;
# server_tokens off;
# server_names_hash_bucket_size 64;
# server_name_in_redirect off;
include /etc/nginx/mime.types;
default_type application/octet-stream;
##
# SSL Settings
##
ssl_protocols TLSv1 TLSv1.1 TLSv1.2; # Dropping SSLv3, ref: POODLE
ssl_prefer_server_ciphers on;
##
# Logging Settings
##
access_log /var/log/nginx/access.log;
error_log /var/log/nginx/error.log;
##
# Gzip Settings
# server_names_hash_bucket_size 64;
# server_name_in_redirect off;
include /etc/nginx/mime.types;
default_type application/octet-stream;
##
# SSL Settings
##
ssl_protocols TLSv1 TLSv1.1 TLSv1.2; # Dropping SSLv3, ref: POODLE
ssl_prefer_server_ciphers on;
##
# Logging Settings
##
access_log /var/log/nginx/access.log;
error_log /var/log/nginx/error.log;
##
# Gzip Settings
##
gzip on;
# gzip_vary on;
# gzip_proxied any;
# gzip_comp_level 6;
# gzip_buffers 16 8k;
# gzip_http_version 1.1;
# gzip_types text/plain text/css application/json application/javascript text/xml application/xml application/$
##
# Virtual Host Configs
##
server_names_hash_bucket_size 64;
include /etc/nginx/conf.d/*.conf;
include /etc/nginx/sites-enabled/*;
}
#mail {
# # See sample authentication script at:
# # http://wiki.nginx.org/ImapAuthenticateWithApachePhpScript
#
# # auth_http localhost/auth.php;
# # pop3_capabilities "TOP" "USER";
# # imap_capabilities "IMAP4rev1" "UIDPLUS";
#
# server {
# listen localhost:110;
# protocol pop3;
# proxy on;
# }
#
# server {
# listen localhost:143;
# protocol imap;
# proxy on;
# }
#}
My example.com file is (where 'example.com' is my site address):
server {
listen 80;
listen [::]:80;
root /var/www/example.com/html;
index index.html index.htm index.nginx-debian.html;
server_name example.com www.example.com;
location / {
proxy_pass http://localhost:3000;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection 'upgrade';
proxy_set_header Host $host;
proxy_cache_bypass $http_upgrade;
}
}
I recommend using PM2 to start instance of your node app in production https://github.com/Unitech/pm2
Try following NGINX configurations
upstream prod_nodejs_upstream {
server 127.0.0.1:3000;
keepalive 64;
}
server {
listen 80;
server_name example.com;
root /home/www/example;
location / {
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Host $http_host;
proxy_set_header X-NginX-Proxy true;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_max_temp_file_size 0;
proxy_pass http://prod_nodejs_upstream/;
proxy_redirect off;
proxy_read_timeout 240s;
}
}
Once these changes are applied you must validate the configuration with sudo nginx -t and then restart NGINX with sudo systemctl restart nginx
Please update the configuration as below and share the output of the log file so that the time taken by the upstream can be measured
upstream prod_nodejs_upstream {
server 127.0.0.1:3000;
keepalive 64;
}
server {
listen 80;
server_name example.com;
root /home/www/example;
location / {
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Host $http_host;
proxy_set_header X-NginX-Proxy true;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_max_temp_file_size 0;
proxy_pass http://prod_nodejs_upstream/;
proxy_redirect off;
proxy_read_timeout 240s;
}
log_format apm '"$time_local" client=$remote_addr '
'method=$request_method request="$request" '
'request_length=$request_length '
'status=$status bytes_sent=$bytes_sent '
'body_bytes_sent=$body_bytes_sent '
'referer=$http_referer '
'user_agent="$http_user_agent" '
'upstream_addr=$upstream_addr '
'upstream_status=$upstream_status '
'request_time=$request_time '
'upstream_response_time=$upstream_response_time '
'upstream_connect_time=$upstream_connect_time '
'upstream_header_time=$upstream_header_time';
}
I am using a nodejs app behind nginx. This application serves protected images. I use nodejs to authenticate the users, and then issue an x-accel-redirect to an internal location to serve the image. This part is working fine.
Client requests route /auth/images/products/XYZ.jpg
Nodejs receives this request, checks authorization and then passes the x-accel-redirect header to the internal route /images/products/XYZ.jpg, which nginx then serves.
What is not working is that I would like nginx to issue a 304 response based on an etag comparison. All I ever get are 200 responses.
My nginx config is below. Please let me know what I might need to change to get the correct 304 response. Thanks in advance.
# set up upstream nodejs server
upstream nodejs {
server 127.0.0.1:4000;
keepalive 8;
}
# forward http to https
server {
listen 80 default_server; # default handler for this port
server_name domain.com;
#access_log /home/nodejs/logs/nginx.log;
#error_log /home/nodejs/logs/nginx_error.log;
return 301 https://$server_name$request_uri;
}
# main server
server {
listen 443 default_server; # default handler for this port
server_name domain.com;
# security settings
ssl on;
ssl_prefer_server_ciphers on;
ssl_protocols TLSv1 TLSv1.1 TLSv1.2;
ssl_ciphers HIGH:!RC4:!3DES:!aDSS:!aNULL:!kPSK:!kSRP:!MD5:#STRENGTH:+SHA1:+kRSA;
ssl_session_cache shared:TLSSL:16m;
ssl_session_timeout 10m;
ssl_certificate /home/nodejs/project/ssl/dsa_bundle.crt;
ssl_certificate_key /home/nodejs/project/ssl/dsa.key;
ssl_stapling on; # selfsigned=off
ssl_stapling_verify on; # selfsigned=off
add_header Strict-Transport-Security max-age=63072000;
add_header X-Frame-Options DENY;
add_header X-Content-Type-Options nosniff;
resolver 8.8.4.4 8.8.8.8 valid=300s;
resolver_timeout 10s;
# other config
charset UTF-8;
include /etc/nginx/mime.types;
default_type application/octet-stream;
# use sendfile when possible
sendfile on;
tcp_nopush on;
tcp_nodelay on;
# timeouts
client_body_timeout 12;
client_header_timeout 12;
keepalive_timeout 5;
send_timeout 10;
#buffers
client_body_buffer_size 100K;
client_header_buffer_size 1k;
client_max_body_size 10m;
large_client_header_buffers 2 1k;
#compression
gzip on;
gzip_vary on;
gzip_min_length 10240;
gzip_buffers 10240 32k;
gzip_comp_level 4;
gzip_proxied any;
gzip_types text/plain text/css text/x-component
text/xml application/xml application/xhtml+xml application/json
application/atom+xml
text/javascript application/javascript application/x-javascript
application/rtf application/msword
application/vnd.ms-powerpoint application/vnd.ms-excel
application/vnd.ms-fontobject application/vnd.wap.wml
application/x-font-ttf application/x-font-opentype;
# logging
access_log /home/nodejs/logs/nginx.log;
error_log /home/nodejs/logs/nginx_error.log;
location ~ ^\/images\/products\/(.*)\.jpg$ {
root /home/nodejs/project/private;
try_files $uri /images/products/Coming_Soon.jpg =404;
#error_page 404 /images/products/Coming_Soon_sm.png;
error_log /dev/null crit;
access_log on;
etag on;
add_header Etag $upstream_http_etag;
expires 24h;
add_header Pragma "public";
add_header Cache-Control "public, must-revalidate, proxy-revalidate";
internal;
}
# unprotected static content
location ~ ^/(admin/assets/|admin/includes/|admin/js/|images/|fonts/|css/|img/|js/|lib/|views/) {
root /home/nodejs/project/public;
# error_page 404 /something_here??.png;
access_log off;
expires 24h;
add_header Pragma "public";
add_header Cache-Control "public, must-revalidate, proxy-revalidate";
}
# Set up node gateway for maintenance
location / {
# if maintenance page exists use that first
root /home/nodejs/project/public;
try_files /maintenance.html #proxy;
access_log on;
}
# set up node proxy
location #proxy {
proxy_pass http://nodejs; # can work with http internally. It's faster
proxy_redirect off;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Host $http_host;
proxy_set_header X-NginX-Proxy true;
proxy_connect_timeout 60s;
proxy_send_timeout 60s;
proxy_read_timeout 60s;
send_timeout 60s;
access_log on;
}
}
Your nodejs application needs to return the Status: 304 Not Modified header, then Nginx will return it for you.
I am trying to config nginx with nodejs ( sails.js framework ).
Nginx listen requests on port 80 and pass to 8080. All the request work fine ( all is post ), except the upload file post request.
Below is my nginx config file :
events {
worker_connections 768;
# multi_accept on;
}
http {
##
# Basic Settings
##
sendfile on;
tcp_nopush on;
tcp_nodelay on;
keepalive_timeout 65;
types_hash_max_size 2048;
# server_tokens off
upstream node {
# One failed response will take a server out of circulation for 20 seconds.
server localhost:8080 fail_timeout=20s;
keepalive 512;
}
server {
listen 80 default_server;
listen 8191;
listen 443 ssl;
ssl on;
ssl_certificate /home/ubuntu/APP/cert.pem;
ssl_certificate_key /home/ubuntu/APP/key.pem;
server_name localhost;
location / {
proxy_pass https://localhost:8080;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection 'upgrade';
proxy_set_header Host $host;
proxy_cache_bypass $http_upgrade;
# define buffers, necessary for proper communication to prevent 502s
proxy_buffer_size 128k;
proxy_buffers 4 256k;
proxy_busy_buffers_size 256k;
}
}
# server_names_hash_bucket_size 64;
# server_name_in_redirect off;
include /etc/nginx/mime.types;
default_type application/octet-stream;
##
# Logging Settings
##
access_log /var/log/nginx/access.log;
error_log /var/log/nginx/error.log;
##
# Gzip Settings
##
gzip on;
gzip_disable "msie6";
# gzip_vary on;
# gzip_proxied any;
# gzip_comp_level 6;
# gzip_buffers 16 8k;
# gzip_http_version 1.1;
# gzip_types text/plain text/css application/json application/x-javascript text/xml application/xml application/xml+rss text/javascript;
##
# nginx-naxsi config
##
# Uncomment it if you installed nginx-naxsi
##
#include /etc/nginx/naxsi_core.rules;
##
# nginx-passenger config
##
# Uncomment it if you installed nginx-passenger
##
#passenger_root /usr;
#passenger_ruby /usr/bin/ruby;
##
# Virtual Host Configs
##
include /etc/nginx/conf.d/*.conf;
include /etc/nginx/sites-enabled/*;
}
Have you tried uncommenting these lines?
#passenger_root /usr;
#passenger_ruby /usr/bin/ruby;
I get the following error when I try to upload files to my node.js based web app:
2014/05/20 04:30:20 [error] 31070#0: *5 upstream prematurely closed connection while reading response header from upstream, client: ... [clipped]
I'm using a front-end proxy here:
upstream app_mywebsite {
server 127.0.0.1:3000;
}
server {
listen 0.0.0.0:80;
server_name {{ MY IP}} mywebsite;
access_log /var/log/nginx/mywebsite.log;
# pass the request to the node.js server with the correct headers and much more can be added, see nginx config options
location / {
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Host $http_host;
proxy_set_header X-NginX-Proxy true;
proxy_pass http://app_mywebsite;
proxy_redirect off;
# web socket support
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
}
}
This is my nginx.conf file:
user www-data;
worker_processes 4;
pid /run/nginx.pid;
events {
worker_connections 2048;
multi_accept on;
}
http {
##
# Basic Settings
##
sendfile on;
tcp_nopush on;
tcp_nodelay on;
keepalive_timeout 20;
types_hash_max_size 2048;
# server_tokens off;
# server_names_hash_bucket_size 64;
# server_name_in_redirect off;
include /etc/nginx/mime.types;
# default_type application/octet-stream;
default_type text/html;
charset UTF-8;
##
# Logging Settings
##
access_log /var/log/nginx/access.log;
error_log /var/log/nginx/error.log;
##
# Gzip Settings
##
gzip on;
gzip_disable "msie6";
gzip_vary on;
gzip_proxied any;
gzip_min_length 256;
gzip_comp_level 5;
gzip_buffers 16 8k;
gzip_http_version 1.1;
gzip_types text/plain text/css application/json application/x-javascript text/xml application/xml application/xml+rss text/javascript;
##
# nginx-naxsi config
##
# Uncomment it if you installed nginx-naxsi
##
#include /etc/nginx/naxsi_core.rules;
##
# nginx-passenger config
##
# Uncomment it if you installed nginx-passenger
##
#passenger_root /usr;
#passenger_ruby /usr/bin/ruby;
##
# Virtual Host Configs
##
include /etc/nginx/conf.d/*.conf;
include /etc/nginx/sites-enabled/*;
}
Any idea on how to better debug this? The things I've found haven't really worked (e.g. removing the trailing slash from my proxy_pass).
Try adding the following to your server{} block, I was able to solve an Nginx reverse proxy issue by defining these proxy attributes:
# define buffers, necessary for proper communication to prevent 502s
proxy_buffer_size 128k;
proxy_buffers 4 256k;
proxy_busy_buffers_size 256k;
The issue may be caused by PM2. If you're enabled watching, the app will restart on every single file change(and new uploads too). The solution could be disabling watching completely or adding the uploads folder to ignore list.
More: https://pm2.keymetrics.io/docs/usage/watch-and-restart/
So in the end I ended up changing in my keepalive from 20 to 64 and it seems to handle large files fine now. The bummer about it is that I re-wrote from scratch the image upload library I was using node-imager, but at least I learned something from it.
server {
location / {
keepalive 64;
}
}
Try adding the following below to the http section of your /etc/nginx/nginx.conf:
fastcgi_read_timeout 400s;
and restart nginx.
Futher reading: nginx docs
Try this:
client_max_body_size - Maximum uploadable file size
http {
send_timeout 10m;
client_header_timeout 10m;
client_body_timeout 10m;
client_max_body_size 100m;
large_client_header_buffers 8 32k;
}
and server section:
server {
location / {
proxy_buffer_size 32k;
}
}
large_client_header_buffers 8 32k and proxy_buffer_size 32k
- is enough for most scripts, but you can try 64k, 128k, 256k...
(sorry, im not english speaking) =)