How to serve multiple Node.js apps on a single DigitalOcean VPS? - node.js

Below is my /etc/nginx/sites-available/default file. I have added the two new location blocks with their respective localhost upstreams, but only the root one, pointing at localhost:8000, is being served by nginx. The other two links don't work.
Example
http://111.111.111.111 = Works
http://111.111.111.111/app1 = Doesn't work
http://111.111.111.111/app2 = Doesn't work
http://111.111.111.111:3000 = Doesn't work
http://111.111.111.111:4000 = Doesn't work
How do I fix the following file so that I can access the three Node apps running on three ports (3000, 4000 and 8000)? Thanks in advance for any help.
server {
listen 80 default_server;
listen [::]:80 default_server;
# SSL configuration
#
# listen 443 ssl default_server;
# listen [::]:443 ssl default_server;
#
# Note: You should disable gzip for SSL traffic.
# See: https://bugs.debian.org/773332
#
# Read up on ssl_ciphers to ensure a secure configuration.
# See: https://bugs.debian.org/765782
#
# Self signed certs generated by the ssl-cert package
# Don't use them in a production server!
#
# include snippets/snakeoil.conf;
# root /var/www/html;
# Add index.php to the list if you are using PHP
index index.html index.htm index.nginx-debian.html;
server_name _;
location /app1 {
proxy_pass http://localhost:4000;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
}
location /app2 {
proxy_pass http://localhost:3000;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection 'upgrade';
proxy_set_header Host $host;
proxy_cache_bypass $http_upgrade;
}
location / {
proxy_pass http://localhost:8000;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection 'upgrade';
proxy_set_header Host $host;
proxy_cache_bypass $http_upgrade;
}
# pass PHP scripts to FastCGI server
#
#location ~ \.php$ {
# include snippets/fastcgi-php.conf;
#
# # With php-fpm (or other unix sockets):
# fastcgi_pass unix:/var/run/php/php7.0-fpm.sock;
# # With php-cgi (or other tcp sockets):
# fastcgi_pass 127.0.0.1:9000;
#}
# deny access to .htaccess files, if Apache's document root
# concurs with nginx's one
#
#location ~ /\.ht {
# deny all;
#}
}
Below is my /etc/nginx/nginx.conf file:
user www-data;
worker_processes auto;
pid /run/nginx.pid;
include /etc/nginx/modules-enabled/*.conf;
events {
worker_connections 768;
# multi_accept on;
}
http {
##
# Basic Settings
##
sendfile on;
tcp_nopush on;
tcp_nodelay on;
keepalive_timeout 65;
types_hash_max_size 2048;
# server_tokens off;
server_names_hash_bucket_size 64;
# server_name_in_redirect off;
include /etc/nginx/mime.types;
default_type application/octet-stream;
##
# SSL Settings
##
ssl_protocols TLSv1 TLSv1.1 TLSv1.2; # Dropping SSLv3, ref: POODLE
ssl_prefer_server_ciphers on;
##
# Logging Settings
##
access_log /var/log/nginx/access.log;
error_log /var/log/nginx/error.log;
##
# Gzip Settings
##
gzip on;
# gzip_vary on;
# gzip_proxied any;
# gzip_comp_level 6;
# gzip_buffers 16 8k;
# gzip_http_version 1.1;
# gzip_types text/plain text/css application/json application/javascript text/xml application/xml application/xml+rss text/javascript;
##
# Virtual Host Configs
##
include /etc/nginx/conf.d/*.conf;
include /etc/nginx/sites-enabled/*;
}
#mail {
# # See sample authentication script at:
# # http://wiki.nginx.org/ImapAuthenticateWithApachePhpScript
#
# # auth_http localhost/auth.php;
# # pop3_capabilities "TOP" "USER";
# # imap_capabilities "IMAP4rev1" "UIDPLUS";
#
# server {
# listen localhost:110;
# protocol pop3;
# proxy on;
# }
#
# server {
# listen localhost:143;
# protocol imap;
# proxy on;
# }
#}

For a full configuration, check out my answer to Configuring Load Balancer to Route to different pages of instance?. You don't need root /var/www/html; since you aren't serving a static HTML page.
You need the proper forwarding headers:
location /app1 {
proxy_pass http://localhost:4000;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
}
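On the Node side, those X-Forwarded-* headers are only useful if the app reads them. Below is a minimal sketch of what the app behind /app1 could look like; it assumes Express (the question doesn't say which framework is used), and the port and path come from the config above:
// app1.js - hypothetical Express app sitting behind the /app1 location
const express = require('express');
const app = express();

// Trust the nginx proxy so req.ip and req.protocol use X-Forwarded-For / X-Forwarded-Proto
app.set('trust proxy', true);

// proxy_pass has no URI part, so nginx forwards the original /app1 path unchanged
app.get('/app1', (req, res) => {
  res.json({ clientIp: req.ip, protocol: req.protocol, host: req.get('host') });
});

app.listen(4000, '127.0.0.1', () => console.log('app1 listening on port 4000'));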
You should also use 127.0.0.1 rather than localhost, and create an upstream block for each Node.js server that nginx is acting as a reverse proxy for:
upstream root {
server 127.0.0.1:8000;
keepalive 256;
}
upstream app1 {
server 127.0.0.1:4000;
keepalive 256;
}
upstream app2 {
server 127.0.0.1:3000;
keepalive 256;
}
server {
listen 80 default_server;
location / {
proxy_pass http://root;
proxy_pass_header Access-Control-Allow-Origin;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header Host $http_host;
proxy_set_header X-NginX-Proxy true;
proxy_pass_header Set-Cookie;
proxy_pass_header X-UA-Compatible;
proxy_pass_header Server;
proxy_buffers 64 16k;
proxy_buffer_size 16k;
proxy_busy_buffers_size 64k;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection $http_connection;
proxy_read_timeout 10;
proxy_redirect off;
}
location /app1 {
proxy_pass http://app1;
proxy_pass_header Access-Control-Allow-Origin;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header Host $http_host;
proxy_set_header X-NginX-Proxy true;
proxy_pass_header Set-Cookie;
proxy_pass_header X-UA-Compatible;
proxy_pass_header Server;
proxy_buffers 64 16k;
proxy_buffer_size 16k;
proxy_busy_buffers_size 64k;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection $http_connection;
proxy_read_timeout 10;
proxy_redirect off;
}
location /app2 {
proxy_pass http://app2;
proxy_pass_header Access-Control-Allow-Origin;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header Host $http_host;
proxy_set_header X-NginX-Proxy true;
proxy_pass_header Set-Cookie;
proxy_pass_header X-UA-Compatible;
proxy_pass_header Server;
proxy_buffers 64 16k;
proxy_buffer_size 16k;
proxy_busy_buffers_size 64k;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection $http_connection;
proxy_read_timeout 10;
proxy_redirect off;
}
}
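One detail worth calling out in this layout: because proxy_pass http://app1; has no URI part, nginx forwards the full original path, so a request to /app1/status reaches the Node app as /app1/status, not /status. The app therefore needs to mount its routes under that prefix. A minimal sketch of that, again assuming Express (illustrative only, not part of the original answer):
// Mount all app1 routes under the /app1 prefix that nginx passes through untouched
const express = require('express');
const app = express();
const router = express.Router();

router.get('/', (req, res) => res.send('app1 home'));
router.get('/status', (req, res) => res.json({ ok: true }));

// Requests arrive as /app1/... so the router is mounted at /app1
app.use('/app1', router);

app.listen(4000, '127.0.0.1');
Alternatively, the prefix can be stripped on the nginx side (for example with location /app1/ and a trailing slash on proxy_pass), in which case the app sees paths relative to /.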

Related

Nginx the event was not signaled for 5s

I am hosting my web application on an NGINX server. Until now it worked fine, but I don't know why I am getting the errors shown in the image below.
I don't know why these errors occur, but by trial and error I assumed my SSL certificate had expired, so I updated it. The same errors were repeated. I also checked my conf.d file, and I'm not sure everything there is good.
Here is my conf file
worker_processes 1;
events {
worker_connections 1024;
}
http {
include mime.types;
default_type application/octet-stream;
sendfile on;
send_timeout 100s;
keepalive_timeout 95;
#ssl_session_cache shared:SSL:10m;
#ssl_session_timeout 10m;
client_body_in_file_only clean;
client_body_buffer_size 32K;
client_max_body_size 300M;
server {
listen 80;
listen 443 ssl;
server_name sample.com;
ssl_certificate ..\ssl\mbxxxx.crt;
ssl_certificate_key ..\ssl\mbkey.pem;
ssl_protocols TLSv1 TLSv1.1 TLSv1.2;
location / {
proxy_http_version 1.1;
client_max_body_size 300M;
proxy_read_timeout 300s;
proxy_connect_timeout 95s;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Host $host;
proxy_cache_bypass $http_upgrade;
proxy_set_header X-Real-IP $http_referer;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header content-type "application/json";
proxy_set_header X-NginX-Proxy true;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header REMOTE_ADDR $remote_addr;
proxy_set_header Access-Control-Allow-Origin *;
proxy_set_header Connection 'upgrade';
proxy_pass http://127.0.0.1:xxxx;
}
location /api {
proxy_http_version 1.1;
client_max_body_size 300M;
proxy_read_timeout 300s;
proxy_connect_timeout 95s;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Host $host;
proxy_cache_bypass $http_upgrade;
proxy_set_header X-Real-IP $http_referer;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header content-type "application/json";
proxy_set_header X-NginX-Proxy true;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header REMOTE_ADDR $remote_addr;
proxy_set_header Access-Control-Allow-Origin *;
proxy_set_header Connection 'upgrade';
proxy_pass http://127.0.0.1:xxxx;
}
error_page 405 =200 $uri;
# error_page 500 502 503 504 /50x.html;
#location = /50x.html {
#root html;
#}
}
}
And there are no CORS restrictions. Any suggestions and reference docs would be a great help.
I also don't know whether this question serves my request or not.
Thanks in advance.
While researching how to solve this issue, I found an answer saying that I have to remove the passphrase from the SSL certificate. I didn't get it. So what I did was update the SSL certificate and then run my application, but it didn't succeed. Then I thought nginx should be restarted after updating the SSL certificate. Surprisingly, after restarting nginx, it worked fine.
You can specify the passphrase in a text file and reference it via the ssl_password_file directive. Something like this:
listen 3001 ssl;
ssl_certificate cert.pem;
ssl_certificate_key key.pem;
ssl_password_file pass.txt;

Node.js & nginx slow every 5-20 requests

I have a Node.js and nginx setup on an Ubuntu 16 server.
It all works, but sometimes when I request my service, the request takes up to 1 minute.
Can someone see if there is anything I'm doing wrong?
server {
listen 443 ssl;
ssl_certificate /etc/letsencrypt/live/domain/fullchain.pem;
ssl_certificate_key /etc/letsencrypt/live/domain/privkey.pem;
ssl_protocols TLSv1 TLSv1.1 TLSv1.2;
ssl_ciphers ECDHE-RSA-AES256-SHA384:AES256-SHA256:RC4:HIGH:!MD5:!aNULL:!eNULL:!NULL:!DH:!EDH:!AESGCM;
ssl_prefer_server_ciphers on;
ssl_session_cache shared:SSL:10m;
ssl_session_timeout 10m;
server_name domain;
location / {
proxy_pass http://my-nat-ip:4000;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection 'upgrade';
proxy_set_header Host $host;
proxy_cache_bypass $http_upgrade;
proxy_connect_timeout 300;
proxy_send_timeout 300;
proxy_read_timeout 300;
send_timeout 300;
proxy_buffers 32 4m;
proxy_busy_buffers_size 25m;
proxy_buffer_size 512k;
proxy_ignore_headers "Cache-Control" "Expires";
proxy_max_temp_file_size 0;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
client_max_body_size 1024m;
client_body_buffer_size 4m;
proxy_intercept_errors off;
proxy_buffering off;
}
}
There is a problem with your Node code.
Unfortunately, you didn't include it, so it's impossible to tell you where.
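If the Node side is the suspect, one common cause of requests that intermittently take very long is a blocked event loop: a single request doing synchronous or CPU-heavy work stalls everything queued behind it. A small, self-contained lag monitor like the sketch below (purely illustrative, not something from this answer) can help confirm or rule that out:
// eventloop-lag.js - warn whenever the event loop is blocked noticeably
const INTERVAL_MS = 500;    // how often the timer is scheduled to fire
const THRESHOLD_MS = 200;   // log anything delayed beyond this (illustrative value)

let last = Date.now();
setInterval(() => {
  const now = Date.now();
  const lag = now - last - INTERVAL_MS; // extra delay beyond the scheduled interval
  if (lag > THRESHOLD_MS) {
    console.warn(`event loop blocked for ~${lag}ms at ${new Date(now).toISOString()}`);
  }
  last = now;
}, INTERVAL_MS);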

Nginx Reverse Proxy Subdomain for Node Applications

I am quite new to NGINX configuration. I have looked through many posts looking for someone else who ran into this problem and found a working solution.
I have 2 node.js web-applications that will run on 2 separate ports (81, 82 for example).
My initial intention was to use a reverse proxy so that visitors could hit the same physical box, but be served different content based on the domain they used.
I was successful in having the two separate sites render based on their domain. One of the applications has a subdomain tied to it (app.exampleb.com), and it seems that any time I try to access that subdomain, nginx serves me the default page stating that I have configured the server successfully... Thanks, nginx.
I have placed the following in my nginx.conf file:
http {
##
# Basic Settings
##
sendfile on;
tcp_nopush on;
tcp_nodelay on;
keepalive_timeout 65;
types_hash_max_size 2048;
# server_tokens off;
# server_names_hash_bucket_size 64;
# server_name_in_redirect off;
include /etc/nginx/mime.types;
default_type application/octet-stream;
##
# Logging Settings
##
access_log /var/log/nginx/access.log;
error_log /var/log/nginx/error.log;
##
# Gzip Settings
##
gzip on;
gzip_disable "msie6";
# gzip_vary on;
# gzip_proxied any;
# gzip_comp_level 6;
# gzip_buffers 16 8k;
# gzip_http_version 1.1;
# gzip_types text/plain text/css application/json application/x-javascript text/xml application/xml application/xml+rss text/javascript;
##
# nginx-naxsi config
##
# Uncomment it if you installed nginx-naxsi
##
#include /etc/nginx/naxsi_core.rules;
##
# nginx-passenger config
##
# Uncomment it if you installed nginx-passenger
##
#passenger_root /usr;
#passenger_ruby /usr/bin/ruby;
server {
listen 80;
server_name examplea.com;
location / {
proxy_pass http://localhost:81;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection 'upgrade';
proxy_set_header Host $host;
proxy_cache_bypass $http_upgrade;
}
}
server {
listen 80;
server_name app.examplea.com;
location / {
proxy_pass http://localhost:81;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection 'upgrade';
proxy_set_header Host $host;
proxy_cache_bypass $http_upgrade;
}
}
server {
listen 80;
server_name exampleb.com;
location / {
proxy_pass http://localhost:82;
}
}
##
# Virtual Host Configs
##
include /etc/nginx/conf.d/*.conf;
include /etc/nginx/sites-enabled/*;
}
The full config should be this:
server {
listen 80;
server_name example.com;
location / {
proxy_pass http://localhost:81;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection 'upgrade';
proxy_set_header Host $host;
proxy_cache_bypass $http_upgrade;
}
}
Place it inside the http {} block in nginx.conf, and remember to restart nginx afterwards.
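On the application side, it also helps to bind each Node app to the loopback interface so it is only reachable through nginx and not directly on its port. A minimal sketch using the plain http module (port 81 is taken from the question; note that ports below 1024 normally require elevated privileges):
// examplea.js - hypothetical app behind the examplea.com server block
const http = require('http');

const server = http.createServer((req, res) => {
  // proxy_set_header Host $host forwards the original domain, so this shows which one was hit
  res.end(`served for ${req.headers.host}\n`);
});

// Bind to 127.0.0.1 so the app can only be reached via the nginx reverse proxy
server.listen(81, '127.0.0.1', () => console.log('listening on 127.0.0.1:81'));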

Nginx not proxying websockets (using socket.io and nodejs)

I have a nodejs application running on port 8000 and the following nginx configs to proxy my requests (8800 for http and 8443 for https):
http {
include mime.types;
default_type application/octet-stream;
sendfile on;
keepalive_timeout 65;
gzip on;
upstream node {
server 127.0.0.1:8000;
}
map $http_upgrade $connection_upgrade {
default upgrade;
'' close;
}
server {
listen 8800;
access_log /usr/local/etc/nginx/access.log;
error_log /usr/local/etc/nginx/error.log;
rewrite_log on;
location / {
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Host $http_host;
proxy_set_header X-NginX-Proxy true;
proxy_pass http://node;
proxy_redirect off;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection $connection_upgrade;
}
}
server {
listen 8443;
ssl on;
ssl_certificate /usr/local/etc/nginx/server.crt;
ssl_certificate_key /usr/local/etc/nginx/server.key;
ssl_session_timeout 5m;
ssl_protocols SSLv2 SSLv3 TLSv1;
ssl_ciphers HIGH:!aNULL:!MD5;
ssl_prefer_server_ciphers on;
access_log /usr/local/etc/nginx/access.log;
error_log /usr/local/etc/nginx/error.log;
rewrite_log on;
location / {
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Host $http_host;
proxy_set_header X-NginX-Proxy true;
proxy_pass http://node;
proxy_redirect off;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection $connection_upgrade;
}
}
}
When I try to run my application at http://localhost:8800 and check Chrome Dev Tools, the websocket request (ws://localhost) returns a successful '101 Switching Protocols'. But the same request (wss://localhost) doesn't work when I run it over https://localhost (with self-signed SSL certs).
Also, when I try to debug the request on nginx with
curl -i -N -H "Connection: Upgrade" -H "Upgrade: websocket" http://localhost:8800/
or
curl -i -N -k -H "Connection: Upgrade" -H "Upgrade: websocket" https://localhost:8443/
the response is 502 Bad Gateway.
I am trying to figure out why nginx isn't proxying the request properly.
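For reference, a 502 from nginx generally means it could not get a valid response from the upstream (127.0.0.1:8000 here), so the first thing to confirm is what is actually listening on that port. A minimal socket.io server compatible with this proxy setup might look like the following sketch (illustrative only; the question does not include the Node side):
// server.js - minimal socket.io app listening where the nginx upstream points
const http = require('http');

const server = http.createServer((req, res) => {
  res.writeHead(200, { 'Content-Type': 'text/plain' });
  res.end('http ok\n');
});

const io = require('socket.io')(server);
io.on('connection', (socket) => {
  socket.emit('greeting', 'connected through nginx');
  socket.on('disconnect', () => console.log('client disconnected'));
});

server.listen(8000, '127.0.0.1', () => console.log('listening on 127.0.0.1:8000'));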

Getting 404 on Nginx on EC2 and meteor

I am having a weird issue with nginx throwing a 404 error on JavaScript files.
See it here:
http://ec2-54-85-163-197.compute-1.amazonaws.com/
Because the Meteor application throws a 404, press Escape and view the source of the page; Meteor has JavaScript that redirects the entire page if JavaScript fails to load (a complete fail by Meteor, IMHO).
You will find that I am properly proxying from port 3000, where Node is running, to port 80, as shown by the rendering of the main page.
If you go to http://ec2-54-85-163-197.compute-1.amazonaws.com:3000/, you will see the hello world render just fine.
My /etc/nginx/nginx.conf is the default conf file.
Here is the file /etc/nginx/sites-available/ec2-54-85-163-197.compute-1.amazonaws.com
server {
listen 80;
server_name ec2-54-85-163-197.compute-1.amazonaws.com;
root /home/meteor/can_i_help_you/can_i_help_you;
access_log "/home/ubuntu/logs/access.log";
error_log "/home/ubuntu/logs/error.log" error;
charset utf-8;
default_type application/octet-stream;
sendfile on;
# would be awesome if your mobile-app can utilize keep-alives!
keepalive_timeout 65;
# enable gzip
gzip on;
gzip_comp_level 6;
gzip_vary on;
gzip_min_length 1000;
gzip_proxied any;
gzip_buffers 16 8k;
# we only gzip these mime-types (since there's no use to gzip jpegs)
gzip_types text/plain text/css application/json application/x-javascript text/xml application/xml application/xml+rss text/javascript;
# tell-client to cache all 'assets'
location ~* \.(?:jpg|jpeg|gif|png|ico|gz|svg|svgz|mp4|ogg|ogv|webm)$ {
expires 1M;
access_log off;
add_header Cache-Control "public";
}
# disable logging for some `common` files
# Disable logging for favicon
location = /favicon.ico {
log_not_found off;
access_log off;
}
# Disable logging for robots.txt
location = /robots.txt {
allow all;
log_not_found off;
access_log off;
}
# Prevent clients from accessing hidden files (starting with a dot)
location ~* (^|/)\. {
return 403;
}
# Prevent clients from accessing to backup/config/source files
location ~* (\.(bak|config|sql|fla|psd|ini|log|sh|inc|swp|dist)|~)$ {
return 403;
}
# reverse-proxy here, if your have multiple machine/cores would be better to use UPSTREAM so nginx can load-balance requests
#try_files $uri $uri/ #silly;
location / {
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_redirect off;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header Host $http_host;
proxy_set_header X-NginX-Proxy true;
proxy_set_header Connection "";
proxy_pass http://ec2-54-85-163-197.compute-1.amazonaws.com/;
}
#location / {
# proxy_set_header X-Real-IP $remote_addr;
# proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
# proxy_set_header Host $http_host;
# proxy_set_header X-NginX-Proxy true;
#
# proxy_pass http://ec2-54-85-163-197.compute-1.amazonaws.com:3000/;
# proxy_redirect off;
#}
}
