Nginx reverse proxy serving a Node.js app's static files - node.js

I have a Laravel application in which one route, /onlineAds, should take me to another application (an SPA) built with Vue.js on the front end and Node.js on the back end. I'm trying to use Nginx as a reverse proxy to serve the SPA's static files, but without any success.
My conf is as follows:
/ => will be served from "C:/laragon/www/laravel_App/public/"
/onlineAds/(*) => will be served from "C:/laragon/www/VueNodeApp/dist/"
/api/(*) => will be proxied to the Node.js server
Here is what I tried to do with Nginx:
server {
listen 8080;
server_name domain.test *.domain.test;
root "C:/laragon/www/laravel_App/public/";
index index.html index.htm index.php;
location / {
try_files $uri $uri/ /index.php$is_args$args;
autoindex on;
}
location ~* ^\/onlineAds(.*)$ {
alias "C:/laragon/www/craiglist/dist/";
#try_files $uri $uri/ /index.html;
}
location ~* ^\/api(.*)$ {
proxy_set_header Host $host;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection 'upgrade';
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_read_timeout 300;
proxy_pass http://localhost:8081;
}
location ~ \.php$ {
include snippets/fastcgi-php.conf;
fastcgi_pass php_upstream;
#fastcgi_pass unix:/run/php/php7.0-fpm.sock;
}
charset utf-8;
location = /favicon.ico { access_log off; log_not_found off; }
location = /robots.txt { access_log off; log_not_found off; }
location ~ /\.ht {
deny all;
}
}
What am I doing wrong?

An alias statement within a regular expression location requires the full path to the file; see the nginx documentation for the alias directive for details.
For example:
location ~* ^\/onlineAds(.*)$ {
alias "C:/laragon/www/craiglist/dist$1";
if (!-e $request_filename) { rewrite ^ /onlineAds/index.html last; }
}
The use of try_files with alias is avoided here because of a long-standing nginx issue with that combination; see also the usual caution on the use of if ("If is Evil").
Assuming that the URI /js/foo.js could be located in C:/laragon/www/laravel_App/public/js/foo.js or C:/laragon/www/craiglist/dist/js/foo.js, you could ask Nginx to try both locations using try_files with a common root directory.
For example:
location /js/ {
root "C:/laragon/www";
try_files /laravel_App/public$uri /craiglist/dist$uri =404;
}
location /css/ {
root "C:/laragon/www";
try_files /laravel_App/public$uri /craiglist/dist$uri =404;
}
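If the SPA build ships more asset folders than just js and css (for example img or fonts, which are assumptions here), the same pattern can be collapsed into a single location. A minimal sketch of that idea, again assuming both directory trees live under C:/laragon/www:
location ~* ^/(js|css|img|fonts)/ {
    root "C:/laragon/www";
    # Try the Laravel public folder first, then the SPA build output.
    try_files /laravel_App/public$uri /craiglist/dist$uri =404;
}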

Related

I can't enable CORS in nginx

I can't enable CORS on my API gateway instance; this is how it looks:
Everything is installed on an nginx server under Ubuntu 20.04.
React front end: https://example.com
nginx:
server {
listen 80;
listen [::]:80;
server_name example.com www.example.com;
root /home/ubuntu/front;
index index.html index.htm;
location ~* \.(?:manifest|appcache|html?|xml|json)$ {
expires -1;
# access_log logs/static.log; # I don't usually include a static log
}
location ~* \.(?:css|js)$ {
try_files $uri =404;
expires 1y;
access_log off;
add_header Cache-Control "public";
}
# Any route containing a file extension (e.g. /devicesfile.js)
location ~ ^.+\..+$ {
try_files $uri =404;
}
# Any route that doesn't have a file extension (e.g. /devices)
location / {
try_files $uri $uri/ /index.html;
}
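# Note: this server-level return runs during the rewrite phase, before any
# location is matched, so the locations above never serve anything on port 80;
# every plain-HTTP request is simply redirected to HTTPS.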
return 301 https://example.com$request_uri;
}
server {
listen 443 ssl; # managed by Certbot
listen [::]:443 ssl;
ssl_certificate /etc/letsencrypt/live/xxx.com/fullchain.pem; # managed by Certbot
ssl_certificate_key /etc/letsencrypt/live/xxx.com/privkey.pem; # managed by Certbot
server_name example.com www.example.com;
root /home/ubuntu/front;
index index.html index.htm;
location ~* \.(?:manifest|appcache|html?|xml|json)$ {
expires -1;
# access_log logs/static.log; # I don't usually include a static log
}
location ~* \.(?:css|js)$ {
try_files $uri =404;
expires 1y;
access_log off;
add_header Cache-Control "public";
}
# Any route containing a file extension (e.g. /devicesfile.js)
location ~ ^.+\..+$ {
try_files $uri =404;
}
# Any route that doesn't have a file extension (e.g. /devices)
location / {
try_files $uri $uri/ /index.html;
}
}
Express back end: https://api.mydomain.com
Code added to the Express app:
app.use(cors());
nginx:
upstream api {
server xx.xx.xx.xx;
}
server {
server_name api.mydomain.com;
location / {
proxy_pass http://127.0.0.1:4001;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection 'upgrade';
proxy_set_header Host $host;
proxy_cache_bypass $http_upgrade;
proxy_connect_timeout 30;
proxy_send_timeout 30;
}
listen [::]:443 ssl ipv6only=on; # managed by Certbot
listen 443 ssl; # managed by Certbot
ssl_certificate /etc/letsencrypt/live/xx.xxxx.com/fullchain.pem; # managed by Certbot
ssl_certificate_key /etc/letsencrypt/live/xx.xxxx.com/privkey.pem; # managed by Certbot
}
MinIO object storage: https://minio.example.com
server {
listen 443 ssl;
server_name minio.example.com;
ssl_certificate /etc/minio/certs/public.crt;
ssl_certificate_key /etc/minio/certs/private.key;
# To allow special characters in headers
ignore_invalid_headers off;
# Allow any size file to be uploaded.
# Set to a value such as 1000m; to restrict file size to a specific value
client_max_body_size 1000m;
# To disable buffering
proxy_buffering off;
location / {
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header Host $http_host;
proxy_connect_timeout 300;
# Default is HTTP/1, keepalive is only enabled in HTTP/1.1
proxy_http_version 1.1;
proxy_set_header Connection "";
chunked_transfer_encoding off;
proxy_pass https://localhost:9000;
# Add the CORS access-control headers
#add_header 'Access-Control-Allow-Origin' '*' always;
#add_header 'Access-Control-Allow-Methods' 'GET, POST, OPTIONS' always;
#add_header 'Access-Control-Allow-Headers' 'Origin, X-Requested-With, Content-Type, Accept' always;
#add_header 'Access-Control-Allow-Credentials' 'true' always;
}
}
I can manage to make HTTP requests from example.com to api.example.com without getting CORS errors. But when I make a request from example.com to api.example.com, and api.example.com then calls minio.example.com (or any other API), I get a CORS error: "Access to XMLHttpRequest at https://api.example.com/upload from origin https://example.com has been blocked by CORS policy: No 'Access-Control-Allow-Origin' header is present on the requested resource."
Adding
proxy_set_header Access-Control-Allow-Origin "*";
to the proxy configuration solved the issue.
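For reference, the commented-out add_header lines in the MinIO block show the more conventional approach: the server the browser ultimately talks to returns the CORS response headers itself and answers preflight OPTIONS requests. A rough sketch of that pattern for the API server block, assuming the page's origin is https://example.com; this is an illustration, not the exact config that was deployed:
location / {
    # Answer CORS preflight requests directly, without hitting the Node app.
    if ($request_method = OPTIONS) {
        add_header 'Access-Control-Allow-Origin' 'https://example.com' always;
        add_header 'Access-Control-Allow-Methods' 'GET, POST, PUT, DELETE, OPTIONS' always;
        add_header 'Access-Control-Allow-Headers' 'Origin, X-Requested-With, Content-Type, Accept, Authorization' always;
        return 204;
    }
    # Attach the header to normal responses as well.
    add_header 'Access-Control-Allow-Origin' 'https://example.com' always;
    proxy_pass http://127.0.0.1:4001;
    proxy_http_version 1.1;
    proxy_set_header Host $host;
    proxy_set_header Upgrade $http_upgrade;
    proxy_set_header Connection 'upgrade';
}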

NGINX Config for Express Static Files

I have nginx running, and I also have an Express back-end server with a React front end. The issue I'm having is with the static files from Express. For instance, I have some Handlebars view files with a header that references css/style.css (and files in the js directory as well), which currently works in Chrome but not in IE, Edge or Safari. In those browsers the console shows a 404 and the styling doesn't apply, of course.
I reference style.css from my Handlebars view page like so:
<link rel="stylesheet" href="css/style.css">
It should be set up so that if I visit http://sitename.com/css/style.css I see the style.css from the /var/www/sitename.com/html/node/public/css/style.css location. This actually appears to work in Chrome but not in other browsers.
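That mapping can also be expressed directly in nginx, so the stylesheets never have to reach Express at all; a small sketch of the idea (assuming nginx should serve these folders straight from disk):
location /css/ {
    # /css/style.css -> /var/www/sitename.com/html/node/public/css/style.css
    root /var/www/sitename.com/html/node/public;
}
location /js/ {
    root /var/www/sitename.com/html/node/public;
}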
I have this statement in my Express app:
app.use(express.static('public'));
I have a directory structure like so:
/var/www/sitename.com/html/node (node express app is running from here)
/var/www/sitename.com/html/node/public (public folder for static files from express)
-> css (folder)
-> js (folder)
My nginx is set up as follows:
server {
listen 80;
server_name _;
root /var/www/sitename.com/html;
index index.php index.html;
server_name sitename.com www.sitename.com;
location /phpmyadmin {
try_files $uri $uri/ =404;
}
location / {
root /var/www/sitename.com/html/node/public/;
try_files $uri #backend;
}
location #backend {
proxy_pass http://localhost:42134;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Host $http_host;
}
# set max upload size
client_max_body_size 2G;
fastcgi_buffers 64 4K;
access_log /var/log/nginx/http_access.log combined;
error_log /var/log/nginx/http_error.log;
location = /favicon.ico {
log_not_found off;
access_log off;
}
location = /robots.txt {
allow all;
log_not_found off;
access_log off;
}
location ~ \.php$
{
try_files $uri =404;
fastcgi_pass 127.0.0.1:9000;
fastcgi_index index.php;
fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
include fastcgi_params;
}
location ~* \.(htaccess|htpasswd) {
deny all;
}
# set long EXPIRES header on static assets
location ~* \.(?:jpg|jpeg|gif|bmp|ico|png|css|js|swf)$ {
expires 30d;
access_log off;
}
}
The solution ended up being to remove the express.static line from app.js and then rearrange the nginx config as follows:
server {
listen 80;
server_name _;
root /var/www/sitename.com/html;
index index.php index.html;
server_name sitename.com www.sitename.com;
location /phpmyadmin {
root /var/www/sitename.com/html;
try_files $uri $uri/ =404;
}
location /dashboard {
index index.php index.html;
try_files $uri #backend;
}
location /static {
root /var/www/sitename.com/html;
try_files $uri $uri/ =404;
}
location / {
index index.php index.html;
try_files $uri #backend;
}
location #backend {
proxy_pass http://localhost:42134;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header Host $host;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
# Following is necessary for Websocket support
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
}
# set max upload size
client_max_body_size 2G;
fastcgi_buffers 64 4K;
access_log /var/log/nginx/http_access.log combined;
error_log /var/log/nginx/http_error.log;
location = /favicon.ico {
log_not_found off;
access_log off;
}
location = /robots.txt {
allow all;
log_not_found off;
access_log off;
}
location ~ \.php$
{
try_files $uri =404;
fastcgi_pass 127.0.0.1:9000;
fastcgi_index index.php;
fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
include fastcgi_params;
}
location ~* \.(htaccess|htpasswd) {
deny all;
}
# set long EXPIRES header on static assets
location ~* \.(?:jpg|jpeg|gif|bmp|ico|png|css|js|swf)$ {
expires 30d;
access_log off;
}
}

Prerender with nginx and node.js returns 504

If I understand things correctly, I can set up nginx so that it handles crawlers (instead of Node.js doing it). So I removed app.use(require('prerender-node').set('prerenderToken', 'token')) from the Express configuration and made the following nginx setup (I do not use a Prerender token):
# Proxy / load balance (if more than one node.js server used) traffic to our node.js instances
upstream my_server_upstream {
server 127.0.0.1:9000;
keepalive 64;
}
server {
listen 80;
server_name test.local.io;
access_log /var/log/nginx/test_access.log;
error_log /var/log/nginx/test_error.log;
root /var/www/client;
# Static content
location ~ ^/(components/|app/|bower_components/|assets/|robots.txt|humans.txt|favicon.ico) {
root /;
try_files /var/www/.tmp$uri /var/www/client$uri =404;
access_log off;
sendfile off;
}
# Route traffic to node.js for specific route: e.g. /socket.io-client
location ~ ^/(api/|user/|en/user/|ru/user/|auth/|socket.io-client/|sitemap.xml) {
proxy_redirect off;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header Host $http_host;
proxy_set_header X-NginX-Proxy true;
proxy_set_header Connection "";
proxy_http_version 1.1;
proxy_pass_header X-CSRFToken;
sendfile off;
# Tells nginx to use the upstream server
proxy_pass http://my_server_upstream;
}
location / {
root /var/www/client;
index index.html;
try_files $uri #prerender;
access_log off;
sendfile off;
}
location #prerender {
set $prerender 0;
if ($http_user_agent ~* "baiduspider|twitterbot|facebookexternalhit|rogerbot|linkedinbot|embedly|quora link preview|showyoubot|outbrain|pinterest|slackbot|vkShare|W3C_Validator") {
set $prerender 1;
}
if ($args ~ "_escaped_fragment_") {
set $prerender 1;
}
if ($http_user_agent ~ "Prerender") {
set $prerender 0;
}
#resolve using Google's DNS server to force DNS resolution and prevent caching of IPs
resolver 8.8.8.8;
if ($prerender = 1) {
#setting prerender as a variable forces DNS resolution, since nginx caches IPs and doesn't play well with load balancing
set $prerender "127.0.0.1:3000";
rewrite .* /$scheme://$host$request_uri? break;
proxy_pass http://$prerender;
}
if ($prerender = 0) {
rewrite .* /index.html$is_args$args break;
}
}
}
But when I test it with curl test.local.io?_escaped_fragment_= I get "got 504 in 344ms for http://test.local.io".
The Node version is 6.9.1. I use Vagrant to set up the environment.
The above configuration works fine. All it was missing was an entry in /etc/hosts: 127.0.0.1 test.local.io
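In other words, the prerender service on 127.0.0.1:3000 has to be able to resolve the hostname it is asked to render. A sketch of the relevant line, assuming nginx and the prerender service run on the same Vagrant box:
# /etc/hosts on the box running nginx and the prerender service
127.0.0.1   test.local.io
After that, curl "http://test.local.io/?_escaped_fragment_=" should return the prerendered page instead of a 504.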

Node.js apps and Drupal Nginx Conflict

I'm currently running two Ghost Node.js blogs on my VPS. They were working fine when I used proxy_pass in their respective .conf files.
For example:
proxy_pass http://127.0.0.1:2468;
I have another blog on port 2368. But when I introduced a Drupal site onto my VPS, I assumed it would work fine because its .conf file only points at a server name and a root path.
Like this:
server_name example.com;
root /var/www/example;
What happens is that when I go to any of the three domains pointing at my server, they all display the Drupal site. I can't understand why it's overriding the other settings. All three sites have separate exampledomain.conf nginx files.
Does anyone have any ideas? I've been trying to work this out for days now!
DRUPAL SERVER BLOCK 1
server {
server_name leafylane.com;
root /var/www/leafylane; ## <-- Your only path reference.
# Enable compression; this will help if you have, for instance, the advagg module,
# by serving gzip versions of the files.
gzip_static on;
location = /favicon.ico {
log_not_found off;
access_log off;
}
location = /robots.txt {
allow all;
log_not_found off;
access_log off;
}
# This matters if you use drush prior to 5.x
# After 5.x backups are stored outside the Drupal install.
#location = /backup {
# deny all;
#}
# Very rarely should these ever be accessed outside of your lan
location ~* \.(txt|log)$ {
allow 192.168.0.0/16;
deny all;
}
location ~ \..*/.*\.php$ {
return 403;
}
# No no for private
location ~ ^/sites/.*/private/ {
return 403;
}
# Block access to "hidden" files and directories whose names begin with a
# period. This includes directories used by version control systems such
# as Subversion or Git to store control files.
location ~ (^|/)\. {
return 403;
}
location / {
# This is cool because no php is touched for static content
try_files $uri #rewrite;
}
location #rewrite {
# You have 2 options here
# For D7 and above:
# Clean URLs are handled in drupal_environment_initialize().
rewrite ^ /index.php;
# For Drupal 6 and below:
# Some modules enforce no slash (/) at the end of the URL
# Else this rewrite block wouldn't be needed (GlobalRedirect)
#rewrite ^/(.*)$ /index.php?q=$1;
}
location ~ \.php$ {
fastcgi_split_path_info ^(.+\.php)(/.+)$;
#NOTE: You should have "cgi.fix_pathinfo = 0;" in php.ini
include fastcgi_params;
fastcgi_param SCRIPT_FILENAME $request_filename;
fastcgi_intercept_errors on;
fastcgi_pass unix:/var/run/php5-fpm.sock;
}
# Fighting with Styles? This little gem is amazing.
# This is for D6
#location ~ ^/sites/.*/files/imagecache/ {
# This is for D7 and D8
location ~ ^/sites/.*/files/styles/ {
try_files $uri #rewrite;
}
location ~* \.(js|css|png|jpg|jpeg|gif|ico)$ {
expires max;
log_not_found off;
}
}
SERVER BLOCK 2
server {
listen 0.0.0.0:8080;
server_name tomcusack.com;
access_log /var/log/nginx/tomcusack.com.log;
location / {
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header HOST $http_host;
proxy_set_header X-NginX-Proxy true;
proxy_pass http://127.0.0.1:2368;
proxy_redirect off;
}
}
server {
listen 0.0.0.0:8080;
server_name www.tomcusack.com;
access_log /var/log/nginx/tomcusack.com.log;
location / {
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header HOST $http_host;
proxy_set_header X-NginX-Proxy true;
proxy_pass http://127.0.0.1:2368;
proxy_redirect off;
}
}
SERVER BLOCK 3
server {
listen 0.0.0.0:8080;
server_name sancho-panza.co.uk;
access_log /var/log/nginx/sancho-panza.co.uk.log;
location / {
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header HOST $http_host;
proxy_set_header X-NginX-Proxy true;
proxy_pass http://127.0.0.1:2468;
proxy_redirect off;
}
}
server {
listen 0.0.0.0:8080;
server_name www.sancho-panza.co.uk;
access_log /var/log/nginx/sancho-panza.co.uk.log;
location / {
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header HOST $http_host;
proxy_set_header X-NginX-Proxy true;
proxy_pass http://127.0.0.1:2468;
proxy_redirect off;
}
}
Give this a shot. I identified quite a few issues with your original server blocks, and made some assumptions about what you are trying to do. Let me know if I'm mistaken.
You have a Drupal installation and two Ghost blogs, all of which you wish to serve on port 80 from your VPS based on which URL is requested, and each of which needs to accept both www and non-www requests.
Your original server blocks had a few mistakes, such as using multiple blocks for www/non-www, which I have simplified. The likely reason every domain showed the Drupal site is that your Ghost blocks listened on port 8080 while the Drupal block had no listen directive (so it defaulted to port 80): a request arriving on port 80 could only ever match the Drupal block. Note that you only need to separate www and non-www into different blocks if you plan on handling them differently, for example to redirect one to the other (see the short sketch after the config below).
As a final note, make sure you use "sudo nginx -t" to test the config and "sudo nginx -s reload" to reload it; the test will spit out detailed debugging information if you have any syntax errors.
server {
listen 80 default_server;
listen [::]:80 default_server ipv6only=on;
server_name leafylane.com www.leafylane.com;
root /var/www/leafylane;
gzip_static on;
location = /favicon.ico {
log_not_found off;
access_log off;
}
location = /robots.txt {
allow all;
log_not_found off;
access_log off;
}
location ~* \.(txt|log)$ {
allow 192.168.0.0/16;
deny all;
}
location ~ \..*/.*\.php$ {
return 403;
}
location ~ ^/sites/.*/private/ {
return 403;
}
location ~ (^|/)\. {
return 403;
}
location / {
try_files $uri #rewrite;
}
location #rewrite {
rewrite ^ /index.php;
}
location ~ \.php$ {
fastcgi_split_path_info ^(.+\.php)(/.+)$;
include fastcgi_params;
fastcgi_param SCRIPT_FILENAME $request_filename;
fastcgi_intercept_errors on;
fastcgi_pass unix:/var/run/php5-fpm.sock;
}
location ~ ^/sites/.*/files/styles/ {
try_files $uri #rewrite;
}
location ~* \.(js|css|png|jpg|jpeg|gif|ico)$ {
expires max;
log_not_found off;
}
}
server {
listen 80;
server_name tomcusack.com www.tomcusack.com;
access_log /var/log/nginx/tomcusack.com.log;
location / {
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header HOST $http_host;
proxy_set_header X-NginX-Proxy true;
proxy_pass http://127.0.0.1:2368;
proxy_redirect off;
}
}
server {
listen 80;
server_name sancho-panza.co.uk www.sancho-panza.co.uk;
access_log /var/log/nginx/sancho-panza.co.uk.log;
location / {
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header HOST $http_host;
proxy_set_header X-NginX-Proxy true;
proxy_pass http://127.0.0.1:2468;
proxy_redirect off;
}
}
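If you do want www handled differently (for instance redirected to the bare domain), that is the one case where a separate block is still useful. A small sketch of that variant for one of the sites, assuming a plain HTTP redirect is enough:
server {
    listen 80;
    server_name www.tomcusack.com;
    # Send www traffic to the canonical non-www host.
    return 301 http://tomcusack.com$request_uri;
}
server {
    listen 80;
    server_name tomcusack.com;
    location / {
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header HOST $http_host;
        proxy_set_header X-NginX-Proxy true;
        proxy_pass http://127.0.0.1:2368;
        proxy_redirect off;
    }
}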

I can't serve static files with nginx

I have the following Node.js project structure, which resides in the /home/ubuntu/project directory:
server
site
|-css
| |-styles.css
|-img
| |-sprite.png
|-js
| |-script.js
I'm trying to serve the static assets with nginx, so I wrote the following configuration:
upstream myapp_upstream {
server 127.0.0.1:3000;
keepalive 64;
}
server {
listen 80;
server_name www.myapp.com;
error_page 400 404 500 502 503 504 /50x.html;
location /50x.html {
internal;
root /usr/share/nginx/www;
}
location ~ ^/(images/|img/|javascript/|js/|css/|stylesheets/|flash/|media/|static/|robots.txt|humans.txt|favicon.ico|home/|html|xml) {
root /home/ubuntu/project/site;
access_log off;
expires max;
}
location / {
proxy_redirect off;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header Host $http_host;
proxy_set_header X-NginX-Proxy true;
proxy_set_header Connection "";
proxy_http_version 1.1;
proxy_pass http://myapp_upstream;
proxy_intercept_errors on;
}
}
But when I try to open my site in a browser, I get a failed status on all requested assets. What's the problem?
EDIT:
My route to css for example is:
http://www.myapp.com/css/styles.css
Well,
Add a / to the root path.
root /usr/share/nginx/www;
should be
root /usr/share/nginx/www/;
Use an alias for the assets like:
alias /home/ubuntu/project/site/; (again, add the last /)
This is a mess to me:
location ~ ^/(images/|img/|javascript/|js/|css/|stylesheets/|flash/|media/|static/|robots.txt|humans.txt|favicon.ico|home/|html|xml)
You should check this: http://wiki.nginx.org/NginxHttpCoreModule#location
I don't see the folders images/, javascript/, stylesheets/, flash/, media/, static/ and home/ in your site structure.
And |html|xml at the end match the routes /html or /xml, not .html or .xml files.
Then try:
location ~ ^/(robots.txt|humans.txt) {
alias /home/ubuntu/project/site/;
access_log off;
expires max;
}
location ~* \.(?:ico|css|js|gif|jpe?g|png)$ { # add here all the file extensions needed
alias /home/ubuntu/project/site/;
access_log off;
expires max;
}
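One caveat, echoing the first answer above: alias inside a regular-expression location normally needs the full file path (or a capture), so with the layout shown in the question it may be simpler to rely on root, which just appends the request URI to the path. A sketch, assuming the assets really do live under /home/ubuntu/project/site:
location ~* \.(?:ico|css|js|gif|jpe?g|png)$ {
    # /css/styles.css -> /home/ubuntu/project/site/css/styles.css
    root /home/ubuntu/project/site;
    access_log off;
    expires max;
}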
