I can't enable CORS in nginx - node.js

I can't enable CORS on my API gateway instance; this is how it looks.
Everything is installed on an nginx server under Ubuntu 20.04.
React front-end: https://example.com
- nginx
server {
listen 80;
listen [::]:80;
server_name example.com www.example.com;
root /home/ubuntu/front;
index index.html index.htm;
location ~* \.(?:manifest|appcache|html?|xml|json)$ {
expires -1;
# access_log logs/static.log; # I don't usually include a static log
}
location ~* \.(?:css|js)$ {
try_files $uri =404;
expires 1y;
access_log off;
add_header Cache-Control "public";
}
# Any route containing a file extension (e.g. /devicesfile.js)
location ~ ^.+\..+$ {
try_files $uri =404;
}
# Any route that doesn't have a file extension (e.g. /devices)
location / {
try_files $uri $uri/ /index.html;
}
return 301 https://example.com$request_uri;
}
server {
listen 443 ssl; # managed by Certbot
listen [::]:443 ssl;
ssl_certificate /etc/letsencrypt/live/xxx.com/fullchain.pem; # managed by Certbot
ssl_certificate_key /etc/letsencrypt/live/xxx.com/privkey.pem; # managed by Certbot
server_name example.com www.example.com;
root /home/ubuntu/front;
index index.html index.htm;
location ~* \.(?:manifest|appcache|html?|xml|json)$ {
expires -1;
# access_log logs/static.log; # I don't usually include a static log
}
location ~* \.(?:css|js)$ {
try_files $uri =404;
expires 1y;
access_log off;
add_header Cache-Control "public";
}
# Any route containing a file extension (e.g. /devicesfile.js)
location ~ ^.+\..+$ {
try_files $uri =404;
}
# Any route that doesn't have a file extension (e.g. /devices)
location / {
try_files $uri $uri/ /index.html;
}
}
Express back-end: https://api.mydomain.com
Code added to the JS:
app.use(cors());
- nginx
upstream api {
server xx.xx.xx.xx;
}
server {
server_name api.mydomain.com;
location / {
proxy_pass http://127.0.0.1:4001;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection 'upgrade';
proxy_set_header Host $host;
proxy_cache_bypass $http_upgrade;
proxy_connect_timeout 30;
proxy_send_timeout 30;
}
listen [::]:443 ssl ipv6only=on; # managed by Certbot
listen 443 ssl; # managed by Certbot
ssl_certificate /etc/letsencrypt/live/xx.xxxx.com/fullchain.pem; # managed by Certbot
ssl_certificate_key /etc/letsencrypt/live/xx.xxxx.com/privkey.pem; # managed by Certbot
}
MinIO object storage: https://minio.example.com
server {
listen 443 ssl;
server_name minio.example.com;
ssl_certificate /etc/minio/certs/public.crt;
ssl_certificate_key /etc/minio/certs/private.key;
# To allow special characters in headers
ignore_invalid_headers off;
# Allow any size file to be uploaded.
# Set to a value such as 1000m; to restrict file size to a specific value
client_max_body_size 1000m;
# To disable buffering
proxy_buffering off;
location / {
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header Host $http_host;
proxy_connect_timeout 300;
# Default is HTTP/1, keepalive is only enabled in HTTP/1.1
proxy_http_version 1.1;
proxy_set_header Connection "";
chunked_transfer_encoding off;
proxy_pass https://localhost:9000;
# Add the CORS access-control headers
#add_header 'Access-Control-Allow-Origin' '*' always;
#add_header 'Access-Control-Allow-Methods' 'GET, POST, OPTIONS' always;
#add_header 'Access-Control-Allow-Headers' 'Origin, X-Requested-With, Content-Type, Accept' always;
#add_header 'Access-Control-Allow-Credentials' 'true' always;
}
}
I can manage to make HTTP requests from example.com to api.example.com without getting CORS errors,
but when I make an HTTP request from example.com to api.example.com, and then from api.example.com to minio.example.com (or any API), I get a CORS error: "Access to XMLHttpRequest at https://api.example.com/upload from https://example.com has been blocked by CORS policy: No 'Access-Control-Allow-Origin' header is present on the requested resource."

proxy_set_header Access-Control-Allow-Origin "*";
solved the issue.
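For reference, a minimal sketch (untested) of where that line could sit in the api.mydomain.com server block above. proxy_set_header forwards the header to the upstream Express app, while the commented add_header variant would make nginx attach the header directly to the response it returns to the browser:
location / {
    proxy_pass http://127.0.0.1:4001;
    proxy_http_version 1.1;
    proxy_set_header Host $host;
    # Forward the header to the upstream, as in the fix above.
    proxy_set_header Access-Control-Allow-Origin "*";
    # Alternative: have nginx itself add the header to every response,
    # including error responses ("always").
    # add_header 'Access-Control-Allow-Origin' '*' always;
    # add_header 'Access-Control-Allow-Methods' 'GET, POST, OPTIONS' always;
}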

Related

Different location for a specific path

I'm trying to serve a different location based on a specific path structure and user agent, in order to serve dynamic rendering to crawlers.
What I need is that every path following the structure /user/…/artwork/… and with the user agent of any bot is served by a Node.js Express app via reverse proxy; everything else is just sent to the root of a static HTML dir.
Here is what I tried, with no success:
server {
index index.html;
server_name domain.app;
location ~/user/(.*)/artwork/(.*) {
set $prerender 0;
if ($http_user_agent ~* "baiduspider|twitterbot|facebookexternalhit|rogerbot|linkedinbot|embedly|quora link preview|showyoubot|outbrain|pinterest|slackbot|vkShare|W3C_Validator") {
set $prerender 1;
}
if ($uri ~ "\.(js|css|xml|less|png|jpg|jpeg|gif|pdf|doc|txt|ico|rss|zip|mp3|rar|exe|wmv|doc|avi|ppt|mpg|mpeg|tif|wav|mov|psd|ai|xls|mp4|m4a|swf|dat|dmg|iso|flv|m4v|torrent|ttf|woff)") {
set $prerender 0;
}
#resolve using Google's DNS server to force DNS resolution and prevent caching of IPs
resolver 8.8.8.8;
if ($prerender = 1) {
set $prerender "localhost:3010";
proxy_pass http://$prerender;
}
root /home/user/domain.app/dist;
try_files $uri $uri/ /index.html;
}
location / {
root /home/user/domain.app/dist;
#try_files $uri $uri/ =404;
try_files $uri $uri/ /index.html;
}
listen [::]:443 ssl ipv6only=on; # managed by Certbot
listen 443 ssl; # managed by Certbot
ssl_certificate /etc/letsencrypt/live/domain.app/fullchain.pem; # managed by Certbot
ssl_certificate_key /etc/letsencrypt/live/domain.app/privkey.pem; # managed by Certbot
include /etc/letsencrypt/options-ssl-nginx.conf; # managed by Certbot
ssl_dhparam /etc/letsencrypt/ssl-dhparams.pem; # managed by Certbot
}
server {
if ($host = domain.app) {
return 301 https://$host$request_uri;
} # managed by Certbot
listen 80 default_server;
server_name domain.app;
return 404; # managed by Certbot
}
It doesn't seem to serve anything different when it's a bot user agent (tested using the Googlebot user agent via Google Chrome's Network conditions setting).
EDIT:
I made it this way and it's working:
server {
index index.html;
server_name domain.app;
location ~*/robots.txt {
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header X-NginX-Proxy true;
proxy_set_header Host $http_host;
proxy_cache_bypass $http_upgrade;
proxy_redirect off;
proxy_pass http://localhost:3010;
}
location ~*/user/(.*)/artwork/(.*) {
set $prerender 0;
if ($http_user_agent ~* "googlebot|bingbot|yandex|baiduspider|twitterbot|facebookexternalhit|rogerbot|linkedinbot|embedly|quora link preview|showyoubot|outbrain|pinterest\/0\.|pinterestbot|slackbot|vkShare|W3C_Validator|whatsapp>
set $prerender 1;
}
if ($args ~ "_escaped_fragment_") {
set $prerender 1;
}
if ($http_user_agent ~ "Prerender") {
set $prerender 0;
}
if ($uri ~* "\.(js|css|xml|less|png|jpg|jpeg|gif|pdf|doc|txt|ico|rss|zip|mp3|rar|exe|wmv|doc|avi|ppt|mpg|mpeg|tif|wav|mov|psd|ai|xls|mp4|m4a|swf|dat|dmg|iso|flv|m4v|torrent|ttf|woff|svg|eot)") {
set $prerender 0;
}
#resolve using Google's DNS server to force DNS resolution and prevent caching of IPs
resolver 8.8.8.8;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header X-NginX-Proxy true;
proxy_set_header Host $http_host;
proxy_cache_bypass $http_upgrade;
proxy_redirect off;
if ($prerender = 1) {
proxy_pass http://localhost:3010;
break;
}
root /home/user/domain.app/dist;
try_files $uri $uri/ /index.html;
}
location / {
root /home/user/domain.app/dist;
#try_files $uri $uri/ =404;
try_files $uri $uri/ /index.html;
}
listen [::]:443 ssl ipv6only=on; # managed by Certbot
listen 443 ssl; # managed by Certbot
ssl_certificate /etc/letsencrypt/live/domain.app/fullchain.pem; # managed by Certbot
ssl_certificate_key /etc/letsencrypt/live/domain.app/privkey.pem; # managed by Certbot
include /etc/letsencrypt/options-ssl-nginx.conf; # managed by Certbot
ssl_dhparam /etc/letsencrypt/ssl-dhparams.pem; # managed by Certbot
}
server {
if ($host = domain.app) {
return 301 https://$host$request_uri;
} # managed by Certbot
listen 80 default_server;
server_name domain.app;
return 404; # managed by Certbot
}
This is working, but probably a bit messy.
For some reason, set $prerender "localhost:3010"; and then using the variable in proxy_pass http://$prerender; wasn't working, so I use it directly like this: proxy_pass http://localhost:3010;
If anyone has advice to make it better I would be happy to clean it up :)
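One possible cleanup, as an untested sketch: the assumption here is that the variable form failed because proxy_pass with a variable resolves the hostname at request time through the configured resolver, and a public resolver like 8.8.8.8 cannot resolve "localhost". Pointing the variable at an IP literal avoids that lookup (the $prerender_host name below is just illustrative):
location ~* /user/(.*)/artwork/(.*) {
    set $prerender 0;
    # ... same bot / file-extension checks as above ...
    if ($prerender = 1) {
        # An IP literal needs no DNS lookup, so the 8.8.8.8 resolver is never consulted.
        set $prerender_host "127.0.0.1:3010";
        proxy_pass http://$prerender_host;
        break;
    }
    root /home/user/domain.app/dist;
    try_files $uri $uri/ /index.html;
}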

Error: Request failed with status code 405, payload returns HTML not JSON as intended

I'm setting up a web app using Node.js + React + NGINX on AWS,
and when I want to access the URL /auth
it returns me some HTML code instead of the JSON I wanted.
I tested the code on localhost and it works fine.
I tried to set the folder permissions, because I think maybe user permissions are the problem.
I also tried editing some stuff in nginx.conf.
Below is my app.conf for nginx:
upstream webapp {
server 127.0.0.1:3018;
}
server_names_hash_bucket_size 64;
server_names_hash_max_size 512;
server {
listen 80 default_server;
listen [::]:80 default_server;
server_name 127.0.0.1;
server_name_in_redirect off;
if ($http_x_forwarded_proto = 'http'){
return 301 https://$host$request_uri;
}
location / {
root /home/website/client/build;
try_files $uri /index.html;
log_not_found off;
access_log off;
}
#error_page 405 =200 $uri;
if ( $http_user_agent ~* (nmap|nikto|wikto|sf|sqlmap|bsqlbf|w3af|acunetix|havij|appscan) ) {
return 403;
}
add_header X-Frame-Options DENY;
add_header X-Content-Type-Options nosniff;
add_header X-XSS-Protection "1; mode=block;";
add_header Strict-Transport-Security "max-age=2592000; includeSubDomains" always;
}
action {…}
payload: Object { isAuth: false, error: true }
type: "auth_member"
I expect output like the one above,
but instead the web app gives me something else.
You have to define the upstream location to be proxied to:
# this is an example; modify the path to match your setup
location /api {
proxy_pass http://webapp;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection 'upgrade';
proxy_set_header Host $host;
proxy_cache_bypass $http_upgrade;
}

Nginx Reverse Proxy Not Matching Hostname

I have a simple API that I currently have in Apache defined by:
<VirtualHost *:80>
ServerName http://exampleapi.org
ServerAlias http://exampleapi.org
ProxyPreserveHost On
ProxyPass /api http://localhost:3000
</VirtualHost>
I needed to migrate it to Nginx for various reasons, so in /etc/nginx/conf.d/<domain>.conf I went with:
server {
listen 80;
listen [::]:80;
server_name http://exampleapi.org;
# API endpoint
location = / {
proxy_pass http://127.0.0.1:4000;
}
}
The problem appears to be that I have another file/site in /etc/nginx/conf.d/<domain2>.conf and it's always matching that, as I can clearly see from the access logs. So where in the other config is it matching everything? (Note: the site name, like example.com, has been obfuscated with <domain>.)
server {
server_name SITE_URL <domain>;
server_tokens off;
access_log /var/log/nginx/access.log;
# Max request size
client_max_body_size 20M;
large_client_header_buffers 4 256k;
root /usr/local/learninglocker/current/webapp/ui/dist/public;
# xAPI endpoints
location ~* ^/data/xAPI(.*)$ {
proxy_pass http://127.0.0.1:8081/data/xAPI$1$is_args$args;
}
# API endpoints
location = /api {
rewrite /api / break;
proxy_redirect off;
proxy_pass http://127.0.0.1:8080;
}
location ~* ^/api(.*)$ {
proxy_pass http://127.0.0.1:8080$1$is_args$args;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection 'upgrade';
proxy_set_header Host $host;
proxy_cache_bypass $http_upgrade;
}
# All other traffic directed to statics or Node server
location / {
try_files $uri @node_server;
}
# Node UI server
location @node_server {
proxy_pass http://localhost:3000;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection 'upgrade';
proxy_set_header Host $host;
proxy_cache_bypass $http_upgrade;
}
# Load configuration files for the default server block.
error_page 404 /404.html;
location = /40x.html {
root /usr/share/nginx/html;
}
error_page 500 502 503 504 /50x.html;
location = /50x.html {
root /usr/share/nginx/html;
}
# We don't need .ht files with nginx.
location ~ /\.ht {
deny all;
}
listen [::]:443 ssl ipv6only=on; # managed by Certbot
listen 443 ssl; # managed by Certbot
ssl_certificate /etc/letsencrypt/live/<domain>/fullchain.pem; # managed by Certbot
ssl_certificate_key /etc/letsencrypt/live/<domain>/privkey.pem; # managed by Certbot
include /etc/letsencrypt/options-ssl-nginx.conf; # managed by Certbot
ssl_dhparam /etc/letsencrypt/ssl-dhparams.pem; # managed by Certbot
}
server {
if ($host = <domain>) {
return 301 https://$host$request_uri;
} # managed by Certbot
listen 80;
listen [::]:80;
server_name SITE_URL <domain>;
return 404; # managed by Certbot
}
The server_name directive is matched against the Host header value; it does not include the protocol.
server {
listen 80;
listen [::]:80;
server_name exampleapi.org;
# API endpoint
location / {
proxy_pass http://127.0.0.1:4000;
}
}
Also, note I changed location = / to location /, because with the = it would only match that exact path and nothing else, which I assumed was not the plan.
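To illustrate the difference, a hypothetical sketch (not taken from the configs above):
# Exact match: only the bare "/" URI is proxied; /users or /users/42 would not match.
location = / {
    proxy_pass http://127.0.0.1:4000;
}
# Prefix match: "/" and everything under it is proxied.
location / {
    proxy_pass http://127.0.0.1:4000;
}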

NGINX Config for Express Static Files

I have nginx running, and I also have an Express back-end server with a React front end. The issue I'm having is with the static files from Express. For instance, I have some Handlebars view files with a header that references css/style.css (and the js directory as well), which currently works in Chrome but not in IE, Edge or Safari. In those browsers the console shows a 404 and the styling doesn't apply, of course.
I call for the style.css from my handlebars view page like so:
<link rel="stylesheet" href="css/style.css">
It should be set up so that if I were to visit http://sitename.com/css/style.css I would see the style.css from the /var/www/sitename.com/html/node/public/css/style.css location, which actually appears to work in Chrome but not in other browsers.
I have this statement in my express app
app.use(express.static('public'));
I have a directory structure like so:
/var/www/sitename.com/html/node (node express app is running from here)
/var/www/sitename.com/html/node/public (public folder for static files from express)
-> css (folder)
-> js (folder)
My nginx is set up as follows:
server {
listen 80;
server_name _;
root /var/www/sitename.com/html;
index index.php index.html;
server_name sitename.com www.sitename.com;
location /phpmyadmin {
try_files $uri $uri/ =404;
}
location / {
root /var/www/sitename.com/html/node/public/;
try_files $uri #backend;
}
location #backend {
proxy_pass http://localhost:42134;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Host $http_host;
}
# set max upload size
client_max_body_size 2G;
fastcgi_buffers 64 4K;
access_log /var/log/nginx/http_access.log combined;
error_log /var/log/nginx/http_error.log;
location = /favicon.ico {
log_not_found off;
access_log off;
}
location = /robots.txt {
allow all;
log_not_found off;
access_log off;
}
location ~ \.php$
{
try_files $uri =404;
fastcgi_pass 127.0.0.1:9000;
fastcgi_index index.php;
fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
include fastcgi_params;
}
location ~* \.(htaccess|htpasswd) {
deny all;
}
# set long EXPIRES header on static assets
location ~* \.(?:jpg|jpeg|gif|bmp|ico|png|css|js|swf)$ {
expires 30d;
access_log off;
}
}
The solution ended up being to remove the express.static line in app.js, and then in the nginx config I had to rearrange things like so:
server {
listen 80;
server_name _;
root /var/www/sitename.com/html;
index index.php index.html;
server_name sitename.com www.sitename.com;
location /phpmyadmin {
root /var/www/sitename.com/html;
try_files $uri $uri/ =404;
}
location /dashboard {
index index.php index.html;
try_files $uri #backend;
}
location /static {
root /var/www/sitename.com/html;
try_files $uri $uri/ =404;
}
location / {
index index.php index.html;
try_files $uri #backend;
}
location #backend {
proxy_pass http://localhost:42134;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header Host $host;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
# Following is necessary for Websocket support
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
}
# set max upload size
client_max_body_size 2G;
fastcgi_buffers 64 4K;
access_log /var/log/nginx/http_access.log combined;
error_log /var/log/nginx/http_error.log;
location = /favicon.ico {
log_not_found off;
access_log off;
}
location = /robots.txt {
allow all;
log_not_found off;
access_log off;
}
location ~ \.php$
{
try_files $uri =404;
fastcgi_pass 127.0.0.1:9000;
fastcgi_index index.php;
fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
include fastcgi_params;
}
location ~* \.(htaccess|htpasswd) {
deny all;
}
# set long EXPIRES header on static assets
location ~* \.(?:jpg|jpeg|gif|bmp|ico|png|css|js|swf)$ {
expires 30d;
access_log off;
}
}

Nginx reverse proxy serving Node.js app static files

I have a Laravel application in which one route, /onlineAds, will take me to another application (an SPA) built with Vue.js as the front end and Node.js as the back end. So I'm trying to use Nginx as a reverse proxy in order to serve my SPA's static files, but without any success.
My conf is as follows:
/ => will be served from "C:/laragon/www/laravel_App/public/"
/onlineAds/(*) => will be served from "C:/laragon/www/VueNodeApp/dist/"
/api/(*) => will be proxied to the Node.js server
Here is what I tried to do with Nginx:
server {
listen 8080;
server_name domain.test *.domain.test;
root "C:/laragon/www/laravel_App/public/";
index index.html index.htm index.php;
location / {
try_files $uri $uri/ /index.php$is_args$args;
autoindex on;
}
location ~* ^\/onlineAds(.*)$ {
alias "C:/laragon/www/craiglist/dist/";
#try_files $uri $uri/ /index.html;
}
location ~* ^\/api(.*)$ {
proxy_set_header Host $host;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection 'upgrade';
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_read_timeout 300;
proxy_pass http://localhost:8081;
}
location ~ \.php$ {
include snippets/fastcgi-php.conf;
fastcgi_pass php_upstream;
#fastcgi_pass unix:/run/php/php7.0-fpm.sock;
}
charset utf-8;
location = /favicon.ico { access_log off; log_not_found off; }
location = /robots.txt { access_log off; log_not_found off; }
location ~ /\.ht {
deny all;
}
}
What am I doing wrong?
An alias statement within a regular expression location requires the full path to the file. See this document for details.
For example:
location ~* ^\/onlineAds(.*)$ {
alias "C:/laragon/www/craiglist/dist$1";
if (!-e $request_filename) { rewrite ^ /onlineAds/index.html last; }
}
The use of try_files with alias is avoided due to this issue. See this caution on the use of if.
Assuming that the URI /js/foo.js could be located in C:/laragon/www/laravel_App/public/js/foo.js or C:/laragon/www/craiglist/dist/js/foo.js, you could ask Nginx to try both locations using try_files with a common root directory.
For example:
location /js/ {
root "C:/laragon/www";
try_files /laravel_App/public$uri /craiglist/dist$uri =404;
}
location /css/ {
root "C:/laragon/www";
try_files /laravel_App/public$uri /craiglist/dist$uri =404;
}
