Error: Request failed with status code 405, payload returns HTML not JSON as intended - node.js

I'm setting up a web app using Node.js + React + Nginx on AWS.
When I access the URL /auth, it returns HTML instead of the JSON I expected.
I tested the code on localhost and it works fine there.
I tried setting the folder permissions, because I thought user permissions might be the problem,
and I also tried editing a few settings in nginx.conf.
Below is my app.conf for Nginx:
upstream webapp {
    server 127.0.0.1:3018;
}

server_names_hash_bucket_size 64;
server_names_hash_max_size 512;

server {
    listen 80 default_server;
    listen [::]:80 default_server;
    server_name 127.0.0.1;
    server_name_in_redirect off;

    if ($http_x_forwarded_proto = 'http') {
        return 301 https://$host$request_uri;
    }

    location / {
        root /home/website/client/build;
        try_files $uri /index.html;
        log_not_found off;
        access_log off;
    }

    #error_page 405 =200 $uri;

    if ($http_user_agent ~* (nmap|nikto|wikto|sf|sqlmap|bsqlbf|w3af|acunetix|havij|appscan)) {
        return 403;
    }

    add_header X-Frame-Options DENY;
    add_header X-Content-Type-Options nosniff;
    add_header X-XSS-Protection "1; mode=block;";
    add_header Strict-Transport-Security "max-age=2592000; includeSubDomains" always;
}
I expect output like this (the action logged in the browser console):

action: {
    payload: { isAuth: false, error: true },
    type: "auth_member"
}

but instead the site gives me something else.

You have to define a location that gets proxied to the upstream:

# this is an example; adjust the path to match your API routes
location /api {
    proxy_pass http://webapp;
    proxy_http_version 1.1;
    proxy_set_header Upgrade $http_upgrade;
    proxy_set_header Connection 'upgrade';
    proxy_set_header Host $host;
    proxy_cache_bypass $http_upgrade;
}
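
Without such a block, a request to /auth falls through to location /, where try_files answers with index.html; that explains both symptoms (HTML instead of JSON, and a 405 when the method is POST, since nginx refuses POST against a static file). Assuming the failing endpoint really is /auth and the Node app is the webapp upstream defined above, a minimal sketch:

location /auth {
    # proxy API calls to the Node app instead of serving static files
    proxy_pass http://webapp;
    proxy_http_version 1.1;
    proxy_set_header Host $host;
    proxy_set_header X-Real-IP $remote_addr;
    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
}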

Related

I can't enable CORS in Nginx

I can't enable CORS on my API Gateway instance; this is how it looks:
Everything is installed on an Nginx server under Ubuntu 20.04.
React Front-end: https://example.com
- Nginx:
server {
    listen 80;
    listen [::]:80;
    server_name example.com www.example.com;
    root /home/ubuntu/front;
    index index.html index.htm;

    location ~* \.(?:manifest|appcache|html?|xml|json)$ {
        expires -1;
        # access_log logs/static.log; # I don't usually include a static log
    }

    location ~* \.(?:css|js)$ {
        try_files $uri =404;
        expires 1y;
        access_log off;
        add_header Cache-Control "public";
    }

    # Any route containing a file extension (e.g. /devicesfile.js)
    location ~ ^.+\..+$ {
        try_files $uri =404;
    }

    # Any route that doesn't have a file extension (e.g. /devices)
    location / {
        try_files $uri $uri/ /index.html;
    }

    return 301 https://example.com$request_uri;
}
server {
    listen 443 ssl; # managed by Certbot
    listen [::]:443 ssl;
    ssl_certificate /etc/letsencrypt/live/xxx.com/fullchain.pem; # managed by Certbot
    ssl_certificate_key /etc/letsencrypt/live/xxx.com/privkey.pem; # managed by Certbot
    server_name example.com www.example.com;
    root /home/ubuntu/front;
    index index.html index.htm;

    location ~* \.(?:manifest|appcache|html?|xml|json)$ {
        expires -1;
        # access_log logs/static.log; # I don't usually include a static log
    }

    location ~* \.(?:css|js)$ {
        try_files $uri =404;
        expires 1y;
        access_log off;
        add_header Cache-Control "public";
    }

    # Any route containing a file extension (e.g. /devicesfile.js)
    location ~ ^.+\..+$ {
        try_files $uri =404;
    }

    # Any route that doesn't have a file extension (e.g. /devices)
    location / {
        try_files $uri $uri/ /index.html;
    }
}
Express Back-end: https://api.mydomain.com
Code added to the JS:
app.use(cors());
- Nginx:
upstream api {
    server xx.xx.xx.xx;
}

server {
    server_name api.mydomain.com;

    location / {
        proxy_pass http://127.0.0.1:4001;
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection 'upgrade';
        proxy_set_header Host $host;
        proxy_cache_bypass $http_upgrade;
        proxy_connect_timeout 30;
        proxy_send_timeout 30;
    }

    listen [::]:443 ssl ipv6only=on; # managed by Certbot
    listen 443 ssl; # managed by Certbot
    ssl_certificate /etc/letsencrypt/live/xx.xxxx.com/fullchain.pem; # managed by Certbot
    ssl_certificate_key /etc/letsencrypt/live/xx.xxxx.com/privkey.pem; # managed by Certbot
}
MinIO object storage: https://minio.example.com
server {
    listen 443 ssl;
    server_name minio.example.com;
    ssl_certificate /etc/minio/certs/public.crt;
    ssl_certificate_key /etc/minio/certs/private.key;

    # To allow special characters in headers
    ignore_invalid_headers off;
    # Allow any size file to be uploaded.
    # Set to a value such as 1000m; to restrict file size to a specific value
    client_max_body_size 1000m;
    # To disable buffering
    proxy_buffering off;

    location / {
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_set_header Host $http_host;
        proxy_connect_timeout 300;
        # Default is HTTP/1, keepalive is only enabled in HTTP/1.1
        proxy_http_version 1.1;
        proxy_set_header Connection "";
        chunked_transfer_encoding off;
        proxy_pass https://localhost:9000;
        # Add the CORS access-control headers
        #add_header 'Access-Control-Allow-Origin' '*' always;
        #add_header 'Access-Control-Allow-Methods' 'GET, POST, OPTIONS' always;
        #add_header 'Access-Control-Allow-Headers' 'Origin, X-Requested-With, Content-Type, Accept' always;
        #add_header 'Access-Control-Allow-Credentials' 'true' always;
    }
}
I can make HTTP requests from example.com to api.example.com without CORS errors,
but when I make a request from example.com to api.example.com and then from api.example.com to minio.example.com (or any API), I get a CORS error: "Access to XMLHttpRequest at https://api.example.com/upload from https://example.com has been blocked by CORS policy: No 'Access-Control-Allow-Origin' header is present on the requested resource."
Adding
proxy_set_header Access-Control-Allow-Origin "*";
solved the issue.
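
For reference, proxy_set_header changes the request that nginx forwards to the upstream, while response headers are normally emitted with add_header, as in the commented-out block in the MinIO config above. A minimal sketch of that more conventional approach, assuming the front-end origin is https://example.com (the OPTIONS preflight is answered explicitly, since add_header directives from the outer level are not inherited into the if branch):

location / {
    # answer CORS preflight requests directly from nginx
    if ($request_method = OPTIONS) {
        add_header 'Access-Control-Allow-Origin' 'https://example.com' always;
        add_header 'Access-Control-Allow-Methods' 'GET, POST, OPTIONS' always;
        add_header 'Access-Control-Allow-Headers' 'Origin, X-Requested-With, Content-Type, Accept' always;
        return 204;
    }
    # attach the CORS header to actual responses as well
    add_header 'Access-Control-Allow-Origin' 'https://example.com' always;
    proxy_pass https://localhost:9000;
}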

Nginx reverse proxy serving Node.js app static files

I have a Laravel application in which one route, /onlineAds, takes me to another application (an SPA) built with Vue.js as the front end and Node.js as the back end. I'm trying to use Nginx as a reverse proxy to serve my SPA's static files, but without any success.
My conf is as follows:
/ => will be served from "C:/laragon/www/laravel_App/public/"
/onlineAds/(*) => will be served from "C:/laragon/www/VueNodeApp/dist/"
/api/(*) => will be proxied to the Node.js server
Here is what I tried with Nginx:
server {
    listen 8080;
    server_name domain.test *.domain.test;
    root "C:/laragon/www/laravel_App/public/";
    index index.html index.htm index.php;

    location / {
        try_files $uri $uri/ /index.php$is_args$args;
        autoindex on;
    }

    location ~* ^\/onlineAds(.*)$ {
        alias "C:/laragon/www/craiglist/dist/";
        #try_files $uri $uri/ /index.html;
    }

    location ~* ^\/api(.*)$ {
        proxy_set_header Host $host;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection 'upgrade';
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_read_timeout 300;
        proxy_pass http://localhost:8081;
    }

    location ~ \.php$ {
        include snippets/fastcgi-php.conf;
        fastcgi_pass php_upstream;
        #fastcgi_pass unix:/run/php/php7.0-fpm.sock;
    }

    charset utf-8;

    location = /favicon.ico { access_log off; log_not_found off; }
    location = /robots.txt  { access_log off; log_not_found off; }

    location ~ /\.ht {
        deny all;
    }
}
What am I doing wrong?
An alias statement within a regular expression location requires the full path to the file; see the nginx documentation for alias for details.
For example:
location ~* ^\/onlineAds(.*)$ {
    alias "C:/laragon/www/craiglist/dist$1";
    if (!-e $request_filename) { rewrite ^ /onlineAds/index.html last; }
}
try_files is avoided together with alias here because of a long-standing nginx issue, and the usual caution about the use of if applies.
Assuming that the URI /js/foo.js could be located in C:/laragon/www/laravel_App/public/js/foo.js or C:/laragon/www/craiglist/dist/js/foo.js, you could ask Nginx to try both locations using try_files with a common root directory.
For example:
location /js/ {
    root "C:/laragon/www";
    try_files /laravel_App/public$uri /craiglist/dist$uri =404;
}

location /css/ {
    root "C:/laragon/www";
    try_files /laravel_App/public$uri /craiglist/dist$uri =404;
}
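
If more asset directories than /js/ and /css/ need the same fallback, the two blocks above could presumably be collapsed into a single regex location (same assumptions about the two directory trees):

# try the Laravel public tree first, then the Vue dist tree
location ~* ^/(?:js|css)/ {
    root "C:/laragon/www";
    try_files /laravel_App/public$uri /craiglist/dist$uri =404;
}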

Prerender with nginx and node.js returns 504

If I understand things correctly, I can set up nginx so that it handles crawlers (instead of Node.js doing it). So I removed app.use(require('prerender-node').set('prerenderToken', 'token')) from the Express configuration and made the following nginx setup (I do not use a Prerender token):
# Proxy / load balance (if more than one node.js server used) traffic to our node.js instances
upstream my_server_upstream {
    server 127.0.0.1:9000;
    keepalive 64;
}

server {
    listen 80;
    server_name test.local.io;

    access_log /var/log/nginx/test_access.log;
    error_log /var/log/nginx/test_error.log;

    root /var/www/client;

    # Static content
    location ~ ^/(components/|app/|bower_components/|assets/|robots.txt|humans.txt|favicon.ico) {
        root /;
        try_files /var/www/.tmp$uri /var/www/client$uri =404;
        access_log off;
        sendfile off;
    }

    # Route traffic to node.js for specific routes: e.g. /socket.io-client
    location ~ ^/(api/|user/|en/user/|ru/user/|auth/|socket.io-client/|sitemap.xml) {
        proxy_redirect off;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_set_header Host $http_host;
        proxy_set_header X-NginX-Proxy true;
        proxy_set_header Connection "";
        proxy_http_version 1.1;
        proxy_pass_header X-CSRFToken;
        sendfile off;
        # Tells nginx to use the upstream server
        proxy_pass http://my_server_upstream;
    }

    location / {
        root /var/www/client;
        index index.html;
        try_files $uri @prerender;
        access_log off;
        sendfile off;
    }

    location @prerender {
        set $prerender 0;
        if ($http_user_agent ~* "baiduspider|twitterbot|facebookexternalhit|rogerbot|linkedinbot|embedly|quora link preview|showyoubot|outbrain|pinterest|slackbot|vkShare|W3C_Validator") {
            set $prerender 1;
        }
        if ($args ~ "_escaped_fragment_") {
            set $prerender 1;
        }
        if ($http_user_agent ~ "Prerender") {
            set $prerender 0;
        }

        # Resolve using Google's DNS server to force DNS resolution and prevent caching of IPs
        resolver 8.8.8.8;

        if ($prerender = 1) {
            # Setting prerender as a variable forces DNS resolution since nginx caches IPs and doesn't play well with load balancing
            set $prerender "127.0.0.1:3000";
            rewrite .* /$scheme://$host$request_uri? break;
            proxy_pass http://$prerender;
        }
        if ($prerender = 0) {
            rewrite .* /index.html$is_args$args break;
        }
    }
}
But when I test it with curl test.local.io?_escaped_fragment_= the result is "got 504 in 344ms for http://test.local.io".
The Node version is 6.9.1, and I use Vagrant to set up the environment.
The above configuration works fine. All it was missing was an entry in /etc/hosts: 127.0.0.1 test.local.io
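
That is, on the machine running nginx (the Vagrant guest here), something like:

# /etc/hosts
127.0.0.1   test.local.io

Without that entry, the Prerender service's request back to http://test.local.io presumably could not resolve to the local machine (a local test domain is not in public DNS), hence the 504.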

Node.js apps and Drupal Nginx Conflict

I'm currently running two Ghost Node.js blogs on my VPS. They were working fine when I used proxy_pass in their respective .conf files.
For example:
proxy_pass http://127.0.0.1:2468;
I have another blog on port 2368, but when I introduced a Drupal site onto my VPS, I assumed it would work fine because my .conf file was reading a path and URL.
Like this:
server_name example.com;
root /var/www/example;
What happens is that when I go to the three domains pointing at my server, they all display the Drupal site. I can't understand why it's overriding the settings. All three sites have separate exampledomain.conf Nginx files.
Does anyone have any ideas? I've been trying to work this out for days now!
DRUPAL SERVER BLOCK 1
server {
    server_name leafylane.com;
    root /var/www/leafylane; ## <-- Your only path reference.

    # Enable compression; this will help if you have, for instance, the advagg module,
    # by serving gzipped versions of the files.
    gzip_static on;

    location = /favicon.ico {
        log_not_found off;
        access_log off;
    }

    location = /robots.txt {
        allow all;
        log_not_found off;
        access_log off;
    }

    # This matters if you use drush prior to 5.x
    # After 5.x backups are stored outside the Drupal install.
    #location = /backup {
    #    deny all;
    #}

    # Very rarely should these ever be accessed outside of your lan
    location ~* \.(txt|log)$ {
        allow 192.168.0.0/16;
        deny all;
    }

    location ~ \..*/.*\.php$ {
        return 403;
    }

    # No no for private
    location ~ ^/sites/.*/private/ {
        return 403;
    }

    # Block access to "hidden" files and directories whose names begin with a
    # period. This includes directories used by version control systems such
    # as Subversion or Git to store control files.
    location ~ (^|/)\. {
        return 403;
    }

    location / {
        # This is cool because no php is touched for static content
        try_files $uri @rewrite;
    }

    location @rewrite {
        # You have 2 options here
        # For D7 and above:
        # Clean URLs are handled in drupal_environment_initialize().
        rewrite ^ /index.php;
        # For Drupal 6 and below:
        # Some modules enforce no slash (/) at the end of the URL
        # Else this rewrite block wouldn't be needed (GlobalRedirect)
        #rewrite ^/(.*)$ /index.php?q=$1;
    }

    location ~ \.php$ {
        fastcgi_split_path_info ^(.+\.php)(/.+)$;
        #NOTE: You should have "cgi.fix_pathinfo = 0;" in php.ini
        include fastcgi_params;
        fastcgi_param SCRIPT_FILENAME $request_filename;
        fastcgi_intercept_errors on;
        fastcgi_pass unix:/var/run/php5-fpm.sock;
    }

    # Fighting with Styles? This little gem is amazing.
    # This is for D6
    #location ~ ^/sites/.*/files/imagecache/ {
    # This is for D7 and D8
    location ~ ^/sites/.*/files/styles/ {
        try_files $uri @rewrite;
    }

    location ~* \.(js|css|png|jpg|jpeg|gif|ico)$ {
        expires max;
        log_not_found off;
    }
}
SERVER BLOCK 2
server {
    listen 0.0.0.0:8080;
    server_name tomcusack.com;
    access_log /var/log/nginx/tomcusack.com.log;

    location / {
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header HOST $http_host;
        proxy_set_header X-NginX-Proxy true;
        proxy_pass http://127.0.0.1:2368;
        proxy_redirect off;
    }
}

server {
    listen 0.0.0.0:8080;
    server_name www.tomcusack.com;
    access_log /var/log/nginx/tomcusack.com.log;

    location / {
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header HOST $http_host;
        proxy_set_header X-NginX-Proxy true;
        proxy_pass http://127.0.0.1:2368;
        proxy_redirect off;
    }
}

SERVER BLOCK 3

server {
    listen 0.0.0.0:8080;
    server_name sancho-panza.co.uk;
    access_log /var/log/nginx/sancho-panza.co.uk.log;

    location / {
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header HOST $http_host;
        proxy_set_header X-NginX-Proxy true;
        proxy_pass http://127.0.0.1:2468;
        proxy_redirect off;
    }
}

server {
    listen 0.0.0.0:8080;
    server_name www.sancho-panza.co.uk;
    access_log /var/log/nginx/sancho-panza.co.uk.log;

    location / {
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header HOST $http_host;
        proxy_set_header X-NginX-Proxy true;
        proxy_pass http://127.0.0.1:2468;
        proxy_redirect off;
    }
}
Give this a shot. I identified quite a few issues with your original server blocks and made some assumptions about what you are trying to do; let me know if I'm mistaken.
You have a Drupal installation and two Ghost blogs, all of which you wish to serve on port 80 from your VPS based on which URL is requested. Each needs to accept both www and non-www requests.
Your original server blocks had a few mistakes, such as using multiple blocks for www/non-www, which I have simplified. Note that you only need to separate these into different blocks if you plan to handle www differently from non-www.
As a final note, make sure you use "sudo nginx -s reload" to reload the config files, as that will print more detailed debugging information if you have any syntax errors.
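
In the same vein, nginx -t validates the configuration without touching the running server, which is handy to run first:

sudo nginx -t         # test configuration syntax
sudo nginx -s reload  # then reload with the new configuration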
server {
    listen 80 default_server;
    listen [::]:80 default_server ipv6only=on;
    server_name leafylane.com www.leafylane.com;
    root /var/www/leafylane;

    gzip_static on;

    location = /favicon.ico {
        log_not_found off;
        access_log off;
    }

    location = /robots.txt {
        allow all;
        log_not_found off;
        access_log off;
    }

    location ~* \.(txt|log)$ {
        allow 192.168.0.0/16;
        deny all;
    }

    location ~ \..*/.*\.php$ {
        return 403;
    }

    location ~ ^/sites/.*/private/ {
        return 403;
    }

    location ~ (^|/)\. {
        return 403;
    }

    location / {
        try_files $uri @rewrite;
    }

    location @rewrite {
        rewrite ^ /index.php;
    }

    location ~ \.php$ {
        fastcgi_split_path_info ^(.+\.php)(/.+)$;
        include fastcgi_params;
        fastcgi_param SCRIPT_FILENAME $request_filename;
        fastcgi_intercept_errors on;
        fastcgi_pass unix:/var/run/php5-fpm.sock;
    }

    location ~ ^/sites/.*/files/styles/ {
        try_files $uri @rewrite;
    }

    location ~* \.(js|css|png|jpg|jpeg|gif|ico)$ {
        expires max;
        log_not_found off;
    }
}

server {
    listen 80;
    server_name tomcusack.com www.tomcusack.com;
    access_log /var/log/nginx/tomcusack.com.log;

    location / {
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header HOST $http_host;
        proxy_set_header X-NginX-Proxy true;
        proxy_pass http://127.0.0.1:2368;
        proxy_redirect off;
    }
}

server {
    listen 80;
    server_name sancho-panza.co.uk www.sancho-panza.co.uk;
    access_log /var/log/nginx/sancho-panza.co.uk.log;

    location / {
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header HOST $http_host;
        proxy_set_header X-NginX-Proxy true;
        proxy_pass http://127.0.0.1:2468;
        proxy_redirect off;
    }
}

What MongoDB user role can make POST calls?

I've just created a MongoDB instance on Ubuntu 14.04, with authentication by username and password.
The user I've created looks like this:
{
    "_id" : "myDatabase.myUser",
    "user" : "myUser",
    "db" : "myDatabase",
    "roles" : [ { "role" : "readWrite", "db" : "myDatabase" } ]
}
And the URI string that I use in my REST API, written in Node.js (with Express and Mongoose), is:
mongodb://myUser:password@localhost:27017/myDatabase
The connection is OK and the GET methods work fine, but when I use a POST method, like a signup by email/password, the response is:
Status Code: 405 Not Allowed
Any ideas? Thanks in advance!
FYI: I'm using Nginx as a reverse proxy and as the web server for the front end (an AngularJS app), and the config is:
server {
    listen 80;
    server_name example.com;
    access_log /var/log/nginx/nginx.access.log;
    error_log /var/log/nginx/nginx.error.log;

    location / {
        expires -1;
        add_header Pragma "no-cache";
        add_header Cache-Control "no-store, no-cache, must-revalidate, post-check=0, pre-check=0";
        root /usr/share/www;
        try_files $uri $uri/ /index.html =404;
    }

    location /api/v1 {
        proxy_set_header "Access-Control-Allow-Origin";
        proxy_set_header "Access-Control-Allow-Methods" "GET, POST, OPTIONS, PUT, DELETE";
        proxy_set_header "Access-Control-Allow-Headers" "X-Requested-With,Accept,Content-Type, Origin";
        proxy_pass http://127.0.0.1:3000/api/v1;
        proxy_buffering on;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header origin "http://example.com";
    }
}
I don't think it's a MongoDB restriction; Mongo can't even know whether the request is POST or GET. Have you verified that the request arrives at the Node.js server? I think nginx is what returns the 405 status code.
It's possible that the failure comes from trying to return a static page in response to the POST request. Try adding this to the nginx.conf file:
# To dispatch static pages on POST request
error_page 405 = 200 $uri;
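
Presumably that directive sits at server level, next to the existing directives; a sketch based on the config above:

server {
    listen 80;
    server_name example.com;

    # answer POSTs to static pages with the page and a 200
    # instead of nginx's default 405
    error_page 405 = 200 $uri;

    location / {
        root /usr/share/www;
        try_files $uri $uri/ /index.html =404;
    }
}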
I have added the following, and nginx at least works now:
proxy_redirect off;
The nginx config for the default site is now:
server {
    listen 80;
    server_name example.com;
    access_log /var/log/nginx/nginx.access.log;
    error_log /var/log/nginx/nginx.error.log;

    location / {
        expires -1;
        add_header Pragma "no-cache";
        add_header Cache-Control "no-store, no-cache, must-revalidate, post-check=0, pre-check=0";
        root /usr/share/www;
        try_files $uri $uri/ /index.html =404;
    }

    location /api/v1 {
        proxy_set_header 'Access-Control-Allow-Origin' 'http://example.com';
        proxy_set_header 'Access-Control-Allow-Methods' 'GET, POST, OPTIONS, PUT, DELETE';
        proxy_set_header 'Access-Control-Allow-Headers' 'X-Requested-With,Accept,Content-Type, Origin';
        proxy_pass http://127.0.0.1:3000/api/v1;
        proxy_redirect off;
        proxy_buffering on;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header origin "http://example.com";
    }
}
I hope this helps somebody.
