Nginx: proxy_pass + websocket + basic authentication + Safari = endless loop in access log - node.js

Environment: Safari (desktop and iOS); a Meteor web application protected by nginx basic authentication.
When I visit the app in Safari, I see the following access log records repeating in an endless loop. Chrome works as expected, and nothing appears in the nginx error log. My guess is that the user/password authentication fails for some reason and the request gets redirected in a loop, opening new sockjs/WebSocket connections each time.
The application produces no output; only a white screen of death is shown.
144.MY.IP.ADDR - - [25/Sep/2018:17:48:06 -0400] "GET /sockjs/958/msx234wb/websocket HTTP/1.1" 401 195 "-" "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.0 Safari/605.1.15"
144.MY.IP.ADDR - username [25/Sep/2018:17:48:06 -0400] "POST /sockjs/656/mgln1mi5/xhr_send HTTP/1.1" 204 0 "https://my.site.com/" "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.0 Safari/605.1.15"
144.MY.IP.ADDR - username [25/Sep/2018:17:48:06 -0400] "POST /sockjs/958/x9wngcy3/xhr HTTP/1.1" 200 12 "https://my.site.com/" "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.0 Safari/605.1.15"
144.MY.IP.ADDR - username [25/Sep/2018:17:48:06 -0400] "POST /sockjs/958/x9wngcy3/xhr_send HTTP/1.1" 204 0 "https://my.site.com/" "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.0 Safari/605.1.15"
144.MY.IP.ADDR - username [25/Sep/2018:17:48:06 -0400] "GET /sockjs/info?cb=35tsuy5ber HTTP/1.1" 200 90 "https://my.site.com/" "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.0 Safari/605.1.15"
144.MY.IP.ADDR - username [25/Sep/2018:17:48:06 -0400] "POST /sockjs/958/x9wngcy3/xhr_send HTTP/1.1" 204 0 "https://my.site.com/" "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.0 Safari/605.1.15"
Here is my nginx configuration:
map $http_upgrade $connection_upgrade {
default upgrade;
'' close;
}
server {
listen 80;
listen 443 ssl http2;
server_name my.site.com;
ssl_certificate /etc/letsencrypt/live/my.site.com/fullchain.pem; # managed by Certbot
ssl_certificate_key /etc/letsencrypt/live/my.site.com/privkey.pem; # managed by Certbot
include /etc/letsencrypt/options-ssl-nginx.conf; # managed by Certbot
ssl_dhparam /etc/letsencrypt/ssl-dhparams.pem; # managed by Certbot
add_header Strict-Transport-Security "max-age=31557600; includeSubDomains";
add_header X-Content-Type-Options "nosniff" always;
add_header X-Frame-Options "SAMEORIGIN" always;
add_header X-Xss-Protection "1";
ssl_stapling on;
ssl_stapling_verify on;
root html; # irrelevant
index index.html; # irrelevant
location / {
# forward http to https
if ($scheme = http) {
return 301 https://$server_name$request_uri;
}
proxy_pass http://localhost:8080;
proxy_redirect off;
proxy_intercept_errors on;
proxy_http_version 1.1; # recommended with keepalive connections - http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_http_version
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection $connection_upgrade;
proxy_set_header Host $host; # pass the host header - http://wiki.nginx.org/HttpProxyModule#proxy_pass
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-Proto http;
proxy_set_header X-Forwarded-Host $host:$server_port;
proxy_set_header X-Forwarded-For $remote_addr; # preserve client IP
proxy_set_header X-Nginx-Proxy true;
auth_basic "Restricted Access"; # auth realm
auth_basic_user_file .htpasswd-users; # htpasswd file
# the root path (/) MUST NOT be cached
if ($uri != '/') {
expires 30d;
}
}
}
I have no idea why this happens, or why Chrome works as expected while Safari does not.

Here is the solution, saved by the proxy_read_timeout line. nginx's default proxy_read_timeout is 60 seconds, after which an idle upgraded connection is closed, so long-lived sockjs/WebSocket connections need a much larger value:
location / {
auth_basic "Restricted Access"; # auth realm
auth_basic_user_file .htpasswd-users-paco; # htpasswd file
proxy_set_header X-Forwarded-For $remote_addr;
proxy_set_header Host $http_host;
proxy_pass "http://127.0.0.1:SOME_PORT";
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_read_timeout 86400;
# the root path (/) MUST NOT be cached
if ($uri != '/') {
expires 30d;
}
}
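A note on the design choice (this variant is not from the original answer): proxy_read_timeout 86400 keeps idle connections open for up to 24 hours for everything proxied through this location. If that feels too broad, the long timeout could be scoped to the long-lived connections only. A minimal sketch, assuming the Meteor sockjs endpoints live under /sockjs/ and reusing the same placeholder port:
# Hypothetical variant: give only the sockjs/WebSocket traffic the 24-hour read
# timeout; ordinary requests handled by "location /" keep nginx's default of 60s.
location /sockjs/ {
auth_basic "Restricted Access"; # keep the same auth realm
auth_basic_user_file .htpasswd-users-paco;
proxy_pass "http://127.0.0.1:SOME_PORT";
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_set_header Host $http_host;
proxy_set_header X-Forwarded-For $remote_addr;
proxy_read_timeout 86400;
}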

A related Safari-only symptom (NSPOSIXErrorDomain error 100) can be worked around by hiding the Upgrade header on proxied responses:
nginx: proxy_hide_header Upgrade;
Apache: Header unset Upgrade
https://megamorf.gitlab.io/2019/08/27/safari-nsposixerrordomain-100-error-with-nginx-and-apache/
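The snippet above only names the directives. A rough placement sketch for the nginx side (assumed, not taken from the linked post): the directive goes inside the proxied location, so the Upgrade header the backend attaches to ordinary responses is stripped before they reach Safari.
# Hypothetical placement of the workaround; the backend address is illustrative.
# Hides the backend's Upgrade response header, which Safari is reported to
# reject with NSPOSIXErrorDomain error 100.
location / {
proxy_pass http://localhost:8080;
proxy_hide_header Upgrade;
}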

Related

WebSocket Nginx 426 Upgrade required

I have a webchat (example.com/chat) listening on tcp://0.0.0.0:50500 on my server.
I've also configured an nginx reverse proxy to forward requests from example.com/chat to 0.0.0.0:50500.
My site's nginx config looks like this:
map $http_upgrade $connection_upgrade {
default Upgrade;
'' close;
}
server {
server_name example.com www.example.com ;
listen 5.4.....0:443 ssl http2 ;
listen [2a0......:a4]:443 ssl http2 ;
ssl_certificate "/var/www......._51.crt";
ssl_certificate_key "/var/www/.......51.key";
add_header Strict-Transport-Security "max-age=31536000" always;
charset utf-8;
gzip on;
gzip_proxied expired no-cache no-store private auth;
gzip_types text/css text/xml application/javascript text/plain application/json image/svg+xml image/x-icon;
gzip_comp_level 1;
set $root_path /var/www/user/data/www/example.com;
root $root_path;
disable_symlinks if_not_owner from=$root_path;
location / {
proxy_pass http://127.0.0.1:81;
proxy_redirect http://127.0.0.1:81/ /;
include /etc/nginx/proxy_params;
}
location ~ ^/chat {
proxy_set_header Host $host;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header X-Forwarded-Port $server_port;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_pass http://0.0.0.0:50500;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection $connection_upgrade;
proxy_read_timeout 300s;
proxy_buffering off;
}
location ~* ^.+\.(jpg|jpeg|gif|png|svg|js|css|mp3|ogg|mpeg|avi|zip|gz|bz2|rar|swf|ico|7z|doc|docx|map|ogg|otf|pdf|tff|tif|txt|wav|webp|woff|woff2|xls|xlsx|xml)$ {
try_files $uri $uri/ @fallback;
expires 30d;
}
location @fallback {
proxy_pass http://127.0.0.1:81;
proxy_redirect http://127.0.0.1:81/ /;
include /etc/nginx/proxy_params;
}
include "/etc/nginx/fastpanel2-sites/fastuser/example.com.includes";
include /etc/nginx/fastpanel2-includes/*.conf;
error_log /var/www/user/data/logs/example.com-frontend.error.log;
access_log /var/www/user/data/logs/example.com-frontend.access.log;
}
server {
server_name example.com www.example.com ;
listen 5.4.....0:80;
listen [2a.....:a4]:80;
return 301 https://$host$request_uri;
error_log /var/www/user/data/logs/example.com-frontend.error.log;
access_log /var/www/user/data/logs/example.com-frontend.access.log;
}
The webchat is configured to use these settings:
SOCKET_CHAT_URL="wss://example.com"
SOCKET_CHAT_PORT=50500
Since I already send an Upgrade header, the 426 Upgrade Required error looks strange to me.
I know there are a lot of similar threads about this issue; however, they all suggest adding an Upgrade header, which I already have.
I've tried to:
Use both SOCKET_CHAT_URL="ws://example.com" and "wss://example.com"
Change the proxy_pass line to https (https://0.0.0.0:50500): in this case the /chat page gives an nginx 504 timeout.
Change the WebSocket URL to the server IP: wss://123.312.123.321
Use the wss://example.com/chat format: in this case the page closes the websocket connection instantly.
Also, here are my request and response headers:
General
Request URL: https://example.com/chat
Request Method: GET
Status Code: 426
Remote Address: 5**.***.***.*50:443
Referrer Policy: strict-origin-when-cross-origin
Response Headers
date: Mon, 06 Sep 2021 21:11:50 GMT
sec-websocket-version: 13
server: nginx/1.18.0
strict-transport-security: max-age=31536000
upgrade: websocket
x-powered-by: Ratchet/0.4.3
Request Headers
:authority: example.com
:method: GET
:path: /chat
:scheme: https
accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9
accept-encoding: gzip, deflate, br
accept-language: uk-UA,uk;q=0.9
cache-control: max-age=0
sec-ch-ua: "Chromium";v="94", "Google Chrome";v="94", ";Not A Brand";v="99"
sec-ch-ua-mobile: ?0
sec-ch-ua-platform: "Windows"
sec-fetch-dest: document
sec-fetch-mode: navigate
sec-fetch-site: none
sec-fetch-user: ?1
upgrade-insecure-requests: 1
user-agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.31 Safari/537.36
Okay, the server runs on HTTP/2, and WebSockets are not supported over it.
Also, it is not possible to switch only one site to HTTP/1.1 with nginx: the http2 flag applies to the listening socket, so the entire ip:port has to be switched.
We've switched to socket.io instead.
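For reference, a sketch of what dropping HTTP/2 for this socket would look like. This is not what was deployed (the poster moved to socket.io instead), and because the http2 flag belongs to the listen socket rather than to a single server_name, every server block sharing that ip:port would fall back to HTTP/1.1 as well:
# Hypothetical: remove "http2" from the listen directives so clients speak
# HTTP/1.1 and can perform the classic WebSocket Upgrade handshake.
# This affects every virtual host bound to the same ip:port.
server {
server_name example.com www.example.com ;
listen 5.4.....0:443 ssl ;
listen [2a0......:a4]:443 ssl ;
# ... the rest of the configuration stays the same ...
}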

Nodejs Nginx 502 Bad Gateway

I have set up my server on DigitalOcean and it is running on port 3000 using pm2. When I curl the server on the droplet, it responds correctly.
curl http://localhost:3000/
This is my nginx configuration.
server {
listen 80;
#root /var/www/html;
# Add index.php to the list if you are using PHP
#index index.html index.htm index.nginx-debian.html;
server_name api.skreem.co;
access_log /var/log/nginx/api.skreem.access.log;
error_log /var/log/nginx/api.skreem.error.log;
location / {
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-NginX-Proxy true;
proxy_pass http://localhost:3000;
proxy_set_header Host $http_host;
proxy_cache_bypass $http_upgrade;
proxy_redirect off;
}
}
I am getting the following entries in my nginx access and error logs:
#access_log
106.51.111.247 - - [20/Mar/2020:09:27:34 +0000] "GET /favicon.ico HTTP/1.1" 502 584 "http://api.skreem.co/api/v2/" "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.132 Safari/537.36"
#error_log
2020/03/20 09:27:34 [error] 1651#1651: *121 connect() failed (111: Connection refused) while connecting to upstream, client: 106.51.111.247, server: api.skreem.co, request: "GET /favicon.ico HTTP/1.1", upstream: "http://127.0.0.1:3000/favicon.ico", host: "api.skreem.co", referrer: "http://api.skreem.co/api/v2/"
On my pm2 monitor I am not getting any logs. I have rechecked the proxy_pass, but I am still not able to get nginx to pass requests on to the Node.js server.
I am getting a 502 Bad Gateway error.
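No answer was posted in this thread, but the error log itself narrows things down: "connect() failed (111: Connection refused)" means nothing was accepting connections on 127.0.0.1:3000 when nginx tried. A hedged illustration of one common cause (the app listening on a different address than the one nginx connects to), not a confirmed fix for this case:
// Illustration only: bind the Node app explicitly to the address nginx proxies to,
// so "curl http://localhost:3000/" and nginx's upstream 127.0.0.1:3000 hit the
// same listener.
const http = require('http');

const server = http.createServer((req, res) => {
  res.end('ok');
});

server.listen(3000, '127.0.0.1', () => {
  console.log('listening on 127.0.0.1:3000');
});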

POST params are not passed from my reverse proxy to my backend server

I have an nginx server which serves static web content and a reverse proxy which forwards requests to my backend API.
I configured my SSL certificate with certbot.
My problem is that the POST params are not passed from my reverse proxy to my backend server.
My GET params are retrieved properly.
I've logged my POST request in Nginx
"POST /Users/signup HTTP/2.0" 500 1181 "https://MYDNS/signup" "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.131 Safari/537.36" [{\x22username\x22:\x22testet\x22}]
My nginx configuration file
server {
listen 443 ssl http2;
listen [::]:443 ssl http2;
server_name MYDNS;
root /var/www/MYDNS;
ssl_certificate /etc/letsencrypt/live/MYDNS/fullchain.pem;
ssl_certificate_key /etc/letsencrypt/live/MYDNS/privkey.pem;
error_log /var/log/nginx/MYDNS.error.log warn;
access_log /var/log/nginx/MYDNS.postdata-access.log post_logs;
location /Users {
proxy_pass http://127.0.0.1:3001;
proxy_http_version 1.1;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Host $host;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
}
}
server {
listen 80;
listen [::]:80;
server_name MYDNS;
return 301 https://MYDNS$request_uri;
}
In my backend server, in my app.js file:
var Users = require('./routes/Users');
app.use('/Users', Users);
In my routes/Users.js file
router.post('/signin', function (req, res, next) {
console.log(req.params.username); // --> undefined
console.log(req.body.username); // --> undefined
});
I also tried to change
proxy_pass http://127.0.0.1:3001 to
proxy_pass http://localhost:3001
On my server, I've tested
curl -i -H "Content-Type: application/json" -H "Accept: application/json" -X POST -d '{"username": "this_is_param_1"}' http://localhost:3001/Users/signin
but the result is the same.
Any ideas, please?
It turned out I had made a mistake and disabled bodyParser in my backend server.
It's OK now.
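For completeness, a minimal sketch of what re-enabling body parsing might look like; the poster's exact middleware setup is not shown, so this assumes a stock Express 4.16+ app.js:
// Hypothetical app.js excerpt: without body-parsing middleware, req.body stays
// undefined for JSON POSTs, which matches the behaviour described above.
var express = require('express');
var app = express();

app.use(express.json()); // parse application/json bodies
app.use(express.urlencoded({ extended: false })); // parse form-encoded bodies

var Users = require('./routes/Users');
app.use('/Users', Users);
Note that req.params only carries route parameters (for example /signin/:username), so a value sent in the JSON body shows up on req.body.username rather than req.params.username.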

nodejs nginx 504 gateway timeout reverse proxy

I am trying to run a Node.js application on port 2000 of an Ubuntu server.
The application runs using pm2.
As far as I can tell the application is running fine, as I can use the command:
curl http://localhost:2000
and I get the web page back almost instantly.
The problem I have is that I am trying to use nginx as the reverse proxy and for SSL. From an SSL perspective this works great: browsing to 'http://dreamingoftech.uk' redirects me to the HTTPS version and I get the green padlock. Happy days.
Unfortunately, however, I don't get the web page but a 504 error.
The strange thing is, if I run:
time -p GET -H 'Host: dreamingoftech.uk' http://127.0.0.1:2000
I do eventually get the web page returned (although I have to admit that I don't really know exactly what this command is doing).
I am using the basic nginx.conf file except with these values added:
proxy_connect_timeout 600;
proxy_send_timeout 600;
proxy_read_timeout 600;
send_timeout 600;
and my sites-available/default file is as follows:
#HTTP
server {
listen 80;
listen [::]:80 default_server ipv6only=on;
return 301 https://$host$request_uri;
}
# HTTPS — proxy all requests to the Node app
server {
# Enable HTTP/2
listen 443 ssl http2;
listen [::]:443 ssl http2;
server_name dreamingoftech.uk;
# Use the Let’s Encrypt certificates
ssl_certificate /etc/letsencrypt/live/dreamingoftech.uk/fullchain.pe$
ssl_certificate_key /etc/letsencrypt/live/dreamingoftech.uk/privkey.$
# Include the SSL configuration from cipherli.st
include snippets/ssl-params.conf;
location / {
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-NginX-Proxy true;
proxy_set_header X-Forwarded-Proto https;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_pass http://127.0.0.1:2000;
proxy_ssl_session_reuse off;
proxy_set_header Host $http_host;
proxy_cache_bypass $http_upgrade;
proxy_redirect off;
proxy_http_version 1.1;
}
}
In the access logs I get this error, although I am not sure if this is a red herring:
"GET / HTTP/2.0" 504 665 "-" "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.84 Safari/537.36"
My question really is: could anyone advise me on the correct setup to get my website to load in a timely manner? I can't understand why a higher timeout is needed if the server is capable of delivering the page almost instantly (although again this could be my naivety).
Thanks in advance
Steve

POST response body empty on NGINX server

I am running a Node.js application on an NGINX server with an SSL certificate. I just realized that all of the POST/PUT responses that the application attempts to return to the front end are empty; essentially the response is an empty string. The application works fine in a local environment as well as on Heroku, with all images and GET requests resolving as they should.
I am expecting JSON to come back in my HTTP responses, whether the request was successful or had errors. Right now I am getting an empty string. Below is a screenshot of my response; you will notice that the Content-Length header is completely missing.
Screenshot of my headers
Essentially, is there something wrong with my NGINX proxy that is causing my HTTP responses to come back blank?
Below is my NGINX configuration file
server {
listen 80;
server_name blah.com;
rewrite ^/(.*) https://blah.com/$1 permanent;
}
server {
# SSL configuration
#
listen 443 ssl default_server;
ssl_certificate XXX;
ssl_certificate_key XXX;
ssl_protocols TLSv1 TLSv1.1 TLSv1.2;
ssl_prefer_server_ciphers on;
ssl_ciphers 'EECDH+AESGCM:EDH+AESGCM:AES256+EECDH:AES256+EDH';
root /var/www/html;
# Add index.php to the list if you are using PHP
index index.html index.htm index.nginx-debian.html;
server_name blah.com;
location / {
proxy_pass http://127.0.0.1:8080;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection 'upgrade';
proxy_set_header Host $host;
proxy_cache_bypass $http_upgrade;
}
}
Below are the access logs the application is generating:
24.123.110.242 - - [27/Jul/2017:21:10:45 +0000] "POST /sign-up HTTP/1.1" 400 5 "https://app.quiqmath.com/sign-up" "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36"
24.123.110.242 - - [27/Jul/2017:21:10:47 +0000] "POST /socket.io/?__sails_io_sdk_version=0.11.0&__sails_io_sdk_platform=browser&__sails_io_sdk_language=javascript&EIO=3&transport=polling&t=Ls5upja&sid=4mrvSlNzMv9ZCetHAAAL HTTP/1.1" 502 182 "-" "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/603.3.8 (KHTML, like Gecko) Version/10.1.2 Safari/603.3.8"
24.123.110.242 - - [27/Jul/2017:21:10:47 +0000] "GET /socket.io/?__sails_io_sdk_version=0.11.0&__sails_io_sdk_platform=browser&__sails_io_sdk_language=javascript&EIO=3&transport=polling&t=Ls5ujej&sid=4mrvSlNzMv9ZCetHAAAL HTTP/1.1" 200 4 "-" "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/603.3.8 (KHTML, like Gecko) Version/10.1.2 Safari/603.3.8"
24.123.110.242 - - [27/Jul/2017:21:10:48 +0000] "GET /socket.io/?__sails_io_sdk_version=0.11.0&__sails_io_sdk_platform=browser&__sails_io_sdk_language=javascript&EIO=3&transport=polling&t=Ls5upzL HTTP/1.1" 200 101 "-" "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/603.3.8 (KHTML, like Gecko) Version/10.1.2 Safari/603.3.8"
24.123.110.242 - - [27/Jul/2017:21:10:49 +0000] "GET /socket.io/?__sails_io_sdk_version=0.11.0&__sails_io_sdk_platform=browser&__sails_io_sdk_language=javascript&EIO=3&transport=polling&t=Ls5up_l&sid=XSl8MUuC7TH6v4iLAAAM HTTP/1.1" 200 5 "-" "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/603.3.8 (KHTML, like Gecko) Version/10.1.2 Safari/603.3.8"
In Sails.js applications on a version greater than 0.11.x and less than 1.0, the res.badRequest() response strips the data out of the response when the application is running in production mode. To fix this, either upgrade the Sails.js application to 1.x (the Sails documentation explains how to do that), or comment out the following line in the api/responses/badRequest.js file:
// Only include errors in response if application environment
// is not set to 'production'. In production, we shouldn't
// send back any identifying information about errors.
if (sails.config.environment === 'production') {
//data = undefined;
}
