Silence NGINX in Docker Compose - node.js

How can I silence NGINX in the terminal window while using Docker-Compose? Every single call is being echoed to the console. It is so noisy that I cannot see the other console.log() messages from Node.
Here's my nginx.conf file...
worker_processes 4;

events { worker_connections 4096; }

http {
    ssl_session_cache shared:SSL:10m;
    ssl_session_timeout 30m;
    proxy_temp_path /var/tmp;
    include mime.types;
    default_type application/octet-stream;
    sendfile on;

    upstream mycompany-api01 {
        server mycompany-api01:3000;
    }

    server {
        listen 80;
        server_name api01.mycompany.com;
        root /var/www/public;

        location / {
            proxy_pass http://mycompany-api01;
            proxy_http_version 1.1;
            proxy_set_header Upgrade $http_upgrade;
            proxy_set_header Connection 'upgrade';
            proxy_set_header Host $host;
            proxy_set_header X-Real-IP $remote_addr;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            proxy_set_header X-NginX-Proxy true;
            proxy_cache_bypass $http_upgrade;
        }
    }
}

You can use access_log off; to disable only the access log and leave the error log on.
Or, if you simply don't want nginx's output mixed into the console, you can run docker-compose logs -f node to follow only the logs of the node service.
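For the first option, here is a minimal sketch of where the directives could sit in the nginx.conf from the question; the error_log path and level are assumptions, not part of the original config:
http {
    access_log off;                            # stop logging every request
    error_log /var/log/nginx/error.log warn;   # assumed path/level; keep warnings and errors visible

    # ... the rest of the existing http block stays unchanged ...
}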

You can also silence the container completely with the logging option on the nginx service in your docker-compose.yml (note that this also hides the service from docker-compose logs):
logging:
  driver: none
Related

No Such File Or Directory Error when uploading a file to website file upload folder on VPS

Uploading files with Multer and Express in Node.js to the file upload folder works fine on my local server, but not on the Cloud VPS. I get the following error when I make a POST request through Multer: ENOENT: no such file or directory, mkdir 'dist/assets/img' (500 Internal Server Error), even though the directory exists and the path is correct.
My website uses Nginx as the web server; maybe my nginx configuration file is incorrect?
user nginx;
worker_processes auto;
error_log /var/log/nginx/error.log;
pid /run/nginx.pid;

# Load dynamic modules. See /usr/share/nginx/README.dynamic.
include /usr/share/nginx/modules/*.conf;

events {
    worker_connections 1024;
}

http {
    include mime.types;
    default_type application/octet-stream;
    send_timeout 1800;
    sendfile on;
    keepalive_timeout 6500;
    client_max_body_size 20M;

    server {
        listen 80;
        server_name bcd.com;

        location / {
            proxy_pass http://localhost:8089;
            proxy_set_header Host $host;
            proxy_set_header X-Real-IP $remote_addr;
            proxy_set_header X-Client-Verify SUCCESS;
            proxy_set_header X-Client-DN $ssl_client_s_dn;
            proxy_set_header X-SSL-Subject $ssl_client_s_dn;
            proxy_set_header X-SSL-Issuer $ssl_client_i_dn;
            proxy_set_header X-Forwarded-For $remote_addr;
            proxy_set_header Host $http_host;
            proxy_set_header Upgrade $http_upgrade;
            proxy_set_header Connection 'upgrade';
            proxy_read_timeout 1800;
            proxy_connect_timeout 1800;
        }
    }

    server {
        listen 80;
        server_name app.bcd.com;

        location / {
            proxy_pass http://localhost:8020;
            proxy_set_header Host $host;
            proxy_set_header X-Real-IP $remote_addr;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            proxy_set_header X-Client-Verify SUCCESS;
            proxy_set_header X-Client-DN $ssl_client_s_dn;
            proxy_set_header X-SSL-Subject $ssl_client_s_dn;
            proxy_set_header X-SSL-Issuer $ssl_client_i_dn;
            proxy_read_timeout 1800;
            proxy_connect_timeout 1800;
        }
    }
}
Since everything works correctly locally, I assume the problem is on the VPS. I did try to configure nginx, but I don't have enough experience with it.

Nginx the event was not signaled for 5s

I am hosting my web application on an NGINX server. Until now it worked fine, but I don't know why I am getting the errors shown in the screenshot below.
I don't know why these errors occur. As trial and error, I thought my SSL certificate had expired, so I updated it, but the same errors repeated. I also checked my conf.d file and I'm not sure everything in it is right.
Here is my conf file:
worker_processes 1;

events {
    worker_connections 1024;
}

http {
    include mime.types;
    default_type application/octet-stream;
    sendfile on;
    send_timeout 100s;
    keepalive_timeout 95;
    #ssl_session_cache shared:SSL:10m;
    #ssl_session_timeout 10m;
    client_body_in_file_only clean;
    client_body_buffer_size 32K;
    client_max_body_size 300M;

    server {
        listen 80;
        listen 443 ssl;
        server_name sample.com;

        ssl_certificate ..\ssl\mbxxxx.crt;
        ssl_certificate_key ..\ssl\mbkey.pem;
        ssl_protocols TLSv1 TLSv1.1 TLSv1.2;

        location / {
            proxy_http_version 1.1;
            client_max_body_size 300M;
            proxy_read_timeout 300s;
            proxy_connect_timeout 95s;
            proxy_set_header Upgrade $http_upgrade;
            proxy_set_header Host $host;
            proxy_cache_bypass $http_upgrade;
            proxy_set_header X-Real-IP $http_referer;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            proxy_set_header content-type "application/json";
            proxy_set_header X-NginX-Proxy true;
            proxy_set_header X-Forwarded-Proto $scheme;
            proxy_set_header REMOTE_ADDR $remote_addr;
            proxy_set_header Access-Control-Allow-Origin *;
            proxy_set_header Connection 'upgrade';
            proxy_pass http://127.0.0.1:xxxx;
        }

        location /api {
            proxy_http_version 1.1;
            client_max_body_size 300M;
            proxy_read_timeout 300s;
            proxy_connect_timeout 95s;
            proxy_set_header Upgrade $http_upgrade;
            proxy_set_header Host $host;
            proxy_cache_bypass $http_upgrade;
            proxy_set_header X-Real-IP $http_referer;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            proxy_set_header content-type "application/json";
            proxy_set_header X-NginX-Proxy true;
            proxy_set_header X-Forwarded-Proto $scheme;
            proxy_set_header REMOTE_ADDR $remote_addr;
            proxy_set_header Access-Control-Allow-Origin *;
            proxy_set_header Connection 'upgrade';
            proxy_pass http://127.0.0.1:xxxx;
        }

        error_page 405 =200 $uri;

        # error_page 500 502 503 504 /50x.html;
        #location = /50x.html {
        #    root html;
        #}
    }
}
There are no CORS restrictions. Any suggestions or reference docs would be a great help.
I'm also not sure whether this question describes my issue properly.
Thanks in advance.
While researching how to solve this issue, I found an answer saying I had to remove the passphrase from the SSL certificate. I didn't understand it. So what I did was update the SSL certificate and run my application again, without success. Then I thought nginx should be restarted after updating the SSL certificate, and surprisingly, after restarting nginx it worked fine.
You can specify the passphrase in a text file and hook it up via the ssl_password_file directive. Something like this:
listen 3001 ssl;
ssl_certificate cert.pem;
ssl_certificate_key key.pem;
ssl_password_file pass.txt;

NodeJS + Nginx: busboy behind an nginx reverse proxy

If I spin up a Node.js server and use busboy, I am able to upload large files (10+ GB), but when I put the same Node.js code behind nginx as a reverse proxy, nginx throws "413 Request Entity Too Large".
Has anyone encountered this issue? How do we solve it? I know I can set the client_max_body_size directive, but that still leaves a hard limit on the file size.
My nginx config looks like the following:
server {
    listen 80;
    server_name *.example.local;

    location /api {
        proxy_pass http://host.docker.internal:5000;
        proxy_pass_header Accept;
        proxy_pass_header Server;
        keepalive_requests 1000;
        proxy_redirect off;
        proxy_buffering off;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Ssl on;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header Host $host;
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "upgrade";
        proxy_connect_timeout 600;
        proxy_send_timeout 600;
        proxy_read_timeout 600;
        send_timeout 600;
        client_max_body_size 100M;
    }
}
client_max_body_size size;
Sets the maximum allowed size of the client request body. If the size in a request exceeds the configured value, the 413 (Request Entity Too Large) error is returned to the client. Please be aware that browsers cannot correctly display this error. Setting size to 0 disables checking of client request body size.
(Source: nginx documentation)
In your nginx configuration you have client_max_body_size 100M. You need to adjust it to your needs, e.g. 15G. This directive defines the maximum size of the request body a client may send to nginx; if the payload is larger, a 413 status is returned to the client. Setting it to 0 (as suggested by Taxel) disables the size check, but that exposes the server to abuse: a malicious user can keep it busy by uploading arbitrarily large files, which can degrade performance.
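If you would rather raise the limit than disable the check entirely, a minimal sketch of the question's /api location with a larger cap (the 15G figure is just the example value mentioned above):
location /api {
    # allow request bodies up to 15 GB on this route; nginx's default elsewhere is 1 MB
    client_max_body_size 15G;
    proxy_pass http://host.docker.internal:5000;
}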
Answering my own question:
In order to disable request/proxy buffering in my nginx reverse proxy, I had to add the following directives to the nginx config file:
proxy_buffering off;
proxy_request_buffering off;
So, now my nginx config file looks like the following:
server {
    listen 80;
    server_name *.example.local;

    location /api {
        proxy_pass http://host.docker.internal:5000;
        proxy_pass_header Accept;
        proxy_pass_header Server;
        keepalive_requests 1000;
        proxy_redirect off;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Ssl on;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header Host $host;
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "upgrade";
        proxy_connect_timeout 600;
        proxy_send_timeout 600;
        proxy_read_timeout 600;
        send_timeout 600;
        client_max_body_size 0;
        proxy_buffering off;         # <-----
        proxy_request_buffering off; # <-----
    }
}

Proxy from nginx to node - not enough worker connections

I want to set up an nginx server listening on one port and proxying connections to a Node.js application on a different port. The problem is that I get a 500 error with worker_connections are not enough while connecting to upstream.
Nginx config:
upstream node {
    server 127.0.0.1:1235;
    keepalive 8;
}

server {
    listen 1234;
    server_name http://123.123.123.123:1234 node;
    access_log off;

    location / {
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "upgrade";
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header Host $http_host;
        proxy_set_header X-NginX-Proxy true;
        proxy_pass http://123.123.123.123:1234/;
        proxy_redirect off;
    }
}
What's wrong?
You should correct your proxy_pass, since you are proxying requests back to nginx itself: the server listens on port 1234 and proxy_pass points at that same address and port, so each request loops back in and opens more connections until worker_connections is exhausted. According to your config it should be:
proxy_pass http://node/;
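Applied to the config in the question, the location block would then look roughly like this; only the proxy_pass line changes:
location / {
    proxy_http_version 1.1;
    proxy_set_header Upgrade $http_upgrade;
    proxy_set_header Connection "upgrade";
    proxy_set_header X-Real-IP $remote_addr;
    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    proxy_set_header Host $http_host;
    proxy_set_header X-NginX-Proxy true;
    proxy_pass http://node/;   # the "node" upstream defined above, not nginx's own listen address
    proxy_redirect off;
}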
You may need to add:
proxy_responses 0;
to your nginx config.

Nginx not proxying websockets (using socket.io and nodejs)

I have a Node.js application running on port 8000 and the following nginx config to proxy my requests (8800 for HTTP and 8443 for HTTPS):
http {
    include mime.types;
    default_type application/octet-stream;
    sendfile on;
    keepalive_timeout 65;
    gzip on;

    upstream node {
        server 127.0.0.1:8000;
    }

    map $http_upgrade $connection_upgrade {
        default upgrade;
        '' close;
    }

    server {
        listen 8800;

        access_log /usr/local/etc/nginx/access.log;
        error_log /usr/local/etc/nginx/error.log;
        rewrite_log on;

        location / {
            proxy_set_header X-Real-IP $remote_addr;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            proxy_set_header Host $http_host;
            proxy_set_header X-NginX-Proxy true;
            proxy_pass http://node;
            proxy_redirect off;
            proxy_http_version 1.1;
            proxy_set_header Upgrade $http_upgrade;
            proxy_set_header Connection $connection_upgrade;
        }
    }

    server {
        listen 8443;
        ssl on;
        ssl_certificate /usr/local/etc/nginx/server.crt;
        ssl_certificate_key /usr/local/etc/nginx/server.key;
        ssl_session_timeout 5m;
        ssl_protocols SSLv2 SSLv3 TLSv1;
        ssl_ciphers HIGH:!aNULL:!MD5;
        ssl_prefer_server_ciphers on;

        access_log /usr/local/etc/nginx/access.log;
        error_log /usr/local/etc/nginx/error.log;
        rewrite_log on;

        location / {
            proxy_set_header X-Real-IP $remote_addr;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            proxy_set_header Host $http_host;
            proxy_set_header X-NginX-Proxy true;
            proxy_pass http://node;
            proxy_redirect off;
            proxy_http_version 1.1;
            proxy_set_header Upgrade $http_upgrade;
            proxy_set_header Connection $connection_upgrade;
        }
    }
}
When I run my application at http://localhost:8800 and check Chrome DevTools, the websocket request (ws://localhost) returns a successful 101 Switching Protocols. But the same request (wss://localhost) doesn't work when I run it over https://localhost:8443 (with a self-signed SSL cert).
Also, when I try to debug the request on nginx with
curl -i -N -H "Connection: Upgrade" -H "Upgrade: websocket" http://localhost:8800/
or
curl -i -N -k -H "Connection: Upgrade" -H "Upgrade: websocket" https://localhost:8443/
the response is 502 Bad Gateway.
I am trying to figure out why nginx isn't proxying the request properly.
