ModSecurity does not work if no required SSL certificate was sent - security

I have a lot of rules in ModSecurity, but none of them work if the host is the numeric server IP (https://SERVER_IP); I get this response:
400 Bad Request No required SSL certificate was sent
My SSL certificate is only valid for my domain name, but shouldn't ModSecurity work anyway? Because every request passes through ModSecurity before going to the application, or something like that.
Questions:
1 - How can I fix it?
2 - Why does ModSecurity not work, and am I vulnerable if I don't fix it?
This is my nginx.conf:
load_module modules/ngx_http_modsecurity_module.so;
user nobody;
worker_processes 1;
error_log /var/log/nginx/error.log error;
pid /var/run/nginx.pid;
events {
worker_connections 5000;
use epoll;
multi_accept on;
}
http {
sendfile on;
tcp_nopush on;
tcp_nodelay on;
modsecurity on;
modsecurity_rules_file /etc/nginx/modsec/main.conf;
client_header_timeout 20s;
client_body_timeout 20s;
client_max_body_size 20m;
client_header_buffer_size 6k;
client_body_buffer_size 128k;
large_client_header_buffers 2 2k;
send_timeout 10s;
keepalive_timeout 30 30;
reset_timedout_connection on;
server_names_hash_max_size 1024;
server_names_hash_bucket_size 1024;
ignore_invalid_headers on;
connection_pool_size 256;
request_pool_size 4k;
output_buffers 4 32k;
postpone_output 1460;
include mime.types;
default_type application/octet-stream;
# SSL Settings
ssl_certificate /etc/nginx/ssl/cf_cert.pem;
ssl_certificate_key /etc/nginx/ssl/cf_key.pem;
ssl_client_certificate /etc/nginx/ssl/origin-pull-ca.pem;
ssl_verify_client on;
ssl_verify_depth 5;
ssl_session_cache shared:SSL:10m;
ssl_session_timeout 1h;
ssl_protocols TLSv1.1 TLSv1.2;
ssl_prefer_server_ciphers on;
ssl_ciphers "EECDH+ECDSA+AESGCM:EECDH+aRSA+AESGCM:EECDH+ECDSA+SHA384:EECDH+ECDSA+SHA256:EECDH+aRSA+SHA384:EECDH+aRSA+SHA256:EECDH+aRSA!RC4:EECDH:!RC4:!aNULL:!eNULL:!LOW:!3DES:!MD5:!EXP:!PSK:!SRP:!DSS";
ssl_session_tickets on;
ssl_session_ticket_key /etc/nginx/ssl/ticket.key;
ssl_dhparam /etc/nginx/ssl/dhparam.pem;
ssl_ecdh_curve secp384r1;
ssl_buffer_size 4k;
# Logs
log_format main '$remote_addr - $remote_user [$time_local] $request '
'"$status" $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';
log_format bytes '$body_bytes_sent';
access_log off;
# Cache bypass
map $http_cookie $no_cache {
default 0;
~SESS 1;
~wordpress_logged_in 1;
}
etag off;
server_tokens off;
# Headers
add_header X-XSS-Protection "1; mode=block" always;
add_header X-Frame-Options deny always;
server {
listen 443 ssl http2;
server_name domain.com;
root /home/user/public_html;
index index.php index.html;
access_log /var/log/domain/domain.com.bytes bytes;
access_log /var/log/domain/domain.com.log combined;
error_log /var/log/domain/domain.com.error.log warn;
location / {
location ~.*\.(jpeg|jpg|png|gif|bmp|ico|svg|css|js)$ {
expires max;
}
location ~ [^/]\.php(/|$) {
try_files $uri =404;
fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
fastcgi_pass unix:/opt/alt/php-fpm73/usr/var/sockets/user.sock;
fastcgi_index index.php;
include /etc/nginx/fastcgi_params;
}
}
}
}

In short: this is unrelated to ModSecurity.
Your server configuration requires the client to send a client certificate. The TLS handshake will fail if the client does not send such a certificate, and that is the error you see.
ModSecurity only analyzes the application data at the HTTP level. With HTTPS, the TLS handshake first needs to complete successfully before any application data gets exchanged. Since in this case the handshake fails because the client sent no certificate, the connection gets closed before any HTTP data is exchanged, and thus before ModSecurity is used.
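If you want requests that arrive without a client certificate (for example, requests sent directly to https://SERVER_IP) to still be inspected by ModSecurity, one option is to make client verification optional and reject unverified clients at the HTTP level instead, so that the handshake completes and the request reaches the rules before being refused. A minimal sketch, assuming the rest of the configuration stays as in the question:

# in the http block, instead of ssl_verify_client on;
ssl_verify_client optional;

server {
    listen 443 ssl http2;
    server_name domain.com;

    # The handshake now succeeds even without a client certificate, so the
    # request passes through ModSecurity; clients that did not present a valid
    # origin-pull certificate are rejected here instead.
    if ($ssl_client_verify != SUCCESS) {
        return 403;
    }

    # ... rest of the server block as in the question ...
}

Whether that is desirable is a separate question: with ssl_verify_client on, requests without the Cloudflare origin-pull certificate are already refused during the handshake, so nothing reaches the application and there is nothing for the rules to inspect.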

Related

Apply client_max_body_size to specific route in named location

I have the following nginx.conf file:
server {
listen 443 ssl http2;
listen [::]:443 ssl http2;
server_name mywebsite.com www.mywebsite.com;
server_tokens off;
ssl_certificate /etc/certificates/mywebsite.com/fullchain.pem;
ssl_certificate_key /etc/certificates/mywebsite.com/privkey.pem;
ssl_buffer_size 8k;
ssl_dhparam /etc/ssl/certs/dhparam-2048.pem;
ssl_protocols TLSv1.2;
ssl_prefer_server_ciphers on;
ssl_ciphers ECDH+AESGCM:ECDH+AES256:ECDH+AES128:DH+3DES:!ADH:!AECDH:!MD5;
ssl_ecdh_curve secp384r1;
ssl_session_tickets off;
ssl_stapling on;
ssl_stapling_verify on;
resolver 8.8.8.8;
location / {
client_max_body_size 10M;
try_files $uri @nodejs;
}
location @nodejs {
proxy_pass http://localhost:8080;
add_header X-Frame-Options "SAMEORIGIN" always;
add_header X-XSS-Protection "1; mode=block" always;
add_header X-Content-Type-Options "nosniff" always;
add_header Referrer-Policy "no-referrer-when-downgrade" always;
add_header Content-Security-Policy "default-src * data: 'unsafe-eval' 'unsafe-inline'" always;
# enable strict transport security only if you understand the implications
# add_header Strict-Transport-Security "max-age=31536000; includeSubDomains; preload" always;
}
root /var/www/html;
# index index.html index.htm index.nginx-debian.html;
}
which allows me to upload an image through the following Node.js endpoint (over HTTPS/443):
https://www.mywebsite.com/api/photo/upload
It works just fine for images up to 10 MB.
That's because of this directive: client_max_body_size 10M;.
What I need: apply the 10 MB limit only to that route (/api/photo/upload), but not to others such as /api/echo. Note that these routes are handled by the named location @nodejs.
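One possible approach (a sketch only, not from the original post, and assuming the Node.js app listens on localhost:8080 as above): client_max_body_size can be overridden per location, so you can declare a dedicated location for the upload route and raise the limit only there:

# 10 MB limit only for the upload route; proxied straight to the app.
location = /api/photo/upload {
    client_max_body_size 10M;
    proxy_pass http://localhost:8080;
    # ... same add_header lines as in the @nodejs location ...
}

# All other routes keep the default 1 MB body limit.
location / {
    try_files $uri @nodejs;
}

location @nodejs {
    proxy_pass http://localhost:8080;
    # ... headers as in the question ...
}

The exact-match location takes precedence over the prefix location /, so only requests to /api/photo/upload get the larger limit.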

NGINX does not return 404 on wrong pages

I am trying to find out why my website is not returning a 404 on non-existent URLs.
For example, I don't have a mydomain.com/contacts URL, but that link still returns a page.
My goal is to make only the main URL ("/") accessible.
Every other URL should return a 404 (or, better, a 301), and every parameter request (such as /?_escaped_fragment_=) should return a 301.
Here is a piece of my nginx.conf:
user www-data;
worker_processes auto;
pid /run/nginx.pid;
events {
worker_connections 768;
# multi_accept on;
}
http {
sendfile on;
tcp_nopush on;
tcp_nodelay on;
keepalive_timeout 65;
types_hash_max_size 2048;
include /etc/nginx/mime.types;
default_type application/octet-stream;
ssl_protocols TLSv1 TLSv1.1 TLSv1.2; # Dropping SSLv3, ref: POODLE
ssl_prefer_server_ciphers on;
access_log /var/log/nginx/access.log;
error_log /var/log/nginx/error.log;
gzip on;
server {
listen 443 ssl http2;
ssl_certificate /etc/ssl/example.crt;
ssl_certificate_key /etc/ssl/example.key;
server_name www.example.com;
return 301 $scheme://example.com/;
}
server {
listen 443 ssl http2;
ssl_certificate /etc/ssl/example.crt;
ssl_certificate_key /etc/ssl/example.key;
server_name crowdrobot.ru;
location / {
proxy_set_header X-Forwarded-For $remote_addr;
proxy_set_header Host $http_host;
proxy_pass http://11.11.111.111:5000;
}
}
server {
listen 80 default_server;
server_name _;
return 301 https://example.com/;
}
}
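One way to get that behaviour (a sketch under the assumption that only "/" should be proxied; this is not from the original thread): inside the server block that currently proxies everything, match the main page exactly and let a catch-all location return 404:

# Only the main page is proxied to the backend.
location = / {
    # Parameter requests such as /?_escaped_fragment_= are redirected to the
    # bare main page.
    if ($args) {
        return 301 $scheme://$host/;
    }
    proxy_set_header X-Forwarded-For $remote_addr;
    proxy_set_header Host $http_host;
    proxy_pass http://11.11.111.111:5000;
}

# Every other URL gets a 404.
location / {
    return 404;
}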

socket.io is slow only when the server emits a message to another client, while it is very fast back to the client that sent the message

The problem:
When a client sends a message to the server, there is a delay before the message is emitted to the other client, while the same message is sent back super fast to the client that sent it.
i.e.
Client A --> Server --> Client A: super fast
Client A --> Server --> Client B: a bit slower
I am using socket.io with Node.js/Express and nginx as the web server (tested with Apache, same issue).
I noticed this in Safari and also in mobile Chrome.
Edit: below is the nginx config.
worker_processes 1;
error_log logs/error.log;
error_log logs/error.log notice;
error_log logs/error.log info;
pid /var/run/nginx.pid;
events {
worker_connections 1024;
}
error_log /usr/local/apps/nginx/var/log/error_log debug;
http {
include mime.types;
default_type application/octet-stream;
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';
sendfile on;
tcp_nodelay on;
keepalive_timeout 65;
client_max_body_size 200M;
server_names_hash_bucket_size 64;
server_tokens off;
include /usr/local/apps/nginx/etc/conf.d/*.conf;
}
server {
listen *:443 ssl ;
server_name wsapidev.sample.com www.wsapidev.sample.com;
error_log /usr/local/apps/nginx/var/log/wsapidev.sample.com.err;
access_log /usr/local/apps/nginx/var/log/wsapidev.sample.com.log main;
ssl on;
ssl_certificate /etc/ssl/cert/wsapidev.sample.com.crt;
ssl_certificate_key /etc/ssl/private/wsapidev.sample.com.key;
ssl_dhparam /etc/ssl/private/dhparam.pem;
location /socket {
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_pass http://127.0.0.1:44451/socket;
}
}
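For reference only (the same delay shows up behind Apache, so the cause is probably not nginx itself): a socket.io location behind nginx is commonly given long read/send timeouts and unbuffered responses so that long-polling fallback traffic is not held back at the proxy. A sketch based on the location above:

location /socket {
    proxy_http_version 1.1;
    proxy_set_header Upgrade $http_upgrade;
    proxy_set_header Connection "upgrade";
    proxy_set_header Host $host;

    # Keep idle WebSocket connections open and do not buffer long-polling
    # responses at the proxy.
    proxy_read_timeout 600s;
    proxy_send_timeout 600s;
    proxy_buffering off;

    proxy_pass http://127.0.0.1:44451/socket;
}

It is also worth confirming on the client which transport socket.io actually negotiated, since a fallback to HTTP polling would explain extra latency for the second client.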

nginx default_site doesn't appear to be working

I've got nginx running in Docker as a reverse proxy, and have for some time - it works wonderfully, short of one little issue I've recently seen crop up.
What I'd like: when a user gets to my nginx server and there isn't a .conf file specified for the URL, return a 404/444 or some other HTTP response that drops the connection.
What I'm seeing: when a user navigates to subdomain.url.com and that subdomain isn't specified in any of my *.conf files, nginx uses the first conf file it finds, ignoring the default.conf. Find my details below.
Any other tips/tricks you can provide would be awesome as well!
nginx.conf:
user nginx;
worker_processes 1;
error_log /etc/nginx/log/error.log warn;
pid /var/run/nginx.pid;
events {
worker_connections 1024;
}
http {
include /etc/nginx/mime.types;
default_type application/octet-stream;
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';
access_log /etc/nginx/log/access.log main;
sendfile on;
#tcp_nopush on;
keepalive_timeout 70;
#gzip on;
include /etc/nginx/conf.d/*.conf;
}
default.conf:
server {
server_name _;
listen 80 default_server;
return 444;
}
server {
server_name _;
listen 443 default_server;
return 444;
}
Example of a conf file (there are maybe a dozen of these):
server {
listen sub.domain.com:80;
server_name sub.domain.com;
return 302 https://sub.domain.com$request_uri;
}
server {
listen sub.domain.com:443;
server_name sub.domain.com;
ssl_certificate /etc/nginx/keys/ssl.pem;
ssl_certificate_key /etc/nginx/keys/ssl.key;
ssl on;
ssl_protocols TLSv1 TLSv1.1 TLSv1.2;
ssl_ciphers 'ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:DHE-DSS-AES128-GCM-SHA256:kEDH+AESGCM:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA:ECDHE-ECDSA-AES256-SHA:DHE-RSA-AES128-SHA256:DHE-RSA-AES128-SHA:DHE-DSS-AES128-SHA256:DHE-RSA-AES256-SHA256:DHE-DSS-AES256-SHA:DHE-RSA-AES256-SHA:AES128-GCM-SHA256:AES256-GCM-SHA384:AES128-SHA256:AES256-SHA256:AES128-SHA:AES256-SHA:AES:CAMELLIA:DES-CBC3-SHA:!aNULL:!eNULL:!EXPORT:!DES:!RC4:!MD5:!PSK:!aECDH:!EDH-DSS-DES-CBC3-SHA:!EDH-RSA-DES-CBC3-SHA:!KRB5-DES-CBC4-SHA';
ssl_prefer_server_ciphers on;
ssl_dhparam /etc/nginx/keys/dhparams.pem;
add_header X-Frame-Options SAMEORIGIN;
add_header X-XSS-Protection "1; mode=block";
add_header Strict-Transport-Security "max-age=31536000; includeSubdomains";
location / {
proxy_pass http://10.0.1.4:81;
proxy_buffering off;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
}
}
I haven't actually tested this, but my gut feeling is that your listen directives shouldn't contain a host name. They should contain the IP address of the interface you want to listen on and the port you want to listen to. Then, for each different IP/port combination, you can specify one server as the default.
Only after resolving which IP address the request went to, and which port it was on, does nginx begin to actually process the request. The first step is to check the Host header; if nginx finds a server block matching the value of the Host header, that's where it routes the request. If it doesn't find one, it routes to the default.
If no Host header is received then, I think, more recent versions of nginx will drop the request; previously it would just be sent to the default server for that IP/port combination.
Below is an nginx.conf which gives me working endpoints for named servers and returns 404 for everything else. Due to HSTS headers you need to hit test.se{1,2,3,4}.home-v.ind.in to see it work or you will just get back a browser error.
user nginx;
worker_processes auto;
error_log stderr notice;
pid /var/run/nginx.pid;
events {
worker_connections 1024;
}
http {
include /etc/nginx/mime.types;
default_type application/octet-stream;
sendfile on;
tcp_nopush on;
keepalive_timeout 300s;
ssl_certificate /etc/pki/nginx/fullchain.pem;
ssl_certificate_key /etc/pki/nginx/privkey.pem;
ssl_dhparam /etc/pki/nginx/dhparams.pem;
ssl_protocols TLSv1.2;
ssl_ciphers EECDH+CHACHA20:EECDH+AES128:RSA+AES128:EECDH+AES256:RSA+AES256:EECDH+3DES:RSA+3DES:!MD5;
ssl_prefer_server_ciphers on;
ssl_buffer_size 1400;
ssl_session_timeout 1d;
ssl_session_cache shared:SSL:50m;
ssl_stapling on;
ssl_stapling_verify on;
ssl_trusted_certificate /etc/pki/nginx/fullchain.pem;
add_header "Cache-Control" "no-transform";
add_header Strict-Transport-Security "max-age=31536000; includeSubDomains; preload" always;
resolver 8.8.8.8 8.8.4.4 216.146.35.35 216.146.36.36 valid=60s;
resolver_timeout 2s;
server {
listen 80 default_server;
server_name _;
return 301 https://$host$request_uri;
}
server {
listen 443 ssl http2;
server_name test.se1.home-v.ind.in;
root /usr/share/nginx/html;
location /.well-known { satisfy any; allow all; try_files $uri $uri/ =404; }
location /robots.txt { satisfy any; allow all; add_header Content-Type text/plain; return 200 "User-agent: *\nDisallow: /\n"; }
location / { satisfy any; allow all; add_header Content-Type text/plain; return 200 "Test Site 1"; }
}
server {
listen 443 ssl http2;
server_name test.se2.home-v.ind.in;
root /usr/share/nginx/html;
location /.well-known { satisfy any; allow all; try_files $uri $uri/ =404; }
location /robots.txt { satisfy any; allow all; add_header Content-Type text/plain; return 200 "User-agent: *\nDisallow: /\n"; }
location / { satisfy any; allow all; add_header Content-Type text/plain; return 200 "Test Site 2"; }
}
server {
listen 443 ssl http2 default_server;
server_name _;
root /usr/share/nginx/html;
location /.well-known { satisfy any; allow all; try_files $uri $uri/ =404; }
location / { return 404; }
}
}

How to check whether nginx is serving a static file or not

This is my nginx.conf file. I want to serve all static content located in different directories.
#user nobody;
worker_processes 1;
#error_log logs/error.log;
#error_log logs/error.log notice;
#error_log logs/error.log info;
#pid logs/nginx.pid;
events {
worker_connections 1024;
}
http {
include mime.types;
default_type application/octet-stream;
#log_format main '$remote_addr - $remote_user [$time_local] "$request" '
# '$status $body_bytes_sent "$http_referer" '
# '"$http_user_agent" "$http_x_forwarded_for"';
#access_log logs/access.log main;
sendfile on;
#tcp_nopush on;
#keepalive_timeout 0;
keepalive_timeout 65;
#gzip on;
server {
listen 80;
server_name localhost;
#charset koi8-r;
#access_log logs/host.access.log main;
location / {
root html;
index index.html index.htm;
}
#error_page 404 /404.html;
# redirect server error pages to the static page /50x.html
#
error_page 500 502 503 504 /50x.html;
location = /50x.html {
root html;
}
# proxy the PHP scripts to Apache listening on 127.0.0.1:80
#
#location ~ \.php$ {
# proxy_pass http://127.0.0.1;
#}
# pass the PHP scripts to FastCGI server listening on 127.0.0.1:9000
#
#location ~ \.php$ {
# root html;
# fastcgi_pass 127.0.0.1:9000;
# fastcgi_index index.php;
# fastcgi_param SCRIPT_FILENAME /scripts$fastcgi_script_name;
# include fastcgi_params;
#}
# deny access to .htaccess files, if Apache's document root
# concurs with nginx's one
#
#location ~ /\.ht {
# deny all;
#}
}
# another virtual host using mix of IP-, name-, and port-based configuration
#
server {
listen 8080;
server_name localhost;
location / {
proxy_pass http://localhost:3000;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection 'upgrade';
proxy_set_header Host $host;
proxy_cache_bypass $http_upgrade;
access_log C:/Program Files/nginx-1.11.7/logs/access.log;
}
location ~* \.(css|js|gif|jpe?g|png|html)$ {
root D:/gamma-master;
autoindex on;
access_log C:/Program Files/nginx-1.11.7/logs/access.log;
}
}
# HTTPS server
#
#server {
# listen 443 ssl;
# server_name localhost;
# ssl_certificate cert.pem;
# ssl_certificate_key cert.key;
# ssl_session_cache shared:SSL:1m;
# ssl_session_timeout 5m;
# ssl_ciphers HIGH:!aNULL:!MD5;
# ssl_prefer_server_ciphers on;
# location / {
# root html;
# index index.html index.htm;
# }
#}
}
How can I check whether nginx is serving the static files or not? Also, how should I give the static file paths if my static content is located in different folders under the root folder?
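Not from the original post, but one simple way to verify which location served a response: add a marker header in the static-file location (X-Served-By below is just an illustrative name) and check the response with the browser's developer tools or curl -I. A sketch using the same paths as above:

location ~* \.(css|js|gif|jpe?g|png|html)$ {
    root D:/gamma-master;
    autoindex on;

    # Marker header: if it appears in the response, this location (and not the
    # proxied Node.js backend) served the file.
    add_header X-Served-By nginx-static always;

    access_log "C:/Program Files/nginx-1.11.7/logs/access.log";
}

If the static content lives in several folders under one parent directory, a single root pointing at that parent is usually enough, since nginx appends the request URI to it; separate location blocks with their own root (or alias) cover directories that live elsewhere.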
