API endpoint performance - node.js

I am attempting to benchmark the performance of my REST API endpoint. It's a Node/MongoDB endpoint with a pretty nasty aggregate query - a few unwinds, groups and a lookup. I am using Apache Bench (ab) to test its perf at concurrency 100 with 1000 requests. My issue: I am getting different results based on where I run the ab test:
localhost:3000/api_endpoint/ using ngrok to tunnel it = I am
getting around 50 requests per second.
Running the test on the
production app server directly, where the app is hosted, I am seeing
6000 requests per second.
Running it from my web server/reverse proxy I am seeing a similar 5000 reqs per second - if I use a relative path to the API.
NOW: If I run the same test on the web server
but this time use the domain name instead or run it from local dev
machine but hitting the domain, now it drops to 9 requests per
second.
What can I deduce from this? To me, it seems the query must not be that nasty if I am getting such high perf when I hit it directly. It seems I either have something configured wrong in my nginx.conf file for handling my domain requests, or there is a DNS resolution problem. It's an exotic TLD (.ly) - could that be it? Where should I look?
Edit: Adding my nginx.conf file for analysis:
user www-data;
# "auto" starts one worker per CPU core; a single worker is a serious
# bottleneck for TLS handshakes at high concurrency (e.g. ab -c 100),
# which is consistent with the huge drop when hitting the HTTPS domain.
worker_processes auto;
#error_log /log/nginx/error.log;
pid /run/nginx.pid;

# Load dynamic modules. See /usr/share/nginx/README.dynamic.
#include /usr/share/nginx/modules/*.conf;

events {
    worker_connections 1024;
}

http {
    # optimizations per DigitalOcean
    client_body_buffer_size 10K;
    client_header_buffer_size 1k;
    client_max_body_size 8m;
    # NOTE(review): 2 x 1k is very small; browsers sending sizeable cookies
    # will get 400 "Request Header Or Cookie Too Large". The default 4 8k
    # is safer -- confirm before tightening this.
    large_client_header_buffers 2 1k;
    client_body_timeout 12;
    client_header_timeout 12;
    keepalive_timeout 15;
    send_timeout 10;

    # compression
    gzip on;
    gzip_disable "msie6";
    # BUG FIX: gzip_proxied was declared twice ("any" and then a flag list),
    # which nginx rejects as a duplicate directive; "any" is a superset.
    gzip_proxied any;
    gzip_comp_level 6;
    gzip_buffers 16 8k;
    gzip_http_version 1.1;
    gzip_min_length 256;
    # duplicate "text/xml" entry removed (nginx warns "duplicate MIME type")
    gzip_types text/plain application/x-javascript text/xml text/css application/xml application/json application/xml+rss application/javascript text/javascript application/vnd.ms-fontobject application/x-font-ttf font/opentype image/svg+xml image/x-icon;

    access_log off;
    log_format main '$remote_addr - $remote_user [$time_local] "$request" '
                    '$status $body_bytes_sent "$http_referer" '
                    '"$http_user_agent" "$http_x_forwarded_for"';
    #access_log /var/log/nginx/access.log main;

    sendfile on;
    tcp_nopush on;
    tcp_nodelay on;
    types_hash_max_size 2048;
    include /etc/nginx/mime.types;
    default_type application/octet-stream;

    # Load modular configuration files from the /etc/nginx/conf.d directory.
    # See http://nginx.org/en/docs/ngx_core_module.html#include
    # for more information.
    include /etc/nginx/conf.d/*.conf;

    # Redirect all plain-HTTP traffic (and the alternate domains) to HTTPS.
    server {
        listen 80;
        server_name www.staq.ly staq.ly getstackrank.com www.getstackrank.com;
        # BUG FIX: append $request_uri -- the original "return 301
        # https://staq.ly;" sent every deep link to the home page.
        return 301 https://staq.ly$request_uri;
    }

    server {
        # HTTPS only. BUG FIX: the original also listened on 80 here while
        # staq.ly was named in the redirect server above, producing a
        # "conflicting server name" warning and letting plain HTTP bypass
        # the redirect.
        listen 443 ssl;
        server_name staq.ly;
        ssl_certificate /etc/nginx/ssl/staqly.crt;
        ssl_certificate_key /etc/nginx/ssl/staqly.key;
        root /var/local;

        # These files are served directly from $root.
        location ~ ^/(sitemap.xml) {
        }
        location ~ ^/(robots.txt) {
        }
        location ~ ^/(googlee828ea2f1ef594b3.html) {
        }

        location / {
            # BUG FIX: named locations use "@"; "#prerender" starts a
            # comment, which swallows the terminating ";" and breaks the
            # config. Same fix applied to the location below.
            try_files $uri @prerender;
        }

        location @prerender {
            proxy_set_header X-Prerender-Token XXXXXXXX;
            set $prerender 0;
            if ($http_user_agent ~* "baiduspider|twitterbot|facebookexternalhit|rogerbot|linkedinbot|embedly|quora link preview|showyoubot|outbrain|pinterest|slackbot|vkShare|W3C_Validator") {
                set $prerender 1;
            }
            if ($args ~ "_escaped_fragment_") {
                set $prerender 1;
            }
            if ($http_user_agent ~ "Prerender") {
                set $prerender 0;
            }
            if ($uri ~* "\.(js|css|xml|less|png|jpg|jpeg|gif|pdf|doc|txt|ico|rss|zip|mp3|rar|exe|wmv|doc|avi|ppt|mpg|mpeg|tif|wav|mov|psd|ai|xls|mp4|m4a|swf|dat|dmg|iso|flv|m4v|torrent|ttf|woff|svg|eot)") {
                set $prerender 0;
            }
            # resolve using Google's DNS server to force DNS resolution and prevent caching of IPs
            resolver 8.8.8.8;
            if ($prerender = 1) {
                # setting prerender as a variable forces DNS resolution since nginx caches IPs and doesnt play well with load balancing
                set $prerender "service.prerender.io";
                rewrite .* /$scheme://$host$request_uri? break;
                proxy_pass http://$prerender;
            }
            if ($prerender = 0) {
                # rewrite .* /index.ejs break; # Throw away the path because this is a single page web app with only an index.html
                proxy_pass http://10.132.126.36:3000;
            }
        }
    }
}

Related

NGINX Browser Caching Not Working - Node JS EC2

Went through several articles but cannot figure out why the browser caching isn't working. I am using prerender.io as well as SSL:
gzip on;
gzip_min_length 500;
gzip_proxied any;
gzip_comp_level 4;
gzip_types text/css text/javascript text/xml text/plain text/x-component application/javascript application/json application/xml application/rss+xml font/truetype font/opentype application/vnd.ms-fontobject image/svg+xml;
gzip_vary on;
gzip_disable "msie6";

# Plain-HTTP listener (port 8080 behind the load-balancer port mapping):
# redirect everything to HTTPS.
server {
    listen 8080 default_server;
    listen [::]:8080 default_server;
    server_name <servername> www.<servername>.com;
    # BUG FIX: use $host, not $server_name -- $server_name always expands
    # to the FIRST name of this block, so www requests were redirected to
    # the bare domain instead of keeping their hostname.
    return 301 https://$host$request_uri;
}

server {
    listen 443 ssl http2 default_server;
    server_name <servername> www.<servername>.com;
    ssl_certificate /etc/pki/tls/private/<servername>.com.chained.crt;
    ssl_certificate_key /etc/pki/tls/private/private.key;
    ssl_protocols TLSv1 TLSv1.1 TLSv1.2;
    ssl_prefer_server_ciphers on;
    # BUG FIX: the original line had no trailing ';', which makes nginx
    # treat the next directive as arguments and fail "nginx -t".
    ssl_ciphers <ssl_ciphers_code>;
    ssl_session_cache shared:SSL:5m;
    ssl_session_timeout 1h;
    add_header Strict-Transport-Security "max-age=15768000" always;
    root /var/app/current;

    location / {
        proxy_set_header X-Prerender-Token iKJwgCElYIfxtt9u99Zg;
        set $prerender 0;
        if ($http_user_agent ~* "baiduspider|twitterbot|facebookexternalhit|rogerbot|linkedinbot|embedly|quora link preview|showyoubot|outbrain|pinterest|slackbot|vkShare|W3C_Validator") {
            set $prerender 1;
        }
        if ($args ~ "_escaped_fragment_") {
            set $prerender 1;
        }
        if ($http_user_agent ~ "Prerender") {
            set $prerender 0;
        }
        if ($uri ~* "\.(js|css|xml|less|png|jpg|jpeg|gif|pdf|doc|txt|ico|rss|zip|mp3|rar|exe|wmv|doc|avi|ppt|mpg|mpeg|tif|wav|mov|psd|ai|xls|mp4|m4a|swf|dat|dmg|iso|flv|m4v|torrent|ttf|woff|svg|eot)") {
            set $prerender 0;
        }
        # resolve using Google's DNS server to force DNS resolution and prevent caching of IPs
        resolver 8.8.8.8;
        if ($prerender = 1) {
            # setting prerender as a variable forces DNS resolution since nginx caches IPs and doesnt play well with load balancing
            set $prerender "service.prerender.io";
            rewrite .* /$scheme://$host$request_uri? break;
            proxy_pass http://$prerender;
        }
        # Proxy_pass configuration
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header Host $http_host;
        proxy_set_header X-NginX-Proxy true;
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "upgrade";
        proxy_max_temp_file_size 0;
        # BUG FIX: 0.0.0.0 is a listen/bind wildcard, not a connect
        # destination; the Node app is reached via loopback.
        proxy_pass http://127.0.0.1:3000;
        proxy_redirect off;
        proxy_read_timeout 240s;
    }

    location ~* \.(ico|css|js|gif|jpeg|jpg|png|woff|ttf|otf|svg|woff2|eot)$ {
        root /var/app/current/app/dist/client/; # assets live under dist/client, not the proxy root
        expires 30d;
        access_log off;
        log_not_found off;
        add_header Pragma "public";
        add_header Cache-Control "public";
    }

    # Increase http2 max sizes
    http2_max_field_size 64k;
    http2_max_header_size 64k;
    client_max_body_size 4G;
    keepalive_timeout 10;
}
My assets dir is as follows:
JS: /var/app/current/app/dist/client/js
CSS: /var/app/current/app/dist/client/assets/css
Images: /var/app/current/app/dist/client/assets/graphics
Fonts: /var/app/current/app/dist/client/assets/fonts
Videos: /var/app/current/app/dist/client/assets/videos
UPDATED CONFIG:
gzip on;
gzip_min_length 500;
gzip_proxied any;
gzip_comp_level 4;
gzip_types text/css text/javascript text/xml text/plain text/x-component application/javascript application/json application/xml application/rss+xml font/truetype font/opentype application/vnd.ms-fontobject image/svg+xml;
gzip_vary on;
gzip_disable "msie6";

# Plain-HTTP listener: redirect everything to HTTPS.
server {
    listen 8080 default_server;
    listen [::]:8080 default_server;
    server_name <servername.com> <www.servername.com>;
    # BUG FIX: use $host, not $server_name -- $server_name always expands
    # to the first name of this block, redirecting www traffic to the
    # bare domain.
    return 301 https://$host$request_uri;
}

server {
    listen 443 ssl http2 default_server;
    server_name <servername.com> <www.servername.com>;
    ssl_certificate /etc/pki/tls/private/<servername>.com.chained.crt;
    ssl_certificate_key /etc/pki/tls/private/private.key;
    ssl_protocols TLSv1 TLSv1.1 TLSv1.2;
    ssl_prefer_server_ciphers on;
    ssl_ciphers <ciphers>;
    ssl_session_cache shared:SSL:5m;
    ssl_session_timeout 1h;
    add_header Strict-Transport-Security "max-age=15768000" always;
    root /var/app/current;

    location / {
        proxy_set_header X-Prerender-Token <token>;
        set $prerender 0;
        if ($http_user_agent ~* "developers\.google\.com|googlebot|gigabot|yeti|yandex|ia_archiver|baiduspider|twitterbot|facebookexternalhit|rogerbot|linkedinbot|embedly|quora link preview|showyoubot|outbrain|pinterest|slackbot|vkShare|W3C_Validator") {
            set $prerender 1;
        }
        if ($args ~ "_escaped_fragment_") {
            set $prerender 1;
        }
        if ($http_user_agent ~ "Prerender") {
            set $prerender 0;
        }
        if ($uri ~* "\.(js|css|xml|less|png|jpg|jpeg|gif|pdf|doc|txt|ico|rss|zip|mp3|rar|exe|wmv|doc|avi|ppt|mpg|mpeg|tif|wav|mov|psd|ai|xls|mp4|m4a|swf|dat|dmg|iso|flv|m4v|torrent|ttf|woff|svg|eot)") {
            set $prerender 0;
        }
        # resolve using Google's DNS server to force DNS resolution and prevent caching of IPs
        resolver 8.8.8.8;
        if ($prerender = 1) {
            # setting prerender as a variable forces DNS resolution since nginx caches IPs and doesnt play well with load balancing
            set $prerender "service.prerender.io";
            rewrite .* /$scheme://$host$request_uri? break;
            proxy_pass http://$prerender;
        }
        # Proxy_pass configuration
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header Host $http_host;
        proxy_set_header X-NginX-Proxy true;
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "upgrade";
        proxy_max_temp_file_size 0;
        # BUG FIX: 0.0.0.0 is a bind wildcard, not a connect destination.
        proxy_pass http://127.0.0.1:3000;
        proxy_redirect off;
        proxy_read_timeout 240s;
    }

    location ~* \.(ico|css|js|gif|jpeg|jpg|png|woff|ttf|otf|svg|woff2|eot)$ {
        root /var/app/current/app/dist/client/; # assets live under dist/client, not the proxy root
        expires 30d;
        access_log off;
        log_not_found off;
        add_header Pragma "public";
        add_header Cache-Control "public";
    }

    location /assets/graphics/ {
        # NOTE(review): proxy_ignore_headers / proxy_cache_valid only take
        # effect when this location actually proxies (proxy_pass) AND a
        # cache zone is configured via proxy_cache_path + proxy_cache.
        # As written, requests here fall back to the server-level root --
        # confirm this is the intended behavior.
        proxy_ignore_headers Cache-Control;
        proxy_cache_valid any 30d;
    }

    # Increase http2 max sizes
    proxy_buffers 8 16k;
    proxy_buffer_size 32k;
    http2_max_field_size 64k;
    http2_max_header_size 64k;
    client_max_body_size 4G;
    keepalive_timeout 10;
}
In your NGINX configuration, you set the cache expiration to 30 days for your images with the line:
expires 30d;
However looking at your images coming from your server, the max-age of the images is set to 0 which is causing your browser to re-pull the images on a refresh (below image was after a refresh):
I suspect that NGINX is acting as a proxy to an origin server that is part of your solution. This origin server is setting the max-age to 0 in the cache-control header and NGINX is respecting that setting.
Per the NGINX caching guide:
By default, NGINX respects the Cache-Control headers from origin servers. It does not cache responses with Cache-Control set to Private, No-Cache, or No-Store or with Set-Cookie in the response header. NGINX only caches GET and HEAD client requests.
To override the cache-control set at the origin server and set the max-age to 30d, use the NGINX proxy_ignore_headers and proxy_cache_valid directive like so:
...
location /assets/graphics/ {
proxy_ignore_headers Cache-Control;
proxy_cache_valid any 30d;
...
}
...
The code in my solution is taken directly from the NGINX caching guide and modified to your configuration.
Or determine how to change the cache control headers at the origin server.
UPDATE
After you updated your NGINX config, your images in the /assets/graphics/ directory are pulled from local browser memory and have an expiration of 30 days (2592000 seconds) as seen below. Yesterday, they were all being pulled from your server and not being cached. This solution solves your problem. For the rest of the assets that you want cached, you need to further change your config to also cache these according to your requirements.

loading assets infinitely on a nginx for nodejs application whose resources are compiled with webpack

I am trying to put a Node.js application into production using an nginx server. When I test locally, everything works fine, but when I put it online I get an infinite load on some particular assets.
here is my nodejs configuration file
# Backend Node.js application.
upstream beauteadom_me {
server localhost:3000;
}
# Port-80 server: answers ACME (Let's Encrypt) challenges and redirects
# everything else to the canonical https://www host.
server {
listen 80;
listen [::]:80;
server_name beauteadom.me www.beauteadom.me;
location ~ /\.well-known/acme-challenge {
allow all;
}
root /var/www/beauteadom.me;
location / {
return 301 https://www.beauteadom.me$request_uri;
}
}
server {
listen 443 ssl http2;
listen [::]:443 ssl http2;
http2_push_preload off;
server_name www.beauteadom.me;
root /var/www/beauteadom.me;
error_log /var/log/nginx/beauteadom.me.log notice;
access_log off;
location / {
http2_push_preload off;
add_header X-Content-Type-Options nosniff;
proxy_pass http://beauteadom_me;
}
location /websocket/ {
proxy_pass http://beauteadom_me;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
}
# Static assets are also proxied to Node, with far-future expiry.
location ~* \.(html|css|js|png|jpg|jpeg|gif|ico|svg|eot|woff|ttf)$ {
expires max;
proxy_pass http://beauteadom_me;
}
# Deny dotfiles. NOTE(review): unlike the port-80 server above, this
# server has no .well-known exception -- confirm renewals still work.
location ~ /\. { deny all; }
gzip on;
gzip_vary on;
gzip_min_length 1000;
gzip_comp_level 2;
gzip_proxied expired no-cache no-store private auth;
gzip_types text/plain text/css text/xml text/javascript application/x-javascript application/xml;
gzip_disable "MSIE [1-6]\.";
# NOTE(review): access_log off was already set above; this is a duplicate.
access_log off;
error_log /var/log/nginx/error.log crit;
open_file_cache max=200000 inactive=20s;
open_file_cache_valid 30s;
open_file_cache_min_uses 2;
open_file_cache_errors on;
# NOTE(review): ngx_pagespeed rewrites asset URLs on the fly; if the
# module or its FileCachePath is misconfigured in production (pagespeed
# is absent in the local setup where everything works), optimized assets
# can hang while being fetched -- a prime suspect for the "infinite
# load" seen only online. Try "pagespeed off;" to confirm.
pagespeed on;
pagespeed FetchHttps enable,allow_self_signed;
pagespeed FileCachePath /var/ngx_pagespeed_cache;
pagespeed RewriteLevel OptimizeForBandwidth;
# Stock pagespeed handler locations (per the module documentation).
location ~ "\.pagespeed\.([a-z]\.)?[a-z]{2}\.[^.]{10}\.[^.]+" {
add_header "" "";
}
location ~ "^/pagespeed_static/" { }
location ~ "^/ngx_pagespeed_beacon$" { }
}
here's what i get in my chrome browser console
I think it is definitely an nginx security setting, given that I do not have this problem locally.
I have already googled a lot; please, I need your help. Thank you in advance.

I get 502 and 504 error from nginx in node.js

I changed the config file to increase the timeouts and upload size, and it couldn't find the php-fpm file. Whenever I click, after some time it gets stuck and gives a 504 error and then a 502 error every time.
This is my nginx.conf file.. which is located on etc/nginx/nginx.conf Please give me the solution for this issue as my project is on hold because of these things and i need to launch my web app as soon as possible.
# Main nginx configuration; virtual hosts live in sites-enabled/.
user www-data;
worker_processes auto;
pid /run/nginx.pid;
events {
worker_connections 768;
# multi_accept on;
}
http {
##
# Basic Settings
##
sendfile on;
tcp_nopush on;
tcp_nodelay on;
# NOTE(review): 600s keepalive is extremely long (typical values are
# 65-75s) and pins idle connections to workers. It does not cause the
# 502/504 errors, which come from the upstream app: 502 = upstream
# refused/reset the connection, 504 = no answer within the proxy read
# timeout. Check that the backend is actually running first.
keepalive_timeout 600;
types_hash_max_size 2048;
# set client body size to 500M #
# NOTE(review): ten-minute proxy timeouts hide, rather than fix, a slow
# or crashed upstream.
proxy_connect_timeout 600;
proxy_send_timeout 600;
proxy_read_timeout 600;
send_timeout 600;
client_header_timeout 3000;
client_body_timeout 3000;
# NOTE(review): fastcgi_* directives only affect PHP-FPM/FastCGI
# upstreams; they have no effect on a Node.js app behind proxy_pass.
fastcgi_read_timeout 3000;
client_max_body_size 800m;
fastcgi_buffers 8 128k;
fastcgi_buffer_size 128k;
# Commented-out duplicates of the directives above; kept for reference.
#client_max_body_size 800M;
#client_header_timeout 3000;
#client_body_timeout 3000;
#fastcgi_read_timeout 3000;
#client_max_body_size 800m;
#fastcgi_buffers 8 128k;
#fastcgi_buffer_size 128k;
#proxy_connect_timeout 600;
#proxy_send_timeout 600;
#proxy_read_timeout 600;
#send_timeout 600;
# server_tokens off;
# server_names_hash_bucket_size 64;
# server_name_in_redirect off;
include /etc/nginx/mime.types;
default_type application/octet-stream;
##
# SSL Settings
##
ssl_protocols TLSv1 TLSv1.1 TLSv1.2; # Dropping SSLv3, ref: POODLE
ssl_prefer_server_ciphers on;
##
# Logging Settings
##
access_log /var/log/nginx/access.log;
error_log /var/log/nginx/error.log;
##
# Gzip Settings
##
gzip on;
gzip_disable "msie6";
# gzip_vary on;
# gzip_proxied any;
# gzip_comp_level 6;
# gzip_buffers 16 8k;
# gzip_http_version 1.1;
# gzip_types text/plain text/css application/json application/javascript text/xml application/xml application/xml+rss text/javascript;
##
# Virtual Host Configs
##
include /etc/nginx/conf.d/*.conf;
include /etc/nginx/sites-enabled/*;
}
# Stock commented-out mail-proxy sample from the distribution default.
#mail {
# # See sample authentication script at:
# # http://wiki.nginx.org/ImapAuthenticateWithApachePhpScript
#
# # auth_http localhost/auth.php;
# # pop3_capabilities "TOP" "USER";
# # imap_capabilities "IMAP4rev1" "UIDPLUS";
#
# server {
# listen localhost:110;
# protocol pop3;
# proxy on;
# }
#
# server {
# listen localhost:143;
# protocol imap;
# proxy on;
# }
#}

Turn off Nginx Gzip for a specific query string (used with nodejs)

I'm using nginx to gzip static files & json responses from a nodejs server.
For one specific request (with a query string like "?fn=foo"), I need to send a non-gzip json response.
I've tried to achieve this with nginx location module, based on a regex on the query string, but the query string is not in the URI used to match location by nginx
I've tried to put a if ($arg_fn = "foo") {gzip off;} in my main location route, but it fails with a 404 instead.
Any idea?
Is it possible to achieve this with nginx? or is there a way to tell from nodejs to nginx not to gzip this response?
My nginx conf file:
worker_processes 1;
error_log /var/log/nginx/error.log;
pid /var/run/nginx.pid;

events {
    worker_connections 1024;
}

http {
    proxy_cache_path /var/cache/nginx levels=1:2 keys_zone=one:8m max_size=3000m inactive=600m;
    proxy_temp_path /var/cache/nginx/proxy_temp;
    include /etc/nginx/mime.types;
    default_type application/octet-stream;
    log_format main '$remote_addr - $remote_user [$time_local] "$request" '
                    '$status $body_bytes_sent "$http_referer" '
                    '"$http_user_agent" "$http_x_forwarded_for"';
    access_log /var/log/nginx/access.log main;
    sendfile on;
    keepalive_timeout 65;

    gzip on;
    gzip_comp_level 6;
    gzip_vary on;
    gzip_min_length 1000;
    gzip_proxied any;
    gzip_types text/plain text/css application/x-javascript text/xml application/xml application/xml+rss text/javascript application/javascript application/json;
    gzip_buffers 16 8k;

    # Close the connection cleanly when the client did not ask for a
    # WebSocket upgrade; otherwise pass the upgrade through.
    map $http_upgrade $connection_upgrade {
        default upgrade;
        '' close;
    }

    # NOTE(review): an upstream literally named "upstream" is legal but
    # confusing; consider renaming (callers: proxy_pass below).
    upstream upstream {
        server 127.0.0.1:8887;
        server 127.0.0.1:8888;
        keepalive 64;
    }

    server {
        listen 80;
        listen 443 ssl;
        server_name _;
        ssl_certificate /etc/nginx/ssl/zellno-ssl-bundle.crt;
        ssl_certificate_key /etc/nginx/ssl/zellno-key.pem;

        location / {
            # "gzip" is a valid directive inside "if in location", so
            # disabling compression only for ?fn=comp works this way.
            if ($arg_fn = "comp") {
                gzip off;
            }
            proxy_set_header X-Real-IP $remote_addr;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            proxy_set_header X-NginX-Proxy true;
            proxy_set_header X-Forwarded-Proto $scheme;
            proxy_pass http://upstream/;
            proxy_redirect off;
            proxy_http_version 1.1;
            proxy_set_header Upgrade $http_upgrade;
            proxy_set_header Connection $connection_upgrade;
            proxy_set_header Host $host;
        }

        # BUG FIX: error_page pointed at /404.html but the location only
        # matched /40x.html, so 404s were re-proxied to the upstream via
        # "location /" instead of being served from the local html dir.
        error_page 404 /404.html;
        location = /404.html {
            root /usr/share/nginx/html;
        }
        error_page 500 502 503 504 /50x.html;
        location = /50x.html {
            root /usr/share/nginx/html;
        }
    }
}
I think your issue is that you said $args_fn with an "s", while it should be $arg_fn in singular form, try that and tell me how it goes.
EDIT:
Try this to make sure that you enter the if block
# Debugging aid: if ?fn=comp requests now return code 444, the "if" is
# matching and the gzip directive is the problem; if not, fix the "if".
location / {
if ($arg_fn = "comp") {
return 444;
#gzip off;
}
}
If it returns the error code then the if is working but not the gzip, if it doesn't work then we need to fix the if first

Nginx set ssl for URLs

Good day. I got nginx server and it runs https connections.
For now, all URLs run with https. All I need is to exclude some URLs from https, so they can be accessed with plain http.
Here is my NGINX config file:
# Port-80 server: serve static assets directly, redirect pages to HTTPS.
server {
    listen 80;
    server_name my-fin.ru www.my-fin.ru;
    root /usr/server/finance/abacus/webapp;
    location ~ ^/.+\.(eot|ttf|woff)$ {
        expires max;
        add_header Cache-Control public;
        add_header Access-Control-Allow-Origin *;
    }
    location ~ ^/.+\.(ico|jpg|jpeg|gif|pdf|jar|png|js|css|txt|epf|svg)$ {
        expires max;
        add_header Cache-Control public;
    }
    location / {
        # BUG FIX: append $request_uri -- the original redirect dropped
        # the path and query, sending every deep link to the home page.
        return 301 https://my-fin.ru$request_uri;
    }
}
server {
    # "ssl on;" is deprecated; the ssl parameter on listen replaces it.
    listen 443 ssl;
    server_name my-fin.ru;
    client_max_body_size 10m;
    gzip on;
    gzip_min_length 500;
    gzip_buffers 4 8k;
    gzip_types text/plain text/xml application/xml application/x-javascript text/javascript text/css text/json application/json;
    access_log /var/log/nginx/finance.access.log;
    error_log /var/log/nginx/finance.error.log;
    ssl_certificate /usr/server/myfin.crt;
    ssl_certificate_key /usr/server/myfin.key;
    charset utf-8;
    root /usr/server/finance/abacus/webapp;
    location ~ ^/.+\.(eot|ttf|woff)$ {
        expires max;
        add_header Cache-Control public;
        add_header Access-Control-Allow-Origin *;
    }
    location ~ ^/.+\.(ico|jpg|jpeg|gif|pdf|jar|png|js|css|txt|epf|svg)$ {
        expires max;
        add_header Cache-Control public;
    }
    location / {
        # give site more time to respond
        proxy_read_timeout 120;
        proxy_pass http://127.0.0.1:8087;
        proxy_redirect http:// $scheme://;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    }
}
Please help to configure nginx.
According to this comment, Here's how i would do it.
## non https server
server {
    # non ssl server
    listen 80;
    server_name example.com;
    root /path/to/root;
    location /features {
        # handle /features
    }
    location /info {
        # handle /info
    }
    location /help {
        # handle /help
    }
    location / {
        # BUG FIX: the original omitted the opening '{' here, leaving the
        # "return" outside any block -- a config syntax error.
        return 301 https://example.com$request_uri;
    }
}
## https server
server {
    # handle ssl
    listen 443 ssl;
    server_name example.com subdomain1.example.com;
    root /path/to/root;
    location ~ /(features|help|info) {
        # redirect those 3 subfolders to http
        return 301 http://example.com$request_uri;
    }
    location / {
        # handle ssl requests
    }
}
## https subdomain
server {
    listen 443 ssl;
    server_name subdomain2.example.com;
    root /path/to/root;
    location ~ /(features|help|info) {
        # redirect those 3 subfolders to http
        return 301 http://example.com$request_uri;
    }
    location / {
        # subdomain handling
    }
}
Please note that https wont work on subdomains unless you have a wildcard SSL certificate, otherwise the browser will issue a warning.

Resources