Caching packages for npm registry with NGINX - node.js

My project is a JavaScript app with lots of dependencies, and I/O against the npm registry takes up a major portion of the CI execution time.
So my idea is to set up NGINX in front of the npm registry and cache the tgz file downloads.
I'm running Ubuntu 14.04.
NGINX version is 1.4.6.
This is my nginx configuration:
user www-data;
worker_processes 4;
pid /run/nginx.pid;
events {
worker_connections 768;
# multi_accept on;
}
http {
##
# Basic Settings
##
sendfile on;
tcp_nopush on;
tcp_nodelay on;
keepalive_timeout 65;
types_hash_max_size 2048;
# server_tokens off;
# server_names_hash_bucket_size 64;
# server_name_in_redirect off;
include /etc/nginx/mime.types;
default_type application/octet-stream;
##
# Logging Settings
##
# access_log /var/log/nginx/access.log;
error_log /var/log/nginx/error.log;
##
# Gzip Settings
##
gzip on;
gzip_disable "msie6";
include /etc/nginx/conf.d/*.conf;
# include /etc/nginx/sites-enabled/*;
# HTTP 1.1 support
proxy_http_version 1.1;
proxy_buffering off;
proxy_set_header Host $http_host;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection $proxy_connection;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $proxy_x_forwarded_proto;
proxy_set_header X-Forwarded-Ssl $proxy_x_forwarded_ssl;
proxy_set_header X-Forwarded-Port $proxy_x_forwarded_port;
# If we receive X-Forwarded-Proto, pass it through; otherwise, pass along the
# scheme used to connect to this server
map $http_x_forwarded_proto $proxy_x_forwarded_proto {
default $http_x_forwarded_proto;
'' $scheme;
}
# If we receive X-Forwarded-Port, pass it through; otherwise, pass along the
# server port the client connected to
map $http_x_forwarded_port $proxy_x_forwarded_port {
default $http_x_forwarded_port;
'' $server_port;
}
# If we receive Upgrade, set Connection to "upgrade"; otherwise, delete any
# Connection header that may have been passed to this server
map $http_upgrade $proxy_connection {
default upgrade;
'' close;
}
# Set appropriate X-Forwarded-Ssl header
map $scheme $proxy_x_forwarded_ssl {
default off;
https on;
}
server {
listen 80 default_server;
location / {
access_log /var/log/nginx/root.log;
root /var/tmp/nginx/npm;
try_files $request_uri @fetch;
}
location @fetch {
internal;
proxy_pass http://nmregistry:4873$request_uri;
proxy_store /var/tmp/nginx/npm$request_uri;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_store_access user:rw group:rw all:r;
}
}
}
It works, I can install packages, but they are not cached on the NGINX machine.
I can't see any tgz files in /var/tmp/nginx/npm.
What am I doing wrong here?

This is the final version of the configuration file.
The main problem was proxy_buffering: with proxy_buffering off, proxy_store never writes the response to disk, so it has to be set to proxy_buffering on;.
user www-data;
worker_processes 4;
pid /run/nginx.pid;
events {
worker_connections 768;
}
http {
sendfile on;
tcp_nopush on;
tcp_nodelay on;
keepalive_timeout 65;
types_hash_max_size 2048;
include /etc/nginx/mime.types;
default_type application/octet-stream;
##
# Logging Settings
##
error_log /var/log/nginx/error.log debug;
##
# Gzip Settings
##
gzip on;
gzip_disable "msie6";
include /etc/nginx/conf.d/*.conf;
# include /etc/nginx/sites-enabled/*;
# HTTP 1.1 support
proxy_http_version 1.1;
proxy_buffering on;
proxy_set_header Host $http_host;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection $proxy_connection;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $proxy_x_forwarded_proto;
proxy_set_header X-Forwarded-Ssl $proxy_x_forwarded_ssl;
proxy_set_header X-Forwarded-Port $proxy_x_forwarded_port;
# If we receive X-Forwarded-Proto, pass it through; otherwise, pass along the
# scheme used to connect to this server
map $http_x_forwarded_proto $proxy_x_forwarded_proto {
default $http_x_forwarded_proto;
'' $scheme;
}
# If we receive X-Forwarded-Port, pass it through; otherwise, pass along the
# server port the client connected to
map $http_x_forwarded_port $proxy_x_forwarded_port {
default $http_x_forwarded_port;
'' $server_port;
}
# If we receive Upgrade, set Connection to "upgrade"; otherwise, delete any
# Connection header that may have been passed to this server
map $http_upgrade $proxy_connection {
default upgrade;
'' close;
}
# Set appropriate X-Forwarded-Ssl header
map $scheme $proxy_x_forwarded_ssl {
default off;
https on;
}
server {
listen 80 default_server;
location / {
access_log /var/log/nginx/cache_root.log;
proxy_pass http://nmregistry:4873;
}
location ~* .+/-/.+$ {
root /var/tmp/nginx/npm;
expires max;
try_files $uri @fetch;
}
location @fetch {
internal;
proxy_pass http://nmregistry:4873$request_uri;
proxy_store on;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_store_access user:rw group:rw all:rw;
proxy_temp_path /var/tmp/nginx/npm 1 2;
root /var/tmp/nginx/npm;
}
}
}
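As an aside that is not part of the original answer: newer setups often use proxy_cache instead of proxy_store for this, since it manages expiry and a size cap for you (proxy_store keeps the tgz files forever unless you clean them up yourself). A minimal sketch, assuming the same nmregistry:4873 upstream and that only the tarball URLs (the ones containing /-/) should be cached:

# at http level
proxy_cache_path /var/cache/nginx/npm levels=1:2 keys_zone=npm_tarballs:10m
                 max_size=5g inactive=30d;

server {
    listen 80 default_server;

    # metadata requests go straight to the registry
    location / {
        proxy_pass http://nmregistry:4873;
    }

    # cache only the tarball downloads
    location ~* /-/ {
        proxy_pass http://nmregistry:4873;
        proxy_cache npm_tarballs;
        proxy_cache_valid 200 30d;
        add_header X-Cache-Status $upstream_cache_status;
    }
}

The practical difference is that proxy_store leaves plain .tgz files on disk under their original paths, while proxy_cache stores them under hashed names that nginx manages itself.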

Related

How do I enable auth_basic on nginx for a domain and multiple subdomains with only one password?

I successfully added the auth_basic directive to my server block with the following config:
proxy_cache_path /var/cache/nginx levels=1:2 keys_zone=proxy_cache_path_global:10m loader_threshold=300 loader_files=200 max_size=2g inactive=60m use_temp_path=off;
upstream backend {
server localhost:8080;
keepalive 128;
}
server {
listen 443 ssl;
listen [::]:443 ssl;
server_name example.com www.example.com app.example.com;
auth_basic "Administrator’s Area";
auth_basic_user_file /etc/nginx/.htpasswd;
location / {
proxy_pass http://backend;
proxy_cache_methods GET HEAD;
proxy_cache proxy_cache_path_global;
proxy_cache_key $host$request_uri$cookie_user$slice_range;
proxy_cache_bypass $cookie_nocache $arg_nocache$arg_comment;
proxy_cache_min_uses 3;
proxy_cache_valid any 1m;
proxy_cache_revalidate on;
proxy_cache_background_update on;
proxy_cache_lock on;
proxy_cache_lock_age 10s;
proxy_cache_lock_timeout 3s;
proxy_redirect off;
proxy_http_version 1.1;
proxy_read_timeout 300;
proxy_connect_timeout 300;
slice 1m;
add_header X-Cache-Status $upstream_cache_status;
proxy_set_header Host $host;
proxy_set_header Connection "";
proxy_set_header Range $slice_range;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header X-Forwarded-Host $host;
proxy_set_header X-Forwarded-Port $server_port;
}
}
When I visit my website, two login prompts appear one after another: one for example.com and another one for app.example.com.
How do I configure this so that I only have to type in my credentials once?
I already tried looking for a solution myself, without success. Any hints would be appreciated.
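One hedged observation, not from the original question: browsers cache Basic Auth credentials per hostname, so example.com and app.example.com each trigger their own prompt regardless of how nginx is configured. A minimal sketch of one possible workaround, assuming the app behind app.example.com could instead be exposed as a path on the main host, so only one hostname (and therefore one prompt) is involved; the certificate paths and the localhost:8081 backend are placeholders:

server {
    listen 443 ssl;
    server_name example.com www.example.com;

    # placeholder certificate paths, reuse your existing ones
    ssl_certificate     /etc/nginx/ssl/example.com.crt;
    ssl_certificate_key /etc/nginx/ssl/example.com.key;

    # one realm on one host, so the browser asks only once
    auth_basic "Administrator's Area";
    auth_basic_user_file /etc/nginx/.htpasswd;

    location / {
        proxy_pass http://backend;
    }

    # hypothetical path for what used to live on app.example.com
    location /app/ {
        proxy_pass http://localhost:8081/;
    }
}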

Unable to access Elastic Beanstalk (single instance) from custom domain HTTPS

Greetings SO community,
I am attempting to configure my single-instance Elastic Beanstalk application to use a custom domain and HTTPS. Both the custom domain and the SSL certificate were obtained from a third party, and I am using their DNS servers (rather than Route 53).
I have added the .ebextensions/https-instance-securitygroup.config per the AWS documentation (https://docs.aws.amazon.com/elasticbeanstalk/latest/dg/https-singleinstance.html) as well as the files for a Node application (https://docs.aws.amazon.com/elasticbeanstalk/latest/dg/https-singleinstance-nodejs.html). The only difference in the last step is that I did not create a .ebextensions/https-instance.config file, as I am pushing my code to GitHub and using CodePipeline to build it. So the https.conf and certificates were created manually and uploaded to the EC2 instance.
Also, I have checked my instance's inbound rules to ensure that 80 & 443 are open on the EB instance and for the associated security group.
proxy.conf
upstream nodejs {
server 127.0.0.1:5000;
keepalive 256;
}
server {
listen 8080;
if ($time_iso8601 ~ "^(\d{4})-(\d{2})-(\d{2})T(\d{2})") {
set $year $1;
set $month $2;
set $day $3;
set $hour $4;
}
access_log /var/log/nginx/healthd/application.log.$year-$month-$day-$hour healthd;
access_log /var/log/nginx/access.log main;
location / {
proxy_pass http://nodejs;
proxy_set_header Connection "";
proxy_http_version 1.1;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
}
gzip on;
gzip_comp_level 4;
gzip_types text/html text/plain text/css application/json application/x-javascript text/xml application/xml application/xml+rss text/javascript;
location /static {
alias /var/app/current/client/build/static;
}
}
https.conf
# HTTPS server
server {
listen 443 ssl;
server_name localhost;
ssl_certificate /etc/pki/tls/certs/server.crt;
ssl_certificate_key /etc/pki/tls/certs/server.key;
ssl_session_timeout 5m;
ssl_protocols TLSv1 TLSv1.1 TLSv1.2;
ssl_prefer_server_ciphers on;
# For enhanced health reporting support, uncomment this block:
#if ($time_iso8601 ~ "^(\d{4})-(\d{2})-(\d{2})T(\d{2})") {
# set $year $1;
# set $month $2;
# set $day $3;
# set $hour $4;
#}
#access_log /var/log/nginx/healthd/application.log.$year-$month-$day-$hour healthd;
#access_log /var/log/nginx/access.log main;
location / {
proxy_pass http://nodejs;
proxy_set_header Connection "";
proxy_http_version 1.1;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto https;
}
}
So after rereading the AWS documentation for what felt like the hundredth time, I was finally able to resolve my issues. And because I wasn't strictly using the preferred method of using an .ebextensions folder, I had to fiddle with the Nginx proxy on the Elastic Beanstalk-created EC2 instance directly.
In short, I was missing the following section from my /etc/nginx/conf.d/proxy.conf file:
location / {
### START MISSING ###
set $redirect 0;
if ($http_x_forwarded_proto != "https") {
set $redirect 1;
}
if ($http_user_agent ~* "ELB-HealthChecker") {
set $redirect 0;
}
if ($redirect = 1) {
return 301 https://$host$request_uri;
}
### END OF MISSING ###
proxy_pass http://nodejs;
proxy_set_header Connection "";
proxy_http_version 1.1;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
}
This is documented here: https://docs.aws.amazon.com/elasticbeanstalk/latest/dg/configuring-https-httpredirect.html
Specifically, this is a snippet from the default Node.js Nginx proxy config file: https://github.com/awsdocs/elastic-beanstalk-samples/blob/master/configuration-files/aws-provided/security-configuration/https-redirect/nodejs/https-redirect-nodejs.config

Issue with uploading 1 GB file via nginx

We are using nginx as a load balancer in front of a Sails server, with AngularJS as the front end. We have to upload a 1 GB file to the server. When we upload the 1 GB file, the progress reaches 99% and then a 400 error code is returned.
Looking into the error logs, we see "client prematurely closed connection".
We tried client_body_in_file_only and the upload module, but both failed.
How can we solve this issue?
Our Nginx conf is:
load_module modules/ngx_http_upload_module.so;
user www-data;
worker_processes auto;
pid /run/nginx.pid;
events {
worker_connections 768;
}
http {
##
# Basic Settings
##
sendfile on;
tcp_nopush on;
tcp_nodelay on;
keepalive_timeout 360;
types_hash_max_size 2048;
proxy_ignore_client_abort on;
proxy_buffering off;
include /etc/nginx/mime.types;
default_type application/octet-stream;
##
# SSL Settings
##
ssl_protocols TLSv1 TLSv1.1 TLSv1.2; # Dropping SSLv3, ref: POODLE
ssl_prefer_server_ciphers on;
##
# Logging Settings
##
access_log /var/log/nginx/access.log;
error_log /var/log/nginx/error.log info;
##
# Gzip Settings
##
gzip on;
gzip_disable "msie6";
upstream api{
ip_hash;
server 127.0.0.1:1337;
}
server {
client_max_body_size 2048M;
client_body_buffer_size 2048M;
client_body_timeout 50m;
client_header_timeout 50m;
send_timeout 50m;
listen 80;
# location /test/upload {
# add_header 'Access-Control-Allow-Origin' *;
# #add_header 'Access-Control-Allow-Headers' 'Content-Type';
# add_header 'Access-Control-Allow-Credentials' 'true' always;
# add_header 'Access-Control-Allow-Methods' 'GET, POST, PUT, DELETE, OPTIONS' always;
# add_header 'Access-Control-Allow-Headers' 'Accept,Authorization,Cache-Control,Content-Type,DNT,If-Modified-Since,Keep-Alive,Origin,User-Agent,X-Requested-With' always;
# # required to be able to read Authorization header in frontend
# add_header 'Access-Control-Expose-Headers' 'Authorization' always;
# # Pass altered request body to this location
# upload_pass @upload;
#
# # Store files to this directory
# # The directory is hashed, subdirectories 0 1 2 3 4 5 6 7 8 9 should exist
# upload_store /tmp;
#
# # Allow uploaded files to be read only by user
# upload_store_access user:r;
#
# # Set specified fields in request body
# upload_set_form_field $upload_field_name.name "$upload_file_name";
# upload_set_form_field $upload_field_name.content_type "$upload_content_type";
# upload_set_form_field $upload_field_name.path "$upload_tmp_path";
#
# # Inform backend about hash and size of a file
# upload_aggregate_form_field "$upload_field_name.md5" "$upload_file_md5";
# upload_aggregate_form_field "$upload_field_name.size" "$upload_file_size";
#
# upload_pass_form_field "^submit$|^description$|^account$";
#
# upload_cleanup 400 404 499 500-505;
# }
location @upload {
proxy_send_timeout 50m;
proxy_read_timeout 50m;
proxy_pass http://api;
proxy_http_version 1.1;
proxy_redirect off;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_set_header Host $host;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Real-IP $remote_addr;
}
location / {
proxy_send_timeout 50m;
proxy_read_timeout 50m;
proxy_pass http://api;
proxy_http_version 1.1;
proxy_redirect off;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_set_header Host $host;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Real-IP $remote_addr;
}
location /test/upload {
proxy_send_timeout 50m;
proxy_read_timeout 50m;
proxy_pass http://api;
proxy_http_version 1.1;
proxy_redirect off;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_set_header Host $host;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Real-IP $remote_addr;
client_body_in_file_only clean;
client_body_buffer_size 16K;
client_body_temp_path /tmp;
}
}
}
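A hedged suggestion rather than a confirmed fix: with the configuration above, nginx buffers the entire request body before forwarding it to the Sails upstream, and client_body_buffer_size 2048M additionally asks it to hold the body in memory. On nginx 1.7.11 or newer, it may be worth streaming the upload straight to the backend instead, for example:

location /test/upload {
    client_max_body_size 2048M;

    # stream the request body to the upstream instead of buffering it first
    # (proxy_request_buffering is available from nginx 1.7.11)
    proxy_request_buffering off;

    proxy_pass http://api;
    proxy_http_version 1.1;
    proxy_set_header Host $host;
    proxy_set_header X-Real-IP $remote_addr;
    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;

    proxy_send_timeout 50m;
    proxy_read_timeout 50m;
}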

asp.net core on linux with nginx routing doesn't work

I've created an ASP.NET Core MVC application and deployed it to a Linux server. When I go to sitename.com, the browser shows the Home/Index page without any problem.
But when I try to go to sitename.com/Home/Index or another controller like sitename.com/Admin/Login, nginx throws a 404 Not Found error. What could be the problem?
Here is my Startup.cs/Configure method.
public void Configure(IApplicationBuilder app, IHostingEnvironment env, ILoggerFactory loggerFactory)
{
loggerFactory.AddConsole(Configuration.GetSection("Logging"));
loggerFactory.AddDebug();
if (env.IsDevelopment())
{
app.UseDeveloperExceptionPage();
app.UseBrowserLink();
}
else
{
app.UseExceptionHandler("/Home/Error");
}
app.UseStaticFiles();
app.UseSession();
app.UseMvc(routes =>
{
routes.MapRoute(
name: "default",
template: "{controller=Home}/{action=Index}/{id?}");
});
}
Here is my website config from the sites-available folder:
server {
listen 80 default_server;
listen [::]:80 default_server ipv6only=on;
root /var/www/sitename.com;
index index.html index.htm;
server_name sitename.com www.sitename.com;
location / {
try_files $uri $uri/ =404;
proxy_pass http://127.0.0.1:5000;
}
}
and nginx.conf
user www-data;
worker_processes 4;
pid /run/nginx.pid;
events {
worker_connections 768;
}
http {
sendfile on;
tcp_nopush on;
tcp_nodelay on;
keepalive_timeout 65;
types_hash_max_size 2048;
include /etc/nginx/mime.types;
default_type application/octet-stream;
access_log /var/log/nginx/access.log;
error_log /var/log/nginx/error.log;
gzip on;
gzip_disable "msie6";
include /etc/nginx/conf.d/*.conf;
include /etc/nginx/sites-enabled/*;
}
mail {
}
Remove try_files $uri $uri/ =404; as it tests whether a given URL exists on the file system and returns 404 if it does not.
But /Home/Index is a route, which does not map to an existing file but to a controller action, hence the 404 error.
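A minimal sketch of the resulting location block, with the try_files line removed so every request is handed to the Kestrel backend (the extra forwarding headers are common additions, not part of the original answer):

location / {
    proxy_pass http://127.0.0.1:5000;
    proxy_http_version 1.1;
    proxy_set_header Host $host;
    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    proxy_set_header X-Forwarded-Proto $scheme;
}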
To help someone searching on Google:
I was getting a 404, but I realized that ASP.NET only accepts one server per name (one app per server_name).
Example NOT POSSIBLE:
server{
listen 80;
listen [::]:80;
server_name example.com;
location /asp_app_ONE {
proxy_pass http://0.0.0.0:3001;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection keep-alive;
proxy_set_header Host $host;
proxy_cache_bypass $http_upgrade;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
}
location /asp_app_TWO{
proxy_pass http://0.0.0.0:3002;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection keep-alive;
proxy_set_header Host $host;
proxy_cache_bypass $http_upgrade;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
}
}
Example OK:
server{
listen 80;
listen [::]:80;
server_name appONE.example.com;
location / {
proxy_pass http://0.0.0.0:3001;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection keep-alive;
proxy_set_header Host $host;
proxy_cache_bypass $http_upgrade;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
}
}
server{
listen 80;
listen [::]:80;
server_name appTWO.example.com;
location / {
proxy_pass http://0.0.0.0:3002;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection keep-alive;
proxy_set_header Host $host;
proxy_cache_bypass $http_upgrade;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
}
}

Nginx - Redirect HTTPS back to HTTP when leaving a secured page

I have Nginx in front of a Node.js app. I have it set up so that if the URL has /account in it, it redirects to HTTPS. My question is: how do I set it up so that when the user leaves the /account URL (clicks a link to go to the home page), they get sent back to HTTP?
Here's my nginx.conf:
worker_processes 1;
error_log logs/error.log;
pid logs/nginx.pid;
events {
worker_connections 128;
}
http {
include mime.types;
default_type application/octet-stream;
sendfile on;
server_tokens off;
#keepalive_timeout 0;
keepalive_timeout 65;
tcp_nodelay on;
proxy_buffer_size 128k;
proxy_buffers 4 256k;
proxy_busy_buffers_size 256k;
gzip on;
server {
listen 80;
server_name localhost;
location / {
proxy_set_header x-path $uri;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Host $http_host;
proxy_set_header X-NginX-Proxy true;
proxy_pass http://127.0.0.1:3000;
proxy_redirect off;
}
location /account {
rewrite ^(.*) https://$host$1 permanent; #redirect to https
}
error_page 404 /404.html;
# redirect server error pages to the static page /50x.html
#
error_page 500 502 503 504 /50x.html;
location = /50x.html {
root html;
}
}
server {
listen 443;
ssl on;
ssl_certificate ssl/server.crt;
ssl_certificate_key ssl/server.key;
location / {
proxy_set_header Host $http_host;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-M-Secure "true";
proxy_redirect off;
proxy_max_temp_file_size 0;
proxy_pass http://127.0.0.1:3000;
}
}
}
Thanks in advance for any assistance.
This is untested.
server {
listen 443;
ssl on;
ssl_certificate ssl/server.crt;
ssl_certificate_key ssl/server.key;
location /account/ {
proxy_set_header Host $http_host;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-M-Secure "true";
proxy_redirect off;
proxy_max_temp_file_size 0;
proxy_pass http://127.0.0.1:3000;
}
location / {
rewrite ^(.*) http://$host$1 permanent; # redirect to http
}
}
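One small follow-up note, not from the answer above: the port-80 server from the question still needs its /account redirect for the flow to work in both directions, and that rewrite can also be written with a plain return, which is the more common form for simple redirects:

# in the port-80 server block
location /account {
    return 301 https://$host$request_uri;
}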
