ASP.NET Core on Linux with nginx: routing doesn't work

I've created an ASP.NET Core MVC application and deployed it to a Linux server. When I go to sitename.com, the browser shows the Home/Index page without any problem.
But when I try to go to sitename.com/Home/Index or another controller like sitename.com/Admin/Login, nginx returns a 404 Not Found error. What could be the problem?
Here is the Configure method from my Startup.cs:
public void Configure(IApplicationBuilder app, IHostingEnvironment env, ILoggerFactory loggerFactory)
{
    loggerFactory.AddConsole(Configuration.GetSection("Logging"));
    loggerFactory.AddDebug();

    if (env.IsDevelopment())
    {
        app.UseDeveloperExceptionPage();
        app.UseBrowserLink();
    }
    else
    {
        app.UseExceptionHandler("/Home/Error");
    }

    app.UseStaticFiles();
    app.UseSession();

    app.UseMvc(routes =>
    {
        routes.MapRoute(
            name: "default",
            template: "{controller=Home}/{action=Index}/{id?}");
    });
}
Here is my site config from the sites-available folder:
server {
    listen 80 default_server;
    listen [::]:80 default_server ipv6only=on;

    root /var/www/sitename.com;
    index index.html index.htm;

    server_name sitename.com www.sitename.com;

    location / {
        try_files $uri $uri/ =404;
        proxy_pass http://127.0.0.1:5000;
    }
}
and here is my nginx.conf:
user www-data;
worker_processes 4;
pid /run/nginx.pid;

events {
    worker_connections 768;
}

http {
    sendfile on;
    tcp_nopush on;
    tcp_nodelay on;
    keepalive_timeout 65;
    types_hash_max_size 2048;

    include /etc/nginx/mime.types;
    default_type application/octet-stream;

    access_log /var/log/nginx/access.log;
    error_log /var/log/nginx/error.log;

    gzip on;
    gzip_disable "msie6";

    include /etc/nginx/conf.d/*.conf;
    include /etc/nginx/sites-enabled/*;
}

mail {
}

Remove the line try_files $uri $uri/ =404; as it tests whether the requested URL exists on the file system and returns 404 if it does not.
But /Home/Index is a route that maps to a controller action, not to an existing file, hence the 404 error.
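For reference, a minimal sketch of that location block with try_files removed; the extra proxy_http_version and proxy_set_header lines are the usual Kestrel reverse-proxy boilerplate, not part of the original config, so treat them as an assumption:

location / {
    proxy_pass http://127.0.0.1:5000;
    proxy_http_version 1.1;
    proxy_set_header Upgrade $http_upgrade;
    proxy_set_header Connection keep-alive;
    proxy_set_header Host $host;
    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    proxy_set_header X-Forwarded-Proto $scheme;
}

With this, every request path is forwarded to Kestrel on port 5000 and MVC routing decides what is a 404, instead of nginx checking the file system first.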

To help someone searching on Google: I was also getting a 404, but I realized that ASP.NET Core only accepts one app per server name.
Example that does NOT work:
server {
    listen 80;
    listen [::]:80;
    server_name example.com;

    location /asp_app_ONE {
        proxy_pass http://0.0.0.0:3001;
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection keep-alive;
        proxy_set_header Host $host;
        proxy_cache_bypass $http_upgrade;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
    }

    location /asp_app_TWO {
        proxy_pass http://0.0.0.0:3002;
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection keep-alive;
        proxy_set_header Host $host;
        proxy_cache_bypass $http_upgrade;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
    }
}
Example that works:
server {
    listen 80;
    listen [::]:80;
    server_name appONE.example.com;

    location / {
        proxy_pass http://0.0.0.0:3001;
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection keep-alive;
        proxy_set_header Host $host;
        proxy_cache_bypass $http_upgrade;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
    }
}

server {
    listen 80;
    listen [::]:80;
    server_name appTWO.example.com;

    location / {
        proxy_pass http://0.0.0.0:3002;
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection keep-alive;
        proxy_set_header Host $host;
        proxy_cache_bypass $http_upgrade;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
    }
}
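In short, give each Kestrel app its own (sub)domain in server_name and proxy location / to it, rather than mounting several apps under paths of a single server name.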

Related

How do I enable auth_basic on nginx for a domain and multiple subdomains with only one password?

I successfully added the auth_basic directive to my server block with the following config:
proxy_cache_path /var/cache/nginx levels=1:2 keys_zone=proxy_cache_path_global:10m loader_threshold=300 loader_files=200 max_size=2g inactive=60m use_temp_path=off;

upstream backend {
    server localhost:8080;
    keepalive 128;
}

server {
    listen 443 ssl;
    listen [::]:443 ssl;
    server_name example.com www.example.com app.example.com;

    auth_basic "Administrator’s Area";
    auth_basic_user_file /etc/nginx/.htpasswd;

    location / {
        proxy_pass http://backend;
        proxy_cache_methods GET HEAD;
        proxy_cache proxy_cache_path_global;
        proxy_cache_key $host$request_uri$cookie_user$slice_range;
        proxy_cache_bypass $cookie_nocache $arg_nocache$arg_comment;
        proxy_cache_min_uses 3;
        proxy_cache_valid any 1m;
        proxy_cache_revalidate on;
        proxy_cache_background_update on;
        proxy_cache_lock on;
        proxy_cache_lock_age 10s;
        proxy_cache_lock_timeout 3s;
        proxy_redirect off;
        proxy_http_version 1.1;
        proxy_read_timeout 300;
        proxy_connect_timeout 300;
        slice 1m;
        add_header X-Cache-Status $upstream_cache_status;
        proxy_set_header Host $host;
        proxy_set_header Connection "";
        proxy_set_header Range $slice_range;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_set_header X-Forwarded-Host $host;
        proxy_set_header X-Forwarded-Port $server_port;
    }
}
When I visit my website, two login prompts appear one after the other: one for example.com and another for app.example.com.
How do I configure this so that I only have to type in my credentials once?
I have already tried looking for a solution myself, unsuccessfully. Any hints would be appreciated.

Nginx + Node.js, socket.io over HTTPS doesn't work

I have a problem configuring my Node.js app on the server (HTTPS, socket.io).
I am using pm2, nginx, and Node.js (the app listens on port 3000).
Previously I configured nginx for port 80 (HTTP) and it worked fine.
But after I installed and configured the SSL certificate, I get errors in the console:
"Access to XMLHttpRequest at 'https://example:2053/socket.io/?EIO=3&transport=polling&t=NAPoEIt' from origin 'https://example.com' has been blocked by CORS policy: No 'Access-Control-Allow-Origin' header is present on the requested resource.
My nginx settings:
server {
    listen 80 default_server;
    server_name example.com www.example.com;
    return 301 http://skinsgaben.com$request_uri;
}

server {
    listen 443 ssl;
    server_name site.com www.example.com;

    ssl_certificate /var/www/site/ssl/example.crt;
    ssl_certificate_key /var/www/site/ssl/example.key;
    ssl_session_timeout 5m;
    ssl_protocols TLSv1 TLSv1.1 TLSv1.2;
    ssl_prefer_server_ciphers on;
    ssl_ciphers 'EECDH+AESGCM:EDH+AESGCM:AES256+EECDH:AES256+EDH';

    location / {
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header Host $http_host;
        proxy_set_header X-NginX-Proxy true;
        proxy_pass http://localhost:3000/;
        proxy_redirect off;
        # Socket.IO Support
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "upgrade";
    }

    location /socket.io/ {
        proxy_pass http://localhost:3000/;
        proxy_redirect off;
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "upgrade";
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    }

    error_log /var/www/site-log/error.log;
    access_log /var/www/site-log/access.log;
}
I mostly use this pattern for local and remote Node.js apps:
const fs = require('fs');

// `app` is your request handler (e.g. an Express app); APP_ENV and SSL
// come from your own config and are not defined in this snippet.
let host = process.env.host || 'your host';
let PORT = process.env.port || 'your port';
let protocol = 'http';
let options = {};

if (APP_ENV === 'production') {
    protocol = 'https';
    options = {
        key: fs.readFileSync(SSL.KEY),
        cert: fs.readFileSync(SSL.CERT)
    };
}

const server = require(protocol).createServer(options, app);
server.listen({ host, port: PORT }, () => {
    console.log(`Server listening on ${host}:${PORT}`);
});

connect() failed (111: Connection refused) while connecting upstream

[error] 7697#7697: *100335 connect() failed (111: Connection refused) while connecting to upstream, client: XXX.XXX.XXX.XXX, server: v4.domain.com, request: "GET /socket.io/?__sails_io_sdk_version=0.13.8&__sails_io_sdk_platform=browser&__sails_io_sdk_language=javascript&EIO=3&transport=polling&t=Luvcibs HTTP/1.1", upstream: "http://127.0.0.1:1338/socket.io/?__sails_io_sdk_version=0.13.8&__sails_io_sdk_platform=browser&__sails_io_sdk_language=javascript&EIO=3&transport=polling&t=Luvcibs", host: "v4.domain.com", referrer: "http://v4.domain.com/?ct=t(Flash_Sals_Videotoolz_copy_05_12_29_2016)&mc_cid=404a630ab2&mc_eid=c44f7937fe"
[error] 7700#7700: *101735 connect() failed (111: Connection refused) while connecting to upstream, client: XX.XX.XX.XX, server: v4.domain.com, request: "GET /socket.io/?__sails_io_sdk_version=0.13.8&__sails_io_sdk_platform=browser&__sails_io_sdk_language=javascript&EIO=3&transport=polling&t=LuvciVy HTTP/1.1", upstream: "http://127.0.0.1:1338/socket.io/?__sails_io_sdk_version=0.13.8&__sails_io_sdk_platform=browser&__sails_io_sdk_language=javascript&EIO=3&transport=polling&t=LuvciVy", host: "v4.domain.com", referrer: "http://v4.domain.com/"
Recently I configured my Sails.js application on an Ubuntu 16.04 VPS that uses nginx as a reverse proxy. Below is my nginx conf for the site.
The site runs fine, but all of a sudden it breaks and shows 502 Bad Gateway.
I have tried almost everything I can think of.
Please help me get this sorted.
server {
    listen 80 default_server;
    listen [::]:80 default_server;

    # SSL configuration
    #
    listen 443 ssl default_server;
    listen [::]:443 ssl default_server;
    #
    # Note: You should disable gzip for SSL traffic.
    # See: https://bugs.debian.org/773332
    #
    # Read up on ssl_ciphers to ensure a secure configuration.
    # See: https://bugs.debian.org/765782
    #
    # Self signed certs generated by the ssl-cert package
    # Don't use them in a production server!
    #
    # include snippets/snakeoil.conf;

    root /var/www/html/php;

    # Add index.php to the list if you are using PHP
    index index.html index.php index.htm index.nginx-debian.html;

    server_name domain.com www.domain.com;

    ssl_certificate /etc/letsencrypt/live/domain.com/fullchain.pem; # managed by Certbot
    ssl_certificate_key /etc/letsencrypt/live/domain.com/privkey.pem; # managed by Certbot
    ssl_dhparam /etc/ssl/certs/dhparam.pem;

    location / {
        # First attempt to serve request as file, then
        # as directory, then fall back to displaying a 404.
        #try_files $uri $uri/ =404;
        #try_files $uri $uri/ /index.php?q=$uri&$args;
        try_files $uri $uri/ /index.php?$args;
    }

    location ~ \.php$ {
        try_files not-existing-file @php;
    }

    location @php {
        #fastcgi_pass 127.0.0.1:9000;
        fastcgi_read_timeout 300;
        fastcgi_pass unix:/var/run/php/php7.0-fpm.sock;
        include snippets/fastcgi-php.conf;
    }

    location ~* \.(css|js|png|jpg|jpeg|gif|ico)$ {
        expires 1d;
    }

    # deny access to .htaccess files, if Apache's document root
    # concurs with nginx's one
    #
    #location ~ /\.ht {
    #    deny all;
    #}
}
upstream sails_server {
    server 127.0.0.1:1338; # fail_timeout=0;
    # keepalive 64;
}

server {
    listen 80;
    listen [::]:80;
    server_name v4.domain.com;
    root /root/domain/;

    # Logging
    error_log /root/domain/log/error.log notice;

    location / {
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header Host $host;
        proxy_http_version 1.1;
        proxy_set_header Connection "";
        proxy_read_timeout 300;
        proxy_pass http://sails_server;
        proxy_redirect off;
        # proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        # proxy_set_header Connection "";
        # proxy_set_header Host $host;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-NginX-Proxy true;
        # proxy_cache_path /data/nginx/cache levels=1:2 keys_zone=one:10ms;
        # proxy_cache one;
        # proxy_cache_key sfs$request_uri$scheme;
        # proxy_pass_request_headers on;
    }

    location /socket.io/ {
        proxy_pass http://sails_server/socket.io/;
        proxy_http_version 1.1;
        proxy_redirect off;
        proxy_set_header Host $host;
        proxy_set_header Port $server_port;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Nginx-Proxy true;
        proxy_pass_request_headers on;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "upgrade";
        proxy_buffers 8 32k;
        proxy_buffer_size 64k;
    }
}
My PM2 config file for my sails application is below
{
  "apps": [
    {
      "name": "dj",
      "script": "./app.js",
      "watch": false,
      "ignore_watch": ["node_modules", ".tmp"],
      "watch_options": {
        "followSymlinks": false
      },
      "env": {
        "PORT": 1338,
        "NODE_ENV": "production"
      }
    }
  ]
}

Routing traffic with cookies in Nginx

This is my first time trying to set up nginx.
I have the following problem I would like to solve.
I have two versions of my backend, v1 and v2.
A user is routed to "/v2" (proxy_pass 127.0.0.1:3000;).
Traffic through "/" (JavaScript + sockets) has to be routed to the same server, 127.0.0.1:3000, by looking at the cookie.
Here is my problem: the cookie doesn't exist yet, and therefore 404 is returned.
Any suggestions?
map $cookie_version $site_version {
    default "";
    "v1" "version=v1;Domain=localhost;Path=/";
    "v2" "version=v2;Domain=localhost;Path=/";
}

server {
    listen 8081;
    server_name localhost;

    location / {
        if ($cookie_version ~* "v1") {
            proxy_pass http://127.0.0.1:8080;
        }
        if ($cookie_version ~* "v2") {
            proxy_pass http://127.0.0.1:3000;
        }
    }

    location /v1 {
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "upgrade";
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header Host $host;
        proxy_http_version 1.1;
        set $cookie_version "v1";
        add_header Set-Cookie $site_version;
        rewrite ^/v1(.*) /$1 break;
        proxy_pass http://127.0.0.1:8080;
    }

    location /v2 {
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "upgrade";
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header Host $host;
        proxy_http_version 1.1;
        set $cookie_version "v2";
        add_header Set-Cookie $site_version;
        rewrite ^/v2(.*) /$1 break;
        proxy_pass http://127.0.0.1:3000;
    }
}
First of all, read why if is evil: http://wiki.nginx.org/IfIsEvil. Use map instead.
My suggestion:
map $cookie_version $mybackend {
    default "127.0.0.1:3000";
    "v1" "127.0.0.1:8080";
    "v2" "127.0.0.1:3000";
}
...
location / {
    proxy_pass http://$mybackend;
}
...
location ~ ^/(?<cver>v[12])/ {
    ...
    add_header Set-Cookie "version=$cver;Domain=localhost;Path=/";
    rewrite ^/v[12]/(.*)$ /$1 redirect;
}
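Putting those pieces together, an untested sketch of the full server block (listen and server_name are carried over from the question; the WebSocket headers in location / are an assumption for the socket traffic):

map $cookie_version $mybackend {
    default "127.0.0.1:3000";
    "v1" "127.0.0.1:8080";
    "v2" "127.0.0.1:3000";
}

server {
    listen 8081;
    server_name localhost;

    location / {
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "upgrade";
        proxy_set_header Host $host;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_pass http://$mybackend;
    }

    location ~ ^/(?<cver>v[12])/ {
        add_header Set-Cookie "version=$cver;Domain=localhost;Path=/";
        rewrite ^/v[12]/(.*)$ /$1 redirect;
    }
}

The first visit to /v1/... or /v2/... sets the version cookie and redirects to the bare path; after that, the map picks the matching backend for every request to /, sockets included, so no if blocks are needed.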

Nginx - Redirect HTTPS back to HTTP when leaving a secured page

I have Nginx in front of a Node.js app. I have it set up so that if the URL has /account in it, it redirects to HTTPS. My question is: how do I set it up so that when the user leaves the /account URL (clicks a link to go to the home page), they get sent back to HTTP?
Here's my nginx.conf:
worker_processes 1;
error_log logs/error.log;
pid logs/nginx.pid;

events {
    worker_connections 128;
}

http {
    include mime.types;
    default_type application/octet-stream;
    sendfile on;
    server_tokens off;
    #keepalive_timeout 0;
    keepalive_timeout 65;
    tcp_nodelay on;
    proxy_buffer_size 128k;
    proxy_buffers 4 256k;
    proxy_busy_buffers_size 256k;
    gzip on;

    server {
        listen 80;
        server_name localhost;

        location / {
            proxy_set_header x-path $uri;
            proxy_set_header X-Real-IP $remote_addr;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            proxy_set_header Host $http_host;
            proxy_set_header X-NginX-Proxy true;
            proxy_pass http://127.0.0.1:3000;
            proxy_redirect off;
        }

        location /account {
            rewrite ^(.*) https://$host$1 permanent; # redirect to https
        }

        error_page 404 /404.html;

        # redirect server error pages to the static page /50x.html
        #
        error_page 500 502 503 504 /50x.html;
        location = /50x.html {
            root html;
        }
    }

    server {
        listen 443;
        ssl on;
        ssl_certificate ssl/server.crt;
        ssl_certificate_key ssl/server.key;

        location / {
            proxy_set_header Host $http_host;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            proxy_set_header X-M-Secure "true";
            proxy_redirect off;
            proxy_max_temp_file_size 0;
            proxy_pass http://127.0.0.1:3000;
        }
    }
}
Thanks in advance for any assistance.
This is untested.
server {
    listen 443;
    ssl on;
    ssl_certificate ssl/server.crt;
    ssl_certificate_key ssl/server.key;

    location /account/ {
        proxy_set_header Host $http_host;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-M-Secure "true";
        proxy_redirect off;
        proxy_max_temp_file_size 0;
        proxy_pass http://127.0.0.1:3000;
    }

    location / {
        rewrite ^(.*) http://$host$1 permanent; # redirect to http
    }
}
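Combined with the port 80 server block from the question, which still rewrites /account to HTTPS, this should give the intended behaviour: /account is forced onto HTTPS, and any other path requested over HTTPS is redirected back to HTTP.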
