I'm trying to deploy an angular app on a remote server using nodejs and nginx.
I built the application and I serve it with a node app. Nginx acts here as a reverse proxy.
I can access the website and navigate without an issue. However, when I try to refresh the current page, my browser cannot find it anymore: the response is a 404.
But if I enter the domain in the url bar, I can access again to the website.
Does anyone know where I made a mistake?
Here's the code and the config for the node app and for nginx :
var express = require('express');
var app = express(); // create our app w/ express
var morgan = require('morgan'); // log requests to the console (express4)
var bodyParser = require('body-parser'); // pull information from HTML POST (express4)
var methodOverride = require('method-override'); // simulate DELETE and PUT (express4)
const mongoose = require("mongoose");
const dotenv = require("dotenv");
const mustacheExpress = require("mustache-express");
const path = require("path"); // needed to build an absolute path for the SPA fallback
dotenv.config();
// configuration =================
mongoose.connect(process.env.DB_CONNECT, { useNewUrlParser: true }, () => {
console.log("connected to DB !");
});
// Serve the built Angular bundle. express.static only maps URLs onto files
// that physically exist, which is why a hard refresh on a deep client-side
// route 404s without the catch-all fallback registered below.
app.use(express.static("../front-end/dist/website"));
app.use(morgan('dev')); // log every request to the console
app.use(bodyParser.urlencoded({ extended: true })); // parse application/x-www-form-urlencoded (boolean option, not the string 'true')
app.use(bodyParser.json()); // parse application/json
app.use(bodyParser.json({ type: 'application/vnd.api+json' })); // parse application/vnd.api+json as json
app.use(methodOverride());
// Templating
app.engine("html", mustacheExpress());
//import routes
const authRoute = require("./routes/auth");
const devisRoute = require("./routes/devis");
const messageRoute = require("./routes/message");
const actuRoute = require("./routes/actu");
//Routes middlewares
app.use("/api/user", authRoute);
app.use("/api/sendDevis", devisRoute);
app.use("/api/message", messageRoute);
app.use("/api/actu", actuRoute);
// SPA fallback: any GET that is not a static file or an /api route gets
// index.html so the Angular router can resolve the URL client-side.
// This fixes the 404-on-refresh problem described above.
app.get("*", (req, res) => {
res.sendFile(path.resolve(__dirname, "../front-end/dist/website/index.html"));
});
// listen (start app with node server.js) ======================================
app.listen(3000);
# HTTPS server: every request is proxied to the node app on :3000.
# NOTE: the node app's express.static only serves files that exist, so a
# hard refresh on an Angular route 404s until an index.html fallback is
# added in the node app (nginx itself is fine here).
server {
root /var/www/domain.com/html;
index index.html index.htm index.nginx-debian.html;
server_name domain.com www.domain.com;
location / {
# Proxy everything (incl. websocket upgrades) to the node server.
proxy_pass http://localhost:3000;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection 'upgrade';
proxy_set_header Host $host;
proxy_cache_bypass $http_upgrade;
}
listen [::]:443 ssl ipv6only=on; # managed by Certbot
listen 443 ssl; # managed by Certbot
ssl_certificate /etc/letsencrypt/live/domain.com/fullchain.pem; # managed by Certbot
ssl_certificate_key /etc/letsencrypt/live/domain.com/privkey.pem; # managed by Certbot
include /etc/letsencrypt/options-ssl-nginx.conf; # managed by Certbot
ssl_dhparam /etc/letsencrypt/ssl-dhparams.pem; # managed by Certbot
}
# HTTP server: redirect both hostnames to HTTPS; anything else is a 404.
server {
if ($host = www.domain.com) {
return 301 https://$host$request_uri;
} # managed by Certbot
if ($host = domain.com) {
return 301 https://$host$request_uri;
} # managed by Certbot
listen 80;
listen [::]:80;
server_name domain.com www.domain.com;
return 404; # managed by Certbot
}
This is an easy trap to fall into when doing client-side routing with a Single Page App.
The trick is this: when you navigate in the browser, the browser is not making requests to your server. When you refresh the page, the browser does send the request to the server. Since you're making an SPA, all of the information about what's on the page is in your Javascript, and that Javascript is loaded in your index.html file. The only file that exists on your server is your root file, and the express.static middleware maps URLs onto your files, so there is no file for it to find (hence the 404).
What you need server-side is to always serve up your index.html file no matter what html file is requested.
The easiest solution is to add a fallback handler last in your routing using Express's built-in .sendFile() function. The simple, nuance-free way would be something like this:
// ... ^^ all your other routes
router.get("*",
  // The '*' means match *everything*
  (req, res) => {
    // sendFile needs an absolute path or a `root` option — a bare
    // relative 'index.html' makes Express throw instead of responding.
    res.sendFile('index.html', { root: __dirname });
  }
);
You may want to use different middleware or add your own bells and whistles for including compression, etc, but the idea would be the same.
Related
When I was on my local machine it works; but when uploaded to my droplet with nginx/1.18.0 and certbot giving my domain https, I get an error trying using a GET request (on my NextJS app - localhost:3000) to my express route (npm-script: "node_1": "nodemon --watch dev -e js dev/networkNode.js 3001 http://localhost:3001").
The error is Access to XMLHttpRequest at 'http://localhost:3001/blockchain' from origin 'https://chalkcoin.io' has been blocked by CORS policy: The 'Access-Control-Allow-Origin' header has a value 'http://localhost:3000' that is not equal to the supplied origin.
I tried changing the corsOptions to origin: "https://chalkcoin.io" and origin: "*" for app.use(cors(corsOptions)). I have tried leaving http: and https: off the //localhost:3001/blockchain in Blockchain.Context.js axios request.
I have also tried putting <meta httpEquiv="Content-Security-Policy" content="upgrade-insecure-requests"></meta> in the <Head> component of _app,js
Which brings me here, what's the best approach on being able to run my npm command for node_1 with pm2 in the background while my Nextjs app runs on localhost:3000.
Express backend (localhost:3001):
// Express backend for the blockchain API; the port comes from argv[2].
const express = require("express");
const bodyParser = require("body-parser");
const cors = require("cors");
const Blockchain = require("./blockchain");
const uuid = require("uuid/v1");
const port = process.argv[2];
const rp = require("request-promise");

// Node identifier: a v1 UUID with the dashes stripped out.
const nodeAddress = uuid().replace(/-/g, "");

// Only the Next.js dev origin may call this API.
const corsOptions = {
  origin: "http://localhost:3000",
  optionsSuccessStatus: 200, // For legacy browser support
};

const app = express();
const coin = new Blockchain();

app.use(cors(corsOptions));
app.use(bodyParser.json());
app.use(bodyParser.urlencoded({ extended: false }));

// Expose the entire chain as JSON.
app.get("/blockchain", (req, res) => {
  res.json(coin);
});

app.listen(port, () => {
  console.log(`Listening on port ${port}...`);
});
Blockchain.Context.js:
import { createContext, useReducer, useEffect } from "react";
import axios from "axios";
import blockchainReducer from "../reducers/Blockchain.Reducer";
// Formats a Date as "YYYYMMDD".
// NOTE(review): extending a native prototype is an anti-pattern; a plain
// formatDate(date) helper would be safer. It is also unused in this file —
// presumably other modules call it; verify before removing.
Date.prototype.yyyymmdd = function () {
var mm = this.getMonth() + 1; // getMonth() is zero-based
var dd = this.getDate();
return [this.getFullYear(), (mm > 9 ? "" : "0") + mm, (dd > 9 ? "" : "0") + dd].join("");
};
// Contexts for reading the blockchain state and dispatching reducer actions.
export const BlockchainContext = createContext();
export const BlockchainDispatch = createContext();
// Provider that fetches the chain once on mount from node 1's REST API.
export function BlockchainProvider(props) {
const [blockchain, dispatch] = useReducer(blockchainReducer, {
chain: [],
pendingTransactions: [],
currentNodeUrl: "",
networkNodes: [],
initialized: false,
selectedBlock: [],
});
useEffect(() => {
async function getBlockchainData() {
try {
// NOTE(review): the hard-coded http://localhost:3001 bypasses the nginx
// /node1 proxy; from an https page this gets blocked (CORS / mixed
// content). Requesting /node1/blockchain on the site's own origin would
// go through the reverse proxy instead — confirm against the nginx config.
const getNode1 = `http://localhost:3001/blockchain`;
const res = await axios.get(getNode1);
dispatch({type: "INIT", data: res.data});
} catch (err) {
// Failures are only logged; the provider keeps its initial empty state.
console.log(err.message);
}
}
getBlockchainData();
}, []);
return (
<BlockchainContext.Provider value={blockchain}>
<BlockchainDispatch.Provider value={dispatch}>{props.children}</BlockchainDispatch.Provider>
</BlockchainContext.Provider>
);
}
/etc/nginx/sites-available/config
# HTTPS server for chalkcoin.io: / is proxied to Next.js (:3000) and
# /node1 to the express API (:3001). The browser should therefore call
# https://chalkcoin.io/node1/... — not http://localhost:3001 directly,
# which only works on the developer's own machine.
server {
root /var/www/html;
index index.html index.htm index.nginx-debian.html;
server_name chalkcoin.io www.chalkcoin.io;
location / {
proxy_pass http://localhost:3000;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection 'upgrade';
proxy_set_header Host $host;
proxy_cache_bypass $http_upgrade;
}
# API proxy: strips nothing — /node1/foo is passed to :3001/node1/foo.
location /node1 {
proxy_pass http://localhost:3001;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection 'upgrade';
proxy_set_header Host $host;
proxy_cache_bypass $http_upgrade;
}
listen [::]:443 ssl ipv6only=on; # managed by Certbot
listen 443 ssl; # managed by Certbot
ssl_certificate /etc/letsencrypt/live/chalkcoin.io/fullchain.pem; # managed by Certbot
ssl_certificate_key /etc/letsencrypt/live/chalkcoin.io/privkey.pem; # managed by Certbot
include /etc/letsencrypt/options-ssl-nginx.conf; # managed by Certbot
ssl_dhparam /etc/letsencrypt/ssl-dhparams.pem; # managed by Certbot
}
# HTTP server: redirect both hostnames to HTTPS; anything else is a 404.
server {
if ($host = www.chalkcoin.io) {
return 301 https://$host$request_uri;
} # managed by Certbot
if ($host = chalkcoin.io) {
return 301 https://$host$request_uri;
} # managed by Certbot
listen 80;
listen [::]:80;
server_name chalkcoin.io www.chalkcoin.io;
return 404; # managed by Certbot
}
I solved the problem for Firefox and Chrome but Safari is still giving me trouble. The Firefox and Chrome error was that the CORS header wasn't equal to the value supplied. I solved it by changing the string interpolation in the axios.get request to a regular string, and by adding the CORS options in the express app file.
// Front end: the request URL as a plain string.
const getNode1 = "http://localhost:3001/blockchain";
const res = await axios.get(getNode1);
// Back end: allow the production https origin and credentialed requests.
const corsOptions = {
optionsSuccessStatus: 200, // For legacy browser support
credentials: true, // This is important.
origin: "https://chalkcoin.io",
};
const app = express();
app.use(cors(corsOptions));
app.use(bodyParser.json());
app.use(bodyParser.urlencoded({ extended: false }));
I believe the string interpolation syntax causes this error because CORS can't accept wildcards.
But I still don't know why Safari is giving this error. Is it because my domain is HTTPS and I'm making a request to an HTTP?
I have generated static files for a reactjs app using create react app tool. I then started an nginx server on a docker container to serve the front end built using reactjs.
The server is interacting with a node js in a different container. The application was working fine until I integrated an ssl certificate to Nginx.
I started to have this error Cross-Origin Request Blocked: The Same Origin Policy disallows reading the remote resource at https://ip_address:8000/api_endpoint.
Both containers are hosted on the same machine.
here is configuration file for Nginx:
# HTTPS server for the React build: static files are served directly by
# nginx, with an index.html fallback for client-side routes. Note there is
# no proxy location for the API, so the front end reaches the backend
# container directly at https://ip_address:8000 — a different origin,
# which is why the browser enforces CORS on those requests.
server {
#listen [::]:80;
server_name domain_name.com www.domain_name.com;
root /var/www/react_app/html;
index index.html;
#server_name affiliates-pal.com;
access_log /var/log/nginx/react-app.access.log;
error_log /var/log/nginx/react-app.error.log;
location / {
# SPA fallback: unknown paths serve index.html.
try_files $uri /index.html;
}
listen 443 ssl; # managed by Certbot
ssl_certificate /etc/letsencrypt/live/domain_name.com/fullchain.pem; # managed by Certbot
ssl_certificate_key /etc/letsencrypt/live/domain_name.com/privkey.pem; # managed by Certbot
include /etc/letsencrypt/options-ssl-nginx.conf; # managed by Certbot
ssl_dhparam /etc/letsencrypt/ssl-dhparams.pem; # managed by Certbot
}
# HTTP server: redirect both hostnames to HTTPS; anything else is a 404.
server {
if ($host = www.domain_name.com) {
return 301 https://$host$request_uri;
} # managed by Certbot
if ($host = domain_name.com) {
return 301 https://$host$request_uri;
} # managed by Certbot
listen 80;
server_name domain_name.com www.domain_name.com;
return 404; # managed by Certbot
}
Should I add anything to CORS defintion in the following startup script for node back end?
const express = require("express");
const bodyparser = require("body-parser");
var app = express();
var cron = require("node-cron");
var sleep = require("system-sleep");
const usersRouter = require("./api-routes");

// Resolve the allowed CORS origin from the environment, falling back to the
// local dev front end. Declared explicitly — it was an implicit global.
let IP_ADDRESS;
if (process.env.IP_ADDRESS) {
  IP_ADDRESS = "http://" + process.env.IP_ADDRESS;
} else {
  IP_ADDRESS = "http://localhost:3000";
}
console.log("IP ADDRESS FROM GLOBAL VAR");
console.log(process.env.IP_ADDRESS);
console.log(IP_ADDRESS);

app.use(
  bodyparser.urlencoded({
    extended: true,
  })
);
app.use(bodyparser.json());

var cors = require("cors");
app.use(
  cors({
    origin: [IP_ADDRESS],
    credentials: true,
  })
);

// Manual CORS headers kept alongside the cors() middleware (redundant but
// harmless). Access-Control-Allow-Headers must be a *list of header names*;
// the previous boolean `true` was serialized to the invalid value "true",
// which breaks preflight requests that carry Content-Type/Authorization.
app.use(function (req, res, next) {
  res.header("Access-Control-Allow-Origin", IP_ADDRESS);
  res.header(
    "Access-Control-Allow-Headers",
    "Origin, X-Requested-With, Content-Type, Accept, Authorization"
  );
  res.header("Access-Control-Allow-Credentials", "true");
  res.header(
    "Access-Control-Allow-Methods",
    "GET, POST, OPTIONS, PUT, PATCH, DELETE"
  );
  next();
});

app.use("/", usersRouter);

// Daily job at 10:11:40. NOTE: system-sleep blocks the event loop for the
// full 20 s (kept as-is; it is intentional in the original).
cron.schedule("40 11 10 * * *", () => {
  console.log("before sleep");
  sleep(20000);
  console.log("after sleep");
});

const port = process.env.PORT || 8000;
app.listen(port, () => console.log(`Listening on port ${port}..`));
Should I add a proxy configuration for nginx as suggested in this answer ?
We have the same setup and we dont have issues with CORS.
Nginx config troubles me. Can you please try this ?
# Suggested config: serve the static build directly from nginx with an
# index.html fallback, instead of proxying to a node process. With both
# apps behind the same origin, no CORS headers are needed at all.
server {
root /path/to/your/static/files;
index index.html index.htm index.nginx-debian.html;
server_name subdomain.domain.com(or domain.com); # CHANGE THE NAME UNDER REDIRECT BELOW AS WELL
location / {
# SPA fallback: file, then directory, then index.html.
try_files $uri $uri/ /index.html;
}
listen 443 ssl;
ssl_certificate /path/to/your/cert/fullchain.pem;
ssl_certificate_key /path/to/your/key/privkey.pem;
}
# HTTP-to-HTTPS redirect for the same host; everything else is a 404.
server {
if ($host = subdomain.domain.com(or domain.com)) {
return 301 https://$host$request_uri;
}
server_name subdomain.domain.com(or domain.com);
listen 80;
return 404;
}
I'm new to Nginx, and I have some trouble with hosting 2 websites on my RaspberryPi (Raspbian).
I have 2 domains, site1.com (:8080) and site2.com(:8000), both are Node.JS apps (with Express). I have working SSL certifications with Let's Encrypt for both.
This is my site1 nginx config (/etc/nginx/site-available/site1):
# NOTE(review): this is described as site1's config file, yet server_name
# is site2.com (proxying to :8080, which the text says is site1's port).
# The two site files appear swapped — verify which name belongs to which
# upstream before deploying.
server {
server_name site2.com;
listen 80;
listen [::]:80;
location / {
include /etc/nginx/proxy_params;
proxy_pass http://192.168.1.11:8080;
}
}
This is my site2 nginx config (/etc/nginx/site-available/site2):
# NOTE(review): this is described as site2's config file, yet server_name
# is site1.com (proxying to :8000, which the text says is site2's port).
# Mirror of the swap noted in the site1 file — verify before deploying.
server {
server_name site1.com;
listen 80;
listen [::]:80;
location / {
include /etc/nginx/proxy_params;
proxy_pass http://192.168.1.11:8000;
}
}
So indeed there is no 443 section in these conf files, but https://site2.com works well while https://site1.com redirects me to the webpage of https://site2.com (keeping the site1 URL). I guess it's because my_server_ip:443 is already taken by site2 (no?).
And the http://site2.com give me a 502 Bad Gateway and is not redirected to https (site1 is well redirected to his https).
This is the server part of my Node apps, they are the same for the 2 apps (except a port and SSL URI)
const express = require('express')
const app = express()
var https = require('https');
var http = require('http');
var fs = require('fs');
// Load the Let's Encrypt material so node itself terminates TLS.
// NOTE(review): with nginx also configured as the web server, having the
// app bind :443 means two processes compete for the same port — this is
// consistent with the "Failed to start ... nginx" errors described above.
const privateKey = fs.readFileSync('/etc/letsencrypt/live/site1or2.com/privkey.pem', 'utf8');
const certificate = fs.readFileSync('/etc/letsencrypt/live/site1or2.com/cert.pem', 'utf8');
const ca = fs.readFileSync('/etc/letsencrypt/live/site1or2.com/chain.pem', 'utf8');
const credentials = {
key: privateKey,
cert: certificate,
ca: ca
};
const httpsServer = https.createServer(credentials, app);
const bodyParser = require("body-parser");
// NOTE(review): app.use() returns the app, not a parser, so the name
// urlencodedParser is misleading — the middleware is still registered.
const urlencodedParser = app.use(bodyParser.urlencoded({
extended: true
}));
// Plain-HTTP listener whose only job is to redirect callers to https.
http.createServer(function (req, res) {
//redirect to https
res.writeHead(301, { "Location": "https://" + req.headers['host'] + req.url });
res.end();
}).listen(8080); //8000 for my site2.com
httpsServer.listen('443', () => {
console.log('Server https listening on Port 443');
})
I tried to change the Nginx confs to add 'listen 443; SSL on;...' but I have always errors like 'Failed to start A high-performance web server and a reverse proxy server' and I don't understand how to fix it.
So is the problem from my JS code or my Nginx confs? (or both maybe..?)
Thanks for reading — it's my first StackOverflow post. I hope I didn't forget any information, and sorry if there are any English mistakes.
Have a good evening (or day)!
I finally understood how hosting multiple node websites with SSL works: in the node.js configuration (app.js), each app has to listen on only one port (8080 and 8000 for me) and must not refer to SSL or port 443 at all.
All the SSL configuration and the https redirections have to be in the nginx conf. For example, my file /etc/nginx/sites-available/site1:
# HTTP: redirect everything on site1.com to https.
server {
server_name site1.com;
listen 80;
listen [::]:80;
return 301 https://$server_name$request_uri;
}
# HTTPS: nginx terminates TLS and proxies to the node app on :8080.
server {
listen 443 ssl;
# NOTE(review): Let's Encrypt's fullchain.pem (leaf + intermediates) is
# normally preferred over cert.pem here — some clients fail chain
# validation with the leaf alone; confirm.
ssl_certificate /etc/letsencrypt/live/site1.com/cert.pem;
ssl_certificate_key /etc/letsencrypt/live/site1.com/privkey.pem;
server_name site1.com;
location / {
proxy_pass http://192.168.1.11:8080;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection 'upgrade';
proxy_set_header Host $host;
proxy_cache_bypass $http_upgrade;
}
}
And the same for site2 with port 8000.
I have some trouble with nginx proxy_pass redirection on localhost subdomain. I have a domain "domain.com", i want to redirect all request on *.domain.com on *.localhost:9000. Then node handle all request on *.localhost:9000 to the good express app.
On nginx conf when i try the following :
# Literal host name in proxy_pass: nginx resolves extranet.localhost with
# the system resolver (which honors /etc/hosts) at startup — this works.
server {
server_name extranet.domain.com;
listen 80;
location / {
proxy_pass http://extranet.localhost:9000;
}
}
Request on extranet.domain.com are well redirected to the good express webapp.
With this :
# Regex capture used only in the *path*: the upstream host is the literal
# "localhost", so no run-time DNS lookup is needed — this also works.
server {
server_name ~^(.*?)\.domain\.com$;
listen 80;
location / {
proxy_pass http://localhost:9000/$1;
}
}
The Express app running on localhost:9000 handles the request /mysubdomainname, which implies that the regex is good.
But when i try :
# Variable in the *host* part: nginx must resolve "$1.localhost" at run
# time with its own resolver, which ignores /etc/hosts — hence the 502s.
server {
server_name ~^(.*?)\.domain\.com$;
listen 80;
location / {
proxy_pass http://$1.localhost:9000;
}
}
All request on *.domain.com return http code 502.
Why http://localhost:9000/$1; works and not http://$1.localhost:9000; ?
(all subdomain are set in /etc/hosts).
Thanks in advance. I'm totally lost !
When a host name isn't known at run-time, nginx has to use its own resolver. Unlike the resolver provided by OS, it doesn't use your /etc/hosts file.
Maybe this will give you a hint, I wanted to pass the subdomain from Nginx to an Express app. Here is my code:
nginx.conf
# Excerpt of nginx.conf — the closing brace of the http block is omitted
# in this snippet; only the upstream definition is shown.
http {
upstream express {
server localhost:3000;
}
domain.com inside nginx/sites-available
# Capture the subdomain with a named regex group and forward it to the
# express upstream in a custom "Subdomain" request header.
server {
listen 80;
server_name ~^(?<subdomain>.+)\.domain\.com$;
location / {
proxy_set_header Subdomain $subdomain;
proxy_set_header Host $host;
proxy_pass http://express;
}
}
Express app index.js
var express = require('express');
var app = express();
// Echo back the subdomain that nginx forwarded in the "Subdomain" header.
app.get('/', function (req, res) {
  const subdomain = req.headers.subdomain;
  // The original handler never responded, leaving requests hanging.
  res.send(subdomain);
});
app.listen(3000, function () {
  // Log message previously claimed port 4000 while listening on 3000.
  console.log('Example app listening on port 3000!');
});
Here is my index.js file. I have changed the line below so I can mount the parse server at the root url, for example parse.example.com:1337 instead of parse.example.com:1337/parse, but I'm not sure if it is the correct way, and I have very little experience with nodejs and javascript.
"var mountPath = process.env.PARSE_MOUNT || '/parse';" to " var mountPath = process.env.PARSE_MOUNT || '/';"
index.js
var express = require('express');
var ParseServer = require('parse-server').ParseServer;
var path = require('path');
// Database connection string from the environment, with a localhost fallback below.
var databaseUri = process.env.DATABASE_URI || process.env.MONGODB_URI;
if (!databaseUri) {
console.log('DATABASE_URI not specified, falling back to localhost.');
}
var api = new ParseServer({
databaseURI: databaseUri || 'mongodb://parse:secretpass#127.0.0.1:27017/parsedb',
cloud: process.env.CLOUD_CODE_MAIN || __dirname + '/cloud/main.js',
appId: process.env.APP_ID || 'xxxxxxxxxxxxxxxxxx',
masterKey: process.env.MASTER_KEY || 'xxxxxxxxxxxxxxxx', //Add your master key here. Keep it secret!
// NOTE(review): if the mount path changes to '/', serverURL should match
// (it tells Parse Server how to reach itself) — confirm.
serverURL: process.env.SERVER_URL || 'http://localhost:1337/', // Don't forget to change to https if needed
liveQuery: {
classNames: ["Posts", "Comments"] // List of classes to support for query subscriptions
}
});
var app = express();
// Serve static assets from the /public folder
app.use('/public', express.static(path.join(__dirname, '/public')));
// Serve the Parse API on the /parse URL prefix
// NOTE(review): with '/' as the mount path, the Parse middleware sees every
// request first, so the app.get('/') greeting below may never run — verify
// the desired behavior before shipping this change.
var mountPath = process.env.PARSE_MOUNT || '/';
app.use(mountPath, api);
// Parse Server plays nicely with the rest of your web routes
app.get('/', function(req, res) {
res.status(200).send('I dream of being a website. Please star the parse-server repo on GitHub!');
});
// There will be a test page available on the /test path of your server url
// Remove this before launching your app
app.get('/test', function(req, res) {
res.sendFile(path.join(__dirname, '/public/test.html'));
});
var port = process.env.PORT || 1337;
var httpServer = require('http').createServer(app);
httpServer.listen(port, function() {
console.log('parse-server-example running on port ' + port + '.');
});
// This will enable the Live Query real-time server
ParseServer.createLiveQueryServer(httpServer);
You could use NGINX as a reverse proxy.
Install the nginx package:
sudo apt-get install -y nginx
Open /etc/nginx/sites-enabled/default
sudo nano /etc/nginx/sites-enabled/default
Replace it with the following:
# HTTP - redirect all requests to HTTPS
server {
listen 80;
listen [::]:80 default_server ipv6only=on;
return 301 https://$host$request_uri;
}
# HTTPS - serve HTML from /usr/share/nginx/html, proxy requests to /parse/
# through to Parse Server
server {
# "ssl" belongs on the listen directive; the separate "ssl on;" directive
# is deprecated, and "listen 443" without it serves plain HTTP on 443.
listen 443 ssl;
server_name your_domain_name;
root /usr/share/nginx/html;
index index.html index.htm;
# Use certificate and key
ssl_certificate /etc/letsencrypt/live/your_domain_name/fullchain.pem;
ssl_certificate_key /etc/letsencrypt/live/your_domain_name/privkey.pem;
ssl_session_timeout 5m;
# TLSv1 and TLSv1.1 are obsolete; allow 1.2+ only.
ssl_protocols TLSv1.2 TLSv1.3;
ssl_prefer_server_ciphers on;
ssl_ciphers 'EECDH+AESGCM:EDH+AESGCM:AES256+EECDH:AES256+EDH';
# Pass requests for /parse/ to Parse Server instance at localhost:1337
location /parse/ {
rewrite ^/parse(/.*)$ $1 break; # strip the /parse prefix before proxying
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-NginX-Proxy true;
proxy_pass http://localhost:1337/;
proxy_ssl_session_reuse off;
proxy_set_header Host $http_host;
proxy_redirect off;
}
location / {
try_files $uri $uri/ =404;
}
}
Save, exit and restart
sudo service nginx restart
Update:
Parse Server needs an ssl certificate to work by default, you can disable it but it is very strongly recommended to only use it via HTTPS.
I am using Lets Encrypt certificates, if you need help creating them I can show you a tutorial I wrote, or you can use your own certificates.