multiple instances of same node.js app on AWS - node.js

Is there an option on AWS to run multiple instances of same node.js app?
When I try to run multiple instances of my node.js app on my computer, I get an EADDRINUSE error. When I change the port number in my code and save it as a new js file, I am able to run multiple instances. But how could I set this up on AWS? Does AWS Elastic Beanstalk perhaps have an option to set this? I couldn't find one.
This is part of my app where I communicate with clients, that's where I change the port number:
// Minimal JSONP-style endpoint: only the /getjson path gets a reply; every
// other path is left unanswered, exactly as in the original.
http.createServer(function(request, response) {
  var requestedPath = url.parse(request.url).pathname;
  if (requestedPath !== "/getjson") {
    return;
  }
  response.writeHead(200, {'Content-Type': 'text/plain'});
  response.end('_testcb(\''+string+'\')');
  console.log(new Date() + " - response sent to client");
}).listen(port);
Or maybe you have some better solution for running multiple instances of the same node.js app? Note that the multiple instances need to share a small database.

You may want to use Node's cluster API. Documentation is here.
// Use Node's cluster module: the master forks one worker per CPU core and
// refills the pool whenever a worker dies; every worker runs the same HTTP
// server, and the cluster module shares the listening port between them.
var cluster = require('cluster');
var numCPUs = require('os').cpus().length;

if (cluster.isMaster) {
  // Fork one worker per CPU core.
  var n;
  for (n = 0; n < numCPUs; n++) {
    cluster.fork();
  }
  // Keep the pool full: replace any worker that exits.
  cluster.on('exit', function(worker, code, signal) {
    console.log('Cluster worker ' + worker.process.pid + ' died');
    cluster.fork();
  });
} else {
  // Worker: same request handler as the single-process version.
  http.createServer(function(request, response) {
    var requestedPath = url.parse(request.url).pathname;
    if (requestedPath == "/getjson") {
      response.writeHead(200, {'Content-Type': 'text/plain'});
      response.end('_testcb(\''+string+'\')');
      console.log(new Date() + " - response sent to client");
    }
  }).listen(port);
}

Have you tried Amazon Load Balancing? More details can be found here. The load balancer can also manage auto scaling depending on traffic. See more documentation here.

Related

How to do IP-based API rate limitation in node.js cluster mode?

I am running a node.js/express/socket.io app in cluster mode.
I run an external server for the master process and internal servers for the workers, as in the following code.
if (cluster.isMaster) {
  var workers = [];

  // Fork a worker into slot i and respawn it if it ever exits.
  var spawn = function(i) {
    workers[i] = cluster.fork();
    // Optional: Restart worker on exit
    workers[i].on('exit', function(code, signal) {
      console.log('Process exited! respawning worker no: ', i);
      spawn(i);
    });
  };
  // Spawn workers.
  for (var i = 0; i < numCPUs; i++) {
    spawn(i);
  }

  // Map a client IP to a worker slot so the same client always lands on the
  // same worker (sticky sessions).
  var worker_index = function(ip, len) {
    return farmhash.fingerprint32(ip) % len; // Farmhash is the fastest and works with IPv6, too
  };

  // Create the outside facing server listening on our port.
  var server = net.createServer({ pauseOnConnect: true }, function(connection) {
    //!!!! the connection.remoteAddress causes issues with reverse proxy, however here not important as socket.io is used only for a global signalling to all clients
    // console.log(connection.remoteAddress);
    var worker = workers[worker_index(connection.remoteAddress, numCPUs)];
    worker.send('sticky-session:connection', connection);
  }).listen(port);
  console.log('***************************************');
  console.log('listening EXTERNALLY http://localhost:' + port);
  console.log("This system has " + numCPUs + " virtual or real CPUs");
  console.log('***************************************');
  // FIX: the original code had a bare `connection.remoteAddress` statement
  // here at master scope. `connection` is only defined inside the
  // net.createServer callback above, so that stray line threw a
  // ReferenceError as soon as the master started; it has been removed.
} else {
  // all worker processes enter the app from server.js
  require("./server.js");
}
And then in server.js, which is being started up according to the number of cores available on the system.
// Excerpt from server.js (run once per cluster worker). The bare `...`
// lines elide unrelated parts of the file, so this is not runnable as-is.
const rateLimit = require('express-rate-limit');
...
app.enable("trust proxy"); // only if you're behind a reverse proxy (Heroku, Bluemix, AWS ELB, Nginx, etc)
...
app.use('/api/', apiLimiter); // --> this does not work as the real IP is not available
...
//startup the HTTP server
// Port 0 asks the OS for any free port; bound to localhost only, because the
// master process hands connections to this worker over IPC rather than the
// worker accepting them directly from the outside.
server.listen(0, 'localhost', function(err) {
console.log('***************************************');
console.log('***************************************');
console.log('listening INTERNALLY http://localhost:0');
console.log('***************************************');
});
With this code, the API limit (express-rate-limit) from node.js does not work. I suspect it is because the IP is always the same on the internal server (127.0.0.1, localhost).
After creating the http server, I can get the real IP with the following command:
req.headers['x-real-ip'] || req.connection.remoteAddress;
But this does not work before the http server has been created but I need this to enable the API limiter.
Somehow, I need to pass along the IP-address from the external server which seems is a TCP server.
Is there a way to capture the real IP at the very beginning before creating the http server?

Maximum performance for node HTTP server?

I'm running a test trying to draw maximum delivery speed from a Node HTTP server. It's a simple server.
In my test I have 50K virtual clients establishing a permanent connection with the server (I run ulimit -n 99999 before). Then, upon another event, an HTTP connection to a different port, the server sends one message to each virtual client. At the end all clients receive the corresponding message.
Sending all messages takes minutes in my tests. Are there any recommendations that would help me improve these measurements so that I can send 50K messages in seconds instead of minutes?
The server is running in a m1.medium AWS instance. The idea is to improve performance with the same platform.
Copying the server code:
// Load-test harness: port 8888 accepts and "parks" long-lived client
// connections without answering them; a later hit on port 8887 triggers a
// push of one short message to every parked client.
var http = require("http");

var connectionResponse = []; // parked response objects, one per client
var connectionIndex = 0;     // how many clients have been parked so far

// Port 8888: stash each incoming response; only arm a long socket timeout.
http.createServer(function(request, response) {
  console.log("Received connection " + connectionIndex);
  response.setTimeout(1200000, function() {
    console.log("Socket timeout");
  });
  connectionResponse[connectionIndex] = response;
  connectionIndex++;
}).listen(8888);

// Port 8887: the trigger. Reply to the caller immediately, then flush a
// 4-byte payload to every parked client and log the start/end timestamps.
http.createServer(function(request, response) {
  console.log("8887 connected - Will respond");
  response.writeHead(200, {"Content-Type": "text/plain"});
  response.write("Triggered all responses");
  response.end();
  console.log("Begin notifications:" + new Date().toISOString());
  var client;
  for (client = 0; client < connectionIndex; client++) {
    connectionResponse[client].writeHead(200, {"Content-Type": "text/plain", "Content-Length": 4, "transfer-encoding" : ""});
    connectionResponse[client].write("CAFE");
    connectionResponse[client].end();
  }
  console.log("End notifications" + new Date().toISOString());
}).listen(8887);
Setting this http://nodejs.org/api/http.html#http_agent_maxsockets to a sufficient number
// Lift the cap on concurrent outbound sockets for both the HTTP and HTTPS
// global agents (xxx stands for whatever limit fits your workload).
var http = require('http');
var https = require('https');
http.globalAgent.maxSockets = xxx;
https.globalAgent.maxSockets = xxx;
Using nodejs clustering module, http://nodejs.org/api/cluster.html
Now, regarding the clustering, it really depends on what you want to do. The default example can go long way before you have to tweak it. An example would be
// Stock cluster example: the master forks one worker per CPU core; each
// worker runs an identical HTTP server and they all share port 8000.
var cluster = require('cluster');
var http = require('http');
var numCPUs = require('os').cpus().length;

if (cluster.isMaster) {
  // Fork workers.
  var n;
  for (n = 0; n < numCPUs; n++) {
    cluster.fork();
  }
  // Dead workers are logged but (unlike some variants) not respawned.
  cluster.on('exit', function(worker, code, signal) {
    console.log('worker ' + worker.process.pid + ' died');
  });
} else {
  // Workers can share any TCP connection
  // In this case its a HTTP server
  http.createServer(function(req, res) {
    res.writeHead(200);
    res.end("hello world\n");
  }).listen(8000);
}

how does node.js clustering work?

I am trying to understand the following piece of code, which is used to create multiple servers to make use of a multi-core CPU.
var cluster = require("cluster");
var http = require("http");
var numCPUs = require("os").cpus().length;
// FIX: always pass the radix to parseInt — without it, strings with a
// leading zero could be misparsed under legacy octal rules.
var port = parseInt(process.argv[2], 10);

if (cluster.isMaster) {
  // Master: fork one worker per CPU core...
  for (var i = 0; i < numCPUs; i++) {
    cluster.fork();
  }
  // ...and replace any worker that dies, keeping the pool full.
  cluster.on("exit", function(worker, code, signal) {
    cluster.fork();
  });
} else {
  // Workers all listen on the same port; the cluster module shares the
  // underlying server handle, so each request is handled by exactly one
  // worker (see the scheduling note in the text below).
  http.createServer(function(request, response) {
    console.log("Request for: " + request.url);
    response.writeHead(200);
    response.end("hello world\n");
  }).listen(port);
}
My question is: given that every created server listens on the same port, what guarantees that a request won't be served by more than one server?
In node v0.10, the OS kernel always chooses which child gets the request. In node v0.11+ and io.js v1.0.0+, manual round-robin scheduling is used (except on Windows for now). This default behavior is configurable by setting an environment variable though.

How to create server instance in master in nodejs cluster? (both in worker and master)

The following code snippet is throwing error. Is there a way to achieve the following functionality ? If not why it is not possible to create server instance in nodejs cluster master?
var cluster = require('cluster');
var http = require('http');

if (cluster.isMaster) {
  var numCPUs = require('os').cpus().length;
  var w;
  for (w = 0; w < numCPUs; w++) {
    cluster.fork();
  }
  // How to create server instance in master?
  // It is throwing error when I create server like below.
  // (Master and workers all try to bind port 8000 here; per the answer
  // below, the address:port combination ends up in use, which is what
  // raises the error this question asks about.)
  http.Server(function(req, res) {
    res.writeHead(200);
    res.end("hello worlddddd\n");
  }).listen(8000);
} else {
  http.Server(function(req, res) {
    res.writeHead(200);
    res.end("hello world\n");
  }).listen(8000);
}
The error that I got when running this code seems to point to the address:port combination already being in use. Change the port that your master server is listening on, and that should resolve the error.

Node.js Cluster not listening

I am trying to build a "Hello World" multi-process node.js HTTP server.
Using the code samples provided in the node docs here I can't get the "listening" event to fire, and thus an HTTP server to respond to requests. I am, however, getting the "online" event to fire.
How can I get this server to respond to requests?
I'm running on OSX 10.8.4, node v0.10.7.
Here's what I have:
// Hello-world cluster server with worker-lifecycle logging on the master.
var cluster = require('cluster');
var http = require('http');
var numCPUs = require('os').cpus().length;

if (cluster.isMaster) {
  // Fork workers.
  var idx;
  for (idx = 0; idx < numCPUs; idx++) {
    cluster.fork();
  }
  // Lifecycle events observed by the master:
  cluster.on('online', function(worker) {
    console.log('A worker with #' + worker.id);
  });
  cluster.on('listening', function(worker, address) {
    console.log('A worker is now connected to ' + address.address + ':' + address.port);
  });
  cluster.on('exit', function(worker, code, signal) {
    console.log('worker ' + worker.process.pid + ' died');
  });
} else {
  // Workers can share any TCP connection
  // In this case its a HTTP server
  http.createServer(function(req, res) {
    res.writeHead(200);
    res.end('hello world\n');
  }).listen(8000);
}
I fixed the issue by running the file directly. Previously, I ran server.js which was a one liner:
server.js
// server.js is a one-line bootstrap that just loads the real app module.
require("./server/app.js");
server/app.js (code snippet in the question)
By running node app.js instead of node server.js the cluster started listening. Not sure why this made a difference.

Resources