I'm doing a little OJT on my first node project and, while I can stand up a simple server, the app is going to get hammered so using cluster seems like a good idea. I've cobbled together some code snippets that I've found in various searches (including SO), but the server won't start. I'm sure my inexperience with node has me doing something stupid, but I don't see it.
var express = require( 'express' );
var cluster = require( 'cluster' );
var path = require( 'path' );
var cCPUs = require( 'os' ).cpus().length;
var port = 3000;
var root = path.dirname( __dirname );
if( cluster.isMaster ) {
for( var i = 0; i < cCPUs; i++ ) {
cluster.fork();
}
cluster.on( 'death', function( worker ) {
console.log( 'Worker ' + worker.pid + ' died.' );
});
}
else {
// eyes.inspect( process.env );
console.log( 'Worker: %s', process.env.NODE_WORKER_ID );
var app = express();
var routes = require( './routes' )( app );
app
.use( cluster.repl( root + 'cluster.repl' ) )
.use( cluster.stats({ connections: true, requests: true }) )
.use( cluster.reload( root ) )
.listen( port );
}
RESULT:
TypeError: Object #<Cluster> has no method 'repl'
If I remove the use calls, the workers start up correctly, but process.env.NODE_WORKER_ID is undefined. Inspecting process.env shows me that it's definitely not defined. Maybe the snippet I used was from an old version, but I'm not sure how to identify the worker thread in any other way.
If anyone can unscramble whatever I've scrambled, I'd really appreciate it.
For anyone searching later, here's what I ended up with:
const cluster = require('cluster');
const express = require('express');
const path = require('path');
const port = 3000;
const root = path.dirname(__dirname);
const cCPUs = require('os').cpus().length;
if (cluster.isMaster) {
// Create a worker for each CPU
for (let i = 0; i < cCPUs; i++) {
cluster.fork();
}
cluster.on('online', function (worker) {
console.log('Worker ' + worker.process.pid + ' is online.');
});
cluster.on('exit', function (worker, code, signal) {
console.log('worker ' + worker.process.pid + ' died.');
});
} else {
const app = express();
const routes = require('./routes')(app);
app.use(express.bodyParser()).listen(port);
}
I'm still very early in the node learning curve, but the server starts and appears to have a worker running on each core. Thanks to JohnnyH for getting me on the right track.
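One note on identifying workers: newer versions of the core cluster module no longer set NODE_WORKER_ID; inside a worker, cluster.worker is defined instead (in the master it is undefined). A minimal sketch of logging which process is which:
var cluster = require('cluster');
if (cluster.isMaster) {
  console.log('Master PID: ' + process.pid);
} else {
  // cluster.worker exists only inside worker processes
  console.log('Worker id: ' + cluster.worker.id + ', PID: ' + process.pid);
}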
Also take a look at cluster2. It's used by eBay and has an Express example:
var Cluster = require('cluster2'),
express = require('express');
var app = express.createServer();
app.get('/', function(req, res) {
res.send('hello');
});
var c = new Cluster({
port: 3000,
});
c.listen(function(cb) {
cb(app);
});
Here is my draft of a Cluster.js class. Note that we should catch a port conflict when starting the master process.
/*jslint indent: 2, node: true, nomen: true, vars: true */
'use strict';
module.exports = function Cluster(options, resources, logger) {
var start = function () {
var cluster = require('cluster');
if (cluster.isMaster) {
require('portscanner').checkPortStatus(options.express.port, '127.0.0.1', function (error, status) {
if (status === 'open') {
logger.log.error('Master server failed to start on port %d due to port conflict', options.express.port);
process.exit(1);
}
});
// Each core to run a single process.
// Running more than one process in a core does not add to the performance.
require('os').cpus().forEach(function () {
cluster.fork();
});
cluster.on('exit', function (worker, code, signal) {
logger.log.warn('Worker server died (ID: %d, PID: %d)', worker.id, worker.process.pid);
cluster.fork();
});
} else if (cluster.isWorker) {
var _ = require('underscore');
var express = require('express');
var resource = require('express-resource');
// Init App
var app = express();
// App Property
app.set('port', process.env.PORT || options.express.port);
app.set('views', options.viewPath);
app.set('view engine', 'jade');
app.set('case sensitive routing', true);
app.set('strict routing', false);
// App Middleware
app.use(express.favicon(options.faviconPath));
app.use(express.logger({ stream: logger.stream() }));
app.use(express.bodyParser());
app.use(express.methodOverride());
app.use(express.responseTime());
app.use(app.router);
app.use(require('stylus').middleware(options.publicPath));
app.use(express['static'](options.publicPath));
if (options.express.displayError) {
app.use(express.errorHandler());
}
// App Format
app.locals.pretty = options.express.prettyHTML;
// App Route Handler
if (!_.isUndefined(resources) && _.isArray(resources)) {
_.each(resources, function (item) {
if (!_.isUndefined(item.name) && !_.isUndefined(item.path)) {
app.resource(item.name, require(item.path));
}
});
}
// Start Server
var domain = require('domain').create();
domain.run(function () {
require('http').createServer(app).listen(app.get('port'), function () {
logger.log.info('Worker server started on port %d (ID: %d, PID: %d)', app.get('port'), cluster.worker.id, cluster.worker.process.pid);
});
});
domain.on('error', function (error) {
logger.log.error(error.stack);
});
}
};
return {
start: start
};
};
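A usage sketch, assuming hypothetical options, resources and logger objects shaped the way the class above expects them (the paths and resource names are made up):
// logger is assumed to be a winston-style wrapper exposing log.* and stream()
var logger = require('./logger');
var cluster = require('./Cluster')(
  {
    express: { port: 3000, displayError: true, prettyHTML: true },
    viewPath: __dirname + '/views',
    publicPath: __dirname + '/public',
    faviconPath: __dirname + '/public/favicon.ico'
  },
  [
    { name: 'users', path: __dirname + '/resources/users' }
  ],
  logger
);
cluster.start();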
Server
var cluster = require('cluster');
var express = require('express');
// Code to run if we're in the master process
if (cluster.isMaster) {
// Count the machine's CPUs
var cpuCount = require('os').cpus().length;
// Create a worker for each CPU
for (var i = 0; i < cpuCount; i += 1) {
cluster.fork();
}
// Listen for terminating workers
cluster.on('exit', function (worker) {
// Replace the terminated workers
console.log('Worker ' + worker.id + ' died :(');
cluster.fork();
});
// Code to run if we're in a worker process
}
else {
const app = express();
const server = require('http').createServer(app);
const io = require('socket.io')(server);
app.set('socketio', io);
const port = process.env.PORT || 9090;
server.listen(port,() => {
console.log('Server running at http://127.0.0.1:' + port + '/');
});
io.on('connection', function (socket) {
console.log("CONNECTED")
});
}
Client
import io from 'socket.io-client'
const socket = io('http://localhost:9090');
socket.on('notification', (data) => {
if(props.user && data.user._id === props.user._id) {
this.setNotification(data.notification);
}
})
error message
http://localhost:9090/socket.io/?EIO=3&transport=polling&t=MaDRz1u&sid=VzBUqt22usNbdqKCAAAb 400 (Bad Request)
When I remove the if/else and keep only the code that is in the else branch, everything works. What do I need to add so that the session ID is not unknown?
The response object is {"code":1,"message":"Session ID unknown"}
This happens because the child processes created by cluster are not in sync and do not know about each other. To overcome this you will need an adapter so the workers can communicate.
Refer to the Socket.io documentation on using multiple nodes to overcome this issue.
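As a rough sketch (assuming socket.io 1.x and a Redis server on localhost:6379), each worker attaches the Redis adapter to its own socket.io instance:
// Inside the else (worker) branch, after const io = require('socket.io')(server):
var redisAdapter = require('socket.io-redis');
// All workers publish and subscribe through Redis, so an emit from one worker
// reaches sockets that are connected to the others.
io.adapter(redisAdapter({ host: 'localhost', port: 6379 }));
Note that for the polling transports you also need sticky routing (see the sticky-session answers further down) so that every request from one client reaches the worker that performed its handshake.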
My Socket.IO server runs fine on a single instance of Node.js, but when I use the Node.js cluster module to run the server on multiple cores I get the error "Connection closed before receiving a handshake response". I googled the reason and found the following:
Essence of the problem is, when you run multiple Node app threads (workers) on a server, or multiple servers, socket.io clients connections are routed by cluster in a random round-robin manner, and handshaken / authorized io client requests get handed to workers where they are not handshaken / authorized, where the mess begins. Source Link
I've tried a couple of things to make it work, but no success so far. Here's the code:
'use strict';
process.env.NODE_ENV = process.env.NODE_ENV || 'development';
var express = require('express');
var config = require('./config/environment');
var session = require('express-session');
var redisStore = require('connect-redis')(session);
var cluster = require('cluster');
var domain = require('domain');
var socketIo = require('./config/socketio');
var REDIS = require('redis')
var store = REDIS.createClient();
var pub = REDIS.createClient();
var sub = REDIS.createClient();
if(cluster.isMaster) {
var numWorkers = require('os').cpus().length;
for(var i = 0; i < numWorkers; i++) {
cluster.fork();
}
} else {
var d = domain.create ();
d.on ("error", function (error){
// start new server
});
// Setup server
var app = express();
var server = require('http');
d.run (function (){
server = server.createServer(app);
});
require('./config/express')(app);
require('./config/redis')();
require('./routes')(app);
server.listen(config.port, config.ip, function () {
console.log('Express server listening on %d, in %s mode', config.port, app.get('env'));
});
var redis = require('socket.io-redis');
var socketIO = require('socket.io')(server, {
serveClient: (config.env === 'production') ? false : true,
path: '/socket.io-client'
});
sub.subscribe('chat');
socketIO.adapter(redis( {host: 'localhost', port: 6379}));
socketIo.createSocketConnection('/dummy', socketIO, sub, pub, store);
exports = module.exports = app;
}
File: ./config/socketio
'use strict';
function addNamespaceForId (socketio, namespace, sub, pub, store){
socketio.of(namespace).on('connection', function(socket) {
onConnect(socketio, socket, namespace, sub, pub, store);
console.info('[%s] CONNECTED', socket.address);
sub.on('message', function(pattern, key){
store.hgetall(key, function(e, obj){
socket.send(obj.uid + ": " + obj.text)
})
})
socket.on('disconnect', function() {
console.info('[%s] DISCONNECTED', socket.address);
});
});
}
function onConnect(io, socket, namespace, sub, pub, store) {
socket.on('message', function(from, msg) {
store.incr("messageNextId", function(e, id){
store.hmset("messages:" + id, { uid: socket.sessionId, text: 'text;' }, function(e, r){
pub.publish("chat", "messages:" + id)
})
})
io.emit('broadcast', {
payload : from['message'],
source : from
});
io.of(namespace).emit('broadcast', {
payload : from['message'],
source : from
});
});
// When the client emits 'info', this listens and executes
socket.on('info', function(data) {
console.info('[%s] %s', socket.address, JSON.stringify(data, null, 2));
});
// Insert sockets below
require('../api/thing/thing.socket').register(socket);
}
module.exports = {
createSocketConnection : function (namespace, socketio, sub, pub, store){
addNamespaceForId(socketio, namespace, sub, pub, store);
}
};
I've also tried using the Redis adapter as suggested in the documentation.
This setup works sometimes but not always, and I'm unable to figure out the missing piece.
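One thing I have not tried yet is sticky routing, so that every request from a given client always lands on the worker that performed its handshake. Roughly something like this, adapted from the sticky-session README (untested in my setup):
var sticky = require('sticky-session');
var http = require('http');
sticky(function () {
  // This callback runs only in the workers; sticky-session does the forking
  // itself and routes each client (by remote address) to the same worker.
  var app = require('express')();
  var server = http.createServer(app);
  require('socket.io')(server);
  return server;
}).listen(config.port, function () {
  console.log('Express server listening on %d', config.port);
});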
I've been bouncing back and forth between socket.io and express.io, but settled on socket.io with Express 4, as I would like to use namespaces.
I have worked through some examples of an Express 4 server using Socket.io, but most of them are based on one file with everything in it. I am trying to separate my code to make it easier to maintain, but I am at a loss as to how (or where) to add Socket.io.
I have index.js which uses Cluster and basically calls server.js:
var server = require( "./server.js" );
var cluster = require('cluster');
var webApp={
run: function(){
console.log('Starting: Server');
server.listen();
}
};
if(cluster.isMaster){
cluster.fork();
cluster.on('exit',function(worker){
console.log('Worker ' + worker.id + ' died..');
setTimeout( function () { cluster.fork(); }, 1000 );
});
} else{
try {
webApp.run();
}
catch(e)
{
console.log(e);
process.exit(1);
}
process.on('uncaughtException', function(err){
console.log(err);
process.exit(1);
});
process.on( 'SIGINT', function () {
console.log( "\n SIGINT (Crtl-C)" );
//Kill worker
cluster.disconnect();
process.exit(1);
});
}
This then calls the server.js file:
var path = require('path');
var express = require('express');
var bodyParser = require('body-parser');
var config = require('./config/config.js');
var router = require('./routes');
var Server = Object.subClass({
/**
* Constructor
*/
init:function(){
this.appServer = express();
var that = this;
var appServer = this.appServer;
appServer.use(express.static(__dirname + '/public'));
appServer.set('views', path.join(__dirname, 'views'));
appServer.set('view engine', 'ejs');
appServer.use(bodyParser.urlencoded({ extended: true }));
appServer.use(bodyParser.json());
appServer.get('/',router.root);
},
/**
* Listener HTTP
*/
listen:function(){
var port = config.rest.port;
console.log(':: on port:' + port);
this.appServer.listen(port);
}
});
module.exports = new Server();
I only have one 'route', which is '/' and is defined in the routes.js file. The page loads fine, but where do I add the server-side socket.io? And do the socket.io namespace definitions go in the routes.js file or in the JavaScript of the page being loaded?
There are so many ways of using sockets that I can't work out the best fit for my multi-file setup.
Any help would be brilliant, as I seem to be going in circles.
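For context, here is roughly where I imagine the socket.io part would go, shown as a stripped-down single-file sketch (the '/news' namespace name is just made up for illustration):
var express = require('express');
var app = express();
// listen() returns the underlying http.Server, which socket.io attaches to
var httpServer = app.listen(3000, function () {
  console.log(':: on port: 3000');
});
var io = require('socket.io')(httpServer);
// Namespace definitions live on the server; the page's client script would
// connect with io('/news')
io.of('/news').on('connection', function (socket) {
  socket.emit('welcome', { msg: 'hello from the news namespace' });
});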
Enjoy your Saturday :)
Thanks again.
I've spent the morning looking at the Cluster/Worker approach and decided to use 'SocketCluster' as it seems to do what I need.
Enjoy your Sunday
I have had a big problem for a week now. I am trying to convert my Node.js project, which currently runs on a single core, to multi-core with cluster.
With websockets I have no problems with events at the moment, but with xhr-polling or jsonp-polling I have big problems with socket.io in cluster mode.
This is my server configuration:
00-generic.js
'use strict';
var http = require('http'),
os = require('os'),
cluster = require('cluster');
module.exports = function(done) {
var app = this.express,
port = process.env.PORT || 3000,
address = '0.0.0.0';
if(this.env == 'test'){
port = 3030;
}
var self = this;
var size = os.cpus().length;
if (cluster.isMaster) {
console.info('Creating HTTP server cluster with %d workers', size);
for (var i = 0; i < size; ++i) {
console.log('spawning worker process %d', (i + 1));
cluster.fork();
}
cluster.on('fork', function(worker) {
console.log('worker %s spawned', worker.id);
});
cluster.on('online', function(worker) {
console.log('worker %s online', worker.id);
});
cluster.on('listening', function(worker, addr) {
console.log('worker %s listening on %s:%d', worker.id, addr.address, addr.port);
});
cluster.on('disconnect', function(worker) {
console.log('worker %s disconnected', worker.id);
});
cluster.on('exit', function(worker, code, signal) {
console.log('worker %s died (%s)', worker.id, signal || code);
if (!worker.suicide) {
console.log('restarting worker');
cluster.fork();
}
});
} else {
http.createServer(app).listen(port, address, function() {
var addr = this.address();
console.log('listening on %s:%d', addr.address, addr.port);
self.server = this;
done();
});
}
};
03-socket.io.js
"use strict";
var _ = require('underscore'),
socketio = require('socket.io'),
locomotive = require('locomotive'),
RedisStore = require("socket.io/lib/stores/redis"),
redis = require("socket.io/node_modules/redis"),
v1 = require(__dirname + '/../app/socket.io/v1'),
sockets = require(__dirname + '/../../app/socket/socket'),
config = require(__dirname + '/../app/global'),
cluster = require('cluster');
module.exports = function () {
if (!cluster.isMaster) {
this.io = socketio.listen(this.server);
var pub = redis.createClient(),
sub = redis.createClient(),
client = redis.createClient();
this.io.enable('browser client minification'); // send minified client
this.io.enable('browser client etag'); // apply etag caching logic based on version number
this.io.enable('browser client gzip'); // gzip the file
this.io.set("store", new RedisStore({
redisPub : pub,
redisSub : sub,
redisClient : client
}));
this.io.set('log level', 2);
this.io.set('transports', [
'websocket',
'jsonp-polling'
]);
this.io.set('close timeout', 24*60*60);
this.io.set('heartbeat timeout', 24*60*60);
this.io.sockets.on('connection', function (socket) {
console.log('connected with ' + this.io.transports[socket.id].name);
// v1 part (deprecated)
v1.events(socket);
// v1.1 part, redone
_.each(sockets['1.1'], function(Mod) {
var mod = new Mod();
mod.launch({
socket : socket,
io : this.io
});
}, this);
}.bind(this));
}
};
With polling, the client sometimes connects to a different process than the one that set up its listeners, and the same thing happens for server-to-client emits.
After a little searching, I found that socket.io needs a shared store so the connection data can be shared across processes. So I set up socket.io's RedisStore as shown in the documentation, but even with that, events do not arrive reliably and I still get this error message:
warn: client not handshaken client should reconnect
EDIT
Now the warn message is no longer raised. I changed the RedisStore to socket.io-clusterhub, BUT events are still not always delivered: it is as if the polling request is sometimes captured by a different worker than the one that set up the listeners, so nothing happens. Here is the new configuration:
'use strict';
var http = require('http'),
locomotive = require('locomotive'),
os = require('os'),
cluster = require('cluster'),
config = require(__dirname + '/../app/global'),
_ = require('underscore'),
socketio = require('socket.io'),
v1 = require(__dirname + '/../app/socket.io/v1'),
sockets = require(__dirname + '/../../app/socket/socket');
module.exports = function(done) {
var app = this.express,
port = process.env.PORT || 3000,
address = '0.0.0.0';
if(this.env == 'test'){
port = 3030;
}
var self = this;
var size = os.cpus().length;
this.clusterStore = new (require('socket.io-clusterhub'));
if (cluster.isMaster) {
for (var i = 0; i < size; ++i) {
console.log('spawning worker process %d', (i + 1));
cluster.fork();
}
cluster.on('fork', function(worker) {
console.log('worker %s spawned', worker.id);
});
cluster.on('online', function(worker) {
console.log('worker %s online', worker.id);
});
cluster.on('listening', function(worker, addr) {
console.log('worker %s listening on %s:%d', worker.id, addr.address, addr.port);
});
cluster.on('disconnect', function(worker) {
console.log('worker %s disconnected', worker.id);
});
cluster.on('exit', function(worker, code, signal) {
console.log('worker %s died (%s)', worker.id, signal || code);
if (!worker.suicide) {
console.log('restarting worker');
cluster.fork();
}
});
} else {
var server = http.createServer(app);
this.io = socketio.listen(server);
this.io.configure(function() {
this.io.enable('browser client minification'); // send minified client
this.io.enable('browser client etag'); // apply etag caching logic based on version number
this.io.enable('browser client gzip'); // gzip the file
this.io.set('store', this.clusterStore);
this.io.set('log level', 2);
this.io.set('transports', [
'websocket',
'jsonp-polling'
]);
//this.io.set('close timeout', 24*60*60);
//this.io.set('heartbeat timeout', 24*60*60);
}.bind(this));
this.io.sockets.on('connection', function (socket) {
console.log('connected with ' + this.io.transports[socket.id].name);
console.log('connected to worker: ' + cluster.worker.id);
// v1 part (deprecated)
v1.events(socket);
// v1.1 part, redone
_.each(sockets['1.1'], function(Mod) {
var mod = new Mod();
mod.launch({
socket : socket,
io : this.io
});
}, this);
}.bind(this));
server.listen(port, address, function() {
var addr = this.address();
console.log('listening on %s:%d', addr.address, addr.port);
self.server = this;
done();
});
}
};
From that source: http://socket.io/docs/using-multiple-nodes/
If you plan to distribute the load of connections among different
processes or machines, you have to make sure that requests associated
with a particular session id connect to the process that originated
them.
This is due to certain transports like XHR Polling or JSONP Polling
relying on firing several requests during the lifetime of the
“socket”.
To route connections to the same worker every time:
sticky-session
This is, in the socket.io documentation, the recommended way to route requests to the same worker every time.
https://github.com/indutny/sticky-session
A simple performant way to use socket.io with a cluster.
Socket.io is doing multiple requests to perform handshake and
establish connection with a client. With a cluster those requests may
arrive to different workers, which will break handshake protocol.
var sticky = require('sticky-session');
sticky(function() {
// This code will be executed only in slave workers
var http = require('http'),
io = require('socket.io');
var server = http.createServer(function(req, res) {
// ....
});
io.listen(server);
return server;
}).listen(3000, function() {
console.log('server started on 3000 port');
});
To pass messages between nodes:
socket.io-redis
This is, in socket.io documentation, the recommended way to share messages between workers.
https://github.com/automattic/socket.io-redis
By running socket.io with the socket.io-redis adapter you can run
multiple socket.io instances in different processes or servers that
can all broadcast and emit events to and from each other.
socket.io-redis is used this way:
var io = require('socket.io')(3000);
var redis = require('socket.io-redis');
io.adapter(redis({ host: 'localhost', port: 6379 }));
Also
I think you are not using socket.io v1.0.0. You might want to update your version in order to get more stability.
You can check their migration guide at http://socket.io/docs/migrating-from-0-9/
There is a step missing from the socket.io docs when using
var io = require('socket.io')(3000);
var redis = require('socket.io-redis');
io.adapter(redis({ host: 'localhost', port: 6379 }));
You need to tell the client that you want to use 'websocket' as the only transport or it will not work... so for the constructor on the client use:
io.connect(yourURL , { transports : ['websocket']});
See my answer to a similar question here (my answer might be more appropriate on this thread):
https://stackoverflow.com/a/30791006/4127352
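A slightly fuller client-side sketch of the same idea (the URL is just a placeholder):
// Force the websocket transport so polling requests never land on a worker
// that did not perform the handshake.
var socket = io.connect('http://localhost:3000', { transports: ['websocket'] });
socket.on('connect', function () {
  console.log('connected over websocket');
});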
The code below works for me; this is where the clusters that run socket.io are created. I set config.clusterSticky to true to activate compatibility between cluster and socket.io (sticky sessions).
'use strict';
/*
var cl = console.log;
console.log = function(){
console.trace();
cl.apply(console,arguments);
};
*/
var cluster = require('cluster'),
config = require('./config/all'),
deferred = require('q').defer(),
express = require('express'),
app = express(),
http = require('http'),
sticky = require('socketio-sticky-session'),
io = require('socket.io');
// Code to run if we're in the master process or if we are not in debug mode/ running tests
if ((cluster.isMaster) &&
(process.execArgv.indexOf('--debug') < 0) &&
(process.env.NODE_ENV !== 'test') && (process.env.NODE_ENV !== 'development') &&
(process.execArgv.indexOf('--singleProcess') < 0) &&
(!config.clusterSticky)) {
console.log('for real!');
// Count the machine's CPUs
var cpuCount = process.env.CPU_COUNT || require('os').cpus().length;
// Create a worker for each CPU
for (var i = 0; i < cpuCount; i += 1) {
console.log('forking ', i);
cluster.fork();
}
// Listen for dying workers
cluster.on('exit', function (worker) {
// Replace the dead worker, we're not sentimental
console.log('Worker ' + worker.id + ' died :(');
cluster.fork();
});
// Code to run if we're in a worker process
} else {
var port = config.http.port;
var workerId = 0;
if (!cluster.isMaster) {
workerId = cluster.worker.id;
}
var server = http.createServer(app);
io.listen(server);
//TODO routes etc (core)
server.on('listening', function () {
console.log('Slave app started on port ' + port + ' (' + process.env.NODE_ENV + ') cluster.worker.id:', workerId);
});
if(config.clusterSticky && (process.env.NODE_ENV !== 'test') && (process.env.NODE_ENV !== 'development')) {
sticky(server).listen(port);
} else {
server.listen(port);
}
deferred.resolve(server);
}
module.exports = deferred.promise;
I have a nodejs server structured like so:
(app.js):
var fs = require('fs'),
http = require('http'),
https = require('https'),
express = require('express'),
connect = require('express/node_modules/connect'),
app = module.exports = express();
var ssl_options = {
key: fs.readFileSync('/etc/nginx/ssl/server.key'),
cert: fs.readFileSync('/etc/nginx/ssl/server.crt')
};
var server = https.createServer(ssl_options, app);
// EXPRESS
// app.set('view options', { layout: false });
var auth_token = "asdfasfdasdfasdf";
var express_session_id = "express.sid";
app.configure(function () {
app.use(express.cookieParser());
app.use(express.session({secret: auth_token, key: express_session_id}));
app.set("view options", {layout: false});
});
app.get('/', function (req, res) {
console.log (req.headers);
fs.readFile(__dirname + '/index.html', function(err, data){
res.writeHead(200, {'Content-Type': 'text/html'});
res.write(data, 'utf8');
res.end();
});
});
app.listen = function(port) {
server.listen(port);
console_log.log('info', "COTTONMOUTH server listening on port " + port);
};
I have a cluster.js running the above app.js:
var cluster = require('cluster'),
os = require('os'),
workers = {};
if (cluster.isMaster) {
cluster.on('exit', function(worker, code, signal) {
if (worker.suicide === false) {
var exitCode = worker.process.exitCode;
console.error('worker ' + worker.process.pid + ' died ('+exitCode+'). restarting...');
cluster.fork();
}
});
var n = process.env.PROC || os.cpus().length;
for(var i = 0; i < n; i++ ) {
cluster.fork();
}
process.on('SIGTERM', function() {
console.log('[MASTER] Going for shutdown...');
for (var id in cluster.workers) {
console.log('\tkilling worker: ' + cluster.workers[id].process.pid);
cluster.workers[id].destroy();
}
console.log("[MASTER] Here's looking at you, kid.");
});
} else {
require('./app').listen(process.env.PORT || 3456);
}
My problem is that this setup works fine in my local VirtualBox environment (an Ubuntu VM running on a Mac host); I am able to access the Node.js server at dev.domain.com:3456.
However, when I move this to my production Rackspace server (same environment config and setup) and try to access it at prod.domain.com:3456,
the browser hangs for a bit and returns Error 324 (net::ERR_EMPTY_RESPONSE): The server closed the connection without sending any data.
I did some research and found some leads, but they weren't too helpful.
Any ideas?
UPDATE:
When I lower the port down to 90, it seems to work, which is interesting.
I am going to leave it at port 90 for now, but I would appreciate an answer as to why this is.
Thanks
I got this error message when my request grew too large (>65K). My solution was to split the data into several smaller chunks.
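A rough sketch of the chunking idea, with a hypothetical sendChunk callback standing in for however each piece is actually transmitted:
var CHUNK_SIZE = 60 * 1024; // stay below the ~65K size that triggered the error
function sendInChunks(payload, sendChunk) {
  // payload is a string or Buffer; each slice goes out as its own request
  for (var offset = 0; offset < payload.length; offset += CHUNK_SIZE) {
    sendChunk(payload.slice(offset, offset + CHUNK_SIZE));
  }
}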
Temporary workaround:
When I lower the port down to 90, it seems to work, which is interesting. I am going to leave it at port 90 for now, but if someone has an answer as to why this is, please share.
Thanks
This issue can also be encountered when you add an extra '/' from the frontend when calling a backend API.
For example:
my route is
baseUrl/api/customer
but I'm sending the request to
baseUrl/api/customer/
It will not work; at least that was the case for me. Too dumb to notice this.
Maybe this can be helpful to someone.
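Related to this, Express has a 'strict routing' setting (it also appears in the Cluster.js draft earlier) that controls whether a trailing slash counts as a different route; a minimal sketch:
var express = require('express');
var app = express();
// With 'strict routing' disabled (the Express default), /api/customer and
// /api/customer/ both reach this handler; enable it and they become two
// different routes.
app.set('strict routing', false);
app.get('/api/customer', function (req, res) {
  res.json({ ok: true });
});
app.listen(3000);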