Sticky socket.io sessions by cookie for node.js cluster without sticky express sessions - node.js

I am working with the express and socket.io libraries of Node.js on the same server, listening on the same port. I would like to use the cluster module to support round-robin load balancing, but I want the load-balancing behavior for express and socket.io to be different. The behavior is as follows:
Incoming connections for HTTP/S should connect to any single worker
Incoming connections for WS/S should connect to a specific worker, chosen by a cookie value (more broadly, by some value available when the connection is made)
Are there any available libraries to accomplish my desired behaviors? If not, how should I go about accomplishing these behaviors?

There are a bunch of ways you could do this. I'd point you to a guide on using Redis as a pub/sub, but I'll also give you a short overview of what it could look like.
Spawn two workers at startup, or however many you want:
var cluster = require('cluster');

var aWorker = cluster.fork();
var bWorker = cluster.fork();
then you need to set them up to listen on their respective ports, so using the net module:
var server1 = require('net').createServer({ pauseOnConnect: true }, function( connection ) {
// pauseOnConnect so the worker can resume() the socket once it has attached it
aWorker.send( 'ConnectionEvent' , connection );
}).listen(80); //HTTP/WS
var server2 = require('net').createServer({ pauseOnConnect: true }, function( connection ) {
bWorker.send( 'ConnectionEvent' , connection );
}).listen(443); //HTTPS/WSS
In your worker process:
var app_server = require('express')().listen( 0, 'localhost' );
var io = require('socket.io')(app_server);
io.adapter(require('socket.io-redis')({ host: '127.0.0.1', port: **REDIS PORT** }));
io.on('connection', function (socket) {
//Rest of your io server code
...
});
process.on( 'message' , function( message, connection ) {
if( connection && message === 'ConnectionEvent' ) {
app_server.emit( 'connection', connection );
connection.resume();
}
});
I believe the rooms feature of Socket.IO would accomplish what you're trying to do in your second point, rather than relying on creating new worker tasks. That's just my opinion though.
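For example, a rough sketch of grouping sockets by a cookie value with rooms might look like this; the cookie name and the parsing are illustrative assumptions, and it relies on the redis adapter from the snippet above:
io.on('connection', function (socket) {
  // Hypothetical: read a grouping value from the handshake cookie.
  // The cookie name and the regex are assumptions for illustration.
  var cookies = socket.handshake.headers.cookie || '';
  var match = /groupId=([^;]+)/.exec(cookies);
  var groupId = match ? match[1] : 'default';

  // All sockets that share the cookie value join the same room,
  // no matter which worker they landed on.
  socket.join(groupId);

  // Emitting to the room reaches every member across workers,
  // because the redis adapter relays events between processes.
  io.to(groupId).emit('groupEvent', { group: groupId });
});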

Related

Connecting to Websocket in OpenShift Online Next Gen Starter

I'm in the process of trying to get an application which I'd built on the old OpenShift Online 2 free service up and running on the new OpenShift Online 3 Starter, and I'm having a bit of trouble.
The application uses websocket, and in the old system all that was required was for the client to connect to my server on port 8443 (which was automatically routed to my server). That doesn't seem to work in the new setup however - the connection just times out - and I haven't been able to find any documentation about using websocket in the new system.
My first thought was that I needed an additional route, but 8080 is the only port option available for routing as far as I can see.
The app lives here, and the connection is made on line 21 of this script with the line:
this.socket = new WebSocket( 'wss://' + this.server + ':' + this.port, 'tabletop-protocol' );
Which becomes, in practice:
this.socket = new WebSocket( 'wss://production-instanttabletop.7e14.starter-us-west-2.openshiftapps.com:8443/', 'tabletop-protocol' );
On the back end, the server setup is unchanged from what I had on OpenShift 2, aside from updating the IP and port lookup from env as needed, and adding logging to help diagnose the issues I've been having.
For reference, here's the node.js server code (with the logic trimmed out):
var http = require( "http" );
var ws = require( "websocket" ).server;
// Trimmed some others used by the logic...
var ip = process.env.IP || process.env.OPENSHIFT_NODEJS_IP || '0.0.0.0';
var port = process.env.PORT || process.env.OPENSHIFT_NODEJS_PORT || 8080;
/* FILE SERVER */
// Create a static file server for the client page
var pageHost = http.createServer( function( request, response ){
// Simple file server that seems to be working, if a bit slowly
// ...
} ).listen( port, ip );
/* WEBSOCKET */
// Create a websocket server for ongoing communications
var wsConnections = [];
var wsServer = new ws( { httpServer: pageHost } );
// Start listening for events on the server
wsServer.on( 'request', function( request ){
// Server logic for the app, but nothing in here ever gets hit
// ...
} );
In another question it was suggested that nearly anything - including this - could be related to the ongoing general issues with US West 2, but the other related problems I was experiencing seem to have cleared, and that issue has been open for a week with no update, so I figured I'd dig deeper on the assumption that it's something I'm doing wrong rather than something on their end.
Anyone know more about this and what I need to do to make it work?

How to do graceful stop for koajs server?

There are a lot of examples of graceful stop for expressjs; how can I achieve the same for koajs?
I would like to close the database connections as well.
I have a mongoose database connection and two Oracle DB connections (https://github.com/oracle/node-oracledb).
I created an npm package, http-graceful-shutdown (https://github.com/sebhildebrandt/http-graceful-shutdown), some time ago. It works perfectly with http, express and koa. Since you also want to run your own cleanup, I modified the package so that you can now pass a cleanup function that will be called on shutdown. So basically this package handles all the http shutdown work, plus calls your cleanup function (if provided in the options):
const koa = require('koa');
const gracefulShutdown = require('http-graceful-shutdown');
const app = new koa();
...
server = app.listen(...); // app can be an express OR koa app
...
// your personal cleanup function - this one takes one second to complete
function cleanup() {
return new Promise((resolve) => {
console.log('... in cleanup')
setTimeout(function() {
console.log('... cleanup finished');
resolve();
}, 1000)
});
}
// this enables the graceful shutdown with advanced options
gracefulShutdown(server,
{
signals: 'SIGINT SIGTERM',
timeout: 30000,
development: false,
onShutdown: cleanup,
finally: function() {
console.log('Server gracefully shut down.....')
}
}
);
I have answered a variation of "how to terminate an HTTP server" many times on different node.js support channels. Unfortunately, I couldn't recommend any of the existing libraries because they are all lacking in one way or another. I have since put together a package that (I believe) handles all the cases expected of graceful HTTP server termination.
https://github.com/gajus/http-terminator
The main benefits of http-terminator are:
it does not monkey-patch Node.js API
it immediately destroys all sockets without an attached HTTP request
it allows graceful timeout to sockets with ongoing HTTP requests
it properly handles HTTPS connections
it informs connections using keep-alive that the server is shutting down by setting a connection: close header
it does not terminate the Node.js process
Usage with Koa:
import Koa from 'koa';
import {
createHttpTerminator,
} from 'http-terminator';
const app = new Koa();
const server = app.listen();
const httpTerminator = createHttpTerminator({
server,
});
await httpTerminator.terminate();
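To also cover the database side of the question, here is a hedged sketch of wiring terminate() into a signal handler; the mongoose and node-oracledb calls are assumptions based on the question's setup, not part of http-terminator itself:
import mongoose from 'mongoose';

// assumes `httpTerminator` was created as above and an Oracle `pool` exists elsewhere
process.once('SIGTERM', async () => {
  // stop serving HTTP traffic first
  await httpTerminator.terminate();

  // then close the database connections
  await mongoose.disconnect(); // closes the mongoose connection
  await pool.close(10);        // drains the node-oracledb pool, see the next answer

  // http-terminator does not exit the process itself
  process.exit(0);
});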
To make sure the Oracle DB connections are closed nicely, you can use a connection pool and call pool.close() with a drainTime of 0 or greater. This will let the app relatively cleanly interrupt any operation that is currently using a connection. It allows freeing the DB end of the connections without the DB waiting for whatever timeout period to expire before it cleans itself up. Even with two connections this is a solution I'd look at, since it doesn't matter that the pool is small. You may need to set the Oracle Net out-of-band break detection as well, see Connections and High Availability.
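A minimal sketch of that approach with node-oracledb; the pool settings and credentials are illustrative assumptions:
const oracledb = require('oracledb');

let pool;

// create a small pool at startup; even two connections are fine as a pool
async function initOracle() {
  pool = await oracledb.createPool({
    user: 'app_user',                      // illustrative credentials
    password: process.env.ORACLE_PASSWORD,
    connectString: 'localhost/XEPDB1',
    poolMin: 0,
    poolMax: 2
  });
}

// on shutdown: wait up to drainTime seconds for in-use work to finish,
// then force-close remaining connections so the DB side is freed immediately
async function closeOracle() {
  if (pool) {
    await pool.close(10); // any drainTime of 0 or greater enables the forced drain
  }
}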
Modern versions of Node.js support AbortController, so there is no need for external libraries. A simple example:
const http = require('http');
const Koa = require('koa');

const app = new Koa();
const server = http.createServer(app.callback());
const controller = new AbortController();
server.listen({
host: 'localhost',
port: 80,
signal: controller.signal
});
// middleware... etc.
app.use(async (ctx) => {
ctx.body = 'Hello World';
});
// Later, when you want to close the server.
controller.abort();

Why is my client receiving socket emits from all child node cluster workers when it should only be connected to one?

I'm trying to make a scalable socket connection handler using node.js, express, socket.io, cluster, socket.io-adapter-mongo, and mubsub. This is my first attempt to use sockets, so forgive me if I reveal my noobness here, but it is my understanding that a cluster worker can only emit to the sockets that are connected to it.
In my dev environment I have cluster forking 8 workers (the number of CPUs).
I have my workers subscribe to a mubsub channel so that they will pick up events that are published from other workers.
if (cluster.isMaster) {
var cpuCount = require("os").cpus().length;
for (var cp = 0; cp < cpuCount; cp++) {
cluster.fork();
}
} else {
io.adapter(mongo({ host: 'localhost', port: 27017, db: 'mubsub' }));
var client = mubsub('mongodb://localhost:27017/mubsub');
var channel = client.channel('test');
channel.subscribe('testEvent', function (message) {
console.log(message);
io.sockets.emit('testEvent', {message: cluster.worker.id});
});
io.sockets.on('connection', function (socket) {
console.log('connected to ' + cluster.worker.id);
channel.publish('testEvent', { message: 'connection3' });
});
...
server.listen(8080);
}
So when I try to connect from the client, the 'connection' event fires and a single console log is written by the worker that receives the connection.
That event is published to the database only once.
Each worker is subscribed to that event, and should emit to all sockets connected to that worker.
For some reason though, my connected client receives 8 messages, one for each worker.
How is the client picking up the emits from workers it should not be connected to? Am I overlooking some cluster magic here?
Not sure what version you are using but this should be true for most current versions.
From the socket.io docs (http://socket.io/docs/server-api/#server#emit):
Server#emit
Emits an event to all connected clients. The following two are equivalent:
var io = require('socket.io')();
io.sockets.emit('an event sent to all connected clients');
io.emit('an event sent to all connected clients');
So the method you are using will broadcast to all connected clients. If you want to split them across the workers, this is something you need to manage yourself.
There are a number of ways to address individual sockets (clients) that socket.io's API can help you with, but it's probably best to refer to the docs for this:
http://socket.io/docs/rooms-and-namespaces/
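For example, a hedged sketch of replying to a single client instead of broadcasting to everyone (the room name is an illustrative assumption):
io.sockets.on('connection', function (socket) {
  // reply only to this client
  socket.emit('testEvent', { message: cluster.worker.id });

  // or address a specific socket by its id: in socket.io 1.x every socket
  // automatically joins a room named after its own id
  io.to(socket.id).emit('testEvent', { message: 'just for you' });

  // or group clients explicitly and emit to the group
  socket.join('some-room');
  io.to('some-room').emit('testEvent', { message: 'room broadcast' });
});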

Socket.io 'Handshake' failing with cluster and sticky-session

I am having problems getting the sticky-session module to work properly with socket.io, even with a simple example. Following the very minimal example given in the readme (https://github.com/indutny/sticky-session), I am just trying to get this example to work:
var cluster = require('cluster');
var sticky = require('sticky-session');
var http = require('http');
if (cluster.isMaster) {
for (var i = 0; i < 4; i++) {
cluster.fork();
}
Object.keys(cluster.workers).forEach(function(id) {
console.log("Worker running with ID : " +
cluster.workers[id].process.pid);
});
}
if (cluster.isWorker) {
var anotherServer = http.createServer(function(req, res) {
res.end('hello world!');
});
anotherServer.listen(3000);
console.log('http server on 3000');
}
sticky(function() {
var io = require('socket.io')();
var server = http.createServer(function(req, res) {
res.end('socket.io');
});
io.listen(server);
io.on('connection', function onConnect(socket) {
console.log('someone connected.');
socket.on('sync', sync);
socket.on('send', send);
function sync(id) {
socket.join(id);
console.log('someone joined ' + id);
}
function send(id, msg) {
io.sockets.in(id).emit(msg);
console.log('someone sent ' + msg + ' to ' + id);
}
});
return server;
}).listen(3001, function() {
console.log('socket.io server on 3001')
});
and a simple client:
var socket = require('socket.io-client')('http://localhost:3001');
socket.on('connect', function() {
console.log('connected')
socket.emit('sync', 'secret')
});
The workers start up fine. The http servers work fine. But when the client connects, the server console logs 'someone connected.' and nothing more. The client never fires its connect event, so I think the upgrade/handshake is failing or something. If anyone can spot what I am doing wrong, that would help a lot.
Thanks!
#jordyyy: I was facing the same issue, and after some googling I found an answer.
The Socket.IO handshake completes over more than one request, and when you run it on a cluster like this you are using multiple processes, one per core.
So the handshake requests get distributed across different processes, and those processes can't talk to each other (there is no IPC for this; they are separate child processes), so most of the time the connection fails or is lost (connect/disconnect events occur frequently).
So what is the solution? The solution is socketio-sticky-session.
socketio-sticky-session manages connections based on IP address. When a client makes a request, it records the client's IP address and maps it to a process/worker, so further requests are forwarded to the same process/worker and the connection stays stable.
And when you use the Redis adapter, you can share socket connection data between all processes/workers.
For more information: https://github.com/elad/node-cluster-socket.io (you may need to patch the worker_index method if your server supports IPv6).
Just knowledge bytes. :) :)
One more thing: you don't need to fork the processes yourself; that is handled by the sticky session module.
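A rough sketch of that setup, assuming socketio-sticky-session keeps the same callback-style API as the sticky-session example in the question (the port and logging are illustrative):
var sticky = require('socketio-sticky-session');
var http = require('http');

sticky(function() {
  // this runs only in the workers; the sticky module handles the forking
  var io = require('socket.io')();
  var server = http.createServer(function(req, res) {
    res.end('socket.io');
  });
  io.listen(server);
  io.on('connection', function(socket) {
    console.log('someone connected to worker ' + process.pid);
  });
  return server;
}).listen(3001, function() {
  console.log('socket.io server on 3001');
});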
This was super old and wasn't really answered when I needed it, but my solution was to drop this bad module and any other super-confusing module and just use pub/sub with the redis adapter. The only other step was to force the transport to websockets, and if that bothers anyone then use something else. For my purposes my solution was simple, readable, didn't mess with the 'typical' socket.io API, and best of all it worked extremely well.
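For reference, a hedged sketch of that redis adapter plus websocket-only approach (the port, room name and redis settings are illustrative, and it assumes the socket.io-redis adapter):
var http = require('http');
var express = require('express');
var redisAdapter = require('socket.io-redis');

var app = express();
var server = http.createServer(app);

// force the websocket transport so the multi-request polling handshake
// never needs to reach the same worker twice
var io = require('socket.io')(server, { transports: ['websocket'] });

// every worker publishes/subscribes through redis, so emits reach
// clients connected to other workers
io.adapter(redisAdapter({ host: '127.0.0.1', port: 6379 }));

io.on('connection', function(socket) {
  socket.join('some-room');
  io.to('some-room').emit('hello', { from: process.pid });
});

server.listen(8080);
On the client, the matching option would be io('http://localhost:8080', { transports: ['websocket'] }).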

Using (socket.io + RedisStore) to communicate across multiple servers

I am working on a multiplayer online game, using Node.js and Socket.io. I expect a lot of players to join the game, so I am hosting it on Amazon OpsWorks.
The problem is that the servers aren't able to send socket events to clients connected to a different server. I am using RedisStore to manage socket.io sessions. I believed RedisStore and socket.io took care of this inter-server communication under the hood in a seamless manner. Here is a reference to another question: How does socket.io send messages across multiple servers?
But that's not the case, it seems. Messages do not go through to clients on other servers; the app works if there is only one server, but fails if I use multiple servers load-balanced using ELB on OpsWorks.
This is just an extract from the whole code. Please ignore syntax errors etc if any.
app.js
//Redis Client Initialization
var redis = require("redis");
redis_client = require('redis-url').connect('redis://xxxxx');
//setting RedisStore for socket.io
var RedisStore = require('socket.io/lib/stores/redis')
, redis = require('socket.io/node_modules/redis')
, pub = redis.createClient(11042,'-----')
, sub = redis.createClient(11042,'-----')
, client = redis.createClient(11042,'-----');
// using RedisStore in combo with Socket.io to allow communications across multiple servers
io.set('store', new RedisStore({
redis : redis,
redisPub : pub,
redisSub : sub,
redisClient : client
}));
//socket communication specific code
io.of('/game')
.on('connection', function (socket) {
socket.on('init' , function(data){
var user_id = data.user_id; // collecting user_id that was sent by the client
var socket_id = socket.id;
redis_client.set("user_socket:"+user_id, socket_id, function(err, reply){
//stored a referece to the socket id for that user in the redis database
});
});
socket.on('send_message', function(data){
var sender = data.sender_id;
var reciepient = data.reciepient_id ; // id of the user to whom message is to be sent to
redis_client.get("user_socket:"+reciepient, function(err,socket_id){
if(socket_id){
var socket = io.of('/game').sockets[socket_id];
socket.emit("message", {sender : sender}); // This fails. Messages to others servers dont go through.
}
})
})
});
You can't broadcast directly to socket objects on other servers. What the Redis store does is allow you to broadcast to 'rooms' on other servers. Thankfully, with socket.io 1.x, new connections automatically join a room with the name of their socket id. To solve your problem, change:
if(socket_id){
var socket = io.of('/game').sockets[socket_id];
socket.emit("message", {sender : sender}); // This fails. Messages to others servers dont go through.
}
to emit to the room instead of calling emit on a socket object:
if(socket_id){
io.to(socket_id).emit("message", {sender : sender}); // emits to the room named after the socket id, which works across servers
}
And you might have more luck.
