I count the number of online websockets via the 'connection' and 'disconnect' events:
const socketIo = require('socket.io');
var on_connect = 0;
var on_disconnect = 0;
var port = 6001;
var io = socketIo(port, {
pingTimeout: 5000,
pingInterval: 10000
});
//I have only one NameSpace and root NS is not used
var ns1 = io.of('ns1');
ns1
.on('connection', function (socket) {
on_connect += 1;
socket.on('disconnect', function (reason) {
on_disconnect += 1;
});
});
...
var online = on_connect - on_disconnect;
...
But the online value does not equal the io.engine.clientsCount value, and over time the difference between them keeps growing.
Why does this happen?
What do I need to do to fix it?
The on_connect and on_disconnect variables are updated in the event callbacks, whereas the online variable is never recalculated. You will need to recalculate online every time the other variables change.
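For example, a minimal sketch against the question's own variables, recomputing online inside each callback:
var online = 0; // derived value, kept in sync below
ns1.on('connection', function (socket) {
    on_connect += 1;
    online = on_connect - on_disconnect; // recalculate on connect
    socket.on('disconnect', function (reason) {
        on_disconnect += 1;
        online = on_connect - on_disconnect; // ...and again on disconnect
    });
});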
It might be easier to use only one variable to count connections. Increment it on connection, and decrement it on disconnect. That's how I keep track of the number of connections. Then there isn't a need to calculate it every time its value is needed.
Also, the line var online = on_connect - on_disconnect; executes before either variable is modified... That's what #gvmani is trying to tell you.
Here's an example of some of what I'm doing. The code below sets up listeners for connections and disconnections and maintains a count of current connections. Note that I'm not using a namespace like the OP, but the counting portion is what matters here. I'll also note that I use connCount > 0 in the send() function, which in my application broadcasts to all connected clients.
/* ******************************************************************** */
// initialize the server
const http = require('http');
const server = http.createServer();
// Socket.io listens to our server
const io = require('socket.io').listen(server);
// Count connections as they occur, decrement when a client disconnects.
// If the counter is zero then we won't send anything over the socket.
var connCount = 0;
// A client has connected,
io.on('connection', function(socket) {
// Increment the connection counter
connCount += 1;
// log the new connection for debugging purposes.
console.log(`on connect - ${socket.id} ${connCount}`);
// The client that initiated the connection has disconnected.
socket.on('disconnect', function () {
connCount -= 1;
console.log(`on disconnect - ${socket.id} ${connCount}`);
});
});
// Start listening...
server.listen(3000);
// Send something to all connected clients (a broadcast) the
// 'channel' will indicate the destination within the client
// and 'data' becomes the payload.
function send(channel, data) {
console.log(`send() - channel = ${channel} payload = ${JSON.stringify(data)}`);
if(connCount > 0) io.emit(channel, {payload: data});
else console.log('send() - no connections');
}
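As a quick usage sketch (the channel name and payload are made up for illustration):
// Broadcast a heartbeat to whoever is connected; send() already
// skips the emit when connCount is zero.
setInterval(function() {
    send('status', { uptime: process.uptime() });
}, 5000);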
Related
I have a Node app running on AWS Elastic Beanstalk (so Node running behind Nginx). I am running Socket.IO with Redis as the memory store, and Node runs clustered using the cluster module. Generally everything works great, but every now and then a user trying to connect throws an undefined error on connection.remoteAddress. My code looks like this for the connections:
if (cluster.isMaster) {
/*
------------------------------------------------------------------------------------
This stores our workers. We need to keep them to be able to reference
them based on source IP address. It's also useful for auto-restart
We also setup a message listener to every worker in order to blast AND FILTER
socket io updates to all nodes.
------------------------------------------------------------------------------------
*/
var workers = [];
var messageRelay = function(msg) {
for(var i = 0; i < workers.length; i++) {
workers[i].send(msg);
}
};
var spawn = function(i) {
workers[i] = cluster.fork();
console.log("Hello from worker %s",workers[i].process.pid);
workers[i].on('message', messageRelay);
/*
----------------------------------------
Restart worker if it gets destroyed
----------------------------------------
*/
workers[i].on('disconnect', function(worker) {
console.log('Worker disconnected');
});
workers[i].on('exit', function(worker, code, signal) {
console.log('respawning worker', i);
spawn(i);
});
};
for (var i = 0; i < cpuCount; i++) {
spawn(i);
}
/*
--------------------------------------------------------------------------------------------
Helper function for getting a worker index based on IP address (supports IPv4 AND IPv6)
This is a hot path so it should be really fast. The way it works
is by converting the IP address to a number by removing the dots (for IPv4) and removing
the :: for IPv6, then compressing it to the number of slots we have.
Compared against "real" hashing (from the sticky-session code) and
"real" IP number conversion, this function is on par in terms of
worker index distribution only much faster.
--------------------------------------------------------------------------------------------
*/
var workerIndex = function (ip, len) {
var _ip = ip.split(/['.'|':']/),
arr = [];
for (var el in _ip) {
if (_ip[el] == '') {
arr.push(0);
}
else {
arr.push(parseInt(_ip[el], 16));
}
}
return Number(arr.join('')) % len;
}
/*
------------------------------------------------------------------------------------
Create the outside facing server listening on our port.
------------------------------------------------------------------------------------
*/
var server = net.createServer({ pauseOnConnect: true }, function(connection) {
/*
------------------------------------------------------------------------------------
We received a connection and need to pass it to the appropriate
worker. Get the worker for this connection's source IP and pass
it the connection.
------------------------------------------------------------------------------------
*/
if(connection.remoteAddress === undefined) {
console.log("BLEH: %o ", connection.remoteAddress);
return;
}
else {
var worker = workers[workerIndex(connection.remoteAddress, cpuCount)];
worker.send('sticky-session:connection', connection);
}
}).listen(port, function() {
console.log("Spun up worker %s", process.pid);
console.log('Server listening on *:' + port);
});
}
else {
var sio = require('socket.io');
var redis = require('socket.io-redis');
var ioEvents = require(__base + 'lib/ioEvents');
var app = new express();
/*
------------------------------------------------------------------------------------
Note we don't use a port here because the master listens on it for us.
------------------------------------------------------------------------------------
*/
var server = app.listen(0, 'localhost'),
io = sio(server);
/*
----------------------------------------------------------------------------------------------
Using Redis as the store instead of memory. This allows us to blast socket updates
to all processes (unfiltered). For example, we can do io.sockets.emit("message")
and it will be distributed to all node processes.
We cannot filter these messages to specific socket connections or specific configurations
(e.g. updateSquares(socket)), in order to do that we must blast an update to all workers
and let each process filter the request individually.
----------------------------------------------------------------------------------------------
*/
io.adapter(redis({host:'localhost', port: portRedis}));
/*
------------------------------------------------------------------------------------
Setup the socket listeners
------------------------------------------------------------------------------------
*/
ioEvents.incoming(io);
/*
------------------------------------------------------------------------------------
Listen to master for worker process updates
------------------------------------------------------------------------------------
*/
process.on('message', function(message, connection) {
/*
------------------------------------------------------------------------------------
Listen for special updates to all nodes
------------------------------------------------------------------------------------
*/
if(message.squareUpdate) {
console.log("worker %s received message %o", process.pid, message.squareUpdate);
ioEvents.squaresForceUpdate(message.squareUpdate);
}
/*
------------------------------------------------------------------------------------
If it's not a special message, then check to make sure it's just a sticky-session
Otherwise, just bail, no need to do anything else
------------------------------------------------------------------------------------
*/
if (message !== 'sticky-session:connection') {
return;
}
/*
------------------------------------------------------------------------------------
Emulate a connection event on the server by emitting the
event with the connection the master sent us.
------------------------------------------------------------------------------------
*/
server.emit('connection', connection);
connection.resume();
});
So the problem lies in the section above with the "BLEH" log. For some reason, remoteAddress is undefined...but only SOMETIMES. Most of the connections look just fine, but randomly I'll get a user trying to connect that throws that error. I'd like to understand what is going on here. I've read that I cannot do IP stuff when there is a proxy involved (something between Node and the User)...but 98% of the time, the connections to workers are fine and everything works as expected. Any help here is really appreciated.
Ok, I have an express-powered API where I also have socket.io running to receive/send realtime events...all works just dandy. I need to cluster my app. I set everything up based on the below code. I spin up workers, they get connections and everything works, except the fact that now I can't "blast" to all socket.io connections. Here is the setup (taken from this):
var express = require('express'),
cluster = require('cluster'),
net = require('net'),
sio = require('socket.io'),
sio_redis = require('socket.io-redis');
var port = 3000,
num_processes = require('os').cpus().length;
if (cluster.isMaster) {
// This stores our workers. We need to keep them to be able to reference
// them based on source IP address. It's also useful for auto-restart,
// for example.
var workers = [];
// Helper function for spawning worker at index 'i'.
var spawn = function(i) {
workers[i] = cluster.fork();
// Optional: Restart worker on exit
workers[i].on('exit', function(worker, code, signal) {
console.log('respawning worker', i);
spawn(i);
});
};
// Spawn workers.
for (var i = 0; i < num_processes; i++) {
spawn(i);
}
// Helper function for getting a worker index based on IP address.
// This is a hot path so it should be really fast. The way it works
// is by converting the IP address to a number by removing the dots,
// then compressing it to the number of slots we have.
//
// Compared against "real" hashing (from the sticky-session code) and
// "real" IP number conversion, this function is on par in terms of
// worker index distribution only much faster.
var workerIndex = function (ip, len) {
var _ip = ip.split(/['.'|':']/),
arr = [];
for (var el in _ip) {
if (_ip[el] == '') {
arr.push(0);
}
else {
arr.push(parseInt(_ip[el], 16));
}
}
return Number(arr.join('')) % len;
}
// Create the outside facing server listening on our port.
var server = net.createServer({ pauseOnConnect: true }, function(connection) {
// We received a connection and need to pass it to the appropriate
// worker. Get the worker for this connection's source IP and pass
// it the connection.
var worker = workers[workerIndex(connection.remoteAddress, num_processes)];
worker.send('sticky-session:connection', connection);
}).listen(port);
} else {
// Note we don't use a port here because the master listens on it for us.
var app = new express();
// Here you might use middleware, attach routes, etc.
// Don't expose our internal server to the outside.
var server = app.listen(0, 'localhost'),
io = sio(server);
// Tell Socket.IO to use the redis adapter. By default, the redis
// server is assumed to be on localhost:6379. You don't have to
// specify them explicitly unless you want to change them.
io.adapter(sio_redis({ host: 'localhost', port: 6379 }));
// Here you might use Socket.IO middleware for authorization etc.
// Listen to messages sent from the master. Ignore everything else.
process.on('message', function(message, connection) {
if (message !== 'sticky-session:connection') {
return;
}
// Emulate a connection event on the server by emitting the
// event with the connection the master sent us.
server.emit('connection', connection);
connection.resume();
});
}
So I connect from various machines to test concurrency, the workers do their thing, and all is good, but when I get an IO connection, I log the TOTAL "connected" count, and it's always 1 per instance. I need a way to say
allClusterForks.emit(stuff)
I get the connection on the correct worker pid, but "ALL CONNECTIONS" always returns 1.
io.on('connection', function(socket) {
console.log('Connected to worker %s', process.pid);
console.log("Adapter ROOMS %s ", io.sockets.adapter.rooms);
console.log("Adapter SIDS %s ", io.sockets.adapter.sids);
console.log("SOCKETS CONNECTED %s ", Object.keys(io.sockets.connected).length);
});
I can see the subscribe/unsubscribe coming in using Redis MONITOR
1454701383.188231 [0 127.0.0.1:63150] "subscribe" "socket.io#/#gXJscUUuVQGzsYJfAAAA#"
1454701419.130100 [0 127.0.0.1:63167] "subscribe" "socket.io#/#geYSvYSd5zASi7egAAAA#"
1454701433.842727 [0 127.0.0.1:63167] "unsubscribe" "socket.io#/#geYSvYSd5zASi7egAAAA#"
1454701444.630427 [0 127.0.0.1:63150] "unsubscribe" "socket.io#/#gXJscUUuVQGzsYJfAAAA#"
These are connections from 2 different machines. By using the socket.io redis adapter, I would expect these subscriptions to be coming in on the same redis connection, but they are different.
Am I just totally missing something? There's a surprising lack of documentation or articles on this that aren't completely outdated, wrong, or ambiguous.
EDIT:
Node v5.3.0
Redis v3.0.6
Socket.io v1.3.7
So if anyone comes across this: I figured out that "looking" at the counts of connected sockets across processes is not actually supported, but broadcasting or emitting to them is. So I've basically just been "testing" for no reason. All works as expected. I WILL be rewriting the socket.io-redis adapter to allow checking counts across processes.
There was a pull request a few years ago to implement support for what I was trying to do (https://github.com/socketio/socket.io-redis/pull/15); I might try cleaning that up and re-submitting.
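Until that exists, one workaround is to keep the count in Redis itself rather than in any single process, since all the workers already share a Redis instance. This is only a sketch, not the adapter's API: the key name conn_count is made up, and the count will drift if a worker dies without firing its disconnect handlers.
var redis = require('redis');
var counter = redis.createClient(); // defaults to 127.0.0.1:6379

io.on('connection', function(socket) {
    counter.incr('conn_count'); // hypothetical shared key
    socket.on('disconnect', function() {
        counter.decr('conn_count');
    });
});

// Any worker can then read the cluster-wide total:
function totalConnections(cb) {
    counter.get('conn_count', function(err, n) {
        cb(err, parseInt(n, 10) || 0);
    });
}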
I'm trying to build the back half of a Paranoid Pirate Pattern, a ROUTER that sends work out to a set of DEALER nodes (it's possible that I'm misunderstanding the diagram). For now I just want the DEALERs to echo the work back or just send back a message that says "done". The problem is that the worker node (DEALER) never receives any messages.
var buildSocket, connectionTemplate, delay, frontPort, log, q, qPort, worker, workerPort, zmq;
zmq = require("zmq");
frontPort = 5000;
qPort = 5100;
workerPort = 5200;
connectionTemplate = "tcp://127.0.0.1:";
log = console.log;
debugger;
delay = process.argv[2] || 1000;
buildSocket = function(desc, socketType, port) {
var socket;
log("creating " + socketType + " socket");
socket = zmq.socket(socketType);
socket.identity = "" + desc + "-" + socketType + "-" + process.pid + "-" + port;
return socket;
};
q = buildSocket('q_output', 'router', qPort);
worker = buildSocket('worker', 'dealer', workerPort);
worker.bind("" + connectionTemplate + workerPort);
q.connect("" + connectionTemplate + workerPort);
q.on('message', function() {
var args;
args = Array.apply(null, arguments);
log('queue received ' + JSON.stringify(arguments));
return worker.send('work done');
});
worker.on('message', function() {
var args;
log('back received ' + JSON.stringify(arguments));
args = Array.apply(null, arguments);
return q.send(args);
});
setInterval((function() {
var value;
value = Math.floor(Math.random() * 100);
console.log(q.identity + ": sending " + value);
q.send(value);
}), delay);
The queue and worker on 'message' events never fire. The way I understand this is: you set up the ROUTER node, bind it to a port (for return messages), set up the DEALER nodes and bind them to a port, then connect the ROUTER to the DEALER port and start sending messages. In practice, messages are sent but never received:
creating router socket
creating dealer socket
q_output-router-60326-5100: sending 30
q_output-router-60326-5100: sending 25
q_output-router-60326-5100: sending 65
q_output-router-60326-5100: sending 68
q_output-router-60326-5100: sending 50
q_output-router-60326-5100: sending 88
You've got things a little backwards here. Think of a DEALER socket as a modified REQ socket... it should be initiating your messages to your router. A ROUTER socket is more like a modified REP socket... it should be responding to the initial request sent by your dealer.
You don't strictly need to follow that pattern with ROUTER/DEALER pairings... but it definitely makes things much easier, so you should stick with it while you're learning.
The second thing that sticks out to me is that in your message handlers, you've got the wrong socket sending messages.
Take for instance this code (directly copied without modification):
q.on('message', function() {
var args;
args = Array.apply(null, arguments);
log('queue received ' + JSON.stringify(arguments));
return worker.send('work done');
});
... that says (in pseudocode):
when `q` receives a message from `worker`
print out the message we received
now have `worker` send *another* message that says "work done"
What you want is something more like the following (simplified):
var zmq = require("zmq");
var log = console.log; // shorthand used by the handlers below
var q = zmq.socket('router');
var worker = zmq.socket('dealer');
// I've switched it so the router is binding and the worker is connecting
// this is somewhat arbitrary, but generally I'd consider workers to be less
// reliable, more transient, and also more numerous. I'd think of the queue
// as the "server"
// I've used bindSync, which is synchronous, but that's typically OK in the
// startup phase of a script, and it simplifies things. If you're spinning
// up new sockets in the middle of your script, using the async bind()
// is more appropriate
q.bindSync('tcp://127.0.0.1:5200');
worker.connect('tcp://127.0.0.1:5200');
q.on('message', function() {
var args;
args = Array.apply(null, arguments);
log('queue received ' + JSON.stringify(arguments));
// if we've received a message at our queue, we know the worker is ready for
// more work, so we ready some new data, regardless of whether we
// received work back
var value = Math.floor(Math.random() * 100);
// note that the socket that received the message is responding back
if (args[1].toString() == 'ready') console.log('worker now online');
else console.log('work received: '+args[1].toString());
// we need to specify the "ID" of our worker as the first frame of
// the message
q.send([args[0], value]);
// we don't need to return anything, the return value of a
// callback is ignored
});
worker.on('message', function() {
var args;
log('back received ' + JSON.stringify(arguments));
args = Array.apply(null, arguments);
// we're just echoing back the "work" we received from the queue
// for additional "workiness", we wait somewhere between 10-1000
// milliseconds to respond
setTimeout(function() {
worker.send(args[0].toString());
}, parseInt(args[0].toString())*10);
});
setTimeout((function() {
var value;
console.log('WORKER STARTING UP');
// the worker starts the communication, indicating it's ready
// rather than the queue just blindly sending work
worker.send('ready'); // sending the first message, which we catch above
}), 500); // In my experience, half a second is more than enough, YMMV
... as you can see, the pattern is:
Worker indicates readiness
Queue sends available work
Worker completes work and sends back
Queue receives completed work and sends back more work
GOTO 3
I've written a small Socket.IO server, which works fine: I can connect to it and I can send/receive messages, so everything is working OK. Only the relevant part of the code is presented here:
var RedisStore = require('socket.io/lib/stores/redis');
const pub = redis.createClient('127.0.0.1', 6379);
const sub = redis.createClient('127.0.0.1', 6379);
const store = redis.createClient('127.0.0.1', 6379);
io.configure(function() {
io.set('store', new RedisStore({
redisPub : pub,
redisSub : sub,
redisClient : store
}));
});
io.sockets.on('connection', function(socket) {
socket.on('message', function(msg) {
pub.publish("lobby", msg);
});
/*
* Subscribe to the lobby and receive messages.
*/
var sub = redis.createClient('127.0.0.1', 6379);
sub.subscribe("lobby");
sub.on('message', function(channel, msg) {
socket.send(msg);
});
});
I've also written the script presented below, which connects to the server, spawns 1000 connections, and then sends a message from every socket each 10 milliseconds in the setInterval function, so it generates quite a lot of traffic.
#!/usr/bin/env node
var io = require('socket.io-client');
var reconn = {'force new connection': true};
var sockets = [];
var num = 1000;
function startSocket(i) {
sockets[i] = io.connect("http://127.0.0.1:8080", reconn);
sockets[i].on('connect', function() {
console.log("Socket["+i+"] connected.");
});
sockets[i].on('message', function(msg) {
console.log("Socket["+i+"] Message received: "+msg);
});
}
/*
* Start number of sockets.
*/
for(var i=0; i<num; i++) {
startSocket(i);
}
/*
* Send messages forever.
*/
setInterval(function() {
for(var i=0; i<num; i++) {
sockets[i].send("Hello from socket "+i+".");
}
}, 10);
This script is a benchmark tool spawning 1000 connections to the server, but after running for several minutes, the server dies with the following error message:
node.js:0
// Copyright Joyent, Inc. and other Node contributors.
^
RangeError: Maximum call stack size exceeded
I know that there's not enough stack space available, so the exception occurs and the process is terminated, but even if I enlarge the stack with the --stack-size flag, this doesn't actually solve the problem, because I can always spawn more connections, which will eventually kill the server.
My question is: how can I prevent this? This is an effective DoS scenario, where anybody can hack together this little script and force the Node server to terminate, but I would like to prevent this from happening. I would like the Node server to never terminate, just to process messages slowly.
Any ideas how this can be prevented? I'm not sure that I would like to block IPs, since I would also like mobile phones to log in to the system; many of them use the same IP, so the Node server could mistakenly think a DoS attack is in place from one mobile network operator and block its IP.
Thank you
If you would like your node server to run forever, no matter what, use https://github.com/nodejitsu/forever
As for the exception: my hunch is that var sub = redis.createClient('127.0.0.1', 6379); may allocate a variable on the stack each time a connection is established.
I would first try to put var subs = [] in the global scope and
subs[socket.id] = redis.createClient('127.0.0.1', 6379);
Or something like socket.sub = redis.createClient('127.0.0.1', 6379); to piggyback on the existing, hopefully heap-based, socket.io data structures.
If that doesn't work, try to isolate the problem by removing the use of Redis...
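For illustration, a minimal sketch of the piggyback idea, with the extra step of closing the per-socket client on disconnect so the Redis connections themselves don't pile up (my assumption that this contributes to the problem, but worth doing regardless):
io.sockets.on('connection', function(socket) {
    // One subscriber per socket, stored on the socket object (heap-allocated).
    socket.sub = redis.createClient(); // defaults to 127.0.0.1:6379
    socket.sub.subscribe('lobby');
    socket.sub.on('message', function(channel, msg) {
        socket.send(msg);
    });
    socket.on('disconnect', function() {
        socket.sub.unsubscribe('lobby');
        socket.sub.quit(); // release the Redis connection
    });
});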
I know socket.io has a built-in feature for reconnecting; however, I don't think it is working, and from what I have seen from others, it's not working for them either.
If a user puts their computer to sleep, it disconnects them, and when they open it back up they are no longer connected, so they don't get any of the notifications or anything until they refresh the page. Perhaps it's just something that I'm not doing correctly?
var io = require('socket.io').listen(8080);
var users = {};
////////////////USER CONNECTED/////
console.log("Sever is now running");
io.sockets.on('connection', function (socket) {
//Tell the client that they are connected
socket.emit('connected');
//Once the user's session is received
socket.on('session', function (session) {
//Add users to users variable
users[socket.id] = {userID:session, socketID:socket};
//When user disconnects
socket.on('disconnect', function () {
//socket.socket.connect();
var count = 0;
for (var key in users) {
if (users[key].userID == session) ++count;
if (count == 2) break;
}
if (count == 1) {
socket.broadcast.emit('disconnect', { data : session});
}
//Remove the user's session id from the users variable
delete users[socket.id];
});
socket.on('error', function (err) {
//socket.socket.connect();
});
});
});
socket.emit("connection") needs to be called when the user reconnects, or at least the events that happen in that event need to be called.
Also socket.socket.connect(); doesn't work, it returns with an error and it shuts the socket server down with an error of "connect doesn't exist".
The problem is related to io.connect params.
Look at this client code (it will try to reconnect forever, with a maximum delay of 3 seconds between attempts):
var ioParams = {'reconnection limit': 3000, 'max reconnection attempts': Number.MAX_VALUE, 'connect timeout': 7000};
var socketAddress = null;
var socket = io.connect(socketAddress, ioParams);
There are two important parameters there, related to your problem:
reconnection limit - caps the delay between reconnect attempts. Normally the delay keeps growing for the duration of a server outage.
max reconnection attempts - how many times to try. The default is 10, and in most cases this is why the client stops trying.
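Even with those settings, server-side state from the original connection (like the users entry in the question) is gone after a reconnect, so the client should re-send its session on every connect, not just the first one. A sketch against the socket created above, assuming the 0.9-style client API from the question (mySessionId is a placeholder for your app's own session token):
// 'connect' fires on the first connection AND after every successful
// reconnect, so re-register the session here.
socket.on('connect', function () {
    socket.emit('session', mySessionId);
});

// Optional: log reconnection progress.
socket.on('reconnecting', function (delay, attempt) {
    console.log('reconnect attempt ' + attempt + ' in ' + delay + 'ms');
});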