On an AWS node, socketcluster can't accept more than 1000 connections - node.js

I am running a simple socketcluster node.js server and connecting to it from a node.js WebSocket client.
Running the server on my local Ubuntu 14.04 machine, I could connect more than 10,000 clients. But on an AWS EC2 (c3.large) Ubuntu 14.04 instance, the same code accepts fewer than 1,000 connections.
I modified /etc/security/limits.conf on the EC2 instance and set the "soft nofile" and "hard nofile" limits to 65535.
Setting the POSIX soft limit, as suggested in "Node.js maxing out at 1000 concurrent connections", did not help either.
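One way to confirm which limits the Node process actually sees is node-posix's getrlimit (a minimal sketch, assuming the same posix module used in the code below):
var posix = require('posix');

// Print the file-descriptor limits in effect for this process; if
// limits.conf took effect for this session, these should show 65535.
console.log(posix.getrlimit('nofile')); // e.g. { soft: 1024, hard: 4096 }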
Other sysctl parameters don't differ much between my local Ubuntu machine and the EC2 instance.
Latency is probably not the issue: I tried connecting from multiple client machines and the connection count still stayed below 1,000.
Is there any AWS environment setting that could affect performance?
Could the volume of messages to and from EC2 be a limitation?
var posix = require('posix');
posix.setrlimit('nofile', {soft: 10000});

var SocketCluster = require('socketcluster').SocketCluster;
var numCPUs = require('os').cpus().length;
var numWorkers = numCPUs;
var start = Date.now();

console.log("..... Starting Server....");

process.on('uncaughtException', function (err) {
    console.log("***** SEVERE ERROR OCCURRED!!! *****");
    console.log(err);
});

var socketCluster = new SocketCluster({
    balancers: 1,
    workers: numWorkers,
    stores: 1,
    port: 7000,
    appName: 'mysimapp',
    workerController: __dirname + '/sim_server.js',
    addressSocketLimit: 0,
    socketEventLimit: 100,
    rebootWorkerOnCrash: true,
    useSmartBalancing: true
});
--sim_server.js--
module.exports.run = function(worker) {
    var posix = require('posix');
    posix.setrlimit('nofile', {soft: 10000});

    var connection_db = {};
    var opencount = 0;
    var closecount = 0;
    var msgcount = 0;

    function status() {
        console.log('open: ' + opencount);
        console.log('close: ' + closecount);
        //console.log('receive: ' + msgcount);
        setTimeout(function() {
            status();
        }, 10000);
    }
    status();

    var websocket_server = worker.getSCServer();

    websocket_server.on('connection', function(socket) {
        var mac;
        socket.on('mac-id', function(data) {
            opencount++;
            mac = data;
            connection_db[mac] = socket;
        });
        socket.on('message', function(data) {
            msgcount++;
        });
        socket.on('close', function() {
            delete connection_db[mac];
            closecount++;
        });
    });

    process.once('SIGINT', function() {
        process.exit(0);
    });
};

My bad: there wasn't any issue in the code or with AWS.
In my setup, the switch/ISP connection used to reach AWS couldn't handle that many concurrent connections.

Related

socket.io-redis adapter timeout reached while waiting for clients response error

In my project I'm using socket.io 2.3.0 and socket.io-redis 5.2.0 for data broadcasting between servers. In that setup I'm hitting a Redis timeout from time to time, and I'm not sure why. On my server I run just a single Node process, and I use Redis to store data and share it with other connections. Could it be relevant that I use almost 5 of the 15 Redis databases? In production I hit this problem more than ten times in a single day. Please help us resolve this issue.
Stack Trace:
Error: timeout reached while waiting for clients response
at Timeout._onTimeout (/var/www/html/project/node_modules/socket.io-redis/index.js:485:48)
at listOnTimeout (internal/timers.js:555:17)
at processTimers (internal/timers.js:498:7)
Here is my Node entry point.
var port = 12300;
var io = require('socket.io')(port);
var redis = require('redis');
const redis_adap = require("socket.io-redis");

io.adapter(redis_adap({
    host: '127.0.0.1',
    port: 6379,
    requestsTimeout: 5000, // I tried up to 20000 but the issue still occurred
}));

io.on('connection', function(socket) {
    var player = new Player(); // Player and CONFIG are defined elsewhere in the project
    var redisClient = redis.createClient(); // was `new redis()`, which is not a constructor
    var roomId;
    player.id = socket.id;

    socket.on('ping', function() {
        socket.emit('pong');
    });

    socket.on('message', function(data) {
        let _messageIndex = data.e;
        let _id = player.id;
        let returnData = {
            i: _id,
            e: _messageIndex
        };
        socket.broadcast.to(player.roomid).emit('message', returnData);
    });

    socket.on('UpdateHorn', function() {
        let _id = player.id;
        let returnData = {
            i: _id
        };
        socket.broadcast.to(player.roomid).emit('UpdateHorn', returnData);
    });

    socket.on('UpdateTiles', function(data) {
        let returnData = {
            t: data.t
        };
        socket.broadcast.to(player.roomid).emit('UpdateTiles', returnData);
    });

    socket.on('getLobbyDetails', function() {
        socket.emit('getLobbyDetails', { lobby: CONFIG.getLobbyDetails() });
    });
});
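For what it's worth, this timeout typically fires when an inter-node request sent through the Redis adapter does not get a reply from every registered node within requestsTimeout (for example, when a node died without cleaning up after itself). A minimal sketch of the kind of call that goes through that request/response path, assuming the adapter configured above:
// socket.io-redis 5.x: cross-node queries are published via Redis and
// wait for every node to respond; a stale or dead node makes them time out.
io.of('/').adapter.clients(function(err, clients) {
    if (err) {
        // err.message: 'timeout reached while waiting for clients response'
        return console.error(err);
    }
    console.log('socket ids across all nodes:', clients);
});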

Socket.io fs.readFileSync for many connections at same time

This is not really a question, but I wonder whether what I did is correct, because it's working!
So, let's get to it: I'm monitoring many interfaces (PPPoE clients) at the same time to track their traffic, reading the statistics from Linux.
I'm using the npm packages express, socket.io and socket.io-stream.
Client:
var sessionsAccel = $('table.accel').DataTable([]);

sessionsAccel.on('preDraw', function() {
    $('.interfaceAccel').each(function(i) {
        var t = $(this).data();
        sockets['socket' + t.id].disconnect();
        delete speeds['tx_bytes' + t.id];
        delete speeds['rx_bytes' + t.id];
    });
})
.on('draw', function() {
    $('.interfaceAccel').each(function(i) {
        var t = $(this).data();
        sockets['socket' + t.id] = io.connect('http://172.16.101.2:3000/status', {
            query: 'interface=' + t.interface,
            'forceNew': true
        });
        sockets['socket' + t.id].on("connect", function() {
            ss(sockets['socket' + t.id]).on('sendStatus', function(stream, data) {
                if (typeof speeds['tx_bytes' + t.id] != 'undefined') {
                    var speedtx = (data.tx_bytes - speeds['tx_bytes' + t.id]) * 8 / 1000;
                    var speedrx = (data.rx_bytes - speeds['rx_bytes' + t.id]) * 8 / 1000;
                    var speedtx_info, speedrx_info;
                    if (speedtx > 1000) {
                        speedtx = speedtx / 1000;
                        speedtx = speedtx.toFixed(2);
                        speedtx_info = speedtx + ' Mbps';
                    } else {
                        speedtx = speedtx.toFixed(2);
                        speedtx_info = speedtx + ' kbps';
                    }
                    if (speedrx > 1000) {
                        speedrx = speedrx / 1000;
                        speedrx = speedrx.toFixed(2);
                        speedrx_info = speedrx + ' Mbps';
                    } else {
                        speedrx = speedrx.toFixed(2);
                        speedrx_info = speedrx + ' kbps';
                    }
                    $('.tx_' + t.id).html(speedtx_info);
                    $('.rx_' + t.id).html(speedrx_info);
                }
                speeds['tx_bytes' + t.id] = data.tx_bytes;
                speeds['rx_bytes' + t.id] = data.rx_bytes;
            });
        });
    });
});
Server:
const app = require('express')();
const http = require('http').createServer(app);
const io = require('socket.io')(http);
const ss = require('socket.io-stream');
const path = require('path');
const fs = require('fs');

function getIntInfo(interface) {
    if (fs.existsSync('/sys/class/net/' + interface + '/statistics/tx_bytes')) {
        var tx_bytes = fs.readFileSync('/sys/class/net/' + interface + '/statistics/tx_bytes').toString();
        var rx_bytes = fs.readFileSync('/sys/class/net/' + interface + '/statistics/rx_bytes').toString();
        var tx_packets = fs.readFileSync('/sys/class/net/' + interface + '/statistics/tx_packets').toString();
        var rx_packets = fs.readFileSync('/sys/class/net/' + interface + '/statistics/rx_packets').toString();
        return {tx_bytes: tx_bytes, rx_bytes: rx_bytes, tx_packets: tx_packets, rx_packets: rx_packets};
    } else {
        return false;
    }
}

io.of('/status').on('connection', function(socket) {
    var query = socket.handshake.query['interface'];
    var timer = setInterval(function() {
        var stream = ss.createStream();
        var info = getIntInfo(query);
        ss(socket).emit('sendStatus', stream, info);
    }, 1000);
    socket.on('disconnect', function() {
        socket.disconnect(true);
        //console.info('disconnected user (id=' + socket.id + ').');
    });
});

http.listen(3000, function() {
    console.log('listening on *:3000');
});
That's it: every row of the DataTable (one per interface) opens a socket connection and retrieves the statistics.
My question is: will this mess up my server with all the I/O from reading these files?
Since you're doing this every second for every connected client, you should probably cache this data so it doesn't have to be read from disk or sent over the wire when it hasn't changed, saving both server load and bandwidth. But the details of how best to do that depend on particulars of your application that you haven't included.
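As one illustration (hypothetical names, and assuming the promise-based getIntInfo() shown further below), a small time-based cache could look like this:
const cache = new Map();  // interface name -> { time, info }
const CACHE_MS = 1000;    // assumed acceptable staleness window

function getIntInfoCached(interface) {
    const hit = cache.get(interface);
    if (hit && Date.now() - hit.time < CACHE_MS) {
        // serve every client in the window from a single disk read
        return Promise.resolve(hit.info);
    }
    return getIntInfo(interface).then(info => {
        cache.set(interface, { time: Date.now(), info });
        return info;
    });
}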
You can at least use asynchronous I/O like this:
const util = require('util');
const fs = require('fs');
const readFile = util.promisify(fs.readFile);

function getIntInfo(interface) {
    function readInfo(name) {
        return readFile('/sys/class/net/' + interface + '/statistics/' + name).then(data => data.toString());
    }
    return Promise.all([
        readInfo('tx_bytes'),
        readInfo('rx_bytes'),
        readInfo('tx_packets'),
        readInfo('rx_packets')
    ]).then(([tx_bytes, rx_bytes, tx_packets, rx_packets]) => {
        return {tx_bytes, rx_bytes, tx_packets, rx_packets};
    }).catch(err => {
        console.log(err);
        return false;
    });
}
And, you have to stop the interval any time a client disconnects and change how it calls getIntInfo():
io.of('/status').on('connection', function(socket) {
    var query = socket.handshake.query['interface'];
    var timer = setInterval(function() {
        getIntInfo(query).then(info => {
            var stream = ss.createStream();
            ss(socket).emit('sendStatus', stream, info);
        });
    }, 1000);
    socket.on('disconnect', function() {
        // stop the timer for this connection
        clearInterval(timer);
    });
});
Now that I think about it a bit more, you could improve scalability quite a bit by having just one interval timer that reads the data and sends that one set of data to all clients connected to the /status namespace. You would reduce the file reading from once per second per client to just once per second, no matter how many clients are connected.
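A rough sketch of that version (hypothetical interface list; it also drops socket.io-stream and emits plain objects, one event per interface name):
const interfaces = ['ppp0', 'ppp1'];  // hypothetical: the interfaces being monitored

setInterval(function() {
    interfaces.forEach(function(name) {
        getIntInfo(name).then(function(info) {
            if (info) {
                // one read per second per interface, fanned out to every
                // client connected to the /status namespace
                io.of('/status').emit('sendStatus_' + name, info);
            }
        });
    });
}, 1000);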

TCP socket handling for multiplayer game on NodeJS

I have a multiplayer game where the server uses Node.js and a TCP socket (net.createServer) to communicate with the client.
I have thousands of clients connecting to the server, and many people complain that they are constantly being disconnected.
Here is how my server handles connections now:
Init:
var net = require("net");
server = net.createServer(function(socket) {
socket.setTimeout(15000);
socket.setKeepAlive(true);
socket.myBuffer = "";
socket.msgsQue = [];
socket.on("data", onData);
socket.on("error", onError);
socket.on("end", onClientDisconnect);
socket.on("timeout", onClientDisconnect);
});
server.listen(port);
Sending to client:
var stringData = JSON.stringify({name: event, message: data});
if (!this.socket.msgsQue || typeof this.socket.msgsQue == "undefined")
    this.socket.msgsQue = [];
this.socket.msgsQue.unshift(stringData);

var i = this.socket.msgsQue.length;
while (i--) {
    if (this.socket.writable) {
        var elem = this.socket.msgsQue[i];
        this.socket.write(elem + "\0");
        this.socket.msgsQue.splice(i, 1);
    } else {
        // unable to write at index "i";
        // will send the rest on the next attempt
        break;
    }
}
When disconnected:
var onClientDisconnect = function() {
    this.myBuffer = "";
    delete this.myBuffer;
    this.msgsQue = [];
    delete this.msgsQue;
    this.destroy();
};
Receiving from client:
var onData = function(data) {
    if (!data || !data.toString()) return;
    var raw = data.toString();
    this.myBuffer += raw;
    var d_index = this.myBuffer.indexOf('\0'); // find the delimiter
    // keep going until no delimiter can be found
    var string;
    while (d_index > -1) {
        // extract the string up to the delimiter
        string = this.myBuffer.substring(0, d_index);
        // cut off the processed chunk
        this.myBuffer = this.myBuffer.substring(d_index + 1);
        onMessage(string, this); // handle the message
        // find the next delimiter
        d_index = this.myBuffer.indexOf('\0');
    }
};
A problem I notice is that msgsQue becomes huge when the socket is not writable, while the disconnect handler does not fire (or fires much later).
Can you give me some advice on how to optimize this?
I noticed that sometimes I get disconnected even though I can still ping the server, so it is definitely a server-side problem. Could it be caused by high load on the server itself?
(I do not want to use socket.io because last year I had many problems with it: memory leaks, freezing the server app, no support, etc.)
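One common backpressure pattern for this situation (a sketch with hypothetical names, not the game's actual code) is to flush the queue from the socket's 'drain' event instead of retrying on the next send, and to cap the queue so a stalled client gets dropped:
var MAX_QUEUE = 1000; // hypothetical cap; tune for the game's traffic

function sendOrQueue(socket, stringData) {
    socket.msgsQue.unshift(stringData);
    if (socket.msgsQue.length > MAX_QUEUE) {
        // stalled client: free the memory instead of buffering forever
        socket.destroy();
        return;
    }
    flush(socket);
}

function flush(socket) {
    while (socket.msgsQue.length && socket.writable) {
        // write() returning false means the kernel buffer is full;
        // stop and wait for 'drain' instead of looping
        if (!socket.write(socket.msgsQue.pop() + "\0")) break;
    }
}

// in the connection handler:
// socket.on('drain', function() { flush(socket); });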

ZeroMQ: How to notify a Publisher from a Subscriber

I use the ZeroMQ PUB/SUB pattern.
The system consists of a web server (publisher), clustered TCP servers (subscribers), and external applications (clients, which connect to the TCP servers).
A huge number of external clients connect to every TCP server. Every external client has a unique peerId, which I use as the topic in the publisher. For management purposes I send messages to the TCP servers (remove peer, change, etc.). But I also need to send messages from a TCP server back to the web server (connect, disconnect, error). I haven't found the right way to do this. Can anybody suggest how to do it correctly?
Update 1
It looks like the ROUTER/DEALER pattern is the most convenient for this.
Some comments about the scripts:
External clients connect to the TCP servers (a cluster) and send a unique peerId; on the TCP server side, the TCP socket is cached under that peerId. The TCP server then sends the peerId over its ZeroMQ socket to the web server, which caches the envelope by peerId. Every n milliseconds, the web server sends messages to a random peer (it generates a peerId). The TCP servers receive these messages, look up the matching TCP socket in the cache, and forward them to the clients. The clients count the messages they receive and every n milliseconds send the count back to their TCP server, which forwards it to the web server over the ZeroMQ socket. The web server prints the counts of sent and received messages to the console every n milliseconds.
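Distilled down, the back-channel is just this (a sketch with hypothetical addresses, using the same zmq package as the scripts below): each TCP server owns a DEALER socket whose identity the web server's ROUTER captures as an envelope, so either side can initiate a message.
var zmq = require('zmq');

// Web server side: ROUTER receives [envelope, payload] and can
// later send to any known envelope.
var router = zmq.socket('router');
router.bind('tcp://127.0.0.1:12345', function (err) {
    if (err) throw err;
});
router.on('message', function (envelope, payload) {
    console.log('from %s: %s', envelope, payload);
    router.send([envelope, 'ack']); // reply to that specific TCP server
});

// TCP server side: DEALER can send upstream at any time
// (connect, disconnect, error notifications).
var dealer = zmq.socket('dealer');
dealer.identity = 'tcp-server-' + process.pid;
dealer.connect('tcp://127.0.0.1:12345');
dealer.send('client connected: some-peer-id');
dealer.on('message', function (payload) {
    console.log('from web server: %s', payload);
});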
Test JS script for the server part:
var cluster = require('cluster'),
    zmq = require('zmq'),
    net = require('net'),
    zmqport = 'tcp://127.0.0.1:12345';

var count = 10;
var countPeers = 10000;
var interval = 1;

if (cluster.isMaster) {
    for (var i = 0; i < count; i++) cluster.fork({
        TCP_SERVER: 1
    });
    cluster.fork({
        WEB_SERVER: 1
    });
    cluster.on('death', function (worker) {
        console.log('worker ' + worker.pid + ' died');
    });
} else {
    if (process.env.TCP_SERVER) {
        var sockets = Object.create(null);
        var socket = zmq.socket('dealer');
        socket.identity = 'process-' + process.pid;
        socket.connect(zmqport);
        socket.on('message', function (peerIdBuffer) {
            var peerId = peerIdBuffer.toString();
            if (typeof sockets[peerId] !== 'undefined') {
                var buffer = new Buffer(4);
                buffer.writeUInt32BE(1, 0);
                sockets[peerId].write(buffer);
            }
        });
        var server = net.createServer(function (tcpsocket) {
            tcpsocket.on('data', function (data) {
                if (!tcpsocket.peerId) {
                    var peerId = data.toString();
                    sockets[peerId] = tcpsocket;
                    tcpsocket.peerId = peerId;
                    return socket.send(['id', data]);
                }
                return socket.send(['count', data]);
            });
        });
        server.listen('13333', '0.0.0.0');
    } else {
        var countMessagesSended = 0;
        var countMessagesReceived = 0;
        var socket = zmq.socket('router');
        var clients = Object.create(null);
        socket.bind(zmqport, function (err) {
            if (err) throw err;
            setInterval(function () {
                for (var i = 0; i < countPeers; i++) {
                    var topic = Math.floor(Math.random() * countPeers) + '-peer';
                    if (typeof clients[topic] !== 'undefined') {
                        countMessagesSended++;
                        socket.send([clients[topic], topic]);
                    }
                }
            }, interval);
        });
        socket.on('message', function (envelope, messageId, data) {
            switch (messageId.toString()) {
                case "id":
                    clients[data.toString()] = envelope.toString();
                    break;
                case "count":
                    countMessagesReceived += data.readUInt32BE(0);
                    break;
            }
        });
        setInterval(function () {
            console.log('%s messages have been sent, %s received', countMessagesSended, countMessagesReceived);
            countMessagesSended = 0;
            countMessagesReceived = 0;
        }, 5000);
    }
}
Test js script for clients:
var cluster = require('cluster'),
    net = require('net');

var count = 10;

if (cluster.isMaster) {
    for (var i = 0; i < count; i++) cluster.fork({
        CLUSTER: i
    });
    cluster.on('death', function (worker) {
        console.log('worker ' + worker.pid + ' died');
    });
} else {
    var clientspernode = 1000;
    var offset = parseInt(process.env.CLUSTER, 10);
    for (var j = offset * clientspernode; j < (offset + 1) * clientspernode; j++) {
        (function (j) {
            var countMessages = 0;
            var client = net.connect({
                port: 13333,
                host: '127.0.0.1'
            }, function () {
                client.write(j + '-peer');
            });
            client.on('data', function (buffer) {
                countMessages += Math.ceil(buffer.length / 8);
            });
            client.on('error', function () {
            });
            setInterval(function () {
                var buf = new Buffer(4);
                buf.writeUInt32BE(countMessages, 0);
                client.write(buf);
                countMessages = 0;
            }, 5000);
        })(j);
    }
}

Increasing max_frame rate in amqp.js -- Hitting limits in buffer copy

I have a situation with about 50 listeners on 50 'direct' exchanges. The client and the server are in JavaScript (Node.js), using node-amqp from postwait.
Things work fairly well at a low message rate. Once the rate rises to roughly 5,000 messages per minute, amqp.js throws a buffer-copy error.
From what I could trace, max_frame_size in amqp.js is fixed at 131072.
I tried doubling the value from 128 KB to 256 KB, but doing so causes Node.js to fail silently on startup; there is no error message. I am assuming I also have to change the corresponding value (frame_max) in the RabbitMQ config.
Do I have to do anything else to increase this value? Any other suggestions would also be appreciated.
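For reference, the broker-side setting is frame_max; in the classic Erlang-term rabbitmq.config it would look roughly like this (a sketch; the negotiated frame size ends up as the minimum of the client's and the broker's values):
%% rabbitmq.config (Erlang term format)
[
  {rabbit, [
    {frame_max, 262144}  %% 256 KB, matching the doubled client value
  ]}
].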
I have attached minimal code to reproduce the error. Run the commands below in two windows:
node engine-so-client.js -c 200 -p 12000
node server-so.js
File server-so.js
var util = require('util');
var amqp = require('amqp');
var express = require('express');

function httpServer(exchange) {
    console.log("In httpServer start %s", exchange.name);
    var port;
    var app = express.createServer();
    app.get('/message/:routingKey/:message', function(req, res) {
        exchange.publish(req.params.routingKey, {'d': req.params.message});
        res.send('Published the message ' + req.params.message + '\n');
    });
    app.get('/register/:socket/:routingKey', function(req, res) {
        var queue1 = conn.queue('',
            {autoDelete: false, durable: true, exclusive: true},
            function() {
                console.log("Queue1 Callback");
                queue1.subscribe(function(message) {
                    console.log("subscribe Callback for " + req.params.routingKey + " " + message.d);
                });
                console.log("Queue Callback Binding with " + req.params.routingKey);
                queue1.bind(exchange.name, req.params.routingKey);
            });
        res.send('Started the socket at ' + req.params.socket + '\n');
    });
    app.listen(3000);
    app.use(express.logger());
    console.log('Started server on port %s', app.address().port);
}

function setup() {
    console.log("Setup");
    var exchange = conn.exchange('cf2-demo',
        {'type': 'direct', durable: false}, function() {
            var queue = conn.queue('',
                {autoDelete: false, durable: true, exclusive: true},
                function() {
                    console.log("Queue Callback Binding with test key");
                    queue.bind(exchange.name, 'testKey');
                });
            queue.on('queueBindOk', function() { httpServer(exchange); });
        });
    console.log("Completed setup %s", exchange.name);
}

var conn = amqp.createConnection(
    {host: 'localhost', login: 'guest', password: 'guest'},
    {defaultExchangeName: "cf2-demo"});
conn.on('ready', setup);
File engine-so-client.js
var program = require('commander'); // npm package name is 'commander', not 'commander.js'
var util = require('util');
var http = require('http');

program
    .version('0.0.1')
    .option('-h, --host <host>', 'Host running server', String, 'localhost')
    .option('-p, --port <port>', 'Port to open to connect messages on', Number, 12000)
    .option('-k, --key <key>,', 'Routing Key to be used', String, 'key1')
    .option('-c, --count <count>', 'Iteration count', Number, 50)
    .option('-m, --mesg <mesg>', 'Message prefix', String, 'hello')
    .option('-t, --timeout', 'Timeout in ms between message posts')
    .parse(process.argv);

function setup(host, port, key, mesg) {
    var client = http.createClient(3000, host);
    var request = client.request('GET', '/register/' + port + "/" + key);
    request.end();
    request.on('response', function(response) {
        response.on('data', function(chunk) {
            postMessage(host, port, key, mesg, 1);
        });
    });
}

function postMessage(host, port, key, mesg, count) {
    var timeNow = new Date().getTime();
    var mesgNow = mesg + "-" + count + "-" + port;
    console.log("Type: Sent Mesg, Message: %s, Time: %s", mesgNow, timeNow);
    var client1 = http.createClient(3000, host);
    var request1 = client1.request('GET', '/message/' + key + "/" + mesgNow);
    request1.end();
    count++;
    if (count < 100) {
        setTimeout(function() { postMessage(host, port, key, mesg, count); }, 1000);
    }
}

var port = program.port;
var host = program.host;
var key = program.key;
var mesg = program.mesg;
var count = program.count;
var keys = ['key1', 'key2', 'key3', 'key4', 'key5'];
var messages = ['hello', 'world', 'good', 'morning', 'bye'];
var start = port;

for (var i = 0; i < count; i++) {
    var index = i % keys.length;
    var socket = start + i;
    setup(host, socket, keys[index], messages[index]);
}
Error attached:
buffer.js:494
throw new Error('sourceEnd out of bounds');
^
Error: sourceEnd out of bounds
at Buffer.copy (buffer.js:494:11)
at frame (/home/hvram/programs/node_modules/node-amqp/amqp.js:170:10)
at header (/home/hvram/programs/node_modules/node-amqp/amqp.js:160:14)
at frameEnd (/home/hvram/programs/node_modules/node-amqp/amqp.js:205:16)
at frame (/home/hvram/programs/node_modules/node-amqp/amqp.js:172:14)
at header (/home/hvram/programs/node_modules/node-amqp/amqp.js:160:14)
at frameEnd (/home/hvram/programs/node_modules/node-amqp/amqp.js:205:16)
at frame (/home/hvram/programs/node_modules/node-amqp/amqp.js:172:14)
at header (/home/hvram/programs/node_modules/node-amqp/amqp.js:160:14)
at frameEnd (/home/hvram/programs/node_modules/node-amqp/amqp.js:205:16)
