Node: creating multiple TCP connections, hard to identify which connection it is in the callback - node.js

I need to make 3 TCP connections, but it's hard to tell which connection succeeded in the callback for the "connect" event.
var net = require('net');

var clients = [];
var ports = [81, 82, 83];
for (var i = 0; i < 3; i++) {
    clients[i] = net.createConnection(ports[i], '127.0.0.1');
    clients[i].on('connect', function (conn) {
        console.log("connect is setup");
        console.log(conn); // it's always undefined, why???
        // need to set different data to the different connections
    });
}

The 'connect' event does not pass any argument to its listener, which is why conn is undefined. If I recall correctly, console.log(this); has information about the socket, since the listener is invoked on the socket itself. Your main reference to the socket is also clients[i].
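For illustration, a minimal sketch of that advice, reusing the ports and loopback address from the question (the per-port write is a made-up example of "different data"):

var net = require('net');

var ports = [81, 82, 83];
var clients = ports.map(function (port) {
    var client = net.createConnection(port, '127.0.0.1');
    client.on('connect', function () {
        // no arguments are passed here; 'this' === client (the socket)
        console.log('connected on port ' + port);
        client.write('hello from client on port ' + port); // per-connection data
    });
    return client;
});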

Node: Access Socket/Client Object Reference in ExpressJS On-Close

Get Socket Ref On Socket Close
Following this SO answer, I expect to be able to shut down the server and restart it again. To do that accountably, it sounds like I need to not just call close() on the server instance but also destroy any remaining open sockets. Memory leaks are a concern, so I would like to store the socket instances in a Set and clean up expired connections as they close.
You need to

1. subscribe to the connection event of the server and add opened sockets to an array
2. keep track of the open sockets by subscribing to their close event and removing the closed ones from your array
3. call destroy on all of the remaining open sockets when you need to terminate the server
...
const sockets = new Set(); // use a set to easily get/delete sockets
...
function serve(compilations) {
    const location = './dist', directory = express.static(location);
    const server = app
        .use('/', log, directory)
        .get('*', (req, res) => res.redirect('/'))
        .listen(3000)
        ;
    server.on('connection', handleConnection); // subscribe to connections (#1)
    console.log(`serving...(${location})`);
    return server;
}
...
function handleSocketClose(socket, a, b, ...x) { // first argument is actually false (the hadError flag), not the socket (#2.2)
    const closed = sockets.delete(socket);
    console.log(`HANDLE-SOCKET-CLOSE...(${sockets.size})`, closed, a, b); // outputs "HANDLE-SOCKET-CLOSE...(N) false undefined undefined"
}

function handleConnection(socket) {
    sockets.add(socket); // add socket/client to set (#1)
    console.log(`HANDLE-CONNECTION...(${sockets.size})`);
    socket.on('close', handleSocketClose); // subscribe to closures on individual sockets (#2.1)
}
From the SO answer (above), I have #1 & #3 completed. In fact, I even have, say, #2.1 done, as I am currently listening on each socket for the close event -- which does fire. The problem is, say, #2.2: "...and removing the closed ones from your array".
My expectation while implementing the handleSocketClose callback was that one of the argument values would be the socket object, itself, so that I could then cleanup the sockets collection.
Question
How in Laniakea can I obtain a reference to the socket in question at close time (without iteration and checking the connection status)?
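For what it's worth, a sketch of one way out, keeping the question's names: since 'close' only supplies a hadError boolean, bind the socket yourself with a closure when subscribing:

function handleConnection(socket) {
    sockets.add(socket); // add socket/client to set (#1)
    console.log(`HANDLE-CONNECTION...(${sockets.size})`);
    // close over 'socket' so the close handler knows which one it was (#2.2)
    socket.on('close', (hadError) => handleSocketClose(socket, hadError));
}

function handleSocketClose(socket, hadError) {
    const closed = sockets.delete(socket);
    console.log(`HANDLE-SOCKET-CLOSE...(${sockets.size})`, closed, hadError);
}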

Node Cluster issue using Socket.io and Redis

Ok, I have an express-powered API where I also have socket.io running to receive/send realtime events...all works just dandy. I need to cluster my app. I set everything up based on the code below. I spin up workers, they get connections, and everything works, except that now I can't "blast" to all socket.io connections. Here is the setup (taken from this):
var express = require('express'),
    cluster = require('cluster'),
    net = require('net'),
    sio = require('socket.io'),
    sio_redis = require('socket.io-redis');

var port = 3000,
    num_processes = require('os').cpus().length;

if (cluster.isMaster) {
    // This stores our workers. We need to keep them to be able to reference
    // them based on source IP address. It's also useful for auto-restart,
    // for example.
    var workers = [];

    // Helper function for spawning worker at index 'i'.
    var spawn = function(i) {
        workers[i] = cluster.fork();

        // Optional: Restart worker on exit
        workers[i].on('exit', function(code, signal) {
            console.log('respawning worker', i);
            spawn(i);
        });
    };

    // Spawn workers.
    for (var i = 0; i < num_processes; i++) {
        spawn(i);
    }

    // Helper function for getting a worker index based on IP address.
    // This is a hot path so it should be really fast. The way it works
    // is by converting the IP address to a number by removing the dots,
    // then compressing it to the number of slots we have.
    //
    // Compared against "real" hashing (from the sticky-session code) and
    // "real" IP number conversion, this function is on par in terms of
    // worker index distribution, only much faster.
    var worker_index = function(ip, len) {
        var _ip = ip.split(/['.'|':']/),
            arr = [];
        for (var el in _ip) {
            if (_ip[el] == '') {
                arr.push(0);
            } else {
                arr.push(parseInt(_ip[el], 16));
            }
        }
        return Number(arr.join('')) % len;
    };

    // Create the outside facing server listening on our port.
    var server = net.createServer({ pauseOnConnect: true }, function(connection) {
        // We received a connection and need to pass it to the appropriate
        // worker. Get the worker for this connection's source IP and pass
        // it the connection.
        var worker = workers[worker_index(connection.remoteAddress, num_processes)];
        worker.send('sticky-session:connection', connection);
    }).listen(port);
} else {
    // Note we don't use a port here because the master listens on it for us.
    var app = new express();

    // Here you might use middleware, attach routes, etc.

    // Don't expose our internal server to the outside.
    var server = app.listen(0, 'localhost'),
        io = sio(server);

    // Tell Socket.IO to use the redis adapter. By default, the redis
    // server is assumed to be on localhost:6379. You don't have to
    // specify them explicitly unless you want to change them.
    io.adapter(sio_redis({ host: 'localhost', port: 6379 }));

    // Here you might use Socket.IO middleware for authorization etc.

    // Listen to messages sent from the master. Ignore everything else.
    process.on('message', function(message, connection) {
        if (message !== 'sticky-session:connection') {
            return;
        }

        // Emulate a connection event on the server by emitting the
        // event with the connection the master sent us.
        server.emit('connection', connection);
        connection.resume();
    });
}
So I connect from various machines to test concurrency, workers do their thing and all is good, but when I get an IO connection, I'm logging the TOTAL "connected" count and it's always 1 per instance. I need a way to say
allClusterForks.emit(stuff)
I get the connection on the correct worker pid, but "ALL CONNECTIONS" always returns 1.
io.on('connection', function(socket) {
    console.log('Connected to worker %s', process.pid);
    console.log("Adapter ROOMS %s ", io.sockets.adapter.rooms);
    console.log("Adapter SIDS %s ", io.sockets.adapter.sids);
    console.log("SOCKETS CONNECTED %s ", Object.keys(io.sockets.connected).length);
});
I can see the subscribe/unsubscribe coming in using Redis MONITOR
1454701383.188231 [0 127.0.0.1:63150] "subscribe" "socket.io#/#gXJscUUuVQGzsYJfAAAA#"
1454701419.130100 [0 127.0.0.1:63167] "subscribe" "socket.io#/#geYSvYSd5zASi7egAAAA#"
1454701433.842727 [0 127.0.0.1:63167] "unsubscribe" "socket.io#/#geYSvYSd5zASi7egAAAA#"
1454701444.630427 [0 127.0.0.1:63150] "unsubscribe" "socket.io#/#gXJscUUuVQGzsYJfAAAA#"
These are connections from 2 different machines. I would have expected that, with the socket.io redis adapter, these subscriptions would come in on the same redis connection, but they are different.
Am I just totally missing something? There's a surprising lack of documentation or articles on this that aren't completely outdated, wrong, or ambiguous.
EDIT:
Node v5.3.0
Redis v3.0.6
Socket.io v1.3.7
So if anyone comes across this, I figured out that actually "looking" at the counts of connected sockets across processes is not a thing, but broadcasting or emitting to them is. So I've basically just been "testing" for no reason. All works as expected. I WILL be rewriting the socket.io-redis adapter to allow checking counts across processes.
There was a pull request a few years ago to implement support for what I was trying to do. https://github.com/socketio/socket.io-redis/pull/15 and I might try cleaning that up and re-submitting.
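A minimal sketch of what does work, for anyone who lands here: with the redis adapter attached, a plain emit on any one worker is already relayed to clients held by all the other workers (io here is the adapter-backed instance from the setup above):

// runs inside each worker
io.on('connection', function (socket) {
    socket.on('news', function (msg) {
        // the redis adapter republishes this, so clients connected
        // to every other fork receive it as well
        io.emit('news', msg);
    });
});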

Handle new TCP connections synchronously

I know nodejs is asynchronous by nature and it is preferable to use it that way, but I have a use case where we need to handle incoming TCP connections synchronously. Once a new connection is received, we need to connect to some other TCP server, perform some book-keeping, etc., and only then handle the next connection. Since the number of connections is limited, it is fine to handle this synchronously.
Looking for an elegant way to handle this scenario.
var net = require('net');

net.createServer(function (conn) {
    console.log('Received a connection - ');
    var testvar = null;
    var sock = new net.Socket();
    sock.connect(PORT, HOST, function () {
        console.log('Connected to server - ');
    });
    // Other listeners
});
In the above code if two connections received simultaneously the output may be (since asynchronous nature):
Received a connection
Received a connection
Connected to server
Connected to server
But the expectation is:
Received a connection
Connected to server
Received a connection
Connected to server
What is the proper way of doing this?
One solution is to implement a queue and emit a 'done' or 'complete' event to handle the next connection.
For this we may have to take the connection callback out of the createServer call. How do we handle the scoping of the connection and other variables (testvar) in that case?
And what happens to data/messages received on connections that are queued but not yet processed, before a 'data' listener is registered?
Any other, better solutions would be helpful.
I think it is important to separate the concepts of synchronous code vs serial code. You want to process each request serially, but that can still be accomplished while handling each request asynchronously. For your case, the easiest way would probably be to have a queue of requests to handle instead.
var inProgress = false;
var queue = [];

net.createServer(function(sock) {
    queue.push(sock);
    processQueue();
});

function processQueue() {
    if (inProgress || queue.length === 0) return;
    inProgress = true;
    handleSockSerial(queue.shift(), function() {
        inProgress = false;
        processQueue();
    });
}

function handleSockSerial(sock, callback) {
    // Do all your stuff and then call 'callback' when you are done.
}
Note, as long as you are using node >= 0.10, the data coming in from the socket will be buffered until you read the data.
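To make the sketch concrete, a possible body for handleSockSerial (UPSTREAM_PORT and UPSTREAM_HOST are hypothetical placeholders for the other TCP server mentioned in the question):

function handleSockSerial(sock, callback) {
    var upstream = net.connect(UPSTREAM_PORT, UPSTREAM_HOST, function () {
        console.log('Connected to server - ');
        // ...book-keeping with 'sock' and 'upstream' goes here...
        upstream.end();
    });
    upstream.on('error', function (err) {
        console.error(err); // 'close' still fires after an error
    });
    upstream.on('close', function () {
        callback(); // advance the queue exactly once
    });
}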

How do I shutdown a Node.js http(s) server immediately?

I have a Node.js application that contains an http(s) server.
In a specific case, I need to shut down this server programmatically. What I am currently doing is calling its close() function, but this does not help, as it waits for any kept-alive connections to finish first.
So, basically, this shuts down the server, but only after a minimum wait time of 120 seconds. But I want the server to shut down immediately - even if this means breaking off currently handled requests.
What I can not do is a simple
process.exit();
as the server is only part of the application, and the rest of the application should remain running. What I am looking for is conceptually something such as server.destroy(); or something like that.
How could I achieve this?
PS: The keep-alive timeout for connections is usually required, hence it is not a viable option to decrease this time.
The trick is that you need to subscribe to the server's connection event which gives you the socket of the new connection. You need to remember this socket and later on, directly after having called server.close(), destroy that socket using socket.destroy().
Additionally, you need to listen to the socket's close event to remove it from the array if it leaves naturally because its keep-alive timeout runs out.
I have written a small sample application you can use to demonstrate this behavior:
// Create a new server on port 4000
var http = require('http');

var server = http.createServer(function (req, res) {
    res.end('Hello world!');
}).listen(4000);

// Maintain a hash of all connected sockets
var sockets = {}, nextSocketId = 0;

server.on('connection', function (socket) {
    // Add a newly connected socket
    var socketId = nextSocketId++;
    sockets[socketId] = socket;
    console.log('socket', socketId, 'opened');

    // Remove the socket when it closes
    socket.on('close', function () {
        console.log('socket', socketId, 'closed');
        delete sockets[socketId];
    });

    // Extend socket lifetime for demo purposes
    socket.setTimeout(4000);
});

// Count down from 10 seconds
(function countDown (counter) {
    console.log(counter);
    if (counter > 0)
        return setTimeout(countDown, 1000, counter - 1);

    // Close the server
    server.close(function () { console.log('Server closed!'); });

    // Destroy all open sockets
    for (var socketId in sockets) {
        console.log('socket', socketId, 'destroyed');
        sockets[socketId].destroy();
    }
})(10);
Basically, what it does is to start a new HTTP server, count from 10 to 0, and close the server after 10 seconds. If no connection has been established, the server shuts down immediately.
If a connection has been established and it is still open, it is destroyed.
If it had already died naturally, only a message is printed out at that point in time.
I found a way to do this without having to keep track of the connections or having to force them closed. I'm not sure how reliable it is across Node versions or if there are any negative consequences to this but it seems to work perfectly fine for what I'm doing. The trick is to emit the "close" event using setImmediate right after calling the close method. This works like so:
server.close(callback);
setImmediate(function(){server.emit('close')});
At least for me, this ends up freeing the port so that I can start a new HTTP(S) service by the time the callback is called (which is pretty much instantly). Existing connections stay open. I'm using this to automatically restart the HTTPS service after renewing a Let's Encrypt certificate.
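Wrapped into a helper, that renewal flow might look like this (a sketch; restartHttps and its parameters are hypothetical names, not part of the trick itself):

var https = require('https');

function restartHttps(oldServer, tlsOptions, app, port) {
    oldServer.close(function () {
        // by the time close() calls back, the port is free again
        https.createServer(tlsOptions, app).listen(port);
    });
    // force the 'close' callback to run now instead of waiting
    // for keep-alive connections to time out
    setImmediate(function () { oldServer.emit('close'); });
}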
If you need to keep the process alive after closing the server, then Golo Roden's solution is probably the best.
But if you're closing the server as part of a graceful shutdown of the process, you just need this:
var server = require('http').createServer(myFancyServerLogic);
server.on('connection', function (socket) { socket.unref(); });
server.listen(80);

function myFancyServerLogic(req, res) {
    req.connection.ref();
    res.end('Hello World!', function () {
        req.connection.unref();
    });
}
Basically, the sockets that your server uses will only keep the process alive while they're actually serving a request. While they're just sitting there idly (because of a keep-alive connection), a call to server.close() will let the process exit, as long as there's nothing else keeping it alive. If you need to do other things after the server closes, as part of your graceful shutdown, you can hook into process.on('beforeExit', callback) to finish your graceful shutdown procedures.
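The beforeExit hook mentioned above could be as simple as (a sketch):

process.on('beforeExit', function () {
    // runs once the event loop has drained, i.e. after server.close()
    // and after the unref'ed keep-alive sockets stop holding the process open
    console.log('all requests served; running remaining shutdown tasks');
});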
The https://github.com/isaacs/server-destroy library provides an easy way to destroy() a server with the behavior desired in the question (by tracking opened connections and destroying each of them on server destroy, as described in other answers).
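Its usage is roughly this (a sketch based on my reading of the project README):

var http = require('http');
var enableDestroy = require('server-destroy');

var server = http.createServer(function (req, res) {
    res.end('Hello world!');
}).listen(4000);

enableDestroy(server); // patches a destroy() method onto the server

// later: close the server and destroy all tracked connections
server.destroy(function () {
    console.log('Server destroyed!');
});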
As others have said, the solution is to keep track of all open sockets and close them manually. My node package killable can do this for you. An example (using express, but you can use killable on any http.Server instance):
var killable = require('killable');
var app = require('express')();
var server;

app.get('/', function (req, res) {
    res.send('Server is going down NOW!');
    server.kill(function () {
        // the server is down when this is called. That won't take long.
    });
});

server = app.listen(8080);
killable(server);
Yet another nodejs package that performs a shutdown by killing connections: http-shutdown, which seems reasonably maintained at the time of writing (Sept. 2016) and worked for me on NodeJS 6.x.
From the documentation
Usage
There are currently two ways to use this library. The first is explicit wrapping of the Server object:
// Create the http server
var server = require('http').createServer(function(req, res) {
    res.end('Good job!');
});

// Wrap the server object with additional functionality.
// This should be done immediately after server construction, or before you start listening.
// Additional functionality needs to be added for http server events to properly shutdown.
server = require('http-shutdown')(server);

// Listen on a port and start taking requests.
server.listen(3000);

// Sometime later... shutdown the server.
server.shutdown(function() {
    console.log('Everything is cleanly shutdown.');
});
The second is implicitly adding prototype functionality to the Server object:
// .extend adds a .withShutdown prototype method to the Server object
require('http-shutdown').extend();

var server = require('http').createServer(function(req, res) {
    res.end('Good job!');
}).withShutdown(); // <-- Easy to chain. Returns the Server object

// Sometime later, shutdown the server.
server.shutdown(function() {
    console.log('Everything is cleanly shutdown.');
});
My best guess would be to kill the connections manually (i.e. to forcibly close their sockets).
Ideally, this should be done by digging into the server's internals and closing its sockets by hand. Alternatively, one could run a shell command that does the same (provided the server has proper privileges etc.).
I have answered a variation of "how to terminate an HTTP server" many times on different node.js support channels. Unfortunately, I couldn't recommend any of the existing libraries because they are lacking in one way or another. I have since put together a package that (I believe) handles all the cases expected of graceful HTTP server termination.
https://github.com/gajus/http-terminator
The main benefit of http-terminator is that:
it does not monkey-patch Node.js API
it immediately destroys all sockets without an attached HTTP request
it allows graceful timeout to sockets with ongoing HTTP requests
it properly handles HTTPS connections
it informs connections using keep-alive that the server is shutting down by setting a connection: close header
it does not terminate the Node.js process
Usage:
import http from 'http';
import {
    createHttpTerminator,
} from 'http-terminator';

const server = http.createServer();

const httpTerminator = createHttpTerminator({
    server,
});

await httpTerminator.terminate();
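If in-flight requests should only get a bounded grace period, the terminator also accepts a timeout option (a sketch; see the project README for the authoritative defaults):

const httpTerminator = createHttpTerminator({
    gracefulTerminationTimeout: 5000, // ms to wait before sockets are destroyed
    server,
});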
const Koa = require('koa')
const app = new Koa()

let keepAlive = true

app.use(async (ctx) => {
    let url = ctx.request.url
    // ask keep-alive clients to drop the connection once we are stopping
    if (keepAlive === false) {
        ctx.response.set('Connection', 'close')
    }
    switch (url) {
        case '/restart':
            ctx.body = 'success'
            process.send('restart')
            break;
        default:
            ctx.body = 'world-----' + Date.now()
    }
})

const server = app.listen(9011)

process.on('message', (data, sendHandle) => {
    if (data == 'stop') {
        keepAlive = false
        server.close();
    }
})
process.exit(code); // code 0 for success and 1 for fail

node.js + socket.IO - socket not reconnecting?

I know socket.io has a built-in feature for reconnecting and everything; however, I don't think it is working - as I have seen from others, it's not working for them either.
If a user puts their computer to sleep, it disconnects them, and then when they open it back up they are no longer connected, so they don't get any of the notifications or anything until they refresh the page. Perhaps it's just something that I'm not doing correctly?
var io = require('socket.io').listen(8080);
var users = {};

////////////////USER CONNECTED/////
console.log("Server is now running");

io.sockets.on('connection', function (socket) {
    // Tell the client that they are connected
    socket.emit('connected');

    // Once the user's session is received
    socket.on('session', function (session) {
        // Add user to the users variable
        users[socket.id] = { userID: session, socketID: socket };

        // When user disconnects
        socket.on('disconnect', function () {
            //socket.socket.connect();
            var count = 0;
            for (var key in users) {
                if (users[key].userID == session) ++count;
                if (count == 2) break;
            }
            if (count == 1) {
                socket.broadcast.emit('disconnect', { data: session });
            }
            // Remove user's session id from the users variable
            delete users[socket.id];
        });

        socket.on('error', function (err) {
            //socket.socket.connect();
        });
    });
});
socket.emit("connection") needs to be called when the user reconnects, or at least the events that happen in that event need to be called.
Also socket.socket.connect(); doesn't work, it returns with an error and it shuts the socket server down with an error of "connect doesn't exist".
The problem is related to io.connect params.
Look at this client code (it will try to reconnect forever, with a max delay of 3 seconds between attempts):

var ioParams = {
    'reconnection limit': 3000,
    'max reconnection attempts': Number.MAX_VALUE,
    'connect timeout': 7000
};
var socketAddress = null;
var socket = io.connect(socketAddress, ioParams);

There are two important parameters there, related to your problem:

reconnection limit - caps the delay between reconnect attempts, which otherwise keeps growing during a server outage
max reconnection attempts - how many times the client will try. The default is 10, and in most cases this is why the client stops trying.
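Note also that reconnecting alone is not enough for the server code in the question: after a reconnect the server fires 'connection' again with a fresh socket, so the client has to re-send its session. A client-side sketch for the 0.9-era API used above (mySessionId is a hypothetical placeholder):

socket.on('connected', function () {
    // runs on the first connect and again after every reconnect,
    // so the server can rebuild its 'users' entry
    socket.emit('session', mySessionId);
});

socket.on('reconnect', function () {
    console.log('socket.io reconnected');
});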
