amqp.node won't detect a connection drop - node.js

We have a node.js script running a socket.io server whose clients consume messages from a RabbitMQ queue. We recently migrated to Amazon AWS, and RabbitMQ is now a cluster of two machines (redundant instances). The AMQP connection is lost from time to time (this is a limitation that comes with a high-availability environment built on redundant VMs, and we have to cope with it), and when a reconnection attempt is made, DNS decides which instance it goes to (the cluster replicates data, so either instance will do).
The problem is that the reconnection attempt is never made: at some point the connection is lost and amqp.node apparently never notices. The consumers stop receiving messages and the socket.io server simply stops accepting new connections.
We have a 55-second heartbeat timeout (not to be confused with the socket.io heartbeat timeout) set in the RabbitMQ URL, and we listen for 'error' and 'close' events with amqp.node's callback API, but they are apparently never emitted. The queues expect consumed messages to be acked. We want the node script to detect a lost connection and exit, so the environment will automatically start a new process and establish the connection again.
Here is the code; maybe we are doing something wrong with the amqp.node callback API, or something else.
var express = require('express');
var app = express();
var http = require('http');
var serverio = http.createServer(app);
var io = require('socket.io').listen(serverio, { log: false });
var socket;
var allcli = [];
var red, blue, green, magenta, orange, reset;
red = '\033[31m';
blue = '\033[34m';
green = '\033[32m';
magenta = '\033[35m';
orange = '\033[43m';
reset = '\033[0m';
var queue = 'ha.atualizacao_mobile';
var urlRabbit = 'amqp://login:password@host?heartbeat=55'; // Amazon
var amqp = require('amqplib/callback_api');
var debug = true;
console.log("Original Socket.IO heartbeat interval: " + io.get('heartbeat interval') + " seconds.");
io.set('heartbeat interval', 10 * 60);
console.log("Hearbeat interval changed to " + io.get('heartbeat interval') + " seconds to reduce battery consumption in the mobile clients.");
console.log("Original Socket.IO heartbeat timeout: " + io.get('heartbeat timeout') + " seconds.");
io.set('heartbeat timeout', 11 * 60);
console.log("Heartbeat timeout set to " + io.get('heartbeat timeout') + " seconds.");
io.sockets.on('connection', function(socket){
    socket.on('error', function (exc) {
        console.log(orange + "Ignoring exception: " + exc + reset);
    });
    socket.on('send-indice', function (data) {
        // Some business logic
    });
    socket.on('disconnect', function () {
        // Some business logic
    });
});

function updatecli(data){
    // Some business logic
}
amqp.connect(urlRabbit, null, function(err, conn) {
    if (err !== null) {
        return console.log("Error creating connection: " + err);
    }
    conn.on('error', function(err) {
        console.log("Generated event 'error': " + err);
    });
    conn.on('close', function() {
        console.log("Connection closed.");
        process.exit();
    });
    processRabbitConnection(conn, function() {
        conn.close();
    });
});
function processRabbitConnection(conn, finalize) {
    conn.createChannel(function(err, channel) {
        if (err != null) {
            console.log("Error creating channel: " + err);
            return finalize();
        }
        channel.assertQueue(queue, null, function(err, ok) {
            if (err !== null) {
                console.log("Error asserting queue " + queue + ": " + err);
                return finalize();
            }
            channel.consume(queue, function (msg) {
                if (msg !== null) {
                    try {
                        var dataObj = JSON.parse(msg.content);
                        if (debug == true) {
                            //console.log(dataObj);
                        }
                        updatecli(dataObj);
                    } catch(err) {
                        console.log("Error in JSON: " + err);
                    }
                    channel.ack(msg);
                }
            }, null, function(err, ok) {
                if (err !== null) {
                    console.log("Error consuming message: " + err);
                    return finalize();
                }
            });
        });
    });
}
serverio.listen(9128, function () {
    console.log('Server: Socket IO Online - Port: 9128 - ' + new Date());
});

Apparently the issue has been solved. The near-60-second heartbeat was the problem: it conflicted with the RabbitMQ load balancer, which checks roughly every minute whether any data has passed through the connection and breaks the connection if none has. The AMQP connection then stops receiving messages and the library apparently does not react to that. A lower heartbeat (e.g. 30 seconds) is necessary to avoid this situation.
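For reference, here is a minimal sketch of the fail-fast setup described above, using the same amqplib callback API; the URL is a placeholder, the heartbeat is lowered to 30 seconds, and the process simply exits on 'close' so the environment can start a fresh process that reconnects through DNS:

var amqp = require('amqplib/callback_api');

// Placeholder URL; a 30-second heartbeat stays well under the
// load balancer's roughly one-minute idle check.
var urlRabbit = 'amqp://login:password@host?heartbeat=30';

amqp.connect(urlRabbit, null, function(err, conn) {
    if (err !== null) {
        console.log("Error creating connection: " + err);
        return process.exit(1); // let the environment restart the script
    }
    conn.on('error', function(err) {
        console.log("Connection error: " + err);
    });
    conn.on('close', function() {
        // 'close' is also emitted after an error or missed heartbeats;
        // exiting here lets the environment spawn a new process.
        console.log("Connection closed, exiting.");
        process.exit(1);
    });
    // ... create channels and consumers here, as in the code above ...
});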

Related

Sending files through net.socket

I am trying to create a socket server with the net module in node.js. Different clients should connect to it and be able to send/receive commands, which will be evaluated on the server in the future.
Clients should also be able to ask for files, which the server will then send to them.
So, for example, one client sends the command "file picture.jpg", the server sends the contents of picture.jpg through the socket, and the client saves it as download.jpg.
This works fine for files <= 2 kB but not above. The client seems to receive the file split up into several chunks. That would be fine if I were only sending files through the socket (I would simply append every chunk to the file), but the problem is that a big file can take minutes, and during that time the server may send other commands (which can't wait for the file transfer to complete).
Here is my simplified code:
server.js
var net = require('net');
var fs = require('fs');

var HOST = '127.0.0.1';
var PORT = 6969;

// All connected socket clients will be in this list:
var client = [];

const server = net.createServer(function(sock) {
    // We have a connection - a socket object is assigned to the connection automatically
    console.log('Client connected: ' + sock.remoteAddress + ':' + sock.remotePort);
    // Put this new client in the list
    client.push(sock);

    sock.on('data', function(data) {
        const dataStr = data.toString();
        const cmd = dataStr.split(' ', 1)[0];
        const value = dataStr.slice(cmd.length).trim();
        if (cmd == "file") {
            fs.readFile(value, 'utf8', function(err, contents) {
                if (err) {
                    if (err.code == 'ENOENT') {
                        sock.write('File not exist: ' + value); // Send error to client
                    } else {
                        console.error(err);
                    }
                } else {
                    sock.write('file ' + contents); // Send file to client
                }
            });
            console.log("Sending file to client.");
        } else if (cmd == "quit") {
            sock.write('Bye!');
            sock.destroy();
        } else {
            sock.write('Unknown command: "' + dataStr + '"');
        }
    });

    sock.on('error', (e) => {
        if (e.code == "ECONNRESET") {
            console.log("Got ECONNRESET, continue!");
        } else {
            console.log(e);
        }
    });

    sock.on('close', function(data) {
        var clientId = client.indexOf(sock);
        client.splice(clientId, 1);
        console.log('Client closed connection: ' + sock.remoteAddress + ':' + sock.remotePort);
    });
});

server.on('error', function (e) {
    if (e.code == 'EADDRINUSE') {
        console.log('Creating server failed: Address in use ' + HOST + ':' + PORT);
    }
});

server.listen(PORT, HOST, () => {
    console.log('Server listening on ' + HOST + ':' + PORT);
});
client.js
var net = require('net');
var fs = require('fs');

var HOST = '127.0.0.1';
var PORT = 6969;

var client = new net.Socket();

process.stdin.setEncoding('utf8');
var stdin = process.openStdin();

client.on('error', function (e) {
    console.log("ERROR: " + e.code);
    process.exit(1);
});

client.connect(PORT, HOST, function() {
    console.log('Connected to server: ' + HOST + ':' + PORT);
    process.stdout.write('> ');
});

stdin.addListener("data", function(d) {
    var inp = d.toString().trim(); // clean input functions here
    if (inp === "exit") {
        //client.write("goodbye");
        client.destroy();
        stdin.destroy();
    } else {
        client.write(inp);
    }
});

client.on('data', function(data) {
    if (data.toString().substr(0, 5) === 'file ') { // If receiving a file from the server, data starts with "file "
        const filename = "downloaded.jpg";
        fs.writeFile(filename, data.toString().substr(5), function(err) {
            if (err) {
                return console.log(err);
            }
            console.log("Saved " + filename + ".");
            process.stdout.write('> ');
        });
    } else { // Unhandled commands will be printed on the console:
        console.log('DATA: ' + data);
        process.stdout.write('> ');
    }
});

client.on('close', function() {
    console.log('Connection closed');
    process.exit();
});
So, any suggestions on how best to solve this? Also, can I simply expand the buffer size in net.Socket to something like 32 MB?
Since TCP gives you a byte stream, you don't want to rely on anything about how the stream is broken up into separate data events. The data given to one callback could be the first half of a message, or two messages concatenated.
Instead, you want to emulate your datagram protocol on top of this reliable stream by appending incoming data to the end of a buffer and removing complete messages from the front for processing.
For example, this simple server from the docs demonstrates a minimal valid TCP server:
const net = require('net');

const server = net.createServer((socket) => {
    let name = '';
    socket.setEncoding('utf8');
    socket.on('data', (chunk) => name += chunk);
    socket.on('end', () => socket.end(`hello ${name}`));
});

server.listen(8000);
It assembles a buffer without making any assumption about how many data calls arrive; in this simple case the whole buffer is a single message, used at the end event.
To process messages before the end of the connection, you also want to examine the front of the buffer at the end of every data event to see whether any complete messages are ready to process. How complete messages are separated needs to be part of your protocol.
Message separation can be done with length indicators or reserved sequences, but reserved sequences require encoding the file data (so the sequence never appears in it by accident) and continuously scanning the data for them. That makes length indicators preferable for the file data.
So, for example, the file [data] response first becomes file [#####] [data], where ##### tells you how much data to keep assembling in the buffer before a data callback removes the entire message from the front of the buffer and processes it as a fileSave().
Then, to handle more granular interactivity, break these operations into smaller interactions: for example, replace the file [wholefilecount] [data] response with filechunk [0-maxchunk] [data] responses that require a filenext command to continue, and send a fileend to tell the client the file is done.
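As an illustration only (not code from the answer above), here is a rough sketch of length-prefixed framing over a net socket, assuming a 4-byte big-endian length header; the helper names sendMessage and onMessage are made up for the example:

// Sender side: prefix every payload with a 4-byte big-endian length.
function sendMessage(sock, payload) {
    const body = Buffer.from(payload);
    const header = Buffer.alloc(4);
    header.writeUInt32BE(body.length, 0);
    sock.write(Buffer.concat([header, body]));
}

// Receiver side: accumulate chunks on a buffer and hand complete
// messages to a handler, however the chunks happen to arrive.
function onMessage(sock, handler) {
    let buffered = Buffer.alloc(0);
    sock.on('data', (chunk) => {
        buffered = Buffer.concat([buffered, chunk]);
        // A complete message is available once the 4-byte header and
        // the number of bytes it announces have both arrived.
        while (buffered.length >= 4) {
            const len = buffered.readUInt32BE(0);
            if (buffered.length < 4 + len) break; // wait for more data
            const msg = buffered.slice(4, 4 + len);
            buffered = buffered.slice(4 + len);
            handler(msg);
        }
    });
}

With framing like this, the file reply (or each filechunk reply) travels as one complete message, so commands sent in between are never mixed into the file data.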

Node Js / Typescript - AMQP Consumer

I am trying my hand at node.js/TypeScript for the first time and having a bit of trouble writing a consumer for a RabbitMQ queue.
Code:
let amqp = require('amqp');
let connection = amqp.createConnection({url: "amqp://" + RABBITMQ_USER + ":" + RABBITMQ_PASSWORD + "@" + RABBITMQ_HOST + ":" + RABBITMQ_PORT + RABBITMQ_VHOST});

connection.on('ready', function() {
    connection.exchange(RABBITMQ_WORKER_EXCHANGE, function (exchange) {
        connection.queue(RABBITMQ_QUEUE, function (queue) {
            queue.bind(exchange, function() {
                queue.publish(function (message) {
                    console.log('subscribed to queue');
                    let encoded_payload = unescape(message.data);
                    let payload = JSON.parse(encoded_payload);
                    console.log('Received a message:');
                    console.log(payload);
                })
            })
        })
    })
})
It seems to connect to the AMQP server and throws no errors, but it just sits there and doesn't consume anything. Is there a step I am missing?
Any help would be greatly appreciated,
Thank you.
Here is my working solution, based on the RabbitMQ JavaScript tutorial.
https://www.rabbitmq.com/tutorials/tutorial-three-javascript.html
Probably not up to TypeScript standards, feel free to correct me if there's a better way.
#!/usr/bin/env node
require('dotenv').config();
import amqp = require('amqplib/callback_api');
import db = require('./database');

amqp.connect({
    protocol: process.env.RABBITMQ_PROTOCOL,
    hostname: process.env.RABBITMQ_HOST,
    port: process.env.RABBITMQ_PORT,
    username: process.env.RABBITMQ_USER,
    password: process.env.RABBITMQ_PASSWORD,
    vhost: process.env.RABBITMQ_VHOST
}, function(err, conn) {
    conn.createChannel(function (err, ch) {
        // set exchange that is being used
        ch.assertExchange(process.env.RABBITMQ_WORKER_EXCHANGE, 'direct', {durable: true});
        // set queue that is being used
        ch.assertQueue(process.env.RABBITMQ_QUEUE, {durable: true}, function (err, q) {
            console.log(" [*] Waiting for messages in %s. To exit press CTRL+C", q.queue);
            // bind the queue to the exchange
            ch.bindQueue(q.queue, process.env.RABBITMQ_WORKER_EXCHANGE, '');
            // consume from the queue, one message at a time.
            ch.consume(q.queue, function (msg) {
                console.log("Message received: %s", msg.content.toString());
                // save message to db
                db.store(msg.content.toString()).then(function() {
                    // acknowledge receipt of message to amqp
                    console.log("Acknowledging message");
                    ch.ack(msg, true);
                });
            }, {noAck: false});
        });
    });
});
import * as Amqp from "amqp-ts";

var connection = new Amqp.Connection("amqp://localhost");
var exchange = connection.declareExchange("ExchangeName");
var queue = connection.declareQueue("QueueName");
queue.bind(exchange);
queue.activateConsumer((message) => {
    console.log("Message received: " + message.getContent());
});

// it is possible that the following message is not received because
// it can be sent before the queue, binding or consumer exist
var msg = new Amqp.Message("Test");
exchange.send(msg);

connection.completeConfiguration().then(() => {
    // the following message will be received because
    // everything you defined earlier for this connection now exists
    var msg2 = new Amqp.Message("Test2");
    exchange.send(msg2);
});

RabbitMQ program is hanging while handling 100k messages. Is this the right way to do it?

The code is attached below. My system hangs. Is this the right way to create that many channels?
I wanted to compare the CPU usage of RabbitMQ and Redis pub/sub for 100k messages, and I am running the RabbitMQ server on Windows.
var amqp = require("amqplib/callback_api");
var time = 0;
var limit = 100000;
amqp.connect("amqp://localhost",function(err,conn){
if(err){
console.log('Connection closed-----------error connect');
return;
}
var timer = setInterval(() => {
time+=1;
if(time>=limit){
clearInterval(timer);
}
conn.createChannel(function(err,ch){
if(err){
console.log('Connection closed-----------error createChannel');
return;
}
var q = "queue_name"+time.toString();
// console.log(q);
var msg = "this is the message string!!!";
ch.assertQueue(q,{durable: false});
ch.sendToQueue(q,new Buffer(msg),{persistent: false});
// console.log("time = "+time);
});
}, 10);
});
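For comparison (an illustrative sketch, not code from the post above), the same 100k messages can be published over a single channel to a single queue, instead of creating a new channel and queue for every message:

var amqp = require("amqplib/callback_api");

var limit = 100000;

amqp.connect("amqp://localhost", function(err, conn) {
    if (err) {
        console.log("error connect: " + err);
        return;
    }
    // Channels are multiplexed over the one TCP connection,
    // so a single channel is enough for a simple publisher.
    conn.createChannel(function(err, ch) {
        if (err) {
            console.log("error createChannel: " + err);
            return;
        }
        var q = "queue_name";
        var msg = "this is the message string!!!";
        ch.assertQueue(q, { durable: false });
        for (var i = 0; i < limit; i++) {
            ch.sendToQueue(q, Buffer.from(msg), { persistent: false });
        }
    });
});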

node net socket connection timeout

I noticed that my node server's net.createConnection() takes a very long time before firing an error in some cases (it seems to be a particular problem with ports...).
I tried to connect to somedomain:9000 (listening, connecting and working as expected)
and to somedomain:1234 (same domain, different port; it waits around 2 minutes until "connect ETIMEDOUT").
When I connect to non-existent domains, I get an error right away, but not when I connect to unreachable ports on reachable hosts.
I need to determine whether a machine is reachable in under 1 second.
How do I handle this? There must be some way to notice an unreachable port in less than 2 minutes,
or at least some kind of timeout that just marks the address as unreachable after a set amount of time?
Thanks
UPDATE: current Connection code:
this.openConnection = function() {
    try {
        console.log("[INFO] connecting to " + device.ip + ":" + device.port);
        device.clientSocket = new net.createConnection(this.port, this.ip)
            .on('connect', device.connected)
            .on('data', device.inputReceived)
            .on('error', function(err) {
                if (err.code == "ENOTFOUND") {
                    console.log("[ERROR] No device found at this address!");
                    device.clientSocket.destroy();
                    return;
                }
                if (err.code == "ECONNREFUSED") {
                    console.log("[ERROR] Connection refused! Please check the IP.");
                    device.clientSocket.destroy();
                    return;
                }
                console.log("[CONNECTION] Unexpected error! " + err.message + " RESTARTING SERVER");
                process.exit(1);
            })
            .on('disconnect', function() {
                console.log("[CONNECTION] disconnected!");
            });
    } catch(err) {
        console.log("[CONNECTION] connection failed! " + err);
    }
};
When you connect, you can just set your own timer for whatever timeout you want; if the connection has not succeeded when that timer fires, then it did not succeed as quickly as you need.
This could be encapsulated in a single function that either takes a callback or returns a promise.
Based on your code, here's a shot at adding a timeout to it (untested code):
this.openConnection = function(timeout) {
    var timer;
    timeout = timeout || 2000;
    try {
        console.log("[INFO] connecting to " + device.ip + ":" + device.port);
        device.clientSocket = new net.createConnection(this.port, this.ip)
            .on('connect', function() {
                clearTimeout(timer);
                device.connected();
            })
            .on('data', function(data) {
                clearTimeout(timer);
                device.inputReceived(data);
            })
            .on('error', function(err) {
                clearTimeout(timer);
                if (err.code == "ENOTFOUND") {
                    console.log("[ERROR] No device found at this address!");
                    device.clientSocket.destroy();
                    return;
                }
                if (err.code == "ECONNREFUSED") {
                    console.log("[ERROR] Connection refused! Please check the IP.");
                    device.clientSocket.destroy();
                    return;
                }
                console.log("[CONNECTION] Unexpected error! " + err.message + " RESTARTING SERVER");
                process.exit(1);
            })
            .on('disconnect', function() {
                console.log("[CONNECTION] disconnected!");
            });
        timer = setTimeout(function() {
            console.log("[ERROR] Attempt at connection exceeded timeout value");
            device.clientSocket.end();
        }, timeout);
    } catch(err) {
        console.log("[CONNECTION] connection failed! " + err);
    }
};
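Since the same idea can also be wrapped in a function that returns a promise, here is a rough, untested standalone sketch of that variant (the host, port and one-second limit are only examples):

var net = require('net');

// Resolve with the socket if it connects within `timeout` ms,
// otherwise destroy the socket and reject.
function connectWithTimeout(port, host, timeout) {
    return new Promise(function(resolve, reject) {
        var socket = net.createConnection(port, host);
        var timer = setTimeout(function() {
            socket.destroy();
            reject(new Error("connect timeout"));
        }, timeout);
        socket.on('connect', function() {
            clearTimeout(timer);
            resolve(socket);
        });
        socket.on('error', function(err) {
            clearTimeout(timer);
            reject(err);
        });
    });
}

// Usage: treat anything slower than one second as unreachable.
connectWithTimeout(1234, "somedomain", 1000)
    .then(function(socket) { console.log("reachable"); socket.end(); })
    .catch(function(err) { console.log("unreachable: " + err.message); });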

EMFILE error when using nodejs redis

I want to test how many connections my redis server can hold, so I call redis.createClient() in a loop. The redis server keeps running fine, but I get the EMFILE error; I know this means I have used up my file descriptors.
But wait: I tested my mqtt server before and did the same thing to it;
I called mqtt.createClient() in a loop of 10000, of 20000... and I never got the EMFILE error.
So, does the node.js mqtt library use a different mechanism underneath?
redis-client.js
var redis = require('redis');

function start() {
    var client = redis.createClient();
    client.on('error', function(err) {
        console.log('Error ' + err);
    });
}

exports.start = start;
redis-test.js
var redis_client = require('./redis-client');

for(var i = 0 ; i < 10000 ; ++i) {
    redis_client.start();
    console.log('redis client ' + i + ' started');
}
mqtt-subclient.js
var mqtt = require('mqtt');

function start() {
    var client = mqtt.createClient();
    client.subscribe('message');
    //client.publish('message', 'hello me!');
    client.on('message', function(topic, message) {
        console.log('receive message: ');
        console.log(message);
    });
    client.on('connack', function(packet) {
        console.log(packet);
        if(packet.returnCode == 0) {
            console.log('connect successfully');
        }
    });
    client.on('suback', function(packet) {
        console.log(packet.messageId);
    });
    client.on('error', function(err) {
        console.log(err);
    });
}

exports.start = start;
mqtt-test.js
var subclient = require('./mqtt-subclient.js');

for(var i = 0 ; i < 10000 ; ++i) {
    subclient.start();
    console.log('client ' + i + ' started');
}
Redis cannot accept more than x simultaneous connection attempts, where x is the backlog parameter of the listen system call.
That backlog is capped by the minimum of the somaxconn kernel parameter (128 is a common default value) and 512. So if you attempt more than min(somaxconn, 512) simultaneous connections, you can get errors. Adding a small delay between your connection attempts should fix the problem.
Then, you need to check that you have enough resources to open 10000 file descriptors (check the output of ulimit -a), and that your TCP/IP ephemeral port range is large enough to accommodate that many client connections.
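For illustration, here is a small variation of the redis-test.js loop above that spaces out the connection attempts; the 10 ms interval is an arbitrary choice:

var redis_client = require('./redis-client');

var total = 10000;
var opened = 0;

// Opening one client every 10 ms keeps the number of simultaneous,
// not-yet-accepted connection attempts far below the listen backlog.
var timer = setInterval(function() {
    redis_client.start();
    console.log('redis client ' + opened + ' started');
    opened += 1;
    if (opened >= total) {
        clearInterval(timer);
    }
}, 10);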
