ioredis - handle connection retry and ECONNRESET - node.js

I'm trying to handle Redis connection retries when there is a network issue. It looks like it's working, but I'm not sure, because sometimes I get ECONNRESET.
redisOptions: {
  sentinels: [
    { host: process.env.redis_server_1, port: process.env.redis_port_1 },
    { host: process.env.redis_server_2, port: process.env.redis_port_2 },
    { host: process.env.redis_server_3, port: process.env.redis_port_3 },
  ],
  name: 'mymaster'
},
The redisOptions object is defined in a configuration file.
function updateConfigurations() {
  console.log(configuration.redisOptions);
  configuration.redisOptions.sentinelRetryStrategy = (times) => {
    console.log('Trying to connect to Redis ', times);
    return Math.min(times * 100, 10000);
  };
  // configuration.redisOptions.retryStrategy = (times) => Math.min(times * 50, 2000);
}
I'm testing this by disconnecting from the VPN. The moment I disconnect, the error message below is printed to the console:
[ioredis] Unhandled error event: Error: read ECONNRESET
at TCP.onStreamRead (internal/stream_base_commons.js:209:20)
It is then followed by this log line for the connection retries:
[ioredis] Unhandled error event: Error: All sentinels are unreachable. Retrying from scratch after 400ms. Last error: Connection is closed.
Is this the correct way to handle connection retries? How should I handle ECONNRESET?
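Regarding the "[ioredis] Unhandled error event" lines: ioredis emits error events on the client object, and attaching a listener both silences that warning and gives you a place to log ECONNRESET while the retry strategies keep reconnecting. A minimal sketch, assuming a client created from the options above:
const Redis = require('ioredis');

const redis = new Redis(configuration.redisOptions);

// With an 'error' listener attached, ioredis no longer prints
// "[ioredis] Unhandled error event"; reconnection is still driven by
// the retry strategies, so logging is usually all that is needed here.
redis.on('error', (err) => {
  console.error('Redis connection error:', err.message);
});

// Emitted before each reconnection attempt (the argument should be the
// retry delay in ms, per the ioredis event documentation).
redis.on('reconnecting', (delay) => {
  console.log('Reconnecting to Redis in', delay, 'ms');
});
Note also that retryStrategy (commented out above) and sentinelRetryStrategy are separate knobs: the former controls reconnection to the resolved master, the latter controls cycling through the sentinel list.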

Related

How can I make my KafkaJS consumer wait indefinitely for a broker to come online?

I have a KafkaJS consumer configured as follows:
// Create the Kafka client
const kafka = new Kafka({
  clientId,
  brokers,
});

// Create the consumer (note: `kafka` and `consumer` are the consts above,
// not `this.kafka` / `this.consumer`)
const consumer = kafka.consumer({
  groupId,
  heartbeatInterval: 3000,
  sessionTimeout: 30000,
});

// Connect the consumer
consumer.connect().then(async (res) => {
  await consumer.run({
    eachMessage: async ({ topic, partition, message, heartbeat, pause }) => {
      this.subscriptionRegistrations[topic](topic, partition, message);
    },
  }).catch((err) => {
    console.log('Error running consumer!', err);
  });
}).catch(async (err) => {
  console.log('Error connecting consumer!', err);
});
Currently, when starting the application I get several connection errors like these:
{"level":"ERROR","timestamp":"2023-01-27T23:29:58.214Z","logger":"kafkajs","message":"[BrokerPool] Failed to connect to seed broker, trying another broker from the list: Connection error: connect ECONNREFUSED 127.0.0.1:9092","retryCount":0,"retryTime":246}
{"level":"ERROR","timestamp":"2023-01-27T23:29:58.463Z","logger":"kafkajs","message":"[Connection] Connection error: connect ECONNREFUSED 127.0.0.1:9092","broker":"localhost:9092","clientId":"CLIENT_ID_TEST","stack":"Error: connect ECONNREFUSED 127.0.0.1:9092\n at TCPConnectWrap.afterConnect [as oncomplete] (node:net:1157:16)"}
The above errors are expected, as I am not running a broker. They are followed by:
KafkaJSNonRetriableError
Caused by: KafkaJSConnectionError: Connection error: connect ECONNREFUSED 127.0.0.1:9092
at Socket.onError (/path/to/project/node_modules/kafkajs/src/network/connection.js:210:23)
at Socket.emit (node:events:526:28)
at emitErrorNT (node:internal/streams/destroy:157:8)
at emitErrorCloseNT (node:internal/streams/destroy:122:3)
at processTicksAndRejections (node:internal/process/task_queues:83:21)
[ERROR] 23:29:59 KafkaJSNumberOfRetriesExceeded: Connection error: connect ECONNREFUSED 127.0.0.1:9092
At that point the application hangs; it doesn't crash. I also get the error I logged in the catch block of consumer.connect():
Error connecting consumer KafkaJSNonRetriableError
Caused by: KafkaJSConnectionError: Connection error: connect ECONNREFUSED 127.0.0.1:9092
at Socket.onError (/path/to/project/node_modules/kafkajs/src/network/connection.js:210:23)
... 3 lines matching cause stack trace ...
at processTicksAndRejections (node:internal/process/task_queues:83:21) {
name: 'KafkaJSNumberOfRetriesExceeded',
retriable: false,
helpUrl: undefined,
retryCount: 5,
retryTime: 3636,
[cause]: KafkaJSConnectionError: Connection error: connect ECONNREFUSED 127.0.0.1:9092
at Socket.onError (/path/to/project/node_modules/kafkajs/src/network/connection.js:210:23)
at Socket.emit (node:events:526:28)
at emitErrorNT (node:internal/streams/destroy:157:8)
at emitErrorCloseNT (node:internal/streams/destroy:122:3)
at processTicksAndRejections (node:internal/process/task_queues:83:21) {
retriable: true,
helpUrl: undefined,
broker: 'localhost:9092',
code: 'ECONNREFUSED',
[cause]: undefined
}
}
I would like my application to be able to start up before the Kafka broker it is configured to connect to. In this scenario, KafkaJS would keep retrying the connection indefinitely until a broker becomes available. Ideally this would also work as a reconnection strategy: if the broker goes down, KafkaJS would keep trying to reconnect indefinitely until it comes back online. From what I have read in the docs, this is how it is supposed to behave, but that's not what I'm seeing. Perhaps I have set up my client incorrectly. Thank you for the input!
I'd recommend first using the AdminClient to "ping" the cluster. But you can also retry the connection after catching the KafkaJSNumberOfRetriesExceeded error:
const consumer = ...

while (true) {
  try {
    await consumer.connect();
    break;
  } catch (err) {
    console.log('Error connecting consumer!', err);
    if (err instanceof KafkaJSNonRetriableError && err.name === 'KafkaJSNumberOfRetriesExceeded') {
      console.log('retrying connection...');
      continue;
    }
    console.error('unknown error ' + err);
    break;
  }
}

await consumer.run() ...
Ideally this would also work as a reconnection strategy: if the broker goes down, KafkaJS would keep trying to reconnect indefinitely until it comes back online
Or... You run more than one broker, and therefore have a highly available Kafka cluster that clients can connect to.
If you ran your code in Docker/Kubernetes with a restart policy, you could just let the process crash, and a new container would restart on its own. You could implement the same with supervisord.
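Another option, assuming your KafkaJS version supports it: both the client and the consumer accept a retry configuration, and the consumer's documented restartOnFailure callback lets you tell KafkaJS to restart instead of giving up. A sketch, reusing the clientId/brokers/groupId values from the question:
const { Kafka } = require('kafkajs');

const kafka = new Kafka({
  clientId,
  brokers,
  retry: {
    initialRetryTime: 300,
    retries: Number.MAX_SAFE_INTEGER, // keep retrying the initial connection
  },
});

const consumer = kafka.consumer({
  groupId,
  retry: {
    // Called when the consumer has exhausted its retries; returning true
    // asks KafkaJS to restart the consumer rather than give up.
    restartOnFailure: async (err) => true,
  },
});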

Error: read ECONNRESET while connecting to RabbitMQ with Node.js

I've encountered the following error message while connecting to our external RabbitMQ from Node.js:
Error: read ECONNRESET
at TCP.onStreamRead (internal/stream_base_commons.js:205:27) {
errno: 'ECONNRESET',
code: 'ECONNRESET',
syscall: 'read'
}
My Node.js code is as follows:
const amqp = require('amqplib/callback_api');

const amqp_url = 'amqp://un:pw@sb-mq.com:9901/my-vhost';

amqp.connect(amqp_url, function (error0, connection) {
  if (error0) {
    throw error0;
  }
  connection.createChannel(function (error1, channel) {
    if (error1) {
      throw error1;
    }
    var queue = 'hello';
    var msg = 'Hello World!';
    channel.assertQueue(queue, {
      durable: false
    });
    channel.sendToQueue(queue, Buffer.from(msg));
    console.log(" [x] Sent %s", msg);
  });
  setTimeout(function () {
    connection.close();
    process.exit(0);
  }, 500);
});
The thing is, when I set up RabbitMQ locally with the same configuration but the default port (amqp://un:pw@localhost:5672/my-vhost), it worked perfectly. Please let me know how to troubleshoot this, thanks.
"ECONNRESET" means the other side of the TCP conversation abruptly closed its end of the connection.
see How do I debug error ECONNRESET in Node.js?
As for RabbitMQ, check whether it is actually listening on that port:
telnet sb-mq.com 9901
from your client machine, and check the firewall configuration. You may have another service running on 9901. ECONNRESET is a network problem; RabbitMQ can run on non-default ports without any problem.
I found that the issue was resolved when I used amqps instead of amqp.
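For reference, switching amqplib to TLS is mostly a matter of the URL scheme, plus whatever port the server exposes for TLS (5671 is RabbitMQ's default; the host, credentials, and vhost below are placeholders). A minimal sketch:
const amqp = require('amqplib/callback_api');

// amqps:// tells amqplib to open a TLS connection instead of plain TCP.
const amqpsUrl = 'amqps://un:pw@sb-mq.com:5671/my-vhost';

amqp.connect(amqpsUrl, function (err, connection) {
  if (err) throw err;
  console.log('Connected over TLS');
  connection.close();
});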

Sync gateway connection Error: connect EMFILE - Local (undefined:undefined)

I am trying to get a Couchbase document revision identifier via the Sync Gateway API GET /{db}/{doc} from within a Node server:
const http = require('http');

function _getRev(docIdUrl, gateway, callback) {
  let options = {
    host: gateway.host,
    path: gateway.path + docIdUrl,
    port: gateway.port,
    method: 'GET',
    headers: {
      'Content-Type': 'application/json'
    }
  };

  // Node HTTP request
  let syncGatewayRequest = http.request(options, (response) => {
    // ...irrelevant code omitted
  });

  syncGatewayRequest.on('error', (error) => {
    logger.error('syncGateway connection error for ' + docIdUrl);
    callback(error, null); // here is where the error happens!!!!!
  });

  syncGatewayRequest.write(JSON.stringify({}));
  syncGatewayRequest.end();
}
Then I get this error:
[2017-11-03 11:07:51.961] { [Error: connect EMFILE 10.0.1.53:4985 - Local (undefined:undefined)]
code: 'EMFILE',
errno: 'EMFILE',
syscall: 'connect',
address: '10.0.1.53',
port: 4985 }
Error: connect EMFILE 10.0.1.53:4985 - Local (undefined:undefined)
at Object.exports._errnoException (util.js:870:11)
at exports._exceptionWithHostPort (util.js:893:20)
at connect (net.js:849:16)
at net.js:937:9
at nextTickCallbackWith0Args (node.js:420:9)
at process._tickCallback (node.js:349:13)
For context: the above function is executed asynchronously by a significant number of services, say 10,000+.
I noticed the post Nodejs connect EMFILE - How to reuse connections? and tried removing the default connection limit:
var http = require('http')
http.globalAgent.maxSockets = Infinity
But that does not seem to work; the error is still there. Can anyone tell me what's wrong here?
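EMFILE means the process has run out of file descriptors, so setting maxSockets to Infinity makes the problem worse rather than better: every concurrent request is allowed its own socket. A sketch of the usual remedy, reusing the options shape from the question: a single shared keep-alive agent with a finite socket cap, so 10,000+ concurrent callers queue on the agent's pool instead of each opening a fresh connection.
const http = require('http');

// One shared agent: requests beyond maxSockets wait in the agent's queue
// instead of opening new sockets (and consuming new file descriptors).
const agent = new http.Agent({ keepAlive: true, maxSockets: 100 });

function _getRev(docIdUrl, gateway, callback) {
  let options = {
    host: gateway.host,
    path: gateway.path + docIdUrl,
    port: gateway.port,
    method: 'GET',
    agent, // reuse pooled connections
    headers: { 'Content-Type': 'application/json' }
  };
  let syncGatewayRequest = http.request(options, (response) => { /* ... */ });
  syncGatewayRequest.on('error', (error) => callback(error, null));
  syncGatewayRequest.end();
}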

Port 9200 Refused Connection?

I'm trying to get an Elasticsearch client running in Node.js on a Cloud9 workspace. My app runs on port 5678 and my MongoDB runs on 27017. I have searched for other answers but haven't found anything particularly useful. This is the error message I receive when trying to connect to localhost:9200.
Elasticsearch ERROR: 2015-06-26T04:24:19Z
Error: Request error, retrying -- connect ECONNREFUSED
at Log.error (/home/ubuntu/workspace/node_modules/elasticsearch/src/lib/log.js:213:60)
at checkRespForFailure (/home/ubuntu/workspace/node_modules/elasticsearch/src/lib/transport.js:192:18)
at HttpConnector.<anonymous> (/home/ubuntu/workspace/node_modules/elasticsearch/src/lib/connectors/http.js:153:7)
at ClientRequest.wrapper (/home/ubuntu/workspace/node_modules/elasticsearch/node_modules/lodash/index.js:3128:19)
at ClientRequest.emit (events.js:95:17)
at Socket.socketErrorListener (http.js:1552:9)
at Socket.emit (events.js:95:17)
at net.js:441:14
at process._tickCallback (node.js:442:13)
Elasticsearch TRACE: 2015-06-26T04:24:19Z
-> HEAD http://localhost:9200/
<- 0
Elasticsearch WARNING: 2015-06-26T04:24:19Z
Unable to revive connection: http://localhost:9200/
Elasticsearch WARNING: 2015-06-26T04:24:19Z
No living connections
Trace: elasticsearch cluster is down!
at Server (/home/ubuntu/workspace/app.js:32:13)
at respond (/home/ubuntu/workspace/node_modules/elasticsearch/src/lib/transport.js:251:9)
at sendReqWithConnection (/home/ubuntu/workspace/node_modules/elasticsearch/src/lib/transport.js:171:7)
at next (/home/ubuntu/workspace/node_modules/elasticsearch/src/lib/connection_pool.js:213:7)
at process._tickCallback (node.js:442:13)
My code for the Elasticsearch client is very simple:
var elasticsearch = require('elasticsearch');

var client = new elasticsearch.Client({
  hosts: 'localhost:9200',
  log: 'trace'
});

client.ping({
  // ping usually has a 3000ms timeout
  requestTimeout: 30000,
  // undocumented params are appended to the query string
  hello: "elasticsearch!"
}, function (error) {
  if (error) {
    console.trace('elasticsearch cluster is down!');
  } else {
    console.log('All is well');
  }
});
If I try to connect to localhost:5678 instead, I don't get a connection-refused error, but the cluster is still reported as down. Any suggestions would be helpful, thanks :)
// client.js
const es = require('elasticsearch');

const esClient = new es.Client({
  host: {
    protocol: 'http',
    host: 'localhost',
    port: 9200
  },
  log: 'trace'
});

module.exports = esClient;

// ping.js
const esClient = require('./client');

esClient.ping({
  // ping usually has a 3000ms timeout
  requestTimeout: 3000
}, function (error) {
  if (error) {
    console.trace('elasticsearch cluster is down!');
  } else {
    console.log('All is well');
  }
});
Hey, to anyone who wants to know what I did: I basically just downloaded Elasticsearch onto Cloud9 and ran it, then pinged the port accordingly and it worked. Looks like a newbie mistake on my part :P
In my case, stock Elasticsearch with everything at defaults, used with the JS client, results in this error. A short-term fix: set the http.cors.enabled and http.cors.allow-origin settings.

Node.js Error: connect ECONNREFUSED

I am new to Node.js and am unable to resolve this error:
Error: connect ECONNREFUSED
    at errnoException (net.js:901:11)
    at Object.afterConnect [as oncomplete] (net.js:892)
The code I was trying out follows:
var async = require('async'),
    request = require('request');

function done(err, results) {
  if (err) {
    throw err;
  }
  console.log('Done! results: %j', results);
}

var collection = [1, 2, 3, 4];

function iterator(value, callback) {
  request.post({
    url: 'http://localhost:8080',
    body: JSON.stringify(value)
  }, function (err, res, body) {
    // Always invoke the callback, or async.map never completes on success.
    callback(err, body && JSON.parse(body));
  });
}

async.map(collection, iterator, done);
ECONNREFUSED – Connection refused by server error
A blocked port can be the root cause of this issue: check whether your connection is being blocked, or whether the default port has been changed. Identify which app/service you are connecting to and whether its port is blocked or has changed. In your case, check whether anything is actually listening on port 8080. (This error also commonly occurs with FileZilla.)
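A quick way to verify that from Node itself, as a minimal sketch: open a raw TCP connection to the port before firing the async.map job; ECONNREFUSED from the probe means nothing is listening on localhost:8080 yet.
const net = require('net');

// Probe the port the requests will target.
const probe = net.connect({ host: 'localhost', port: 8080 }, () => {
  console.log('Something is listening on 8080');
  probe.end();
});

probe.on('error', (err) => {
  console.error('Probe failed:', err.code); // e.g. ECONNREFUSED
});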
