Installed NodeJS on Raspberry Pi, is there a way to check if the rPi is connected to the internet via NodeJs ?
A quick and dirty way is to check if Node can resolve www.google.com:
// Quick-and-dirty connectivity probe: try to resolve a well-known hostname.
const dns = require('dns');
dns.resolve('www.google.com', (err) => {
  // A resolution failure is treated as "offline"; success as "online".
  console.log(err ? "No connection" : "Connected");
});
This isn't entirely foolproof, since your RaspPi can be connected to the Internet yet unable to resolve www.google.com for some reason, and you might also want to check err.type to distinguish between 'unable to resolve' and 'cannot connect to a nameserver, so the connection might be down'.
While robertklep's solution works, it is far from being the best choice for this. It takes about 3 minutes for dns.resolve to timeout and give an error if you don't have an internet connection, while dns.lookup responds almost instantly with the error ENOTFOUND.
So I made this function:
/**
 * Fast connectivity check: dns.lookup fails almost instantly with
 * ENOTFOUND when there is no internet connection, unlike dns.resolve,
 * which can take minutes to time out.
 *
 * @param {function(boolean)} cb - invoked with false only when the host
 *   cannot be found (i.e. no connectivity); true otherwise.
 */
function checkInternet(cb) {
  require('dns').lookup('google.com', function (err) {
    // Use strict equality; only ENOTFOUND signals "no connection" —
    // other errors are unrelated to connectivity.
    if (err && err.code === "ENOTFOUND") {
      cb(false);
    } else {
      cb(true);
    }
  });
}
// example usage:
// example usage:
checkInternet((isConnected) => {
  if (!isConnected) {
    // not connected to the internet
    return;
  }
  // connected to the internet
});
This is by far the fastest way of checking for internet connectivity and it avoids all errors that are not related to internet connectivity.
I had to build something similar in a NodeJS-app some time ago. The way I did it was to first use the networkInterfaces() function in the OS module and then check if one or more interfaces have a non-internal IP.
If that was true, then I used exec() to start ping with a well-defined server (I like Google's DNS servers). By checking the return value of exec(), I know if ping was successful or not. I adjusted the number of pings based on the interface type. Forking a process introduces some overhead, but since this test is not performed too frequently in my app, I can afford it. Also, by using ping and IP addresses, you don't depend on DNS being configured. Here is an example:
// Probe connectivity by pinging a fixed IP once; using an IP address
// avoids depending on DNS being configured.
var exec = require('child_process').exec;
var child = exec('ping -c 1 128.39.36.96', function (error, stdout, stderr) {
  // exec reports a non-null error when ping exits non-zero (unreachable).
  if (error !== null) {
    console.log("Not available");
  } else {
    console.log("Available");
  }
});
It's not as foolproof as possible, but it gets the job done:
var dns = require('dns');
// Reverse-resolve Google's public DNS server; success means we have both
// a route to the internet and a working resolver.
dns.lookupService('8.8.8.8', 53, function(err, hostname, service){
  // Defect fix: the error was previously ignored, so a failed lookup
  // logged "undefined undefined"; handle the error branch explicitly.
  if (err) {
    console.log("Not connected");
    return;
  }
  console.log(hostname, service);
  // google-public-dns-a.google.com domain
});
just use a simple if(err) and treat the response adequately. :)
ps.: Please don't bother telling me 8.8.8.8 is not a name to be resolved, it's just a lookup for a highly available dns server from google. The intention is to check connectivity, not name resolution.
Here is a one liner: (Node 10.6+)
// NOTE: `await` is only valid inside an async function (or as top-level
// await in an ES module, Node 14.8+); `!!` coerces the resolved address
// list (or the `undefined` produced by the .catch) into a boolean.
let isConnected = !!await require('dns').promises.resolve('google.com').catch(()=>{});
Since I was concerned with DNS cache in other solutions here, I tried an actual connectivity test using http2.
I think this is the best way to test the internet connection as it doesn't send much data and also doesn't rely on DNS resolving alone and it is quite fast.
Note that this was added in: v8.4.0
const http2 = require('http2');

/**
 * Actual connectivity test (not just DNS): open an HTTP/2 session to
 * Google and report whether the connection was established.
 * Requires Node >= 8.4.0.
 *
 * @returns {Promise<boolean>} resolves true on connect, false on error.
 */
function isConnected() {
  return new Promise((resolve) => {
    const session = http2.connect('https://www.google.com');
    // Settle the promise, then tear the session down.
    const finish = (result) => {
      resolve(result);
      session.destroy();
    };
    session.on('connect', () => finish(true));
    session.on('error', () => finish(false));
  });
}
isConnected().then(console.log);
Edit: I made this into a package if anyone is interested.
As of 2019 you can use DNS promises lookup.
NOTE This API is experimental.
const dns = require('dns').promises;
exports.checkInternet = function checkInternet() {
return dns.lookup('google.com')
.then(() => true)
.catch(() => false);
};
I found a great and simple npm tool to detect internet connection. It looks more reliable.
First you need to install
npm i check-internet-connected
Then you can call it like follows
const checkInternetConnected = require('check-internet-connected');

// Probe settings: per-attempt timeout, retry count, and the domain whose
// DNS record is checked.
const config = {
  timeout: 5000, // timeout connecting to each server (A and AAAA), each try (default 5000)
  retries: 5, // number of retries to do before failing (default 5)
  domain: 'google.com' // the domain to check DNS record of
};

checkInternetConnected(config)
  .then(() => {
    console.log("Internet available");
  })
  .catch((error) => {
    console.log("No internet", error);
  });
It is very helpful to check whether an internet connection is available for our browser or not.
var internetAvailable = require("internet-available");
// Defect fix: the success log previously passed the `internetAvailable`
// function itself as a second argument, printing its source code; log
// only the message.
internetAvailable().then(function(){
    console.log("Internet available");
}).catch(function(){
    console.log("No internet");
});
for more[internet-available][1]: https://www.npmjs.com/package/internet-available
It's a very simple function that does not import any stuff, but makes use of JS inbuilt function, and can only be executed when called, it does not monitor loss/internet connection; unlike some answers that make use of cache, this result is accurate as it does not return cached result.
// Connectivity check using the built-in fetch; `cache: "no-cache"` keeps
// the result from being answered by a cached response.
// Defect fix: the HTTP method was "FET" (a typo) — an invalid method makes
// fetch throw a TypeError regardless of connectivity; it must be "GET".
const connected = fetch("https://google.com", {
  method: "GET",
  cache: "no-cache",
  headers: { "Content-Type": "application/json" },
  referrerPolicy: "no-referrer",
}).then(() => true)
  .catch(() => false);
Call it using await (make sure you're inside an async function or you'll get a promise)
console.log(await connected);
Related
I am trying to run ansible as a spawned process from NodeJS.
I have tried everything I can find on the internet and on StackOverflow to prevent ansible from doing Strict Host Key Checking when logging in via SSH, however ansible is just ignoring all the settings.
I have set the environment variable
export ANSIBLE_HOST_KEY_CHECKING=False
I have added to `~/.ssh/config`:
Host *
StrictHostKeyChecking no
I have added ANSIBLE_HOST_KEY_CHECKING=False to my nodejs .env file.
As an example of the command I am running, here is some of my code.
/**
 * Run an ansible playbook against a single host and settle with the
 * parsed PLAY RECAP counters.
 *
 * Defect fix: the inventory flag was passed as a single argv element
 * (`-i ${ip},`); spawn() performs no shell word-splitting, so ansible
 * received one malformed token. Flag and value are now separate elements.
 *
 * @param {string} playbook - path to the playbook file
 * @param {string} ip - target host IP (the trailing comma makes ansible
 *   treat it as an inline inventory list)
 * @returns {Promise<object>} recap counters {ok, changed, unreachable,
 *   failed, skipped, rescued, ignored}; rejects when unreachable+failed > 0
 *   or the process cannot be spawned.
 */
function runPlaybook(playbook, ip) {
  return new Promise((resolve, reject) => {
    let ansible = spawn('ansible-playbook', ['-i', `${ip},`, playbook]);
    // Without this, a missing ansible-playbook binary would leave the
    // promise pending forever.
    ansible.on('error', reject);
    ansible.stderr.on('data', function (data) {
      console.log('stderr: ' + data.toString());
    });
    ansible.stdout.on('data', function (data) {
      let stdoutData = data.toString();
      console.log('stdout: ', stdoutData);
      if (stdoutData.includes(`ok: [${ip}]`)) {
        console.log('Clearning Interval');
        // NOTE(review): `interval` is not defined in this function; it is
        // assumed to live in an enclosing scope — confirm.
        clearInterval(interval);
      }
      if (stdoutData.startsWith(ip)) {
        // PLAY RECAP line, e.g. "1.2.3.4 : ok=3 changed=1 unreachable=0 ..."
        const re =
          /^[0-9.]* *: *ok=([0-9])* *changed=([0-9])* *unreachable=([0-9])* *failed=([0-9])* *skipped=([0-9])* *rescued=([0-9])* *ignored=([0-9])*/gm;
        var m;
        while ((m = re.exec(stdoutData))) {
          const result = {
            ok: parseInt(m[1], 10),
            changed: parseInt(m[2], 10),
            unreachable: parseInt(m[3], 10),
            failed: parseInt(m[4], 10),
            skipped: parseInt(m[5], 10),
            rescued: parseInt(m[6], 10),
            ignored: parseInt(m[7], 10),
          };
          // Success when nothing was unreachable and nothing failed.
          if (result.unreachable + result.failed === 0) {
            return resolve(result);
          } else {
            return reject(result);
          }
        }
      }
    });
  });
}
Just can't think of anything else to try.
I have fixed the problem by using the correct key file, embarrassing I know, however the error I was getting from SSH was very similar if not the same as you would get if the host had changed. In this case, it had. I was deleting VM's on my virtual server and recreating them, I must be the only person working at the moment because the same IP address was available and kept being reissued to new VM's. However at the same time due to a refactor I had also changed the key I was pointing to.
As an upside, feel free to use my regex in the example code to get the Ansible results into an array.
I'm here to request help with mongo/mongoose. I use AWS lambda that accesses a mongo database and I'm having problems sometimes my connections reach the limit of 500. I'm trying to fix this problem and I did some things like this https://dzone.com/articles/how-to-use-mongodb-connection-pooling-on-aws-lambd and https://www.mongodb.com/blog/post/optimizing-aws-lambda-performance-with-mongodb-atlas-and-nodejs. That basically is to use a singleton-like and set context.callbackWaitsForEmptyEventLoop = false, which indeed helped but is still, rarely, open 100 connections in less than a minute, it looks like there is some connection that is not being reused even tho our logs show that they are being reused. So I realized a weird behavior, whenever mongoatlas shows me an increased number of commands, my mongo connections increase heavily. The first chart is operations and the second is the connections.
Looking at operations, there are too many commands and just a few queries. I have no idea what are those commands, my theory is that those commands are causing the problem but I did not find anything that explained what is the difference between query and command exactly for me to know if that is a valid theory or not. Another thing is, how to choose correctly the number of pool size, we have really simple queries.
Here is our singleton class because maybe this is what we are doing wrong:
// Singleton-style wrapper around a mongoose connection, intended to let
// AWS Lambda invocations reuse one connection instead of opening a new
// one per invocation.
class Database {
// Argument tuple spread into mongoose.connect: URI plus connection options.
options: [string, mongoose.ConnectionOptions];
instance?: typeof mongoose | null;
constructor(options = config) {
console.log('[DatabaseService] Created database instance...');
this.options = options;
this.instance = null;
}
// Verifies the cached connection is alive by pinging the admin database.
// NOTE(review): this ping runs on every init() call, issuing an extra
// server command per request — the surrounding discussion identifies it
// as a likely cause of the command/connection growth.
async checkConnection() {
try {
if (this.instance) {
const pingResponse = await this.instance.connection.db.admin().ping();
console.log(`[DatabaseService] Connection status: ${pingResponse.ok}`);
return pingResponse.ok === 1;
}
return false;
} catch (error) {
console.log(error);
return false;
}
}
// Returns the cached mongoose instance, reconnecting when the ping check
// reports the previous connection as inactive.
async init() {
const connectionActive = await this.checkConnection();
if (connectionActive) {
console.log(`[DatabaseService] Already connected, returning instance`);
return this.instance;
}
console.log('[DatabaseService] Previous connection was not active, creating new connection...');
this.instance = await mongoose.connect(...this.options);
const timeId = Date.now();
console.log(`Connection opened ${timeId}`);
console.time(`Connection started at ${timeId}`);
// Log the connection's lifetime when the driver closes it.
this.instance?.connection.on('close', () => {
console.timeEnd(`Connection started at ${timeId}`);
console.log(`Closing connection ${timeId}`);
});
return this.instance;
}
// Example query: ensures a connection exists, then looks a document up
// by its uuid field.
async getData(id: string) {
await this.init();
const response = await Model.findOne({ 'uuid': id });
return response;
}
}
I hope that is enough information. My main question is if my theory of commands causing too many connections is possible and what are exactly commands because every explanation that I found look like is the same than query.
Based on the comment written by Matt I have changed my init function and now my connections are under control.
// Reuse the cached mongoose instance when present; connect only when no
// instance has been created yet (no per-call ping round-trip).
async init() {
  if (this.instance) {
    console.log(`[DatabaseService] Already connected, returning instance`);
    return this.instance;
  }
  console.log('[DatabaseService] Previous connection was not active, creating new connection...');
  const instance = await mongoose.connect(...this.options);
  this.instance = instance;
  const openedAt = Date.now();
  const lifetimeLabel = `Connection started at ${openedAt}`;
  console.log(`Connection opened ${openedAt}`);
  console.time(lifetimeLabel);
  // Report the connection's lifetime when the driver closes it.
  instance.connection.on('close', () => {
    console.timeEnd(lifetimeLabel);
    console.log(`Closing connection ${openedAt}`);
  });
  return this.instance;
}
I have a code in NodeJS responsible to close an http.Server connection and I would like to test the error scenario on the http.Server.close() method.
The problem is to do that, I need to simulate the return of the close method with the err object populated, and I don't know how to do it.
Below you can find my code and I would like to test the line where we can find the code reject(err);
Note: In my integration tests, I'm starting temp HTTP servers to simulate the real scenario. So as far as I understood I need to find a real scenario where the .close method will be rejected by the default implementation.
Thanks.
// Create the HTTP server, delegating every request to this class's handler.
this._httpServer = http.createServer((req: http.IncomingMessage, res: http.ServerResponse) => this.handler(req, res));
...
// Promise wrapper around http.Server#close: fulfills once the server has
// stopped accepting connections, rejects with the error close() reports
// (per the Node docs, e.g. when the server was not open).
disconnect(): Promise<void> {
  return new Promise((resolve, reject) => {
    this._httpServer.close((err) => {
      if (!err) {
        resolve();
        return;
      }
      reject(err);
    });
  });
}
I found the answer.
Based on the official documentation, the unique error scenario is when we are trying to close an already closed server.
So, to make the test work, before calling my disconnect method I only need to close the httpServer (this._httpServer.close()).
Reference: https://nodejs.org/docs/latest-v12.x/api/net.html#net_server_close_callback
I'm running a CouchDB server with docker and I'm trying to POST data through a Node app.
But I'm frequently prompted with a ESOCKETTIMEDOUT error (not always).
Here's the way I'm opening the connection to the DB:
// Remote PouchDB handle pointing at the CouchDB instance running in Docker.
var remoteDB = new PouchDB('http://localhost:5984/dsndatabase', {
ajax: {
cache: true,
// Per-request timeout in milliseconds, raised while chasing ESOCKETTIMEDOUT.
timeout: 40000 // I tried a lot of durations
}
});
And here's the code used to send the data :
exports.sendDatas = function(datas,db, time) {
console.log('> Export vers CouchDB')
db.bulkDocs(datas).then(function () {
return db.allDocs({include_docs: true});
}).then(function (){
var elapsedTime = new Date().getTime() - time;
console.log('> Export terminé en ', elapsedTime, ' ms');
}).catch(function (err) {
console.log(err);
})
};
The error doesn't show up every time but I'm unable to find a pattern.
And, timeout or not, all my data is successfully loaded in my CouchDB !
I've seen a lot of posts on this issue but none of them really answers my question ...
Any idea ?
Okay this seems to be real issue:
https://github.com/pouchdb/pouchdb/issues/3550#issuecomment-75100690
I think you can fix it by stating a reasonably longer timeout value/a retry logic using exponential backoff.
Let me know if that works for you.
I have a web app that is published via ExpressJS on NodeJS, of course. It uses CouchDB as it's data source. I implemented long polling to keep the app in sync at all times between all users. To accomplish this I use the following logic:
User logs into app and an initial long poll request is made to Node via an Express route.
Node in turn makes a long poll request to CouchDB.
When Couch is updated it responds to the request from Node.
Lastly Node responds to the browser.
Simple. What is happening, though, is that when I refresh the browser it freezes up on every fifth refresh. Huh? Very weird. But I can reproduce it over and over, even in my test environment. Every fifth refresh without fail freezes up Node and causes the app to freeze. Rebooting Node fixes the issue.
After much hair pulling I THOUGHT I solved it by changing this:
app.get('/_changes/:since*', security, routes.changes);
To this:
app.get('/_changes/:since*', security, function () { routes.changes });
However, after further testing this is just failing to run routes.changes. So no actual solution. Any ideas why long polling CouchDb from Node would do such a strange thing? On the fifth refresh I can have a break point in node on the first line of my routing code and it never get's hit. However, in the browser I can break on the request to node for long polling and it seems to go out. It's like Node is not accepting the connection for some reason...
Should I be approaching long polling from Node to CouchDB in a different way? I'm using feed=longpoll, should I maybe be doing feed=continuous? If I turn down the changes_timeout in couchdb to 5 seconds it doesn't get rid of the issue, but it does make it easier to cope with since the freezes only last 5 seconds tops. So this would seem to indicate that node can't handle having several outstanding requests to couch. Maybe I will try a continuous feed and see what happens.
// Browser-side long-poll loop: fetch changes since `since` from the Node
// route, process them, then immediately re-poll from the new sequence.
// NOTE(review): the bare "Browser:" line below appears to be a prose
// label from the original post that ended up inside the code block.
self.getChanges = function (since) {
Browser:
$.ajax({
url: "/_changes/" + since,
type: "GET", dataType: "json", cache: false,
success: function (data) {
try {
self.processChanges(data.results);
self.lastSeq(data.last_seq);
// Immediately start the next long poll from the latest sequence.
self.getChanges(self.lastSeq());
self.longPollErrorCount(0);
} catch (e) {
// On a processing error, retry up to 10 times with a 3s delay.
self.longPollErrorCount(self.longPollErrorCount() + 1);
if (self.longPollErrorCount() < 10) {
setTimeout(function () {
self.getChanges(self.lastSeq());
}, 3000);
} else {
alert("You have lost contact with the server. Please refresh your browser.");
}
}
},
error: function (data) {
// Same retry policy for transport-level errors.
self.longPollErrorCount(self.longPollErrorCount() + 1);
if (self.longPollErrorCount() < 10) {
setTimeout(function () {
self.getChanges(self.lastSeq());
}, 3000);
} else {
alert("You have lost contact with the server. Please refresh your browser.");
}
}
});
}
Node:
Routing:
// Express route handler: forwards the browser's long-poll request to the
// CouchDB changes feed and relays the result.
exports.changes = function (req, res) {
var args = {};
args.since = req.params.since;
db.changes(args, function (err, body, headers) {
if (err) {
console.log("Error retrieving changes feed: "+err);
// NOTE(review): res.send(number) is ambiguous in Express; presumably
// res.sendStatus / res.status(...) was intended — confirm.
res.send(err.status_code);
} else {
//send my response... code removed here
}
})
}
Database long poll calls:
// Entry point for the changes feed: when polling from scratch (since == 0)
// first fetch the latest sequence number so we only wait for NEW changes;
// otherwise long-poll from the given sequence directly.
self.changes = function (args, callback) {
  console.log("changes");
  if (args.since == 0) {
    request(self.url + '/work_orders/_changes?descending=true&limit=1', function (err, res, headers) {
      // Defect fix: on a request error `res` is undefined, so
      // JSON.parse(res.body) would throw; report and bail out instead.
      if (err) {
        console.log("Error fetching last_seq: " + err);
        return;
      }
      var body = JSON.parse(res.body);
      var since = body.last_seq;
      console.log("Since change: "+since);
      self.longPoll(since, callback);
    });
  } else {
    self.longPoll(args.since, callback);
  }
}
// Issues the actual changes-feed request against CouchDB and hands the
// raw response body to the callback.
// NOTE(review): the URL uses feed=continuous although the surrounding
// discussion says longpoll; with a buffered request() a continuous feed
// never completes — confirm whether feed=longpoll was intended.
self.longPoll = function (since, callback) {
console.log("about to request with: "+since);
request(self.url + '/work_orders/_changes?feed=continuous&include_docs=true&since=' + since,
function (err, res, headers) {
console.log("finished request.")
if (err) { console.log("Error starting long poll: "+err.reason); return; } //if err send it back
callback(err, res.body);
});
}
Socket.io will automatically fall back to long polling, and doesn't have a problem like the one you are having. So just use that. Also for CouchDB changes use this https://github.com/iriscouch/follow or maybe this one https://npmjs.org/package/changes like the other guy suggested.
Its very bad practice to reinvent things when we have popular modules that already do what you need. There are currently more than 52,000 node modules on https://npmjs.org/. People make a big deal about copying and pasting code. In my mind reinventing basic stuff is even worse than that.
I know with so many modules its hard to know about all of them, so I'm not saying you can never solve the same problem as someone else. But take a look at npmjs.org first and also sites like http://node-modules.com/ which may give better search results.