meteor observe array server side - node.js

I have a recursive function that asynchronously builds a tree on the server side, and I would like to 'observe' it and have the calling method in Meteor rerun every time there is a change.
I have made a simplified example that builds a tree with a recursive readdir call (in the real application there is a computation that may take several minutes per node, and its results depend on the nodes already explored).
in server/methods.js
var fs = Meteor.npmRequire('fs')
var path = Meteor.npmRequire('path')

var tree = function (dir, r) {
  try {
    fs.readdir(dir, function (error, files) {
      if (files && files.length)
        for (var i = 0; i < files.length; i++) {
          r[i] = { name: files[i], children: [] }
          tree(path.resolve(dir, files[i]), r[i].children)
        }
    })
  } catch (e) { console.log("exception", e) }
}

Meteor.methods({
  'build_tree': function () {
    var r = []
    tree("/tmp/", r)
    return r // Wrong!
  }
})
in client/client.js
Meteor.call('build_tree', function (error, result) {
  console.log(error, result)
})
I have already used futures in other parts of the code based on https://www.discovermeteor.com/patterns/5828399.
But in this case I am somewhat lost, due to
the recursive nature of the server-side code
the fact I want the client-side to update automatically every time the server-side data structure is updated
The only workaround that comes to my mind is to insert progressively the asynchronous results in a 'flat' Mongo collection and reactively rebuild it as a tree on the client side.

I managed to do this by
counting the number of asynchronous computations that have been started and the number that have finished
resolving the future only when those two numbers are equal
relaunching the function every time an asynchronous computation ends
(in case it needs to launch more asynchronous computations or resolve the future)
Future = Meteor.npmRequire('fibers/future')
FS = Meteor.npmRequire('fs')
Path = Meteor.npmRequire('path')

const dir = '/tmp/' // root directory to walk, as in the original example
const all_files = []
const future = new Future()
const to_process = [dir]
let started = 0
let ended = 0

const tree = function () {
  while (to_process.length) {
    let dir = to_process.pop()
    started++
    FS.readdir(dir, function (error, files) {
      if (error) {
        // a readdir error with ENOTDIR means we reached a leaf (a plain file)
        if (error.code == 'ENOTDIR') all_files.push(dir)
      } else if (files && files.length) {
        for (let i = 0, leni = files.length; i < leni; i++) {
          let f = Path.resolve(dir, files[i])
          to_process.push(f)
        }
      }
      ended++
      // relaunch in case this callback queued more work or was the last one
      tree()
    })
  }
  // every started computation has ended and nothing is left to process: resolve
  if (!to_process.length && started == ended)
    future['return']()
}

tree()
future.wait()
It doesn't have the "progressive update" feeling that you get by updating the database and letting the reactivity manage it, since all computations are waiting for that final future['return'](), but the code is simpler and self-contained.

That would indeed be very complicated. First of all, since your tree code runs asynchronously, you need to provide a success callback, resolve a promise, return a future, or something similar, so that you can control when the computation is finished. Then you need to use Futures to defer the return of the Meteor method until you have your result.
But even then I don't see how the server is supposed to know that something has changed.
The only workaround that comes to my mind is to insert progressively the asynchronous results in a 'flat' Mongo collection and reactively rebuild it as a tree on the client side.
This is actually a workable, straightforward solution.
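To sketch what that could look like (names such as TreeNodes, 'tree_nodes' and build_tree_flat are placeholders, not from the original code; fs and path are the same npmRequire'd modules as above): the server inserts one document per discovered node with a parentId, and the client rebuilds the tree reactively from the flat collection.

TreeNodes = new Mongo.Collection('tree_nodes');

if (Meteor.isServer) {
  Meteor.publish('tree_nodes', function () {
    return TreeNodes.find();
  });

  var treeToCollection = function (dir, parentId) {
    fs.readdir(dir, Meteor.bindEnvironment(function (error, files) {
      if (error || !files || !files.length) return;
      files.forEach(function (name) {
        var id = TreeNodes.insert({ name: name, parentId: parentId });
        treeToCollection(path.resolve(dir, name), id);
      });
    }));
  };

  Meteor.methods({
    'build_tree_flat': function () {
      treeToCollection('/tmp/', null); // fire and forget; results stream into the collection
    }
  });
}

if (Meteor.isClient) {
  Meteor.subscribe('tree_nodes');

  var buildTree = function (parentId) {
    return TreeNodes.find({ parentId: parentId }).map(function (node) {
      return { name: node.name, children: buildTree(node._id) };
    });
  };

  Tracker.autorun(function () {
    console.log('current tree:', buildTree(null)); // reruns whenever the collection changes
  });
}

Because the method returns immediately and the documents arrive over DDP as they are inserted, the client's autorun reruns on every new node, which gives the "progressive update" feel.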

Related

Expressjs main loop blocked during intense operation

I have an expressjs server running in which an endpoint init performs an intense operation that takes about 10 seconds on average. During these 10 seconds the main loop is "stuck", making it impossible to send other requests to the expressjs server. I've been googling for a while now but have found nothing that would enable expressjs to handle requests concurrently. It would seem silly if this were not possible. I'm very thankful for any hints or help.
Example code:
routes.js
app.route('/v1/cv/random').get(init);
features/init.js
module.exports = async function init(req, res) {
  try {
    // perform some time consuming operation here
    res.status(201).send(someVar);
  } catch (err) {
    res.status(500).send(`failed to init`).end();
  }
};
It is possible to implement algorithms with long running time in a synchronous manner, for example the Tower of Hanoi:
function move(from, to, via, n) {
  if (n > 1)
    move(from, via, to, n - 1);
  to.unshift(from.shift());
  if (n > 1)
    move(via, to, from, n - 1);
}

app.get("/tower", function(req, res) {
  var a = [];
  for (var i = 0; i < Number(req.query.n); i++) a.push(i);
  var b = [];
  var c = [];
  move(a, b, c, a.length);
  res.end("Done");
});
Invoking GET /tower?n=<N> with large enough <N> will indeed block the main loop of express.
This blocking can be avoided by introducing asynchrony into the algorithm, for example with setTimeout(nextAlgorithmicStep) commands. This puts the nextAlgorithmicStep function in a queue, but the same queue also has room for functions that process concurrent requests:
function tick(from, to, via, n) {
  return new Promise(function(resolve, reject) {
    setTimeout(function() {
      move(from, to, via, n, resolve);
    });
  });
}

async function move(from, to, via, n, resolve) {
  if (n > 1)
    await tick(from, via, to, n - 1);
  to.unshift(from.shift());
  if (n > 1)
    await tick(via, to, from, n - 1);
  resolve();
}

app.get("/tower", async function(req, res) {
  var a = [];
  for (var i = 0; i < Number(req.query.n); i++) a.push(i);
  var b = [];
  var c = [];
  await tick(a, b, c, a.length);
  res.end("Done");
});
With this, you can wait (forever) for the request GET /tower?n=64 to come back, but you can at least still make concurrent requests to the same server. (Simply using Promise or process.nextTick instead of setTimeout is not "asynchronous enough" to allow concurrent requests to be processed in between.)
However, the execution of GET /tower?n=10, which finished "immediately" in the first version, now takes much longer. It would be better to use the setTimeout not on all n levels of recursion, but only on every tenth level or so. You have to find similarly good points for asynchrony in your own long-running algorithm.
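For illustration, a rough sketch of "yield only on every tenth recursion level", reusing the move signature from above (my own untested variation, with a hypothetical maybeTick helper):

async function move(from, to, via, n, depth) {
  depth = depth || 0;
  if (n > 1) await maybeTick(from, via, to, n - 1, depth + 1);
  to.unshift(from.shift());
  if (n > 1) await maybeTick(via, to, from, n - 1, depth + 1);
}

function maybeTick(from, to, via, n, depth) {
  // Go through the timer queue only on every 10th recursion level;
  // all other levels recurse directly, without yielding to the event loop.
  if (depth % 10 === 0) {
    return new Promise(function (resolve) {
      setTimeout(function () {
        resolve(move(from, to, via, n, depth));
      });
    });
  }
  return move(from, to, via, n, depth);
}

// in the route handler: await move(a, b, c, a.length);

Shallow requests like GET /tower?n=10 then run essentially synchronously again, while deep ones still yield regularly so concurrent requests get a turn.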
That's what you can do with a single-threaded Node.js program. But there is an alternative that uses multiple Node.js processes.
app.get("/tower", function(req, res) {
spawn("node", ["tower.js", req.query.n]).stdout.pipe(res);
});
where tower.js is an additional Javascript program:
function move(from, to, via, n) {
  if (n > 1)
    move(from, via, to, n - 1);
  to.unshift(from.shift());
  if (n > 1)
    move(via, to, from, n - 1);
}

var a = [];
for (var i = 0; i < Number(process.argv[2]); i++) a.push(i);
var b = [];
var c = [];
move(a, b, c, a.length);
process.stdout.write("Done");
I found an answer shortly before #Heiko Theißen updated his answer. It is (I think) a similar approach.
I found a way to use child_process and execute everything that a certain file contains by using
const {fork} = require('child_process');
...
module.exports = async function init(req, res) {
  try {
    const childProcess = fork('./path/to/the/script.js');
    childProcess.send({'body': req.body});
    childProcess.on('message', (message) => {
      res.status(201).json({someVar: message}).end();
    });
  } catch (err) {
    res.status(500).send(`failed to init`).end();
  }
};
The script.js looks like
process.on('message', async (message) => {
  // perform a time consuming operation here
  process.send(someVar);
  process.exit();
});

Stop function from being invoked multiple times

I'm in the process of building a file upload component that allows you to pause/resume file uploads.
The standard way to achieve this seems to be to break the file into chunks on the client machine, then send the chunks along with book-keeping information up to the server which can store the chunks into a staging directory, then merge them together when it has received all of the chunks. So, this is what I am doing.
I am using node/express and I'm able to get the files fine, but I'm running into an issue because my merge_chunks function is being invoked multiple times.
Here's my call stack:
router.post('/api/videos',
upload.single('file'),
validate_params,
rename_uploaded_chunk,
check_completion_status,
merge_chunks,
record_upload_date,
videos.update,
send_completion_notice
);
the check_completion_status function is implemented as follows:
/* Recursively check to see if we have every chunk of a file */
var check_completion_status = function (req, res, next) {
  var current_chunk = 1;
  var see_if_chunks_exist = function () {
    fs.exists(get_chunk_file_name(current_chunk, req.file_id), function (exists) {
      if (current_chunk > req.total_chunks) {
        next();
      } else if (exists) {
        current_chunk++;
        see_if_chunks_exist();
      } else {
        res.sendStatus(202);
      }
    });
  };
  see_if_chunks_exist();
};
The file names in the staging directory have the chunk numbers embedded in them, so the idea is to see if we have a file for every chunk number. The function should only next() one time for a given (complete) file.
However, my merge_chunks function is being invoked multiple times (usually between 1 and 4). Logging does reveal that it's only invoked after I've received all of the chunks.
With this in mind, my assumption here is that it's the async nature of the fs.exists function that's causing the issue.
Even though the n'th invocation of check_completion_status may occur before I have all of the chunks, by the time we get to the nth call to fs.exists(), x more chunks may have arrived and been processed concurrently, so the function can keep going and in some cases get to the end and next(). However those chunks that arrived concurrently are also going to correspond to invocations of check_completion_status, which are also going to next() because we obviously have all of the files at this point.
This is causing issues because I didn't account for this when I wrote merge_chunks.
For completeness, here's the merge_chunks function:
var merge_chunks = (function () {
  var pipe_chunks = function (args) {
    args.chunk_number = args.chunk_number || 1;
    if (args.chunk_number > args.total_chunks) {
      args.write_stream.end();
      args.next();
    } else {
      var file_name = get_chunk_file_name(args.chunk_number, args.file_id);
      var read_stream = fs.createReadStream(file_name);
      read_stream.pipe(args.write_stream, {end: false});
      read_stream.on('end', function () {
        // once we're done with the chunk we can delete it and move on to the next one
        fs.unlink(file_name);
        args.chunk_number += 1;
        pipe_chunks(args);
      });
    }
  };

  return function (req, res, next) {
    var out = path.resolve('videos', req.video_id);
    var write_stream = fs.createWriteStream(out);
    pipe_chunks({
      write_stream: write_stream,
      file_id: req.file_id,
      total_chunks: req.total_chunks,
      next: next
    });
  };
}());
Currently, I'm receiving an error because the second invocation of the function is trying to read the chunks that have already been deleted by the first invocation.
What is the typical pattern for handling this type of situation? I'd like to avoid a stateful architecture if possible. Is it possible to cancel pending handlers right before calling next() in check_completion_status?
If you just want to make it work ASAP, I would use a lock (much like a db lock) to lock the resource so that only one of the requests processes the chunks. Simply create a unique id on the client and send it along with the chunks. Then store that unique id in some sort of data structure and look it up before processing. The example below is far from optimal (in fact this map will keep growing, which is bad), but it should demonstrate the concept:
// Create a map (an array would work too) and keep track of the video ids that were processed.
// This map will persist through each request.
var processedVideos = {};

var check_completion_status = function (req, res, next) {
  var current_chunk = 1;
  var see_if_chunks_exist = function () {
    fs.exists(get_chunk_file_name(current_chunk, req.file_id), function (exists) {
      if (processedVideos[req.query.uniqueVideoId]) {
        res.sendStatus(202);
      } else if (current_chunk > req.total_chunks) {
        processedVideos[req.query.uniqueVideoId] = true;
        next();
      } else if (exists) {
        current_chunk++;
        see_if_chunks_exist();
      } else {
        res.sendStatus(202);
      }
    });
  };
  see_if_chunks_exist();
};
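If the growing map is a concern, one option (my own sketch, with a hypothetical clear_processed_flag middleware name) is to drop the entry once the merge has finished, for example by adding a small middleware after merge_chunks in the chain:

var clear_processed_flag = function (req, res, next) {
  // the chunks are gone by now, so a late duplicate request will fail the
  // exists() check in check_completion_status and get a 202 instead of merging again
  delete processedVideos[req.query.uniqueVideoId];
  next();
};

// router.post('/api/videos', ..., merge_chunks, clear_processed_flag, record_upload_date, ...);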

Mongoose promise built in but not working?

Or quite possibly I am doing it wrong, in fact, more than likely I am doing it wrong.
I have a table which contains a "tree" of skills, starting at the root level and possibly as deep as ten levels (only two so far), but I want to return it as one big fat JSON structure, so I want to ask the database for each set of data, build my structure, then ask for the next level.
Of course, if I just send off my requests using mongoose, they will come back at any time, as they are all nice asynchronous calls. Normally a good thing.
Looking at the documentation for Mongoose (using 4.1.1), it seems like it has a promise built in, but whenever I try to use it the API call throws a hissy fit and I get a 500 back.
Here is my simple function:
exports.getSkills = function(req, res) {
  console.log("Will return tree of all skills");
  for (var i = 0; i < 10; i++) {
    var returnData = [];
    console.log("Lets get level " + i);
    var query = Skill.find({level: i}); // The query function
    var promise = query.exec; // The promise?
    promise.then(function(doc) { // Totally blows up at this point
      console.log("Something came back");
      return "OK";
    });
  }
}
The Mongoose documentation on the subject can be found here
http://mongoosejs.com/docs/api.html#promise_Promise
You are assigning the exec function itself instead of calling it. It should be:
var promise = query.exec;
// =>
var promise = query.exec();
If you want to fetch the levels one after the other, chain the promises and remember to reassign p on each iteration (and use let so each callback sees its own i):
exports.getSkills = function(req, res) {
  console.log("Will return tree of all skills");
  var p;
  for (let i = 0; i < 10; i++) {
    if (i == 0) {
      p = Skill.find({level: i}).exec();
    } else {
      p = p.then(function () {
        return Skill.find({level: i}).exec();
      });
    }
    p = p.then(function (data) {
      // deal with this level's data
      return data;
    });
  }
  p.then(function () {
    // deal with response
  });
}
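If the end goal is to answer the request with one big JSON structure once every level has been fetched, a sketch along these lines may be clearer (my own variation; it assumes the same Skill model and Express-style req/res, and that .exec() returns a then-able as it does in Mongoose 4.x):

exports.getSkills = function (req, res) {
  var p = Promise.resolve();
  var levels = [];
  for (let i = 0; i < 10; i++) {
    p = p.then(function () {
      return Skill.find({ level: i }).exec();
    }).then(function (docs) {
      levels.push(docs); // one array of skills per level
    });
  }
  p.then(function () {
    res.json(levels); // still flat per level; assembling the tree is a separate step
  }).catch(function (err) {
    res.status(500).send(err.message);
  });
};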

Using Q to recursively generate a chain with multiple subchains

Basically, I'm trying to set up a tree where each node has a reference to its parent. I've tried the following:
function insert_node(node_data, parent_id) {
  var deferral = Q.defer()
  deferral.promise.then(async_create_node(node_data, parent_id))
  deferral.promise.then(function(parent_node_id) {
    var deferral = Q.defer()
    node_data.children.forEach(function(node) {
      deferral.promise.then(insert_node(generate_node_data(node), parent_node_id))
    })
    return deferral.resolve();
  })
  return deferral.resolve();
}

function insert_all_nodes() {
  var deferral = Q.defer();
  deferral.promise.then(insert_node(top_node));
  deferral.resolve()
}
The issue is that I need it to only create one node at a time, and wait until that node is done being created, and only then move on to the next one. The way it's working now, it will start creating a new node before the first one is finished, which causes problems. I've tried nesting the functions in several ways, and using Q.fcall(). I need the id that async_create_node returns for multiple child nodes, otherwise I'd just use recursion.
The combination of asynchronicity and recursion makes this slightly tricky to get one's mind round.
Firstly, we must be sure that async_create_node() returns a promise, otherwise there's no basis on which to acknowledge its asynchronicity and the whole enterprise would need to be synchronous.
Secondly, let's assume that, at each level (particularly the topmost level), we need to know not only that that level's node has been created but also that the whole tree below it has been created.
Then, you should be able to do something like this :
function insert_node(node_data, parent_id) {
  var dfrd = Q.defer();
  async_create_node(node_data, parent_id).then(function(id) {
    var promises = [];
    node_data.children.forEach(function(node) {
      promises.push(insert_node(generate_node_data(node), id));
    });
    Q.all(promises).then(dfrd.resolve);
  });
  return dfrd.promise;
}

function insert_all_nodes() {
  return insert_node(top_node_data, top_node_id);
}
That's all off the top of my head, untested.
EDIT
To create sibling nodes sequentially, you can build a .then() chain in the forEach loop as follows :
function insert_node(node_data, parent_id) {
  var dfrd = Q.defer();
  async_create_node(node_data, parent_id).then(function(id) {
    var p = Q(); // seed promise (already resolved) on which to build a then() chain
    node_data.children.forEach(function(node) {
      p = p.then(function() {
        // return the promise so the chain waits for this child (and its subtree)
        return insert_node(generate_node_data(node), id);
      });
    });
    p.then(dfrd.resolve);
  });
  return dfrd.promise;
}
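For completeness, a hypothetical usage of the sequential version, assuming top_node_data and top_node_id exist as in the earlier snippet:

insert_all_nodes().then(function () {
  console.log('entire tree inserted, one node at a time');
});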

fs.watch fired twice when I change the watched file

fs.watch('example.xml', function (curr, prev) {
  // on file change we can read the new xml
  fs.readFile('example.xml', 'utf8', function (err, data) {
    if (err) throw err;
    console.dir(data);
    console.log('Done');
  });
});
OUTPUT:
some data
Done X 1
some data
Done X 2
Is it my usage fault, or ..?
The fs.watch api:
is unstable
has known "behaviour" with regard to repeated notifications. Specifically, the Windows case is a result of Windows' design, where a single file modification can result in multiple calls to the Windows API
I make allowance for this by doing the following:
var fsTimeout

fs.watch('file.js', function(e) {
  if (!fsTimeout) {
    console.log('file.js %s event', e)
    fsTimeout = setTimeout(function() { fsTimeout = null }, 5000) // give 5 seconds for multiple events
  }
})
I suggest working with chokidar (https://github.com/paulmillr/chokidar), which is much better than fs.watch:
Quoting its README.md:
Node.js fs.watch:
Doesn't report filenames on OS X.
Doesn't report events at all when using editors like Sublime on OS X.
Often reports events twice.
Emits most changes as rename.
Has a lot of other issues
Does not provide an easy way to recursively watch file trees.
Node.js fs.watchFile:
Almost as bad at event handling.
Also does not provide any recursive watching.
Results in high CPU utilization.
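For reference, basic chokidar usage looks roughly like this (a sketch based on its README; the file path is a placeholder):

const chokidar = require('chokidar');

const watcher = chokidar.watch('example.xml', {
  ignoreInitial: true,     // don't emit events for files that already exist
  awaitWriteFinish: true   // wait until the file stops changing before emitting
});

watcher.on('change', (path) => {
  console.log(`${path} changed`);
});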
If you need to watch your file for changes then you can check out my small library on-file-change. It checks file sha1 hash between fired change events.
Explanation of why we have multiple fired events:
You may notice in certain situations that a single creation event generates multiple Created events that are handled by your component. For example, if you use a FileSystemWatcher component to monitor the creation of new files in a directory, and then test it by using Notepad to create a file, you may see two Created events generated even though only a single file was created. This is because Notepad performs multiple file system actions during the writing process. Notepad writes to the disk in batches that create the content of the file and then the file attributes. Other applications may perform in the same manner. Because FileSystemWatcher monitors the operating system activities, all events that these applications fire will be picked up.
Source
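The hash-comparison idea itself is easy to sketch without the library (my own illustration, not the on-file-change API):

const fs = require('fs');
const crypto = require('crypto');

let lastHash = null;
fs.watch('example.xml', () => {
  fs.readFile('example.xml', (err, data) => {
    if (err) return;
    const hash = crypto.createHash('sha1').update(data).digest('hex');
    if (hash !== lastHash) { // only react when the content really changed
      lastHash = hash;
      console.log('content changed');
    }
  });
});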
My custom solution
I personally like using return to prevent a block of code from running when checking something, so here is my method:
var watching = false;
fs.watch('./file.txt', () => {
  if (watching) return;
  watching = true;
  // do something
  // the timeout is to prevent the script from running twice with short functions
  // the delay can be longer to disable the function for a set time
  setTimeout(() => {
    watching = false;
  }, 100);
});
Feel free to use this example to simplify your code. It may NOT be better than using a module from others, but it works pretty well!
Similar/same problem. I needed to do some stuff with images when they were added to a directory. Here's how I dealt with the double firing:
var fs = require('fs');
var active = false;

fs.watch('directory', function (event, filename) {
  if (filename && event == 'change' && active == false) {
    active = true;
    // do stuff to the new file added
    active = false;
  }
});
It will ignore the second firing until it finishes what it has to do with the new file.
I'm dealing with this issue for the first time, so all of the answers so far are probably better than my solution; however, none of them were 100% suitable for my case, so I came up with something slightly different – I used an XOR operation to flip an integer between 0 and 1, effectively keeping track of and ignoring every second event on the file:
var targetFile = "./watchThis.txt";
var flippyBit = 0;

fs.watch(targetFile, {persistent: true}, function(event, filename) {
  if (event == 'change') {
    if (!flippyBit) {
      fs.readFile(targetFile, "utf8", function(error, data) {
        gotUpdate(data);
      })
    } else {
      console.log("Doing nothing thanks to flippybit.");
    }
    flipBit(); // call flipBit() function
  }
});

// Whatever we want to do when we see a change
function gotUpdate(data) {
  console.log("Got some fresh data:");
  console.log(data);
}

// Toggling this gives us the "every second update" functionality
function flipBit() {
  flippyBit = flippyBit ^ 1;
}
I didn't want to use a time-related function (like jwymanm's answer) because the file I'm watching could hypothetically get legitimate updates very frequently. And I didn't want to use a list of watched files like Erik P suggests, because I'm only watching one file. Jan Święcki's solution seemed like overkill, as I'm working on extremely short and simple files in a low-power environment. Lastly, Bernado's answer made me a little nervous – it would only ignore the second update if it arrived before I'd finished processing the first, and I can't handle that kind of uncertainty. If anyone were to find themselves in this very specific scenario, there might be some merit to the approach I used? If there's anything massively wrong with it please do let me know/edit this answer, but so far it seems to work well?
NOTE: Obviously this strongly assumes that you'll get exactly 2 events per real change. I carefully tested this assumption, obviously, and learned its limitations. So far I've confirmed that:
Modifying a file in Atom editor and saving triggers 2 updates
touch triggers 2 updates
Output redirection via > (overwriting file contents) triggers 2 updates
Appending via >> sometimes triggers 1 update!*
I can think of perfectly good reasons for the differing behaviours but we don't need to know why something is happening to plan for it – I just wanted to stress that you'll want to check for yourself in your own environment and in the context of your own use cases (duh) and not trust a self-confessed idiot on the internet. That being said, with precautions taken I haven't had any weirdness so far.
* Full disclosure, I don't actually know why this is happening, but we're already dealing with unpredictable behaviour with the watch() function so what's a little more uncertainty? For anyone following along at home, more rapid appends to a file seem to cause it to stop double-updating but honestly, I don't really know, and I'm comfortable with the behaviour of this solution in the actual case it'll be used, which is a one-line file that will be updated (contents replaced) like twice per second at the fastest.
The first event is 'change' and the second is 'rename'; we can tell them apart in the listener function:
function(event, filename) {
}
The listener callback gets two arguments (event, filename). event is either 'rename' or 'change', and filename is the name of the file which triggered the event.
// mv sourcefile targetfile
fs.watch(sourcefile_dir, function(event, targetfile) {
  console.log(targetfile, 'is', event)
})
When sourcefile is renamed to targetfile, it will actually fire three events:
null is rename // sourcefile does not exist any more
targetfile is rename
targetfile is change
Notice that if you want to catch all three of these events, watch the directory of the sourcefile.
I sometimes get multiple registrations of the watch event, causing it to fire several times.
I solved it by keeping a list of watched files and avoiding registering the event if the file is already in the list:
var watchlist = {};

function initwatch(fn, callback) {
  if (!watchlist[fn]) {
    watchlist[fn] = true;
    fs.watch(fn).on('change', callback);
  }
}
......
As the other answers say, this has a lot of problems, but I deal with it this way:
var folder = "/folder/path/";
var active = true; // flag control

fs.watch(folder, function (event, filename) {
  if (event === 'rename' && active) { // you can remove this "event" check
    active = false;
    // ... it's just an example
    for (var i = 0; i < 100; i++) {
      console.log(i);
    }
    // ... other stuff, then delete the file
    if (!active) {
      try {
        fs.unlinkSync(folder + filename);
      } catch (err) {
        console.log(err);
      }
      active = true;
    }
  }
});
Hope I can help you...
Easiest solution:
const watch = (path, opt, fn) => {
  var lock = false
  fs.watch(path, opt, function () {
    if (!lock) {
      lock = true
      fn()
      setTimeout(() => lock = false, 1000)
    }
  })
}

watch('/path', { interval: 500 }, function () {
  // ...
})
I was downloading a file with puppeteer and, once the file was saved, I was sending automatic emails. Due to the problem above, I noticed I was sending 2 emails. I solved it by stopping my application with process.exit() and auto-starting it with pm2. Using flags in the code didn't save me.
If anyone has this problem in the future, they can use this solution as well: exit from the program and restart it automatically with monitoring tools.
Here's my simple solution. It works well every time.
// Update obj as file updates
obj = JSON.parse(fs.readFileSync('./file.json', 'utf-8'));

fs.watch('./file.json', () => {
  const data = JSON.parse(fs.readFileSync('./file.json', 'utf-8') || '{}');
  if (Object.entries(data).length > 0) { // This checks fs.watch() isn't false-firing
    obj = data;
    console.log('File actually changed: ', obj)
  }
});
I came across the same issue. If you don't want to trigger multiple times, you can use a debounce function.
fs.watch('example.xml', _.debounce(function (curr, prev) {
  // on file change we can read the new xml
  fs.readFile('example.xml', 'utf8', function (err, data) {
    if (err) throw err;
    console.dir(data);
    console.log('Done');
  });
}, 100));
Debouncing The Observer
A solution I arrived at was that (a) there needs to be a workaround for the problem in question and, (b), there needs to be a solution to ensure multiple rapid Ctrl+s actions do not cause Race Conditions. Here's what I have...
./**/utilities.js (somewhere)
export default {
  ...
  debounce(fn, delay) { // #thxRemySharp https://remysharp.com/2010/07/21/throttling-function-calls/
    var timer = null;
    return function execute(...args) {
      var context = this;
      clearTimeout(timer);
      timer = setTimeout(fn.bind(context, ...args), delay);
    };
  },
  ...
};
./**/file.js (elsewhere)
import utilities from './**/utilities.js'; // somewhere
...
function watch(server) {
  const debounced = utilities.debounce(observeFilesystem.bind(this, server), 1000 * 0.25);
  const observers = new Set()
    .add( fs.watch('./src', debounced) )
    .add( fs.watch('./index.html', debounced) )
    ;
  console.log(`watching... (${observers.size})`);
  return observers;
}

function observeFilesystem(server, type, filename) {
  if (!filename) console.warn(`Transfer Dev Server: filesystem observation made without filename for type ${type}`);
  console.log(`Filesystem event occurred:`, type, filename);
  server.close(handleClose);
}
...
This way, the observation handler that we pass into fs.watch is (in this case) a bound function which gets debounced if multiple calls are made less than 1000 * 0.25 seconds (250ms) apart from one another.
It may be worth noting that I have also devised a pipeline of Promises to help avoid other types of Race Conditions as the code also leverages other callbacks. Please also note the attribution to Remy Sharp whose debounce function has repeatedly proven very useful over the years.
watcher = fs.watch('example.xml', function (curr, prev) {
  watcher.close();
  fs.readFile('example.xml', 'utf8', function (err, data) {
    if (err) throw err;
    console.dir(data);
    console.log('Done');
  });
});
I had a similar problem, but I was also reading the file in the callback, which caused a loop.
This is where I found out how to close the watcher:
How to close fs.watch listener for a folder
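One way to extend that close-the-watcher idea so later changes are still observed (my own sketch): re-create the watcher once the read has finished.

function watchOnce() {
  const watcher = fs.watch('example.xml', function () {
    watcher.close(); // swallow the duplicate event(s) for this change
    fs.readFile('example.xml', 'utf8', function (err, data) {
      if (err) throw err;
      console.dir(data);
      watchOnce(); // start watching again for the next change
    });
  });
}
watchOnce();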
NodeJS does not fire multiple events for a single change; it is the editor you are using that updates the file multiple times.
Editors use the stream API for efficiency: they read and write data in chunks, which causes multiple updates depending on the chunk size and the amount of content. Here is a snippet to test whether fs.watch fires multiple events:
const http = require('http');
const fs = require('fs');
const path = require('path');

const host = 'localhost';
const port = 3000;
const file = path.join(__dirname, 'config.json');

const requestListener = function (req, res) {
  const data = new Date().toString();
  fs.writeFileSync(file, data, { encoding: 'utf-8' });
  res.end(data);
};

const server = http.createServer(requestListener);
server.listen(port, host, () => {
  fs.watch(file, (eventType, filename) => {
    console.log({ eventType });
  });
  console.log(`Server is running on http://${host}:${port}`);
});
I believe a simple solution would be checking the last modified timestamp:
const { stat } = require('fs/promises'); // or fs.promises.stat

let lastModified;
fs.watch(file, (eventType, filename) => {
  stat(file).then(({ mtimeMs }) => {
    if (lastModified !== mtimeMs) {
      lastModified = mtimeMs;
      console.log({ eventType, filename });
    }
  });
});
Please note that you need to use all-sync or all-async methods, otherwise you will have issues.
Update the file in an editor, and you will see that only a single event is logged:
const http = require('http');
const host = 'localhost';
const port = 3000;
const fs = require('fs');
const path = require('path');
const file = path.join(__dirname, 'config.json');

let lastModified;

const requestListener = function (req, res) {
  const data = Date.now().toString();
  fs.writeFileSync(file, data, { encoding: 'utf-8' });
  lastModified = fs.statSync(file).mtimeMs;
  res.end(data);
};

const server = http.createServer(requestListener);
server.listen(port, host, () => {
  fs.watch(file, (eventType, filename) => {
    const mtimeMs = fs.statSync(file).mtimeMs;
    if (lastModified !== mtimeMs) {
      lastModified = mtimeMs;
      console.log({ eventType });
    }
  });
  console.log(`Server is running on http://${host}:${port}`);
});
A few notes on the alternative solutions: storing files for comparison will be memory-inefficient, especially if you have large files; taking file hashes will be expensive; custom flags are hard to keep track of, especially if you want to detect changes made by other applications; and lastly, unsubscribing and re-subscribing requires unnecessary juggling.
If you don't need an instant result, you can use setTimeout to debounce successive events:
let timeoutId;
fs.watch(file, (eventType, filename) => {
  clearTimeout(timeoutId);
  timeoutId = setTimeout(() => {
    console.log({ eventType });
  }, 100);
});
