I'm trying to extract the contents of the variable topPost and place it into const options under url, but I can't seem to get it to work. I'm using the snoowrap/Reddit API and image-downloader.
var subReddit = r.getSubreddit('dankmemes');
var topPost = subReddit.getTop({time: 'hour' , limit: 1,}).map(post => post.url).then(console.log);
var postTitle = subReddit.getTop({time: 'hour' , limit: 1 }).map(post => post.title).then(console.log);
const options = {
url: topPost,
dest: './dank_memes/photo.jpg'
}
async function downloadIMG() {
try {
const { filename, image } = await download.image(options)
console.log(filename) // => /path/to/dest/image.jpg
} catch (e) {
console.error(e)
}
}
The recommended formatting for image-downloader is as follows:
const options = {
url: 'http://someurl.com/image.jpg',
dest: '/path/to/dest'
}
async function downloadIMG() {
try {
const { filename, image } = await download.image(options)
console.log(filename) // => /path/to/dest/image.jpg
} catch (e) {
console.error(e)
}
}
downloadIMG()
So it looks like I need the url formatted as a string (between quotes), but I have no idea how to get the url out of var topPost and place it between those quotes.
Any ideas would be greatly appreciated.
Thanks!
topPost is a Promise, not the final value.
Promises exist to make working with asynchronous data easier. Asynchronous data is data that arrives at some point in the future rather than instantly, which is why Promises have a then method. When a Promise resolves to a value, the then callback is called.
In this case, the library connects to Reddit and downloads data from it, which is not something that can be done instantly, so the code keeps running and the then callback is called later, once the data has finished downloading. So:
var subReddit = r.getSubreddit('dankmemes');
// First we get the top posts, and register a "then" callback to receive all these posts
subReddit.getTop({time: 'hour' , limit: 1,}).map(post => post.url).then((topPost) => {
// When we got the top posts, we connect again to Reddit to get the top posts title.
subReddit.getTop({time: 'hour' , limit: 1 }).map(post => post.title).then((postTitle) => {
// Here you have both topPost and postTitle (both of which will be arrays; you must access the first element)
console.log("This console.log will be called last");
});
});
// The script will continue running at this point, but the script is still connecting to Reddit and downloading the data
console.log("This console.log will be called first");
With this code you have a problem. You first connect to Reddit to get the top post's URL, and then you connect to Reddit again to get the post's title. It's like pressing F5 in between: if a new post is added between those two queries, you will get the wrong title (and you are also consuming double the bandwidth, which is not optimal either). The correct way is to get both the title and the url in the same query. How? Like this:
var subReddit = r.getSubreddit('dankmemes');
// We get the top posts, and map BOTH the url and title
subReddit.getTop({time: 'hour' , limit: 1,}).map(post => {
return {
url: post.url,
title: post.title
};
}).then((topPostUrlAndTitle) => {
// Here you have topPostUrlAndTitle[0].url and topPostUrlAndTitle[0].title
// Note how topPostUrlAndTitle is an array, as you are actually asking for "all top posts" although you are limiting to only one.
});
BUT this is also weird to do. Why don't you just get the post data directly? Like so:
var subReddit = r.getSubreddit('dankmemes');
// We get the top posts
subReddit.getTop({time: 'hour' , limit: 1,}).then((posts) => {
// Here you have posts[0].url and posts[0].title
});
There's a way to get rid of JavaScript callback hell with async/await, but I'm not going to go into it here, because for a newcomer it's a bit difficult to explain why the code is still asynchronous even though it looks synchronous.
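That said, for reference, the same idea as an async/await sketch looks roughly like this (it assumes the same snoowrap client r and the image-downloader package from the question; an illustration, not a drop-in solution):

const download = require('image-downloader');

async function downloadTopPost() {
    try {
        // await pauses here until Reddit has answered, so posts is a real array
        const posts = await r.getSubreddit('dankmemes').getTop({ time: 'hour', limit: 1 });
        const options = {
            url: posts[0].url,               // a plain string now, not a Promise
            dest: './dank_memes/photo.jpg'
        };
        const { filename } = await download.image(options);
        console.log(filename);
    } catch (e) {
        console.error(e);
    }
}

downloadTopPost();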
Is there any way to get the file size while recording?
So I can stop the recording when it hits the maximum size allowed?
Currently there are only two ways to stop recording: stopping it with the stop button, or setting the duration with setRecordingDuration():
function successCallback(stream) {
// RecordRTC usage goes here
var options = {
mimeType: 'audio/mp3', // or video/webm\;codecs=h264 or video/webm\;codecs=vp9
// audioBitsPerSecond: 128000,
// bufferSize: parseInt($(".media-bitrates").val()),
sampleRate: parseInt($(".media-framerates").val()),
// videoBitsPerSecond: parseInt($(".media-bitrates").val()),
bitsPerSecond: parseInt($(".media-bitrates").val()),
// bitsPerSecond: 128000 // if this line is provided, skip above two
};
varStream = stream;
recordRTC = RecordRTC(varStream, options);
audio.srcObject = varStream;
varStream.stop = function () {
this.getAudioTracks().forEach(function (track) {
track.stop();
});
};
}
From what I can see in the code, I can only get the size once recording stops, because the blob is not available during recording.
I found my own answer. This question was actually answered by Muaz Khan in an issue in his repository. It isn't really an issue (bug), so I hadn't looked there. Anyway, if someone is looking for the answer, this might help.
(function looper() {
if (!recorder) {
return;
}
var internal = recorder.getInternalRecorder();
if (internal && internal.getArrayOfBlobs) {
var blob = new Blob(internal.getArrayOfBlobs(), {
type: 'video/webm'
});
// -------------------------------------------------------------
if (blob.size > 100 * 1000) { // if the blob is greater than 100 KB (adjust the threshold as needed)
recorder.stopRecording(callback); // stop recording here
return;
}
// -------------------------------------------------------------
document.querySelector('h1').innerHTML = 'Recording length: ' + bytesToSize(blob.size);
}
setTimeout(looper, 1000);
})();
You can find the answer at the link below:
https://github.com/muaz-khan/RecordRTC/issues/301
Well, that site could go down too.. ;)
Anyway, let's hope not. Let me provide the full code here instead. Explaining is not my strong suit; a working example is the best explanation.
Before recording, MediaStreamRecorder should be set as the recorderType.
The timeSlice parameter should also be passed. I'm not completely sure what it does, but I believe it tells the library to slice the blob every 1000 ms, i.e. once per second.
recorder = RecordRTC(camera, {
recorderType: MediaStreamRecorder,
mimeType: 'video/webm',
timeSlice: 1000 // pass this parameter
});
Then put this code after it. This looper function runs every 1000 ms and checks the current blob size. You can check the size, show it to the user, or stop the recording once it reaches the allowed size. You should still verify the file size on the server; you can't rely on a client-side script alone.
(function looper() {
if(!recorder) {
return;
}
var internal = recorder.getInternalRecorder();
if(internal && internal.getArrayOfBlobs) {
var blob = new Blob(internal.getArrayOfBlobs(), {
type: 'video/webm'
});
document.querySelector('h1').innerHTML = 'Recording length: ' + bytesToSize(blob.size);
}
setTimeout(looper, 1000);
})();
And the last part:
recorder.startRecording();
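Once the size check (or the user) stops the recording, you still have to grab the final blob. A minimal sketch of that last step using RecordRTC's stopRecording/getBlob (the upload function is just a placeholder):

recorder.stopRecording(function () {
    var blob = recorder.getBlob();
    console.log('Final size: ' + bytesToSize(blob.size));
    // e.g. append the blob to a FormData and POST it; uploadToServer is hypothetical
    // uploadToServer(blob);
});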
I have an Electron app which uploads a dropped file to a predefined server with node-ftp. The upload works like a charm, but despite reading a couple of suggestions I cannot figure out how to get information on the actual progress for a progress-bar.
My upload-code so far:
var ftp = new Client();
let uploadfile = fs.createReadStream(f.path);
let newname = uuid(); //some function I use for renaming
ftp.on('ready', function () {
ftp.put(uploadfile, newname, function (err) {
if (err) throw err;
ftp.end();
});
});
ftp.connect({user: 'test', password: 'test'});
I kept stumbling across the suggestion to monitor the 'data' event, but could not figure out how or where to attach to it (as you can see, I'm quite new to JavaScript).
Got it. I found the answer in "streams with percentage complete".
With my code changed to
var ftp = new Client();
let uploadfile = fs.createReadStream(f.path);
let newname = uuid(); //some function I use for renaming
let uploadedSize = 0; // running total of bytes sent so far
ftp.on('ready', function() {
uploadfile.on('data', function(buffer) {
var segmentLength = buffer.length;
uploadedSize += segmentLength;
console.log("Progress:\t" + ((uploadedSize/f.size*100).toFixed(2) + "%"));
});
ftp.put(uploadfile, newname, function(err) {
if (err) throw err;
ftp.end();
});
});
ftp.connect({user: 'test', password: 'test'});
I get the percentage uploaded in console. From here it's only a small step to a graphical output.
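As an illustration of that small step (assuming the Electron renderer page has a <progress id="upload-progress" max="100"> element; the element and its id are made up for this sketch), the same 'data' handler can drive the bar directly:

const progressBar = document.getElementById('upload-progress');

uploadfile.on('data', function (buffer) {
    uploadedSize += buffer.length;
    progressBar.value = (uploadedSize / f.size) * 100;
});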
On the client side you can create a byte count for your upload stream (http://www.experts.exchange.com/questions/24041115/upload-file-on-ftp-with-progressbar-and-time-left.html):
set the lower limit of the progress bar to 0
set the upper limit to the file length of the upload file
feed the progress bar with the byte count
(http://www.stackoverflow.com/questions/24608048/how-do-i-count-bytecount-in-read-method-of-inputstream)
Maybe you can use an npm package like stream-meter (https://www.npmjs.com/package/stream-meter) or progress-stream (https://www.npmjs.com/package/progress-stream) and pipe your file stream through it to feed the progress bar (see the sketch after this list). I am not sure about that because I do not know the internals of those packages. progress-stream exposes a transferred value that would fit exactly.
A very accurate way is to have code on the server that gives feedback to the browser (http://www.stackoverflow.com/questions/8480240/progress-bar-for-iframe-uploads).
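A rough sketch of the progress-stream suggestion from the list above, reusing the variables from the question (f.path, newname, ftp); the field names follow the progress-stream README, but treat this as a starting point rather than tested code:

const progress = require('progress-stream');

const str = progress({
    length: fs.statSync(f.path).size, // total bytes, so a percentage can be computed
    time: 100                         // emit a progress event every 100 ms
});

str.on('progress', function (p) {
    // p.percentage, p.transferred and p.eta are reported by progress-stream
    console.log('Progress: ' + p.percentage.toFixed(2) + '%');
});

ftp.on('ready', function () {
    ftp.put(fs.createReadStream(f.path).pipe(str), newname, function (err) {
        if (err) throw err;
        ftp.end();
    });
});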
I am building a time-lapse camera web application using Raspberry Pi and the Raspberry Pi Camera Module. So far I have built a web application (using NodeJS, Express, AngularJS, and BootStrap 3) that can interact with the Raspberry Camera Module using an open source NodeJS module (https://www.npmjs.org/package/raspicam).
I have a global variable called "setting" that will always change whenever the user changes the camera settings:
var setting = {
mode: "timelapse",
output: "public/images/image%d.jpg", // image1, image2, image3, etc...
encoding: "jpg",
timelapse: 3000, // take a picture every 3 seconds
timeout: 12000 // take a total of 4 pictures over 12 seconds
}
I have three functions in Express that can:
set Camera settings
exports.setCamera = function(req, res) {
setting = {
mode: req.body.mode,
output: req.body.output,
encoding: req.body.encoding,
timelapse: req.body.timelapse,
timeout: req.body.timeout
}
res.json(setting, 200);
console.log('SET CAMERA - ' + JSON.stringify(setting));
}
start the Camera
exports.startCamera = function(req, res) {
camera = new RaspiCam(setting);
camera.on("start", function( err, timestamp ){
console.log("timelapse started at " + timestamp);
});
camera.on("read", function( err, timestamp, filename ){
console.log("timelapse image captured with filename: " + filename);
});
camera.on("exit", function( timestamp ){
console.log("timelapse child process has exited");
res.json(setting, 200);
});
camera.on("stop", function( err, timestamp ){
console.log("timelapse child process has been stopped at " + timestamp);
});
camera.start();
setTimeout(function(){
camera.stop();
}, setting.timeout + 1000);
console.log('START CAMERA - ' + JSON.stringify(setting));
}
stop the Camera
exports.stopCamera = function(req, res) {
camera.stop();
res.json(setting, 200);
console.log('STOP CAMERA - ' + JSON.stringify(setting));
}
As you can see in the "startCamera" function, I am creating a new RaspiCam object called "camera" that is passed the global variable "setting" (which can change at any time). When the camera object is created, I also register "start", "read", "exit", and "stop" handlers on it. The problem is that since I am not making the camera object a global variable, when the user clicks Stop halfway through a session the "stopCamera" function gets called, but it does not know what camera.stop() is and says it is undefined. Is there a way to let the "stopCamera" function know about camera.stop() (which was created in the "startCamera" function)?
Sorry if this is confusing, I don't know how else to describe my problem.. :(
I think you have a problem with how this is architected, but the simple answer to your question is to check whether the camera object has been initialized.
exports.stopCamera = function(req, res) {
if(camera && typeof(camera.stop) == "function") {
camera.stop();
console.log('STOP CAMERA - ' + JSON.stringify(setting));
}
res.json(setting, 200);
}
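For the architectural side, the usual fix is to declare camera once at module scope so that startCamera and stopCamera share the same reference. A rough sketch, trimmed for brevity (the handler registrations stay exactly as in the question):

var camera = null; // module scope: shared by startCamera and stopCamera

exports.startCamera = function (req, res) {
    camera = new RaspiCam(setting); // assign to the shared variable, no "var" here
    // ... register the "start"/"read"/"exit"/"stop" handlers as before ...
    camera.start();
};

exports.stopCamera = function (req, res) {
    if (camera) {
        camera.stop();
        camera = null; // forget the finished session
    }
    res.json(setting, 200);
};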
fs.watch( 'example.xml', function ( curr, prev ) {
// on file change we can read the new xml
fs.readFile( 'example.xml','utf8', function ( err, data ) {
if ( err ) throw err;
console.dir(data);
console.log('Done');
});
});
OUTPUT:
some data
Done X 1
some data
Done X 2
Is this a fault in my usage, or ..?
The fs.watch API:
is unstable
has known "behaviour" with regard to repeated notifications. Specifically, on Windows this is a result of Windows' design, where a single file modification can result in multiple calls to the Windows API.
I make allowance for this by doing the following:
var fsTimeout

fs.watch('file.js', function(e) {
    if (!fsTimeout) {
        console.log('file.js %s event', e)
        fsTimeout = setTimeout(function() { fsTimeout = null }, 5000) // give 5 seconds for multiple events
    }
})
I suggest working with chokidar (https://github.com/paulmillr/chokidar), which is much better than fs.watch (a basic usage sketch follows the quoted list below):
Quoting its README.md:
Node.js fs.watch:
Doesn't report filenames on OS X.
Doesn't report events at all when using editors like Sublime on OS X.
Often reports events twice.
Emits most changes as rename.
Has a lot of other issues
Does not provide an easy way to recursively watch file trees.
Node.js fs.watchFile:
Almost as bad at event handling.
Also does not provide any recursive watching.
Results in high CPU utilization.
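A basic chokidar usage sketch based on its README (the file name is just an example):

const chokidar = require('chokidar');

// chokidar de-duplicates and normalises events across platforms
chokidar.watch('example.xml').on('change', (path) => {
    console.log(`${path} changed`);
});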
If you need to watch your file for changes, then you can check out my small library on-file-change. It checks the file's sha1 hash between fired change events (a generic sketch of that hash-comparison idea follows the quote below).
Explanation of why we have multiple fired events:
You may notice in certain situations that a single creation event generates multiple Created events that are handled by your component. For example, if you use a FileSystemWatcher component to monitor the creation of new files in a directory, and then test it by using Notepad to create a file, you may see two Created events generated even though only a single file was created. This is because Notepad performs multiple file system actions during the writing process. Notepad writes to the disk in batches that create the content of the file and then the file attributes. Other applications may perform in the same manner. Because FileSystemWatcher monitors the operating system activities, all events that these applications fire will be picked up.
Source
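The hash-comparison idea can be sketched with Node's built-in crypto module (this is a generic illustration of the technique, not the library's internal code):

const fs = require('fs');
const crypto = require('crypto');

let lastHash = null;

fs.watch('example.xml', () => {
    const content = fs.readFileSync('example.xml');
    const hash = crypto.createHash('sha1').update(content).digest('hex');
    if (hash === lastHash) return; // duplicate event: the content did not actually change
    lastHash = hash;
    console.log('File content really changed');
});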
My custom solution
I personally like using return to stop a block of code from running when a check fails, so here is my method:
var watching = false;

fs.watch('./file.txt', () => {
    if (watching) return;
    watching = true;

    // do something

    // the timeout is to prevent the script from running twice for short functions
    // the delay can be longer to disable the function for a set time
    setTimeout(() => {
        watching = false;
    }, 100);
});
Feel free to use this example to simplify your code. It may NOT be better than using a module from others, but it works pretty well!
Similar/same problem. I needed to do some stuff with images when they were added to a directory. Here's how I dealt with the double firing:
var fs = require('fs');
var active = false;

fs.watch('directory', function (event, filename) {
    if (filename && event == 'change' && active == false) {
        active = true;
        // do stuff to the new file added
        active = false;
    }
});
It will ignore the second firing until it finishes what it has to do with the new file.
I'm dealing with this issue for the first time, so all of the answers so far are probably better than my solution, however none of them were 100% suitable for my case so I came up with something slightly different – I used an XOR operation to flip an integer between 0 and 1, effectively keeping track of and ignoring every second event on the file:
var targetFile = "./watchThis.txt";
var flippyBit = 0;
fs.watch(targetFile, {persistent: true}, function(event, filename) {
if (event == 'change'){
if (!flippyBit) {
fs.readFile(targetFile, "utf8", function(error, data) {
gotUpdate(data);
})
} else {
console.log("Doing nothing thanks to flippybit.");
}
flipBit(); // call flipBit() function
}
});
// Whatever we want to do when we see a change
function gotUpdate(data) {
console.log("Got some fresh data:");
console.log(data);
}
// Toggling this gives us the "every second update" functionality
function flipBit() {
flippyBit = flippyBit ^ 1;
}
I didn't want to use a time-related function (like jwymanm's answer) because the file I'm watching could hypothetically get legitimate updates very frequently. And I didn't want to use a list of watched files like Erik P suggests, because I'm only watching one file. Jan Święcki's solution seemed like overkill, as I'm working on extremely short and simple files in a low-power environment. Lastly, Bernado's answer made me a little nervous – it would only ignore the second update if it arrived before I'd finished processing the first, and I can't handle that kind of uncertainty. If anyone were to find themselves in this very specific scenario, there might be some merit to the approach I used? If there's anything massively wrong with it please do let me know/edit this answer, but so far it seems to work well?
NOTE: Obviously this strongly assumes that you'll get exactly 2 events per real change. I carefully tested this assumption, obviously, and learned its limitations. So far I've confirmed that:
Modifying a file in Atom editor and saving triggers 2 updates
touch triggers 2 updates
Output redirection via > (overwriting file contents) triggers 2 updates
Appending via >> sometimes triggers 1 update!*
I can think of perfectly good reasons for the differing behaviours but we don't need to know why something is happening to plan for it – I just wanted to stress that you'll want to check for yourself in your own environment and in the context of your own use cases (duh) and not trust a self-confessed idiot on the internet. That being said, with precautions taken I haven't had any weirdness so far.
* Full disclosure, I don't actually know why this is happening, but we're already dealing with unpredictable behaviour with the watch() function so what's a little more uncertainty? For anyone following along at home, more rapid appends to a file seem to cause it to stop double-updating but honestly, I don't really know, and I'm comfortable with the behaviour of this solution in the actual case it'll be used, which is a one-line file that will be updated (contents replaced) like twice per second at the fastest.
The first event is 'change' and the second is 'rename'.
We can tell the difference in the listener function:
function(event, filename) {
}
The listener callback gets two arguments (event, filename). event is either 'rename' or 'change', and filename is the name of the file which triggered the event.
// mv sourcefile targetfile
fs.watch( sourcefile_dir, function(event, targetfile) {
    console.log( targetfile, 'is', event)
})
When sourcefile is renamed to targetfile, it will in fact fire three events:
null is rename // sourcefile does not exist any more
targetfile is rename
targetfile is change
Notice that if you want to catch all three of these events, you should watch the directory of sourcefile.
I sometimes get multiple registrations of the watch event, causing it to fire several times.
I solved it by keeping a list of watched files and avoiding registering the event if the file is already in the list:
var watchlist = {};

function initwatch(fn, callback) {
    if (!watchlist[fn]) {
        watchlist[fn] = true;
        fs.watch(fn).on('change', callback);
    }
}
......
As other answers have said, this has a lot of quirks, but I deal with it this way:
var folder = "/folder/path/";
var active = true; // flag control
fs.watch(folder, function (event, filename) {
if(event === 'rename' && active) { //you can remove this "check" event
active = false;
// ... it's just an example
for (var i = 0; i < 100; i++) {
console.log(i);
}
// ... other stuff, then delete the file
if(!active){
try {
fs.unlinkSync(folder + filename);
} catch(err) {
console.log(err);
}
active = true
}
}
});
Hope this helps...
Easiest solution:
const watch = (path, opt, fn) => {
var lock = false
fs.watch(path, opt, function () {
if (!lock) {
lock = true
fn()
setTimeout(() => lock = false, 1000)
}
})
}
watch('/path', { interval: 500 }, function () {
// ...
})
I was downloading a file with Puppeteer and, once the file was saved, I was sending automatic emails. Due to the problem above, I noticed I was sending 2 emails. I solved it by stopping my application with process.exit() and auto-restarting it with pm2. Using flags in the code didn't save me.
If anyone has this problem in the future, they can use this solution as well: exit the program and restart it automatically with a monitoring tool.
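A rough sketch of that approach (sendEmail and the file path are hypothetical; pm2, started with pm2 start app.js, restarts the process automatically by default when it exits):

const fs = require('fs');

const downloadedFile = './downloads/report.pdf'; // placeholder path

fs.watch(downloadedFile, () => {
    // sendEmail is a hypothetical async function; wait for it before exiting
    sendEmail(downloadedFile).then(() => process.exit(0));
});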
Here's my simple solution. It works well every time.
// Update obj as file updates
obj = JSON.parse(fs.readFileSync('./file.json', 'utf-8'));
fs.watch('./file.json', () => {
const data = JSON.parse(fs.readFileSync('./file.json', 'utf-8') || '{}');
if(Object.entries(data).length > 0) { // This checks fs.watch() isn't false-firing
obj = data;
console.log('File actually changed: ', obj)
}
});
I came across the same issue. If you don't want to trigger multiple times, you can use a debounce function.
const _ = require('lodash'); // _.debounce comes from lodash (or a similar utility library)

fs.watch( 'example.xml', _.debounce(function ( curr, prev ) {
// on file change we can read the new xml
fs.readFile( 'example.xml','utf8', function ( err, data ) {
if ( err ) throw err;
console.dir(data);
console.log('Done');
});
}, 100));
Debouncing The Observer
A solution I arrived at was that (a) there needs to be a workaround for the problem in question, and (b) there needs to be a way to ensure that multiple rapid Ctrl+S actions do not cause race conditions. Here's what I have...
./**/utilities.js (somewhere)
export default {
...
debounce(fn, delay) { // #thxRemySharp https://remysharp.com/2010/07/21/throttling-function-calls/
var timer = null;
return function execute(...args) {
var context = this;
clearTimeout(timer);
timer = setTimeout(fn.bind(context, ...args), delay);
};
},
...
};
./**/file.js (elsewhere)
import utilities from './**/utilities.js'; // somewhere
...
function watch(server) {
const debounced = utilities.debounce(observeFilesystem.bind(this, server), 1000 * 0.25);
const observers = new Set()
.add( fs.watch('./src', debounced) )
.add( fs.watch('./index.html', debounced) )
;
console.log(`watching... (${observers.size})`);
return observers;
}
function observeFilesystem(server, type, filename) {
if (!filename) console.warn(`Transfer Dev Server: filesystem observation made without filename for type ${type}`);
console.log(`Filesystem event occurred:`, type, filename);
server.close(handleClose);
}
...
This way, the observation-handler that we pass into fs.watch is (in this case) a bound function which gets debounced if multiple calls are made less than 1000 * 0.25 seconds (250 ms) apart from one another.
It may be worth noting that I have also devised a pipeline of Promises to help avoid other types of Race Conditions as the code also leverages other callbacks. Please also note the attribution to Remy Sharp whose debounce function has repeatedly proven very useful over the years.
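The promise-pipeline idea mentioned above can be sketched generically: every filesystem event is queued onto one chain, so handlers never overlap even if events arrive in rapid bursts (an illustration of the pattern, not the author's actual code; handleChange is a placeholder):

import fs from 'fs';

let pipeline = Promise.resolve();

function enqueue(task) {
    // each task starts only after the previous one has settled
    pipeline = pipeline.then(task).catch((err) => console.error(err));
}

fs.watch('./src', (type, filename) => {
    enqueue(() => handleChange(type, filename)); // handleChange: placeholder async handler
});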
const watcher = fs.watch( 'example.xml', function ( curr, prev ) {
watcher.close();
fs.readFile( 'example.xml','utf8', function ( err, data ) {
if ( err ) throw err;
console.dir(data);
console.log('Done');
});
});
I had a similar problem, but I was also reading the file in the callback, which caused a loop.
This is where I found out how to close the watcher:
How to close fs.watch listener for a folder
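If you need to keep watching after handling a change, a variation on the same idea is to close the watcher, do the work, and then re-register it (a sketch of that pattern, not code from the linked answer):

const fs = require('fs');

function watchOnce() {
    const watcher = fs.watch('example.xml', () => {
        watcher.close(); // swallow the duplicate events that follow
        fs.readFile('example.xml', 'utf8', (err, data) => {
            if (err) throw err;
            console.dir(data);
            watchOnce(); // re-arm the watcher for the next real change
        });
    });
}

watchOnce();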
NodeJS does not fire multiple events for a single change; it is the editor you are using that updates the file multiple times.
Editors use the stream API for efficiency: they read and write data in chunks, which causes multiple updates depending on the chunk size and the amount of content. Here is a snippet to test whether fs.watch fires multiple events:
const http = require('http');
const fs = require('fs');
const path = require('path');
const host = 'localhost';
const port = 3000;
const file = path.join(__dirname, 'config.json');
const requestListener = function (req, res) {
const data = new Date().toString();
fs.writeFileSync(file, data, { encoding: 'utf-8' });
res.end(data);
};
const server = http.createServer(requestListener);
server.listen(port, host, () => {
fs.watch(file, (eventType, filename) => {
console.log({ eventType });
});
console.log(`Server is running on http://${host}:${port}`);
});
I believe a simple solution would be checking for the last modified timestamp:
const { stat } = require('fs/promises'); // promise-based stat used below

let lastModified;
fs.watch(file, (eventType, filename) => {
stat(file).then(({ mtimeMs }) => {
if (lastModified !== mtimeMs) {
lastModified = mtimeMs;
console.log({ eventType, filename });
}
});
});
Please note that you need to use all-sync or all-async methods, otherwise you will have issues.
Update the file in an editor and you will see that only a single event is logged:
const http = require('http');
const host = 'localhost';
const port = 3000;
const fs = require('fs');
const path = require('path');
const file = path.join(__dirname, 'config.json');
let lastModified;
const requestListener = function (req, res) {
const data = Date.now().toString();
fs.writeFileSync(file, data, { encoding: 'utf-8' });
lastModified = fs.statSync(file).mtimeMs;
res.end(data);
};
const server = http.createServer(requestListener);
server.listen(port, host, () => {
fs.watch(file, (eventType, filename) => {
const mtimeMs = fs.statSync(file).mtimeMs;
if (lastModified !== mtimeMs) {
lastModified = mtimeMs;
console.log({ eventType });
}
});
console.log(`Server is running on http://${host}:${port}`);
});
A few notes on the alternative solutions: storing files for comparison will be memory-inefficient, especially if you have large files; taking file hashes will be expensive; custom flags are hard to keep track of, especially if you are going to detect changes made by other applications; and lastly, unsubscribing and re-subscribing requires unnecessary juggling.
If you don't need an instant result, you can use setTimeout to debounce successive events:
let timeoutId;
fs.watch(file, (eventType, filename) => {
clearTimeout(timeoutId);
timeoutId = setTimeout(() => {
console.log({ eventType });
}, 100);
});