I'm trying to concatenate multiple files and save the result to a new file using Node.js createWriteStream and createReadStream. I have a small bug: I would like to print each file's name before its contents, but the file names always end up at the top of the output file. Please see my result output below, and if you have any idea why, please help!
Thanks!
test.js
var async = require('async');
var fs = require('fs');
var path = require('path');
var SOURCE_FOLDER = '/tmp/test';
var SOURCE_FILE_PATTERN = /\.json$/;
var REPORT_FILE = path.join(SOURCE_FOLDER, 'output.html');
var writeStream = fs.createWriteStream(REPORT_FILE, {
    flags: 'w',
    encoding: 'UTF-8'
});

var appendReport = function appendReport(file, callback) {
    var readStream = fs.createReadStream(file, {
        flags: 'r',
        encoding: 'UTF-8'
    });
    readStream.pipe(writeStream, {
        end: false
    });
    readStream.on('end', callback);
};
fs.readdir(SOURCE_FOLDER, function (err, files) {
    if (err) {
        console.log(err);
    } else {
        writeStream.write("<html><body><pre>\n");
        async.forEach(files, function (file, callback) {
            var filePath = path.join(SOURCE_FOLDER, file);
            fs.stat(filePath, function (err, stats) {
                if (err) {
                    callback(err);
                } else if (stats.isFile() && file.match(SOURCE_FILE_PATTERN)) {
                    writeStream.write("\n" + filePath);
                    appendReport(filePath, callback);
                } else {
                    callback();
                }
            });
        }, function (err) {
            writeStream.write("\n</pre></body></html>");
        });
    }
});
My Current Result:
# node test.js; cat /tmp/test/output.html
<html><body><pre>
/tmp/test/a.json
/tmp/test/b.jsoncontent A
content B
</pre></body></html>
My Expected Result:
# node test.js; cat /tmp/test/output.html
<html><body><pre>
/tmp/test/a.json
content A
/tmp/test/b.json
content B
</pre></body></html>
The problem is that async.forEach === async.each and async.each() calls the iterator in parallel. What you want is async.eachSeries() instead.
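For illustration, here is the loop from the question with only that change applied (a sketch, not tested; everything else stays the same):
async.eachSeries(files, function (file, callback) {
    var filePath = path.join(SOURCE_FOLDER, file);
    fs.stat(filePath, function (err, stats) {
        if (err) {
            callback(err);
        } else if (stats.isFile() && file.match(SOURCE_FILE_PATTERN)) {
            writeStream.write("\n" + filePath);
            appendReport(filePath, callback);
        } else {
            callback();
        }
    });
}, function (err) {
    writeStream.write("\n</pre></body></html>");
});
Because eachSeries waits for each iteration's callback before starting the next file, each file's contents are written before the next file name.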
Related
I am using the fs module to read .txt file content, but the result is always empty. My .txt file does have content in it. Could anyone give me a hand, please? This is my test code:
var fs = require("fs");
var content = "";

fs.readFile("2.txt", "utf8", function(err, data){
    if (err) {
        return console.log("fail", err);
    }
    content = data;
});

console.log(content);
The content is empty in the console.
You are logging the result too early. You should log it inside the readFile callback.
var fs = require("fs");
var content = "";

fs.readFile("2.txt", "utf8", function(err, data){
    if (err) {
        return console.log("fail", err);
    }
    content = data;
    console.log(content);
});

// The console.log below would be executed right after the readFile call.
// It would not wait for the file to actually be read.
// console.log(content);
Or you can write the same logic like this:
const fs = require('fs');

async function main() {
    try {
        const content = await fs.promises.readFile('2.txt', 'utf8');
        console.log(content);
    } catch (ex) {
        console.trace(ex);
    }
}

main();
This is the code I wrote to minify all the .js files in a directory:
var http = require('http');
var testFolder = './tests/';
var UglifyJS = require("uglify-js");
var fs = require('fs');
var glob = require("glob");
var fillnam="";
hello();
function hello() {
    glob("gen/*.js", function (er, files) {
        //console.log(files);
        for (var i = 0; i < files.length; i++) {
            fillnam = files[i];
            console.log("File Name " + fillnam);
            fs.readFile(fillnam, 'utf8', function (err, data) {
                if (err) {
                    console.log(err);
                }
                console.log(fillnam + " " + data);
                var result = UglifyJS.minify(data);
                var gtemp_file = fillnam.replace(".js", "");
                console.log(gtemp_file);
                fs.writeFile(gtemp_file + ".min.js", result.code, function(err) {
                    if (err) {
                        console.log(err);
                    } else {
                        console.log("File was successfully saved.");
                    }
                });
            });
        }
    });
}

http.createServer(function (req, res) {
    res.writeHead(200, {'Content-Type': 'text/html'});
    res.end('Hello World!');
}).listen(8080);
As a result, a minified .min.js file with the same base name should be produced for each .js file in the same directory. But what I am getting is a single file in which the data from all the files is overwritten. For example, if there are two files in the directory, a.js and b.js, with the contents:
var a=10;var b=20;
var name="stack";
what I'm getting is a single file, a.min.js, with the content:
var a=10tack;
Please help.
You need to collect all the file contents first, concatenate them, and then run UglifyJS.minify on the result to be able to save it as a single file.
Something like this (not fully tested):
const testFolder = './tests/';
const UglifyJS = require("uglify-js");
const fs = require('fs');
const readFile = require('util').promisify(fs.readFile);
const glob = require("glob");
function hello() {
    glob("gen/*.js", async (er, files) => {
        let data = [];
        for (const file of files) {
            const fileData = await readFile(file, {
                encoding: 'utf-8'
            });
            data.push(fileData);
        }
        const uglified = UglifyJS.minify(data.join('\n'));
        fs.writeFile('main.min.js', uglified.code, (err) => {
            if (err) {
                console.log(err);
            }
        });
    });
}
hello();
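If the goal is really one .min.js per source file, as the expected result in the question describes, a sketch along the same lines might look like this (not fully tested; it assumes uglify-js 3.x, where minify() takes the source string and returns an object with error and code properties):
const UglifyJS = require("uglify-js");
const fs = require('fs');
const readFile = require('util').promisify(fs.readFile);
const writeFile = require('util').promisify(fs.writeFile);
const glob = require("glob");

function hello() {
    glob("gen/*.js", async (er, files) => {
        for (const file of files) {
            // read, minify and write each file inside the same iteration,
            // so the file name cannot be overwritten by a later callback
            const data = await readFile(file, { encoding: 'utf-8' });
            const result = UglifyJS.minify(data);
            if (result.error) {
                console.log(result.error);
                continue;
            }
            const target = file.replace(/\.js$/, ".min.js");
            await writeFile(target, result.code);
            console.log("Saved " + target);
        }
    });
}

hello();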
With a little help I've built an S3 uploader using Node.js.
It all works great and the files get there, they're set correctly and have the right permissions, but I'm stumped on how to detect whether the process has finished.
const async = require('async');
const AWS = require('aws-sdk');
const mime = require('mime');
const fs = require('fs');
const path = require("path");
require('dotenv').config();
const uploadDirToS3 = function(uploadPath) {
    // instantiate aws object for s3
    var s3 = new AWS.S3();

    // async version
    function walk(currentDirPath, callback) {
        fs.readdir(currentDirPath, function (err, files) {
            if (err) {
                throw new Error(err);
            }
            files.forEach(function (name) {
                var filePath = path.join(currentDirPath, name);
                var stat = fs.statSync(filePath);
                if (stat.isFile()) {
                    callback(filePath, stat);
                } else if (stat.isDirectory()) {
                    walk(filePath, callback);
                }
            });
        });
    }

    walk(uploadPath, function(filePath) {
        fs.readFile(filePath, function (err, data) {
            if (err) { throw err; }
            // get content-type (html,jpeg,gif,etc...)
            var metaData = mime.getType(filePath);
            // set bucket, key (filename), body (file),
            // public read-only and content-type
            var params = {
                Bucket: process.env.AWS_BUCKET,
                Key: filePath,
                Body: data,
                ACL: 'public-read',
                ContentType: metaData
            };
            // upload file to s3
            s3.putObject(params, function(err, data) {
                if (err) {
                    console.log(err);
                } else {
                    console.log("Successfully uploaded " + filePath);
                }
            });
        });
    });
};
uploadDirToS3("./media/media-1517245218111")
Could it literally be a case of checking whether a callback exists and 'break;'-ing out of the loop?
Any ideas?
You need to use an iterate-over pattern.
When you find a file to copy, increment a counter, and when the S3 copy is done, track with another counter that it has been copied.
When the total found equals the total copied, initiate the callback from the calling function.
function WaterfallOver(list, iterator, callback) {
    // keep track of the index of the next item to be processed
    var nextItemIndex = 0;

    function report() {
        nextItemIndex++;
        // if nextItemIndex equals the number of items in list, then we're done
        if (nextItemIndex === list.length)
            callback();
        else
            // otherwise, call the iterator on the next item
            iterator(list[nextItemIndex], report);
    }

    // instead of starting all the iterations, we only start the 1st one
    iterator(list[0], report);
}
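Applied to the uploader in the question, a rough sketch of that counting idea could replace the walk(uploadPath, ...) call (the counter names are illustrative, and note that this can fire early if the directory walk is still discovering files when the two counters momentarily match):
var totalFound = 0;
var totalUploaded = 0;

walk(uploadPath, function (filePath) {
    totalFound++;
    fs.readFile(filePath, function (err, data) {
        if (err) { throw err; }
        var params = {
            Bucket: process.env.AWS_BUCKET,
            Key: filePath,
            Body: data,
            ACL: 'public-read',
            ContentType: mime.getType(filePath)
        };
        s3.putObject(params, function (err) {
            if (err) {
                return console.log(err);
            }
            totalUploaded++;
            console.log("Successfully uploaded " + filePath);
            // every file found so far has also finished uploading
            if (totalUploaded === totalFound) {
                console.log("Upload finished: " + totalUploaded + " files");
            }
        });
    });
});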
Hope it helps.
I have the following code:
var fs = require("fs");
function getMediaList() {
    var media_copy_list, line_list;
    media_copy_list = [];
    fs.readFile("input.csv", function(err, data) {
        line_list = data.toString('utf-8').trim().split('\n');
        return line_list.forEach(function(file_line) {
            var output_path, source_path, split_list;
            if (file_line.length) {
                split_list = file_line.split(';');
                console.log(split_list[0]);
                if (split_list.length >= 2) {
                    source_path = split_list[0].toString('utf-8').trim();
                    output_path = split_list[1].toString('utf-8').trim();
                    media_copy_list.push({
                        source: source_path,
                        destination: output_path
                    });
                }
            }
        });
    });
}
You can see that I'm filling a list with:
media_copy_list.push({
    source: source_path,
    destination: output_path
});
What I'd like to do is return this list once I have finished reading the input.csv file.
I don't have any issues if I read the file synchronously (I just have to return media_copy_list), but in the asynchronous case I don't know how.
I heard about async.parallel but really don't know how to apply it.
Example of input.csv :
FirstPart;SecondPart
Test/test2;Whatever/example
Just wrap your code inside a promise and resolve it only once you're done. Some suggest callbacks, which do pretty much the same thing, but that pattern is discouraged now; you should really use a promise.
var fs = require("fs");
function getMediaList(file){
    return new Promise(function (resolve, reject) {
        fs.readFile(file, 'utf-8', function(err, data) {
            if (err) {
                return reject(err);
            }
            resolve(data.split('\n').reduce(function(media_copy_list, file_line) {
                var output_path;
                var source_path;
                var split_list;

                file_line = file_line.trim();
                if (file_line.length) {
                    split_list = file_line.split(';');
                    console.log(split_list[0]);
                    if (split_list.length >= 2) {
                        source_path = split_list[0].toString('utf-8').trim();
                        output_path = split_list[1].toString('utf-8').trim();
                        media_copy_list.push({
                            source: source_path,
                            destination: output_path
                        });
                    }
                }
                return media_copy_list;
            }, []));
        });
    });
}
Then, invoke with
getMediaList('input.csv').then(function (mediaList) {
    // ...
}).catch(function (err) {
    console.error(err.stack);
});
Note: bluebird, Q, etc. are quite unnecessary since Node 4.2+. Unless you are using an earlier version of Node, try to avoid them. IMO.
The reason Promises are encouraged is that Node will implement async/await, which will allow you to call this exact same function like:
var mediaList = await getMediaList('input.csv');
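Since await is only valid inside an async function, a minimal usage sketch would be (the wrapper name is arbitrary):
async function run() {
    try {
        var mediaList = await getMediaList('input.csv');
        console.log(mediaList);
    } catch (err) {
        console.error(err.stack);
    }
}

run();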
As noted in the comments, you don't want to return the list from the function. What you should do is include a callback as a parameter to getMediaList and call that callback with your results. I would use async.each to loop through the lines in the file. You can read more about async.each here: https://github.com/caolan/async#each. Here is an example:
var async = require('async');
var fs = require("fs");

function getMediaList(callback) {
    var media_copy_list, line_list;
    media_copy_list = [];
    fs.readFile("input.csv", function(err, data) {
        if (err) {
            return callback(err);
        }
        line_list = data.toString('utf-8').trim().split('\n');
        async.each(line_list, function(file_line, next) {
            var output_path, source_path, split_list;
            if (file_line.length) {
                split_list = file_line.split(';');
                console.log(split_list[0]);
                if (split_list.length >= 2) {
                    source_path = split_list[0].toString('utf-8').trim();
                    output_path = split_list[1].toString('utf-8').trim();
                    media_copy_list.push({
                        source: source_path,
                        destination: output_path
                    });
                }
            }
            next();
        }, function (err) {
            callback(err, media_copy_list);
        });
    });
}
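For completeness, this callback version would be invoked along these lines, mirroring the promise invocation above:
getMediaList(function (err, media_copy_list) {
    if (err) {
        return console.error(err);
    }
    console.log(media_copy_list);
});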
Or you can use promises (bluebird in the case below).
var Promise = require('bluebird'),
    fs = require("fs"),
    line_list,
    media_copy_list = [];

fs.readFile("input.csv", function(err, data) {
    line_list = data.toString('utf-8').trim().split('\n');
    Promise.map(line_list, function(file_line) {
        var output_path, source_path, split_list;
        if (file_line.length) {
            split_list = file_line.split(';');
            if (split_list.length >= 2) {
                source_path = split_list[0].toString('utf-8').trim();
                output_path = split_list[1].toString('utf-8').trim();
                // push each entry instead of overwriting the list
                media_copy_list.push({
                    source: source_path,
                    destination: output_path
                });
            }
        }
    }).then(function() {
        console.log(media_copy_list);
    });
});
Hi, I tried to unzip a file from my C drive and to parse its contents into JavaScript objects.
Here is the code:
var AdmZip = require('adm-zip');
var fs = require('fs'), xml2js = require('xml2js');
var parser = new xml2js.Parser();
var paramdata = 'c:/sample/kusuma.zip';
console.log(paramdata);
var zip = new AdmZip(paramdata);
var zipEntries = zip.getEntries();
var obj = [];
var count = 0;
zipEntries.forEach(function(zipEntry) {
    var len = zipEntries.length;
    console.log(zipEntry.toString());
    console.log(zipEntry.entryName);
    fs.readFile("", function(err, data) {
        console.log(data);
        parser.parseString(data, function(err, result) {
            count++;
            console.log(count);
            obj.push(result);
            if (count === len) {
                console.log(obj);
                res.send(obj);
            }
        });
    });
});
Please check the code and provide me with some more examples.
Well, fs.readFile() is for reading files that are themselves directly on disk, which these aren't.
However, adm-zip is already reading in the contents of the .zip, so you shouldn't need fs. Each zipEntry has getData() and getDataAsync() methods that can be used to retrieve contents.
zipEntries.forEach(function (zipEntry) {
    zipEntry.getDataAsync(function (data) {
        parser.parseString(data, function (err, result) {
            console.log(result);
        });
    });
});
Also, as zipEntries is an Array, you can use .filter() to reduce it to only XML files.
var zipEntries = zip.getEntries().filter(function (zipEntry) {
    return !zipEntry.isDirectory && /\.xml$/.test(zipEntry.entryName);
});
You'll also want to determine len once from the collection rather than inside each iteration. You can also compare against obj.length rather than having to keep count separately:
var len = zipEntries.length;
var obj = [];

zipEntries.forEach(function (zipEntry) {
    zipEntry.getDataAsync(function (data) {
        parser.parseString(data, function (err, result) {
            obj.push(result);
            if (obj.length === len) {
                res.send(obj);
            }
        });
    });
});