I am not sure what I am doing wrong.
I have HTML content and want to save it as a PDF. I use html-pdf (from npm) and the download library from http://danml.com/download.html.
When I save directly to a file, or return the PDF as the response, I get the PDF without a problem. But when I call my web service method from a JS function, get a stream/buffer back as the return value and save it with the 'download' library, it does not work.
Here is my code
pdf.create(html, options).toFile('./mypdf.pdf', function (err, res) {
    if (err) return console.log(err);
    console.log(res);
});

pdf.create(html, options).toBuffer(function (err, buffer) {
    if (err) return reject(err);
    return resolve(buffer);
});

//res.setHeader('Content-type', 'application/pdf');
pdf.create(html, options).toStream(function (err, stream) {
    if (err) return res.send(err);
    //res.type('pdf');
    return resolve(stream);// .pipe(res);
});
I can save the content as a PDF and it works fine, but when I try to send the stream or buffer, the resulting page is somehow empty. I opened both PDF files in Notepad and there are some differences: the problem-free one is 44 KB, the other one 78 KB, and the empty one also contains the following lines
%PDF-1.4 1 0 obj << /Title (��) /Creator (��) /Producer (�� Q t 5 .
5 . 1) /CreationDate (D:20190524152156)
endobj
I think the toBuffer or toStream method has a problem in my case, because the stream itself does not look broken: at least I can see that it is a PDF file (no error, the page is just empty).
Anyway, here is my API router
let result = await routerVoucher.CreatePdfStream(req.query.VoucherCode, req.query.AppName);

res.setHeader('Content-type', 'application/pdf');
res.type('pdf');
//result.pipe(res);
res.end(result, 'binary');
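For reference, the piping variant hinted at by the commented-out lines would look roughly like this (a sketch only; the Content-Disposition header is my own addition, not code from the question):
// Sketch: pipe the PDF stream straight into the Express response.
pdf.create(html, options).toStream(function (err, stream) {
    if (err) return res.status(500).send(err);
    res.setHeader('Content-Type', 'application/pdf');
    res.setHeader('Content-Disposition', 'attachment; filename="mypdf.pdf"'); // assumption
    stream.pipe(res);
});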
and here is my js consumer
$.ajax({
    type: "GET",
    url: '/api/vouchers/GetLicensePdf',
    data: data,
    success: function (pdfFile) {
        if (!pdfFile)
            throw new Error('There is nothing to download');
        download(pdfFile, voucherCode + '.pdf', 'application/pdf');
    }
});
I've solved the problem.
First I converted the buffer to a base64 string:
const base64 = buffer.toString('base64')
and then converted the base64 string into a Blob using the following code:
function base64toBlob(base64Data, contentType) {
    contentType = contentType || '';
    var sliceSize = 1024;
    var byteCharacters = atob(base64Data);
    //var byteCharacters = decodeURIComponent(escape(window.atob(base64Data)))
    var bytesLength = byteCharacters.length;
    var slicesCount = Math.ceil(bytesLength / sliceSize);
    var byteArrays = new Array(slicesCount);

    for (var sliceIndex = 0; sliceIndex < slicesCount; ++sliceIndex) {
        var begin = sliceIndex * sliceSize;
        var end = Math.min(begin + sliceSize, bytesLength);

        var bytes = new Array(end - begin);
        for (var offset = begin, i = 0; offset < end; ++i, ++offset) {
            bytes[i] = byteCharacters[offset].charCodeAt(0);
        }
        byteArrays[sliceIndex] = new Uint8Array(bytes);
    }
    return new Blob(byteArrays, { type: contentType });
}
and then I used my download method (from the download.js library) again, as follows:
download(new Blob([base64toBlob(base64PDF,"application/pdf")]),
voucherCode + '.pdf', "application/pdf");
then everything worked fine :)
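For completeness, a rough sketch of the server side under this approach (assuming CreatePdfStream now resolves with the buffer from toBuffer — an assumption, not code shown above):
// Sketch only: return the PDF as a base64 string instead of raw binary.
let buffer = await routerVoucher.CreatePdfStream(req.query.VoucherCode, req.query.AppName);
res.set('Content-Type', 'text/plain');
res.send(buffer.toString('base64'));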
I'm recording the user's screen via WebRTC and then posting video blobs every x seconds using MediaStreamRecorder. On the server side I have an action set up in Sails which saves the blob as a webm file.
The problem is that I can't get it to append the data and create one large webm file. The file size increases as expected when appending, so the data is being appended, but when I go to play the file it will either play only the first second, not play at all, or play but not show the video.
It would be possible to merge the files with ffmpeg, but I'd rather avoid this if at all possible.
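For what it's worth, if merging with ffmpeg ever became acceptable, a rough sketch (assuming ffmpeg is on the PATH and the chunks were saved as chunk-0.webm, chunk-1.webm, ... — hypothetical names) could use the concat demuxer:
// Sketch only: merge saved webm chunks with ffmpeg's concat demuxer.
const fs = require('fs');
const { execFile } = require('child_process');

const chunks = ['chunk-0.webm', 'chunk-1.webm', 'chunk-2.webm']; // hypothetical file names
fs.writeFileSync('list.txt', chunks.map(f => `file '${f}'`).join('\n'));

// '-c copy' avoids re-encoding; if the merged file still won't play, drop it and let ffmpeg re-encode.
execFile('ffmpeg', ['-f', 'concat', '-safe', '0', '-i', 'list.txt', '-c', 'copy', 'merged.webm'], (err) => {
    if (err) return console.error(err);
    console.log('merged.webm written');
});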
Here's the code on the client:
'use strict';

// Polyfill in Firefox.
// See https://blog.mozilla.org/webrtc/getdisplaymedia-now-available-in-adapter-js/
if (typeof adapter != 'undefined' && adapter.browserDetails.browser == 'firefox') {
    adapter.browserShim.shimGetDisplayMedia(window, 'screen');
}

io.socket.post('/processvideo', function(resData) {
    console.log("Response: " + resData);
});

function handleSuccess(stream) {
    const video = document.querySelector('video');
    video.srcObject = stream;

    var mediaRecorder = new MediaStreamRecorder(stream);
    mediaRecorder.mimeType = 'video/webm';

    mediaRecorder.ondataavailable = function (blob) {
        console.log("Sending Data");
        //var rawIO = io.socket._raw;
        //rawIO.emit('some:event', "using native socket.io");
        io.socket.post('/processvideo', {"vidblob": blob}, function(resData) {
            console.log("Response: " + resData);
        });
    };

    mediaRecorder.start(3000);
}

function handleError(error) {
    errorMsg(`getDisplayMedia error: ${error.name}`, error);
}

function errorMsg(msg, error) {
    const errorElement = document.querySelector('#errorMsg');
    errorElement.innerHTML += `<p>${msg}</p>`;
    if (typeof error !== 'undefined') {
        console.error(error);
    }
}

if ('getDisplayMedia' in navigator) {
    navigator.getDisplayMedia({video: true})
        .then(handleSuccess)
        .catch(handleError);
} else {
    errorMsg('getDisplayMedia is not supported');
}
Code on the server:
module.exports = async function processVideo (req, res) {
    var fs = require('fs'),
        path = require('path'),
        upload_dir = './assets/media/uploads',
        output_dir = './assets/media/outputs',
        temp_dir = './assets/media/temp';

    var params = req.allParams();

    if (req.isSocket && req.method === 'POST') {
        _upload(params.vidblob, "test.webm");
        return res.send("Hi There");
    }
    else {
        return res.send("Unknown Error");
    }

    function _upload(file_content, file_name) {
        var fileRootName = file_name.split('.').shift(),
            fileExtension = file_name.split('.').pop(),
            filePathBase = upload_dir + '/',
            fileRootNameWithBase = filePathBase + fileRootName,
            filePath = fileRootNameWithBase + '.' + fileExtension,
            fileID = 2;

        /* Save all of the files as different files. */
        /*
        while (fs.existsSync(filePath)) {
            filePath = fileRootNameWithBase + fileID + '.' + fileExtension;
            fileID += 1;
        }
        fs.writeFileSync(filePath, file_content);
        */

        /* Appends the binary data like you'd expect, but it's not playable. */
        fs.appendFileSync(upload_dir + '/' + 'test.file', file_content);
    }
}
Any help would be greatly appreciated!
I decided this would be difficult to develop and wouldn't really fit the project's requirements, so I decided to build an Electron app instead. Just posting this so I can resolve the question.
I'm trying to populate a database with picture paths and names using Node.js.
What I am trying to do is the following:
- A function sends a list of pictures as Base64 strings.
- Another function receives this list, loops through it, converts each entry back into a picture and gets the path back.
I'm pretty new to Node.js, so I might be doing something really stupid.
Here is the reception code :
app.post('/chatBot/moreinfo/create', function (req, res) {
    returnList = '';
    res.set('Content-Type', 'application/json');

    //IF LIST IS NOT EMPTY
    if (req.body.imgList.length !== 0) {
        const imagePath = '/var/lib/SMImageBank/';
        const regex = /^data:image\/(.*);.*$/i;
        const listePicture = req.body.imgList;

        // LOOPING INTO THE LIST
        req.body.imgList.map(function (element) {
            const file = element;
            const filetype = file.match(regex)[1];
            var picLink2 = '';
            const base64data = file.replace(/^data:image\/.*;base64,/, "");
            const latin1data = new Buffer(base64data, 'base64').toString('latin1');
            const filename = new Date().getTime() + '' + new Date().getMilliseconds() + "." + filetype;

            fs.mkdir(imagePath, () => {
                fs.writeFile(imagePath + filename, latin1data, "latin1", function (err, content) {
                    if (err) {
                        routerLog(req, {'type': 'error', 'content': err});
                        res.sendStatus(500);
                    }
                    else {
                        if (process.env.NODE_ENV === "production")
                            picLink2 = 'http://****.fr/image/' + filename;
                        else if (process.env.NODE_ENV === "test")
                            picLink2 = 'http://dev.****.fr:8010/image/' + filename;
                        else if (process.env.NODE_ENV === "master")
                            picLink2 = 'http://dev.****.fr:8008/image/' + filename;
                        else {
                            picLink2 = 'http://*****.com:8008/image/' + filename;
                        }
                    }
                });
            })

            console.log(picLink2);
            returnList = returnList + ";" + picLink2;
        });
    }

    MoreInfo.create(req.body, function (ret) {
        res.send(ret);
        routerLog(req);
    })
});
What I want to do is access the variable picLink2 from outside the writeFile and mkdir callbacks, so I can populate my returnList on each iteration. Obviously, since Node.js is asynchronous, I can't access the content of picLink2 from outside the fs.writeFile() callback. I know there have been a ton of questions about this, and a lot of the answers say to put the code inside the writeFile()/readFile() callback, but I don't see how I can do that here, since the writeFile() call sits inside a map that is iterating over a list.
I'm new to the asynchronous world and I don't see how I can solve this problem.
Use writeFileSync for a synchronous operation if that doesn't hurt performance.
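A minimal sketch of that inside the existing map loop, reusing the names from the question (the mkdirSync call with { recursive: true } is my own addition so an existing directory is not an error; other error handling omitted):
// Sketch: synchronous writes so picLink2 can be used in the same iteration.
req.body.imgList.map(function (element) {
    // ... same base64 decoding as in the question ...
    fs.mkdirSync(imagePath, { recursive: true }); // 'recursive' ignores "already exists"
    fs.writeFileSync(imagePath + filename, latin1data, 'latin1');
    var picLink2 = 'http://****.fr/image/' + filename; // pick the URL for the current NODE_ENV as before
    returnList = returnList + ';' + picLink2;
});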
I'm making a simple Node.js app and I need to delete the first line in a file. Is there any way to do it? I think it should be possible with fs.write, but how?
Here is a streamed version of removing the first line from a file.
Because it uses streams, you don't need to load the whole file into memory, so it is far more efficient and fast, and it can work on very large files without filling up the memory on your hardware.
var Transform = require('stream').Transform;
var util = require('util');

// Transform streamer to remove first line
function RemoveFirstLine(args) {
    if (! (this instanceof RemoveFirstLine)) {
        return new RemoveFirstLine(args);
    }
    Transform.call(this, args);
    this._buff = '';
    this._removed = false;
}
util.inherits(RemoveFirstLine, Transform);

RemoveFirstLine.prototype._transform = function(chunk, encoding, done) {
    if (this._removed) { // if already removed
        this.push(chunk); // just push through buffer
    } else {
        // collect string into buffer
        this._buff += chunk.toString();
        // check if string has newline symbol
        if (this._buff.indexOf('\n') !== -1) {
            // push to stream, skipping the first line (+ 1 skips past the newline itself)
            this.push(this._buff.slice(this._buff.indexOf('\n') + 1));
            // clear string buffer
            this._buff = null;
            // mark as removed
            this._removed = true;
        }
    }
    done();
};
And use it like so:
var fs = require('fs');

var input = fs.createReadStream('test.txt'); // read file
var output = fs.createWriteStream('test_.txt'); // write file

input // take input
    .pipe(RemoveFirstLine()) // pipe through line remover
    .pipe(output); // save to file
Another way, which is not recommended:
If your files are not large and you don't mind loading them into memory, you can load the file, remove the line and save the file, but it is slower and won't work well on large files.
var fs = require('fs');
var filePath = './test.txt'; // path to file

fs.readFile(filePath, function(err, data) { // read file to memory
    if (!err) {
        data = data.toString(); // stringify buffer
        var position = data.indexOf('\n'); // find position of new line element
        if (position != -1) { // if new line element found
            data = data.substr(position + 1); // cut off everything up to and including the first line
            fs.writeFile(filePath, data, function(err) { // write file
                if (err) { // if error, report
                    console.log(err);
                }
            });
        } else {
            console.log('no lines found');
        }
    } else {
        console.log(err);
    }
});
Here is another way:
const fs = require('fs');
const filePath = './table.csv';
let csvContent = fs.readFileSync(filePath).toString().split('\n'); // read file and convert to array by line break
csvContent.shift(); // remove the first element from array
csvContent = csvContent.join('\n'); // convert array back to string
fs.writeFileSync(filePath, csvContent);
Thanks to @Lilleman's comment, I've made an amendment to the original solution: it requires a 3rd-party module, "line-by-line", and can prevent memory overflow and race conditions while processing very large files.
const fs = require('fs');
const LineReader = require('line-by-line');

const removeLines = function(srcPath, destPath, count, cb) {
    if (count <= 0) {
        return cb();
    }
    var reader = new LineReader(srcPath);
    var output = fs.createWriteStream(destPath);
    var linesRemoved = 0;
    var isFirstLine = true;

    reader.on('line', (line) => {
        if (linesRemoved < count) {
            linesRemoved++;
            return;
        }
        reader.pause();

        var newLine;
        if (isFirstLine) {
            newLine = line;
            isFirstLine = false;
        } else {
            newLine = '\n' + line;
        }

        output.write(newLine, () => {
            reader.resume();
        });
    })
    .on('error', (err) => {
        reader.pause();
        return cb(err);
    })
    .on('close', () => {
        return cb();
    })
}
---------------- original solution below---------------
Inspired by another answer, here is a revised stream version:
const fs = require('fs');
const readline = require('readline');

const removeFirstLine = function(srcPath, destPath, done) {
    var rl = readline.createInterface({
        input: fs.createReadStream(srcPath)
    });
    var output = fs.createWriteStream(destPath);
    var firstRemoved = false;

    rl.on('line', (line) => {
        if (!firstRemoved) {
            firstRemoved = true;
            return;
        }
        output.write(line + '\n');
    }).on('close', () => {
        return done();
    })
}
and it can easily be modified to remove a certain number of lines, by changing 'firstRemoved' into a counter:
var linesRemoved = 0;
...
if(linesRemoved < LINES_TO_BE_REMOVED) {
linesRemoved++;
return;
}
...
Here is a naive solution using the Promise-based file system APIs.
const fs = require('node:fs/promises')
const os = require('node:os')

async function removeLines(path, numLinesToRemove) {
  const data = await fs.readFile(path, { encoding: 'utf-8' })

  const newData = data
    .split(os.EOL) // split data into array of strings
    .slice(numLinesToRemove) // remove first N lines of array
    .join(os.EOL) // join array into a single string

  // overwrite original file with new data
  return fs.writeFile(path, newData)
}
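Usage would then be something like this (assuming a data.txt file next to the script; the name is just an example):
// Remove the first two lines of data.txt in place.
removeLines('./data.txt', 2)
  .then(() => console.log('done'))
  .catch(console.error)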
I need to do some parsing of large (5-10 GB) logfiles in Javascript/Node.js (I'm using Cube).
The logline looks something like:
10:00:43.343423 I'm a friendly log message. There are 5 cats, and 7 dogs. We are in state "SUCCESS".
We need to read each line, do some parsing (e.g. strip out 5, 7 and SUCCESS), then pump this data into Cube (https://github.com/square/cube) using their JS client.
Firstly, what is the canonical way in Node to read in a file, line by line?
It seems to be fairly common question online:
http://www.quora.com/What-is-the-best-way-to-read-a-file-line-by-line-in-node-js
Read a file one line at a time in node.js?
A lot of the answers seem to point to a bunch of third-party modules:
https://github.com/nickewing/line-reader
https://github.com/jahewson/node-byline
https://github.com/pkrumins/node-lazy
https://github.com/Gagle/Node-BufferedReader
However, this seems like a fairly basic task - surely, there's a simple way within the stdlib to read in a textfile, line-by-line?
Secondly, I then need to process each line (e.g. convert the timestamp into a Date object, and extract useful fields).
What's the best way to do this, maximising throughput? Is there some way that won't block on either reading in each line, or on sending it to Cube?
Thirdly - I'm guessing that using string splits and the JS equivalent of contains (indexOf != -1?) will be a lot faster than regexes? Has anybody had much experience parsing massive amounts of text data in Node.js?
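For concreteness, the kind of per-line extraction I mean is roughly this (a sketch only; the field names and the pattern are illustrative, not settled):
// Illustrative only: pull the counts and the state out of a line like the example above.
function parseLine(line) {
    var match = line.match(/There are (\d+) cats, and (\d+) dogs\. We are in state "(\w+)"/);
    if (!match) return null;
    return {
        time: line.slice(0, line.indexOf(' ')), // timestamp string; would become a Date later
        cats: Number(match[1]),
        dogs: Number(match[2]),
        state: match[3]
    };
}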
I searched for a solution to parse very large files (GBs) line by line using a stream. All the third-party libraries and examples did not suit my needs, since they either processed the files not line by line (like 1, 2, 3, 4 ...) or read the entire file into memory.
The following solution can parse very large files, line by line, using stream & pipe. For testing I used a 2.1 GB file with 17,000,000 records. RAM usage did not exceed 60 MB.
First, install the event-stream package:
npm install event-stream
Then:
var fs = require('fs')
    , es = require('event-stream');

var lineNr = 0;

var s = fs.createReadStream('very-large-file.csv')
    .pipe(es.split())
    .pipe(es.mapSync(function(line){

        // pause the readstream
        s.pause();

        lineNr += 1;

        // process line here and call s.resume() when rdy
        // function below was for logging memory usage
        logMemoryUsage(lineNr);

        // resume the readstream, possibly from a callback
        s.resume();
    })
    .on('error', function(err){
        console.log('Error while reading file.', err);
    })
    .on('end', function(){
        console.log('Read entire file.')
    })
);
Please let me know how it goes!
You can use the inbuilt readline package, see docs here. I use stream to create a new output stream.
var fs = require('fs'),
    readline = require('readline'),
    stream = require('stream');

var instream = fs.createReadStream('/path/to/file');
var outstream = new stream;
outstream.readable = true;
outstream.writable = true;

var rl = readline.createInterface({
    input: instream,
    output: outstream,
    terminal: false
});

rl.on('line', function(line) {
    console.log(line);
    //Do your stuff ...
    //Then write to output stream
    rl.write(line);
});
Large files will take some time to process. Do tell if it works.
I really liked @gerard's answer, which actually deserves to be the accepted answer here. I made some improvements:
- Code is in a class (modular)
- Parsing is included
- The ability to resume is exposed to the outside, in case an asynchronous job is chained to reading the CSV, like inserting into a DB or making an HTTP request
- Reading in chunk/batch sizes that the user can declare. I took care of encoding in the stream too, in case you have files in a different encoding.
Here's the code:
'use strict'

const fs = require('fs'),
    util = require('util'),
    stream = require('stream'),
    es = require('event-stream'),
    parse = require("csv-parse"),
    iconv = require('iconv-lite');

class CSVReader {
    constructor(filename, batchSize, columns) {
        this.reader = fs.createReadStream(filename).pipe(iconv.decodeStream('utf8'))
        this.batchSize = batchSize || 1000
        this.lineNumber = 0
        this.data = []
        this.parseOptions = {delimiter: '\t', columns: true, escape: '/', relax: true}
    }

    read(callback) {
        this.reader
            .pipe(es.split())
            .pipe(es.mapSync(line => {
                ++this.lineNumber
                parse(line, this.parseOptions, (err, d) => {
                    this.data.push(d[0])
                })
                if (this.lineNumber % this.batchSize === 0) {
                    callback(this.data)
                }
            })
            .on('error', function(){
                console.log('Error while reading file.')
            })
            .on('end', function(){
                console.log('Read entire file.')
            }))
    }

    continue () {
        this.data = []
        this.reader.resume()
    }
}

module.exports = CSVReader
So basically, here is how you would use it:
let reader = new CSVReader('path_to_file.csv')
reader.read(() => reader.continue())
I tested this with a 35GB CSV file and it worked for me, which is why I chose to build on @gerard's answer. Feedback is welcome.
I used https://www.npmjs.com/package/line-by-line for reading more than 1,000,000 lines from a text file. In this case, RAM usage was about 50-60 MB.
const LineByLineReader = require('line-by-line'),
    lr = new LineByLineReader('big_file.txt');

lr.on('error', function (err) {
    // 'err' contains error object
});

lr.on('line', function (line) {
    // pause emitting of lines...
    lr.pause();

    // ...do your asynchronous line processing..
    setTimeout(function () {
        // ...and continue emitting lines.
        lr.resume();
    }, 100);
});

lr.on('end', function () {
    // All lines are read, file is closed now.
});
The Node.js Documentation offers a very elegant example using the Readline module.
Example: Read File Stream Line-by-Line
const { once } = require('node:events');
const fs = require('fs');
const readline = require('readline');

// Wrapped in an async function so the final `await` is valid in CommonJS.
async function processLineByLine() {
    const rl = readline.createInterface({
        input: fs.createReadStream('sample.txt'),
        crlfDelay: Infinity
    });

    rl.on('line', (line) => {
        console.log(`Line from file: ${line}`);
    });

    await once(rl, 'close');
}

processLineByLine();
Note: we use the crlfDelay option to recognize all instances of CR LF ('\r\n') as a single line break.
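A closely related pattern treats the readline interface as an async iterator (supported and documented in recent Node versions), which avoids the event handlers entirely:
const fs = require('fs');
const readline = require('readline');

async function processLineByLine() {
    const rl = readline.createInterface({
        input: fs.createReadStream('sample.txt'),
        crlfDelay: Infinity
    });

    // rl is async-iterable, so each line can be consumed with for await...of.
    for await (const line of rl) {
        console.log(`Line from file: ${line}`);
    }
}

processLineByLine();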
Apart from reading the big file line by line, you can also read it chunk by chunk. For more, refer to this article.
var fs = require('fs');

var offset = 0;
var chunkSize = 2048;
var chunkBuffer = Buffer.alloc(chunkSize);
var fp = fs.openSync('filepath', 'r');
var bytesRead = 0;
var lines = [];

while ((bytesRead = fs.readSync(fp, chunkBuffer, 0, chunkSize, offset)) !== 0) {
    offset += bytesRead;
    var str = chunkBuffer.slice(0, bytesRead).toString();
    var arr = str.split('\n');

    if (bytesRead === chunkSize) {
        // the last item of arr may not be a full line, leave it to the next chunk
        offset -= arr.pop().length;
    }
    lines.push(...arr);
}
console.log(lines);
I had the same problem. After comparing several modules that seemed to have this feature, I decided to do it myself; it's simpler than I thought.
gist: https://gist.github.com/deemstone/8279565
var fetchBlock = lineByline(filepath, onEnd);
fetchBlock(function(lines, start){ ... }); //lines{array} start{int} lines[0] No.
It keeps the opened file in a closure; the returned fetchBlock() will fetch a block from the file and split it into an array of lines (it also deals with the segment left over from the last fetch).
I've set the block size to 1024 for each read operation. This may have bugs, but the code logic is obvious, try it yourself.
Reading / Writing files using stream with the native nodejs modules (fs, readline):
const fs = require('fs');
const readline = require('readline');

const rl = readline.createInterface({
    input: fs.createReadStream('input.json'),
    output: fs.createWriteStream('output.json')
});

rl.on('line', function(line) {
    console.log(line);

    // Do any 'line' processing if you want and then write to the output file
    this.output.write(`${line}\n`);
});

rl.on('close', function() {
    console.log(`Created "${this.output.path}"`);
});
Based on this question's answers, I implemented a class you can use to read a file synchronously, line by line, with fs.readSync(). You can make it "pause" and "resume" by using a Q promise (jQuery seems to require a DOM, so it can't run with Node.js):
var fs = require('fs');
var Q = require('q');

var lr = new LineReader(filenameToLoad);
lr.open();

var promise;
workOnLine = function () {
    var line = lr.readNextLine();
    promise = complexLineTransformation(line).then(
        function() {console.log('ok');workOnLine();},
        function() {console.log('error');}
    );
}
workOnLine();

complexLineTransformation = function (line) {
    var deferred = Q.defer();
    // ... async call goes here, in callback: deferred.resolve('done ok'); or deferred.reject(new Error(error));
    return deferred.promise;
}

function LineReader (filename) {
    this.moreLinesAvailable = true;
    this.fd = undefined;
    this.bufferSize = 1024*1024;
    this.buffer = new Buffer(this.bufferSize);
    this.leftOver = '';

    this.read = undefined;
    this.idxStart = undefined;
    this.idx = undefined;
    this.lineNumber = 0;
    this._bundleOfLines = [];

    this.open = function() {
        this.fd = fs.openSync(filename, 'r');
    };

    this.readNextLine = function () {
        if (this._bundleOfLines.length === 0) {
            this._readNextBundleOfLines();
        }
        this.lineNumber++;
        var lineToReturn = this._bundleOfLines[0];
        this._bundleOfLines.splice(0, 1); // remove first element (pos, howmany)
        return lineToReturn;
    };

    this.getLineNumber = function() {
        return this.lineNumber;
    };

    this._readNextBundleOfLines = function() {
        var line = "";
        while ((this.read = fs.readSync(this.fd, this.buffer, 0, this.bufferSize, null)) !== 0) { // read next bytes until end of file
            this.leftOver += this.buffer.toString('utf8', 0, this.read); // append to leftOver
            this.idxStart = 0
            while ((this.idx = this.leftOver.indexOf("\n", this.idxStart)) !== -1) { // as long as there is a newline-char in leftOver
                line = this.leftOver.substring(this.idxStart, this.idx);
                this._bundleOfLines.push(line);
                this.idxStart = this.idx + 1;
            }
            this.leftOver = this.leftOver.substring(this.idxStart);
            if (line !== "") {
                break;
            }
        }
    };
}
node-byline uses streams, so I would prefer that one for your huge files.
For your date conversions I would use moment.js.
For maximising your throughput you could think about using a software cluster. There are some nice modules which wrap the node-native cluster module quite well. I like cluster-master from isaacs. E.g. you could create a cluster of x workers which all compute a file.
For benchmarking splits vs regexes use benchmark.js. I haven't tested it until now. benchmark.js is available as a node module.
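A rough, untested sketch of such a comparison with benchmark.js (the sample line and the patterns are just placeholders):
var Benchmark = require('benchmark');
var suite = new Benchmark.Suite();
var line = '10:00:43.343423 We are in state "SUCCESS".'; // placeholder sample line

suite
    .add('indexOf', function () {
        return line.indexOf('SUCCESS') !== -1;
    })
    .add('regex', function () {
        return /state "(\w+)"/.test(line);
    })
    .on('cycle', function (event) {
        console.log(String(event.target));
    })
    .on('complete', function () {
        console.log('Fastest is ' + this.filter('fastest').map('name'));
    })
    .run();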
import * as csv from 'fast-csv';
import * as fs from 'fs';

interface Row {
    [s: string]: string;
}

type RowCallBack = (data: Row, index: number) => object;

export class CSVReader {
    protected file: string;

    protected csvOptions = {
        delimiter: ',',
        headers: true,
        ignoreEmpty: true,
        trim: true
    };

    constructor(file: string, csvOptions = {}) {
        if (!fs.existsSync(file)) {
            throw new Error(`File ${file} not found.`);
        }
        this.file = file;
        this.csvOptions = Object.assign({}, this.csvOptions, csvOptions);
    }

    public read(callback: RowCallBack): Promise<Array<object>> {
        return new Promise<Array<object>>(resolve => {
            const readStream = fs.createReadStream(this.file);
            const results: Array<any> = [];
            let index = 0;
            const csvStream = csv.parse(this.csvOptions).on('data', async (data: Row) => {
                index++;
                results.push(await callback(data, index));
            }).on('error', (err: Error) => {
                console.error(err.message);
                throw err;
            }).on('end', () => {
                resolve(results);
            });
            readStream.pipe(csvStream);
        });
    }
}
import { CSVReader } from '../src/helpers/CSVReader';

(async () => {
    const reader = new CSVReader('./database/migrations/csv/users.csv');
    const users = await reader.read(async data => {
        return {
            username: data.username,
            name: data.name,
            email: data.email,
            cellPhone: data.cell_phone,
            homePhone: data.home_phone,
            roleId: data.role_id,
            description: data.description,
            state: data.state,
        };
    });
    console.log(users);
})();
I have made a node module to read large files (text or JSON) asynchronously.
Tested on large files.
var fs = require('fs')
    , util = require('util')
    , stream = require('stream')
    , es = require('event-stream');

module.exports = FileReader;

function FileReader(){
}

FileReader.prototype.read = function(pathToFile, callback){
    var returnTxt = '';
    var s = fs.createReadStream(pathToFile)
        .pipe(es.split())
        .pipe(es.mapSync(function(line){

            // pause the readstream
            s.pause();

            //console.log('reading line: '+line);
            returnTxt += line;

            // resume the readstream, possibly from a callback
            s.resume();
        })
        .on('error', function(){
            console.log('Error while reading file.');
        })
        .on('end', function(){
            console.log('Read entire file.');
            callback(returnTxt);
        })
    );
};

FileReader.prototype.readJSON = function(pathToFile, callback){
    try{
        this.read(pathToFile, function(txt){callback(JSON.parse(txt));});
    }
    catch(err){
        throw new Error('json file is not valid! '+err.stack);
    }
};
Just save the file as file-reader.js, and use it like this:
var FileReader = require('./file-reader');
var fileReader = new FileReader();
fileReader.readJSON(__dirname + '/largeFile.json', function(jsonObj){/*callback logic here*/});