How to read video frames directly into memory with Node.js?

What I am trying to do is take a video, divide it into frames, and pass these frames to a model to detect objects in each frame. The problem is that the extraction process takes so much time, and I don't need the frames on my disk.

ffmpeg-stream offers stream capabilities, so there is no need to write to a file.
It is also possible to use ffmpeg directly and spawn a new child process. Its .stdout property is a readable stream, and the chunks can be read on the data event.
const fs = require("fs");
const tf = require("@tensorflow/tfjs-node");

const logStream = fs.createWriteStream('./logFile.log');
const spawnProcess = require('child_process').spawn,
  ffmpeg = spawnProcess('ffmpeg', [
    '-i', 'videofile.mp4',
    '-vcodec', 'png',
    '-f', 'rawvideo',
    '-s', 'h*w', // size of one frame, e.g. '1280x720'
    'pipe:1'
  ]);
ffmpeg.stderr.pipe(logStream); // for debugging

let i = 0;
ffmpeg.stdout.on('data', (data) => {
  try {
    console.log(tf.node.decodeImage(data).shape);
    console.log(`${++i} frames read`);
    // dispose all tensors
  } catch (e) {
    console.log(e);
  }
});

ffmpeg.on('close', function (code) {
  console.log('child process exited with code ' + code);
});
Decoding the image is wrapped in a try/catch block to prevent the error raised when a chunk does not correspond to a full frame.
More robust code that avoids decoding chunks that do not correspond to complete images would be the following:
const { Transform } = require("stream");

class ExtractFrames extends Transform {
  constructor(delimiter) {
    super({ readableObjectMode: true });
    this.delimiter = Buffer.from(delimiter, "hex");
    this.buffer = Buffer.alloc(0);
  }

  _transform(data, enc, cb) {
    // Add new data to buffer
    this.buffer = Buffer.concat([this.buffer, data]);
    const start = this.buffer.indexOf(this.delimiter);
    if (start < 0) return cb(); // there's no frame data at all
    const end = this.buffer.indexOf(
      this.delimiter,
      start + this.delimiter.length,
    );
    if (end < 0) return cb(); // we haven't got the whole frame yet
    this.push(this.buffer.slice(start, end)); // emit a frame
    this.buffer = this.buffer.slice(end); // remove frame data from buffer
    if (start > 0) console.error(`Discarded ${start} bytes of invalid data`);
    cb();
  }

  _flush(callback) {
    // push remaining buffer to readable stream
    callback(null, this.buffer);
  }
}
const fs = require("fs");
const tf = require("@tensorflow/tfjs-node");

const logStream = fs.createWriteStream('./logFile.log');
const spawnProcess = require('child_process').spawn,
  ffmpeg = spawnProcess('ffmpeg', [
    '-i', 'generique.mp4',
    '-vcodec', 'mjpeg',
    '-f', 'rawvideo',
    '-s', '420x360', // size of one frame
    'pipe:1'
  ]);
ffmpeg.stderr.pipe(logStream); // for debugging

let i = 0;
ffmpeg.stdout
  .pipe(new ExtractFrames("FFD8FF")) // JPEG start-of-image marker
  .on('data', (data) => {
    try {
      console.log(tf.node.decodeImage(data).shape);
      console.log(`${++i} frames read`);
      // dispose all tensors
    } catch (e) {
      console.log(e);
    }
  });

ffmpeg.on('close', function (code) {
  console.log('child process exited with code ' + code);
});
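A side note on the "dispose all tensors" comment: without disposal, each decoded frame's tensor stays in memory. A minimal sketch of one way to handle it (my addition, assuming the same ffmpeg process and ExtractFrames class as above), wrapping the synchronous decode in tf.tidy:

ffmpeg.stdout
  .pipe(new ExtractFrames("FFD8FF"))
  .on('data', (data) => {
    // tf.tidy disposes every tensor created inside the callback once it returns
    tf.tidy(() => {
      const frame = tf.node.decodeImage(data);
      console.log(frame.shape);
    });
  });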
Though the above code works, it will still fill up memory quickly. Separating the frame extraction from the data processing itself will help.
async function* frames() {
  let resolve;
  let promise = new Promise(r => resolve = r);
  let bool = true;
  ffmpeg.stdout.pipe(new ExtractFrames("FFD8FF")).on('data', data => {
    resolve(data);
    promise = new Promise(r => resolve = r);
  });
  ffmpeg.on('close', function (code) {
    bool = false;
    console.log(code);
  });
  while (bool) {
    const data = await promise;
    yield data;
  }
}

(async () => {
  // data processing
  // possibly create tf.dataset for training
  for await (const data of frames()) {
    console.log(tf.node.decodeImage(data).shape);
    console.log(data);
  }
})();
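One caveat with the promise-swapping pattern above (my observation): if ffmpeg emits several frames before the consumer awaits again, all but the latest are silently dropped. A hedged alternative sketch that queues frames instead, again assuming the same ffmpeg process and ExtractFrames class:

async function* framesQueued() {
  const queue = [];
  let notify = () => {};
  let done = false;
  ffmpeg.stdout.pipe(new ExtractFrames("FFD8FF")).on('data', data => {
    queue.push(data); // keep every frame until the consumer is ready
    notify();
  });
  ffmpeg.on('close', () => { done = true; notify(); });
  while (!done || queue.length > 0) {
    if (queue.length === 0) {
      await new Promise(r => notify = r); // wait for the next frame or for close
    } else {
      yield queue.shift();
    }
  }
}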

Related

NodeJS SpeechRecorder pipe to child process ffmpeg encoder - dest.on is not a function

Trying to make a utility to help me with recording words for something. Unfortunately getting stuck on the basics.
The error I get when I hit the s key and talk enough to fill the buffer is:
terminate called after throwing an instance of 'Napi::Error'
what(): dest.on is not a function
[1] 2298218 IOT instruction (core dumped) node record.js
Code is below. I can write the wav file and then encode that, but I'd rather not have an intermediate file unless there is no way around it, and I can't imagine that is the case.
const { spawn } = require('child_process');
const { writeFileSync } = require('fs');
const readline = require('readline');
const { SpeechRecorder } = require('speech-recorder');
const { WaveFile } = require('wavefile');

let paused = true;

const ffmpeg = spawn('ffmpeg', [
  // '-f', 's16', // input format
  // '-sample_rate', '16000',
  '-i', '-', // input source
  '-c:a', 'libvorbis', // audio codec
  'output.ogg', // output file
  '-y'
]);

let buffer = [];
const recorder = new SpeechRecorder({
  onAudio: ({ audio, speech }) => {
    //if(speech) {
    for (let i = 0; i < audio.length; i++) {
      buffer.push(audio[i]);
    }
    if (buffer.length >= 16000 * 5) {
      console.log('piping to ffmpeg for output');
      let wav = new WaveFile();
      wav.fromScratch(1, 16000, "16", buffer);
      //writeFileSync('output.wav',wav.toBuffer());
      ffmpeg.stdin.pipe(buffer, { end: false }); // this is the line that throws: pipe() expects a destination stream, not an array
    }
    //}
  }
});

// listen for keypress events
readline.emitKeypressEvents(process.stdin);
process.stdin.setRawMode(true);
process.stdin.on('keypress', (str, key) => {
  if (key.name === 's') {
    // pause or resume recording
    paused = !paused;
    if (paused) { recorder.stop(); } else { recorder.start(); }
  } else if (key.ctrl && key.name === 'c') {
    // exit program
    ffmpeg.kill('SIGINT');
    process.exit();
  }
});
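No answer is included in this excerpt, but here is a hedged sketch of one fix (my addition, untested, reusing the ffmpeg, WaveFile, and buffer variables from the code above): pipe() connects a readable stream to a writable one, so the encoded bytes should instead be written to ffmpeg's stdin, which is already a writable stream:

if (buffer.length >= 16000 * 5) {
  const wav = new WaveFile();
  wav.fromScratch(1, 16000, "16", buffer);
  // write the encoded WAV bytes straight into ffmpeg's stdin (a Writable stream)
  ffmpeg.stdin.write(wav.toBuffer());
  buffer = []; // start accumulating the next chunk
  // when recording is finished, call ffmpeg.stdin.end() so ffmpeg can finalize output.ogg
}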

Simple buffering duplex stream in Node - how to?

I'm trying to implement a duplex stream with buffering capabilities.
It should accumulate chunks of data until there are enough of them, and only then send them further.
It can be used, for example, when playing streamed audio/video data: one doesn't simply get frames in time, right?
Below is my silly attempt to create such a buffering duplex stream. There's a source stream, which sends x\n characters to the buffering stream, which in turn should send the data further to process.stdout.
Alas, it doesn't work. Specifically, the read() function doesn't seem to have any way to pause or to stop, like:
"Hey, I don't have any data for you now, come back later".
Nah, once I return undefined or null, the story ends and nothing comes out to stdout.
var { Readable, Duplex } = require('stream');

// Source stream, seeds: x\n, x\n, x\n, ...
let c = 10;
var rs = new Readable({
  read() {
    if (c > 0) {
      c--;
      console.log('rs reading:', 'x');
      this.push('x\n');
    } else {
      this.push(null);
    }
  },
});

// Buffering duplex stream
// I want it to cache 3 items and only then to proceed
const queue = [];
const limit = 3;
var ds = new Duplex({
  writableHighWaterMark: 0,
  write(chunk, encoding, callback) {
    console.log('ds writing:', chunk, 'paused: ', ds.isPaused());
    queue.push(chunk);
    callback();
  },
  readableHighWaterMark: 0,
  read() {
    // We don't want to output anything
    // until there's enough elements in the `queue`.
    if (queue.length >= limit) {
      const chunk = queue.shift();
      console.log('ds reading:', chunk);
      this.push(chunk);
    } else {
      // So how to wait here?
      this.push(undefined);
    }
  },
});

// PROBLEM: nothing is coming out of the "ds" and printed on the stdout
rs.pipe(ds).pipe(process.stdout);
Here is my repl: https://repl.it/#OnkelTem/BufferingStream1-1#index.js
I checked the state of the duplex and it's not even in the paused state. So it's not paused, it's flowing, and yet it returns nothing.
I also spent a couple of hours re-reading the documentation on Node streams, but it doesn't actually feel like it was created for understanding.
A buffering stream is just a type of transform stream. If I understand what you're trying to do properly, the implementation shouldn't be any more complicated than this:
const { Transform } = require('stream');

const DEFAULT_CAPACITY = 10;

class BufferingTransform extends Transform {

  constructor(options = {}) {
    super(options);
    this.capacity = options.capacity || DEFAULT_CAPACITY;
    this.pending = [];
    return;
  }

  get atCapacity() {
    return this.pending.length >= this.capacity;
  }

  _transform(chunk, encoding, cb) {
    if (this.atCapacity) {
      this.push(...this.pending.shift());
    }
    this.pending.push([chunk, encoding]);
    cb();
  }

  _flush(cb) {
    while (this.pending.length > 0) {
      this.push(...this.pending.shift());
    }
    cb();
  }
}
Once you have that, it should be just a matter of piping your source through the BufferingTransform and reading from it:
async function readFromSource() {
  const source = openSourceForReading();
  const buffer = new BufferingTransform();
  source.pipe(buffer);
  for await (const chunk of buffer) {
    console.log(chunk);
  }
}
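To tie this back to the question's setup, here is a minimal usage sketch (my wiring, not the answerer's, assuming the BufferingTransform class above):

const { Readable } = require('stream');

// the same kind of seed stream as in the question: ten "x\n" chunks
let count = 10;
const source = new Readable({
  read() {
    this.push(count-- > 0 ? 'x\n' : null);
  },
});

// buffer three chunks before anything reaches stdout
source.pipe(new BufferingTransform({ capacity: 3 })).pipe(process.stdout);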
Here's an implementation using async iterables:
function bufferStream(stream, bufferCount) {
  stream = normalizeAsyncIterable(stream);
  const iterator = stream[Symbol.asyncIterator]();
  const queue = [];
  while (queue.length < bufferCount)
    queue.push(iterator.next());

  return normalizeAsyncIterable({
    [Symbol.asyncIterator]: () => ({
      next: async () => {
        const promise = queue.shift() ?? iterator.next();
        while (queue.length < bufferCount)
          queue.push(iterator.next());
        return promise;
      }
    })
  });
}

// Ensures that calls to .next() while the generator is paused are handled correctly
async function* normalizeAsyncIterable(iterable) {
  for await (const value of iterable)
    yield value;
}
TS Playground Link
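A possible usage sketch of bufferStream (my addition; any Node readable works here, since readables are async iterable):

const { Readable } = require('stream');

(async () => {
  const source = Readable.from(['a', 'b', 'c', 'd', 'e']);
  // keep up to 3 chunks read ahead of the consumer
  for await (const chunk of bufferStream(source, 3)) {
    console.log(chunk);
  }
})();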

How to stream a huge string fast in node with back-pressure support?

The real use-case is sending a huge string through SSH, but for simplicity, I'll demonstrate it with two processes. See the code below.
When I convert this huge string to a Readable stream and pipe it to the child process, it sends very small chunks of data, just a few bytes (~80) each, and as a result the transfer rate is extremely slow.
If, as an alternative, I write directly to the child process (stdin.write), it's super fast and each chunk is the correct size: 8 KB. However, this method doesn't support back-pressure, so with a huge string and a slow consumer it gets overwhelmed.
My question is: how do I pipe the string to the child process, but with the normal chunk size?
parent.js
const stream = require('stream');
const spawn = require('child_process').spawn;
const child = spawn('node', ['child.js'], {});

const strSize = 1000000;
const hugeStr = [...Array(strSize)].map(i => (~~(Math.random() * 36)).toString(36)).join('');
console.log('PARENT: hugeStr size: ', hugeStr.length);

const readable = stream.Readable.from(hugeStr);
readable.pipe(child.stdin);

// an alternative to the pipe, but with no support for back-pressure
// child.stdin.write(hugeStr);
// child.stdin.end();
child.js
const fs = require('fs');
const debug = (str) => fs.writeFileSync('debug.log', `\n${str}`, { flag: 'a' });

function getDataFromStdin() {
  let data = '';
  return new Promise((resolve, reject) => {
    process.stdin.on('data', (chunk) => {
      data += chunk.toString();
      const size = chunk.byteLength;
      debug(`DATA ${size}B. ${Math.floor(size / 1024)}KB`);
    });
    process.stdin.on('end', () => {
      const size = data.length;
      debug(`END TOTAL DATA ${size}B. ${Math.floor(size / 1024)}KB ${Math.floor(size / 1024 / 1024)}MB`);
      resolve();
    });
    process.stderr.on('data', (dataErr) => debug(`child error ${dataErr.toString()}`));
  });
}

getDataFromStdin()
  .then(() => debug('CHILD: COMPLETED'))
  .catch(err => debug(`CHILD ERR: ${err}`));
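No answer is included in this excerpt; purely as a hedged sketch (my addition, reusing hugeStr and child from parent.js above), one workaround is to feed pipe() from a generator that yields fixed-size slices of the string, so each emitted chunk is full-sized while pipe() still applies back-pressure:

const { Readable } = require('stream');

// Hypothetical helper: yield the huge string in fixed-size slices (8 KB is an arbitrary choice)
function* sliceString(str, size = 8 * 1024) {
  for (let i = 0; i < str.length; i += size) {
    yield str.slice(i, i + size);
  }
}

const readable = Readable.from(sliceString(hugeStr));
readable.pipe(child.stdin); // pipe() still handles back-pressure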

node speaker Process finished with exit code 132 (interrupted by signal 4: SIGILL)

I'm trying to use node-speaker to speak an Amazon Polly stream. Once read, I save it into a file as a cache, and the second time around I want to read that file with the speaker.
It works well, but the script exits with Process finished with exit code 132 (interrupted by signal 4: SIGILL).
Here is my method:
speak: (text, cacheDisabled = false) => {
  const hash = path.join(__dirname, 'cache', crypto.createHash('md5').update(text).digest('hex') + '.pcm')
  return new Promise((resolve, reject) => {
    fs.exists(hash, exist => {
      if (exist) {
        const audioFile = fs.createReadStream(hash)
        audioFile.pipe(this.player)
      }
      else {
        const audioFile = fs.createWriteStream(hash)
        const params = {
          'Text': text,
          'OutputFormat': 'pcm',
          'VoiceId': this.config.voiceId
        }
        this.polly.synthesizeSpeech(params, (err, data) => {
          if (err) {
            reject(err)
          }
          else if (data) {
            if (data.AudioStream instanceof Buffer) {
              // Initiate the source
              const bufferStream = new Stream.PassThrough()
              // convert AudioStream into a readable stream
              bufferStream.end(data.AudioStream)
              // save to file
              if (!cacheDisabled && !this.cacheDisabled) {
                bufferStream.pipe(audioFile)
              }
              // Pipe into Player
              bufferStream.pipe(this.player)
              resolve()
            }
            else {
              reject()
            }
          }
        })
      }
    })
  })
}
And my player is basic:
this.player = new Speaker({
  channels: 1,
  bitDepth: 16,
  sampleRate: 16000
})
Any idea how to prevent that? It's very annoying because it crashes the project.
Fixed by using npm install speaker --mpg123-backend=openal, but this module doesn't seem to be maintained anymore :(

Parsing huge logfiles in Node.js - read in line-by-line

I need to do some parsing of large (5-10 GB) logfiles in Javascript/Node.js (I'm using Cube).
The logline looks something like:
10:00:43.343423 I'm a friendly log message. There are 5 cats, and 7 dogs. We are in state "SUCCESS".
We need to read each line, do some parsing (e.g. strip out 5, 7 and SUCCESS), then pump this data into Cube (https://github.com/square/cube) using their JS client.
Firstly, what is the canonical way in Node to read in a file, line by line?
It seems to be a fairly common question online:
http://www.quora.com/What-is-the-best-way-to-read-a-file-line-by-line-in-node-js
Read a file one line at a time in node.js?
A lot of the answers seem to point to a bunch of third-party modules:
https://github.com/nickewing/line-reader
https://github.com/jahewson/node-byline
https://github.com/pkrumins/node-lazy
https://github.com/Gagle/Node-BufferedReader
However, this seems like a fairly basic task - surely, there's a simple way within the stdlib to read in a textfile, line-by-line?
Secondly, I then need to process each line (e.g. convert the timestamp into a Date object, and extract useful fields).
What's the best way to do this, maximising throughput? Is there some way that won't block on either reading in each line, or on sending it to Cube?
Thirdly, I'm guessing that using string splits and the JS equivalent of contains (indexOf != -1?) will be a lot faster than regexes? Has anybody had much experience parsing massive amounts of text data in Node.js?
I searched for a solution to parse very large files (GBs) line by line using a stream. All the third-party libraries and examples did not suit my needs, since they either did not process the files line by line (like 1, 2, 3, 4 ...) or read the entire file into memory.
The following solution can parse very large files, line by line, using stream & pipe. For testing I used a 2.1 GB file with 17,000,000 records. RAM usage did not exceed 60 MB.
First, install the event-stream package:
npm install event-stream
Then:
var fs = require('fs')
  , es = require('event-stream');

var lineNr = 0;

var s = fs.createReadStream('very-large-file.csv')
  .pipe(es.split())
  .pipe(es.mapSync(function(line){
      // pause the readstream
      s.pause();
      lineNr += 1;

      // process line here and call s.resume() when rdy
      // function below was for logging memory usage
      logMemoryUsage(lineNr);

      // resume the readstream, possibly from a callback
      s.resume();
    })
    .on('error', function(err){
      console.log('Error while reading file.', err);
    })
    .on('end', function(){
      console.log('Read entire file.')
    })
  );
Please let me know how it goes!
You can use the built-in readline module (see the Node.js docs). I use stream to create a new output stream.
var fs = require('fs'),
    readline = require('readline'),
    stream = require('stream');

var instream = fs.createReadStream('/path/to/file');
var outstream = new stream;
outstream.readable = true;
outstream.writable = true;

var rl = readline.createInterface({
  input: instream,
  output: outstream,
  terminal: false
});

rl.on('line', function(line) {
  console.log(line);
  //Do your stuff ...
  //Then write to output stream
  rl.write(line);
});
Large files will take some time to process. Do tell if it works.
I really liked @gerard's answer, which actually deserves to be the accepted answer here. I made some improvements:
Code is in a class (modular).
Parsing is included.
The ability to resume is exposed to the outside, in case an asynchronous job is chained to reading the CSV, like inserting into a DB or making an HTTP request.
Reading happens in chunk/batch sizes that the user can declare. I also took care of encoding in the stream, in case you have files in a different encoding.
Here's the code:
'use strict'

const fs = require('fs'),
    util = require('util'),
    stream = require('stream'),
    es = require('event-stream'),
    parse = require("csv-parse"),
    iconv = require('iconv-lite');

class CSVReader {
  constructor(filename, batchSize, columns) {
    this.reader = fs.createReadStream(filename).pipe(iconv.decodeStream('utf8'))
    this.batchSize = batchSize || 1000
    this.lineNumber = 0
    this.data = []
    this.parseOptions = {delimiter: '\t', columns: true, escape: '/', relax: true}
  }

  read(callback) {
    this.reader
      .pipe(es.split())
      .pipe(es.mapSync(line => {
          ++this.lineNumber

          parse(line, this.parseOptions, (err, d) => {
            this.data.push(d[0])
          })

          if (this.lineNumber % this.batchSize === 0) {
            callback(this.data)
          }
        })
        .on('error', function(){
          console.log('Error while reading file.')
        })
        .on('end', function(){
          console.log('Read entire file.')
        })
      )
  }

  continue () {
    this.data = []
    this.reader.resume()
  }
}

module.exports = CSVReader
So basically, here is how you would use it:
let reader = new CSVReader('path_to_file.csv')
reader.read(() => reader.continue())
I tested this with a 35GB CSV file and it worked for me, which is why I chose to build on @gerard's answer. Feedback is welcome.
I used https://www.npmjs.com/package/line-by-line for reading more than 1,000,000 lines from a text file. In this case, RAM usage was about 50-60 MB.
const LineByLineReader = require('line-by-line'),
    lr = new LineByLineReader('big_file.txt');

lr.on('error', function (err) {
  // 'err' contains error object
});

lr.on('line', function (line) {
  // pause emitting of lines...
  lr.pause();

  // ...do your asynchronous line processing..
  setTimeout(function () {
    // ...and continue emitting lines.
    lr.resume();
  }, 100);
});

lr.on('end', function () {
  // All lines are read, file is closed now.
});
The Node.js Documentation offers a very elegant example using the Readline module.
Example: Read File Stream Line-by-Line
const { once } = require('node:events');
const fs = require('fs');
const readline = require('readline');

(async () => {
  const rl = readline.createInterface({
    input: fs.createReadStream('sample.txt'),
    crlfDelay: Infinity
  });

  rl.on('line', (line) => {
    console.log(`Line from file: ${line}`);
  });

  // wrapped in an async IIFE since top-level await requires an ES module
  await once(rl, 'close');
})();
Note: we use the crlfDelay option to recognize all instances of CR LF ('\r\n') as a single line break.
Apart from reading the big file line by line, you can also read it chunk by chunk. For more, refer to this article:
var fs = require('fs');

var offset = 0;
var chunkSize = 2048;
var chunkBuffer = Buffer.alloc(chunkSize);
var fp = fs.openSync('filepath', 'r');
var bytesRead = 0;
var lines = [];

while (bytesRead = fs.readSync(fp, chunkBuffer, 0, chunkSize, offset)) {
  offset += bytesRead;
  var str = chunkBuffer.slice(0, bytesRead).toString();
  var arr = str.split('\n');

  if (bytesRead === chunkSize) {
    // the last item of the arr may not be a full line, leave it to the next chunk
    offset -= arr.pop().length;
  }
  lines.push(...arr);
}
console.log(lines);
I had the same problem. After comparing several modules that seem to have this feature, I decided to do it myself; it's simpler than I thought.
gist: https://gist.github.com/deemstone/8279565
var fetchBlock = lineByline(filepath, onEnd);
fetchBlock(function(lines, start){ ... }); // lines {array}, start {int}: line number of lines[0]
It keeps the file open in a closure; the fetchBlock() function returned fetches a block from the file and splits it into an array of lines (it also handles the segment left over from the previous fetch).
I've set the block size to 1024 for each read operation. This may have bugs, but the code logic is obvious; try it yourself.
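The gist itself isn't reproduced here; purely as an illustration of the closure idea described above (my sketch, not the gist's actual code), a minimal lineByline could look like this:

const fs = require('fs');

// Hypothetical minimal version of the closure described above: each call to the
// returned function reads one block, splits it into lines, and carries the
// trailing partial line over to the next call.
function lineByline(filepath, onEnd, blockSize = 1024) {
  const fd = fs.openSync(filepath, 'r');
  const buf = Buffer.alloc(blockSize);
  let leftover = '';
  let lineNo = 0;

  return function fetchBlock(cb) {
    const bytesRead = fs.readSync(fd, buf, 0, blockSize, null);
    if (bytesRead === 0) {
      fs.closeSync(fd);
      if (leftover) cb([leftover], lineNo);
      return onEnd && onEnd();
    }
    const parts = (leftover + buf.toString('utf8', 0, bytesRead)).split('\n');
    leftover = parts.pop(); // keep the partial last line for the next block
    const start = lineNo;
    lineNo += parts.length;
    cb(parts, start);
  };
}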
Reading/writing files using streams with the native Node.js modules (fs, readline):
const fs = require('fs');
const readline = require('readline');

const rl = readline.createInterface({
  input: fs.createReadStream('input.json'),
  output: fs.createWriteStream('output.json')
});

rl.on('line', function(line) {
  console.log(line);

  // Do any 'line' processing if you want and then write to the output file
  this.output.write(`${line}\n`);
});

rl.on('close', function() {
  console.log(`Created "${this.output.path}"`);
});
Based on this question's answers I implemented a class you can use to read a file synchronously line-by-line with fs.readSync(). You can make it "pause" and "resume" by using a Q promise (jQuery seems to require a DOM, so you can't run it with Node.js):
var fs = require('fs');
var Q = require('q');

var lr = new LineReader(filenameToLoad);
lr.open();

var promise;
workOnLine = function () {
  var line = lr.readNextLine();
  promise = complexLineTransformation(line).then(
    function() { console.log('ok'); workOnLine(); },
    function() { console.log('error'); }
  );
}
workOnLine();

complexLineTransformation = function (line) {
  var deferred = Q.defer();
  // ... async call goes here, in callback: deferred.resolve('done ok'); or deferred.reject(new Error(error));
  return deferred.promise;
}

function LineReader (filename) {
  this.moreLinesAvailable = true;
  this.fd = undefined;
  this.bufferSize = 1024 * 1024;
  this.buffer = new Buffer(this.bufferSize);
  this.leftOver = '';

  this.read = undefined;
  this.idxStart = undefined;
  this.idx = undefined;

  this.lineNumber = 0;
  this._bundleOfLines = [];

  this.open = function() {
    this.fd = fs.openSync(filename, 'r');
  };

  this.readNextLine = function () {
    if (this._bundleOfLines.length === 0) {
      this._readNextBundleOfLines();
    }
    this.lineNumber++;
    var lineToReturn = this._bundleOfLines[0];
    this._bundleOfLines.splice(0, 1); // remove first element (pos, howmany)
    return lineToReturn;
  };

  this.getLineNumber = function() {
    return this.lineNumber;
  };

  this._readNextBundleOfLines = function() {
    var line = "";
    while ((this.read = fs.readSync(this.fd, this.buffer, 0, this.bufferSize, null)) !== 0) { // read next bytes until end of file
      this.leftOver += this.buffer.toString('utf8', 0, this.read); // append to leftOver
      this.idxStart = 0;
      while ((this.idx = this.leftOver.indexOf("\n", this.idxStart)) !== -1) { // as long as there is a newline-char in leftOver
        line = this.leftOver.substring(this.idxStart, this.idx);
        this._bundleOfLines.push(line);
        this.idxStart = this.idx + 1;
      }
      this.leftOver = this.leftOver.substring(this.idxStart);
      if (line !== "") {
        break;
      }
    }
  };
}
node-byline uses streams, so I would prefer that one for your huge files.
For your date conversions I would use moment.js.
For maximising your throughput, you could think about using a software cluster. There are some nice modules which wrap the node-native cluster module quite well. I like cluster-master from isaacs; e.g. you could create a cluster of x workers which all compute a file.
For benchmarking splits vs regexes, use benchmark.js. I haven't tested it myself yet. benchmark.js is available as a node module.
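Purely as an illustration of that last point (my sketch, not part of the original answer; the sample log line is made up), a benchmark.js suite comparing indexOf against a regex could look like this:

const Benchmark = require('benchmark');

const line = '10:00:43.343423 I\'m a friendly log message. We are in state "SUCCESS".';
const suite = new Benchmark.Suite();

suite
  .add('indexOf', function () {
    return line.indexOf('SUCCESS') !== -1;
  })
  .add('regex', function () {
    return /state "(\w+)"/.test(line);
  })
  .on('cycle', function (event) {
    console.log(String(event.target)); // e.g. "indexOf x 12,345,678 ops/sec ..."
  })
  .on('complete', function () {
    console.log('Fastest is ' + this.filter('fastest').map('name'));
  })
  .run();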
import * as csv from 'fast-csv';
import * as fs from 'fs';

interface Row {
  [s: string]: string;
}

type RowCallBack = (data: Row, index: number) => object;

export class CSVReader {
  protected file: string;

  protected csvOptions = {
    delimiter: ',',
    headers: true,
    ignoreEmpty: true,
    trim: true
  };

  constructor(file: string, csvOptions = {}) {
    if (!fs.existsSync(file)) {
      throw new Error(`File ${file} not found.`);
    }
    this.file = file;
    this.csvOptions = Object.assign({}, this.csvOptions, csvOptions);
  }

  public read(callback: RowCallBack): Promise<Array<object>> {
    return new Promise<Array<object>>(resolve => {
      const readStream = fs.createReadStream(this.file);
      const results: Array<any> = [];
      let index = 0;
      const csvStream = csv.parse(this.csvOptions).on('data', async (data: Row) => {
        index++;
        results.push(await callback(data, index));
      }).on('error', (err: Error) => {
        console.error(err.message);
        throw err;
      }).on('end', () => {
        resolve(results);
      });
      readStream.pipe(csvStream);
    });
  }
}
import { CSVReader } from '../src/helpers/CSVReader';

(async () => {
  const reader = new CSVReader('./database/migrations/csv/users.csv');
  const users = await reader.read(async data => {
    return {
      username: data.username,
      name: data.name,
      email: data.email,
      cellPhone: data.cell_phone,
      homePhone: data.home_phone,
      roleId: data.role_id,
      description: data.description,
      state: data.state,
    };
  });
  console.log(users);
})();
I have made a node module to read large files asynchronously, as text or JSON.
Tested on large files.
var fs = require('fs')
  , util = require('util')
  , stream = require('stream')
  , es = require('event-stream');

module.exports = FileReader;

function FileReader() {
}

FileReader.prototype.read = function(pathToFile, callback) {
  var returnTxt = '';
  var s = fs.createReadStream(pathToFile)
    .pipe(es.split())
    .pipe(es.mapSync(function(line) {
        // pause the readstream
        s.pause();

        //console.log('reading line: '+line);
        returnTxt += line;

        // resume the readstream, possibly from a callback
        s.resume();
      })
      .on('error', function() {
        console.log('Error while reading file.');
      })
      .on('end', function() {
        console.log('Read entire file.');
        callback(returnTxt);
      })
    );
};

FileReader.prototype.readJSON = function(pathToFile, callback) {
  try {
    this.read(pathToFile, function(txt) { callback(JSON.parse(txt)); });
  }
  catch (err) {
    throw new Error('json file is not valid! ' + err.stack);
  }
};
Just save the file as file-reader.js, and use it like this:
var FileReader = require('./file-reader');
var fileReader = new FileReader();
fileReader.readJSON(__dirname + '/largeFile.json', function(jsonObj){/*callback logic here*/});
