I'm trying to implement a duplex stream with buffering capabilities.
It should accumulate chunks of data until there are enough of them, and only then send them further.
It can be used, for example, when playing streamed audio/video data: one doesn't simply get frames exactly on time, right?
Below is my silly attempt to create such a buffering duplex stream. There's a source stream, which sends x\n characters to the buffering stream, which in turn should send the data further to process.stdout.
Alas, it doesn't work. Specifically, the read() function doesn't seem to have any way to pause or stop, like:
"Hey, I don't have any data for you now, come back later".
Nah, once I return undefined or null, the story ends and nothing comes out to stdout.
var {Readable, Duplex} = require('stream');

// Source stream, seeds: x\n, x\n, x\n, ...
let c = 10;
var rs = new Readable({
  read () {
    if (c > 0) {
      c--;
      console.log('rs reading:', 'x');
      this.push('x\n');
    }
    else {
      this.push(null);
    }
  },
});
// Buffering duplex stream
// I want it to cache 3 items and only then to proceed
const queue = [];
const limit = 3;
var ds = new Duplex({
  writableHighWaterMark: 0,
  write (chunk, encoding, callback) {
    console.log('ds writing:', chunk, 'paused:', ds.isPaused());
    queue.push(chunk);
    callback();
  },
  readableHighWaterMark: 0,
  read () {
    // We don't want to output anything
    // until there's enough elements in the `queue`.
    if (queue.length >= limit) {
      const chunk = queue.shift();
      console.log('ds reading:', chunk);
      this.push(chunk);
    }
    else {
      // So how to wait here?
      this.push(undefined);
    }
  },
});
// PROBLEM: nothing is coming out of the "ds" and printed on the stdout
rs.pipe(ds).pipe(process.stdout);
Here is my repl: https://repl.it/#OnkelTem/BufferingStream1-1#index.js
I checked the state of the duplex, and it's not even in the paused state. So it's not paused, it's flowing, and yet it returns nothing.
I also spent a couple of hours re-reading the documentation on Node streams, but it doesn't really feel like it was written to be understood.
A buffering stream is just a type of transform stream. If I understand properly what you're trying to do, the implementation shouldn't be any more complicated than this:
const { Transform } = require('stream');
const DEFAULT_CAPACITY = 10;
class BufferingTransform extends Transform {

  constructor(options = {}) {
    super(options);
    this.capacity = options.capacity || DEFAULT_CAPACITY;
    this.pending = [];
  }

  get atCapacity() {
    return this.pending.length >= this.capacity;
  }

  _transform(chunk, encoding, cb) {
    if (this.atCapacity) {
      this.push(...this.pending.shift());
    }
    this.pending.push([chunk, encoding]);
    cb();
  }

  _flush(cb) {
    while (this.pending.length > 0) {
      this.push(...this.pending.shift());
    }
    cb();
  }

}
Once you have that, it should be just a matter of piping your source through the BufferingTransform and reading from it:
async function readFromSource() {
  const source = openSourceForReading();
  const buffer = new BufferingTransform();
  source.pipe(buffer);
  for await (const chunk of buffer) {
    console.log(chunk);
  }
}
Here's an implementation using async iterables:
function bufferStream(stream, bufferCount) {
  stream = normalizeAsyncIterable(stream);
  const iterator = stream[Symbol.asyncIterator]();
  const queue = [];
  while (queue.length < bufferCount)
    queue.push(iterator.next());

  return normalizeAsyncIterable({
    [Symbol.asyncIterator]: () => ({
      next: async () => {
        const promise = queue.shift() ?? iterator.next();
        while (queue.length < bufferCount)
          queue.push(iterator.next());
        return promise;
      }
    })
  });
}

// Ensures that calls to .next() while the generator is paused are handled correctly
async function* normalizeAsyncIterable(iterable) {
  for await (const value of iterable)
    yield value;
}
TS Playground Link
I'm creating a bot that, when it receives a message, begins to read a text file and responds to the message with the contents of the file.
Unfortunately, I can't get out of this asynchronous hell, and all I get is errors, undefined, or pending promises.
The last experiment was this:
const fs = require('fs');
const readline = require('readline');
// bot.listen("message").reply(responseText())
function readFile(file) {
  var text = '';
  var readInterface = readline.createInterface({
    input: fs.createReadStream(file),
    terminal: false
  });
  readInterface.on('line', function(line) {
    linea = line.trim();
    console.log(linea);
    text += linea;
  }).on('close', function() {
    return text;
  });
}
async function responseText() {
  var content = await readFile("file.txt");
  content.then(function(data) {
    return data;
  })
}
What I would like, then, is to delay the response until I get the contents of the file.
I know that Node is based on async I/O, but I can't figure out how to handle it!
Thanks all
If you want to use async/await, you need to create a promise and return it:
const fs = require('fs');
const readline = require('readline');

function readFile(file) {
  return new Promise((res, rej) => {
    try {
      var text = '';
      var readInterface = readline.createInterface({
        input: fs.createReadStream(file),
        terminal: false
      });
      readInterface
        .on('line', function (line) {
          text += line.trim();
        })
        .on('close', function () {
          res(text);
        });
    } catch (err) {
      rej(err);
    }
  });
}
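With that in place, the responseText function from the question reduces to a plain await (a minimal sketch based on the question's code):

// Minimal sketch: readFile now returns a promise, so await resolves to the file's text.
async function responseText() {
  const content = await readFile('file.txt');
  return content;
}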
If you're using Express.js or any framework built on top of it, you can simply pipe the read stream to the response, since Express's responses are streams to begin with:
const es = require('event-stream')
const { inspect } = require('util')
const fs = require('fs')
...
let getFileStream = path => (
  fs.createReadStream(path)
    .pipe(es.split())
    .pipe(es.map(function (data, cb) {
      cb(null, inspect(JSON.parse(data)))
    }))
);

router.get('/message', function (req, res, next) {
  let file$ = getFileStream(yourFilePath)
  file$.on('error', next).pipe(res)
})
If you need to transform the file content, you can use a transform stream or, as shown in the example above, a synchronous event-stream mapping. The idea is to always work with the file content at the stream level, to avoid having to load the entire file content into memory.
You don't really want to buffer the whole file content in memory; that can quickly become a problem with huge files on a busy day. What you need is to pipe the file stream directly to the browser. The same principle applies to any kind of consumer.
Of course, if the mechanism is all internal, you should just pass the file path along, or the actual stream, until you actually need to open the file and do something with its content. In that case, you go back to your stream toolbox, whether it be the native Node.js stream API, the event-stream package, or some kind of observable library like rxjs.
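For illustration, here's a minimal sketch of the native transform-stream approach mentioned above (the uppercasing step is just a placeholder for whatever per-chunk processing you need):

const { Transform } = require('stream');

// Placeholder transform: uppercases each chunk as it flows through.
const upperCase = new Transform({
  transform(chunk, encoding, callback) {
    callback(null, chunk.toString().toUpperCase());
  }
});

// e.g. fs.createReadStream(path).pipe(upperCase).pipe(res);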
I had a similar issue in an app that watches a directory for new files, reads the file(s), and returns derived data based on the file content. My Reader function is based on the async example from the Node.js readline docs. I return options, which contains the context, only after the file is read completely.
const { createReadStream } = require('fs')
const { createInterface } = require('readline')
const { once } = require('events')
// Reader.js
async function Reader (options) {
  let { file, cb } = options
  let fileStream = createReadStream(file)
  const readInterface = createInterface({
    input: fileStream,
    crlfDelay: Infinity
  })
  readInterface.on('line', (line) => {
    cb(line)
  })
  await once(readInterface, 'close')
  return options
}
module.exports = Reader
I then have a file which imports my Reader and defines how to use it. I define a callback function to pass to the line event listener, and I bind the callback to the options object that I pass to my Reader function. In the readFile function I make sure to return the call to Reader, which is a Promise.
/**
 * @desc callback to instruct what to do with each line read
 *
 * @param {*} line
 */
const readFileLine = function (line) {
  const linea = line.trim()
  console.log(linea)
  this.context += linea
}
/**
 * @desc once the added file is ready to be processed, read the file line by line
 * @listens {Event} listens for `process` event
 */
const readFile = (options) => {
  return Reader(options)
}

/**
 * @desc call the file reader and do what you need with the response
 *
 */
const getResponseFromFiles = (file) => {
  const opts = {}
  opts.cb = readFileLine.bind(opts)
  opts.context = ''
  opts.file = file
  readFile(opts)
    .then(data => {
      process.exitCode = 0
      console.log(data)
      return data
    })
    .catch(err => {
      process.exitCode = 1
      console.log(err.message)
    })
}
I'm trying to implement a stream which returns one symbol from a file on each 'data' event.
I ended up with the code below:
var util = require('util'),
    fs = require('fs'),
    Readable = require('stream').Readable;

var SymbolReadStream = function(filename, options) {
  Readable.call(this);
  this._readable = fs.createReadStream(filename, options).pause();
  self = this;
  this._readable.on('readable', function() {
    var chunk;
    chunk = self._readable.read(1);
    // I believe the problem is here
    self._readable.pause();
  });
};

util.inherits(SymbolReadStream, Readable); // inherit the prototype methods

SymbolReadStream.prototype._read = function() {
  this._readable.resume();
};

var r = new SymbolReadStream("test.txt", {
  encoding: 'utf8',
});

r.on('data', function(el) {
  console.log(el);
});
but this code doesn't work. Please help.
Is there an easier way to achieve the behavior?
This post gives a great clue to answering your question.
Also, you should take a look at pipe; that would be a cleaner way to accomplish what you're trying to do: piping an adapter onto the file stream instead of wrapping it up.
That said, personally I wouldn't reinvent the wheel here, and would just search for modules that can accomplish that, especially "split" modules, making them split on every char instead of on newlines. As an example, event-stream has a split method that "takes the same arguments as string.split except it defaults to '\n' instead of ','". So the logic would be to try myStream.pipe(es.split('')), but the module treats that like myStream.pipe(es.split()), which breaks on lines. So here's my solution, using a regex to say "break on each char":
var es = require('event-stream');
var fs = require('fs');
var symbolStream = fs.createReadStream(filename, options).pipe(es.split(/(?!$)/));
EDIT: event-stream seems to use split module internally, so you can even try
var split = require('split');
var fs = require('fs');
var symbolStream = fs.createReadStream(filename, options).pipe(split(/(?!$)/));
(the (?!$) lookahead splits between every character without producing a trailing empty string at the end of input)
In your stream implementation, nothing emits a 'data' event to handlers; because of that, console.log is never called. After adding the events, the file will be streamed symbol by symbol. Example below:
var util = require('util'),
    fs = require('fs'),
    Readable = require('stream').Readable;

function SymbolReadStream(filename, options) {
  if (!(this instanceof SymbolReadStream)) {
    return new SymbolReadStream(filename, options);
  }
  Readable.call(this);
  this._readable = fs.createReadStream(filename, options);
}

util.inherits(SymbolReadStream, Readable); // inherit the prototype methods

SymbolReadStream.prototype._read = function() {
  var self = this;
  this._readable.on('readable', function() {
    var chunk;
    while (null !== (chunk = self._readable.read(1))) {
      self.emit('data', chunk);
    }
  });
  this._readable.on('end', function() {
    self.emit('end');
  });
};

var r = new SymbolReadStream("test.txt", {
  encoding: 'utf8',
});

r.on('data', function(el) {
  console.log(el);
});
r.on('end', function(el) {
  console.log('done');
});
I need to do some parsing of large (5-10 GB) logfiles in JavaScript/Node.js (I'm using Cube).
The logline looks something like:
10:00:43.343423 I'm a friendly log message. There are 5 cats, and 7 dogs. We are in state "SUCCESS".
We need to read each line, do some parsing (e.g. strip out 5, 7 and SUCCESS), then pump this data into Cube (https://github.com/square/cube) using their JS client.
Firstly, what is the canonical way in Node to read in a file, line by line?
It seems to be fairly common question online:
http://www.quora.com/What-is-the-best-way-to-read-a-file-line-by-line-in-node-js
Read a file one line at a time in node.js?
A lot of the answers seem to point to a bunch of third-party modules:
https://github.com/nickewing/line-reader
https://github.com/jahewson/node-byline
https://github.com/pkrumins/node-lazy
https://github.com/Gagle/Node-BufferedReader
However, this seems like a fairly basic task - surely there's a simple way within the stdlib to read in a text file, line by line?
Secondly, I then need to process each line (e.g. convert the timestamp into a Date object, and extract useful fields).
What's the best way to do this, maximising throughput? Is there some way that won't block on either reading in each line, or on sending it to Cube?
Thirdly, I'm guessing using string splits and the JS equivalent of contains (indexOf !== -1?) will be a lot faster than regexes? Has anybody had much experience parsing massive amounts of text data in Node.js?
I searched for a solution to parse very large files (GBs) line by line using a stream. All the third-party libraries and examples did not suit my needs, since they either processed the files in chunks rather than line by line, or read the entire file into memory.
The following solution can parse very large files, line by line, using stream & pipe. For testing I used a 2.1 GB file with 17,000,000 records; RAM usage did not exceed 60 MB.
First, install the event-stream package:
npm install event-stream
Then:
var fs = require('fs')
  , es = require('event-stream');

var lineNr = 0;

var s = fs.createReadStream('very-large-file.csv')
  .pipe(es.split())
  .pipe(es.mapSync(function(line){
    // pause the readstream
    s.pause();

    lineNr += 1;

    // process line here and call s.resume() when ready
    // function below was for logging memory usage
    logMemoryUsage(lineNr);

    // resume the readstream, possibly from a callback
    s.resume();
  })
  .on('error', function(err){
    console.log('Error while reading file.', err);
  })
  .on('end', function(){
    console.log('Read entire file.');
  })
);
Please let me know how it goes!
You can use the built-in readline package; see the docs here. I use stream to create a new output stream.
var fs = require('fs'),
    readline = require('readline'),
    stream = require('stream');

var instream = fs.createReadStream('/path/to/file');
var outstream = new stream;
outstream.readable = true;
outstream.writable = true;

var rl = readline.createInterface({
  input: instream,
  output: outstream,
  terminal: false
});

rl.on('line', function(line) {
  console.log(line);
  //Do your stuff ...
  //Then write to output stream
  rl.write(line);
});
Large files will take some time to process. Do tell if it works.
I really liked @gerard's answer, which actually deserves to be the accepted answer here. I made some improvements:
Code is in a class (modular)
Parsing is included
The ability to resume is exposed to the outside, in case an asynchronous job (like inserting into a DB, or an HTTP request) is chained to reading the CSV
Reading happens in chunk/batch sizes the user can declare. I also took care of encoding in the stream, in case you have files in a different encoding.
Here's the code:
'use strict'

const fs = require('fs'),
    util = require('util'),
    stream = require('stream'),
    es = require('event-stream'),
    parse = require("csv-parse"),
    iconv = require('iconv-lite');

class CSVReader {
  constructor(filename, batchSize, columns) {
    this.reader = fs.createReadStream(filename).pipe(iconv.decodeStream('utf8'))
    this.batchSize = batchSize || 1000
    this.lineNumber = 0
    this.data = []
    this.parseOptions = {delimiter: '\t', columns: true, escape: '/', relax: true}
  }

  read(callback) {
    this.reader
      .pipe(es.split())
      .pipe(es.mapSync(line => {
        ++this.lineNumber
        parse(line, this.parseOptions, (err, d) => {
          this.data.push(d[0])
        })
        if (this.lineNumber % this.batchSize === 0) {
          // pause until the consumer calls continue()
          this.reader.pause()
          callback(this.data)
        }
      })
      .on('error', function() {
        console.log('Error while reading file.')
      })
      .on('end', function() {
        console.log('Read entire file.')
      }))
  }

  continue () {
    this.data = []
    this.reader.resume()
  }
}

module.exports = CSVReader
So basically, here is how you will use it:
let reader = new CSVReader('path_to_file.csv')
reader.read(() => reader.continue())
I tested this with a 35 GB CSV file and it worked for me, which is why I chose to build on @gerard's answer. Feedback is welcome.
I used https://www.npmjs.com/package/line-by-line for reading more than 1,000,000 lines from a text file. In this case, RAM usage was about 50-60 MB.
const LineByLineReader = require('line-by-line'),
    lr = new LineByLineReader('big_file.txt');

lr.on('error', function (err) {
  // 'err' contains error object
});

lr.on('line', function (line) {
  // pause emitting of lines...
  lr.pause();

  // ...do your asynchronous line processing..
  setTimeout(function () {
    // ...and continue emitting lines.
    lr.resume();
  }, 100);
});

lr.on('end', function () {
  // All lines are read, file is closed now.
});
The Node.js Documentation offers a very elegant example using the Readline module.
Example: Read File Stream Line-by-Line
const { once } = require('node:events');
const fs = require('fs');
const readline = require('readline');

async function processLineByLine() {
  const rl = readline.createInterface({
    input: fs.createReadStream('sample.txt'),
    crlfDelay: Infinity
  });

  rl.on('line', (line) => {
    console.log(`Line from file: ${line}`);
  });

  await once(rl, 'close');
}

processLineByLine();
Note: we use the crlfDelay option to recognize all instances of CR LF ('\r\n') as a single line break.
Apart from reading the big file line by line, you can also read it chunk by chunk. For more, refer to this article:
var fs = require('fs');

var offset = 0;
var chunkSize = 2048;
var chunkBuffer = Buffer.alloc(chunkSize);
var fp = fs.openSync('filepath', 'r');
var bytesRead = 0;
var lines = [];

while (bytesRead = fs.readSync(fp, chunkBuffer, 0, chunkSize, offset)) {
  offset += bytesRead;
  var str = chunkBuffer.slice(0, bytesRead).toString();
  var arr = str.split('\n');

  if (bytesRead === chunkSize) {
    // the last item of the arr may not be a full line, leave it to the next chunk
    offset -= arr.pop().length;
  }
  lines.push(...arr);
}
console.log(lines);
I had the same problem. After comparing several modules that seemed to have this feature, I decided to do it myself; it turned out to be simpler than I thought.
gist: https://gist.github.com/deemstone/8279565
var fetchBlock = lineByline(filepath, onEnd);
fetchBlock(function(lines, start){ ... }); // lines {array}, start {int}: line number of lines[0]
It keeps the opened file in a closure; each call to the returned fetchBlock() fetches a block from the file and splits it into an array of lines (it takes care of the segment left over from the previous fetch).
I've set the block size to 1024 for each read operation. This may have bugs, but the logic is obvious; try it yourself.
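For reference, here's a rough sketch of the idea (not the actual gist code; the real version also takes an onEnd callback and passes a starting line number to the block callback):

var fs = require('fs');

// Keeps the opened file descriptor in a closure; each call to the returned
// fetchBlock() reads the next block and splits it into lines, carrying any
// partial trailing line over to the next call.
function lineByline(filepath) {
  var fd = fs.openSync(filepath, 'r');
  var leftover = '';
  var blockSize = 1024;

  return function fetchBlock(onLines) {
    var buffer = Buffer.alloc(blockSize);
    var bytesRead = fs.readSync(fd, buffer, 0, blockSize, null);
    var text = leftover + buffer.toString('utf8', 0, bytesRead);
    var lines = text.split('\n');
    // A full block probably ends mid-line; keep the tail for the next fetch.
    leftover = bytesRead === blockSize ? lines.pop() : '';
    onLines(lines);
  };
}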
Reading/writing files using streams with the native Node.js modules (fs, readline):
const fs = require('fs');
const readline = require('readline');

const rl = readline.createInterface({
  input: fs.createReadStream('input.json'),
  output: fs.createWriteStream('output.json')
});

rl.on('line', function(line) {
  console.log(line);

  // Do any 'line' processing if you want and then write to the output file
  this.output.write(`${line}\n`);
});

rl.on('close', function() {
  console.log(`Created "${this.output.path}"`);
});
Based on this question's answer, I implemented a class you can use to read a file synchronously, line by line, with fs.readSync(). You can make it "pause" and "resume" by using a Q promise (jQuery seems to require a DOM, so it can't be used with Node.js):
var fs = require('fs');
var Q = require('q');

var lr = new LineReader(filenameToLoad);
lr.open();

var promise;

var complexLineTransformation = function (line) {
  var deferred = Q.defer();
  // ... async call goes here, in callback: deferred.resolve('done ok'); or deferred.reject(new Error(error));
  return deferred.promise;
};

var workOnLine = function () {
  var line = lr.readNextLine();
  promise = complexLineTransformation(line).then(
    function () { console.log('ok'); workOnLine(); },
    function () { console.log('error'); }
  );
};
workOnLine();

function LineReader (filename) {
  this.moreLinesAvailable = true;
  this.fd = undefined;
  this.bufferSize = 1024 * 1024;
  this.buffer = Buffer.alloc(this.bufferSize);
  this.leftOver = '';
  this.read = undefined;
  this.idxStart = undefined;
  this.idx = undefined;
  this.lineNumber = 0;
  this._bundleOfLines = [];

  this.open = function () {
    this.fd = fs.openSync(filename, 'r');
  };

  this.readNextLine = function () {
    if (this._bundleOfLines.length === 0) {
      this._readNextBundleOfLines();
    }
    this.lineNumber++;
    var lineToReturn = this._bundleOfLines[0];
    this._bundleOfLines.splice(0, 1); // remove first element (pos, howmany)
    return lineToReturn;
  };

  this.getLineNumber = function () {
    return this.lineNumber;
  };

  this._readNextBundleOfLines = function () {
    var line = "";
    while ((this.read = fs.readSync(this.fd, this.buffer, 0, this.bufferSize, null)) !== 0) { // read next bytes until end of file
      this.leftOver += this.buffer.toString('utf8', 0, this.read); // append to leftOver
      this.idxStart = 0;
      while ((this.idx = this.leftOver.indexOf("\n", this.idxStart)) !== -1) { // as long as there is a newline-char in leftOver
        line = this.leftOver.substring(this.idxStart, this.idx);
        this._bundleOfLines.push(line);
        this.idxStart = this.idx + 1;
      }
      this.leftOver = this.leftOver.substring(this.idxStart);
      if (line !== "") {
        break;
      }
    }
  };
}
node-byline uses streams, so I would prefer that one for your huge files.
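A minimal usage sketch, based on the byline package's documented API (the file name is a placeholder):

var byline = require('byline');
var fs = require('fs');

// Wrap the file stream so 'data' events emit one line at a time.
var stream = byline(fs.createReadStream('huge.log', { encoding: 'utf8' }));
stream.on('data', function (line) {
  // parse the line here
});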
For your date conversions I would use moment.js.
For maximising your throughput you could think about using a software cluster. There are some nice modules which wrap the node-native cluster module quite well. I like cluster-master from isaacs: e.g. you could create a cluster of x workers which all compute a file.
For benchmarking splits vs. regexes, use benchmark.js; I haven't tested it myself yet. benchmark.js is available as a node module.
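As a rough sketch of how such a benchmark could look with benchmark.js (the sample log line is made up):

var Benchmark = require('benchmark');

var suite = new Benchmark.Suite();
var line = '10:00:43.343423 We are in state "SUCCESS".';

suite
  .add('indexOf', function () {
    return line.indexOf('SUCCESS') !== -1;
  })
  .add('regex', function () {
    return /state "(\w+)"/.test(line);
  })
  .on('cycle', function (event) {
    console.log(String(event.target));
  })
  .on('complete', function () {
    console.log('Fastest is ' + this.filter('fastest').map('name'));
  })
  .run();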
import * as csv from 'fast-csv';
import * as fs from 'fs';

interface Row {
  [s: string]: string;
}

type RowCallBack = (data: Row, index: number) => object;

export class CSVReader {
  protected file: string;

  protected csvOptions = {
    delimiter: ',',
    headers: true,
    ignoreEmpty: true,
    trim: true
  };

  constructor(file: string, csvOptions = {}) {
    if (!fs.existsSync(file)) {
      throw new Error(`File ${file} not found.`);
    }
    this.file = file;
    this.csvOptions = Object.assign({}, this.csvOptions, csvOptions);
  }

  public read(callback: RowCallBack): Promise<Array<object>> {
    return new Promise<Array<object>>(resolve => {
      const readStream = fs.createReadStream(this.file);
      const results: Array<any> = [];
      let index = 0;
      const csvStream = csv.parse(this.csvOptions).on('data', async (data: Row) => {
        index++;
        results.push(await callback(data, index));
      }).on('error', (err: Error) => {
        console.error(err.message);
        throw err;
      }).on('end', () => {
        resolve(results);
      });
      readStream.pipe(csvStream);
    });
  }
}
import { CSVReader } from '../src/helpers/CSVReader';

(async () => {
  const reader = new CSVReader('./database/migrations/csv/users.csv');
  const users = await reader.read(async data => {
    return {
      username: data.username,
      name: data.name,
      email: data.email,
      cellPhone: data.cell_phone,
      homePhone: data.home_phone,
      roleId: data.role_id,
      description: data.description,
      state: data.state,
    };
  });
  console.log(users);
})();
I have made a node module to read large text or JSON files asynchronously.
Tested on large files.
var fs = require('fs')
  , util = require('util')
  , stream = require('stream')
  , es = require('event-stream');

module.exports = FileReader;

function FileReader() {
}

FileReader.prototype.read = function(pathToFile, callback) {
  var returnTxt = '';
  var s = fs.createReadStream(pathToFile)
    .pipe(es.split())
    .pipe(es.mapSync(function(line) {
      // pause the readstream
      s.pause();

      //console.log('reading line: '+line);
      returnTxt += line;

      // resume the readstream, possibly from a callback
      s.resume();
    })
    .on('error', function() {
      console.log('Error while reading file.');
    })
    .on('end', function() {
      console.log('Read entire file.');
      callback(returnTxt);
    })
  );
};

FileReader.prototype.readJSON = function(pathToFile, callback) {
  this.read(pathToFile, function(txt) {
    // JSON.parse must be wrapped here: the outer readJSON call has already
    // returned by the time the file is read, so a try/catch around the
    // read() call would never see this error.
    try {
      callback(JSON.parse(txt));
    } catch (err) {
      throw new Error('json file is not valid! ' + err.stack);
    }
  });
};
Just save the file as file-reader.js, and use it like this:
var FileReader = require('./file-reader');
var fileReader = new FileReader();
fileReader.readJSON(__dirname + '/largeFile.json', function(jsonObj){/*callback logic here*/});