How to delete the first line in a file - node.js

I'm making a simple Node.js app and I need to delete the first line in a file. Is there any way to do this? I think it should be possible with fs.write, but how?

Here is a streamed version of removing the first line from a file.
Because it uses streams, you don't need to load the whole file into memory, so it is far more efficient and fast, and it can work on very large files without exhausting the memory on your hardware.
var Transform = require('stream').Transform;
var util = require('util');

// Transform stream to remove the first line
function RemoveFirstLine(args) {
    if (!(this instanceof RemoveFirstLine)) {
        return new RemoveFirstLine(args);
    }
    Transform.call(this, args);
    this._buff = '';
    this._removed = false;
}
util.inherits(RemoveFirstLine, Transform);

RemoveFirstLine.prototype._transform = function(chunk, encoding, done) {
    if (this._removed) { // if already removed
        this.push(chunk); // just push the chunk through
    } else {
        // collect chunks into a string buffer
        this._buff += chunk.toString();
        // check if the buffer contains a newline
        if (this._buff.indexOf('\n') !== -1) {
            // push to the stream, skipping the first line
            this.push(this._buff.slice(this._buff.indexOf('\n') + 1));
            // clear the string buffer
            this._buff = null;
            // mark as removed
            this._removed = true;
        }
    }
    done();
};
And use it like so:
var fs = require('fs');

var input = fs.createReadStream('test.txt'); // read file
var output = fs.createWriteStream('test_.txt'); // write file

input // take input
    .pipe(RemoveFirstLine()) // pipe through the line remover
    .pipe(output); // save to file
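Note that a bare .pipe() chain does not forward errors from the earlier streams. As a minimal sketch (assuming Node 10+, where stream.pipeline is available), the same chain can be wired with a single error callback:

var pipeline = require('stream').pipeline;

pipeline(
    fs.createReadStream('test.txt'), // read file
    RemoveFirstLine(), // strip the first line
    fs.createWriteStream('test_.txt'), // write file
    function (err) {
        if (err) console.error('Pipeline failed:', err); // one place to catch errors from any stream in the chain
    }
);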
Another way, which is not recommended: if your files are not large and you don't mind loading them into memory, you can load the file, remove the line, and save the file. It is slower and won't work well on large files.
var fs = require('fs');
var filePath = './test.txt'; // path to file

fs.readFile(filePath, function(err, data) { // read file into memory
    if (!err) {
        data = data.toString(); // stringify buffer
        var position = data.indexOf('\n'); // find position of the first newline
        if (position != -1) { // if a newline was found
            data = data.substr(position + 1); // take the substring after the first line
            fs.writeFile(filePath, data, function(err) { // write file
                if (err) { // if error, report
                    console.log(err);
                }
            });
        } else {
            console.log('no lines found');
        }
    } else {
        console.log(err);
    }
});

Here is another way:
const fs = require('fs');
const filePath = './table.csv';
let csvContent = fs.readFileSync(filePath).toString().split('\n'); // read file and convert to array by line break
csvContent.shift(); // remove the first element from the array
csvContent = csvContent.join('\n'); // convert array back to string
fs.writeFileSync(filePath, csvContent);
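One caveat, not from the original answer: if the file has Windows line endings, splitting on '\n' leaves a trailing '\r' on every line. A small variant that splits on either ending:

let csvContent = fs.readFileSync(filePath).toString().split(/\r?\n/); // handles both LF and CRLF
csvContent.shift(); // remove the first line
fs.writeFileSync(filePath, csvContent.join('\n'));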

Thanks to @Lilleman's comment, I've made an amendment to the original solution; it requires the 3rd-party module "line-by-line" and can prevent memory overflow and race conditions while processing very large files.
const fs = require('fs');
const LineReader = require('line-by-line');

const removeLines = function(srcPath, destPath, count, cb) {
    if (count <= 0) {
        return cb();
    }
    var reader = new LineReader(srcPath);
    var output = fs.createWriteStream(destPath);
    var linesRemoved = 0;
    var isFirstLine = true;
    reader.on('line', (line) => {
        if (linesRemoved < count) {
            linesRemoved++;
            return;
        }
        reader.pause();
        var newLine;
        if (isFirstLine) {
            newLine = line;
            isFirstLine = false;
        } else {
            newLine = '\n' + line;
        }
        output.write(newLine, () => {
            reader.resume();
        });
    })
    .on('error', (err) => {
        reader.pause();
        return cb(err);
    })
    .on('close', () => {
        return cb();
    });
};
---------------- original solution below ----------------
Inspired by another answer, here is a revised stream version:
const fs = require('fs');
const readline = require('readline');

const removeFirstLine = function(srcPath, destPath, done) {
    var rl = readline.createInterface({
        input: fs.createReadStream(srcPath)
    });
    var output = fs.createWriteStream(destPath);
    var firstRemoved = false;
    rl.on('line', (line) => {
        if (!firstRemoved) {
            firstRemoved = true;
            return;
        }
        output.write(line + '\n');
    }).on('close', () => {
        return done();
    });
};
and it can easily be modified to remove a certain number of lines, by changing 'firstRemoved' into a counter:
var linesRemoved = 0;
...
if (linesRemoved < LINES_TO_BE_REMOVED) {
    linesRemoved++;
    return;
}
...
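Put together, the counter variant could look like this (a sketch assembled from the snippet above; count is the number of lines to drop):

const removeLines = function(srcPath, destPath, count, done) {
    var rl = readline.createInterface({
        input: fs.createReadStream(srcPath)
    });
    var output = fs.createWriteStream(destPath);
    var linesRemoved = 0;
    rl.on('line', (line) => {
        if (linesRemoved < count) { // skip the first `count` lines
            linesRemoved++;
            return;
        }
        output.write(line + '\n');
    }).on('close', () => {
        return done();
    });
};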

Here is a naive solution using the Promise-based file system APIs.
const fs = require('node:fs/promises')
const os = require('node:os')

async function removeLines(path, numLinesToRemove) {
    const data = await fs.readFile(path, { encoding: 'utf-8' })
    const newData = data
        .split(os.EOL) // split data into an array of lines
        .slice(numLinesToRemove) // remove the first N lines
        .join(os.EOL) // join the array back into a single string
    // overwrite the original file with the new data
    return fs.writeFile(path, newData)
}
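Usage might look like this (a minimal sketch; './test.txt' is a placeholder path):

removeLines('./test.txt', 1)
    .then(() => console.log('first line removed'))
    .catch(console.error)

Note that os.EOL is the line ending of the current platform ('\n' on POSIX, '\r\n' on Windows), so this assumes the file uses the platform's own line endings.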

Related

How to read and remove some lines from a json file using node.js

I want to extract some comment lines of data from a json file, and after converting those into a certain format I need to remove them. I am explaining my json file below.
test.json:
#PATH:/test/
#DEVICES:div1
#TYPE:p1
{
    name: 'Raj',
    address: {
        city: 'bbsr'
    }
}
The above file is a test json file. Here I need to read the lines which start with #, turn them into a separate object like const obj = {PATH: '/test/', DEVICES: 'div1', TYPE: 'p1'}, and finally remove those 3 lines from the original test.json file using node.js.
The following snippet can be used to solve this problem. It can handle any number of similar comments.
const fs = require('fs');
const readline = require('readline');

const filepath = '/Users/.../nodeText.json'; // file to read from
const writePath = '/Users/.../nodeFileResult.json'; // file to write to
const inputStream = fs.createReadStream(filepath);
const startLine = 'const obj = {\n';
let content = '';

const readPromise = new Promise(function(resolve, reject) {
    const lineReader = readline.createInterface({
        input: inputStream
    });
    inputStream.on('end', () => {
        resolve('end');
    });
    lineReader.on('line', (line) => {
        let resultText = '';
        const replaceDict = {
            '#': '"',
            ':': '": "',
            '\n': '",\n'
        };
        const regexString = /#|:|\n/;
        const re = new RegExp(regexString, 'gim');
        if (line.startsWith('#')) {
            // append \n to make sure it is a line end, and for the regex to work
            resultText = (line + `\n`).replace(re, (match) => {
                return replaceDict[match];
            });
        } else if (line.startsWith('{')) {
            resultText = line.replace('{', `}, \n {`);
        } else {
            resultText = line;
        }
        content = content + resultText;
    });
});

readPromise
    .then((resolve) => {
        if (resolve === 'end') {
            content = startLine + content;
            fs.writeFile(writePath, content, error => {
                if (error) {
                    console.error(error);
                    return;
                }
            });
        }
    });

readline doesn't stop reading lines after rl.close() is called in nodejs

I have the following file which I want to read line by line, and stop reading once I have found "nameserver 8.8.8.8".
nameserver 8.8.8.8
nameserver 45.65.85.3
nameserver 40.98.3.3
I am using nodejs and the readline module to do so
const readline = require('readline');
const fs = require('fs');

function check_resolv_nameserver() {
    // flag indicates whether the nameserver line was found or not
    var nameserver_flag = false;
    const rl = readline.createInterface({
        input: fs.createReadStream('file_to_read.conf')
    });
    rl.on('line', (line) => {
        console.log(`Line from file: ${line}`);
        if (line === 'nameserver 8.8.8.8') {
            console.log('Found the right file. Reading lines should stop here.');
            nameserver_flag = true;
            rl.close();
        }
    });
    rl.on('close', function() {
        if (nameserver_flag === true) {
            console.log('Found nameserver 8.8.8.8');
        } else {
            console.log('Could not find nameserver 8.8.8.8');
        }
    });
}
check_resolv_nameserver();
Since I call rl.close() as soon as I read the first match, I would expect my code to read only the first line and then stop. But instead my output looks like this:
Line from file: nameserver 8.8.8.8
Found the right file. Reading lines should stop here.
Found nameserver 8.8.8.8
Line from file: nameserver 45.65.85.3
Line from file: nameserver 40.98.3.3
How can I make readline stop after the first match and let me proceed with something else?
For those of you who can't make the line reader stop, do this (in your readline callback):
lineReader.close()
lineReader.removeAllListeners()
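Put together, a minimal sketch (using the file from the question) looks like:

const fs = require('fs');
const readline = require('readline');

const lineReader = readline.createInterface({
    input: fs.createReadStream('file_to_read.conf')
});
lineReader.on('line', (line) => {
    if (line === 'nameserver 8.8.8.8') {
        lineReader.close(); // stop the interface
        lineReader.removeAllListeners(); // drop 'line' events already buffered
    }
});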
It appears readline buffers some lines, so you'll have to add your own check.
Example:
#! /usr/bin/node
const fs = require('fs')
const readline = require('readline')

const reader = readline.createInterface({
    input: fs.createReadStream('test.js')
})
let wasRead = false

reader.on('line', line => {
    if (wasRead) return undefined
    console.log('hello world')
    wasRead = true
    reader.close()
})
You should close the stream as well:
const readline = require('readline');
const fs = require('fs');

const readStream = fs.createReadStream('file_to_read.conf');
// More code here ...
const rl = readline.createInterface({
    input: readStream
});
// Rest of your code
rl.close();
readStream.destroy();
I searched for a long time and couldn't get this working, so I got what I wanted thanks to a node module: line-reader.
It's good, as it can read from a file but also from a buffer.
Here is a simple code sample where you read 2 lines and then stop.
const lineReader = require('line-reader');
const stream = require('stream');

let bufferStream = new stream.PassThrough();
bufferStream.end(yourBuffer);

let lineNumber = 0;
lineReader.eachLine(bufferStream, function(line) {
    lineNumber++;
    if (lineNumber === 1 || lineNumber === 2) {
        // Perform whatever
    } else {
        // returning false breaks the reading
        return false;
    }
}, async function finished(err) {
    if (err) {
        // throw error or whatever
    }
    // Do after-reading processing here
});
EDIT: I found a clean way to achieve everything exactly as planned.
First, create a splitter to read string chunks:
class Splitter extends Transform {
    constructor(options) {
        super(options);
        this.splitSize = options.splitSize;
        this.buffer = Buffer.alloc(0);
        this.continueThis = true;
    }
    stopIt() {
        this.continueThis = false;
    }
    _transform(chunk, encoding, cb) {
        this.buffer = Buffer.concat([this.buffer, chunk]);
        while ((this.buffer.length > this.splitSize || this.buffer.length === 1) && this.continueThis) {
            try {
                let chunk = this.buffer.slice(0, this.splitSize);
                this.push(chunk);
                this.buffer = this.buffer.slice(this.splitSize);
                if (this.buffer[0] === 26) {
                    console.log('EOF : ' + this.buffer[0]);
                }
            } catch (err) {
                console.log('ERR OCCURED => ', err);
                break;
            }
        }
        console.log('WHILE FINISHED');
        cb();
    }
}
Then pipe it to your stream:
let bufferStream = new stream.PassThrough();
bufferStream.end(hugeBuffer);

let splitter = new Splitter({ splitSize: 170 }); // in my case lines are 170 bytes long, so I want to process them line by line
let lineNr = 0;

bufferStream
    .pipe(splitter)
    .on('data', async function(line) {
        line = line.toString().trim();
        splitter.pause(); // pause the stream so you can perform long processing with await
        lineNr++;
        if (lineNr === 1) {
            // Do stuff with the 1st line
        } else {
            splitter.stopIt(); // break the stream and stop reading, so we just read the 1st line
        }
        splitter.resume(); // resume the stream so you can process the next chunk
    }).on('error', function(err) {
        console.log('Error while reading file.' + err);
        // whatever
    }).on('end', async function() {
        console.log('end event');
        // Stream has ended, do whatever...
    });
This enables reading a stream line by line. There is no need to use the Splitter if the whole file is not that long.
What worked for me was adding a resume call inside the pause handler; it allows me to edit the file after the read is finished.
var lineReader = require('readline').createInterface({
    input: require('fs').createReadStream(require('path').resolve('test.js'))
});

lineReader.on('line', function (line) { console.log(line) }) /* loop over all lines */
    .on('pause', function () {
        /* resume after reading lines is finished, to close the file */
        lineReader.resume();
    })
    .on('close', function () {
        /* action after the file read is closed */
        console.log('Close ok')
    });
You can declare a line event listener and remove it when needed.
const lineEventListener = (line) => {
    // do sth
    // Close
    rl.close();
    rl.removeListener('line', lineEventListener);
}
rl.on('line', lineEventListener);

Read csv files in stream and store them in database

I have a few huge csv files which I need to store in a mongo database. Because these files are too big, I need to use streams. I pause the stream while the data is written into the database.
var fs = require('fs');
var csv = require('csv');
var mongo = require('mongodb');

var db = mongo.MongoClient.connect...

var readStream = fs.createReadStream('hugefile.csv');
readStream.on('data', function(data) {
    readStream.pause();
    csv.parse(data.toString(), { delimiter: ',' }, function(err, output) {
        db.collection(coll).insert(data, function(err) {
            readStream.resume();
        });
    });
});
readStream.on('end', function() {
    logger.info('file stored');
});
But csv.parse throws an error, because I would need to read the file line by line to handle it as csv and convert it to json for mongodb. Maybe I should not pause the stream, but use an interface. I haven't found any solution for this yet.
Any help would be appreciated!
I think you might want to create a stream of lines from your raw data stream.
Here is an example from the split package. https://www.npmjs.com/package/split
fs.createReadStream(file)
    .pipe(split())
    .on('data', function (line) {
        // each chunk now is a separate line!
    })
Adapted to your example, it might look like this:
var readStream = fs.createReadStream('hugefile.csv');
var lineStream = readStream.pipe(split());
lineStream.on('data', function(data) {
    // remaining code unmodified
});
I'm unsure if bulk() was a thing back in '15, but whoever is trying to import items from large sources should consider using it.
var fs = require('fs');
var csv = require('fast-csv');
var mongoose = require('mongoose');

var db = mongoose.connect...

var counter = 0; // to keep count of values in the bulk()
const BULK_SIZE = 1000;
var bulkItem = Item.collection.initializeUnorderedBulkOp();

var readStream = fs.createReadStream('hugefile.csv');
const csvStream = csv.fromStream(readStream, { headers: true });
csvStream.on('data', data => {
    counter++;
    bulkItem.insert(data);
    if (counter === BULK_SIZE) {
        csvStream.pause();
        bulkItem.execute((err, result) => {
            if (err) console.log(err);
            counter = 0;
            bulkItem = Item.collection.initializeUnorderedBulkOp();
            csvStream.resume();
        });
    }
});

How to read from stdin line by line in Node

I'm looking to process a text file with node using a command line call like:
node app.js < input.txt
Each line of the file needs to be processed individually, but once processed the input line can be forgotten.
Using the on-data listener of stdin, I get the input stream chunked by byte size, so I set this up:
process.stdin.resume();
process.stdin.setEncoding('utf8');

var lingeringLine = "";

process.stdin.on('data', function(chunk) {
    var lines = chunk.split("\n");
    lines[0] = lingeringLine + lines[0];
    lingeringLine = lines.pop();
    lines.forEach(processLine);
});

process.stdin.on('end', function() {
    processLine(lingeringLine);
});
But this seems so sloppy, having to massage around the first and last items of the lines array. Is there not a more elegant way to do this?
You can use the readline module to read from stdin line by line:
const readline = require('readline');

const rl = readline.createInterface({
    input: process.stdin,
    output: process.stdout,
    terminal: false
});

rl.on('line', (line) => {
    console.log(line);
});

rl.once('close', () => {
    // end of input
});
// Works on POSIX and Windows
var fs = require("fs");
var stdinBuffer = fs.readFileSync(0); // STDIN_FILENO = 0
console.log(stdinBuffer.toString());
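Since this reads all of stdin into a single buffer, you can split it into lines afterwards; a small sketch:

var lines = stdinBuffer.toString().split(/\r?\n/); // handles both LF and CRLF
lines.forEach(function (line, i) {
    console.log(i, line); // process each line here
});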
readline is specifically designed to work with a terminal (that is, when process.stdin.isTTY === true). There are a lot of modules which provide split functionality for generic streams, like split. It makes things super easy:
process.stdin.pipe(require('split')()).on('data', processLine)

function processLine (line) {
    console.log(line + '!')
}
#!/usr/bin/env node
const EventEmitter = require('events');

function stdinLineByLine() {
    const stdin = new EventEmitter();
    let buff = '';

    process.stdin
        .on('data', data => {
            buff += data;
            const lines = buff.split(/\r\n|\n/);
            buff = lines.pop(); // the last segment may be a partial line
            lines.forEach(line => stdin.emit('line', line));
        })
        .on('end', () => {
            if (buff.length > 0) stdin.emit('line', buff);
        });

    return stdin;
}

const stdin = stdinLineByLine();
stdin.on('line', console.log);
Read a stream line by line; this should be good for large files piped into stdin. My version:
var n = 0;
function on_line(line, cb) {
    //// process each line here
    console.log(n++, "line ", line);
    return cb();
    //// end of per-line processing
}

var fs = require('fs');
var readStream = fs.createReadStream('all_titles.txt');
// var readStream = process.stdin;
readStream.pause();
readStream.setEncoding('utf8');

var buffer = [];
readStream.on('data', (chunk) => {
    const newlines = /[\r\n]+/;
    var lines = chunk.split(newlines);
    if (lines.length == 1) {
        buffer.push(lines[0]);
        return;
    }
    buffer.push(lines[0]);
    var str = buffer.join('');
    buffer.length = 0;
    readStream.pause();
    on_line(str, () => {
        var i = 1, l = lines.length - 1;
        i--;
        function while_next() {
            i++;
            if (i < l) {
                return on_line(lines[i], while_next);
            } else {
                buffer.push(lines.pop());
                lines.length = 0;
                return readStream.resume();
            }
        }
        while_next();
    });
}).on('end', () => {
    if (buffer.length) {
        var str = buffer.join('');
        buffer.length = 0;
        on_line(str, () => {
            //// after end
            console.error('done');
            //// end after end
        });
    }
});
readStream.resume();
Explanation:
To cut correctly on UTF-8 letter boundaries and not mid-byte, set the encoding to utf8; it ensures each emitted chunk contains only complete multibyte letters.
When data is received, the input is paused. This blocks the input until all lines are used up, and prevents overflowing the buffer if the line-processing function is slower than the input.
If a chunk arrives without any newline, we accumulate it across calls and do nothing else; once there is more than one line, we append the accumulated buffer to the first piece and use it.
After all the split lines are consumed, the last (possibly partial) line is pushed back into the buffer and the paused stream is resumed.
ES6 code:
var n = 0;
async function on_line(line) {
    //// process each line here
    console.log(n++, "line ", line);
    //// end of per-line processing
}

var fs = require('fs');
var readStream = fs.createReadStream('all_titles.txt');
// var readStream = process.stdin;
readStream.pause();
readStream.setEncoding('utf8');

var buffer = [];
readStream.on('data', async (chunk) => {
    const newlines = /[\r\n]+/;
    var lines = chunk.split(newlines);
    if (lines.length == 1) {
        buffer.push(lines[0]);
        return;
    }
    readStream.pause();
    buffer.push(lines[0]); // take the first line
    var str = buffer.join('');
    buffer.length = 0; // clear the array, because it is consumed
    await on_line(str);
    for (let i = 1; i < lines.length - 1; i++)
        await on_line(lines[i]);
    buffer.push(lines[lines.length - 1]);
    lines.length = 0; // optional, clear the array to hint GC
    return readStream.resume();
}).on('end', async () => {
    if (buffer.length) {
        var str = buffer.join('');
        buffer.length = 0;
        await on_line(str);
    }
});
readStream.resume();
I did not test the ES6 code.
In my case the program (elinks) returned lines that looked empty but in fact contained special terminal characters, color control codes, and backspaces, so the grep options presented in other answers did not work for me. So I wrote this small script in Node.js. I called the file tight, but that's just a random name.
#!/usr/bin/env node

// strip backspaces and ANSI color codes, keeping only visible characters
function visible(a) {
    var R = ''
    for (var i = 0; i < a.length; i++) {
        if (a[i] == '\b') { R = R.slice(0, -1); continue; } // backspace removes the previous char
        if (a[i] == '\u001b') { // skip ANSI escape sequences up to the terminating 'm'
            while (a[i] != 'm' && i < a.length) i++
            if (a[i] == undefined) break
        }
        else R += a[i]
    }
    return R
}

function empty(a) {
    a = visible(a)
    for (var i = 0; i < a.length; i++) {
        if (a[i] != ' ') return false
    }
    return true
}

var readline = require('readline')
var rl = readline.createInterface({ input: process.stdin, output: process.stdout, terminal: false })
rl.on('line', function(line) {
    if (!empty(line)) console.log(line)
})
If you want to ask the user for the number of lines first:
// array to save the input, line by line
let xInputs = [];

const getInput = async (resolve) => {
    const readline = require('readline').createInterface({
        input: process.stdin,
        output: process.stdout,
    });
    readline.on('line', (line) => {
        readline.close();
        xInputs.push(line);
        resolve(line);
    })
}

const getMultiInput = (numberOfInputLines, callback) => {
    let i = 0;
    let p = Promise.resolve();
    for (; i < numberOfInputLines; i++) {
        p = p.then(_ => new Promise(resolve => getInput(resolve)));
    }
    p.then(() => {
        callback();
    });
}

// get the number of lines
const readline = require('readline').createInterface({
    input: process.stdin,
    output: process.stdout,
    terminal: false
});
readline.on('line', (line) => {
    getMultiInput(line, () => {
        // get the inputs from the xInputs array here
    });
    readline.close();
})
process.stdin.pipe(process.stdout);

Parsing huge logfiles in Node.js - read in line-by-line

I need to do some parsing of large (5-10 GB) logfiles in Javascript/Node.js (I'm using Cube).
The logline looks something like:
10:00:43.343423 I'm a friendly log message. There are 5 cats, and 7 dogs. We are in state "SUCCESS".
We need to read each line, do some parsing (e.g. strip out 5, 7 and SUCCESS), then pump this data into Cube (https://github.com/square/cube) using their JS client.
Firstly, what is the canonical way in Node to read in a file, line by line?
It seems to be fairly common question online:
http://www.quora.com/What-is-the-best-way-to-read-a-file-line-by-line-in-node-js
Read a file one line at a time in node.js?
A lot of the answers seem to point to a bunch of third-party modules:
https://github.com/nickewing/line-reader
https://github.com/jahewson/node-byline
https://github.com/pkrumins/node-lazy
https://github.com/Gagle/Node-BufferedReader
However, this seems like a fairly basic task - surely, there's a simple way within the stdlib to read in a textfile, line-by-line?
Secondly, I then need to process each line (e.g. convert the timestamp into a Date object, and extract useful fields).
What's the best way to do this, maximising throughput? Is there some way that won't block on either reading in each line, or on sending it to Cube?
Thirdly, I'm guessing that using string splits and the JS equivalent of contains (indexOf != -1?) will be a lot faster than regexes? Has anybody had much experience parsing massive amounts of text data in Node.js?
I searched for a solution to parse very large files (GBs) line by line using a stream. All the third-party libraries and examples did not suit my needs, since they processed the files not line by line (like 1, 2, 3, 4 ...) or read the entire file into memory.
The following solution can parse very large files, line by line, using stream & pipe. For testing I used a 2.1 GB file with 17,000,000 records. RAM usage did not exceed 60 MB.
First, install the event-stream package:
npm install event-stream
Then:
var fs = require('fs')
    , es = require('event-stream');

var lineNr = 0;

var s = fs.createReadStream('very-large-file.csv')
    .pipe(es.split())
    .pipe(es.mapSync(function(line) {
        // pause the readstream
        s.pause();

        lineNr += 1;

        // process line here and call s.resume() when ready
        // function below was for logging memory usage
        logMemoryUsage(lineNr);

        // resume the readstream, possibly from a callback
        s.resume();
    })
    .on('error', function(err) {
        console.log('Error while reading file.', err);
    })
    .on('end', function() {
        console.log('Read entire file.');
    })
);
Please let me know how it goes!
You can use the built-in readline package, see docs here. I use stream to create a new output stream.
var fs = require('fs'),
    readline = require('readline'),
    stream = require('stream');

var instream = fs.createReadStream('/path/to/file');
var outstream = new stream;
outstream.readable = true;
outstream.writable = true;

var rl = readline.createInterface({
    input: instream,
    output: outstream,
    terminal: false
});

rl.on('line', function(line) {
    console.log(line);
    // Do your stuff ...
    // Then write to the output stream
    rl.write(line);
});
Large files will take some time to process. Do tell if it works.
I really liked @gerard's answer, which actually deserves to be the accepted answer here. I made some improvements:
Code is in a class (modular)
Parsing is included
The ability to resume is exposed to the outside, in case an asynchronous job is chained to reading the CSV, like inserting into a DB or an HTTP request
Reading in chunk/batch sizes that the user can declare. I took care of encoding in the stream too, in case you have files in a different encoding.
Here's the code:
Here's the code:
'use strict'

const fs = require('fs'),
    util = require('util'),
    stream = require('stream'),
    es = require('event-stream'),
    parse = require("csv-parse"),
    iconv = require('iconv-lite');

class CSVReader {
    constructor(filename, batchSize, columns) {
        this.reader = fs.createReadStream(filename).pipe(iconv.decodeStream('utf8'))
        this.batchSize = batchSize || 1000
        this.lineNumber = 0
        this.data = []
        this.parseOptions = { delimiter: '\t', columns: true, escape: '/', relax: true }
    }

    read(callback) {
        this.reader
            .pipe(es.split())
            .pipe(es.mapSync(line => {
                ++this.lineNumber
                parse(line, this.parseOptions, (err, d) => {
                    this.data.push(d[0])
                })
                if (this.lineNumber % this.batchSize === 0) {
                    callback(this.data)
                }
            })
            .on('error', function() {
                console.log('Error while reading file.')
            })
            .on('end', function() {
                console.log('Read entire file.')
            }))
    }

    continue() {
        this.data = []
        this.reader.resume()
    }
}

module.exports = CSVReader
So basically, here is how you would use it:
let reader = new CSVReader('path_to_file.csv')
reader.read(() => reader.continue())
I tested this with a 35GB CSV file and it worked for me, which is why I chose to build on @gerard's answer. Feedback is welcome.
I used https://www.npmjs.com/package/line-by-line for reading more than 1,000,000 lines from a text file. In this case, RAM usage was about 50-60 megabytes.
const LineByLineReader = require('line-by-line'),
    lr = new LineByLineReader('big_file.txt');

lr.on('error', function (err) {
    // 'err' contains error object
});

lr.on('line', function (line) {
    // pause emitting of lines...
    lr.pause();

    // ...do your asynchronous line processing..
    setTimeout(function () {
        // ...and continue emitting lines.
        lr.resume();
    }, 100);
});

lr.on('end', function () {
    // All lines are read, file is closed now.
});
The Node.js Documentation offers a very elegant example using the Readline module.
Example: Read File Stream Line-by-Line
const { once } = require('node:events');
const fs = require('fs');
const readline = require('readline');

// wrapped in an async function, since await is used below
async function processLineByLine() {
    const rl = readline.createInterface({
        input: fs.createReadStream('sample.txt'),
        crlfDelay: Infinity
    });

    rl.on('line', (line) => {
        console.log(`Line from file: ${line}`);
    });

    await once(rl, 'close');
}

processLineByLine();
Note: we use the crlfDelay option to recognize all instances of CR LF ('\r\n') as a single line break.
Apart from reading the big file line by line, you can also read it chunk by chunk. For more, refer to this article.
var fs = require('fs');

var offset = 0;
var chunkSize = 2048;
var chunkBuffer = Buffer.alloc(chunkSize); // new Buffer() is deprecated
var fp = fs.openSync('filepath', 'r');
var bytesRead = 0;
var lines = [];
while (bytesRead = fs.readSync(fp, chunkBuffer, 0, chunkSize, offset)) {
    offset += bytesRead;
    var str = chunkBuffer.slice(0, bytesRead).toString();
    var arr = str.split('\n');

    if (bytesRead === chunkSize) {
        // the last item of the arr may not be a full line, leave it to the next chunk
        offset -= arr.pop().length;
    }
    lines.push(arr);
}
console.log(lines);
I had the same problem. After comparing several modules that seem to have this feature, I decided to do it myself; it's simpler than I thought.
gist: https://gist.github.com/deemstone/8279565
var fetchBlock = lineByline(filepath, onEnd);
fetchBlock(function(lines, start){ ... }); // lines{array}, start{int} - the line number of lines[0]
It wraps the file opened in a closure; the returned fetchBlock() fetches a block from the file and splits it into an array (handling the segment left over from the last fetch).
I've set the block size to 1024 for each read operation. This may have bugs, but the code logic is obvious, try it yourself.
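The gist itself is not reproduced here, but a sketch of the described pattern (a closure over a synchronously opened file, 1024-byte reads, with the leftover segment carried between calls) might look like:

var fs = require('fs');

function lineByline(filepath, onEnd) {
    var fd = fs.openSync(filepath, 'r');
    var leftover = '';
    var lineNo = 0;
    return function fetchBlock(cb) {
        var buf = Buffer.alloc(1024);
        var bytesRead = fs.readSync(fd, buf, 0, 1024, null);
        if (bytesRead === 0) { // end of file
            fs.closeSync(fd);
            if (leftover) cb([leftover], lineNo); // flush the final partial line
            if (onEnd) onEnd();
            return;
        }
        var parts = (leftover + buf.toString('utf8', 0, bytesRead)).split('\n');
        leftover = parts.pop(); // the last segment may be incomplete, keep it for the next fetch
        var start = lineNo;
        lineNo += parts.length;
        cb(parts, start); // lines{array}, start{int}
    };
}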
Reading / writing files using streams with the native nodejs modules (fs, readline):
const fs = require('fs');
const readline = require('readline');

const rl = readline.createInterface({
    input: fs.createReadStream('input.json'),
    output: fs.createWriteStream('output.json')
});

rl.on('line', function(line) {
    console.log(line);
    // Do any 'line' processing if you want and then write to the output file
    this.output.write(`${line}\n`);
});

rl.on('close', function() {
    console.log(`Created "${this.output.path}"`);
});
Based on this question's answer I implemented a class you can use to read a file synchronously line by line with fs.readSync(). You can make it "pause" and "resume" by using a Q promise (jQuery seems to require a DOM, so you can't run it with nodejs):
var fs = require('fs');
var Q = require('q');

var lr = new LineReader(filenameToLoad);
lr.open();

var promise;
workOnLine = function () {
    var line = lr.readNextLine();
    promise = complexLineTransformation(line).then(
        function() { console.log('ok'); workOnLine(); },
        function() { console.log('error'); }
    );
}
workOnLine();

complexLineTransformation = function (line) {
    var deferred = Q.defer();
    // ... async call goes here, in callback: deferred.resolve('done ok'); or deferred.reject(new Error(error));
    return deferred.promise;
}

function LineReader (filename) {
    this.moreLinesAvailable = true;
    this.fd = undefined;
    this.bufferSize = 1024 * 1024;
    this.buffer = Buffer.alloc(this.bufferSize); // new Buffer() is deprecated
    this.leftOver = '';
    this.read = undefined;
    this.idxStart = undefined;
    this.idx = undefined;
    this.lineNumber = 0;
    this._bundleOfLines = [];

    this.open = function() {
        this.fd = fs.openSync(filename, 'r');
    };

    this.readNextLine = function () {
        if (this._bundleOfLines.length === 0) {
            this._readNextBundleOfLines();
        }
        this.lineNumber++;
        var lineToReturn = this._bundleOfLines[0];
        this._bundleOfLines.splice(0, 1); // remove first element (pos, howmany)
        return lineToReturn;
    };

    this.getLineNumber = function() {
        return this.lineNumber;
    };

    this._readNextBundleOfLines = function() {
        var line = "";
        while ((this.read = fs.readSync(this.fd, this.buffer, 0, this.bufferSize, null)) !== 0) { // read next bytes until end of file
            this.leftOver += this.buffer.toString('utf8', 0, this.read); // append to leftOver
            this.idxStart = 0;
            while ((this.idx = this.leftOver.indexOf("\n", this.idxStart)) !== -1) { // as long as there is a newline char in leftOver
                line = this.leftOver.substring(this.idxStart, this.idx);
                this._bundleOfLines.push(line);
                this.idxStart = this.idx + 1;
            }
            this.leftOver = this.leftOver.substring(this.idxStart);
            if (line !== "") {
                break;
            }
        }
    };
}
node-byline uses streams, so I would prefer that one for your huge files.
For your date conversions I would use moment.js.
For maximising your throughput you could think about using a software cluster. There are some nice modules which wrap the node-native cluster module quite well. I like cluster-master from isaacs. E.g. you could create a cluster of x workers which all compute a file, as sketched below.
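As a rough sketch using Node's built-in cluster module rather than cluster-master itself (the file list and the env-variable hand-off are made up for illustration):

const cluster = require('cluster');
const os = require('os');

if (cluster.isMaster) {
    const files = ['log1.txt', 'log2.txt']; // hypothetical file list
    files.slice(0, os.cpus().length).forEach((file) => {
        cluster.fork({ FILE_TO_PARSE: file }); // hand one file to each worker via its environment
    });
} else {
    console.log('worker parsing', process.env.FILE_TO_PARSE);
    // parse the assigned file here, then exit
    process.exit(0);
}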
For benchmarking splits vs regexes, use benchmark.js; I haven't tested it myself yet. benchmark.js is available as a node module.
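A minimal benchmark.js sketch comparing the two approaches (the sample line is made up):

const Benchmark = require('benchmark');
const suite = new Benchmark.Suite();
const line = '10:00:43.343423 There are 5 cats, and 7 dogs. We are in state "SUCCESS".';

suite
    .add('indexOf', function () {
        return line.indexOf('SUCCESS') !== -1;
    })
    .add('regex', function () {
        return /SUCCESS/.test(line);
    })
    .on('complete', function () {
        console.log('Fastest is ' + this.filter('fastest').map('name'));
    })
    .run();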
import * as csv from 'fast-csv';
import * as fs from 'fs';

interface Row {
    [s: string]: string;
}

type RowCallBack = (data: Row, index: number) => object;

export class CSVReader {
    protected file: string;
    protected csvOptions = {
        delimiter: ',',
        headers: true,
        ignoreEmpty: true,
        trim: true
    };

    constructor(file: string, csvOptions = {}) {
        if (!fs.existsSync(file)) {
            throw new Error(`File ${file} not found.`);
        }
        this.file = file;
        this.csvOptions = Object.assign({}, this.csvOptions, csvOptions);
    }

    public read(callback: RowCallBack): Promise<Array<object>> {
        return new Promise<Array<object>>(resolve => {
            const readStream = fs.createReadStream(this.file);
            const results: Array<any> = [];
            let index = 0;
            const csvStream = csv.parse(this.csvOptions).on('data', async (data: Row) => {
                index++;
                results.push(await callback(data, index));
            }).on('error', (err: Error) => {
                console.error(err.message);
                throw err;
            }).on('end', () => {
                resolve(results);
            });
            readStream.pipe(csvStream);
        });
    }
}
import { CSVReader } from '../src/helpers/CSVReader';

(async () => {
    const reader = new CSVReader('./database/migrations/csv/users.csv');
    const users = await reader.read(async data => {
        return {
            username: data.username,
            name: data.name,
            email: data.email,
            cellPhone: data.cell_phone,
            homePhone: data.home_phone,
            roleId: data.role_id,
            description: data.description,
            state: data.state,
        };
    });
    console.log(users);
})();
I have made a node module to read large text or JSON files asynchronously.
Tested on large files.
var fs = require('fs')
    , util = require('util')
    , stream = require('stream')
    , es = require('event-stream');

module.exports = FileReader;

function FileReader() {
}

FileReader.prototype.read = function(pathToFile, callback) {
    var returnTxt = '';
    var s = fs.createReadStream(pathToFile)
        .pipe(es.split())
        .pipe(es.mapSync(function(line) {
            // pause the readstream
            s.pause();

            // console.log('reading line: ' + line);
            returnTxt += line;

            // resume the readstream, possibly from a callback
            s.resume();
        })
        .on('error', function() {
            console.log('Error while reading file.');
        })
        .on('end', function() {
            console.log('Read entire file.');
            callback(returnTxt);
        })
    );
};

FileReader.prototype.readJSON = function(pathToFile, callback) {
    try {
        this.read(pathToFile, function(txt) { callback(JSON.parse(txt)); });
    } catch (err) {
        throw new Error('json file is not valid! ' + err.stack);
    }
};
Just save the file as file-reader.js, and use it like this:
var FileReader = require('./file-reader');
var fileReader = new FileReader();
fileReader.readJSON(__dirname + '/largeFile.json', function(jsonObj){/*callback logic here*/});
