Not able to access value in Node.js code

I have the following code in my Node.js function. Even when the if condition is true, the stringResponse variable is not updated.
var fs = require('fs'),
    byline = require('byline');

var stream = fs.createReadStream('Solutionbank.csv');
var stringResponse = "We found a similar issue. ";
stream = byline.createStream(stream);

stream.on('data', function(line) {
    var csvLine = line.toString('utf8');
    csvLine = csvLine.toLowerCase().trim();
    var msgText = subject.toLowerCase().trim();
    //console.log("email block", msgText);
    if (csvLine.indexOf(msgText) > -1) {
        var arr = line.toString('utf8').split(",");
        stringResponse = stringResponse + "Issue ID" + arr[0] + ",Subject : " + arr[1] + ", Resolution Comment:" + arr[2];
        console.log(stringResponse);
    }
});
console.log("stringResponse from email", stringResponse);

The line:
    console.log("stringResponse from email", stringResponse);
is outside the on('data') callback, so it runs before stringResponse is ever updated: the stream events fire asynchronously, after the synchronous code has finished. Put it at the end of the callback (inside it), or in the other callback that fires at the end of the stream:
stream.on('data', function(line) {
    ...
    console.log("stringResponse from email", stringResponse); // shows each append
});

stream.on('end', function() {
    ...
    console.log("stringResponse from email", stringResponse); // shows the final value, once all lines have been read
});
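For reference, a minimal sketch of the whole flow with the logging moved into the 'end' handler (assuming, as in the question, that `subject` is defined in the surrounding function):

var fs = require('fs'),
    byline = require('byline');

var stringResponse = "We found a similar issue. ";
var stream = byline.createStream(fs.createReadStream('Solutionbank.csv'));

stream.on('data', function(line) {
    var csvLine = line.toString('utf8').toLowerCase().trim();
    var msgText = subject.toLowerCase().trim(); // `subject` comes from the caller's scope
    if (csvLine.indexOf(msgText) > -1) {
        var arr = line.toString('utf8').split(",");
        stringResponse += "Issue ID" + arr[0] + ",Subject : " + arr[1] + ", Resolution Comment:" + arr[2];
    }
});

stream.on('end', function() {
    // All lines have been processed; stringResponse now holds the final value.
    console.log("stringResponse from email", stringResponse);
});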


How to get base64 value from function?

I need to create dummy data for MongoDB. Part of the data is pictures, which need to be converted to base64 first, and then I need to display them.
Here is my controller:
var buffer = require('buffer');
var path = require('path');
var fs = require('fs');

var users_model;

var PageUsersController = function () {
    users_model = require('../models/users_model');
}
module.exports = PageUsersController;

var pic_binary;

function encode_base64(filename) {
    fs.readFile(path.join(__dirname, '../foo/', filename), function (error, data) {
        if (error) {
            throw error;
        } else {
            var buf = Buffer.from(data);
            var base64 = buf.toString('base64');
            pic_binary = base64;
            console.log(pic_binary); // returns result.
            return base64;
        }
    });
}
encode_base64('user_pic.jpg');

PageUsersController.getUserDetails = function (cb) {
    var flat_details = [
        {
            user_name: "john",
            user_pic: pic_binary, // always returns undefined
        }
    ];
    return cb(null, flat_details);
}
How can I pass the variable 'pic_binary', which holds the result of the encoding, to the MongoDB schema?
I've tried assigning the function's return value to a variable and passing that, but I get undefined no matter how I store the result.
Got it.
I should have used the synchronous version:
var buff = fs.readFileSync(path.join(__dirname, '../user_pic/', 'profile_pic.png'));
var base64data = buff.toString('base64');

var flat_details = [
    {
        user_pic: base64data, // problem solved.
    }
];
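As an aside (not part of the accepted fix above): if you would rather keep the read asynchronous, one option is to do the encoding inside getUserDetails itself, so the callback only fires once the data is ready. A minimal sketch, reusing the paths from the snippets above:

var fs = require('fs');
var path = require('path');

PageUsersController.getUserDetails = function (cb) {
    fs.readFile(path.join(__dirname, '../user_pic/', 'profile_pic.png'), function (err, data) {
        if (err) return cb(err); // assuming an error-first callback
        var flat_details = [
            {
                user_name: "john",
                user_pic: data.toString('base64') // encoded only after the file has actually been read
            }
        ];
        cb(null, flat_details);
    });
};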

mocha test sends `test` as variable to node app

When writing the tests for my entry file, index.js, I run into the problem that the command `mocha test` passes `test` as an argument to index.js, because index.js uses process.argv to receive parameters when running in a development environment. I had thought that naming the parameters with something like minimist would fix this, but the problem remains when running the tests: the tests do not use the event object provided in my test suites, as shown in the following code.
How do I get around this, so that when running my tests it uses the event object I provide in my test set-up and not the command `mocha test`?
index.js
'use strict';

var _ = require("underscore");
var async = require('async');
var argv = require("minimist")(process.argv.slice(2));

var getprotocol = require("./getProtocol");
var _getprotocol = getprotocol.getProtocol;
var S3rs = require("./S3resizer");
var s3resizer = S3rs.rs;
var objCr = require("./objectCreator");
var createObj = objCr.creator;
var fileRs = require("./fileResizer");
var fileResizer = fileRs.rs;
var configs = require("./configs.json");
var mkDir = require("./makeDir");
var makeDir = mkDir.handler;

exports.imageRs = function (event, context) {
    var _path = argv.x || event.path; // argv.x used to be process.argv[2]
    console.log("Path, %s", _path);

    var _dir = argv.y; // used to be process.argv[3]
    console.log(_dir);

    var parts = _getprotocol(_path);
    var imgName = parts.pathname.split("/").pop();
    console.log("imgName: %s", imgName);

    var s3Bucket = parts.hostname;
    var s3Key = imgName;
    var _protocol = parts.protocol;
    console.log(_protocol);

    // RegExp to check for image type
    var imageTypeRegExp = /(?:(jpg)|(png)|(jpeg))$/;
    var sizesConfigs = configs.sizes;
    var obj = createObj(_path);

    // Check if file has a supported image extension
    var imgExt = imageTypeRegExp.exec(s3Key);
    if (imgExt === null) {
        console.error('unable to infer the image type for key %s', s3Key);
        context.done(new Error('unable to infer the image type for key %s' + s3Key));
        return;
    }
    var imageType = imgExt[1] || imgExt[2];

    // Do more stuff here
};

if (!process.env.LAMBDA_TASK_ROOT) {
    exports.imageRs();
}
test.js
describe("imgeRs", function () {
var getprotocol = require("../getProtocol");
var S3rs = require("../S3resizer");
var objCr = require("../objectCreator");
var mkDir = require("../makeDir");
var fileResizer = require("../fileResizer");
describe("Calling S3", function () {
describe("Success call", function () {
var testedModule, eventObj, contextDoneSpy, S3resizerStub, objCreatorStub, getProtocolStub, fakeResults, mkDirStub, fileResizerStub;
before(function (done) {
contextDoneSpy = sinon.spy();
S3resizerStub = sinon.stub(S3rs, "rs");
objCreatorStub = sinon.stub(objCr, 'creator');
getProtocolStub = sinon.stub(getprotocol, "getProtocol");
mkDirStub = sinon.stub(mkDir, "handler");
fileResizerStub = sinon.stub(fileResizer, "rs");
eventObj = {"path": "s3://theBucket/image.jpeg"};
fakeResults = ["resized"];
testedModule = proxyquire("../index", {
'./getProtocol': {
'getProtocol': getProtocolStub
},
'./S3resizer': {
'rs': S3resizerStub
},
'./objectCreator': {
'creator': objCreatorStub
},
'./makeDir': {
'handler': mkDirStub
},
'./fileResizer': {
'rs': fileResizerStub
}
});
S3resizerStub.callsArgWith(5, null, fakeResults);
testedModule.imageRs(eventObj, {done: function (error) {
contextDoneSpy.apply(null, arguments);
done();
}});
});
after(function () {
S3rs.rs.restore();
objCr.creator.restore();
getprotocol.getProtocol.restore();
mkDir.handler.restore();
fileResizer.rs.restore();
});
it("calls context.done with no error", function () {
expect(contextDoneSpy).has.been.called;
});
});
});
});
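No answer is recorded here, but one common way around this (shown only as a sketch, not necessarily what the original poster ended up doing) is to make index.js stop acting on CLI arguments when it is loaded as a module: prefer values from the event object and guard the self-invocation with require.main === module:

// index.js (sketch)
var argv = require("minimist")(process.argv.slice(2));

exports.imageRs = function (event, context) {
    event = event || {};
    // Prefer the value supplied by the test/caller; fall back to the CLI flag.
    var _path = event.path || argv.x;
    var _dir = event.dir || argv.y;
    // ... rest of the handler ...
};

// Only self-invoke when this file is run directly (node index.js --x ... --y ...),
// not when it is require()d by mocha or by the Lambda runtime.
if (require.main === module && !process.env.LAMBDA_TASK_ROOT) {
    exports.imageRs({}, { done: function () {} });
}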

nodejs create a duplex stream from a readable and writable

I'm working on the nodeschool stream adventure and got to the duplex redux challenge.
I solved the challenge using the duplexer module but wanted to solve it using native stream functionality.
Here is the solution using duplexer:
var duplexer = require('duplexer');
var through = require('through');

module.exports = function (counter) {
    var counts = {};
    var input = through(write, end);
    return duplexer(input, counter);

    function write (row) {
        counts[row.country] = (counts[row.country] || 0) + 1;
    }
    function end () { counter.setCounts(counts) }
};
So it gets a readable stream in, and a duplex stream is returned. I'm trying to solve this again with:
var duplexer = require('duplexer');
var stream = require('stream');

module.exports = function (counter) {
    var counts = {};
    var ts = stream.Transform({ objectMode: true });

    ts._transform = function(obj, enc, next) {
        var country = obj.country;
        var count = counts[country] || 0;
        counts[obj.country] = count + 1;
        next();
    };

    ts.on('finish', function() {
        counter.setCounts(counts);
    });

    counter.pipe(ts);
    return ts;
};
When run as is, it results in no output at all, so I changed next() to next(null, JSON.stringify(counts)). The stringify is because the next stream is not set to object mode. I get output, but it's wrong, and it throws "stream.push() after EOF".
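No answer is recorded here, but for reference, a minimal sketch of how a native Transform can emit its final result exactly once by overriding _flush, which runs after the last _transform call and before the readable side ends, so nothing is pushed after the stream has already signalled EOF. Whether this exact wiring satisfies the duplexer-redux exercise depends on how the adventure feeds and reads the stream; treat it as an illustration of _flush, not a verified solution:

var stream = require('stream');

module.exports = function (counter) {
    var counts = {};

    var ts = new stream.Transform({ objectMode: true });

    ts._transform = function (obj, enc, next) {
        counts[obj.country] = (counts[obj.country] || 0) + 1;
        next(); // count the row, push nothing yet
    };

    // Called once, after the last _transform and before 'end' is emitted,
    // so this is a safe place for the final, one-off output.
    ts._flush = function (done) {
        counter.setCounts(counts);
        done();
    };

    return ts;
};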

Node appendFile not appending data chunk to file on filesystem

I have a program that tries to get the values from a request (sent using curl), store them in a file, and serve the stored content back. The decision to overwrite or append the contents of the file is based on a query parameter, appendFlag.
Now when I run this program, what I see in the console is "true" and "appending". This suggests that it does read the flag and goes into the if branch, but somehow the appendFile function is not working.
var http = require('http');
var url = require('url');
var querystring = require('querystring');
var fs = require('fs');

http.createServer(function(request, response){
    var str = request.url.split('?')[1];
    var query = querystring.parse(str);
    var writeStream = fs.createWriteStream(query['fileName']);
    console.log("query - ");
    console.log(query["appendFlag"]);

    request.on('data', function(chunk){
        if(query["appendFlag"] == "true"){
            console.log("appending");
            fs.appendFile(query['fileName'], chunk.toString(), function(err){
                if(err) throw err;
            });
        } else {
            var bufferGood = writeStream.write(chunk);
            if(!bufferGood) request.pause();
        }
    });

    request.on('end', function(){
        response.writeHead(200);
        response.write("\n Content with this url is - \n");
        var readStream = fs.createReadStream(query['fileName'], {bufferSize: 64*1024});
        readStream.on('data', function(chunk){
            response.write(chunk.toString());
        });
        readStream.on('end', function(){
            response.write("\n");
            response.end();
        });
    });

    writeStream.on('drain', function(){
        request.resume();
    });
}).listen(8080);
Then, after reading an answer from SO (How to create appending writeStream in Node.js), I tried:
// Program to extract url from the request and writing in that particular file
var http = require('http');
var url = require('url');
var querystring = require('querystring');
var fs = require('fs');

http.createServer(function(request, response){
    var str = request.url.split('?')[1];
    var query = querystring.parse(str);

    var writeStream = fs.createWriteStream(query['fileName']);
    options = {
        'flags': 'a',
        'encoding': null,
        'mode': 0666
    }
    var appendStream = fs.createWriteStream(query['fileName'], [options]);

    console.log("query - ");
    console.log(query["appendFlag"]);

    request.on('data', function(chunk){
        if(query["appendFlag"] == "true"){
            console.log("appending");
            var bufferGood = appendStream.write(chunk.toString());
            if(!bufferGood) request.pause();
        } else {
            var bufferGood = writeStream.write(chunk.toString());
            if(!bufferGood) request.pause();
        }
    });

    request.on('end', function(){
        response.writeHead(200);
        response.write("\n Content with this url is - \n");
        var readStream = fs.createReadStream(query['fileName'], {bufferSize: 64*1024});
        readStream.on('data', function(chunk){
            response.write(chunk.toString());
        });
        readStream.on('end', function(){
            response.write("\n");
            response.end();
        });
    });

    writeStream.on('drain', function(){
        request.resume();
    });
}).listen(8080);
That is, I changed the flag to 'a', and it still did not append the data.
You can use your first variant, but before appendFile() you've already opened a writeStream for the same query['fileName']. The stream is already open (and, with the default 'w' flag, it truncates the file):
var writeStream = fs.createWriteStream(query['fileName']);
options = {
    'flags': 'a',
    'encoding': null,
    'mode': 0666
}
var appendStream = fs.createWriteStream(query['fileName'], [options]);
Maybe it's better to do something like:

var options = {
    flags: query.appendFlag === "true" ? 'a' : 'w'
    ...

Next: why [options]? You should remove the brackets and pass the options object itself, not an array.
Next: there is no guarantee you'll have a fileName param in the query string. Please handle that case.
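Putting those points together, a minimal sketch of what the server could look like (one stream per request, flags chosen from appendFlag, and pipe() handling the pause/resume backpressure the original code managed by hand):

var http = require('http');
var querystring = require('querystring');
var fs = require('fs');

http.createServer(function (request, response) {
    var query = querystring.parse(request.url.split('?')[1] || '');
    if (!query.fileName) {
        response.writeHead(400);
        return response.end('fileName query parameter is required\n');
    }

    // Open a single stream: append mode when appendFlag=true, otherwise truncate.
    var out = fs.createWriteStream(query.fileName, {
        flags: query.appendFlag === 'true' ? 'a' : 'w'
    });

    request.pipe(out); // pipe() handles backpressure (pause/resume) for us

    out.on('finish', function () {
        response.writeHead(200);
        response.write('\n Content with this url is - \n');
        fs.createReadStream(query.fileName).pipe(response);
    });
}).listen(8080);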

Parsing huge logfiles in Node.js - read in line-by-line

I need to do some parsing of large (5-10 GB) logfiles in JavaScript/Node.js (I'm using Cube).
The logline looks something like:
10:00:43.343423 I'm a friendly log message. There are 5 cats, and 7 dogs. We are in state "SUCCESS".
We need to read each line, do some parsing (e.g. strip out 5, 7 and SUCCESS), then pump this data into Cube (https://github.com/square/cube) using their JS client.
Firstly, what is the canonical way in Node to read in a file, line by line?
It seems to be a fairly common question online:
http://www.quora.com/What-is-the-best-way-to-read-a-file-line-by-line-in-node-js
Read a file one line at a time in node.js?
A lot of the answers seem to point to a bunch of third-party modules:
https://github.com/nickewing/line-reader
https://github.com/jahewson/node-byline
https://github.com/pkrumins/node-lazy
https://github.com/Gagle/Node-BufferedReader
However, this seems like a fairly basic task - surely, there's a simple way within the stdlib to read in a textfile, line-by-line?
Secondly, I then need to process each line (e.g. convert the timestamp into a Date object, and extract useful fields).
What's the best way to do this, maximising throughput? Is there some way that won't block on either reading in each line, or on sending it to Cube?
Thirdly, I'm guessing that using string splits and the JS equivalent of contains (indexOf != -1?) will be a lot faster than regexes? Has anybody had much experience in parsing massive amounts of text data in Node.js?
I searched for a solution to parse very large files (GBs) line by line using a stream. None of the third-party libraries and examples suited my needs, since they did not process the files line by line (1, 2, 3, 4, ...) or read the entire file into memory.
The following solution can parse very large files, line by line, using stream & pipe. For testing I used a 2.1 GB file with 17,000,000 records. RAM usage did not exceed 60 MB.
First, install the event-stream package:
npm install event-stream
Then:
var fs = require('fs')
    , es = require('event-stream');

var lineNr = 0;

var s = fs.createReadStream('very-large-file.csv')
    .pipe(es.split())
    .pipe(es.mapSync(function(line){
        // pause the readstream
        s.pause();

        lineNr += 1;

        // process line here and call s.resume() when ready
        // function below was for logging memory usage
        logMemoryUsage(lineNr);

        // resume the readstream, possibly from a callback
        s.resume();
    })
    .on('error', function(err){
        console.log('Error while reading file.', err);
    })
    .on('end', function(){
        console.log('Read entire file.')
    })
);
Please let me know how it goes!
You can use the built-in readline package; see the docs here. I use stream to create a new output stream.
var fs = require('fs'),
    readline = require('readline'),
    stream = require('stream');

var instream = fs.createReadStream('/path/to/file');
var outstream = new stream;
outstream.readable = true;
outstream.writable = true;

var rl = readline.createInterface({
    input: instream,
    output: outstream,
    terminal: false
});

rl.on('line', function(line) {
    console.log(line);
    //Do your stuff ...
    //Then write to output stream
    rl.write(line);
});
Large files will take some time to process. Do tell if it works.
I really liked @gerard's answer, which actually deserves to be the accepted answer here. I made some improvements:
Code is in a class (modular)
Parsing is included
The ability to resume is exposed to the outside, in case an asynchronous job is chained to reading the CSV, such as inserting into a DB or making an HTTP request
Reading in chunk/batch sizes that the user can declare. I took care of encoding in the stream too, in case you have files in a different encoding.
Here's the code:
'use strict'

const fs = require('fs'),
    util = require('util'),
    stream = require('stream'),
    es = require('event-stream'),
    parse = require("csv-parse"),
    iconv = require('iconv-lite');

class CSVReader {
    constructor(filename, batchSize, columns) {
        this.reader = fs.createReadStream(filename).pipe(iconv.decodeStream('utf8'))
        this.batchSize = batchSize || 1000
        this.lineNumber = 0
        this.data = []
        this.parseOptions = {delimiter: '\t', columns: true, escape: '/', relax: true}
    }

    read(callback) {
        this.reader
            .pipe(es.split())
            .pipe(es.mapSync(line => {
                ++this.lineNumber

                parse(line, this.parseOptions, (err, d) => {
                    this.data.push(d[0])
                })

                if (this.lineNumber % this.batchSize === 0) {
                    callback(this.data)
                }
            })
            .on('error', function(){
                console.log('Error while reading file.')
            })
            .on('end', function(){
                console.log('Read entire file.')
            }))
    }

    continue () {
        this.data = []
        this.reader.resume()
    }
}

module.exports = CSVReader
So basically, here is how you will use it:

let reader = new CSVReader('path_to_file.csv')
reader.read(() => reader.continue())

I tested this with a 35 GB CSV file and it worked for me, which is why I chose to build on @gerard's answer. Feedback is welcome.
I used https://www.npmjs.com/package/line-by-line to read more than 1,000,000 lines from a text file. In this case, RAM usage was about 50-60 MB.
const LineByLineReader = require('line-by-line'),
    lr = new LineByLineReader('big_file.txt');

lr.on('error', function (err) {
    // 'err' contains error object
});

lr.on('line', function (line) {
    // pause emitting of lines...
    lr.pause();

    // ...do your asynchronous line processing..
    setTimeout(function () {
        // ...and continue emitting lines.
        lr.resume();
    }, 100);
});

lr.on('end', function () {
    // All lines are read, file is closed now.
});
The Node.js Documentation offers a very elegant example using the Readline module.
Example: Read File Stream Line-by-Line
const { once } = require('node:events');
const fs = require('fs');
const readline = require('readline');

(async function processLineByLine() {
    const rl = readline.createInterface({
        input: fs.createReadStream('sample.txt'),
        crlfDelay: Infinity
    });

    rl.on('line', (line) => {
        console.log(`Line from file: ${line}`);
    });

    await once(rl, 'close');
})();
Note: we use the crlfDelay option to recognize all instances of CR LF ('\r\n') as a single line break.
Apart from reading the big file line by line, you can also read it chunk by chunk. For more, refer to this article.
var fs = require('fs');

var offset = 0;
var chunkSize = 2048;
var chunkBuffer = Buffer.alloc(chunkSize);
var fp = fs.openSync('filepath', 'r');
var bytesRead = 0;
var lines = [];

while ((bytesRead = fs.readSync(fp, chunkBuffer, 0, chunkSize, offset)) !== 0) {
    offset += bytesRead;
    var str = chunkBuffer.slice(0, bytesRead).toString();
    var arr = str.split('\n');

    if (bytesRead === chunkSize) {
        // the last item of arr may not be a full line, leave it to the next chunk
        offset -= arr.pop().length;
    }
    lines = lines.concat(arr);
}
console.log(lines);
I had the same problem. After comparing several modules that seem to have this feature, I decided to do it myself; it's simpler than I thought.
gist: https://gist.github.com/deemstone/8279565
var fetchBlock = lineByline(filepath, onEnd);
fetchBlock(function(lines, start){ ... }); // lines {array}, start {int}: the line number of lines[0]

It keeps the opened file in a closure; the returned fetchBlock() fetches a block from the file and splits it into an array of lines (handling the partial segment left over from the last fetch).
I've set the block size to 1024 for each read operation. This may have bugs, but the code logic is obvious; try it yourself.
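The implementation itself lives in the linked gist; purely as a rough reconstruction of the idea described above (the names and details here are illustrative, not the gist's actual code):

var fs = require('fs');

// Illustrative sketch: returns a function that reads the next block of lines.
function lineByline(filepath, onEnd) {
    var fd = fs.openSync(filepath, 'r');
    var blockSize = 1024;
    var buffer = Buffer.alloc(blockSize);
    var leftover = '';   // partial line carried over from the previous block
    var lineNo = 0;

    return function fetchBlock(cb) {
        var bytesRead = fs.readSync(fd, buffer, 0, blockSize, null);
        if (bytesRead === 0) {
            fs.closeSync(fd);
            return onEnd && onEnd();
        }
        var chunk = leftover + buffer.toString('utf8', 0, bytesRead);
        var lines = chunk.split('\n');
        leftover = lines.pop(); // keep the trailing partial line for next time
        var start = lineNo;
        lineNo += lines.length;
        cb(lines, start);
    };
}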
Reading / Writing files using stream with the native nodejs modules (fs, readline):
const fs = require('fs');
const readline = require('readline');

const rl = readline.createInterface({
    input: fs.createReadStream('input.json'),
    output: fs.createWriteStream('output.json')
});

rl.on('line', function(line) {
    console.log(line);
    // Do any 'line' processing if you want and then write to the output file
    this.output.write(`${line}\n`);
});

rl.on('close', function() {
    console.log(`Created "${this.output.path}"`);
});
Based on this question's answers, I implemented a class you can use to read a file synchronously line-by-line with fs.readSync(). You can make it "pause" and "resume" by using a Q promise (jQuery seems to require a DOM, so it can't run in Node.js):
var fs = require('fs');
var Q = require('q');

var lr = new LineReader(filenameToLoad);
lr.open();

var promise;
workOnLine = function () {
    var line = lr.readNextLine();
    promise = complexLineTransformation(line).then(
        function() { console.log('ok'); workOnLine(); },
        function() { console.log('error'); }
    );
}
workOnLine();

complexLineTransformation = function (line) {
    var deferred = Q.defer();
    // ... async call goes here, in callback: deferred.resolve('done ok'); or deferred.reject(new Error(error));
    return deferred.promise;
}

function LineReader (filename) {
    this.moreLinesAvailable = true;
    this.fd = undefined;
    this.bufferSize = 1024 * 1024;
    this.buffer = new Buffer(this.bufferSize);
    this.leftOver = '';
    this.read = undefined;
    this.idxStart = undefined;
    this.idx = undefined;
    this.lineNumber = 0;
    this._bundleOfLines = [];

    this.open = function() {
        this.fd = fs.openSync(filename, 'r');
    };

    this.readNextLine = function () {
        if (this._bundleOfLines.length === 0) {
            this._readNextBundleOfLines();
        }
        this.lineNumber++;
        var lineToReturn = this._bundleOfLines[0];
        this._bundleOfLines.splice(0, 1); // remove first element (pos, howmany)
        return lineToReturn;
    };

    this.getLineNumber = function() {
        return this.lineNumber;
    };

    this._readNextBundleOfLines = function() {
        var line = "";
        while ((this.read = fs.readSync(this.fd, this.buffer, 0, this.bufferSize, null)) !== 0) { // read next bytes until end of file
            this.leftOver += this.buffer.toString('utf8', 0, this.read); // append to leftOver
            this.idxStart = 0;
            while ((this.idx = this.leftOver.indexOf("\n", this.idxStart)) !== -1) { // as long as there is a newline-char in leftOver
                line = this.leftOver.substring(this.idxStart, this.idx);
                this._bundleOfLines.push(line);
                this.idxStart = this.idx + 1;
            }
            this.leftOver = this.leftOver.substring(this.idxStart);
            if (line !== "") {
                break;
            }
        }
    };
}
node-byline uses streams, so I would prefer that one for your huge files.
For your date conversions I would use moment.js.
For maximising your throughput you could think about using a software cluster. There are some nice modules which wrap the node-native cluster module quite well. I like cluster-master from isaacs. For example, you could create a cluster of x workers which all process a file.
For benchmarking splits vs regexes, use benchmark.js. I haven't tested it until now. benchmark.js is available as a node module.
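As a rough sketch of how such a benchmark.js comparison could look (the sample log line is borrowed from the question; the split/regex expressions are only placeholders for whatever parsing you actually do):

var Benchmark = require('benchmark'); // npm install benchmark

var line = '10:00:43.343423 I\'m a friendly log message. There are 5 cats, and 7 dogs. We are in state "SUCCESS".';
var re = /There are (\d+) cats, and (\d+) dogs\. We are in state "(\w+)"/;

new Benchmark.Suite()
    .add('indexOf + split', function () {
        var ok = line.indexOf('SUCCESS') !== -1;
        var parts = line.split(' ');
        return ok && parts[3];
    })
    .add('regex', function () {
        var m = re.exec(line);
        return m && m[1];
    })
    .on('cycle', function (event) {
        console.log(String(event.target));
    })
    .on('complete', function () {
        console.log('Fastest is ' + this.filter('fastest').map('name'));
    })
    .run();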
import * as csv from 'fast-csv';
import * as fs from 'fs';

interface Row {
    [s: string]: string;
}

type RowCallBack = (data: Row, index: number) => object;

export class CSVReader {
    protected file: string;
    protected csvOptions = {
        delimiter: ',',
        headers: true,
        ignoreEmpty: true,
        trim: true
    };

    constructor(file: string, csvOptions = {}) {
        if (!fs.existsSync(file)) {
            throw new Error(`File ${file} not found.`);
        }
        this.file = file;
        this.csvOptions = Object.assign({}, this.csvOptions, csvOptions);
    }

    public read(callback: RowCallBack): Promise<Array<object>> {
        return new Promise<Array<object>>(resolve => {
            const readStream = fs.createReadStream(this.file);
            const results: Array<any> = [];
            let index = 0;
            const csvStream = csv.parse(this.csvOptions).on('data', async (data: Row) => {
                index++;
                results.push(await callback(data, index));
            }).on('error', (err: Error) => {
                console.error(err.message);
                throw err;
            }).on('end', () => {
                resolve(results);
            });
            readStream.pipe(csvStream);
        });
    }
}
import { CSVReader } from '../src/helpers/CSVReader';

(async () => {
    const reader = new CSVReader('./database/migrations/csv/users.csv');
    const users = await reader.read(async data => {
        return {
            username: data.username,
            name: data.name,
            email: data.email,
            cellPhone: data.cell_phone,
            homePhone: data.home_phone,
            roleId: data.role_id,
            description: data.description,
            state: data.state,
        };
    });
    console.log(users);
})();
I have made a node module to read large files (text or JSON) asynchronously.
Tested on large files.
var fs = require('fs')
    , util = require('util')
    , stream = require('stream')
    , es = require('event-stream');

module.exports = FileReader;

function FileReader(){

}

FileReader.prototype.read = function(pathToFile, callback){
    var returnTxt = '';
    var s = fs.createReadStream(pathToFile)
        .pipe(es.split())
        .pipe(es.mapSync(function(line){
            // pause the readstream
            s.pause();

            //console.log('reading line: '+line);
            returnTxt += line;

            // resume the readstream, possibly from a callback
            s.resume();
        })
        .on('error', function(){
            console.log('Error while reading file.');
        })
        .on('end', function(){
            console.log('Read entire file.');
            callback(returnTxt);
        })
    );
};

FileReader.prototype.readJSON = function(pathToFile, callback){
    try {
        this.read(pathToFile, function(txt){ callback(JSON.parse(txt)); });
    }
    catch(err){
        throw new Error('json file is not valid! ' + err.stack);
    }
};
Just save the file as file-reader.js, and use it like this:
var FileReader = require('./file-reader');
var fileReader = new FileReader();
fileReader.readJSON(__dirname + '/largeFile.json', function(jsonObj){/*callback logic here*/});
