How to run Node.js module 'windows-1252' along with require statements - node.js

Trying to correctly write a .json file from data.response.stream from a POST request using Node.js and Newman on Windows 10 AWS EC2. The default encoding is cp1252, but the response encoding is utf-8, and after attempts using iconv, iconv-lite, futzing with Buffer, I can't seem to arrive at a satisfactory result.
Here's the code I'm using:
const newman = require('newman'); // require Newman in the project
const fs = require('fs'); // require the Node.js module 'File System'
var url = require('url');
const https = require('https');
var path = require('path');
const { Iconv } = require('iconv').Iconv;
const iconvlite = require('iconv-lite');
const utf8 = require('utf8');
const windows1252 = require('windows-1252');
var requirejs = require('requirejs');
//import {encode, decode, labels} from 'windows-1252';
//import * as windows1252 from 'windows-1252';
//const collectionURL = 'https://www.getpostman.com/collections/mycollection';
let pageNumber = 16287;
// call newman.run to pass `options` object and wait for callback
newman.run({
collection: require('./postman-collection.json'),
iterationData: './iteration-data.csv',
color: 'on',
verbose: 'on',
exportCollection: './/after_pmRuns',
delayRequest: 500,
environment: require('./postman-environment.json'),
reporters: 'cli',
}).on('request', (error, data) => {
if (error) {
console.log(error);
return;
}
console.log('Request name: ' + data.item.name);
console.log(data.response.stream.toString());
var currentPageNumber = pageNumber++;
const requestName = data.item.name.replace(/[^a-z0-9]/gi, '-');
const randomString = Math.random().toString(36).substring(7);
const fileName = `./results/_00${currentPageNumber}-response-${requestName}-${randomString}.json`;
const encodedData = windows1252.encode(data.response.stream.toString(), {
mode: 'fatal'
});
const decodedData = iconvlite.decode(encodedData, 'utf-8');
//var iconv = new Iconv('windows-1252', 'UTF-8//TRANSLIT//IGNORE');
//var content = data.response.stream;
//var buffer = iconv.convert(content);
//var new_content = buffer.toString('utf8')
//const win = iconvlite.encode(data.response.stream, "windows1252");
//const utfStr = iconvlite.decode(win, "utf-8");
//const requestContent = data.response.stream.toString();
//return str.toString();
fs.writeFileSync(fileName, decodedData, function(error) {
if (error) {
console.error(error);
}
});
});
I keep getting ascii encoded .json files after opening in Notepad. And occasionally I'm getting replacement characters like \ufffd or variations of that.
When I try to adjust the package.json, I Newman throws an error since it's in a require statement, but when I try to import windows-1252 it says it's undefined.
Any ideas on how I can workaround this?

I hope we don't need encode or decode response data, we can simply use "parse" for buffur data to response json.
JSON.parse(responseData);

Related

Puppeteer to convert html to pdf using Nodejs in Durable functions(fan out fan in)

I'm working on a small project to convert a large xml to several formatted pdf documents. The large xml contains multiple similar format xmls. So I'm using a single html template for printing all the documents. After producing all the pdf documents I also need to produce a metadata file with some basic info on each document that was printed.
I thought using the fan out fan in scenario of durable functions is a perfect for my use case. I'm working with Nodejs. I setup all my code and it seems to be working fine locally. The Orchestration function looks like the below.
const df = require("durable-functions");
module.exports = df.orchestrator(function* (context) {
var xmldata = yield context.df.callActivity("DurablegetblobJS1","");
var tasks = [];
for (file of xmldata) {
tasks.push(context.df.callActivity("Durableactivityjs2", file));
}
const outputs = yield context.df.Task.all(tasks);
var finalout = "";
for (out of outputs){
console.log('I am done1 :' + out );
finalout = finalout + out;
}
return finalout;
});
DurablegetblobJS1 : Fetches the entire xmls and splits it into multiple smaller xmls(1 per document).
Durableactivityjs2 : Fetches the html template, extracts the different values from the individual xmls and applies them to the html and finally prints out the pdf into an azure storage. It returns the name of the pdf document that was printed for creation of the metadata file. The code for this is below.
var fs = require('fs');
var xml2js = require('xml2js');
var html_to_pdf = require('html-pdf-node');
var parser = new xml2js.Parser();
module.exports = async function (context) {
//console.log("Hello from activity :")
var xmldict = {}
var xmltext = context.bindings.name;
//Extract the nodes and attributes
metadata(xmltext,xmldict);
report(xmltext,xmldict);
context.log(xmldict)
const { BlobServiceClient } = require("#azure/storage-blob");
// Load the .env file if it exists
require("dotenv").config();
const AZURE_STORAGE_CONNECTION_STRING = process.env.STORAGE_CONNECTION_STRING || "";
const blobServiceClient = BlobServiceClient.fromConnectionString(
AZURE_STORAGE_CONNECTION_STRING
);
var containerClient = blobServiceClient.getContainerClient('test');
var blobname = 'comb_template.html';
var blockBlobClient = containerClient.getBlockBlobClient(blobname);
var downloadBlockBlobResponse = await blockBlobClient.download(0);
var html_template = await streamToText(downloadBlockBlobResponse.readableStreamBody);
let options = { format: 'A4'};
let file = { content: html_template};
const x = await writepdf1(file, options,blobServiceClient,xmldict);
console.log("Written Blob PDF");
return x;
};
async function writepdf1(file, options,blobServiceClient,xmldict){
const pdfBuffer = await html_to_pdf.generatePdf(file, options);
const containerClient = blobServiceClient.getContainerClient('test2');
const targetblob = xmldict['OU'] + '/' + xmldict['ReportName'] + '/' + xmldict['OU'] + '_' + xmldict['ReportName'] + '_' + xmldict['DocumentID'] + '_' + '.pdf';
console.log('Blob name :' + targetblob);
const blockBlobClient_t = containerClient.getBlockBlobClient(targetblob);
const uploadBlobResponse = await blockBlobClient_t.upload(pdfBuffer, pdfBuffer.length);
return targetblob;
}
async function streamToText(readable) {
readable.setEncoding('utf8');
let data = '';
for await (const chunk of readable) {
data += chunk;
}
return data;
}
function metadata(xmltext,xmldict){
parser.parseString(xmltext, function (err, result) {
var test1 = result['HPDPSMsg']['DocumentRequest'][0]['MetaData'][0];
Object.entries(test1).forEach(([key, value]) => {
xmldict[key] = value[0];
});
});
}
function report(xmltext,xmldict){
parser.parseString(xmltext, function (err, result) {
var test2 = result['HPDPSMsg']['DocumentRequest'][0]['Report'][0]['$'];
Object.entries(test2).forEach(([key, value]) => {
xmldict[key] = value;
});
});
}
However, when I deploy the entire project into a azure premium function(EP1 - Windows), I see some errors in app insights when I try and execute my function and the pdfs are never generated.
Activity function 'Durableactivityjs2' failed: Could not find browser
revision 818858. Run "PUPPETEER_PRODUCT=firefox npm install" or
"PUPPETEER_PRODUCT=firefox yarn install" to download a supported
Firefox browser binary
I'm a bit clueless how I'm supposed to resolve this. Any help or suggestions would be appreciated.

How to save base64 image using node js?

I am saving the base64 image using nodejs, it save my image at exact path, but show me the error.
Please help me to find the error.
Here is my code
var express = require('express');
var router = express.Router();
const fs = require('fs');
const mime = require('mime');
const path = './uploads';
router.post('/register', (req, res, next) => {
const base64Image = req.body.image;
const matches = base64Image.match(/^data:([A-Za-z-+\/]+);base64,(.+)$/);
response = {};
if (matches.length !== 3) {
return new Error('Invalid input String');
}
response.type = matches[1];
response.data = new Buffer(matches[2]);
let decodedImg = response;
let imageBuffer = decodedImg.data;
let type = decodedImg.type;
let extension = mime.extension(type);
let fileName = 'image.' + extension;
try {
fs.writeFileSync(path + '/' + fileName, imageBuffer, 'utf8');
return res.send({ status: 'success' });
} catch (e) {
next(e);
}
return;
});
module.exports = router;
Any solution appreciated!
The mistake you made is when you are creating a buffer you are not specifying an encoding. You should create buffer like this:
new Buffer() is deprecated use Buffer.from() instead.
let buff = Buffer.from(m[2],'base64'); // encoding type base64
Basic code snippet
const fs = require('fs')
let a = 'base64ImageString'
let m = a.match(/^data:([A-Za-z-+\/]+);base64,(.+)$/);
let b = Buffer.from(m[2],'base64');
fs.writeFile('image.png',b,function(err){
if(!err){
console.log("file is created")
}
});
Also, when you writing a buffer to file you don't have to pass encoding type, but if writing string you have to.
Check this out for a demo
https://repl.it/repls/GrimOfficialLocations
But it is not advised to send an image as base64 string. It is
inefficient for large images. base64 roughly takes 33% more bits than
its binary equivalent. I recommend you to check this out:
Upload base64 image with Ajax

How can I get the path of an excel file with nodeJS in a chatbot using Bot framework?

I have done a chatbot with bot framework and with this framework it is possible to add an attachement.
So I have done a code to have my excel file in base64 after I add it in my chatbot.
But I want to take an Excel file from everywhere in my pc and to transform it in base64 I need to have the full path and in NodeJS I don't know how to do it.
async attachmentsStep(stepContext, next) {
var fs = require('fs');
var activity = stepContext.context.activity;
if (activity.attachments && activity.attachments.length > 0) {
var attachment = activity.attachments[0];
// function to encode file data to base64 encoded string
function base64_encode(file) {
// read binary data
var bitmap = fs.readFileSync(file);
// convert binary data to base64 encoded string
return new Buffer.from(bitmap).toString('base64');
}
this.base64str = base64_encode( **PATH OF EXCEL FILE** + attachment.name);
var nex = await stepContext.next();
var base64 = this.base64str;
return {
base64,
nex
};
}
}
Do you have an idea please ?
You can use the __filename and __dirname for getting the file's absolute path.
console.log(__filename);
// Prints: /Users/mjr/example.js
ContentUrl recovers the file so not need the path and with the url I have directly convert it in base64 like this :
async attachmentsStep(stepContext, next) {
var activity = stepContext.context.activity;
if (activity.attachments && activity.attachments.length > 0) {
var attachment = activity.attachments[0];
var base64Url = attachment.contentUrl;
console.log(process.env.PATH);
/** Convert Url in base64 **/
var axios = require('axios');
var excel = await axios.get(base64Url, {responseType: 'arraybuffer'});
var base64str = Buffer.from(excel.data).toString('base64');
/**************************/
// base64str = 'data:' + base64Type + ';base64,' + base64str;
var nex = await stepContext.next();
return {
base64str,
nex
};
}
}

How to require or evaluate gzipped JavaScript file with Node.js and zlib?

Everything works when I require a normal JavaScript file in Node:
var test = require('test.js');
console.log(test.foo); // prints the function
But now I have compressed that test.js file, reading it as a string works:
var fs = require('fs');
var zlib = require('zlib');
var gunzip = zlib.createGunzip();
var buffer = [];
fs.createReadStream('test.js.gz').pipe(gunzip);
gunzip.on('data', function(data){
buffer.push(data.toString())
});
gunzip.on('finish', function(){
console.log(buffer.join("")); //prints the JS file as string
}).on("error", function(e) {
console.log(e);
});
But, I don't want a string. Using eval does not seem right.
How can I evaluate the string as a JavaScript similar to what I get with require('test.js'); (I cannot use external libraries)
Here's a possible implementation:
const Module = require('module');
const zlib = require('zlib');
const fs = require('fs');
function requireGZ(filename) {
let code = zlib.gunzipSync(fs.readFileSync(filename)).toString();
let mod = new Module();
mod._compile(code, filename);
return mod.exports;
}
// Use:
let test = requireGZ('./test.js.gz');

Node appendFile not appending data chunk to file on filesystem

I have a program that is trying to get the values from the request using curl and store them in a file and serve the stored content back. The decision to store or append the contents in file are based on a query parameter appendFlag
Now when i run this program what i am getting in console is "true" and "appending" This suggests that it indeed reads the flag goes to the if part but somehow the appendFile function is not working.
var http = require('http');
var url = require('url');
var querystring = require('querystring');
var fs = require('fs');
http.createServer(function(request,response){
var str = request.url.split('?')[1];
var query = querystring.parse(str);
var writeStream = fs.createWriteStream(query['fileName']);
console.log("query - ");
console.log(query["appendFlag"]);
request.on('data',function(chunk){
if(query["appendFlag"]=="true"){
console.log("appending");
fs.appendFile(query['fileName'],chunk.toString(),function(err){
if(err) throw err;
});
}else{
var bufferGood = writeStream.write(chunk);
if(!bufferGood) request.pause();
}
});
request.on('end',function(){
response.writeHead(200);
response.write("\n Content with this url is - \n");
var readStream = fs.createReadStream(query['fileName'],{bufferSize:64*1024});
readStream.on('data',function(chunk){
response.write(chunk.toString());
});
readStream.on('end',function(){
response.write("\n");
response.end();
});
});
writeStream.on('drain',function(){
request.resume();
});
}).listen(8080);
Then after reading an answer from SO( How to create appending writeStream in Node.js ) i tried -
// Program to extract url from the request and writing in that particular file
var http = require('http');
var url = require('url');
var querystring = require('querystring');
var fs = require('fs');
http.createServer(function(request,response){
var str = request.url.split('?')[1];
var query = querystring.parse(str);
var writeStream = fs.createWriteStream(query['fileName']);
options = {
'flags': 'a' ,
'encoding' : null,
'mode' : 0666
}
var appendStream = fs.createWriteStream(query['fileName'],[options]);
console.log("query - ");
console.log(query["appendFlag"]);
request.on('data',function(chunk){
if(query["appendFlag"]=="true"){
console.log("appending");
var bufferGood = appendStream.write(chunk.toString());
if(!bufferGood) request.pause();
}else{
var bufferGood = writeStream.write(chunk.toString());
if(!bufferGood) request.pause();
}
});
request.on('end',function(){
response.writeHead(200);
response.write("\n Content with this url is - \n");
var readStream = fs.createReadStream(query['fileName'],{bufferSize:64*1024});
readStream.on('data',function(chunk){
response.write(chunk.toString());
});
readStream.on('end',function(){
response.write("\n");
response.end();
});
});
writeStream.on('drain',function(){
request.resume();
});
}).listen(8080);
That is changed the flag to the 'a' and it also did not append the data?
Your can use your first variant. But before appendFile() you've opened writeStream for the same query["filename"]. The stream is already opened.
var writeStream = fs.createWriteStream(query['fileName']);
options = {
'flags': 'a' ,
'encoding' : null,
'mode' : 0666
}
var appendStream = fs.createWriteStream(query['fileName'],[options]);
May be it's better to do something like:
var options = {
flags: query.appendFile ? 'w' : 'a'
...
Next: why [options]? You should remove the brackets.
Next: there is no guarantee you'll have filename param in querystring. Please handle this situation.

Resources