How to save a base64 image using Node.js?

I am saving a base64 image using Node.js. It saves my image at the exact path, but shows me an error.
Please help me find the error.
Here is my code:
var express = require('express');
var router = express.Router();
const fs = require('fs');
const mime = require('mime');
const path = './uploads';

router.post('/register', (req, res, next) => {
    const base64Image = req.body.image;
    const matches = base64Image.match(/^data:([A-Za-z-+\/]+);base64,(.+)$/);
    response = {};
    if (matches.length !== 3) {
        return new Error('Invalid input String');
    }
    response.type = matches[1];
    response.data = new Buffer(matches[2]);
    let decodedImg = response;
    let imageBuffer = decodedImg.data;
    let type = decodedImg.type;
    let extension = mime.extension(type);
    let fileName = 'image.' + extension;
    try {
        fs.writeFileSync(path + '/' + fileName, imageBuffer, 'utf8');
        return res.send({ status: 'success' });
    } catch (e) {
        next(e);
    }
    return;
});

module.exports = router;
Any solution appreciated!

The mistake you made is that when you create the buffer you are not specifying an encoding. Also note that new Buffer() is deprecated; use Buffer.from() instead. You should create the buffer like this:
let buff = Buffer.from(m[2],'base64'); // encoding type base64
Basic code snippet
const fs = require('fs');

let a = 'base64ImageString';
let m = a.match(/^data:([A-Za-z-+\/]+);base64,(.+)$/);
let b = Buffer.from(m[2], 'base64');

fs.writeFile('image.png', b, function (err) {
    if (!err) {
        console.log("file is created");
    }
});
Also, when you write a buffer to a file you don't have to pass an encoding type, but when writing a string you do.
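Putting it together, a corrected sketch of the original /register route (same ./uploads folder and mime helper as in the question; not tested against your exact setup) might look like this:
var express = require('express');
var router = express.Router();
const fs = require('fs');
const mime = require('mime');
const path = './uploads';

router.post('/register', (req, res, next) => {
    const matches = req.body.image.match(/^data:([A-Za-z-+\/]+);base64,(.+)$/);
    if (!matches || matches.length !== 3) {
        return next(new Error('Invalid input string'));
    }
    const extension = mime.extension(matches[1]);          // e.g. 'png' for 'image/png'
    const imageBuffer = Buffer.from(matches[2], 'base64'); // decode the base64 payload
    try {
        // no encoding argument is needed when writing a Buffer
        fs.writeFileSync(path + '/image.' + extension, imageBuffer);
        return res.send({ status: 'success' });
    } catch (e) {
        return next(e);
    }
});

module.exports = router;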
Check this out for a demo
https://repl.it/repls/GrimOfficialLocations
But it is not advised to send an image as a base64 string. It is inefficient for large images: base64 takes roughly 33% more bits than its binary equivalent. I recommend you check this out:
Upload base64 image with Ajax

Related

Puppeteer to convert HTML to PDF using Node.js in Durable Functions (fan out, fan in)

I'm working on a small project to convert a large XML into several formatted PDF documents. The large XML contains multiple XMLs of similar format, so I'm using a single HTML template for printing all the documents. After producing all the PDF documents I also need to produce a metadata file with some basic info on each document that was printed.
I thought the fan-out/fan-in scenario of Durable Functions was a perfect fit for my use case. I'm working with Node.js. I set up all my code and it seems to be working fine locally. The orchestration function looks like the one below.
const df = require("durable-functions");
module.exports = df.orchestrator(function* (context) {
var xmldata = yield context.df.callActivity("DurablegetblobJS1","");
var tasks = [];
for (file of xmldata) {
tasks.push(context.df.callActivity("Durableactivityjs2", file));
}
const outputs = yield context.df.Task.all(tasks);
var finalout = "";
for (out of outputs){
console.log('I am done1 :' + out );
finalout = finalout + out;
}
return finalout;
});
DurablegetblobJS1: Fetches the entire XML and splits it into multiple smaller XMLs (one per document).
Durableactivityjs2: Fetches the HTML template, extracts the different values from the individual XMLs, applies them to the HTML and finally prints the PDF to Azure storage. It returns the name of the PDF document that was printed, for creation of the metadata file. The code for this is below.
var fs = require('fs');
var xml2js = require('xml2js');
var html_to_pdf = require('html-pdf-node');
var parser = new xml2js.Parser();
module.exports = async function (context) {
//console.log("Hello from activity :")
var xmldict = {}
var xmltext = context.bindings.name;
//Extract the nodes and attributes
metadata(xmltext,xmldict);
report(xmltext,xmldict);
context.log(xmldict)
const { BlobServiceClient } = require("@azure/storage-blob");
// Load the .env file if it exists
require("dotenv").config();
const AZURE_STORAGE_CONNECTION_STRING = process.env.STORAGE_CONNECTION_STRING || "";
const blobServiceClient = BlobServiceClient.fromConnectionString(
AZURE_STORAGE_CONNECTION_STRING
);
var containerClient = blobServiceClient.getContainerClient('test');
var blobname = 'comb_template.html';
var blockBlobClient = containerClient.getBlockBlobClient(blobname);
var downloadBlockBlobResponse = await blockBlobClient.download(0);
var html_template = await streamToText(downloadBlockBlobResponse.readableStreamBody);
let options = { format: 'A4'};
let file = { content: html_template};
const x = await writepdf1(file, options,blobServiceClient,xmldict);
console.log("Written Blob PDF");
return x;
};
async function writepdf1(file, options,blobServiceClient,xmldict){
const pdfBuffer = await html_to_pdf.generatePdf(file, options);
const containerClient = blobServiceClient.getContainerClient('test2');
const targetblob = xmldict['OU'] + '/' + xmldict['ReportName'] + '/' + xmldict['OU'] + '_' + xmldict['ReportName'] + '_' + xmldict['DocumentID'] + '_' + '.pdf';
console.log('Blob name :' + targetblob);
const blockBlobClient_t = containerClient.getBlockBlobClient(targetblob);
const uploadBlobResponse = await blockBlobClient_t.upload(pdfBuffer, pdfBuffer.length);
return targetblob;
}
async function streamToText(readable) {
readable.setEncoding('utf8');
let data = '';
for await (const chunk of readable) {
data += chunk;
}
return data;
}
function metadata(xmltext,xmldict){
parser.parseString(xmltext, function (err, result) {
var test1 = result['HPDPSMsg']['DocumentRequest'][0]['MetaData'][0];
Object.entries(test1).forEach(([key, value]) => {
xmldict[key] = value[0];
});
});
}
function report(xmltext,xmldict){
parser.parseString(xmltext, function (err, result) {
var test2 = result['HPDPSMsg']['DocumentRequest'][0]['Report'][0]['$'];
Object.entries(test2).forEach(([key, value]) => {
xmldict[key] = value;
});
});
}
However, when I deploy the entire project to an Azure premium function (EP1 - Windows), I see some errors in App Insights when I try to execute my function, and the PDFs are never generated.
Activity function 'Durableactivityjs2' failed: Could not find browser
revision 818858. Run "PUPPETEER_PRODUCT=firefox npm install" or
"PUPPETEER_PRODUCT=firefox yarn install" to download a supported
Firefox browser binary
I'm a bit clueless how I'm supposed to resolve this. Any help or suggestions would be appreciated.

How to run Node.js module 'windows-1252' along with require statements

Trying to correctly write a .json file from data.response.stream from a POST request using Node.js and Newman on a Windows 10 AWS EC2 instance. The default encoding is cp1252, but the response encoding is utf-8, and after attempts using iconv, iconv-lite, and futzing with Buffer, I can't seem to arrive at a satisfactory result.
Here's the code I'm using:
const newman = require('newman'); // require Newman in the project
const fs = require('fs'); // require the Node.js module 'File System'
var url = require('url');
const https = require('https');
var path = require('path');
const { Iconv } = require('iconv').Iconv;
const iconvlite = require('iconv-lite');
const utf8 = require('utf8');
const windows1252 = require('windows-1252');
var requirejs = require('requirejs');
//import {encode, decode, labels} from 'windows-1252';
//import * as windows1252 from 'windows-1252';
//const collectionURL = 'https://www.getpostman.com/collections/mycollection';
let pageNumber = 16287;
// call newman.run to pass `options` object and wait for callback
newman.run({
collection: require('./postman-collection.json'),
iterationData: './iteration-data.csv',
color: 'on',
verbose: 'on',
exportCollection: './/after_pmRuns',
delayRequest: 500,
environment: require('./postman-environment.json'),
reporters: 'cli',
}).on('request', (error, data) => {
if (error) {
console.log(error);
return;
}
console.log('Request name: ' + data.item.name);
console.log(data.response.stream.toString());
var currentPageNumber = pageNumber++;
const requestName = data.item.name.replace(/[^a-z0-9]/gi, '-');
const randomString = Math.random().toString(36).substring(7);
const fileName = `./results/_00${currentPageNumber}-response-${requestName}-${randomString}.json`;
const encodedData = windows1252.encode(data.response.stream.toString(), {
mode: 'fatal'
});
const decodedData = iconvlite.decode(encodedData, 'utf-8');
//var iconv = new Iconv('windows-1252', 'UTF-8//TRANSLIT//IGNORE');
//var content = data.response.stream;
//var buffer = iconv.convert(content);
//var new_content = buffer.toString('utf8')
//const win = iconvlite.encode(data.response.stream, "windows1252");
//const utfStr = iconvlite.decode(win, "utf-8");
//const requestContent = data.response.stream.toString();
//return str.toString();
fs.writeFileSync(fileName, decodedData, function(error) {
if (error) {
console.error(error);
}
});
});
I keep getting ASCII-encoded .json files when opening them in Notepad, and occasionally I'm getting replacement characters like \ufffd or variations of that.
When I try to adjust the package.json, Newman throws an error since it's in a require statement, but when I try to import windows-1252 it says it's undefined.
Any ideas on how I can work around this?
I hope we don't need to encode or decode the response data; we can simply use JSON.parse on the buffer data to get the response JSON.
JSON.parse(responseData);
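A minimal sketch of that idea applied to the run handler above (the output file name here is just a placeholder):
const fs = require('fs');
const newman = require('newman');

newman.run({ collection: require('./postman-collection.json') })
    .on('request', (error, data) => {
        if (error) { console.error(error); return; }
        // data.response.stream is a Buffer; decode it as UTF-8 and parse the JSON ...
        const json = JSON.parse(data.response.stream.toString('utf8'));
        // ... then write it back out as UTF-8, with no windows-1252 round trip
        fs.writeFileSync('./results/response.json', JSON.stringify(json, null, 2), 'utf8');
    });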

How to send a file from a specified path in the response from local storage using Node.js

I have the path tmp\daily_gasoline_report\9, where 9 is a unique id which will be different every time. I want to send an image from that folder in the response. How can I send that file in the response?
I have tried with fs
if (!req.body.path) {
logger.warn(error.MANDATORY_FIELDS);
return res.status(500).send(error.MANDATORY_FIELDS)
}
let directory_name = req.body.path;
let filenames = fs.readdirSync(directory_name);
console.log("\nFilenames in directory:");
filenames.forEach((file) => {
console.log("File:", file);
});
let result = error.OK
result.data = filenames
logger.info(result);
return res.status(200).send(result)
I get only the file names, not the files. But now I have the file names, which come in the URL with the path and file name from the front end. So how can I send the file from that path in the response?
I got the answer.
This worked for me to get a single image from the folder:
const fs = require('fs');
const url = require('url');

let query = url.parse(req.url, true).query;
let pic = query.image;
let id = query.id;
let directory_name = "tmp/daily_gasoline_report/" + id + "/" + pic;
let filename = fs.existsSync(directory_name);

if (filename) {
    // read the image using fs and send the image content back in the response
    fs.readFile(directory_name, function (err, content) {
        if (err) {
            res.writeHead(400, { 'Content-type': 'text/html' });
            console.log(err);
            res.end("No such image");
        } else {
            // specify that the content type in the response will be an image
            res.writeHead(200);
            res.end(content);
        }
    });
}
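A shorter sketch of the same idea using Express's built-in res.sendFile (assuming an Express route that receives the same id and image query parameters; validate them in real code to avoid path traversal):
const path = require('path');

router.get('/image', (req, res) => {
    // e.g. /image?id=9&image=report.png
    const { id, image } = req.query;
    const filePath = path.resolve('tmp', 'daily_gasoline_report', id, image);
    res.sendFile(filePath, (err) => {
        if (err) {
            res.status(404).send('No such image');
        }
    });
});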
Try this,
let directory_name = req.body.path;
let filenames = fs.readdirSync(directory_name);
let resultantArr = [];
console.log("\nFilenames in directory:");
filenames.forEach((file) => {
console.log("File:", file);
var fr = new FileReader();
fr.onload = function () {
var data = fr.result;
var array = new Int8Array(data);
resultantArr.push(JSON.stringify(array, null, ' '));
};
fr.readAsArrayBuffer(file);
});
let result = error.OK
result.data = resultantArr;
logger.info(result);
return res.status(200).send(result)
@Arya Here is a code snippet to deal with an image file:
let fr = new FileReader();
fr.onloadend = function(event) {
let base64Data = fr.result;
let arrayBufferView = new Uint8Array(base64Data);
let blob = new Blob( [ arrayBufferView ], { type: file.type } );
resultantArr.push(blob);
}
fr.readAsArrayBuffer(file);
And later on to read the blob values of an array and create image url out of it, you can do this:
let urlCreator = window.URL || window.webkitURL || {}.createObjectURL;
let imageUrl = urlCreator.createObjectURL( blob );
You will need to zip all the files in the path and send the zip folder; you cannot send multiple files in one response.
Also cross-check your requirements: if the file size grows to a big number, your application will crash.

How can I get the path of an Excel file with Node.js in a chatbot using Bot Framework?

I have made a chatbot with Bot Framework, and with this framework it is possible to add an attachment.
So I have written code to get my Excel file in base64 after I add it to my chatbot.
But I want to take an Excel file from anywhere on my PC, and to transform it to base64 I need the full path; in Node.js I don't know how to get it.
async attachmentsStep(stepContext, next) {
var fs = require('fs');
var activity = stepContext.context.activity;
if (activity.attachments && activity.attachments.length > 0) {
var attachment = activity.attachments[0];
// function to encode file data to base64 encoded string
function base64_encode(file) {
// read binary data
var bitmap = fs.readFileSync(file);
// convert binary data to base64 encoded string
return new Buffer.from(bitmap).toString('base64');
}
this.base64str = base64_encode( **PATH OF EXCEL FILE** + attachment.name);
var nex = await stepContext.next();
var base64 = this.base64str;
return {
base64,
nex
};
}
}
Do you have an idea, please?
You can use __filename and __dirname to get the file's absolute path.
console.log(__filename);
// Prints: /Users/mjr/example.js
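A small sketch combining this with the path module (attachment.name comes from the question's code; the idea is just to build an absolute path):
const path = require('path');

console.log(__filename); // absolute path of the current script file
console.log(__dirname);  // absolute path of the directory containing it

// hypothetical: build an absolute path to a file stored next to the script
const excelPath = path.join(__dirname, attachment.name);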
contentUrl retrieves the file, so the path is not needed; with the URL I directly converted it to base64 like this:
async attachmentsStep(stepContext, next) {
var activity = stepContext.context.activity;
if (activity.attachments && activity.attachments.length > 0) {
var attachment = activity.attachments[0];
var base64Url = attachment.contentUrl;
console.log(process.env.PATH);
/** Convert Url in base64 **/
var axios = require('axios');
var excel = await axios.get(base64Url, {responseType: 'arraybuffer'});
var base64str = Buffer.from(excel.data).toString('base64');
/**************************/
// base64str = 'data:' + base64Type + ';base64,' + base64str;
var nex = await stepContext.next();
return {
base64str,
nex
};
}
}

Google Vision | Vietnamese: Low Quality OCR Results

Background
Using the Google Vision API (with Node) to recognize Vietnamese text, the result is lacking in quality. Some (not all, but some) tone markers as well as vowel signs are missing.
Compared to their online demo, which returns a decent result (scroll down for the live demo):
https://cloud.google.com/vision/
(As I do not have a company account with them, I cannot ask Google directly.)
Question
Can I tweak my request to get better results?
I already set the language hint to "vi" and tried to combine it with "en". I also tried the more specific "vi-VN".
Example Image
https://www.tecc.org/Slatwall/custom/assets/images/product/default/cache/j056vt-_800w_800h_sb.jpg
Example Code
const fs = require("fs");
const path = require("path");
const vision = require("@google-cloud/vision");
async function quickstart() {
let text;
const fileName = "j056vt-_800w_800h_sb.jpg";
const imageFile = fs.readFileSync(fileName);
const image = Buffer.from(imageFile).toString("base64");
const client = new vision.ImageAnnotatorClient();
const request = {
image: {
content: image
},
imageContext: {
languageHints: ["vi", 'en']
}
};
const [result] = await client.textDetection(request);
for (const tmp of result.textAnnotations) {
text += tmp.description + '\n';
}
const out = path.basename(fileName, path.extname(fileName)) + ".txt";
fs.writeFileSync(out, text);
}
quickstart();
Solution
// $env:GOOGLE_APPLICATION_CREDENTIALS="[PATH]"
const fs = require("fs");
const path = require("path");
const vision = require("@google-cloud/vision");
async function quickstart() {
let text = '';
const fileName = "j056vt-_800w_800h_sb.jpg";
const imageFile = fs.readFileSync(fileName);
const image = Buffer.from(imageFile).toString("base64");
const client = new vision.ImageAnnotatorClient();
const request = {
image: {
content: image
},
imageContext: {
languageHints: ["vi-VN"]
}
};
const [result] = await client.documentTextDetection(request);
// OUTPUT METHOD A
for (const tmp of result.textAnnotations) {
text += tmp.description + "\n";
}
console.log(text);
const out = path.basename(fileName, path.extname(fileName)) + ".txt";
fs.writeFileSync(out, text);
// OUTPUT METHOD B
const fullTextAnnotation = result.fullTextAnnotation;
console.log(`Full text: ${fullTextAnnotation.text}`);
fullTextAnnotation.pages.forEach(page => {
page.blocks.forEach(block => {
console.log(`Block confidence: ${block.confidence}`);
block.paragraphs.forEach(paragraph => {
console.log(`Paragraph confidence: ${paragraph.confidence}`);
paragraph.words.forEach(word => {
const wordText = word.symbols.map(s => s.text).join("");
console.log(`Word text: ${wordText}`);
console.log(`Word confidence: ${word.confidence}`);
word.symbols.forEach(symbol => {
console.log(`Symbol text: ${symbol.text}`);
console.log(`Symbol confidence: ${symbol.confidence}`);
});
});
});
});
});
}
quickstart();
This question is already answered in this one.
In summary, the demo in this case is probably using DOCUMENT_TEXT_DETECTION, which can sometimes produce a more thorough string extraction, while you are using TEXT_DETECTION.
You can try making a client.documentTextDetection request instead of client.textDetection, and you will probably get results closer to the demo.
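In the Node.js client that is a one-line change, as in the solution code above:
// DOCUMENT_TEXT_DETECTION instead of TEXT_DETECTION
const [result] = await client.documentTextDetection(request);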
If you want to read the related documentation, you can find it here.
I hope this resolves your question!
