I am trying to convert multiple svg strings to png's so I can render them onto a pdf using PdfMake in nodejs. This works fine for one svg, but when I add multiple svg strings, they get overwritten by the last one. With this example code, it renders two images of png2 (svg2).
const promises = [svg1,svg2].map(str => {
const stream = new Readable();
stream.push(str);
stream.push(null);
return svgPromise(stream);
});
const result = await Promise.all(promises);
const png1 = result[0].content;
const png2 = result[1].content;
function svgPromise(stream) {
return new Promise((resolve, reject) => {
const svg = new Rsvg();
stream.pipe(svg);
svg.on("finish", function() {
const buffer = svg.render({
format: "png",
width: width * 2,
height: height * 2
}).data;
const png = datauri.format(".png", buffer);
resolve(png);
});
});
}
Not sure if this error is related to stream or my promise logic. Any ideas?
Dependencies:
"librsvg": "0.7.0"
"pdfmake": "0.1.35"
"datauri": "1.0.5"
It pays to list all the used modules. Assuming you used datauri, it seems you need to initialize a new instance for every call:
svg.on("finish", function() {
const datauri = new Datauri();
const buffer = svg.render({
format: "png",
width: 16,
height: 16
}).data;
const png = datauri.format(".png", buffer);
resolve(png);
});
Related
I am working on project with pdf's, i want to add a custom watermark to each pages of pdf but only using 'input.pdf' and save it no 'output.pdf'.
I tried with pdf-lib.js but i can't achieve what i want
const fs = require('fs');
const {
writeFileSync,
readFileSync
} = require("fs");
async function createPDF() {
const document = await PDFDocument.load(readFileSync("./f1.pdf-1675923413580.pdf"));
const courierBoldFont = await document.embedFont(StandardFonts.Courier);
const firstPage = document.getPage(0);
const {
width,
height
} = firstPage.getSize()
firstPage.drawText('This text was added with JavaScript!', {
x: 5,
y: height / 2 + 300,
size: 50,
font: courierBoldFont ,
color: rgb(0.95, 0.1, 0.1),
rotate: degrees(-45),
})
writeFileSync("./output.pdf", await document.save());
}
createPDF().catch((err) => console.log(err));
Goal
input.pdf + watermark = input.pdf done on itself
I'm using TypeScript + Node.js + the pdfkit library to create PDFs and verify that they're consistent.
However, when just creating the most basic PDF, consistency already fails. Here's my test.
import {readFileSync, createWriteStream} from "fs";
const PDFDocument = require('pdfkit');
const assert = require('assert').strict;
const fileName = '/tmp/tmp.pdf'
async function makeSimplePDF() {
return new Promise(resolve => {
const stream = createWriteStream(fileName);
const doc = new PDFDocument();
doc.pipe(stream);
doc.end();
stream.on('finish', resolve);
})
}
describe('test that pdfs are consistent', () => {
it('simple pdf test.', async () => {
await makeSimplePDF();
const data: Buffer = readFileSync(fileName);
await makeSimplePDF(); // make PDF again
const data2: Buffer = readFileSync(fileName);
assert.deepStrictEqual(data, data2); // fails!
});
});
Most of the values in the two Buffers are identical but a few of them are not. What's happening here?
I believe that the bytes may be slightly different due to the creation time being factored into the Buffer somehow. When I used mockdate(https://www.npmjs.com/package/mockdate) to fix 'now', I ended up getting consistent Buffers.
I'm trying to create a gradient text on Node using canvas, I tested a code from https://www.w3schools.com/tags/canvas_filltext.asp below is an reimplementation and I received an error.
const fs = require('fs');
const {
createCanvas,
loadImage
} = require('canvas');
const text="Gradient";
const output="./image.png";
async function start(text,output){
let [width,height] = [1280,720];
const canvas = createCanvas(width, height);
let context = canvas.getContext("2d");
await drawGradientText(text);
await saveImage(output);
async function drawGradientText(text) {
return new Promise((resolve) => {
context.font = "30px Verdana";
// Create gradient
let gradient = context.createLinearGradient(0, 0, canvas.width, 0);
gradient.addColorStop("0", " magenta");
gradient.addColorStop("0.5", "blue");
gradient.addColorStop("1.0", "red");
// Fill with gradient
context.fillStyle = gradient;
context.fillText(text, 10, 90);
resolve();
})
}
function saveImage(output) {
return new Promise((resolve) => {
const buffer = canvas.toBuffer('image/png');
fs.writeFileSync(output, buffer);
resolve();
})
}
}
start(text,output);
THE CONSOLE SHOWS
TypeError: offset required
(node:18932) UnhandledPromiseRejectionWarning: Unhandled promise rejection. This error originated either by throwing inside of an async function without a catch block, or by rejecting a promise which was not handled with .catch()...
How could I create a textgradient on nodejs?
Unlike browsers, node-canvas is pretty strict regarding the type passed as offset to addColorStop( offset, color ).
They won't type cast it to float and will just throw the error you received, as can be seen here..
Arguably this is an interop issue, and they may want to fix it, but even in browsers, this offset should be a number, so pass numbers, not strings:
gradient.addColorStop(0, " magenta");
Based on the answer of Kaiido https://stackoverflow.com/users/3702797/kaiido
I notice that have to write the color in hexadecimal number format.
the code now looks like this:
const fs = require('fs');
const {
createCanvas,
loadImage
} = require('canvas');
const text="Gradient";
const output="./image.png";
async function start(text,output){
let [width,height] = [1280,720];
const canvas = createCanvas(width, height);
let context = canvas.getContext("2d");
await drawGradientText(text);
await saveImage(output);
async function drawGradientText(text) {
return new Promise((resolve) => {
context.font = "30px Verdana";
// Create gradient
let gradient = context.createLinearGradient(0, 0, canvas.width, 0);
gradient.addColorStop(0, "#004")
gradient.addColorStop(0.5, "#00fef3")
context.fillStyle = gradient;
context.fillText(text, width/2 - , height/2);
resolve();
})
}
function saveImage(output) {
return new Promise((resolve) => {
const buffer = canvas.toBuffer('image/png');
fs.writeFileSync(output, buffer);
resolve();
})
}
}
start(text,output);
My server receives a list base64 image eg [img1,img2....] at an endpoint.
I have function getPoses(img) that takes an image and returns an object back.
I want to be able to map the images to getPoses() and get the object back for each image in Parallel.
I have tried Async.each function but I notice it's not really Parallel. Probably because it runs in a single thread/core.
I have also tried to use cluster, so that I can utilize all the cores to run getPoses() in multiple cores. But I feel stuck because, It's not clear to me how the master can obtain the poses found by workers.
I would be super gratefully for any help from the community.
Aync code:
//END POINT
app.post('/postImage' ,async function(req,res){
const imgBase64List = req.body.imgBase64.split('next');
const imgBase64IndexList = []
imgBase64List.map((imgBase64,index)=>{
imgBase64IndexList.push([index,imgBase64])
})
async.each(imgBase64IndexList , getPoseDetection , function(err){
if(err){
console.log("Error occured when feteching one of the pose")
console.log(err)
}else{
console.log('Sending Poses!')
res.send(JSON.stringify(poseDetectionResults))
}
})
});//End of app.post and asyn function
const getPoseDetection = async (imgBase64Index, callback) => {
//console.log('start');
const img = new Image();
img.src = imgBase64Index[1];
const imgIndex = imgBase64Index[0]
const canvas = createCanvas(img.width, img.height);
const ctx = canvas.getContext('2d');
ctx.drawImage(img, 0, 0);
const input = tf.browser.fromPixels(canvas);
const imageScaleFactor = 0.3;
const flipHorizontal = false;
const outputStride = 8;
const poses = await net.estimateMultiplePoses(input, {
flipHorizontal: false,
maxDetections: 2,
minPoseConfidence: 0.15,
minPartConfidence:0.1,
nmsRadius:20,
});
boundingBoxes = [] //reset the boundingBoxese
poses.forEach(pose => {
var box = posenet.getBoundingBoxPoints(pose.keypoints);
boundingBoxes.push(box)
});
poseDetectionResults[imgIndex] = {detectionList:poses}
//console.log(poses)
}
Thank You
I am using fill-pdf npm module for filling template pdf's and it creates new file which is read from the disk and returned as buffer to callback. I have two files for which i do the same operation. I want to combine the two buffers there by to form a single pdf file which i can send back to the client. I tried different methods of buffer concatenation. The buffer can be concatenated using Buffer.concat, like,
var newBuffer = Buffer.concat([result_pdf.output, result_pdf_new.output]);
The size of new buffer is also the sum of the size of the input buffers. But still when the newBuffer is sent to client as response, it shows only the file mentioned last in the array.
res.type("application/pdf");
return res.send(buffer);
Any idea ?
As mentioned by #MechaCode, the creator has ended support for HummusJS.
So I would like to give you 2 solutions.
Using node-pdftk npm module
The Following sample code uses node-pdftk npm module to combine
two pdf buffers seamlessly.
const pdftk = require('node-pdftk');
var pdfBuffer1 = fs.readFileSync("./pdf1.pdf");
var pdfBuffer2 = fs.readFileSync("./pdf2.pdf");
pdftk
.input([pdfBuffer1, pdfBuffer2])
.output()
.then(buf => {
let path = 'merged.pdf';
fs.open(path, 'w', function (err, fd) {
fs.write(fd, buf, 0, buf.length, null, function (err) {
fs.close(fd, function () {
console.log('wrote the file successfully');
});
});
});
});
The requirement for node-pdftk npm module is you need to install the
PDFtk library. Some of you may find this overhead / tedious. So I have another solution using pdf-lib library.
Using pdf-lib npm module
const PDFDocument = require('pdf-lib').PDFDocument
var pdfBuffer1 = fs.readFileSync("./pdf1.pdf");
var pdfBuffer2 = fs.readFileSync("./pdf2.pdf");
var pdfsToMerge = [pdfBuffer1, pdfBuffer2]
const mergedPdf = await PDFDocument.create();
for (const pdfBytes of pdfsToMerge) {
const pdf = await PDFDocument.load(pdfBytes);
const copiedPages = await mergedPdf.copyPages(pdf, pdf.getPageIndices());
copiedPages.forEach((page) => {
mergedPdf.addPage(page);
});
}
const buf = await mergedPdf.save(); // Uint8Array
let path = 'merged.pdf';
fs.open(path, 'w', function (err, fd) {
fs.write(fd, buf, 0, buf.length, null, function (err) {
fs.close(fd, function () {
console.log('wrote the file successfully');
});
});
});
Personally I prefer to use pdf-lib npm module.
HummusJS supports combining PDFs using its appendPDFPagesFromPDF method
Example using streams to work with buffers:
const hummus = require('hummus');
const memoryStreams = require('memory-streams');
/**
* Concatenate two PDFs in Buffers
* #param {Buffer} firstBuffer
* #param {Buffer} secondBuffer
* #returns {Buffer} - a Buffer containing the concactenated PDFs
*/
const combinePDFBuffers = (firstBuffer, secondBuffer) => {
var outStream = new memoryStreams.WritableStream();
try {
var firstPDFStream = new hummus.PDFRStreamForBuffer(firstBuffer);
var secondPDFStream = new hummus.PDFRStreamForBuffer(secondBuffer);
var pdfWriter = hummus.createWriterToModify(firstPDFStream, new hummus.PDFStreamForResponse(outStream));
pdfWriter.appendPDFPagesFromPDF(secondPDFStream);
pdfWriter.end();
var newBuffer = outStream.toBuffer();
outStream.end();
return newBuffer;
}
catch(e){
outStream.end();
throw new Error('Error during PDF combination: ' + e.message);
}
};
combinePDFBuffers(PDFBuffer1, PDFBuffer2);
Here's what we use in our Express server to merge a list of PDF blobs.
const { PDFRStreamForBuffer, createWriterToModify, PDFStreamForResponse } = require('hummus');
const { WritableStream } = require('memory-streams');
// Merge the pages of the pdfBlobs (Javascript buffers) into a single PDF blob
const mergePdfs = pdfBlobs => {
if (pdfBlobs.length === 0) throw new Error('mergePdfs called with empty list of PDF blobs');
// This optimization is not necessary, but it avoids the churn down below
if (pdfBlobs.length === 1) return pdfBlobs[0];
// Adapted from: https://stackoverflow.com/questions/36766234/nodejs-merge-two-pdf-files-into-one-using-the-buffer-obtained-by-reading-them?answertab=active#tab-top
// Hummus is useful, but with poor interfaces -- E.g. createWriterToModify shouldn't require any PDF stream
// And Hummus has many Issues: https://github.com/galkahana/HummusJS/issues
const [firstPdfRStream, ...restPdfRStreams] = pdfBlobs.map(pdfBlob => new PDFRStreamForBuffer(pdfBlob));
const outStream = new WritableStream();
const pdfWriter = createWriterToModify(firstPdfRStream, new PDFStreamForResponse(outStream));
restPdfRStreams.forEach(pdfRStream => pdfWriter.appendPDFPagesFromPDF(pdfRStream));
pdfWriter.end();
outStream.end();
return outStream.toBuffer();
};
module.exports = exports = {
mergePdfs,
};