Button labels going wrong on pdf-lib capture using html2canvas - node.js

I am using pdf-lib together with html2canvas to capture the page, but in the captured output the button labels come out wrong: "Buy Again" becomes "BuyA Gain".
Also, when I scroll the page, the captured output gets cut off, which should not happen because the capture is based on the element ID.
// Capture the element identified by `elementId` with html2canvas, embed the
// capture into a centered, aspect-preserving A4 page with pdf-lib, and hand
// the resulting PDF blob to openPrintDialog.
const onClickPrint = () => {
  const domElement = document.getElementById(elementId);
  const printButton = document.getElementById("printIcon");
  // The download icon is optional on some screens; tolerate its absence
  // with a null check instead of the old empty try/catch swallow.
  const downloadButton = document.getElementById("downloadIcon");

  // Hide the action icons so they do not show up in the captured image.
  printButton.style.visibility = "hidden";
  if (downloadButton) {
    downloadButton.style.visibility = "hidden";
  }

  html2canvas(domElement)
    .then(async (canvas) => {
      const pdfDoc = await PDFDocument.create();
      // BUG FIX: MIME types are lowercase — "image/PNG" is not recognized
      // and silently falls back; use "image/png".
      const imagePDF = await pdfDoc.embedPng(canvas.toDataURL("image/png"));
      const { width, height } = imagePDF;
      const page = pdfDoc.addPage([A4_PAGE.width, A4_PAGE.height]);
      // Scale to fit inside the A4 page while preserving the aspect ratio.
      const ratio = Math.min(A4_PAGE.width / width, A4_PAGE.height / height);
      page.drawImage(imagePDF, {
        x: A4_PAGE.width / 2 - (width * ratio) / 2,
        y: A4_PAGE.height / 2 - (height * ratio) / 2,
        width: width * ratio,
        height: height * ratio,
      });
      const pdfBytes = await pdfDoc.save();
      const blob = new Blob([pdfBytes], { type: "application/pdf" });
      openPrintDialog(blob);
    })
    .finally(() => {
      // BUG FIX: the original restored visibility synchronously, i.e. before
      // the asynchronous capture finished, so the icons could reappear in the
      // screenshot; restore them only after the promise chain settles.
      printButton.style.visibility = "visible";
      if (downloadButton) {
        downloadButton.style.visibility = "visible";
      }
    });
};

Related

how to read json file and search with filter for common items in nodejs

I have a JSON file containing game objects, and I want to get the top 5 games with the highest total playtime across users.
I tried to get all objects by reading the file using file system in nodejs :
// Load and parse the games fixture from disk.
// NOTE: the read is synchronous even though the function is async.
const queryGames = async () => {
  const rawJson = fs.readFileSync(path.resolve(__dirname, '../../games.json'))
  return JSON.parse(rawJson)
}
/**
 * Query for the top games ranked by total play time.
 * Unimplemented stub — the working logic lives in getGamePlayTimes /
 * getGamesAndTimesAsList / reverseBubbleSort further down.
 * @returns {Promise<QueryResult>}
 */
const selectTopByPlaytime = async () => {
}
this is the json file : https://jsoneditoronline.org/#left=cloud.3b82169327044c04b7207fa186aee85b&right=local.tiniqu
something like this should work.
const gamePlayData = require('./gamePlay.json').data
/**
 * Sum the play time of every play record per game.
 * @returns {Object<string, number>} map of game name to total play time, e.g.
 * {
 *   'League of legends': 1650,
 *   'World of warcraft': 2300,
 *   'Dark Souls': 218,
 *   'The Witcher 3: Wild Hunt': 987,
 * }
 */
const getGamePlayTimes = () => {
  // BUG FIX: gamePlayTimes was assigned without a declaration, creating an
  // implicit global (a ReferenceError in strict mode).
  const gamePlayTimes = {}
  gamePlayData.forEach((playData) => {
    const gameName = playData.game
    // Initialize-or-accumulate in one expression instead of an if/else.
    gamePlayTimes[gameName] = (gamePlayTimes[gameName] || 0) + playData.playTime
  })
  return gamePlayTimes;
}
/**
 * Convert a { game: playTime } map into a list of { game, playTime } records.
 * @param {Object<string, number>} playTimes - totals from getGamePlayTimes
 * @returns {Array<{game: string, playTime: number}>}
 */
const getGamesAndTimesAsList = (playTimes) => {
  // Object.entries + map replaces the manual counter-indexed for...in loop.
  return Object.entries(playTimes).map(([game, playTime]) => ({ game, playTime }));
}
/**
 * Sort an array of objects in place in descending order of the given property.
 * @param {Array<Object>} a - array to sort (mutated, like the original)
 * @param {string} par - property name to sort by
 * @returns {Array<Object>} the same array, sorted high-to-low
 */
const reverseBubbleSort = (a, par) => {
  // The hand-rolled O(n^2) bubble sort is replaced with the built-in sort,
  // which is stable in modern engines — matching the original's stability
  // (it only swapped on strict '<'), its in-place mutation, and its return.
  return a.sort((x, y) => (x[par] < y[par] ? 1 : y[par] < x[par] ? -1 : 0));
}
// BUG FIX: the original called getGameAndPlayTimes(), but the function is
// named getGamePlayTimes — a guaranteed ReferenceError. Also declare
// sortedArr instead of leaking an implicit global.
const sortedArr = reverseBubbleSort(getGamesAndTimesAsList(getGamePlayTimes()), 'playTime')
const top5 = sortedArr.slice(0, 5);
console.log(top5);

How do I change the style when feature is modifying and not stop the modify

I want to change the style while the feature is being modified, but once the feature's style has been set, modifying stops working. How can I change the style while still keeping the feature modifiable?
here is the code:
// Swap the cursor and the feature's style while an OpenLayers Modify
// interaction is running. The style is replaced on 'modifystart'; the
// restore on 'modifyend' is currently commented out.
this.modify.on(['modifystart', 'modifyend'], function (evt) {
// 'grabbing' while dragging, 'pointer' once the edit finishes
target.style.cursor = evt.type === 'modifystart' ? 'grabbing' : 'pointer'
// NOTE(review): evt.features.array_ is a private OpenLayers field; the
// public accessor is evt.features.getArray()[0] — confirm before relying on it.
const currentFeature = evt.features.array_[0]
const oldStyle = currentFeature.getStyle()
console.log(oldStyle)
console.log(evt.type)
if (evt.type === 'modifystart') {
// Build the drag icon on an offscreen canvas, then apply it as the
// feature-level style for the duration of the drag.
const moveImage = new Image()
// NOTE(review): pinInfo is read but never used in this handler.
const pinInfo = evt.features.array_[0].get('pinInfo')
moveImage.src = that.moveIcon
moveImage.width = 50
moveImage.height = 70
moveImage.onload = function () {
const iconCanvas = document.createElement('canvas')
iconCanvas.width = moveImage.width
iconCanvas.height = moveImage.height
const iconCanvasCtx = iconCanvas.getContext('2d')
// Draw the background icon (shifted up by 25px)
iconCanvasCtx.drawImage(moveImage, 0, -25, 50, 70)
const style = new Style({
image: new Icon({
anchor: [0.5, 0.5],
img: iconCanvas,
imgSize: [moveImage.width, moveImage.height],
scale: that.imgSize / 30
})
})
// const arrayStyle = [style, oldStyle]
// Setting a feature-level style overrides the layer style — this is the
// behavior the author describes as the feature "stopping" modification.
currentFeature.setStyle(style)
}
} else if (evt.type === 'modifyend') {
// currentFeature.setStyle(oldStyle)
}
})
Thanks a lot!

Adding image dynamically in public folder in reactjs

I am developing a face detection application, and for that I need to collect users' images as references so I can detect them later. I have successfully uploaded the images to a MySQL database. Now I need to put each image into the React public folder so it can be matched against the camera feed, but I am stuck on uploading images to the React public folder. Please help me solve this problem.
This is the React code where image to be detected in the imgUrl variable
// Continuously detect faces in the #videoTag stream with face-api.js, match
// them against labeled reference images, and draw boxes + expressions on
// #myCanvas every 100 ms.
// NOTE(review): `time`, `no_of_times`, `labeledFaceDescriptors`, `result`,
// `nameArray1` are not declared in this excerpt — presumably class/module
// level state; confirm against the enclosing component. The closing brace of
// this arrow function is also not present in this excerpt.
detect = async () => {
const videoTag = document.getElementById("videoTag");
const canvas = document.getElementById("myCanvas");
const displaySize = { width: videoTag.width, height: videoTag.height };
faceapi.matchDimensions(canvas, displaySize);
//setInterval starts here for continuous detection
time = setInterval(async () => {
let fullFaceDescriptions = await faceapi
.detectAllFaces(videoTag)
.withFaceLandmarks()
.withFaceExpressions()
.withFaceDescriptors();
const value = fullFaceDescriptions.length;
this.setState({ detection: value });
fullFaceDescriptions = faceapi.resizeResults(
fullFaceDescriptions,
displaySize
);
canvas.getContext("2d").clearRect(0, 0, canvas.width, canvas.height);
//Label Images
// NOTE(review): `dummy` is never used.
var dummy = ["praveen", "vikranth", "Gokul", "Rahul"];
const labels = nameArray1;
// const labels = ["praveen", "vikranth", "Gokul", "Rahul"];
// Build the labeled reference descriptors exactly once (first tick only).
if (no_of_times <= 0) {
if (no_of_times === 0) {
labeledFaceDescriptors = await Promise.all(
labels.map(async (label) => {
// fetch image data from urls and convert blob to HTMLImage element
const imgUrl = `/img/${label}.png`; // for testing purpose
// const imgUrl = testImage;
const img = await faceapi.fetchImage(imgUrl);
const fullFaceDescription = await faceapi
.detectSingleFace(img)
.withFaceLandmarks()
.withFaceExpressions()
.withFaceDescriptor();
if (!fullFaceDescription) {
throw new Error(`no faces detected for ${label}`);
}
const faceDescriptors = [fullFaceDescription.descriptor];
return new faceapi.LabeledFaceDescriptors(label, faceDescriptors);
})
);
// console.log(no_of_times);
}
}
const maxDescriptorDistance = 0.7;
no_of_times++;
const faceMatcher = new faceapi.FaceMatcher(
labeledFaceDescriptors,
maxDescriptorDistance
);
const results = fullFaceDescriptions.map((fd) =>
faceMatcher.findBestMatch(fd.descriptor)
);
result = [];
results.forEach((bestMatch, i) => {
const box = fullFaceDescriptions[i].detection.box;
// console.log(box)
const text = bestMatch.toString(); //this for basMatch name detection
var str = "";
//This is for removing names confidence to map value without duplicate
// Strip digits, then keep characters up to the first space — i.e. take
// the first word of the match label as the person's name.
var val = text.replace(/[0-9]/g, "");
for (let i of val) {
if (i !== " ") {
str += i;
} else {
break;
}
}
if (result.includes(str) === false) result.push(str);
const drawBox = new faceapi.draw.DrawBox(box, { label: text });
drawBox.draw(canvas);
faceapi.draw.drawFaceExpressions(canvas, fullFaceDescriptions, 0.85);
});
for (let i = 0; i < fullFaceDescriptions.length; i++) {
// NOTE(review): indexing asSortedArray() by the same loop index i looks
// suspicious — [0] (the strongest expression) is probably intended.
const result1 = fullFaceDescriptions[i].expressions.asSortedArray()[i];
// console.log(result[i]);
// console.log(result1.expression);
this.test(result[i], result1.expression);
}
}, 100);
In the above code I am manually putting the image in the public folder; this needs to happen dynamically when the user uploads an image.
this is place i get the images in base64 from nodejs
axios.get("/image").then((res) => {
testImage = res.data;
// console.log("from image" + res.data);
imgback = <img src={`data:image/jpeg;base64,${res.data}`} />;
});
This is nodejs code for the get request from reactjs
// GET /image — return the first stored image as a base64 string.
app.get("/image", (req, res) => {
  connection.query("SELECT * FROM images", (error, row, fields) => {
    if (error) {
      console.log("Error in the query");
      // BUG FIX: the original never responded on error, leaving the client
      // request hanging until it timed out.
      res.status(500).send("query failed");
      return;
    }
    console.log("successful query");
    // Guard the empty table case instead of crashing on row[0].image.
    if (row.length === 0) {
      res.status(404).send("no image found");
      return;
    }
    // BUG FIX: `new Buffer()` is deprecated and unsafe; Buffer.from is the
    // supported replacement.
    const buffer = Buffer.from(row[0].image, "binary");
    const bufferBase64 = buffer.toString("base64");
    res.send(bufferBase64);
  });
});
my goal is, in the imgUrl variable in react code i need to specify the image folder for that i need to dynamically add image in folder.
Or is there is any other way to directly give image array in the imgUrl variable.please help me to sort out this problem.

Electron desktopCapturer to Web API frequency all 0s

==== SOLVED ====
To be 100% honest I'm not sure what I did, but I have a fully working audio bar visualizer based on desktop audio. I went back to the original codepen (link below) I started working off of and edited everything as needed to accept a media stream, and it works.
Full Code
// Grab the whole desktop (audio + video) via Electron's desktopCapturer and
// hand the resulting MediaStream to handleStream.
const {desktopCapturer} = require('electron')
desktopCapturer.getSources({types: ['window', 'screen']}, (error, sources) => {
  if (error) throw error
  // Only the full-desktop source is of interest.
  const screenSource = sources.find((candidate) => candidate.name === 'Entire screen')
  if (!screenSource) return
  navigator.mediaDevices
    .getUserMedia({
      audio: { mandatory: { chromeMediaSource: 'desktop' } },
      video: { mandatory: { chromeMediaSource: 'desktop' } }
    })
    .then((stream) => handleStream(stream))
})
// Render the captured desktop audio as colored frequency bars on #canvas.
function handleStream (stream) {
  const audioContext = new AudioContext()
  const sourceNode = audioContext.createMediaStreamSource(stream)
  const analyserNode = audioContext.createAnalyser()
  sourceNode.connect(analyserNode)
  analyserNode.fftSize = 256

  const binCount = analyserNode.frequencyBinCount
  const frequencyData = new Uint8Array(binCount)

  const canvasEl = document.getElementById("canvas")
  canvasEl.width = window.innerWidth
  canvasEl.height = window.innerHeight
  const drawCtx = canvasEl.getContext("2d")
  const fullWidth = canvasEl.width
  const fullHeight = canvasEl.height
  const barWidth = (fullWidth / binCount) * 2.5

  function renderFrame () {
    requestAnimationFrame(renderFrame)
    analyserNode.getByteFrequencyData(frequencyData)

    // Clear to black, then paint one bar per frequency bin.
    drawCtx.fillStyle = "#000"
    drawCtx.fillRect(0, 0, fullWidth, fullHeight)

    let x = 0
    for (let bin = 0; bin < binCount; bin++) {
      const barHeight = frequencyData[bin]
      // Red ramps with amplitude and bin index, green with bin index alone.
      const r = barHeight + (25 * (bin / binCount))
      const g = 250 * (bin / binCount)
      const b = 50
      drawCtx.fillStyle = `rgb(${r}, ${g}, ${b})`
      drawCtx.fillRect(x, fullHeight - barHeight, barWidth, barHeight)
      x += barWidth + 1
    }
  }
  renderFrame()
}
Codepen I used as a starting point
https://codepen.io/nfj525/pen/rVBaab
Original Post
I'm working on setting up a desktop visualizer that will graph the users desktop audio.
desktopCapturer appears to be grabbing the media, as sending it to a video tag displays the stream, along with an echo of the audio. Since I only need the audio, I'm feeding the MediaStream into an AudioContext MediaStreamSource, which also appears to be working, as I get an audio echo if I connect the analyser to the context destination. The issue I'm running into is that when I try to get the frequency data, it returns an array of only 0s. Below is my current code.
// Locate the full-desktop capture source and request its media stream.
const {desktopCapturer} = require('electron')
desktopCapturer.getSources({types: ['window', 'screen']}, (error, sources) => {
  if (error) throw error
  const desktopSource = sources.find((source) => source.name === 'Entire screen')
  if (!desktopSource) return
  navigator.mediaDevices
    .getUserMedia({
      audio: { mandatory: { chromeMediaSource: 'desktop' } },
      video: { mandatory: { chromeMediaSource: 'desktop' } }
    })
    .then((stream) => handleStream(stream))
})
// Wire the desktop stream into an analyser and sample its frequency data.
function handleStream (stream) {
  const audioCtx = new AudioContext();
  const source = audioCtx.createMediaStreamSource(stream);
  const analyser = audioCtx.createAnalyser()
  // BUG FIX: without connecting the source to the analyser, no samples ever
  // reach it and every reading stays 0 (the problem described in the post).
  // Connecting analyser -> destination is not required just to analyse;
  // it only echoes the audio.
  source.connect(analyser)
  analyser.fftSize = 256
  const bufferLength = analyser.frequencyBinCount
  const dataArray = new Uint8Array(bufferLength)
  // BUG FIX: the original never filled the array — a fresh Uint8Array is
  // all zeros by construction. Sample the analyser before reading it.
  analyser.getByteFrequencyData(dataArray)
  console.log(dataArray)
}
===== EDIT =====
I've been able to get this working; kind of but am still running into a slight issue.
1) I had connect the source and the analyzer with
source.connect(analyser)
2) Had to fill the dataArray with time domain with
getByteTimeDomainData
3) Outstanding issue is when there is no media playing the dataArray is filled with values of 126, 127, & 128 making the bars "dance" at almost full height.
4) The FPS also seems extremely fast, but have some plans to fix that
Current somewhat working code :
// Request the entire-screen desktop stream and forward it to handleStream.
const {desktopCapturer} = require('electron')
desktopCapturer.getSources({types: ['window', 'screen']}, (error, sources) => {
  if (error) throw error
  const entireScreen = sources.find((src) => src.name === 'Entire screen')
  if (!entireScreen) return
  navigator.mediaDevices
    .getUserMedia({
      audio: { mandatory: { chromeMediaSource: 'desktop' } },
      video: { mandatory: { chromeMediaSource: 'desktop' } }
    })
    .then((stream) => handleStream(stream))
})
// Draw the desktop audio as green bars on #canvas.
function handleStream (stream) {
  const audioCtx = new AudioContext()
  const source = audioCtx.createMediaStreamSource(stream)
  const analyser = audioCtx.createAnalyser()
  source.connect(analyser) // source must feed the analyser or readings stay 0
  analyser.fftSize = 128
  const bufferLength = analyser.frequencyBinCount
  const dataArray = new Uint8Array(bufferLength)

  const canvas = document.getElementById("canvas")
  canvas.width = window.innerWidth
  canvas.height = window.innerHeight
  const canvasCtx = canvas.getContext("2d")
  const WIDTH = canvas.width;
  const HEIGHT = canvas.height;
  const barWidth = (WIDTH / bufferLength);

  function draw() {
    requestAnimationFrame(draw) // unused handle `drawVisual` removed
    // BUG FIX: use frequency data, not time-domain data. Time-domain samples
    // idle around 127/128 (the waveform's zero line), which is exactly why
    // the bars "danced at almost full height" during silence; frequency bins
    // fall to 0 when nothing is playing.
    analyser.getByteFrequencyData(dataArray)
    canvasCtx.fillStyle = "#000";
    canvasCtx.fillRect(0, 0, WIDTH, HEIGHT);
    let x = 0; // was declared twice (outer let + inner var) in the original
    for (let i = 0; i < bufferLength; i++) {
      const barHeight = dataArray[i];
      canvasCtx.fillStyle = `rgb(${50}, ${250}, ${50})`
      canvasCtx.fillRect(x, HEIGHT - barHeight, barWidth, barHeight);
      x += barWidth + 1;
    }
  }
  draw()
}

Queueing Asynchronous tasks

I'm trying to create a script that takes a list of URL's, goes to the site and takes a screenshot.
I have managed to get this to work with puppeteer. However, the problem I've had is that when I have, say, 50 URLs in the list, it will attempt to launch puppeteer sessions for all of them at once, which means that most time out before the site loads and a screenshot can be taken.
I've found I can successfully run 10 at once, so I'm wanting to set up a queueing system to do this.
// NOTE(review): this launches one Puppeteer browser per record with no await
// and no concurrency cap, so 50 records start 50 simultaneous browsers —
// which is exactly why slow pages time out before the screenshot is taken.
parser.on('readable', function(){
while(record = parser.read()){
counter +=1;
console.log(record.URL);
// Each IIFE is a floating promise: any failure becomes an unhandled
// rejection. `record`/`counter` are captured via default parameters so each
// iteration keeps its own values.
(async (url = record.URL, name = record.shortURL, counter1 = counter) => {
const browser = await puppeteer.launch( {defaultViewport: {width: 1024, height:768} } );
const page = await browser.newPage();
await page.goto(url);
// NOTE(review): title/domainRegex/match/width/height are implicit globals
// shared by all concurrent iterations — a data race.
title = await page.title();
domainRegex = /^(?:https?:\/\/)?(?:[^#\n]+#)?(?:www\.)?([^:\/\n?]+)/img;
match = domainRegex.exec(url);
width = 1024;//await page.viewport().width;
height = 1000;//await page.viewport.height();
await page.screenshot({path: "Screenshots/"+counter1+". "+match[1] + "- " +title.replace(/[\W_]+/g,"")+".jpg", clip : {x:0, y:0, width: width, height: height}});
await browser.close();
})();
}
});
The code below will initially launch 10 sessions. Once each session finishes, it will dequeue the next record and launch another one, until there are no more records remaining. This will ensure that a max of 10 will be running at the same time.
// Keep at most ten captures in flight: ten worker chains start together, and
// each finished capture pulls the next record off the parser until drained.
parser.on('readable', async () => {
  const MAX_PARALLEL_SESSIONS = 10;
  let recordNumber = 0;

  const workers = Array.from({ length: MAX_PARALLEL_SESSIONS }, startWorker);
  await Promise.all(workers);
  console.log("All records have been processed.");

  // Take one record; when its screenshot is done, chain onto the next one.
  function startWorker() {
    const record = parser.read();
    if (record) return captureRecord(record).then(startWorker);
  }

  // Launch a browser, navigate, screenshot, and shut the browser down.
  async function captureRecord(record) {
    const number = ++recordNumber;
    console.log("Processing record #" + number + ": " + record.URL);
    const browser = await puppeteer.launch({ defaultViewport: { width: 1024, height: 768 } });
    const page = await browser.newPage();
    await page.goto(record.URL);
    const title = await page.title();
    const domainRegex = /^(?:https?:\/\/)?(?:[^#\n]+#)?(?:www\.)?([^:\/\n?]+)/img;
    const match = domainRegex.exec(record.URL);
    const width = 1024; // await page.viewport().width;
    const height = 1000; // await page.viewport().height;
    await page.screenshot({path: "Screenshots/" + number + ". " + match[1] + "- " + title.replace(/[\W_]+/g, "") + ".jpg", clip: {x: 0, y: 0, width, height}});
    await browser.close();
  }
});
If you want to run all of them serially, you can turn this into a async function and await it. This way, it will run one by one.
// let's separate it for readability
async function getRecord(record, counter) {
const url = record.URL,
name = record.shortURL,
counter1 = counter;
const browser = await puppeteer.launch({
defaultViewport: {
width: 1024,
height: 768
}
});
const page = await browser.newPage();
await page.goto(url);
title = await page.title();
domainRegex = /^(?:https?:\/\/)?(?:[^#\n]+#)?(?:www\.)?([^:\/\n?]+)/img;
match = domainRegex.exec(url);
width = 1024; //await page.viewport().width;
height = 1000; //await page.viewport.height();
await page.screenshot({
path: "Screenshots/" + counter1 + ". " + match[1] + "- " + title.replace(/[\W_]+/g, "") + ".jpg",
clip: {
x: 0,
y: 0,
width: width,
height: height
}
});
await browser.close();
}
parser.on('readable', async function () { // <-- here we make it async
  // Awaiting inside the loop serializes the work: each capture finishes
  // before the next record is read.
  while ((record = parser.read())) {
    counter += 1;
    console.log(record.URL);
    await getRecord(record, counter); // <-- and we await each call
  }
});
There are other ways like Promise.map and for..of, but let's keep this simpler for now.
You might want to take a look at puppeteer-cluster (disclaimer: I'm the author).
You can do it like this:
(async () => {
  // create a cluster that handles 10 parallel browsers
  const cluster = await Cluster.launch({
    concurrency: Cluster.CONCURRENCY_BROWSER,
    maxConcurrency: 10,
  });
  // define the task run for every queued { counter, record } job
  await cluster.task(async ({ page, data: { counter, record } }) => {
    const url = record.URL;
    await page.goto(url);
    // BUG FIX: title/domainRegex/match/width/height were implicit globals;
    // with 10 tasks running concurrently they raced, so one screenshot could
    // be named with another page's title. Declare them per-task.
    const title = await page.title();
    const domainRegex = /^(?:https?:\/\/)?(?:[^#\n]+#)?(?:www\.)?([^:\/\n?]+)/img;
    const match = domainRegex.exec(url);
    const width = 1024; //await page.viewport().width;
    const height = 1000; //await page.viewport.height();
    await page.screenshot({path: "Screenshots/"+counter+". "+match[1] + "- " +title.replace(/[\W_]+/g,"")+".jpg", clip : {x:0, y:0, width: width, height: height}});
  });
  // queue your jobs
  parser.on('readable', function () {
    while ((record = parser.read())) {
      counter += 1;
      cluster.queue({ counter, record });
    }
  });
})();
This will handle 10 parallel browser instances and will also take care of browser crashes and error handling.
If you want to run a set of promises in sequence you could make use of Promise.mapSeries from Bluebird package. I understand that this would mean adding an additional package, but it's simple and does not need you to build a queuing system.
http://bluebirdjs.com/docs/api/promise.mapseries.html

Resources