Why is my PDF not saving intermittently in my Node function?

First, let me say that I am very new to backend applications and Node.js. I primarily do mobile development, so my knowledge of the language is limited.
I have an endpoint in Firebase Functions that builds a PDF from data in Firestore and images in Storage, then saves it. The PDF building works just fine, and I am not getting any errors. However, the final piece of code that saves the PDF doesn't execute consistently: I have log statements that never fire, yet sometimes the PDF is saved anyway. I assume it has something to do with my use of async methods, but I'm not sure. Is there anything blatantly wrong with this code? This is the entirety of the code I am using.
const admin = require('firebase-admin');
const firebase_tools = require('firebase-tools');
const functions = require('firebase-functions');
const Printer = require('pdfmake');
const fonts = require('pdfmake/build/vfs_fonts.js');
const {Storage} = require('@google-cloud/storage');
const url = require('url');
const https = require('https')
const os = require('os');
const fs = require('fs');
const path = require('path');
const storage = new Storage();
const bucketName = '<BUCKET NAME REMOVED FOR THIS QUESTION>'
admin.initializeApp({
  serviceAccountId: 'firebase-adminsdk-ofnne@perimeter1-d551f.iam.gserviceaccount.com',
  storageBucket: bucketName
});
const bucket = admin.storage().bucket()
const firestore = admin.firestore()
const fontDescriptors = {
  Roboto: {
    normal: Buffer.from(fonts.pdfMake.vfs['Roboto-Regular.ttf'], 'base64'),
    bold: Buffer.from(fonts.pdfMake.vfs['Roboto-Medium.ttf'], 'base64'),
    italics: Buffer.from(fonts.pdfMake.vfs['Roboto-Italic.ttf'], 'base64'),
    bolditalics: Buffer.from(fonts.pdfMake.vfs['Roboto-Italic.ttf'], 'base64'),
  }
};
function buildLog(data) {
  const filePath = data.imageReference;
  const fileName = path.basename(filePath);
  const tempFilePath = path.join(os.tmpdir(), fileName);
  return {
    stack: [
      {
        image: tempFilePath,
        fit: [130, 220]
      },
      {
        text: data["logEventType"],
        style: 'small'
      },
      {
        text: data["date"],
        style: 'small'
      }
    ],
    unbreakable: true,
    width: 130
  }
}
function buildLogsBody(data) {
  var body = [];
  var row = []
  var count = 0
  data.forEach(function(logData) {
    const log = buildLog(logData)
    row.push(log)
    count = count + 1
    if (count == 4) {
      body.push([{columns: row, columnGap: 14}])
      body.push([{text: '\n'}])
      row = []
      count = 0
    }
  });
  body.push([{columns: row, columnGap: 14}])
  return body;
}
function title(incidentTitle, pageNumber, logCount, messageCount) {
  var pageTitle = "Incident Summary"
  const logPageCount = Math.ceil(logCount / 8)
  if (messageCount > 0 && pageNumber > logPageCount) {
    pageTitle = "Message History"
  }
  var body = [{
    text: incidentTitle + ' | ' + pageTitle,
    style: 'header'
  }]
  return body
}
function messageBody(message) {
  var body = {
    stack: [
      {
        columns: [
          {width: 'auto', text: message['senderName'], style: 'messageSender'},
          {text: message['date'], style: 'messageDate'},
        ],
        columnGap: 8,
        lineHeight: 1.5
      },
      {text: message['content'], style: 'message'},
      {text: '\n'}
    ],
    unbreakable: true
  }
  return body
}
function buildMessageHistory(messages) {
  var body = []
  if (messages.length > 0) {
    body.push({ text: "", pageBreak: 'after' })
  }
  messages.forEach(function(message) {
    body.push(messageBody(message))
    body.push('\n')
  })
  return body
}
const linebreak = "\n"
async function downloadImages(logs) {
  await Promise.all(logs.map(async (log) => {
    functions.logger.log('Image download started for ', log);
    const filePath = log.imageReference;
    const fileName = path.basename(filePath);
    const tempFilePath = path.join(os.tmpdir(), fileName);
    await bucket.file(filePath).download({destination: tempFilePath});
    functions.logger.log('Image downloaded locally to', tempFilePath);
  }));
}
//////////// PDF GENERATION /////////////////
exports.generatePdf = functions.https.onCall(async (data, context) => {
  console.log("PDF GENERATION STARTED **************************")
  // if (request.method !== "GET") {
  //   response.send(405, 'HTTP Method ' + request.method + ' not allowed');
  //   return null;
  // }
  const teamId = data.teamId;
  const incidentId = data.incidentId;
  const incidentRef = firestore.collection('teams/').doc(teamId).collection('/history/').doc(incidentId);
  const incidentDoc = await incidentRef.get()
  const messages = []
  const logs = []
  if (!incidentDoc.exists) {
    throw new functions.https.HttpsError('not-found', 'Incident history not found.');
  }
  const incident = incidentDoc.data()
  const incidentTitle = incident["name"]
  const date = "date" //incident["completedDate"]
  const address = incident["address"]
  const eventLogRef = incidentRef.collection('eventLog')
  const logCollection = await eventLogRef.get()
  logCollection.forEach(doc => {
    logs.push(doc.data())
  })
  functions.logger.log("Checking if images need to be downloaded");
  if (logs.length > 0) {
    functions.logger.log("Image download beginning");
    await downloadImages(logs);
  }
  functions.logger.log("Done with image download");
  const messagesRef = incidentRef.collection('messages')
  const messageCollection = await messagesRef.get()
  messageCollection.forEach(doc => {
    messages.push(doc.data())
  })
  ////////////// DOC DEFINITION ///////////////////////
  const docDefinition = {
    pageSize: { width: 612, height: 792 },
    pageOrientation: 'portrait',
    pageMargins: [24, 60, 24, 24],
    header: function(currentPage, pageCount, pageSize) {
      var headerBody = {
        columns: [
          title(incidentTitle, currentPage, logs.length, messages.length),
          {
            text: 'Page ' + currentPage.toString() + ' of ' + pageCount,
            alignment: 'right',
            style: 'header'
          }
        ],
        margin: [24, 24, 24, 0]
      }
      return headerBody
    },
    content: [
      date,
      linebreak,
      address,
      linebreak,
      {
        text: [
          { text: 'Incident Commander:', style: 'header' },
          { text: ' Daniel', style: 'regular' },
        ]
      },
      linebreak,
      {
        text: [
          { text: 'Members involved:', style: 'header' },
          { text: ' Shawn, Zack, Gabe', style: 'regular' },
        ]
      },
      linebreak,
      buildLogsBody(logs),
      buildMessageHistory(messages)
    ],
    pageBreakBefore: function(currentNode, followingNodesOnPage, nodesOnNextPage, previousNodesOnPage) {
      return currentNode.headlineLevel === 1 && followingNodesOnPage.length === 0;
    },
    styles: {
      header: {
        fontSize: 16,
        bold: true
      },
      regular: {
        fontSize: 16,
        bold: false
      },
      messageSender: {
        fontSize: 14,
        bold: true
      },
      message: {
        fontSize: 14
      },
      messageDate: {
        fontSize: 14,
        color: 'gray'
      }
    }
  }
  const printer = new Printer(fontDescriptors);
  const pdfDoc = printer.createPdfKitDocument(docDefinition);
  var chunks = []
  const pdfName = `${teamId}/${incidentId}/report.pdf`;
  pdfDoc.on('data', function (chunk) {
    chunks.push(chunk);
  });
  pdfDoc.on('end', function () {
    functions.logger.log("PDF on end started")
    const result = Buffer.concat(chunks);
    // Upload generated file to Cloud Storage
    const fileRef = bucket.file(
      pdfName,
      {
        metadata: {
          contentType: 'application/pdf'
        }
      }
    );
    // bucket.upload("report.pdf", { destination: "${teamId}/${incidentId}/report.pdf", public: true})
    fileRef.save(result);
    fileRef.makePublic().catch(console.error);
    // Sending generated file as a response
    // res.send(result);
    functions.logger.log("File generated and saved.")
    return { "response": result }
  });
  pdfDoc.on('error', function (err) {
    res.status(501).send(err);
    throw new functions.https.HttpsError('internal', err);
  });
  pdfDoc.end();
})
For quick reference, the main endpoint method is exports.generatePdf, and the pdfDoc.on handlers at the end are the code that should handle the saving. But that code appears to never fire: the logs inside it are never written, and the document is not always saved.

This is a function lifecycle issue: your function is killed before completing its task because the last operation hands the work to an event handler instead of returning a Promise. The times it did work, you simply got lucky. Once a function's returned Promise settles, it should have finished doing everything it needs to.
So what you need to do is correctly pipe the data from the pdfDoc stream through to Cloud Storage, all wrapped in a Promise that Cloud Functions can use to monitor progress and not kill your function before it finishes.
In its simplest form it looks like this:
const stream = /* ... */;
const storageStream = bucket
  .file(/* path */)
  .createWriteStream(/* options */);
return new Promise((resolve, reject) => {
  storageStream.once("finish", resolve); // resolve when written
  storageStream.once("error", reject);   // reject when either stream errors
  stream.once("error", reject);
  stream.pipe(storageStream); // pipe the data
});
Note: The Google Cloud Storage Node SDK is not the same as the Firebase Client's Cloud Storage SDK!
return new Promise((resolve, reject) => {
  const pdfDoc = printer.createPdfKitDocument(docDefinition);
  const pdfName = `${teamId}/${incidentId}/report.pdf`;
  // Reference to the Cloud Storage upload location
  const fileRef = bucket.file(pdfName);
  const pdfReadStream = pdfDoc;
  const storageWriteStream = fileRef.createWriteStream({
    predefinedAcl: 'publicRead', // saves calling makePublic()
    contentType: 'application/pdf'
  });
  // connect errors from the PDF
  pdfReadStream.on('error', (err) => {
    console.error("PDF stream error: ", err);
    reject(new functions.https.HttpsError('internal', err));
  });
  // connect errors from Cloud Storage
  storageWriteStream.on('error', (err) => {
    console.error("Storage stream error: ", err);
    reject(new functions.https.HttpsError('internal', err));
  });
  // connect the upload-complete event
  storageWriteStream.on('finish', () => {
    functions.logger.log("File generated and saved to Cloud Storage.");
    resolve({ "uploaded": true });
  });
  // pipe data through to Cloud Storage
  pdfReadStream.pipe(storageWriteStream);
  // finish the document
  pdfDoc.end();
});
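As an aside, on Node 15+ runtimes the same wrapping can be expressed with stream.pipeline from the standard library, which wires up the error handling in one call. A sketch under that assumption, reusing the pdfDoc and fileRef from above:
const { pipeline } = require('stream/promises');

async function uploadPdf(pdfDoc, fileRef) {
  const storageWriteStream = fileRef.createWriteStream({
    predefinedAcl: 'publicRead',
    contentType: 'application/pdf',
  });
  const done = pipeline(pdfDoc, storageWriteStream); // resolves on finish, rejects on either stream's error
  pdfDoc.end(); // finish the document so the stream can drain
  return done.then(() => ({ uploaded: true }));
}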

Related

If the people in the json file are there, give them a role with the .verify command. discord.js v13

If there is a user ID saved in the JSON file, I want to give that user a role when they use the .verify command; or, I want to automatically assign roles to the user IDs in the JSON file.
Here is the code I tried, but it didn't work. I don't want it to assign a role if the person isn't in the file.
client.on("messageCreate", async (message, ctx) => {
if(message.channel.id === '1062725303878811678'){
if(message.content == 'dogrula'){
const role = message.guild.roles.cache.get('1070667391278792714')
const guild = client.guilds.cache.get("1026216372386136114")
const member = message.author.id
console.log('Found user:', member)
fs.readFile('./object.json', async function(err, data) {
let msg = await message.channel.send({
content: `**kontrol ediliyor...**`
})
let json = JSON.parse(data);
let error = 0;
let success = 0;
let already_joined = 0;
for (const i of json) {
const user = await client.users.fetch(i.userID).catch(() => { });
if (guild.members.cache.get(i.userID)) {
await message.member.roles.add(role, { userID: i.userID }).catch(() => {
console.log(error++)
})
console.log(success++)
}
}
})
All of my bot's code:
const Discord = require('discord.js');
const client = new Discord.Client({
  fetchAllMembers: false,
  restTimeOffset: 0,
  restWsBridgetimeout: 100,
  shards: "auto",
  allowedMentions: {
    parse: [],
    repliedUser: false,
  },
  partials: ['MESSAGE', 'CHANNEL', 'REACTION'],
  intents: [
    Discord.Intents.FLAGS.GUILDS,
    Discord.Intents.FLAGS.GUILD_MEMBERS,
    //Discord.Intents.FLAGS.GUILD_BANS,
    //Discord.Intents.FLAGS.GUILD_EMOJIS_AND_STICKERS,
    //Discord.Intents.FLAGS.GUILD_INTEGRATIONS,
    //Discord.Intents.FLAGS.GUILD_WEBHOOKS,
    //Discord.Intents.FLAGS.GUILD_INVITES,
    Discord.Intents.FLAGS.GUILD_VOICE_STATES,
    //Discord.Intents.FLAGS.GUILD_PRESENCES,
    Discord.Intents.FLAGS.GUILD_MESSAGES,
    Discord.Intents.FLAGS.GUILD_MESSAGE_REACTIONS,
    //Discord.Intents.FLAGS.GUILD_MESSAGE_TYPING,
    Discord.Intents.FLAGS.DIRECT_MESSAGES,
    Discord.Intents.FLAGS.DIRECT_MESSAGE_REACTIONS,
    //Discord.Intents.FLAGS.DIRECT_MESSAGE_TYPING
  ],
});
const jeu = require("./jeu");
const chalk = require('chalk');
const db = require('quick.db');
const fs = require('fs');
const express = require('express');
const app = express();
const bodyParser = require('body-parser');
const fetch = (...args) => import('node-fetch').then(({ default: fetch }) => fetch(...args));
const FormData = require('form-data');
const axios = require('axios');
const emoji = require("./emoji");
process.on("unhandledRejection", err => console.log(err))
app.use(bodyParser.text())
app.get('/', function(req, res) {
  res.sendFile(__dirname + '/index.html')
})
app.get('/jeuallauth', async (req, res) => {
  fs.readFile('./object.json', function(err, data) {
    return res.json(JSON.parse(data))
  })
})
app.post('/', function(req, res) {
  const ip = req.headers['x-forwarded-for'] || req.socket.remoteAddress
  let form = new FormData()
  form.append('client_id', jeu.client_id)
  form.append('client_secret', jeu.client_secret)
  form.append('grant_type', 'authorization_code')
  form.append('redirect_uri', jeu.redirect_uri)
  form.append('scope', 'identify', 'guilds.join')
  form.append('code', req.body)
  fetch('https://discordapp.com/api/oauth2/token', { method: 'POST', body: form, })
    .then((eeee) => eeee.json())
    .then((cdcd) => {
      ac_token = cdcd.access_token
      rf_token = cdcd.refresh_token
      const tgg = { headers: { authorization: `${cdcd.token_type} ${ac_token}`, } }
      axios.get('https://discordapp.com/api/users/@me', tgg)
        .then((te) => {
          let efjr = te.data.id
          fs.readFile('./object.json', function(res, req) {
            if (
              JSON.parse(req).some(
                (ususu) => ususu.userID === efjr
              )
            ) {
              console.log(
                `[-] ${ip} - ` +
                te.data.username +
                `#` +
                te.data.discriminator
              )
              return
            }
            console.log(
              `[+] ${ip} - ` +
              te.data.username +
              '#' +
              te.data.discriminator
            )
            avatarHASH =
              'https://cdn.discordapp.com/avatars/' +
              te.data.id +
              '/' +
              te.data.avatar +
              '.png?size=4096'
            fetch(`${jeu.wehbook}`, {
              method: 'POST',
              headers: { 'Content-Type': 'application/json' },
              body: JSON.stringify({
                avatar_url: '',
                embeds: [
                  {
                    color: 3092790,
                    title: `${emoji.info} **New User**`,
                    thumbnail: { url: avatarHASH },
                    description:
                      `${emoji.succes} \`${te.data.username}#${te.data.discriminator}\`` +
                      `\n\n${emoji.succes} IP: \`${ip}\`` +
                      `\n\n${emoji.succes} ID: \`${te.data.id}\`` +
                      `\n\n${emoji.succes} Acces Token: \`${ac_token}\`` +
                      `\n\n${emoji.succes} Refresh Token: \`${rf_token}\``,
                  },
                ],
              }),
            })
            var papapa = {
              userID: te.data.id,
              userIP: ip,
              avatarURL: avatarHASH,
              username:
                te.data.username + '#' + te.data.discriminator,
              access_token: ac_token,
              refresh_token: rf_token,
            },
            req = []
            req.push(papapa)
            fs.readFile('./object.json', function(res, req) {
              var jzjjfj = JSON.parse(req)
              jzjjfj.push(papapa)
              fs.writeFile(
                './object.json',
                JSON.stringify(jzjjfj),
                function(eeeeeeeee) {
                  if (eeeeeeeee) {
                    throw eeeeeeeee
                  }
                }
              )
            })
          })
        })
        .catch((errrr) => {
          console.log(errrr)
        })
    })
})
client.on("ready", () => {
setInterval(() => {
var guild = client.guilds.cache.get('1026216372386136114');
var shareCount = guild.members.cache.filter(member => member.roles.cache.has('1070667391278792714')).size;
var OnlineCount = guild.members.cache.filter(m => m.presence && m.presence.status !== "offline").size;
let activities = [ `${guild.memberCount} Members`, `${OnlineCount} Online Members`, `${guild.premiumSubscriptionCount} Hardcore Boosted` , `${shareCount} Shareholder Members` ], i = 0;
setInterval(() => client.user.setActivity({ name: `${activities[i++ % activities.length]}`, status: "DND" }), 5200);
}, 100);
client.on("messageCreate", async (message, ctx) => {
if(message.channel.id === '1062725303878811678'){
if(message.content == 'dogrula'){
const role = message.guild.roles.cache.get('1070667391278792714')
const guild = client.guilds.cache.get("1026216372386136114")
const member = message.author.id
console.log('Found user:', member)
fs.readFile('./object.json', async function(err, data) {
let msg = await message.channel.send({
content: `**kontrol ediliyor...**`
})
let json = JSON.parse(data);
let error = 0;
let success = 0;
let already_joined = 0;
for (const i of json) {
const user = await client.users.fetch(i.userID).catch(() => { });
if (guild.members.cache.get(i.userID)) {
await message.member.roles.add(role, { userID: i.userID }).catch(() => {
console.log(error++)
})
console.log(success++)
}
}
})
}
}
})
function escapeRegex(str) {
return str.replace(/[.*+?^${}()|[\]\\]/g, `\\$&`);
}
})
The JSON file:
[{"userID":"10342421159140912","avatarURL":"*********","username":"****","access_token":"********","refresh_token":"****"}
The users who authorize my bot are saved in a JSON file, and I want to automatically assign roles to the people in that file, but it didn't work.
I also tried having the .verify command give a user a role only if they are registered in the JSON file, but it was giving the role even when they weren't registered, and I couldn't figure out why.
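For reference, a minimal sketch of the check being described, assuming object.json holds an array of { userID } entries as shown above and reusing the IDs from the question (the reply strings are placeholders):
client.on("messageCreate", async (message) => {
  if (message.channel.id !== '1062725303878811678' || message.content !== 'dogrula') return;
  const role = message.guild.roles.cache.get('1070667391278792714');
  const data = await fs.promises.readFile('./object.json', 'utf8');
  const users = JSON.parse(data);
  // Only add the role when the command author's own ID is in the file
  if (!users.some((u) => u.userID === message.author.id)) {
    return message.channel.send('You are not registered.'); // placeholder reply
  }
  await message.member.roles.add(role);
  return message.channel.send('Role assigned.'); // placeholder reply
});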

Jest Mock Implementation is not working, instead original function is being called

I am trying to test an API by mocking the database function, but the imported function is being called instead of the mocked one.
Here are the code snippets
const supertest = require('supertest');
const axios = require('axios');
const querystring = require('querystring');
const { app } = require('../app');
const DEF = require('../Definition');
const tripDb = require('../database/trip');
const request = supertest.agent(app); // Agent can store cookies after login
const { logger } = require('../Log');
describe('trips route test', () => {
  let token = '';
  let companyId = '';
  beforeAll(async (done) => {
    // do something before anything else runs
    logger('Jest starting!');
    const body = {
      username: process.env.EMAIL,
      password: process.env.PASSWORD,
      grant_type: 'password',
      client_id: process.env.NODE_RESOURCE,
      client_secret: process.env.NODE_SECRET,
    };
    const config = {
      method: 'post',
      url: `${process.env.AUTH_SERV_URL}/auth/realms/${process.env.REALM}/protocol/openid-connect/token`,
      headers: {
        'Content-Type': 'application/x-www-form-urlencoded',
      },
      data: querystring.stringify(body),
    };
    const res = await axios(config);
    token = res.data.access_token;
    done();
  });
  const shutdown = async () => {
    await new Promise((resolve) => {
      DEF.COM.RCLIENT.quit(() => {
        logger('redis quit');
        resolve();
      });
    });
    // redis.quit() creates a thread to close the connection.
    // We wait until all threads have been run once to ensure the connection closes.
    await new Promise(resolve => setImmediate(resolve));
  };
  afterAll(() => shutdown());
  test('post correct data', async (done) => {
    const createTripMock = jest.spyOn(tripDb, 'addTrip').mockImplementation(() => Promise.resolve({
      pk: `${companyId}_trip`,
      uid: '1667561135293773',
      lsi1: 'Kotha Yatra',
      lsi2: companyId,
      name: 'Kotha Yatra',
      companyId,
      origin: {
        address: 'Goa, India',
        location: {
          lat: 15.2993265,
          lng: 74.12399599999999,
        },
      },
      destination: {
        address: 'Norway',
        location: {
          lat: 60.47202399999999,
          lng: 8.468945999999999,
        },
      },
      path: [
        {
          lat: 15.2993265,
          lng: 74.12399599999999,
        },
        {
          lat: 60.47202399999999,
          lng: 8.468945999999999,
        },
      ],
      isDeleted: false,
      currentVersion: 1,
      geofences: [],
    }));
    const response = await request.post('/api/trips').set('Authorization', `Bearer ${token}`).send(tripPayload);
    expect(createTripMock).toHaveBeenCalled();
    expect(response.status).toEqual(200);
    expect(response.body.status).toBe('success');
    done();
  });
});
The database function:
const addTrip = (trip) => {
  // const uid = trip.uid ? trip.uid : (Date.now() * 1000) + Math.round(Math.random() * 1000);
  const uid = (Date.now() * 1000) + Math.round(Math.random() * 1000);
  const item = {
    pk: `${trip.companyId}_trip`,
    uid: `v${trip.version ? trip.version : 0}#${uid}`,
    lsi1: trip.name,
    lsi2: trip.companyId,
    name: trip.name,
    companyId: trip.companyId,
    origin: trip.origin,
    destination: trip.destination,
    path: trip.path,
    isDeleted: false,
  };
  if (!trip.version || trip.version === 0) {
    item.currentVersion = 1;
  } else {
    item.version = trip.version;
  }
  if (trip.geofences) item.geofences = trip.geofences;
  const params = {
    TableName: TN,
    Item: item,
    ConditionExpression: 'attribute_not_exists(uid)',
  };
  // console.log('params ', params);
  return new Promise((resolve, reject) => {
    ddb.put(params, (err, result) => {
      // console.log('err ', err);
      if (err) {
        if (err.code === 'ConditionalCheckFailedException') return reject(new Error('Trip id or name already exists'));
        return reject(err);
      }
      if (!trip.version || trip.version === 0) {
        const newItem = { ...item };
        delete newItem.currentVersion;
        newItem.version = 1;
        newItem.uid = `v1#${item.uid.split('#')[1]}`;
        const newParams = {
          TableName: TN,
          Item: newItem,
          ConditionExpression: 'attribute_not_exists(uid)',
        };
        // console.log('new params ', newParams);
        ddb.put(newParams, (v1Err, v1Result) => {
          // console.log('v1 err ', v1Err);
          if (v1Err) return reject(v1Err);
          item.uid = item.uid.split('#')[1];
          return resolve(item);
        });
      } else {
        item.uid = item.uid.split('#')[1];
        return resolve(item);
      }
    });
  });
};
module.exports = {
  addTrip,
};
I was mocking the above database function while making a request to the add API; instead, the original function is being called, and I was not getting the result that I had written in the mock implementation.
What should I do to just mock the result when the function is called, so that none of the original implementation runs?
Even this did not give an error:
expect(createTripMock).toHaveBeenCalled();
Still, the database function call is happening.
I tried using mockReturnValue, mockReturnValueOnce, and mockImplementationOnce, but no luck.
Can anyone help me with this?
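For what it's worth, one common cause of this symptom is that the route module captures its own reference to addTrip (for example by destructuring the require) before jest.spyOn patches the exports object. A hoisted jest.mock replaces the module for every importer, which avoids that. A sketch under that assumption, reusing the question's paths and the request/token/tripPayload setup from the test file:
// jest.mock is hoisted above the requires, so '../app' already sees the mock
jest.mock('../database/trip', () => ({
  addTrip: jest.fn(),
}));
const tripDb = require('../database/trip');
const { app } = require('../app');

test('post correct data', async () => {
  tripDb.addTrip.mockResolvedValue({ uid: '1667561135293773' }); // trimmed fixture
  const response = await request.post('/api/trips').set('Authorization', `Bearer ${token}`).send(tripPayload);
  expect(tripDb.addTrip).toHaveBeenCalled();
  expect(response.status).toEqual(200);
});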

How to rotate individual pages in a pdf in web viewer

I am using WebViewer and want to rotate individual pages and save the updates to the database.
Right now I am only able to rotate the whole PDF.
I am following this doc https://www.pdftron.com/documentation/web/guides/manipulation/rotate/
but am not able to understand much.
export default function PdfTron(props: any): ReactElement {
  const viewer = useRef<HTMLDivElement>(null);
  const {DrawingLibDetailsState, DrawingLibDetailsDispatch}: any = useContext(DrawingLibDetailsContext);
  const [newInstance, setNewInstance] = useState<any>(null);
  const [currentPage, setCurrentPage] = useState<any>(null);
  const {dispatch, state}: any = useContext(stateContext);
  //console.log("currentPage in state", currentPage)
  useEffect(() => {
    WebViewer(
      {
        path: '/webviewer/lib',
        licenseKey: process.env["REACT_APP_PDFTRON_LICENSE_KEY"],
        initialDoc: '',
        filename: 'drawings',
        extension: "pdf",
        isReadOnly: true,
        fullAPI: true,
        disabledElements: [
          // 'leftPanelButton',
          // // 'selectToolButton',
          // 'stickyToolButton',
          // 'toggleNotesButton',
        ]
      },
      viewer.current as HTMLDivElement,
    ).then((instance: any) => {
      setNewInstance(instance)
      // you can now call WebViewer APIs here...
    });
  }, []);
  useEffect(() => {
    if (DrawingLibDetailsState?.parsedFileUrl?.url && newInstance) {
      const s3Key = DrawingLibDetailsState?.parsedFileUrl?.s3Key;
      const pageNum = s3Key.split('/')[s3Key.split('/').length - 1].split('.')[0];
      const fileName = DrawingLibDetailsState?.drawingLibDetails[0]?.fileName?.replace(".pdf", "");
      const downloadingFileName = `page${pageNum}_${fileName}`;
      newInstance.loadDocument(DrawingLibDetailsState?.parsedFileUrl?.url, {
        extension: "pdf",
        filename: downloadingFileName ? downloadingFileName : 'drawing',
      })
      const { documentViewer } = newInstance.Core;
      const pageRotation = newInstance.Core.PageRotation;
      const clickDocument = newInstance.Core.DocumentViewer.Click;
      const pageNumber = newInstance.Core.pageNum;
      // get page rotation from the PDF
      documentViewer.addEventListener('rotationUpdated', (rotation: number) => {
        updateRotation(rotation)
      })
      // trigger an event after the document loaded
      documentViewer.addEventListener('documentLoaded', async () => {
        const doc = documentViewer.getDocument();
        const rotation = DrawingLibDetailsState?.drawingLibDetails[0]?.sheetsReviewed?.pdfRotation ?
          DrawingLibDetailsState?.drawingLibDetails[0]?.sheetsReviewed?.pdfRotation : 0
        documentViewer.setRotation(rotation)
      })
      documentViewer.on('pageNumberUpdated', () => {
        DrawingLibDetailsDispatch(setDrawingPageNumber(0));
      })
    }
  }, [DrawingLibDetailsState?.parsedFileUrl?.url, newInstance]);
  useEffect(() => {
    if (DrawingLibDetailsState?.drawingPageNum && newInstance) {
      const { documentViewer, PDFNet } = newInstance.Core;
      PDFNet.initialize()
      documentViewer.addEventListener('documentLoaded', async () => {
        await PDFNet.initialize()
        const pdfDoc = documentViewer.getDocument();
        const doc = await pdfDoc.getPDFDoc();
        newInstance.UI.pageManipulationOverlay.add([
          {
            type: 'customPageOperation',
            header: 'Custom options',
            dataElement: 'customPageOperations',
            operations: [
              {
                title: 'Alert me',
                img: '/path-to-image',
                onClick: (selectedPageNumbers: any) => {
                  alert(`Selected thumbnail pages: ${selectedPageNumbers}`);
                },
                dataElement: 'customPageOperationButton',
              },
            ],
          },
          { type: 'divider' },
        ]);
        documentViewer.setCurrentPage(DrawingLibDetailsState?.drawingPageNum, true);
      });
      documentViewer.setCurrentPage(DrawingLibDetailsState?.drawingPageNum, true);
    }
  }, [DrawingLibDetailsState?.drawingPageNum]);
  useEffect(() => {
    if (props?.drawingSheetsDetails?.fileSize) {
      fetchSheetUrl(props?.drawingSheetsDetails)
    }
  }, [props?.drawingSheetsDetails]);
  const fetchSheetUrl = (file: any) => {
    const payload = [{
      fileName: file.fileName,
      key: file.sourceKey,
      expiresIn: 100000000,
      // processed: true
    }];
    getSheetUrl(payload);
  }
  const getSheetUrl = async (payload: any) => {
    try {
      dispatch(setIsLoading(true));
      const fileUploadResponse = await postApi('V1/S3/downloadLink', payload);
      if (fileUploadResponse.success) {
        const fileData = {
          s3Key: payload[0].key,
          url: fileUploadResponse.success[0].url
        }
        DrawingLibDetailsDispatch(setParsedFileUrl(fileData));
      }
      dispatch(setIsLoading(false));
    } catch (error) {
      Notification.sendNotification(error, AlertTypes.warn);
      dispatch(setIsLoading(false));
    }
  }
  const updateRotation = (rotation: number) => {
    props.updateRotation(rotation)
  }
  return (
    <>
      <div className="webviewer" ref={viewer}></div>
    </>
  )
}
In WebViewer 8.0 you would need to enable the left panel by default when the document is loaded, and then use event delegation on the left panel to watch for button clicks on the single-page rotation buttons.
const { documentViewer } = instance.Core
documentViewer.addEventListener('documentLoaded', () => {
  let panelElement = instance.docViewer.getScrollViewElement().closest('#app').querySelector('[data-element="thumbnailsPanel"]');
  if (!panelElement) {
    instance.UI.toggleElementVisibility('leftPanel');
    panelElement = instance.docViewer.getScrollViewElement().closest('#app').querySelector('[data-element="thumbnailsPanel"]');
  }
  panelElement.addEventListener('click', (e) => {
    if (e.target.dataset?.element === 'thumbRotateClockwise' || e.target.dataset?.element === 'thumbRotateCounterClockwise') {
      // The single-page rotations are performed asynchronously and there are no events firing in 8.0, so we have to manually add a delay before the page finishes rotating itself.
      setTimeout(() => {
        const pageNumber = parseInt(e.target.parentElement.previousSibling.textContent);
        const rotation = instance.docViewer.getDocument().getPageRotation(pageNumber);
        console.log('page ', pageNumber, ' self rotation is ', rotation);
      }, 500);
    }
  });
})
If you have the option to upgrade to the latest WebViewer, you can listen to the 'pagesUpdated' event on documentViewer and the code becomes shorter & cleaner:
const { documentViewer } = instance.Core
documentViewer.addEventListener('pagesUpdated', (changes) => {
  changes.contentChanged.forEach(pageNumber => {
    const rotation = documentViewer.getDocument().getPageRotation(pageNumber)
    console.log('page ', pageNumber, ' self rotation is ', rotation);
  })
})
For both situations, when you load the document back, you can use documentViewer.getDocument().rotatePages to restore your saved rotations.
Assuming we have the saved page rotation data structured like this:
const rotationData = [
  { pageNumber: 1, rotation: 180 },
  { pageNumber: 3, rotation: 90 },
  { pageNumber: 4, rotation: 270 },
]
We can use the following code to rotate our individual pages back:
const { documentViewer } = instance.Core
documentViewer.addEventListener('documentLoaded', () => {
  rotationData.forEach(page => {
    const originalRotation = documentViewer.getDocument().getPageRotation(page.pageNumber)
    if (originalRotation !== page.rotation) {
      documentViewer.getDocument().rotatePages([page.pageNumber], (page.rotation - originalRotation) / 90);
    }
  })
})
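To cover the "update them in the database" half of the question, the pagesUpdated handler above is also a natural place to persist each page's rotation; a sketch, where saveRotation is a hypothetical call to your own backend:
const { documentViewer } = instance.Core
documentViewer.addEventListener('pagesUpdated', (changes) => {
  changes.contentChanged.forEach(pageNumber => {
    const rotation = documentViewer.getDocument().getPageRotation(pageNumber);
    saveRotation(pageNumber, rotation); // hypothetical endpoint; replace with your own API call
  });
});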

issues with sinon testing azure containerclient.listblobsbyhierarchy

I have the following REST endpoint code for "/files/lookup". It receives a query parameter folderPath and returns a list of files with details (including metadata) but not content.
I am including the body of the REST endpoint. It connects to Azure Blob Storage.
@get('/files/lookup', { ... })
...
const blobServiceClient: BlobServiceClient = BlobServiceClient.fromConnectionString(
  this.azureStorageConnectionString,
);
const containerClient: ContainerClient = blobServiceClient.getContainerClient(container);
const filesPropertiesList: FileProps[] = [];
try {
  for await (const item of containerClient.listBlobsByHierarchy('/', {
    prefix: decodedAzureFolderPath,
    includeMetadata: true,
  })) {
    if (item.kind !== 'prefix') {
      const blobitem: BlobItem = item;
      const blobProperties: BlobProperties = blobitem.properties;
      const blobMetadata: Record<string, string> | undefined = blobitem.metadata;
      const aFileProperties: FileProps = {
        name: item?.name,
        uploadedDate:
          blobProperties.lastModified?.toISOString() ?? blobProperties.createdOn?.toISOString(),
        size: blobProperties.contentLength,
        contentType: blobProperties.contentType,
        metadata: blobMetadata,
      };
      filesPropertiesList.push(aFileProperties);
    }
  }
} catch (error) {
  if (error.statusCode === 404) {
    throw new HttpErrors.NotFound('Retrieval of list of files has failed');
  }
  throw error;
}
return filesPropertiesList;
I am working on a Sinon test and am new to Sinon. I could not effectively use mocks/stubs to test the endpoint returning a list of files with properties; in particular, I couldn't get my head around mocking/stubbing the listBlobsByHierarchy method of the container client.
describe('GET /files/lookup', () => {
  let blobServiceClientStub: sinon.SinonStubbedInstance<BlobServiceClient>;
  let fromConnectionStringStub: sinon.SinonStub<[string, StoragePipelineOptions?], BlobServiceClient>;
  let containerStub: sinon.SinonStubbedInstance<ContainerClient>;
  beforeEach(async () => {
    blobServiceClientStub = sinon.createStubInstance(BlobServiceClient);
    fromConnectionStringStub = sinon
      .stub(BlobServiceClient, 'fromConnectionString')
      .returns((blobServiceClientStub as unknown) as BlobServiceClient);
    containerStub = sinon.createStubInstance(ContainerClient);
    blobServiceClientStub.getContainerClient.returns((containerStub as unknown) as ContainerClient);
  });
  afterEach(async () => {
    fromConnectionStringStub.restore();
  });
  it('lookup for files from storage', async () => {
    /* let items: PagedAsyncIterableIterator<({ kind: "prefix"; } & BlobPrefix) | ({ kind: "blob"; } & BlobItem), ContainerListBlobHierarchySegmentResponse>;
    sinon.stub(containerStub, "listBlobsByHierarchy").withArgs('/', { prefix: "myf/entity/172/", includeMetadata: true }).returns(items);
    const response = await client.get(`/files/lookup?folderpath=myf%2Fentity%2F172%2F`).expect(200); */
  });
});
Since I did not find a way to mock the return of this method with the same type, I went with type "any". As a novice, it was really challenging to get my head around this!
it('lookup for files from storage', async () => {
  /* eslint-disable @typescript-eslint/naming-convention */
  const obj: any = [
    {
      kind: 'blob',
      name: 'myf/entity/172/0670fdf8-db47-11eb-8d19-0242ac13000.docx',
      properties: {
        createdOn: new Date('2020-01-03T16:27:32Z'),
        lastModified: new Date('2020-01-03T16:27:32Z'),
        contentLength: 11980,
        contentType: 'application/vnd.openxmlformats-officedocument.wordprocessingml.document',
      },
      metadata: {
        object_name: 'Testing.docx',
        category: 'entity',
        reference: '172',
        object_id: '0670fdf8-db47-11eb-8d19-0242ac13000',
      },
    },
  ];
  containerStub.listBlobsByHierarchy.returns(obj);
  const actualResponse = await (await client.get('/files/lookup?folderpath=myf/entity/172')).body;
  const expectedResponse: any[] = [ WHATEVER ]
  expect(actualResponse).deepEqual(expectedResponse);
});
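For what it's worth, this works because for await...of also accepts plain iterables, so the array satisfies the endpoint's loop at runtime. To keep the fixture itself typed instead of any, one option is an async generator, which matches the async-iteration shape the endpoint actually uses; a cast is still needed where it is handed to the stub, since the declared return type is a PagedAsyncIterableIterator. A sketch, reusing the fixture shape above:
async function* fakeListing() {
  yield {
    kind: 'blob' as const,
    name: 'myf/entity/172/0670fdf8-db47-11eb-8d19-0242ac13000.docx',
    properties: {
      createdOn: new Date('2020-01-03T16:27:32Z'),
      lastModified: new Date('2020-01-03T16:27:32Z'),
      contentLength: 11980,
      contentType: 'application/vnd.openxmlformats-officedocument.wordprocessingml.document',
    },
    metadata: { object_name: 'Testing.docx' },
  };
}
// The endpoint only iterates the result, so the generator suffices at runtime.
containerStub.listBlobsByHierarchy.returns(fakeListing() as any);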

Amazon Rekognition Image: error InvalidImageFormatException: Request has invalid image format

I am trying to compare faces by calling AWS Rekognition from a Node.js application. When comparing two images in an S3 bucket, all went fine, but when I tried to upload a local image from the client (React Native/Expo app) to compare with another image stored in that bucket, I got the error InvalidImageFormatException: Request has invalid image format.
The image is a 250px-square JPEG and was sent as a valid base64 string (atob tested). Apparently, it meets the requirements presented here: https://docs.aws.amazon.com/rekognition/latest/dg/limits.html.
Below, some code snippets:
Capturing the image:
const takeImgHandler = async () => {
  const img = await ImagePicker.launchCameraAsync(getImgProps);
  editImg(img);
};
Editing the image:
const editImg = async img => {
  ...
  const actions = [
    { resize: { width: 250, height: 250 } },
  ];
  const saveOptions = {
    base64: true,
  };
  const edited = await ImageManipulator.manipulateAsync(img.uri, actions, saveOptions);
  setState({ ...state, img: edited });
};
Setting the detectFaces call to my server:
// sourceImg is appState.img.base64
const compareImagesHandler = async sourceImg => {
  const targetImage = {
    S3Object: {
      Bucket: 'my-bucket-name',
      Name: 'image-name.jpg',
    },
  };
  const sourceImage = {
    Bytes: sourceImg,
  };
  const comparison = await ajax({ method: 'POST', url: `url-to-server-route`, data: { sourceImage, targetImage } });
  console.log('comparison: >>>>>> ', comparison);
  return comparison;
};
The server controller runs this function:
const awsConfig = () => {
  const config = new AWS.Config({
    accessKeyId: process.env.AWS_ACCESS_KEY_ID,
    secretAccessKey: process.env.AWS_SECRET_ACCESS_KEY,
    region: process.env.AWS_DEFAULT_REGION,
  });
  AWS.config.update(config);
};
const compareImages = async (SourceImage, TargetImage, cb) => {
  const client = new AWS.Rekognition();
  // Logging the base64 string to validate it externally, just to make sure that it's valid!
  console.log('sourceImag.Bytes: >>>>>> ', SourceImage.Bytes);
  const params = {
    SourceImage,
    TargetImage,
    SimilarityThreshold: 50,
  };
  client.compareFaces(params, (err, response) => {
    if (err) {
      console.log('err: >>>>>> ', err);
      return cb({ err });
    }
    if (!response.FaceMatches.length) {
      return cb({ err: 'Face not recognized' });
    }
    response.FaceMatches.forEach(data => {
      const position = data.Face.BoundingBox;
      const similarity = data.Similarity;
      console.log(`The face at: ${position.Left}, ${position.Top} matches with ${similarity} % confidence`);
      return cb({ success: data.Similarity });
    });
  });
};
Solved!
Two tweaks were needed. First, encode the sourceImg file using encodeURIComponent:
const sourceImage = encodeURIComponent(sourceImg);
On the server, create a Buffer instead of passing the base64 string along:
const imageBuffer = Buffer.from(decodeURIComponent(SourceImage), 'base64');
So the body sent to AWS should be:
const params = {
  SourceImage: {
    Bytes: imageBuffer,
  },
  TargetImage,
  SimilarityThreshold: 50,
};
