Simple API endpoint in Node.js

I'm trying to write a very simple API endpoint for a Udacity project I'm working on. When I hit the endpoint below with Postman, I get a promise rejection warning. I first attempted this using the 3 commented lines within the endpoint, but since filterImageFromURL ran asynchronously, the file was not ready before the delete function ran, resulting in a file-not-found error.
Any suggestions?
app.get( "/filteredimage", async ( req, res ) => {
  var re = req.query.image_url;
  if (!re) {
    return res.status(400).send(`id is required`);
  }
  var myfun = function (data, callback) {
    var filteredpath = filterImageFromURL(data);
    callback([filteredpath]);
  };
  myfun(re, deleteLocalFiles);
  // let filteredpath = filterImageFromURL(re);
  // res.sendFile(filteredpath);
  // deleteLocalFiles([filteredpath]);
} );
Here are the util functions:
export async function filterImageFromURL(inputURL: string): Promise<string> {
  return new Promise( async resolve => {
    const photo = await Jimp.read(inputURL);
    const outpath = '/tmp/filtered.' + Math.floor(Math.random() * 2000) + '.jpg';
    await photo
      .resize(256, 256) // resize
      .quality(60) // set JPEG quality
      .greyscale() // set greyscale
      .write(__dirname + outpath, (img) => {
        resolve(__dirname + outpath);
      });
  });
}
// deleteLocalFiles
// helper function to delete files on the local disk
// useful to cleanup after tasks
// INPUTS
// files: Array<string> an array of absolute paths to files
export async function deleteLocalFiles(files: Array<string>) {
  for (let file of files) {
    fs.unlinkSync(file);
  }
}

I'm not much for TypeScript, but I see a few issues:
One, you don't have to wrap a Promise in an async function, because an async function already returns a Promise. Two, you're not awaiting filterImageFromURL() properly. And three, you should use try/catch blocks so that you don't get Unhandled Promise Rejection errors.
Forgive my stripping out of your TS.
app.get( "/filteredimage", async ( req, res ) => {
  var myfun = async function (data, callback) {
    try {
      var filteredpath = await filterImageFromURL(data);
      callback([filteredpath]);
    } catch(err) {
      console.error(err);
    }
  };
  try {
    var re = req.query.image_url;
    if (!re) {
      return res.status(400).send(`id is required`);
    }
    await myfun(re, deleteLocalFiles);
    // let filteredpath = filterImageFromURL(re);
    // res.sendFile(filteredpath);
    // deleteLocalFiles([filteredpath]);
  } catch(err) {
    console.error(err);
  }
});
export async function filterImageFromURL(inputURL) {
  try {
    const photo = await Jimp.read(inputURL);
    const outpath = '/tmp/filtered.' + Math.floor(Math.random() * 2000) + '.jpg';
    photo
      .resize(256, 256) // resize
      .quality(60) // set JPEG quality
      .greyscale() // set greyscale
      .write(__dirname + outpath);
    // note: write() is callback-based; if you need to guarantee the file exists
    // before returning, use the callback form (as in the original TS version)
    // return the path so callers can send and/or delete the file
    return __dirname + outpath;
  } catch(err) {
    console.error(err);
  }
}
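To also send the filtered image back before cleaning up (what the commented-out lines were aiming for), you can delete the file from res.sendFile's completion callback so the response is fully written first. A minimal sketch, assuming filterImageFromURL resolves once the file has been written (as in the original TypeScript version):
app.get("/filteredimage", async (req, res) => {
  const imageUrl = req.query.image_url;
  if (!imageUrl) {
    return res.status(400).send(`id is required`);
  }
  try {
    // wait for the filtered file to exist on disk
    const filteredpath = await filterImageFromURL(imageUrl);
    // sendFile's callback fires after the response has been sent,
    // so it is safe to delete the file there
    res.sendFile(filteredpath, (err) => {
      if (err) {
        console.error(err);
      }
      deleteLocalFiles([filteredpath]);
    });
  } catch (err) {
    console.error(err);
    res.status(422).send(`could not process ${imageUrl}`);
  }
});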

Related

BotFramework TypeError: Cannot perform 'get' on a proxy that has been revoked

I am trying to develop an MS Teams bot that sends content to students module (unit) by module. I have created 3 files:
methods.js = contains all the methods for sending texts, attachments, etc.
teamsBot.js = captures a specific keyword from the user and, based on that, executes a function.
test.js = connects the bot with Airtable and sends the content accordingly.
I am facing a Cannot perform 'get' on a proxy that has been revoked error. I figured it might be because of the context. I am passing context as a parameter, which I feel might not be the correct way. How can I achieve the result and retain the context between files?
teamsBot.js
const test = require("./test");
class TeamsBot extends TeamsActivityHandler {
  constructor() {
    super();
    // record the likeCount
    this.likeCountObj = { likeCount: 0 };
    this.onMessage(async (context, next) => {
      console.log("Running with Message Activity.");
      let txt = context.activity.text;
      // const removedMentionText = TurnContext.removeRecipientMention(context.activity);
      // if (removedMentionText) {
      //   // Remove the line break
      //   txt = removedMentionText.toLowerCase().replace(/\n|\r/g, "").trim();
      // }
      // Trigger command by IM text
      switch (txt) {
        case "Begin": {
          await test.sendModuleContent(context);
        }
      }
      // By calling next() you ensure that the next BotHandler is run.
      await next();
    });
    // Listen to MembersAdded event, view https://learn.microsoft.com/en-us/microsoftteams/platform/resources/bot-v3/bots-notifications for more events
    this.onMembersAdded(async (context, next) => {
      const membersAdded = context.activity.membersAdded;
      for (let cnt = 0; cnt < membersAdded.length; cnt++) {
        if (membersAdded[cnt].id) {
          const card = cardTools.AdaptiveCards.declareWithoutData(rawWelcomeCard).render();
          await context.sendActivity({ attachments: [CardFactory.adaptiveCard(card)] });
          break;
        }
      }
      await next();
    });
  }
}
test.js
const ms = require('./methods')
async function sendModuleContent(context) {
  data = module_text // fetched from Airtable
  await ms.sendText(context, data)
}
methods.js
const { TeamsActivityHandler, ActivityHandler, MessageFactory } = require('botbuilder');
async function sendText(context, text) {
  console.log("Sending text")
  await context.sendActivity(text);
}
Refer to this: TypeError: Cannot perform 'get' on a proxy that has been revoked
The context object is only valid for the duration of the turn; once the turn ends, the framework revokes the proxy, so any later access fails with this error. The fix is to capture a conversation reference while the context is still live and use adapter.continueConversation to get a fresh context. Make the following changes to test.js:
const { TurnContext } = require("botbuilder");
var conversationReferences = {};
var adapter;
async function sendModuleContent(context) {
  data = module_text // fetched from Airtable
  const currentUser = context.activity.from.id;
  conversationReferences[currentUser] = TurnContext.getConversationReference(context.activity);
  adapter = context.adapter;
  await adapter.continueConversation(conversationReferences[currentUser], async turnContext => {
    await turnContext.sendActivity(data);
  });
}
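For the const test = require("./test") in teamsBot.js to resolve sendModuleContent, test.js presumably also exports it; a minimal sketch of the missing line (an assumption, since the export isn't shown in the question):
// at the bottom of test.js (assumed; not shown in the original question)
module.exports = { sendModuleContent };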

fs.writeFile crashes node app after writing first json file

I'm trying to crawl several web pages to check for broken links and write the results for the links to JSON files. However, after the first file is completed, the app crashes with no error popping up...
I'm using Puppeteer to crawl, Bluebird to run each link concurrently, and fs to write the files.
WHAT I'VE TRIED:
switching the file type to '.txt' or '.php'; this works, but I need to create another loop outside the current workflow to convert the files from '.txt' to '.json'. Renaming the file right after writing to it also causes the app to crash.
using try/catch statements around fs.writeFile, but it never throws an error
running the entire app outside of Express; this worked at some point, but I'm trying to use it within the framework
const express = require('express');
const router = express.Router();
const puppeteer = require('puppeteer');
const bluebird = require("bluebird");
const fs = require('fs');
router.get('/', function(req, res, next) {
  (async () => {
    // Our (multiple) URLs.
    const urls = ['https://www.testing.com/allergy-test/', 'https://www.testing.com/genetic-testing/'];
    const withBrowser = async (fn) => {
      const browser = await puppeteer.launch();
      try {
        return await fn(browser);
      } finally {
        await browser.close();
      }
    }
    const withPage = (browser) => async (fn) => {
      const page = await browser.newPage();
      // Turns request interceptor on.
      await page.setRequestInterception(true);
      // Ignore all the asset requests, just get the document.
      page.on('request', request => {
        if (request.resourceType() === 'document') {
          request.continue();
        } else {
          request.abort();
        }
      });
      try {
        return await fn(page);
      } finally {
        await page.close();
      }
    }
    const results = await withBrowser(async (browser) => {
      return bluebird.map(urls, async (url) => {
        return withPage(browser)(async (page) => {
          await page.goto(url, {
            waitUntil: 'domcontentloaded',
            timeout: 0 // Removes timeout.
          });
          // Search for urls we want to "crawl".
          const hrefs = await page.$$eval('a[href^="https://www.testing.com/"]', as => as.map(a => a.href));
          // Predefine our arrays.
          let links = [];
          let redirect = [];
          // Loops through each /goto/ url on page
          for (const href of Object.entries(hrefs)) {
            const response = await page.goto(href[1], {
              waitUntil: 'domcontentloaded',
              timeout: 0 // Remove timeout.
            });
            const chain = response.request().redirectChain();
            const link = {
              'source_url': href[1],
              'status': response.status(),
              'final_url': response.url(),
              'redirect_count': chain.length,
            };
            // Loops through the redirect chain for each href.
            for (const ch of chain) {
              redirect = {
                status: ch.response().status(),
                url: ch.url(),
              };
            }
            // Push all info of target link into links
            links.push(link);
          }
          // JSONify the data.
          const linksJson = JSON.stringify(links);
          let fileName = url.replace('https://www.testing.com/', '');
          fileName = fileName.replace(/[^a-zA-Z0-9\-]/g, '');
          // Write data to file in /tmp directory.
          fs.writeFile(`./tmp/${fileName}.json`, linksJson, (err) => {
            if (err) {
              return console.log(err);
            }
          });
        });
      }, { concurrency: 4 }); // How many pages to run at a time.
    });
  })();
});
module.exports = router;
UPDATE:
So there was nothing wrong with my code... I realized nodemon was stopping the process after each file was saved. Since nodemon detected a "file change", it kept restarting my server after the first file was written.
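If anyone else hits this: nodemon can be told to ignore the output directory so file writes there don't trigger a restart. A minimal sketch, assuming the JSON files land in ./tmp as in the code above:
// nodemon.json, placed next to package.json
{
  "ignore": ["tmp/*"]
}
Alternatively, pass it on the command line: nodemon --ignore 'tmp/*'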

How to get a docx file generated in Node saved to Firebase Storage

Hi, I am quite new to docxtemplater, but I absolutely love how it works. Right now I seem to be able to generate a new docx document as follows:
const functions = require('firebase-functions');
const admin = require('firebase-admin');
const {Storage} = require('@google-cloud/storage');
var PizZip = require('pizzip');
var Docxtemplater = require('docxtemplater');
admin.initializeApp();
const storage = new Storage();
const BUCKET = 'gs://myapp.appspot.com';
exports.test2 = functions.https.onCall((data, context) => {
  // The error object contains additional information when logged with JSON.stringify (it contains a properties object containing all suberrors).
  function replaceErrors(key, value) {
    if (value instanceof Error) {
      return Object.getOwnPropertyNames(value).reduce(function(error, key) {
        error[key] = value[key];
        return error;
      }, {});
    }
    return value;
  }
  function errorHandler(error) {
    console.log(JSON.stringify({error: error}, replaceErrors));
    if (error.properties && error.properties.errors instanceof Array) {
      const errorMessages = error.properties.errors.map(function (error) {
        return error.properties.explanation;
      }).join("\n");
      console.log('errorMessages', errorMessages);
      // errorMessages is a humanly readable message looking like this :
      // 'The tag beginning with "foobar" is unopened'
    }
    throw error;
  }
  let file_name = 'example.docx'; // this is the file saved in my firebase storage
  const File = storage.bucket(BUCKET).file(file_name);
  const readable = File.createReadStream();
  var buffers = [];
  readable.on('data', (buffer) => {
    buffers.push(buffer);
  });
  readable.on('end', () => {
    var buffer = Buffer.concat(buffers);
    var zip = new PizZip(buffer);
    var doc;
    try {
      doc = new Docxtemplater(zip);
      doc.setData({
        first_name: 'Fred',
        last_name: 'Flinstone',
        phone: '0652455478',
        description: 'Web app'
      });
      try {
        doc.render();
        doc.pipe(remoteFile2.createReadStream());
      }
      catch (error) {
        errorHandler(error);
      }
    } catch(error) {
      errorHandler(error);
    }
  });
});
My issue is that I keep getting an error that doc.pipe is not a function. I am quite new to Node.js, but is there a way to have the newly generated doc after doc.render() saved directly to Firebase Storage?
Taking a look at the type of doc, we find that it is a Docxtemplater object, and doc.pipe is not a function of that class. To get the file out of Docxtemplater, we need to use doc.getZip() to return the file (this will be either a JSZip v2 or a PizZip instance, based on what we passed to the constructor). Now that we have the zip object, we need to generate the binary data of the zip, which is done using generate({ type: 'nodebuffer' }) (to get a Node.js Buffer containing the data). Unfortunately, because the docxtemplater library doesn't support JSZip v3+, we can't make use of the generateNodeStream() method to get a stream to use with pipe().
With this buffer, we can either reupload it to Cloud Storage or send it back to the client that is calling the function.
The first option is relatively simple to implement:
import { v4 as uuidv4 } from 'uuid'; // handy if you want unique target file names
/* ... */
const contentBuffer = doc.getZip()
  .generate({type: 'nodebuffer'});
const targetName = "compiled.docx";
const targetStorageRef = admin.storage().bucket()
  .file(targetName);
await targetStorageRef.save(contentBuffer);
// send back the bucket-name pair to the caller
return { bucket: targetStorageRef.bucket.name, name: targetName };
However, sending the file itself back to the client isn't as easy, because this involves switching to an HTTP Event Function (functions.https.onRequest): a Callable Cloud Function can only return JSON-compatible data. Here we have a middleware function that takes a callable's handler function but supports returning binary data to the client.
import * as functions from "firebase-functions";
import * as admin from "firebase-admin";
import corsInit from "cors";
admin.initializeApp();
const cors = corsInit({ origin: true }); // TODO: Tighten
function callableRequest(handler) {
  if (!handler) {
    throw new TypeError("handler is required");
  }
  return (req, res) => {
    cors(req, res, (corsErr) => {
      if (corsErr) {
        console.error("Request rejected by CORS", corsErr);
        res.status(412).json({ error: "cors", message: "origin rejected" });
        return;
      }
      // for validateFirebaseIdToken, see https://github.com/firebase/functions-samples/blob/main/authorized-https-endpoint/functions/index.js
      validateFirebaseIdToken(req, res, async () => { // validateFirebaseIdToken won't pass errors to `next()`
        try {
          const data = req.body;
          const context = {
            auth: req.user ? { token: req.user, uid: req.user.uid } : null,
            instanceIdToken: req.get("Firebase-Instance-ID-Token"), // this is used with FCM
            rawRequest: req
          };
          let result: any = await handler(data, context);
          if (result && typeof result === "object" && "buffer" in result) {
            res.writeHead(200, [
              ["Content-Type", result.contentType],
              ["Content-Disposition", "attachment; filename=" + result.filename]
            ]);
            res.end(result.buffer);
          } else {
            result = functions.https.encode(result);
            res.status(200).send({ result });
          }
        } catch (err) {
          if (!(err instanceof functions.https.HttpsError)) {
            // This doesn't count as an 'explicit' error.
            console.error("Unhandled error", err);
            err = new functions.https.HttpsError("internal", "INTERNAL");
          }
          const { status } = err.httpErrorCode;
          const body = { error: err.toJSON() };
          res.status(status).send(body);
        }
      });
    });
  };
}
// expose the handler as an HTTP function
exports.test2 = functions.https.onRequest(callableRequest(async (data, context) => {
  /* ... */
  const contentBuffer = doc.getZip()
    .generate({type: "nodebuffer"});
  const targetName = "compiled.docx";
  return {
    buffer: contentBuffer,
    contentType: "application/vnd.openxmlformats-officedocument.wordprocessingml.document",
    filename: targetName
  };
}));
In your current code, there are a number of odd segments where you have nested try-catch blocks and variables in different scopes. To help combat this, we can make use of File#download(), which returns a Promise that resolves with the file contents in a Node.js Buffer, and File#save(), which returns a Promise that resolves when the given Buffer is uploaded.
Rolling this together for reuploading to Cloud Storage gives:
// This code is based off the examples provided for docxtemplater
// Copyright (c) Edgar HIPP [Dual License: MIT/GPLv3]
import * as functions from "firebase-functions";
import * as admin from "firebase-admin";
import PizZip from "pizzip";
import Docxtemplater from "docxtemplater";
admin.initializeApp();
// The error object contains additional information when logged with JSON.stringify (it contains a properties object containing all suberrors).
function replaceErrors(key, value) {
  if (value instanceof Error) {
    return Object.getOwnPropertyNames(value).reduce(
      function (error, key) {
        error[key] = value[key];
        return error;
      },
      {}
    );
  }
  return value;
}
function errorHandler(error) {
  console.log(JSON.stringify({ error: error }, replaceErrors));
  if (error.properties && error.properties.errors instanceof Array) {
    const errorMessages = error.properties.errors
      .map(function (error) {
        return error.properties.explanation;
      })
      .join("\n");
    console.log("errorMessages", errorMessages);
    // errorMessages is a humanly readable message looking like this :
    // 'The tag beginning with "foobar" is unopened'
  }
  throw error;
}
exports.test2 = functions.https.onCall(async (data, context) => {
  const file_name = "example.docx"; // this is the file saved in my firebase storage
  const templateRef = admin.storage().bucket()
    .file(file_name);
  const template_content = (await templateRef.download())[0];
  const zip = new PizZip(template_content);
  let doc;
  try {
    doc = new Docxtemplater(zip);
  } catch (error) {
    // Catch compilation errors (errors caused by the compilation of the template : misplaced tags)
    errorHandler(error);
  }
  doc.setData({
    first_name: "Fred",
    last_name: "Flinstone",
    phone: "0652455478",
    description: "Web app",
  });
  try {
    doc.render();
  } catch (error) {
    errorHandler(error);
  }
  const contentBuffer = doc.getZip().generate({ type: "nodebuffer" });
  // do something with contentBuffer
  // e.g. reupload to Cloud Storage
  const targetName = "compiled.docx";
  const targetStorageRef = admin.storage().bucket().file(targetName);
  await targetStorageRef.save(contentBuffer);
  return { bucket: targetStorageRef.bucket.name, name: targetName };
});
In addition to returning a bucket-name pair to the caller, you may also consider returning an access URL. This could be a signed URL that can last for up to 7 days; a download token URL (like getDownloadURL(), process described here) that lasts until the token is revoked; a Google Storage URI (gs://BUCKET_NAME/FILE_NAME), which is not an access URL itself but can be passed to a client SDK that can access the file if the client passes the storage security rules; or the file's public URL (after the file has been marked public).
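As an illustration of the first of those options, a minimal sketch of generating a V4 signed URL with the Admin SDK (the 7-day expiry shown is the maximum V4 allows), assuming targetStorageRef is the File object from the code above:
const [signedUrl] = await targetStorageRef.getSignedUrl({
  version: "v4",
  action: "read",
  expires: Date.now() + 7 * 24 * 60 * 60 * 1000, // 7 days from now
});
return { bucket: targetStorageRef.bucket.name, name: targetName, url: signedUrl };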
Based on the above code, you should be able to merge in returning the file directly yourself.

Using Async/Await in the Watson Node.js SDK

I am building a chatbot with the Watson API, where I use the async/await method to fetch data from MongoDB and attain the result, which I then send back to the user.
The function artpromise is the promise that collects data from MongoDB, and the function randomArtist fetches 3 random documents from the DB. However, the Watson Bluemix cloud service supports Node.js 6.1.3, which does not support the async method. Is there any way to update the Node.js version on Bluemix, or should I use a different approach to fetching data from the server?
let getConversationResponse = (message, context) => {
  let payload = {
    workspace_id: process.env.WORKSPACE_ID,
    context: context || {},
    input: message || {}
  };
  payload = preProcess(payload);
  return new Promise((resolved, rejected) => {
    // Send the input to the conversation service
    conversation.message(payload, async function(err, data) {
      if (err) {
        rejected(err);
      }
      else {
        if (data.context.type == 'ask') {
          let artist = data.context.name;
          let result = await artpromise(artist);
          console.log(result);
          data.context.name = result[0].name;
          data.context.nationality = result[0].nationality;
          data.context.birth = result[0].years;
          data.context.url = result[0].art_link;
          data.output.text = data.context.name+' is a '+data.context.nationality+' artist from '+data.context.birth+'. Check out a painting at '+data.context.url;
        }
        else if (data.context.type == 'random_artist') {
          let result = await randomArtist();
          console.log(result);
          data.output.text = 'Let\'s find some random artists for you! \n'+result;
        }
        let processed = postProcess(data);
        if (processed) {
          // if the return value is a Promise
          if (typeof processed.then === 'function') {
            processed.then(data => {
              resolved(data);
            }).catch(err => {
              rejected(err);
            })
          }
          // if the return value is the modified data
          else {
            resolved(processed);
          }
        }
        else {
          // if there is no return value
          resolved(data);
        }
      }
    });
  })
}
Using Node's util.promisify() utility, you can transform a callback-style function into a Promise-based one.
Somewhere outside of your getConversationResponse function, assign it to a local variable:
const util = require('util');
// bind conversation so the promisified method keeps the correct `this`
const messagePromise = util.promisify(conversation.message.bind(conversation));
And use that function instead. Something like this should work:
const util = require('util');
const messagePromise = util.promisify(conversation.message.bind(conversation));
let getConversationResponse = async (message, context) => {
  let payload = preProcess({
    workspace_id: process.env.WORKSPACE_ID,
    context: context || {},
    input: message || {}
  });
  let data = await messagePromise(payload);
  if (data.context.type == 'ask') {
    let artist = data.context.name;
    let result = await artpromise(artist);
    console.log(result);
    data.context.name = result[0].name;
    data.context.nationality = result[0].nationality;
    data.context.birth = result[0].years;
    data.context.url = result[0].art_link;
    data.output.text = data.context.name+' is a '+data.context.nationality+' artist from '+data.context.birth+'. Check out a painting at '+data.context.url;
  } else if (data.context.type == 'random_artist') {
    let result = await randomArtist();
    console.log(result);
    data.output.text = 'Let\'s find some random artists for you! \n'+result;
  }
  return postProcess(data) || data;
};
Note that if the return value of postProcess is falsy, the function returns the data variable instead. Additionally, an async function always returns a Promise, so to call this function, you'll do:
getConversationResponse(message, context).then((data) => {
  // Do something with the data
}).catch((e) => {
  // Handle the error!
});
or if you call it from another async function:
let data = await getConversationResponse(message, context);
or if you need to specifically catch errors in the calling async function:
try {
  let data = await getConversationResponse(message, context);
} catch (e) {
  // Handle error
}
Just like regular synchronous code, any error thrown in the function call chain "trickles up" to the top-most caller. If you're confused about this, I suggest reading up on error handling.
If you want to use the Watson API in an async Promise-based fashion throughout your code, it might be feasible to write a small wrapper library and use that directly instead.
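For example, a minimal sketch of such a wrapper, assuming the watson-wrapper.js module name and the conversation instance are yours to define:
// watson-wrapper.js (hypothetical module name)
const util = require('util');

module.exports = function wrap(conversation) {
  return {
    // promisified version of conversation.message, bound so it keeps its `this`
    message: util.promisify(conversation.message.bind(conversation)),
  };
};
You'd then consume it with something like const watson = require('./watson-wrapper')(conversation); and let data = await watson.message(payload);.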
A Promise-only implementation:
const util = require('util');
const messagePromise = util.promisify(conversation.message.bind(conversation));
let getConversationResponse = (message, context) => {
  let payload = preProcess({
    workspace_id: process.env.WORKSPACE_ID,
    context: context || {},
    input: message || {}
  });
  return messagePromise(payload).then((data) => {
    if (data.context.type == 'ask') {
      let artist = data.context.name;
      return artpromise(artist).then((result) => {
        data.context.name = result[0].name;
        data.context.nationality = result[0].nationality;
        data.context.birth = result[0].years;
        data.context.url = result[0].art_link;
        data.output.text = data.context.name+' is a '+data.context.nationality+' artist from '+data.context.birth+'. Check out a painting at '+data.context.url;
        return data;
      });
    } else if (data.context.type == 'random_artist') {
      return randomArtist().then((result) => {
        data.output.text = 'Let\'s find some random artists for you! \n' + result;
        return data;
      });
    }
    return data; // fall through unchanged for any other type
  }).then((data) => {
    return postProcess(data) || data;
  });
};
Calling it is exactly the same as with the async/await implementation.

Use headless chrome to intercept image request data

I have a use case that needs to use the Headless Chrome Network domain (https://chromedevtools.github.io/devtools-protocol/tot/Network/) to intercept all image requests and find out the image size before saving it (basically discarding small images such as icons).
However, I am unable to figure out a way to load the image data in memory before saving it. I need to load it into an Image object to get the width and height. Network.getResponseBody takes a requestId, which I don't have access to in Network.requestIntercepted. Also, Network.loadingFinished always gives me "0" in the encodedDataLength variable, and I have no idea why. So my questions are:
How do I intercept all responses from jpg/png requests and get the image data, without saving the file to disk via the URL string and loading it back?
BEST: how do I get the image dimensions from the response headers? Then I wouldn't have to read the data into memory at all.
My code is below:
const chromeLauncher = require('chrome-launcher');
const CDP = require('chrome-remote-interface');
const file = require('fs');
(async function() {
  async function launchChrome() {
    return await chromeLauncher.launch({
      chromeFlags: [
        '--disable-gpu',
        '--headless'
      ]
    });
  }
  const chrome = await launchChrome();
  const protocol = await CDP({
    port: chrome.port
  });
  const {
    DOM,
    Network,
    Page,
    Emulation,
    Runtime
  } = protocol;
  await Promise.all([Network.enable(), Page.enable(), Runtime.enable(), DOM.enable()]);
  await Network.setRequestInterceptionEnabled({enabled: true});
  Network.requestIntercepted(({interceptionId, request, resourceType}) => {
    if ((request.url.indexOf('.jpg') >= 0) || (request.url.indexOf('.png') >= 0)) {
      console.log(JSON.stringify(request));
      console.log(resourceType);
      if (request.url.indexOf("/unspecified.jpg") >= 0) {
        console.log("FOUND unspecified.jpg");
        console.log(JSON.stringify(interceptionId));
        // console.log(JSON.stringify(Network.getResponseBody(interceptionId)));
      }
    }
    Network.continueInterceptedRequest({interceptionId});
  });
  Network.loadingFinished(({requestId, timestamp, encodedDataLength}) => {
    console.log(requestId);
    console.log(timestamp);
    console.log(encodedDataLength);
  });
  Page.navigate({
    url: 'https://www.yahoo.com/'
  });
  Page.loadEventFired(async() => {
    protocol.close();
    chrome.kill();
  });
})();
This should get you 90% of the way there. It gets the body of each image request. You'd still need to base64-decode it, check the size, save it, etc...
const CDP = require('chrome-remote-interface');
const sizeThreshold = 1024;
async function run() {
  let client;
  try {
    client = await CDP();
    const { Network, Page } = client;
    // enable events
    await Promise.all([Network.enable(), Page.enable()]);
    // commands
    const _url = "https://google.co.za";
    let _pics = [];
    Network.responseReceived(async ({requestId, response}) => {
      let url = response ? response.url : null;
      if ((url.indexOf('.jpg') >= 0) || (url.indexOf('.png') >= 0)) {
        const {body, base64Encoded} = await Network.getResponseBody({ requestId }); // throws promise error returning null/undefined so can't destructure. Must be different in inspect shell to app?
        _pics.push({ url, body, base64Encoded });
        console.log(url, body, base64Encoded);
      }
    });
    await Page.navigate({ url: _url });
    await sleep(5000);
    // TODO: process _pics - base64Encoded, check body.length > sizeThreshold, save etc...
  } catch (err) {
    if (err.message && err.message === "No inspectable targets") {
      console.error("Either chrome isn't running or you already have another app connected to chrome - e.g. `chrome-remote-interface inspect`")
    } else {
      console.error(err);
    }
  } finally {
    if (client) {
      await client.close();
    }
  }
}
function sleep(milliseconds = 1000) {
  if (milliseconds == 0)
    return Promise.resolve();
  return new Promise(resolve => setTimeout(() => resolve(), milliseconds))
}
run();
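To finish the TODO above, a minimal sketch of the post-processing step (the pic-N output file names are assumptions for illustration):
const fs = require('fs');

// decode each collected body, drop small images, save the rest
function savePics(pics) {
  for (const [i, pic] of pics.entries()) {
    // bodies of binary resources come back base64-encoded
    const buffer = Buffer.from(pic.body, pic.base64Encoded ? 'base64' : 'utf8');
    if (buffer.length <= sizeThreshold) {
      continue; // discard small images such as icons
    }
    const ext = pic.url.indexOf('.png') >= 0 ? 'png' : 'jpg';
    fs.writeFileSync(`./pic-${i}.${ext}`, buffer);
  }
}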
