how to create gradient text in canvas nodejs? - node.js

I'm trying to create gradient text on Node using canvas. I tested code from https://www.w3schools.com/tags/canvas_filltext.asp; below is a reimplementation, and I received an error.
// Reimplementation of the w3schools canvas gradient-text demo on node-canvas.
const fs = require('fs');
const {
createCanvas,
loadImage
} = require('canvas');
// Text to render and destination path for the generated PNG.
const text="Gradient";
const output="./image.png";
// Draws `text` with a gradient fill on a 1280x720 canvas, then saves it.
async function start(text,output){
let [width,height] = [1280,720];
const canvas = createCanvas(width, height);
let context = canvas.getContext("2d");
await drawGradientText(text);
await saveImage(output);
async function drawGradientText(text) {
return new Promise((resolve) => {
context.font = "30px Verdana";
// Create gradient
let gradient = context.createLinearGradient(0, 0, canvas.width, 0);
// NOTE(review): these offsets are strings; node-canvas requires numeric
// offsets for addColorStop(offset, color) and throws
// "TypeError: offset required" here — this is the error being asked about.
gradient.addColorStop("0", " magenta");
gradient.addColorStop("0.5", "blue");
gradient.addColorStop("1.0", "red");
// Fill with gradient
context.fillStyle = gradient;
context.fillText(text, 10, 90);
resolve();
})
}
// Serializes the canvas to a PNG buffer and writes it synchronously.
function saveImage(output) {
return new Promise((resolve) => {
const buffer = canvas.toBuffer('image/png');
fs.writeFileSync(output, buffer);
resolve();
})
}
}
start(text,output);
THE CONSOLE SHOWS
TypeError: offset required
(node:18932) UnhandledPromiseRejectionWarning: Unhandled promise rejection. This error originated either by throwing inside of an async function without a catch block, or by rejecting a promise which was not handled with .catch()...
How could I create a text gradient on Node.js?

Unlike browsers, node-canvas is pretty strict regarding the type passed as offset to addColorStop( offset, color ).
They won't type cast it to float and will just throw the error you received, as can be seen here..
Arguably this is an interop issue, and they may want to fix it, but even in browsers, this offset should be a number, so pass numbers, not strings:
gradient.addColorStop(0, " magenta");

Based on the answer of Kaiido https://stackoverflow.com/users/3702797/kaiido
I noticed that I have to write the color in hexadecimal format.
the code now looks like this:
const fs = require('fs');
const {
  createCanvas,
  loadImage
} = require('canvas');

// Text to render and destination path for the generated PNG.
const text = "Gradient";
const output = "./image.png";

/**
 * Draws `text` with a horizontal gradient fill on a 1280x720 canvas and
 * writes the result to `output` as a PNG.
 */
async function start(text, output) {
  let [width, height] = [1280, 720];
  const canvas = createCanvas(width, height);
  let context = canvas.getContext("2d");

  await drawGradientText(text);
  await saveImage(output);

  async function drawGradientText(text) {
    return new Promise((resolve) => {
      context.font = "30px Verdana";
      // Create gradient — offsets must be numbers in node-canvas.
      let gradient = context.createLinearGradient(0, 0, canvas.width, 0);
      gradient.addColorStop(0, "#004");
      gradient.addColorStop(0.5, "#00fef3");
      context.fillStyle = gradient;
      // FIX: the original line read `fillText(text, width/2 - , height/2)`,
      // which is a syntax error (stray "- "). Center the text explicitly.
      context.textAlign = "center";
      context.fillText(text, width / 2, height / 2);
      resolve();
    });
  }

  // Serializes the canvas to a PNG buffer and writes it synchronously.
  function saveImage(output) {
    return new Promise((resolve) => {
      const buffer = canvas.toBuffer('image/png');
      fs.writeFileSync(output, buffer);
      resolve();
    });
  }
}

start(text, output);

Related

Discord Node js Canvas Error: Unsupported image type

I have been trying to fix this thing for over a week now, searching in google and youtube for this damn error and no solution have been found. I know Im at first days of node js so my code might be not perfect, but the canvas one were taken from people that did tutorials and I have try as many of them but the result is always the same error no matter what. Apparently the error going away if I remove every related displayAvatarURL code, which sucks because I can't use the user avatar in my welcome image. I have try to change formats, changing code, changing background image with a black one made with Gimp (not that matter because the problem seem is avatar)and removed background to check again. Nothing work. The bot will crash ALWAYS as soon a real user join probably because the avatar image and it DOESN'T crash when invite a bot for testing (because it doesnt have custom avatars apparently?). Thank you for the help.
Error:
node:events:505
throw er; // Unhandled 'error' event
^
Error: Unsupported image type
at setSource (C:**\Desktop\lolinya_bot_js\node_modules\canvas\lib\image.js:91:13)
at C:**\Desktop\lolinya_bot_js\node_modules\canvas\lib\image.js:59:11
at C:**\Desktop\lolinya_bot_js\node_modules\simple-get\index.js:97:7
at IncomingMessage. (C:**\Desktop\lolinya_bot_js\node_modules\simple-concat\index.js:88:13)
at Object.onceWrapper (node:events:641:28)
at IncomingMessage.emit (node:events:539:35)
at endReadableNT (node:internal/streams/readable:1345:12)
at processTicksAndRejections (node:internal/process/task_queues:83:21)
Emitted 'error' event on Client instance at:
at emitUnhandledRejectionOrErr (node:events:384:10)
at processTicksAndRejections (node:internal/process/task_queues:85:21)
Index.js:
const {
  Client,
  GatewayIntentBits,
  Routes,
  // InteractionType,
} = require('discord.js');
const Discord = require('discord.js');
// FIX: the npm scope is "@discordjs/rest" — "#discordjs/rest" cannot resolve.
const { REST } = require('@discordjs/rest');
const fs = require('node:fs');
// const { Console } = require('node:console');
const generateImage = require('../generateImage.js');
// FIX: dotenv was used below but never required, which throws a
// ReferenceError before the bot even starts.
const dotenv = require('dotenv');
dotenv.config();

// - CONFIG TOKEN, CLIENT AND GUILD ID
const TOKEN = process.env.TOKEN;
const CLIENT_ID = process.env.CLIENT_ID;
const GUILD_ID = process.env.GUILD_ID;
const WELCOME_ID = process.env.WELCOME_ID;

const client = new Client({
  intents: [
    GatewayIntentBits.Guilds,
    GatewayIntentBits.GuildMessages,
    GatewayIntentBits.GuildMembers,
    GatewayIntentBits.MessageContent,
  ],
});

// - CONFIG SLASH COMMANDS -
// Load every command module from ./src/commands and register it both in the
// client's collection (for dispatch) and in the REST payload (for refresh).
const commands = [];
const commandFiles = fs.readdirSync('./src/commands')
  .filter(file => file.endsWith('js'));
client.slashcommands = new Discord.Collection();
for (const file of commandFiles) {
  const command = require(`./commands/${file}`);
  client.slashcommands.set(command.data.name, command);
  commands.push(command.data.toJSON());
}

const rest = new REST({ version: '10' }).setToken(TOKEN);

// - CLIENT EMITTERS -
client.on('ready', () => {
  console.log(`${client.user.tag} has logged in.`);
});

client.on('guildMemberAdd', async (member) => {
  const img = await generateImage(member);
  // FIX: <@id> is a user mention; <#id> is a channel mention, which would
  // render a broken "#deleted-channel" instead of pinging the new member.
  member.guild.channels.cache.get(WELCOME_ID).send({
    content: `<@${member.id}> Welcome to the server!`,
    files: [img]
  });
});

client.on('interactionCreate', async (interaction) => {
  if (!interaction.isChatInputCommand()) return;
  const slashcmd = client.slashcommands.get(interaction.commandName);
  await slashcmd.run({ client, interaction });
});

// - INITIALIZE THE BOT AND ALSO REFRESH SLASH COMMANDS LIST -
(async () => {
  try {
    console.log('Started refreshing application (/) commands.');
    await rest.put(Routes.applicationGuildCommands(CLIENT_ID, GUILD_ID), {
      body: commands,
    });
    console.log('Successfully reloaded application (/) commands.');
    client.login(TOKEN);
  } catch (err) {
    console.error(err);
  }
})();
generateImage.js:
const { createCanvas, loadImage, registerFont } = require('canvas');
registerFont('./font/Cat paw.ttf', {family: 'neko'});
const Discord = require('discord.js');
const { AttachmentBuilder } = require('discord.js');

// Background image URL and layout constants for the welcome card.
const background = "https://i.imgur.com/VZblp7S.jpg";
const dim = {
  height: 675,
  width: 1200,
  margin: 50,
};
const av = {
  size: 256,
  x: 480,
  y: 170,
};

/**
 * Builds a welcome image for `member` (background, dark overlay, circular
 * avatar, text) and returns it as a Discord attachment.
 */
const generateImage = async (member) => {
  // createCanvas/getContext are synchronous; the awaits were spurious.
  const canvas = createCanvas(dim.width, dim.height);
  const ctx = canvas.getContext('2d');
  const username = member.user.username;
  const discrim = member.user.discriminator;

  // Draw the background image at the top-left corner.
  const backimg = await loadImage(background);
  let x = 0; //canvas.width / 2 - backimg.width / 2;
  let y = 0; //canvas.height / 2 - backimg.height / 2;
  ctx.drawImage(backimg, x, y);

  // Draw a semi-transparent box for text readability
  ctx.fillStyle = "rgba(0,0,0,0.7)";
  ctx.fillRect(
    dim.margin,
    dim.margin,
    dim.width - 2 * dim.margin,
    dim.height - 2 * dim.margin
  ); //fillRect(posX,posY, width, height)

  // Clip to a circle and draw the member's avatar inside it.
  ctx.save();
  ctx.beginPath();
  ctx.arc(
    av.x + av.size / 2,
    av.y + av.size / 2,
    av.size / 2, 0,
    Math.PI * 2,
    false
  ); //arc(centerX, centerY, radius, startAngle, endAngle, clockwise)
  ctx.clip();
  // FIX: discord.js v14 renamed the `format` option to `extension`. With
  // `format` the option is ignored and the CDN can return a .webp, which
  // node-canvas cannot decode ("Error: Unsupported image type").
  const avimg = await loadImage(member.displayAvatarURL({ extension: 'png' }));
  ctx.drawImage(avimg, av.x, av.y);
  ctx.restore();

  // Config our welcome text
  ctx.fillStyle = 'pink';
  ctx.textAlign = 'center';
  // Draw the welcome text
  ctx.font = '80px Cat paw';
  ctx.fillText("Welcome to the Server", dim.width / 2, dim.margin + 70);
  // Draw the username text
  ctx.font = '100px Cat paw';
  ctx.fillText(`${username} #${discrim}`, dim.width / 2, dim.height - dim.margin - 125);
  // Draw the server name text
  ctx.font = '40px Cat paw';
  ctx.fillText(`You are the member #${member.guild.memberCount}!`, dim.width / 2, dim.height - dim.margin - 50);

  const attachment = new AttachmentBuilder(canvas.toBuffer(), { name: 'welcome.png' });
  return attachment;
};

module.exports = generateImage;
If you get the unsupported image type error with the avatar URL -> On discord.js v14 format was replaced with extension so you'll have to change that
I have the same issue, it comes from the fact that
member.displayAvatarURL({ format: 'png' })
returns a .webp instead of returning a .png. It's strange, maybe it's bug from Discord.js v14.
I have found an ugly fix:
let avimg = await loadImage("https://cdn.discordapp.com/avatars/" + member.id + "/" + member.avatar + ".png");

decodePng in tensorflowjs throws shape error

I am trying to follow a tutorial and just want to load an image in TensorFlowJS.
// FIX: the npm scope is "@tensorflow" — "#tensorflow/tfjs-node" cannot resolve.
import * as tf from '@tensorflow/tfjs-node';
import fs from 'fs';

(async () => {
  // Read the PNG from disk as raw bytes.
  const desk = fs.readFileSync(__dirname + '/' + 'desk.png');
  const buf = Buffer.from(desk);
  const imageArray = new Uint8Array(buf);
  // decodePng on an RGBA image yields shape [height, width, 4]; the model in
  // the question expects 3 channels, so keep only R, G, B and drop alpha.
  const pngDecodedTensor = tf.node.decodePng(imageArray).slice([0, 0, 0], [-1, -1, 3]);
})();
When I run the above code, I see this error:
The shape of dict['image_tensor'] provided in model.execute(dict) must be [-1,-1,-1,3], but was [1,4032,3024,4]
The image is 3024x4032 and 10.4MB
Thanks for your help
The issue is related to the tensor shape when making the prediction.
The model is expecting a tensor with 3 channels, whereas the tensor passed as argument has 4 channels.
The tensor can be sliced to use only 3 of its 4 channels.
pngDecodedTensor = tf.node.decodePng(imageArray).slice([0], [-1, -1, 3])
You may want to try the fromPixels function like this:
const { Image } = require('canvas')
// From a buffer:
fs.readFile('images/squid.png', (err, squid) => {
if (err) throw err
const img = new Image()
img.onload = () => ctx.drawImage(img, 0, 0)
img.onerror = err => { throw err }
img.src = squid
})
// From a local file path:
const img = new Image()
img.onload = () => ctx.drawImage(img, 0, 0)
img.onerror = err => { throw err }
img.src = 'images/squid.png'
// From a remote URL:
img.src = 'http://picsum.photos/200/300'
// ... as above
var imgAsTensor = tf.fromPixels(img);
// ... now use it as you wish.
You can learn more about this function here:

Simple API endpoint in NodeJs

I'm trying to write a very simple API endpoint for a Udacity project I'm working on. When I try postman against the below endpoint, I get a promise rejection warning. I first attempted this using the 3 commented lines within this endpoint, but since it ran asynchronously, the file would not be ready before the delete function would run resulting in a file not found.
Any suggestions?
// GET /filteredimage?image_url=... — filters the image at the given URL.
app.get( "/filteredimage", async ( req, res ) => {
var re = req.query.image_url;
if (!re){
return res.status(400).send(`id is required`);
}
// NOTE(review): filterImageFromURL is async but its result is not awaited
// here, so `filteredpath` is still a pending Promise when the callback runs —
// this is the source of the unhandled-rejection warning described above.
// The handler also never sends a success response (see commented lines).
var myfun = function (data, callback) {
var filteredpath = filterImageFromURL(data);
callback([filteredpath]);
};
myfun(re,deleteLocalFiles);
// let filteredpath = filterImageFromURL(re);
// res.sendFile(filteredpath);
// deleteLocalFiles([filteredpath]);
} );
Here are the util functions:
// Downloads the image at `inputURL`, resizes it to 256x256, sets JPEG
// quality to 60, converts to greyscale, and resolves with the absolute
// path of the written file.
// NOTE(review): wrapping an async callback in `new Promise` is redundant,
// and a rejection from Jimp.read is never propagated to the returned
// promise — it becomes an unhandled rejection.
export async function filterImageFromURL(inputURL: string): Promise<string>{
return new Promise( async resolve => {
const photo = await Jimp.read(inputURL);
// Random suffix reduces collisions between concurrent requests.
const outpath = '/tmp/filtered.'+Math.floor(Math.random() * 2000)+'.jpg';
await photo
.resize(256, 256) // resize
.quality(60) // set JPEG quality
.greyscale() // set greyscale
.write(__dirname+outpath, (img)=>{
resolve(__dirname+outpath);
});
});
}
// deleteLocalFiles
// helper function to delete files on the local disk
// useful to cleanup after tasks
// INPUTS
// files: Array<string> an array of absolute paths to files
// Synchronously deletes each file in `files` (absolute paths) from disk.
// Throws if any path does not exist.
export async function deleteLocalFiles(files:Array<string>){
for( let file of files) {
fs.unlinkSync(file);
}
}
Not much for TypeScript but I see a few issues:
One, you don't have to put a Promise in an async function because an async function is a Promise. Two, you're not awaiting filterImageFromURL() properly. And three, you should use try/catch blocks so that you don't get Unhandled Promise Rejection errors.
Forgive my stripping out of your TS.
// GET /filteredimage?image_url=... — validates the query param, filters the
// image, streams it back to the client, and removes the temp file only after
// the response has completed.
app.get("/filteredimage", async (req, res) => {
  try {
    var re = req.query.image_url;
    if (!re) {
      return res.status(400).send(`id is required`);
    }
    // Await the async filter so we have a real path, not a pending Promise.
    var filteredpath = await filterImageFromURL(re);
    // FIX: the previous version deleted the file without ever sending a
    // response. Send first; the sendFile callback fires after the response
    // ends, so deleting there avoids the "file not found" race.
    res.sendFile(filteredpath, async (err) => {
      if (err) {
        console.error(err);
      }
      try {
        await deleteLocalFiles([filteredpath]);
      } catch (cleanupErr) {
        console.error(cleanupErr);
      }
    });
  } catch (err) {
    console.error(err);
    res.status(500).send(`failed to filter image`);
  }
});
// Downloads the image at `inputURL`, resizes it to 256x256, sets JPEG
// quality to 60, converts to greyscale, writes it under __dirname, and
// resolves with the written file's path (which callers pass to
// res.sendFile / deleteLocalFiles).
export async function filterImageFromURL(inputURL) {
  // FIX: the parameter was named `inputUR` while the body read `inputURL`,
  // which threw a ReferenceError on every call.
  try {
    const photo = await Jimp.read(inputURL);
    // Random suffix reduces collisions between concurrent requests.
    const outpath = '/tmp/filtered.' + Math.floor(Math.random() * 2000) + '.jpg';
    // FIX: .write() is callback-based; without waiting for it the function
    // returned before the file existed, and it returned the Jimp object
    // instead of the path the callers expect.
    return await new Promise((resolve, reject) => {
      photo
        .resize(256, 256) // resize
        .quality(60) // set JPEG quality
        .greyscale() // set greyscale
        .write(__dirname + outpath, (err) => {
          if (err) return reject(err);
          resolve(__dirname + outpath);
        });
    });
  } catch (err) {
    console.error(err);
  }
}

Streaming multiple svg strings to png in nodejs using promises

I am trying to convert multiple svg strings to png's so I can render them onto a pdf using PdfMake in nodejs. This works fine for one svg, but when I add multiple svg strings, they get overwritten by the last one. With this example code, it renders two images of png2 (svg2).
// Render each SVG string to a PNG data-URI, in parallel.
const promises = [svg1,svg2].map(str => {
const stream = new Readable();
stream.push(str);
stream.push(null);
return svgPromise(stream);
});
const result = await Promise.all(promises);
// NOTE(review): both entries end up holding the *last* rendered PNG — per
// the answer below, `datauri` is one shared instance whose format() call
// mutates it, so every resolved value points at the same object.
const png1 = result[0].content;
const png2 = result[1].content;
// Pipes an SVG stream into librsvg and resolves with a PNG data-URI once
// the SVG has been fully parsed ("finish").
function svgPromise(stream) {
return new Promise((resolve, reject) => {
const svg = new Rsvg();
stream.pipe(svg);
svg.on("finish", function() {
// Render at 2x for resolution, then wrap the raw buffer as a data-URI.
const buffer = svg.render({
format: "png",
width: width * 2,
height: height * 2
}).data;
const png = datauri.format(".png", buffer);
resolve(png);
});
});
}
Not sure if this error is related to stream or my promise logic. Any ideas?
Dependencies:
"librsvg": "0.7.0"
"pdfmake": "0.1.35"
"datauri": "1.0.5"
It pays to list all the used modules. Assuming you used datauri, it seems you need to initialize a new instance for every call:
// Corrected "finish" handler: construct a fresh Datauri per render so each
// resolved PNG is an independent object (a shared instance is mutated by
// format() and would make every result point at the last render).
svg.on("finish", function() {
const datauri = new Datauri();
const buffer = svg.render({
format: "png",
width: 16,
height: 16
}).data;
const png = datauri.format(".png", buffer);
resolve(png);
});

How to detect more than 10 faces in the google vision apis

Hi, I am new to the Google Vision APIs. I want to detect the faces in an image, using Node.js. The local image contains more than 10 faces, but the Vision API returns only 10 face detections. Is there any way to detect all the faces using this Vision API? Please refer to the Vision Node API.
and you can take this image as ref
Here is my code
// Express-style middleware: runs Vision face detection on a local image,
// stores the raw results on req.body, and draws boxes around the detected
// faces into `out.png` before calling next().
function findFaceontheImage(req, res, next) {
var vision = Vision();
var inputfile = 'NASA_Astronaut_Group_15.jpg';
var outputFile = 'out.png';
// NOTE(review): with only a source and no explicit request features, the
// API returns at most 10 faceAnnotations — the cap the question hits; see
// the maxResults-based answer below.
vision.faceDetection({source: {filename: inputfile}})
.then(function (results) {
const faces = results[0].faceAnnotations;
console.log('Faces:');
req.body['faces']=results;
var numFaces = faces.length;
console.log('Found ' + numFaces + (numFaces === 1 ? ' face' : ' faces'));
highlightFaces(inputfile, faces, outputFile, Canvas, function (err) {
if (err) {
next()
}
console.log("Finished!");
next()
});
})
.catch(function (err) {
console.error('ERROR:', err);
});
}
// Draws a green outline around each face's bounding polygon on top of the
// input image and streams the result to `outputFile` as a PNG.
// `callback(err)` fires on read failure or once the PNG stream ends.
function highlightFaces(inputFile, faces, outputFile, Canvas, callback) {
  fs.readFile(inputFile, function (err, imageData) {
    if (err) {
      return callback(err);
    }
    // Load the original image into a node-canvas Image and paint it.
    var CanvasImage = Canvas.Image;
    var picture = new CanvasImage();
    picture.src = imageData;
    var canvas = new Canvas(picture.width, picture.height);
    var ctx = canvas.getContext("2d");
    ctx.drawImage(picture, 0, 0, picture.width, picture.height);
    // Outline every detected face with a translucent green polygon.
    ctx.strokeStyle = "rgba(0,255,0,0.8)";
    ctx.lineWidth = "5";
    faces.forEach(function (face) {
      ctx.beginPath();
      var startX = 0;
      var startY = 0;
      face.boundingPoly.vertices.forEach(function (vertex, index) {
        // Remember the first vertex so the polygon can be closed at the end.
        if (index === 0) {
          startX = vertex.x;
          startY = vertex.y;
        }
        ctx.lineTo(vertex.x, vertex.y);
      });
      ctx.lineTo(startX, startY);
      ctx.stroke();
    });
    // Stream the annotated canvas to disk chunk by chunk.
    console.log("Writing to file " + outputFile);
    var fileStream = fs.createWriteStream(outputFile);
    var pngOut = canvas.pngStream();
    pngOut.on("data", function (chunk) {
      fileStream.write(chunk);
    });
    pngOut.on("error", console.log);
    pngOut.on("end", callback);
  });
}
In case there are other people who are still struggling with this topic.
With the Node.js Client Library, you can pass the ImprovedRequest object to the client.faceDetection(..) method instead of using the filepath or imageuri.
For example, in my case, I want the api to process an image in my GCS. So, instead of placing the imageuri as string. I'd do something like below.
// FIX: the npm scope is "@google-cloud/vision" — "#google-cloud/vision"
// cannot resolve.
import { protos } from '@google-cloud/vision';

// BEFORE — implicit request; results are capped at 10 faces. Kept as a
// comment: redeclaring `const [result]` below would be a SyntaxError.
// const [result] = await CLIENT.faceDetection(`gs://${bucketName}/${filePath}`);

// AFTER — pass a full request object so maxResults can raise the cap.
const [result] = await CLIENT.faceDetection({
  image: {
    source: { imageUri: `gs://${bucketName}/${filePath}` }
  },
  features: [
    {
      maxResults: 100,
      type: protos.google.cloud.vision.v1.Feature.Type.FACE_DETECTION,
    },
  ],
});
Just in case no one comes up with a solution that forces the API to return more results, here is some pseudocode:
def process(image)
faces = process image
return faces if faces.size < 10
split image into two a bit overlapping half1 and half2
# we do overlapping because splitting may split a face
a = process(half1)
b = process(half2)
return a + b - intersection(a, b)
The intersection function should throw out those images that are on the same (taking in mind the possible +/-few pixel errors) coordinates plus the shift that we had between half1 and half2 withing the image.

Resources