I am trying to follow a tutorial and just want to load an image in TensorFlowJS.
import * as tf from '@tensorflow/tfjs-node';
import fs from 'fs';

(async () => {
  const desk = fs.readFileSync(__dirname + '/' + 'desk.png');
  const buf = Buffer.from(desk);
  const imageArray = new Uint8Array(buf);
  const pngDecodedTensor = tf.node.decodePng(imageArray);
})();
When I run the above code, I see this error:
The shape of dict['image_tensor'] provided in model.execute(dict) must be [-1,-1,-1,3], but was [1,4032,3024,4]
The image is 3024x4032 and 10.4MB
Thanks for your help
The issue is related to the tensor shape when making the prediction.
The model is expecting a tensor with 3 channels, whereas the tensor passed as argument has 4 channels.
The tensor can be sliced to use only 3 of its 4 channels.
pngDecodedTensor = tf.node.decodePng(imageArray).slice([0, 0, 0], [-1, -1, 3])
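If I read the tfjs-node API correctly, decodePng also accepts an optional channel count, so the alpha channel can be dropped at decode time instead of slicing afterwards:

// Ask the decoder for 3 channels (RGB) directly, discarding alpha
const pngDecodedTensor = tf.node.decodePng(imageArray, 3);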
You may want to try the fromPixels function like this:
const { createCanvas, Image } = require('canvas')
const fs = require('fs')

// A canvas to draw the loaded image onto before reading its pixels
const canvas = createCanvas(200, 300)
const ctx = canvas.getContext('2d')

// From a buffer:
fs.readFile('images/squid.png', (err, squid) => {
  if (err) throw err
  const img = new Image()
  img.onload = () => ctx.drawImage(img, 0, 0)
  img.onerror = err => { throw err }
  img.src = squid
})

// From a local file path:
const img = new Image()
img.onload = () => ctx.drawImage(img, 0, 0)
img.onerror = err => { throw err }
img.src = 'images/squid.png'

// From a remote URL:
img.src = 'http://picsum.photos/200/300'
// ... as above

var imgAsTensor = tf.fromPixels(img); // called tf.browser.fromPixels in recent tfjs versions
// ... now use it as you wish.
I have an image I take a snippet of, and I am trying to save that snippet to a file. The steps are:
get the image using getPixels
snip a section of the ndarray (my goal is the top-left corner)
save the ndarray as a new image
My code:
var getPixels = require("get-pixels");
var fs = require("fs");
var savePixels = require("save-pixels");

let img = 'path_to_img_file';
getPixels(img, "image/jpeg", function (err, pixels) {
  if (err) {
    console.log("Bad image", err);
    return;
  }
  pixels = pixels.hi(50, 50, 50); // subset
  savePixels(pixels, "png").pipe((p) =>
    fs.createWriteStream(p, "shot.png")
  );
});
My error:
Unhandled error. ('No data provided')
This worked for me:
var getPixels = require("get-pixels");
var fs = require("fs");
var savePixels = require("save-pixels");

let img = './output.png';
getPixels(img, "image/png", function (err, pixels) {
  if (err) {
    console.log("Bad image", err);
    return;
  }
  // Save to output2.png
  savePixels(pixels, "png").pipe(fs.createWriteStream("./output2.png"));
});
I think your issue was that you did
.pipe((p) => ...)
whereas the usual syntax, to the best of my knowledge, is
readableStream.pipe(writableStream)
That is, pipe expects the writable stream itself as its argument, not a callback that creates one.
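If the goal is still to save just the top-left corner, here is a minimal sketch combining the crop with the fixed pipe call. It assumes, per the get-pixels docs, that the ndarray has shape [width, height, channels], so the third argument to .hi() should be the channel count rather than 50:

var getPixels = require("get-pixels");
var fs = require("fs");
var savePixels = require("save-pixels");

getPixels("./output.png", "image/png", function (err, pixels) {
  if (err) {
    console.log("Bad image", err);
    return;
  }
  // Keep the top-left 50x50 region and all channels
  var cropped = pixels.hi(50, 50, pixels.shape[2]);
  savePixels(cropped, "png").pipe(fs.createWriteStream("./crop.png"));
});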
I'm trying to create gradient text in Node using canvas. I tested the code from https://www.w3schools.com/tags/canvas_filltext.asp; below is a reimplementation, and I received an error.
const fs = require('fs');
const { createCanvas, loadImage } = require('canvas');

const text = "Gradient";
const output = "./image.png";

async function start(text, output) {
  let [width, height] = [1280, 720];
  const canvas = createCanvas(width, height);
  let context = canvas.getContext("2d");

  await drawGradientText(text);
  await saveImage(output);

  async function drawGradientText(text) {
    return new Promise((resolve) => {
      context.font = "30px Verdana";
      // Create gradient
      let gradient = context.createLinearGradient(0, 0, canvas.width, 0);
      gradient.addColorStop("0", " magenta");
      gradient.addColorStop("0.5", "blue");
      gradient.addColorStop("1.0", "red");
      // Fill with gradient
      context.fillStyle = gradient;
      context.fillText(text, 10, 90);
      resolve();
    })
  }

  function saveImage(output) {
    return new Promise((resolve) => {
      const buffer = canvas.toBuffer('image/png');
      fs.writeFileSync(output, buffer);
      resolve();
    })
  }
}

start(text, output);
The console shows:
TypeError: offset required
(node:18932) UnhandledPromiseRejectionWarning: Unhandled promise rejection. This error originated either by throwing inside of an async function without a catch block, or by rejecting a promise which was not handled with .catch()...
How can I create a text gradient in Node.js?
Unlike browsers, node-canvas is pretty strict regarding the type passed as offset to addColorStop(offset, color).
It won't type-cast it to a float and will just throw the error you received, as can be seen here.
Arguably this is an interop issue, and they may want to fix it, but even in browsers this offset should be a number, so pass numbers, not strings:
gradient.addColorStop(0, " magenta");
Based on the answer of Kaiido (https://stackoverflow.com/users/3702797/kaiido), I noticed that I also had to write the colors in hexadecimal format.
The code now looks like this:
const fs = require('fs');
const { createCanvas, loadImage } = require('canvas');

const text = "Gradient";
const output = "./image.png";

async function start(text, output) {
  let [width, height] = [1280, 720];
  const canvas = createCanvas(width, height);
  let context = canvas.getContext("2d");

  await drawGradientText(text);
  await saveImage(output);

  async function drawGradientText(text) {
    return new Promise((resolve) => {
      context.font = "30px Verdana";
      // Create gradient
      let gradient = context.createLinearGradient(0, 0, canvas.width, 0);
      gradient.addColorStop(0, "#004");
      gradient.addColorStop(0.5, "#00fef3");
      context.fillStyle = gradient;
      context.fillText(text, width / 2, height / 2);
      resolve();
    })
  }

  function saveImage(output) {
    return new Promise((resolve) => {
      const buffer = canvas.toBuffer('image/png');
      fs.writeFileSync(output, buffer);
      resolve();
    })
  }
}

start(text, output);
I am developing a face detection application, for which I need to collect the users' images as references to detect them later. I have successfully uploaded the images to a MySQL database; now I need to upload each image to the React public folder so it can be matched against the camera feed. I am stuck at uploading images to the React public folder. Please help me get rid of this problem.
This is the React code, where the image to be detected is in the imgUrl variable:
detect = async () => {
  const videoTag = document.getElementById("videoTag");
  const canvas = document.getElementById("myCanvas");
  const displaySize = { width: videoTag.width, height: videoTag.height };
  faceapi.matchDimensions(canvas, displaySize);

  // setInterval starts here for continuous detection
  time = setInterval(async () => {
    let fullFaceDescriptions = await faceapi
      .detectAllFaces(videoTag)
      .withFaceLandmarks()
      .withFaceExpressions()
      .withFaceDescriptors();
    const value = fullFaceDescriptions.length;
    this.setState({ detection: value });
    fullFaceDescriptions = faceapi.resizeResults(
      fullFaceDescriptions,
      displaySize
    );
    canvas.getContext("2d").clearRect(0, 0, canvas.width, canvas.height);

    // Label images
    var dummy = ["praveen", "vikranth", "Gokul", "Rahul"];
    const labels = nameArray1;
    // const labels = ["praveen", "vikranth", "Gokul", "Rahul"];
    if (no_of_times <= 0) {
      if (no_of_times === 0) {
        labeledFaceDescriptors = await Promise.all(
          labels.map(async (label) => {
            // fetch image data from urls and convert blob to HTMLImage element
            const imgUrl = `/img/${label}.png`; // for testing purpose
            // const imgUrl = testImage;
            const img = await faceapi.fetchImage(imgUrl);
            const fullFaceDescription = await faceapi
              .detectSingleFace(img)
              .withFaceLandmarks()
              .withFaceExpressions()
              .withFaceDescriptor();
            if (!fullFaceDescription) {
              throw new Error(`no faces detected for ${label}`);
            }
            const faceDescriptors = [fullFaceDescription.descriptor];
            return new faceapi.LabeledFaceDescriptors(label, faceDescriptors);
          })
        );
        // console.log(no_of_times);
      }
    }
    const maxDescriptorDistance = 0.7;
    no_of_times++;
    const faceMatcher = new faceapi.FaceMatcher(
      labeledFaceDescriptors,
      maxDescriptorDistance
    );
    const results = fullFaceDescriptions.map((fd) =>
      faceMatcher.findBestMatch(fd.descriptor)
    );
    result = [];
    results.forEach((bestMatch, i) => {
      const box = fullFaceDescriptions[i].detection.box;
      // console.log(box)
      const text = bestMatch.toString(); // this is for the bestMatch name detection
      var str = "";
      // This strips the confidence from the name, to map the value without duplicates
      var val = text.replace(/[0-9]/g, "");
      for (let i of val) {
        if (i !== " ") {
          str += i;
        } else {
          break;
        }
      }
      if (result.includes(str) === false) result.push(str);
      const drawBox = new faceapi.draw.DrawBox(box, { label: text });
      drawBox.draw(canvas);
      faceapi.draw.drawFaceExpressions(canvas, fullFaceDescriptions, 0.85);
    });
    for (let i = 0; i < fullFaceDescriptions.length; i++) {
      const result1 = fullFaceDescriptions[i].expressions.asSortedArray()[i];
      // console.log(result[i]);
      // console.log(result1.expression);
      this.test(result[i], result1.expression);
    }
  }, 100);
};
In the above code I am manually putting the image in the public folder; this needs to happen dynamically when the user uploads an image.
This is where I get the images in base64 from Node.js:
axios.get("/image").then((res) => {
testImage = res.data;
// console.log("from image" + res.data);
imgback = <img src={`data:image/jpeg;base64,${res.data}`} />;
});
This is the Node.js code for the GET request from React:
app.get("/image", (req, res) => {
connection.query("SELECT * FROM images", (error, row, fields) => {
if (!!error) {
console.log("Error in the query");
} else {
console.log("successful query");
var buffer = new Buffer(row[0].image, "binary");
var bufferBase64 = buffer.toString("base64");
res.send(bufferBase64);
}
});
});
My goal is: for the imgUrl variable in the React code I need to point at the image folder, and for that I need to add images to the folder dynamically.
Or is there any other way to give the image data directly to the imgUrl variable? Please help me sort out this problem.
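One possible way around writing files into the public folder at all (a sketch only; I have not tested it against this exact setup): faceapi.fetchImage fetches whatever URL it is given, and browsers accept data: URLs, so the base64 string returned by the /image endpoint may be usable directly:

// Hypothetical: build a data URL from the base64 response and hand it
// straight to face-api.js instead of a path under /public.
const res = await axios.get("/image");
const imgUrl = `data:image/jpeg;base64,${res.data}`;
const img = await faceapi.fetchImage(imgUrl); // HTMLImageElement, as before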
My server receives a list of base64 images, e.g. [img1, img2, ...], at an endpoint.
I have a function getPoses(img) that takes an image and returns an object.
I want to be able to map the images through getPoses() and get the object back for each image in parallel.
I have tried the async.each function, but I notice it's not really parallel, probably because it runs in a single thread/core.
I have also tried to use cluster, so that I can utilize all the cores to run getPoses() on multiple cores. But I feel stuck because it's not clear to me how the master can obtain the poses found by the workers.
I would be super grateful for any help from the community.
Async code:
// ENDPOINT
app.post('/postImage', async function (req, res) {
  const imgBase64List = req.body.imgBase64.split('next');
  const imgBase64IndexList = [];
  imgBase64List.forEach((imgBase64, index) => {
    imgBase64IndexList.push([index, imgBase64]);
  });
  async.each(imgBase64IndexList, getPoseDetection, function (err) {
    if (err) {
      console.log("Error occurred when fetching one of the poses");
      console.log(err);
    } else {
      console.log('Sending Poses!');
      res.send(JSON.stringify(poseDetectionResults));
    }
  });
}); // End of app.post and async function
const getPoseDetection = async (imgBase64Index, callback) => {
  // console.log('start');
  const img = new Image();
  img.src = imgBase64Index[1];
  const imgIndex = imgBase64Index[0];
  const canvas = createCanvas(img.width, img.height);
  const ctx = canvas.getContext('2d');
  ctx.drawImage(img, 0, 0);
  const input = tf.browser.fromPixels(canvas);
  const imageScaleFactor = 0.3;
  const flipHorizontal = false;
  const outputStride = 8;
  const poses = await net.estimateMultiplePoses(input, {
    flipHorizontal: false,
    maxDetections: 2,
    minPoseConfidence: 0.15,
    minPartConfidence: 0.1,
    nmsRadius: 20,
  });
  boundingBoxes = []; // reset the boundingBoxes
  poses.forEach(pose => {
    var box = posenet.getBoundingBoxPoints(pose.keypoints);
    boundingBoxes.push(box);
  });
  poseDetectionResults[imgIndex] = { detectionList: poses };
  // console.log(poses)
};
Thank You
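For the part about how the master can obtain the poses found by the workers, here is a minimal sketch using Node's built-in worker_threads module instead of cluster; pose-worker.js and the getPoses call inside it are hypothetical stand-ins for the real pose detection:

// main.js: fan out one worker per image and collect the results.
const { Worker } = require('worker_threads');

function getPosesInWorker(imgBase64) {
  return new Promise((resolve, reject) => {
    const worker = new Worker('./pose-worker.js', { workerData: imgBase64 });
    worker.once('message', resolve); // the parent receives the poses here
    worker.once('error', reject);
  });
}

async function getAllPoses(imgBase64List) {
  // One worker per image; Promise.all preserves the original order.
  return Promise.all(imgBase64List.map(getPosesInWorker));
}

// pose-worker.js: compute the poses and post them back to the parent.
const { parentPort, workerData } = require('worker_threads');
getPoses(workerData).then((poses) => parentPort.postMessage(poses)); // getPoses is hypothetical

Note that spawning a worker per image is expensive, since each worker has to load the model again; a small pool of long-lived workers created once and reused is usually the better design.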
Hi, I am new to the Google Vision APIs. I want to detect the faces in an image using Node.js. The local image contains more than 10 faces, but the Vision API returns only 10 face detections. Is there any way to detect all the faces using this Vision API? Please refer to the Vision Node API.
and you can take this image as ref
Here is my code
function findFaceontheImage(req, res, next) {
  var vision = Vision();
  var inputfile = 'NASA_Astronaut_Group_15.jpg';
  var outputFile = 'out.png';
  vision.faceDetection({ source: { filename: inputfile } })
    .then(function (results) {
      const faces = results[0].faceAnnotations;
      console.log('Faces:');
      req.body['faces'] = results;
      var numFaces = faces.length;
      console.log('Found ' + numFaces + (numFaces === 1 ? ' face' : ' faces'));
      highlightFaces(inputfile, faces, outputFile, Canvas, function (err) {
        if (err) {
          next();
        }
        console.log("Finished!");
        next();
      });
    })
    .catch(function (err) {
      console.error('ERROR:', err);
    });
}
function highlightFaces(inputFile, faces, outputFile, Canvas, callback) {
  fs.readFile(inputFile, function (err, image) {
    if (err) {
      return callback(err);
    }
    var Image = Canvas.Image;
    // Open the original image into a canvas
    var img = new Image();
    img.src = image;
    var canvas = new Canvas(img.width, img.height);
    var context = canvas.getContext("2d");
    context.drawImage(img, 0, 0, img.width, img.height);

    // Now draw boxes around all the faces
    context.strokeStyle = "rgba(0,255,0,0.8)";
    context.lineWidth = "5";
    faces.forEach(function (face) {
      context.beginPath();
      var origX = 0;
      var origY = 0;
      face.boundingPoly.vertices.forEach(function (bounds, i) {
        if (i === 0) {
          origX = bounds.x;
          origY = bounds.y;
        }
        context.lineTo(bounds.x, bounds.y);
      });
      context.lineTo(origX, origY);
      context.stroke();
    });

    // Write the result to a file
    console.log("Writing to file " + outputFile);
    var writeStream = fs.createWriteStream(outputFile);
    var pngStream = canvas.pngStream();
    pngStream.on("data", function (chunk) {
      writeStream.write(chunk);
    });
    pngStream.on("error", console.log);
    pngStream.on("end", callback);
  });
}
In case there are other people who are still struggling with this topic:
with the Node.js client library, you can pass an ImprovedRequest object to the client.faceDetection(..) method instead of using the file path or image URI.
For example, in my case, I wanted the API to process an image in my GCS bucket, so instead of passing the image URI as a string, I do something like the below.
import { protos } from '@google-cloud/vision';

// BEFORE
const [result] = await CLIENT.faceDetection(`gs://${bucketName}/${filePath}`);

// AFTER
const [result] = await CLIENT.faceDetection({
  image: {
    source: { imageUri: `gs://${bucketName}/${filePath}` },
  },
  features: [
    {
      maxResults: 100,
      type: protos.google.cloud.vision.v1.Feature.Type.FACE_DETECTION,
    },
  ],
});
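The part that matters here is maxResults: if I recall the API docs correctly, each feature defaults to at most 10 results when maxResults is not set, which would explain the 10-face cap in the question.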
Just in case no one comes up with a solution that forces the API to return more results, here is a pseudocode sketch:

def process(image):
    faces = detect_faces(image)   # the actual API call
    return faces if faces.size < 10
    # split image into two slightly overlapping halves, half1 and half2;
    # we overlap because a straight split may cut a face in two
    a = process(half1)
    b = process(half2)
    return a + b - intersection(a, b)

The intersection function should throw out those faces that sit at the same coordinates (allowing for +/- a few pixels of error), after accounting for the shift between half1 and half2 within the image.
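A rough Node sketch of that idea, assuming the sharp library for cropping; detectFaces (a wrapper around the Vision call), shiftBox, and dedupe are hypothetical helpers:

const sharp = require('sharp');

async function processRecursive(buffer) {
  const faces = await detectFaces(buffer); // hypothetical Vision wrapper
  if (faces.length < 10) return faces;     // under the cap, trust the result

  const { width, height } = await sharp(buffer).metadata();
  const overlap = Math.round(width * 0.1); // overlap so a face is never split
  const halfW = Math.round(width / 2);

  const left = await sharp(buffer)
    .extract({ left: 0, top: 0, width: halfW + overlap, height })
    .toBuffer();
  const right = await sharp(buffer)
    .extract({ left: halfW - overlap, top: 0, width: width - halfW + overlap, height })
    .toBuffer();

  const a = await processRecursive(left);
  const b = await processRecursive(right);

  // Shift the right half's boxes back into full-image coordinates, then
  // drop faces whose boxes land on (roughly) the same spot in both halves.
  const shifted = b.map((face) => shiftBox(face, halfW - overlap)); // hypothetical
  return dedupe([...a, ...shifted]);                                // hypothetical
}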