save result of ndarray to image - node.js

I have an image that I take a snippet of, and I am trying to save the snippet to a file. My steps are:
get the image using getPixels
snip a section of the ndarray (my goal is the top-left corner)
save the ndarray as a new image
My code:
var getPixels = require("get-pixels");
var fs = require("fs");
var savePixels = require("save-pixels");
let img='path_to_img_file'
getPixels(img, "image/jpeg", function (err, pixels) {
if (err) {
console.log("Bad image", err);
return;
}
pixels = pixels.hi(50, 50, 50); //subset
savePixels(pixels, "png").pipe((p) =>
fs.createWriteStream(p, "shot.png")
);
});
My error:
Unhandled error. ('No data provided')

This worked for me:
var getPixels = require("get-pixels");
var fs = require("fs");
var savePixels = require("save-pixels");
let img='./output.png'
getPixels(img, "image/png", function (err, pixels) {
if (err) {
console.log("Bad image", err);
return;
}
// Save to output2.png
savePixels(pixels, "png").pipe(fs.createWriteStream("./output2.png"));
});
I think your issue was that you did
.pipe((p) =>
and the usual syntax, to the best of my knowledge, is
readableStream.pipe(writeableStream)
rather than
function.pipe((readableStream) => writeableStream.pipe(readableStream));
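Applying that to the original crop-and-save case, a minimal sketch might look like this (assuming a JPEG source; path_to_img_file is the placeholder from the question, and only the first two dimensions are clipped with .hi() so all colour channels are kept):

var getPixels = require("get-pixels");
var savePixels = require("save-pixels");
var fs = require("fs");

getPixels("path_to_img_file", "image/jpeg", function (err, pixels) {
  if (err) {
    console.log("Bad image", err);
    return;
  }
  // clip to the top-left 50x50 region, keeping every channel
  var corner = pixels.hi(50, 50, pixels.shape[2]);
  // pipe the PNG-encoded readable stream into a writable file stream
  savePixels(corner, "png").pipe(fs.createWriteStream("shot.png"));
});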

Related

Identify similar images in a folder based on an image from the scanner

I have a question that I can't figure out and I do not know where to begin. I use the following technologies: React Native for the mobile application, and the backend is built with Node.js.
What I'm trying to do: the user takes a picture of a product, and with the help of the mobile application the captured image is saved in a directory on the server, let's say [image_phone]. I managed to do this part. On the server I also have a directory [app_images] where I keep images of products with specific names.
What I want is a function that returns, from the folder [app_images], all the images similar to the one the user uploaded to the folder [image_phone].
What I have tried so far:
1) Following a tutorial on the Internet, I tried to solve this problem using the following modules: jimp, pixelmatch and pngjs.
Code:
const Jimp = require('jimp');
const PNG = require('pngjs').PNG;
const pixelmatch = require('pixelmatch');
exports.compareImage = catchAsync(async(req,res,next)=>{
const originalFile = imageRoot+'/imagine3.png';
const compareFile = compareRoot+'/Screenshot_72.png';
const createBufferImage = async (url) => {
return new Promise(async (resolve, reject) => {
await Jimp.read(url, async (err, image) => {
if (err) {
console.log(`error reading the image with Jimp: ${err}`);
reject(err);
}
image.resize(400, 400);
return image.getBuffer(Jimp.MIME_PNG, (err, buffer) => {
if (err) {
console.log(`error converting the url to a buffer: ${err}`);
reject(err);
}
resolve(buffer);
});
});
});
};
const compareImageApp = async (
capture,
app_image
) => {
try {
console.log('> Start compare');
const img1Buffer = await createBufferImage(capture);
const img2Buffer = await createBufferImage(app_image);
const img1 = PNG.sync.read(img1Buffer);
const img2 = PNG.sync.read(img2Buffer);
const { width, height } = img1;
const diff = new PNG({ width, height });
const difference = pixelmatch(
img1.data,
img2.data,
diff.data,
width,
height,
{
threshold: 0,
}
);
const compatibility = 100 - (difference * 100) / (width * height);
console.log(`${difference} pixels different`);
console.log(`Compatibility: ${compatibility}%`);
console.log('< Mission complete');
return compatibility;
} catch (error) {
console.log(`Error comparing the images: ${error}`);
throw error;
}
};
// await the comparison so any error propagates to catchAsync
await compareImageApp(originalFile, compareFile);
res.status(200).json({
status:'success'
})
})
Result:
But the accuracy is not really there: there are images that do not resemble each other at all, yet it reports a similarity of over 50%.
2) I also tried to solve this problem with another module, https://www.npmjs.com/package/rembrandt, but I ran into the same accuracy problem.
Now I'm trying to solve this problem using TensorFlow.js, to build a kind of image recognition app, something like facial recognition.
Do you have any ideas on how to solve this problem, or have you faced it before?
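One thing to keep in mind is that pixelmatch does a strict pixel-by-pixel comparison, so two unrelated photos can still overlap on enough pixels (backgrounds, white areas) to score above 50%. A perceptual-hash comparison is usually more robust for "is this the same product?" questions. A minimal sketch using Jimp's built-in perceptual-hash distance (uploadedPath and appImageFiles below are placeholders for your own paths, and the 0.8 threshold is an arbitrary value to tune against your data):

const Jimp = require('jimp');

// similarity between 0 and 1 based on perceptual hashes, which tolerates
// resizing, small crops and compression artifacts far better than raw pixels
const similarity = async (pathA, pathB) => {
  const [imgA, imgB] = await Promise.all([Jimp.read(pathA), Jimp.read(pathB)]);
  return 1 - Jimp.distance(imgA, imgB); // Jimp.distance: 0 = identical, 1 = completely different
};

// keep only the app_images entries that look like the uploaded photo
const findSimilar = async (uploadedPath, appImageFiles) => {
  const matches = [];
  for (const file of appImageFiles) {
    if (await similarity(uploadedPath, file) > 0.8) matches.push(file);
  }
  return matches;
};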

fs.writeFile() loses image data

I use a POST request to upload a picture and store the image data on my server, but some of the image data is lost:
let storePic = function(imgData) {
const base64Data = imgData.replace(/^data:image\/\w+;base64,/, "");
const dataBuffer = new Buffer.alloc(5000,base64Data, 'base64')
fs.writeFile(imgPath, dataBuffer, (err) => {
if (err) {
console.log('fail to store image')
} else {
console.log('success to store image')
}
})
}
When I get the image from the server, it is broken:
You should use Buffer.from(base64Data, 'base64') instead; Buffer.alloc(5000, ...) gives you a fixed 5000-byte buffer, so anything larger gets truncated.
IMO it's slightly better to match out the image data rather than just presume it's there:
let matches = imgData.match(/^data:([A-Za-z-+\/]+);base64,(.+)$/)
if (!matches || matches.length !== 3) throw new Error('Invalid base64 image URI')
// matches[1] contains the mime-type, which is handy for a lot of things
fs.writeFile(imgPath, Buffer.from(matches[2], 'base64'), (err) => {
  if (err) console.log('fail to store image')
  else console.log('success to store image')
})
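For example, that mime-type can be used to pick the file extension instead of hard-coding one (a small sketch; the uploads directory and timestamp naming are just placeholders):

// e.g. "data:image/png;base64,..." -> mime-type "image/png" -> extension "png"
const extension = matches[1].split('/')[1]
const imgPath = `./uploads/${Date.now()}.${extension}`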

How to detect more than 10 faces with the Google Vision API

Hi, I am new to the Google Vision APIs. I want to detect the faces in an image using Node.js. The local image contains more than 10 faces, but the Vision API returns only 10 face detections. Is there any way to detect all the faces using this Vision API? Please refer to the Vision Node API.
You can take this image as a reference.
Here is my code:
function findFaceontheImage(req, res, next) {
var vision = Vision();
var inputfile = 'NASA_Astronaut_Group_15.jpg';
var outputFile = 'out.png';
vision.faceDetection({source: {filename: inputfile}})
.then(function (results) {
const faces = results[0].faceAnnotations;
console.log('Faces:');
req.body['faces']=results;
var numFaces = faces.length;
console.log('Found ' + numFaces + (numFaces === 1 ? ' face' : ' faces'));
highlightFaces(inputfile, faces, outputFile, Canvas, function (err) {
if (err) {
next()
}
console.log("Finished!");
next()
});
})
.catch(function (err) {
console.error('ERROR:', err);
});
}
function highlightFaces(inputFile, faces, outputFile, Canvas, callback) {
fs.readFile(inputFile, function (err, image) {
if (err) {
return callback(err);
}
var Image = Canvas.Image;
// Open the original image into a canvas
var img = new Image();
img.src = image;
var canvas = new Canvas(img.width, img.height);
var context = canvas.getContext("2d");
context.drawImage(img, 0, 0, img.width, img.height);
// Now draw boxes around all the faces
context.strokeStyle = "rgba(0,255,0,0.8)";
context.lineWidth = "5";
faces.forEach(function (face) {
context.beginPath();
var origX = 0;
var origY = 0;
face.boundingPoly.vertices.forEach(function (bounds, i) {
if (i === 0) {
origX = bounds.x;
origY = bounds.y;
}
context.lineTo(bounds.x, bounds.y);
});
context.lineTo(origX, origY);
context.stroke();
});
// Write the result to a file
console.log("Writing to file " + outputFile);
var writeStream = fs.createWriteStream(outputFile);
var pngStream = canvas.pngStream();
pngStream.on("data", function (chunk) {
writeStream.write(chunk);
});
pngStream.on("error", console.log);
pngStream.on("end", callback);
});
}
In case there are other people who are still struggling with this topic: with the Node.js client library, you can pass the ImprovedRequest object to the client.faceDetection(..) method instead of just a file path or image URI. That also lets you set maxResults explicitly, which appears to be what lifts the 10-result cap the question is running into.
For example, in my case I want the API to process an image in my GCS bucket. So instead of passing the image URI as a string, I'd do something like below.
import { protos } from '@google-cloud/vision';
// BEFORE
const [result] = await CLIENT.faceDetection(`gs://${bucketName}/${filePath}`);
// AFTER
const [result] = await CLIENT.faceDetection({
image: {
source: { imageUri: `gs://${bucketName}/${filePath}` }
},
features: [
{
maxResults: 100,
type: protos.google.cloud.vision.v1.Feature.Type.FACE_DETECTION,
},
],
});
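The same request shape works for the original local-file case too. A minimal sketch, assuming the NASA_Astronaut_Group_15.jpg file from the question and a freshly constructed client (raw bytes go in image.content instead of a GCS URI):

const fs = require('fs');
const { ImageAnnotatorClient, protos } = require('@google-cloud/vision');

const client = new ImageAnnotatorClient();

async function detectAllFaces(path) {
  const [result] = await client.faceDetection({
    image: { content: fs.readFileSync(path) }, // raw image bytes
    features: [
      {
        type: protos.google.cloud.vision.v1.Feature.Type.FACE_DETECTION,
        maxResults: 100, // lift the default cap on returned annotations
      },
    ],
  });
  return result.faceAnnotations;
}

// detectAllFaces('NASA_Astronaut_Group_15.jpg').then((faces) => console.log(faces.length));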
Just in case no one comes up with a solution that forces the API to return more results, here is some pseudocode:
def process(image)
faces = process image
return faces if faces.size < 10
split image into two a bit overlapping half1 and half2
# we do overlapping because splitting may split a face
a = process(half1)
b = process(half2)
return a + b - intersection(a, b)
The intersection function should throw out the faces that sit at the same coordinates (allowing for a few pixels of error), after accounting for the shift between half1 and half2 within the original image.
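A rough Node.js sketch of that idea, assuming Jimp for the cropping (it is used elsewhere on this page) and a detectFaces(buffer) helper that calls the Vision client with image bytes, like the one shown above; the 10% overlap and 20-pixel dedup tolerance are arbitrary choices:

const Jimp = require('jimp');

// Detect faces in each half of the image, then drop duplicates found in the
// overlapping strip. Note that the right-half boundingPolys still use the
// cropped image's coordinate system.
async function detectWithSplit(path, detectFaces) {
  const img = await Jimp.read(path);
  const { width, height } = img.bitmap;

  const faces = await detectFaces(await img.getBufferAsync(Jimp.MIME_JPEG));
  if (faces.length < 10) return faces; // nothing was cut off by the cap

  const overlap = Math.round(width * 0.1); // overlap so a face is never split in two
  const rightX = Math.round(width / 2) - overlap;
  const leftHalf = img.clone().crop(0, 0, Math.round(width / 2) + overlap, height);
  const rightHalf = img.clone().crop(rightX, 0, width - rightX, height);

  const a = await detectFaces(await leftHalf.getBufferAsync(Jimp.MIME_JPEG));
  const b = await detectFaces(await rightHalf.getBufferAsync(Jimp.MIME_JPEG));

  // shift right-half coordinates back into the full image before comparing
  const corner = (face) => face.boundingPoly.vertices[0] || {};
  const merged = [...a];
  for (const face of b) {
    const x = (corner(face).x || 0) + rightX;
    const y = corner(face).y || 0;
    const duplicate = a.some((other) =>
      Math.abs((corner(other).x || 0) - x) < 20 && Math.abs((corner(other).y || 0) - y) < 20);
    if (!duplicate) merged.push(face);
  }
  return merged;
}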

Read/write binary data to MongoDB in Node.js

I've been able to successfully write binary data (an image) to MongoDB in Node.js. However I can't find clear documentation on how to read it back.
Here's how I'm writing the image to MongoDB:
var imageFile = req.files.myFile;
var imageData = fs.readFileSync(imageFile.path);
var imageBson = {};
imageBson.image = new db.bson_serializer.Binary(imageData);
imageBson.imageType = imageFile.type;
db.collection('images').insert(imageBson, {safe: true},function(err, data) {
I'd appreciate any pointers on reading the image from Mongo using Node. I'm assuming there's a function like "db.bson_deserializer...". Thanks!
Found the answer:
var imageFile = req.files.myFile;
fs.exists(imageFile.path, function(exists) {
if(exists)
{
console.log("File uploaded: " + util.inspect(imageFile));
fs.readFile(imageFile.path, function(err, imageData) {
if (err) {
res.end("Error reading your file on the server!");
}else{
//when saving an object with an image's byte array
var imageBson = {};
//var imageData = fs.readFileSync(imageFile.path);
imageBson.image = new req.mongo.Binary(imageData);
imageBson.imageType = imageFile.mimetype;
console.log("imageBson: " + util.inspect(imageBson));
req.imagesCollection.insert(imageBson, {safe: true},function(err, bsonData) {
if (err) {
res.end({ msg:'Error saving your file to the database!' });
}else{
fs.unlink(imageFile.path); // Deletes the file from the local disk
var imageBson = bsonData[0];
var imageId = imageBson._id;
res.redirect('images/' + imageId);
}
});
}
});
} else {
res.end("Oddly your file was uploaded but doesn't seem to exist!\n" + util.inspect(imageFile));
}
});
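For the "reading it back" part of the question, a minimal sketch assuming an Express-style route and the same imagesCollection as above (the driver returns the stored bytes as a BSON Binary whose .buffer property is a Node Buffer):

app.get('/images/:id', function (req, res) {
  req.imagesCollection.findOne({ _id: new req.mongo.ObjectID(req.params.id) }, function (err, imageBson) {
    if (err || !imageBson) {
      return res.status(404).end('Image not found');
    }
    res.set('Content-Type', imageBson.imageType); // stored alongside the binary data
    res.end(imageBson.image.buffer); // Binary -> underlying Node Buffer
  });
});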
The MongoDB part isn't complicated. Once a Buffer is in the model, just let the db save it. MongoDB converts that into BinData. 80% of this code is just getting an image into and out of a PNG file.
People say don't store images in MongoDB, but icons/thumbs are tiny. Having said that, it might be a good idea to have an icons collection and only store them once using a hash of the image data as the _id.
model class example
class MyModel {
_icon: Buffer
get icon(): Buffer {
return this._icon
}
set icon(value: Buffer) {
this._icon = value
}
}
image helper
static async loadImage(url: string) {
var files = require('../lib/files')
var buffer = await files.urlContents(url, true)
return buffer
}
static async saveImage(image: Buffer, path: string) {
var files = require('../lib/files')
// coming back from MongoDB the field is a BSON Binary, and .buffer holds its raw bytes
files.write(path, image.buffer)
return path
}
files helper
function urlResponse(url, binary) {
var request = require("request")
if (binary)
request = request.defaults({ encoding: null })
return new Promise(function (resolve, reject) {
request(url, function (error, res, body) {
if (error || res.statusCode !== 200 || body.includes('Incomplete response received from application'))
resolve({ statusCode: res?.statusCode !== 200 ? (res?.statusCode || 500) : 500 });
else
resolve(res);
});
});
}
async function urlContents(url, binary) {
var res = await urlResponse(url, binary)
if (binary)
return Buffer.from(res.body)
else
return res.body
}
function write(fileName, contents) {
fs.writeFileSync(fileName, contents)
}
mongodb helper
// ...saving
myModel.icon = await loadImage('http://some.site.com/image.png')
await collection.insertOne(myModel)
// ...getting
myModel = await collection.findOne(query) // now myModel contains icon
await saveImage(myModel.icon, '/home/toddmo/pictures/wow.png')

How to render image as pdf (canvas and pdfkit)

I have a function in Node.js that generates an image from many images and then generates a PDF file from that. I'm trying with just one image (I need to add more), but this doesn't seem to work.
function HelperHandler() {
this.pdf = function(req, res, next) {
var doc = new PDFDocument;
mergeImages(function(err, image) {
if (err)
return res.json(err);
doc.image(image, 100, 100);
doc.output(function(string) {
res.contentType = "application/pdf";
res.send(string);
});
})
}
}
var mergeImages = function(callback) {
var Canvas = require("canvas")
, fs = require("fs");
fs.readFile(global.root_path + "/images/bg.jpg", function(err, data) {
if (err)
callback("error loading image");
else {
var canvas = new Canvas(408, 939)
, img = new Canvas.Image(data);
ctx = canvas.getContext("2d");
img.onload = function() {
ctx.drawImage(img, 0, 0, 408, 939);
}
canvas.toDataURL('image/png', function(err, str) {
callback(null, str);
});
}
});
}
Error
Error: ENAMETOOLONG, name too long 'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAZgAAAOrCAYAAABqSpB/AAAABmJLR0QA/wD/AP+gvaeTAAAF5UlEQVR4nO3BMQEAAADCoPVPbQo/oAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAICjAWckAAHF4EUcAAAAAElFTkSuQmCC'
at Object.openSync (fs.js:427:18)
at Object.readFileSync (fs.js:284:15)
at Function.open (/Users/jtomasrl/code/app/server/node_modules/pdfkit/js/image.js:27:28)
at PDFDocument.image (/Users/jtomasrl/code/app/server/node_modules/pdfkit/js/mixins/images.js:27:26)
at /Users/jtomasrl/code/app/server/lib/handler/current/helper.js:15:11
at /Users/jtomasrl/code/app/server/lib/handler/current/helper.js:41:9
at /Users/jtomasrl/code/app/server/node_modules/canvas/lib/canvas.js:217:7
You can use a buffer or a path with PDFKit's image().
But you can't use a base64 data URL; you need to decode the string to a buffer first.
To use base64 data:
doc.image(Buffer.from(image.replace('data:image/png;base64,', ''), 'base64'), 100, 100); // decode the base64 data URL into a buffer
More information on base64 de/encode with Node Buffer here.
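Alternatively, you can skip the base64 round trip entirely and hand PDFKit a Buffer straight from node-canvas. A sketch of the mergeImages flow along those lines, assuming a recent node-canvas (2.x) where createCanvas, loadImage and canvas.toBuffer are available (global.root_path is kept from the question):

const { createCanvas, loadImage } = require('canvas');

async function mergeImages() {
  const canvas = createCanvas(408, 939);
  const ctx = canvas.getContext('2d');

  const img = await loadImage(global.root_path + '/images/bg.jpg');
  ctx.drawImage(img, 0, 0, 408, 939);
  // additional images could be drawn here before exporting

  return canvas.toBuffer('image/png'); // a Buffer that PDFKit can consume directly
}

// inside the handler:
// const image = await mergeImages();
// doc.image(image, 100, 100);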
