I am using Resemble.js for comparing two images in Node. I am getting the error "error while reading from input stream" when fetching an external URL image.
The code works fine when I access the images locally. Thanks in advance.
const http = require('http-server');
const compareImages = require('resemblejs/compareImages');
const resemble = require('resemblejs');
const fs = require("mz/fs");
const request = require('request');
const FileReader = require('filereader');
const stream = require('stream');
var file = "https://static-cdn.jtvnw.net/previews-ttv/live_user_combatgo-640x360.jpg";
var file2 = "https://static-cdn.jtvnw.net/previews-ttv/live_user_combatgo-640x360.jpg";
// var file3 = "./kritika.png";
// var file4 = "./some gal.png"
async function getDiff() {
  const options = {
    output: {
      errorColor: {
        red: 255,
        green: 0,
        blue: 255
      },
      errorType: 'movement',
      transparency: 0.3,
      largeImageThreshold: 1200,
      useCrossOrigin: false,
      outputDiff: true
    },
    scaleToSameSize: true,
    ignore: ['nothing', 'less', 'antialiasing', 'colors', 'alpha'],
  };
  // request.get(file, (err, res) => {
  //   this.newFile = file;
  //   console.log("SAMPLE RECEPT", newFile);
  //   return newFile;
  // });
  var diff = resemble(file).compareTo(file2).onComplete(
    (data) => {
      console.log("Data", data);
    }
  );
  // await fs.writeFile('./output.png', data.getBuffer());
  // The parameters can be Node Buffers
  // data is the same as usual with an additional getBuffer() function
}
getDiff();
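As the trailing comments hint, compareImages from resemblejs/compareImages accepts Node Buffers, so one way around the input-stream error is to download each remote image into a Buffer first. A minimal sketch (the download helper and the output path are illustrative, not part of the original code):

const https = require('https');
const fs = require('mz/fs');
const compareImages = require('resemblejs/compareImages');

// Collect an HTTP(S) response into a single Buffer
function download(url) {
  return new Promise((resolve, reject) => {
    https.get(url, (res) => {
      const chunks = [];
      res.on('data', (chunk) => chunks.push(chunk));
      res.on('end', () => resolve(Buffer.concat(chunks)));
    }).on('error', reject);
  });
}

async function getDiffFromUrls(url1, url2, options) {
  const [img1, img2] = await Promise.all([download(url1), download(url2)]);
  // data is the same as usual, with the additional getBuffer() function
  const data = await compareImages(img1, img2, options);
  await fs.writeFile('./output.png', data.getBuffer());
  return data;
}

// usage, reusing the URLs and the options object from getDiff above:
// getDiffFromUrls(file, file2, options);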
I am trying to upload a file onto IPFS.
My code is below:
var express = require('express')
var fs = require('fs')
const { create, globSource } = require('ipfs-http-client')
const ipfs = create()
....
const filesAdded = await ipfs.add(
  { path: fileName, content: file },
  {
    progress: (len) => console.log('Uploading file...' + len),
  },
)
console.log(filesAdded)
const fileHash = filesAdded.cid.toString()
This code returns a hash value, but I can't see my file (an image) at https://ipfs.io/ipfs/{hash}.
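For comparison, a minimal end-to-end sketch, assuming a local IPFS daemon listening on the default API port (the file name is hypothetical). Note that the public gateway can only serve the file once it can reach your node, so the URL may 404 until the content propagates or is pinned on a publicly reachable node:

const fs = require('fs')
const { create } = require('ipfs-http-client')

async function upload() {
  const ipfs = create() // connects to the local daemon's default API address
  const content = fs.readFileSync('./photo.jpg') // hypothetical file
  const { cid } = await ipfs.add({ path: 'photo.jpg', content })
  console.log(`https://ipfs.io/ipfs/${cid.toString()}`)
}

upload()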
I have gone through the Google Cloud Platform API documentation and followed the GCP doc steps correctly, but I am still unable to fix the encoding error shown below. I'm trying to translate an audio clip from en-US (English) to hi-IN (Hindi), and it would be helpful if you could suggest some alternative ways to solve this.
function main(filename, encoding, sourceLanguage, targetLanguage) {
  const fs = require('fs');
  const {
    SpeechTranslationServiceClient,
  } = require('@google-cloud/media-translation');

  const client = new SpeechTranslationServiceClient();

  async function quickstart() {
    const filename = './16kmonoceo.wav';
    const encoding = 'LINEAR16';
    const sourceLanguage = 'en-US';
    const targetLanguage = 'hi-IN';

    const config = {
      audioConfig: {
        audioEncoding: encoding,
        sourceLanguageCode: sourceLanguage,
        targetLanguageCode: targetLanguage,
      },
    };

    const initialRequest = {
      streamingConfig: config,
      audioContent: null,
    };

    const readStream = fs.createReadStream(filename, {
      highWaterMark: 4096,
      encoding: 'base64',
    });

    const chunks = [];
    readStream
      .on('data', chunk => {
        const request = {
          streamingConfig: config,
          audioContent: chunk.toString(),
        };
        chunks.push(request);
      })
      .on('close', () => {
        // Config-only request should be first in stream of requests
        stream.write(initialRequest);
        for (let i = 0; i < chunks.length; i++) {
          stream.write(chunks[i]);
        }
        stream.end();
      });

    const stream = client.streamingTranslateSpeech().on('data', response => {
      const {result} = response;
      if (result.textTranslationResult.isFinal) {
        console.log(
          `\nFinal translation: ${result.textTranslationResult.translation}`
        );
        console.log(`Final recognition result: ${result.recognitionResult}`);
      } else {
        console.log(
          `\nPartial translation: ${result.textTranslationResult.translation}`
        );
        console.log(`Partial recognition result: ${result.recognitionResult}`);
      }
    });
  }

  quickstart();
}

main(...process.argv.slice(2));
Here is my error from the command line:
[screenshot of the command-line error message]
I'm using Windows 10 and VS Code as my IDE.
This is a case where careful reading of the error message helps.
Some module gacked on "LINEAR16" as the audioEncoding value, saying there's no encoding with that name.
A quick look at the documentation shows "linear16" (lower case) as the value to use.
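For reference, only the audioEncoding value in the config from the question needs to change:

const config = {
  audioConfig: {
    audioEncoding: 'linear16', // lower case; 'LINEAR16' is rejected
    sourceLanguageCode: 'en-US',
    targetLanguageCode: 'hi-IN',
  },
};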
I'm building out a firebase function that uses the html-pdf package (which uses PhantomJS). The function works fine on my local machine, but whenever I deploy the function on Firebase, I get the following error:
Error: html-pdf: PDF generation timeout. Phantom.js script did not exit.
I've changed the timeout parameter for pdf.create() and keep getting the same result. Any idea what might be causing this issue only when I deploy to Firebase? Code is below.
const pdf = require('html-pdf');

const runtimeOpts = {
  timeoutSeconds: 540, // in seconds
  memory: '2GB'
}

exports.sendToKindle = functions.runWith(runtimeOpts).https.onRequest(async (req, res) => {
  // REMOVED A BLOCK OF CODE FOR SIMPLICITY, BUT CAN PUT BACK IN IF NEEDED //
  var options = {
    format: 'Letter',
    directory: "/tmp",
    timeout: 540000, // in milliseconds
  };
  const blookFileName = createFileName(blookData.title) + '.pdf';
  const tempFilePath = path.join(os.tmpdir(), `${blookFileName}`);
  const htmlFilePath = path.join(os.tmpdir(), 'book.html');
  const htmlFs = fs.openSync(htmlFilePath, 'w');
  await fs.promises.appendFile(htmlFilePath, bookHTML);
  const fd = fs.openSync(tempFilePath, 'w');
  var html = fs.readFileSync(htmlFilePath, 'utf8');
  let mailgunObject = null;
  pdf.create(html, options).toFile(tempFilePath, async (err, res) => {
    if (err) return console.error(err);
    mailgunObject = await sendEmail(tempFilePath, kindleEmail);
    return console.log(res);
  });
  fs.closeSync(fd);
  fs.closeSync(htmlFs);
  return cors(req, res, () => {
    res.status(200).type('application/json').send({'response': 'Success'})
  })
});
I was able to solve this issue by modifying the code so that pdf.create().toFile() is placed within the return of the cloud function.
const pdf = require('html-pdf');

const runtimeOpts = {
  timeoutSeconds: 300, // in seconds
  memory: '1GB'
}

exports.sendToKindle = functions.runWith(runtimeOpts).https.onRequest(async (req, res) => {
  // REMOVED A BLOCK OF CODE FOR SIMPLICITY, BUT CAN PUT BACK IN IF NEEDED //
  var options = {
    format: 'Letter',
    directory: "/tmp",
    timeout: 540000, // in milliseconds
  };
  const blookFileName = createFileName(blookData.title) + '.pdf';
  const tempFilePath = path.join(os.tmpdir(), `${blookFileName}`);
  const htmlFilePath = path.join(os.tmpdir(), 'book.html');
  const htmlFs = fs.openSync(htmlFilePath, 'w');
  await fs.promises.appendFile(htmlFilePath, bookHTML);
  const fd = fs.openSync(tempFilePath, 'w');
  var html = fs.readFileSync(htmlFilePath, 'utf8');
  return cors(req, res, () => {
    pdf.create(html, options).toFile(tempFilePath, async (err, res) => {
      if (err) return console.error(err);
      let mailgunObject = await sendEmail(tempFilePath, kindleEmail);
      fs.closeSync(fd);
      fs.closeSync(htmlFs);
      return console.log(res);
    });
    res.status(200).type('application/json').send({'response': 'Success'})
  })
});
I had the same issue. Actually, I realized that when I called the function using html-pdf through Postman, or simply through a request in Google Chrome, the PDF would generate within 2 or 3 seconds, whereas it took more like 2 or 3 minutes when calling it directly from my app.
So this is what I did: I put html-pdf in a separate function that I deployed, and then called it:
const https = require('https');
https.get('https://us-central1-your-project-name.cloudfunctions.net/your-function-using-html-pdf');
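A sketch of that two-function idea, with hypothetical names (renderPdf and a placeholder HTML string); this is not the answer's exact code, just the shape of it: the heavy html-pdf work lives in its own HTTP function, and the caller triggers it over HTTPS.

const functions = require('firebase-functions');
const pdf = require('html-pdf');

// Dedicated function: all it does is render a PDF and stream it back.
exports.renderPdf = functions
  .runWith({ timeoutSeconds: 540, memory: '2GB' })
  .https.onRequest((req, res) => {
    const html = '<h1>Hello</h1>'; // placeholder; build or load the real HTML here
    pdf.create(html, { format: 'Letter' }).toBuffer((err, buffer) => {
      if (err) return res.status(500).send(err.toString());
      res.type('application/pdf').send(buffer);
    });
  });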
I am developing a face detection application, for which I need to collect users' images as references so they can be detected later. I have successfully uploaded the images to a MySQL database. Now I need to put the images into the React public folder so they can be matched against the camera feed, and I am stuck at that step. Please help me out.
This is the React code; the image to be detected is referenced by the imgUrl variable:
detect = async () => {
  const videoTag = document.getElementById("videoTag");
  const canvas = document.getElementById("myCanvas");
  const displaySize = { width: videoTag.width, height: videoTag.height };
  faceapi.matchDimensions(canvas, displaySize);
  // setInterval starts here for continuous detection
  time = setInterval(async () => {
    let fullFaceDescriptions = await faceapi
      .detectAllFaces(videoTag)
      .withFaceLandmarks()
      .withFaceExpressions()
      .withFaceDescriptors();
    const value = fullFaceDescriptions.length;
    this.setState({ detection: value });
    fullFaceDescriptions = faceapi.resizeResults(
      fullFaceDescriptions,
      displaySize
    );
    canvas.getContext("2d").clearRect(0, 0, canvas.width, canvas.height);
    // Label images
    var dummy = ["praveen", "vikranth", "Gokul", "Rahul"];
    const labels = nameArray1;
    // const labels = ["praveen", "vikranth", "Gokul", "Rahul"];
    if (no_of_times <= 0) {
      if (no_of_times === 0) {
        labeledFaceDescriptors = await Promise.all(
          labels.map(async (label) => {
            // fetch image data from urls and convert blob to HTMLImage element
            const imgUrl = `/img/${label}.png`; // for testing purposes
            // const imgUrl = testImage;
            const img = await faceapi.fetchImage(imgUrl);
            const fullFaceDescription = await faceapi
              .detectSingleFace(img)
              .withFaceLandmarks()
              .withFaceExpressions()
              .withFaceDescriptor();
            if (!fullFaceDescription) {
              throw new Error(`no faces detected for ${label}`);
            }
            const faceDescriptors = [fullFaceDescription.descriptor];
            return new faceapi.LabeledFaceDescriptors(label, faceDescriptors);
          })
        );
        // console.log(no_of_times);
      }
    }
    const maxDescriptorDistance = 0.7;
    no_of_times++;
    const faceMatcher = new faceapi.FaceMatcher(
      labeledFaceDescriptors,
      maxDescriptorDistance
    );
    const results = fullFaceDescriptions.map((fd) =>
      faceMatcher.findBestMatch(fd.descriptor)
    );
    result = [];
    results.forEach((bestMatch, i) => {
      const box = fullFaceDescriptions[i].detection.box;
      // console.log(box)
      const text = bestMatch.toString(); // this is for the bestMatch name detection
      var str = "";
      // remove the name's confidence suffix so values map without duplicates
      var val = text.replace(/[0-9]/g, "");
      for (let i of val) {
        if (i !== " ") {
          str += i;
        } else {
          break;
        }
      }
      if (result.includes(str) === false) result.push(str);
      const drawBox = new faceapi.draw.DrawBox(box, { label: text });
      drawBox.draw(canvas);
      faceapi.draw.drawFaceExpressions(canvas, fullFaceDescriptions, 0.85);
    });
    for (let i = 0; i < fullFaceDescriptions.length; i++) {
      const result1 = fullFaceDescriptions[i].expressions.asSortedArray()[i];
      // console.log(result[i]);
      // console.log(result1.expression);
      this.test(result[i], result1.expression);
    }
  }, 100);
};
In the above code I am manually putting the image in the public folder; this needs to happen dynamically when a user uploads an image.
This is where I get the images in base64 from Node.js:
axios.get("/image").then((res) => {
testImage = res.data;
// console.log("from image" + res.data);
imgback = <img src={`data:image/jpeg;base64,${res.data}`} />;
});
This is the Node.js code for the GET request from React:
app.get("/image", (req, res) => {
connection.query("SELECT * FROM images", (error, row, fields) => {
if (!!error) {
console.log("Error in the query");
} else {
console.log("successful query");
var buffer = new Buffer(row[0].image, "binary");
var bufferBase64 = buffer.toString("base64");
res.send(bufferBase64);
}
});
});
My goal is that the imgUrl variable in the React code needs to point at the image folder, so I need to add images to that folder dynamically.
Or is there any other way to directly give an image array to the imgUrl variable? Please help me sort out this problem.
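One possibility, sketched below and untested: skip the public folder entirely and build a data: URL from the base64 string the Node.js endpoint already returns. Since faceapi.fetchImage fetches whatever URL it is given, and fetch() accepts data: URLs in modern browsers, the label images could come straight from the database. Inside the labels.map(async (label) => { ... }) callback from the detect code above, that would look roughly like:

const res = await axios.get("/image"); // base64 string from the Node.js endpoint
const dataUrl = `data:image/jpeg;base64,${res.data}`;
const img = await faceapi.fetchImage(dataUrl);
const fullFaceDescription = await faceapi
  .detectSingleFace(img)
  .withFaceLandmarks()
  .withFaceDescriptor();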
Background
Using the Google Vision API (with Node) to recognize Vietnamese text, the results lack quality. Some (not all, but some) tone markers and vowel signifiers are missing.
Compared to their online demo, which returns a decent result (scroll down for the live demo):
https://cloud.google.com/vision/
(As I do not have a company account with them, I cannot ask Google directly.)
Question
Can I tweak my request to get better results?
I already set the language hint to "vi" and tried to combine it with "en". I also tried the more specific "vi-VN".
Example Image
https://www.tecc.org/Slatwall/custom/assets/images/product/default/cache/j056vt-_800w_800h_sb.jpg
Example Code
const fs = require("fs");
const path = require("path");
const vision = require("@google-cloud/vision");

async function quickstart() {
  let text;
  const fileName = "j056vt-_800w_800h_sb.jpg";
  const imageFile = fs.readFileSync(fileName);
  const image = Buffer.from(imageFile).toString("base64");
  const client = new vision.ImageAnnotatorClient();

  const request = {
    image: {
      content: image
    },
    imageContext: {
      languageHints: ["vi", "en"]
    }
  };

  const [result] = await client.textDetection(request);
  for (const tmp of result.textAnnotations) {
    text += tmp.description + "\n";
  }

  const out = path.basename(fileName, path.extname(fileName)) + ".txt";
  fs.writeFileSync(out, text);
}

quickstart();
Solution
// $env:GOOGLE_APPLICATION_CREDENTIALS="[PATH]"
const fs = require("fs");
const path = require("path");
const vision = require("@google-cloud/vision");

async function quickstart() {
  let text = '';
  const fileName = "j056vt-_800w_800h_sb.jpg";
  const imageFile = fs.readFileSync(fileName);
  const image = Buffer.from(imageFile).toString("base64");
  const client = new vision.ImageAnnotatorClient();

  const request = {
    image: {
      content: image
    },
    imageContext: {
      languageHints: ["vi-VN"]
    }
  };

  const [result] = await client.documentTextDetection(request);

  // OUTPUT METHOD A
  for (const tmp of result.textAnnotations) {
    text += tmp.description + "\n";
  }
  console.log(text);

  const out = path.basename(fileName, path.extname(fileName)) + ".txt";
  fs.writeFileSync(out, text);

  // OUTPUT METHOD B
  const fullTextAnnotation = result.fullTextAnnotation;
  console.log(`Full text: ${fullTextAnnotation.text}`);
  fullTextAnnotation.pages.forEach(page => {
    page.blocks.forEach(block => {
      console.log(`Block confidence: ${block.confidence}`);
      block.paragraphs.forEach(paragraph => {
        console.log(`Paragraph confidence: ${paragraph.confidence}`);
        paragraph.words.forEach(word => {
          const wordText = word.symbols.map(s => s.text).join("");
          console.log(`Word text: ${wordText}`);
          console.log(`Word confidence: ${word.confidence}`);
          word.symbols.forEach(symbol => {
            console.log(`Symbol text: ${symbol.text}`);
            console.log(`Symbol confidence: ${symbol.confidence}`);
          });
        });
      });
    });
  });
}

quickstart();
This question is already answered in this one.
In summary, the demo is probably using DOCUMENT_TEXT_DETECTION, which can sometimes perform a more thorough string extraction, while you are using TEXT_DETECTION.
You can try making a client.documentTextDetection request instead of client.textDetection, and you will probably get results closer to the demo.
If you want to read the related documentation, you can find it here.
I hope this resolves your question!