My object shows all black in STL Reader of vtk.js

I am using vtk.js in my Angular app to display 3D STL objects. I know STL files don't carry color info, but my 3D object should at least render white, with its surface details visible. Instead, it renders completely black, with no detail at all.
import vtkFullScreenRenderWindow from 'vtk.js/Sources/Rendering/Misc/FullScreenRenderWindow';
import vtkActor from 'vtk.js/Sources/Rendering/Core/Actor';
import vtkMapper from 'vtk.js/Sources/Rendering/Core/Mapper';
import vtkSTLReader from 'vtk.js/Sources/IO/Geometry/STLReader';

ngOnInit(): void {
  const reader = vtkSTLReader.newInstance();
  const mapper = vtkMapper.newInstance({ scalarVisibility: false });
  const actor = vtkActor.newInstance();
  actor.setMapper(mapper);
  mapper.setInputConnection(reader.getOutputPort());

  function update() {
    const fullScreenRenderer = vtkFullScreenRenderWindow.newInstance();
    const renderer = fullScreenRenderer.getRenderer();
    const renderWindow = fullScreenRenderer.getRenderWindow();
    const resetCamera = renderer.resetCamera;
    const render = renderWindow.render;
    renderer.addActor(actor);
    resetCamera();
    render();
  }

  const myContainer = document.querySelector('body');
  const fileContainer = document.createElement('div');
  fileContainer.innerHTML = '<input type="file" class="file"/>';
  myContainer.appendChild(fileContainer);
  const fileInput = fileContainer.querySelector('input');

  function handleFile(event) {
    event.preventDefault();
    const dataTransfer = event.dataTransfer;
    const files = event.target.files || dataTransfer.files;
    if (files.length === 1) {
      myContainer.removeChild(fileContainer);
      const fileReader = new FileReader();
      fileReader.onload = function onLoad(e) {
        reader.parseAsArrayBuffer(fileReader.result);
        update();
      };
      fileReader.readAsArrayBuffer(files[0]);
    }
  }

  fileInput.addEventListener('change', handleFile);
  reader.setUrl("./assets/test2.stl", { binary: true }).then(update);
}
How can I add color to my object? I couldn't find any example of this either.
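For reference, color in vtk.js comes from the actor's vtkProperty rather than from the STL data. A minimal sketch (untested in this exact Angular setup; the RGB and ambient values are arbitrary) that forces a visible, lit surface:

// Hedged sketch: set an explicit surface color on the actor's property.
// setColor takes normalized RGB in [0, 1]; (1, 1, 1) would be white.
const property = actor.getProperty();
property.setColor(0.9, 0.9, 0.9); // light gray surface
// A little ambient contribution keeps faces from going fully black when unlit.
property.setAmbient(0.2);

If the actor still renders black, the usual suspects are scalar coloring overriding the property (already disabled here via scalarVisibility: false) or the scene's lighting.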

Related

When I use the plaiceholder plugin to blur images in Next.js, why does it crash when there are many images?

I use the following code to blur remote images. With a small number of images it runs fine, but now that I have 50 images the browser hangs after the project starts.
What is the best way to optimize this? Is it caused by the plaiceholder plugin or by Next.js itself?
import fs from 'fs';
import path from 'path';
import { getPlaiceholder } from 'plaiceholder';
import { PhotoData } from '../types';

const directory = path.join(process.cwd(), 'data');

export async function getPhotos() {
  const filePath = path.join(directory, 'photos.json');
  const jsonData = fs.readFileSync(filePath, 'utf8');
  const photosData = JSON.parse(jsonData).sort((a: PhotoData, b: PhotoData) =>
    new Date(b.date) > new Date(a.date) ? 1 : -1
  );
  const photosProps = await Promise.all(
    photosData.map(async (photoData: PhotoData) => {
      const { thumbnail } = photoData;
      const { base64, img } = await getPlaiceholder(thumbnail);
      return {
        ...img,
        blurDataURL: base64,
      };
    })
  );
  return {
    photosProps,
    photosData,
  };
}
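getPlaiceholder has to fetch and process every image it is given, so firing all 50 calls at once through Promise.all means a lot of concurrent work. One common mitigation (a sketch on top of the code above; the mapInBatches helper and the batch size of 5 are my own assumptions, not plaiceholder API) is to process the thumbnails in small batches:

// Hedged sketch: limit concurrency by mapping in batches of `size`
// instead of one big Promise.all over all 50 thumbnails.
async function mapInBatches<T, R>(
  items: T[],
  size: number,
  fn: (item: T) => Promise<R>
): Promise<R[]> {
  const out: R[] = [];
  for (let i = 0; i < items.length; i += size) {
    // Only `size` getPlaiceholder calls run at the same time.
    out.push(...(await Promise.all(items.slice(i, i + size).map(fn))));
  }
  return out;
}

// Usage inside getPhotos(), replacing the Promise.all block:
const photosProps = await mapInBatches(photosData, 5, async (photoData: PhotoData) => {
  const { base64, img } = await getPlaiceholder(photoData.thumbnail);
  return { ...img, blurDataURL: base64 };
});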

Stuck in Bull board adapter with node

I'm a newbie with bull-board.
I read the docs and just copied the code:
....
... express settings
....
const Queue = require("bull");
const { createBullBoard } = require("@bull-board/api");
const { BullAdapter } = require("@bull-board/api/bullAdapter");
const { ExpressAdapter } = require("@bull-board/express");

const someQueue = new Queue("someQueueName");
const someOtherQueue = new Queue("someOtherQueueName");

const serverAdapter = new ExpressAdapter();
const { addQueue, removeQueue, setQueues, replaceQueues } = createBullBoard({
  queues: [new BullAdapter(someQueue), new BullAdapter(someOtherQueue)],
  serverAdapter: serverAdapter,
});

serverAdapter.setBasePath("/admin/queues");
app.use("/admin/queues", serverAdapter.getRouter());
After I access the URL "/admin/queues", I get stuck on a page that shows "...loading".
What am I doing wrong?

Trouble importing an image

I keep getting this error when importing an image and using canvas with discord.js:
C:\Users\Travi\OneDrive\Documents\GitHub\re\src\img\licenseTemp.png:1
�PNG
SyntaxError: Invalid or unexpected token
Here's my code:
I'm also using module-alias; that's why it's const licenseTemp = require('@img').
const BaseCommand = require('../../utils/structures/BaseCommand');
const Canvas = require('canvas');
const { MessageAttachment } = require('discord.js');
const licenseTemp = require('@img');

module.exports = class RankCommand extends BaseCommand {
  constructor() {
    super('rank', 'Information', []);
  }

  async run(client, message, args) {
    const canvas = Canvas.createCanvas(449, 292);
    const ctx = canvas.getContext('2d');
    const background = await Canvas.loadImage(licenseTemp);
    ctx.drawImage(background, 0, 0, canvas.width, canvas.height);
    const attachment = new MessageAttachment(canvas.toBuffer(), 'license.png');
    message.channel.send(attachment);
  }
};
Do not require('@img'); that makes Node try to parse your image's binary content as JavaScript source, which is exactly the SyntaxError you are seeing.
Instead, let node-canvas's loadImage handle fetching that resource: make licenseTemp the path or URL to the image itself.
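A minimal sketch of that fix (the relative path is an assumption based on the C:\...\src\img\licenseTemp.png location in the error message; adjust it to wherever the command file actually lives):

const path = require('path');

// Point at the PNG on disk instead of require()-ing it.
// Hypothetical path: from src/commands/<category>/ up to src/img/.
const licenseTemp = path.join(__dirname, '..', '..', 'img', 'licenseTemp.png');

The rest of run() can stay as-is, since Canvas.loadImage(licenseTemp) accepts a file path and returns a Promise that resolves to an Image.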

Adding image dynamically in public folder in reactjs

I am developing a face detection application. To detect users later, I need to collect their reference images. I have successfully uploaded the images to a MySQL database; now I need to place each image in React's public folder so it can be matched against the camera feed. I am stuck at uploading images into the React public folder. Please help me get past this problem.
This is the React code; the image to be matched is referenced by the imgUrl variable:
detect = async () => {
  const videoTag = document.getElementById("videoTag");
  const canvas = document.getElementById("myCanvas");
  const displaySize = { width: videoTag.width, height: videoTag.height };
  faceapi.matchDimensions(canvas, displaySize);
  // setInterval starts here for continuous detection
  time = setInterval(async () => {
    let fullFaceDescriptions = await faceapi
      .detectAllFaces(videoTag)
      .withFaceLandmarks()
      .withFaceExpressions()
      .withFaceDescriptors();
    const value = fullFaceDescriptions.length;
    this.setState({ detection: value });
    fullFaceDescriptions = faceapi.resizeResults(
      fullFaceDescriptions,
      displaySize
    );
    canvas.getContext("2d").clearRect(0, 0, canvas.width, canvas.height);
    // Label images
    var dummy = ["praveen", "vikranth", "Gokul", "Rahul"];
    const labels = nameArray1;
    // const labels = ["praveen", "vikranth", "Gokul", "Rahul"];
    if (no_of_times <= 0) {
      if (no_of_times === 0) {
        labeledFaceDescriptors = await Promise.all(
          labels.map(async (label) => {
            // fetch image data from urls and convert blob to HTMLImage element
            const imgUrl = `/img/${label}.png`; // for testing purposes
            // const imgUrl = testImage;
            const img = await faceapi.fetchImage(imgUrl);
            const fullFaceDescription = await faceapi
              .detectSingleFace(img)
              .withFaceLandmarks()
              .withFaceExpressions()
              .withFaceDescriptor();
            if (!fullFaceDescription) {
              throw new Error(`no faces detected for ${label}`);
            }
            const faceDescriptors = [fullFaceDescription.descriptor];
            return new faceapi.LabeledFaceDescriptors(label, faceDescriptors);
          })
        );
        // console.log(no_of_times);
      }
    }
    const maxDescriptorDistance = 0.7;
    no_of_times++;
    const faceMatcher = new faceapi.FaceMatcher(
      labeledFaceDescriptors,
      maxDescriptorDistance
    );
    const results = fullFaceDescriptions.map((fd) =>
      faceMatcher.findBestMatch(fd.descriptor)
    );
    result = [];
    results.forEach((bestMatch, i) => {
      const box = fullFaceDescriptions[i].detection.box;
      // console.log(box)
      const text = bestMatch.toString(); // best-match label, e.g. "praveen (0.42)"
      // Strip the numeric confidence so names can be collected without duplicates
      var str = "";
      var val = text.replace(/[0-9]/g, "");
      for (let c of val) {
        if (c !== " ") {
          str += c;
        } else {
          break;
        }
      }
      if (result.includes(str) === false) result.push(str);
      const drawBox = new faceapi.draw.DrawBox(box, { label: text });
      drawBox.draw(canvas);
      faceapi.draw.drawFaceExpressions(canvas, fullFaceDescriptions, 0.85);
    });
    for (let i = 0; i < fullFaceDescriptions.length; i++) {
      const result1 = fullFaceDescriptions[i].expressions.asSortedArray()[i];
      // console.log(result[i]);
      // console.log(result1.expression);
      this.test(result[i], result1.expression);
    }
  }, 100);
};
In the above code I am manually putting the image in the public folder; this needs to happen dynamically when the user uploads an image.
This is where I receive the images as base64 from Node.js:
axios.get("/image").then((res) => {
testImage = res.data;
// console.log("from image" + res.data);
imgback = <img src={`data:image/jpeg;base64,${res.data}`} />;
});
This is the Node.js code that handles the GET request from React:
app.get("/image", (req, res) => {
connection.query("SELECT * FROM images", (error, row, fields) => {
if (!!error) {
console.log("Error in the query");
} else {
console.log("successful query");
var buffer = new Buffer(row[0].image, "binary");
var bufferBase64 = buffer.toString("base64");
res.send(bufferBase64);
}
});
});
My goal: the imgUrl variable in the React code needs to point at the image folder, so I need to add images to that folder dynamically.
Or is there any other way to feed the image data directly into the imgUrl variable? Please help me sort out this problem.
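One way to avoid writing files into the public folder at all (a sketch, assuming the /image endpoint above keeps returning base64-encoded JPEGs; the base64ToImage helper is my own, not a face-api.js function) is to turn the base64 payload into an HTMLImageElement and hand that to face-api.js directly, since detectSingleFace accepts an image element as input:

// Hedged sketch: build an HTMLImageElement from the base64 string returned
// by the /image endpoint, instead of fetching `/img/${label}.png` from /public.
function base64ToImage(base64) {
  return new Promise((resolve, reject) => {
    const img = new Image();
    img.onload = () => resolve(img);
    img.onerror = reject;
    img.src = `data:image/jpeg;base64,${base64}`;
  });
}

// Inside the labelling code, replace faceapi.fetchImage(imgUrl) with:
// const img = await base64ToImage(testImage);
// const fullFaceDescription = await faceapi.detectSingleFace(img)...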

Google Vision | Vietnamese: Low Quality OCR Results

Background
Using the Google Vision API (with Node) to recognize Vietnamese text, the results lack quality. Some (not all, but some) tone markers and vowel signs are missing.
Compared to their online demo, which returns a decent result (scroll down for live demo):
https://cloud.google.com/vision/
(As I do not have a company account with them, I cannot ask Google directly.)
Question
Can I tweak my request to get better results?
I already set the language hint to "vi" and tried to combine it with "en". I also tried the more specific "vi-VN".
Example Image
https://www.tecc.org/Slatwall/custom/assets/images/product/default/cache/j056vt-_800w_800h_sb.jpg
Example Code
const fs = require("fs");
const path = require("path");
const vision = require("@google-cloud/vision");

async function quickstart() {
  let text;
  const fileName = "j056vt-_800w_800h_sb.jpg";
  const imageFile = fs.readFileSync(fileName);
  const image = Buffer.from(imageFile).toString("base64");
  const client = new vision.ImageAnnotatorClient();
  const request = {
    image: {
      content: image
    },
    imageContext: {
      languageHints: ["vi", "en"]
    }
  };
  const [result] = await client.textDetection(request);
  for (const tmp of result.textAnnotations) {
    text += tmp.description + "\n";
  }
  const out = path.basename(fileName, path.extname(fileName)) + ".txt";
  fs.writeFileSync(out, text);
}

quickstart();
Solution
// $env:GOOGLE_APPLICATION_CREDENTIALS="[PATH]"
const fs = require("fs");
const path = require("path");
const vision = require("@google-cloud/vision");

async function quickstart() {
  let text = '';
  const fileName = "j056vt-_800w_800h_sb.jpg";
  const imageFile = fs.readFileSync(fileName);
  const image = Buffer.from(imageFile).toString("base64");
  const client = new vision.ImageAnnotatorClient();
  const request = {
    image: {
      content: image
    },
    imageContext: {
      languageHints: ["vi-VN"]
    }
  };
  const [result] = await client.documentTextDetection(request);

  // OUTPUT METHOD A
  for (const tmp of result.textAnnotations) {
    text += tmp.description + "\n";
  }
  console.log(text);
  const out = path.basename(fileName, path.extname(fileName)) + ".txt";
  fs.writeFileSync(out, text);

  // OUTPUT METHOD B
  const fullTextAnnotation = result.fullTextAnnotation;
  console.log(`Full text: ${fullTextAnnotation.text}`);
  fullTextAnnotation.pages.forEach(page => {
    page.blocks.forEach(block => {
      console.log(`Block confidence: ${block.confidence}`);
      block.paragraphs.forEach(paragraph => {
        console.log(`Paragraph confidence: ${paragraph.confidence}`);
        paragraph.words.forEach(word => {
          const wordText = word.symbols.map(s => s.text).join("");
          console.log(`Word text: ${wordText}`);
          console.log(`Word confidence: ${word.confidence}`);
          word.symbols.forEach(symbol => {
            console.log(`Symbol text: ${symbol.text}`);
            console.log(`Symbol confidence: ${symbol.confidence}`);
          });
        });
      });
    });
  });
}

quickstart();
This question is already answered in this one.
In summary, the demo is in this case probably using DOCUMENT_TEXT_DETECTION, which can sometimes produce a more thorough string extraction, while you are using TEXT_DETECTION.
You can try making a client.documentTextDetection request instead of client.textDetection, and you will probably get results closer to the demo.
If you want to read the related documentation, you can find it here.
I hope this resolves your question!
