Loss curve saturates at 1.0 for MNIST dataset using TensorFlow.js

data.js
/**
* @license
* Copyright 2018 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
const IMAGE_SIZE = 784;
const NUM_CLASSES = 10;
const NUM_DATASET_ELEMENTS = 65000;
const TRAIN_TEST_RATIO = 5 / 6;
const NUM_TRAIN_ELEMENTS = Math.floor(TRAIN_TEST_RATIO * NUM_DATASET_ELEMENTS);
const NUM_TEST_ELEMENTS = NUM_DATASET_ELEMENTS - NUM_TRAIN_ELEMENTS;
const MNIST_IMAGES_SPRITE_PATH =
'https://storage.googleapis.com/learnjs-data/model-builder/mnist_images.png';
const MNIST_LABELS_PATH =
'https://storage.googleapis.com/learnjs-data/model-builder/mnist_labels_uint8';
/**
* A class that fetches the sprited MNIST dataset and returns shuffled batches.
*
* NOTE: This will get much easier. For now, we do data fetching and
* manipulation manually.
*/
export class MnistData {
constructor() {
this.shuffledTrainIndex = 0;
this.shuffledTestIndex = 0;
}
async load() {
// Make a request for the MNIST sprited image.
const img = new Image();
const canvas = document.createElement('canvas');
const ctx = canvas.getContext('2d');
const imgRequest = new Promise((resolve, reject) => {
img.crossOrigin = '';
img.onload = () => {
img.width = img.naturalWidth;
img.height = img.naturalHeight;
const datasetBytesBuffer =
new ArrayBuffer(NUM_DATASET_ELEMENTS * IMAGE_SIZE * 4);
const chunkSize = 5000;
canvas.width = img.width;
canvas.height = chunkSize;
for (let i = 0; i < NUM_DATASET_ELEMENTS / chunkSize; i++) {
const datasetBytesView = new Float32Array(
datasetBytesBuffer, i * IMAGE_SIZE * chunkSize * 4,
IMAGE_SIZE * chunkSize);
ctx.drawImage(
img, 0, i * chunkSize, img.width, chunkSize, 0, 0, img.width,
chunkSize);
const imageData = ctx.getImageData(0, 0, canvas.width, canvas.height);
for (let j = 0; j < imageData.data.length / 4; j++) {
// All channels hold an equal value since the image is grayscale, so
// just read the red channel.
datasetBytesView[j] = imageData.data[j * 4] / 255;
}
}
this.datasetImages = new Float32Array(datasetBytesBuffer);
resolve();
};
img.src = MNIST_IMAGES_SPRITE_PATH;
});
const labelsRequest = fetch(MNIST_LABELS_PATH);
const [imgResponse, labelsResponse] =
await Promise.all([imgRequest, labelsRequest]);
this.datasetLabels = new Uint8Array(await labelsResponse.arrayBuffer());
// Create shuffled indices into the train/test set for when we select a
// random dataset element for training / validation.
this.trainIndices = tf.util.createShuffledIndices(NUM_TRAIN_ELEMENTS);
this.testIndices = tf.util.createShuffledIndices(NUM_TEST_ELEMENTS);
// Slice the images and labels into train and test sets.
this.trainImages =
this.datasetImages.slice(0, IMAGE_SIZE * NUM_TRAIN_ELEMENTS);
this.testImages = this.datasetImages.slice(IMAGE_SIZE * NUM_TRAIN_ELEMENTS);
this.trainLabels =
this.datasetLabels.slice(0, NUM_CLASSES * NUM_TRAIN_ELEMENTS);
this.testLabels =
this.datasetLabels.slice(NUM_CLASSES * NUM_TRAIN_ELEMENTS);
}
nextTrainBatch(batchSize) {
return this.nextBatch(
batchSize, [this.trainImages, this.trainLabels], () => {
this.shuffledTrainIndex =
(this.shuffledTrainIndex + 1) % this.trainIndices.length;
return this.trainIndices[this.shuffledTrainIndex];
});
}
nextTestBatch(batchSize) {
return this.nextBatch(batchSize, [this.testImages, this.testLabels], () => {
this.shuffledTestIndex =
(this.shuffledTestIndex + 1) % this.testIndices.length;
return this.testIndices[this.shuffledTestIndex];
});
}
nextBatch(batchSize, data, index) {
const batchImagesArray = new Float32Array(batchSize * IMAGE_SIZE);
const batchLabelsArray = new Uint8Array(batchSize * NUM_CLASSES);
for (let i = 0; i < batchSize; i++) {
const idx = index();
const image =
data[0].slice(idx * IMAGE_SIZE, idx * IMAGE_SIZE + IMAGE_SIZE);
batchImagesArray.set(image, i * IMAGE_SIZE);
const label =
data[1].slice(idx * NUM_CLASSES, idx * NUM_CLASSES + NUM_CLASSES);
batchLabelsArray.set(label, i * NUM_CLASSES);
}
const xs = tf.tensor2d(batchImagesArray, [batchSize, IMAGE_SIZE]);
const labels = tf.tensor2d(batchLabelsArray, [batchSize, NUM_CLASSES]);
return {xs, labels};
}
}
script.js
import {MnistData} from './data.js';
var canvas,ctx,saveButton,clearButton;
var pos={x:0,y:0};
var rawImage;
var model;
function getModel()
{
model=tf.sequential();
model.add(tf.layers.conv2d({inputShape:[28,28,1],kernelSize:3,filters:8,activation:'relu'}));
model.add(tf.layers.maxPooling2d({poolSize:[2,2]}));
model.add(tf.layers.conv2d({filters:16,kernelSize:3,activation:'relu'}));
model.add(tf.layers.maxPooling2d({poolSize:[2,2]}));
model.add(tf.layers.flatten());
model.add(tf.layers.dense({units:128,activation:'sigmoid'}));
model.add(tf.layers.dense({units:10,activation:'softmax'}));
model.compile({optimizer:tf.train.adam(),loss:'categoricalCrossentropy',metrics:['accuracy']});
return model;
}
async function train(model,data){
const metrics=['loss', 'val_loss', 'acc', 'val_acc'];
const container={name:'Model training',styles:{height:'640px'}};
const fitCallbacks=tfvis.show.fitCallbacks(container,metrics);
const BATCH_SIZE = 512;
const TRAIN_DATA_SIZE = 5500;
const TEST_DATA_SIZE = 1000;
const [trainXs,trainYs]=tf.tidy(()=>
{
const d=data.nextTrainBatch(TRAIN_DATA_SIZE);
return[
d.xs.reshape([TRAIN_DATA_SIZE,28,28,1]),
d.labels
];
});
const [testXs,testYs]=tf.tidy(()=>{
const d=data.nextTestBatch(TEST_DATA_SIZE);
return[
d.xs.reshape([TEST_DATA_SIZE,28,28,1]),
d.labels
];
});
return model.fit(trainXs,trainYs,{
batchSize:BATCH_SIZE,
validationData:[testXs,testYs],
epochs:20,
shuffle:true,
callbacks:fitCallbacks
});
}
function setPosition(e){
pos.x=e.clientX-100;
pos.y=e.clientY-100;
}
function draw(e)
{
if(e.buttons!=1)return ;
ctx.beginPath();
ctx.lineWidth=24;
ctx.lineCap='round';
ctx.strokeStyle='white';
ctx.moveTo(pos.x,pos.y);
setPosition(e);
ctx.lineTo(pos.x,pos.y);
ctx.stroke();
rawImage.src=canvas.toDataURL('image/png');
}
function erase()
{
ctx.fillStyle="black";
ctx.fillRect(0,0,280,280);
}
function save()
{
var raw=tf.browser.fromPixels(rawImage,1);
var resized=tf.image.resizeBilinear(raw,[28,28]);
var tensor=resized.expandDims(0);
var prediction=model.predict(tensor);
var pIndex=tf.argMax(prediction,1).dataSync();
alert(pIndex);
}
function init()
{
canvas=document.getElementById('canvas');
rawImage=document.getElementById('canvasimg');
ctx=canvas.getContext("2d");
ctx.fillStyle="black";
ctx.fillRect(0,0,280,280);
canvas.addEventListener("mousemove",draw);
canvas.addEventListener("mousedown",setPosition);
canvas.addEventListener("mouseenter",setPosition);
saveButton=document.getElementById('sb');
saveButton.addEventListener("click",save);
clearButton=document.getElementById('cb');
clearButton.addEventListener("click",erase);
}
async function run()
{
const data=new MnistData();
await data.load();
const model=getModel();
tfvis.show.modelSummary({name:'Model Architecture'},model);
await train(model,data);
init();
alert("Training is done, try classifying...");
}
document.addEventListener('DOMContentLoaded', run);
mnist.htm
<html>
<head>
<script src="https://cdn.jsdelivr.net/npm/#tensorflow/tfjs#latest"></script>
<script src="https://cdn.jsdelivr.net/npm/#tensorflow/tfjs-vis"></script>
</head>
<body>
<h1> Handwritten character recognition</h1>
<canvas id="canvas" width="280" height="280" style="position:absolute;top:100;left:100;border:8px solid;"></canvas>
<img id="canvasimg" style="position:absolute;top:10%;left=52%;width:280;height=280;display:none;">
<input type="button" value="classify" id="sb" size="48" style="position:absolute;top:400;left:100;">
<input type="button" value="clear" id="cb" size="23" style="position:absolute;top:400;left:180;">
<script src="data.js" type="module"></script>
<script src="script.js" type="module"></script>
</body>
</html>
I tried to make a handwritten-digit classifier that recognizes digits drawn on the web page's canvas. But while training, my loss curve saturates at 1.0 and my accuracy saturates at 60%. So I tried changing the activation function of the 128-unit dense layer from relu to sigmoid. Even after changing that, my loss saturates at 1.0. Please help me out.

There are models that have already been defined to classify the MNIST dataset here and there. If you want to write your own model, you will need to compare it against those official models, which serve as a baseline.
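For example, a minimal LeNet-style baseline in the spirit of the official tfjs-examples MNIST demo could look like the sketch below. The exact kernel sizes and filter counts are illustrative assumptions, not the canonical values:

// A hypothetical baseline CNN for 28x28x1 MNIST inputs.
// Layer sizes are illustrative assumptions, not the official
// tfjs-examples values.
function getBaselineModel() {
  const model = tf.sequential();
  model.add(tf.layers.conv2d({
    inputShape: [28, 28, 1], kernelSize: 5, filters: 8, activation: 'relu'
  }));
  model.add(tf.layers.maxPooling2d({poolSize: [2, 2], strides: [2, 2]}));
  model.add(tf.layers.conv2d({kernelSize: 5, filters: 16, activation: 'relu'}));
  model.add(tf.layers.maxPooling2d({poolSize: [2, 2], strides: [2, 2]}));
  model.add(tf.layers.flatten());
  model.add(tf.layers.dense({units: 10, activation: 'softmax'}));
  model.compile({
    optimizer: tf.train.adam(),
    loss: 'categoricalCrossentropy',
    metrics: ['accuracy']
  });
  return model;
}

If this baseline learns normally on the same batches while your model does not, the problem is in the model definition; if both saturate the same way, look at the data pipeline or the training loop instead.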

Related

Tensor must have a shape comprised of positive integers but got shape [,96]

const predictGenre = async (filename) => {
console.log("*********** Predict Genre ***********");
// eslint-disable-next-line no-undef
const audioFilePath = musicPath + filename;
//Decode Audio (Wav File)
const audioBuffer = fs.readFileSync(audioFilePath);
const preprocessedAudio = preprocess(audioBuffer);
// Initialisation "musicnn"
let features = {
melSpectrum: [],
batchSize: 0,
melBandsSize: 96,
patchSize: 187,
};
let frameSize = 512;
let hopSize = 256;
//Extract Features
const extractor = new EssentiaModel.EssentiaTFInputExtractor(
EssentiaWASM,
"musicnn"
);
let wasm = EssentiaWASM;
let essentia = extractor.essentia;
features.melSpectrum = []; // ensure it's empty from previous runs
let frames = essentia.FrameGenerator(preprocessedAudio, frameSize, hopSize);
let audioLength = frames.size();
for (var i = 0; i < audioLength; i++) {
let frame = wasm.vectorToArray(frames.get(i));
features.melSpectrum.push(extractor.compute(frame).melSpectrum);
}
//frames.delete();
features.batchSize = features.melSpectrum.length;
//Load Model
const modelPath =
//eslint-disable-next-line no-undef
"file://" + path.join(__dirname, "/models/genderModel.json");
let musicnn = new EssentiaModel.TensorflowMusiCNN(tf, modelPath);
await musicnn.initialize();
let predictions = await musicnn.predict(features);
console.log("-> predictions", predictions);
};
I am trying to predict the genre of an audio file with the essentia.js models.
When I pass the features to the prediction model after extracting them, I get the TensorFlow.js error quoted in the title.
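The [,96] in the error message suggests the first (batch) dimension ended up empty or undefined, which is what you would see if features.melSpectrum stayed empty after frame extraction. A minimal diagnostic sketch, under that assumption, is to validate the features before calling predict:

// Diagnostic sketch (assumption: the [,96] shape means the batch
// dimension is empty, i.e. no frames were extracted).
features.batchSize = features.melSpectrum.length;
if (features.batchSize === 0) {
  throw new Error(
    'No mel-spectrum frames extracted; check preprocess() output and frameSize/hopSize'
  );
}
// Each entry should be one frame of 96 mel bands.
console.log('batchSize:', features.batchSize,
  'melBands:', features.melSpectrum[0].length); // expect 96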

PixiJs mask filter with background image

How can I create an effect like the one on this site (the work links)?
https://monopo.london/work/
I was trying to implement this example, but I can't replace the black background with another image:
https://pixijs.io/examples/#/masks/filter.js
Please help!
const app = new PIXI.Application();
document.body.appendChild(app.view);
// Inner radius of the circle
const radius = 100;
// The blur amount
const blurSize = 12;
app.loader.add('grass', 'https://images.prismic.io/monopolondon/abb53b73-caae-41d2-9031-f889aa27780d_onefinestay_thumb.jpeg?auto=compress,format&rect=0,0,1200,1436&w=600&h=718');
app.loader.load(setup);
function setup(loader, resources) {
const background = new PIXI.Sprite(resources.grass.texture);
app.stage.addChild(background);
background.width = app.screen.width;
background.height = app.screen.height;
const circle = new PIXI.Graphics()
.beginFill(0xFF0000)
.drawCircle(radius + blurSize, radius + blurSize, radius)
.endFill();
circle.filters = [new PIXI.filters.BlurFilter(blurSize)];
const bounds = new PIXI.Rectangle(0, 0, (radius + blurSize) * 2, (radius + blurSize) * 2);
const texture = app.renderer.generateTexture(circle, PIXI.SCALE_MODES.NEAREST, 1, bounds);
const focus = new PIXI.Sprite(texture);
console.log(texture);
app.stage.addChild(focus);
background.mask = focus;
app.stage.interactive = true;
app.stage.on('mousemove', pointerMove);
function pointerMove(event) {
focus.position.x = event.data.global.x - focus.width / 2;
focus.position.y = event.data.global.y - focus.height / 2;
}
}
Try simply adding some other Sprite to the stage, before the grass is added, like this (look at the usage of some_bg):
const app = new PIXI.Application();
document.body.appendChild(app.view);
// Inner radius of the circle
const radius = 100;
// The blur amount
const blurSize = 32;
app.loader.add('grass', 'examples/assets/bg_grass.jpg');
app.loader.add('some_bg', 'examples/assets/bg_plane.jpg');
app.loader.load(setup);
function setup(loader, resources) {
const some_bg = new PIXI.Sprite(resources.some_bg.texture);
some_bg.width = app.screen.width;
some_bg.height = app.screen.height;
app.stage.addChild(some_bg);
console.log(some_bg);
const background = new PIXI.Sprite(resources.grass.texture);
app.stage.addChild(background);
background.width = app.screen.width;
background.height = app.screen.height;
const circle = new PIXI.Graphics()
.beginFill(0xFFFFFF)
.drawCircle(radius + blurSize, radius + blurSize, radius)
.endFill();
circle.filters = [new PIXI.filters.BlurFilter(blurSize)];
const bounds = new PIXI.Rectangle(0, 0, (radius + blurSize) * 2, (radius + blurSize) * 2);
const texture = app.renderer.generateTexture(circle, PIXI.SCALE_MODES.NEAREST, 1, bounds);
const focus = new PIXI.Sprite(texture);
app.stage.addChild(focus);
background.mask = focus;
app.stage.interactive = true;
app.stage.on('mousemove', pointerMove);
function pointerMove(event) {
focus.position.x = event.data.global.x - focus.width / 2;
focus.position.y = event.data.global.y - focus.height / 2;
}
}
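The reason this works: a mask only controls which pixels of the masked sprite are drawn, so outside the blurred circle the grass sprite is simply not rendered, and whatever sits lower in the display list shows through. Without some_bg underneath, all that remains is the renderer's clear color, which is black by default.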

BodyPix - running toMask() and toColoredPartMask() in node.js throws an error: ImageData is not defined

I am trying to get a colored mask of segmented person parts with the TensorFlow.js BodyPix model. The code below works fine until toColoredPartMask or toMask, which throws the error "ImageData is not defined".
const tfjsnode = require('@tensorflow/tfjs-node')
const bodyPix = require('@tensorflow-models/body-pix');
const fs = require('fs');
setTimeout(async () => {
maskImageWithBodyPix().then(response => {
console.log(response)
}).catch(e => {
console.log("Error => " + e)
})
}, 1000)
async function maskImageWithBodyPix(image = readImage("./person.jpeg")) {
console.log("loadModel ...");
if (image == null)
return Promise.resolve("Image Not Found...")
const resNet = {
architecture: 'ResNet50',
outputStride: 16,
quantBytes: 4
};
let bodyModel = await bodyPix.load(resNet)
console.log("segmentPersonParts ...");
let segmentedPersonParts = await bodyModel.segmentPersonParts(image, {
flipHorizontal: false,
internalResolution: 'full',
segmentationThreshold: 0.5,
})
console.log(`ImageHeight: ${segmentedPersonParts.height} | ImageWidth: ${segmentedPersonParts.width}`)
console.log("toMaskImageData ...")
const maskedImageData = bodyPix.toColoredPartMask(segmentedPersonParts, false);
console.log(`maskedImageData = ${maskedImageData}`)
return Promise.resolve(true)
}
const readImage = path => {
console.log(`readImage ...`)
if (!fs.existsSync(path))
return null
const imageBuffer = fs.readFileSync(path);
const tfimage = tfjsnode.node.decodeImage(imageBuffer);
return tfimage;
}
So this is a huge hack, but I got it working.
The problem lies in node_modules\@tensorflow-models\body-pix\dist\output_rendering_util.js.
If we look at this function inside:
function toColoredPartMask(partSegmentation, partColors) {
if (partColors === void 0) { partColors = RAINBOW_PART_COLORS; }
if (Array.isArray(partSegmentation) && partSegmentation.length === 0) {
return null;
}
var multiPersonPartSegmentation;
if (!Array.isArray(partSegmentation)) {
multiPersonPartSegmentation = [partSegmentation];
}
else {
multiPersonPartSegmentation = partSegmentation;
}
var _a = multiPersonPartSegmentation[0], width = _a.width, height = _a.height;
var bytes = new Uint8ClampedArray(width * height * 4);
for (var i = 0; i < height * width; ++i) {
// Invert the segmentation mask.
var j = i * 4;
bytes[j + 0] = 255;
bytes[j + 1] = 255;
bytes[j + 2] = 255;
bytes[j + 3] = 255;
for (var k = 0; k < multiPersonPartSegmentation.length; k++) {
var partId = multiPersonPartSegmentation[k].data[i];
if (partId !== -1) {
var color = partColors[partId];
if (!color) {
throw new Error("No color could be found for part id " + partId);
}
bytes[j + 0] = color[0];
bytes[j + 1] = color[1];
bytes[j + 2] = color[2];
bytes[j + 3] = 255;
}
}
}
return new ImageData(bytes, width, height);
}
It seemingly returns a new ImageData object. However, ImageData is a browser API, which is exactly what the "ImageData is not defined" error points at.
And if we dig deeper, we get to some internal TypeScript declaration files and see this:
declare var ImageData: {
prototype: ImageData;
new(sw: number, sh: number, settings?: ImageDataSettings): ImageData;
new(data: Uint8ClampedArray, sw: number, sh?: number, settings?: ImageDataSettings): ImageData;
};
This won't work for us, so we need to overwrite that reference somehow.
What I did was install the @canvas/image-data package and add the line const ImageData = require("@canvas/image-data"); to the very top of the node_modules\@tensorflow-models\body-pix\dist\output_rendering_util.js file, like this:
"use strict";
/**
* @license
* Copyright 2019 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
const ImageData = require("#canvas/image-data");
Object.defineProperty(exports, "__esModule", { value: true });
...
Now ImageData resolves to that package's implementation.
Just by doing that, and also removing the false parameter from the toColoredPartMask call so that it's called like this:
const maskedImageData = bodyPix.toColoredPartMask(segmentedPersonParts);
we get a working result.
If we were to JSON.stringify(maskedImageData), we'd get a billion lines of numbers, so I'm not going to paste that here. Anyway, I think it works now.
I think the problem lies in Node.js simply not providing ImageData; it is a browser API, and body-pix assumes a browser-like implementation is available. The package I installed implements an outside-of-browser version of ImageData, and we force body-pix to use that instead with the require.
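An alternative that avoids editing node_modules, sketched under the assumption that the node-canvas package's ImageData export is compatible with what body-pix constructs, is to polyfill the global before using the model:

// Sketch: polyfill the browser ImageData global for Node.js.
// Assumes the 'canvas' npm package (node-canvas) is installed and
// that its ImageData implementation is compatible with body-pix.
const { ImageData } = require('canvas');
global.ImageData = ImageData;

const tfjsnode = require('@tensorflow/tfjs-node');
const bodyPix = require('@tensorflow-models/body-pix');
// ...proceed as in the question; toColoredPartMask can now
// construct ImageData without throwing.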
DO NOTE THAT YOU SHOULD BASICALLY NEVER EDIT ANYTHING INSIDE node_modules, BUT IF THIS IS JUST A PROJECT FOR FUN, I DON'T SEE WHY YOU SHOULDN'T PLAY AROUND A LITTLE.
P.S. I think the first line in toColoredPartMask that checks partColors is a bit funky, so I replaced it with if (arguments.length === 1 || !partColors) { partColors = RAINBOW_PART_COLORS; } in my code.

DragControls with three.js module is not working on Node.js and React

I'm trying to drag and drop objects to change their positions with Three.js in React. I tried to use the three-dragcontrols and drag-controls Node modules, but the dragging functionality provided by those modules is not working. Here's my implementation:
import * as THREE from 'three';
// DragControls.install({ THREE: THREE }); // for 'drag-controls' module only
const rendererGl = buildRendererGl();
const objects = [];
function buildRendererGl() {
const rendererGl = new THREE.WebGLRenderer({
canvas // canvas is an appended child HTML element of a DOM element
});
rendererGl.setPixelRatio(window.devicePixelRatio);
rendererGl.domElement.style.position = 'absolute';
rendererGl.domElement.style.top = 0;
return rendererGl;
}
// ... a lot more including rendererGl.setSize(width, height) upon window.onresize
// and rendererGl.render(sceneGl, camera) upon update
const geometry = new THREE.BoxGeometry(40, 40, 40);
for (let i = 0; i < 10; i++) {
const object = new THREE.Mesh(
geometry,
new THREE.MeshBasicMaterial({ color: Math.random() * 0xffffff })
);
object.position.x = Math.random() * 1000 - 500;
object.position.y = Math.random() * 600 - 300;
object.position.z = Math.random() * 800 - 400;
object.rotation.x = Math.random() * 2 * Math.PI;
object.rotation.y = Math.random() * 2 * Math.PI;
object.rotation.z = Math.random() * 2 * Math.PI;
object.scale.x = Math.random() * 2 + 1;
object.scale.y = Math.random() * 2 + 1;
object.scale.z = Math.random() * 2 + 1;
sceneGl.add(object);
objects.push(object);
}
const dragControls = new DragControls( objects, sceneGl, rendererGl.domElement );
dragControls.addEventListener('dragstart', function() {
controls.enabled = false;
});
dragControls.addEventListener('dragend', function() {
controls.enabled = true;
});
The result: everything renders well, but I can't drag any of the draggable objects.
I'm also using import { CSS3DRenderer } from 'three-css3drenderer', which has its own DOM element, and import { OrbitControls } from 'three/examples/jsm/controls/OrbitControls', but I don't think they're hindering the drag functionality, because the THREE.Raycaster() I've implemented to change a mesh color on hover works well. Here's my rendererCss:
const rendererCss = buildRendererCss(containerElement);
function buildRendererCss(domElement) {
const rendererCss = new CSS3DRenderer();
rendererCss.domElement.style.position = 'absolute';
rendererCss.domElement.style.top = 0;
domElement.appendChild(rendererCss.domElement);
return rendererCss;
}
// ... and rendererCss.render(sceneCss, camera) upon update
Does anybody know how I can implement DragControls on Node.js, especially in React v16.8.4 (or higher) with three 0.102.1 (or higher)? What did you do to make them work? Any alternatives?
Passing a THREE.Scene instead of a THREE.Camera as the second argument of DragControls wrapped it up, with the help of @jbyte: https://github.com/jbyte/three-dragcontrols/issues/2#issuecomment-480594826
And I've figured out that there's a problem when you use DragControls and CSS3DRenderer at the same time:
https://github.com/jbyte/three-dragcontrols/issues/2#issuecomment-480602491
https://discourse.threejs.org/t/dragcontrols-with-css3drenderer/783?u=5ervant
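If the CSS3DRenderer's DOM element is stacked above the WebGL canvas, it can swallow the pointer events that DragControls listens for. A sketch of two possible workarounds, assuming that stacking order is the culprit:

// Sketch (assumption: the CSS3D layer sits on top of the WebGL
// canvas and intercepts mouse events before DragControls sees them).

// Option 1: let pointer events fall through the CSS3D layer.
rendererCss.domElement.style.pointerEvents = 'none';

// Option 2: bind DragControls to the top-most DOM element instead,
// keeping the same argument order that worked above.
const dragControls = new DragControls(objects, sceneGl, rendererCss.domElement);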

Calculate the bounding box of STL file with JavaScript

So I am using this npm package: node-stl. And it's working great. However, the regexp syntax, the mathematics, and the geometrical calculations are somewhat confusing to me, especially all at the same time.
Basically, what I want to achieve is to extend the script to calculate the bounding box of the STL.
Here is the main file that calculates the volume and weight of the STL being parsed/read.
var fs = require('fs');
// Vertex
function Vertex (v1,v2,v3) {
this.v1 = Number(v1);
this.v2 = Number(v2);
this.v3 = Number(v3);
}
// Vertex Holder
function VertexHolder (vertex1,vertex2,vertex3) {
this.vert1 = vertex1;
this.vert2 = vertex2;
this.vert3 = vertex3;
}
// transforming a Node.js Buffer into a V8 array buffer
function _toArrayBuffer (buffer) {
var
ab = new ArrayBuffer(buffer.length),
view = new Uint8Array(ab);
for (var i = 0; i < buffer.length; ++i) {
view[i] = buffer[i];
}
return ab;
}
// calculation of the triangle volume
// source: http://stackoverflow.com/questions/6518404/how-do-i-calculate-the-volume-of-an-object-stored-in-stl-files
function _triangleVolume (vertexHolder) {
var
v321 = Number(vertexHolder.vert3.v1 * vertexHolder.vert2.v2 * vertexHolder.vert1.v3),
v231 = Number(vertexHolder.vert2.v1 * vertexHolder.vert3.v2 * vertexHolder.vert1.v3),
v312 = Number(vertexHolder.vert3.v1 * vertexHolder.vert1.v2 * vertexHolder.vert2.v3),
v132 = Number(vertexHolder.vert1.v1 * vertexHolder.vert3.v2 * vertexHolder.vert2.v3),
v213 = Number(vertexHolder.vert2.v1 * vertexHolder.vert1.v2 * vertexHolder.vert3.v3),
v123 = Number(vertexHolder.vert1.v1 * vertexHolder.vert2.v2 * vertexHolder.vert3.v3);
return Number(1.0/6.0)*(-v321 + v231 + v312 - v132 - v213 + v123);
}
// parsing an STL ASCII string
function _parseSTLString (stl) {
var totalVol = 0;
// yes, this is the regular expression, matching the vertexes
// it was kind of tricky but it is fast and does the job
var vertexes = stl.match(/facet\s+normal\s+([-+]?\b(?:[0-9]*\.)?[0-9]+(?:[eE][-+]?[0-9]+)?\b)\s+([-+]?\b(?:[0-9]*\.)?[0-9]+(?:[eE][-+]?[0-9]+)?\b)\s+([-+]?\b(?:[0-9]*\.)?[0-9]+(?:[eE][-+]?[0-9]+)?\b)\s+outer\s+loop\s+vertex\s+([-+]?\b(?:[0-9]*\.)?[0-9]+(?:[eE][-+]?[0-9]+)?\b)\s+([-+]?\b(?:[0-9]*\.)?[0-9]+(?:[eE][-+]?[0-9]+)?\b)\s+([-+]?\b(?:[0-9]*\.)?[0-9]+(?:[eE][-+]?[0-9]+)?\b)\s+vertex\s+([-+]?\b(?:[0-9]*\.)?[0-9]+(?:[eE][-+]?[0-9]+)?\b)\s+([-+]?\b(?:[0-9]*\.)?[0-9]+(?:[eE][-+]?[0-9]+)?\b)\s+([-+]?\b(?:[0-9]*\.)?[0-9]+(?:[eE][-+]?[0-9]+)?\b)\s+vertex\s+([-+]?\b(?:[0-9]*\.)?[0-9]+(?:[eE][-+]?[0-9]+)?\b)\s+([-+]?\b(?:[0-9]*\.)?[0-9]+(?:[eE][-+]?[0-9]+)?\b)\s+([-+]?\b(?:[0-9]*\.)?[0-9]+(?:[eE][-+]?[0-9]+)?\b)\s+endloop\s+endfacet/g);
vertexes.forEach(function (vert) {
var preVertexHolder = new VertexHolder();
vert.match(/vertex\s+([-+]?\b(?:[0-9]*\.)?[0-9]+(?:[eE][-+]?[0-9]+)?\b)\s+([-+]?\b(?:[0-9]*\.)?[0-9]+(?:[eE][-+]?[0-9]+)?\b)\s+([-+]?\b(?:[0-9]*\.)?[0-9]+(?:[eE][-+]?[0-9]+)?\b)\s/g).forEach(function (vertex, i) {
var tempVertex = vertex.replace('vertex', '').match(/[-+]?[0-9]*\.?[0-9]+/g);
var preVertex = new Vertex(tempVertex[0],tempVertex[1],tempVertex[2]);
preVertexHolder['vert'+(i+1)] = preVertex;
});
var partVolume = _triangleVolume(preVertexHolder);
totalVol += Number(partVolume);
})
var volumeTotal = Math.abs(totalVol)/1000;
return {
volume: volumeTotal, // cubic cm
weight: volumeTotal * 1.04 // gm
}
}
// parsing an STL Binary File
// (borrowed some code from here: https://github.com/mrdoob/three.js/blob/master/examples/js/loaders/STLLoader.js)
function _parseSTLBinary (buf) {
buf = _toArrayBuffer(buf);
var
headerLength = 80,
dataOffset = 84,
faceLength = 12*4 + 2,
le = true; // is little-endian
var
dvTriangleCount = new DataView(buf, headerLength, 4),
numTriangles = dvTriangleCount.getUint32(0, le),
totalVol = 0;
for (var i = 0; i < numTriangles; i++) {
var
dv = new DataView(buf, dataOffset + i*faceLength, faceLength),
normal = new Vertex(dv.getFloat32(0, le), dv.getFloat32(4, le), dv.getFloat32(8, le)),
vertHolder = new VertexHolder();
for(var v = 3; v < 12; v+=3) {
var vert = new Vertex(dv.getFloat32(v*4, le), dv.getFloat32((v+1)*4, le), dv.getFloat32( (v+2)*4, le ) );
vertHolder['vert'+(v/3)] = vert;
}
totalVol += _triangleVolume(vertHolder);
}
var volumeTotal = Math.abs(totalVol)/1000;
return {
volume: volumeTotal, // cubic cm
weight: volumeTotal * 1.04 // gm
}
}
// NodeStl
// =======
// > var stl = NodeStl(__dirname + '/myCool.stl');
// > console.log(stl.volume + 'cm^3');
// > console.log(stl.weight + 'gm');
function NodeStl (stlPath) {
var
buf = fs.readFileSync(stlPath),
isAscii = true;
for (var i=0, len=buf.length; i<len; i++) {
if (buf[i] > 127) { isAscii=false; break; }
}
if (isAscii)
return _parseSTLString(buf.toString());
else
return _parseSTLBinary(buf);
}
module.exports = NodeStl;
If anyone could help me with this, it would be great. I know it feels like it's simple: I just need to know the max/min of the different directions (x, y, z), and I could then calculate the bounding box.
But I do not understand what the max/min for x, y, and z is here. Please answer if you have an idea.
I've made a new branch (https://github.com/johannesboyne/node-stl/tree/boundingbox); could you please verify whether the applied algorithm works?
Best,
Johannes
Edit: If the branch is stable (i.e. works), I'll push it into v0.1.0 (don't know why it is still 0.0.1).
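For reference, the min/max idea from the question can be sketched like this; updateBounds is a hypothetical helper, not part of node-stl:

// Sketch of a bounding-box accumulator for the Vertex objects above.
var bounds = {
  minX: Infinity, minY: Infinity, minZ: Infinity,
  maxX: -Infinity, maxY: -Infinity, maxZ: -Infinity
};
// updateBounds is a hypothetical helper; call it for every Vertex
// parsed in _parseSTLString and _parseSTLBinary.
function updateBounds(vertex) {
  bounds.minX = Math.min(bounds.minX, vertex.v1);
  bounds.maxX = Math.max(bounds.maxX, vertex.v1);
  bounds.minY = Math.min(bounds.minY, vertex.v2);
  bounds.maxY = Math.max(bounds.maxY, vertex.v2);
  bounds.minZ = Math.min(bounds.minZ, vertex.v3);
  bounds.maxZ = Math.max(bounds.maxZ, vertex.v3);
}
// The bounding box dimensions are then:
// [bounds.maxX - bounds.minX, bounds.maxY - bounds.minY, bounds.maxZ - bounds.minZ]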
