NodeJS packing and unpacking as buffer slower than JSON - node.js

I wrote this simple test to compare the speeds of serializing an array of objects as binary and JSON.
const { performance } = require('perf_hooks');
let units = [];
let id = 0;
let CHUNK_SIZE = 23;
for (let i = 0; i < 50000; i++) {
let r = Math.random();
let u = {};
u.id = id;
u.rotation = Math.PI * 2 * Math.random();
u.type = 0;
u.x = i;
u.y = i;
u.size = r * 20 + 12;
u.health = 1;
u.int = 1;
units.push(u);
}
[
[
"JSON",
(units) => JSON.stringify(units.map(unit => [
unit.id,
unit.type,
unit.x.toFixed(0),
unit.y.toFixed(0),
unit.rotation.toFixed(4),
unit.health.toFixed(2),
unit.size.toFixed(0),
parseInt(unit.int)
])),
(units) => JSON.parse(units)
],
[
"Binary",
(units) => {
return Buffer.concat(units.map(unit => {
let buf = new Buffer(CHUNK_SIZE);
buf.writeUInt32BE(unit.id);
buf.writeUInt8(unit.type, 4);
buf.writeInt32BE(unit.x, 5);
buf.writeInt32BE(unit.y, 9);
buf.writeFloatBE(unit.rotation, 13);
buf.writeFloatBE(unit.health, 17);
buf.writeUInt8(unit.size, 21);
buf.writeUInt8(unit.attacking ? 1 : 0, 22);
return buf;
}));
},
(units) => {
let u = units.buffer;
let result = [];
for (let offset = 0; offset < u.byteLength; offset += CHUNK_SIZE) {
let view = new DataView(u, offset, CHUNK_SIZE);
result.push([
view.getUint32(0),
view.getUint8(4),
view.getInt32(5),
view.getInt32(9),
view.getFloat32(13),
view.getFloat32(17),
view.getUint8(21),
view.getUint8(22)
]);
}
return result;
}
]
].forEach(([name, compress, decompress]) => {
console.log("Test: " + name);
let t0 = performance.now();
let compressed = compress(units);
let t1 = performance.now();
let decompressed = decompress(compressed);
let t2 = performance.now();
console.log(`Result: ${decompressed.length}`);
console.log(`Compression took: ${t1 - t0}ms`);
console.log(`Compressed length: ${compressed.byteLength || compressed.length}`);
console.log(`Decompression took: ${t2 - t1}ms`);
console.log(`Total time: ${t2 - t0}ms`);
console.log("");
})
Drop that into NodeJS and look at the results; here are mine:
Test: JSON
Result: 50000
Compression took: 411.7958119995892ms
Compressed length: 2227781
Decompression took: 134.79507100209594ms
Total time: 546.5908830016851ms
Test: Binary
Result: 50000
Compression took: 612.1825229972601ms
Compressed length: 1150000
Decompression took: 191.14320900291204ms
Total time: 803.3257320001721ms
I'm quite surprised to find that JSON is faster since it is doing considerably more work than the binary counterpart.
Why is that and how can it be improved?

I hope I don't look like a weirdo answering what is now a third consecutive question I asked myself, but hopefully I'm providing content that could be useful to someone.
It appears as if I have been taking way too much advantage of convenience functions. Combine that with not knowing exactly what is happening "under the hood" and you get the results from the question.
Constructing a new buffer for each entry and then concatenating them is a costly thing to do. Instead, create a single buffer up front and write into it:
(units) => {
let buf = new Buffer(units.length * CHUNK_SIZE);
units.forEach((unit, i) => {
let offset = i * CHUNK_SIZE;
buf.writeUInt32BE(unit.id, offset);
buf.writeUInt8(unit.type, offset + 4);
buf.writeInt32BE(unit.x, offset + 5);
buf.writeInt32BE(unit.y, offset + 9);
buf.writeFloatBE(unit.rotation, offset + 13);
buf.writeFloatBE(unit.health, offset + 17);
buf.writeUInt8(unit.size, offset + 21);
buf.writeUInt8(unit.attacking ? 1 : 0, offset + 22);
});
return buf;
},
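Since every byte of every chunk is written before the buffer is read, the zero-filling done by new Buffer(size) / Buffer.alloc(size) on recent Node versions is wasted work. A possible further tweak is to allocate with Buffer.allocUnsafe, which is also the modern, non-deprecated way to get an uninitialized buffer. This is only a sketch (serializeUnsafe is a made-up name) and is not reflected in the timings below:
// Sketch: same serializer, but skipping the zero-fill with Buffer.allocUnsafe.
// Safe here only because every byte of every chunk is written before reading.
const serializeUnsafe = (units) => {
  const buf = Buffer.allocUnsafe(units.length * CHUNK_SIZE);
  units.forEach((unit, i) => {
    const offset = i * CHUNK_SIZE;
    buf.writeUInt32BE(unit.id, offset);
    buf.writeUInt8(unit.type, offset + 4);
    buf.writeInt32BE(unit.x, offset + 5);
    buf.writeInt32BE(unit.y, offset + 9);
    buf.writeFloatBE(unit.rotation, offset + 13);
    buf.writeFloatBE(unit.health, offset + 17);
    buf.writeUInt8(unit.size, offset + 21);
    buf.writeUInt8(unit.attacking ? 1 : 0, offset + 22);
  });
  return buf;
};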
The same goes for the deserializing part: instead of constructing a new DataView per chunk (its constructor taking an offset and length can mislead you into believing that would be more efficient), construct a single one and read from it at the right offsets:
(units) => {
let u = units.buffer;
let result = [];
let view = new DataView(u);
for (let offset = 0; offset < u.byteLength; offset += CHUNK_SIZE) {
result.push([
view.getUint32(offset + 0),
view.getUint8(offset + 4),
view.getInt32(offset + 5),
view.getInt32(offset + 9),
view.getFloat32(offset + 13),
view.getFloat32(offset + 17),
view.getUint8(offset + 21),
view.getUint8(offset + 22)
]);
}
return result;
}
And now we've got more acceptable results:
Test: JSON
Result: 50000
Compression took: 284.3018040023744ms
Compressed length: 2934399
Decompression took: 197.91818399727345ms
Total time: 522.21998799964786ms
Test: Binary
Result: 50000
Compression took: 175.56888100132346ms
Compressed length: 1150000
Decompression took: 79.27483800053596ms
Total time: 254.84371900185943ms
JSON length has increased because I removed the .toFixed calls from the compress function.
I'm interested if there are further improvements that can be done, as I'm sure there are plenty of people who are more competent than me out there.
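One candidate, as a sketch only and not benchmarked here (deserializeFromBuffer is a made-up name): Node Buffers already expose big-endian read helpers such as readUInt32BE and readFloatBE, so the DataView and the units.buffer indirection can be dropped entirely. Reading straight off the Buffer also avoids having to think about the byteOffset a pooled Buffer can have into its underlying ArrayBuffer.
// Sketch: DataView-free deserializer reading the fields straight off the Buffer.
const deserializeFromBuffer = (buf) => {
  const result = [];
  for (let offset = 0; offset < buf.length; offset += CHUNK_SIZE) {
    result.push([
      buf.readUInt32BE(offset),
      buf.readUInt8(offset + 4),
      buf.readInt32BE(offset + 5),
      buf.readInt32BE(offset + 9),
      buf.readFloatBE(offset + 13),
      buf.readFloatBE(offset + 17),
      buf.readUInt8(offset + 21),
      buf.readUInt8(offset + 22)
    ]);
  }
  return result;
};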

Related

Why does this array take up 1GB of memory?

I have files that store game map data, and I am using this function to read the map file and load it into memory when the server starts. Basically 4 bytes at the start tell the server how big the map is, and then each tile on the map has 6 bytes of data. I have roughly 33MB of data stored in multiple files, but when I read them into an array to access from memory, it takes up almost 1GB of RAM. I'm just wondering if something I am doing here is redundant or not needed and causing too much memory to be allocated.
Example: a 256x256 map would have 256 * 256 * 6 + 4 bytes of data
let mapData = [];
function loadMapFiles() {
fs.readdir("./maps", (err, files) => {
for (let file of files) {
let pointer = 4;
fs.readFile("./maps/" + file, (error, data) => {
if (error) throw error;
let sizex = data.readUInt16BE(0);
let sizey = data.readUInt16BE(2);
let mapNameNumber = Number(file);
mapData[mapNameNumber] = [];
for (let y = 0; y < sizey; y++) {
mapData[mapNameNumber][y] = [];
for (let x = 0; x < sizex; x++) {
mapData[mapNameNumber][y][x] = [];
mapData[mapNameNumber][y][x][0] = data.readUInt16BE(pointer);
mapData[mapNameNumber][y][x][1] = data.readUInt16BE(pointer + 2);
mapData[mapNameNumber][y][x][2] = data.readUInt16BE(pointer + 4);
pointer = pointer + 6;
}
}
});
}
});
}
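No answer is recorded here, but the nested mapData[map][y][x][channel] layout stores every 2-byte tile value as a JavaScript number inside a small array inside another array, and that per-array and per-element overhead is the likely reason 33MB of files balloon toward 1GB. A common alternative is one flat typed array per map, indexed arithmetically; a minimal sketch under that assumption (loadMapFile and tileAt are hypothetical names, not from the question):
const fs = require('fs');
// Sketch: keep each map as one flat Uint16Array (3 uint16 values per tile)
// instead of nested arrays of arrays.
function loadMapFile(path) {
  const data = fs.readFileSync(path);
  const sizex = data.readUInt16BE(0);
  const sizey = data.readUInt16BE(2);
  const tiles = new Uint16Array(sizex * sizey * 3);
  for (let i = 0; i < tiles.length; i++) {
    tiles[i] = data.readUInt16BE(4 + i * 2); // sequential 2-byte reads, 6 bytes per tile
  }
  return { sizex, sizey, tiles };
}
// Access tile (x, y), channel 0..2, via index arithmetic.
function tileAt(map, x, y, channel) {
  return map.tiles[(y * map.sizex + x) * 3 + channel];
}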

Node.js and CPU cache utilization

I want to understand CPU cache utilization. For that purpose I wrote a small bit of Node.js code:
const { performance } = require('perf_hooks');
const endOfLine = require('os').EOL; // not defined in the original snippet; os.EOL is a reasonable stand-in
let testArray = [];
let length = "";
let times = "";
do {
testArray.push(Math.random());
if (testArray.length % 1000 === 0) {
testArray = testArray.slice();
const start = performance.now();
action(testArray);
const stop = performance.now();
const duration = stop - start;
length += testArray.length + "," + endOfLine;
times += duration + "," + endOfLine;
console.log(`Took: ${duration}, length: ${testArray.length}`);
}
}
while (testArray.length < 10000000)
function action(a) {
let sum = 0;
for (let index = 0; index < 10000; index++) {
sum += a[index];
}
}
I would expect the duration of the call to the function to be similar to this chart (not reproduced here):
Contrary to my expectations, the durations are pretty much the same no matter what the size of the array is. I thought that as the array gets bigger it would exceed the L1, L2 and L3 caches and I would see that on the graph.
Is my code wrong or am I missing something?
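No answer is recorded here, but note that action as written always reads indices 0 through 9999, so the working set stays the same no matter how large testArray grows. Below is a sketch of a variant that touches the whole array, so the memory traffic actually scales with the array size; to isolate cache effects from the extra work, you would then compare time per element rather than total time.
// Sketch: touch every element, unlike the original action which always reads
// indices 0-9999 regardless of array size.
function actionWholeArray(a) {
  let sum = 0;
  for (let index = 0; index < a.length; index++) {
    sum += a[index];
  }
  return sum;
}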

NodeJS Faster Execution on Repeated Function Calls

I was writing a JavaScript implementation of merge sort in NodeJS, since the native V8 sort function uses quick sort, which is less efficient than merge sort in its worst case. In order to test the performance, I made an array of 10000 elements, pre-sorted (worst-case for quick sort) and timed how long it took to sort this list with both functions. In a single execution, the native sort function takes about 16 milliseconds, whereas my implementation takes about 9 milliseconds. However, when executing both 100 times, the native sort takes about 2.1 milliseconds on average, and mine takes about 4.3 milliseconds. Here is my code:
const { performance } = require('perf_hooks');
function merge(a, b, func) {
const array = new Array(a.length + b.length);
let aIndex = 0;
let bIndex = 0;
while(aIndex + bIndex < array.length) {
if(aIndex >= a.length || func(a[aIndex], b[bIndex]) > 0) {
array[aIndex + bIndex] = b[bIndex++];
} else {
array[aIndex + bIndex] = a[aIndex++];
}
}
return array;
}
function mergeSort(list, func) {
if(list.length <= 1) {
return list;
}
const half = list.length / 2;
return merge(mergeSort(list.slice(0, half), func), mergeSort(list.slice(half), func), func);
}
function time(func, iters) {
let sum = 0;
for(let i = 0; i < iters; i++) {
let startTime = performance.now();
func();
sum += performance.now() - startTime;
}
return sum / iters;
}
const arr = [...Array(10000).keys()];
const sortFunc = (a, b) => a - b;
console.log("JavaScript built-in sort execution time, one iteration:")
console.log(time(() => arr.sort(sortFunc), 1)); // ~16
console.log("Manually implemented merge sort execution time, one iteration:")
console.log(time(() => mergeSort(arr, sortFunc), 1)); // ~9
console.log();
console.log("JavaScript built-in sort average execution time, 100 iterations:")
console.log(time(() => arr.sort(sortFunc), 100)); // ~2.1
console.log("Manually implemented merge sort average execution time, 100 iterations:")
console.log(time(() => mergeSort(arr, sortFunc), 100)); // ~4.3
Why is it so much faster when executed repeatedly than only once, and why is this improvement more pronounced for the native sort function?
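One thing worth accounting for when timing single calls is engine warm-up: the first execution includes parsing, optimizing compilation and inline-cache setup. A common precaution is to run the function a few untimed times before measuring; a sketch of the timing helper with that added (timeWarm is a hypothetical name, not from the question):
// Sketch: same timing helper, with untimed warm-up runs first.
function timeWarm(func, iters, warmup = 10) {
  for (let i = 0; i < warmup; i++) {
    func(); // untimed, lets the engine optimize hot paths first
  }
  let sum = 0;
  for (let i = 0; i < iters; i++) {
    const startTime = performance.now();
    func();
    sum += performance.now() - startTime;
  }
  return sum / iters;
}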
EDIT: I was able to make my algorithm more efficient by tracking array indices instead of using the slice method. My code now consistently beats v8's native sort when used on pre-sorted arrays, but loses on randomized arrays, as expected. Here is that code, for those interested:
const { performance } = require('perf_hooks');
function merge(a, b, func) {
const array = new Array(a.length + b.length);
let aIndex = 0;
let bIndex = 0;
while(aIndex + bIndex < array.length) {
if(aIndex >= a.length || func(a[aIndex], b[bIndex]) > 0) {
array[aIndex + bIndex] = b[bIndex++];
} else {
array[aIndex + bIndex] = a[aIndex++];
}
}
return array;
}
function mergeSortRec(list, func, start, limit) {
if (limit === 1) {
return [list[start]];
}
const half = limit / 2 | 0;
return merge(mergeSortRec(list, func, start, half), mergeSortRec(list, func, half + start, limit - half), func);
}
function mergeSort(list, func) {
return mergeSortRec(list, func, 0, list.length);
}
function time(func) {
let startTime = performance.now();
func();
return performance.now() - startTime;
}
const sortFunc = (a, b) => a - b;
console.log();
console.log("--- Sequential array ---");
console.log();
const sequenceArr = [...Array(10000).keys()];
console.log("JavaScript built-in sort execution time, one iteration:");
console.log(time(() => sequenceArr.slice(0).sort(sortFunc)));
console.log("Manually implemented merge sort execution time, one iteration:");
console.log(time(() => mergeSort(sequenceArr, sortFunc)));
let sum = 0;
for(let i = 0; i < 100; i++) {
const array = sequenceArr.slice(0);
sum += time(() => array.sort(sortFunc));
}
console.log("JavaScript built-in sort average execution time, 100 iterations:");
console.log(sum / 100);
sum = 0;
for(let i = 0; i < 100; i++) {
sum += time(() => mergeSort(sequenceArr, sortFunc))
}
console.log("Manually implemented merge sort average execution time, 100 iterations:");
console.log(sum / 100);
console.log();
console.log("--- Randomized array ---");
console.log();
const randomArrays = new Array(101);
for(let i = 0; i < 101; i++) {
randomArrays[i] = new Array(10000);
for(let j = 0; j < 10000; j++) {
randomArrays[i][j] = Math.random() * 5000 | 0;
}
}
console.log("JavaScript built-in sort execution time, one iteration:");
console.log(time(() => randomArrays[100].slice(0).sort(sortFunc)));
console.log("Manually implemented merge sort execution time, one iteration:");
console.log(time(() => mergeSort(randomArrays[100], sortFunc)));
sum = 0;
for(let i = 0; i < 100; i++) {
const array = randomArrays[i].slice(0)
sum += time(() => array.sort(sortFunc));
}
console.log("JavaScript built-in sort average execution time, 100 iterations:");
console.log(sum / 100);
sum = 0;
for(let i = 0; i < 100; i++) {
sum += time(() => mergeSort(randomArrays[i], sortFunc))
}
console.log("Manually implemented merge sort average execution time, 100 iterations:");
console.log(sum / 100);
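If further tuning is of interest, one more common step is to stop allocating a fresh array in every merge call and instead merge through a single preallocated scratch array. This is only a sketch of that idea (mergeSortScratch and sortRange are made-up names) and has not been benchmarked here:
// Sketch: top-down merge sort using one preallocated scratch array.
function mergeSortScratch(list, func) {
  const a = list.slice(0);
  const scratch = new Array(a.length);
  sortRange(a, scratch, 0, a.length, func);
  return a;
}
// Sorts a[start..end) in place, using scratch[start..end) as merge space.
function sortRange(a, scratch, start, end, func) {
  if (end - start <= 1) return;
  const mid = (start + end) >> 1;
  sortRange(a, scratch, start, mid, func);
  sortRange(a, scratch, mid, end, func);
  // merge a[start..mid) and a[mid..end) into scratch, then copy back
  let i = start, j = mid, k = start;
  while (i < mid && j < end) {
    scratch[k++] = func(a[i], a[j]) > 0 ? a[j++] : a[i++];
  }
  while (i < mid) scratch[k++] = a[i++];
  while (j < end) scratch[k++] = a[j++];
  for (let m = start; m < end; m++) a[m] = scratch[m];
}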

How do I reverse a scanline using the jpeg-js module/node JS buffer?

I've been fiddling around with the jpeg-js module and Node JS Buffer, and attempting to create a small command line program that modifies the decoded JPEG buffer data and creates a pattern of X number of reversed scanlines and X number of normal scanlines before saving a new JPEG. In other words, I'm looking to flip portions of the image, but not the entire image itself (plenty of modules that do such a thing, of course, but not the specific use case I have).
To create the reversed/normal line patterns, I've been reading/writing line by line, saving a slice of that line to a variable, then starting at the end of the scanline and incrementally going down in slices of 4 bytes (the allocation for one RGBA value) until I'm at the beginning of the line. Code for the program:
'use strict';
const fs = require('fs');
const jpeg = require('jpeg-js');
const getPixels = require('get-pixels');
let a = fs.readFileSync('./IMG_0006_2.jpg');
let b = jpeg.decode(a);
let d = Buffer.allocUnsafe(b.width * b.height * 4);
let val = false; // track whether normal or reversed scanlines
let lineWidth = b.width * 4;
let lineCount = 0;
let track = 0;
let track2 = 0;
let track3 = 0;
let curr, currLine; // storage for writing/reading scanlines, respectively
let limit = {
one: Math.floor(Math.random() * 141),
two: Math.floor(Math.random() * 151),
three: Math.floor(Math.random() * 121)
};
if (limit.one < 30) {
limit.one = 30;
}
if (limit.two < 40) {
limit.two = 40;
}
if (limit.two < 20) {
limit.two = 20;
}
let calc = {};
calc.floor = 0;
calc.ceil = 0 + lineWidth;
d.forEach(function(item, i) {
if (i % lineWidth === 0) {
lineCount++;
/* // alternate scanline type, currently disabled to figure out how to succesfully reverse image
if (lineCount > 1 && lineCount % limit.one === 0) {
// val = !val;
}
*/
if (lineCount === 1) {
val = !val; // setting alt scanline check to true initially
} else if (calc.floor + lineWidth < b.data.length - 1) {
calc.floor += lineWidth;
calc.ceil += lineWidth;
}
currLine = b.data.slice(calc.floor, calc.ceil); // current line
track = val ? lineWidth : 0; // tracking variable for reading from scanline
track2 = val ? 4 : 0; // tracking variable for writing from scanline
}
//check if reversed and writing variable has written 4 bytes for RGBA
//if so, set writing source to 4 bytes at end of line and read from there incrementally
if (val && track2 === 4) {
track2 = 0; // reset writing count
curr = currLine.slice(track - 4, track); // store 4 previous bytes as writing source
if (lineCount === 1 && lineWidth - track < 30) console.log(curr); //debug
} else {
curr = currLine; //set normal scanline
}
d[i] = curr[track2];
// check if there is no match between data source and decoded image
if (d[i] !== curr[track2]) {
if (track3 < 50) {
console.log(i);
}
track3++;
}
track2++; //update tracking variable
track = val ? track - 1 : track + 1; //update tracking variable
});
var rawImageData = {
data: d,
width: b.width,
height: b.height
};
console.log(b.data.length);
console.log('errors\t', track3);
var jpegImageData = jpeg.encode(rawImageData, 100);
fs.writeFile('foo2223.jpg', jpegImageData.data);
Alas, the reversed scanline code I've written does not work properly. Unfortunately, I've only been able to successfully reverse the red channel of my test image (see below left), with the blue and green channels just turning into vague blurs. The color scheme should look something like the right image.
What am I doing wrong here?
For reversed lines, you store slices of 4 bytes (4 bytes = 1 pixel), then write the first value of the pixel (red) correctly.
But in the next iteration, you overwrite the slice curr with currLine, so the rest of the channels get wrong values.
if (val && track2 === 4) {
track2 = 0; // reset writing count
curr = currLine.slice(track - 4, track); // store 4 previous bytes as writing source
if (lineCount === 1 && lineWidth - track < 30) console.log(curr); //debug
} else {
curr = currLine; //set normal scanline
}
Iteration 0: val == true, track2 == 4, set curr to next pixel, write red channel.
Iteration 1: val == true, track2 == 1, (val && track2 === 4) == false, set curr to currLine, write green channel.
You can move the track2 === 4 check inside the val branch to avoid this:
if (val) {
if (track2 === 4) {
track2 = 0; // reset writing count
curr = currLine.slice(track - 4, track); // store 4 previous bytes as writing source
if (lineCount === 1 && lineWidth - track < 30) console.log(curr); //debug
}
} else {
curr = currLine; //set normal scanline
}
Fixed code should look like this:
function flipAlt(input, output) {
const fs = require('fs');
const jpeg = require('jpeg-js');
let a = fs.readFileSync(input);
let b = jpeg.decode(a);
let d = Buffer.allocUnsafe(b.width * b.height * 4);
let val = false; // track whether normal or reversed scanlines
let lineWidth = b.width * 4;
let lineCount = 0;
let track = 0;
let track2 = 0;
let track3 = 0;
let curr, currLine; // storage for writing/reading scanlines, respectively
let limit = {
one: Math.floor(Math.random() * 141),
two: Math.floor(Math.random() * 151),
three: Math.floor(Math.random() * 121)
};
if (limit.one < 30) {
limit.one = 30;
}
if (limit.two < 40) {
limit.two = 40;
}
if (limit.two < 20) {
limit.two = 20;
}
let calc = {};
calc.floor = 0;
calc.ceil = 0 + lineWidth;
d.forEach(function(item, i) {
if (i % lineWidth === 0) {
lineCount++;
if (lineCount > 1) {
val = !val;
}
if (lineCount === 1) {
val = !val; // setting alt scanline check to true initially
} else if (calc.floor + lineWidth < b.data.length - 1) {
calc.floor += lineWidth;
calc.ceil += lineWidth;
}
currLine = b.data.slice(calc.floor, calc.ceil); // current line
track = val ? lineWidth : 0; // tracking variable for reading from scanline
track2 = val ? 4 : 0; // tracking variable for writing from scanline
}
//check if reversed and writing variable has written 4 bytes for RGBA
//if so, set writing source to 4 bytes at end of line and read from there incrementally
if (val) {
if (track2 === 4) {
track2 = 0; // reset writing count
curr = currLine.slice(track - 4, track); // store 4 previous bytes as writing source
if (lineCount === 1 && lineWidth - track < 30) console.log(curr); //debug
}
} else {
curr = currLine; //set normal scanline
}
d[i] = curr[track2];
// check if there is no match between data source and decoded image
if (d[i] !== curr[track2]) {
if (track3 < 50) {
console.log(i);
}
track3++;
}
track2++; //update tracking variable
track = val ? track - 1 : track + 1; //update tracking variable
});
var rawImageData = {
data: d,
width: b.width,
height: b.height
};
console.log(b.data.length);
console.log('errors\t', track3);
var jpegImageData = jpeg.encode(rawImageData, 100);
fs.writeFile(output, jpegImageData.data);
}
flipAlt('input.jpg', 'output.jpg');
Instead of tracking array indices, you can use a utility library like lodash, which should make things easier:
function flipAlt(input, output) {
const fs = require('fs');
const jpeg = require('jpeg-js');
const _ = require('lodash');
const image = jpeg.decode(fs.readFileSync(input));
const lines = _.chunk(image.data, image.width*4);
const flipped = _.flatten(lines.map((line, index) => {
if (index % 2 != 0) {
return line;
}
const pixels = _.chunk(line, 4);
return _.flatten(pixels.reverse());
}));
const imageData = jpeg.encode({
width: image.width,
height: image.height,
data: new Buffer(flipped)
}, 100).data;
fs.writeFile(output, imageData);
}
flipAlt('input.jpg', 'output.jpg');
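For completeness, the same even-line pixel reversal can also be done directly on the decoded Buffer, without lodash, by copying 4-byte RGBA groups to mirrored positions within each scanline. This is a sketch only (flipAltBuffer is a hypothetical name, not from either answer):
// Sketch: reverse the pixels of every other scanline using Buffer views.
function flipAltBuffer(input, output) {
  const fs = require('fs');
  const jpeg = require('jpeg-js');
  const image = jpeg.decode(fs.readFileSync(input));
  const lineWidth = image.width * 4;
  const out = Buffer.allocUnsafe(image.data.length);
  for (let y = 0; y < image.height; y++) {
    const line = image.data.subarray(y * lineWidth, (y + 1) * lineWidth);
    const target = out.subarray(y * lineWidth, (y + 1) * lineWidth);
    if (y % 2 !== 0) {
      line.copy(target); // keep odd lines as-is, matching the lodash version
    } else {
      for (let x = 0; x < image.width; x++) {
        // pixel x goes to the mirrored position width-1-x
        line.copy(target, (image.width - 1 - x) * 4, x * 4, x * 4 + 4);
      }
    }
  }
  const encoded = jpeg.encode({ width: image.width, height: image.height, data: out }, 100);
  fs.writeFileSync(output, encoded.data);
}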

Calculate the bounding box of STL file with JavaScript

So I am using this npm package: node-stl
And it's working great. However, the regexp syntax, mathematics and geometrical calculations are somewhat confusing to me, especially all at the same time.
Basically what I want to achieve is to extend the script to calculate the bounding box of the STL.
Here is the main file that calculates the volume and weight of the STL being parsed/read.
var fs = require('fs');
// Vertex
function Vertex (v1,v2,v3) {
this.v1 = Number(v1);
this.v2 = Number(v2);
this.v3 = Number(v3);
}
// Vertex Holder
function VertexHolder (vertex1,vertex2,vertex3) {
this.vert1 = vertex1;
this.vert2 = vertex2;
this.vert3 = vertex3;
}
// transforming a Node.js Buffer into a V8 array buffer
function _toArrayBuffer (buffer) {
var
ab = new ArrayBuffer(buffer.length),
view = new Uint8Array(ab);
for (var i = 0; i < buffer.length; ++i) {
view[i] = buffer[i];
}
return ab;
}
// calculation of the triangle volume
// source: http://stackoverflow.com/questions/6518404/how-do-i-calculate-the-volume-of-an-object-stored-in-stl-files
function _triangleVolume (vertexHolder) {
var
v321 = Number(vertexHolder.vert3.v1 * vertexHolder.vert2.v2 * vertexHolder.vert1.v3),
v231 = Number(vertexHolder.vert2.v1 * vertexHolder.vert3.v2 * vertexHolder.vert1.v3),
v312 = Number(vertexHolder.vert3.v1 * vertexHolder.vert1.v2 * vertexHolder.vert2.v3),
v132 = Number(vertexHolder.vert1.v1 * vertexHolder.vert3.v2 * vertexHolder.vert2.v3),
v213 = Number(vertexHolder.vert2.v1 * vertexHolder.vert1.v2 * vertexHolder.vert3.v3),
v123 = Number(vertexHolder.vert1.v1 * vertexHolder.vert2.v2 * vertexHolder.vert3.v3);
return Number(1.0/6.0)*(-v321 + v231 + v312 - v132 - v213 + v123);
}
// parsing an STL ASCII string
function _parseSTLString (stl) {
var totalVol = 0;
// yes, this is the regular expression, matching the vertexes
// it was kind of tricky but it is fast and does the job
var vertexes = stl.match(/facet\s+normal\s+([-+]?\b(?:[0-9]*\.)?[0-9]+(?:[eE][-+]?[0-9]+)?\b)\s+([-+]?\b(?:[0-9]*\.)?[0-9]+(?:[eE][-+]?[0-9]+)?\b)\s+([-+]?\b(?:[0-9]*\.)?[0-9]+(?:[eE][-+]?[0-9]+)?\b)\s+outer\s+loop\s+vertex\s+([-+]?\b(?:[0-9]*\.)?[0-9]+(?:[eE][-+]?[0-9]+)?\b)\s+([-+]?\b(?:[0-9]*\.)?[0-9]+(?:[eE][-+]?[0-9]+)?\b)\s+([-+]?\b(?:[0-9]*\.)?[0-9]+(?:[eE][-+]?[0-9]+)?\b)\s+vertex\s+([-+]?\b(?:[0-9]*\.)?[0-9]+(?:[eE][-+]?[0-9]+)?\b)\s+([-+]?\b(?:[0-9]*\.)?[0-9]+(?:[eE][-+]?[0-9]+)?\b)\s+([-+]?\b(?:[0-9]*\.)?[0-9]+(?:[eE][-+]?[0-9]+)?\b)\s+vertex\s+([-+]?\b(?:[0-9]*\.)?[0-9]+(?:[eE][-+]?[0-9]+)?\b)\s+([-+]?\b(?:[0-9]*\.)?[0-9]+(?:[eE][-+]?[0-9]+)?\b)\s+([-+]?\b(?:[0-9]*\.)?[0-9]+(?:[eE][-+]?[0-9]+)?\b)\s+endloop\s+endfacet/g);
vertexes.forEach(function (vert) {
var preVertexHolder = new VertexHolder();
vert.match(/vertex\s+([-+]?\b(?:[0-9]*\.)?[0-9]+(?:[eE][-+]?[0-9]+)?\b)\s+([-+]?\b(?:[0-9]*\.)?[0-9]+(?:[eE][-+]?[0-9]+)?\b)\s+([-+]?\b(?:[0-9]*\.)?[0-9]+(?:[eE][-+]?[0-9]+)?\b)\s/g).forEach(function (vertex, i) {
var tempVertex = vertex.replace('vertex', '').match(/[-+]?[0-9]*\.?[0-9]+/g);
var preVertex = new Vertex(tempVertex[0],tempVertex[1],tempVertex[2]);
preVertexHolder['vert'+(i+1)] = preVertex;
});
var partVolume = _triangleVolume(preVertexHolder);
totalVol += Number(partVolume);
})
var volumeTotal = Math.abs(totalVol)/1000;
return {
volume: volumeTotal, // cubic cm
weight: volumeTotal * 1.04 // gm
}
}
// parsing an STL Binary File
// (borrowed some code from here: https://github.com/mrdoob/three.js/blob/master/examples/js/loaders/STLLoader.js)
function _parseSTLBinary (buf) {
buf = _toArrayBuffer(buf);
var
headerLength = 80,
dataOffset = 84,
faceLength = 12*4 + 2,
le = true; // is little-endian
var
dvTriangleCount = new DataView(buf, headerLength, 4),
numTriangles = dvTriangleCount.getUint32(0, le),
totalVol = 0;
for (var i = 0; i < numTriangles; i++) {
var
dv = new DataView(buf, dataOffset + i*faceLength, faceLength),
normal = new Vertex(dv.getFloat32(0, le), dv.getFloat32(4, le), dv.getFloat32(8, le)),
vertHolder = new VertexHolder();
for(var v = 3; v < 12; v+=3) {
var vert = new Vertex(dv.getFloat32(v*4, le), dv.getFloat32((v+1)*4, le), dv.getFloat32( (v+2)*4, le ) );
vertHolder['vert'+(v/3)] = vert;
}
totalVol += _triangleVolume(vertHolder);
}
var volumeTotal = Math.abs(totalVol)/1000;
return {
volume: volumeTotal, // cubic cm
weight: volumeTotal * 1.04 // gm
}
}
// NodeStl
// =======
// > var stl = NodeStl(__dirname + '/myCool.stl');
// > console.log(stl.volume + 'cm^3');
// > console.log(stl.weight + 'gm');
function NodeStl (stlPath) {
var
buf = fs.readFileSync(stlPath),
isAscii = true;
for (var i=0, len=buf.length; i<len; i++) {
if (buf[i] > 127) { isAscii=false; break; }
}
if (isAscii)
return _parseSTLString(buf.toString());
else
return _parseSTLBinary(buf);
}
module.exports = NodeStl;
If anyone could help me with this it would be great. I know it feels like it should be simple: I just need to know the max/min along the different directions (x, y, z) and could then calculate the bounding box.
But I do not understand where the max/min for x, y and z come from here. Please answer if you have an idea.
I've made a new branch (https://github.com/johannesboyne/node-stl/tree/boundingbox); could you please verify whether the applied algorithm works?
Best,
Johannes
Edit: If the branch is stable and works, I'll push it as v0.1.0 (don't know why it is still 0.0.1).
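For anyone reading along: the bounding box is just the per-axis minimum and maximum over every parsed vertex. A minimal sketch of that tracking (illustrative only, not necessarily what the branch above implements) could be hooked into the existing parse loops like this:
// Sketch: track per-axis min/max while iterating vertices.
var min = [Infinity, Infinity, Infinity];
var max = [-Infinity, -Infinity, -Infinity];
function trackVertex(vert) {
  var coords = [vert.v1, vert.v2, vert.v3];
  for (var axis = 0; axis < 3; axis++) {
    if (coords[axis] < min[axis]) min[axis] = coords[axis];
    if (coords[axis] > max[axis]) max[axis] = coords[axis];
  }
}
// Call trackVertex(vert) for every Vertex built in _parseSTLString /
// _parseSTLBinary; afterwards the box dimensions are:
var boundingBox = [max[0] - min[0], max[1] - min[1], max[2] - min[2]];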
