SVG Zoom Issue Desktop vs Mobile with leaflet.js

I've tried to use leaflet.js to allow users to zoom and pan on big SVG files on my website. I've used the following script to display the SVG with Leaflet:
// Using leaflet.js to pan and zoom a big image.
// See also: http://kempe.net/blog/2014/06/14/leaflet-pan-zoom-image.html
var factor = 1;

// create the slippy map
var map = L.map('image-map', {
  minZoom: 1,
  maxZoom: 5,
  center: [0, 0],
  zoom: 1,
  crs: L.CRS.Simple
});

function getMeta(url) {
  const img = new Image();
  img.addEventListener("load", function () {
    var w = this.naturalWidth;
    var h = this.naturalHeight;
    var southWest = map.unproject([0, h], map.getMaxZoom() - 1);
    var northEast = map.unproject([w, 0], map.getMaxZoom() - 1);
    var bounds = new L.LatLngBounds(southWest, northEast);

    // add the image overlay,
    // so that it covers the entire map
    L.imageOverlay(img.src, bounds).addTo(map);

    // tell leaflet that the map is exactly as big as the image
    map.setMaxBounds(bounds);
    map.fitBounds(bounds); // test
  });
  img.src = url;
}

getMeta("/assets/images/bhikkhu-patimokkha.svg");
You can see the result here. The problem is that it works fine on my iPhone (the zoom levels are appropriate and it is easy to zoom in), but on the desktop version you can only zoom out and not in.
I've tried to change the minZoom: 1 to different values, but it doesn't seem to do anything. The only way I was able to make it work on desktop was to apply a multiplying factor of 10 to var w and var h, but then it would break the mobile version.
I had more success with PNG images, but they are very slow to load. Maybe the code is inappropriate for SVG, particularly when calling naturalHeight? But it looked fine when I debugged it.
Thanks for your help.
Here is a codepen to play around with if you want.
EDIT: Using the nicer code from @Grzegorz T., it now works well on desktop, but on Safari iOS it is overzoomed and cannot unzoom... see the picture below. (It was working with the previous code on iPhone but not on desktop...)

Display the variables w and h and you will see how small the returned values are. To increase them, I multiplied them by 5; I then used fitBounds so that the image scales to the viewer window while still being zoomable.
To be able to click zoom-in more than once, I changed map.getMaxZoom() - 1 to map.getMaxZoom() - 2.
var map = L.map("image-map", {
  minZoom: 1, // tried 0.1, -4, ...
  maxZoom: 4,
  center: [0, 0],
  zoom: 2,
  crs: L.CRS.Simple
});

function getMeta(url) {
  const img = new Image();
  img.addEventListener("load", function () {
    var w = this.naturalWidth * 5;
    var h = this.naturalHeight * 5;
    var southWest = map.unproject([0, h], map.getMaxZoom() - 2);
    var northEast = map.unproject([w, 0], map.getMaxZoom() - 2);
    var bounds = new L.LatLngBounds(southWest, northEast);

    // add the image overlay,
    // so that it covers the entire map
    L.imageOverlay(img.src, bounds).addTo(map);

    // tell leaflet that the map is exactly as big as the image
    // map.setMaxBounds(bounds);
    map.fitBounds(bounds);
  });
  img.src = url;
}

getMeta("https://fractalcitta.github.io/assets/images/bhikkhu-patimokkha.svg");
You don't need to increase (w, h) by * 5; it's enough to change to map.getMaxZoom() - 4. (Each Leaflet zoom level doubles the scale, so unprojecting two levels lower has roughly the same effect as multiplying the dimensions by 5.)
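That is, keeping the natural dimensions and only changing the zoom offset (a sketch of just the changed lines from the code above):
// no * 5 on the dimensions; compensate with a larger zoom offset instead
var w = this.naturalWidth;
var h = this.naturalHeight;
var southWest = map.unproject([0, h], map.getMaxZoom() - 4);
var northEast = map.unproject([w, 0], map.getMaxZoom() - 4);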
And one more important thing that you should always do with SVG files: optimize them.
I always use this site - SVGOMG.
Second version, with async/Promise ----------
let map = L.map("map", {
  crs: L.CRS.Simple,
  minZoom: 1,
  maxZoom: 4,
});

function loadImage(url) {
  return new Promise((resolve, reject) => {
    const img = new Image();
    img.addEventListener("load", () => resolve(img));
    img.addEventListener("error", reject);
    img.src = url;
  });
}

async function getImageData(url) {
  const img = await loadImage(url);
  return { img, width: img.naturalWidth, height: img.naturalHeight };
}

async function showOnMap(url) {
  const { img, width, height } = await getImageData(url);
  const southWest = map.unproject([0, height], map.getMaxZoom() - 4);
  const northEast = map.unproject([width, 0], map.getMaxZoom() - 4);
  const bounds = new L.LatLngBounds(southWest, northEast);
  L.imageOverlay(img.src, bounds).addTo(map);
  map.fitBounds(bounds);
}

showOnMap(
  "https://fractalcitta.github.io/assets/images/bhikkhu-patimokkha.svg"
);
The third approach to the problem, I hope the last one ;)
A little description of what's going on here:
We get the SVG by fetch and inject it into a hidden div whose width/height are set to 0. (The div must actually be rendered, just zero-sized rather than display: none, since getBBox() only works on rendered elements in most browsers.)
Then we use getBBox() to read the exact dimensions from that injected SVG.
I am not using map.unproject in this example. To get the exact dimensions of bounds it is enough that:
const bounds = [
  [0, 0], // padding
  [width, height], // image dimensions
];
All code below:
<div id="map"></div>
<div id="svg" style="position: absolute; bottom: 0; left: 0; width: 0; height: 0;"></div>
let map = L.map("map", {
  crs: L.CRS.Simple,
  minZoom: -4,
  maxZoom: 1,
});

const url =
  "https://fractalcitta.github.io/assets/images/bhikkhu-patimokkha.svg";

async function fetchData(url) {
  try {
    const response = await fetch(url);
    const data = await response.text();
    return data;
  } catch (err) {
    console.error(err);
  }
}

fetchData(url)
  .then((svg) => {
    // inject the fetched SVG markup into the hidden div
    // (named "container" here so it doesn't shadow the Leaflet map)
    const container = document.getElementById("svg");
    container.insertAdjacentHTML("afterbegin", svg);
  })
  .then(() => {
    const svgElement = document.getElementById("svg");
    const { width, height } = svgElement.firstChild.getBBox();
    return { width, height };
  })
  .then(({ width, height }) => {
    const img = new Image();
    img.src = url;
    const bounds = [
      [0, 0], // padding
      [width, height], // image dimensions
    ];
    L.imageOverlay(img.src, bounds).addTo(map);
    map.fitBounds(bounds);
  });

OK, so finally I used the code from @Grzegorz T. with a small modification: a constant W = 100 plays the role of an arbitrary width, and the height H is then calculated from the ratio of the dimensions reported by the browser. (The ratio is the same everywhere, but the reported dimensions of an SVG differ between browsers.)
Here is the code:
const W = 100; // this allows us to get the correct ratio,
// so it doesn't depend on browsers reporting different dimensions.

let map = L.map("image-map", {
  crs: L.CRS.Simple,
  minZoom: 1,
  maxZoom: 4,
});

function loadImage(url) {
  return new Promise((resolve, reject) => {
    const img = new Image();
    img.addEventListener("load", () => resolve(img));
    img.addEventListener("error", reject);
    img.src = url;
  });
}

async function getImageData(url) {
  const img = await loadImage(url);
  return { img, width: img.naturalWidth, height: img.naturalHeight };
}

async function showOnMap(url) {
  const { img, width, height } = await getImageData(url);
  const H = (W * height) / width; // keep the image's aspect ratio (hopefully width is not 0)
  const southWest = map.unproject([0, H], map.getMaxZoom() - 4);
  const northEast = map.unproject([W, 0], map.getMaxZoom() - 4);
  const bounds = new L.LatLngBounds(southWest, northEast);
  L.imageOverlay(img.src, bounds).addTo(map);
  map.fitBounds(bounds);
}

showOnMap(
  "https://fractalcitta.github.io/assets/images/bhikkhu-patimokkha.svg"
);

Related

Turn 3d .obj into a SVG with SVG renderer

Using the WebGLRenderer, I successfully loaded an .obj file created in Cinema4D.
var scene = new THREE.Scene();
var camera = new THREE.PerspectiveCamera( 75, window.innerWidth/window.innerHeight, 0.1, 1000 );
camera.position.z = 200;

var renderer = new THREE.WebGLRenderer();
renderer.setSize( window.innerWidth, window.innerHeight );
document.body.appendChild( renderer.domElement );

var controls = new THREE.OrbitControls(camera, renderer.domElement);
controls.enableDamping = true;
controls.dampingFactor = 0.25;
controls.enableZoom = true;

var keyLight = new THREE.DirectionalLight(new THREE.Color('hsl(30, 100%, 75%)'), 1.0);
keyLight.position.set(-100, 0, 100);
var fillLight = new THREE.DirectionalLight(new THREE.Color('hsl(240, 100%, 75%)'), 0.75);
fillLight.position.set(100, 0, 100);
var backLight = new THREE.DirectionalLight(0xffffff, 1.0);
backLight.position.set(100, 0, -100).normalize();
scene.add(keyLight);
scene.add(fillLight);
scene.add(backLight);

const material = new THREE.LineBasicMaterial( {
  color: 0xffffff,
  linewidth: 1,
  linecap: 'round', // ignored by WebGLRenderer
  linejoin: 'round' // ignored by WebGLRenderer
} );
scene.add(material)

var objLoader = new THREE.OBJLoader();
objLoader.setPath('/examples/3d-obj-loader/assets/');
objLoader.load('Untitled2.obj', function (object) {
  object.position.y -= 60;
  scene.add(object);
});

var animate = function () {
  requestAnimationFrame( animate );
  controls.update();
  renderer.render(scene, camera);
};
animate();
Aiming to convert this to vector using the SVGRenderer - I tried swapping out the THREE.WebGLRenderer for SVGRenderer, but there's clearly more to it than that, and I can't find any walkthroughs or breakdowns on here (the swap itself is sketched below).
I can see that what I'm looking for is technically possible with three.js!
I've linked an image of the 3d file I'm looking to vectorise - a simple mountain-style scene - aiming for the edges of the vertices to be stroked, with no texture needed (white shape, black edges).
I tried making a JSON with Bodymovin, and it proved impossible to join all the paths of the vertices without lots of stray lines appearing.
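For reference, the renderer swap I tried looks roughly like this (SVGRenderer isn't in the three.js core build; this assumes examples/js/renderers/SVGRenderer.js and Projector.js are also included):
// swap the WebGL renderer for the SVG renderer from the three.js examples
var renderer = new THREE.SVGRenderer();
renderer.setSize( window.innerWidth, window.innerHeight );
document.body.appendChild( renderer.domElement ); // domElement is an <svg> node here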

html canvas clip but with an image

I have been working with html canvas compositing trying to clip a pattern with a mask.
The main issue that I have is that the mask I have comes from an svg with transparencies within the outer most border. I want the entire inside from the outer most border to be filled with the pattern.
Take this SVG for example: you can see that there is a single-pixel border, then some transparency, and then an opaque red inner blob. The compositing I have done works as the documentation says it should; the single-pixel border and the red inner portion pick up the pattern that I want to mask into this shape. The problem is that I want to mask the entire innards starting from the single-pixel border.
This is where I think clip might help. But it seems clip only works with manually drawn paths, not paths from an SVG (at least as far as I am aware).
Is there a way to accomplish what I am trying to do?
Regards,
James
The Path2D constructor accepts an SVG path data argument, that it will parse as the d attribute of an SVG <path> element.
You can then use this Path2D object with the clip() method:
(async () => {
  // fetch the svg's path-data
  const markup = await fetch("https://upload.wikimedia.org/wikipedia/commons/7/76/Autism_spectrum_infinity_awareness_symbol.svg").then(resp => resp.ok && resp.text());
  const doc = new DOMParser().parseFromString(markup, "image/svg+xml");
  const pathData = doc.querySelector("[d]").getAttribute("d");
  // build our Path2D object and use it
  const path = new Path2D(pathData);
  const canvas = document.querySelector("canvas");
  const ctx = canvas.getContext("2d");
  ctx.clip(path);
  // draw something that will get clipped
  const rad = 30;
  for (let y = 0; y < canvas.height; y += rad * 2) {
    for (let x = 0; x < canvas.width; x += rad * 2) {
      ctx.moveTo(x + rad, y);
      ctx.arc(x, y, rad, 0, Math.PI * 2);
    }
  }
  ctx.fillStyle = "red";
  ctx.fill();
})().catch(console.error);

<canvas width="792" height="612"></canvas>
If you need to transform this path-data (e.g. scale or rotate), you can create a second Path2D object and use its .addPath(path, matrix) method to do so:
// same as above, but smaller
(async () => {
  const markup = await fetch("https://upload.wikimedia.org/wikipedia/commons/7/76/Autism_spectrum_infinity_awareness_symbol.svg").then(resp => resp.ok && resp.text());
  const doc = new DOMParser().parseFromString(markup, "image/svg+xml");
  const pathData = doc.querySelector("[d]").getAttribute("d");
  const originalPath = new Path2D(pathData);
  const path = new Path2D();
  // scale by 0.5
  path.addPath(originalPath, { a: 0.5, d: 0.5 });
  const canvas = document.querySelector("canvas");
  const ctx = canvas.getContext("2d");
  ctx.clip(path);
  // draw something that will get clipped
  const rad = 15;
  for (let y = 0; y < canvas.height; y += rad * 2) {
    for (let x = 0; x < canvas.width; x += rad * 2) {
      ctx.moveTo(x + rad, y);
      ctx.arc(x, y, rad, 0, Math.PI * 2);
    }
  }
  ctx.fillStyle = "red";
  ctx.fill();
})().catch(console.error);

<canvas width="396" height="306"></canvas>

How to convert a Uint8Array to a CanvasImageSource?

In TypeScript (NodeJS), I am struggling to convert a Uint8Array with bitmap image data into a CanvasImageSource type.
More Context
I am working on a typescript library that will be used in the browser as well as NodeJS environments. The library does image operations with WebGL, so in NodeJS environments, I am attempting to leverage headless-gl. My library includes a function (getCanvasImageSource) that returns a CanvasImageSource for clients to use.
A bunch of code was removed for the purposes of asking the question. A WebGL shader will create the desired image on the gl context, which the client can retrieve through the CanvasImageSource. This works as intended in a browser client.
/**
 * Browser version of the library.
 */
export class MyLibrary {
  protected gl!: WebGLRenderingContext;
  protected width: number;
  protected height: number;

  public getCanvasImageSource(): CanvasImageSource {
    return this.gl.canvas;
  }

  /**
   * The GL context can only be created in a browser.
   */
  protected makeGL(): WebGLRenderingContext {
    const canvas = document.createElement('canvas');
    canvas.width = this.width;
    canvas.height = this.height;
    const glContext = canvas.getContext('webgl');
    if (!glContext) {
      throw new Error("Unable to initialize WebGL. This browser or device may not support it.");
    }
    return glContext;
  }
}
import gl from 'gl';

/**
 * A subclass of MyLibrary that overrides the browser-specific functionality.
 */
export class MyHeadlessLibrary extends MyLibrary {
  public getCanvasImageSource(): CanvasImageSource {
    // The canvas is `undefined` from headless-gl.
    // this.gl.canvas === undefined;
    // But, I can read the pixel data as a bitmap.
    const format = this.gl.RGBA;
    const type = this.gl.UNSIGNED_BYTE;
    const bitmapData = new Uint8Array(this.width * this.height * 4);
    this.gl.readPixels(0, 0, this.width, this.height, format, type, bitmapData);

    // This is where I am struggling...
    // Is there a way to convert my `bitmapData` into a `CanvasImageSource`?
  }

  /**
   * Overrides the browser's WebGL context with the headless-gl implementation.
   */
  protected makeGL(): WebGLRenderingContext {
    const glContext = gl(this.width, this.height);
    return glContext;
  }
}
However, I am struggling to find a way to successfully convert the Uint8Array data read from the headless-gl context into a CanvasImageSource object.
Here are some things that I have tried:
1. Return this.gl.canvas
This ends up being undefined in the case of headless-gl.
2. Use JSDOM
JSDOM's canvas does not support the WebGL render context.
3. Use JSDOM to create an HTMLImageElement and set the src to a base64 URL
I haven't quite figured out why, but the promise here does not ever resolve or reject. This results in my library timing out. So perhaps this strategy will work, but there is an issue with my implementation.
This strategy has been used in other areas of the library, but none that involve headless-gl or even WebGL. Just the 2D canvas context.
import gl from 'gl';
import { JSDOM } from 'jsdom';

export class MyHeadlessLibrary extends MyLibrary {
  /**
   * In this attempt, I changed the return type to Promise<CanvasImageSource> here, in MyLibrary, and in the client code.
   */
  public getCanvasImageSource(): Promise<CanvasImageSource> {
    // The canvas is `undefined` from headless-gl.
    // this.gl.canvas === undefined;
    // But, I can read the pixel data as a bitmap.
    const format = this.gl.RGBA;
    const type = this.gl.UNSIGNED_BYTE;
    const bitmapData = new Uint8Array(this.width * this.height * 4);
    this.gl.readPixels(0, 0, this.width, this.height, format, type, bitmapData);

    // Create a DOM and HTMLImageElement.
    const html: string = `<!DOCTYPE html><html><head><meta charset="utf-8" /><title>DOM</title></head><body></body></html>`;
    const dom = new JSDOM(html);
    const img = dom.window.document.createElement('img');

    // Create a base64 data URL
    const buffer = Buffer.from(bitmapData);
    const dataurl = `data:image/bmp;base64,${buffer.toString('base64')}`;

    // Set the image source and wrap the result in a promise
    return new Promise((resolve, reject) => {
      img.onerror = reject;
      img.onload = () => resolve(img);
      img.src = dataurl;
    });
  }
}
Please let me know if something in my code jumps out as a problem, or point me to a potential solution to this problem!
According to the spec a CanvasImageSource is
typedef (HTMLOrSVGImageElement or
         HTMLVideoElement or
         HTMLCanvasElement or
         ImageBitmap or
         OffscreenCanvas) CanvasImageSource;
So it depends on what your needs are. If you don't need any alpha, then one of those options is HTMLCanvasElement, so given the pixels you could do:
function pixelsToCanvas(pixels, width, height) {
  const canvas = document.createElement('canvas');
  canvas.width = width;
  canvas.height = height;
  const ctx = canvas.getContext('2d');
  const imgData = ctx.createImageData(width, height);
  imgData.data.set(pixels);
  ctx.putImageData(imgData, 0, 0);

  // flip the image
  ctx.scale(1, -1);
  ctx.globalCompositeOperation = 'copy';
  ctx.drawImage(canvas, 0, -height, width, height);

  return canvas;
}
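Hypothetical usage (my example; pixels would come from a gl.readPixels call like the one in the question):
// the returned canvas satisfies CanvasImageSource, so it can be drawn directly
const source = pixelsToCanvas(pixels, width, height);
ctx2.drawImage(source, 0, 0); // ctx2 stands in for whatever 2D context you're drawing to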
This should work as long as you either have no alpha or don't care about lossy alpha. Note: the code assumes you're providing unpremultiplied alpha.
The issue is pixels could contain a pixel like 255, 192, 128, 0. Because its alpha is zero, when you pass it through the function above you'll get 0, 0, 0, 0 in the canvas that comes out, because canvases always use premultiplied alpha. That may not be an issue, since for most use cases 255, 192, 128, 0 appears as 0, 0, 0, 0 anyway, but if you have a special use case then this solution won't work.
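You can see that loss with a quick round trip (a minimal browser sketch; the pixel values are just illustrative):
// a fully transparent pixel's RGB channels do not survive putImageData/getImageData,
// because the canvas stores premultiplied alpha internally
const canvas = document.createElement('canvas');
canvas.width = canvas.height = 1;
const ctx = canvas.getContext('2d');
const imgData = ctx.createImageData(1, 1);
imgData.data.set([255, 192, 128, 0]); // RGB information, but alpha = 0
ctx.putImageData(imgData, 0, 0);
console.log(ctx.getImageData(0, 0, 1, 1).data); // Uint8ClampedArray [0, 0, 0, 0]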
Note: you'll need the canvas package for this to work in NodeJS.
As for your Image-from-dataURL example, this code makes no sense:
// Create a base64 data URL
const buffer = Buffer.from(bitmapData);
const dataurl = `data:image/bmp;base64,${buffer.toString('base64')}`;
First off, whether image/bmp is supported is browser dependent, so it's possible JSDOM doesn't support image/bmp. But beyond that, a bmp file has a header which the code is not supplying. Without that header there is no way for any API to know what's in the data. If you gave it 256 bytes, is that an 8x8 4-byte-per-pixel image? A 16x4 4-byte-per-pixel image? A black-and-white 32x64 1-bit-per-pixel image? etc. You need the header.
Maybe writing the header will make that code work?
function convertPixelsToBMP(pixels, width, height) {
  const BYTES_PER_PIXEL = 4;
  const FILE_HEADER_SIZE = 14;
  const INFO_HEADER_SIZE = 40;
  const dst = new Uint8Array(FILE_HEADER_SIZE + INFO_HEADER_SIZE + width * height * 4);
  {
    const data = new DataView(dst.buffer);
    const fileSize = FILE_HEADER_SIZE + INFO_HEADER_SIZE + (width * height * 4);
    data.setUint8 ( 0, 0x42); // 'B'
    data.setUint8 ( 1, 0x4D); // 'M'
    data.setUint32( 2, fileSize, true);
    data.setUint8 (10, FILE_HEADER_SIZE + INFO_HEADER_SIZE);
    data.setUint32(14, INFO_HEADER_SIZE, true);
    data.setUint32(18, width, true);
    data.setUint32(22, height, true);
    data.setUint16(26, 1, true);
    data.setUint16(28, BYTES_PER_PIXEL * 8, true);
  }
  // bmp expects colors in BGRA format
  const pdst = new Uint8Array(dst.buffer, FILE_HEADER_SIZE + INFO_HEADER_SIZE);
  for (let i = 0; i < pixels.length; i += 4) {
    pdst[i    ] = pixels[i + 2];
    pdst[i + 1] = pixels[i + 1];
    pdst[i + 2] = pixels[i + 0];
    pdst[i + 3] = pixels[i + 3];
  }
  return dst;
}
Note: this code also assumes you're providing unpremultiplied alpha.
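If you still wanted the data-URL route, hypothetical usage would look something like this (bitmapData, width, height, and img as in the question's attempt):
// wrap the raw pixels in a BMP header, then base64-encode the result for the data URL
const bmpBytes = convertPixelsToBMP(bitmapData, width, height);
const dataurl = `data:image/bmp;base64,${Buffer.from(bmpBytes).toString('base64')}`;
img.src = dataurl; // the decoder now has a header describing the pixel layout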
@gman, thank you for the help! That makes sense that I would need the header for the base64 URL, but I didn't need it anyway; returning an HTMLCanvasElement is sufficient for my needs. There is some alpha in the image, but premultiplied alpha is not an issue.
The one other thing I ran into was that the resulting image was flipped vertically. I assume this is because of the difference between the WebGL and 2D canvas coordinate systems. I solved it by looping through the pixels and swapping rows.
The resulting solution looks like this:
export class MyHeadlessLibrary extends MyLibrary {
  public getCanvasImageSource(): CanvasImageSource {
    // read the pixel data
    const pixels = new Uint8Array(this.width * this.height * 4);
    this.gl.readPixels(0, 0, this.width, this.height, this.gl.RGBA, this.gl.UNSIGNED_BYTE, pixels);

    // create a headless canvas & 2d context
    const html: string = `<!DOCTYPE html><html><head><meta charset="utf-8" /><title>DOM</title></head><body></body></html>`;
    const dom = new JSDOM(html);
    const canvas = dom.window.document.createElement('canvas');
    canvas.width = this.width;
    canvas.height = this.height;
    const ctx = canvas.getContext('2d');
    if (!ctx) {
      throw Error("Unable to create a 2D render context");
    }

    // flip the image
    const bytesPerRow = this.width * 4;
    const temp = new Uint8Array(bytesPerRow);
    for (let y = 0; y < this.height / 2; y += 1) {
      const topOffset = y * bytesPerRow;
      const bottomOffset = (this.height - y - 1) * bytesPerRow;
      temp.set(pixels.subarray(topOffset, topOffset + bytesPerRow));
      pixels.copyWithin(topOffset, bottomOffset, bottomOffset + bytesPerRow);
      pixels.set(temp, bottomOffset);
    }

    // Draw the pixels into the new canvas
    const imgData = ctx.createImageData(this.width, this.height);
    imgData.data.set(pixels);
    ctx.putImageData(imgData, 0, 0);
    return canvas;
  }
}

How to rotate a cube by a quaternion in three.js?

I have some problems understanding how to rotate a figure by a quaternion. Can somebody please explain how to do it? In the render function I want to rotate the cubes by quaternions.
function main() {
  const canvas = document.querySelector('#c');
  const renderer = new THREE.WebGLRenderer({canvas});

  const fov = 100;
  const aspect = 2; // the canvas default
  const near = 0.1;
  const far = 5;
  const camera = new THREE.PerspectiveCamera(fov, aspect, near, far);
  camera.position.z = 3;

  const scene = new THREE.Scene();

  {
    const color = 0xFFFFFF;
    const intensity = 1;
    const light = new THREE.DirectionalLight(color, intensity);
    light.position.set(-1, 2, 4);
    scene.add(light);
  }

  function makeInstance(color, x, width, height, depth) {
    const material = new THREE.MeshPhongMaterial({color});
    const geometry = new THREE.BoxGeometry(width, height, depth);
    const cube = new THREE.Mesh(geometry, material);
    scene.add(cube);
    cube.position.x = x;
    return cube;
  }

  const cubes = [
    makeInstance(0x8844aa, -2, 3, 1, 1),
    makeInstance(0xaa8844, 0.5, 2, 1, 1),
  ];

  function resizeRendererToDisplaySize(renderer) {
    const canvas = renderer.domElement;
    const width = canvas.clientWidth;
    const height = canvas.clientHeight;
    const needResize = canvas.width !== width || canvas.height !== height;
    if (needResize) {
      renderer.setSize(width, height, false);
    }
    return needResize;
  }

  function render(time) {
    time *= 0.001;

    if (resizeRendererToDisplaySize(renderer)) {
      const canvas = renderer.domElement;
      camera.aspect = canvas.clientWidth / canvas.clientHeight;
      camera.updateProjectionMatrix();
    }

    // cubes.forEach((cube, ndx) => {
    //   const speed = 1 + ndx * .1;
    //   const rot = time * speed;
    //   cube.rotation.x = rot;
    //   cube.rotation.y = rot;
    // });

    renderer.render(scene, camera);
    requestAnimationFrame(render);
  }
  requestAnimationFrame(render);
}
main();
You have an Object3D (Points, Lines, Meshes, etc.) that you want to rotate via quaternions; here, a mesh (the cube). The immediate answer is:
cube.applyQuaternion(myquat);
And where does myquat come from? Probably from one of these:
myquat = new THREE.Quaternion(); // then set it from one of these:
myquat.setFromAxisAngle( axis : Vector3, angle : Float )
myquat.setFromEuler( euler : Euler )
myquat.setFromRotationMatrix( m : Matrix4 )
myquat.setFromUnitVectors( vFrom : Vector3, vTo : Vector3 )
I hope this gives you a start, even to ask a more specific question.
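For instance, here is a minimal sketch (my addition, reusing the cubes, renderer, scene, and camera from the question's main()) that spins the cubes with a per-frame quaternion:
// build a small rotation around the Y axis and apply it to each cube every frame
const axis = new THREE.Vector3(0, 1, 0).normalize();
const step = new THREE.Quaternion();

function render(time) {
  step.setFromAxisAngle(axis, 0.01); // about 0.01 radians per frame
  cubes.forEach((cube) => cube.applyQuaternion(step));
  renderer.render(scene, camera);
  requestAnimationFrame(render);
}
requestAnimationFrame(render);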

Three.js Server Rendering

I am trying to create a script that loads my .obj and .mtl files in three.js and then takes a screenshot of it. I'm doing this for rendering items and characters on my website. I made a function called renderItem that calls the .php page on which the item is rendered and the screenshot is taken, but the only way to get it to work is by echoing the contents of the page (not really what I wanted to do).
Now I am looking at using three.js with node to render my items on the server. I've been researching different ways people have done this for days, and I can get it to work using a CanvasRenderer or headless gl, but my issue now is that only loading simple boxes works. My .obj and .mtl files load but do not show up at all. I have been working on this code for over a week and I cannot find anyone else with the same issue. I don't know what else to do, so any help would be appreciated. I will paste the code below:
var MockBrowser = require('mock-browser').mocks.MockBrowser;
var mock = new MockBrowser();

global.document = mock.getDocument();
global.window = MockBrowser.createWindow();
global.THREE = require('three');

var Canvas = require('canvas');
var gl = require("gl")(500, 500);

var OBJLoader = require('three/examples/js/loaders/OBJLoader.js');
var MTLLoader = require('three/examples/js/loaders/MTLLoader.js');
require('three/examples/js/renderers/CanvasRenderer.js');
require('three/examples/js/renderers/Projector.js');

global.XMLHttpRequest = require('xhr2').XMLHttpRequest;

var scene, camera, renderer, controls;

var express = require('express');
var app = express();

const fitCameraToObject = function ( camera, object, offset, controls ) {
  offset = offset || 1.25;
  const boundingBox = new THREE.Box3();

  // get bounding box of object - this will be used to setup controls and camera
  boundingBox.setFromObject( object );
  const center = boundingBox.getCenter();
  const size = boundingBox.getSize();

  // get the max side of the bounding box (fits to width OR height as needed)
  const maxDim = Math.max( size.x, size.y, size.z );
  const fov = camera.fov * ( Math.PI / 180 );
  let cameraZ = Math.abs( maxDim / 4 * Math.tan( fov * 2 ) );
  cameraZ *= offset; // zoom out a little so that objects don't fill the screen
  camera.position.z = cameraZ;

  const minZ = boundingBox.min.z;
  const cameraToFarEdge = ( minZ < 0 ) ? -minZ + cameraZ : cameraZ - minZ;
  camera.far = cameraToFarEdge * 3;
  camera.updateProjectionMatrix();

  if ( controls ) {
    // set camera to rotate around center of loaded object
    controls.target = center;
    // prevent camera from zooming out far enough to create far plane cutoff
    controls.maxDistance = cameraToFarEdge * 2;
    controls.saveState();
  } else {
    camera.lookAt( center )
  }
}

function init() {
  var manager = new THREE.LoadingManager();
  scene = new THREE.Scene();
  camera = new THREE.PerspectiveCamera( 70, 1, 0.01, 100 );
  camera.position.set(-0.25, 0.86, 0.76);

  var canvas = new Canvas.createCanvas(500, 500);
  canvas.style = {}; // dummy shim to prevent errors during render.setSize
  var renderer = new THREE.CanvasRenderer({
    canvas: canvas
  });
  renderer.setSize( 500, 500 );

  // lighting
  ambientLight = new THREE.AmbientLight(0xffffff, 1.4);
  scene.add(ambientLight);

  var mtlLoader = new THREE.MTLLoader();
  mtlLoader.setTexturePath('texturepath');
  mtlLoader.setPath('mtlpath');
  mtlLoader.load('Boots.mtl', function(materials) {
    materials.preload();
    var objLoader = new THREE.OBJLoader(manager);
    objLoader.setMaterials(materials);
    objLoader.setPath('objpath');
    objLoader.load('Boots.obj', function(object) {
      scene.add(object);
      fitCameraToObject(camera, object, 7, controls);
      renderer.render( scene, camera );
    });
  });

  /*
  var geometry = new THREE.BoxGeometry( 1, 1, 1 );
  var material = new THREE.MeshBasicMaterial( { color: 0x00ff00 } );
  var cube = new THREE.Mesh( geometry, material );
  scene.add( cube );
  camera.position.z = 5;
  */

  var animate = function () {
    // requestAnimationFrame( animate );
    renderer.render( scene, camera );
  };
  animate();

  manager.onLoad = function () {
    renderer.render( scene, camera );
    console.log('Image: ' + renderer.domElement.toDataURL('image/png'));
  };
}

app.get('/', function(req, res) {
  res.send(req.params);
  init();
});

app.listen(3000, function() {
  console.log('Server started on Port 3000...');
});
I changed the paths because I don't want my website name to be shown, but I can confirm that the object and MTL load correctly. I commented out the code that makes a box; when it isn't commented out, it prints a base64 string which I pasted into a website I found online, and it showed me an image of the green box. The MTL/OBJ loader part prints out a base64 string but doesn't show anything. If I need to give more info let me know, but I am completely lost.
