WebGL draw perspective view volume - graphics

I'm trying to calculate the 8 (4+4) vertices of a view volume's planes: near and far.
I need these vertices to draw, in WebGL, the view volume of a camera.
So far I have managed to calculate them using trigonometry for each plane, but somehow the result does not look accurate when I draw the vertices.
These are the equations I have reached for the vertices so far:
y = sqrt(hypotenuse^2 - plane^2)
x = sqrt(hypotenuse^2 - plane^2)
z = plane (near or far)
Can anyone help? Thank you in advance.

You can just project a standard cube through an inverse projection matrix.
const m4 = twgl.m4;
const gl = document.querySelector("canvas").getContext("webgl");
const vs = `
attribute vec4 position;
uniform mat4 u_worldViewProjection;
void main() {
gl_Position = u_worldViewProjection * position;
}
`;
const fs = `
precision mediump float;
void main() {
gl_FragColor = vec4(1, 0, 0, 1);
}
`;
const programInfo = twgl.createProgramInfo(gl, [vs, fs]);
const arrays = {
position: [
-1, 1, -1,
1, 1, -1,
1, -1, -1,
-1, -1, -1,
-1, 1, 1,
1, 1, 1,
1, -1, 1,
-1, -1, 1,
],
indices: [
0, 1, 1, 2, 2, 3, 3, 0,
4, 5, 5, 6, 6, 7, 7, 4,
0, 4, 1, 5, 2, 6, 3, 7,
],
};
const bufferInfo = twgl.createBufferInfoFromArrays(gl, arrays);
function render(time) {
time *= 0.001;
twgl.resizeCanvasToDisplaySize(gl.canvas);
gl.viewport(0, 0, gl.canvas.width, gl.canvas.height);
gl.clear(gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT);
let projectionToViewWith; // the projection used to render this view
{
const fov = 30 * Math.PI / 180;
const aspect = gl.canvas.clientWidth / gl.canvas.clientHeight;
const zNear = 0.5;
const zFar = 100;
projectionToViewWith = m4.perspective(fov, aspect, zNear, zFar);
}
let projectionToBeViewed; // the projection whose frustum will be drawn
{
const fov = 30 * Math.PI / 180;
const aspect = gl.canvas.clientWidth / gl.canvas.clientHeight;
const zNear = 2;
const zFar = 10;
projectionToBeViewed = m4.perspective(fov, aspect, zNear, zFar);
}
const inverseProjectionToBeViewed = m4.inverse(projectionToBeViewed);
const radius = 20;
const eye = [Math.sin(time) * radius, 4, Math.cos(time) * radius];
const target = [0, 0, 0];
const up = [0, 1, 0];
const camera = m4.lookAt(eye, target, up);
const view = m4.inverse(camera);
const viewProjection = m4.multiply(projectionToViewWith, view);
const worldViewProjection = m4.multiply(
viewProjection,
inverseProjectionToBeViewed);
gl.useProgram(programInfo.program);
twgl.setBuffersAndAttributes(gl, programInfo, bufferInfo);
twgl.setUniforms(programInfo, {
u_worldViewProjection: worldViewProjection,
});
twgl.drawBufferInfo(gl, bufferInfo, gl.LINES);
requestAnimationFrame(render);
}
requestAnimationFrame(render);
body { margin: 0; }
canvas { width: 100vw; height: 100vh; display: block; }
<script src="https://twgljs.org/dist/4.x/twgl-full.min.js"></script>
<canvas></canvas>
To get the positions of the 8 corners you just have to do a reverse projection. The projection matrix takes the space of the frustum, which is fovy tall, fovy * aspect wide, starting at -zNear and ending at -zFar, and converts that space to a -1 to +1 box after the perspective divide.
To go backward and compute the corners of that box we just project a -1 to +1 box through the inverse projection matrix and do the perspective divide again (which is exactly what's happening in the example above; we're just doing it all on the GPU).
So, pulling it out of the GPU and doing it in JavaScript:
[
[-1, 1, -1],
[ 1, 1, -1],
[ 1, -1, -1],
[-1, -1, -1],
[-1, 1, 1],
[ 1, 1, 1],
[ 1, -1, 1],
[-1, -1, 1],
].forEach((point) => {
console.log(m4.transformPoint(inverseProjectionMatrix, point));
});
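Note that m4.transformPoint applies the perspective divide for you (it divides the transformed point by the resulting w). If you multiply by the matrix yourself you have to do that divide explicitly. A minimal sketch, assuming column-major 16-element matrices as twgl uses (transformPointWithDivide is a hypothetical helper, not part of twgl):
// Transform a 3D point by a 4x4 column-major matrix and apply the
// perspective divide, which is what m4.transformPoint does internally.
function transformPointWithDivide(m, p) {
  const [x, y, z] = p;
  const w = m[3] * x + m[7] * y + m[11] * z + m[15];
  return [
    (m[0] * x + m[4] * y + m[8] * z + m[12]) / w,
    (m[1] * x + m[5] * y + m[9] * z + m[13]) / w,
    (m[2] * x + m[6] * y + m[10] * z + m[14]) / w,
  ];
}
// e.g. one frustum corner:
// transformPointWithDivide(inverseProjectionToBeViewed, [-1, 1, -1]);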
Here's an example.
const m4 = twgl.m4;
const gl = document.querySelector("canvas").getContext("webgl");
const vs = `
attribute vec4 position;
uniform mat4 u_worldViewProjection;
void main() {
gl_Position = u_worldViewProjection * position;
gl_PointSize = 10.;
}
`;
const fs = `
precision mediump float;
uniform vec4 u_color;
void main() {
gl_FragColor = u_color;
}
`;
const programInfo = twgl.createProgramInfo(gl, [vs, fs]);
const positions = [
-1, 1, -1,
1, 1, -1,
1, -1, -1,
-1, -1, -1,
-1, 1, 1,
1, 1, 1,
1, -1, 1,
-1, -1, 1,
];
const arrays = {
position: positions,
indices: [
0, 1, 1, 2, 2, 3, 3, 0,
4, 5, 5, 6, 6, 7, 7, 4,
0, 4, 1, 5, 2, 6, 3, 7,
],
};
const bufferInfo = twgl.createBufferInfoFromArrays(gl, arrays);
function render(time) {
time *= 0.001;
twgl.resizeCanvasToDisplaySize(gl.canvas);
gl.viewport(0, 0, gl.canvas.width, gl.canvas.height);
gl.clear(gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT);
let projectionToViewWith;
{
const fov = 30 * Math.PI / 180;
const aspect = gl.canvas.clientWidth / gl.canvas.clientHeight;
const zNear = 0.5;
const zFar = 100;
projectionToViewWith = m4.perspective(fov, aspect, zNear, zFar);
}
let projectionToBeViewed;
{
const fov = 30 * Math.PI / 180;
const aspect = gl.canvas.clientWidth / gl.canvas.clientHeight;
const zNear = 2;
const zFar = 10;
projectionToBeViewed = m4.perspective(fov, aspect, zNear, zFar);
}
const inverseProjectionToBeViewed = m4.inverse(projectionToBeViewed);
const radius = 20;
const eye = [Math.sin(time) * radius, 4, Math.cos(time) * radius];
const target = [0, 0, 0];
const up = [0, 1, 0];
const camera = m4.lookAt(eye, target, up);
const view = m4.inverse(camera);
const viewProjection = m4.multiply(projectionToViewWith, view);
const worldViewProjection = m4.multiply(
viewProjection,
inverseProjectionToBeViewed);
gl.useProgram(programInfo.program);
twgl.setBuffersAndAttributes(gl, programInfo, bufferInfo);
twgl.setUniforms(programInfo, {
u_worldViewProjection: worldViewProjection,
u_color: [1, 0, 0, 1],
});
twgl.drawBufferInfo(gl, bufferInfo, gl.LINES);
// just because I'm lazy let's draw each point one at a time
// note: since in our case the frustum is not moving we
// could have computed these at init time.
const positionLoc = programInfo.attribSetters.position.location;
gl.disableVertexAttribArray(positionLoc);
for (let i = 0; i < positions.length; i += 3) {
const point = positions.slice(i, i + 3);
const worldPosition = m4.transformPoint(
inverseProjectionToBeViewed, point);
gl.vertexAttrib3f(positionLoc, ...worldPosition);
twgl.setUniforms(programInfo, {
u_color: [0, 1, 0, 1],
u_worldViewProjection: viewProjection,
});
gl.drawArrays(gl.POINTS, 0, 1);
}
requestAnimationFrame(render);
}
requestAnimationFrame(render);
body { margin: 0; }
canvas { width: 100vw; height: 100vh; display: block; }
<script src="https://twgljs.org/dist/4.x/twgl-full.min.js"></script>
<canvas></canvas>
You mention in a comment that you want to show the view frustum of a camera in one canvas in another canvas.
That's effectively what's happening above, except the camera in canvasWhosFrustumWeWantToRender is not moving. Instead it's just sitting at the origin looking down the -Z axis with +Y up. To allow the frustum to move, so you can see where it is relative to the camera, you just need to add in the camera matrix.
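The key lines, which also appear in the full example below, build the frustum cube's world matrix from the in-scene camera's matrix and the inverse projection, and then combine that with the outer camera's view projection:
// place the inverse-projected clip-space cube wherever the in-scene camera is
const world = m4.multiply(cameraInScene, inverseProjectionToBeViewed);
const worldViewProjection = m4.multiply(viewProjection, world);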
const m4 = twgl.m4;
const gl = document.querySelector("canvas").getContext("webgl");
const ext = gl.getExtension("OES_standard_derivatives");
const vs = `
attribute vec4 position;
uniform mat4 u_worldViewProjection;
varying vec3 v_position;
void main() {
gl_Position = u_worldViewProjection * position;
v_position = position.xyz; // for fake lighting
}
`;
const fs = `
#extension GL_OES_standard_derivatives : enable
precision mediump float;
varying vec3 v_position;
uniform vec4 u_color;
void main() {
vec3 fdx = dFdx(v_position);
vec3 fdy = dFdy(v_position);
vec3 n = normalize(cross(fdx,fdy));
float l = dot(n, normalize(vec3(1,2,-3))) * .5 + .5;
gl_FragColor = u_color;
gl_FragColor.rgb *= l;
}
`;
const programInfo = twgl.createProgramInfo(gl, [vs, fs]);
const arrays = {
position: [
-1, 1, -1,
1, 1, -1,
1, -1, -1,
-1, -1, -1,
-1, 1, 1,
1, 1, 1,
1, -1, 1,
-1, -1, 1,
],
indices: [
0, 1, 1, 2, 2, 3, 3, 0,
4, 5, 5, 6, 6, 7, 7, 4,
0, 4, 1, 5, 2, 6, 3, 7,
],
};
const concat = twgl.primitives.concatVertices;
const reorient = twgl.primitives.reorientVertices;
const wireCubeBufferInfo = twgl.createBufferInfoFromArrays(gl, arrays);
const solidCubeBufferInfo = twgl.primitives.createCubeBufferInfo(gl, 2);
const cameraBufferInfo = twgl.createBufferInfoFromArrays(gl,
concat([
reorient(twgl.primitives.createCubeVertices(2),
m4.translation([0, 0, 1])),
reorient(twgl.primitives.createTruncatedConeVertices(0, 1, 2, 12, 1),
m4.rotationX(Math.PI * -.5)),
])
);
const black = [0, 0, 0, 1];
const blue = [0, 0, 1, 1];
function drawScene(viewProjection, clearColor) {
gl.clearColor(...clearColor);
gl.clear(gl.COLOR_BUFFER_BIT);
const numCubes = 10;
for (let i = 0; i < numCubes; ++i) {
const u = i / numCubes;
let mat = m4.rotationY(u * Math.PI * 2);
mat = m4.translate(mat, [0, 0, 10]);
mat = m4.scale(mat, [1, 1 + u * 23 % 1, 1]);
mat = m4.translate(mat, [0, .5, 0]);
mat = m4.multiply(viewProjection, mat);
drawModel(solidCubeBufferInfo, mat, [u, u * 3 % 1, u * 7 % 1,1]);
}
}
function drawModel(bufferInfo, worldViewProjection, color, mode) {
twgl.setBuffersAndAttributes(gl, programInfo, bufferInfo);
twgl.setUniforms(programInfo, {
u_worldViewProjection: worldViewProjection,
u_color: color,
});
twgl.drawBufferInfo(gl, bufferInfo, mode);
}
function render(time) {
time *= 0.001;
twgl.resizeCanvasToDisplaySize(gl.canvas);
const width = gl.canvas.width;
const height = gl.canvas.height;
const halfWidth = width / 2;
gl.viewport(0, 0, width, height);
gl.disable(gl.SCISSOR_TEST);
gl.clear(gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT);
gl.enable(gl.DEPTH_TEST);
let projectionToViewWith; // the projection on the right
{
const fov = 60 * Math.PI / 180;
const aspect = gl.canvas.clientWidth / 2 / gl.canvas.clientHeight;
const zNear = 0.5;
const zFar = 100;
projectionToViewWith = m4.perspective(fov, aspect, zNear, zFar);
}
let projectionToBeViewed; // the projection on the left
{
const fov = 60 * Math.PI / 180;
const aspect = gl.canvas.clientWidth / 2 / gl.canvas.clientHeight;
const zNear = 1.5;
const zFar = 15;
projectionToBeViewed = m4.perspective(fov, aspect, zNear, zFar);
}
const inverseProjectionToBeViewed = m4.inverse(projectionToBeViewed);
let cameraViewingScene; // camera for right view
{
const t1 = 0;
const radius = 30;
const eye = [Math.sin(t1) * radius, 4, Math.cos(t1) * radius];
const target = [0, 0, 0];
const up = [0, 1, 0];
cameraViewingScene = m4.lookAt(eye, target, up);
}
let cameraInScene; // camera for left view
{
const t1 = time;
const t2 = time + .4;
const r1 = 10 + Math.sin(t1);
const r2 = 10 + Math.sin(t2) * 2;
const eye = [Math.sin(t1) * r1, 0 + Math.sin(t1) * 4, Math.cos(t1) * r1];
const target = [Math.sin(t2) * r2, 1 + Math.sin(t2), Math.cos(t2) * r2];
const up = [0, 1, 0];
cameraInScene = m4.lookAt(eye, target, up);
}
// there's only one shader program so just set it once
gl.useProgram(programInfo.program);
// draw only on left half of canvas
gl.enable(gl.SCISSOR_TEST);
gl.scissor(0, 0, halfWidth, height);
gl.viewport(0, 0, halfWidth, height);
// draw the scene on the left using the camera inside the scene
{
const view = m4.inverse(cameraInScene);
const viewProjection = m4.multiply(projectionToBeViewed, view);
drawScene(viewProjection, [.9, 1, .9, 1]);
}
// draw only on right half of canvas
gl.scissor(halfWidth, 0, halfWidth, height);
gl.viewport(halfWidth, 0, halfWidth, height);
// draw the same scene on the right using the camera outside the scene
{
const view = m4.inverse(cameraViewingScene);
const viewProjection = m4.multiply(projectionToViewWith, view);
drawScene(viewProjection, [.9, 1, 1, 1]);
// draw the in scene camera's frustum
{
const world = m4.multiply(cameraInScene, inverseProjectionToBeViewed);
const worldViewProjection = m4.multiply(viewProjection, world);
drawModel(wireCubeBufferInfo, worldViewProjection, black, gl.LINES);
}
// draw the in scene camera's camera model
{
const worldViewProjection = m4.multiply(viewProjection, cameraInScene);
drawModel(cameraBufferInfo, worldViewProjection, blue);
}
}
requestAnimationFrame(render);
}
requestAnimationFrame(render);
body { margin: 0; }
canvas { width: 100vw; height: 100vh; display: block; }
<script src="https://twgljs.org/dist/4.x/twgl-full.min.js"></script>
<canvas></canvas>

Related

Project plane on a specific distance using Three.js

I have a rectangle defined by 4 points. I want to move it to the left or right by a specific distance. By moving I mean that the resulting rectangle should be parallel to the original, and if we put straight lines through corresponding points we get a rectangular cuboid.
In the image I am given the coordinates of points A, B, C, D and the distance H.
How can I calculate the 4 new points using Three.js?
I guess it has something to do with projection, but I couldn't find an easy way to do it.
Just an example. Aqua lines are base, yellow lines are shifted ones:
body{
overflow: hidden;
margin: 0;
}
<script type="module">
import * as THREE from "https://cdn.skypack.dev/three@0.136.0";
import { OrbitControls } from "https://cdn.skypack.dev/three@0.136.0/examples/jsm/controls/OrbitControls";
let scene = new THREE.Scene();
let camera = new THREE.PerspectiveCamera(60, innerWidth / innerHeight, 1, 1000);
camera.position.set(0, 5, 5);
let renderer = new THREE.WebGLRenderer({ antialias: true });
renderer.setSize(innerWidth, innerHeight);
document.body.appendChild(renderer.domElement);
let controls = new OrbitControls(camera, renderer.domElement);
controls.enableDamping = true;
controls.enablePan = false;
scene.add(new THREE.GridHelper());
let dir = new THREE.Vector3(0, 0, 1);
let colors = [];
let ptsBase = [
[-1, 1], [1, 1], [1, -1], [-1, -1]
].map(p => {
colors.push(0, 1, 1);
return new THREE.Vector3(p[0], p[1], 0);
});
let ptsShift = ptsBase.map(p => {
colors.push(1, 1, 0);
return p.clone().addScaledVector(dir, 2)
});
let ptsFinal = ptsBase.concat(ptsShift);
let g = new THREE.BufferGeometry().setFromPoints(ptsFinal);
g.setIndex([0, 1, 1, 2, 2, 3, 3, 0, 4, 5, 5, 6, 6, 7, 7, 4, 0, 4, 1, 5, 2, 6, 3, 7]);
g.setAttribute("color", new THREE.Float32BufferAttribute(colors, 3));
let m = new THREE.LineBasicMaterial({vertexColors: true});
let l = new THREE.LineSegments(g, m);
scene.add(l);
renderer.setAnimationLoop(() => {
controls.update();
renderer.render(scene, camera);
});
</script>
What I found to be working is getting the normal vector, multiplying it by the distance, and adding it to my point vectors.
I did it like this:
let pointA = new THREE.Vector3(0, 0, 0);
let pointB = new THREE.Vector3(1, 0, 0);
let pointC = new THREE.Vector3(1, 1, 0);
let pointD = new THREE.Vector3(0, 1, 0);
const distance = 0.5;
let triangle = new THREE.Triangle(pointA, pointB, pointC);
let plane = new THREE.Plane();
triangle.getPlane(plane);
let normalVector = plane.normal.multiplyScalar(distance);
const pointA1 = pointA.clone().add(normalVector);
const pointB1 = pointB.clone().add(normalVector);
const pointC1 = pointC.clone().add(normalVector);
const pointD1 = pointD.clone().add(normalVector);
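A hedged alternative sketch, not part of the answer above: since the four corners are coplanar, you can get the same normal directly from a cross product of two edges, skipping THREE.Triangle and THREE.Plane (the sign of the normal depends on the winding of the points):
// two edges sharing corner A
const edge1 = new THREE.Vector3().subVectors(pointB, pointA);
const edge2 = new THREE.Vector3().subVectors(pointD, pointA);
const normal = new THREE.Vector3().crossVectors(edge1, edge2).normalize();
const offset = normal.multiplyScalar(distance);
// shift all four corners by the same offset
const shifted = [pointA, pointB, pointC, pointD].map(p => p.clone().add(offset));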

WebGL - jagged edges despite antialiasing

Here is a line made up of two triangles in WebGL2: https://codepen.io/Candleout/pen/bGMYpNM
At least on my computer, the line looks a bit weird and is not as smooth as you would expect:
Antialiasing is turned on, and has some effect, but as you can see in the picture above the line is still not entirely smooth.
The code is based on this article, with the same shaders as in the first example: https://wwwtyro.net/2019/11/18/instanced-lines.html
As you can see, the lines look smoother in that article, maybe because regl is doing something in the background?
Is there something wrong with and/or missing from my setup, that causes the problem?
I'm running WebGL on a Macbook Air from 2013.
// HTML:
<canvas id="canvas"></canvas>
// JS:
// Setup WebGL context
const canvas = document.getElementById('canvas');
canvas.width = window.innerWidth;
canvas.height = window.innerHeight;
const gl = document.getElementById('canvas').getContext('webgl2', { antialias: true } );
// Shaders
const vertexShaderSrc = `
precision highp float;
attribute vec2 position, pointA, pointB;
uniform mat3 projection;
uniform float width;
void main() {
vec2 xBasis = pointB - pointA;
vec2 yBasis = normalize(vec2(-xBasis.y, xBasis.x));
vec2 point = pointA + xBasis * position.x + yBasis * width * position.y;
gl_Position = vec4(projection * vec3(point, 1), 1);
}`;
const fragmentShaderSrc = `
precision highp float;
void main() {
gl_FragColor = vec4(1.0, 0.0, 0.0, 1.0);
}`;
// Setup program with shaders
const program = gl.createProgram();
const vertexShader = gl.createShader(gl.VERTEX_SHADER);
gl.shaderSource(vertexShader, vertexShaderSrc);
gl.compileShader(vertexShader);
gl.attachShader(program, vertexShader);
const fragmentShader = gl.createShader(gl.FRAGMENT_SHADER);
gl.shaderSource(fragmentShader, fragmentShaderSrc);
gl.compileShader(fragmentShader);
gl.attachShader(program, fragmentShader);
gl.linkProgram(program);
if (!gl.getProgramParameter(program, gl.LINK_STATUS)) {
console.log(gl.getShaderInfoLog(vertexShader));
console.log(gl.getShaderInfoLog(fragmentShader));
}
gl.useProgram(program);
function main() {
const positionLoc = gl.getAttribLocation(program, 'position');
const pointALoc = gl.getAttribLocation(program, 'pointA');
const pointBLoc = gl.getAttribLocation(program, 'pointB');
const projectionLoc = gl.getUniformLocation(program, 'projection');
const widthLoc = gl.getUniformLocation(program, 'width');
// Line geometry:
const geometryData = new Float32Array([
// position
0.0, -0.5,
1.0, -0.5,
1.0, 0.5,
0.0, -0.5,
1.0, 0.5,
0.0, 0.5
]);
const geometryBuffer = gl.createBuffer();
gl.bindBuffer(gl.ARRAY_BUFFER, geometryBuffer);
gl.bufferData(gl.ARRAY_BUFFER, geometryData, gl.STATIC_DRAW);
gl.vertexAttribPointer(
positionLoc, // attribute
2, // size
gl.FLOAT, // type
false, // normalize
8, // stride
0); // offset
gl.enableVertexAttribArray(positionLoc);
// End points:
const pointData = new Float32Array([
// pointA
6.123233995736766e-17,
1,
// pointB
-0.8660254037844387,
0.49999999999999994
]);
const pointsBuffer = gl.createBuffer();
gl.bindBuffer(gl.ARRAY_BUFFER, pointsBuffer);
gl.bufferData(gl.ARRAY_BUFFER, pointData, gl.STATIC_DRAW);
gl.vertexAttribPointer(
pointALoc, // attribute
2, // size
gl.FLOAT, // type
false, // normalize
16, // stride
0); // offset
gl.vertexAttribDivisor(pointALoc, 1);
gl.enableVertexAttribArray(pointALoc);
gl.vertexAttribPointer(
pointBLoc, // attribute
2, // size
gl.FLOAT, // type
false, // normalize
16, // stride
8); // offset
gl.vertexAttribDivisor(pointBLoc, 1);
gl.enableVertexAttribArray(pointBLoc);
// Projection matrix - for 90 degree angles
let aspect = gl.canvas.clientWidth / gl.canvas.clientHeight;
gl.uniformMatrix3fv(projectionLoc, false, new Float32Array([
2 / (aspect - -aspect), 0, 0,
0, 1, 0,
0, 0, 1
])
)
gl.uniform1f(
widthLoc, // uniform
0.025) // thickness
gl.drawArraysInstanced(
gl.TRIANGLES, // method
0, // offset
6, // number of vertices per instance
1 // number of instances
);
}
main();

Drawing an image inside a circle using canvas

I have been trying to center an image inside a circle drawn on a canvas; the circle is centered in the middle of the page, but the image that I want to draw inside it isn't.
const background = await Canvas.loadImage(`./backgrounds/${UserJSON[message.author.id].background}.png`);
const canvas = Canvas.createCanvas(250, 400);
const context = canvas.getContext('2d');
context.drawImage(background, 0, 0, canvas.width, canvas.height);
context.strokeStyle = '#0099ff';
context.strokeRect(0, 0, canvas.width, canvas.height);
context.font = '28px sans-serif';
context.fillStyle = '#ffffff';
context.fillText(`${message.author.username}`, canvas.width / 4, canvas.height / 1.6);
context.beginPath();
context.arc(canvas.width / 2, canvas.height / 2, 70, 0, Math.PI * 2, true);
context.closePath();
context.clip();
const avatar = await Canvas.loadImage(message.author.displayAvatarURL({ format: 'jpg', size: 4096 }));
context.drawImage(avatar, 0, 0, 250, 320);
const attachment = new MessageAttachment(canvas.toBuffer(), 'profile-image.png');
I don't know what I'm doing wrong at "drawImage"; I can't seem to shrink the image to fit the circle or anything else. Below I put my Discord avatar and the way it's drawn on the canvas.
You need to make some calculations on your own. drawImage will take a bitmap and project it onto whatever rectangle you give it (it will not preserve the aspect ratio). So you need to do all the centering and cover calculations yourself.
const canvas = Canvas.createCanvas(250, 400);
const context = canvas.getContext('2d');
context.fillStyle = '#6f6f6f';
context.fillRect(0, 0, canvas.width, canvas.height);
const circle = {
x: canvas.width / 2,
y: canvas.height / 2,
radius: 70,
}
context.beginPath();
context.arc(circle.x, circle.y, circle.radius, 0, Math.PI * 2, true);
context.closePath();
context.clip();
const avatar = await Canvas.loadImage('./image.jpg');
console.log(avatar.height, avatar.width);
// Compute aspect ratio
const aspect = avatar.height / avatar.width;
// Math.max is used to get a cover effect; use Math.min for contain
const hsx = circle.radius * Math.max(1.0 / aspect, 1.0);
const hsy = circle.radius * Math.max(aspect, 1.0);
// x - hsx and y - hsy center the image
context.drawImage(avatar,circle.x - hsx,circle.y - hsy,hsx * 2,hsy * 2);
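If you need this in more than one place, the same cover math fits in a small helper; a sketch assuming the circular clip is already applied (drawImageCover is a hypothetical name, not a canvas API):
// Draw img so it covers a square of side 2 * radius centered at (cx, cy),
// preserving aspect ratio; the overflow is hidden by the circular clip.
function drawImageCover(ctx, img, cx, cy, radius) {
  const aspect = img.height / img.width;
  const hsx = radius * Math.max(1 / aspect, 1); // half drawn width
  const hsy = radius * Math.max(aspect, 1);     // half drawn height
  ctx.drawImage(img, cx - hsx, cy - hsy, hsx * 2, hsy * 2);
}
// usage: drawImageCover(context, avatar, circle.x, circle.y, circle.radius);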

How to properly blend colors across two triangles and remove diagonal smear

I am learning WebGL and I've drawn a full screen quad with colors for each vertex. No lighting or normals or perspective matrix or depth buffer; I'm just drawing a gradient background. This is what I get:
It looks good but I cannot help noticing the diagonal smear from the bottom right to the top left. I feel this is an artifact of linearly interpolating between the far opposite vertices. I'm drawing two triangles: the bottom left and the top right. I think I would get similar results using OpenGL instead of WebGL.
Given the same four colors and the same size rectangle, is there a way to render this so the edge between the two triangles isn't so apparent? Maybe more vertices, or a different blending function? I'm not sure exactly what the colors should be at each pixel; I just want to know how to get rid of the diagonal smear.
The issue is that the top right triangle has no knowledge of the bottom left corner, so the top right triangle does not include any of the blue from the bottom left (and vice versa).
A couple of ways to fix that.
One is to use a 2x2 texture with linear sampling. You have to do some extra math to get the interpolation correct because a texture only interpolates between pixel centers.
+-------+-------+
|       |       |
|   +-------+   |
|   |   |   |   |
+---|---+---|---+
|   |   |   |   |
|   +-------+   |
|       |       |
+-------+-------+
Above is a 4 pixel texture stretched to 14 by 6. Sampling happens between pixel centers, so only the center area gets the full gradient. Outside that area the sampling would need pixels outside the texture, so it is clamped to the edge pixels with CLAMP_TO_EDGE, or wraps to the opposite side of the texture with REPEAT.
const gl = document.querySelector('canvas').getContext('webgl');
const tl = [254, 217, 138];
const tr = [252, 252, 252];
const bl = [18, 139, 184];
const br = [203, 79, 121];
const tex = gl.createTexture();
gl.bindTexture(gl.TEXTURE_2D, tex);
gl.pixelStorei(gl.UNPACK_ALIGNMENT, 1);
gl.texImage2D(
gl.TEXTURE_2D,
0, // mip level
gl.RGB, // internal format
2, // width,
2, // height,
0, // border
gl.RGB, // format
gl.UNSIGNED_BYTE, // type
new Uint8Array([...bl, ...br, ...tl, ...tr]));
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_S, gl.CLAMP_TO_EDGE);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.LINEAR);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, gl.LINEAR);
const vs = `
attribute vec4 position;
attribute vec2 texcoord;
varying vec2 v_texcoord;
void main() {
gl_Position = position;
v_texcoord = texcoord;
}
`;
const fs = `
precision mediump float;
varying vec2 v_texcoord;
const vec2 texSize = vec2(2, 2); // could pass this in
uniform sampler2D tex;
void main() {
gl_FragColor = texture2D(tex,
(v_texcoord * (texSize - 1.0) + 0.5) / texSize);
}
`;
const program = twgl.createProgram(gl, [vs, fs]);
const positionLoc = gl.getAttribLocation(program, 'position');
const texcoordLoc = gl.getAttribLocation(program, 'texcoord');
function createBufferAndSetupAttribute(loc, data) {
const buffer = gl.createBuffer();
gl.bindBuffer(gl.ARRAY_BUFFER, buffer);
gl.bufferData(gl.ARRAY_BUFFER, new Float32Array(data), gl.STATIC_DRAW);
gl.enableVertexAttribArray(loc);
gl.vertexAttribPointer(
loc,
2, // 2 elements per iteration
gl.FLOAT, // type of data in buffer
false, // normalize
0, // stride
0, // offset
);
}
createBufferAndSetupAttribute(positionLoc, [
-1, -1,
1, -1,
-1, 1,
-1, 1,
1, -1,
1, 1,
]);
createBufferAndSetupAttribute(texcoordLoc, [
0, 0,
1, 0,
0, 1,
0, 1,
1, 0,
1, 1,
]);
gl.useProgram(program);
// note: no need to set sampler uniform as it defaults
// to 0 which is what we'd set it to anyway.
gl.drawArrays(gl.TRIANGLES, 0, 6);
canvas { border: 1px solid black; }
<canvas></canvas>
<script src="https://twgljs.org/dist/4.x/twgl.min.js"></script>
Note: to see what I mean about the extra math needed for the texture coordinates, here is the same example without the extra math.
const gl = document.querySelector('canvas').getContext('webgl');
const tl = [254, 217, 138];
const tr = [252, 252, 252];
const bl = [18, 139, 184];
const br = [203, 79, 121];
const tex = gl.createTexture();
gl.bindTexture(gl.TEXTURE_2D, tex);
gl.pixelStorei(gl.UNPACK_ALIGNMENT, 1);
gl.texImage2D(
gl.TEXTURE_2D,
0, // mip level
gl.RGB, // internal format
2, // width,
2, // height,
0, // border
gl.RGB, // format
gl.UNSIGNED_BYTE, // type
new Uint8Array([...bl, ...br, ...tl, ...tr]));
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_S, gl.CLAMP_TO_EDGE);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.LINEAR);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, gl.LINEAR);
const vs = `
attribute vec4 position;
attribute vec2 texcoord;
varying vec2 v_texcoord;
void main() {
gl_Position = position;
v_texcoord = texcoord;
}
`;
const fs = `
precision mediump float;
varying vec2 v_texcoord;
uniform sampler2D tex;
void main() {
gl_FragColor = texture2D(tex, v_texcoord);
}
`;
const program = twgl.createProgram(gl, [vs, fs]);
const positionLoc = gl.getAttribLocation(program, 'position');
const texcoordLoc = gl.getAttribLocation(program, 'texcoord');
function createBufferAndSetupAttribute(loc, data) {
const buffer = gl.createBuffer();
gl.bindBuffer(gl.ARRAY_BUFFER, buffer);
gl.bufferData(gl.ARRAY_BUFFER, new Float32Array(data), gl.STATIC_DRAW);
gl.enableVertexAttribArray(loc);
gl.vertexAttribPointer(
loc,
2, // 2 elements per iteration
gl.FLOAT, // type of data in buffer
false, // normalize
0, // stride
0, // offset
);
}
createBufferAndSetupAttribute(positionLoc, [
-1, -1,
1, -1,
-1, 1,
-1, 1,
1, -1,
1, 1,
]);
createBufferAndSetupAttribute(texcoordLoc, [
0, 0,
1, 0,
0, 1,
0, 1,
1, 0,
1, 1,
]);
gl.useProgram(program);
// note: no need to set sampler uniform as it defaults
// to 0 which is what we'd set it to anyway.
gl.drawArrays(gl.TRIANGLES, 0, 6);
canvas { border: 1px solid black; }
<canvas></canvas>
<script src="https://twgljs.org/dist/4.x/twgl.min.js"></script>
Also, of course, rather than doing the math in the fragment shader we could fix the texture coordinates in JavaScript:
const gl = document.querySelector('canvas').getContext('webgl');
const tl = [254, 217, 138];
const tr = [252, 252, 252];
const bl = [18, 139, 184];
const br = [203, 79, 121];
const tex = gl.createTexture();
gl.bindTexture(gl.TEXTURE_2D, tex);
gl.pixelStorei(gl.UNPACK_ALIGNMENT, 1);
gl.texImage2D(
gl.TEXTURE_2D,
0, // mip level
gl.RGB, // internal format
2, // width,
2, // height,
0, // border
gl.RGB, // format
gl.UNSIGNED_BYTE, // type
new Uint8Array([...bl, ...br, ...tl, ...tr]));
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_S, gl.CLAMP_TO_EDGE);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.LINEAR);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, gl.LINEAR);
const vs = `
attribute vec4 position;
attribute vec2 texcoord;
varying vec2 v_texcoord;
void main() {
gl_Position = position;
v_texcoord = texcoord;
}
`;
const fs = `
precision mediump float;
varying vec2 v_texcoord;
uniform sampler2D tex;
void main() {
gl_FragColor = texture2D(tex, v_texcoord);
}
`;
const program = twgl.createProgram(gl, [vs, fs]);
const positionLoc = gl.getAttribLocation(program, 'position');
const texcoordLoc = gl.getAttribLocation(program, 'texcoord');
function createBufferAndSetupAttribute(loc, data) {
const buffer = gl.createBuffer();
gl.bindBuffer(gl.ARRAY_BUFFER, buffer);
gl.bufferData(gl.ARRAY_BUFFER, new Float32Array(data), gl.STATIC_DRAW);
gl.enableVertexAttribArray(loc);
gl.vertexAttribPointer(
loc,
2, // 2 elements per iteration
gl.FLOAT, // type of data in buffer
false, // normalize
0, // stride
0, // offset
);
}
createBufferAndSetupAttribute(positionLoc, [
-1, -1,
1, -1,
-1, 1,
-1, 1,
1, -1,
1, 1,
]);
createBufferAndSetupAttribute(texcoordLoc, [
0.25, 0.25,
0.75, 0.25,
0.25, 0.75,
0.25, 0.75,
0.75, 0.25,
0.75, 0.75,
]);
gl.useProgram(program);
// note: no need to set sampler uniform as it defaults
// to 0 which is what we'd set it to anyway.
gl.drawArrays(gl.TRIANGLES, 0, 6);
canvas { border: 1px solid black; }
<canvas></canvas>
<script src="https://twgljs.org/dist/4.x/twgl.min.js"></script>
Another way is to do the interpolation yourself based on those corners (which is effectively doing what the texture sampler is doing in the previous example, bi-linear interpolation of the 4 colors).
const gl = document.querySelector('canvas').getContext('webgl');
const tl = [254/255, 217/255, 138/255];
const tr = [252/255, 252/255, 252/255];
const bl = [ 18/255, 139/255, 184/255];
const br = [203/255, 79/255, 121/255];
const vs = `
attribute vec4 position;
attribute vec2 texcoord;
varying vec2 v_texcoord;
void main() {
gl_Position = position;
v_texcoord = texcoord;
}
`;
const fs = `
precision mediump float;
varying vec2 v_texcoord;
uniform vec3 tl;
uniform vec3 tr;
uniform vec3 bl;
uniform vec3 br;
void main() {
vec3 l = mix(bl, tl, v_texcoord.t);
vec3 r = mix(br, tr, v_texcoord.t);
vec3 c = mix(l, r, v_texcoord.s);
gl_FragColor = vec4(c, 1);
}
`;
const program = twgl.createProgram(gl, [vs, fs]);
const positionLoc = gl.getAttribLocation(program, 'position');
const texcoordLoc = gl.getAttribLocation(program, 'texcoord');
const tlLoc = gl.getUniformLocation(program, 'tl');
const trLoc = gl.getUniformLocation(program, 'tr');
const blLoc = gl.getUniformLocation(program, 'bl');
const brLoc = gl.getUniformLocation(program, 'br');
function createBufferAndSetupAttribute(loc, data) {
const buffer = gl.createBuffer();
gl.bindBuffer(gl.ARRAY_BUFFER, buffer);
gl.bufferData(gl.ARRAY_BUFFER, new Float32Array(data), gl.STATIC_DRAW);
gl.enableVertexAttribArray(loc);
gl.vertexAttribPointer(
loc,
2, // 2 elements per iteration
gl.FLOAT, // type of data in buffer
false, // normalize
0, // stride
0, // offset
);
}
createBufferAndSetupAttribute(positionLoc, [
-1, -1,
1, -1,
-1, 1,
-1, 1,
1, -1,
1, 1,
]);
createBufferAndSetupAttribute(texcoordLoc, [
0, 0,
1, 0,
0, 1,
0, 1,
1, 0,
1, 1,
]);
gl.useProgram(program);
gl.uniform3fv(tlLoc, tl);
gl.uniform3fv(trLoc, tr);
gl.uniform3fv(blLoc, bl);
gl.uniform3fv(brLoc, br);
gl.drawArrays(gl.TRIANGLES, 0, 6);
canvas { border: 1px solid black; }
<canvas></canvas>
<script src="https://twgljs.org/dist/4.x/twgl.min.js"></script>
Number of dimensions
You should be passing the 2D coordinate space of the quad to the fragment shader rather than the one-dimensional (per channel) color space.
Then in the fragment shader you can do the color interpolation in 2D space, removing the color artifact caused by the diagonal line interpolating in 1D.
The shader snippet for linear color interpolation, where coord2D is the 2D coordinate space:
pixel = vec4(vec3(mix(
mix(colors[0], colors[1], coord2D.x),
mix(colors[2], colors[3], coord2D.x),
coord2D.y
)), 1);
Improved color interpolation
When interpolating colors by their RGB values the results can visually darken between opposing hues.
An easy fix is to use a closer approximation of the sRGB color model by interpolating between the squares of the color channel values. The final output is the square root of the interpolated values.
The interpolation snippet.
pixel = vec4(sqrt(vec3(mix(
mix(colors[0], colors[1], coord2D.x),
mix(colors[2], colors[3], coord2D.x),
coord2D.y
))) / 255.0, 1);
Note that the color channel values in the uniform colors are the squares of the channel values, [R^2, G^2, B^2], and thus range from 0 to 65025.
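In other words, on the JavaScript side the 0-255 channel values are squared before being uploaded as the uniform; the full example below does exactly this when it sets the colors for the ~sRGB path:
// square each 0-255 channel value before upload (only for the ~sRGB path)
const squared = colors.map(v => v * v); // now ranges from 0 to 65025
gl.uniform3fv(colorsLoc, new Float32Array(squared));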
Example
In the example, click the canvas to switch between interpolation methods.
You will note that when using approx ~sRGB, the brightness in the center of the canvas and out towards the centers of the edges does not dip as far below the perceivable brightness at the corners.
Also note that the balance of the transition from the bottom blue and reddish colors to the top orange and white moves down closer to the center. This is because interpolating in the RGB model darkens colors that have strong components in 2 or more channels: reds, greens, blues and blacks will dominate over yellows, cyans, magentas and whites, making the interpolation seem to shift and stretch out the RGB primaries.
var program, colorsLoc, modelLoc, loc, text = " interpolation. Click for ", model = "RGB"; // or sRGB
const vertSrc = `#version 300 es
in vec2 verts;
out vec2 coord2D;
void main() {
coord2D = verts * 0.5 + 0.5; // convert to quad space 0,0 <=> 1, 1
gl_Position = vec4(verts, 1, 1);
}`;
const fragSrc = `#version 300 es
#define channelMax 255.0
// color location indexes
#define TR 3
#define TL 2
#define BR 1
#define BL 0
precision mediump float;
uniform vec3 colors[4];
uniform bool isRGB;
in vec2 coord2D;
out vec4 pixel;
void main() {
if (isRGB) {
pixel = vec4(vec3(mix(
mix(colors[BL], colors[BR], coord2D.x),
mix(colors[TL], colors[TR], coord2D.x),
coord2D.y
)) / channelMax, 1);
} else {
pixel = vec4(vec3(sqrt(mix(
mix(colors[BL], colors[BR], coord2D.x),
mix(colors[TL], colors[TR], coord2D.x),
coord2D.y
))) / channelMax, 1);
}
}`;
const fArr = arr => new Float32Array(arr);
const colors = [64,140,190, 224,81,141, 247,223,140, 245,245,245];
const gl = canvas.getContext("webgl2", {premultipliedAlpha: false, antialias: false, alpha: false});
addEventListener("resize", draw);
addEventListener("click", draw);
setup();
draw();
function compileShader(src, type, shader = gl.createShader(type)) {
gl.shaderSource(shader, src);
gl.compileShader(shader);
return shader;
}
function setup() {
program = gl.createProgram();
gl.attachShader(program, compileShader(vertSrc, gl.VERTEX_SHADER));
gl.attachShader(program, compileShader(fragSrc, gl.FRAGMENT_SHADER));
gl.linkProgram(program);
gl.bindBuffer(gl.ELEMENT_ARRAY_BUFFER, gl.createBuffer());
gl.bufferData(gl.ELEMENT_ARRAY_BUFFER, new Uint8Array([0,1,2,0,2,3]), gl.STATIC_DRAW);
gl.bindBuffer(gl.ARRAY_BUFFER, gl.createBuffer());
gl.bufferData(gl.ARRAY_BUFFER, fArr([-1,-1,1,-1,1,1,-1,1]), gl.STATIC_DRAW);
gl.enableVertexAttribArray(loc = gl.getAttribLocation(program, "verts"));
gl.vertexAttribPointer(loc, 2, gl.FLOAT, false, 0, 0);
colorsLoc = gl.getUniformLocation(program, "colors");
modelLoc = gl.getUniformLocation(program, "isRGB");
gl.useProgram(program);
}
function draw() {
[info.textContent, model] = model != "RGB"? [`RGB${text}~sRGB.`, "RGB"]: [`~sRGB${text}RGB.`, "~sRGB"];
if (canvas.width !== innerWidth || canvas.height !== innerHeight) {
[canvas.width, canvas.height] = [innerWidth, innerHeight];
gl.viewport(0, 0, canvas.width, canvas.height);
}
gl.uniform3fv(colorsLoc, fArr(colors.map(v => model=="RGB"? v: v*v)), 0, 12);
gl.uniform1i(modelLoc, model=="RGB");
gl.drawElements(gl.TRIANGLES, 6, gl.UNSIGNED_BYTE, 0);
}
body {
padding: 0px;
margin: 0px;
font-family: arial;
color: white;
}
canvas {
position: absolute;
top: 0px;
left: 0px;
}
h2 {
position: absolute;
bottom: 0px;
left: 0px;
right: 0px;
text-align: center;
}
<canvas id="canvas"></canvas>
<h2 id="info"></h2>

fabricJS Not persisting Floodfill

I have an algorithm for flood filling a canvas. I'm trying to incorporate this with fabricJS. So here is the dilemma: I create a fabric.Canvas(), which creates a wrapper canvas and also an upper-canvas canvas. I click on the canvas to apply my FloodFill(). This works fine and applies my color. But as soon as I go to drag my canvas objects around, or add additional objects to the canvas, the color disappears, as if it resets.
Any idea why this is?
This happens because fabric.js wipes the whole canvas every frame and redraws it from its internal object data.
I made a JSfiddle that implements Flood Fill for Fabric JS. Check it here: https://jsfiddle.net/av01d/dfvp9j2u/
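The core idea, distilled from the fiddle code below: run the flood fill against the rendered pixels, copy the filled region into an offscreen canvas, and add that back to the fabric canvas as a fabric.Image, so fabric redraws it along with everything else. A rough sketch of just that persistence step (data is the result object produced by FloodFill.fill below):
// copy the filled region into its own canvas ...
const tmpCanvas = document.createElement('canvas');
tmpCanvas.width = data.width;
tmpCanvas.height = data.height;
// ... write data.coords into tmpCanvas via putImageData (see the full code below) ...
// ... then add it as a regular fabric object so it survives re-renders
fcanvas.add(new fabric.Image(tmpCanvas, {
  left: data.x,
  top: data.y,
  selectable: false
}));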
/*
* FloodFill for fabric.js
* @author Arjan Haverkamp (av01d)
* @date October 2018
*/
var FloodFill = {
// Compare subsection of array1's values to array2's values, with an optional tolerance
withinTolerance: function(array1, offset, array2, tolerance)
{
var length = array2.length,
start = offset + length;
tolerance = tolerance || 0;
// Iterate (in reverse) the items being compared in each array, checking their values are
// within tolerance of each other
while(start-- && length--) {
if(Math.abs(array1[start] - array2[length]) > tolerance) {
return false;
}
}
return true;
},
// The actual flood fill implementation
fill: function(imageData, getPointOffsetFn, point, color, target, tolerance, width, height)
{
var directions = [[1, 0], [0, 1], [0, -1], [-1, 0]],
coords = [],
points = [point],
seen = {},
key,
x,
y,
offset,
i,
x2,
y2,
minX = -1,
maxX = -1,
minY = -1,
maxY = -1;
// Keep going while we have points to walk
while (!!(point = points.pop())) {
x = point.x;
y = point.y;
offset = getPointOffsetFn(x, y);
// Move to next point if this pixel isn't within tolerance of the color being filled
if (!FloodFill.withinTolerance(imageData, offset, target, tolerance)) {
continue;
}
if (x > maxX) { maxX = x; }
if (y > maxY) { maxY = y; }
if (x < minX || minX == -1) { minX = x; }
if (y < minY || minY == -1) { minY = y; }
// Update the pixel to the fill color and add neighbours onto stack to traverse
// the fill area
i = directions.length;
while (i--) {
// Use the same loop for setting RGBA as for checking the neighbouring pixels
if (i < 4) {
imageData[offset + i] = color[i];
coords[offset+i] = color[i];
}
// Get the new coordinate by adjusting x and y based on current step
x2 = x + directions[i][0];
y2 = y + directions[i][1];
key = x2 + ',' + y2;
// If new coordinate is out of bounds, or we've already added it, then skip to
// trying the next neighbour without adding this one
if (x2 < 0 || y2 < 0 || x2 >= width || y2 >= height || seen[key]) {
continue;
}
// Push neighbour onto points array to be processed, and tag as seen
points.push({ x: x2, y: y2 });
seen[key] = true;
}
}
return {
x: minX,
y: minY,
width: maxX-minX,
height: maxY-minY,
coords: coords
}
}
}; // End FloodFill
var fcanvas; // Fabric Canvas
var fillColor = '#f00';
var fillTolerance = 2;
function hexToRgb(hex, opacity) {
opacity = Math.round(opacity * 255) || 255;
hex = hex.replace('#', '');
var rgb = [], re = new RegExp('(.{' + hex.length/3 + '})', 'g');
hex.match(re).map(function(l) {
rgb.push(parseInt(hex.length % 2 ? l+l : l, 16));
});
return rgb.concat(opacity);
}
function floodFill(enable) {
if (!enable) {
fcanvas.off('mouse:down');
fcanvas.selection = true;
fcanvas.forEachObject(function(object){
object.selectable = true;
});
return;
}
fcanvas.deactivateAll().renderAll(); // Hide object handles!
fcanvas.selection = false;
fcanvas.forEachObject(function(object){
object.selectable = false;
});
fcanvas.on({
'mouse:down': function(e) {
var mouse = fcanvas.getPointer(e.e),
mouseX = Math.round(mouse.x), mouseY = Math.round(mouse.y),
canvas = fcanvas.lowerCanvasEl,
context = canvas.getContext('2d'),
parsedColor = hexToRgb(fillColor),
imageData = context.getImageData(0, 0, canvas.width, canvas.height),
getPointOffset = function(x,y) {
return 4 * (y * imageData.width + x)
},
targetOffset = getPointOffset(mouseX, mouseY),
target = imageData.data.slice(targetOffset, targetOffset + 4);
if (FloodFill.withinTolerance(target, 0, parsedColor, fillTolerance)) {
// Trying to fill something which is (essentially) the fill color
console.log('Ignore... same color')
return;
}
// Perform flood fill
var data = FloodFill.fill(
imageData.data,
getPointOffset,
{ x: mouseX, y: mouseY },
parsedColor,
target,
fillTolerance,
imageData.width,
imageData.height
);
if (0 == data.width || 0 == data.height) {
return;
}
var tmpCanvas = document.createElement('canvas'), tmpCtx = tmpCanvas.getContext('2d');
tmpCanvas.width = canvas.width;
tmpCanvas.height = canvas.height;
var palette = tmpCtx.getImageData(0, 0, tmpCanvas.width, tmpCanvas.height); // x, y, w, h
palette.data.set(new Uint8ClampedArray(data.coords)); // Assuming values 0..255, RGBA
tmpCtx.putImageData(palette, 0, 0); // Repost the data.
var imgData = tmpCtx.getImageData(data.x, data.y, data.width, data.height); // Get cropped image
tmpCanvas.width = data.width;
tmpCanvas.height = data.height;
tmpCtx.putImageData(imgData,0,0);
fcanvas.add(new fabric.Image(tmpCanvas, {
left: data.x,
top: data.y,
selectable: false
}))
}
});
}
$(function() {
// Init Fabric Canvas:
fcanvas = new fabric.Canvas('c', {
backgroundColor:'#fff',
enableRetinaScaling: false
});
// Add some demo-shapes:
fcanvas.add(new fabric.Circle({
radius: 80,
fill: false,
left: 100,
top: 100,
stroke: '#000',
strokeWidth: 2
}));
fcanvas.add(new fabric.Triangle({
width: 120,
height: 160,
left: 50,
top: 50,
stroke: '#000',
fill: '#00f',
strokeWidth: 2
}));
fcanvas.add(new fabric.Rect({
width: 120,
height: 160,
left: 150,
top: 50,
fill: 'red',
stroke: '#000',
strokeWidth: 2
}));
fcanvas.add(new fabric.Rect({
width: 200,
height: 120,
left: 200,
top: 120,
fill: 'green',
stroke: '#000',
strokeWidth: 2
}));
/* Images work very well too. Make sure they're CORS
enabled though! */
var img = new Image();
img.crossOrigin = 'anonymous';
img.onload = function() {
fcanvas.add(new fabric.Image(img, {
left: 300,
top: 100,
angle: 30,
}));
}
img.src = 'http://misc.avoid.org/chip.png';
});
