WebGL - jagged edges despite antialiasing

Here is a line made up by two triangles in WebGL2: https://codepen.io/Candleout/pen/bGMYpNM
At least on my computer, the line looks a bit weird and is not as smooth as you would expect:
Antialiasing is turned on, and has some effect, but as you can see in the picture above the line is still not entirely smooth.
The code is based on this article, with the same shaders as in the first example: https://wwwtyro.net/2019/11/18/instanced-lines.html
As you can see, the lines look smoother in that article, maybe because regl is doing something in the background?
Is there something wrong with, or missing from, my setup that causes the problem?
I'm running WebGL on a MacBook Air from 2013.
// HTML:
<canvas id="canvas"></canvas>
// JS:
// Setup WebGL context
const canvas = document.getElementById('canvas');
canvas.width = window.innerWidth;
canvas.height = window.innerHeight;
const gl = canvas.getContext('webgl2', { antialias: true });
// Shaders
const vertexShaderSrc = `
precision highp float;
attribute vec2 position, pointA, pointB;
uniform mat3 projection;
uniform float width;
void main() {
vec2 xBasis = pointB - pointA;
vec2 yBasis = normalize(vec2(-xBasis.y, xBasis.x));
vec2 point = pointA + xBasis * position.x + yBasis * width * position.y;
gl_Position = vec4(projection * vec3(point, 1), 1);
}`;
const fragmentShaderSrc = `
precision highp float;
void main() {
gl_FragColor = vec4(1.0, 0.0, 0.0, 1.0);
}`;
// Setup program with shaders
const program = gl.createProgram();
const vertexShader = gl.createShader(gl.VERTEX_SHADER);
gl.shaderSource(vertexShader, vertexShaderSrc);
gl.compileShader(vertexShader);
gl.attachShader(program, vertexShader);
const fragmentShader = gl.createShader(gl.FRAGMENT_SHADER);
gl.shaderSource(fragmentShader, fragmentShaderSrc);
gl.compileShader(fragmentShader);
gl.attachShader(program, fragmentShader);
gl.linkProgram(program);
if (!gl.getProgramParameter(program, gl.LINK_STATUS)) {
console.log(gl.getShaderInfoLog(vertexShader));
console.log(gl.getShaderInfoLog(fragmentShader));
}
gl.useProgram(program);
function main() {
const positionLoc = gl.getAttribLocation(program, 'position');
const pointALoc = gl.getAttribLocation(program, 'pointA');
const pointBLoc = gl.getAttribLocation(program, 'pointB');
const projectionLoc = gl.getUniformLocation(program, 'projection');
const widthLoc = gl.getUniformLocation(program, 'width');
// Line geometry:
const geometryData = new Float32Array([
// position
0.0, -0.5,
1.0, -0.5,
1.0, 0.5,
0.0, -0.5,
1.0, 0.5,
0.0, 0.5
]);
const geometryBuffer = gl.createBuffer();
gl.bindBuffer(gl.ARRAY_BUFFER, geometryBuffer);
gl.bufferData(gl.ARRAY_BUFFER, geometryData, gl.STATIC_DRAW);
gl.vertexAttribPointer(
positionLoc, // attribute
2, // size
gl.FLOAT, // type
false, // normalize
8, // stride
0); // offset
gl.enableVertexAttribArray(positionLoc);
// End points:
const pointData = new Float32Array([
// pointA
6.123233995736766e-17,
1,
// pointB
-0.8660254037844387,
0.49999999999999994
]);
const pointsBuffer = gl.createBuffer();
gl.bindBuffer(gl.ARRAY_BUFFER, pointsBuffer);
gl.bufferData(gl.ARRAY_BUFFER, pointData, gl.STATIC_DRAW);
gl.vertexAttribPointer(
pointALoc, // attribute
2, // size
gl.FLOAT, // type
false, // normalize
16, // stride
0); // offset
gl.vertexAttribDivisor(pointALoc, 1);
gl.enableVertexAttribArray(pointALoc);
gl.vertexAttribPointer(
pointBLoc, // attribute
2, // size
gl.FLOAT, // type
false, // normalize
16, // stride
8); // offset
gl.vertexAttribDivisor(pointBLoc, 1);
gl.enableVertexAttribArray(pointBLoc);
// Projection matrix - for 90 degree angles
let aspect = gl.canvas.clientWidth / gl.canvas.clientHeight;
gl.uniformMatrix3fv(projectionLoc, false, new Float32Array([
2 / (aspect - -aspect), 0, 0,
0, 1, 0,
0, 0, 1
])
)
gl.uniform1f(
widthLoc, // uniform
0.025) // thickness
gl.drawArraysInstanced(
gl.TRIANGLES, // method
0, // offset
6, // number of vertices per instance
1 // number of instances
);
}
main();
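One detail the snippet above does not account for: the drawing buffer is sized from window.innerWidth/innerHeight only, so on a high-DPI display (or with browser zoom) the buffer ends up smaller than the device pixels the canvas covers and the browser scales it up, which can leave edges looking stepped even with MSAA. A minimal sketch of sizing that includes devicePixelRatio (not part of the original question):
// Sketch: size the drawing buffer in device pixels so the browser doesn't upscale it
const canvas = document.getElementById('canvas');
const dpr = window.devicePixelRatio || 1;
canvas.width = Math.round(window.innerWidth * dpr);
canvas.height = Math.round(window.innerHeight * dpr);
canvas.style.width = window.innerWidth + 'px';   // CSS size stays in CSS pixels
canvas.style.height = window.innerHeight + 'px';
const gl = canvas.getContext('webgl2', { antialias: true });
gl.viewport(0, 0, canvas.width, canvas.height);  // match the viewport to the buffer size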

Related

How to rotate a cube by quaternion in three.js?

I have some problems understanding how to rotate a figure by a quaternion. Can somebody please explain how to do it? In the render function I want to rotate the cubes by quaternions.
function main() {
const canvas = document.querySelector('#c');
const renderer = new THREE.WebGLRenderer({canvas});
const fov = 100;
const aspect = 2; // the canvas default
const near = 0.1;
const far = 5;
const camera = new THREE.PerspectiveCamera(fov, aspect, near, far);
camera.position.z = 3;
const scene = new THREE.Scene();
{
const color = 0xFFFFFF;
const intensity = 1;
const light = new THREE.DirectionalLight(color, intensity);
light.position.set(-1, 2, 4);
scene.add(light);
}
function makeInstance(color, x, width, height, depth) {
const material = new THREE.MeshPhongMaterial({color});
const geometry = new THREE.BoxGeometry(width, height, depth);
const cube = new THREE.Mesh(geometry, material);
scene.add(cube);
cube.position.x = x;
return cube;
}
const cubes = [
makeInstance(0x8844aa, -2, 3, 1, 1),
makeInstance(0xaa8844, 0.5, 2, 1, 1),
];
function resizeRendererToDisplaySize(renderer) {
const canvas = renderer.domElement;
const width = canvas.clientWidth;
const height = canvas.clientHeight;
const needResize = canvas.width !== width || canvas.height !== height;
if (needResize) {
renderer.setSize(width, height, false);
}
return needResize;
}
function render(time) {
time *= 0.001;
if (resizeRendererToDisplaySize(renderer)) {
const canvas = renderer.domElement;
camera.aspect = canvas.clientWidth / canvas.clientHeight;
camera.updateProjectionMatrix();
}
// cubes.forEach((cube, ndx) => {
//const speed = 1 + ndx * .1;
//const rot = time * speed;
//cube.rotation.x = rot;
//cube.rotation.y = rot;
//});
renderer.render(scene, camera);
requestAnimationFrame(render);
}
requestAnimationFrame(render);
}
main();
You have an Object3d (Points, Lines, Meshes, etc.) that you want to rotate via quaternions. You have a mesh (the cube). The immediate answer is to:
cube.applyQuaternion(myquat);
And where does myquat come from? Probably from one of these:
myquat = new THREE.Quaternion(); // identity to start with; then set it using one of:
myquat.setFromAxisAngle ( axis : Vector3, angle : Float )
myquat.setFromEuler ( euler : Euler )
myquat.setFromRotationMatrix ( m : Matrix4 )
myquat.setFromUnitVectors ( vFrom : Vector3, vTo : Vector3 )
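For example, a minimal sketch of applying one of these per frame inside the render function from the question (the axis and rotation speed here are arbitrary choices, not from the original code):
function render(time) {
  time *= 0.001;
  cubes.forEach((cube, ndx) => {
    const speed = 1 + ndx * .1;
    // build a small incremental rotation about the Y axis each frame
    const myquat = new THREE.Quaternion();
    myquat.setFromAxisAngle(new THREE.Vector3(0, 1, 0), speed * 0.01);
    cube.applyQuaternion(myquat); // accumulates onto the cube's current orientation
  });
  renderer.render(scene, camera);
  requestAnimationFrame(render);
}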
I hope this gives you a start, even if it's just to ask a more specific question.

How to properly blend colors across two triangles and remove diagonal smear

I am learning WebGL and I've drawn a full screen quad with colors for each vertex. No lighting or normals or perspective matrix or depth buffer; I'm just drawing a gradient background. This is what I get:
It looks good but I cannot help noticing the diagonal smear from the bottom right to the top left. I feel this is an artifact of linearly interpolating the far opposite vertices. I'm drawing two triangles: the bottom left, and the top right. I think I would get similar results using OpenGL instead of WebGL.
Given the same four colors and the same size rectangle, is there a way to render this so the edge between the two triangles isn't so apparent? Maybe more vertices, or a different blending function? I'm not sure exactly what the colors should be at each pixel; I just want to know how to get rid of the diagonal smear.
The issue is that the top right triangle has no knowledge of the bottom left corner, so the top right triangle does not include any of the blue from the bottom left (and vice versa).
A couple of ways to fix that.
One is to use a 2x2 texture with linear sampling. You have to do some extra math to get the interpolation correct because a texture only interpolates between pixels
+-------+-------+
| | |
| +-------+ |
| | | | |
+---|---+---|---+
| | | | |
| +-------+ |
| | |
+-------+-------+
Above is a 4 pixel texture stretched to 14 by 6. Sampling happens between pixels, so only this center area will get the gradient. Outside that area, sampling would need pixels from outside the texture, so it clamps to the edge pixels with CLAMP_TO_EDGE (or wraps to the opposite side of the texture with REPEAT).
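For a 2x2 texture that remapping pushes the texture coordinates in to the pixel centers; plugging numbers into the formula used in the fragment shader below:
// (v_texcoord * (texSize - 1.0) + 0.5) / texSize, with texSize = 2:
// v_texcoord = 0.0  ->  (0.0 * 1 + 0.5) / 2 = 0.25   (center of the first pixel)
// v_texcoord = 1.0  ->  (1.0 * 1 + 0.5) / 2 = 0.75   (center of the second pixel)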
const gl = document.querySelector('canvas').getContext('webgl');
const tl = [254, 217, 138];
const tr = [252, 252, 252];
const bl = [18, 139, 184];
const br = [203, 79, 121];
const tex = gl.createTexture();
gl.bindTexture(gl.TEXTURE_2D, tex);
gl.pixelStorei(gl.UNPACK_ALIGNMENT, 1);
gl.texImage2D(
gl.TEXTURE_2D,
0, // mip level
gl.RGB, // internal format
2, // width,
2, // height,
0, // border
gl.RGB, // format
gl.UNSIGNED_BYTE, // type
new Uint8Array([...bl, ...br, ...tl, ...tr]));
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_S, gl.CLAMP_TO_EDGE);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.LINEAR);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, gl.LINEAR);
const vs = `
attribute vec4 position;
attribute vec2 texcoord;
varying vec2 v_texcoord;
void main() {
gl_Position = position;
v_texcoord = texcoord;
}
`;
const fs = `
precision mediump float;
varying vec2 v_texcoord;
const vec2 texSize = vec2(2, 2); // could pass this in
uniform sampler2D tex;
void main() {
gl_FragColor = texture2D(tex,
(v_texcoord * (texSize - 1.0) + 0.5) / texSize);
}
`;
const program = twgl.createProgram(gl, [vs, fs]);
const positionLoc = gl.getAttribLocation(program, 'position');
const texcoordLoc = gl.getAttribLocation(program, 'texcoord');
function createBufferAndSetupAttribute(loc, data) {
const buffer = gl.createBuffer();
gl.bindBuffer(gl.ARRAY_BUFFER, buffer);
gl.bufferData(gl.ARRAY_BUFFER, new Float32Array(data), gl.STATIC_DRAW);
gl.enableVertexAttribArray(loc);
gl.vertexAttribPointer(
loc,
2, // 2 elements per iteration
gl.FLOAT, // type of data in buffer
false, // normalize
0, // stride
0, // offset
);
}
createBufferAndSetupAttribute(positionLoc, [
-1, -1,
1, -1,
-1, 1,
-1, 1,
1, -1,
1, 1,
]);
createBufferAndSetupAttribute(texcoordLoc, [
0, 0,
1, 0,
0, 1,
0, 1,
1, 0,
1, 1,
]);
gl.useProgram(program);
// note: no need to set sampler uniform as it defaults
// to 0 which is what we'd set it to anyway.
gl.drawArrays(gl.TRIANGLES, 0, 6);
canvas { border: 1px solid black; }
<canvas></canvas>
<script src="https://twgljs.org/dist/4.x/twgl.min.js"></script>
Note: to see what I mean about the extra math needed for the texture coordinates, here is the same example without the extra math.
const gl = document.querySelector('canvas').getContext('webgl');
const tl = [254, 217, 138];
const tr = [252, 252, 252];
const bl = [18, 139, 184];
const br = [203, 79, 121];
const tex = gl.createTexture();
gl.bindTexture(gl.TEXTURE_2D, tex);
gl.pixelStorei(gl.UNPACK_ALIGNMENT, 1);
gl.texImage2D(
gl.TEXTURE_2D,
0, // mip level
gl.RGB, // internal format
2, // width,
2, // height,
0, // border
gl.RGB, // format
gl.UNSIGNED_BYTE, // type
new Uint8Array([...bl, ...br, ...tl, ...tr]));
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_S, gl.CLAMP_TO_EDGE);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.LINEAR);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, gl.LINEAR);
const vs = `
attribute vec4 position;
attribute vec2 texcoord;
varying vec2 v_texcoord;
void main() {
gl_Position = position;
v_texcoord = texcoord;
}
`;
const fs = `
precision mediump float;
varying vec2 v_texcoord;
uniform sampler2D tex;
void main() {
gl_FragColor = texture2D(tex, v_texcoord);
}
`;
const program = twgl.createProgram(gl, [vs, fs]);
const positionLoc = gl.getAttribLocation(program, 'position');
const texcoordLoc = gl.getAttribLocation(program, 'texcoord');
function createBufferAndSetupAttribute(loc, data) {
const buffer = gl.createBuffer();
gl.bindBuffer(gl.ARRAY_BUFFER, buffer);
gl.bufferData(gl.ARRAY_BUFFER, new Float32Array(data), gl.STATIC_DRAW);
gl.enableVertexAttribArray(loc);
gl.vertexAttribPointer(
loc,
2, // 2 elements per iteration
gl.FLOAT, // type of data in buffer
false, // normalize
0, // stride
0, // offset
);
}
createBufferAndSetupAttribute(positionLoc, [
-1, -1,
1, -1,
-1, 1,
-1, 1,
1, -1,
1, 1,
]);
createBufferAndSetupAttribute(texcoordLoc, [
0, 0,
1, 0,
0, 1,
0, 1,
1, 0,
1, 1,
]);
gl.useProgram(program);
// note: no need to set sampler uniform as it defaults
// to 0 which is what we'd set it to anyway.
gl.drawArrays(gl.TRIANGLES, 0, 6);
canvas { border: 1px solid black; }
<canvas></canvas>
<script src="https://twgljs.org/dist/4.x/twgl.min.js"></script>
Also of course, rather than do the math in the fragment shader we could fix the texture coordinates in JavaScript
const gl = document.querySelector('canvas').getContext('webgl');
const tl = [254, 217, 138];
const tr = [252, 252, 252];
const bl = [18, 139, 184];
const br = [203, 79, 121];
const tex = gl.createTexture();
gl.bindTexture(gl.TEXTURE_2D, tex);
gl.pixelStorei(gl.UNPACK_ALIGNMENT, 1);
gl.texImage2D(
gl.TEXTURE_2D,
0, // mip level
gl.RGB, // internal format
2, // width,
2, // height,
0, // border
gl.RGB, // format
gl.UNSIGNED_BYTE, // type
new Uint8Array([...bl, ...br, ...tl, ...tr]));
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_S, gl.CLAMP_TO_EDGE);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.LINEAR);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, gl.LINEAR);
const vs = `
attribute vec4 position;
attribute vec2 texcoord;
varying vec2 v_texcoord;
void main() {
gl_Position = position;
v_texcoord = texcoord;
}
`;
const fs = `
precision mediump float;
varying vec2 v_texcoord;
uniform sampler2D tex;
void main() {
gl_FragColor = texture2D(tex, v_texcoord);
}
`;
const program = twgl.createProgram(gl, [vs, fs]);
const positionLoc = gl.getAttribLocation(program, 'position');
const texcoordLoc = gl.getAttribLocation(program, 'texcoord');
function createBufferAndSetupAttribute(loc, data) {
const buffer = gl.createBuffer();
gl.bindBuffer(gl.ARRAY_BUFFER, buffer);
gl.bufferData(gl.ARRAY_BUFFER, new Float32Array(data), gl.STATIC_DRAW);
gl.enableVertexAttribArray(loc);
gl.vertexAttribPointer(
loc,
2, // 2 elements per iteration
gl.FLOAT, // type of data in buffer
false, // normalize
0, // stride
0, // offset
);
}
createBufferAndSetupAttribute(positionLoc, [
-1, -1,
1, -1,
-1, 1,
-1, 1,
1, -1,
1, 1,
]);
createBufferAndSetupAttribute(texcoordLoc, [
0.25, 0.25,
0.75, 0.25,
0.25, 0.75,
0.25, 0.75,
0.75, 0.25,
0.75, 0.75,
]);
gl.useProgram(program);
// note: no need to set sampler uniform as it defaults
// to 0 which is what we'd set it to anyway.
gl.drawArrays(gl.TRIANGLES, 0, 6);
canvas { border: 1px solid black; }
<canvas></canvas>
<script src="https://twgljs.org/dist/4.x/twgl.min.js"></script>
Another way is to do the interpolation yourself based on those corners (which is effectively doing what the texture sampler is doing in the previous example, bi-linear interpolation of the 4 colors).
const gl = document.querySelector('canvas').getContext('webgl');
const tl = [254/255, 217/255, 138/255];
const tr = [252/255, 252/255, 252/255];
const bl = [ 18/255, 139/255, 184/255];
const br = [203/255, 79/255, 121/255];
const vs = `
attribute vec4 position;
attribute vec2 texcoord;
varying vec2 v_texcoord;
void main() {
gl_Position = position;
v_texcoord = texcoord;
}
`;
const fs = `
precision mediump float;
varying vec2 v_texcoord;
uniform vec3 tl;
uniform vec3 tr;
uniform vec3 bl;
uniform vec3 br;
void main() {
vec3 l = mix(bl, tl, v_texcoord.t);
vec3 r = mix(br, tr, v_texcoord.t);
vec3 c = mix(l, r, v_texcoord.s);
gl_FragColor = vec4(c, 1);
}
`;
const program = twgl.createProgram(gl, [vs, fs]);
const positionLoc = gl.getAttribLocation(program, 'position');
const texcoordLoc = gl.getAttribLocation(program, 'texcoord');
const tlLoc = gl.getUniformLocation(program, 'tl');
const trLoc = gl.getUniformLocation(program, 'tr');
const blLoc = gl.getUniformLocation(program, 'bl');
const brLoc = gl.getUniformLocation(program, 'br');
function createBufferAndSetupAttribute(loc, data) {
const buffer = gl.createBuffer();
gl.bindBuffer(gl.ARRAY_BUFFER, buffer);
gl.bufferData(gl.ARRAY_BUFFER, new Float32Array(data), gl.STATIC_DRAW);
gl.enableVertexAttribArray(loc);
gl.vertexAttribPointer(
loc,
2, // 2 elements per iteration
gl.FLOAT, // type of data in buffer
false, // normalize
0, // stride
0, // offset
);
}
createBufferAndSetupAttribute(positionLoc, [
-1, -1,
1, -1,
-1, 1,
-1, 1,
1, -1,
1, 1,
]);
createBufferAndSetupAttribute(texcoordLoc, [
0, 0,
1, 0,
0, 1,
0, 1,
1, 0,
1, 1,
]);
gl.useProgram(program);
gl.uniform3fv(tlLoc, tl);
gl.uniform3fv(trLoc, tr);
gl.uniform3fv(blLoc, bl);
gl.uniform3fv(brLoc, br);
gl.drawArrays(gl.TRIANGLES, 0, 6);
canvas { border: 1px solid black; }
<canvas></canvas>
<script src="https://twgljs.org/dist/4.x/twgl.min.js"></script>
Number of dimensions
You should be passing the 2D coordinate space of the quad to the fragment shader rather than the one dimensional (per channel) color space.
Then in the fragment shader you can do the color interpolation in 2D space removing the color artifact due to the diagonal line interpolating in 1D.
The shader snippet for linear color interpolation, where coord2D is the 2D coordinate space
pixel = vec4(vec3(mix(
mix(colors[0], colors[1], coord2D.x),
mix(colors[2], colors[3], coord2D.x),
coord2D.y
)), 1);
Improved color interpolation
When interpolating colors by their RGB values the results can visually darken between opposing hues.
An easy fix is to use a closer approximation of the sRGB color model by interpolating between the squares of the color channel values. The final output is the square root of the interpolated values.
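As a quick worked example of that darkening, mixing pure red and pure green at 50% (values rounded):
// straight RGB lerp:
//   mix((255, 0, 0), (0, 255, 0), 0.5) = (127.5, 127.5, 0)        -> a fairly dark yellow
// lerping the squares and taking the square root of the result:
//   mix((255^2, 0, 0), (0, 255^2, 0), 0.5) = (32512.5, 32512.5, 0)
//   sqrt -> (~180, ~180, 0)                                        -> a noticeably brighter yellow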
The interpolation snippet.
pixel = vec4(sqrt(vec3(mix(
mix(colors[0], colors[1], coord2D.x),
mix(colors[2], colors[3], coord2D.x),
coord2D.y
))) / 255.0, 1);
Note that the color channel values in the uniform colors are stored as squares [R^2, G^2, B^2] and thus range from 0 to 65025.
Example
In the example click the canvas to switch between interpolation methods.
You will note that when using the ~sRGB approximation, the brightness from the center of the canvas out towards the center edges does not dip as far below the perceived brightness at the corners.
Also note that the balance of the transition from the bottom blue and reddish colors to the top orange and white moves down closer to the center. This is because interpolating in the RGB model darkens colors that have strong components in two or more channels: reds, greens, blues and blacks will dominate over yellows, cyans, magentas and whites, making the interpolation seem to shift and stretch out the RGB primaries.
var program, colorsLoc, modelLoc, loc, text = " interpolation. Click for ", model = "RGB"; // or sRGB
const vertSrc = `#version 300 es
in vec2 verts;
out vec2 coord2D;
void main() {
coord2D = verts * 0.5 + 0.5; // convert to quad space 0,0 <=> 1, 1
gl_Position = vec4(verts, 1, 1);
}`;
const fragSrc = `#version 300 es
#define channelMax 255.0
// color location indexes
#define TR 3
#define TL 2
#define BR 1
#define BL 0
precision mediump float;
uniform vec3 colors[4];
uniform bool isRGB;
in vec2 coord2D;
out vec4 pixel;
void main() {
if (isRGB) {
pixel = vec4(vec3(mix(
mix(colors[BL], colors[BR], coord2D.x),
mix(colors[TL], colors[TR], coord2D.x),
coord2D.y
)) / channelMax, 1);
} else {
pixel = vec4(vec3(sqrt(mix(
mix(colors[BL], colors[BR], coord2D.x),
mix(colors[TL], colors[TR], coord2D.x),
coord2D.y
))) / channelMax, 1);
}
}`;
const fArr = arr => new Float32Array(arr);
const colors = [64,140,190, 224,81,141, 247,223,140, 245,245,245];
const gl = canvas.getContext("webgl2", {premultipliedAlpha: false, antialias: false, alpha: false});
addEventListener("resize", draw);
addEventListener("click", draw);
setup();
draw();
function compileShader(src, type, shader = gl.createShader(type)) {
gl.shaderSource(shader, src);
gl.compileShader(shader);
return shader;
}
function setup() {
program = gl.createProgram();
gl.attachShader(program, compileShader(vertSrc, gl.VERTEX_SHADER));
gl.attachShader(program, compileShader(fragSrc, gl.FRAGMENT_SHADER));
gl.linkProgram(program);
gl.bindBuffer(gl.ELEMENT_ARRAY_BUFFER, gl.createBuffer());
gl.bufferData(gl.ELEMENT_ARRAY_BUFFER, new Uint8Array([0,1,2,0,2,3]), gl.STATIC_DRAW);
gl.bindBuffer(gl.ARRAY_BUFFER, gl.createBuffer());
gl.bufferData(gl.ARRAY_BUFFER, fArr([-1,-1,1,-1,1,1,-1,1]), gl.STATIC_DRAW);
gl.enableVertexAttribArray(loc = gl.getAttribLocation(program, "verts"));
gl.vertexAttribPointer(loc, 2, gl.FLOAT, false, 0, 0);
colorsLoc = gl.getUniformLocation(program, "colors");
modelLoc = gl.getUniformLocation(program, "isRGB");
gl.useProgram(program);
}
function draw() {
[info.textContent, model] = model != "RGB"? [`RGB${text}~sRGB.`, "RGB"]: [`~sRGB${text}RGB.`, "~sRGB"];
if (canvas.width !== innerWidth || canvas.height !== innerHeight) {
[canvas.width, canvas.height] = [innerWidth, innerHeight];
gl.viewport(0, 0, canvas.width, canvas.height);
}
gl.uniform3fv(colorsLoc, fArr(colors.map(v => model=="RGB"? v: v*v)), 0, 12);
gl.uniform1i(modelLoc, model=="RGB");
gl.drawElements(gl.TRIANGLES, 6, gl.UNSIGNED_BYTE, 0);
}
body {
padding: 0px;
margin: 0px;
font-family: arial;
color: white;
}
canvas {
position: absolute;
top: 0px;
left: 0px;
}
h2 {
position: absolute;
bottom: 0px;
left: 0px;
right: 0px;
text-align: center;
}
<canvas id="canvas"></canvas>
<h2 id="info"></h2>

WebGL creating multiple objects?

So I am trying to create circles using the midpoint algorithm. I'm having trouble with how to handle buffers and basically how to get WebGL properly set up. Using the console I can see that the algorithm is working fine and making the vertex array, but I need help understanding what to do with useProgram, createBuffer and drawArrays. Where should I place them?
Also, should I concat the circles every time I call circle() in the START() function?
like: circle(blah blah).concat(circle(blah blah));
var vertexShaderText =
[
'precision mediump float;',
'',
'attribute vec2 vertPosition;',
'attribute vec3 vertColor;',
'varying vec3 fragColor;',
'',
'void main()',
'{',
' fragColor = vertColor;',
' gl_Position = vec4(vertPosition, 0.0, 1.0);',
'}'
].join('\n');
var fragmentShaderText =
[
'precision mediump float;',
'',
'varying vec3 fragColor;',
'void main()',
'{',
' gl_FragColor = vec4(fragColor, 1.0);',
'}'
].join('\n');
var START = function () {
console.log('This is working');
var canvas = document.getElementById('sky');
var gl = canvas.getContext('webgl');
if (!gl) {
console.log('WebGL not supported, falling back on experimental-webgl');
gl = canvas.getContext('experimental-webgl');
}
if (!gl) {
alert('Your browser does not support WebGL');
}
gl.clearColor(.3, .3, .7, 1.0);
gl.clear(gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT);
// Create shaders
var vertexShader = gl.createShader(gl.VERTEX_SHADER);
var fragmentShader =
gl.createShader(gl.FRAGMENT_SHADER);
gl.shaderSource(vertexShader, vertexShaderText);
gl.shaderSource(fragmentShader, fragmentShaderText);
//create a program for the shaders
var program = gl.createProgram();
gl.attachShader(program, vertexShader);
gl.attachShader(program, fragmentShader);
gl.linkProgram(program);
gl.useProgram(program);
var circle = function (xmid, ymid, r) {
var points = [];
var x = 0;
var y = r;
var pk = 5/4 - r;
while (x < y)
{
if (pk < 0)
{
x++;
pk += 2*x + 1;
}
else
{
x++;
y--;
pk += 2 * (x-y) + 1;
}
points.push(x+xmid, y+ymid);
points.push(x+xmid, -y+ymid);
points.push(-x+xmid, y+ymid);
points.push(-x+xmid, -y+ymid);
points.push(y+xmid, x+ymid);
points.push(y+xmid, -x+ymid);
points.push(-y+xmid, x+ymid);
points.push(-y+xmid, -x+ymid);
}
var cbuffer = gl.createBuffer();
gl.bindBuffer(gl.ARRAY_BUFFER, cbuffer);
gl.bufferData(gl.ARRAY_BUFFER, new Float32Array(points),
gl.STATIC_DRAW);
gl.drawArrays(gl.POINTS, 0, points.length/2);
var positionAttribLocation = gl.getAttribLocation(program,
'vertPosition');
var colorAttribLocation = gl.getAttribLocation(program,
'vertColor');
gl.vertexAttribPointer(
positionAttribLocation, // Attribute location
2, // Number of elements per attribute
gl.FLOAT, // Type of elements
gl.FALSE,
5 * Float32Array.BYTES_PER_ELEMENT, // Size of an individual vertex
0 // Offset from the beginning of a single vertex to this attribute
);
gl.enableVertexAttribArray(positionAttribLocation);
gl.enableVertexAttribArray(colorAttribLocation);
return points;
}
circle(0.6, 0.6, 0.18);
circle(0.9, 0.6, 0.18);
circle(0.5, 0.4, 0.18);
circle(1.0, 0.4, 0.18);
circle(0.75, 0.4, 0.18);
circle(0.75, 0.4, 0.18);
}
START();
<canvas id="sky"></canvas>
This is what my console log is saying:
6WebGL: INVALID_OPERATION: useProgram: program not valid
6WebGL: INVALID_OPERATION: drawArrays: no valid shader program in use
12WebGL: INVALID_OPERATION: getAttribLocation: program not linked
You can clearly see that I am linking and using the program at the very beginning. So what gives?
There's more than one issue with the code:
The shaders are not compiled. After setting the shader source with gl.shaderSource you need to compile them with gl.compileShader. You should also be checking for errors by calling gl.getShaderParameter(shader, gl.COMPILE_STATUS), and you should be checking for errors after linking by calling gl.getProgramParameter(program, gl.LINK_STATUS).
gl.drawArrays is called before setting the attributes
The code is enabling 2 attributes but only supplying data for 1 attribute.
The code is drawing gl.POINTS but the vertex shader is not setting gl_PointSize
I also don't really understand your circle code but since I don't know what it's really trying to do I can't fix it.
And finally you should probably read some tutorials on WebGL
I'd also suggest you use multiline template literals for your shaders
const vertexShaderText = `
precision mediump float;
attribute vec2 vertPosition;
attribute vec3 vertColor;
varying vec3 fragColor;
void main()
{
fragColor = vertColor;
gl_Position = vec4(vertPosition, 0.0, 1.0);
gl_PointSize = 5.;
}
`;
const fragmentShaderText = `
precision mediump float;
varying vec3 fragColor;
void main()
{
gl_FragColor = vec4(fragColor, 1.0);
}
`;
const start = function () {
console.log('This is working');
const canvas = document.getElementById('sky');
const gl = canvas.getContext('webgl');
if (!gl) {
alert('Your browser does not support WebGL');
return;
}
gl.clearColor(.3, .3, .7, 1.0);
gl.clear(gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT);
//create a shader program
const program = createProgram(gl, vertexShaderText, fragmentShaderText);
gl.useProgram(program);
const circle = function (xmid, ymid, r) {
const points = [];
let x = 0;
let y = r;
let pk = 5/4 - r;
while (x < y)
{
if (pk < 0)
{
x++;
pk += 2*x + 1;
}
else
{
x++;
y--;
pk += 2 * (x-y) + 1;
}
points.push(x+xmid, y+ymid);
points.push(x+xmid, -y+ymid);
points.push(-x+xmid, y+ymid);
points.push(-x+xmid, -y+ymid);
points.push(y+xmid, x+ymid);
points.push(y+xmid, -x+ymid);
points.push(-y+xmid, x+ymid);
points.push(-y+xmid, -x+ymid);
}
const cbuffer = gl.createBuffer();
gl.bindBuffer(gl.ARRAY_BUFFER, cbuffer);
gl.bufferData(gl.ARRAY_BUFFER, new Float32Array(points), gl.STATIC_DRAW);
const positionAttribLocation = gl.getAttribLocation(program, 'vertPosition');
const colorAttribLocation = gl.getAttribLocation(program, 'vertColor');
gl.vertexAttribPointer(
positionAttribLocation, // Attribute location
2, // Number of elements per attribute
gl.FLOAT, // Type of elements
gl.FALSE,
0, // Size of an individual vertex
0 // Offset from the beginning of a single vertex to this attribute
);
gl.enableVertexAttribArray(positionAttribLocation);
// you probably meant to supply colors for this attribute
// since if you wanted a constant color you'd have probably
// used a uniform but since you didn't we'll set a constant
// color
gl.vertexAttrib4f(colorAttribLocation, 1, 0, 0, 1);
gl.drawArrays(gl.POINTS, 0, points.length/2);
return points;
}
circle(0.6, 0.6, 0.18);
circle(0.9, 0.6, 0.18);
circle(0.5, 0.4, 0.18);
circle(1.0, 0.4, 0.18);
circle(0.75, 0.4, 0.18);
circle(0.75, 0.4, 0.18);
}
function createProgram(gl, vertexShaderText, fragmentShaderText) {
// Create shaders
const vertexShader = createShader(gl, gl.VERTEX_SHADER, vertexShaderText);
const fragmentShader = createShader(gl, gl.FRAGMENT_SHADER, fragmentShaderText);
const program = gl.createProgram();
gl.attachShader(program, vertexShader);
gl.attachShader(program, fragmentShader);
gl.linkProgram(program);
if (!gl.getProgramParameter(program, gl.LINK_STATUS)) {
console.error(gl.getProgramInfoLog(program));
gl.deleteProgram(program);
return null;
}
return program;
}
function createShader(gl, type, source) {
const shader = gl.createShader(type);
gl.shaderSource(shader, source);
gl.compileShader(shader);
if (!gl.getShaderParameter(shader, gl.COMPILE_STATUS)) {
console.error(gl.getShaderInfoLog(shader));
gl.deleteShader(shader);
return null;
}
return shader;
}
start();
<canvas id="sky"></canvas>

WebGL draw perspective view volume

I'm trying to calculate the 8 (4 + 4) vertices of a view volume's planes: near and far.
I need these vertices to draw, in WebGL, the view volume of a camera.
So far I have managed to calculate them using trigonometry from each perspective, but somehow the result does not seem accurate when I draw the vertices.
I reached these equations for the vertices so far:
y = sqrt(hypotenuse^2 - plane^2)
x = sqrt(hypotenuse^2 - plane^2)
z = plane (near or far)
Can anyone help? Thank you in advance.
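For comparison, here is a sketch of the direct trigonometric form of those corner points (not taken from the answer below; it assumes a symmetric perspective frustum with parameters fovY, aspect, zNear and zFar, and the camera at the origin looking down -Z):
// half extents of the frustum cross-section at distance d in front of the eye
function frustumPlaneCorners(fovY, aspect, d) {
  const halfH = Math.tan(fovY / 2) * d;
  const halfW = halfH * aspect;
  return [
    [-halfW,  halfH, -d], [ halfW,  halfH, -d],
    [ halfW, -halfH, -d], [-halfW, -halfH, -d],
  ];
}
const nearCorners = frustumPlaneCorners(fovY, aspect, zNear); // 4 near-plane vertices
const farCorners  = frustumPlaneCorners(fovY, aspect, zFar);  // 4 far-plane vertices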
You can just project a standard cube through an inverse projection matrix.
const m4 = twgl.m4;
const gl = document.querySelector("canvas").getContext("webgl");
const vs = `
attribute vec4 position;
uniform mat4 u_worldViewProjection;
void main() {
gl_Position = u_worldViewProjection * position;
}
`;
const fs = `
precision mediump float;
void main() {
gl_FragColor = vec4(1, 0, 0, 1);
}
`;
const programInfo = twgl.createProgramInfo(gl, [vs, fs]);
const arrays = {
position: [
-1, 1, -1,
1, 1, -1,
1, -1, -1,
-1, -1, -1,
-1, 1, 1,
1, 1, 1,
1, -1, 1,
-1, -1, 1,
],
indices: [
0, 1, 1, 2, 2, 3, 3, 0,
4, 5, 5, 6, 6, 7, 7, 4,
0, 4, 1, 5, 2, 6, 3, 7,
],
};
const bufferInfo = twgl.createBufferInfoFromArrays(gl, arrays);
function render(time) {
time *= 0.001;
twgl.resizeCanvasToDisplaySize(gl.canvas);
gl.viewport(0, 0, gl.canvas.width, gl.canvas.height);
gl.clear(gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT);
let projectionToViewWith;
{
const fov = 30 * Math.PI / 180;
const aspect = gl.canvas.clientWidth / gl.canvas.clientHeight;
const zNear = 0.5;
const zFar = 100;
projectionToViewWith = m4.perspective(fov, aspect, zNear, zFar);
}
let projectionToBeViewed;
{
const fov = 30 * Math.PI / 180;
const aspect = gl.canvas.clientWidth / gl.canvas.clientHeight;
const zNear = 2;
const zFar = 10;
projectionToBeViewed = m4.perspective(fov, aspect, zNear, zFar);
}
const inverseProjectionToBeViewed = m4.inverse(projectionToBeViewed);
const radius = 20;
const eye = [Math.sin(time) * radius, 4, Math.cos(time) * radius];
const target = [0, 0, 0];
const up = [0, 1, 0];
const camera = m4.lookAt(eye, target, up);
const view = m4.inverse(camera);
const viewProjection = m4.multiply(projectionToViewWith, view);
const worldViewProjection = m4.multiply(
viewProjection,
inverseProjectionToBeViewed);
gl.useProgram(programInfo.program);
twgl.setBuffersAndAttributes(gl, programInfo, bufferInfo);
twgl.setUniforms(programInfo, {
u_worldViewProjection: worldViewProjection,
});
twgl.drawBufferInfo(gl, bufferInfo, gl.LINES);
requestAnimationFrame(render);
}
requestAnimationFrame(render);
body { margin: 0; }
canvas { width: 100vw; height: 100vh; display: block; }
<script src="https://twgljs.org/dist/4.x/twgl-full.min.js"></script>
<canvas></canvas>
To get the points of the 8 corners you just have to do a reverse projection. The projection matrix takes the space of the frustum, which is fovy tall and fovy * aspect wide, starting at -zNear and ending at -zFar, and converts that space to a -1 to +1 box after the perspective divide.
To go backward and compute the points of that box we just project a -1 to +1 box through the inverse projection matrix and do the perspective divide again (which is exactly what's happening in the example above; we're just doing it all on the GPU).
So, we pull it out of the GPU and do it in JavaScript:
[
[-1, 1, -1],
[ 1, 1, -1],
[ 1, -1, -1],
[-1, -1, -1],
[-1, 1, 1],
[ 1, 1, 1],
[ 1, -1, 1],
[-1, -1, 1],
].forEach((point) => {
console.log(m4.transformPoint(inverseProjectionMatrix, point));
});
Here's an example.
const m4 = twgl.m4;
const gl = document.querySelector("canvas").getContext("webgl");
const vs = `
attribute vec4 position;
uniform mat4 u_worldViewProjection;
void main() {
gl_Position = u_worldViewProjection * position;
gl_PointSize = 10.;
}
`;
const fs = `
precision mediump float;
uniform vec4 u_color;
void main() {
gl_FragColor = u_color;
}
`;
const programInfo = twgl.createProgramInfo(gl, [vs, fs]);
const positions = [
-1, 1, -1,
1, 1, -1,
1, -1, -1,
-1, -1, -1,
-1, 1, 1,
1, 1, 1,
1, -1, 1,
-1, -1, 1,
];
const arrays = {
position: positions,
indices: [
0, 1, 1, 2, 2, 3, 3, 0,
4, 5, 5, 6, 6, 7, 7, 4,
0, 4, 1, 5, 2, 6, 3, 7,
],
};
const bufferInfo = twgl.createBufferInfoFromArrays(gl, arrays);
function render(time) {
time *= 0.001;
twgl.resizeCanvasToDisplaySize(gl.canvas);
gl.viewport(0, 0, gl.canvas.width, gl.canvas.height);
gl.clear(gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT);
let projectionToViewWith;
{
const fov = 30 * Math.PI / 180;
const aspect = gl.canvas.clientWidth / gl.canvas.clientHeight;
const zNear = 0.5;
const zFar = 100;
projectionToViewWith = m4.perspective(fov, aspect, zNear, zFar);
}
let projectionToBeViewed;
{
const fov = 30 * Math.PI / 180;
const aspect = gl.canvas.clientWidth / gl.canvas.clientHeight;
const zNear = 2;
const zFar = 10;
projectionToBeViewed = m4.perspective(fov, aspect, zNear, zFar);
}
const inverseProjectionToBeViewed = m4.inverse(projectionToBeViewed);
const radius = 20;
const eye = [Math.sin(time) * radius, 4, Math.cos(time) * radius];
const target = [0, 0, 0];
const up = [0, 1, 0];
const camera = m4.lookAt(eye, target, up);
const view = m4.inverse(camera);
const viewProjection = m4.multiply(projectionToViewWith, view);
const worldViewProjection = m4.multiply(
viewProjection,
inverseProjectionToBeViewed);
gl.useProgram(programInfo.program);
twgl.setBuffersAndAttributes(gl, programInfo, bufferInfo);
twgl.setUniforms(programInfo, {
u_worldViewProjection: worldViewProjection,
u_color: [1, 0, 0, 1],
});
twgl.drawBufferInfo(gl, bufferInfo, gl.LINES);
// just because I'm lazy let's draw each point one at a time
// note: since in our case the frustum is not moving we
// could have computed these at init time.
const positionLoc = programInfo.attribSetters.position.location;
gl.disableVertexAttribArray(positionLoc);
for (let i = 0; i < positions.length; i += 3) {
const point = positions.slice(i, i + 3);
const worldPosition = m4.transformPoint(
inverseProjectionToBeViewed, point);
gl.vertexAttrib3f(positionLoc, ...worldPosition);
twgl.setUniforms(programInfo, {
u_color: [0, 1, 0, 1],
u_worldViewProjection: viewProjection,
});
gl.drawArrays(gl.POINTS, 0, 1);
}
requestAnimationFrame(render);
}
requestAnimationFrame(render);
body { margin: 0; }
canvas { width: 100vw; height: 100vh; display: block; }
<script src="https://twgljs.org/dist/4.x/twgl-full.min.js"></script>
<canvas></canvas>
You mentioned in a comment that you want to show the view frustum of a camera in one canvas in another canvas.
That's effectively what's happening above, except the camera in canvasWhosFrustumWeWantToRender is not moving. Instead it's just sitting at the origin looking down the -Z axis with +Y up. To allow the frustum to move, to show where it is relative to the camera, you just need to add in the camera matrix.
const m4 = twgl.m4;
const gl = document.querySelector("canvas").getContext("webgl");
const ext = gl.getExtension("OES_standard_derivatives");
const vs = `
attribute vec4 position;
uniform mat4 u_worldViewProjection;
varying vec3 v_position;
void main() {
gl_Position = u_worldViewProjection * position;
v_position = position.xyz; // for fake lighting
}
`;
const fs = `
#extension GL_OES_standard_derivatives : enable
precision mediump float;
varying vec3 v_position;
uniform vec4 u_color;
void main() {
vec3 fdx = dFdx(v_position);
vec3 fdy = dFdy(v_position);
vec3 n = normalize(cross(fdx,fdy));
float l = dot(n, normalize(vec3(1,2,-3))) * .5 + .5;
gl_FragColor = u_color;
gl_FragColor.rgb *= l;
}
`;
const programInfo = twgl.createProgramInfo(gl, [vs, fs]);
const arrays = {
position: [
-1, 1, -1,
1, 1, -1,
1, -1, -1,
-1, -1, -1,
-1, 1, 1,
1, 1, 1,
1, -1, 1,
-1, -1, 1,
],
indices: [
0, 1, 1, 2, 2, 3, 3, 0,
4, 5, 5, 6, 6, 7, 7, 4,
0, 4, 1, 5, 2, 6, 3, 7,
],
};
const concat = twgl.primitives.concatVertices;
const reorient = twgl.primitives.reorientVertices;
const wireCubeBufferInfo = twgl.createBufferInfoFromArrays(gl, arrays);
const solidCubeBufferInfo = twgl.primitives.createCubeBufferInfo(gl, 2);
const cameraBufferInfo = twgl.createBufferInfoFromArrays(gl,
concat([
reorient(twgl.primitives.createCubeVertices(2),
m4.translation([0, 0, 1])),
reorient(twgl.primitives.createTruncatedConeVertices(0, 1, 2, 12, 1),
m4.rotationX(Math.PI * -.5)),
])
);
const black = [0, 0, 0, 1];
const blue = [0, 0, 1, 1];
function drawScene(viewProjection, clearColor) {
gl.clearColor(...clearColor);
gl.clear(gl.COLOR_BUFFER_BIT);
const numCubes = 10;
for (let i = 0; i < numCubes; ++i) {
const u = i / numCubes;
let mat = m4.rotationY(u * Math.PI * 2);
mat = m4.translate(mat, [0, 0, 10]);
mat = m4.scale(mat, [1, 1 + u * 23 % 1, 1]);
mat = m4.translate(mat, [0, .5, 0]);
mat = m4.multiply(viewProjection, mat);
drawModel(solidCubeBufferInfo, mat, [u, u * 3 % 1, u * 7 % 1,1]);
}
}
function drawModel(bufferInfo, worldViewProjection, color, mode) {
twgl.setBuffersAndAttributes(gl, programInfo, bufferInfo);
twgl.setUniforms(programInfo, {
u_worldViewProjection: worldViewProjection,
u_color: color,
});
twgl.drawBufferInfo(gl, bufferInfo, mode);
}
function render(time) {
time *= 0.001;
twgl.resizeCanvasToDisplaySize(gl.canvas);
const width = gl.canvas.width;
const height = gl.canvas.height;
const halfWidth = width / 2;
gl.viewport(0, 0, width, height);
gl.disable(gl.SCISSOR_TEST);
gl.clear(gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT);
gl.enable(gl.DEPTH_TEST);
let projectionToViewWith; // the projection on the right
{
const fov = 60 * Math.PI / 180;
const aspect = gl.canvas.clientWidth / 2 / gl.canvas.clientHeight;
const zNear = 0.5;
const zFar = 100;
projectionToViewWith = m4.perspective(fov, aspect, zNear, zFar);
}
let projectionToBeViewed; // the projeciton on the left
{
const fov = 60 * Math.PI / 180;
const aspect = gl.canvas.clientWidth / 2 / gl.canvas.clientHeight;
const zNear = 1.5;
const zFar = 15;
projectionToBeViewed = m4.perspective(fov, aspect, zNear, zFar);
}
const inverseProjectionToBeViewed = m4.inverse(projectionToBeViewed);
let cameraViewingScene; // camera for right view
{
const t1 = 0;
const radius = 30;
const eye = [Math.sin(t1) * radius, 4, Math.cos(t1) * radius];
const target = [0, 0, 0];
const up = [0, 1, 0];
cameraViewingScene = m4.lookAt(eye, target, up);
}
let cameraInScene; // camera for left view
{
const t1 = time;
const t2 = time + .4;
const r1 = 10 + Math.sin(t1);
const r2 = 10 + Math.sin(t2) * 2;
const eye = [Math.sin(t1) * r1, 0 + Math.sin(t1) * 4, Math.cos(t1) * r1];
const target = [Math.sin(t2) * r2, 1 + Math.sin(t2), Math.cos(t2) * r2];
const up = [0, 1, 0];
cameraInScene = m4.lookAt(eye, target, up);
}
// there's only one shader program so just set it once
gl.useProgram(programInfo.program);
// draw only on left half of canvas
gl.enable(gl.SCISSOR_TEST);
gl.scissor(0, 0, halfWidth, height);
gl.viewport(0, 0, halfWidth, height);
// draw the scene on the left using the camera inside the scene
{
const view = m4.inverse(cameraInScene);
const viewProjection = m4.multiply(projectionToBeViewed, view);
drawScene(viewProjection, [.9, 1, .9, 1]);
}
// draw only on right half of canvas
gl.scissor(halfWidth, 0, halfWidth, height);
gl.viewport(halfWidth, 0, halfWidth, height);
// draw the same scene on the right using the camera outside the scene
{
const view = m4.inverse(cameraViewingScene);
const viewProjection = m4.multiply(projectionToViewWith, view);
drawScene(viewProjection, [.9, 1, 1, 1]);
// draw the in scene camera's frustum
{
const world = m4.multiply(cameraInScene, inverseProjectionToBeViewed);
const worldViewProjection = m4.multiply(viewProjection, world);
drawModel(wireCubeBufferInfo, worldViewProjection, black, gl.LINES);
}
// draw the in scene camera's camera model
{
const worldViewProjection = m4.multiply(viewProjection, cameraInScene);
drawModel(cameraBufferInfo, worldViewProjection, blue);
}
}
requestAnimationFrame(render);
}
requestAnimationFrame(render);
body { margin: 0; }
canvas { width: 100vw; height: 100vh; display: block; }
<script src="https://twgljs.org/dist/4.x/twgl-full.min.js"></script>
<canvas></canvas>

Implementing a gradient shader in three.js

I am trying to learn about shaders using three.js. What I am trying to do is create a shader that generates gradients to texture planets with. Right now I am just trying to generate one gradient to make sure it works. However, when I apply the shader it only renders one of the colors, and does not create the gradient effect I'm looking for. I can't seem to find where I'm going wrong with my code.
I'm using the Book of Shaders as the basis for my code. Specifically, I was looking at this example, trying to replicate the background color.
Here is my shader code:
<section id="fragmentshader">
#ifdef GL_ES
precision mediump float;
#endif
// #define PI 3.14159265359
uniform vec2 u_resolution;
// uniform vec2 u_mouse;
// uniform float u_time;
vec3 colorA = vec3(0.500,0.141,0.912);
vec3 colorB = vec3(1.000,0.833,0.224);
void main() {
vec2 st = gl_FragCoord.xy/u_resolution.xy;
vec3 color = vec3(0.0);
color = mix( colorA,
colorB,
st.y);
gl_FragColor = vec4(color,1.0);
}
</section>
<section id="vertexshader">
void main() {
gl_Position = projectionMatrix * modelViewMatrix * vec4(position, 1.0);
}
</section>
and my three.js code inside an a-frame component:
var uniforms = {
u_resolution: { type: "v2", value: new THREE.Vector2() },
};
var fShader = $('#fragmentshader');
var vShader = $('#vertexshader');
var geometry = new THREE.SphereGeometry(getRandomInt(100, 250), 20, 20);
// var material = new THREE.MeshBasicMaterial( {wireframe: true });
var material = new THREE.ShaderMaterial({
uniforms: uniforms,
vertexShader: vShader.text(),
fragmentShader: fShader.text()
});
var sphere = new THREE.Mesh(geometry, material);
This is what my spheres currently look like
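One thing worth noting about the snippet above (an observation, not part of the original post): u_resolution is created as an empty THREE.Vector2, so it stays at (0, 0) and the division in the fragment shader never produces a usable gradient. If the gl_FragCoord approach were kept, the uniform would need an actual size, for example:
// hypothetical fix if keeping the gl_FragCoord / u_resolution approach:
uniforms.u_resolution.value.set(window.innerWidth, window.innerHeight);
The working example below sidesteps u_resolution entirely and interpolates using the uv coordinates passed from the vertex shader instead.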
var camera, scene, renderer, mesh, material;
init();
animate();
function init() {
// Renderer.
renderer = new THREE.WebGLRenderer();
//renderer.setPixelRatio(window.devicePixelRatio);
renderer.setSize(window.innerWidth, window.innerHeight);
// Add renderer to page
document.body.appendChild(renderer.domElement);
// Create camera.
camera = new THREE.PerspectiveCamera(70, window.innerWidth / window.innerHeight, 1, 1000);
camera.position.z = 400;
// Create scene.
scene = new THREE.Scene();
var uniforms = {
"color1" : {
type : "c",
value : new THREE.Color(0xffffff)
},
"color2" : {
type : "c",
value : new THREE.Color(0x000000)
},
};
var fShader = document.getElementById('fragmentShader').text;
var vShader = document.getElementById('vertexShader').text;
// Create material
var material = new THREE.ShaderMaterial({
uniforms: uniforms,
vertexShader: vShader,
fragmentShader: fShader
});
// Create cube and add to scene.
var geometry = new THREE.BoxGeometry(200, 200, 200);
mesh = new THREE.Mesh(geometry, material);
scene.add(mesh);
// Create ambient light and add to scene.
var light = new THREE.AmbientLight(0x404040); // soft white light
scene.add(light);
// Create directional light and add to scene.
var directionalLight = new THREE.DirectionalLight(0xffffff);
directionalLight.position.set(1, 1, 1).normalize();
scene.add(directionalLight);
// Add listener for window resize.
window.addEventListener('resize', onWindowResize, false);
}
function animate() {
requestAnimationFrame(animate);
mesh.rotation.x += 0.005;
mesh.rotation.y += 0.01;
renderer.render(scene, camera);
}
function onWindowResize() {
camera.aspect = window.innerWidth / window.innerHeight;
camera.updateProjectionMatrix();
renderer.setSize(window.innerWidth, window.innerHeight);
}
<script src="https://rawgit.com/mrdoob/three.js/r86/build/three.min.js"></script>
<script id="vertexShader" type="x-shader/x-vertex">
varying vec2 vUv;
void main() {
vUv = uv;
gl_Position = projectionMatrix * modelViewMatrix * vec4(position,1.0);
}
</script>
<script id="fragmentShader" type="x-shader/x-fragment">
uniform vec3 color1;
uniform vec3 color2;
varying vec2 vUv;
void main() {
gl_FragColor = vec4(mix(color1, color2, vUv.y),1.0);
}
</script>
