Per fragment lighting on heightmap and generating normals - graphics

I am trying to implement per-fragment lighting on a heightmap. I upload the height map to the shader as a texture and adjust the vertex heights according to the respective pixels. To generate the normals I take the values of four neighbouring pixels, build two vectors from them, and compute their cross product like so:
vec3 offset = vec3(-1.0/mapSize, 0, 1.0/mapSize);
// Sample the four neighbouring texels (left, right, down, up).
float s1 = texture2D(sampler, texCoord + offset.xy).x;
float s2 = texture2D(sampler, texCoord + offset.zy).x;
float s3 = texture2D(sampler, texCoord + offset.yx).x;
float s4 = texture2D(sampler, texCoord + offset.yz).x;
// Build two tangent vectors from the height differences and cross them.
vec3 va = normalize(vec3(1.0, 0.0, s2 - s1));
vec3 vb = normalize(vec3(0.0, 1.0, s3 - s4));
vec3 n = normalize(cross(va, vb));
And here's my lighting function:
vec4 directional(Light light){
    vec4 ret = vec4(0.0);
    vec3 lPos = (V * vec4(light.position, 0.0)).xyz;
    vec3 normal = normalize(vNormal);
    vec3 lightDir = normalize(lPos);
    vec3 reflectDir = reflect(-lightDir, normal);
    vec3 viewDir = normalize(-vPosition);
    float lambertTerm = max(dot(lightDir, normal), 0.0);
    float specular = 0.0;
    if(lambertTerm > 0.0){
        float specAngle = max(dot(reflectDir, viewDir), 0.0);
        specular = pow(specAngle, material.shininess);
    }
    ret = vec4(light.ambient * material.ambient + light.diffuse * material.diffuse * lambertTerm + light.specular * material.specular * specular, 1.0);
    return ret;
}
This kind of works, except that the y and z axes seem to be flipped: if I move the light along the y axis it looks like it's moving along the z axis, and vice versa.
I should also point out that the function works perfectly on regular 3D models, so I assume the problem is in the generation of normals.

If you're using a y-up coordinate system then you want to be doing your deltas on the y component, not the z-component.
vec3 va = normalize(vec3(1.0, s2 - s1, 0.0));
vec3 vb = normalize(vec3(0.0, s4 - s3, 1.0));
Also you should confirm whether it's s3 - s4 or s4 - s3.
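Putting it together, a minimal sketch of the y-up version (same sampling scheme and variable names as in your question; the cross-product order is chosen so the normal points along +y, so flip it if your texture orientation differs):
vec3 offset = vec3(-1.0/mapSize, 0.0, 1.0/mapSize);
float s1 = texture2D(sampler, texCoord + offset.xy).x; // left
float s2 = texture2D(sampler, texCoord + offset.zy).x; // right
float s3 = texture2D(sampler, texCoord + offset.yx).x; // down
float s4 = texture2D(sampler, texCoord + offset.yz).x; // up
// The height deltas go into the y component; x and z are the steps along the grid.
vec3 va = normalize(vec3(1.0, s2 - s1, 0.0));
vec3 vb = normalize(vec3(0.0, s4 - s3, 1.0));
vec3 n  = normalize(cross(vb, va)); // (s1 - s2, 1, s3 - s4) before normalization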

Related

Upscaling using color interpolation for lighting?

I'm writing a lighting system for 2D games using a rather common method of 2D radiosity. The idea is to generate a JFA voronoi of the game scene (black, alpha = 1.0 for occluders and color, alpha = 1.0 for emitters) and generate an SDF from the JFA. Next you raymarch every pixel on screen for N rays with M max steps on the SDF with random angle offsets for each pixel. You then sample the emitter/occluder surface at the end point of each ray, step back into empty space and sample again for light emitted in the nearest empty space. This gives you a nice result as seen below:
That isn't the problem; it works great. The problem is efficiency. The idea for fixing this is to render the GI at 1/N sample size (width/N, height/N) and then upscale the GI using interpolation, as I've done below:
This is the problem. I've accomplished the upscaling using weighted color interpolation, but it produces these nasty results near occluders:
Here's the full shader:
The uniforms passed are the downsampled GI texture (in_GIField), the scene texture containing emitters/occluders only (gm_BaseTexture), the signed distance field (in_SDField), the resolution (in_Screen), and the downsample ratio (in_Sample).
/*
UPSCALING SHADER:
Find the nearest 4 bounding samples to the current pixel (xyDelta & xyShift).
Calculate each sample's weight based on whether it is a marchable or a source pixel.
Finally, perform a composite weighted interpolation for the current pixel from the nearest 4 samples.
*/
varying vec2 in_Coord;
uniform float in_Sample;
uniform vec2 in_Screen;
uniform sampler2D in_GIField;
uniform sampler2D in_SDField;
#define TPI 9.4247779607693797153879301498385
#define PI 3.1415926535897932384626433832795
#define TAU 6.2831853071795864769252867665590
#define EPSILON 0.001 // floating point precision check
#define dot(f) dot(f,f) // shorthand dot of a single float
float ATAN2(float yy, float xx) { return mod(atan(yy, xx), TAU); }
float DIRECT(vec2 v1, vec2 v2) { vec2 v3 = v2 - v1; return ATAN2(-v3.y, v3.x); }
float DIFFERENCE(float src, float dst) { return mod(dst - src + TPI, TAU) - PI; }
float V2_F16(vec2 v) { return v.x + (v.y / 255.0); }
float VMAX(vec3 v) { return max(v.r, max(v.g, v.b)); }
vec2 SAMPLEXY(vec2 xycoord) { return (floor(xycoord / in_Sample) * in_Sample) + (in_Sample*0.5); }
vec3 TONEMAP(vec3 color, float dist) { return color * (1.0 / (1.0 + dot(dist / min(in_Screen.x, in_Screen.y)))); }
float TESTMARCH(vec2 pix, vec2 end) {
    float aspect = in_Screen.x / in_Screen.y,
          dst = distance(pix, end);
    vec2 dir = normalize((end*in_Screen) - (pix*in_Screen)) / in_Screen;
    for(float i = 0.0; i < in_Sample; i += 1.0) {
        vec2 test = vec2(pix.x * aspect, pix.y) + (dir * (i/in_Screen));
        test.x /= aspect;
        vec4 sourceCol = texture2D(gm_BaseTexture, test);
        float source = max(sourceCol.r, max(sourceCol.g, sourceCol.b));
        if (source < EPSILON && sourceCol.a > 0.0) return 0.0;
    }
    return 1.0;
}
vec3 WCOMPOSITE(vec3 colors[4], float weights[4], vec2 uv) {
    // (uv * A * B) + (B * (1.0 - A)) //0, 2, 1, 3
    float weightA = (uv.y * weights[0] * weights[2]) + (weights[2] * (1.0 - weights[0])),
          weightB = (uv.y * weights[1] * weights[3]) + (weights[3] * (1.0 - weights[1]));
    vec3 colorA = mix(colors[0], colors[2], weightA),
         colorB = mix(colors[1], colors[3], weightB);
    return mix(colorA, colorB, uv.x);
}
void main() {
    vec2 xyCoord = in_Coord * in_Screen;
    vec2 xyLight = SAMPLEXY(xyCoord);
    vec2 xyDelta = sign(sign(xyCoord - xyLight) - 1.0);
    vec2 xyShift[4];
    xyShift[0] = vec2(0.,0.) + xyDelta;
    xyShift[1] = vec2(1.,0.) + xyDelta;
    xyShift[2] = vec2(0.,1.) + xyDelta;
    xyShift[3] = vec2(1.,1.) + xyDelta;
    vec2 xyField[4]; vec3 xyColor[4]; float notSource[4]; float xyWghts[4];
    for(int i = 0; i < 4; i++) {
        xyField[i] = (xyLight + (xyShift[i] * in_Sample)) * (1.0/in_Screen);
        xyColor[i] = texture2D(in_GIField, xyField[i]).rgb;
        notSource[i] = 1.0 - sign(texture2D(gm_BaseTexture, xyField[i]).a);
        xyWghts[i] = TESTMARCH(in_Coord, xyField[i]) * sign(VMAX(xyColor[i])) * notSource[i];
    }
    vec2 uvCoord = mod(xyCoord-xyLight, in_Sample) * (1.0/in_Sample);
    vec3 xyFinal = WCOMPOSITE(xyColor, xyWghts, uvCoord);
    vec4 xySource = texture2D(gm_BaseTexture, in_Coord);
    float isSource = sign(xySource.a);
    gl_FragColor = vec4((isSource * xySource.rgb) + ((1.0-isSource) * xyFinal), 1.0);
}
EDIT: This DOES produce the intended result in empty space, but it ends up with nasty artifacting near emitters and occluders. I tried to solve this in the for-loop in the main function by weighting out the emitter/occluder colors (the source pixels in the scene texture), but this isn't working.
See the shader code attached (Shadertoy). I noticed that the weighting function will actually produce some colors with a weight of 0 (as expected, as originally written). I currently don't have a solution for how to remove such colors from the interpolation process entirely.
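One direction I'm considering (just a sketch, not tested): scale the plain bilinear corner weights by each sample's validity weight and renormalize, so rejected samples drop out of the blend entirely instead of pulling it toward black. A hypothetical WCOMPOSITE2 using the same corner order as the main loop (0:(0,0), 1:(1,0), 2:(0,1), 3:(1,1)) might look like:
vec3 WCOMPOSITE2(vec3 colors[4], float weights[4], vec2 uv) {
    // Plain bilinear weights for the four corners.
    vec4 bilin = vec4((1.0 - uv.x) * (1.0 - uv.y),
                      uv.x * (1.0 - uv.y),
                      (1.0 - uv.x) * uv.y,
                      uv.x * uv.y);
    // Scale by the per-sample validity weights (0 or 1 in the main loop above).
    vec4 w = bilin * vec4(weights[0], weights[1], weights[2], weights[3]);
    float total = w.x + w.y + w.z + w.w;
    if (total <= 0.0) {
        // Every corner was rejected; fall back to the plain bilinear result.
        return mix(mix(colors[0], colors[1], uv.x), mix(colors[2], colors[3], uv.x), uv.y);
    }
    return (colors[0]*w.x + colors[1]*w.y + colors[2]*w.z + colors[3]*w.w) / total;
}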
Full Source Code
Full Color Shader Code

About image-space derivatives of the barycentrics

I found some code in a geometry shader that calculates the derivatives of the barycentrics w.r.t. screen-space coordinates (dudX, dudY, dvdX, dvdY).
And here is the code:
void main()
{
    // Plane equations for bary differentials.
    float w0 = gl_in[0].gl_Position.w;
    float w1 = gl_in[1].gl_Position.w;
    float w2 = gl_in[2].gl_Position.w;
    vec2 p0 = gl_in[0].gl_Position.xy / w0;
    vec2 p1 = gl_in[1].gl_Position.xy / w1;
    vec2 p2 = gl_in[2].gl_Position.xy / w2;
    vec2 e0 = p0 - p2;
    vec2 e1 = p1 - p2;
    float a = e0.x*e1.y - e0.y*e1.x;
    // Clamp area to an epsilon to avoid arbitrarily high bary differentials.
    float eps = 1e-6f; // ~1 pixel in 1k x 1k image.
    float ca = (abs(a) >= eps) ? a : (a < 0.f) ? -eps : eps; // Clamp with sign.
    float ia = 1.f / ca; // Inverse area.
    vec2 ascl = ia * vp_scale;
    float dudx =  e1.y * ascl.x;
    float dudy = -e1.x * ascl.y;
    float dvdx = -e0.y * ascl.x;
    float dvdy =  e0.x * ascl.y;
    float duwdx = dudx / w0;
    float dvwdx = dvdx / w1;
    float duvdx = (dudx + dvdx) / w2;
    float duwdy = dudy / w0;
    float dvwdy = dvdy / w1;
    float duvdy = (dudy + dvdy) / w2;
    vec4 db0 = vec4(duvdx - dvwdx, duvdy - dvwdy, dvwdx, dvwdy);
    vec4 db1 = vec4(duwdx, duwdy, duvdx - duwdx, duvdy - duwdy);
    vec4 db2 = vec4(duwdx, duwdy, dvwdx, dvwdy);
    int layer_id = v_layer[0];
    int prim_id = gl_PrimitiveIDIn + v_offset[0];
    gl_Layer = layer_id; gl_PrimitiveID = prim_id; gl_Position = vec4(gl_in[0].gl_Position.x, gl_in[0].gl_Position.y, gl_in[0].gl_Position.z, gl_in[0].gl_Position.w); var_uvzw = vec4(1.f, 0.f, gl_in[0].gl_Position.z, gl_in[0].gl_Position.w); var_db = db0; EmitVertex();
    gl_Layer = layer_id; gl_PrimitiveID = prim_id; gl_Position = vec4(gl_in[1].gl_Position.x, gl_in[1].gl_Position.y, gl_in[1].gl_Position.z, gl_in[1].gl_Position.w); var_uvzw = vec4(0.f, 1.f, gl_in[1].gl_Position.z, gl_in[1].gl_Position.w); var_db = db1; EmitVertex();
    gl_Layer = layer_id; gl_PrimitiveID = prim_id; gl_Position = vec4(gl_in[2].gl_Position.x, gl_in[2].gl_Position.y, gl_in[2].gl_Position.z, gl_in[2].gl_Position.w); var_uvzw = vec4(0.f, 0.f, gl_in[2].gl_Position.z, gl_in[2].gl_Position.w); var_db = db2; EmitVertex();
}
db0, db1 and db2 are the output derivatives for the three vertices of a triangle.
vp_scale is a vec2 variable containing the (width, height) of the display viewport.
I can follow the code up to dudx, dudy, dvdx and dvdy.
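For reference, my own derivation of those four terms goes like this: any point inside the triangle can be written as $p = p_2 + u\,e_0 + v\,e_1$, and solving that 2x2 system with $a = e_{0x} e_{1y} - e_{0y} e_{1x}$ gives
$$u = \frac{(p - p_2)_x\, e_{1y} - (p - p_2)_y\, e_{1x}}{a}, \qquad v = \frac{(p - p_2)_y\, e_{0x} - (p - p_2)_x\, e_{0y}}{a},$$
so $\partial u/\partial x = e_{1y}/a$, $\partial u/\partial y = -e_{1x}/a$, $\partial v/\partial x = -e_{0y}/a$ and $\partial v/\partial y = e_{0x}/a$. Multiplying by vp_scale just converts these NDC-space derivatives into per-pixel derivatives, which is exactly dudx through dvdy in the code.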
The most confusing part for me is db0, db1 and db2. I also don't know what duvdx and duvdy represent.
I think it may be something related to perspective correction in the rasterization interpolation of vertex attributes, but I can't find my way to the answer.
Does anyone have an idea about it?

Compute Shader Corrupting Vertex Buffer

I'm making a tutorial for computing tangents and bitangents in a WGPU (Vulkan GLSL) compute shader. I'm creating the vertex buffer on the CPU from a .obj I made in Blender.
Here's the code for the compute shader.
#version 450
#define VERTICES_PER_TRIANGLE 3
layout(local_size_x = VERTICES_PER_TRIANGLE) in;
// Should match the struct in model.rs
struct ModelVertex {
    vec3 position;
    vec2 tex_coords;
    vec3 normal;
    vec3 tangent;
    vec3 bitangent;
};
layout(std140, set=0, binding=0) buffer SrcVertexBuffer {
    ModelVertex srcVertices[];
};
layout(std140, set=0, binding=1) buffer DstVertexBuffer {
    ModelVertex dstVertices[];
};
layout(std140, set=0, binding=2) buffer IndexBuffer {
    uint Indices[];
};
void main() {
    uint index = gl_GlobalInvocationID.x;
    // Grab the indices for the triangle
    uint i0 = Indices[index];
    uint i1 = Indices[index + 1];
    uint i2 = Indices[index + 2];
    // Grab the vertices for the triangle
    ModelVertex v0 = srcVertices[i0];
    ModelVertex v1 = srcVertices[i1];
    ModelVertex v2 = srcVertices[i2];
    // Grab the position and uv components of the vertices
    vec3 pos0 = v0.position;
    vec3 pos1 = v1.position;
    vec3 pos2 = v2.position;
    vec2 uv0 = v0.tex_coords;
    vec2 uv1 = v1.tex_coords;
    vec2 uv2 = v2.tex_coords;
    // Calculate the edges of the triangle
    vec3 delta_pos1 = pos1 - pos0;
    vec3 delta_pos2 = pos2 - pos0;
    // This will give us a direction to calculate the
    // tangent and bitangent
    vec2 delta_uv1 = uv1 - uv0;
    vec2 delta_uv2 = uv2 - uv0;
    // Solving the following system of equations will
    // give us the tangent and bitangent.
    //     delta_pos1 = delta_uv1.x * T + delta_uv1.y * B
    //     delta_pos2 = delta_uv2.x * T + delta_uv2.y * B
    // Luckily, the place I found this equation provided
    // the solution!
    float r = 1.0 / (delta_uv1.x * delta_uv2.y - delta_uv1.y * delta_uv2.x);
    vec3 tangent = (delta_pos1 * delta_uv2.y - delta_pos2 * delta_uv1.y) * r;
    vec3 bitangent = (delta_pos2 * delta_uv1.x - delta_pos1 * delta_uv2.x) * r;
    // We'll use the same tangent/bitangent for each vertex in the triangle
    dstVertices[i0].tangent = tangent;
    dstVertices[i1].tangent = tangent;
    dstVertices[i2].tangent = tangent;
    dstVertices[i0].bitangent = bitangent;
    dstVertices[i1].bitangent = bitangent;
    dstVertices[i2].bitangent = bitangent;
}
This leads to an image like the following.
The problem occurs in the last six lines.
dstVertices[i0].tangent = tangent;
dstVertices[i1].tangent = tangent;
dstVertices[i2].tangent = tangent;
dstVertices[i0].bitangent = bitangent;
dstVertices[i1].bitangent = bitangent;
dstVertices[i2].bitangent = bitangent;
If I delete these lines, the output is fine (albeit the lighting's all wrong due to the tangent and bitangent being a 0 vector).
Why is modifying the tangent and bitangent messing with the position of the vertices?
Here's the rest of the code for context. https://github.com/sotrh/learn-wgpu/tree/compute/code/intermediate/tutorial14-compute
EDIT:
Here's the code where I'm calling the compute shader.
let src_vertex_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
    label: Some(&format!("{:?} Vertex Buffer", m.name)),
    contents: bytemuck::cast_slice(&vertices),
    // UPDATED!
    usage: wgpu::BufferUsage::STORAGE,
});
let dst_vertex_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
    label: Some(&format!("{:?} Vertex Buffer", m.name)),
    contents: bytemuck::cast_slice(&vertices),
    // UPDATED!
    usage: wgpu::BufferUsage::VERTEX | wgpu::BufferUsage::STORAGE,
});
let index_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
    label: Some(&format!("{:?} Index Buffer", m.name)),
    contents: bytemuck::cast_slice(&m.mesh.indices),
    // UPDATED!
    usage: wgpu::BufferUsage::INDEX | wgpu::BufferUsage::STORAGE,
});
let binding = BitangentComputeBinding {
    dst_vertex_buffer,
    src_vertex_buffer,
    index_buffer,
    num_elements: m.mesh.indices.len() as u32,
};
// Calculate the tangents and bitangents
let calc_bind_group = self.binder.create_bind_group(
    &binding,
    device,
    Some("Mesh BindGroup"),
);
let mut encoder = device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
    label: Some("Tangent and Bitangent Calc"),
});
{
    let mut pass = encoder.begin_compute_pass();
    pass.set_pipeline(&self.pipeline);
    pass.set_bind_group(0, &calc_bind_group, &[]);
    pass.dispatch(binding.num_elements as u32 / 3, 1, 1);
}
queue.submit(std::iter::once(encoder.finish()));
device.poll(wgpu::Maintain::Wait);
The shader is supposed to loop through all the triangles in the mesh and compute the tangent and bitangent using the position and uv coordinates of the vertices of that triangle. I'm guessing that vertices shared between multiple triangles are getting written to at the same time, causing this memory corruption.
I don't think it's a problem with shaders elsewhere, as I'm using the same model for the light, and the vertex shader responsible for that doesn't use the tangent and bitangent at all.
#version 450
layout(location=0) in vec3 a_position;
layout(location=0) out vec3 v_color;
layout(set=0, binding=0)
uniform Uniforms {
    vec3 u_view_position;
    mat4 u_view_proj;
};
layout(set=1, binding=0)
uniform Light {
    vec3 u_position;
    vec3 u_color;
};
// Let's keep our light smaller than our other objects
float scale = 0.25;
void main() {
    vec3 v_position = a_position * scale + u_position;
    gl_Position = u_view_proj * vec4(v_position, 1);
    v_color = u_color;
}
Looking at the vertex data in RenderDoc shows that the position data is getting messed up.
Also here's what the cubes look like if I set the tangent and bitangent to vec3(0, 1, 0).
My only guess is that storage buffers have a byte alignment rule that I'm unaware of. I know that's the case for uniform buffers, but I'm using storage buffers for my instancing code, and that doesn't seem to have any issues.
It turns out that Vulkan-style GLSL aligns the struct to its largest field when using std430.
https://github.com/KhronosGroup/glslang/issues/264
In my case that's vec3. The vec2 tex_coord throws the alignment off, causing the shader to pull data from the wrong parts of the vertex buffer.
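Roughly what goes wrong, as I understand it (the byte offsets below are my own working from the std140/std430 alignment rules):
// GPU-side view of the original struct: vec3 has 16-byte alignment, so padding creeps in.
struct ModelVertex {
    vec3 position;   // offset  0 (12 bytes used)
    vec2 tex_coords; // offset 16 (vec2 aligns to 8)
    vec3 normal;     // offset 32
    vec3 tangent;    // offset 48
    vec3 bitangent;  // offset 64 -> the struct/array stride rounds up to 80 bytes
};
// The CPU-side vertex is tightly packed: 14 floats = 56 bytes per element,
// so from the second vertex onwards the shader reads from the wrong offsets.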
The fix was to change the struct in model_load.comp to specify the individual components instead.
struct ModelVertex {
    float x; float y; float z;
    float uv; float uw;
    float nx; float ny; float nz;
    float tx; float ty; float tz;
    float bx; float by; float bz;
};
Now the base alignment is a float (4 bytes), and the shader reads the vertex buffer data properly.
I'm aware there's a packed layout, but shaderc doesn't allow me to use it for reasons beyond me. Honestly I think this is quite annoying and cumbersome, but it works.
There's still a flaw in the result: there's some banding on the edge faces of the cube. My guess is that it's due to a single vertex being shared by multiple triangles, but that's another problem that I'll have to look into later.

Is there a faked antialiasing algorithm using the depth buffer?

Lately I implemented the FXAA algorithm in my OpenGL application. I haven't fully understood this algorithm yet, but I know that it uses contrast data from the final image to selectively apply blurring. As a post-processing effect that makes sense. But since I use deferred shading in my application, I already have a depth texture of the scene. Using that, it might be much easier and more precise to find the edges where blur should be applied.
So is there a known antialiasing algorithm that uses the depth texture instead of the final image to find the edges? By faked I mean an antialiasing algorithm that works on a per-pixel basis rather than a per-vertex basis.
After some research I found out that my idea is already widely used in deferred renderers. I decided to post this answer because I came up with my own implementation, which I want to share with the community.
Blurring is applied to a pixel based on the gradient changes of the depth and the angle changes of the normals.
// GLSL fragment shader
#version 330
in vec2 coord;
out vec4 image;
uniform sampler2D image_tex;
uniform sampler2D position_tex;
uniform sampler2D normal_tex;
uniform vec2 frameBufSize;
void depth(out float value, in vec2 offset)
{
    value = texture2D(position_tex, coord + offset / frameBufSize).z / 1000.0f;
}
void normal(out vec3 value, in vec2 offset)
{
    value = texture2D(normal_tex, coord + offset / frameBufSize).xyz;
}
void main()
{
    // depth
    float dc, dn, ds, de, dw;
    depth(dc, vec2( 0,  0));
    depth(dn, vec2( 0, +1));
    depth(ds, vec2( 0, -1));
    depth(de, vec2(+1,  0));
    depth(dw, vec2(-1,  0));
    float dvertical   = abs(dc - ((dn + ds) / 2));
    float dhorizontal = abs(dc - ((de + dw) / 2));
    float damount = 1000 * (dvertical + dhorizontal);
    // normals
    vec3 nc, nn, ns, ne, nw;
    normal(nc, vec2( 0,  0));
    normal(nn, vec2( 0, +1));
    normal(ns, vec2( 0, -1));
    normal(ne, vec2(+1,  0));
    normal(nw, vec2(-1,  0));
    float nvertical   = dot(vec3(1), abs(nc - ((nn + ns) / 2.0)));
    float nhorizontal = dot(vec3(1), abs(nc - ((ne + nw) / 2.0)));
    float namount = 50 * (nvertical + nhorizontal);
    // blur
    const int radius = 1;
    vec3 blur = vec3(0);
    int n = 0;
    for(float u = -radius; u <= +radius; ++u)
        for(float v = -radius; v <= +radius; ++v)
        {
            blur += texture2D(image_tex, coord + vec2(u, v) / frameBufSize).rgb;
            n++;
        }
    blur /= n;
    // result
    float amount = mix(damount, namount, 0.5);
    vec3 color = texture2D(image_tex, coord).rgb;
    image = vec4(mix(color, blur, min(amount, 0.75)), 1.0);
}
For comparison, this is the scene without any anti-aliasing.
This is the result with anti-aliasing applied.
You may need to view the images at their full resolution to judge the effect. In my view the result is adequate for the simple implementation. The best thing is that there are nearly no jagged artifacts when the camera moves.

How do you calculate the angle between two normals in glsl?

How do you calculate the angle between two normals in GLSL? I am trying to add the Fresnel effect to the outer edges of an object (combining that effect with Phong shading), and I think the angle is the only thing I am missing.
Vertex Shader:
varying vec3 N;
varying vec3 v;
void main(void) {
    v = vec3(gl_ModelViewMatrix * gl_Vertex);
    N = normalize(gl_NormalMatrix * gl_Normal);
    gl_Position = gl_ModelViewProjectionMatrix * gl_Vertex;
}
Fragment Shader:
varying vec3 N;
varying vec3 v;
void main(void) {
    vec3 L = normalize(gl_LightSource[0].position.xyz - v);
    vec3 E = normalize(-v);
    vec3 R = normalize(-reflect(L, N));
    vec4 Iamb = gl_FrontLightProduct[0].ambient;
    vec4 Idiff = gl_FrontLightProduct[0].diffuse * max(dot(N, L), 0.0);
    vec4 Ispec = gl_FrontLightProduct[0].specular * pow(max(dot(R, E), 0.0), gl_FrontMaterial.shininess);
    vec4 Itot = gl_FrontLightModelProduct.sceneColor + Iamb + Idiff + Ispec;
    float A; // calculate the angle between the lighting direction and the normal
    float F = 0.33 + 0.67*(1.0-cos(A))*(1.0-cos(A))*(1.0-cos(A))*(1.0-cos(A))*(1.0-cos(A));
    vec4 white = vec4(1.0, 1.0, 1.0, 1.0);
    gl_FragColor = F*white + (1.0-F)*Itot;
}
The dot product of two vectors returns the cosine of the angle between them (in GLSL it's dot(a, b)). Taking the arc-cosine of that returns the angle in radians (in GLSL it's acos(x)).
The dot product is very cheap; the arc-cosine is quite expensive.
However, the Fresnel effect does not really need the angle; just having the dot result between the vectors is enough. There are many approximations for the Fresnel effect, and one of the cheapest is just using the dot directly, or squaring it (x*x), or raising it to some other power.
In your shader above, it looks like you just want to raise the dot to the 5th power. Something like:
float oneMinusDot = 1.0 - dot(L, N);
float F = pow(oneMinusDot, 5.0);
From the dot product of two vectors you can get the cosine of the angle between them:
cos(A) = dot(v1, v2) / (length(v1) * length(v2))
Using this, you don't need to compute the angle itself when calculating F. Since your vectors are unit vectors, i.e. have length one, you can even avoid the division.
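If you do want the angle itself, a minimal sketch (assuming L and N are already normalized, as in your shader):
// Angle in radians between the unit-length light direction and normal.
// The clamp guards against dot() drifting slightly outside [-1, 1].
float A = acos(clamp(dot(L, N), -1.0, 1.0));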
