Implementing Ambient Occlusion in simple Voxel Renderer - graphics

I am currently trying to implement Ambient Occlusion in a simple Voxel Renderer, each block is rendered (unless not visible) with 24 vertices and 12 triangles, so no meshing of any kind.
I followed this tutorial on how ambient occlusion for minecraft-like worlds could work, but it didn't explain how to actually implement that in shaders / graphics programming, so what I've done so far are only my uneducated best guesses.
The basic idea is, for each vertex to generate a value from the surrounding blocks about how "dark" the ambient occlusion should be. (Nevermind my values being completely wrong currently). These are from 0 (no surroundings) to 3 (vertex completely surrounded) The picture in the tutorial I linked helps to explain that.
So I tried that, but it seems to not darken the area around a vertex, but instead the whole triangle, the vertex is in... How do I make it not do that? I am a total beginner in shaders and graphics programming, so any help is appreciated :D
Here is my vertex shader, the inputs are
position of vertex
normal of vertex
tex_coord in texture atlas
tile_uv: position on block (0 or 1 for left/right bottom/top)
the ambient_occlusion value
#version 450
layout(location = 0) in vec3 position;
layout(location = 1) in vec3 normal;
layout(location = 2) in vec2 tex_coord;
layout(location = 3) in vec2 tile_uv;
layout(location = 4) in uint ambient_occlusion;
layout(location = 0) out vec3 v_position;
layout(location = 1) out vec3 v_normal;
layout(location = 2) out vec2 v_tex_coord;
layout(location = 3) out vec2 v_tile_uv;
layout(location = 4) out uint v_ambient_occlusion;
layout(set = 0, binding = 0) uniform Data {
mat4 world;
mat4 view;
mat4 proj;
vec2 tile_size;
} uniforms;
void main() {
mat4 worldview = uniforms.view * uniforms.world;
v_normal = mat3(transpose(inverse(uniforms.world))) * normal;
v_tex_coord = tex_coord;
v_tile_uv = tile_uv;
v_position = vec3(uniforms.world * vec4(position, 1.0));
v_ambient_occlusion = ambient_occlusion;
gl_Position = uniforms.proj * worldview * vec4(position.x, position.y, position.z, 1.0);
}
And here is my fragment shader:
#version 450
layout(location = 0) in vec3 v_position;
layout(location = 1) in vec3 v_normal;
layout(location = 2) in vec2 v_tex_coord;
layout(location = 3) in vec2 v_tile_uv;
layout(location = 4) in flat uint v_ambient_occlusion;
layout(location = 0) out vec4 f_color;
layout(set = 0, binding = 1) uniform sampler2D block_texture;
void main() {
vec3 ao_color;
switch (v_ambient_occlusion) {
case 0: ao_color = vec3(1.0, 0.0, 0.0); break;
case 1: ao_color = vec3(0.0, 1.0, 0.0); break;
case 2: ao_color = vec3(0.0, 0.0, 1.0); break;
case 3: ao_color = vec3(1.0, 1.0, 1.0); break;
}
f_color = texture(block_texture, v_tex_coord);
f_color.rgb = mix(f_color.rgb, vec3(0.05, 0.05, 0.05), 0.3 * v_ambient_occlusion * distance(v_tile_uv, vec2(0.5)));
}

The "flat" part of this ...
layout(location = 4) in flat uint v_ambient_occlusion;
... means that the same value is used for the whole triangle (taken from the provoking vertex). If you want interpolation between the 3 per-vertex values then just remove the flat qualifier.
Note that currently this is an integer attribute value, which must be flat shaded, so you'll need to convert the input into a float too.

Related

how to apply displacement map to a vertex shader in Webgl?

Hello I have an image of displacement like this. I want to make the surface displace more when the part of image getting darker.
Here is my code in vertex shader. However, it doesn't work. There is no displacement :(. I'm not sure where is the problem
varying vec2 v_uv;
varying vec3 v_normal;
varying vec3 v_position;
uniform sampler2D tex;
uniform sampler2D dis;
void main() {
float height = texture2D(dis,uv).g;
vec3 pos = position + height*normal *10.0;
gl_Position = projectionMatrix * modelViewMatrix * vec4( pos, 1.0 );
vec4 pos_1 = (modelViewMatrix * vec4(position,1.0));
gl_Position = projectionMatrix * pos_1;
v_position = pos_1.xyz;
v_normal = normalMatrix * normal;
v_uv = uv;
}
//in fragment vector
varying vec2 v_uv;
uniform sampler2D tex;
uniform sampler2D dis;
varying vec3 v_normal;
const vec3 lightDirWorld = vec3(0,1,1);
void main()
{
vec3 nhat = normalize(v_normal);
vec3 lightDir = normalize(viewMatrix * vec4(lightDirWorld, 0)).xyz;
float light = dot(nhat, lightDir);
vec4 color =texture2D(tex, v_uv);
gl_FragColor = color*vec4(light,light,light,1);
}

Vulkan diffuse shading doesn't change with object transformation

System:
GPU: Intel(R) UHD Graphics
Here is my shader code:
vertex shader:
#version 450
layout(location = 0) in vec3 position;
layout(location = 1) in vec3 color;
layout(location = 2) in vec3 normal;
layout(location = 3) in vec2 uv;
layout(location = 0) out vec3 fragCol;
layout(push_constant) uniform Push {
mat4 transform; // projection * view * model
mat4 modelMatrix;
} push;
vec3 DIRECTION_TO_LIGHT = normalize(vec3(1.0, -10.0, -1.0));
void main() {
gl_Position = push.transform * vec4(position, 1.0);
vec3 normalWorldSpace = normalize(mat3(push.modelMatrix) * normal);
float lightIntensity = max(dot(normalWorldSpace, DIRECTION_TO_LIGHT), 0);
fragCol = lightIntensity * vec3(1.0);
}
fragment shader:
#version 450 core
layout(location = 0) in vec3 fragCol;
layout(location = 0) out vec4 outColor;
layout(push_constant) uniform Push{
mat4 transform;
mat4 modelMatrix;
}push;
void main(){
outColor = vec4(fragCol, 1.0);
}
The dark parts of the model remain dark even while rotating on the z-axis. It is like the color is baked into the model which isn't the case.
I tried calculating the color on the fragment shader but that didn't work either.
Code calculating the transform matrix:
auto projectionMatrix = camera.getProjection() * camera.getView();
for (auto& obj : gameObjects) {
//rotating doesn't change the dark pixels.
obj.transform.rotation.y += glm::radians(45.0f) * deltaTime;
SimplePushConstantData push{};
auto modelMatrix = obj.transform.mat4();
push.transform = projectionMatrix * modelMatrix;
push.modelMatrix = modelMatrix;
vkCmdPushConstants(commandBuffer, pipelineLayout,
VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_FRAGMENT_BIT,
0, sizeof(SimplePushConstantData), &push);
obj.model->bind(commandBuffer);
obj.model->draw(commandBuffer);
}

PyOpenGL texture rendering black

I have been following an online tutorial on OpenGL lately to test integration of PyOpenGL into a PyQt5 application with QOpenGLWidget. I was able to follow every episode with no issues until the textures episode; I cannot get the texture to render at all.
My code is as follows (wrappers removed to make the code simpler, but I have tested it to make sure it is the same):
def FirstFrame():
positions = [
-0.5, -0.5, 0.0, 0.0,
+0.5, -0.5, 1.0, 0.0,
+0.5, +0.5, 1.0, 1.0,
-0.5, +0.5, 0.0, 1.0
]
indices = [
0, 1, 2,
2, 3, 0,
]
global g_vao
vao = g_vao = glGenVertexArrays(1)
glBindVertexArray(vao)
global g_posBuffer
posBuffer = g_posBuffer = glGenBuffers(1)
glBindBuffer(GL_ARRAY_BUFFER, posBuffer)
glBufferData(GL_ARRAY_BUFFER, (GLfloat * len(positions))(*positions), GL_STATIC_DRAW)
glEnableVertexAttribArray(0)
glVertexAttribPointer(0, 2, GL_FLOAT, GL_FALSE, 4 * sizeof(GLfloat), c_void_p(0))
glEnableVertexAttribArray(1)
glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, 4 * sizeof(GLfloat), c_void_p(8))
global g_idxBuffer
idxBuffer = g_idxBuffer = glGenBuffers(1)
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, idxBuffer)
glBufferData(GL_ELEMENT_ARRAY_BUFFER, (GLuint * len(indices))(*indices), GL_STATIC_DRAW)
vertexShader = """
#version 330 core
layout(location = 0) in vec4 position;
layout(location = 1) in vec2 texCoord;
out vec2 v_TexCoord;
void main()
{
gl_Position = position;
v_TexCoord = texCoord;
}
"""
fragmentShader = """
#version 330 core
layout(location = 0) out vec4 color;
layout(location = 1) out vec4 colorTemp;
in vec2 v_TexCoord;
uniform sampler2D u_Texture;
void main()
{
vec4 texColor = texture(u_Texture, v_TexCoord);
color = texColor;
}
"""
global g_shader
shader = g_shader = compileProgram(compileShader(vertexShader, GL_VERTEX_SHADER),
compileShader(fragmentShader, GL_FRAGMENT_SHADER))
glUseProgram(shader)
global g_data
g_data, width, height = ParseRGBA8DDS("ChernoLogo.dds") # Parses input DDS file and returns bytearray of flipped texture
localBuffer = (GLubyte * len(g_data)).from_buffer(g_data) # Making a copy instead of using from_buffer does not work either
global g_tex
tex = g_tex = glGenTextures(1)
glBindTexture(GL_TEXTURE_2D, tex)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)
glTexParameteri(GL_TEXTURE_2D, GL_GENERATE_MIPMAP, GL_FALSE)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE)
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, localBuffer) # Passing g_data directly does not work either
glBindTexture(GL_TEXTURE_2D, 0)
glActiveTexture(GL_TEXTURE0 + 0)
glBindTexture(GL_TEXTURE_2D, tex)
global g_tex_loc
tex_loc = g_tex_loc = glGetUniformLocation(shader, "u_Texture")
assert tex_loc != -1
glUniform1i(tex_loc, 0)
def MainLoopIteration():
glClear(GL_COLOR_BUFFER_BIT)
global g_shader, g_r, g_vao, g_idxBuffer
glUseProgram(g_shader)
glBindVertexArray(g_vao)
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, g_idxBuffer)
glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_INT, c_void_p(0))
I have tried color = vec4(v_TexCoord, 0.0, 1.0) and color = vec4(u_Texture, 0.0, 0.0, 1.0) (and binding to slot 1) in the fragment shader to make sure these variables are set correctly and indeed they are.
In fact, I have checked texColor and it looks to be (0.0, 0.0, 0.0, 1.0) for all pixels.
I have also verified the output of ParseRGBA8DDS() by hand and it looks to be correct.
I found the issue. This is something the tutorial does not mention, but I needed to rebind and reactivate the texture on each frame. Althought I'm not sure why it still works for the person giving the tutorial.

Having and error with my shaders and don't know how to fix [closed]

Closed. This question needs debugging details. It is not currently accepting answers.
Edit the question to include desired behavior, a specific problem or error, and the shortest code necessary to reproduce the problem. This will help others answer the question.
Closed 3 years ago.
Improve this question
I am trying to set my shaders in my code and is not running
I am at my Mac, my partner can run it perfectly but I can't, I have OpenGl 3.3
This is part of my code where the error happens, I have tried changing the version of the shaders, running it in different environments, used PyCharm, the idle, Jupyter notebook...
if __name__ == "__main__":
# Initialize glfw
if not glfw.init():
sys.exit()
width = 800
height = 600
window = glfw.create_window(width, height, "Window name", None, None)
if not window:
glfw.terminate()
sys.exit()
glfw.make_context_current(window)
# Connecting the callback function 'on_key' to handle keyboard events
glfw.set_key_callback(window, on_key)
# Defining shaders for our pipeline
vertex_shader = """
#version 330
in vec3 position;
in vec3 color;
out vec3 fragColor;
void main()
{
fragColor = color;
gl_Position = vec4(position, 1.0f);
}
"""
fragment_shader = """
#version 330
in vec3 fragColor;
out vec4 outColor;
void main()
{
outColor = vec4(fragColor, 1.0f);
}
"""
vertex_shader2 = """
#version 330
in vec3 position;
in vec3 color;
out vec3 fragColor;
void main()
{
fragColor = color;
gl_Position = vec4(2 * position, 1.0f);
}
"""
fragment_shader2 = """
#version 330
in vec3 fragColor;
out vec4 outColor;
void main()
{
float meanColor = (fragColor.r + fragColor.g + fragColor.b) / 3;
outColor = vec4(meanColor, meanColor, meanColor, 1.0f);
}
"""
# Assembling the shader program (pipeline) with both shaders
shaderProgram = OpenGL.GL.shaders.compileProgram(
OpenGL.GL.shaders.compileShader(vertex_shader, GL_VERTEX_SHADER),
OpenGL.GL.shaders.compileShader(fragment_shader, GL_FRAGMENT_SHADER))
shaderProgram2 = OpenGL.GL.shaders.compileProgram(
OpenGL.GL.shaders.compileShader(vertex_shader2, GL_VERTEX_SHADER),
OpenGL.GL.shaders.compileShader(fragment_shader2, GL_FRAGMENT_SHADER))
It should draw a triangle, but show the following error:
Traceback (most recent call last):
File "/Users/gustavo/PycharmProjects/untitled1/oli.py", line 186, in <module>
OpenGL.GL.shaders.compileShader(vertex_shader, GL_VERTEX_SHADER),
File "/Users/gustavo/PycharmProjects/untitled1/venv/lib/python3.7/site-packages/OpenGL/GL/shaders.py", line 226, in compileShader
shaderType,
RuntimeError: ('Shader compile failure (0): b'ERROR: 0:2: '' : version '130' is not supported\n ERROR: 0:11: 'f' : syntax error: syntax error\n '', [b'
#version 130
in vec3 position;
in vec3 color;
out vec3 fragColor;
void main()
{
fragColor = color;
gl_Position = vec4(position, 1.0f);
}
'], GL_VERTEX_SHADER)
MacOS uses Legacy Profile as default for all created OpenGL context. Therefor by default only OpenGL up to 2.1 and GLSL up to 1.20 is supported.
To use OpenGL 3.2+ you need to switch to the Core Profile. The naming there is a little bit confusing because it stats only 3.2Core profile, but actually this 3.2 or later (every OpenGL profile that is supported by the system/driver that is backwards compatible to 3.2)
For glut (depends on the version of glut if it works) the command on MacOS is:
glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 3);
glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 2);
glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE);
glfwWindowHint(GLFW_OPENGL_FORWARD_COMPAT, GL_TRUE);
Sorry I am not aware of python version but it should be similar functions.

Inputs and Outputs of the Geometry Shader

I was wondering if anyone would be so kind as to pin-point the problem with my program. I am certain the setback has something to do with the way in which data is passed through the GS. If, for instance, the geometry shader is taken out of the code (modifying the other two stages to accommodate for the change as well), I end up with a operational pipeline. And if I modify the data input of the GS to accept PS_INPUT instead of VS_DATA, the program does not crash, but outputs a blank blue screen. My intent here is to create a collection of squares on a two-dimensional plane, so blank blue screens are not exactly what I am going for.
Texture2D txDiffuse[26] : register(t0);
SamplerState samLinear : register(s0); //For Texturing
#define AWR_MAX_SHADE_LAY 1024
cbuffer ConstantBuffer : register(b0)
{
float4 Matrix_Array[30];
matrix Screen;
float GM;
float GA;
float GD;
float epsilon;
}
// Includes Layer Data
cbuffer CBLayer : register(b1)
{
float4 Array_Fill_Color[AWR_MAX_SHADE_LAY];
float4 Array_Line_Color[AWR_MAX_SHADE_LAY];
float Array_Width[AWR_MAX_SHADE_LAY];
float Array_Line_Pattern[AWR_MAX_SHADE_LAY];
float Array_Z[AWR_MAX_SHADE_LAY];
float Array_Thickness[AWR_MAX_SHADE_LAY];
}
//Input for Vertex Shader
struct VS_DATA
{
float4 Pos : POSITION;
int M2W_index : M2W_INDEX;
int Layer_index : LAYER_INDEX;
};
//Input for Pixel Shader
struct PS_INPUT{
float4 Pos : SV_POSITION;
float4 Color : COLOR;
int Layer_index : LAYER_INDEX;
};
//Vertex Shader
VS_DATA VS(VS_DATA input)// Vertex Shader
{
VS_DATA output = (VS_DATA)0;
//Model to World Transform
float xm = input.Pos.x, yw = input.Pos.y, zm = input.Pos.z, ww = input.Pos.w, xw, zw;
float4 transformation = Matrix_Array[input.M2W_index];
xw = ((xm)*transformation.y - (zm)*transformation.x) + transformation.z;
zw = ((xm)*transformation.x + (zm)*transformation.y) + transformation.w;
//set color
int valid_index = input.Layer_index;
output.Color = Array_Fill_Color[valid_index];
output.Color.a = 0.0;
//output.Vertex_index = input.Vertex_index;
//output.Next_Vertex_index = input.Next_Vertex_index;
//Snapping process
float sgn_x = (xw >= 0) ? 1.0 : -1.0;
float sgn_z = (zw >= 0) ? 1.0 : -1.0;
int floored_x = (int)((xw + (sgn_x*GA) + epsilon)*GD);
int floored_z = (int)((zw + (sgn_z*GA) + epsilon)*GD);
output.Pos.x = ((float)floored_x)*GM;
output.Pos.y = yw;
output.Pos.z = ((float)floored_z)*GM;
output.Pos.w = ww;
int another_valid_index = input.Layer_index;
output.Layer_index = another_valid_index;
// Transform to Screen Space
output.Pos = mul(output.Pos, Screen);
return output;
}
[maxvertexcount(6)]
void GS_Line(line VS_DATA points[2], inout TriangleStream<PS_INPUT> output)
{
float4 p0 = points[0].Pos;
float4 p1 = points[1].Pos;
float w0 = p0.w;
float w1 = p1.w;
p0.xyz /= p0.w;
p1.xyz /= p1.w;
float3 line01 = p1 - p0;
float3 dir = normalize(line01);
float3 ratio = float3(700.0, 0.0, 700.0);
ratio = normalize(ratio);
float3 unit_z = normalize(float3(0.0, -1.0, 0.0));
float3 normal = normalize(cross(unit_z, dir) * ratio);
float width = 0.01;
PS_INPUT v[4];
float3 dir_offset = dir * ratio * width;
float3 normal_scaled = normal * ratio * width;
float3 p0_ex = p0 - dir_offset;
float3 p1_ex = p1 + dir_offset;
v[0].Pos = float4(p0_ex - normal_scaled, 1) * w0;
v[0].Color = float4(1.0, 1.0, 1.0, 1.0);
v[0].Layer_index = 1;
v[1].Pos = float4(p0_ex + normal_scaled, 1) * w0;
v[1].Color = float4(1.0, 1.0, 1.0, 1.0);
v[1].Layer_index = 1;
v[2].Pos = float4(p1_ex + normal_scaled, 1) * w1;
v[2].Color = float4(1.0, 1.0, 1.0, 1.0);
v[2].Layer_index = 1;
v[3].Pos = float4(p1_ex - normal_scaled, 1) * w1;
v[3].Color = float4(1.0, 1.0, 1.0, 1.0);
v[3].Layer_index = 1;
output.Append(v[2]);
output.Append(v[1]);
output.Append(v[0]);
output.RestartStrip();
output.Append(v[3]);
output.Append(v[2]);
output.Append(v[0]);
output.RestartStrip();
}
//Pixel Shader
float4 PS(PS_INPUT input) : SV_Target{
float2 Tex = float2(input.Pos.x / (8.0), input.Pos.y / (8.0));
int the_index = input.Layer_index;
float4 tex0 = txDiffuse[25].Sample(samLinear, Tex);
if (tex0.r > 0.0)
tex0 = float4(1.0, 1.0, 1.0, 1.0);
else
tex0 = float4(0.0, 0.0, 0.0, 0.0);
if (tex0.r == 0.0)
discard;
tex0 *= input.Color;
return tex0;
}
If you compile your vertex shader as it is, you will have the following error :
(line 53) : invalid subscript 'Color'
output.Color = Array_Fill_Color[valid_index];
output is of type VS_DATA which does not contain color.
If you change your VS definition as :
PS_INPUT VS(VS_DATA input)// Vertex Shader
{
PS_INPUT output = (PS_INPUT)0;
//rest of the code here
Then your vs will compile, but then you will have a mismatched layout with GS (GS still expects a line of VS_DATA as input, and now you provide PS_INPUT to it)
This will not give you any error until you draw (and generally runtime will silently fail, you would have a mismatch message in case debug layer is on)
So you also need to modify your GS to accept PS_INPUT as input eg:
[maxvertexcount(6)]
void GS_Line(line PS_INPUT points[2], inout TriangleStream<PS_INPUT> output)

Resources