On Windows 7 with a recent OpenGL driver, and on Ubuntu 12.04 with a freeglut3 app, I can draw shadows correctly using the shadow volume method. However, when I run my code on an older Debian GNU/Linux 6.0.6 machine, the shadows are not drawn correctly, and unfortunately I am required to draw them :(
I think I have a culling problem, because when I draw the shadow volume in color I can see the volume correctly. However, culling the front and back faces of the volume creates a problem and the shadow is not drawn. I see no shadows at all.
Here is my shadow casting code:
glDisable(GL_LIGHTING);
glDepthMask(GL_FALSE);
glDepthFunc(GL_LEQUAL);
glEnable(GL_STENCIL_TEST);
glColorMask(0, 0, 0, 0);
glStencilFunc(GL_ALWAYS, 1, 0xffffffff);
// first pass, the stencil operation increments the stencil value
glFrontFace(GL_CCW);
glStencilOp(GL_KEEP, GL_KEEP, GL_INCR);
for (int i = 0; i < triangles.size(); ++i)
    if (triangles[i]->visible)
        for (int j = 0; j < 3; ++j)
        {
            Triangle *k = triangles[i]->neigh[j];
            if ((k == NULL) || (!k->visible))
            {
                // here we have a silhouette edge, we must draw a quad
                p1 = triangles[i]->p[j];
                p2 = triangles[i]->p[(j+1)%3];
                // compute the vectors from the light to the edge vertices, scaled to extrude the volume
                v1.x = (p1.x - wlp[0])*100;
                v1.y = (p1.y - wlp[1])*100;
                v1.z = (p1.z - wlp[2])*100;
                v2.x = (p2.x - wlp[0])*100;
                v2.y = (p2.y - wlp[1])*100;
                v2.z = (p2.z - wlp[2])*100;
                // draw the extruded quad
                glBegin(GL_TRIANGLE_STRIP);
                glVertex3f(p1.x, p1.y, p1.z);
                glVertex3f(p1.x + v1.x, p1.y + v1.y, p1.z + v1.z);
                glVertex3f(p2.x, p2.y, p2.z);
                glVertex3f(p2.x + v2.x, p2.y + v2.y, p2.z + v2.z);
                glEnd();
            }
        }
// second pass, the stencil operation decrements the stencil value
glFrontFace(GL_CW);
glStencilOp(GL_KEEP, GL_KEEP, GL_DECR);
for (int i = 0; i < triangles.size(); ++i)
    if (triangles[i]->visible)
        for (int j = 0; j < 3; ++j)
        {
            Triangle *k = triangles[i]->neigh[j];
            if ((k == NULL) || (!k->visible))
            {
                // here we have a silhouette edge, we must draw a quad
                p1 = triangles[i]->p[j];
                p2 = triangles[i]->p[(j+1)%3];
                // compute the vectors from the light to the edge vertices, scaled to extrude the volume
                v1.x = (p1.x - wlp[0])*100;
                v1.y = (p1.y - wlp[1])*100;
                v1.z = (p1.z - wlp[2])*100;
                v2.x = (p2.x - wlp[0])*100;
                v2.y = (p2.y - wlp[1])*100;
                v2.z = (p2.z - wlp[2])*100;
                // draw the extruded quad
                glBegin(GL_TRIANGLE_STRIP);
                glVertex3f(p1.x, p1.y, p1.z);
                glVertex3f(p1.x + v1.x, p1.y + v1.y, p1.z + v1.z);
                glVertex3f(p2.x, p2.y, p2.z);
                glVertex3f(p2.x + v2.x, p2.y + v2.y, p2.z + v2.z);
                glEnd();
            }
        }
glFrontFace(GL_CCW);
glColorMask(1, 1, 1, 1);
//draw a shadowing rectangle covering the entire screen
glColor4f(0.0f, 0.0f, 0.0f, 0.4f);
glEnable(GL_BLEND);
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
glStencilFunc(GL_NOTEQUAL, 0, 0xffffffff);
glStencilOp(GL_KEEP, GL_KEEP, GL_KEEP);
glPushMatrix();
glLoadIdentity();
glBegin(GL_TRIANGLE_STRIP);
glVertex3f(-1, 1,-1);
glVertex3f(-1,-1,-1);
glVertex3f( 1, 1,-1);
glVertex3f( 1,-1,-1);
glEnd();
glPopMatrix();
glDisable(GL_BLEND);
glDepthFunc(GL_LEQUAL);
glDepthMask(GL_TRUE);
glEnable(GL_LIGHTING);
glDisable(GL_STENCIL_TEST);
glShadeModel(GL_SMOOTH);
glPopMatrix();
and here is my initialization:
glEnable(GL_DEPTH_TEST); // Enable depth Buffering
glEnable(GL_COLOR_MATERIAL); // Enable color tracking
glEnable(GL_NORMALIZE); // Enables vector normalization (optional)
glClearColor( 0.4f, 0.4f, 0.8f, 1.0f ); // Set initial value of color buffer (Set background color also)
glHint(GL_PERSPECTIVE_CORRECTION_HINT, GL_NICEST); // Really Nice Perspective Calculations
glDepthFunc(GL_LEQUAL);
// Lighting initialization
glEnable(GL_TEXTURE_2D);
glShadeModel(GL_SMOOTH);
glEnable(GL_LIGHTING);
glLightfv(GL_LIGHT1, GL_POSITION , light.position); //Set position.
glLightfv(GL_LIGHT1, GL_AMBIENT , light.ambient); //Set ambient light.
glLightfv(GL_LIGHT1, GL_DIFFUSE , light.diffuse); //Set diffuse component.
glLightfv(GL_LIGHT1, GL_SPECULAR , light.specular); //Set specular component.
glEnable(GL_LIGHT1);
glMaterialfv(GL_FRONT, GL_AMBIENT, mat.ambient); // Set Material Ambience
glMaterialfv(GL_FRONT, GL_DIFFUSE, mat.diffuse); // Set Material Diffuse
glMaterialfv(GL_FRONT, GL_SPECULAR, mat.specular); // Set Material Specular
glCullFace(GL_BACK); // Set Culling Face To Back Face
glEnable(GL_CULL_FACE); // Enable Culling
glClearColor(0.1f, 1.0f, 0.5f, 1.0f); // Set Clear Color (Greenish Color)
// Initialize camera
glMatrixMode(GL_PROJECTION); // Switch to projection matrix
glLoadIdentity(); // Clear current matrix to identity matrix
gluPerspective(80, 1 , 0.5 , 1000000); // Set projection of camera (You can modify the arguments if needed.)
glMatrixMode(GL_MODELVIEW);
Make sure the mode you pass to glutInitDisplayMode() includes GLUT_STENCIL, otherwise you may not get a stencil buffer.
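For example, with freeglut the setup might look roughly like this (a minimal sketch, not the asker's actual code; the window size, callbacks and the explicit stencil clear are assumptions):
#include <GL/freeglut.h>
static void display(void)
{
    // Clear color, depth and stencil before the shadow passes run each frame.
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT | GL_STENCIL_BUFFER_BIT);
    // ... draw the scene, then the shadow volume passes shown above ...
    glutSwapBuffers();
}
int main(int argc, char **argv)
{
    glutInit(&argc, argv);
    // Request a stencil buffer explicitly; without GLUT_STENCIL the stencil
    // operations in the shadow passes may run against zero stencil bits.
    glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE | GLUT_DEPTH | GLUT_STENCIL);
    glutInitWindowSize(800, 600);
    glutCreateWindow("shadow volumes");
    glutDisplayFunc(display);
    GLint stencilBits = 0;
    glGetIntegerv(GL_STENCIL_BITS, &stencilBits); // verify the request was honored
    glutMainLoop();
    return 0;
}
If glGetIntegerv(GL_STENCIL_BITS, ...) reports 0 on the Debian machine, the driver or visual simply did not give you a stencil buffer, which would explain why the shadow pass misbehaves there.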
I am facing an issue with blank spaces in my shapes (e.g. a triangle and a circle). How should I deal with that problem?
Could it be caused by missing antialiasing (and if so, how should I properly enable it for drawing such shapes)? Or is there a problem in my drawing method itself?
void graphics::draw_line_internal(int x0, int y0, int x1, int y1, rgba color)
{
    if (this->initilized == false)
        return;
    UINT viewportNumber = 1;
    D3D11_VIEWPORT vp;
    this->device_context->RSGetViewports(&viewportNumber, &vp);
    float xx0 = 2.0f * (x0 - 0.5f) / vp.Width - 1.0f;
    float yy0 = 1.0f - 2.0f * (y0 - 0.5f) / vp.Height;
    float xx1 = 2.0f * (x1 - 0.5f) / vp.Width - 1.0f;
    float yy1 = 1.0f - 2.0f * (y1 - 0.5f) / vp.Height;
    D3D11_MAPPED_SUBRESOURCE map_data;
    this->device_context->Map(this->vertex_buffer.Get(), 0, D3D11_MAP_WRITE_DISCARD, 0, &map_data);
    D3DXCOLOR cashed_color = color.convert_to_D3DXCOLOR();
    COLOR_VERTEX* v = (COLOR_VERTEX*)map_data.pData;
    v[0] = { D3DXVECTOR3(xx0, yy0, 0), cashed_color };
    v[1] = { D3DXVECTOR3(xx1, yy1, 0), cashed_color };
    this->device_context->Unmap(this->vertex_buffer.Get(), 0);
    const float blend_factor[4] = { 0.f, 0.f, 0.f, 0.f };
    this->device_context->OMSetBlendState(this->blend_state.Get(), blend_factor, 0xffffffff);
    this->device_context->OMSetDepthStencilState(this->depth_stencil_state.Get(), 0);
    this->device_context->IASetInputLayout(this->input_layout.Get());
    UINT stride = sizeof(COLOR_VERTEX);
    UINT offset = 0;
    this->device_context->IASetVertexBuffers(0, 1, this->vertex_buffer.GetAddressOf(), &stride, &offset);
    this->device_context->IASetPrimitiveTopology(D3D11_PRIMITIVE_TOPOLOGY_LINESTRIP);
    this->device_context->VSSetShader(this->vertex_shader.Get(), NULL, 0);
    this->device_context->PSSetShader(this->pixel_shader.Get(), NULL, 0);
    this->device_context->OMSetRenderTargets(1, this->render_target_view.GetAddressOf(), NULL);
    this->device_context->Draw(2, 0);
}
Line points from the screenshot are (20, 150) and (70, 20).
The most likely cause of your problem would be incorrect input data. For correct behaviour it must be guaranteed that xx1, yy1 of one segment is the same as xx0, yy0 of the next segment. However, if I understand correctly, the first image you provided was produced with a single draw call? That seems to rule out this possibility.
In general, black spots could be caused by garbage values in the depth-stencil buffer, but you are not binding one :D Additionally, some of the segments in your images have an offset to them, so this possibility is ruled out as well.
By the way, the whole point of D3D11_PRIMITIVE_TOPOLOGY_LINESTRIP is to draw the entire curve at once: generate all the points up front (without duplicating shared endpoints), put them in a buffer, copy them to the GPU, and call Draw once. No funny business.
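As a rough sketch of that idea (assuming the points are already converted to NDC as in your code, a dynamic vertex buffer this->polyline_buffer created large enough for all of them, and the same COLOR_VERTEX layout as above; everything not shown is set up exactly like in draw_line_internal):
void graphics::draw_polyline_internal(const std::vector<D3DXVECTOR3>& points, D3DXCOLOR color)
{
    // Upload every point of the polyline once.
    D3D11_MAPPED_SUBRESOURCE map_data;
    this->device_context->Map(this->polyline_buffer.Get(), 0, D3D11_MAP_WRITE_DISCARD, 0, &map_data);
    COLOR_VERTEX* v = (COLOR_VERTEX*)map_data.pData;
    for (size_t i = 0; i < points.size(); ++i)
        v[i] = { points[i], color };
    this->device_context->Unmap(this->polyline_buffer.Get(), 0);
    // Bind the buffer and draw the whole strip with a single call.
    UINT stride = sizeof(COLOR_VERTEX);
    UINT offset = 0;
    this->device_context->IASetVertexBuffers(0, 1, this->polyline_buffer.GetAddressOf(), &stride, &offset);
    this->device_context->IASetPrimitiveTopology(D3D11_PRIMITIVE_TOPOLOGY_LINESTRIP);
    // ... same shader / input layout / render target setup as in draw_line_internal ...
    this->device_context->Draw((UINT)points.size(), 0);
}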
At this point I myself am quite lost in this whole situation. The only thing I can recommend is to disable everything but the essentials - shaders, input layout, vertex buffer and a render target - and look for a minimal repro.
I have a virtual scanner that generates a 2.5-D view of a point cloud (i.e. a 2D projection of a 3D point cloud) depending on the camera position. I'm using vtkCamera::GetProjectionTransformMatrix() to get the transformation matrix from world/global to camera coordinates.
However, if the input point cloud has color information for the points, I would like to preserve it.
Here are the relevant lines:
boost::shared_ptr<pcl::visualization::PCLVisualizer> vis; // camera location, viewpoint and up direction for vis were already defined before
vtkSmartPointer<vtkRendererCollection> rens = vis->getRendererCollection();
vtkSmartPointer<vtkRenderWindow> win = vis->getRenderWindow();
win->SetSize(xres, yres); // xres and yres are predefined resolutions
win->Render();
float dwidth = 2.0f / float(xres),
dheight = 2.0f / float(yres);
float *depth = new float[xres * yres];
win->GetZbufferData(0, 0, xres - 1, yres - 1, &(depth[0]));
vtkRenderer *ren = rens->GetFirstRenderer();
vtkCamera *camera = ren->GetActiveCamera();
vtkSmartPointer<vtkMatrix4x4> projection_transform = camera->GetProjectionTransformMatrix(ren->GetTiledAspectRatio(), 0, 1);
Eigen::Matrix4f mat1;
for (int i = 0; i < 4; ++i)
    for (int j = 0; j < 4; ++j)
        mat1(i, j) = static_cast<float> (projection_transform->Element[i][j]);
mat1 = mat1.inverse().eval();
Now, mat1 is used to transform the coordinates to camera view:
pcl::PointCloud<pcl::PointXYZ>::Ptr cloud; // assumed to be allocated elsewhere with xres * yres points
int ptr = 0;
for (int y = 0; y < yres; ++y)
{
    for (int x = 0; x < xres; ++x, ++ptr)
    {
        pcl::PointXYZ &pt = (*cloud)[ptr];
        if (depth[ptr] == 1.0)
        {
            pt.x = pt.y = pt.z = std::numeric_limits<float>::quiet_NaN();
            continue;
        }
        Eigen::Vector4f world_coords(dwidth * float(x) - 1.0f,
                                     dheight * float(y) - 1.0f,
                                     depth[ptr],
                                     1.0f);
        world_coords = mat1 * world_coords;
        float w3 = 1.0f / world_coords[3];
        world_coords[0] *= w3;
        world_coords[1] *= w3;
        world_coords[2] *= w3;
        pt.x = static_cast<float> (world_coords[0]);
        pt.y = static_cast<float> (world_coords[1]);
        pt.z = static_cast<float> (world_coords[2]);
    }
}
I want the virtual scanner to return pcl::PointXYZRGB point cloud with color information.
Any help on how to implement this from someone experienced in VTK would save some of my time.
It's possible that I missed a relevant question already asked here - in that case, please point me to it. Thanks.
If I understand correctly, you want the color with which each point was rendered into the win RenderWindow. You should be able to get that data from the rendering buffer by calling
float* pixels = win->GetRGBAPixelData(0, 0, xres - 1, yres - 1, 0/1);
This should give you every pixel of the rendering buffer as an array in the format [R0, G0, B0, A0, R1, G1, B1, A1, R2, ...]. The last parameter, which I wrote as 0/1, selects whether the data is taken from the front or the back OpenGL buffer. I presume double buffering is on by default, so you would want to read from the back buffer (use 1), but I am not sure.
Once you have that, you can get the color in your second loop for all pixels that belong to points (depth[ptr] != 1.0) as:
pt.r = static_cast<uint8_t>(pixels[4*ptr] * 255.0f); // the buffer holds floats in [0, 1]
pt.g = static_cast<uint8_t>(pixels[4*ptr + 1] * 255.0f);
pt.b = static_cast<uint8_t>(pixels[4*ptr + 2] * 255.0f);
You should call win->ReleaseRGBAPixelData(pixels) once you're done with it.
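Putting it together with the loop from your question, a minimal sketch could look like this (assuming cloud is now a pcl::PointCloud<pcl::PointXYZRGB>::Ptr with xres * yres points; the scaling to 8-bit channels assumes GetRGBAPixelData returns floats in [0, 1]):
float *pixels = win->GetRGBAPixelData(0, 0, xres - 1, yres - 1, 1); // 1 = back buffer
int ptr = 0;
for (int y = 0; y < yres; ++y)
{
    for (int x = 0; x < xres; ++x, ++ptr)
    {
        pcl::PointXYZRGB &pt = (*cloud)[ptr];
        if (depth[ptr] == 1.0) // nothing was rendered at this pixel
        {
            pt.x = pt.y = pt.z = std::numeric_limits<float>::quiet_NaN();
            continue;
        }
        // ... reconstruct pt.x, pt.y, pt.z from depth[ptr] exactly as in the question ...
        pt.r = static_cast<uint8_t>(pixels[4 * ptr] * 255.0f);
        pt.g = static_cast<uint8_t>(pixels[4 * ptr + 1] * 255.0f);
        pt.b = static_cast<uint8_t>(pixels[4 * ptr + 2] * 255.0f);
    }
}
win->ReleaseRGBAPixelData(pixels);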
(Sorry for my bad English.)
I'm new to Stack Overflow and am writing a 3D game application with the MS Visual C++ 2015 compiler, Direct3D 9 and HLSL (shader model 3.0).
I've implemented deferred rendering with 4 render target textures.
I stored per-pixel depth values in a render target texture and created a shadow map. Here are the results. (All meshes appear black because they are small and close to the camera. The far plane distance is 1000.0f.)
The depth texture and the shadow map.
I rendered a full-screen quad with the shadow mapping shaders and output the shadows in red to confirm the shader is working correctly.
But it seems that the shaders output wrong results: the shadow map texture repeats across the mesh surfaces.
https://www.youtube.com/watch?v=1URGgoCR6Zc
Here is the shadow mapping vertex shader to draw the quad.
struct VsInput {
    float4 position : POSITION0;
};
struct VsOutput {
    float4 position : POSITION0;
    float4 cameraViewRay : TEXCOORD0;
};
float4x4 matInverseCameraViewProjection;
float4 cameraWorldPosition;
float farDistance;
VsOutput vs_main(VsInput input) {
    VsOutput output = (VsOutput)0;
    output.position = input.position;
    output.cameraViewRay = mul(float4(input.position.xy, 1.0f, 1.0f) * farDistance, matInverseCameraViewProjection);
    output.cameraViewRay /= output.cameraViewRay.w;
    output.cameraViewRay.xyz -= cameraWorldPosition.xyz;
    return output;
}
And here is the shadow mapping pixel shader to draw the quad.
struct PsInput {
    float2 screenPosition : VPOS;
    float4 viewRay : TEXCOORD0;
};
struct PsOutput {
    float4 color : COLOR0;
};
texture depthMap;
texture shadowMap;
sampler depthMapSampler = sampler_state {
    Texture = (depthMap);
    AddressU = CLAMP;
    AddressV = CLAMP;
    MagFilter = POINT;
    MinFilter = POINT;
    MipFilter = POINT;
};
sampler shadowMapSampler = sampler_state {
    Texture = (shadowMap);
    AddressU = CLAMP;
    AddressV = CLAMP;
    MagFilter = POINT;
    MinFilter = POINT;
    MipFilter = POINT;
};
//float4x4 matCameraView;
float4x4 matLightView;
float4x4 matLightProjection;
float4 cameraWorldPosition;
float4 lightWorldPosition;
float2 halfPixel;
float epsilon;
float farDistance;
PsOutput ps_main(PsInput input) {
    PsOutput output = (PsOutput)0;
    output.color.a = 1.0f;
    //Reconstruct the world position using the view-space linear depth value.
    float2 textureUv = input.screenPosition * halfPixel * 2.0f - halfPixel;
    float viewDepth = tex2D(depthMapSampler, textureUv).r;
    float3 eye = input.viewRay.xyz * viewDepth;
    float4 worldPosition = float4((eye + cameraWorldPosition.xyz), 1.0f);
    //Test if the reconstructed world position has right coordinate values.
    //output.color = mul(worldPosition, matCameraView).z / farDistance;
    float4 positionInLightView = mul(worldPosition, matLightView);
    float lightDepth = positionInLightView.z / farDistance;
    float4 positionInLightProjection = mul(positionInLightView, matLightProjection);
    positionInLightProjection /= positionInLightProjection.w;
    //If-statement doesn't work???
    float condition = positionInLightProjection.x >= -1.0f;
    condition *= positionInLightProjection.x <= 1.0f;
    condition *= positionInLightProjection.y >= -1.0f;
    condition *= positionInLightProjection.y <= 1.0f;
    condition *= positionInLightProjection.z >= 0.0f;
    condition *= positionInLightProjection.z <= 1.0f;
    condition *= positionInLightProjection.w > 0.0f;
    float2 shadowMapUv = float2(
        positionInLightProjection.x * 0.5f + 0.5f,
        -positionInLightProjection.y * 0.5f + 0.5f
    );
    //If-statement doesn't work???
    float condition2 = shadowMapUv.x >= 0.0f;
    condition2 *= shadowMapUv.x <= 1.0f;
    condition2 *= shadowMapUv.y >= 0.0f;
    condition2 *= shadowMapUv.y <= 1.0f;
    float viewDepthInShadowMap = tex2D(shadowMapSampler, shadowMapUv).r;
    output.color.r = lightDepth > viewDepthInShadowMap + epsilon;
    output.color.r *= condition;
    output.color.r *= condition2;
    return output;
}
It seems that the UV coordinates for the shadow map have wrong values, but I can't figure out what the real problem is.
Many thanks for any help.
EDIT: I've updated the shader code. I decided to use view-space linear depth and confirmed that the reconstructed world position has the right values. I really don't understand why the shadow map coordinates end up wrong...
It really looks like you are using the wrong bias. Search for "shadow acne" and you should find the answer to your problem. The resolution of the shadow map could also be an issue.
I found the solution.
The first problem was that the render target texture had the wrong format. I should have used D3DFMT_R32F. (I had used D3DFMT_A8R8G8B8.)
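For reference, creating such a single-channel float render target in Direct3D 9 might look roughly like the sketch below; the device, width and height variables are placeholders, not taken from my actual code.
// Sketch: a 32-bit float render target texture for storing linear depth.
IDirect3DTexture9 *depthTexture = NULL;
HRESULT hr = device->CreateTexture(
    width, height,
    1,                        // a single mip level
    D3DUSAGE_RENDERTARGET,    // depth gets rendered into it
    D3DFMT_R32F,              // one 32-bit float channel instead of D3DFMT_A8R8G8B8
    D3DPOOL_DEFAULT,
    &depthTexture,
    NULL);
// Check hr, and consider verifying support with IDirect3D9::CheckDeviceFormat first,
// since not every adapter exposes D3DFMT_R32F render targets.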
And I added these lines to my shadow mapping pixel shader.
//Reconstruct the world position using the view-space linear depth value.
float2 textureUv = input.screenPosition * halfPixel * 2.0f - halfPixel;
float4 viewPosition = float4(input.viewRay.xyz * tex2D(depthMapSampler, textureUv).r, 1.0f);
float4 worldPosition = mul(viewPosition, matInverseCameraView);
...
//If-statement doesn't work???
float condition = positionInLightProjection.x >= -1.0f;
condition *= positionInLightProjection.x <= 1.0f;
condition *= positionInLightProjection.y >= -1.0f;
condition *= positionInLightProjection.y <= 1.0f;
condition *= positionInLightProjection.z >= 0.0f;
condition *= positionInLightProjection.z <= 1.0f;
condition *= viewPosition.z < farDistance;
The last line was the key and solved my second problem. The 'farDistance' is the far plane distance of the camera frustum. I'm still trying to understand why that is needed.
You can use saturate to clamp the positionInLightProjection and compare it against the unsaturated variable. This way you can verify that positionInLightProjection is within 0..1.
if ((saturate(positionInLightProjection.x) == positionInLightProjection.x) &&
    (saturate(positionInLightProjection.y) == positionInLightProjection.y)) {
    // we are in the view of light
    // todo: compare depth values from shadow map and current scene depth
} else {
    // this is shadow for sure!
}
Lately I implemented the FXAA algorithm in my OpenGL application. I haven't fully understood this algorithm yet, but I know that it uses contrast data of the final image to selectively apply blurring. As a post-processing effect that makes sense. But since I use deferred shading in my application, I already have a depth texture of the scene. Using that might make it much easier and more precise to find the edges where blur should be applied.
So is there a known antialiasing algorithm that uses the depth texture instead of the final image to find the edges? To be clear, I mean an antialiasing algorithm that works per pixel as a post-process, not per vertex.
After some research I found out that this idea is already widely used in deferred renderers. I decided to post this answer because I came up with my own implementation, which I want to share with the community.
Blurring is applied to a pixel based on the change in the depth gradient and the change in the angle of the normals.
// GLSL fragment shader
#version 330
in vec2 coord;
out vec4 image;
uniform sampler2D image_tex;
uniform sampler2D position_tex;
uniform sampler2D normal_tex;
uniform vec2 frameBufSize;
void depth(out float value, in vec2 offset)
{
    value = texture2D(position_tex, coord + offset / frameBufSize).z / 1000.0f;
}
void normal(out vec3 value, in vec2 offset)
{
    value = texture2D(normal_tex, coord + offset / frameBufSize).xyz;
}
void main()
{
    // depth
    float dc, dn, ds, de, dw;
    depth(dc, vec2( 0, 0));
    depth(dn, vec2( 0, +1));
    depth(ds, vec2( 0, -1));
    depth(de, vec2(+1, 0));
    depth(dw, vec2(-1, 0));
    float dvertical = abs(dc - ((dn + ds) / 2));
    float dhorizontal = abs(dc - ((de + dw) / 2));
    float damount = 1000 * (dvertical + dhorizontal);
    // normals
    vec3 nc, nn, ns, ne, nw;
    normal(nc, vec2( 0, 0));
    normal(nn, vec2( 0, +1));
    normal(ns, vec2( 0, -1));
    normal(ne, vec2(+1, 0));
    normal(nw, vec2(-1, 0));
    float nvertical = dot(vec3(1), abs(nc - ((nn + ns) / 2.0)));
    float nhorizontal = dot(vec3(1), abs(nc - ((ne + nw) / 2.0)));
    float namount = 50 * (nvertical + nhorizontal);
    // blur
    const int radius = 1;
    vec3 blur = vec3(0);
    int n = 0;
    for(float u = -radius; u <= +radius; ++u)
        for(float v = -radius; v <= +radius; ++v)
        {
            blur += texture2D(image_tex, coord + vec2(u, v) / frameBufSize).rgb;
            n++;
        }
    blur /= n;
    // result
    float amount = mix(damount, namount, 0.5);
    vec3 color = texture2D(image_tex, coord).rgb;
    image = vec4(mix(color, blur, min(amount, 0.75)), 1.0);
}
For comparison, this is the scene without any anti-aliasing.
This is the result with anti-aliasing applied.
You may need to view the images at their full resolution to judge the effect. In my view the result is adequate for the simple implementation. The best thing is that there are nearly no jagged artifacts when the camera moves.
XNA doesn't have any built-in methods for drawing circles.
Normally, when I had to draw a circle that always had the same color, I just made an image of that circle and displayed it as a sprite.
But now the color of the circle is specified at runtime; any ideas how to deal with that?
You can simply make an image of a circle with a transparent background and the colored part of the circle in white. Then, when it comes to drawing the circles in the Draw() method, pass the tint you want:
Texture2D circle = CreateCircle(100);
// Change Color.Red to the colour you want
spriteBatch.Draw(circle, new Vector2(30, 30), Color.Red);
Just for fun, here is the CreateCircle method:
public Texture2D CreateCircle(int radius)
{
    int outerRadius = radius*2 + 2; // So circle doesn't go out of bounds
    Texture2D texture = new Texture2D(GraphicsDevice, outerRadius, outerRadius);
    Color[] data = new Color[outerRadius * outerRadius];
    // Colour the entire texture transparent first.
    for (int i = 0; i < data.Length; i++)
        data[i] = Color.TransparentWhite;
    // Work out the minimum step necessary using trigonometry + sine approximation.
    double angleStep = 1f/radius;
    for (double angle = 0; angle < Math.PI*2; angle += angleStep)
    {
        // Use the parametric definition of a circle: http://en.wikipedia.org/wiki/Circle#Cartesian_coordinates
        int x = (int)Math.Round(radius + radius * Math.Cos(angle));
        int y = (int)Math.Round(radius + radius * Math.Sin(angle));
        data[y * outerRadius + x + 1] = Color.White;
    }
    texture.SetData(data);
    return texture;
}