For an experiment I want to pass all three corner positions of a triangle to the pixel shader and manually reconstruct the pixel position from them and the barycentric coordinates.
In order to do this I wrote a geometry shader that passes the triangle's positions to the pixel shader:
struct GeoIn
{
float4 projPos : SV_POSITION;
float3 position : POSITION;
};
struct GeoOut
{
float4 projPos : SV_POSITION;
float3 position : POSITION;
float3 p[3] : TRIPOS;
};
[maxvertexcount(3)]
void main(triangle GeoIn i[3], inout TriangleStream<GeoOut> OutputStream)
{
GeoOut o;
// add triangle data
for(uint idx = 0; idx < 3; ++idx)
{
o.p[idx] = i[idx].position;
}
// generate vertices
for(uint idx = 0; idx < 3; ++idx)
{
o.projPos = i[idx].projPos;
o.position = i[idx].position;
OutputStream.Append(o);
}
OutputStream.RestartStrip();
}
The pixel shader outputs the manually reconstructed position:
struct PixelIn
{
float4 projPos : SV_POSITION;
float3 position : POSITION;
float3 p[3] : TRIPOS;
float3 bayr : SV_Barycentrics;
};
float4 main(PixelIn i) : SV_TARGET
{
float3 pos = i.bayr.x * i.p[0] + i.bayr.y * i.p[1] + i.bayr.z * i.p[2];
return float4(abs(pos), 1.0);
}
And I get the following (expected) result:
However, when I modify my PixelIn struct by adding nointerpolation to p[3]:
struct PixelIn
{
...
nointerpolation float3 p[3] : TRIPOS;
};
I get:
I did not expect a different result, because I am not changing the values of p[] for a single triangle in the geometry shader. I tried debugging it by changing the output to float4(abs(i.p[0]), 1.0); with and without nointerpolation (see the snippet below). Without nointerpolation the values of p[] do not vary within a triangle, which makes sense because all three vertices carry the same value. With nointerpolation the values of p[] do change slightly. Why is that the case? I thought nointerpolation was not supposed to interpolate anything.
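For reference, the debug variant is just the pixel shader above with the returned value swapped, so that only the first per-triangle position is visualised:
// Debug output: show abs(p[0]) directly (PixelIn as declared above)
float4 main(PixelIn i) : SV_TARGET
{
    return float4(abs(i.p[0]), 1.0);
}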
Edit:
This is the wireframe of my geometry:
I have loaded a .dds cube map texture and put it on the sphere model. Obviously it doesn't look right, because I would have to map the texture to the right points on the sphere.
How can I map the texture to the right points on the sphere?
Something like this:
TextureCube TEXTURE_REFLECTION;
//...
VS_OUTPUT vs(VS_INPUT IN) {
//...
float4 worldPosition = mul(float4(IN.Position.xyz, 1.0f), IN.World);
OUT.worldpos = worldPosition.xyz;
//...
}
PS_OUTPUT ps(VS_OUTPUT IN) {
//...
float4 ColorTex = TEXTURE_DIFFUSE.Sample(SAMPLER_DEFAULT, IN.TexCoord);
float3 normalFromMap = mul(2.0f * TEXTURE_NORMAL.Sample(SAMPLER_DEFAULT, IN.TexCoord).xyz - 1.0f, IN.tangentToWorld);
//...
float3 incident = -normalize(CAMERA_POSITION - IN.worldpos);
float3 reflectionVector = reflect(incident, normalFromMap);
ColorTex.rgb = lerp(ColorTex.rgb, TEXTURE_REFLECTION.Sample(SAMPLER_DEFAULT, reflectionVector).rgb, MATERIAL_REFLECTIVITY);
}
I figured it out.
Pixel shader:
TextureCube CubeMap: register(t0);
SamplerState TexSampler : register(s0);
float4 main(LightingPixelShaderInput input) : SV_Target
{
float4 cubeTexture = CubeMap.Sample(TexSampler, input.worldNormal);
//light calculations
float3 finalColour = (gAmbientColour + diffuseLights) * cubeTexture.rgb +
(specularLights) * cubeTexture.a;
return float4(finalColour, 1.0f);
}
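The key point is that the cube map is sampled with a world-space direction (the normal) rather than a 2D UV. For completeness, here is a minimal vertex-shader sketch that produces the worldNormal the pixel shader uses; the cbuffer and struct member names are assumptions for illustration, not taken from my actual project:
cbuffer PerModelConstants : register(b0)
{
    float4x4 gWorldMatrix;
    float4x4 gViewProjectionMatrix;
}

struct LightingPixelShaderInput
{
    float4 projectedPosition : SV_POSITION;
    float3 worldPosition     : WORLDPOS;
    float3 worldNormal       : NORMAL;
};

LightingPixelShaderInput main(float3 modelPosition : POSITION, float3 modelNormal : NORMAL)
{
    LightingPixelShaderInput output;

    // Transform the position into world space, then into clip space.
    float4 worldPosition     = mul(float4(modelPosition, 1.0f), gWorldMatrix);
    output.projectedPosition = mul(worldPosition, gViewProjectionMatrix);
    output.worldPosition     = worldPosition.xyz;

    // Rotate the normal into world space (assumes no non-uniform scaling).
    output.worldNormal = normalize(mul(modelNormal, (float3x3)gWorldMatrix));

    return output;
}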
I am developing a small program that loads 3D models using Assimp, but it does not render the model. At first I thought that the vertices and indices were not loaded correctly, but this is not the case (I printed the vertices and indices to a txt file). I think the problem might be with the position of the model and the camera. The application does not return any error; it runs properly.
Vertex Struct:
struct Vertex {
XMFLOAT3 position;
XMFLOAT2 texture;
XMFLOAT3 normal;
};
Input layout:
D3D12_INPUT_ELEMENT_DESC inputLayout[] =
{
{ "POSITION", 0, DXGI_FORMAT_R32G32B32_FLOAT, 0, 0, D3D12_INPUT_CLASSIFICATION_PER_VERTEX_DATA, 0 },
{ "TEXCOORD", 0, DXGI_FORMAT_R32G32_FLOAT, 0, 12, D3D12_INPUT_CLASSIFICATION_PER_VERTEX_DATA, 0 },
{ "NORMAL", 0, DXGI_FORMAT_R32G32B32_FLOAT, 0, D3D12_APPEND_ALIGNED_ELEMENT, D3D12_INPUT_CLASSIFICATION_PER_VERTEX_DATA, 0 }
};
Vertices, texcoords, normals and indices loader:
model = new ModelMesh();
std::vector<XMFLOAT3> positions;
std::vector<XMFLOAT3> normals;
std::vector<XMFLOAT2> texCoords;
std::vector<unsigned int> indices;
model->LoadMesh("beast.x", positions, normals,
texCoords, indices);
// Create vertex buffer
if (positions.size() == 0)
{
MessageBox(0, L"Vertices vector is empty.",
L"Error", MB_OK);
}
Vertex* vList = new Vertex[positions.size()];
for (size_t i = 0; i < positions.size(); i++)
{
Vertex vert;
XMFLOAT3 pos = positions[i];
vert.position = XMFLOAT3(pos.x, pos.y, pos.z);
XMFLOAT3 norm = normals[i];
vert.normal = XMFLOAT3(norm.x, norm.y, norm.z);
XMFLOAT2 tex = texCoords[i];
vert.texture = XMFLOAT2(tex.x, tex.y);
vList[i] = vert;
}
int vBufferSize = sizeof(vList);
Build of the camera and views:
XMMATRIX tmpMat = XMMatrixPerspectiveFovLH(45.0f*(3.14f/180.0f), (float)Width / (float)Height, 0.1f, 1000.0f);
XMStoreFloat4x4(&cameraProjMat, tmpMat);
// set starting camera state
cameraPosition = XMFLOAT4(0.0f, 2.0f, -4.0f, 0.0f);
cameraTarget = XMFLOAT4(0.0f, 0.0f, 0.0f, 0.0f);
cameraUp = XMFLOAT4(0.0f, 1.0f, 0.0f, 0.0f);
// build view matrix
XMVECTOR cPos = XMLoadFloat4(&cameraPosition);
XMVECTOR cTarg = XMLoadFloat4(&cameraTarget);
XMVECTOR cUp = XMLoadFloat4(&cameraUp);
tmpMat = XMMatrixLookAtLH(cPos, cTarg, cUp);
XMStoreFloat4x4(&cameraViewMat, tmpMat);
cube1Position = XMFLOAT4(0.0f, 0.0f, 0.0f, 0.0f);
XMVECTOR posVec = XMLoadFloat4(&cube1Position);
tmpMat = XMMatrixTranslationFromVector(posVec);
XMStoreFloat4x4(&cube1RotMat, XMMatrixIdentity());
XMStoreFloat4x4(&cube1WorldMat, tmpMat);
Update function:
XMStoreFloat4x4(&cube1WorldMat, worldMat);
XMMATRIX viewMat = XMLoadFloat4x4(&cameraViewMat); // load view matrix
XMMATRIX projMat = XMLoadFloat4x4(&cameraProjMat); // load projection matrix
XMMATRIX wvpMat = XMLoadFloat4x4(&cube1WorldMat) * viewMat * projMat; // create wvp matrix
XMMATRIX transposed = XMMatrixTranspose(wvpMat); // must transpose wvp matrix for the gpu
XMStoreFloat4x4(&cbPerObject.wvpMat, transposed); // store transposed wvp matrix in constant buffer
memcpy(cbvGPUAddress[frameIndex], &cbPerObject, sizeof(cbPerObject));
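As an aside, the transpose is only needed because HLSL packs constant-buffer matrices column-major by default; an equivalent alternative (a sketch, not what the code above does) is to declare the matrix row_major in the shader and copy the XMMATRIX in without XMMatrixTranspose:
cbuffer ConstantBuffer : register(b0)
{
    // With row_major packing the CPU-side matrix data can be copied in
    // directly, and the XMMatrixTranspose call above becomes unnecessary.
    row_major float4x4 wvpMat;
};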
Vertex shader:
struct VS_INPUT
{
float4 pos : POSITION;
float2 tex: TEXCOORD;
float3 normal : NORMAL;
};
struct VS_OUTPUT
{
float4 pos: SV_POSITION;
float2 tex: TEXCOORD;
float3 normal: NORMAL;
};
cbuffer ConstantBuffer : register(b0)
{
float4x4 wvpMat;
};
VS_OUTPUT main(VS_INPUT input)
{
VS_OUTPUT output;
output.pos = mul(input.pos, wvpMat);
return output;
}
I know this is a lot of code to read, but I don't understand what is going wrong with it. I hope somebody can help me.
A few things to try/check:
Make your background clear color grey. That way, if you are drawing black triangles you will see them.
Turn backface culling off in the rendering state, in case your triangles are back to front.
Turn depth test off in the rendering state.
Turn off alpha blending.
You don't show your pixel shader, but try writing a constant color to see if your lighting calculation is broken (a minimal example is sketched after this list).
Use NVIDIA's Nsight tool, or the Visual Studio Graphics Debugger, to see what your graphics pipeline is doing.
Those are usually the things I try first...
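For the constant-color test mentioned above, a pixel shader as small as this is enough (a sketch; it ignores whatever inputs your vertex shader produces):
// Debug pixel shader: ignore all inputs and return a fixed, highly visible color.
// If magenta geometry shows up, the vertex data and transforms are fine and the
// problem is in the original lighting/texturing code.
float4 main() : SV_TARGET
{
    return float4(1.0f, 0.0f, 1.0f, 1.0f);
}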
I'm having massive difficulties setting up a perspective projection with DirectX.
I've been stuck on this for weeks; any help would be appreciated.
As far as I can tell my pipeline setup is fine, in that the shader is getting exactly the data I am packing into it, so I'll forgo including that code unless asked for it.
I am using glm for maths (therefore column matrices) on the host side, set to use a left-handed coordinate system. However, I couldn't seem to get its projection matrix to work (I get a blank screen if I use the matrix it gives me), so I wrote this, which I hope is only temporary:
namespace
{
glm::mat4
perspective()
{
DirectX::XMMATRIX dx = DirectX::XMMatrixPerspectiveFovLH(glm::radians(60.0f), 1, 0.01, 1000.0f);
glm::mat4 glfromdx;
for (int c = 0; c < 3; c++)
{
for (int r = 0; r < 3; r++)
{
glfromdx[c][r] = DirectX::XMVectorGetByIndex(dx.r[c], r);
}
}
return glfromdx;
}
}
This does hand me a projection matrix that works, but I don't really think it is working correctly. I'm not certain whether it's an actual perspective projection, and my clip space is minuscule: if I move my camera around at all, geometry clips. Photos included.
photo 1: (straight on)
photo 2: (cam tilted up)
photo 3: (cam tilted left)
Here is my vertex shader
https://gist.github.com/anonymous/31726881a5476e6c8513573909bf4dc6
cbuffer offsets
{
float4x4 model; //<------currently just identity
float4x4 view;
float4x4 proj;
}
struct idata
{
float3 position : POSITION;
float4 colour : COLOR;
};
struct odata
{
float4 position : SV_POSITION;
float4 colour : COLOR;
};
void main(in idata dat, out odata odat)
{
odat.colour = dat.colour;
odat.position = float4(dat.position, 1.0f);
//point' = point * model * view * projection
float4x4 mvp = mul(model, mul(view, proj));
//column matrices.
odat.position = mul(mvp, odat.position);
}
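For reference, the two mul conventions in HLSL, since the multiplication order has to match how the matrices are laid out on the host side (a general sketch, independent of the shader above):
// Illustration only: the two equivalent ways of applying an MVP transform.
float4 TransformRowVector(float4 pos, float4x4 model, float4x4 view, float4x4 proj)
{
    // Row-vector convention: v' = v * M (matrices effectively row-major).
    return mul(pos, mul(mul(model, view), proj));
}

float4 TransformColumnVector(float4 pos, float4x4 model, float4x4 view, float4x4 proj)
{
    // Column-vector convention: v' = M * v (matrices effectively column-major,
    // which matches glm's memory layout).
    return mul(mul(proj, mul(view, model)), pos);
}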
And here is my fragment shader
https://gist.github.com/anonymous/342a707e603b9034deb65eb2c2101e91
struct idata
{
float4 position : SV_POSITION;
float4 colour : COLOR;
};
float4 main(in idata data) : SV_TARGET
{
return float4(data.colour[0], data.colour[1], data.colour[2] , 1.0f);
}
Also, the full camera implementation is here:
https://gist.github.com/anonymous/b15de44f08852cbf4fa4d6b9fafa0d43
https://gist.github.com/anonymous/8702429212c411ddb04f5163b1d885b9
Finally, these are the vertices being passed in (3 position floats followed by 4 colour floats per vertex):
(0.0f, 0.5f, 0.0f, 1.0f, 0.0f, 0.0f, 1.0f),
(0.45f, -0.5f, 0.0f, 0.0f, 1.0f, 0.0f, 1.0f),
(-0.45f, -0.5f, 0.0f, 0.0f, 0.0f, 1.0f, 1.0f)
Edit:
I changed the position × model × view × projection calculation in the shader to this:
odat.position = mul(mul(mul(proj, view), model), odat.position);
I'm still having the same issue.
Edit 2:
The above change, combined with changing how the perspective matrix is generated (now purely using glm):
glm::mat4 GLtoDX_NDC(glm::translate(glm::vec3(0.0, 0.0, 0.5)) *
glm::scale(glm::vec3(1.0, 1.0, 0.5)));
glm::mat4 gl = GLtoDX_NDC * glm::perspectiveFovLH(60.0f, 500.0f, 500.0f, 100.0f, 0.1f);
This works, in so far as objects no longer cull at about a micrometre and are projected to roughly their correct sizes. However, now everything is upside down and rotating causes a shear...
I am trying to perform diffuse reflection in HLSL. Currently I am working on the vertex shader. Unfortunately I get the following error when trying to compile with fxc.exe:
C:\Users\BBaczek\Projects\MyApp\VertexShader.vs.hlsl(2,10-25)
: error X4500: overlapping register semantics not yet implemented 'c1'
C:\Users\BBaczek\Projects\MyApp\VertexShader.vs.hlsl(2,10-25)
: error X4500: overlapping register semantics not yet implemented 'c2'
C:\Users\BBaczek\Projects\MyApp\VertexShader.vs.hlsl(2,10-25)
: error X4500: overlapping register semantics not yet implemented 'c3'
Vertex shader code:
float4x4 WorldViewProj : register(c0);
float4x4 inv_world_matrix : register(c1);
float4 LightAmbient;
float4 LightPosition;
struct VertexData
{
float4 Position : POSITION;
float4 Normal : NORMAL;
float3 UV : TEXCOORD;
};
struct VertexShaderOutput
{
float4 Position : POSITION;
float3 Color: COLOR;
};
VertexShaderOutput main(VertexData vertex)
{
VertexShaderOutput output;
vertex.Normal = normalize(vertex.Normal);
float4 newColor = LightAmbient;
vector obj_light = mul(LightPosition, inv_world_matrix);
vector LightDir = normalize(obj_light - vertex.Position);
float DiffuseAttn = max(0, dot(vertex.Normal, LightDir));
vector light = { 0.8, 0.8, 0.8, 1 };
newColor += light * DiffuseAttn;
output.Position = mul(vertex.Position, WorldViewProj);
output.Color = float3(newColor.r, newColor.g, newColor.b);
return output;
}
And the command I use to compile it:
fxc /T vs_2_0 /O3 /Zpr /Fo VertexShader.vs VertexShader.vs.hlsl
Why am I getting this error? What can I do to prevent this?
Figured it out; I am not deleting this question because someone might find it useful.
What you need to do is change
float4x4 WorldViewProj : register(c0);
float4x4 inv_world_matrix : register(c1);
to
float4x4 WorldViewProj : register(c0);
float4x4 inv_world_matrix : register(c4);
I am not sure exactly how the packing works, but I assume a float4x4 takes up more space among the constant registers (it is 4x4, so it occupies four register slots). The explanation may be a bit hand-wavy, but it works.
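A sketch of how the registers end up being consumed (the exact numbers are just for illustration):
float4x4 WorldViewProj    : register(c0); // occupies c0, c1, c2, c3
float4x4 inv_world_matrix : register(c4); // occupies c4, c5, c6, c7
float4   LightAmbient     : register(c8); // a plain float4 fits in a single register
float4   LightPosition    : register(c9);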
I have a simple function that creates a square covering the entire screen, which I use for applying post-processing effects; however, as far as I can tell it has been the cause of countless errors.
When I run my code in PIX I get the following mesh, but the square should be straight and covering the screen, shouldn't it?
My vertex shader does no transformation and simply passes position information to the pixel shader.
The function that creates the square is as follows:
private void InitializeGeometry()
{
meshes = new Dictionary<Vector3, Mesh>();
//build array of vertices for one square
ppVertex[] vertexes = new ppVertex[4];
//vertexes[0].Position = new Vector3(-1f, -1f, 0.25f);
vertexes[0].Position = new Vector3(-1, -1, 1f);
vertexes[1].Position = new Vector3(-1, 1, 1f);
vertexes[2].Position = new Vector3(1, -1, 1f);
vertexes[3].Position = new Vector3(1, 1, 1f);
vertexes[0].TexCoords = new Vector2(0, 0);
vertexes[1].TexCoords = new Vector2(0, 1);
vertexes[2].TexCoords = new Vector2(1, 0);
vertexes[3].TexCoords = new Vector2(1, 1);
//build index array for the vertices to build a quad from two triangles
short[] indexes = { 0, 1, 2, 1, 3, 2 };
//create the data stream to push the vertex data into the buffer
DataStream vertices = new DataStream(Marshal.SizeOf(typeof(Vertex)) * 4, true, true);
//load the data stream
vertices.WriteRange(vertexes);
//reset the data position
vertices.Position = 0;
//create the data stream to push the index data into the buffer
DataStream indices = new DataStream(sizeof(short) * 6, true, true);
//load the data stream
indices.WriteRange(indexes);
//reset the data position
indices.Position = 0;
//create the mesh object
Mesh mesh = new Mesh();
//create the description of the vertex buffer
D3D.BufferDescription vbd = new BufferDescription();
vbd.BindFlags = D3D.BindFlags.VertexBuffer;
vbd.CpuAccessFlags = D3D.CpuAccessFlags.None;
vbd.OptionFlags = ResourceOptionFlags.None;
vbd.SizeInBytes = Marshal.SizeOf(typeof(Vertex)) * 4;
vbd.Usage = ResourceUsage.Default;
//create and assign the vertex buffer to the mesh, filling it with data
mesh.VertexBuffer = new D3D.Buffer(device, vertices, vbd);
//create the description of the index buffer
D3D.BufferDescription ibd = new BufferDescription();
ibd.BindFlags = D3D.BindFlags.IndexBuffer;
ibd.CpuAccessFlags = D3D.CpuAccessFlags.None;
ibd.OptionFlags = ResourceOptionFlags.None;
ibd.SizeInBytes = sizeof(short) * 6;
ibd.Usage = ResourceUsage.Default;
//create and assign the index buffer to the mesh, filling it with data
mesh.IndexBuffer = new D3D.Buffer(device, indices, ibd);
//get vertex and index counts
mesh.vertices = vertexes.GetLength(0);
mesh.indices = indexes.Length;
//close the data streams
indices.Close();
vertices.Close();
meshes.Add(new Vector3(0), mesh);
}
and when I render the square:
private void DrawScene()
{
lock (meshes)
{
foreach (Mesh mesh in meshes.Values)
{
if (mesh.indices > 0)
{
try
{
//if (camera.SphereInFrustum(mesh.BoundingSphere, sphereRadius))
//{
context.InputAssembler.SetVertexBuffers(0, new VertexBufferBinding(mesh.VertexBuffer, Marshal.SizeOf(typeof(Vertex)), 0));
context.InputAssembler.SetIndexBuffer(mesh.IndexBuffer, Format.R16_UInt, 0);
context.DrawIndexed(mesh.indices, 0, 0);
//}
}
catch (Exception err)
{
MessageBox.Show(err.Message);
}
}
}
}
}
EDIT: I've added the vertex shader being run
cbuffer EveryFrame : register(cb0)
{
float3 diffuseColor : packoffset(c0);
float3 lightdir : packoffset(c1);
};
cbuffer EveryMotion : register(cb1)
{
float4x4 WorldViewProjection : packoffset(c0);
float4x4 LightWorldViewProjection : packoffset(c4);
};
struct VS_IN
{
float3 position : POSITION;
float3 normal : NORMAL;
float4 col : TEXCOORD;
};
struct PS_IN
{
float4 position : SV_POSITION;
float4 col : TEXCOORD;
float3 normal : NORMAL;
};
PS_IN VS(VS_IN input)
{
PS_IN output;
output.position = float4(input.position,1);
output.col = input.col;
output.normal = input.normal;
return output;
}
Here's PIX's vertex output.
PreVS:
PostVS:
And here's the disassembly PIX generated when I chose to debug vertex 0:
//
// Generated by Microsoft (R) HLSL Shader Compiler 9.29.952.3111
//
//
//
// Input signature:
//
// Name             Index   Mask Register SysValue Format   Used
// ---------------- ----- ------ -------- -------- ------ ------
// POSITION             0   xyz         0     NONE  float   xyz
// NORMAL               0   xyz         1     NONE  float   xyz
// TEXCOORD             0   xyzw        2     NONE  float
//
//
// Output signature:
//
// Name             Index   Mask Register SysValue Format   Used
// ---------------- ----- ------ -------- -------- ------ ------
// SV_POSITION          0   xyzw        0      POS  float   xyzw
// TEXCOORD             0   xyzw        1     NONE  float   xyzw
// NORMAL               0   xyz         2     NONE  float   xyz
//
vs_4_0
dcl_input v0.xyz
dcl_input v1.xyz
dcl_output_siv o0.xyzw , position
dcl_output o1.xyzw
dcl_output o2.xyz
mov o0.xyz, v0.xyzx
mov o0.w, l(1.000000)
mov o1.xyzw, l(1.000000, 1.000000, 1.000000, 1.000000)
mov o2.xyz, v1.xyzx
ret
// Approximately 5 instruction slots used
I've also added the input assembler:
private void SetPPInputAssembler(Shader shader)
{
InputElement[] elements = new[] {
new InputElement("POSITION",0,Format.R32G32B32_Float,0),
new InputElement("NORMAL",0,Format.R32G32B32_Float,12,0),
new InputElement("TEXCOORD",0,Format.R32G32_Float,24,0),
};
InputLayout layout = new InputLayout(device, shader.InputSignature, elements);
context.InputAssembler.InputLayout = layout;
context.InputAssembler.PrimitiveTopology = PrimitiveTopology.TriangleList;
}
Your vertex input doesn't match the data you are actually feeding in.
For the first vertex the values look good up to the z-coordinate of the texture coordinates.
You are declaring a Vector2 in your program's vertex struct, but a float4 in the vertex shader's input struct, so things get mixed up.
Just change VS_IN to this:
struct VS_IN
{
float3 position : POSITION;
float3 normal : NORMAL;
float2 col : TEXCOORD; // float2 instead of float4
};
I'm not sure, though, whether you really want colors there or rather texcoords. If you really want colors, float4 would be right, but then you would have to change
vertexes[0].TexCoords = new Vector2(0, 0);
into
vertexes[0].TexCoords = new Vector4(0, 0, 0, 0);
Either way, one of those variables is misnamed and probably the reason for the confusion.
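To make the mismatch concrete, here is the shader input struct annotated with the byte offsets declared in the InputElement array above (the offsets come from that declaration; only the comments are added):
struct VS_IN
{
    float3 position : POSITION; // bytes  0..11 (R32G32B32_Float, offset 0)
    float3 normal   : NORMAL;   // bytes 12..23 (R32G32B32_Float, offset 12)
    float2 col      : TEXCOORD; // bytes 24..31 (R32G32_Float,    offset 24)
};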