I have the .dds cube map texture loaded it in and put it on the sphere model. Obviously it doesn't look right, because I would have to map the texture to the right points on the sphere.
how can I map the texture to the right points on the sphere?
Something like this:
// Cube map used for environment reflections (sampled with a direction vector).
TextureCube TEXTURE_REFLECTION;
//...
// Vertex shader fragment: forwards the world-space position so the pixel
// shader can compute a per-pixel view (incident) vector.
VS_OUTPUT vs(VS_INPUT IN) {
//...
// NOTE(review): IN.World as a per-vertex input suggests instanced world
// matrices — confirm against the input layout.
float4 worldPosition = mul(float4(IN.Position.xyz, 1.0f), IN.World);
OUT.worldpos = worldPosition.xyz;
//...
}
// Pixel shader fragment: samples the diffuse texture, perturbs the surface
// normal with a tangent-space normal map, and blends in a cube-map reflection.
PS_OUTPUT ps(VS_OUTPUT IN) {
//...
float4 ColorTex = TEXTURE_DIFFUSE.Sample(SAMPLER_DEFAULT, IN.TexCoord);
// BUG FIX: the closing parenthesis of mul() was misplaced after "- 1.0f",
// which closed mul() early and left ", IN.tangentToWorld)" dangling
// (unbalanced parentheses; the original line could not compile).
// Unpack the [0,1] texel into a [-1,1] normal, then rotate it to world space.
float3 normalFromMap = mul(2.0f * TEXTURE_NORMAL.Sample(SAMPLER_DEFAULT, IN.TexCoord).xyz - 1.0f, IN.tangentToWorld);
//...
// Incident ray: direction from the camera toward the surface point.
float3 incident = -normalize(CAMERA_POSITION - IN.worldpos);
float3 reflectionVector = reflect(incident, normalFromMap);
// Blend the cube-map reflection into the base colour by reflectivity.
ColorTex.rgb = lerp(ColorTex.rgb, TEXTURE_REFLECTION.Sample(SAMPLER_DEFAULT, reflectionVector).rgb, MATERIAL_REFLECTIVITY);
}
I figured it out.
Pixel shader:
// Samples a cube map by the surface's world-space normal, so the cube map acts
// as the sphere's surface texture (no per-pixel reflection vector needed).
TextureCube CubeMap: register(t0);
SamplerState TexSampler : register(s0);
float4 main(LightingPixelShaderInput input) : SV_Target
{
// A direction vector (here the world normal) is the cube-map "UV".
float4 cubeTexture = CubeMap.Sample(TexSampler, input.worldNormal);
//light calculations
// rgb is modulated by ambient + diffuse; the texel's alpha channel is
// reused as a per-texel specular mask.
float3 finalColour = (gAmbientColour + diffuseLights) * cubeTexture.rgb +
(specularLights) * cubeTexture.a;
return float4(finalColour, 1.0f);
}
Related
For an experiment I want to pass all edges of a triangle to the pixel shader and manually calculate the pixel position from the triangle edges and barycentric coordinates.
In order to do this I wrote a geometry shader that passes all edges of my triangle to the pixel shader:
// Geometry-shader input: clip-space position plus the untransformed position.
struct GeoIn
{
float4 projPos : SV_POSITION;
float3 position : POSITION;
};
// Geometry-shader output: each emitted vertex also carries all three triangle
// corner positions (p) so the pixel shader can reconstruct the fragment
// position from barycentric weights.
struct GeoOut
{
float4 projPos : SV_POSITION;
float3 position : POSITION;
float3 p[3] : TRIPOS;
};
// Pass-through geometry shader that additionally stores the three corner
// positions of the triangle on every emitted vertex (GeoOut.p).
[maxvertexcount(3)]
void main(triangle GeoIn i[3], inout TriangleStream<GeoOut> OutputStream)
{
GeoOut o;
// Record the triangle's corner positions; identical on all output vertices.
for(uint idx = 0; idx < 3; ++idx)
{
o.p[idx] = i[idx].position;
}
// Emit the three vertices unchanged.
// FIX: declare a fresh loop counter here. The original reused "idx" after its
// for-scope ended, which relies on legacy (pre-strict) HLSL scoping and is an
// "undeclared identifier" error under standard C++-style scoping rules.
for(uint v = 0; v < 3; ++v)
{
o.projPos = i[v].projPos;
o.position = i[v].position;
OutputStream.Append(o);
}
OutputStream.RestartStrip();
}
The pixel shader outputs the manually reconstructed position:
// Pixel-shader input matching GeoOut, plus the hardware-generated barycentric
// weights (SV_Barycentrics requires shader model 6.1+).
struct PixelIn
{
float4 projPos : SV_POSITION;
float3 position : POSITION;
// Triangle corner positions; the same for every fragment of one triangle.
float3 p[3] : TRIPOS;
// Barycentric weights of this fragment within its triangle.
float3 bayr : SV_Barycentrics;
};
// Reconstruct the fragment position from the barycentric weights and the
// three triangle corners, and visualise its absolute value as a colour.
float4 main(PixelIn input) : SV_TARGET
{
    float3 reconstructed = float3(0.0, 0.0, 0.0);
    [unroll]
    for (uint k = 0; k < 3; ++k)
    {
        reconstructed += input.bayr[k] * input.p[k];
    }
    return float4(abs(reconstructed), 1.0);
}
And I get the following (expected) result:
However, when I modify my PixelIn struct by adding nointerpolation to p[3]:
struct PixelIn
{
...
nointerpolation float3 p[3] : TRIPOS;
};
I get:
I did not expect a different result because I am not changing the values of p[] for a single triangle in the geometry shader. I tried debugging it by changing the output to float4(abs(i.p[0]), 1.0); with and without interpolation. Without nointerpolation the values of p[] do not vary within a triangle (which makes sense, because all should have the same value). With nointerpolation the values of p[] do change slightly. Why is that the case? I thought nointerpolation was not supposed to interpolate anything.
Edit:
This is the wireframe of my geometry:
I am developing a small program that loads 3D models using assimp, but it does not render the model. At first I thought that the vertices and indices were not loaded correctly, but this is not the case (I printed the vertices and indices to a txt file). I think the problem might be with the position of the model and the camera. The application does not report any errors; it runs properly.
Vertex Struct:
// CPU-side vertex layout; the field order and sizes must match the D3D12
// input layout (POSITION at byte 0, TEXCOORD at 12, NORMAL at 20).
struct Vertex {
XMFLOAT3 position;
XMFLOAT2 texture;
XMFLOAT3 normal;
};
Input layout:
// Input layout mirroring struct Vertex: float3 position (offset 0),
// float2 texture (explicit offset 12), float3 normal (appended => offset 20).
D3D12_INPUT_ELEMENT_DESC inputLayout[] =
{
{ "POSITION", 0, DXGI_FORMAT_R32G32B32_FLOAT, 0, 0, D3D12_INPUT_CLASSIFICATION_PER_VERTEX_DATA, 0 },
{ "TEXCOORD", 0, DXGI_FORMAT_R32G32_FLOAT, 0, 12, D3D12_INPUT_CLASSIFICATION_PER_VERTEX_DATA, 0 },
{ "NORMAL", 0, DXGI_FORMAT_R32G32B32_FLOAT, 0, D3D12_APPEND_ALIGNED_ELEMENT, D3D12_INPUT_CLASSIFICATION_PER_VERTEX_DATA, 0 }
};
Vertices, texcoords, normals and indices loader:
model = new ModelMesh();
std::vector<XMFLOAT3> positions;
std::vector<XMFLOAT3> normals;
std::vector<XMFLOAT2> texCoords;
std::vector<unsigned int> indices;
model->LoadMesh("beast.x", positions, normals,
texCoords, indices);
// Create vertex buffer
if (positions.size() == 0)
{
MessageBox(0, L"Vertices vector is empty.",
L"Error", MB_OK);
}
Vertex* vList = new Vertex[positions.size()];
for (size_t i = 0; i < positions.size(); i++)
{
Vertex vert;
XMFLOAT3 pos = positions[i];
vert.position = XMFLOAT3(pos.x, pos.y, pos.z);
XMFLOAT3 norm = normals[i];
vert.normal = XMFLOAT3(norm.x, norm.y, norm.z);
XMFLOAT2 tex = texCoords[i];
vert.texture = XMFLOAT2(tex.x, tex.y);
vList[i] = vert;
}
int vBufferSize = sizeof(vList);
Build of the camera and views:
// Projection: 45 degree vertical FOV, window aspect ratio, near 0.1, far 1000.
// FIX: use XMConvertToRadians instead of the truncated 3.14f approximation of
// pi, which slightly distorts the field of view.
XMMATRIX tmpMat = XMMatrixPerspectiveFovLH(XMConvertToRadians(45.0f), (float)Width / (float)Height, 0.1f, 1000.0f);
XMStoreFloat4x4(&cameraProjMat, tmpMat);
// set starting camera state: slightly above and behind the origin, looking at it
cameraPosition = XMFLOAT4(0.0f, 2.0f, -4.0f, 0.0f);
cameraTarget = XMFLOAT4(0.0f, 0.0f, 0.0f, 0.0f);
cameraUp = XMFLOAT4(0.0f, 1.0f, 0.0f, 0.0f);
// build view matrix
XMVECTOR cPos = XMLoadFloat4(&cameraPosition);
XMVECTOR cTarg = XMLoadFloat4(&cameraTarget);
XMVECTOR cUp = XMLoadFloat4(&cameraUp);
tmpMat = XMMatrixLookAtLH(cPos, cTarg, cUp);
XMStoreFloat4x4(&cameraViewMat, tmpMat);
// Place the model at the origin with no rotation.
cube1Position = XMFLOAT4(0.0f, 0.0f, 0.0f, 0.0f);
XMVECTOR posVec = XMLoadFloat4(&cube1Position);
tmpMat = XMMatrixTranslationFromVector(posVec);
XMStoreFloat4x4(&cube1RotMat, XMMatrixIdentity());
XMStoreFloat4x4(&cube1WorldMat, tmpMat);
Update function :
// Per-frame update: rebuild the world-view-projection matrix and copy it into
// the mapped constant buffer for the current back-buffer frame.
XMStoreFloat4x4(&cube1WorldMat, worldMat);
XMMATRIX viewMat = XMLoadFloat4x4(&cameraViewMat); // load view matrix
XMMATRIX projMat = XMLoadFloat4x4(&cameraProjMat); // load projection matrix
XMMATRIX wvpMat = XMLoadFloat4x4(&cube1WorldMat) * viewMat * projMat; // create wvp matrix
XMMATRIX transposed = XMMatrixTranspose(wvpMat); // must transpose wvp matrix for the gpu
XMStoreFloat4x4(&cbPerObject.wvpMat, transposed); // store transposed wvp matrix in constant buffer
memcpy(cbvGPUAddress[frameIndex], &cbPerObject, sizeof(cbPerObject)); // upload to the persistently-mapped CBV
VERTEX SHADER:
// Vertex input, matching the D3D12 input layout (position/texcoord/normal).
struct VS_INPUT
{
float4 pos : POSITION;
float2 tex: TEXCOORD;
float3 normal : NORMAL;
};
// Vertex output consumed by the pixel shader.
struct VS_OUTPUT
{
float4 pos: SV_POSITION;
float2 tex: TEXCOORD;
float3 normal: NORMAL;
};
// Per-object constants; wvpMat is uploaded pre-transposed by the CPU side.
cbuffer ConstantBuffer : register(b0)
{
float4x4 wvpMat;
};
// Transform the vertex to clip space and forward the texcoord and normal.
VS_OUTPUT main(VS_INPUT input)
{
VS_OUTPUT output;
output.pos = mul(input.pos, wvpMat);
// BUG FIX: tex and normal were never written, leaving part of the output
// struct uninitialized — the pixel shader received undefined values (and
// strict compilers reject a partially-written output).
output.tex = input.tex;
output.normal = input.normal;
return output;
}
I know it is a lot of code to read, but I don't understand what is going wrong here. I hope somebody can help me.
A few things to try/check:
Make your background clear color grey. That way, if you are drawing black triangles you will see them.
Turn backface culling off in the rendering state, in case your triangles are back to front.
Turn depth test off in the rendering state.
Turn off alpha blending.
You don't show your pixel shader, but try writing a constant color to see if your lighting calculation is broken.
Use NVIDIA's nSight tool, or the Visual Studio Graphics debugger to see what your graphics pipeline is doing.
Those are usually the things I try first...
I'm having massive difficulties with setting up a perspective projection with direct x.
I've been stuck on this for weeks, any help would be appreciated.
As far as I can my pipeline set up is fine, in as far as the shader is getting exactly the data I am packing it, so I'll forgo including that code, unless I'm asked for it.
I am using glm for maths (therefore column matrices) on the host side. It's set to use a left handed coordinate system. However I couldn't seem to get its projection matrix to work,
(I get a blank screen if I use the matrix I get from it) so I wrote this - which I hope only to be temporary.
namespace
{
// Builds the projection matrix via DirectXMath and converts it to glm.
// DirectXMath produces a row-major, row-vector LH projection; reading row c,
// component r into glm's [column][row] layout transposes it, which matches the
// column-vector convention used in the shader (mul(matrix, position)).
glm::mat4
perspective()
{
    DirectX::XMMATRIX dx = DirectX::XMMatrixPerspectiveFovLH(glm::radians(60.0f), 1.0f, 0.01f, 1000.0f);
    glm::mat4 glfromdx;
    // BUG FIX: copy all FOUR rows and columns. The original loops stopped at
    // 3, so the projective terms (the fourth row/column, which drive the w
    // divide and depth mapping) were never copied — leaving a matrix that
    // clips almost immediately, exactly the reported symptom.
    for (int c = 0; c < 4; c++)
    {
        for (int r = 0; r < 4; r++)
        {
            glfromdx[c][r] = DirectX::XMVectorGetByIndex(dx.r[c], r);
        }
    }
    return glfromdx;
}
}
This does hand me a projection matrix that appears to work — but I don't really think it is working correctly. I'm not certain whether it's an actual projection, and my clip space is minuscule. If I move my camera around at all, geometry clips. Photos included.
photo 1: (straight on)
photo 2: (cam tilted up)
photo 3: (cam tilted left)
Here is my vertex shader
https://gist.github.com/anonymous/31726881a5476e6c8513573909bf4dc6
// Per-draw transforms, uploaded from glm (column-major) on the host.
cbuffer offsets
{
float4x4 model; //<------currently just identity
float4x4 view;
float4x4 proj;
}
struct idata
{
float3 position : POSITION;
float4 colour : COLOR;
};
struct odata
{
float4 position : SV_POSITION;
float4 colour : COLOR;
};
void main(in idata dat, out odata odat)
{
odat.colour = dat.colour;
// NOTE(review): dead store — this value is overwritten unconditionally below.
odat.position = float4(dat.position, 1.0f);
//point' = point * model * view * projection
// NOTE(review): with column matrices and a column vector this computes
// M*V*P * pos; the conventional order is P*V*M * pos — confirm against how
// the matrices are packed on upload (the Edit at the end switches the order).
float4x4 mvp = mul(model, mul(view, proj));
//column matrices.
odat.position = mul(mvp, odat.position);
}
And here is my fragment shader
https://gist.github.com/anonymous/342a707e603b9034deb65eb2c2101e91
// Interpolated fragment input from the vertex stage.
struct idata
{
    float4 position : SV_POSITION;
    float4 colour : COLOR;
};
// Pass the interpolated colour through, forcing full opacity.
float4 main(in idata data) : SV_TARGET
{
    return float4(data.colour.rgb, 1.0f);
}
also full camera implementation is here
https://gist.github.com/anonymous/b15de44f08852cbf4fa4d6b9fafa0d43
https://gist.github.com/anonymous/8702429212c411ddb04f5163b1d885b9
Finally these are the vertices being passed in
position 3 colour 4
(0.0f, 0.5f, 0.0f, 1.0f, 0.0f, 0.0f, 1.0f),
(0.45f, -0.5f, 0.0f, 0.0f, 1.0f, 0.0f, 1.0f),
(-0.45f, -0.5f, 0.0f, 0.0f, 0.0f, 1.0f, 1.0f)
Edit:
I changed the P*M*V*P calculation in the shader to this;
odat.position = mul(mul(mul(proj, view), model), odat.position);
still having the same issue.
Edit 2:
Hmm, the above changes mixed with a change of what generates the perspective, now just purely using glm.
// Remap GL clip space (z in [-1,1]) to DX clip space (z in [0,1]).
glm::mat4 GLtoDX_NDC(glm::translate(glm::vec3(0.0, 0.0, 0.5)) *
glm::scale(glm::vec3(1.0, 1.0, 0.5)));
// NOTE(review): glm::perspectiveFovLH expects the fov in RADIANS (60.0f here
// looks like degrees), and the parameter order is (fov, width, height, near,
// far) — 100.0f and 0.1f appear to be swapped. Confirm both; either would
// explain the upside-down/shearing artifacts described below.
glm::mat4 gl = GLtoDX_NDC * glm::perspectiveFovLH(60.0f, 500.0f, 500.0f, 100.0f, 0.1f);
This works, in as far as object no longer cull at about a micrometre, and are projected to their approximate sizes. However, now everything is upside down and rotating causes a shear...
I am trying to perform diffuse reflection in HLSL. Currently I am working on the vertex shader. Unfortunately I get the following error when trying to compile with fxc.exe:
C:\Users\BBaczek\Projects\MyApp\VertexShader.vs.hlsl(2,10-25)
: error X4500: overlapping register semantics not yet implemented 'c1'
C:\Users\BBaczek\Projects\MyApp\VertexShader.vs.hlsl(2,10-25)
: error X4500: overlapping register semantics not yet implemented 'c2'
C:\Users\BBaczek\Projects\MyApp\VertexShader.vs.hlsl(2,10-25)
: error X4500: overlapping register semantics not yet implemented 'c3'
Vertex shader code:
// NOTE(review): a float4x4 occupies FOUR constant registers, so WorldViewProj
// fills c0-c3 and inv_world_matrix must start at c4. register(c1) overlaps
// c0-c3, which is exactly what fxc's error X4500 is reporting (the fix shown
// later in this page moves it to c4).
float4x4 WorldViewProj : register(c0);
float4x4 inv_world_matrix : register(c1);
// Ambient light colour and light position (world space).
float4 LightAmbient;
float4 LightPosition;
// Per-vertex input, in object space.
struct VertexData
{
float4 Position : POSITION;
float4 Normal : NORMAL;
float3 UV : TEXCOORD;
};
struct VertexShaderOutput
{
float4 Position : POSITION;
float3 Color: COLOR;
};
// Per-vertex Lambertian diffuse: lighting is evaluated in object space by
// bringing the light position into the object's frame.
VertexShaderOutput main(VertexData vertex)
{
    // Move the light into object space so it can be compared against the
    // object-space vertex position and normal.
    float4 lightObjSpace = mul(LightPosition, inv_world_matrix);
    float4 toLight = normalize(lightObjSpace - vertex.Position);
    float4 unitNormal = normalize(vertex.Normal);
    // Lambert term, clamped at zero for surfaces facing away from the light.
    float lambert = max(0, dot(unitNormal, toLight));
    float4 diffuse = float4(0.8, 0.8, 0.8, 1) * lambert;
    float4 lit = LightAmbient + diffuse;
    VertexShaderOutput output;
    output.Position = mul(vertex.Position, WorldViewProj);
    output.Color = lit.rgb;
    return output;
}
And command I use to perform compilation:
fxc /T vs_2_0 /O3 /Zpr /Fo VertexShader.vs VertexShader.vs.hlsl
Why am I getting this error? What can I do to prevent this?
Found it out - I am not deleting this question, because someone might find it useful.
What you need to do is changing
float4x4 WorldViewProj : register(c0);
float4x4 inv_world_matrix : register(c1);
to
float4x4 WorldViewProj : register(c0);
float4x4 inv_world_matrix : register(c4);
I am not sure of the exact mechanism, but a float4x4 occupies four consecutive constant registers — so WorldViewProj at c0 fills c0 through c3, and the next matrix must start at c4 to avoid the overlap.
For an academic project I'm trying to write a code for drawing billboards from scratch; now I'm at the point of making them translucent. I've managed to make them look good against the background but they still may cover each other with their should-be-transparent corners, like in this picture:
I'm not sure what I'm doing wrong. This is the effect file I'm using to draw the billboards. I've omitted the parts related to the vertex shader, which I think is irrelevant right now.
//cut
// Billboard colour texture and its grayscale transparency mask.
texture Texture;
texture MaskTexture;
sampler Sampler = sampler_state
{
Texture = (Texture);
MinFilter = Linear;
MagFilter = Linear;
MipFilter = Point;
AddressU = Clamp;
AddressV = Clamp;
};
// Same filtering/addressing as above, but bound to the mask texture.
sampler MaskSampler = sampler_state
{
Texture = (MaskTexture);
MinFilter = Linear;
MagFilter = Linear;
MipFilter = Point;
AddressU = Clamp;
AddressV = Clamp;
};
//cut
// Interpolated values received by the pixel shader.
struct VertexShaderOutput
{
float4 Position : POSITION0;
float4 Color : COLOR;
float2 TexCoord : TEXCOORD0;
};
//cut
// Tint the billboard texture by the vertex colour and apply the grayscale
// mask: the mask's red channel becomes both a colour attenuator and the
// output alpha.
float4 PixelShaderFunction(VertexShaderOutput input) : COLOR0
{
    float coverage = tex2D(MaskSampler, input.TexCoord).r;
    float4 tinted = tex2D(Sampler, input.TexCoord) * input.Color;
    return float4(tinted.rgb * coverage, coverage);
}
// Standard back-to-front alpha blending (src*a + dst*(1-a)).
// NOTE(review): the pixel shader above already multiplies rgb by alpha, so
// with SrcBlend=SrcAlpha the colour is attenuated twice; for premultiplied
// output SrcBlend=One would be the matching state — confirm intent.
technique Technique1
{
pass Pass1
{
VertexShader = compile vs_2_0 VertexShaderFunction();
PixelShader = compile ps_2_0 PixelShaderFunction();
AlphaBlendEnable = true;
SrcBlend = SrcAlpha;
DestBlend = InvSrcAlpha;
}
}
I've got two textures, named Texture and MaskTexture, the latter being in grayscale. The billboards are, most likely, in the same vertex buffer and are drawn with a single call of GraphicsDevice.DrawIndexedPrimitives() from XNA.
I've got a feeling I'm not doing the whole thing right.
You have to draw them in order. From farthest to closest.
I have found a solution. The shaders are fine, the problem turned out to be in the XNA code, so sorry for drawing your attention to the wrong thing.
The solution is to enable a depth stencil buffer (whatever it is) before drawing the billboards:
device.DepthStencilState = DepthStencilState.DepthRead;
It can be disabled afterwards:
device.DepthStencilState = DepthStencilState.Default;