Screen-space square looking distorted in PIX - geometry

I have a simple function that creates a square covering the entire screen, which I use for applying post-processing effects; as far as I can tell, it has been the cause of countless errors.
When I run my code through PIX I get the following mesh, but the square should be flat and cover the screen, shouldn't it?
My vertex shader does no transformation and simply passes position information to the pixel shader.
The function that creates the square is as follows:
private void InitializeGeometry()
{
meshes = new Dictionary<Vector3, Mesh>();
//build array of vertices for one square
ppVertex[] vertexes = new ppVertex[4];
//vertexes[0].Position = new Vector3(-1f, -1f, 0.25f);
vertexes[0].Position = new Vector3(-1, -1, 1f);
vertexes[1].Position = new Vector3(-1, 1, 1f);
vertexes[2].Position = new Vector3(1, -1, 1f);
vertexes[3].Position = new Vector3(1, 1, 1f);
vertexes[0].TexCoords = new Vector2(0, 0);
vertexes[1].TexCoords = new Vector2(0, 1);
vertexes[2].TexCoords = new Vector2(1, 0);
vertexes[3].TexCoords = new Vector2(1, 1);
//build index array for the vertices to build a quad from two triangles
short[] indexes = { 0, 1, 2, 1, 3, 2 };
//create the data stream to push the vertex data into the buffer
DataStream vertices = new DataStream(Marshal.SizeOf(typeof(Vertex)) * 4, true, true);
//load the data stream
vertices.WriteRange(vertexes);
//reset the data position
vertices.Position = 0;
//create the data stream to push the index data into the buffer
DataStream indices = new DataStream(sizeof(short) * 6, true, true);
//load the data stream
indices.WriteRange(indexes);
//reset the data position
indices.Position = 0;
//create the mesh object
Mesh mesh = new Mesh();
//create the description of the vertex buffer
D3D.BufferDescription vbd = new BufferDescription();
vbd.BindFlags = D3D.BindFlags.VertexBuffer;
vbd.CpuAccessFlags = D3D.CpuAccessFlags.None;
vbd.OptionFlags = ResourceOptionFlags.None;
vbd.SizeInBytes = Marshal.SizeOf(typeof(Vertex)) * 4;
vbd.Usage = ResourceUsage.Default;
//create and assign the vertex buffer to the mesh, filling it with data
mesh.VertexBuffer = new D3D.Buffer(device, vertices, vbd);
//create the description of the index buffer
D3D.BufferDescription ibd = new BufferDescription();
ibd.BindFlags = D3D.BindFlags.IndexBuffer;
ibd.CpuAccessFlags = D3D.CpuAccessFlags.None;
ibd.OptionFlags = ResourceOptionFlags.None;
ibd.SizeInBytes = sizeof(short) * 6;
ibd.Usage = ResourceUsage.Default;
//create and assign the index buffer to the mesh, filling it with data
mesh.IndexBuffer = new D3D.Buffer(device, indices, ibd);
//get vertex and index counts
mesh.vertices = vertexes.GetLength(0);
mesh.indices = indexes.Length;
//close the data streams
indices.Close();
vertices.Close();
meshes.Add(new Vector3(0), mesh);
}
and when I render the square:
private void DrawScene()
{
lock (meshes)
{
foreach (Mesh mesh in meshes.Values)
{
if (mesh.indices > 0)
{
try
{
//if (camera.SphereInFrustum(mesh.BoundingSphere, sphereRadius))
//{
context.InputAssembler.SetVertexBuffers(0, new VertexBufferBinding(mesh.VertexBuffer, Marshal.SizeOf(typeof(Vertex)), 0));
context.InputAssembler.SetIndexBuffer(mesh.IndexBuffer, Format.R16_UInt, 0);
context.DrawIndexed(mesh.indices, 0, 0);
//}
}
catch (Exception err)
{
MessageBox.Show(err.Message);
}
}
}
}
}
EDIT: I've added the vertex shader being run
cbuffer EveryFrame : register(cb0)
{
float3 diffuseColor : packoffset(c0);
float3 lightdir : packoffset(c1);
};
cbuffer EveryMotion : register(cb1)
{
float4x4 WorldViewProjection : packoffset(c0);
float4x4 LightWorldViewProjection : packoffset(c4);
};
struct VS_IN
{
float3 position : POSITION;
float3 normal : NORMAL;
float4 col : TEXCOORD;
};
struct PS_IN
{
float4 position : SV_POSITION;
float4 col : TEXCOORD;
float3 normal : NORMAL;
};
PS_IN VS(VS_IN input)
{
PS_IN output;
output.position = float4(input.position,1);
output.col = input.col;
output.normal = input.normal;
return output;
}
Here's PIX's vertex output (PreVS and PostVS screenshots).
And here's the disassembly PIX generated when I chose to debug vertex 0:
//
// Generated by Microsoft (R) HLSL Shader Compiler 9.29.952.3111
//
//
//
// Input signature:
//
// Name Index Mask Register SysValue Format Used
// ---------------- ----- ------ -------- -------- ------ ------
// POSITION 0 xyz 0 NONE float xyz
// NORMAL 0 xyz 1 NONE float xyz
// TEXCOORD 0 xyzw 2 NONE float
//
//
// Output signature:
//
// Name Index Mask Register SysValue Format Used
// ---------------- ----- ------ -------- -------- ------ ------
// SV_POSITION 0 xyzw 0 POS float xyzw
// TEXCOORD 0 xyzw 1 NONE float xyzw
// NORMAL 0 xyz 2 NONE float xyz
//
vs_4_0
dcl_input v0.xyz
dcl_input v1.xyz
dcl_output_siv o0.xyzw , position
dcl_output o1.xyzw
dcl_output o2.xyz
mov o0.xyz, v0.xyzx
mov o0.w, l(1.000000)
mov o1.xyzw, l(1.000000, 1.000000, 1.000000, 1.000000)
mov o2.xyz, v1.xyzx
ret
// Approximately 5 instruction slots used
I've also added the input assembler:
private void SetPPInputAssembler(Shader shader)
{
InputElement[] elements = new[] {
new InputElement("POSITION",0,Format.R32G32B32_Float,0),
new InputElement("NORMAL",0,Format.R32G32B32_Float,12,0),
new InputElement("TEXCOORD",0,Format.R32G32_Float,24,0),
};
InputLayout layout = new InputLayout(device, shader.InputSignature, elements);
context.InputAssembler.InputLayout = layout;
context.InputAssembler.PrimitiveTopology = PrimitiveTopology.TriangleList;
}

Your vertex input positions obviously don't match the values you intend to pass in.
For the first vertex the values look good up to the z-coordinate of the texture coordinates.
You are declaring a Vector2 in your program's vertex struct, but a float4 in the vertex shader's input struct, so the input assembler reads too much data per vertex and everything after it gets mixed up.
just change VS_IN to this:
struct VS_IN
{
float3 position : POSITION;
float3 normal : NORMAL;
float2 col : TEXCOORD; // float2 instead of float4
};
I'm not sure, though, whether you really want colors there or rather texcoords. If you really do want colors, float4 would be right, but then you would have to change
vertexes[0].TexCoords = new Vector2(0, 0);
into
vertexes[0].TexCoords = new Vector4(0, 0, 0, 0);
Either way, one of those variables is misnamed and probably the reason for the confusion.
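For reference, here is what a C# vertex struct matching the input layout from the question could look like. This is a minimal sketch assuming SlimDX's Vector2/Vector3 types; the struct name is illustrative, not from the original code:
using System.Runtime.InteropServices;
using SlimDX;

[StructLayout(LayoutKind.Sequential)]
struct PPVertex
{
    public Vector3 Position;  // 12 bytes, offset 0  -> POSITION, Format.R32G32B32_Float
    public Vector3 Normal;    // 12 bytes, offset 12 -> NORMAL,   Format.R32G32B32_Float
    public Vector2 TexCoords; //  8 bytes, offset 24 -> TEXCOORD, Format.R32G32_Float
}
The key point is that the struct layout, the InputElement offsets, and the VS_IN field types must all describe the same 32-byte vertex; if any one of them disagrees, the input assembler reads the wrong bytes.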

Related

Android Studio OpenGL ES problem with weird color mixing

Basically I've created a plane and rotated it five times to make a cube, with a different color on each side, and added rotation with a touch event.
Everything was good so far, but the cube turned out to be like this.
Please help, it's been driving me crazy!
My cube code:
public class PhotoCube{
public ArrayList<FloatBuffer> vertexBufferList = new ArrayList<>();
public int[][] lists = {
{0,1,0,0}, //front
{1,0,0,-90}, //top
{0,1,0,-90}, //left
{0,1,0,90}, //
{1,0,0,90}, //bottom
{0,1,0,180} //right
};
// number of coordinates per vertex in this array
static final int COORDS_PER_VERTEX = 3;
static final float[] coords = { // in counterclockwise order:
0.5f, 0.5f, 0.5f, // top right
-0.5f, 0.5f, 0.5f, // top left
-0.5f, -0.5f, 0.5f, // bottom left
0.5f, -0.5f, 0.5f // bottom right
};
static final float[][] colorList = {
{1f,0f,0f,1f},
{0f,1f,0f,1f},
{1f,1f,1f,1f},
{1f,1f,0f,1f},
{1f,0f,1f,1f},
{0f,1f,1f,1f}
};
public PhotoCube() {
final int maxVertices = 6;
for(int[] list:lists){
float[] currentCoords = coords.clone();
for(int i=0;i<12;i+=3){
float x = coords[i];
float y = coords[i+1];
float z = coords[i+2];
double angle = Math.toRadians(list[3]);
currentCoords[i]=(float) ((list[0]==1)?x:(list[1]==1)?x*Math.cos(angle)+z*Math.sin(angle):x*Math.cos(angle)-y*Math.sin(angle));
currentCoords[i+1]=(float) ((list[0]==1)?y*Math.cos(angle)-z*Math.sin(angle):(list[1]==1)?y:x*Math.sin(angle)+y*Math.cos(angle));
currentCoords[i+2]=(float) ((list[0]==1)?z*Math.cos(angle)+y*Math.sin(angle):(list[1]==1)?z*Math.cos(angle)-x*Math.sin(angle):z);
}
ByteBuffer bb = ByteBuffer.allocateDirect(
// (number of coordinate values * 4 bytes per float)
currentCoords.length * 4);
// use the device hardware's native byte order
bb.order(ByteOrder.nativeOrder());
// create a floating point buffer from the ByteBuffer
FloatBuffer vertexBuffer = bb.asFloatBuffer();
// add the coordinates to the FloatBuffer
vertexBuffer.put(currentCoords);
// set the buffer to read the first coordinate
vertexBuffer.position(0);
if(vertexBufferList.size()==maxVertices){
vertexBufferList.remove(0);
}
vertexBufferList.add(vertexBuffer);
ByteBuffer dlb = ByteBuffer.allocateDirect(
// (# of coordinate values * 2 bytes per short)
drawOrder.length * 2);
dlb.order(ByteOrder.nativeOrder());
drawListBuffer = dlb.asShortBuffer();
drawListBuffer.put(drawOrder);
drawListBuffer.position(0);
createProgram();
GLES20.glLinkProgram(mProgram);
// creates OpenGL ES program executables
}
}
public void createProgram(){
// create empty OpenGL ES Program
mProgram = GLES20.glCreateProgram();
int vertexShader = MyGLRenderer.loadShader(GLES20.GL_VERTEX_SHADER,
vertexShaderCode);
int fragmentShader = MyGLRenderer.loadShader(GLES20.GL_FRAGMENT_SHADER,
fragmentShaderCode);
// add the vertex shader to program
GLES20.glAttachShader(mProgram, vertexShader);
// add the fragment shader to program
GLES20.glAttachShader(mProgram, fragmentShader);
}
public void draw(int order) {
final int vertexStride = COORDS_PER_VERTEX * 4; // 4 bytes per float
// Add program to OpenGL ES environment
GLES20.glUseProgram(mProgram);
// get handle to vertex shader's vPosition member
int positionHandle = GLES20.glGetAttribLocation(mProgram, "vPosition");
// Enable a handle to the triangle vertices
GLES20.glEnableVertexAttribArray(positionHandle);
// Prepare the triangle coordinate data
GLES20.glVertexAttribPointer(positionHandle, COORDS_PER_VERTEX,
GLES20.GL_FLOAT, true,
vertexStride, vertexBufferList.get(order));
// get handle to fragment shader's vColor member
int colorHandle = GLES20.glGetUniformLocation(mProgram, "vColor");
// Set color for drawing the triangle
GLES20.glUniform4fv(colorHandle, 1, colorList[order], 0);
GLES20.glDrawElements(
GLES20.GL_TRIANGLES,
drawOrder.length,
GL_UNSIGNED_SHORT,
drawListBuffer);
// Disable vertex array
//GLES20.glDisableVertexAttribArray(positionHandle);
}
}
My Renderer code:
public class MyGLRenderer implements GLSurfaceView.Renderer {
private PhotoCube mPhotoCube;
public final float[] vPMatrix = new float[16];
private final float[] projectionMatrix = new float[16];
private final float[] viewMatrix = new float[16];
private int vPMatrixHandle = -1;
private volatile float mAngleX = 0;
private volatile float mAngleY = 0;
private float[] rotationMX = new float[16];
private float[] rotationMY = new float[16];
private float[] scratch = new float[16];
public MyGLRenderer(Context context){
}
public static int loadShader(int type, String shaderCode) {
int shader = GLES20.glCreateShader(type);
// add the source code to the shader and compile it
GLES20.glShaderSource(shader, shaderCode);
GLES20.glCompileShader(shader);
return shader;
}
public void onSurfaceCreated(GL10 unused, EGLConfig config) {
// Set the background frame color
GLES20.glClearColor(0.0f, 0.0f, 0.0f, 0.0f);
mPhotoCube = new PhotoCube();
vPMatrixHandle = GLES20.glGetUniformLocation(mPhotoCube.mProgram, "uVPMatrix");
onDrawFrame(unused);
}
public void onDrawFrame(GL10 unused) {
// Redraw background color
GLES20.glClear(GLES20.GL_COLOR_BUFFER_BIT);
GLES20.glEnable(GLES20.GL_BLEND);
GLES20.glBlendFuncSeparate(GLES20.GL_SRC_ALPHA, GLES20.GL_ONE_MINUS_SRC_ALPHA, GLES20.GL_ZERO, GLES20.GL_ONE);
Matrix.setRotateM(rotationMX, 0, -mAngleX, 0, 1, 0);
Matrix.setLookAtM(viewMatrix, 0, 0, 0, 5, 0f, 0f, -5f, 0f, 1.0f, 0f);
// Calculate the projection and view transformation
Matrix.multiplyMM(vPMatrix, 0, projectionMatrix, 0, viewMatrix, 0);
Matrix.multiplyMM(scratch, 0, vPMatrix, 0, rotationMX, 0);
Matrix.setRotateM(rotationMY, 0, -mAngleY, scratch[0], scratch[4], scratch[8]);
Matrix.multiplyMM(scratch, 0, scratch, 0, rotationMY, 0);
vPMatrixHandle = GLES20.glGetUniformLocation(mPhotoCube.mProgram, "uMVPMatrix");
GLES20.glUniformMatrix4fv(vPMatrixHandle, 1, false, scratch, 0);
for(int i=0;i<mPhotoCube.lists.length;i++){
mPhotoCube.draw(i);
}
}
public void onSurfaceChanged(GL10 unused, int width, int height) {
GLES20.glViewport(0, 0, width, height);
float ratio = (float) width / height;
// this projection matrix is applied to object coordinates
// in the onDrawFrame() method
Matrix.frustumM(projectionMatrix, 0, -ratio, ratio, -1, 1, 3, 7);
}
}
Here's my shader code
final String vertexShaderCode =
"uniform mat4 uMVPMatrix;" +
"attribute vec4 vPosition;" +
"void main() {" +
" gl_Position = uMVPMatrix * vPosition;" +
"}";
final String fragmentShaderCode =
"precision mediump float;" +
"uniform vec4 vColor;" +
"void main() {" +
" gl_FragColor = vColor;" +
"}";
Thank you in advance.
You have to enable the depth test. The depth test ensures that fragments that lie behind other fragments are discarded:
GLES20.glEnable(GLES20.GL_DEPTH_TEST);
When you enable the depth test you must also clear the depth buffer:
GLES20.glClear(GLES20.GL_COLOR_BUFFER_BIT | GLES20.GL_DEPTH_BUFFER_BIT);
However, if you have transparent objects and want to use Blending, the depth test must be disabled and you must draw the triangles of the meshes in sorted order from back to front. Also see OpenGL depth sorting
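Applied to the renderer above, the two changes sit in onSurfaceCreated and onDrawFrame. A minimal sketch (only the depth-test lines are new; everything else is as in the question):
public void onSurfaceCreated(GL10 unused, EGLConfig config) {
    GLES20.glClearColor(0.0f, 0.0f, 0.0f, 0.0f);
    // enable the depth test once at setup
    GLES20.glEnable(GLES20.GL_DEPTH_TEST);
    mPhotoCube = new PhotoCube();
}
public void onDrawFrame(GL10 unused) {
    // clear the depth buffer together with the color buffer every frame
    GLES20.glClear(GLES20.GL_COLOR_BUFFER_BIT | GLES20.GL_DEPTH_BUFFER_BIT);
    // ... set up matrices and draw the cube faces as before ...
}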

Simple 3D Shape faces not rendering as expected - OpenGL ES in Android Studio

I am trying to make a rotating octahedron display correctly. I have successfully rendered other shapes such as a cube and a tetrahedron, but I am having some difficulty with this one.
Here is the simple obj file I am using:
v 0 -1 0
v 1 0 0
v 0 0 1
v -1 0 0
v 0 1 0
v 0 0 -1
#
f 1 2 3
f 4 1 3
f 5 4 3
f 2 5 3
f 2 1 6
f 1 4 6
f 4 5 6
f 5 2 6
My code is as follows:
class Shape(context: Context) {
private var mProgram: Int = 0
// Use to access and set the view transformation
private var mMVPMatrixHandle: Int = 0
//For Projection and Camera Transformations
private var vertexShaderCode = (
// This matrix member variable provides a hook to manipulate
// the coordinates of the objects that use this vertex shader
"uniform mat4 uMVPMatrix;" +
"attribute vec4 vPosition;" +
//"attribute vec4 vColor;" +
//"varying vec4 vColorVarying;" +
"void main() {" +
// the matrix must be included as a modifier of gl_Position
// Note that the uMVPMatrix factor *must be first* in order
// for the matrix multiplication product to be correct.
" gl_Position = uMVPMatrix * vPosition;" +
//"vColorVarying = vColor;"+
"}")
private var fragmentShaderCode = (
"precision mediump float;" +
"uniform vec4 vColor;" +
//"varying vec4 vColorVarying;"+
"void main() {" +
//" gl_FragColor = vColorVarying;" +
" gl_FragColor = vColor;" +
"}")
internal var shapeColor = arrayOf<FloatArray>(
//front face (grey)
floatArrayOf(0f, 0f, 0f, 1f), //black
floatArrayOf(0f, 0f, 1f, 1f),
floatArrayOf(0f, 1f, 0f, 1f),
floatArrayOf(1f, 0f, 0f, 1f), // red
floatArrayOf(1f, 1f, 0f, 1f),
floatArrayOf(1f, 0f, 1f, 1f),
floatArrayOf(1f, 0f, 1f, 1f),
floatArrayOf(0f, 1f, 1f, 1f)
)
private var mPositionHandle: Int = 0
private var mColorHandle: Int = 0
// var objLoader = ObjLoader(context, "tetrahedron.txt")
// var objLoader = ObjLoader(context, "cube.txt")
var objLoader = ObjLoader(context, "octahedron.txt")
var shapeCoords: FloatArray
var numFaces: Int = 0
var vertexBuffer: FloatBuffer
var drawOrder: Array<ShortArray>
lateinit var drawListBuffer: ShortBuffer
init {
//assign coordinates and order in which to draw them (obtained from obj loader class)
shapeCoords = objLoader.vertices.toFloatArray()
drawOrder = objLoader.faces.toTypedArray()
numFaces = objLoader.numFaces
// initialize vertex byte buffer for shape coordinates
val bb = ByteBuffer.allocateDirect(
// (# of coordinate values * 4 bytes per float)
shapeCoords.size * 4
)
bb.order(ByteOrder.nativeOrder())
vertexBuffer = bb.asFloatBuffer()
vertexBuffer.put(shapeCoords)
vertexBuffer.position(0)
// create empty OpenGL ES Program
mProgram = GLES20.glCreateProgram()
val vertexShader = loadShader(
GLES20.GL_VERTEX_SHADER,
vertexShaderCode
)
val fragmentShader = loadShader(
GLES20.GL_FRAGMENT_SHADER,
fragmentShaderCode
)
// add the vertex shader to program
GLES20.glAttachShader(mProgram, vertexShader)
// add the fragment shader to program
GLES20.glAttachShader(mProgram, fragmentShader)
// creates OpenGL ES program executables
GLES20.glLinkProgram(mProgram)
}
var vertexStride = COORDS_PER_VERTEX * 4 // 4 bytes per float
fun draw(mvpMatrix: FloatArray) { // pass in the calculated transformation matrix
for (face in 0 until numFaces) {
// Add program to OpenGL ES environment
GLES20.glUseProgram(mProgram)
// get handle to vertex shader's vPosition member
mPositionHandle = GLES20.glGetAttribLocation(mProgram, "vPosition")
// get handle to fragment shader's vColor member
mColorHandle = GLES20.glGetUniformLocation(mProgram, "vColor")
// Enable a handle to the cube vertices
GLES20.glEnableVertexAttribArray(mPositionHandle)
// Prepare the cube coordinate data
GLES20.glVertexAttribPointer(
mPositionHandle, COORDS_PER_VERTEX,
GLES20.GL_FLOAT, false,
vertexStride, vertexBuffer
)
GLES20.glUniform4fv(mColorHandle, 1, shapeColor[face], 0)
// get handle to shape's transformation matrix
mMVPMatrixHandle = GLES20.glGetUniformLocation(mProgram, "uMVPMatrix")
// Pass the projection and view transformation to the shader
GLES20.glUniformMatrix4fv(mMVPMatrixHandle, 1, false, mvpMatrix, 0)
// initialize byte buffer for the draw list
var dlb = ByteBuffer.allocateDirect(
// (# of coordinate values * 2 bytes per short)
drawOrder[face].size * 2
)
dlb.order(ByteOrder.nativeOrder())
drawListBuffer = dlb.asShortBuffer()
drawListBuffer.put(drawOrder[face])
drawListBuffer.position(0)
GLES20.glDrawElements(
GLES20.GL_TRIANGLES,
dlb.capacity(),
GLES20.GL_UNSIGNED_SHORT,
drawListBuffer //position indices
)
}
// Disable vertex array
GLES20.glDisableVertexAttribArray(mMVPMatrixHandle)
}
companion object {
// number of coordinates per vertex in this array
internal var COORDS_PER_VERTEX = 3
}
}
class MyGLRenderer1(val context: Context) : GLSurfaceView.Renderer {
private lateinit var mShape: Shape
@Volatile
var mDeltaX = 0f
@Volatile
var mDeltaY = 0f
@Volatile
var mTotalDeltaX = 0f
@Volatile
var mTotalDeltaY = 0f
private val mMVPMatrix = FloatArray(16)
private val mProjectionMatrix = FloatArray(16)
private val mViewMatrix = FloatArray(16)
private val mRotationMatrix = FloatArray(16)
private val mAccumulatedRotation = FloatArray(16)
private val mCurrentRotation = FloatArray(16)
private val mTemporaryMatrix = FloatArray(16)
override fun onDrawFrame(gl: GL10?) {
// Redraw background color
GLES20.glClear(GLES20.GL_COLOR_BUFFER_BIT or GLES20.GL_DEPTH_BUFFER_BIT)
val scratch = FloatArray(16)
// Create a rotation transformation for the square
Matrix.setIdentityM(mRotationMatrix, 0)
Matrix.setIdentityM(mCurrentRotation, 0)
Matrix.rotateM(mCurrentRotation, 0, mDeltaX, 0.0f, 1.0f, 0.0f)
// Matrix.rotateM(mCurrentRotation, 0, mDeltaY, 1.0f, 0.0f, 0.0f)
// Multiply the current rotation by the accumulated rotation, and then set the accumulated
// rotation to the result.
Matrix.multiplyMM(
mTemporaryMatrix,
0,
mCurrentRotation,
0,
mAccumulatedRotation,
0
)
System.arraycopy(mTemporaryMatrix, 0, mAccumulatedRotation, 0, 16)
// Rotate the cube taking the overall rotation into account.
Matrix.multiplyMM(
mTemporaryMatrix,
0,
mRotationMatrix,
0,
mAccumulatedRotation,
0
)
System.arraycopy(mTemporaryMatrix, 0, mRotationMatrix, 0, 16)
// Set the camera position (View matrix)
Matrix.setLookAtM(mViewMatrix, 0, 2f, 2f, -5f, 0f, 0f, 0f, 0f, 1.0f, 0.0f)
//Calculate the projection and view transformation
Matrix.multiplyMM(mMVPMatrix, 0, mProjectionMatrix, 0, mViewMatrix, 0)
// Combine the rotation matrix with the projection and camera view
// Note that the mMVPMatrix factor *must be first* in order
// for the matrix multiplication product to be correct.
Matrix.multiplyMM(scratch, 0, mMVPMatrix, 0, mRotationMatrix, 0)
gl?.glDisable(GL10.GL_CULL_FACE)
// Draw shape
mShape.draw(scratch)
}
override fun onSurfaceChanged(gl: GL10?, width: Int, height: Int) {
GLES20.glViewport(0, 0, width, height);
val ratio: Float = width.toFloat() / height.toFloat()
// this projection matrix is applied to object coordinates
// in the onDrawFrame() method
Matrix.frustumM(mProjectionMatrix, 0, -ratio, ratio, -1.0f, 1.0f, 3.0f, 7.0f)
}
override fun onSurfaceCreated(gl: GL10?, config: EGLConfig?) {
GLES20.glEnable(GLES20.GL_DEPTH_TEST)
// initialize a square
mShape = Shape(context)
// Initialize the accumulated rotation matrix
Matrix.setIdentityM(mAccumulatedRotation, 0)
}
}
fun loadShader(type: Int, shaderCode: String): Int {
return GLES20.glCreateShader(type).also { shader ->
GLES20.glShaderSource(shader, shaderCode)
GLES20.glCompileShader(shader)
}
}
class ObjLoader(context: Context, file: String) {
var numFaces: Int = 0
var vertices = Vector<Float>()
var normals = Vector<Float>()
var textures = Vector<Float>()
var faces = mutableListOf<ShortArray>()
init {
val reader: BufferedReader
val isr = InputStreamReader(context.assets.open(file))
reader = BufferedReader(isr)
var line = reader.readLine()
// read file until EOF
while (line != null) {
val parts = line.split((" ").toRegex()).dropLastWhile({ it.isEmpty() }).toTypedArray()
when (parts[0]) {
"v" -> {
var part1 = parts[1].toFloat()
var part2 = parts[2].toFloat()
var part3 = parts[3].toFloat()
// vertices
vertices.add(part1)
vertices.add(part2)
vertices.add(part3)
}
"vt" -> {
// textures
textures.add(parts[1].toFloat())
textures.add(parts[2].toFloat())
}
"vn" -> {
// normals
normals.add(parts[1].toFloat())
normals.add(parts[2].toFloat())
normals.add(parts[3].toFloat())
}
"f" -> {
// faces: vertex/texture/normal
faces.add(shortArrayOf(parts[1].toShort(), parts[2].toShort(), parts[3].toShort()))
println("dbg: points are "+ parts[1]+" "+parts[2]+" "+parts[3])
}
}
line = reader.readLine()
}
numFaces = faces.size
}}
The shape produced can be seen in the following screenshots. It is also visible on the black face that some sort of z-fighting may be taking place: the black triangle flickers red and yellow.
Sometimes the following shapes are produced, flickering in and out of existence, in different colours:
Any help is much appreciated, thanks in advance.
Edit:
I have managed to make the vertices plot correctly thanks to the answer below, but there is still this flickering going on. I really appreciate the help.
Array indices start at 0, but Wavefront (.obj) indices start at 1, so each index has to be decremented by 1. Change
faces.add(shortArrayOf(parts[1].toShort(), parts[2].toShort(), parts[3].toShort()))
to
faces.add(shortArrayOf(
    (parts[1].toInt() - 1).toShort(),
    (parts[2].toInt() - 1).toShort(),
    (parts[3].toInt() - 1).toShort()))

HLSL nointerpolation behaviour

For an experiment I want to pass all edges of a triangle to the pixel shader and manually calculate the pixel position from the triangle edges and barycentric coordinates.
In order to do this I wrote a geometry shader that passes all edges of my triangle to the pixel shader:
struct GeoIn
{
float4 projPos : SV_POSITION;
float3 position : POSITION;
};
struct GeoOut
{
float4 projPos : SV_POSITION;
float3 position : POSITION;
float3 p[3] : TRIPOS;
};
[maxvertexcount(3)]
void main(triangle GeoIn i[3], inout TriangleStream<GeoOut> OutputStream)
{
GeoOut o;
// add triangle data
for(uint idx = 0; idx < 3; ++idx)
{
o.p[idx] = i[idx].position;
}
// generate vertices
for(idx = 0; idx < 3; ++idx)
{
o.projPos = i[idx].projPos;
o.position = i[idx].position;
OutputStream.Append(o);
}
OutputStream.RestartStrip();
}
The pixel shader outputs the manually reconstructed position:
struct PixelIn
{
float4 projPos : SV_POSITION;
float3 position : POSITION;
float3 p[3] : TRIPOS;
float3 bayr : SV_Barycentrics;
};
float4 main(PixelIn i) : SV_TARGET
{
float3 pos = i.bayr.x * i.p[0] + i.bayr.y * i.p[1] + i.bayr.z * i.p[2];
return float4(abs(pos), 1.0);
}
And I get the following (expected) result:
However, when I modify my PixelIn struct by adding nointerpolation to p[3]:
struct PixelIn
{
...
nointerpolation float3 p[3] : TRIPOS;
};
I get:
I did not expect a different result because I am not changing the values of p[] for a single triangle in the geometry shader. I tried debugging it by changing the output to float4(abs(i.p[0]), 1.0); with and without interpolation. Without nointerpolation the values of p[] do not vary within a triangle (which makes sense, because all should have the same value). With nointerpolation the values of p[] do change slightly. Why is that the case? I thought nointerpolation was not supposed to interpolate anything.
Edit:
This is the wireframe of my geometry:

3d model not rendered (DirectX 12)

I am developing a small program that loads 3D models using Assimp, but it does not render the model. At first I thought the vertices and indices were not loaded correctly, but this is not the case (I printed the vertices and indices to a txt file). I think the problem might be with the position of the model and camera. The application does not return any error; it runs properly.
Vertex Struct:
struct Vertex {
XMFLOAT3 position;
XMFLOAT2 texture;
XMFLOAT3 normal;
};
Input layout:
D3D12_INPUT_ELEMENT_DESC inputLayout[] =
{
{ "POSITION", 0, DXGI_FORMAT_R32G32B32_FLOAT, 0, 0, D3D12_INPUT_CLASSIFICATION_PER_VERTEX_DATA, 0 },
{ "TEXCOORD", 0, DXGI_FORMAT_R32G32_FLOAT, 0, 12, D3D12_INPUT_CLASSIFICATION_PER_VERTEX_DATA, 0 },
{ "NORMAL", 0, DXGI_FORMAT_R32G32B32_FLOAT, 0, D3D12_APPEND_ALIGNED_ELEMENT, D3D12_INPUT_CLASSIFICATION_PER_VERTEX_DATA, 0 }
};
Vertices, texcoords, normals and indices loader:
model = new ModelMesh();
std::vector<XMFLOAT3> positions;
std::vector<XMFLOAT3> normals;
std::vector<XMFLOAT2> texCoords;
std::vector<unsigned int> indices;
model->LoadMesh("beast.x", positions, normals,
texCoords, indices);
// Create vertex buffer
if (positions.size() == 0)
{
MessageBox(0, L"Vertices vector is empty.",
L"Error", MB_OK);
}
Vertex* vList = new Vertex[positions.size()];
for (size_t i = 0; i < positions.size(); i++)
{
Vertex vert;
XMFLOAT3 pos = positions[i];
vert.position = XMFLOAT3(pos.x, pos.y, pos.z);
XMFLOAT3 norm = normals[i];
vert.normal = XMFLOAT3(norm.x, norm.y, norm.z);
XMFLOAT2 tex = texCoords[i];
vert.texture = XMFLOAT2(tex.x, tex.y);
vList[i] = vert;
}
int vBufferSize = sizeof(vList);
Build of the camera and views:
XMMATRIX tmpMat = XMMatrixPerspectiveFovLH(45.0f*(3.14f/180.0f), (float)Width / (float)Height, 0.1f, 1000.0f);
XMStoreFloat4x4(&cameraProjMat, tmpMat);
// set starting camera state
cameraPosition = XMFLOAT4(0.0f, 2.0f, -4.0f, 0.0f);
cameraTarget = XMFLOAT4(0.0f, 0.0f, 0.0f, 0.0f);
cameraUp = XMFLOAT4(0.0f, 1.0f, 0.0f, 0.0f);
// build view matrix
XMVECTOR cPos = XMLoadFloat4(&cameraPosition);
XMVECTOR cTarg = XMLoadFloat4(&cameraTarget);
XMVECTOR cUp = XMLoadFloat4(&cameraUp);
tmpMat = XMMatrixLookAtLH(cPos, cTarg, cUp);
XMStoreFloat4x4(&cameraViewMat, tmpMat);
cube1Position = XMFLOAT4(0.0f, 0.0f, 0.0f, 0.0f);
XMVECTOR posVec = XMLoadFloat4(&cube1Position);
tmpMat = XMMatrixTranslationFromVector(posVec);
XMStoreFloat4x4(&cube1RotMat, XMMatrixIdentity());
XMStoreFloat4x4(&cube1WorldMat, tmpMat);
Update function :
XMStoreFloat4x4(&cube1WorldMat, worldMat);
XMMATRIX viewMat = XMLoadFloat4x4(&cameraViewMat); // load view matrix
XMMATRIX projMat = XMLoadFloat4x4(&cameraProjMat); // load projection matrix
XMMATRIX wvpMat = XMLoadFloat4x4(&cube1WorldMat) * viewMat * projMat; // create wvp matrix
XMMATRIX transposed = XMMatrixTranspose(wvpMat); // must transpose wvp matrix for the gpu
XMStoreFloat4x4(&cbPerObject.wvpMat, transposed); // store transposed wvp matrix in constant buffer
memcpy(cbvGPUAddress[frameIndex], &cbPerObject, sizeof(cbPerObject));
VERTEX SHADER:
struct VS_INPUT
{
float4 pos : POSITION;
float2 tex: TEXCOORD;
float3 normal : NORMAL;
};
struct VS_OUTPUT
{
float4 pos: SV_POSITION;
float2 tex: TEXCOORD;
float3 normal: NORMAL;
};
cbuffer ConstantBuffer : register(b0)
{
float4x4 wvpMat;
};
VS_OUTPUT main(VS_INPUT input)
{
VS_OUTPUT output;
output.pos = mul(input.pos, wvpMat);
return output;
}
I know it is a lot of code to read, but I don't understand what is going wrong here. I hope somebody can help me.
A few things to try/check:
Make your background clear color grey. That way, if you are drawing black triangles you will see them.
Turn backface culling off in the rendering state, in case your triangles are back to front.
Turn depth test off in the rendering state.
Turn off alpha blending.
You don't show your pixel shader, but try writing a constant color to see if your lighting calculation is broken.
Use NVIDIA's nSight tool, or the Visual Studio Graphics debugger to see what your graphics pipeline is doing.
Those are usually the things I try first...
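Along the same lines, on a debug build it can help to enable the D3D12 debug layer before creating the device, so that invalid state shows up in the debugger output. A minimal sketch using the standard D3D12 API (not code from the question):
#include <d3d12.h>
#include <wrl/client.h>
using Microsoft::WRL::ComPtr;

void EnableD3D12DebugLayer()
{
#if defined(_DEBUG)
    ComPtr<ID3D12Debug> debugController;
    // must be called before D3D12CreateDevice
    if (SUCCEEDED(D3D12GetDebugInterface(IID_PPV_ARGS(&debugController))))
        debugController->EnableDebugLayer();
#endif
}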

How to apply a gradient effect on an image (GDI)

How can I apply a gradient effect to an image like this in C#? I have a transparent image with a black drawing, and I want to apply a two-color gradient to the image. Is this possible in GDI?
Here is the effect i want to achieve
http://postimg.org/image/ikz1ie7ip/
You can create a LinearGradientBrush and draw with that brush.
To create a bitmap filled with a gradient brush you could do something like:
public Bitmap GradientImage(int width, int height, Color color1, Color color2, float angle)
{
var r = new Rectangle(0, 0, width, height);
var bmp = new Bitmap(width, height);
using (var brush = new LinearGradientBrush(r, color1, color2, angle, true))
using (var g = Graphics.FromImage(bmp))
g.FillRectangle(brush, r);
return bmp;
}
So now that you have an image with the gradient in it, all you have to do is to bring over the alpha channel from your original image into the newly created image. We can take the transferOneARGBChannelFromOneBitmapToAnother function from a blog post I once wrote:
public enum ChannelARGB
{
Blue = 0,
Green = 1,
Red = 2,
Alpha = 3
}
public static void transferOneARGBChannelFromOneBitmapToAnother(
Bitmap source,
Bitmap dest,
ChannelARGB sourceChannel,
ChannelARGB destChannel )
{
if ( source.Size!=dest.Size )
throw new ArgumentException();
Rectangle r = new Rectangle( Point.Empty, source.Size );
BitmapData bdSrc = source.LockBits( r, ImageLockMode.ReadOnly, PixelFormat.Format32bppArgb );
BitmapData bdDst = dest.LockBits( r, ImageLockMode.ReadWrite, PixelFormat.Format32bppArgb );
unsafe
{
byte* bpSrc = (byte*)bdSrc.Scan0.ToPointer();
byte* bpDst = (byte*)bdDst.Scan0.ToPointer();
bpSrc += (int)sourceChannel;
bpDst += (int)destChannel;
for ( int i = r.Height * r.Width; i > 0; i-- )
{
*bpDst = *bpSrc;
bpSrc += 4;
bpDst += 4;
}
}
source.UnlockBits( bdSrc );
dest.UnlockBits( bdDst );
}
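Note that this method uses raw pointers, so the project has to be compiled with the "Allow unsafe code" option enabled.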
Now you could do something like:
var newImage = GradientImage( original.Width, original.Height, Color.Yellow, Color.Blue, 45 );
transferOneARGBChannelFromOneBitmapToAnother( original, newImage, ChannelARGB.Alpha, ChannelARGB.Alpha );
And there you are. :-)
