How to ensure a Direct3D9 hook overlay always renders on top

I'm trying to hook IDirect3DDevice9::Present or ::EndScene and render my own overlay (for now, just a simple rectangle) on top of everything else in a D3D9 application, but my overlay appears and disappears seemingly at random. The drawing code I'm currently using is:
struct CUSTOMVERTEX {
    float x, y, z, rhw;
    DWORD color;
};

#define CUSTOMFVF (D3DFVF_XYZRHW | D3DFVF_DIFFUSE)

void draw() {
    // the positions are just for testing purposes, so they don't really make sense
    CUSTOMVERTEX vertices[] = {
        { 0,            0,                  0, 1.0f, D3DCOLOR_XRGB(255, 255, 255) },
        { 0,            cursor_pos.y + 500, 0, 1.0f, D3DCOLOR_XRGB(127, 255, 255) },
        { cursor_pos.x, cursor_pos.y,       0, 1.0f, D3DCOLOR_XRGB(255, 255, 255) },
        { cursor_pos.x, 600,                0, 1.0f, D3DCOLOR_XRGB(127, 0, 0) }
    };

    if (vBuffer == 0) return;

    VOID* pVoid;
    vBuffer->Lock(0, 0, &pVoid, 0);
    memcpy(pVoid, vertices, sizeof(vertices));
    vBuffer->Unlock();

    d3dDevice->SetRenderState(D3DRS_LIGHTING, FALSE);
    d3dDevice->SetRenderState(D3DRS_ALPHABLENDENABLE, FALSE);

    D3DMATRIX orthographicMatrix;
    D3DMATRIX identityMatrix;

    // MAKE_D3DMATRIX should be equivalent to the D3DXMATRIX constructor
    D3DMATRIX viewMatrix = MAKE_D3DMATRIX(
        1, 0, 0, 0,
        0, 1, 0, 0,
        0, 0, 1, 0,
        (float)-(get_window_width() / 2), (float)-(get_window_height() / 2), 0, 1
    );

    // MAKE_ORTHO_LH is equivalent to D3DXMatrixOrthoLH
    MAKE_ORTHO_LH(&orthographicMatrix, (FLOAT)get_window_width(), (FLOAT)get_window_height(), -1.0, 1.0);
    // MAKE_IDENTITY is equivalent to D3DXMatrixIdentity
    MAKE_IDENTITY(&identityMatrix);

    d3dDevice->SetTransform(D3DTS_PROJECTION, &orthographicMatrix);
    d3dDevice->SetTransform(D3DTS_WORLD, &identityMatrix);
    d3dDevice->SetTransform(D3DTS_VIEW, &viewMatrix);

    d3dDevice->SetRenderState(D3DRS_ZENABLE, FALSE);
    d3dDevice->SetFVF(CUSTOMFVF);
    d3dDevice->SetStreamSource(0, vBuffer, 0, sizeof(CUSTOMVERTEX));
    d3dDevice->DrawPrimitive(D3DPT_TRIANGLEFAN, 0, 2);
    d3dDevice->SetRenderState(D3DRS_ZENABLE, TRUE);
}
I can't figure out why this would cause the overlay to pop in and out of existence at seemingly random points in time. What am I missing?
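(The hook itself is not shown above; it is assumed to look roughly like the sketch below, where hookedEndScene is installed via a vtable patch or detour and oEndScene is a hypothetical pointer to the original EndScene.)
// Hypothetical hook stub, not part of the question's code: oEndScene is
// assumed to point to the original IDirect3DDevice9::EndScene.
typedef HRESULT (APIENTRY *EndScene_t)(IDirect3DDevice9*);
EndScene_t oEndScene = nullptr;

HRESULT APIENTRY hookedEndScene(IDirect3DDevice9* device)
{
    d3dDevice = device; // device used by draw()
    draw();             // render the overlay before the frame is presented
    return oEndScene(device);
}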

I found a foolproof method to render my stuff directly to the backbuffer (on CPU):
IDirect3DSwapChain9 *sc;
if (FAILED(d3dDevice->GetSwapChain(0, &sc))) {
    PRINT("GetSwapChain failed\n");
    return;
}

IDirect3DSurface9 *s;
if (FAILED(sc->GetBackBuffer(0, D3DBACKBUFFER_TYPE_MONO, &s))) {
    PRINT("GetBackBuffer failed\n");
    sc->Release();
    return;
}

D3DLOCKED_RECT r;
if (FAILED(s->LockRect(&r, NULL, D3DLOCK_DONOTWAIT))) {
    PRINT("LockRect failed\n");
    s->Release();
    sc->Release();
    return;
}

// here, find out the pixel format and manipulate the
// pixel buffer through r.pBits, using r.Pitch accordingly

s->UnlockRect();
s->Release();
sc->Release();
This will most probably incur a performance penalty, but in my application that doesn't really make a difference. A more optimal way would be to use the GPU's hardware-accelerated rendering, but I currently lack the D3D knowledge to do so.
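As an illustration of the "find out the pixel format and manipulate" step, here is a minimal sketch that fills a rectangle while the surface is locked, assuming a 32-bit X8R8G8B8/A8R8G8B8 back buffer (rectX, rectY, rectW and rectH are hypothetical overlay coordinates, not from the code above):
// minimal sketch; goes where the "find out the pixel format" comment is,
// i.e. before UnlockRect(). rectX/rectY/rectW/rectH: hypothetical rectangle.
D3DSURFACE_DESC desc;
s->GetDesc(&desc);
if (desc.Format == D3DFMT_X8R8G8B8 || desc.Format == D3DFMT_A8R8G8B8) {
    BYTE* base = (BYTE*)r.pBits;
    for (UINT y = rectY; y < rectY + rectH && y < desc.Height; ++y) {
        DWORD* row = (DWORD*)(base + y * r.Pitch);
        for (UINT x = rectX; x < rectX + rectW && x < desc.Width; ++x)
            row[x] = D3DCOLOR_XRGB(255, 0, 0); // opaque red
    }
}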

Related

How to compile a shader with the newest Effect Framework?

I usually use the Microsoft DirectX SDK (June 2010), which is deprecated. I downloaded VS2015, which includes the newest DirectX SDK, and the latest version of FX11 from GitHub. I don't know how to compile a shader now.
Before, I could compile a shader like this:
ID3D10Blob *compiledShader = 0;
ID3D10Blob *compilationMsgs = 0;
result = D3DX11CompileFromFile("SolidColor.fx", 0, 0, 0, "fx_5_0", shaderFlags,
                               0, 0, &compiledShader, &compilationMsgs, 0);
if (compilationMsgs != 0)
{
    MessageBox(0, (char*)compilationMsgs->GetBufferPointer(), 0, 0);
    compilationMsgs->Release();
    compilationMsgs = 0;
}
if (FAILED(result))
{
    MessageBox(0, "error", 0, 0);
    return false;
}
result = D3DX11CreateEffectFromMemory(compiledShader->GetBufferPointer(), compiledShader->GetBufferSize(),
                                      0, m_pd3dDevice, &m_pFx);
compiledShader->Release();
if (FAILED(result))
{
    MessageBox(0, "error", 0, 0);
    return false;
}
m_pTechnique = m_pFx->GetTechniqueByName("ColorTech");
m_pFxWorldViewProj = m_pFx->GetVariableByName("gWorldViewProj")->AsMatrix();
But how do I compile a shader now? Should I use D3DX11CompileEffectFromFile or D3DX11CreateEffectFromFile? Please give sample code, thank you.
I figured it out, and it is easy: just one function, D3DX11CompileEffectFromFile, is all that is needed.
// compile the effect
ID3DBlob* errorBlob = nullptr;
DWORD shaderFlags = D3DCOMPILE_ENABLE_STRICTNESS;
#if defined(_DEBUG) || defined(DEBUG)
shaderFlags |= D3DCOMPILE_DEBUG;
#endif
hr = D3DX11CompileEffectFromFile(L"color.fx", nullptr, D3D_COMPILE_STANDARD_FILE_INCLUDE, shaderFlags,
                                 0, m_pd3dDevice, &m_pFx, &errorBlob);
if (FAILED(hr))
{
    // the error blob contains an ANSI string, so use the ANSI MessageBox
    if (errorBlob)
    {
        MessageBoxA(nullptr, (LPCSTR)errorBlob->GetBufferPointer(), "error", MB_OK);
        errorBlob->Release();
    }
    return hr;
}
m_pTechnique = m_pFx->GetTechniqueByName("ColorTech");
m_pFxWorldViewProj = m_pFx->GetVariableByName("gWorldViewProj")->AsMatrix();
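For reference, the declaration in the FX11 header (d3dx11effect.h) looks roughly like this, SAL annotations omitted:
HRESULT WINAPI D3DX11CompileEffectFromFile(
    LPCWSTR                 pFileName,  // e.g. L"color.fx"
    const D3D_SHADER_MACRO* pDefines,   // optional defines, may be nullptr
    ID3DInclude*            pInclude,   // e.g. D3D_COMPILE_STANDARD_FILE_INCLUDE
    UINT                    HLSLFlags,  // D3DCOMPILE_* flags
    UINT                    FXFlags,    // usually 0
    ID3D11Device*           pDevice,
    ID3DX11Effect**         ppEffect,   // the created effect
    ID3DBlob**              ppErrors);  // compile errors as ANSI text, may be null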

How can I add new ncurses subwindows in response to user demand?

I'm trying to write an ncurses program which adds new windows in response to the user pressing keys. For example, consider the following C++ code:
#include <cstdlib>   // rand()
#include <iostream>
#include "curses.h"

using namespace std;

int main()
{
    WINDOW * win = initscr();
    start_color();
    noecho();

    WINDOW * sub = subwin(win, 20, 20, 2, 2);
    wborder(sub, 0, 0, 0, 0, 0, 0, 0, 0);
    keypad(win, TRUE);

    while (true)
    {
        int c = wgetch(win);
        if (c == KEY_DOWN)
        {
            WINDOW* box = subwin(sub, 2, 2, (rand() % 20) + 2, (rand() % 20) + 2);
            wborder(box, 0, 0, 0, 0, 0, 0, 0, 0);
        }
        else if (c == KEY_UP)
        {
            wrefresh(sub);
        }
    }

    endwin();
    return 0;
}
The user can press the down key to create new windows as many times as they want, but wrefresh will only draw them once. This appears to be related to the call to wgetch: a program that doesn't respond to keys works fine. Calling refresh also causes the problem.
The subwin man page says:
    When using this routine, it is necessary to call touchwin or touchline on orig before calling wrefresh on the subwindow.
Creating a sub-window does not actually change the parent window, so after the first refresh the parent window has not changed and nothing is redrawn; touching the window marks it as changed.
So change:
wrefresh(sub);
to
touchwin(sub);
wrefresh(sub);
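In context, the KEY_UP branch then becomes (the rest of the loop stays the same):
else if (c == KEY_UP)
{
    // subwindows share the parent's character buffer; touching the parent
    // marks it as changed so the refresh actually redraws the new boxes
    touchwin(sub);
    wrefresh(sub);
}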

OpenGL - Directional bright Light?

The light I added looks weak on objects. I want it brighter, and I don't want to give the light a position. I mean, I want a bright light over the whole screen, with the same brightness in the middle and at the corners:
float[] lightColor = {1, 1, 1, 0};
float[] lightPosition = {0, 0, 10, 0};

@Override
public void render() {
    ...
    gl.glEnable(GL10.GL_LIGHTING);
    ...
    gl.glEnable(GL10.GL_LIGHT0);
    gl.glLightfv(GL10.GL_LIGHT0, GL10.GL_DIFFUSE, lightColor, 0);
    gl.glLightfv(GL10.GL_LIGHT0, GL10.GL_POSITION, lightPosition, 0);
}

Reading from multiple render targets in DirectX

I have two render target views, my back buffer and a Texture2D that represents a specialized mask (I can't use the stencil buffer for what I need). My render method looks roughly like this:
// clear the render target and depth stencil views, set render targets,
// update subresources, set vertex buffers, topology, input layout, etc.
// draw to the back buffer and the mask
m_d3dContext->PSSetShader(m_pixelShader1.Get(), nullptr, 0);
m_d3dContext->IASetIndexBuffer(m_indexBuffer1.Get(), DXGI_FORMAT_R16_UINT, 0);
m_d3dContext->DrawIndexed(m_indexCount1, 0, 0);
// read from the mask and draw to the back buffer
m_d3dContext->PSSetShader(m_pixelShader2.Get(), nullptr, 0);
m_d3dContext->IASetIndexBuffer(m_indexBuffer2.Get(), DXGI_FORMAT_R16_UINT, 0);
m_d3dContext->DrawIndexed(m_indexCount2, 0, 0);
PixelShader1 looks like this (leaving out the input struct and some other parts):
struct PixelShaderInput
{
    float4 Color : COLOR;
    float2 Texture : TEXCOORD;
};

struct PixelShaderOutput
{
    float4 Color : SV_TARGET0;
    float4 Mask : SV_TARGET1;
};

PixelShaderOutput main(PixelShaderInput input)
{
    PixelShaderOutput output;
    output.Color.rgba = input.Color;
    output.Mask.rgba = float4(1, 0, 0, 1);
    return output;
}
PixelShader2 looks like this:
struct PixelShaderInput
{
    float3 Color : COLOR0;
    float2 Texture : TEXCOORD0;
    float4 Mask : COLOR1;
};

struct PixelShaderOutput
{
    float4 Color : SV_TARGET0;
};

PixelShaderOutput main(PixelShaderInput input)
{
    PixelShaderOutput output;
    // do some work with input.Texture
    if (input.Mask.x == 0)
    {
        output.Color = float4(0, 0, 0, 1);
    }
    else
    {
        output.Color = float4(1, 1, 1, 1);
    }
    return output;
}
Using Visual Studio's graphics debugging tools, I can see that the mask texture is written correctly by PixelShader1. At the end of the frame, there is red geometry in the right positions. Furthermore, the output to the back buffer (which looks different) is also as expected. So I strongly believe that PixelShader1 is correct.
However, I am getting unexpected results from PixelShader2. Stepping through it, the values for input.Mask.x are all over the place. One would expect them to be either 1 or 0, as that is all the other pixel shader sets them to be, but instead the values look an awful lot like the texture coordinates.
So, first of all, is what I'm trying to do even possible? If it is, can anybody spot my error in how I'm reading COLOR1? I have been stumped by this for hours, so any help would be greatly appreciated.
I'm working in DirectX 11, shader model 4.0 level 9_1, and vc110.
You have a few concepts wrong in how your PixelShader2 is written. PixelShader1 writes the Color and Mask values to two render target textures. In PixelShader2 you need to read the mask back from a texture, not as vertex input.
Texture2D<float4> MaskTexture;
SamplerState MaskSampler;

struct PixelShaderInput
{
    float3 Color : COLOR0;
    float2 Texture : TEXCOORD0;
};

struct PixelShaderOutput
{
    float4 Color : SV_TARGET0;
};

PixelShaderOutput main(PixelShaderInput input)
{
    PixelShaderOutput output;
    // do some work with input.Texture
    float maskValue = MaskTexture.Sample(MaskSampler, input.Texture).x;
    if (maskValue == 0)
    {
        output.Color = float4(0, 0, 0, 1);
    }
    else
    {
        output.Color = float4(1, 1, 1, 1);
    }
    return output;
}
So in order to do that, you will need to bind MaskTexture (your second render target output from PixelShader1) as a shader resource for the second draw via ID3D11DeviceContext::PSSetShaderResources(). You will also need to create a sampler state and bind it via PSSetSamplers().
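A rough sketch of that binding code for the second draw; the names m_backBufferRTV, m_depthStencilView, m_maskSRV and m_maskSampler are hypothetical ComPtr members not shown in the question, and the mask texture is assumed to be created with D3D11_BIND_SHADER_RESOURCE in addition to D3D11_BIND_RENDER_TARGET:
// A resource cannot be bound as a render target and a shader resource at the
// same time, so switch to rendering only to the back buffer first.
m_d3dContext->OMSetRenderTargets(1, m_backBufferRTV.GetAddressOf(), m_depthStencilView.Get());

// Bind the mask SRV and a sampler for the second pixel shader (register t0/s0).
m_d3dContext->PSSetShaderResources(0, 1, m_maskSRV.GetAddressOf());
m_d3dContext->PSSetSamplers(0, 1, m_maskSampler.GetAddressOf());

m_d3dContext->PSSetShader(m_pixelShader2.Get(), nullptr, 0);
m_d3dContext->IASetIndexBuffer(m_indexBuffer2.Get(), DXGI_FORMAT_R16_UINT, 0);
m_d3dContext->DrawIndexed(m_indexCount2, 0, 0);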

Convert code from OpenGL ES 1.0 to OpenGL ES 2.0

This is the code:
glEnable(GL_TEXTURE_2D);
glBindTexture(GL_TEXTURE_2D, mFont->mTexId);
glEnable(GL_BLEND);
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
// Bind our vertex data
glVertexPointer(2, GL_FLOAT, 0, mVertices);
glEnableClientState(GL_VERTEX_ARRAY);
glTexCoordPointer(2, GL_FLOAT, 0, mUVs);
glEnableClientState(GL_TEXTURE_COORD_ARRAY);
// Draw the text
glDrawElements(GL_TRIANGLES, 6 * mNumberOfQuads, GL_UNSIGNED_BYTE, mIndices);
I tried the following code, but it is not working. The problem is that I'm just beginning to learn OpenGL ES and I don't understand a lot of things yet.
// Enable texturing, bind the font's texture and set up blending
//glEnable(GL_TEXTURE_2D);
glBindTexture(GL_TEXTURE_2D, mFont->mTexId);
//glEnable(GL_BLEND);
//glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
// Bind our vertex data
//glVertexPointer(2, GL_FLOAT, 0, mVertices);
//void VertexPointer(int size,enum type,sizei stride, void *pointer );
glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, 20, mVertices);
//void VertexAttribPointer( uint index, int size, enum type, boolean normalized, sizei stride, const void *pointer );
//glEnableClientState(GL_VERTEX_ARRAY);
//glEnableVertexAttribArray(VERTEX_ARRAY);
// glTexCoordPointer(2, GL_FLOAT, 0, mUVs);
//glEnableClientState(GL_TEXTURE_COORD_ARRAY);
// Bind the VBO so we can fill it with data
glBindBuffer(GL_ARRAY_BUFFER, 2);
// Draw the text
glDrawArrays(GL_TRIANGLES, 0, 6 * mNumberOfQuads);
//void DrawArrays( enum mode, int first, sizei count );
//glDrawElements(GL_TRIANGLES, , GL_UNSIGNED_BYTE, );
//void DrawElements(enum mode, sizei count, enum type, void *indices);
Try compiling and binding a vertex and fragment shader before you submit your geometry; there's no fixed-function pipeline at all in ES 2.0.
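A minimal sketch of what the ES 2.0 path could look like for the snippet above, assuming the same mVertices/mUVs/mIndices data and that the positions are already in clip space (otherwise add a projection matrix uniform); shader compile/link error checks are omitted:
// Vertex and fragment shaders replace the fixed-function pipeline.
const char* vsSrc =
    "attribute vec2 aPosition;\n"
    "attribute vec2 aTexCoord;\n"
    "varying vec2 vTexCoord;\n"
    "void main() {\n"
    "    vTexCoord = aTexCoord;\n"
    "    gl_Position = vec4(aPosition, 0.0, 1.0);\n"
    "}\n";
const char* fsSrc =
    "precision mediump float;\n"
    "varying vec2 vTexCoord;\n"
    "uniform sampler2D uTexture;\n"
    "void main() {\n"
    "    gl_FragColor = texture2D(uTexture, vTexCoord);\n"
    "}\n";

GLuint vs = glCreateShader(GL_VERTEX_SHADER);
glShaderSource(vs, 1, &vsSrc, NULL);
glCompileShader(vs);
GLuint fs = glCreateShader(GL_FRAGMENT_SHADER);
glShaderSource(fs, 1, &fsSrc, NULL);
glCompileShader(fs);

GLuint program = glCreateProgram();
glAttachShader(program, vs);
glAttachShader(program, fs);
glLinkProgram(program);
glUseProgram(program);

// Blending works as before; glEnable(GL_TEXTURE_2D) no longer exists in ES 2.0.
glEnable(GL_BLEND);
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, mFont->mTexId);
glUniform1i(glGetUniformLocation(program, "uTexture"), 0);

// Generic vertex attributes replace glVertexPointer/glTexCoordPointer.
GLint posLoc = glGetAttribLocation(program, "aPosition");
GLint uvLoc  = glGetAttribLocation(program, "aTexCoord");
glEnableVertexAttribArray(posLoc);
glVertexAttribPointer(posLoc, 2, GL_FLOAT, GL_FALSE, 0, mVertices);
glEnableVertexAttribArray(uvLoc);
glVertexAttribPointer(uvLoc, 2, GL_FLOAT, GL_FALSE, 0, mUVs);

// Same indexed draw call as the 1.0 version.
glDrawElements(GL_TRIANGLES, 6 * mNumberOfQuads, GL_UNSIGNED_BYTE, mIndices);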
