OpenGL - Bright directional light on objects?

The light I added looks weak on objects. I want it brighter, and I don't want to give the light a position. In other words, I want a bright light across the whole screen, with the same brightness in the middle as at the corners.
float[] lightColor = {1, 1, 1, 0};
float[] lightPosition = {0, 0, 10, 0}; // w = 0 makes this a directional light

@Override
public void render () {
    ...
    gl.glEnable(GL10.GL_LIGHTING);
    ...
    gl.glEnable(GL10.GL_LIGHT0);
    gl.glLightfv(GL10.GL_LIGHT0, GL10.GL_DIFFUSE, lightColor, 0);
    gl.glLightfv(GL10.GL_LIGHT0, GL10.GL_POSITION, lightPosition, 0);
}

Related

How to absolutely ensure Direct3D9 hook overlay rendering on top

I'm trying to hook IDirect3DDevice9::Present or ::EndScene and render my own overlay (which, for now, is just a simple rectangle) on top of everything else in a D3D9 application, but my overlay seems to be appearing and disappearing quite randomly. The drawing code I'm currently using is:
struct CUSTOMVERTEX {
    float x, y, z, rhw;
    DWORD color;
};

#define CUSTOMFVF (D3DFVF_XYZRHW | D3DFVF_DIFFUSE)

void draw() {
    // the positions are just for testing purposes, so they don't really make sense
    CUSTOMVERTEX vertices[] = {
        { 0, 0, 0, 1.0f, D3DCOLOR_XRGB(255, 255, 255) },
        { 0, cursor_pos.y + 500, 0, 1.0f, D3DCOLOR_XRGB(127, 255, 255) },
        { cursor_pos.x, cursor_pos.y, 0, 1.0f, D3DCOLOR_XRGB(255, 255, 255) },
        { cursor_pos.x, 600, 0, 1.0f, D3DCOLOR_XRGB(127, 0, 0) }
    };

    if (vBuffer == 0) return;

    VOID* pVoid;
    vBuffer->Lock(0, 0, &pVoid, 0);
    memcpy(pVoid, vertices, sizeof(vertices));
    vBuffer->Unlock();

    d3dDevice->SetRenderState(D3DRS_LIGHTING, FALSE);
    d3dDevice->SetRenderState(D3DRS_ALPHABLENDENABLE, FALSE);

    D3DMATRIX orthographicMatrix;
    D3DMATRIX identityMatrix;
    // MAKE_D3DMATRIX should be equivalent to the D3DXMATRIX constructor
    D3DMATRIX viewMatrix = MAKE_D3DMATRIX(
        1, 0, 0, 0,
        0, 1, 0, 0,
        0, 0, 1, 0,
        (float)-(get_window_width() / 2), (float)-(get_window_height() / 2), 0, 1
    );
    // MAKE_ORTHO_LH is equivalent to D3DXMatrixOrthoLH
    MAKE_ORTHO_LH(&orthographicMatrix, (FLOAT)get_window_width(), (FLOAT)get_window_height(), -1.0, 1.0);
    // MAKE_IDENTITY is equivalent to D3DXMatrixIdentity
    MAKE_IDENTITY(&identityMatrix);

    d3dDevice->SetTransform(D3DTS_PROJECTION, &orthographicMatrix);
    d3dDevice->SetTransform(D3DTS_WORLD, &identityMatrix);
    d3dDevice->SetTransform(D3DTS_VIEW, &viewMatrix);

    d3dDevice->SetRenderState(D3DRS_ZENABLE, false);
    d3dDevice->SetFVF(CUSTOMFVF);
    d3dDevice->SetStreamSource(0, vBuffer, 0, sizeof(CUSTOMVERTEX));
    d3dDevice->DrawPrimitive(D3DPT_TRIANGLEFAN, 0, 2);
    d3dDevice->SetRenderState(D3DRS_ZENABLE, true);
}
I can't seem to figure out why this causes the overlay to pop in and out of existence at seemingly random points in time. What am I missing?
I found a foolproof method to render my stuff directly to the backbuffer (on CPU):
IDirect3DSwapChain9 *sc;
if (FAILED(d3dDevice->GetSwapChain(0, &sc))) {
    PRINT("GetSwapChain failed\n");
    return;
}
IDirect3DSurface9 *s;
if (FAILED(sc->GetBackBuffer(0, D3DBACKBUFFER_TYPE_MONO, &s))) {
    PRINT("GetBackBuffer failed\n");
    return;
}
D3DLOCKED_RECT r;
if (FAILED(s->LockRect(&r, NULL, D3DLOCK_DONOTWAIT))) {
    PRINT("LockRect failed\n");
    return;
}
// here, find out the pixel format and manipulate the
// pixel buffer through r.pBits, using r.Pitch accordingly
s->UnlockRect();
s->Release();
sc->Release();
This will most probably incur a performance penalty, but in my application it doesn't really make a difference. A more optimal way would be to harness the GPU's hardware-accelerated rendering capabilities, but I currently lack the D3D knowledge to do so.
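For illustration, the pixel manipulation hinted at in the comment above could look like the following, placed between LockRect and UnlockRect. This is only a sketch, assuming the back buffer turns out to be D3DFMT_X8R8G8B8; the format check and the test coordinates are illustrative, not part of the original code.
D3DSURFACE_DESC desc;
s->GetDesc(&desc);
if (desc.Format == D3DFMT_X8R8G8B8) {
    // write one opaque red test pixel; Pitch is the length of a row in bytes
    unsigned int x = 10, y = 10;
    BYTE *row = (BYTE *)r.pBits + y * r.Pitch;
    ((DWORD *)row)[x] = D3DCOLOR_XRGB(255, 0, 0);
}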

How can I add new ncurses subwindows in response to user demand?

I'm trying to write an ncurses program which adds new windows in response to the user pressing keys. For example, consider the following C++ code:
#include <iostream>
#include <cstdlib>   // for rand()
#include "curses.h"

using namespace std;

int main()
{
    WINDOW * win = initscr();
    start_color();
    noecho();

    WINDOW * sub = subwin(win, 20, 20, 2, 2);
    wborder(sub, 0, 0, 0, 0, 0, 0, 0, 0);
    keypad(win, TRUE);

    while (true)
    {
        int c = wgetch(win);
        if (c == KEY_DOWN)
        {
            // create a small sub-window at a random position inside 'sub'
            WINDOW* box = subwin(sub, 2, 2, (rand() % 20) + 2, (rand() % 20) + 2);
            wborder(box, 0, 0, 0, 0, 0, 0, 0, 0);
        }
        else if (c == KEY_UP)
        {
            wrefresh(sub);
        }
    }

    endwin();
    return 0;
}
The user can press the down key to create new windows as many times as they want, but wrefresh will only draw them once. This appears to be related to the call to wgetch: a program that doesn't respond to keys works fine. Calling refresh also causes the problem.
The subwin man page says:
When using this routine, it is necessary to call touchwin or
touchline on orig before calling wrefresh on the subwindow.
Creating a sub-window does not actually change the parent window, so after the first refresh the parent window is considered unchanged; touching the window marks it as changed so it gets redrawn.
So change:
wrefresh(sub);
to
touchwin(sub);
wrefresh(sub);
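In context, the KEY_UP branch of the loop (using only names from the question's code) then becomes:
else if (c == KEY_UP)
{
    // mark the parent as changed so wrefresh() actually repaints it
    touchwin(sub);
    wrefresh(sub);
}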

Reading from multiple render targets in DirectX

I have two render target views, my back buffer and a Texture2D that represents a specialized mask (I can't use the stencil buffer for what I need). My render method looks roughly like this:
// clear the render target and depth stencil views, set render targets,
// update subresources, set vertex buffers, topology, input layout, etc.
// draw to the back buffer and the mask
m_d3dContext->PSSetShader(m_pixelShader1.Get(), nullptr, 0);
m_d3dContext->IASetIndexBuffer(m_indexBuffer1.Get(), DXGI_FORMAT_R16_UINT, 0);
m_d3dContext->DrawIndexed(m_indexCount1, 0, 0);
// read from the mask and draw to the back buffer
m_d3dContext->PSSetShader(m_pixelShader2.Get(), nullptr, 0);
m_d3dContext->IASetIndexBuffer(m_indexBuffer2.Get(), DXGI_FORMAT_R16_UINT, 0);
m_d3dContext->DrawIndexed(m_indexCount2, 0, 0);
PixelShader1 looks like this (leaving out the input struct and some other parts):
struct PixelShaderInput
{
    float4 Color : COLOR;
    float2 Texture : TEXCOORD;
};

struct PixelShaderOutput
{
    float4 Color : SV_TARGET0;
    float4 Mask : SV_TARGET1;
};

PixelShaderOutput main(PixelShaderInput input)
{
    PixelShaderOutput output;
    output.Color.rgba = input.Color;
    output.Mask.rgba = float4(1, 0, 0, 1);
    return output;
}
PixelShader2 looks like this:
struct PixelShaderInput
{
    float3 Color : COLOR0;
    float2 Texture : TEXCOORD0;
    float4 Mask : COLOR1;
};

struct PixelShaderOutput
{
    float4 Color : SV_TARGET0;
};

PixelShaderOutput main(PixelShaderInput input)
{
    PixelShaderOutput output;
    // do some work with input.Texture
    if (input.Mask.x == 0)
    {
        output.Color = float4(0, 0, 0, 1);
    }
    else
    {
        output.Color = float4(1, 1, 1, 1);
    }
    return output;
}
Using Visual Studio's graphics debugging tools, I can see that the mask texture is written correctly by PixelShader1. At the end of the frame, there is red geometry in the right positions. Furthermore, the output to the back buffer (which looks different) is also as expected. So I strongly believe that PixelShader1 is correct.
However, I am getting unexpected results from PixelShader2. Stepping through it, the values for input.Mask.x are all over the place. One would expect them to be either 1 or 0, as that is all the other pixel shader sets them to be, but instead the values look an awful lot like the texture coordinates.
So, first of all, is what I'm trying to do even possible? If it is, can anybody spot my error in how I'm reading COLOR1? I have been stumped by this for hours, so any help would be greatly appreciated.
I'm working in DirectX 11, shader model 4.0 level 9_1, and vc110.
You have a few concepts wrong in how PixelShader2 is written. PixelShader1 writes the values of Color and Mask to two render-target textures. In PixelShader2 you need to read those values back from a texture, not receive them as vertex input.
Texture2D<float4> MaskTexture;
SamplerState MaskSampler;

struct PixelShaderInput
{
    float3 Color : COLOR0;
    float2 Texture : TEXCOORD0;
};

struct PixelShaderOutput
{
    float4 Color : SV_TARGET0;
};

PixelShaderOutput main(PixelShaderInput input)
{
    PixelShaderOutput output;
    // do some work with input.Texture
    float maskValue = MaskTexture.Sample(MaskSampler, input.Texture).x;
    if (maskValue == 0)
    {
        output.Color = float4(0, 0, 0, 1);
    }
    else
    {
        output.Color = float4(1, 1, 1, 1);
    }
    return output;
}
So in order to do that, you will need to pass MaskTexture (the second render-target output from PixelShader1) into the second draw as a shader resource via PSSetShaderResources(). You will also need to create a sampler state with CreateSamplerState() and bind it to the pixel shader stage via PSSetSamplers().
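A rough sketch of the C++ side, assuming D3D11 with the m_d3dContext member from the question plus an m_d3dDevice device pointer; m_maskTexture, m_backBufferRTV, m_depthStencilView, maskSRV and maskSampler are illustrative names, and the mask texture is assumed to have been created with both D3D11_BIND_RENDER_TARGET and D3D11_BIND_SHADER_RESOURCE:
#include <d3d11.h>
#include <wrl/client.h>
using Microsoft::WRL::ComPtr;

// One-time setup: a shader resource view over the mask texture and a sampler state.
ComPtr<ID3D11ShaderResourceView> maskSRV;
m_d3dDevice->CreateShaderResourceView(m_maskTexture.Get(), nullptr, &maskSRV);

D3D11_SAMPLER_DESC samplerDesc = {};
samplerDesc.Filter = D3D11_FILTER_MIN_MAG_MIP_POINT;
samplerDesc.AddressU = D3D11_TEXTURE_ADDRESS_CLAMP;
samplerDesc.AddressV = D3D11_TEXTURE_ADDRESS_CLAMP;
samplerDesc.AddressW = D3D11_TEXTURE_ADDRESS_CLAMP;
samplerDesc.ComparisonFunc = D3D11_COMPARISON_NEVER;
samplerDesc.MaxLOD = D3D11_FLOAT32_MAX;
ComPtr<ID3D11SamplerState> maskSampler;
m_d3dDevice->CreateSamplerState(&samplerDesc, &maskSampler);

// Per frame, before the second DrawIndexed: a resource cannot be bound as a
// render target and a shader resource at the same time, so rebind only the
// back buffer as the render target, then expose the mask to the pixel shader.
m_d3dContext->OMSetRenderTargets(1, m_backBufferRTV.GetAddressOf(), m_depthStencilView.Get());
m_d3dContext->PSSetShaderResources(0, 1, maskSRV.GetAddressOf());
m_d3dContext->PSSetSamplers(0, 1, maskSampler.GetAddressOf());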

Allegro program triggers breakpoint?

So I'm learning about sprite programming and we're using Allegro. When I run one of the sample programs in Visual Studio I get the message "rotatesprite.exe has triggered a breakpoint." I can't get Allegro to work outside of Visual Studio.
sample program:
#include <allegro.h>

#define WHITE makecol(255,255,255)

int main(void)
{
    int x, y;
    float angle = 0;
    BITMAP *tank;

    //initialize program
    allegro_init();
    install_keyboard();
    set_color_depth(32);
    set_gfx_mode(GFX_AUTODETECT_WINDOWED, 640, 480, 0, 0);
    textout_ex(screen, font, "Rotate: LEFT / RIGHT arrow keys",
               0, 0, WHITE, 0);

    //load tank sprite
    tank = load_bitmap("C:\Users\Jason\Desktop\module7\tank.bmp", NULL);

    //calculate center of screen
    //x = SCREEN_W/2 - tank->w/2;
    //y = SCREEN_H/2 - tank->h/2;
    x = SCREEN_W/2;
    y = SCREEN_H/2;

    //draw tank at starting location
    rotate_sprite(screen, tank, x, y, 0);

    //main loop
    while(!key[KEY_ESC])
    {
        //wait for keypress
        if (keypressed())
        {
            //left arrow rotates left
            if (key[KEY_LEFT])
            {
                angle -= 0.1;
                if (angle < 0) angle = 256;
                rotate_sprite(screen, tank, x, y, itofix(angle));
            }
            //right arrow rotates right
            if (key[KEY_RIGHT])
            {
                angle += 0.1;
                if (angle > 256) angle = 0;
                rotate_sprite(screen, tank, x, y, itofix(angle));
            }
            //display angle
            textprintf_ex(screen, font, 0, 10, WHITE, 0,
                          "Angle = %f", angle);
        }
    }

    allegro_exit();
    return 0;
}
END_OF_MAIN()
The breakpoint is triggered in crt0msg.c, from the CRT sources on the disk. A snippet of that code:
#ifdef _DEBUG
        /*
         * Report error.
         *
         * If _CRT_ERROR has _CRTDBG_REPORT_WNDW on, and user chooses
         * "Retry", call the debugger.
         *
         * Otherwise, continue execution.
         *
         */
        if (rterrnum != _RT_CRNL && rterrnum != _RT_BANNER && rterrnum != _RT_CRT_NOTINIT)
        {
            if (1 == _CrtDbgReport(_CRT_ERROR, NULL, 0, NULL, rterrs[tblindx].rterrtxt))
                _CrtDbgBreak();
        }
#endif  /* _DEBUG */
tank = load_bitmap("C:\Users\Jason\Desktop\module7\tank.bmp", NULL);
Your compiler should be warning you about that string, since it contains invalid escape sequences. You should use double backslashes or single forward slashes:
tank = load_bitmap("C:\\Users\\Jason\\Desktop\\module7\\tank.bmp", NULL);
// or
tank = load_bitmap("C:/Users/Jason/Desktop/module7/tank.bmp", NULL);
The latter format is recommended because it is cross-platform. (Setting aside the whole issue of hard-coding an absolute path.)
Finally, you really need to check return codes:
if (!tank) {
    // gracefully report the error and exit
}
Otherwise the program will crash somewhere else and it will be harder to debug.
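For example, a minimal version of that check (a sketch assuming Allegro 4, where dropping back to text mode with set_gfx_mode(GFX_TEXT, ...) lets allegro_message() display the error; the message text is illustrative):
tank = load_bitmap("C:/Users/Jason/Desktop/module7/tank.bmp", NULL);
if (!tank) {
    set_gfx_mode(GFX_TEXT, 0, 0, 0, 0);   // leave graphics mode first
    allegro_message("Could not load tank.bmp");
    allegro_exit();
    return 1;
}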

Convert code from OpenGL ES 1.0 to OpenGL ES 2.0

This is the code:
glEnable(GL_TEXTURE_2D);
glBindTexture(GL_TEXTURE_2D, mFont->mTexId);
glEnable(GL_BLEND);
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
// Bind our vertex data
glVertexPointer(2, GL_FLOAT, 0, mVertices);
glEnableClientState(GL_VERTEX_ARRAY);
glTexCoordPointer(2, GL_FLOAT, 0, mUVs);
glEnableClientState(GL_TEXTURE_COORD_ARRAY);
// Draw the text
glDrawElements(GL_TRIANGLES, 6 * mNumberOfQuads, GL_UNSIGNED_BYTE, mIndices);
I tried the following code, but it is not working. The problem is that I'm just beginning to learn OpenGL ES and I don't understand a lot of things yet.
// Enable texturing, bind the font's texture and set up blending
//glEnable(GL_TEXTURE_2D);
glBindTexture(GL_TEXTURE_2D, mFont->mTexId);
//glEnable(GL_BLEND);
//glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
// Bind our vertex data
//glVertexPointer(2, GL_FLOAT, 0, mVertices);
//void VertexPointer(int size,enum type,sizei stride, void *pointer );
glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, 20, mVertices);
//void VertexAttribPointer( uint index, int size, enum type, boolean normalized, sizei stride, const void *pointer );
//glEnableClientState(GL_VERTEX_ARRAY);
//glEnableVertexAttribArray(VERTEX_ARRAY);
// glTexCoordPointer(2, GL_FLOAT, 0, mUVs);
//glEnableClientState(GL_TEXTURE_COORD_ARRAY);
// Bind the VBO so we can fill it with data
glBindBuffer(GL_ARRAY_BUFFER, 2);
// Draw the text
glDrawArrays(GL_TRIANGLES, 0, 6 * mNumberOfQuads);
//void DrawArrays( enum mode, int first, sizei count );
//glDrawElements(GL_TRIANGLES, , GL_UNSIGNED_BYTE, );
//void DrawElements(enum mode, sizei count, enum type, void *indices);
Try compiling and binding a vertex and fragment shader before you submit your geometry. There's no fixed-function pipeline at all in ES 2.0.
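A minimal sketch of what that could look like for the textured drawing above, reusing mFont->mTexId, mVertices, mUVs, mIndices and mNumberOfQuads from the question. The attribute/uniform names and helper functions are illustrative, error checking is omitted, and the vertex shader assumes the positions are already in clip space (in a real port the old modelview/projection matrices would be passed in as a uniform and applied there):
#include <GLES2/gl2.h>

// Helper: compile one shader stage. Real code should check GL_COMPILE_STATUS
// and print the info log on failure.
static GLuint CompileShader(GLenum type, const char* src) {
    GLuint shader = glCreateShader(type);
    glShaderSource(shader, 1, &src, NULL);
    glCompileShader(shader);
    return shader;
}

// One-time setup: build and link a program that replaces the fixed-function
// texturing / vertex-array path used in the ES 1.0 code.
static GLuint CreateTextProgram() {
    const char* vsSource =
        "attribute vec2 aPosition;\n"
        "attribute vec2 aTexCoord;\n"
        "varying vec2 vTexCoord;\n"
        "void main() {\n"
        "    vTexCoord = aTexCoord;\n"
        "    gl_Position = vec4(aPosition, 0.0, 1.0);\n"
        "}\n";
    const char* fsSource =
        "precision mediump float;\n"
        "varying vec2 vTexCoord;\n"
        "uniform sampler2D uTexture;\n"
        "void main() {\n"
        "    gl_FragColor = texture2D(uTexture, vTexCoord);\n"
        "}\n";
    GLuint program = glCreateProgram();
    glAttachShader(program, CompileShader(GL_VERTEX_SHADER, vsSource));
    glAttachShader(program, CompileShader(GL_FRAGMENT_SHADER, fsSource));
    glLinkProgram(program);   // check GL_LINK_STATUS in real code
    return program;
}

// Per draw: bind the program, the font texture and the vertex attributes,
// then issue the same indexed draw as before.
static void DrawTextQuads(GLuint program, GLuint texId, const GLfloat* vertices,
                          const GLfloat* uvs, const GLubyte* indices, int numberOfQuads) {
    glUseProgram(program);

    glActiveTexture(GL_TEXTURE0);
    glBindTexture(GL_TEXTURE_2D, texId);
    glUniform1i(glGetUniformLocation(program, "uTexture"), 0);

    glEnable(GL_BLEND);
    glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);

    GLint posLoc = glGetAttribLocation(program, "aPosition");
    GLint uvLoc  = glGetAttribLocation(program, "aTexCoord");
    glEnableVertexAttribArray(posLoc);
    glVertexAttribPointer(posLoc, 2, GL_FLOAT, GL_FALSE, 0, vertices);
    glEnableVertexAttribArray(uvLoc);
    glVertexAttribPointer(uvLoc, 2, GL_FLOAT, GL_FALSE, 0, uvs);

    glDrawElements(GL_TRIANGLES, 6 * numberOfQuads, GL_UNSIGNED_BYTE, indices);
}

// Usage, with the members from the question:
//   GLuint program = CreateTextProgram();          // once
//   DrawTextQuads(program, mFont->mTexId, mVertices, mUVs, mIndices, mNumberOfQuads);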
