Relating vertex buffers and vertex attributes in OpenGL on Linux

My entire code that is supposed to draw a triangle on the screen is:
#include <iostream>
#include <GL/glew.h>
#include <GLFW/glfw3.h>
#include <string.h>
const GLint WIDTH=800, HEIGHT=600;
GLuint VAO, VBO, shader;
//Vertex Shader
/*static const char**/
const GLchar* vShader = "\n"
"\n"
"#version 330 \n"
"layout (location=0) in vec3 pos;\n"
"void main(){\n"
"gl_Position = vec4(pos.x,pos.y,pos.z,1.0);\n"
"\n"
"}\n"
"";
// fragment shader
const GLchar* fShader = "\n"
"#version 330 \n"
"out vec4 colour;\n"
"void main(){\n"
"colour = vec4(1.0, 0.0, 0.0, 1.0);\n"
"}\n"
"\n"
"\n";
void CreateTriangle(){
GLfloat vertices[] = {
-1.0f, -1.0f, 0.0f,
1.0f, -1.0f, 0.0f,
0.0f, 1.0f, 0.0f
};
// vertex arrays
glGenVertexArrays(1, &VAO);
glBindVertexArray(VAO);
// vertex buffers
glGenBuffers(1, &VBO);
glBindBuffer(GL_ARRAY_BUFFER,VBO);
glBufferData(GL_ARRAY_BUFFER,sizeof(GLfloat)*9,vertices,GL_STATIC_DRAW);
glVertexAttribPointer(0,3, GL_FLOAT,GL_FALSE,0, 0);
glEnableVertexAttribArray(0);
glBindBuffer(GL_ARRAY_BUFFER, 0);
glBindVertexArray(0);
}
void AddShader(GLuint theProgram, const GLchar* shaderCode, GLenum shaderType){
GLuint theShader = glCreateShader(shaderType);
const GLchar* theCode[1];
theCode[0] = shaderCode;
GLint codeLength[1];
codeLength[0] = strlen(shaderCode);
glShaderSource(theShader, 1, theCode, codeLength);
glCompileShader(theShader);
GLint result=0;
GLchar eLog[1024]={};
glGetShaderiv(theShader, GL_COMPILE_STATUS, &result);
if(!result){
glGetShaderInfoLog(theShader,sizeof(eLog),NULL, eLog);
std::cout<< "Error compiling"<<shaderType<<" "<<eLog <<std::endl;
return;
}
glAttachShader(theProgram,theShader);
}
void CompileShader(){
shader = glCreateProgram();
if(!shader){
std::cout<<"Error Creating Shader Program";
return;
}
AddShader(shader, vShader,GL_VERTEX_SHADER);
AddShader(shader, fShader,GL_FRAGMENT_SHADER);
// getting error codes
GLint result=0;
GLchar eLog[1024]={0};
// Creates the executables in the graphic card
glLinkProgram(shader);
// get information if program is linked properly
glGetProgramiv(shader, GL_LINK_STATUS, &result);
if(!result){
glGetProgramInfoLog(shader,sizeof(eLog),NULL,eLog);
std::cout<<"Error linking program"<<eLog<<std::endl;
return;
}
glValidateProgram(shader);
glGetProgramiv(shader,GL_VALIDATE_STATUS,&result);
if(!result){
glGetProgramInfoLog(shader, sizeof(eLog),NULL, eLog);
std::cout<<"Error validating program"<<eLog<<std::endl;
return;
}
}
int main(void){
if(!glfwInit()){
std::cout << "glfw initialization failed" << std::endl;
glfwTerminate();
return 1;
}
// glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR,3);
// glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR,3);
// glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE);
// glfwWindowHint(GLFW_OPENGL_FORWARD_COMPAT, GL_TRUE);
GLFWwindow *mainWindow = glfwCreateWindow(WIDTH, HEIGHT, "NEW WINDOW", NULL, NULL);
if(!mainWindow){
std::cout<< "Window creation failed" <<std::endl;
glfwTerminate();
return 1;
}
int bufferWidth, bufferHeight;
glfwGetFramebufferSize(mainWindow, &bufferWidth, &bufferHeight);
glfwMakeContextCurrent(mainWindow);
if(glewInit() != GLEW_OK){
std::cout << "GLEW Initialization failed" << std::endl;
glfwDestroyWindow(mainWindow);
glfwTerminate();
return 1;
}
glViewport(0,0,bufferWidth, bufferHeight);
CreateTriangle();
CompileShader();
while(!glfwWindowShouldClose(mainWindow)){
glfwPollEvents();
glUseProgram(shader);
glClearColor(0.0f, 0.0f, 0.0f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT);
glBindVertexArray(VAO);
glDrawArrays(GL_TRIANGLES,0,3);
glBindVertexArray(0);
glUseProgram(0);
glfwSwapBuffers(mainWindow);
std::cout<<"something"<<std::endl;
}
return 0;
}
It is supposed to draw a red triangle, but it only draws a black screen, with no error whatsoever. I'm trying to debug this code, and there are some parts of it that I don't understand.
1) How does the VBO (Vertex Buffer Object) relate to the VAO (Vertex Array Object)? We defined both of them using the following code inside the CreateTriangle() function:
...
glGenVertexArrays(1, &VAO);
glBindVertexArray(VAO);
// vertex buffers
glGenBuffers(1, &VBO);
glBindBuffer(GL_ARRAY_BUFFER,VBO);
glBufferData(GL_ARRAY_BUFFER,sizeof(GLfloat)*9,vertices,GL_STATIC_DRAW);
glVertexAttribPointer(0,3, GL_FLOAT,GL_FALSE,0, 0);
glEnableVertexAttribArray(0);
glBindBuffer(GL_ARRAY_BUFFER, 0);
glBindVertexArray(0);
...
Note that we unbind both the VAO and the VBO at the end, but during the draw call inside the while loop:
while(!glfwWindowShouldClose(mainWindow)){
glfwPollEvents();
glUseProgram(shader);
glClearColor(0.0f, 0.0f, 0.0f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT);
glBindVertexArray(VAO);
glDrawArrays(GL_TRIANGLES,0,3);
glBindVertexArray(0);
glUseProgram(0);
glfwSwapBuffers(mainWindow);
std::cout<<"something"<<std::endl;
}
we only rebind the VAO and not the VBO, which I think may be the cause of the problem, but I don't know for sure.
Also, the tutorial says that the VBO is bound inside the VAO, but I don't see any call in the code that links the VBO to the VAO, so I'm confused about how they end up bound together, and why we only bind the VAO, and not the VBO, back during the drawing stage.
I'm using Linux and compiled with:
g++ -std=c++17 main.cpp -o main -lGL -lGLEW -lglfw && ./main

From the comments above, credit to @Rabbid76:
When glVertexAttribPointer is called, a reference to the currently bound VBO is stored in the currently bound VAO. The current VAO is bound by glBindVertexArray(VAO); and the current VBO is bound by glBindBuffer(GL_ARRAY_BUFFER, VBO);. glVertexAttribPointer associates the VBO with attribute index 0 of the VAO, and this association is stored in the VAO's state vector. Hence, binding the VAO with glBindVertexArray(VAO) before the draw call is sufficient; the VBO does not need to be rebound.
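To make that association concrete, here is the same setup again with comments marking which calls write into the VAO's state (a minimal annotated sketch reusing the VAO, VBO and the vertices array from CreateTriangle() in the question, not new functionality):
glGenVertexArrays(1, &VAO);
glBindVertexArray(VAO);                 // VAO becomes the current vertex array object
glGenBuffers(1, &VBO);
glBindBuffer(GL_ARRAY_BUFFER, VBO);     // context-level binding, not yet recorded in the VAO
glBufferData(GL_ARRAY_BUFFER, sizeof(GLfloat) * 9, vertices, GL_STATIC_DRAW);
// Reads the current GL_ARRAY_BUFFER binding (VBO) and stores it, together with
// the format (3 tightly packed floats), in attribute 0 of the current VAO.
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, 0);
glEnableVertexAttribArray(0);           // the enable flag is VAO state as well
glBindBuffer(GL_ARRAY_BUFFER, 0);       // safe: the reference already lives in the VAO
glBindVertexArray(0);
// At draw time, rebinding the VAO restores attribute 0 and its associated VBO:
glBindVertexArray(VAO);
glDrawArrays(GL_TRIANGLES, 0, 3);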
As for the black screen mentioned in the comments, try updating GLEW and your graphics card driver. If updating GLEW is not possible, set glewExperimental = GL_TRUE before calling glewInit().
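A minimal sketch of where that flag would go in the main() shown above (the surrounding error handling stays unchanged):
glfwMakeContextCurrent(mainWindow);
glewExperimental = GL_TRUE;  // let GLEW look up entry points even if the driver does not advertise them in the legacy extension string
if (glewInit() != GLEW_OK) {
    std::cout << "GLEW Initialization failed" << std::endl;
    glfwDestroyWindow(mainWindow);
    glfwTerminate();
    return 1;
}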

Related

How to load resources in the background?

So, first things first:
I know that OpenGL does not have a real concept of threaded execution. Creating a "shared context" and issuing commands to that shared context does not mean that things happen in parallel, but I at least thought that uploading data for a texture would work without much of a hitch. So what I set up is a window class that, upon construction, also constructs a loader (putting all the code here for a minimal working example isn't feasible):
Window Members:
class Window {
[...]
private:
GLFWwindow* p_handle;
GLFWwindow* p_loader_handle;
const char* p_title;
std::thread loader;
std::mutex queue_mutex;
std::condition_variable queue_cv;
std::queue<Task> task_queue;
Window Constructor
video::Window::Window(unsigned int width, unsigned int height, const char* title, bool decoration, bool vsync) : loader() {
this->p_title = title;
glfwWindowHint(GLFW_DECORATED, decoration);
glfwWindowHint(GLFW_VISIBLE, true);
this->p_handle = glfwCreateWindow((int) width, (int) height, title, NULL, NULL);
if (!p_handle) {
std::cerr << "Window could not be created. Exiting ..." << std::endl;
glfwTerminate();
exit(32);
}
glfwMakeContextCurrent(this->p_handle);
glfwWindowHint(GLFW_VISIBLE, false);
this->p_loader_handle = glfwCreateWindow(16, 16, "Loader", NULL, this->p_handle);
if (!p_loader_handle) {
std::cerr << "Window could not be created. Exiting ..." << std::endl;
glfwTerminate();
exit(33);
}
if (!gladLoadGLLoader((GLADloadproc)glfwGetProcAddress)) {
exit(66);
}
this->loader = std::thread(&Window::await_tasks, this);
[...]
The await tasks function:
void video::Window::await_tasks() {
glfwMakeContextCurrent(this->p_loader_handle);
glEnable(GL_BLEND);
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
glClearColor(0.0, 0.0, 0.0, 1.0);
while (!this->should_close()) {
std::unique_lock<std::mutex> lock(this->queue_mutex);
this->queue_cv.wait(lock, [this]() {return this->should_close() || !(this->task_queue.empty()); });
if (this->should_close()) {
return;
}
Task task = this->task_queue.front();
this->task_queue.pop();
lock.unlock();
task.task(task.data);
}
}
And when I want something to be done in the background I call:
void video::Window::task(std::function<void(void*)> task, void *data) {
std::unique_lock<std::mutex> lock(this->queue_mutex);
this->task_queue.emplace(task, data);
this->queue_cv.notify_one();
}
So yes, this is a basic thread pool implementation with a single thread in the pool.
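For context, posting work to that pool looks roughly like this (a hypothetical usage sketch: upload_background_texture, the window object and the file name are illustrative, while Window::task, image::load_texture and openGL::Texture are the ones defined in this post):
// Illustrative task function; it runs on the loader thread, where p_loader_handle is
// current, so the GL calls inside the Texture constructor go to the shared loader context.
void upload_background_texture(void *raw) {
    auto *filename = static_cast<std::string *>(raw);
    openGL::Texture texture = image::load_texture(*filename);
    // ... hand the finished texture back to the render thread, e.g. via another queue ...
}
// On the main thread (the string must stay alive until the task has run):
// std::string file = "textures/example.png";
// window.task(upload_background_texture, &file);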
The problem is that when I try to load a texture in the background, the resulting image seems to be only partially uploaded to the GPU, or not uploaded at all.
The code executed as a task is, for example, the following, using lodepng as a quick and easy PNG decoder:
void image::read_png(GLuint& width, GLuint& height, GLvoid **data, std::string filename)
{
std::vector<unsigned char> image_vector;
unsigned int error = lodepng::decode(image_vector, width, height, filename);
if (error) {
std::cerr << "[ERROR] lodepng error: " << error << std::endl;
std::cerr << "[ERROR] lodepng was unable to load: " << filename << std::endl;
}
// std::cout << "[INFO] Image of dimensions " << width << " x " << height << " and a size of " << image_vector.size() << " bytes loaded." << std::endl;
unsigned char *image_array = new unsigned char[image_vector.size()];
unsigned char *row;
for (unsigned int i = 0; i < height; i++) {
row = &image_vector[i * width * 4];
for (unsigned j = 0; j < width * 4; j++) {
image_array[j + (height - i - 1) * width * 4] = row[j];
}
}
*data = image_array;
}
openGL::Texture image::load_texture(std::string filename)
{
GLuint width;
GLuint height;
GLvoid *data;
texture_loader(width, height, &data, filename);
return openGL::Texture(width, height, data);
}
image::Loader image::texture_loader = image::read_png;
Where the constructor of the Texture wrapper is as follows:
openGL::Texture::Texture(GLuint width, GLuint height, GLvoid* data)
{
GLint bound;
glGetIntegerv(GL_TEXTURE_BINDING_2D, &bound);
glGenTextures(1, &(this->texture));
glBindTexture(GL_TEXTURE_2D, this->texture);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, data);
glBindTexture(GL_TEXTURE_2D, bound);
this->width = width;
this->height = height;
}
Debugging the internal memory with NVIDIA Nsight shows the same internal data being uploaded, as can be seen in this test image:
Yet when I upload it on the main thread, using the same image and the same process, I get this:
I am very much at a loss about what is happening here.
What I want to implement is a smooth loading screen, one that keeps animating while work is being done in the background, not one that stutters because a resource is loaded and processed each frame, and that hitches and breaks while waiting for larger files, as we have been used to for so many years in all kinds of applications and games.

Create a colored cube consisting of triangles in Android Studio

I have only managed to get a triangle working. This is the code, but I do not know what to do to turn it into a cube.
I tried to modify the code but did not get a cube, so this is the code from before I broke it. I will try to get it to rotate too.
public class Triangle {
static final int VERTEX_POS_SIZE = 4;
static final int COLOR_SIZE = 4;
static final int VERTEX_POS_INDEX = 0;
static final int COLOR_INDEX = 1;
static final int VERTEX_POS_OFFSET = 0;
static final int COLOR_OFFSET = 0;
static final int VERTEX_ATTRIB_SIZE = VERTEX_POS_SIZE;
static final int COLOR_ATTRIB_SIZE = COLOR_SIZE;
private final int VERTEX_COUNT = triangleData.length / VERTEX_ATTRIB_SIZE;
private FloatBuffer vertexDataBuffer;
private FloatBuffer colorDataBuffer;
static float triangleData[] = { // in counterclockwise order:
0.0f, 0.0f, 0.0f, 1.0f, // top
1.0f, 0.0f, 0.0f, 1.0f, // bottom left
1.0f, 0.0f, -1.0f, 1.0f, // bottom right
0.0f, 0.0f, -1.0f, 1.0f,
0.0f, 1.0f, 0.0f, 1.0f,
1.0f, 1.0f, 0.0f, 1.0f,
1.0f, 1.0f, -1.0f, 1.0f,
0.0f, 1.0f, -1.0f, 1.0f,
};
static float colorData[] = { // in counterclockwise order:
1.0f, 0.0f, 0.0f, 1.0f, // Red
0.0f, 1.0f, 0.0f, 1.0f, // Green
0.0f, 0.0f, 1.0f, 1.0f// Blue
};
// Set color with red, green, blue and alpha (opacity) values
float color[] = { 0.63671875f, 0.76953125f, 0.22265625f, 1.0f };
private final int mProgram;
private final String vertexShaderCode =
// This matrix member variable provides a hook to manipulate
// the coordinates of the objects that use this vertex shader
"attribute vec4 vPosition; \n" +
"attribute vec4 vColor; \n" +
"uniform mat4 uMVPMatrix;\n" +
"varying vec4 c; \n" +
"void main() { \n" +
" c = vColor; \n" +
// the matrix must be included as a modifier of gl_Position
// Note that the uMVPMatrix factor *must be first* in order
// for the matrix multiplication product to be correct.
" gl_Position = uMVPMatrix * vPosition;\n" +
"}";
private final String fragmentShaderCode =
"precision mediump float;\n" +
"varying vec4 c;\n" +
"void main() {\n" +
" gl_FragColor = c;\n" +
"}";
// Use to access and set the view transformation
private int mMVPMatrixHandle;
private int positionHandle;
private int colorHandle;
public Triangle() {
// initialize vertex byte buffer for shape coordinates
ByteBuffer bbv = ByteBuffer.allocateDirect(
// (number of coordinate values * 4 bytes per float)
triangleData.length * 4);
// use the device hardware's native byte order
bbv.order(ByteOrder.nativeOrder());
// create a floating point buffer from the ByteBuffer
vertexDataBuffer = bbv.asFloatBuffer();
// add the coordinates to the FloatBuffer
vertexDataBuffer.put(triangleData);
// set the buffer to read the first coordinate
vertexDataBuffer.position(0);
// initialize vertex byte buffer for shape coordinates
ByteBuffer bbc = ByteBuffer.allocateDirect(
// (number of coordinate values * 4 bytes per float)
colorData.length * 4);
// use the device hardware's native byte order
bbc.order(ByteOrder.nativeOrder());
// create a floating point buffer from the ByteBuffer
colorDataBuffer = bbc.asFloatBuffer();
// add the coordinates to the FloatBuffer
colorDataBuffer.put(colorData);
// set the buffer to read the first coordinate
colorDataBuffer.position(0);
int vertexShader = CGRenderer.loadShader(GLES20.GL_VERTEX_SHADER,
vertexShaderCode);
int fragmentShader = CGRenderer.loadShader(GLES20.GL_FRAGMENT_SHADER,
fragmentShaderCode);
// create empty OpenGL ES Program
mProgram = GLES20.glCreateProgram();
// add the vertex shader to program
GLES20.glAttachShader(mProgram, vertexShader);
// add the fragment shader to program
GLES20.glAttachShader(mProgram, fragmentShader);
// creates OpenGL ES program executables
GLES20.glLinkProgram(mProgram);
}
public void draw(float[] mvpMatrix) {
// Add program to OpenGL ES environment
GLES20.glUseProgram(mProgram);
// get handle to shape's transformation matrix
mMVPMatrixHandle = GLES20.glGetUniformLocation(mProgram, "uMVPMatrix");
positionHandle = GLES20.glGetAttribLocation(mProgram, "vPosition");
GLES20.glEnableVertexAttribArray(positionHandle);
// Prepare the triangle coordinate data
GLES20.glVertexAttribPointer(positionHandle, VERTEX_POS_SIZE,
GLES20.GL_FLOAT, false,
VERTEX_ATTRIB_SIZE * 4, vertexDataBuffer);
colorHandle = GLES20.glGetAttribLocation(mProgram, "vColor");
GLES20.glEnableVertexAttribArray(colorHandle);
GLES20.glVertexAttribPointer(colorHandle, COLOR_SIZE,
GLES20.GL_FLOAT, false,
COLOR_ATTRIB_SIZE * 4, colorDataBuffer);
// Pass the projection and view transformation to the shader
GLES20.glUniformMatrix4fv(mMVPMatrixHandle, 1, false, mvpMatrix, 0);
// Draw the triangle
GLES20.glDrawArrays(GLES20.GL_TRIANGLES, 0, VERTEX_COUNT);
// Disable vertex array
GLES20.glDisableVertexAttribArray(positionHandle);
GLES20.glDisableVertexAttribArray(colorHandle);
}
}

OpenGL double buffering does not work on Ubuntu

I'm testing out several of Sumanta Guha's code samples and something isn't working quite right... All of the sample code where he uses GLUT_DOUBLE and glutSwapBuffers() does not work on my Ubuntu machine, but works on my Windows machine. More precisely, the window that pops up simply shows whatever is behind it.
I've had a similar issue before on Windows, where glFlush and single buffering didn't work, but now this is happening on Linux, where double buffering and glutSwapBuffers do not work. Any idea as to what may be causing this?
Here is a sample of the code that I tried. It compiles fine; I just get a window that shows the background behind it.
///////////////////////////////////////////////////////////////////////////////////////////////////////
// loadTextures.cpp
//
// This stripped-down program shows how to load both external and program-generated images as textures.
//
// NOTE: The Textures folder must be in the same one as this program.
//
// Interaction:
// Press the left and right arrow keys to rotate the square.
// Press space to toggle between textures.
// Press delete to reset.
//
// Sumanta Guha
///////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////
// TEXTURE CREDITS:
// canLabel.bmp, thanks anonymous.
// canTop.bmp, thanks www.acoustica.com.
// cray2.bmp, thanks NASA website www.nasa.gov.
// grass.bmp, thanks www.amazingtextures.com.
// launch.bmp, thanks NASA website www.nasa.gov.
// nightsky.bmp, thanks anonymous.
// sky.bmp, thanks www.mega-tex.nl.
// trees.bmp, thanks anonymous.
////////////////////////////////////////////////
#include <cstdlib>
#include <iostream>
#include <fstream>
#ifdef __APPLE__
# include <GLUT/glut.h>
# include <OpenGL/glext.h>
#else
# include <GL/glut.h>
# include <GL/glext.h>
#endif
using namespace std;
// Globals.
static unsigned int texture[2]; // Array of texture indices.
static unsigned char chessboard[64][64][3]; // Storage for chessboard image.
static float angle = 0.0; // Angle to rotate textured square.
static int id = 0; // Currently displayed texture id.
// Struct of bitmap file.
struct BitMapFile
{
int sizeX;
int sizeY;
unsigned char *data;
};
// Routine to read a bitmap file.
// Works only for uncompressed bmp files of 24-bit color.
BitMapFile *getBMPData(string filename)
{
BitMapFile *bmp = new BitMapFile;
unsigned int size, offset, headerSize;
// Read input file name.
ifstream infile(filename.c_str(), ios::binary);
// Get the starting point of the image data.
infile.seekg(10);
infile.read((char *) &offset, 4);
// Get the header size of the bitmap.
infile.read((char *) &headerSize,4);
// Get width and height values in the bitmap header.
infile.seekg(18);
infile.read( (char *) &bmp->sizeX, 4);
infile.read( (char *) &bmp->sizeY, 4);
// Allocate buffer for the image.
size = bmp->sizeX * bmp->sizeY * 24;
bmp->data = new unsigned char[size];
// Read bitmap data.
infile.seekg(offset);
infile.read((char *) bmp->data , size);
// Reverse color from bgr to rgb.
int temp;
for (int i = 0; i < size; i += 3)
{
temp = bmp->data[i];
bmp->data[i] = bmp->data[i+2];
bmp->data[i+2] = temp;
}
return bmp;
}
// Load external textures.
void loadExternalTextures()
{
// Local storage for bmp image data.
BitMapFile *image[1];
// Load the texture.
image[0] = getBMPData("Textures/launch.bmp");
// Activate texture index texture[0].
glBindTexture(GL_TEXTURE_2D, texture[0]);
// Set texture parameters for wrapping.
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
// Set texture parameters for filtering.
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
// Specify an image as the texture to be bound with the currently active texture index.
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, image[0]->sizeX, image[0]->sizeY, 0,
GL_RGB, GL_UNSIGNED_BYTE, image[0]->data);
}
// Routine to load a program-generated image as a texture.
void loadProceduralTextures()
{
// Activate texture index texture[1].
glBindTexture(GL_TEXTURE_2D, texture[1]);
// Set texture parameters for wrapping.
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
// Set texture parameters for filtering.
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
// Specify an image as the texture to be bound with the currently active texture index.
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, 64, 64, 0, GL_RGB, GL_UNSIGNED_BYTE, chessboard);
}
// Create 64 x 64 RGB image of a chessboard.
void createChessboard(void)
{
int i, j;
for (i = 0; i < 64; i++)
for (j = 0; j < 64; j++)
if ( ( ((i/8)%2) && ((j/8)%2) ) || ( !((i/8)%2) && !((j/8)%2) ) )
{
chessboard[i][j][0] = 0x00;
chessboard[i][j][1] = 0x00;
chessboard[i][j][2] = 0x00;
}
else
{
chessboard[i][j][0] = 0xFF;
chessboard[i][j][1] = 0xFF;
chessboard[i][j][2] = 0xFF;
}
}
// Initialization routine.
void setup(void)
{
glClearColor(0.8, 0.8, 0.8, 0.0);
// Create texture index array.
glGenTextures(2, texture);
// Load external texture and generate and load procedural texture.
loadExternalTextures();
createChessboard();
loadProceduralTextures();
// Turn on OpenGL texturing.
glEnable(GL_TEXTURE_2D);
// Specify how texture values combine with current surface color values.
glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_REPLACE);
}
// Drawing routine.
void drawScene(void)
{
glClear(GL_COLOR_BUFFER_BIT);
glLoadIdentity();
gluLookAt(0.0, 0.0, 20.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0);
glRotatef(angle, 0.0, 1.0, 0.0);
// Activate a texture.
glBindTexture(GL_TEXTURE_2D, texture[id]);
// Map the texture onto a square polygon.
glBegin(GL_POLYGON);
glTexCoord2f(0.0, 0.0); glVertex3f(-10.0, -10.0, 0.0);
glTexCoord2f(1.0, 0.0); glVertex3f(10.0, -10.0, 0.0);
glTexCoord2f(1.0, 1.0); glVertex3f(10.0, 10.0, 0.0);
glTexCoord2f(0.0, 1.0); glVertex3f(-10.0, 10.0, 0.0);
glEnd();
glutSwapBuffers();
}
// OpenGL window reshape routine.
void resize(int w, int h)
{
glViewport(0, 0, (GLsizei)w, (GLsizei)h);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
glFrustum(-5.0, 5.0, -5.0, 5.0, 5.0, 100.0);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
}
// Keyboard input processing routine.
void keyInput(unsigned char key, int x, int y)
{
switch(key)
{
case 27:
exit(0);
break;
case ' ':
id++;
if (id == 2) id = 0;
glutPostRedisplay();
break;
case 127:
angle = 0.0;
glutPostRedisplay();
break;
default:
break;
}
}
// Callback routine for non-ASCII key entry.
void specialKeyInput(int key, int x, int y)
{
if (key == GLUT_KEY_LEFT)
{
angle -= 5.0;
if (angle < 0.0) angle += 360.0;
}
if (key == GLUT_KEY_RIGHT)
{
angle += 5.0;
if (angle > 360.0) angle -= 360.0;
}
glutPostRedisplay();
}
// Routine to output interaction instructions to the C++ window.
void printInteraction(void)
{
cout << "Interaction:" << endl;
cout << "Press the left and right arrow keys to rotate the square." << endl
<< "Press space to toggle between textures." << endl
<< "Press delete to reset." << endl;
}
// Main routine.
int main(int argc, char **argv)
{
printInteraction();
glutInit(&argc, argv);
glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGB);
glutInitWindowSize(500, 500);
glutInitWindowPosition(100, 100);
glutCreateWindow("loadTextures.cpp");
setup();
glutDisplayFunc(drawScene);
glutReshapeFunc(resize);
glutKeyboardFunc(keyInput);
glutSpecialFunc(specialKeyInput);
glutMainLoop();
return 0;
}

Loading Textures in Init()

I'm trying to texture a skybox. The textures load fine with ImageLoader::createJPG, which loads a texture file in the proper format, and my code textures the polygons just fine if I load the textures in every display loop (which is obviously an awful performance hit).
But when I put the same code in init() so they are loaded only once, display() acts as if they don't exist (I just end up with a cube of the default colour).
GLuint skyFront; etc. are declared before anything else at the top of the file, just after the #includes.
No amount of enables or texParameter calls seems to make init() load the textures. Any ideas?
GLuint skyTop;
GLuint skyFront;
GLuint skyBack;
GLuint skyBottom;
GLuint skyLeft;
GLuint skyRight;
void display()
{
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
glTranslatef(0,0,-0.6);
glTranslatef(0, -0.4, -1);
glPushMatrix();
glDisable(GL_DEPTH_TEST);
glDisable(GL_LIGHTING);
glDisable(GL_BLEND);
//glEnable(GL_LIGHT0);
//glTranslatef(camPos.x, camPos.y,camPos.z);
//glDepthMask(GL_FALSE);
glDisable(GL_CULL_FACE);
//draw skybox
glLoadIdentity();
glEnable(GL_TEXTURE_2D);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_REPLACE);
glColor3f(0.0, 1.0, 0.0);
glTranslatef(0.0,-2.0,-20.0);
GLuint skyTop = ImageLoader::createJPG("../TestModels/top.jpg");
GLuint skyBottom = ImageLoader::createJPG("../TestModels/bottom.jpg");
GLuint skyLeft = ImageLoader::createJPG("../TestModels/left.jpg");
GLuint skyFront = ImageLoader::createJPG("../TestModels/front.jpg");
GLuint skyRight = ImageLoader::createJPG("../TestModels/right.jpg");
GLuint skyBack = ImageLoader::createJPG("../TestModels/back.jpg");
//FRONT
glBindTexture(GL_TEXTURE_2D, skyFront);
glBegin(GL_QUADS);
glTexCoord2f(0, 0); glVertex3f(-SKYBOXSIZE, SKYBOXSIZE, -SKYBOXSIZE); //A
glTexCoord2f(0, 1); glVertex3f(-SKYBOXSIZE, -SKYBOXSIZE, -SKYBOXSIZE); //B
glTexCoord2f(1, 1); glVertex3f(SKYBOXSIZE, -SKYBOXSIZE, -SKYBOXSIZE); //C
glTexCoord2f(1, 0); glVertex3f(SKYBOXSIZE, SKYBOXSIZE, -SKYBOXSIZE); //D
glEnd();
glBindTexture(GL_TEXTURE_2D, skyLeft);
//LEFT
glBegin(GL_QUADS);
glTexCoord2f(1, 1); glVertex3f(SKYBOXSIZE, SKYBOXSIZE, -SKYBOXSIZE); //D
glTexCoord2f(0, 1); glVertex3f(SKYBOXSIZE, -SKYBOXSIZE, -SKYBOXSIZE); //C
glTexCoord2f(0, 0); glVertex3f(SKYBOXSIZE, -SKYBOXSIZE, SKYBOXSIZE); //E
glTexCoord2f(1, 0); glVertex3f(SKYBOXSIZE, SKYBOXSIZE, SKYBOXSIZE); //F
glEnd();
etc. etc.
edit:
init() code looks like this:
void init()
{
glewInit();
GLuint skyFront = ImageLoader::createJPG("../TestModels/front.jpg"); //load the texture, damnit
glClearColor(0.0,0.0,0.0,0.0);
glShadeModel(GL_SMOOTH);
//light position and colour
GLfloat light_position[] = { 0.0, 0.0, 20.0,0.0 };
GLfloat white_light[] = {0.8,0.8,0.8,0.0};
GLfloat diff_light[] = {1.0,1.0,1.0,0.0};
GLfloat spec_light[] = {1.0,1.0,1.0,0.0};
glLightfv(GL_LIGHT0, GL_AMBIENT, white_light);
glLightfv(GL_LIGHT0, GL_DIFFUSE, diff_light);
glLightfv(GL_LIGHT0, GL_SPECULAR, spec_light);
glLightfv(GL_LIGHT0, GL_POSITION, light_position);
//ambient light
GLfloat ambient[] = {0.3,0.3,0.3};
glMaterialfv(GL_FRONT, GL_AMBIENT, ambient);
//diffuse material component
GLfloat diff[] = {0.6,0.6,0.6};
glMaterialfv(GL_FRONT, GL_DIFFUSE, diff);
//specular material component
GLfloat WhiteSpec[] = {1,1,1};
glMaterialfv(GL_FRONT, GL_SPECULAR, WhiteSpec);
GLfloat shininess = 50;
glMaterialf(GL_FRONT, GL_SHININESS, shininess);
//ENABLE LIGHTING AND DEPTH TEST
glEnable(GL_LIGHTING);
glEnable(GL_LIGHT0);
glEnable(GL_DEPTH_TEST);
cout << " loading model " << endl;
if(objLoader.loadModel("../TestModels/hummer.obj", model))//returns true if the model is loaded, puts the model in the model parameter
{
cout << " model loaded " << endl;
//if you want to translate the object to the origin of the screen,
//first calculate the centre of the object, then move all the vertices
//back so that the centre is on the origin.
model.calcCentrePoint();
model.centreOnZero();
model.calcVertNormalsUsingOctree(); //the method will construct the octree if it hasn't already been created.
//turn on VBO by setting useVBO to true in 3dmodel.cpp default constructor - only permitted on 8 series cards and higher
if(!model.useImmediateMode || model.useVBO)
{
model.initDrawElements();
}
if(model.useVBO)
{
model.initVBO();
model.deleteVertexFaceData();
}
}
else
{
cout << " model failed to load " << endl;
}
}
And all the rest of the GL commands go through, and even the model I load works fine. The texParameter calls and so on are defined in ImageLoader, yes.
These are global:
GLuint skyTop;
GLuint skyFront;
GLuint skyBack;
GLuint skyBottom;
GLuint skyLeft;
GLuint skyRight;
But this sets a local:
void init()
{
glewInit();
GLuint skyFront = ImageLoader::createJPG("../TestModels/front.jpg"); //load the texture, damnit
You need to stop re-declaring the skyFront variable and assign to the global instead.
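A minimal sketch of how the start of init() would change (only the texture-loading lines are shown; the file paths are the ones already used in display() above, and the rest of init() stays as in the question):
void init()
{
    glewInit();
    // Assign to the existing globals; writing "GLuint skyFront = ..." here creates
    // locals that shadow them, and the loaded texture IDs are lost when init() returns.
    skyFront  = ImageLoader::createJPG("../TestModels/front.jpg");
    skyBack   = ImageLoader::createJPG("../TestModels/back.jpg");
    skyLeft   = ImageLoader::createJPG("../TestModels/left.jpg");
    skyRight  = ImageLoader::createJPG("../TestModels/right.jpg");
    skyTop    = ImageLoader::createJPG("../TestModels/top.jpg");
    skyBottom = ImageLoader::createJPG("../TestModels/bottom.jpg");
    // ... rest of the original init() (lighting, materials, model loading) ...
}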

Any GLES examples, in C++, on x86 Linux?

I'm looking for a good source of GLES2 samples for C++ (or C) on x86 Linux with Xorg.
The samples I can find are all in Objective C for iOS, or Java for Android, or JavaScript for WebGL.
The Khronos web site has a "tutorials" section that contains two lines saying "our tutorials index will go here." Given that GLES2 is 5 years old, I don't have much hope of a sudden surge of content there.
I already know OpenGL pretty well. I'd just like some convenient source for copy-and-paste context set-up code, really. Where can I find something like that?
Mesa demos!
http://cgit.freedesktop.org/mesa/demos
http://cgit.freedesktop.org/mesa/demos/tree/src/egl/opengles2
http://cgit.freedesktop.org/mesa/demos/tree/src/egl/opengles2/es2tri.c
GLFW, Mesa, Ubuntu 16.04 AMD64
I'm not sure if GLUT supports GLES, but GLFW does, greatly simplifying window management.
sudo apt-get install libglfw3-dev libgles2-mesa-dev
gcc glfw_triangle.c -lGLESv2 -lglfw
Output:
Source:
#include <stdio.h>
#include <stdlib.h>
#define GLFW_INCLUDE_ES2
#include <GLFW/glfw3.h>
static const GLuint WIDTH = 800;
static const GLuint HEIGHT = 600;
static const GLchar* vertex_shader_source =
"#version 100\n"
"attribute vec3 position;\n"
"void main() {\n"
" gl_Position = vec4(position, 1.0);\n"
"}\n";
static const GLchar* fragment_shader_source =
"#version 100\n"
"void main() {\n"
" gl_FragColor = vec4(1.0, 0.0, 0.0, 1.0);\n"
"}\n";
static const GLfloat vertices[] = {
0.0f, 0.5f, 0.0f,
0.5f, -0.5f, 0.0f,
-0.5f, -0.5f, 0.0f,
};
GLint common_get_shader_program(const char *vertex_shader_source, const char *fragment_shader_source) {
enum Consts {INFOLOG_LEN = 512};
GLchar infoLog[INFOLOG_LEN];
GLint fragment_shader;
GLint shader_program;
GLint success;
GLint vertex_shader;
/* Vertex shader */
vertex_shader = glCreateShader(GL_VERTEX_SHADER);
glShaderSource(vertex_shader, 1, &vertex_shader_source, NULL);
glCompileShader(vertex_shader);
glGetShaderiv(vertex_shader, GL_COMPILE_STATUS, &success);
if (!success) {
glGetShaderInfoLog(vertex_shader, INFOLOG_LEN, NULL, infoLog);
printf("ERROR::SHADER::VERTEX::COMPILATION_FAILED\n%s\n", infoLog);
}
/* Fragment shader */
fragment_shader = glCreateShader(GL_FRAGMENT_SHADER);
glShaderSource(fragment_shader, 1, &fragment_shader_source, NULL);
glCompileShader(fragment_shader);
glGetShaderiv(fragment_shader, GL_COMPILE_STATUS, &success);
if (!success) {
glGetShaderInfoLog(fragment_shader, INFOLOG_LEN, NULL, infoLog);
printf("ERROR::SHADER::FRAGMENT::COMPILATION_FAILED\n%s\n", infoLog);
}
/* Link shaders */
shader_program = glCreateProgram();
glAttachShader(shader_program, vertex_shader);
glAttachShader(shader_program, fragment_shader);
glLinkProgram(shader_program);
glGetProgramiv(shader_program, GL_LINK_STATUS, &success);
if (!success) {
glGetProgramInfoLog(shader_program, INFOLOG_LEN, NULL, infoLog);
printf("ERROR::SHADER::PROGRAM::LINKING_FAILED\n%s\n", infoLog);
}
glDeleteShader(vertex_shader);
glDeleteShader(fragment_shader);
return shader_program;
}
int main(void) {
GLuint shader_program, vbo;
GLint pos;
GLFWwindow* window;
glfwInit();
glfwWindowHint(GLFW_CLIENT_API, GLFW_OPENGL_ES_API);
glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 2);
glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 0);
window = glfwCreateWindow(WIDTH, HEIGHT, __FILE__, NULL, NULL);
glfwMakeContextCurrent(window);
printf("GL_VERSION : %s\n", glGetString(GL_VERSION) );
printf("GL_RENDERER : %s\n", glGetString(GL_RENDERER) );
shader_program = common_get_shader_program(vertex_shader_source, fragment_shader_source);
pos = glGetAttribLocation(shader_program, "position");
glClearColor(0.0f, 0.0f, 0.0f, 1.0f);
glViewport(0, 0, WIDTH, HEIGHT);
glGenBuffers(1, &vbo);
glBindBuffer(GL_ARRAY_BUFFER, vbo);
glBufferData(GL_ARRAY_BUFFER, sizeof(vertices), vertices, GL_STATIC_DRAW);
glVertexAttribPointer(pos, 3, GL_FLOAT, GL_FALSE, 0, (GLvoid*)0);
glEnableVertexAttribArray(pos);
glBindBuffer(GL_ARRAY_BUFFER, 0);
while (!glfwWindowShouldClose(window)) {
glfwPollEvents();
glClear(GL_COLOR_BUFFER_BIT);
glUseProgram(shader_program);
glDrawArrays(GL_TRIANGLES, 0, 3);
glfwSwapBuffers(window);
}
glDeleteBuffers(1, &vbo);
glfwTerminate();
return EXIT_SUCCESS;
}
The key lines of code are:
#define GLFW_INCLUDE_ES2
#include <GLFW/glfw3.h>
GLFW_INCLUDE_ES2 is documented at: http://www.glfw.org/docs/latest/build_guide.html#build_macros and a quick look at the source shows that it forwards to GLES:
#elif defined(GLFW_INCLUDE_ES2)
#include <GLES2/gl2.h>
#if defined(GLFW_INCLUDE_GLEXT)
#include <GLES2/gl2ext.h>
#endif
This source seems to be in the common subset of GLES and OpenGL (like much of GLES), and it also compiles with -lGL if we remove the #define GLFW_INCLUDE_ES2.
If we add things which are not in GLES, like immediate-mode rendering with glBegin, the link fails as expected.
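For reference, the desktop-GL build of that same file would then be (with the #define GLFW_INCLUDE_ES2 line removed):
gcc glfw_triangle.c -lGL -lglfw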
See also: How to develop OpenGL ES (GLES) 2.0 applications on Linux?
Credits: genpfult made the code much more correct.
ARM Mali OpenGL ES SDK
download from: http://malideveloper.arm.com/resources/sdks/opengl-es-sdk-for-linux/
open the documentation HTML on a browser
follow the "Quick Start Guide", it's simple
Contains several interesting open source examples + windowing system boilerplate (X11 + EGL).
The build system supports easy cross compilation for ARM / Mali SoCs, but I haven't tested that yet.
The key component included seems to be the "OpenGL ES Emulator" http://malideveloper.arm.com/resources/tools/opengl-es-emulator/ which "maps OpenGL ES 3.2 API calls to the OpenGL API". But that does not ship with source, only precompiled.
Uses a custom enterprisey EULA that appears to be permissive, but yeah, ask your lawyer.
Tested on SDK v2.4.4.
