3ds object is not visible (beginner)

As part of my project I need to draw some 3D models.
I don't have time to learn the 3DS file format and how to load it,
which looks pretty complicated to me,
so I found some loading functions on the web,
but the object won't render (or maybe it renders but isn't visible).
Here is the relevant source code:
void initGL(int *argc, char **argv)
{
glutInit(argc, argv);
glutInitDisplayMode(GLUT_RGB | GLUT_DEPTH | GLUT_DOUBLE);
glutInitWindowSize(width, height);
glutCreateWindow("CUDA Particle System");
GLenum err = glewInit();
if (GLEW_OK != err)
{
printf ("Error: openGL is not supported");
exit (0);
}
glEnable(GL_DEPTH_TEST);
glEnable (GL_CULL_FACE);
glCullFace(GL_FRONT);
glClearColor( 0.7, 0.7 , 0.9 , 0 );
glutReportErrors();
glFrustum(-1,1,-1,1,0.3,100);
Chesspawn.load( &chesspawn ,"chesspawn.3DS");
/*glClear (GL_COLOR_BUFFER_BIT);
glColor3f ( 0.7, 0.7 , 0.9 );
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
glOrtho(-1,1,-1,1,-1,1);
gluLookAt(0,0,0,0,0,-100,0,1,0);
glViewport (250, 250, width, height);*/
}
void display()
{
sdkStartTimer(&timer.timer);
p_system->update(time_Step);
//set Positions vbo
renderer->setVertexBuffer(p_system->getPosVbo(),p_system->getNumParticles());
//render
glClear(GL_COLOR_BUFFER_BIT|GL_DEPTH_BUFFER_BIT);
// view transform
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
glGetFloatv(GL_MODELVIEW_MATRIX, modelView);
gluLookAt (Params_Camera[CAMERA_X], Params_Camera[CAMERA_Y], Params_Camera[CAMERA_Z], // camera
Params_Camera[CAMERA_X] + Params_Camera[DIR_X], Params_Camera[CAMERA_Y] , Params_Camera[CAMERA_Z] + Params_Camera[DIR_Z], // center
0, 1, 0 // up
);
renderer->display();
drawFloor();
glScaled (0.001,0.001,1);
Chesspawn.render(&chesspawn);
//glutSolidSphere (0.5,50,50);
/*glTranslated (0,0,-10);
glutSolidSphere(0.1,20,20);*/
sdkStopTimer(&timer.timer);
glutSwapBuffers();
glutReportErrors();
char sFps[256];
fps = timer.computeFps();
sprintf(sFps, " %s: %d particles %3.1f fps",proc, numParticles, fps);
if (fps <24.0)
printf ("Slow fps : %f \n", fps);
glutSetWindowTitle(sFps);
}
and these are the load and render functions:
static char C_3dsObject::load (obj_type_ptr p_object, char *p_filename)
{
int i; //Index variable
int File_Length = 0;
FILE *l_file; //File pointer
unsigned short l_chunk_id; //Chunk identifier
unsigned int l_chunk_lenght; //Chunk length
unsigned char l_char; //Char variable
unsigned short l_qty; //Number of elements in each chunk
unsigned short l_face_flags; //Flag that stores some face information
if ((l_file=fopen (p_filename, "rb"))== NULL)
return 0; //Open the file
fseek (l_file, 0, SEEK_END); // get file length
File_Length = ftell (l_file);
fseek(l_file, 0L, SEEK_SET);
while (ftell (l_file) < File_Length ) //Loop to scan the whole file
{
//getche(); //Insert this command for debug (to wait for a keypress for each chunk read)
fread (&l_chunk_id, 2, 1, l_file); //Read the chunk header
//printf("ChunkID: %x\n",l_chunk_id);
fread (&l_chunk_lenght, 4, 1, l_file); //Read the length of the chunk
//printf("ChunkLength: %x\n",l_chunk_lenght);
switch (l_chunk_id)
{
//----------------- MAIN3DS -----------------
// Description: Main chunk, contains all the other chunks
// Chunk ID: 4d4d
// Chunk Length: 0 + sub chunks
//-------------------------------------------
case 0x4d4d:
break;
//----------------- EDIT3DS -----------------
// Description: 3D Editor chunk, objects layout info
// Chunk ID: 3d3d (hex)
// Chunk Length: 0 + sub chunks
//-------------------------------------------
case 0x3d3d:
break;
//--------------- EDIT_OBJECT ---------------
// Description: Object block, info for each object
// Chunk ID: 4000 (hex)
// Chunk Length: len(object name) + sub chunks
//-------------------------------------------
case 0x4000:
i=0;
do
{
fread (&l_char, 1, 1, l_file);
p_object->name[i]=l_char;
i++;
}while(l_char != '\0' && i<20);
break;
//--------------- OBJ_TRIMESH ---------------
// Description: Triangular mesh, contains chunks for 3d mesh info
// Chunk ID: 4100 (hex)
// Chunk Length: 0 + sub chunks
//-------------------------------------------
case 0x4100:
break;
//--------------- TRI_VERTEXL ---------------
// Description: Vertices list
// Chunk ID: 4110 (hex)
// Chunk Length: 1 x unsigned short (number of vertices)
// + 3 x float (vertex coordinates) x (number of vertices)
// + sub chunks
//-------------------------------------------
case 0x4110:
fread (&l_qty, sizeof (unsigned short), 1, l_file);
p_object->vertices_qty = l_qty;
//printf("Number of vertices: %d\n",l_qty);
for (i=0; i<l_qty; i++)
{
fread (&p_object->vertex[i].x, sizeof(float), 1, l_file);
//printf("Vertices list x: %f\n",p_object->vertex[i].x);
fread (&p_object->vertex[i].y, sizeof(float), 1, l_file);
//printf("Vertices list y: %f\n",p_object->vertex[i].y);
fread (&p_object->vertex[i].z, sizeof(float), 1, l_file);
//printf("Vertices list z: %f\n",p_object->vertex[i].z);
//Insert into the database
}
break;
//--------------- TRI_FACEL1 ----------------
// Description: Polygons (faces) list
// Chunk ID: 4120 (hex)
// Chunk Length: 1 x unsigned short (number of polygons)
// + 3 x unsigned short (polygon points) x (number of polygons)
// + sub chunks
//-------------------------------------------
case 0x4120:
fread (&l_qty, sizeof (unsigned short), 1, l_file);
p_object->polygons_qty = l_qty;
//printf("Number of polygons: %d\n",l_qty);
for (i=0; i<l_qty; i++)
{
fread (&p_object->polygon[i].a, sizeof (unsigned short), 1, l_file);
//printf("Polygon point a: %d\n",p_object->polygon[i].a);
fread (&p_object->polygon[i].b, sizeof (unsigned short), 1, l_file);
//printf("Polygon point b: %d\n",p_object->polygon[i].b);
fread (&p_object->polygon[i].c, sizeof (unsigned short), 1, l_file);
//printf("Polygon point c: %d\n",p_object->polygon[i].c);
fread (&l_face_flags, sizeof (unsigned short), 1, l_file);
//printf("Face flags: %x\n",l_face_flags);
}
break;
//------------- TRI_MAPPINGCOORS ------------
// Description: Mapping coordinates list
// Chunk ID: 4140 (hex)
// Chunk Length: 1 x unsigned short (number of mapping points)
// + 2 x float (mapping coordinates) x (number of mapping points)
// + sub chunks
//-------------------------------------------
//----------- Skip unknown chunks ------------
//We need to skip all the chunks that we don't currently use
//We use the chunk length information to set the file pointer
//to the next chunk at the same level
//-------------------------------------------
default:
fseek(l_file, l_chunk_lenght-6, SEEK_CUR);
}
}
fclose (l_file); // Closes the file stream
return (1); // Returns ok
}
void render (obj_type_ptr object)
{
int l_index;
//glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
//glMatrixMode(GL_MODELVIEW); // Modeling transformation
glLoadIdentity();
//glTranslatef(0.0,0.0,-500.0);
glColor3d(1,1,0);
// Rotations of the object (the model matrix is multiplied by the rotation matrices)
glRotatef(rotation_x,1.0,0.0,0.0);
glRotatef(rotation_y,0.0,1.0,0.0);
glRotatef(rotation_z,0.0,0.0,1.0);
// TODO: Add your message handler code here
//rotation_x = rotation_x + rotation_x_increment; /////
//rotation_y = rotation_y + rotation_y_increment;
//rotation_z = rotation_z + rotation_z_increment;
if (rotation_x > 359) rotation_x = 0;
if (rotation_y > 359) rotation_y = 0;
if (rotation_z > 359) rotation_z = 0;
// glBegin and glEnd delimit the vertices that define a primitive (in our case triangles)
glBegin(GL_TRIANGLES);
for (l_index=0;l_index < object->polygons_qty;l_index++)
{
//----------------- FIRST VERTEX -----------------
// Coordinates of the first vertex
glVertex3f( object->vertex[ object->polygon[l_index].a ].x,
object->vertex[ object->polygon[l_index].a ].y,
object->vertex[ object->polygon[l_index].a ].z);
//Vertex definition
//----------------- SECOND VERTEX -----------------
// Coordinates of the second vertex
//float x= object.vertex[ object.polygon[l_index].b ].x;
glVertex3f( object->vertex[ object->polygon[l_index].b ].x,
object->vertex[ object->polygon[l_index].b ].y,
object->vertex[ object->polygon[l_index].b ].z);
//----------------- THIRD VERTEX -----------------
// Coordinates of the Third vertex
glVertex3f( object->vertex[ object->polygon[l_index].c ].x,
object->vertex[ object->polygon[l_index].c ].y,
object->vertex[ object->polygon[l_index].c ].z);
}
glEnd();
glutSwapBuffers();
}
What I have tried so far:
Debugging and checking whether the vertices have valid values:
for the 3DS file that came with the code - it seems they do;
for other 3DS files I found - they don't.
Scaling the object up and down - no result.
Replacing all the code inside render() with glutSolidSphere(0.5,50,50) - the sphere is visible.
As you can see in render(), I disabled glClear() and glMatrixMode();
that is because I think they are not needed,
and when enabled they make all the other objects disappear without the 3DS object becoming visible.
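One small diagnostic, not from the original post, that may help with the scaling experiment above: print the bounding box of the mesh right after Chesspawn.load() and compare it against the glFrustum(-1,1,-1,1,0.3,100) volume set in initGL(). The sketch below only uses the vertex[] and vertices_qty fields that load() above fills in; if the extents turn out to be hundreds of units, the model is simply clipped or behind the camera.
// Sketch: print the model's bounding box after loading it.
float minv[3] = {  1e30f,  1e30f,  1e30f };
float maxv[3] = { -1e30f, -1e30f, -1e30f };
for (int i = 0; i < chesspawn.vertices_qty; ++i)
{
    float p[3] = { chesspawn.vertex[i].x, chesspawn.vertex[i].y, chesspawn.vertex[i].z };
    for (int k = 0; k < 3; ++k)
    {
        if (p[k] < minv[k]) minv[k] = p[k];
        if (p[k] > maxv[k]) maxv[k] = p[k];
    }
}
printf("model bounds: x [%g, %g]  y [%g, %g]  z [%g, %g]\n",
       minv[0], maxv[0], minv[1], maxv[1], minv[2], maxv[2]);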
My questions are:
Does load() look correct?
Are there other versions of the 3DS format that do not fit this load() function?
Is there any OpenGL setting I could change in order to see the object?
Do you know another 3D model format that is simpler to load, yet as widely distributed and easy to find as 3DS?

Do you know another 3D model format that is simpler to load, yet as widely distributed and easy to find as 3DS?
To load objects from 3D Studio Max, you can try the Wavefront OBJ format (there is an option in 3ds Max to export to OBJ). It is a human-readable format with triplets of vertex positions, vertex normals and vertex texture coordinates, which can easily be parsed and passed to GLUT/OpenGL for rendering.
I wrote some code for loading 3D models, source available here.
It parses the OBJ files and stores the vertices, texture coordinates and normals in arrays which are passed to a GLUT function for rendering.
More about the OBJ format here.
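As a rough illustration of the idea (not the linked code itself): a minimal OBJ loader that reads only the "v" and "f" lines of a triangulated file and expands the faces into a flat triangle list. loadObj and Vec3 are made-up names; normals, texture coordinates and the "v//vn" face form are ignored for brevity, and face indices are assumed valid.
#include <cstdio>
#include <vector>

struct Vec3 { float x, y, z; };

bool loadObj(const char* path, std::vector<Vec3>& triangles)
{
    std::vector<Vec3> positions;
    FILE* f = fopen(path, "r");
    if (!f) return false;
    char line[256];
    while (fgets(line, sizeof(line), f))
    {
        Vec3 v;
        unsigned a, b, c;
        if (sscanf(line, "v %f %f %f", &v.x, &v.y, &v.z) == 3)
            positions.push_back(v);                       // vertex position
        else if (sscanf(line, "f %u %u %u", &a, &b, &c) == 3 ||
                 sscanf(line, "f %u/%*u/%*u %u/%*u/%*u %u/%*u/%*u", &a, &b, &c) == 3)
        {
            triangles.push_back(positions[a - 1]);        // OBJ indices are 1-based
            triangles.push_back(positions[b - 1]);
            triangles.push_back(positions[c - 1]);
        }
    }
    fclose(f);
    return true;
}
The resulting triangle list can then be drawn the same way as the 3DS data above, either with glBegin(GL_TRIANGLES)/glVertex3f or through glVertexPointer/glDrawArrays.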

If you are a beginner at rendering 3D models, don't start with 3DS models; use the OBJ format. OBJ is simple and human readable, easy to understand, and easy to write your own parser for. You can learn from simple tutorials here and start writing your own parser.

Related

Is there a way to speed up an audio .wav file using some SDL_Mixer function?

I am making a simple game whose audio speed should increase as the player approaches the end of the level. I was wondering whether there is a way to do this using SDL_Mixer. If SDL_Mixer is not the way to go, could you please tell me how I could change the audio file itself to make it faster? I am working with an 8-bit .wav file with 2 channels at a sample rate of 22050 Hz.
According to this forum post: https://forums.libsdl.org/viewtopic.php?p=44663, you can use a different library called "SoLoud" to change the playback speed of your sounds on the fly. You can see more details on SoLoud here: http://sol.gfxile.net/soloud/. From what I can tell, you cannot do this using SDL2, and SoLoud seems easy enough to use, so that would be my suggestion.
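As a rough sketch of what the SoLoud route might look like (not from the answer above; the file name sound.wav and the 1.5x factor are placeholders, and SoLoud must be built with a working audio backend):
#include "soloud.h"
#include "soloud_wav.h"

int main()
{
    SoLoud::Soloud soloud;                   // SoLoud engine core
    SoLoud::Wav sample;                      // one loaded sound clip
    soloud.init();                           // initialize with the default backend
    sample.load("sound.wav");                // placeholder file name
    SoLoud::handle h = soloud.play(sample);
    soloud.setRelativePlaySpeed(h, 1.5f);    // 1.0 = normal; 1.5 = 50% faster (pitch rises too)
    // ... run your game loop, then:
    soloud.deinit();
    return 0;
}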
A few years back I was trying to achieve something very similar and, after a lot of web searching, I came up with this solution, involving the Mix_RegisterEffect function, which got close:
#include <SDL2/SDL.h>
#include <SDL2/SDL_mixer.h>
#include <iostream>
#include <cstdlib>
#include <cmath>
/* global vars */
Uint16 audioFormat; // current audio format constant
int audioFrequency, // frequency rate of the current audio format
audioChannelCount, // number of channels of the current audio format
audioAllocatedMixChannelsCount; // number of mix channels allocated
static inline Uint16 formatSampleSize(Uint16 format)
{
return (format & 0xFF) / 8;
}
// Get chunk time length (in ms) given its size and current audio format
static int computeChunkLengthMillisec(int chunkSize)
{
/* bytes / samplesize == sample points */
const Uint32 points = chunkSize / formatSampleSize(audioFormat);
/* sample points / channels == sample frames */
const Uint32 frames = (points / audioChannelCount);
/* (sample frames * 1000) / frequency == play length, in ms */
return ((frames * 1000) / audioFrequency);
}
// Custom handler object to control which part of the Mix_Chunk's audio data will be played, with which pitch-related modifications.
// This needed to be a template because the actual Mix_Chunk's data format may vary (AUDIO_U8, AUDIO_S16, etc) and the data type varies with it (Uint8, Sint16, etc)
// The AudioFormatType should be the data type that is compatible with the current SDL_mixer-initialized audio format.
template<typename AudioFormatType>
struct PlaybackSpeedEffectHandler
{
const AudioFormatType* const chunkData; // pointer to the chunk sample data (as array)
const float& speedFactor; // the playback speed factor
int position; // current position of the sound, in ms
const int duration; // the duration of the sound, in ms
const int chunkSize; // the size of the sound, as a number of indexes (or sample points). Think of this as an array size when using the proper array type (instead of just Uint8*).
const bool loop; // flags whether playback should stay looping
const bool attemptSelfHalting; // flags whether playback should be halted by this callback when playback is finished
bool altered; // true if this playback has been pitched by this handler
PlaybackSpeedEffectHandler(const Mix_Chunk& chunk, const float& speed, bool loop, bool trySelfHalt)
: chunkData(reinterpret_cast<AudioFormatType*>(chunk.abuf)), speedFactor(speed),
position(0), duration(computeChunkLengthMillisec(chunk.alen)),
chunkSize(chunk.alen / formatSampleSize(audioFormat)),
loop(loop), attemptSelfHalting(trySelfHalt), altered(false)
{}
// processing function to be able to change chunk speed/pitch.
void modifyStreamPlaybackSpeed(int mixChannel, void* stream, int length)
{
AudioFormatType* buffer = static_cast<AudioFormatType*>(stream);
const int bufferSize = length / sizeof(AudioFormatType); // buffer size (as array)
const int bufferDuration = computeChunkLengthMillisec(length); // buffer time duration
const float speedFactor = this->speedFactor; // take a "snapshot" of speed factor
// if there is still sound to be played
if(position < duration || loop)
{
// if playback is unaltered and pitch is required (for the first time)
if(!altered && speedFactor != 1.0f)
altered = true; // flags playback modification and proceed to the pitch routine.
if(altered) // if unaltered, this pitch routine is skipped
{
const float delta = 1000.0/audioFrequency, // normal duration of each sample
vdelta = delta*speedFactor; // virtual stretched duration, scaled by 'speedFactor'
for(int i = 0; i < bufferSize; i += audioChannelCount)
{
const int j = i/audioChannelCount; // j goes from 0 to size/channelCount, incremented 1 by 1
const float x = position + j*vdelta; // get "virtual" index. its corresponding value will be interpolated.
const int k = floor(x / delta); // get left index to interpolate from original chunk data (right index will be this plus 1)
const float proportion = (x / delta) - k; // get the proportion of the right value (left will be 1.0 minus this)
// usually just 2 channels: 0 (left) and 1 (right), but who knows...
for(int c = 0; c < audioChannelCount; c++)
{
// check if k will be within bounds
if(k*audioChannelCount + audioChannelCount - 1 < chunkSize || loop)
{
AudioFormatType leftValue = chunkData[( k * audioChannelCount + c) % chunkSize],
rightValue = chunkData[((k+1) * audioChannelCount + c) % chunkSize];
// put interpolated value on 'data' (linear interpolation)
buffer[i + c] = (1-proportion)*leftValue + proportion*rightValue;
}
else // if k will be out of bounds (chunk bounds), it means we already finished; thus, we'll pass silence
{
buffer[i + c] = 0;
}
}
}
}
// update position
position += bufferDuration * speedFactor; // this is not exact since a frame may play less than its duration when finished playing, but its simpler
// reset position if looping
if(loop) while(position > duration)
position -= duration;
}
else // if we already played the whole sound but finished earlier than expected by SDL_mixer (due to faster playback speed)
{
// set silence on the buffer since Mix_HaltChannel() poops out some of it for a few ms.
for(int i = 0; i < bufferSize; i++)
buffer[i] = 0;
if(attemptSelfHalting)
Mix_HaltChannel(mixChannel); // XXX unsafe call, since it locks audio; but no safer solution was found yet...
}
}
// Mix_EffectFunc_t callback that redirects to handler method (handler passed via userData)
static void mixEffectFuncCallback(int channel, void* stream, int length, void* userData)
{
static_cast<PlaybackSpeedEffectHandler*>(userData)->modifyStreamPlaybackSpeed(channel, stream, length);
}
// Mix_EffectDone_t callback that deletes the handler at the end of the effect usage (handler passed via userData)
static void mixEffectDoneCallback(int, void *userData)
{
delete static_cast<PlaybackSpeedEffectHandler*>(userData);
}
// function to register a handler to this channel for the next playback.
static void registerEffect(int channel, const Mix_Chunk& chunk, const float& speed, bool loop, bool trySelfHalt)
{
Mix_RegisterEffect(channel, mixEffectFuncCallback, mixEffectDoneCallback, new PlaybackSpeedEffectHandler(chunk, speed, loop, trySelfHalt));
}
};
// Register playback speed effect handler according to the current audio format; effect valid for a single playback; if playback is looped, lasts until it's halted
void setupPlaybackSpeedEffect(const Mix_Chunk* const chunk, const float& speed, int channel, bool loop=false, bool trySelfHalt=false)
{
// select the register function for the current audio format and register the effect using the compatible handlers
// XXX is it correct to behave the same way to all S16 and U16 formats? Should we create case statements for AUDIO_S16SYS, AUDIO_S16LSB, AUDIO_S16MSB, etc, individually?
switch(audioFormat)
{
case AUDIO_U8: PlaybackSpeedEffectHandler<Uint8 >::registerEffect(channel, *chunk, speed, loop, trySelfHalt); break;
case AUDIO_S8: PlaybackSpeedEffectHandler<Sint8 >::registerEffect(channel, *chunk, speed, loop, trySelfHalt); break;
case AUDIO_U16: PlaybackSpeedEffectHandler<Uint16>::registerEffect(channel, *chunk, speed, loop, trySelfHalt); break;
default:
case AUDIO_S16: PlaybackSpeedEffectHandler<Sint16>::registerEffect(channel, *chunk, speed, loop, trySelfHalt); break;
case AUDIO_S32: PlaybackSpeedEffectHandler<Sint32>::registerEffect(channel, *chunk, speed, loop, trySelfHalt); break;
case AUDIO_F32: PlaybackSpeedEffectHandler<float >::registerEffect(channel, *chunk, speed, loop, trySelfHalt); break;
}
}
// example
// run the executable passing a filename of a sound file that SDL_mixer is able to open (ogg, wav, ...)
int main(int argc, char** argv)
{
if(argc < 2) { std::cout << "missing argument" << std::endl; return 0; }
SDL_Init(SDL_INIT_AUDIO);
Mix_OpenAudio(MIX_DEFAULT_FREQUENCY, MIX_DEFAULT_FORMAT, MIX_DEFAULT_CHANNELS, 4096);
Mix_QuerySpec(&audioFrequency, &audioFormat, &audioChannelCount); // query specs
audioAllocatedMixChannelsCount = Mix_AllocateChannels(MIX_CHANNELS);
float speed = 1.0;
Mix_Chunk* chunk = Mix_LoadWAV(argv[1]);
if(chunk != NULL)
{
const int channel = Mix_PlayChannelTimed(-1, chunk, -1, 8000);
setupPlaybackSpeedEffect(chunk, speed, channel, true);
// loop for 8 seconds, changing the pitch dynamically
while(SDL_GetTicks() < 8000)
speed = 1 + 0.25*sin(0.001*SDL_GetTicks());
}
else
std::cout << "no data" << std::endl;
Mix_FreeChunk(chunk);
Mix_CloseAudio();
Mix_Quit();
SDL_Quit();
return EXIT_SUCCESS;
}
While this works, it's not a perfect solution, since the result has some artifacts (crackling) in most cases, and I wasn't able to figure out why.
Here is a GitHub gist I created for this a while ago.

How to use alsa to play sounds simultaneously in c?

I'm using the ALSA library in C under Linux.
I'd like to load several WAV files and play them depending on some test conditions.
I'm using the following code, but it needs to be improved:
// A simple C example to play a mono or stereo, 16-bit 44KHz
// WAVE file using ALSA. This goes directly to the first
// audio card (ie, its first set of audio out jacks). It
// uses the snd_pcm_writei() mode of outputting waveform data,
// blocking.
//
// Compile as so to create "alsawave":
// gcc -o alsawave alsawave.c -lasound
//
// Run it from a terminal, specifying the name of a WAVE file to play:
// ./alsawave MyWaveFile.wav
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
// Include the ALSA .H file that defines ALSA functions/data
#include <alsa/asoundlib.h>
#pragma pack (1)
/////////////////////// WAVE File Stuff /////////////////////
// An IFF file header looks like this
typedef struct _FILE_head
{
unsigned char ID[4]; // could be {'R', 'I', 'F', 'F'} or {'F', 'O', 'R', 'M'}
unsigned int Length; // Length of subsequent file (including remainder of header). This is in
// Intel reverse byte order if RIFF, Motorola format if FORM.
unsigned char Type[4]; // {'W', 'A', 'V', 'E'} or {'A', 'I', 'F', 'F'}
} FILE_head;
// An IFF chunk header looks like this
typedef struct _CHUNK_head
{
unsigned char ID[4]; // 4 ascii chars that is the chunk ID
unsigned int Length; // Length of subsequent data within this chunk. This is in Intel reverse byte
// order if RIFF, Motorola format if FORM. Note: this doesn't include any
// extra byte needed to pad the chunk out to an even size.
} CHUNK_head;
// WAVE fmt chunk
typedef struct _FORMAT {
short wFormatTag;
unsigned short wChannels;
unsigned int dwSamplesPerSec;
unsigned int dwAvgBytesPerSec;
unsigned short wBlockAlign;
unsigned short wBitsPerSample;
// Note: there may be additional fields here, depending upon wFormatTag
} FORMAT;
#pragma pack()
// Size of the audio card hardware buffer. Here we want it
// set to 1024 16-bit sample points. This is relatively
// small in order to minimize latency. If you have trouble
// with underruns, you may need to increase this, and PERIODSIZE
// (trading off lower latency for more stability)
#define BUFFERSIZE (2*1024)
// How many sample points the ALSA card plays before it calls
// our callback to fill some more of the audio card's hardware
// buffer. Here we want ALSA to call our callback after every
// 64 sample points have been played
#define PERIODSIZE (2*64)
// Handle to ALSA (audio card's) playback port
snd_pcm_t *PlaybackHandle;
// Handle to our callback thread
snd_async_handler_t *CallbackHandle;
// Points to loaded WAVE file's data
unsigned char *WavePtr;
// Size (in frames) of loaded WAVE file's data
snd_pcm_uframes_t WaveSize;
// Sample rate
unsigned short WaveRate;
// Bit resolution
unsigned char WaveBits;
// Number of channels in the wave file
unsigned char WaveChannels;
// The name of the ALSA port we output to. In this case, we're
// directly writing to hardware card 0,0 (ie, first set of audio
// outputs on the first audio card)
static const char SoundCardPortName[] = "default";
// For WAVE file loading
static const unsigned char Riff[4] = { 'R', 'I', 'F', 'F' };
static const unsigned char Wave[4] = { 'W', 'A', 'V', 'E' };
static const unsigned char Fmt[4] = { 'f', 'm', 't', ' ' };
static const unsigned char Data[4] = { 'd', 'a', 't', 'a' };
/********************** compareID() *********************
* Compares the passed ID str (ie, a ptr to 4 Ascii
* bytes) with the ID at the passed ptr. Returns TRUE if
* a match, FALSE if not.
*/
static unsigned char compareID(const unsigned char * id, unsigned char * ptr)
{
register unsigned char i = 4;
while (i--)
{
if ( *(id)++ != *(ptr)++ ) return(0);
}
return(1);
}
/********************** waveLoad() *********************
* Loads a WAVE file.
*
* fn = Filename to load.
*
* RETURNS: 0 if success, non-zero if not.
*
* NOTE: Sets the global "WavePtr" to an allocated buffer
* containing the wave data, and "WaveSize" to the size
* in sample points.
*/
static unsigned char waveLoad(const char *fn)
{
const char *message;
FILE_head head;
register int inHandle;
if ((inHandle = open(fn, O_RDONLY)) == -1)
message = "didn't open";
// Read in IFF File header
else
{
if (read(inHandle, &head, sizeof(FILE_head)) == sizeof(FILE_head))
{
// Is it a RIFF and WAVE?
if (!compareID(&Riff[0], &head.ID[0]) || !compareID(&Wave[0], &head.Type[0]))
{
message = "is not a WAVE file";
goto bad;
}
// Read in next chunk header
while (read(inHandle, &head, sizeof(CHUNK_head)) == sizeof(CHUNK_head))
{
// ============================ Is it a fmt chunk? ===============================
if (compareID(&Fmt[0], &head.ID[0]))
{
FORMAT format;
// Read in the remainder of chunk
if (read(inHandle, &format.wFormatTag, sizeof(FORMAT)) != sizeof(FORMAT)) break;
// Can't handle compressed WAVE files
if (format.wFormatTag != 1)
{
message = "compressed WAVE not supported";
goto bad;
}
WaveBits = (unsigned char)format.wBitsPerSample;
WaveRate = (unsigned short)format.dwSamplesPerSec;
WaveChannels = format.wChannels;
}
// ============================ Is it a data chunk? ===============================
else if (compareID(&Data[0], &head.ID[0]))
{
// Size of wave data is head.Length. Allocate a buffer and read in the wave data
if (!(WavePtr = (unsigned char *)malloc(head.Length)))
{
message = "won't fit in RAM";
goto bad;
}
if (read(inHandle, WavePtr, head.Length) != head.Length)
{
free(WavePtr);
break;
}
// Store size (in frames)
WaveSize = (head.Length * 8) / ((unsigned int)WaveBits * (unsigned int)WaveChannels);
close(inHandle);
return(0);
}
// ============================ Skip this chunk ===============================
else
{
if (head.Length & 1) ++head.Length; // If odd, round it up to account for pad byte
lseek(inHandle, head.Length, SEEK_CUR);
}
}
}
message = "is a bad WAVE file";
bad: close(inHandle);
}
printf("%s %s\n", fn, message);
return(1);
}
/********************** play_audio() **********************
* Plays the loaded waveform.
*
* NOTE: ALSA sound card's handle must be in the global
* "PlaybackHandle". A pointer to the wave data must be in
* the global "WavePtr", and its size of "WaveSize".
*/
static void play_audio(void)
{
register snd_pcm_uframes_t count, frames;
// Output the wave data
count = 0;
do
{
frames = snd_pcm_writei(PlaybackHandle, WavePtr + count, WaveSize - count);
// If an error, try to recover from it
if (frames < 0)
frames = snd_pcm_recover(PlaybackHandle, frames, 0);
if (frames < 0)
{
printf("Error playing wave: %s\n", snd_strerror(frames));
break;
}
// Update our pointer
count += frames;
} while (count < WaveSize);
// Wait for playback to completely finish
//if (count == WaveSize)
//snd_pcm_drain(PlaybackHandle);
}
/*********************** free_wave_data() *********************
* Frees any wave data we loaded.
*
* NOTE: A pointer to the wave data be in the global
* "WavePtr".
*/
static void free_wave_data(void)
{
if (WavePtr) free(WavePtr);
WavePtr = 0;
}
int main(int argc, char **argv)
{
// No wave data loaded yet
WavePtr = 0;
if (argc < 2)
printf("You must supply the name of a 16-bit mono WAVE file to play\n");
// Load the wave file
else if (!waveLoad(argv[1]))
{
register int err;
// Open audio card we wish to use for playback
if ((err = snd_pcm_open(&PlaybackHandle, &SoundCardPortName[0], SND_PCM_STREAM_PLAYBACK, 0)) < 0)
printf("Can't open audio %s: %s\n", &SoundCardPortName[0], snd_strerror(err));
else
{
switch (WaveBits)
{
case 8:
err = SND_PCM_FORMAT_U8;
break;
case 16:
err = SND_PCM_FORMAT_S16;
break;
case 24:
err = SND_PCM_FORMAT_S24;
break;
case 32:
err = SND_PCM_FORMAT_S32;
break;
}
// Set the audio card's hardware parameters (sample rate, bit resolution, etc)
if ((err = snd_pcm_set_params(PlaybackHandle, err, SND_PCM_ACCESS_RW_INTERLEAVED, WaveChannels, WaveRate, 1, 100000)) < 0)
printf("Can't set sound parameters: %s\n", snd_strerror(err));
// Play the waveform
else
play_audio();
int i;
usleep(10000);
play_audio();
play_audio();
// Close sound card
snd_pcm_close(PlaybackHandle);
}
}
// Free the WAVE data
free_wave_data();
return(0);
}
As I would like to play multiple sounds simultaneously, I started by trying to play the same sound more than once, so I commented out the following lines:
if (count == WaveSize)
snd_pcm_drain(PlaybackHandle);
in the play_audio function.
Unfortunately, that doesn't really work: playing the same sound more than once works, but if I insert a long delay before playing the sound, nothing is played.
For instance, in the main function,
play_audio();
usleep(10000);
play_audio();
play_audio();
works, and I can hear the same sound three times. But if I use usleep(100000), I hear the sound only once.
Another problem is that it has to wait for the first sound to end before it starts playing the next one.
So I'd like to be able to send more than one sound and play several sounds at the same time. I would like to mix them manually (that's not really difficult). The main function will contain a while loop with some tests to determine which sound(s) need to be played.
I thought about putting play_audio in a thread, running it in an infinite loop, and having the main thread modify (mix, etc.) WavePtr.
I just don't really know whether this is the right way, or whether there is a more efficient method.
Any suggestions? Thanks.
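Not from the original post, but as a sketch of the manual-mixing idea mentioned above: with both clips loaded in the same format (16-bit signed, same rate and channel count), the samples can be summed with clamping into one scratch buffer and written with a single snd_pcm_writei() call. The function name mix_s16 is made up.
#include <limits.h>
#include <stddef.h>

/* Mix two 16-bit signed PCM buffers sample by sample, clamping to avoid
   wrap-around. 'samples' counts individual sample points, not frames. */
static void mix_s16(const short *a, const short *b, short *out, size_t samples)
{
    for (size_t i = 0; i < samples; i++)
    {
        int sum = (int)a[i] + (int)b[i];
        if (sum > SHRT_MAX) sum = SHRT_MAX;
        if (sum < SHRT_MIN) sum = SHRT_MIN;
        out[i] = (short)sum;
    }
}

/* Usage idea: mix into a scratch buffer of 'frames * channels' samples,
   then call snd_pcm_writei(PlaybackHandle, scratch, frames) once. */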

Configuring SDI input/output for passthru input card->gpu->output card

I'm using the Quadro SDI SDK along with my Quadro K6000+ SDI Input & output cards and have converted the included cudaDVP SDK sample to send raw image buffers directly to the GPU from the SDI input card.
In the next step I display the data via OpenGL bindings. Finally I want to output the same data to my output card, and this is where I'm having trouble.
I am getting correct input data and I do manage to output to the screen, but there appears to be some data modification happening in the SDI output pipeline, as the outgoing image is not quite correct (wrong colors etc.). I'm passing the raw input buffer as can be seen below.
Which output card configuration should I use to match my input settings (see below)?
Which modifications to the OpenGL output texture configuration, if any, are required (see below)?
Input/output Capture/receive options & GL bindings in order of being called in the application:
........
Display *dpy = XOpenDisplay(NULL);
//scan the systems for GPUs
int num_gpus = ScanHW(dpy,gpuList);
if(num_gpus < 1)
exit(1);
//grab the first GPU for now for DVP
HGPUNV *g = &gpuList[0];
////////////////////////////////////////////////////////////////////////////////////////////////
/// Input related SDI settings and OpenGL settings
// Query input , in our case Video format: NV_CTRL_GVIO_VIDEO_FORMAT_576I_50_00_SMPTE259_PAL
XNVCTRLQueryTargetAttribute(dpy, NV_CTRL_TARGET_TYPE_GVI, 0, 0, NV_CTRL_GVIO_DETECTED_VIDEO_FORMAT, &m_videoFormat);
//
// Set desired parameters
//
// Signal format.
XNVCTRLSetTargetAttribute(dpy, NV_CTRL_TARGET_TYPE_GVI, 0, 0, NV_CTRL_GVIO_REQUESTED_VIDEO_FORMAT, m_videoFormat);
//Bits per component - 10 bits
XNVCTRLSetTargetAttribute(dpy, NV_CTRL_TARGET_TYPE_GVI, 0, 0, NV_CTRL_GVI_REQUESTED_STREAM_BITS_PER_COMPONENT, NV_CTRL_GVI_BITS_PER_COMPONENT_10);
// Component sampling -422
XNVCTRLSetTargetAttribute(dpy, NV_CTRL_TARGET_TYPE_GVI, 0, 0, NV_CTRL_GVI_REQUESTED_STREAM_COMPONENT_SAMPLING, NV_CTRL_GVI_COMPONENT_SAMPLING_422);
// Chroma expansion OFF
XNVCTRLSetTargetAttribute(dpy, NV_CTRL_TARGET_TYPE_GVI, 0, 0, NV_CTRL_GVI_REQUESTED_STREAM_CHROMA_EXPAND, NV_CTRL_GVI_CHROMA_EXPAND_FALSE);
// Query the width and height of the input signal format
XNVCTRLQueryAttribute(dpy, g->deviceXScreen, value, NV_CTRL_GVIO_VIDEO_FORMAT_WIDTH, &m_videoWidth);
XNVCTRLQueryAttribute(dpy, g->deviceXScreen, value, NV_CTRL_GVIO_VIDEO_FORMAT_HEIGHT, &m_videoHeight);
....
GLuint m_videoSlot; // Video slot number
GLuint m_vidBuf; // Video capture buffers
GLint m_bufPitch; // Buffer pitch
GLuint m_vidTex; // Video capture textures
m_videoSlot = 1;
//////////////////////////////////////
////////// OpenGL related settings
// No video color conversion desired ( we want the raw data, NULL )
glVideoCaptureStreamParameterfvNV(m_videoSlot, 0, GL_VIDEO_COLOR_CONVERSION_MATRIX_NV, NULL);
glVideoCaptureStreamParameterfvNV(m_videoSlot, 0,GL_VIDEO_COLOR_CONVERSION_MAX_NV, NULL);
glVideoCaptureStreamParameterfvNV(m_videoSlot, 0,GL_VIDEO_COLOR_CONVERSION_MIN_NV, NULL);
glVideoCaptureStreamParameterfvNV(m_videoSlot, 0,GL_VIDEO_COLOR_CONVERSION_OFFSET_NV, NULL);
// Set the buffer object capture data format. - IE number of components in a pixel
glVideoCaptureStreamParameterivNV(m_videoSlot, 0, GL_VIDEO_BUFFER_INTERNAL_FORMAT_NV, &GL_Z6Y10Z6CB10Z6Y10Z6CR10_422_NV);
// Get the video buffer pitch
glGetVideoCaptureStreamivNV(m_videoSlot, 0, GL_VIDEO_BUFFER_PITCH_NV, &m_bufPitch);
// Bind the buffer
glBindBufferARB(GL_VIDEO_BUFFER_NV, m_vidBuf);
// Allocate required space in video capture buffer
glBufferDataARB(GL_VIDEO_BUFFER_NV, m_bufPitch * m_videoHeight, NULL, GL_STREAM_READ_ARB);
// Bind the buffer to the video capture device.
glBindVideoCaptureStreamBufferNV(m_videoSlot, 0, GL_FRAME_NV, 0);
////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////
/// SDI Output card settings
GLuint m_CudaOutTexture; // Texture to send to the output device
GLuint m_CudaOutBuffer; // Texture to send to the output device
GLuint m_OutTexture;
// Set video format - same as input - NV_CTRL_GVIO_VIDEO_FORMAT_576I_50_00_SMPTE259_PAL
XNVCTRLSetAttribute(dpy, m_outputOptions.xscreen, 0, NV_CTRL_GVO_OUTPUT_VIDEO_FORMAT, m_videoFormat);
// Set data format format.
// Attempted several different settings here....
data_format = NV_CTRL_GVO_DATA_FORMAT_R8G8B8_TO_YCRCB422;
//data_format = NV_CTRL_GVO_DATA_FORMAT_X10X10X10_422_PASSTHRU;
//data_format = NV_CTRL_GVO_DATA_FORMAT_X8X8X8_422_PASSTHRU;
//data_format = NV_CTRL_GVO_DATA_FORMAT_R10G10B10_TO_YCRCB422;
//data_format = NV_CTRL_GVO_DATA_FORMAT_X12X12X12_422_PASSTHRU;
//data_format = NV_CTRL_GVO_DATA_FORMAT_Y10CR10CB10_TO_YCRCB444;
//data_format = NV_CTRL_GVO_DATA_FORMAT_X10X8X8_422_PASSTHRU;
//data_format = NV_CTRL_GVO_ENABLE_RGB_DATA_DISABLE
XNVCTRLSetAttribute(dpy, m_outputOptions.xscreen, 0, NV_CTRL_GVO_DATA_FORMAT, data_format);
// Set sync mode
XNVCTRLSetAttribute(dpy, m_outputOptions.xscreen, 0, NV_CTRL_GVO_SYNC_MODE, NV_CTRL_GVO_SYNC_MODE_FREE_RUNNING);
// Set sync source
XNVCTRLSetAttribute(dpy, m_outputOptions.xscreen, 0, NV_CTRL_GVO_SYNC_SOURCE, NV_CTRL_GVO_SYNC_SOURCE_SDI);
// Set flip queue length
XNVCTRLSetAttribute(dpy, m_outputOptions.xscreen, 0, NV_CTRL_GVO_FLIP_QUEUE_SIZE, 5);
.....
///////////////////////////////////////////////////////////////////
// OpenGL related settings for output
//Setup the output after the capture is configured.
glGenTextures(1, &m_OutTexture);
glBindTexture(GL_TEXTURE_RECTANGLE_NV, m_OutTexture);
glTexParameterf(GL_TEXTURE_RECTANGLE_NV, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexImage2D(GL_TEXTURE_RECTANGLE_NV, 0, GL_RGBA8, WIDTH, HEIGHT, 0, GL_RGBA, GL_UNSIGNED_BYTE, 0 );
////////////////////////////////////////////////////////
//Setup GL output from cuda
// Create and initialize a texture objects.
glGenBuffersARB(1, &m_CudaOutBuffer);
assert(glGetError() == GL_NO_ERROR);
glBindBufferARB(GL_VIDEO_BUFFER_NV, m_CudaOutBuffer);
assert(glGetError() == GL_NO_ERROR);
// Allocate required space in video capture buffer
glBufferDataARB(GL_VIDEO_BUFFER_NV, pitch * height, NULL, GL_STREAM_COPY);
glGenTextures(1, &m_CudaOutTexture);
glBindTexture(GL_TEXTURE_RECTANGLE_NV, m_CudaOutTexture);
glTexParameterf(GL_TEXTURE_RECTANGLE_NV, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
assert(glGetError() == GL_NO_ERROR);
glTexImage2D(GL_TEXTURE_RECTANGLE_NV, 0, GL_RGBA8, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, 0);
glBindTexture(GL_TEXTURE_RECTANGLE_NV, 0);
//register the buffer objects
cudaRegisterBuffers();
....
////////////////////////////////////////////////////////////////
/////////// Data transfer from GPU to output device buffer (to be outputted to the SDI output card)
GLint inBuf = m_vidBuf;
// Map buffer(s) into CUDA
cudaError_t cerr;
unsigned char *inDevicePtr;
cerr = cudaGLMapBufferObject((void**)&inDevicePtr, inBuf);
cudaCheckErrors("map");
unsigned char *outDevicePtr;
cerr = cudaGLMapBufferObject((void**)&outDevicePtr, m_CudaOutBuffer);
cudaCheckErrors("map");
//
// Dummy CUDA operation:
// Perform CUDA Operations here such as a kernel call with outDevicePtr and inDevicePtr.
//
unsigned int pitch = m_SDIin.getBufferObjectPitch(0);
unsigned int height = m_SDIin.getHeight();
cudaMemcpy(outDevicePtr,inDevicePtr,pitch*height,cudaMemcpyDeviceToDevice);
////////////////////////////////////////////////////////
/////// output
GLboolean C_cudaDVP::OutputVideo()
{
if(!m_SDIoutEnabled)
return GL_FALSE;
//send the texture to SDI out.
glBindTexture(GL_TEXTURE_RECTANGLE_NV, m_OutTexture);
glEnable(GL_TEXTURE_RECTANGLE_NV);
glPresentFrameKeyedNV(1, 0,
0, 0,
GL_FRAME_NV,
GL_TEXTURE_RECTANGLE_NV, m_OutTexture, 0,
GL_NONE, 0, 0);
GLenum l_eVal = glGetError();
glBindTexture(GL_TEXTURE_RECTANGLE_NV, 0);
if (l_eVal != GL_NO_ERROR) {
fprintf(stderr, "glPresentFameKeyedNV returned error: %s\n", gluErrorString(l_eVal));
return FALSE;
}
return GL_TRUE;
}
.....
// After which we call:
// To skip a costly data copy from video buffer to texture we
// can just bind a video buffer to GL_PIXEL_UNPACK_BUFFER_ARB and call
// glTexSubImage2D referencing into the buffer with the PixelData pointer
// set to 0.
glBindTexture(GL_TEXTURE_RECTANGLE_NV, m_CudaOutTexture);
//glBindBufferARB(GL_PIXEL_UNPACK_BUFFER_ARB, m_SDIin.getBufferObjectHandle(0));
glBindBufferARB(GL_PIXEL_UNPACK_BUFFER_ARB, m_CudaOutBuffer);
glPixelStorei(GL_UNPACK_ROW_LENGTH,pitch/4);
glTexSubImage2D(GL_TEXTURE_RECTANGLE_NV, 0, 0, 0, width, height, GL_RGBA, GL_UNSIGNED_BYTE, 0);
glBindTexture(GL_TEXTURE_RECTANGLE_NV, 0);
glBindBufferARB(GL_PIXEL_UNPACK_BUFFER_ARB, 0);
//////////////////////////////////////////////////////

How to load windows cursors (.cur) in Linux using XLib and Xcursors?

I'm trying to load a .cur file via the Xcursor library and I just can't make it work. I'm working with Linux (Bodhi + Lubuntu) on VirtualBox. Here's my code:
My includes:
// GLFW
#define GLFW_DLL // use the GLFW .dll
#include "gl/GLFW/glfw3.h"
...
#if defined(__linux__)
// GLFW native expose
#define GLFW_EXPOSE_NATIVE_X11 // expose X11 and GLX context (linux)
#define GLFW_EXPOSE_NATIVE_GLX
#include "gl/GLFW/glfw3native.h" // for low-level expose
// X Window
#include <X11/X.h>
#include <X11/Xlib.h>
#include <X11/Xutil.h>
#include <X11/Xcursor/Xcursor.h>
#endif
In the program, after GLFW window creation:
#if defined(__linux__)
Display* dpy = glfwGetX11Display();
Window xwindow = glfwGetX11Window(window);
Cursor crs = XcursorFilenameLoadCursor(dpy,"somefolder/default.cur");
XDefineCursor(dpy,xwindow,crs);
#endif
...
#if defined(__linux__)
XFreeCursor(dpy,crs);
#endif
Any obvious error? I can't really find sources about Xcursor, and I'm close to switching to typical OpenGL textures and getting it over with.
NOTE: I have already disabled the VirtualBox integrated mouse, so the mouse is entirely on the guest machine, and I run it fullscreen.
Since I solved my problem, I decided it would be a good idea to post my results, in the hope of helping someone trying to make their application more cross-platform.
My code follows, and an explanation follows the code.
ESSENTIAL
X11 development headers package (e.g. libx11-dev)
Xcursor development headers package (e.g. libxcursor-dev)
A portable <stdint.h> header, like this one.
CODE
...
// to store colors
struct COLOR {
uint8_t r,g,b,a;
COLOR() {
this->r = this->g = this->b = this->a = 0;
}
COLOR(uint8_t r,uint8_t g,uint8_t b) {
this->r = r;
this->g = g;
this->b = b;
this->a = 255;
}
bool operator == (COLOR c) const {
return (r == c.r && g == c.g && b == c.b);
}
};
size_t get_bit(int32_t number,int8_t position) {
size_t bitmask = 1 << position;
return (number & bitmask) ? 1 : 0;
}
....
// load cursor
#if defined(_WIN32)
// e.g. use standard WinAPI code, (HCURSOR)LoadImage(..) and such
#endif
#if defined(__linux__)
Display* display = XOpenDisplay(NULL);
string filename = "mycursor.cur"; // <-- add your .cur cursor here
char buf[4];
FILE* fp = fopen(filename.c_str(),"rb");
fread(buf,1,2,fp); // reserved. always 0
fread(buf,1,2,fp); // image type; we're only interested for .cur (=2)
int16_t imagetype = buf[0] | (buf[1]<<8);
if (imagetype!=2) {
// error: file is not a valid .cur file
return;
}
fread(buf,1,2,fp); // number of images
// we're only interested in the first image
fread(buf,1,1,fp); // width (0 to 255; 0=means 256 width)
int width = (buf[0]==0 ? 256 : (unsigned char)buf[0]);
fread(buf,1,1,fp); // height (0 to 255; 0 means 256 height)
int height = (buf[0]==0 ? 256 : (unsigned char)buf[0]);
fread(buf,1,1,fp); // number of colors in palette (0 for no palette)
fread(buf,1,1,fp); // reserved. should be 0
fread(buf,1,2,fp); // hotspot x
int16_t hotspot_x = buf[0] | (buf[1]<<8);
fread(buf,1,2,fp); // hotspot y
int16_t hotspot_y = buf[0] | (buf[1]<<8);
fread(buf,1,4,fp); // image data in bytes
fread(buf,1,4,fp); // offset to image data
// Now we need to verify if image in .cur is BMP or PNG (Vista+)
// We can't just check 'magic' since if it's BMP, the header will be missing (PNG is whole)
// So we search for PNG magic; if doesnt exist, we have a BMP!
// NOTE: for simplicity we go only for BMP for the moment.
// So just check if 'biSize' is 40 (Windows NT & 3.1x or later)
// BITMAPINFOHEADER
fread(buf,1,4,fp); // biSize
int32_t biSize = (buf[0]&0xff) | (buf[1]<<8) | (buf[2]<<16) | (buf[3]<<24);
if (biSize!=40) {
// error: file does not contain valid BMP data;
return;
}
fread(buf,1,4,fp); // biWidth
int32_t biWidth = (buf[0]&0xff) | (buf[1]<<8) | (buf[2]<<16) | (buf[3]<<24);
fread(buf,1,4,fp); // biHeight (if positive => bottom-up, if negative => up-bottom)
int32_t biHeight = (buf[0]&0xff) | (buf[1]<<8) | (buf[2]<<16) | (buf[3]<<24);
fread(buf,1,2,fp); // biPlanes
fread(buf,1,2,fp); // biBitCount
int16_t biBitCount = (buf[0]&0xff) | (buf[1]<<8);
if (biBitCount!=24 && biBitCount!=32) {
// error: only 24/32 bits supported;
return;
}
fread(buf,1,4,fp); // biCompression
int32_t biCompression = (buf[0]&0xff) | (buf[1]<<8) | (buf[2]<<16) | (buf[3]<<24);
// we want only uncompressed BMP data
if (biCompression!=0 /*BI_RGB*/ ) {
// error: file is compressed; only uncompressed BMP is supported;
return;
}
fread(buf,1,4,fp); // biSizeImage
fread(buf,1,4,fp); // biXPelsPerMeter
fread(buf,1,4,fp); // biYPelsPerMeter
fread(buf,1,4,fp); // biClrUsed
fread(buf,1,4,fp); // biClrImportant
// DECODE IMAGE
uint8_t origin = (biHeight>0 ? 0 : 1); // 0=bottom-up, 1=up-bottom
// there are cases where BMP sizes are NOT the same with cursor; we use the cursor ones
biWidth = width;
biHeight = height;
COLOR* pixels = new COLOR[biWidth * biHeight];
for(int32_t y=0;y<biHeight;y++) {
for(int32_t x=0;x<biWidth;x++) {
uint32_t offset = ((origin==1?y:biHeight-1-y)*biWidth)+x;
// read pixels by number of bits
switch(biBitCount) {
// 24-bit
case 24:
fread(buf,1,3,fp);
pixels[offset] = COLOR(buf[0],buf[1],buf[2]);
break;
// 32-bit
case 32:
fread(buf,1,4,fp);
pixels[offset] = COLOR(buf[0],buf[1],buf[2]);
pixels[offset].a = buf[3];
break;
}
}
}
// read mask
// mask is 1-bit-per-pixel for describing the cursor bitmap's opaque and transparent pixels.
// so for 1 pixel we need 1 bit; for e.g. a 32-pixel-wide image we need 32*1 bits (or 4 bytes per line)
// so for e.g. 32 pixels of height we need 4*32 = 128 bytes, or [mask bytes per line * height]
uint16_t mask_bytes_per_line = biWidth / 8;
uint16_t mask_bytes = mask_bytes_per_line * biHeight;
char* mask = new char[mask_bytes];
fread(mask,1,mask_bytes,fp);
fclose(fp);
// reverse every [mask_bytes_per_line] bytes (e.g. for 4 bytes per line do: 0,1,2,3 => 3,2,1,0) -> you really need to ask Microsoft about this 8)
char rmask[4];
for(uint16_t x=0;x<mask_bytes;x+=mask_bytes_per_line) {
for(uint16_t r=0;r<mask_bytes_per_line;r++) {
rmask[r] = mask[x+mask_bytes_per_line-1-r];
}
// copy the reversed line
for(uint16_t r=0;r<mask_bytes_per_line;r++) {
mask[x+r] = rmask[r];
}
}
// now extract all bits from mask bytes into new array (equal to width*height)
vector<uint8_t> bits;
for(uint16_t x=0;x<mask_bytes;x++) {
for(uint8_t b=0;b<8;b++) {
bits.push_back( get_bit(mask[x],b) );
}
}
delete[] mask;
// reverse vector (?)
reverse(bits.begin(),bits.end());
// now the bits contains =1 (transparent) and =0 (opaque) which we use on cursor directly
XcursorImage* cimg = XcursorImageCreate(biWidth,biHeight);
cimg->xhot = hotspot_x;
cimg->yhot = hotspot_y;
// create the image
for(int32_t y=0;y<biHeight;y++) {
for(int32_t x=0;x<biWidth;x++) {
uint32_t offset = (y*biWidth)+x;
COLOR pix = pixels[offset];
cimg->pixels[offset] = ((bits[offset]==1?0:pix.a)<<24) + (pix.r<<16) + (pix.g<<8) + (pix.b);
}
}
// create cursor from image and release the image
Cursor cursor = XcursorImageLoadCursor(display,cimg);
XcursorImageDestroy(cimg);
...
// set the cursor
XDefineCursor(display,yourwindow,cursor);
XFlush(display);
...
// free cursor
if (cursor!=None) {
XFreeCursor(display,cursor);
}
The above code takes a Windows .cur cursor file and creates an Xcursor for use in the X11 window system. Of course, there are plenty of limitations in my handling of the .cur format, but one can easily add improvements to the code above (such as support for 8-bit cursors). The code not only takes care of alpha transparency but also handles 32-bit alpha-blended cursors.

OpenGL double buffering on Ubuntu does not work

I'm testing out several of Sumanta Guha's code samples and something isn't working quite right: all of the sample code where he uses GLUT_DOUBLE and glutSwapBuffers() does not work on my Ubuntu machine, but works on my Windows machine. More precisely, the window that pops up simply traces the background.
I've had a similar issue before on Windows, where flushing and single buffering didn't work, but now this is happening on Linux, where double buffering and glutSwapBuffers() do not work. Any idea as to what may be causing this?
Here is a sample of the code that I tried loading. It compiles fine; I just get a window that traces the background.
///////////////////////////////////////////////////////////////////////////////////////////////////////
// loadTextures.cpp
//
// This stripped-down program shows how to load both external and program-generated images as textures.
//
// NOTE: The Textures folder must be in the same one as this program.
//
// Interaction:
// Press the left and right arrow keys to rotate the square.
// Press space to toggle between textures.
// Press delete to reset.
//
// Sumanta Guha
///////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////
// TEXTURE CREDITS:
// canLabel.bmp, thanks anonymous.
// canTop.bmp, thanks www.acoustica.com.
// cray2.bmp, thanks NASA website www.nasa.gov.
// grass.bmp, thanks www.amazingtextures.com.
// launch.bmp, thanks NASA website www.nasa.gov.
// nightsky.bmp, thanks anonymous.
// sky.bmp, thanks www.mega-tex.nl.
// trees.bmp, thanks anonymous.
////////////////////////////////////////////////
#include <cstdlib>
#include <iostream>
#include <fstream>
#ifdef __APPLE__
# include <GLUT/glut.h>
# include <OpenGL/glext.h>
#else
# include <GL/glut.h>
# include <GL/glext.h>
#endif
using namespace std;
// Globals.
static unsigned int texture[2]; // Array of texture indices.
static unsigned char chessboard[64][64][3]; // Storage for chessboard image.
static float angle = 0.0; // Angle to rotate textured square.
static int id = 0; // Currently displayed texture id.
// Struct of bitmap file.
struct BitMapFile
{
int sizeX;
int sizeY;
unsigned char *data;
};
// Routine to read a bitmap file.
// Works only for uncompressed bmp files of 24-bit color.
BitMapFile *getBMPData(string filename)
{
BitMapFile *bmp = new BitMapFile;
unsigned int size, offset, headerSize;
// Read input file name.
ifstream infile(filename.c_str(), ios::binary);
// Get the starting point of the image data.
infile.seekg(10);
infile.read((char *) &offset, 4);
// Get the header size of the bitmap.
infile.read((char *) &headerSize,4);
// Get width and height values in the bitmap header.
infile.seekg(18);
infile.read( (char *) &bmp->sizeX, 4);
infile.read( (char *) &bmp->sizeY, 4);
// Allocate buffer for the image.
size = bmp->sizeX * bmp->sizeY * 24;
bmp->data = new unsigned char[size];
// Read bitmap data.
infile.seekg(offset);
infile.read((char *) bmp->data , size);
// Reverse color from bgr to rgb.
int temp;
for (int i = 0; i < size; i += 3)
{
temp = bmp->data[i];
bmp->data[i] = bmp->data[i+2];
bmp->data[i+2] = temp;
}
return bmp;
}
// Load external textures.
void loadExternalTextures()
{
// Local storage for bmp image data.
BitMapFile *image[1];
// Load the texture.
image[0] = getBMPData("Textures/launch.bmp");
// Activate texture index texture[0].
glBindTexture(GL_TEXTURE_2D, texture[0]);
// Set texture parameters for wrapping.
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
// Set texture parameters for filtering.
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
// Specify an image as the texture to be bound with the currently active texture index.
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, image[0]->sizeX, image[0]->sizeY, 0,
GL_RGB, GL_UNSIGNED_BYTE, image[0]->data);
}
// Routine to load a program-generated image as a texture.
void loadProceduralTextures()
{
// Activate texture index texture[1].
glBindTexture(GL_TEXTURE_2D, texture[1]);
// Set texture parameters for wrapping.
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
// Set texture parameters for filtering.
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
// Specify an image as the texture to be bound with the currently active texture index.
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, 64, 64, 0, GL_RGB, GL_UNSIGNED_BYTE, chessboard);
}
// Create 64 x 64 RGB image of a chessboard.
void createChessboard(void)
{
int i, j;
for (i = 0; i < 64; i++)
for (j = 0; j < 64; j++)
if ( ( ((i/8)%2) && ((j/8)%2) ) || ( !((i/8)%2) && !((j/8)%2) ) )
{
chessboard[i][j][0] = 0x00;
chessboard[i][j][1] = 0x00;
chessboard[i][j][2] = 0x00;
}
else
{
chessboard[i][j][0] = 0xFF;
chessboard[i][j][1] = 0xFF;
chessboard[i][j][2] = 0xFF;
}
}
// Initialization routine.
void setup(void)
{
glClearColor(0.8, 0.8, 0.8, 0.0);
// Create texture index array.
glGenTextures(2, texture);
// Load external texture and generate and load procedural texture.
loadExternalTextures();
createChessboard();
loadProceduralTextures();
// Turn on OpenGL texturing.
glEnable(GL_TEXTURE_2D);
// Specify how texture values combine with current surface color values.
glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_REPLACE);
}
// Drawing routine.
void drawScene(void)
{
glClear(GL_COLOR_BUFFER_BIT);
glLoadIdentity();
gluLookAt(0.0, 0.0, 20.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0);
glRotatef(angle, 0.0, 1.0, 0.0);
// Activate a texture.
glBindTexture(GL_TEXTURE_2D, texture[id]);
// Map the texture onto a square polygon.
glBegin(GL_POLYGON);
glTexCoord2f(0.0, 0.0); glVertex3f(-10.0, -10.0, 0.0);
glTexCoord2f(1.0, 0.0); glVertex3f(10.0, -10.0, 0.0);
glTexCoord2f(1.0, 1.0); glVertex3f(10.0, 10.0, 0.0);
glTexCoord2f(0.0, 1.0); glVertex3f(-10.0, 10.0, 0.0);
glEnd();
glutSwapBuffers();
}
// OpenGL window reshape routine.
void resize(int w, int h)
{
glViewport(0, 0, (GLsizei)w, (GLsizei)h);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
glFrustum(-5.0, 5.0, -5.0, 5.0, 5.0, 100.0);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
}
// Keyboard input processing routine.
void keyInput(unsigned char key, int x, int y)
{
switch(key)
{
case 27:
exit(0);
break;
case ' ':
id++;
if (id == 2) id = 0;
glutPostRedisplay();
break;
case 127:
angle = 0.0;
glutPostRedisplay();
break;
default:
break;
}
}
// Callback routine for non-ASCII key entry.
void specialKeyInput(int key, int x, int y)
{
if (key == GLUT_KEY_LEFT)
{
angle -= 5.0;
if (angle < 0.0) angle += 360.0;
}
if (key == GLUT_KEY_RIGHT)
{
angle += 5.0;
if (angle > 360.0) angle -= 360.0;
}
glutPostRedisplay();
}
// Routine to output interaction instructions to the C++ window.
void printInteraction(void)
{
cout << "Interaction:" << endl;
cout << "Press the left and right arrow keys to rotate the square." << endl
<< "Press space to toggle between textures." << endl
<< "Press delete to reset." << endl;
}
// Main routine.
int main(int argc, char **argv)
{
printInteraction();
glutInit(&argc, argv);
glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGB);
glutInitWindowSize(500, 500);
glutInitWindowPosition(100, 100);
glutCreateWindow("loadTextures.cpp");
setup();
glutDisplayFunc(drawScene);
glutReshapeFunc(resize);
glutKeyboardFunc(keyInput);
glutSpecialFunc(specialKeyInput);
glutMainLoop();
return 0;
}
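Not part of the original question: a stripped-down double-buffered GLUT sketch (assuming GLUT/freeglut is installed) can help separate a display/driver problem from a problem in the texture code above. If this spinning triangle shows up correctly, GLUT_DOUBLE and glutSwapBuffers() work on the machine and the problem is likely elsewhere (for example in the BMP reading).
// Minimal double-buffer sanity check (not from the original post).
#include <GL/glut.h>

static float spin = 0.0f;

void displayTest(void)
{
    glClear(GL_COLOR_BUFFER_BIT);
    glLoadIdentity();
    glRotatef(spin, 0.0f, 0.0f, 1.0f);
    glBegin(GL_TRIANGLES);
    glColor3f(1, 0, 0); glVertex2f(-0.5f, -0.5f);
    glColor3f(0, 1, 0); glVertex2f( 0.5f, -0.5f);
    glColor3f(0, 0, 1); glVertex2f( 0.0f,  0.5f);
    glEnd();
    glutSwapBuffers();
}

void idleTest(void)
{
    spin += 0.5f;
    glutPostRedisplay();
}

int main(int argc, char** argv)
{
    glutInit(&argc, argv);
    glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGB);
    glutInitWindowSize(500, 500);
    glutCreateWindow("double buffer test");
    glClearColor(0.8f, 0.8f, 0.8f, 0.0f);
    glutDisplayFunc(displayTest);
    glutIdleFunc(idleTest);
    glutMainLoop();
    return 0;
}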
