Multithreaded game program suddenly locks up on glXSwapBuffers - linux

I'm polishing up a Linux game program I wrote, and after about 10 minutes of play it suddenly slows down to one frame every thirty seconds or so, slowing the entire system down as well. Even after I interrupt the process, the system stays slow for about a minute.
In multiple tests I've interrupted the process in GDB when the slowdown occurs, and it is always in the middle of a call to glXSwapBuffers.
It happens regardless of game state or input. The only thing that prevents it is not starting playback of the repeating music track in its separate thread: the thread still runs, but it no longer writes continuously to the sound card buffer. I've ensured that the two lists shared with that thread are properly locked.
Has anybody run into a problem with glXSwapBuffers and other, seemingly unrelated threads?
The OS is Ubuntu 9, using the Mesa 7.6.0 implementation of OpenGL and ALSA libasound2 1.0.20-3. I updated my NVIDIA drivers for my GeForce 6800 graphics card this morning, but to no avail.
(Relevant?) code follows.
Display functions:
int DisplayInterface::init()
{
xDisplay = XOpenDisplay(NULL);
if (xDisplay == NULL)
{
printf("Error: cannot connect to the X server\n");
return -1;
}
rootWindow = DefaultRootWindow(xDisplay);
fbConfigs = glXChooseFBConfig(xDisplay, DefaultScreen(xDisplay), fbAttributes, &numConfigs);
if (fbConfigs == NULL)
{
printf("Error: no X framebuffer configuration available as specified\n");
return -1;
}
visualInfo = glXGetVisualFromFBConfig(xDisplay, fbConfigs[0]);
if (visualInfo == NULL)
{
printf("Error: no appropriate X visual found\n");
return -1;
}
colorMap = XCreateColormap(xDisplay, rootWindow, visualInfo->visual, AllocNone);
xAttributes.colormap = colorMap;
xAttributes.event_mask = ExposureMask | KeyPressMask | KeyReleaseMask; // need KeyPress and KeyRelease for InputInterface
gameWindow = XCreateWindow(xDisplay, rootWindow, 0, 0, displayWidth, displayHeight, 0, visualInfo->depth, InputOutput, visualInfo->visual, CWColormap | CWEventMask, &xAttributes);
XMapWindow(xDisplay, gameWindow);
XStoreName(xDisplay, gameWindow, "Vuess Vow Vong Vo Vold Vown Vhe Vey");
glxWindow = glXCreateWindow(xDisplay, fbConfigs[0], gameWindow, NULL);
renderContext = glXCreateNewContext(xDisplay, fbConfigs[0], GLX_RGBA_TYPE, NULL, GL_TRUE);
glXMakeContextCurrent(xDisplay, glxWindow, glxWindow, renderContext);
//glViewport(0, 0, displayWidth, displayHeight);
glViewport(-2.0 * displayWidth, -2.0 * displayHeight, 5.0 * displayWidth, 5.0 * displayHeight);
//glMatrixMode(GL_PROJECTION);
//glLoadIdentity();
//gluOrtho2D(0.0, (GLfloat)displayWidth, 0.0, (GLfloat)displayHeight);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
glPixelZoom((GLfloat)((float)displayWidth / (float) pictureWidth), (GLfloat)((float)displayHeight / (float) pictureHeight));
glClearColor((float)clearColor[0] / 255.0, (float)clearColor[1] / 255.0, (float)clearColor[2] / 255.0, (float)clearColor[3] / 255.0);
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
glEnable(GL_BLEND);
glClear(GL_COLOR_BUFFER_BIT);
return 0;
}
// draw a Sprite from left to right and from top to bottom, starting at the given pixel
void DisplayInterface::draw(Sprite *sprite, Pixel& pixel)
{
if (sprite == NULL)
{
return;
}
pixelstorage_t *spritePixels = sprite->getPixelData();
const unsigned int format = sprite->getPixelFormat();
glMatrixMode(GL_PROJECTION);
glPushMatrix();
glLoadIdentity();
gluOrtho2D(-2.0 * (GLfloat)displayWidth, 3.0 * (GLfloat)displayWidth, -2.0 * (GLfloat)displayHeight, 3.0 * (GLfloat)displayHeight);
glRasterPos2i(pixel.x * (int)displayWidth / (int)pictureWidth, (int)displayHeight - (pixel.y + (int)sprite->getHeight()) * (int)displayHeight / (int)pictureHeight);
switch (format)
{
case SPRITE_RGBA:
glDrawPixels(sprite->getWidth(), sprite->getHeight(), GL_RGBA, PIXEL_TYPE, spritePixels);
}
glPopMatrix();
glMatrixMode(GL_MODELVIEW);
}
void DisplayInterface::finalizeFrame()
{
glFinish();
glXSwapBuffers(xDisplay, glxWindow);
}
Playback thread functions:
int writeFramesToHwBuffer(pcmsamplestorage_t *frames, snd_pcm_sframes_t numframes)
{
int pcmreturn;
// snd_pcm_writei() returns a negative errno code on failure (-EPIPE means
// underrun); snd_pcm_prepare() re-arms the device so the write can be retried
while ((pcmreturn = snd_pcm_writei(pcm_handle, frames, numframes)) < 0)
{
snd_pcm_prepare(pcm_handle);
fprintf(stderr, "Speaker Interface error: hardware buffer underrun.\n");
}
return pcmreturn;
}
void *playback(void *arg)
{
int i;
snd_pcm_sframes_t availableframes; // signed: snd_pcm_avail_update() can return a negative error code
unsigned int framesFromThisBuffer;
unsigned int framesThisTime;
pcmsamplestorage_t *frames_mix;
pcmsamplestorage_t *frames_track;
unsigned int framesOffset;
std::list<struct playbackState *>::iterator stateIter;
while (1)
{
if (snd_pcm_wait(pcm_handle, 1000) < 0)
{
fprintf(stderr, "Speaker Interface error: poll failed.\n");
break;
}
if ((availableframes = snd_pcm_avail_update(pcm_handle)) < 0)
{
if (availableframes == -EPIPE)
{
fprintf(stderr, "Speaker Interface error: an xrun occurred.\n");
break;
}
else
{
fprintf(stderr, "Speaker Interface error: unknown ALSA avail update return value (%ld).\n", (long)availableframes);
break;
}
}
// mix and write more frequently than necessary
while (availableframes > 0)
{
framesThisTime = std::min<snd_pcm_sframes_t>(availableframes, 1024);
availableframes -= framesThisTime;
//printf("Frames this time: %d / frames left to go: %d\n", framesThisTime, availableframes);
frames_mix = new pcmsamplestorage_t[framesThisTime * 2];
for (i = 0; i < framesThisTime * 2; i++)
{
frames_mix[i] = 0;
}
// BEGIN CRITICAL SECTION
if (pthread_mutex_lock(&soundslists_lock) != 0)
{
fprintf(stderr, "Speaker Interface error: couldn't lock sounds lists from playback thread.\n");
}
printf("soundsPlaying has %d elements.\n", (int)soundsPlaying.size());
printf("soundsToStop has %d elements.\n", (int)soundsToStop.size());
for (stateIter = soundsPlaying.begin(); stateIter != soundsPlaying.end(); stateIter++)
{
frames_track = (*stateIter)->sound->getSamples();
if ((*stateIter)->deliveredframes < (*stateIter)->totalframes)
{
if ((*stateIter)->repeating)
{
framesFromThisBuffer = framesThisTime;
}
else
{
// mix in silence if we reach the end of this sound's buffer
framesFromThisBuffer = std::min(framesThisTime, (*stateIter)->totalframes - (*stateIter)->deliveredframes);
}
for (i = 0; i < framesFromThisBuffer * 2; i++)
{
// add samples to the mix, potentially running off the end of this buffer and wrapping around
if (SHRT_MAX - frames_mix[i] < frames_track[((*stateIter)->deliveredframes * 2 + i) % ((*stateIter)->totalframes * 2)])
{
// prevent overflow
frames_mix[i] = SHRT_MAX;
}
else if (SHRT_MIN - frames_mix[i] > frames_track[((*stateIter)->deliveredframes * 2 + i) % ((*stateIter)->totalframes * 2)])
{
// prevent underflow
frames_mix[i] = SHRT_MIN;
}
else
{
frames_mix[i] += frames_track[((*stateIter)->deliveredframes * 2 + i) % ((*stateIter)->totalframes * 2)];
}
}
(*stateIter)->deliveredframes = ((*stateIter)->deliveredframes + framesFromThisBuffer);
if ((*stateIter)->repeating)
{
(*stateIter)->deliveredframes = (*stateIter)->deliveredframes % (*stateIter)->totalframes;
}
}
else
{
soundsToStop.push_back(stateIter);
}
}
writeFramesToHwBuffer(frames_mix, framesThisTime);
delete[] frames_mix; // array form to match new[]
// free each finished sound before erasing its entry, and let erase() advance
// the iterator; incrementing again after erase() would skip elements
for (std::list<std::list<struct playbackState *>::iterator>::iterator stateiterIter = soundsToStop.begin(); stateiterIter != soundsToStop.end(); )
{
free(**stateiterIter);
soundsPlaying.erase(*stateiterIter);
stateiterIter = soundsToStop.erase(stateiterIter);
}
if (pthread_mutex_unlock(&soundslists_lock) != 0)
{
fprintf(stderr, "Speaker Interface error: couldn't unlock sounds lists from playback thread.\n");
}
// END CRITICAL SECTION
}
}
}

"The OS is Ubuntu 9, using the Mesa 7.6.0 implementation of OpenGL and ALSA libasound2 1.0.20-3. I updated my NVIDIA drivers for my GeForce 6800 graphics card this morning, but to no avail."
You can be using either a Mesa libGL.so or an NVIDIA libGL.so, but not both at the same time. I suggest you try a different OpenGL driver (e.g. really use Mesa; check with glxinfo | grep "OpenGL vendor").
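If you want to check from inside the program which implementation was actually loaded, the GL strings can be queried once a context is current; a minimal sketch (the function name is mine, not from the game's code):

// Sketch: print which OpenGL implementation the running process actually uses.
// Call once a GLX context is current (e.g. right after glXMakeContextCurrent).
#include <stdio.h>
#include <GL/gl.h>

static void print_gl_driver_info(void)
{
    printf("OpenGL vendor:   %s\n", (const char *)glGetString(GL_VENDOR));
    printf("OpenGL renderer: %s\n", (const char *)glGetString(GL_RENDERER));
    printf("OpenGL version:  %s\n", (const char *)glGetString(GL_VERSION));
}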
As a wild guess: glXSwapBuffers often interacts with the vertical sync of your screen; you might try playing with the options for that (see Google).
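If vsync turns out to be involved, the swap interval can be adjusted through GLX_EXT_swap_control where the driver exposes it (a 2009-era Mesa stack may not; there the vblank_mode environment variable, or __GL_SYNC_TO_VBLANK on NVIDIA, is the usual knob). A hedged sketch, assuming the extension is present:

// Sketch: disable vsync via GLX_EXT_swap_control. Real code should first
// check the GLX extension string before relying on this entry point.
#include <GL/glx.h>
#include <GL/glxext.h>

static void disable_vsync(Display *dpy, GLXDrawable drawable)
{
    PFNGLXSWAPINTERVALEXTPROC glXSwapIntervalEXT =
        (PFNGLXSWAPINTERVALEXTPROC)glXGetProcAddressARB(
            (const GLubyte *)"glXSwapIntervalEXT");
    if (glXSwapIntervalEXT)
        glXSwapIntervalEXT(dpy, drawable, 0); // 0 = swap immediately, no vblank wait
}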

Related

Is there a way to create GLX context after Xlib's window creation?

I'm trying to create an OpenGL (GLX) context after the Xlib window has already been created; that is, I'm trying to separate the Xlib window creation and the OpenGL context creation into two different phases.
The Win32 window/OpenGL context creation was rather simple, but I couldn't find any resource that illustrates the same process with Xlib and OpenGL on Linux.
This is how it's done for Xlib on Linux:
GLint glxAttribs[] = {
GLX_RGBA,
GLX_DOUBLEBUFFER,
GLX_DEPTH_SIZE, 24,
GLX_STENCIL_SIZE, 8,
GLX_RED_SIZE, 8,
GLX_GREEN_SIZE, 8,
GLX_BLUE_SIZE, 8,
GLX_SAMPLE_BUFFERS, 0,
GLX_SAMPLES, 0,
None
};
XVisualInfo* visual = glXChooseVisual(display, screenId, glxAttribs);
XSetWindowAttributes windowAttribs;
windowAttribs.border_pixel = BlackPixel(display, screenId);
windowAttribs.background_pixel = WhitePixel(display, screenId);
windowAttribs.override_redirect = True;
windowAttribs.colormap = XCreateColormap(display, RootWindow(display, screenId), visual->visual, AllocNone);
windowAttribs.event_mask = ExposureMask;
window = XCreateWindow(display, RootWindow(display, screenId), 0, 0, 320, 200, 0, visual->depth, InputOutput, visual->visual, CWBackPixel | CWColormap | CWBorderPixel | CWEventMask, &windowAttribs);
This is how it's done on Windows:
const WindowsWindow* pWin32Window = (const WindowsWindow*)pOwnerWindow;
HWND windowHandle = pWin32Window->GetWin32WindowHandle();
HDC windowDeviceContext = pWin32Window->GetWin32WindowDeviceContext();
/*
* Create pixel format
*/
PIXELFORMATDESCRIPTOR pfd = { sizeof(pfd),1 };
memset(&pfd, 0, sizeof(PIXELFORMATDESCRIPTOR));
pfd.nSize = sizeof(PIXELFORMATDESCRIPTOR);
pfd.dwFlags = PFD_DRAW_TO_WINDOW | PFD_SUPPORT_OPENGL | PFD_DOUBLEBUFFER;
pfd.iPixelType = PFD_TYPE_RGBA;
pfd.nVersion = 1;
pfd.cColorBits = OpenGLDeviceUtilsWin32::GetColorBits(desc.SwapchainBufferFormat);
pfd.cAlphaBits = OpenGLDeviceUtilsWin32::GetAlphaBits(desc.SwapchainBufferFormat);
pfd.cDepthBits = OpenGLDeviceUtilsWin32::GetDepthBits(desc.SwapchainDepthStencilBufferFormat);
pfd.cStencilBits = OpenGLDeviceUtilsWin32::GetStencilBits(desc.SwapchainDepthStencilBufferFormat);
pfd.cAuxBuffers = 3;
pfd.iLayerType = PFD_MAIN_PLANE;
const int pixelFormatIndex = ChoosePixelFormat(windowDeviceContext, &pfd);
ASSERT(pixelFormatIndex != 0,"OpenGLDevice","Invalid pixel format");
ASSERT(SetPixelFormat(windowDeviceContext, pixelFormatIndex, &pfd), "OpenGLDevice", "Win32 window rejected the specified pixel format");
HGLRC tempContext = wglCreateContext(windowDeviceContext);
ASSERT(tempContext != NULL, "OpenGLDevice", "Creation of wgl dummy context failed!");
wglMakeCurrent(windowDeviceContext, tempContext);
PFNWGLCREATECONTEXTATTRIBSARBPROC wglCreateContextAttribsARB = NULL;
wglCreateContextAttribsARB = (PFNWGLCREATECONTEXTATTRIBSARBPROC)wglGetProcAddress("wglCreateContextAttribsARB");
ASSERT(wglCreateContextAttribsARB != NULL, "OpenGLDevice", "WGL get proc address failed!");
But I would expect something like this:
Create the Xlib window
Check the GLX attribs to see whether the window can support that pixel format
Create the GLX context using that pixel format
But instead it goes as:
Create the window with your specific GLX attribs
Create the GLX context
I wonder if there is a way to create the window without letting Xlib know we are going to use it for OpenGL, and to implement the OpenGL-specific setup after the window creation process.
"I'm trying to create an OpenGL context after the Xlib window creation."
I don't really see your problem. On Win32 the usual stanza is:
Create window
Select pixelformat
Set pixelformat on window
Get HDC from window and use it to create context
On GLX the stanza is:
Select visual for window
Create window that's compatible with visual
Create OpenGL context with the selected visual
Take note that in both Win32 and GLX there is no hard tie between the window and the OpenGL context. As long as the pixelformat/visual of an OpenGL context and a window are compatible, you can use them with each other.
The only difference between GLX and Win32 is how the pixelformat/visual is communicated to OpenGL context creation. In GLX it's done directly; in Win32 the pixelformat is communicated in a rather convoluted way by means of the HDC of a window. And take note that in order to obtain a modern OpenGL context you actually have to go the route of OpenGL context creation with attributes, which works exactly the same in Win32 and GLX (with Win32 needing the added step of creating a dummy OpenGL context first in order to obtain the function pointer to wglCreateContextAttribsARB, which is directly available in GLX).
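For reference, a minimal sketch of that attribute-based path in GLX, assuming a Display and a GLXFBConfig obtained as in the code above (error handling mostly omitted):

#include <GL/glx.h>
#include <GL/glxext.h>

// Sketch: create a modern core-profile context directly; unlike WGL, the
// loader entry point can be fetched without first creating a dummy context.
static GLXContext create_core_context(Display *dpy, GLXFBConfig fbconfig)
{
    const int ctx_attribs[] = {
        GLX_CONTEXT_MAJOR_VERSION_ARB, 3,
        GLX_CONTEXT_MINOR_VERSION_ARB, 3,
        GLX_CONTEXT_PROFILE_MASK_ARB, GLX_CONTEXT_CORE_PROFILE_BIT_ARB,
        None
    };
    PFNGLXCREATECONTEXTATTRIBSARBPROC glXCreateContextAttribsARB =
        (PFNGLXCREATECONTEXTATTRIBSARBPROC)glXGetProcAddressARB(
            (const GLubyte *)"glXCreateContextAttribsARB");
    if (!glXCreateContextAttribsARB)
        return NULL;
    return glXCreateContextAttribsARB(dpy, fbconfig, NULL, True, ctx_attribs);
}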
Honestly, I do not understand your motivation.
Many implementations like GLFW get the Visual from the GLX/EGL APIs (including glXChooseFBConfig) and use it when creating a window. The GLX/EGL part can be abstracted away by writing wrappers, so I don't see the need to go to the trouble of avoiding it.
That being said, it is still possible to avoid it, so I wrote the sample code for you.
// To build, execute the command below.
// c++ -Wall -Wextra -std=c++17 -o main main.cpp -lX11 -lGLX -lGL
#include <cstdio>
#include <chrono>
#include <thread>
#include <sys/time.h>
#include <unistd.h>
#include <X11/Xlib.h>
#include <X11/Xutil.h>
#include <GL/glx.h>
#include <GL/glxext.h>
#define OGL_MAJOR_VERSION 3
#define OGL_MINOR_VERSION 3
#define WINDOW_WIDTH 640
#define WINDOW_HEIGHT 360
#define FPS 60
static double get_time() {
static timeval s_tTimeVal;
gettimeofday(&s_tTimeVal, NULL);
double time = s_tTimeVal.tv_sec * 1000.0; // sec to ms
time += s_tTimeVal.tv_usec / 1000.0; // us to ms
return time;
}
struct TestWindowConfig {
int width = 640;
int height = 360;
};
class TestWindow final {
public:
explicit TestWindow(const TestWindowConfig& config) : m_config(config) {}
virtual ~TestWindow() {
if (m_display) {
if (m_xid) {
XDestroyWindow(m_display, m_xid);
}
XCloseDisplay(m_display);
}
}
bool create() {
m_display = XOpenDisplay(NULL);
if (!m_display) {
fprintf(stderr, "XOpenDisplay() failed\n");
return false;
}
XSetWindowAttributes x_attr;
x_attr.override_redirect = False;
x_attr.border_pixel = 0;
m_xid = XCreateWindow(m_display, DefaultRootWindow(m_display), 0, 0, m_config.width,
m_config.height, 0, CopyFromParent, InputOutput, CopyFromParent,
CWOverrideRedirect | CWBorderPixel, &x_attr);
if (!m_xid) {
fprintf(stderr, "XOpenDisplay() failed\n");
return false;
}
XStoreName(m_display, m_xid, "X11-GLX Sample");
XMapWindow(m_display, m_xid);
m_wm_delete_window = XInternAtom(m_display, "WM_DELETE_WINDOW", True);
XSetWMProtocols(m_display, m_xid, &m_wm_delete_window, 1);
return true;
}
void show() const {
if (m_display && m_xid) {
XMapRaised(m_display, m_xid);
}
}
bool poll_events() {
if (!m_display) {
fprintf(stderr, "Display is null\n");
return false;
}
while (XPending(m_display) > 0) {
XEvent ev;
XNextEvent(m_display, &ev);
if (ev.type == ClientMessage) {
if ((Atom)ev.xclient.data.l[0] == m_wm_delete_window) {
m_should_close = true;
}
}
}
return true;
}
bool should_close() const { return m_should_close; }
Display* display() const { return m_display; }
Window xid() const { return m_xid; }
int screen_id() const { return DefaultScreen(m_display); }
private:
TestWindowConfig m_config;
Display* m_display = nullptr;
Window m_xid = 0;
Atom m_wm_delete_window;
bool m_should_close = false;
};
class TestGLContext final {
public:
explicit TestGLContext() = default;
virtual ~TestGLContext() = default;
bool create(const TestWindow& window) {
// clang-format off
int visual_attr[] = {
GLX_DRAWABLE_TYPE, GLX_WINDOW_BIT,
GLX_RENDER_TYPE, GLX_RGBA_BIT,
GLX_RED_SIZE, 8,
GLX_GREEN_SIZE, 8,
GLX_BLUE_SIZE, 8,
GLX_ALPHA_SIZE, 8,
GLX_DEPTH_SIZE, 0,
GLX_STENCIL_SIZE, 0,
GLX_DOUBLEBUFFER, True,
None
};
// clang-format on
int cfg_count;
auto fb_configs =
glXChooseFBConfig(window.display(), window.screen_id(), visual_attr, &cfg_count);
if (!fb_configs || (cfg_count < 1)) {
fprintf(stderr, "glXChooseFBConfig(): No config found\n");
return false;
}
PFNGLXCREATECONTEXTATTRIBSARBPROC glXCreateContextAttribsARB =
(PFNGLXCREATECONTEXTATTRIBSARBPROC)glXGetProcAddressARB(
(const GLubyte*)"glXCreateContextAttribsARB");
if (!glXCreateContextAttribsARB) {
fprintf(stderr, "Failed to load glXCreateContextAttribsARB\n");
return false;
}
// clang-format off
int ctx_attr[] = {
GLX_CONTEXT_PROFILE_MASK_ARB, GLX_CONTEXT_CORE_PROFILE_BIT_ARB,
GLX_CONTEXT_MAJOR_VERSION_ARB, OGL_MAJOR_VERSION,
GLX_CONTEXT_MINOR_VERSION_ARB, OGL_MINOR_VERSION,
0, 0
};
// clang-format on
m_ctx = glXCreateContextAttribsARB(window.display(), fb_configs[0], NULL, True, ctx_attr);
if (!m_ctx) {
fprintf(stderr, "Failed to create GLX Context\n");
return false;
}
m_should_destroy = true;
return true;
}
bool make_current(const TestWindow& window) {
if (glXMakeCurrent(window.display(), window.xid(), m_ctx) != True) {
fprintf(stderr, "glXMakeCurrent() Failed\n");
return false;
}
return true;
}
void swap_buffers(const TestWindow& window) { glXSwapBuffers(window.display(), window.xid()); }
static void* get_proc_address(const char* name) {
return reinterpret_cast<void*>(glXGetProcAddress((const GLubyte*)name));
}
void destroy(const TestWindow& window) {
glXDestroyContext(window.display(), m_ctx);
m_should_destroy = false;
}
bool should_destroy() const { return m_should_destroy; }
private:
GLXContext m_ctx;
bool m_should_destroy = false;
};
int main() {
// 1. Prepare Window and OpenGL Context
// In normal design, TestWindow should have its GLContext within itself.
// But, in order to fit your needs, I separated these explicitly.
TestWindowConfig config{.width = WINDOW_WIDTH, .height = WINDOW_HEIGHT};
TestWindow window{config};
TestGLContext glctx{};
if (!window.create()) {
return 1;
}
if (!glctx.create(window) || !glctx.make_current(window)) {
if (glctx.should_destroy()) {
glctx.destroy(window);
}
return 1;
}
// 2. Load OpenGL functions
// In normal cases, you are always recommended to use loader libraries like glad.
// In this example, I omitted the loading part.
//
// if (!gladLoadGLLoader((GLADloadproc)glctx.get_proc_address)) {
// fprintf(stderr, "Failed to load OpenGL functions\n");
// return 1;
// }
// 3. Show the window and call OpenGL APIs
// As above, there are various problems in this implementation for real use.
window.show();
double last_time = get_time();
while (true) {
if (!window.poll_events() || window.should_close()) {
break;
}
auto delta_ms = get_time() - last_time;
if (auto diff = (1000.0 / FPS) - delta_ms; diff > 0) {
std::this_thread::sleep_for(std::chrono::milliseconds((long)diff));
continue;
}
// fprintf(stderr, "delta: %f\n", delta_ms);
glViewport(0, 0, config.width, config.height);
glClearColor(0.0f, 0.0f, 1.0f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT);
glctx.swap_buffers(window);
last_time = get_time();
}
glctx.destroy(window);
return 0;
}

Multithread decoding of Video PID of Mpeg2Ts using FFMPEG

I'm working on an app in VC++ to display the video frames of a video PID of an MPEG2-TS stream using FFmpeg, and I need to do the same for other MPEG2-TS streams simultaneously using multiple threads. My source code is:
int main (int argc, char* argv[])
{
av_register_all();
avformat_network_init();
pFormatCtx = avformat_alloc_context();
if(avformat_open_input(&pFormatCtx,filepath,NULL,NULL)!=0){
printf("Couldn't open input stream.\n");
return -1;
}
if(avformat_find_stream_info(pFormatCtx,NULL)<0){
printf("Couldn't find stream information.\n");
return -1;
}
videoindex=-1;
for(i=0; i<pFormatCtx->nb_streams; i++)
if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO){
videoindex=i;
break;
}
if(videoindex==-1){
printf("Didn't find a video stream.\n");
return -1;
}
pCodecCtx=pFormatCtx->streams[videoindex]->codec;
pCodec=avcodec_find_decoder(pCodecCtx->codec_id);
if(pCodec==NULL){
printf("Codec not found.\n");
return -1;
}
if(avcodec_open2(pCodecCtx, pCodec,NULL)<0){
printf("Could not open codec.\n");
return -1;
}
pFrame=av_frame_alloc();
pFrameYUV=av_frame_alloc();
out_buffer=(uint8_t *)av_malloc(avpicture_get_size(PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height));
avpicture_fill((AVPicture *)pFrameYUV, out_buffer, PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height);
packet=(AVPacket *)av_malloc(sizeof(AVPacket));
//Output Info-----------------------------
printf("--------------- File Information ----------------\n");
av_dump_format(pFormatCtx,0,filepath,0);
printf("-------------------------------------------------\n");
img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt,
pCodecCtx->width, pCodecCtx->height, PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL);
#if OUTPUT_YUV420P
fp_yuv=fopen("output.yuv","wb+");
#endif
if(SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)) {
printf( "Could not initialize SDL - %s\n", SDL_GetError());
return -1;
}
screen_w = pCodecCtx->width;
screen_h = pCodecCtx->height;
//SDL 2.0 Support for multiple windows
screen = SDL_CreateWindow("Simplest ffmpeg player's Window", SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED,
screen_w, screen_h, SDL_WINDOW_OPENGL);
if(!screen) {
printf("SDL: could not create window - exiting:%s\n",SDL_GetError());
return -1;
}
sdlRenderer = SDL_CreateRenderer(screen, -1, 0);
//IYUV: Y + U + V (3 planes)
//YV12: Y + V + U (3 planes)
sdlTexture = SDL_CreateTexture(sdlRenderer, SDL_PIXELFORMAT_IYUV, SDL_TEXTUREACCESS_STREAMING,pCodecCtx->width,pCodecCtx->height);
sdlRect.x=0;
sdlRect.y=0;
sdlRect.w=screen_w;
sdlRect.h=screen_h;
//SDL End----------------------
BYTE buffer [4] ;
int nSize = 0 ;
int nByteCnt = 0 ;
int nPreviuosPos = 0 ;
mpgfile = fopen ("D:\\00_Projects\\Farzan II\\SampleData\\Yahsat1996V_N_PID(2101).pes", "rb");
while(av_read_frame(pFormatCtx, packet)>=0 /*&& nSize > 0*/)
{
if(packet->stream_index==videoindex)
{
ret = avcodec_decode_video2(pCodecCtx, pFrame, &got_picture, packet);
if(ret < 0)
{
printf("Decode Error.\n");
return -1;
}
if(got_picture)
{
sws_scale(img_convert_ctx, (const uint8_t* const*)pFrame->data, pFrame->linesize, 0, pCodecCtx->height,
pFrameYUV->data, pFrameYUV->linesize);
#if OUTPUT_YUV420P
y_size=pCodecCtx->width*pCodecCtx->height;
fwrite(pFrameYUV->data[0],1,y_size,fp_yuv); //Y
fwrite(pFrameYUV->data[1],1,y_size/4,fp_yuv); //U
fwrite(pFrameYUV->data[2],1,y_size/4,fp_yuv); //V
#endif
//SDL---------------------------
#if 0
SDL_UpdateTexture( sdlTexture, NULL, pFrameYUV->data[0], pFrameYUV->linesize[0] );
#else
SDL_UpdateYUVTexture(sdlTexture, &sdlRect,
pFrameYUV->data[0], pFrameYUV->linesize[0],
pFrameYUV->data[1], pFrameYUV->linesize[1],
pFrameYUV->data[2], pFrameYUV->linesize[2]);
#endif
SDL_RenderClear( sdlRenderer );
SDL_RenderCopy( sdlRenderer, sdlTexture, NULL, &sdlRect);
SDL_RenderPresent( sdlRenderer );
//SDL End-----------------------
//Delay 40ms
SDL_Delay(40);
}
}
av_free_packet(packet);
}
//flush decoder
//FIX: Flush Frames remained in Codec
packet->data = NULL; // an empty packet tells avcodec_decode_video2 to drain its buffered frames
packet->size = 0;
while (1) {
ret = avcodec_decode_video2(pCodecCtx, pFrame, &got_picture, packet);
if (ret < 0)
break;
if (!got_picture)
break;
sws_scale(img_convert_ctx, (const uint8_t* const*)pFrame->data, pFrame->linesize, 0, pCodecCtx->height,
pFrameYUV->data, pFrameYUV->linesize);
#if OUTPUT_YUV420P
int y_size=pCodecCtx->width*pCodecCtx->height;
fwrite(pFrameYUV->data[0],1,y_size,fp_yuv); //Y
fwrite(pFrameYUV->data[1],1,y_size/4,fp_yuv); //U
fwrite(pFrameYUV->data[2],1,y_size/4,fp_yuv); //V
#endif
//SDL---------------------------
SDL_UpdateTexture( sdlTexture, &sdlRect, pFrameYUV->data[0], pFrameYUV->linesize[0] );
SDL_RenderClear( sdlRenderer );
SDL_RenderCopy( sdlRenderer, sdlTexture, NULL, &sdlRect);
SDL_RenderPresent( sdlRenderer );
//SDL End-----------------------
//Delay 40ms
SDL_Delay(40);
}
sws_freeContext(img_convert_ctx);
#if OUTPUT_YUV420P
fclose(fp_yuv);
#endif
SDL_Quit();
av_frame_free(&pFrameYUV);
av_frame_free(&pFrame);
avcodec_close(pCodecCtx);
avformat_close_input(&pFormatCtx);
return 0;
}
It works well when I call it in one thread, but after calling this function from multiple threads simultaneously, an access violation occurs. Can anyone guide me to a solution?
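A likely culprit (not verifiable from the snippet alone, since the declarations of pFormatCtx and friends are not shown) is that the FFmpeg contexts are globals shared by every thread. A minimal sketch of the per-thread structure, assuming a hypothetical decode_stream() that is the body of main() above with all of its FFmpeg/SDL state moved into locals:

#include <pthread.h>
#include <libavformat/avformat.h>

/* Hypothetical: main() above, refactored so pFormatCtx, pCodecCtx, pFrame,
 * packet, etc. are locals rather than globals. */
int decode_stream(const char *filepath);

static void *decode_thread(void *arg)
{
    decode_stream((const char *)arg);
    return NULL;
}

int main(void)
{
    av_register_all();        /* process-wide init: call once, before any threads start */
    avformat_network_init();
    pthread_t t1, t2;
    pthread_create(&t1, NULL, decode_thread, (void *)"first.ts");  /* example paths */
    pthread_create(&t2, NULL, decode_thread, (void *)"second.ts");
    pthread_join(t1, NULL);
    pthread_join(t2, NULL);
    return 0;
}

Note that the SDL rendering calls would also need per-thread windows or a single dedicated rendering thread; the sketch only illustrates isolating the decoder state.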

Filesystem has been set read-only for clusters badly computed error

My SD card runs for a long time in an embedded Linux system, but sometimes the filesystem gets set read-only and the messages below are printed:
clusters badly computed (587 != 531)
FAT: Filesystem panic (dev hda6)
I found the relevant source code in the Linux kernel, shown below. Can anyone explain why this error occurs? Thanks very much.
/*
* fat_chain_add() adds a new cluster to the chain of clusters represented
* by inode.
*/
int fat_chain_add(struct inode *inode, int new_dclus, int nr_cluster)
{
struct super_block *sb = inode->i_sb;
struct msdos_sb_info *sbi = MSDOS_SB(sb);
int ret, new_fclus, last;
/*
* We must locate the last cluster of the file to add this new
* one (new_dclus) to the end of the link list (the FAT).
*/
last = new_fclus = 0;
if (MSDOS_I(inode)->i_start) {
int fclus, dclus;
ret = fat_get_cluster(inode, FAT_ENT_EOF, &fclus, &dclus);
if (ret < 0)
return ret;
new_fclus = fclus + 1;
last = dclus;
}
/* add new one to the last of the cluster chain */
if (last) {
struct fat_entry fatent;
fatent_init(&fatent);
ret = fat_ent_read(inode, &fatent, last);
if (ret >= 0) {
int wait = inode_needs_sync(inode);
ret = fat_ent_write(inode, &fatent, new_dclus, wait);
fatent_brelse(&fatent);
}
if (ret < 0)
return ret;
/*
* FIXME:Although we can add this cache, fat_cache_add() is
* assuming to be called after linear search with fat_cache_id.
*/
// fat_cache_add(inode, new_fclus, new_dclus);
} else {
MSDOS_I(inode)->i_start = new_dclus;
MSDOS_I(inode)->i_logstart = new_dclus;
/*
* Since generic_write_sync() synchronizes regular files later,
* we sync here only directories.
*/
if (S_ISDIR(inode->i_mode) && IS_DIRSYNC(inode)) {
ret = fat_sync_inode(inode);
if (ret)
return ret;
} else
mark_inode_dirty(inode);
}
if (new_fclus != (inode->i_blocks >> (sbi->cluster_bits - 9))) {
fat_fs_error(sb, "clusters badly computed (%d != %llu)",
new_fclus,
(llu)(inode->i_blocks >> (sbi->cluster_bits - 9)));
fat_cache_inval_inode(inode);
}
inode->i_blocks += nr_cluster << (sbi->cluster_bits - 9);
return 0;
}
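For reference, the message comes from the final check above: the cluster index found by walking the FAT chain (new_fclus) is compared with the cluster count implied by the inode's 512-byte block count, and 587 != 531 means the on-disk chain and the inode's recorded size disagree, which usually indicates corruption (e.g. an unclean power-off mid-write). The arithmetic of that check, as a standalone sketch with example numbers:

#include <stdio.h>

int main(void)
{
    /* Example numbers only: a 4 KiB cluster is 8 sectors of 512 bytes,
     * so cluster_bits = 12 and (cluster_bits - 9) converts between
     * 512-byte block counts and cluster counts. */
    unsigned int cluster_bits = 12;
    unsigned long long i_blocks = 531ULL << (cluster_bits - 9); /* inode says 531 clusters */
    unsigned int new_fclus = 587;                               /* but the chain walk found 587 */

    if (new_fclus != (i_blocks >> (cluster_bits - 9)))
        printf("clusters badly computed (%u != %llu)\n",
               new_fclus, i_blocks >> (cluster_bits - 9));
    return 0;
}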

How to set dhrystone benchmark clock rate for emulator?

I usually use sysconf(_SC_CLK_TCK) on Linux to get the clock rate (which always returns 100). The problem is that I want to use the Dhrystone benchmark with Atari MiNT (TOS). I installed MiNT on an emulator called ARanyM. I used sysconf(_SC_CLK_TCK) there as well, but it returns something like 4294967295 (which is all 1-bits in a 32-bit value).
Does anybody have any suggestions?
With Linux I use:
#include <stdio.h>
#include <time.h>
double theseSecs = 0.0;
double startSecs = 0.0;
double secs;
double CPUsecs = 0.0;
double CPUutilisation = 0.0;
double answer = 0;
clock_t starts;
void start_CPU_time()
{
starts = clock();
return;
}
void end_CPU_time()
{
CPUsecs = (double)(clock() - starts)/(double)CLOCKS_PER_SEC;
return;
}
struct timespec tp1;
void getSecs()
{
clock_gettime(CLOCK_REALTIME, &tp1);
theseSecs = tp1.tv_sec + tp1.tv_nsec / 1e9;
return;
}
void start_time()
{
getSecs();
startSecs = theseSecs;
return;
}
void end_time()
{
getSecs();
secs = theseSecs - startSecs;
return;
}
void calculate()
{
int i, j;
for (i = 1; i < 1000001; i++)
{
for (j = 1; j < 1000001; j++)
{
answer = answer * (float)i / 1000000.0;
}
}
}
void main()
{
start_time();
start_CPU_time();
calculate();
end_time();
end_CPU_time();
CPUutilisation = CPUsecs / secs * 100.0; // CPU time as a percentage of elapsed time
printf("\n Answer %8.3f, Elapsed Time %7.4f, CPU Time %7.4f, CPU Utilisation %8.4f\n", answer, secs, CPUsecs, CPUutilisation);
}
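As for the sysconf(_SC_CLK_TCK) value: 4294967295 is exactly (unsigned)-1, i.e. sysconf() is returning -1 and the result is being treated as an unsigned 32-bit integer. A minimal check, assuming the target libc behaves POSIX-like:

#include <errno.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
    errno = 0;
    long hz = sysconf(_SC_CLK_TCK);
    if (hz < 0) {
        /* -1 with errno set means an error; -1 with errno still 0 means
         * the value is indeterminate on this system */
        perror("sysconf(_SC_CLK_TCK)");
        return 1;
    }
    printf("clock ticks per second: %ld\n", hz);
    return 0;
}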

Uinput and Raspberry Pi

I tried to ask this question on the Raspberry Pi forums, but I have received no responses at all. I thought I might query the minds of the StackOverflow community that has been so helpful in the past.
I'm writing a userspace driver for the Raspberry Pi (which may be ported to other platforms later) that makes use of the bcm2835 library (GPIO) and uinput (Linux user-input virtual devices). I need to read GPIO pins and translate their values into simulated keypresses on a virtual keyboard. The GPIO part has been completed, and the translation part is also completed. Unfortunately, the virtual-keyboard part has not been solved. Uinput refuses to cooperate.
Now, the exact same code works perfectly on a Debian desktop machine. The evdev and uinput modules are required, both of which were loaded in all test cases. On the desktop, inputs can be triggered; on the Raspberry Pi, however, I can verify that the GPIO subsystem has registered the input, but the uinput events do not trigger. Does anyone have a lead on what I might do?
Thank you very much, if you need any information, logs, or otherwise, please let me know and I will post them as soon as I can.
This is a complete solution that works for me. I have a custom-made keypad, and these are the keys I have defined. Here is the link to the original PDF I used.
Of course you can define whatever key you want; just add it to the array.
Note: this code only works with elevated permissions.
// Headers this program needs (assumed; not shown in the original answer):
#include <fcntl.h>
#include <linux/uinput.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#define allowed_KEYS_size 4 // four keys are defined in init_keys() below

int allowed_keys[allowed_KEYS_size][2] = {0};

// forward declarations, since main() is defined first
long int emit(int fd, int type, int code, int val);
int open_uinput();
int getKeyEVT(int k);
void init_keys();
void main()
{
init_keys();
int fd = open_uinput();
int key_evt = getKeyEVT(49); // ASCII code for 1
// simulate key press and key release
emit(fd, EV_KEY, key_evt, 1);
emit(fd, EV_SYN, SYN_REPORT, 0);
emit(fd, EV_KEY, key_evt, 0);
emit(fd, EV_SYN, SYN_REPORT, 0);
}
long int emit(int fd, int type, int code, int val)
{
struct input_event ie;
ie.type = type;
ie.code = code;
ie.value = val;
/* timestamp values below are ignored */
ie.time.tv_sec = 0;
ie.time.tv_usec = 0;
long int y = write(fd, &ie, sizeof(ie));
return y;
}
int open_uinput()
{
int fdui = open("/dev/uinput", O_WRONLY | O_NONBLOCK);
if (fdui < 0)
{
printf("uinput fd creation failed!\n");
exit(EXIT_FAILURE);
}
ioctl(fdui, UI_SET_EVBIT, EV_KEY);
ioctl(fdui, UI_SET_EVBIT, EV_SYN); //added by behzad
for (int i = 0; i < allowed_KEYS_size; i++)
ioctl(fdui, UI_SET_KEYBIT, allowed_keys[i][1]);
struct uinput_setup usetup;
memset(&usetup, 0, sizeof(usetup));
usetup.id.bustype = BUS_USB;
usetup.id.vendor = 0x1234; /* sample vendor */
usetup.id.product = 0x5678; /* sample product */
strcpy(usetup.name, "My Keypad. Ver 1.1");
ioctl(fdui, UI_DEV_SETUP, &usetup);
ioctl(fdui, UI_DEV_CREATE);
sleep(2);
return fdui;
}
int getKeyEVT(int k)
{
for (int i = 0; i < allowed_KEYS_size; i++)
{
if (allowed_keys[i][0] == k)
return allowed_keys[i][1];
}
return -1;
}
void init_keys()
{
// Reference:
// https://www.alt-codes.net/arrow_alt_codes.php
// /usr/include/linux/input-event-codes.h
allowed_keys[0][0] = 48; //ASCII ---> 0
allowed_keys[0][1] = KEY_0; //LINUX
allowed_keys[1][0] = 49; //ASCII
allowed_keys[1][1] = KEY_1; //LINUX
allowed_keys[2][0] = 50; //ASCII
allowed_keys[2][1] = KEY_2; //LINUX
allowed_keys[3][0] = 51; //ASCII
allowed_keys[3][1] = KEY_3; //LINUX
}
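The virtual device can also be torn down explicitly when the program is done; a short sketch using the same descriptor returned by open_uinput():

// Explicitly remove the virtual device and release the descriptor.
void close_uinput(int fd)
{
    ioctl(fd, UI_DEV_DESTROY); // removes the virtual device from /dev/input
    close(fd);
}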
