How to store pieces in Gomoku - visual-c++

I am currently learning MFC and decided to make the game Gomoku. This is the code I have so far.
**mainframe.h**
class CMainFrame : public CFrameWnd
{
public:
CMainFrame();
protected:
DECLARE_DYNAMIC(CMainFrame)
public:
virtual ~CMainFrame();
#ifdef _DEBUG
virtual void AssertValid() const;
virtual void Dump(CDumpContext& dc) const;
#endif
protected:
DECLARE_MESSAGE_MAP()
void DrawBoard(CDC* pDC);
int m_nNextChar;
int board[15][15];
static const int EMPTY = 0, WHITE = 1, BLACK = 2;
public:
afx_msg void OnLButtonDown(UINT nFlags, CPoint point);
afx_msg void OnRButtonDown(UINT nFlags, CPoint point);
afx_msg void OnPaint();
};
**mainframe.cpp**
#include "stdafx.h"
#include "01.win32tomfc.h"
#include "MainFrm.h"
#ifdef _DEBUG
#define new DEBUG_NEW
#endif
// CMainFrame
IMPLEMENT_DYNAMIC(CMainFrame, CFrameWnd)
BEGIN_MESSAGE_MAP(CMainFrame, CFrameWnd)
ON_WM_LBUTTONDOWN()
ON_WM_RBUTTONDOWN()
ON_WM_PAINT()
END_MESSAGE_MAP()
int diameter = 23;
int size = 40;
int xCod;
int yCod;
int xCodx;
int yCody;
// CMainFrame construction/destruction
CMainFrame::CMainFrame()
{
m_nNextChar = BLACK;
Create(NULL, _T("Generic Sample Application"));
CRect rect(0, 0, 700, 700);
CalcWindowRect(&rect);
SetWindowPos(NULL, 0, 0, rect.Width(), rect.Height(),
SWP_NOZORDER | SWP_NOMOVE | SWP_NOREDRAW);
}
CMainFrame::~CMainFrame()
{
}
// CMainFrame diagnostics
#ifdef _DEBUG
void CMainFrame::AssertValid() const
{
CFrameWnd::AssertValid();
}
void CMainFrame::Dump(CDumpContext& dc) const
{
CFrameWnd::Dump(dc);
}
#endif //_DEBUG
// CMainFrame message handlers
void CMainFrame::DrawBoard(CDC * pDC)
{
CPen pen(PS_SOLID, 1, RGB(0, 0, 0));
CPen* pOldPen = pDC->SelectObject(&pen);
for (int i = 1; i <= 16; i++) {
pDC->MoveTo(40 * i, 40);
pDC->LineTo(40 * i, 640);
pDC->MoveTo(40, 40 * i);
pDC->LineTo(640, 40 * i);
}
pDC->SelectObject(pOldPen);
}
void CMainFrame::OnLButtonDown(UINT nFlags, CPoint point)
{
CClientDC dc(this);
CPen pen(PS_SOLID, 1, RGB(0, 0, 0));
CPen* pOldPen = dc.SelectObject(&pen);
dc.SelectStockObject(BLACK_BRUSH);
xCod = (point.x + (size / 2)) / size;
xCod = (xCod * size) - diameter / 2;
yCod = (point.y + (size / 2)) / size;
yCod = (yCod * size) - diameter / 2;
xCodx = xCod + diameter;
yCody = yCod + diameter;
if (m_nNextChar != BLACK )
return;
else {
if (xCod > 20 && yCod <= 640 && xCodx < 655 && yCody > 40) {
dc.Ellipse(xCod, yCod, xCodx, yCody);
}
}
m_nNextChar = WHITE;
CFrameWnd::OnLButtonDown(nFlags, point);
}
void CMainFrame::OnRButtonDown(UINT nFlags, CPoint point)
{
CClientDC dc(this);
CPen pen(PS_SOLID, 1, RGB(0, 0, 0));
CPen* pOldPen = dc.SelectObject(&pen);
dc.SelectStockObject(WHITE_BRUSH);
xCod = (point.x + (size / 2)) / size;
xCod = (xCod * size) - diameter / 2;
yCod = (point.y + (size / 2)) / size;
yCod = (yCod * size) - diameter / 2;
xCodx = xCod + diameter;
yCody = yCod + diameter;
if (m_nNextChar != WHITE)
return;
else {
if (xCod > 20 && yCod <= 640 && xCodx < 655 && yCody > 40) {
dc.Ellipse(xCod, yCod, xCodx, yCody);
}
}
m_nNextChar = BLACK;
CFrameWnd::OnRButtonDown(nFlags, point);
}
void CMainFrame::OnPaint()
{
CPaintDC dc(this);
DrawBoard(&dc);
}
The code I have draws a 15 x 15 grid in DrawBoard() and draws the black and white pieces in OnLButtonDown and OnRButtonDown respectively. The problem is that when I run the program and click to draw a black piece and then a white piece, the white piece can be drawn over the black piece and vice versa. So I thought the best approach would be to create a two-dimensional array, board[15][15], that stores each piece when it is drawn, so that a different piece cannot be drawn over an existing one (am I on the right track here?). I tried, but I can't seem to figure out how to do it. I am not very good at programming and realize this might be easy, but some help would really be appreciated. Please explain how I would go about it the right way.
This is what I tried:
void CMainFrame::OnLButtonDown(UINT nFlags, CPoint point)
{
CClientDC dc(this);
CPen pen(PS_SOLID, 1, RGB(0, 0, 0));
CPen* pOldPen = dc.SelectObject(&pen);
dc.SelectStockObject(BLACK_BRUSH);
xCod = (point.x + (size / 2)) / size;
xCod = (xCod * size) - diameter / 2;
yCod = (point.y + (size / 2)) / size;
yCod = (yCod * size) - diameter / 2;
xCodx = xCod + diameter;
yCody = yCod + diameter;
if ((m_nNextChar != BLACK) && (board[xCod][yCod] = WHITE) )
return;
else {
if (xCod > 20 && yCod <= 640 && xCodx < 655 && yCody > 40) {
dc.Ellipse(xCod, yCod, xCodx, yCody);
board[xCod][yCod] = BLACK;
}
}
m_nNextChar = WHITE;
CFrameWnd::OnLButtonDown(nFlags, point);
}
void CMainFrame::OnRButtonDown(UINT nFlags, CPoint point)
{
CClientDC dc(this);
CPen pen(PS_SOLID, 1, RGB(0, 0, 0));
CPen* pOldPen = dc.SelectObject(&pen);
dc.SelectStockObject(WHITE_BRUSH);
xCod = (point.x + (size / 2)) / size;
xCod = (xCod * size) - diameter / 2;
yCod = (point.y + (size / 2)) / size;
yCod = (yCod * size) - diameter / 2;
xCodx = xCod + diameter;
yCody = yCod + diameter;
if (m_nNextChar != WHITE && (board[xCod][yCod] = BLACK))
return;
else {
if (xCod > 20 && yCod <= 640 && xCodx < 655 && yCody > 40) {
dc.Ellipse(xCod, yCod, xCodx, yCody);
board[xCod][yCod] = WHITE;
}
}
m_nNextChar = BLACK;
CFrameWnd::OnRButtonDown(nFlags, point);
}

You should do all the drawing in OnPaint. Don't draw in other functions such as OnLButtonDown; instead, record the necessary information in OnLButtonDown and call Invalidate, which will repaint the window.
Here is an example. For simplicity, I created a structure info and a two-dimensional array data. data stores all the information for each cell, that is, the rectangle and the color. You initialize data once, and then paint based on the information in data.
#include <vector>
class CMainFrame : public CFrameWnd
{
...
struct info
{
CRect rect;
int color;
};
std::vector<std::vector<info>> data;
};
CMainFrame::CMainFrame()
{
...
data.resize(15);
for(int i = 0; i < data.size(); i++)
data[i].resize(15);
int xoffset = 20;
int yoffset = 20;
for(int row = 0; row < 15; row++)
{
for(int col = 0; col < 15; col++)
{
data[row][col].rect.SetRect(0, 0, size + 1, size + 1);
data[row][col].rect.MoveToXY(xoffset + row * size, yoffset + col * size);
}
}
}
void CMainFrame::OnLButtonDown(UINT nFlags, CPoint point)
{
CFrameWnd::OnLButtonDown(nFlags, point);
for(int row = 0; row < 15; row++)
{
for(int col = 0; col < 15; col++)
{
if(data[row][col].rect.PtInRect(point))
{
if(data[row][col].color == EMPTY) // only place a piece on an empty cell
data[row][col].color = WHITE;
break;
}
}
}
Invalidate(FALSE);
}
void CMainFrame::OnRButtonDown(UINT nFlags, CPoint point)
{
CFrameWnd::OnRButtonDown(nFlags, point);
for(int row = 0; row < 15; row++)
{
for(int col = 0; col < 15; col++)
{
if(data[row][col].rect.PtInRect(point))
{
if(data[row][col].color == EMPTY) // only place a piece on an empty cell
data[row][col].color = BLACK;
break;
}
}
}
Invalidate(FALSE);
}
void CMainFrame::OnPaint()
{
CPaintDC dc(this);
CPen pen(PS_SOLID, 1, RGB(0, 0, 0));
dc.SelectObject(&pen);
CBrush white, black;
white.CreateSolidBrush(RGB(255, 255, 255));
black.CreateSolidBrush(RGB(0, 0, 0));
for(int row = 0; row < 15; row++)
{
for(int col = 0; col < 15; col++)
{
dc.Rectangle(data[row][col].rect);
if(data[row][col].color)
{
CBrush *oldbrush;
if(data[row][col].color == WHITE)
oldbrush = dc.SelectObject(&white);
else
oldbrush = dc.SelectObject(&black);
dc.Ellipse(data[row][col].rect);
dc.SelectObject(oldbrush);
}
}
}
}
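As an alternative to scanning every cell with PtInRect: because the cells form a uniform grid, a click can be mapped straight to a row/column index with integer division. A minimal sketch, assuming the same 20-pixel offset and 40-pixel cell size as above (HitTestCell is a hypothetical helper, not an MFC API):

// Hypothetical helper: map a client-area point to a board cell.
// Returns false when the point lies outside the 15 x 15 board.
bool HitTestCell(CPoint point, int& row, int& col)
{
    const int xoffset = 20, yoffset = 20, size = 40; // same layout as above
    if (point.x < xoffset || point.y < yoffset)
        return false;
    row = (point.x - xoffset) / size;
    col = (point.y - yoffset) / size;
    return row < 15 && col < 15;
}

OnLButtonDown would then check data[row][col].color once, set it if the cell is empty, and call Invalidate.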

Related

Display not rendering right color on screen imx6ul

I am using an imx6ul with Linux. I am trying to display red, green and blue on the LCD, one after another, 3 seconds apart.
Here is how I have written my app code.
static int setup_fb(struct fb_info *fb, int id)
{
int retval = EXIT_SUCCESS;
struct fb_fix_screeninfo fb_fix;
char path[16];
memset(fb, 0, sizeof(struct fb_info));
snprintf(&path[0], ARRAY_SIZE(path), "/dev/fb%d", id);
fb->id = id;
if ((fb->fd = open(path, O_RDWR, 0)) < 0) {
fprintf(stderr, "Unable to open %s\n", path);
return EXIT_FAILURE;
}
if ((retval = ioctl(fb->fd, FBIOBLANK, FB_BLANK_UNBLANK)) < 0) {
fprintf(stderr, "Unable to unblank %s\n", path);
return retval;
}
if ((retval = ioctl(fb->fd, FBIOGET_FSCREENINFO, &fb_fix)) < 0) {
fprintf(stderr, "Could not get fix screen info for %s\n", path);
return retval;
}
if ((retval = ioctl(fb->fd, FBIOGET_VSCREENINFO, &fb->screen_info)) < 0) {
fprintf(stderr, "Could not get screen info for %s\n", path);
return retval;
}
fprintf(stderr,"%s: screen info: %dx%d (virtual: %dx%d) # %d-bpp\n\n",
fb->name,
fb->screen_info.xres,
fb->screen_info.yres,
fb->screen_info.xres_virtual,
fb->screen_info.yres_virtual,
fb->screen_info.bits_per_pixel);
/* Map the device to memory*/
fb->size = fb->screen_info.xres_virtual * fb->screen_info.yres_virtual * fb->screen_info.bits_per_pixel / 8;
fb->fb = (unsigned short *)mmap(0, fb->size, PROT_READ | PROT_WRITE, MAP_SHARED, fb->fd, 0);
if (fb->fb == MAP_FAILED) {
fprintf(stderr, "Error: failed mapping framebuffer %s to memory!\n", fb->name);
return EXIT_FAILURE;
}
return retval;
}
static int fill_red(struct fb_info *fb)
{
int i;
int retval = 0;
fprintf(stderr,"#%s: Set colorspace to 16-bpp\n", fb->name);
fb->screen_info.bits_per_pixel = 16;
if ((retval = ioctl(fb->fd, FBIOPUT_VSCREENINFO, &fb->screen_info)) < 0)
{
fprintf(stderr, "#%s: Could not set screen info!\n", fb->name);
return retval;
}
fprintf(stderr,"#%s: Fill the screen in RED %d\n", fb->name, fb->size);
for (i = 0; i < fb->size / 2; i++)
{
fb->fb[i] = 0xF800;
}
if ((retval = ioctl(fb->fd, FBIO_WAITFORVSYNC, &fb->screen_info)) < 0)
{
fprintf(stderr, "#%s: FBIO_WAITFORVSYNC error!\n", fb->name);
return retval;
}
return retval;
}
int display_test(void)
{
int retval = EXIT_SUCCESS;
if ((retval = setup_fb(&fb0, 0)) < 0)
goto exit;
if ((retval = setup_fb(&fb1, 1)) < 0)
goto exit;
fb0.screen_info.bits_per_pixel = 16;
fb0.screen_info.yoffset = 0;
if ((retval = ioctl(fb0.fd, FBIOPUT_VSCREENINFO, &fb0.screen_info)) < 0) {
fprintf(stderr, "#%s: Could not set screen info!\n", fb0.name);
goto exit;
}
fb1.screen_info.bits_per_pixel = 16;
fb1.screen_info.yoffset = 0;
if ((retval = ioctl(fb1.fd, FBIOPUT_VSCREENINFO, &fb1.screen_info)) < 0) {
fprintf(stderr, "#%s: Could not set screen info!\n", fb1.name);
goto exit;
}
retval = fill_red(&fb0); //Fill LCD with red color
sleep(3);
retval = fill_green(&fb0); //Fill LCD with green color
sleep(3);
retval = fill_blue(&fb0); //Fill LCD with blue color
exit:
cleanup_fb(&fb0);
cleanup_fb(&fb1);
if (retval != EXIT_SUCCESS)
{
fprintf(stderr, "%s.\n", strerror(-retval));
return EXIT_FAILURE;
}
return EXIT_SUCCESS;
}
After I run the application, for the first 6 seconds the LCD displays a half-black screen, and the bottom half shows a mix of colors. After 6 seconds it displays blue on the full screen correctly.
My fill functions for red, green and blue are the same, except that each writes a different value into the framebuffer (see the note after the values):
fb->fb[i] = 0xF800; //red
fb->fb[i] = 0x07E0; //green
fb->fb[i] = 0x1F00;//blue
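As a point of reference (an assumption about the panel, not something taken from the code above): in the common RGB565 layout, red occupies the top 5 bits, green the middle 6 and blue the low 5, so pure blue would be 0x001F rather than 0x1F00. A small packing helper under that assumption:

/* Pack 8-bit R, G, B into an RGB565 pixel, assuming the usual bit layout. */
static unsigned short rgb565(unsigned char r, unsigned char g, unsigned char b)
{
    return (unsigned short)(((r & 0xF8) << 8) | ((g & 0xFC) << 3) | (b >> 3));
}
/* rgb565(255,0,0) == 0xF800, rgb565(0,255,0) == 0x07E0, rgb565(0,0,255) == 0x001F */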
Can somebody please let me know if there is a synchronization issue, or whether there is another ioctl I should use?
Regards,
Asma

ffmpeg record video plays too fast

I'm a college student and I am studying FFmpeg now.
I have written a program that records the desktop and audio ('virtual-audio-capturer') with FFmpeg, and I am now working on audio and video synchronization.
The problem I met is that the recorded video plays back too fast.
While looking for audio and video synchronization help on the Internet, I found a formula for calculating PTS:
pts = n * ((1 / timebase) / fps)
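To make the units concrete (a worked example, not from the question): with a stream time base of 1/90000 and fps = 30, the formula gives pts = n * (90000 / 30) = n * 3000, so each frame advances the clock by 3000 ticks of the time base.

// Sketch: PTS of frame n when time_base = 1/90000 and fps = 30.
// pts = n * ((1 / time_base) / fps) = n * (90000 / 30) = n * 3000
int64_t pts_for_frame(int64_t n) { return n * (90000 / 30); }

Note that the formula assumes frames really arrive at exactly fps; if the capture device delivers fewer frames per second than requested, the timestamps run ahead of real time and playback looks sped up.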
When I use this formula, I observe the following:
1. The higher the frame rate, the faster the video plays back.
2. The lower the frame rate, the slower the video plays back.
Also, I find that when the frame rate is 10, the playback speed is correct.
Why does this happen?
I have thought about this question for three days, and I really hope someone can help me solve it.
I really appreciate the help.
#include "stdafx.h"
#ifdef __cplusplus
extern "C"
{
#endif
#include "libavcodec/avcodec.h"
#include "libavformat/avformat.h"
#include "libswscale/swscale.h"
#include "libavdevice/avdevice.h"
#include "libavutil/audio_fifo.h"
#include "libavfilter/buffersink.h"
#include "libavfilter/buffersrc.h"
#include "libavutil/imgutils.h"
#include "libavutil/mathematics.h"
#include "libavutil/samplefmt.h"
#include "libavutil/time.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "libavutil/file.h"
#include "libavutil/mem.h"
#include "libavutil/frame.h"
#include "libavfilter/avfilter.h"
#include "libswresample/swresample.h"
#pragma comment(lib, "avcodec.lib")
#pragma comment(lib, "avformat.lib")
#pragma comment(lib, "avutil.lib")
#pragma comment(lib, "avdevice.lib")
#pragma comment(lib, "avfilter.lib")
#pragma comment(lib, "avfilter.lib")
#pragma comment(lib, "postproc.lib")
#pragma comment(lib, "swresample.lib")
#pragma comment(lib, "swscale.lib")
#ifdef __cplusplus
};
#endif
AVFormatContext *pFormatCtx_Video = NULL, *pFormatCtx_Audio = NULL, *pFormatCtx_Out = NULL;
AVCodecContext *outVideoCodecCtx = NULL;
AVCodecContext *outAudioCodecCtx = NULL;
AVStream *pVideoStream = NULL, *pAudioStream = NULL;
AVCodec *outAVCodec;
AVCodec *outAudioCodec;
AVCodecContext *pCodecCtx_Video;
AVCodec *pCodec_Video;
AVFifoBuffer *fifo_video = NULL;
AVAudioFifo *fifo_audio = NULL;
int VideoIndex, AudioIndex;
int codec_id;
CRITICAL_SECTION AudioSection, VideoSection;
SwsContext *img_convert_ctx;
int frame_size = 0;
uint8_t *picture_buf = NULL, *frame_buf = NULL;
bool bCap = true;
DWORD WINAPI ScreenCapThreadProc( LPVOID lpParam );
DWORD WINAPI AudioCapThreadProc( LPVOID lpParam );
int OpenVideoCapture()
{
AVInputFormat *ifmt=av_find_input_format("gdigrab");
AVDictionary *options = NULL;
av_dict_set(&options, "framerate", "60", NULL);
if(avformat_open_input(&pFormatCtx_Video, "desktop", ifmt, &options)!=0)
{
printf("Couldn't open input stream.(无法打开视频输入流)\n");
return -1;
}
if(avformat_find_stream_info(pFormatCtx_Video,NULL)<0)
{
printf("Couldn't find stream information.(无法获取视频流信息)\n");
return -1;
}
if (pFormatCtx_Video->streams[0]->codec->codec_type != AVMEDIA_TYPE_VIDEO)
{
printf("Couldn't find video stream information.(无法获取视频流信息)\n");
return -1;
}
pCodecCtx_Video = pFormatCtx_Video->streams[0]->codec;
pCodec_Video = avcodec_find_decoder(pCodecCtx_Video->codec_id);
if(pCodec_Video == NULL)
{
printf("Codec not found.(没有找到解码器)\n");
return -1;
}
if(avcodec_open2(pCodecCtx_Video, pCodec_Video, NULL) < 0)
{
printf("Could not open codec.(无法打开解码器)\n");
return -1;
}
av_dump_format(pFormatCtx_Video, 0, NULL, 0);
img_convert_ctx = sws_getContext(pCodecCtx_Video->width, pCodecCtx_Video->height, pCodecCtx_Video->pix_fmt,
pCodecCtx_Video->width, pCodecCtx_Video->height, PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL);
frame_size = avpicture_get_size(pCodecCtx_Video->pix_fmt, pCodecCtx_Video->width, pCodecCtx_Video->height);
fifo_video = av_fifo_alloc(30 * avpicture_get_size(AV_PIX_FMT_YUV420P, pCodecCtx_Video->width, pCodecCtx_Video->height));
return 0;
}
static char *dup_wchar_to_utf8(wchar_t *w)
{
char *s = NULL;
int l = WideCharToMultiByte(CP_UTF8, 0, w, -1, 0, 0, 0, 0);
s = (char *) av_malloc(l);
if (s)
WideCharToMultiByte(CP_UTF8, 0, w, -1, s, l, 0, 0);
return s;
}
int OpenAudioCapture()
{
AVInputFormat *pAudioInputFmt = av_find_input_format("dshow");
char * psDevName = dup_wchar_to_utf8(L"audio=virtual-audio-capturer");
if (avformat_open_input(&pFormatCtx_Audio, psDevName, pAudioInputFmt,NULL) < 0)
{
printf("Couldn't open input stream.(无法打开音频输入流)\n");
return -1;
}
if(avformat_find_stream_info(pFormatCtx_Audio,NULL)<0)
return -1;
if(pFormatCtx_Audio->streams[0]->codec->codec_type != AVMEDIA_TYPE_AUDIO)
{
printf("Couldn't find video stream information.(无法获取音频流信息)\n");
return -1;
}
AVCodec *tmpCodec = avcodec_find_decoder(pFormatCtx_Audio->streams[0]->codec->codec_id);
if(0 > avcodec_open2(pFormatCtx_Audio->streams[0]->codec, tmpCodec, NULL))
{
printf("can not find or open audio decoder!\n");
}
av_dump_format(pFormatCtx_Audio, 0, NULL, 0);
return 0;
}
int OpenOutPut()
{
AVStream *pVideoStream = NULL, *pAudioStream = NULL;
const char *outFileName = "test.mp4";
avformat_alloc_output_context2(&pFormatCtx_Out, NULL, NULL, outFileName);
if (pFormatCtx_Video->streams[0]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
{
VideoIndex = 0;
pVideoStream = avformat_new_stream(pFormatCtx_Out, NULL);
if (!pVideoStream)
{
printf("can not new stream for output!\n");
return -1;
}
outVideoCodecCtx = avcodec_alloc_context3(outAVCodec);
if ( !outVideoCodecCtx )
{
printf("Error : avcodec_alloc_context3()\n");
return -1;
}
//set codec context param
outVideoCodecCtx = pVideoStream->codec;
outVideoCodecCtx->codec_id = AV_CODEC_ID_MPEG4;
outVideoCodecCtx->width = pFormatCtx_Video->streams[0]->codec->width;
outVideoCodecCtx->height = pFormatCtx_Video->streams[0]->codec->height;
outVideoCodecCtx->time_base = pFormatCtx_Video->streams[0]->codec->time_base;
outVideoCodecCtx->pix_fmt = AV_PIX_FMT_YUV420P;
outVideoCodecCtx->codec_type = AVMEDIA_TYPE_VIDEO;
if (codec_id == AV_CODEC_ID_H264)
{
av_opt_set(outVideoCodecCtx->priv_data, "preset", "slow", 0);
}
outAVCodec = avcodec_find_encoder(AV_CODEC_ID_MPEG4);
if( !outAVCodec )
{
printf("\n\nError : avcodec_find_encoder()");
return -1;
}
if (pFormatCtx_Out->oformat->flags & AVFMT_GLOBALHEADER)
outVideoCodecCtx->flags |=CODEC_FLAG_GLOBAL_HEADER;
if ((avcodec_open2(outVideoCodecCtx,outAVCodec, NULL)) < 0)
{
printf("can not open the encoder\n");
return -1;
}
}
if(pFormatCtx_Audio->streams[0]->codec->codec_type == AVMEDIA_TYPE_AUDIO)
{
AVCodecContext *pOutputCodecCtx;
AudioIndex = 1;
pAudioStream = avformat_new_stream(pFormatCtx_Out, NULL);
pAudioStream->codec->codec = avcodec_find_encoder(pFormatCtx_Out->oformat->audio_codec);
pOutputCodecCtx = pAudioStream->codec;
pOutputCodecCtx->sample_rate = pFormatCtx_Audio->streams[0]->codec->sample_rate;
pOutputCodecCtx->channel_layout = pFormatCtx_Out->streams[0]->codec->channel_layout;
pOutputCodecCtx->channels = av_get_channel_layout_nb_channels(pAudioStream->codec->channel_layout);
if(pOutputCodecCtx->channel_layout == 0)
{
pOutputCodecCtx->channel_layout = AV_CH_LAYOUT_STEREO;
pOutputCodecCtx->channels = av_get_channel_layout_nb_channels(pOutputCodecCtx->channel_layout);
}
pOutputCodecCtx->sample_fmt = pAudioStream->codec->codec->sample_fmts[0];
AVRational time_base={1, pAudioStream->codec->sample_rate};
pAudioStream->time_base = time_base;
//audioCodecCtx->time_base = time_base;
pOutputCodecCtx->codec_tag = 0;
if (pFormatCtx_Out->oformat->flags & AVFMT_GLOBALHEADER)
pOutputCodecCtx->flags |= CODEC_FLAG_GLOBAL_HEADER;
if (avcodec_open2(pOutputCodecCtx, pOutputCodecCtx->codec, 0) < 0)
{
printf("编码器打开失败,退出程序\n");
return -1;
}
}
if (!(pFormatCtx_Out->oformat->flags & AVFMT_NOFILE))
{
if(avio_open(&pFormatCtx_Out->pb, outFileName, AVIO_FLAG_WRITE) < 0)
{
printf("can not open output file handle!\n");
return -1;
}
}
if(avformat_write_header(pFormatCtx_Out, NULL) < 0)
{
printf("can not write the header of the output file!\n");
return -1;
}
return 0;
}
int _tmain(int argc, _TCHAR* argv[])
{
av_register_all();
avdevice_register_all();
if (OpenVideoCapture() < 0)
{
return -1;
}
if (OpenAudioCapture() < 0)
{
return -1;
}
if (OpenOutPut() < 0)
{
return -1;
}
// int fps;
/*printf("输入帧率:");
scanf_s("%d",&fps);
if ( NULL == fps)
{
fps = 10;
}*/
InitializeCriticalSection(&VideoSection);
InitializeCriticalSection(&AudioSection);
AVFrame *picture = av_frame_alloc();
int size = avpicture_get_size(pFormatCtx_Out->streams[VideoIndex]->codec->pix_fmt,
pFormatCtx_Out->streams[VideoIndex]->codec->width, pFormatCtx_Out->streams[VideoIndex]->codec->height);
picture_buf = new uint8_t[size];
avpicture_fill((AVPicture *)picture, picture_buf,
pFormatCtx_Out->streams[VideoIndex]->codec->pix_fmt,
pFormatCtx_Out->streams[VideoIndex]->codec->width,
pFormatCtx_Out->streams[VideoIndex]->codec->height);
//star cap screen thread
CreateThread( NULL, 0, ScreenCapThreadProc, 0, 0, NULL);
//star cap audio thread
CreateThread( NULL, 0, AudioCapThreadProc, 0, 0, NULL);
int64_t cur_pts_v=0,cur_pts_a=0;
int VideoFrameIndex = 0, AudioFrameIndex = 0;
while(1)
{
if (_kbhit() != 0 && bCap)
{
bCap = false;
Sleep(2000);
}
if (fifo_audio && fifo_video)
{
int sizeAudio = av_audio_fifo_size(fifo_audio);
int sizeVideo = av_fifo_size(fifo_video);
// once capture has stopped and the cached data has been written out, end the loop
if (av_audio_fifo_size(fifo_audio) <= pFormatCtx_Out->streams[AudioIndex]->codec->frame_size &&
av_fifo_size(fifo_video) <= frame_size && !bCap)
{
break;
}
}
if(av_compare_ts(cur_pts_v, pFormatCtx_Out->streams[VideoIndex]->time_base,
cur_pts_a,pFormatCtx_Out->streams[AudioIndex]->time_base) <= 0)
{
if (av_fifo_size(fifo_video) < frame_size && !bCap)
{
cur_pts_v = 0x7fffffffffffffff;
}
if(av_fifo_size(fifo_video) >= size)
{
EnterCriticalSection(&VideoSection);
av_fifo_generic_read(fifo_video, picture_buf, size, NULL); // feed data from the AVFifoBuffer into the user-supplied buffer
LeaveCriticalSection(&VideoSection);
avpicture_fill((AVPicture *)picture, picture_buf,
pFormatCtx_Out->streams[VideoIndex]->codec->pix_fmt,
pFormatCtx_Out->streams[VideoIndex]->codec->width,
pFormatCtx_Out->streams[VideoIndex]->codec->height); // set the picture fields from the image parameters and the supplied buffer
//pts = n * ((1 / timebase) / fps);
//picture->pts = VideoFrameIndex * ((pFormatCtx_Video->streams[0]->time_base.den / pFormatCtx_Video->streams[0]->time_base.num) / 24);
picture->pts = VideoFrameIndex * ((outVideoCodecCtx->time_base.den * 100000 / outVideoCodecCtx->time_base.num) / 180);
int got_picture = 0;
AVPacket pkt;
av_init_packet(&pkt);
pkt.data = NULL;
pkt.size = 0;
// encode the raw video data carried in the frame
int ret = avcodec_encode_video2(pFormatCtx_Out->streams[VideoIndex]->codec, &pkt, picture, &got_picture);
if(ret < 0)
{
continue;
}
if (got_picture==1)
{
pkt.stream_index = VideoIndex;
/*int count = 1;
pkt.pts = pkt.dts = count * ((pFormatCtx_Video->streams[0]->time_base.den / pFormatCtx_Video->streams[0]->time_base.num) / 15);
count++;*/
//x = pts * (timebase1.num / timebase1.den )* (timebase2.den / timebase2.num);
pkt.pts = av_rescale_q_rnd(pkt.pts, pFormatCtx_Video->streams[0]->time_base,
pFormatCtx_Out->streams[VideoIndex]->time_base, (AVRounding)(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));
pkt.dts = av_rescale_q_rnd(pkt.dts, pFormatCtx_Video->streams[0]->time_base,
pFormatCtx_Out->streams[VideoIndex]->time_base, (AVRounding)(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));
pkt.duration = ((pFormatCtx_Out->streams[0]->time_base.den / pFormatCtx_Out->streams[0]->time_base.num) / 60);
//pkt.duration = 1000/60;
//pkt.pts = pkt.dts = Count * (ofmt_ctx->streams[stream_index]->time_base.den) /ofmt_ctx->streams[stream_index]->time_base.num / 10;
//Count++;
cur_pts_v = pkt.pts;
ret = av_interleaved_write_frame(pFormatCtx_Out, &pkt);
//delete[] pkt.data;
av_free_packet(&pkt);
}
VideoFrameIndex++;
}
}
else
{
if (NULL == fifo_audio)
{
continue; // the audio FIFO has not been initialized yet
}
if (av_audio_fifo_size(fifo_audio) < pFormatCtx_Out->streams[AudioIndex]->codec->frame_size && !bCap)
{
cur_pts_a = 0x7fffffffffffffff;
}
if(av_audio_fifo_size(fifo_audio) >=
(pFormatCtx_Out->streams[AudioIndex]->codec->frame_size > 0 ? pFormatCtx_Out->streams[AudioIndex]->codec->frame_size : 1024))
{
AVFrame *frame;
frame = av_frame_alloc();
frame->nb_samples = pFormatCtx_Out->streams[AudioIndex]->codec->frame_size>0 ? pFormatCtx_Out->streams[AudioIndex]->codec->frame_size: 1024;
frame->channel_layout = pFormatCtx_Out->streams[AudioIndex]->codec->channel_layout;
frame->format = pFormatCtx_Out->streams[AudioIndex]->codec->sample_fmt;
frame->sample_rate = pFormatCtx_Out->streams[AudioIndex]->codec->sample_rate;
av_frame_get_buffer(frame, 0);
EnterCriticalSection(&AudioSection);
av_audio_fifo_read(fifo_audio, (void **)frame->data,
(pFormatCtx_Out->streams[AudioIndex]->codec->frame_size > 0 ? pFormatCtx_Out->streams[AudioIndex]->codec->frame_size : 1024));
LeaveCriticalSection(&AudioSection);
AVPacket pkt_out;
av_init_packet(&pkt_out);
int got_picture = -1;
pkt_out.data = NULL;
pkt_out.size = 0;
frame->pts = AudioFrameIndex * pFormatCtx_Out->streams[AudioIndex]->codec->frame_size;
if (avcodec_encode_audio2(pFormatCtx_Out->streams[AudioIndex]->codec, &pkt_out, frame, &got_picture) < 0)
{
printf("can not decoder a frame");
}
av_frame_free(&frame);
if (got_picture)
{
pkt_out.stream_index = AudioIndex;
pkt_out.pts = AudioFrameIndex * pFormatCtx_Out->streams[AudioIndex]->codec->frame_size;
pkt_out.dts = AudioFrameIndex * pFormatCtx_Out->streams[AudioIndex]->codec->frame_size;
pkt_out.duration = pFormatCtx_Out->streams[AudioIndex]->codec->frame_size;
cur_pts_a = pkt_out.pts;
int ret = av_interleaved_write_frame(pFormatCtx_Out, &pkt_out);
av_free_packet(&pkt_out);
}
AudioFrameIndex++;
}
}
}
delete[] picture_buf;
av_fifo_free(fifo_video);
av_audio_fifo_free(fifo_audio);
av_write_trailer(pFormatCtx_Out);
avio_close(pFormatCtx_Out->pb);
avformat_free_context(pFormatCtx_Out);
if (pFormatCtx_Video != NULL)
{
avformat_close_input(&pFormatCtx_Video);
pFormatCtx_Video = NULL;
}
if (pFormatCtx_Audio != NULL)
{
avformat_close_input(&pFormatCtx_Audio);
pFormatCtx_Audio = NULL;
}
return 0;
}
DWORD WINAPI ScreenCapThreadProc( LPVOID lpParam )
{
AVPacket packet;
int got_picture;
AVFrame *pFrame;
pFrame=av_frame_alloc();
AVFrame *picture = av_frame_alloc();
int size = avpicture_get_size(pFormatCtx_Out->streams[VideoIndex]->codec->pix_fmt,
pFormatCtx_Out->streams[VideoIndex]->codec->width,
pFormatCtx_Out->streams[VideoIndex]->codec->height);
avpicture_fill((AVPicture *)picture, picture_buf,
pFormatCtx_Out->streams[VideoIndex]->codec->pix_fmt,
pFormatCtx_Out->streams[VideoIndex]->codec->width,
pFormatCtx_Out->streams[VideoIndex]->codec->height);
FILE *p = NULL;
p = fopen("proc_test.yuv", "wb+");
av_init_packet(&packet);
int height = pFormatCtx_Out->streams[VideoIndex]->codec->height;
int width = pFormatCtx_Out->streams[VideoIndex]->codec->width;
int y_size=height*width;
while(bCap)
{
packet.data = NULL;
packet.size = 0;
if (av_read_frame(pFormatCtx_Video, &packet) < 0)
{
continue;
}
if(packet.stream_index == 0)
{
if (avcodec_decode_video2(pCodecCtx_Video, pFrame, &got_picture, &packet) < 0)
{
printf("Decode Error.(解码错误)\n");
continue;
}
if (got_picture)
{
sws_scale(img_convert_ctx,
(const uint8_t* const*)pFrame->data,
pFrame->linesize,
0,
pFormatCtx_Out->streams[VideoIndex]->codec->height,
picture->data,
picture->linesize);
if (av_fifo_space(fifo_video) >= size)
{
EnterCriticalSection(&VideoSection);
av_fifo_generic_write(fifo_video, picture->data[0], y_size, NULL);
av_fifo_generic_write(fifo_video, picture->data[1], y_size/4, NULL);
av_fifo_generic_write(fifo_video, picture->data[2], y_size/4, NULL);
LeaveCriticalSection(&VideoSection);
}
}
}
av_free_packet(&packet);
}
av_frame_free(&pFrame);
av_frame_free(&picture);
return 0;
}
DWORD WINAPI AudioCapThreadProc( LPVOID lpParam )
{
AVPacket pkt;
AVFrame *frame;
frame = av_frame_alloc();
int gotframe;
while(bCap)
{
pkt.data = NULL;
pkt.size = 0;
if(av_read_frame(pFormatCtx_Audio,&pkt) < 0)
{
continue;
}
if (avcodec_decode_audio4(pFormatCtx_Audio->streams[0]->codec, frame, &gotframe, &pkt) < 0)
{
av_frame_free(&frame);
printf("can not decoder a frame");
break;
}
av_free_packet(&pkt);
if (!gotframe)
{
printf("没有获取到数据,继续下一次");
continue;
}
if (NULL == fifo_audio)
{
fifo_audio = av_audio_fifo_alloc(pFormatCtx_Audio->streams[0]->codec->sample_fmt,
pFormatCtx_Audio->streams[0]->codec->channels, 30 * frame->nb_samples);
}
int buf_space = av_audio_fifo_space(fifo_audio);
if (av_audio_fifo_space(fifo_audio) >= frame->nb_samples)
{
EnterCriticalSection(&AudioSection);
av_audio_fifo_write(fifo_audio, (void **)frame->data, frame->nb_samples);
LeaveCriticalSection(&AudioSection);
}
}
av_frame_free(&frame);
return 0;
}
Maybe there is another way to calculate PTS and DTS.
I hope that whatever the frame rate is, the video plays back at the right speed, neither too fast nor too slow.
Finally, I found the reason for the problem.
The frame rate of the recorded video is determined by the captured video stream. On my computer the maximum frame rate gdigrab can deliver is about 10 fps, so if I set the framerate option above 10 the playback is too fast, and if I set it below 10 the playback is too slow. But when I run the code on the computer where I play games, whether I choose 10 fps or 15 fps, the playback speed is correct.
I still don't know why my gdigrab frame rate tops out around 10 fps. A number of factors affect the capture frame rate, such as the CPU, graphics card, display and memory. (A quick way to check the real capture rate is sketched below.)
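A quick sanity check (a sketch, not part of the recorder): count how many packets gdigrab actually delivers per wall-clock second with av_gettime() and compare that with the requested framerate option.

// Sketch: measure the real capture rate of an already-opened gdigrab context.
int64_t t0 = av_gettime();                // wall clock, in microseconds
int frames = 0;
AVPacket probe;
while (av_gettime() - t0 < 5 * 1000000)   // sample for 5 seconds
{
    if (av_read_frame(pFormatCtx_Video, &probe) >= 0)
    {
        frames++;
        av_free_packet(&probe);
    }
}
printf("gdigrab delivered %.1f fps\n", frames / 5.0);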
Here is the final code
capture screen and audio with FFMPEG
#include "stdafx.h"
#ifdef __cplusplus
extern "C"
{
#endif
#include "libavcodec/avcodec.h"
#include "libavformat/avformat.h"
#include "libswscale/swscale.h"
#include "libavdevice/avdevice.h"
#include "libavutil/audio_fifo.h"
#include "libavfilter/buffersink.h"
#include "libavfilter/buffersrc.h"
#include "libavutil/imgutils.h"
#include "libavutil/mathematics.h"
#include "libavutil/samplefmt.h"
#include "libavutil/time.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "libavutil/file.h"
#include "libavutil/mem.h"
#include "libavutil/frame.h"
#include "libavfilter/avfilter.h"
#include "libswresample/swresample.h"
#pragma comment(lib, "avcodec.lib")
#pragma comment(lib, "avformat.lib")
#pragma comment(lib, "avutil.lib")
#pragma comment(lib, "avdevice.lib")
#pragma comment(lib, "avfilter.lib")
#pragma comment(lib, "avfilter.lib")
#pragma comment(lib, "postproc.lib")
#pragma comment(lib, "swresample.lib")
#pragma comment(lib, "swscale.lib")
#ifdef __cplusplus
};
#endif
AVFormatContext *pFormatCtx_Video = NULL, *pFormatCtx_Audio = NULL, *pFormatCtx_Out = NULL;
AVCodecContext *outVideoCodecCtx = NULL;
AVCodecContext *outAudioCodecCtx = NULL;
AVStream *pVideoStream = NULL, *pAudioStream = NULL;
AVCodec *outAVCodec;
AVCodec *outAudioCodec;
AVCodecContext *pCodecCtx_Video;
AVCodec *pCodec_Video;
AVFifoBuffer *fifo_video = NULL;
AVAudioFifo *fifo_audio = NULL;
int VideoIndex, AudioIndex;
int codec_id;
CRITICAL_SECTION AudioSection, VideoSection;
SwsContext *img_convert_ctx;
int frame_size = 0;
uint8_t *picture_buf = NULL;
bool bCap = true;
DWORD WINAPI ScreenCapThreadProc( LPVOID lpParam );
DWORD WINAPI AudioCapThreadProc( LPVOID lpParam );
static char *dup_wchar_to_utf8(wchar_t *w)
{
char *s = NULL;
int l = WideCharToMultiByte(CP_UTF8, 0, w, -1, 0, 0, 0, 0);
s = (char *) av_malloc(l);
if (s)
WideCharToMultiByte(CP_UTF8, 0, w, -1, s, l, 0, 0);
return s;
}
int OpenVideoCapture()
{
int fps = 10;
char opt;
printf("选择视频播放帧率:\n");
printf("A 5帧\n");
printf("B 10帧\n");
printf("C 15帧\n");
printf("D 20帧\n");
printf("E 25帧\n");
printf("F 30帧\n");
opt = getchar();
AVDictionary *options = NULL;
switch (opt)
{
case 'A': case 'a':
fps = 5;
av_dict_set(&options, "framerate", "5", 0);
break;
case 'B': case 'b':
fps = 10;
av_dict_set(&options, "framerate", "10", 0);
break;
case 'C': case 'c':
fps = 15;
av_dict_set(&options, "framerate", "15", 0);
break;
case 'D': case 'd':
fps = 20;
av_dict_set(&options, "framerate", "20", 0);
break;
case 'E': case 'e':
fps = 25;
av_dict_set(&options, "framerate", "25", 0);
break;
case 'F': case 'f':
fps = 30;
av_dict_set(&options, "framerate", "30", 0);
break;
default:
printf("Invalid option\n");
return -1;
}
//AVDictionary *options = NULL;
//av_dict_set(&options, "framerate", "15", 0);
AVInputFormat *ifmt=av_find_input_format("gdigrab");
if(avformat_open_input(&pFormatCtx_Video, "desktop", ifmt, &options) != 0)
{
printf("Couldn't open input stream.(无法打开视频输入流)\n");
return -1;
}
if(avformat_find_stream_info(pFormatCtx_Video,NULL)<0)
{
printf("Couldn't find stream information.(无法获取视频流信息)\n");
return -1;
}
if (pFormatCtx_Video->streams[0]->codec->codec_type != AVMEDIA_TYPE_VIDEO)
{
printf("Couldn't find video stream information.(无法获取视频流信息)\n");
return -1;
}
pCodecCtx_Video = pFormatCtx_Video->streams[0]->codec;
pCodec_Video = avcodec_find_decoder(pCodecCtx_Video->codec_id);
if(pCodec_Video == NULL)
{
printf("Codec not found.(没有找到解码器)\n");
return -1;
}
if(avcodec_open2(pCodecCtx_Video, pCodec_Video, NULL) < 0)
{
printf("Could not open codec.(无法打开解码器)\n");
return -1;
}
av_dump_format(pFormatCtx_Video, 0, NULL, 0);
img_convert_ctx = sws_getContext(pCodecCtx_Video->width,
pCodecCtx_Video->height,
pCodecCtx_Video->pix_fmt,
pCodecCtx_Video->width,
pCodecCtx_Video->height,
PIX_FMT_YUV420P,
SWS_BICUBIC, NULL, NULL, NULL);
frame_size = avpicture_get_size(pCodecCtx_Video->pix_fmt, pCodecCtx_Video->width, pCodecCtx_Video->height);
fifo_video = av_fifo_alloc(60 * avpicture_get_size(AV_PIX_FMT_YUV420P, pCodecCtx_Video->width, pCodecCtx_Video->height));
av_dict_free(&options);
return 0;
}
int OpenAudioCapture()
{
AVInputFormat *pAudioInputFmt = av_find_input_format("dshow");
AVDictionary *opt = NULL;
char * psDevName = dup_wchar_to_utf8(L"audio=virtual-audio-capturer");
if (avformat_open_input(&pFormatCtx_Audio, psDevName, pAudioInputFmt, &opt) < 0)
{
printf("Couldn't open input stream.(无法打开音频输入流)\n");
return -1;
}
if(avformat_find_stream_info(pFormatCtx_Audio,NULL)<0)
return -1;
if(pFormatCtx_Audio->streams[0]->codec->codec_type != AVMEDIA_TYPE_AUDIO)
{
printf("Couldn't find video stream information.(无法获取音频流信息)\n");
return -1;
}
AVCodec *tmpCodec = avcodec_find_decoder(pFormatCtx_Audio->streams[0]->codec->codec_id);
if(0 > avcodec_open2(pFormatCtx_Audio->streams[0]->codec, tmpCodec, NULL))
{
printf("can not find or open audio decoder!\n");
}
av_dump_format(pFormatCtx_Audio, 0, NULL, 0);
return 0;
}
int OpenOutPut()
{
AVStream *pVideoStream = NULL, *pAudioStream = NULL;
const char *outFileName = "test.mp4";
avformat_alloc_output_context2(&pFormatCtx_Out, NULL, NULL, outFileName);
if (pFormatCtx_Video->streams[0]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
{
VideoIndex = 0;
pVideoStream = avformat_new_stream(pFormatCtx_Out, NULL);
if (!pVideoStream)
{
printf("can not new stream for output!\n");
return -1;
}
outVideoCodecCtx = avcodec_alloc_context3(outAVCodec);
if ( !outVideoCodecCtx )
{
printf("Error : avcodec_alloc_context3()\n");
return -1;
}
outVideoCodecCtx = pVideoStream->codec;
outVideoCodecCtx->codec_id = AV_CODEC_ID_MPEG4;
outVideoCodecCtx->width = pFormatCtx_Video->streams[0]->codec->width;
outVideoCodecCtx->height = pFormatCtx_Video->streams[0]->codec->height;
outVideoCodecCtx->time_base = pFormatCtx_Video->streams[0]->codec->time_base;
outVideoCodecCtx->pix_fmt = AV_PIX_FMT_YUV420P;
outVideoCodecCtx->codec_type = AVMEDIA_TYPE_VIDEO;
if (codec_id == AV_CODEC_ID_H264)
{
av_opt_set(outVideoCodecCtx->priv_data, "preset", "slow", 0);
}
outAVCodec = avcodec_find_encoder(AV_CODEC_ID_MPEG4);
if( !outAVCodec )
{
printf("\n\nError : avcodec_find_encoder()");
return -1;
}
if (pFormatCtx_Out->oformat->flags & AVFMT_GLOBALHEADER)
outVideoCodecCtx->flags |=CODEC_FLAG_GLOBAL_HEADER;
if ((avcodec_open2(outVideoCodecCtx,outAVCodec, NULL)) < 0)
{
printf("can not open the encoder\n");
return -1;
}
}
if(pFormatCtx_Audio->streams[0]->codec->codec_type == AVMEDIA_TYPE_AUDIO)
{
AVCodecContext *pOutputCodecCtx;
AudioIndex = 1;
pAudioStream = avformat_new_stream(pFormatCtx_Out, NULL);
pAudioStream->codec->codec = avcodec_find_encoder(pFormatCtx_Out->oformat->audio_codec);
pOutputCodecCtx = pAudioStream->codec;
pOutputCodecCtx->sample_rate = pFormatCtx_Audio->streams[0]->codec->sample_rate;
pOutputCodecCtx->channel_layout = pFormatCtx_Out->streams[0]->codec->channel_layout;
pOutputCodecCtx->channels = av_get_channel_layout_nb_channels(pAudioStream->codec->channel_layout);
if(pOutputCodecCtx->channel_layout == 0)
{
pOutputCodecCtx->channel_layout = AV_CH_LAYOUT_STEREO;
pOutputCodecCtx->channels = av_get_channel_layout_nb_channels(pOutputCodecCtx->channel_layout);
}
pOutputCodecCtx->sample_fmt = pAudioStream->codec->codec->sample_fmts[0];
AVRational time_base={1, pAudioStream->codec->sample_rate};
pAudioStream->time_base = time_base;
pOutputCodecCtx->codec_tag = 0;
if (pFormatCtx_Out->oformat->flags & AVFMT_GLOBALHEADER)
pOutputCodecCtx->flags |= CODEC_FLAG_GLOBAL_HEADER;
if (avcodec_open2(pOutputCodecCtx, pOutputCodecCtx->codec, 0) < 0)
{
printf("编码器打开失败,退出程序\n");
return -1;
}
}
if (!(pFormatCtx_Out->oformat->flags & AVFMT_NOFILE))
{
if(avio_open(&pFormatCtx_Out->pb, outFileName, AVIO_FLAG_WRITE) < 0)
{
printf("can not open output file handle!\n");
return -1;
}
}
if(avformat_write_header(pFormatCtx_Out, NULL) < 0)
{
printf("can not write the header of the output file!\n");
return -1;
}
return 0;
}
int _tmain(int argc, _TCHAR* argv[])
{
av_register_all();
avdevice_register_all();
if (OpenVideoCapture() < 0)
{
return -1;
}
if (OpenAudioCapture() < 0)
{
return -1;
}
if (OpenOutPut() < 0)
{
return -1;
}
InitializeCriticalSection(&VideoSection);
InitializeCriticalSection(&AudioSection);
AVFrame *picture = av_frame_alloc();
int size = avpicture_get_size(pFormatCtx_Out->streams[VideoIndex]->codec->pix_fmt,
pFormatCtx_Out->streams[VideoIndex]->codec->width, pFormatCtx_Out->streams[VideoIndex]->codec->height);
picture_buf = new uint8_t[size];
avpicture_fill((AVPicture *)picture, picture_buf,
pFormatCtx_Out->streams[VideoIndex]->codec->pix_fmt,
pFormatCtx_Out->streams[VideoIndex]->codec->width,
pFormatCtx_Out->streams[VideoIndex]->codec->height);
//star cap screen thread
CreateThread( NULL, 0, ScreenCapThreadProc, 0, 0, NULL);
//star cap audio thread
CreateThread( NULL, 0, AudioCapThreadProc, 0, 0, NULL);
int64_t cur_pts_v=0,cur_pts_a=0;
int64_t VideoFrameIndex = 0, AudioFrameIndex = 0;
int64_t count = 1;
int64_t video_pre_pts = 0;
while(1)
{
if (_kbhit() != 0 && bCap)
{
bCap = false;
Sleep(2000);
}
if (fifo_audio && fifo_video)
{
int sizeAudio = av_audio_fifo_size(fifo_audio);
int sizeVideo = av_fifo_size(fifo_video);
// once capture has stopped and the cached data has been written out, end the loop
if (av_audio_fifo_size(fifo_audio) <= pFormatCtx_Out->streams[AudioIndex]->codec->frame_size &&
av_fifo_size(fifo_video) <= frame_size && !bCap)
{
break;
}
}
if(av_compare_ts(cur_pts_v, pFormatCtx_Out->streams[VideoIndex]->time_base, cur_pts_a,pFormatCtx_Out->streams[AudioIndex]->time_base) <= 0)
{
if (av_fifo_size(fifo_video) < frame_size && !bCap)
{
cur_pts_v = 0x7fffffffffffffff;
}
if(av_fifo_size(fifo_video) >= size)
{
// feed data from the AVFifoBuffer into the user-supplied buffer
EnterCriticalSection(&VideoSection);
av_fifo_generic_read(fifo_video, picture_buf, size, NULL);
LeaveCriticalSection(&VideoSection);
// set the picture fields from the image parameters and the supplied buffer
avpicture_fill((AVPicture *)picture, picture_buf,
pFormatCtx_Out->streams[VideoIndex]->codec->pix_fmt,
pFormatCtx_Out->streams[VideoIndex]->codec->width,
pFormatCtx_Out->streams[VideoIndex]->codec->height);
//pts = n * ((1 / timebase) / fps);
//picture->pts = VideoFrameIndex * ((pFormatCtx_Video->streams[0]->time_base.den / pFormatCtx_Video->streams[0]->time_base.num) / 15);
picture->pts = av_rescale_q(VideoFrameIndex,outVideoCodecCtx->time_base,pFormatCtx_Video->streams[0]->time_base);
printf("picture->pts: %d\n",picture->pts);
int got_picture = 0;
AVPacket pkt;
av_init_packet(&pkt);
pkt.data = NULL;
pkt.size = 0;
// encode the raw video data carried in the frame
int ret = avcodec_encode_video2(pFormatCtx_Out->streams[VideoIndex]->codec, &pkt, picture, &got_picture);
if(ret < 0)
{
continue;
}
if (got_picture==1)
{
pkt.stream_index = VideoIndex;
//pFormatCtx_Video //pFormatCtx_Out
pkt.pts = av_rescale_q_rnd(pkt.pts, pFormatCtx_Video->streams[0]->time_base, pFormatCtx_Out->streams[VideoIndex]->time_base, (AVRounding)(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));
printf("pkt.pts = %d\n",pkt.pts);
pkt.dts = av_rescale_q_rnd(pkt.dts, pFormatCtx_Video->streams[0]->time_base, pFormatCtx_Out->streams[VideoIndex]->time_base, (AVRounding)(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));
printf("pkt.dts = %d\n",pkt.dts);
pkt.duration = ((pFormatCtx_Out->streams[0]->time_base.den / pFormatCtx_Out->streams[0]->time_base.num) / outVideoCodecCtx->time_base.den);
//pkt.duration = 1;
//pkt.duration = av_rescale_q(pkt.duration,outVideoCodecCtx->time_base,pFormatCtx_Video->streams[0]->time_base);
printf("pkt.duration = %d\n",pkt.duration);
pkt.pos = -1;
cur_pts_v = pkt.pts;
ret = av_interleaved_write_frame(pFormatCtx_Out, &pkt);
if (ret<0)
{
printf("Error muxing packet\n");
break;
}
av_free_packet(&pkt);
}
VideoFrameIndex++;
}
}
else
{
if (NULL == fifo_audio)
{
continue; // the audio FIFO has not been initialized yet
}
if (av_audio_fifo_size(fifo_audio) < pFormatCtx_Out->streams[AudioIndex]->codec->frame_size && !bCap)
{
cur_pts_a = 0x7fffffffffffffff;
}
if(av_audio_fifo_size(fifo_audio) >=
(pFormatCtx_Out->streams[AudioIndex]->codec->frame_size > 0 ? pFormatCtx_Out->streams[AudioIndex]->codec->frame_size : 1024))
{
AVFrame *frame;
frame = av_frame_alloc();
frame->nb_samples = pFormatCtx_Out->streams[AudioIndex]->codec->frame_size>0 ? pFormatCtx_Out->streams[AudioIndex]->codec->frame_size: 1024;
frame->channel_layout = pFormatCtx_Out->streams[AudioIndex]->codec->channel_layout;
frame->format = pFormatCtx_Out->streams[AudioIndex]->codec->sample_fmt;
frame->sample_rate = pFormatCtx_Out->streams[AudioIndex]->codec->sample_rate;
av_frame_get_buffer(frame, 0);
EnterCriticalSection(&AudioSection);
av_audio_fifo_read(fifo_audio, (void **)frame->data,
(pFormatCtx_Out->streams[AudioIndex]->codec->frame_size > 0 ? pFormatCtx_Out->streams[AudioIndex]->codec->frame_size : 1024));
LeaveCriticalSection(&AudioSection);
AVPacket pkt_out;
av_init_packet(&pkt_out);
int got_picture = -1;
pkt_out.data = NULL;
pkt_out.size = 0;
frame->pts = AudioFrameIndex * pFormatCtx_Out->streams[AudioIndex]->codec->frame_size;
if (avcodec_encode_audio2(pFormatCtx_Out->streams[AudioIndex]->codec, &pkt_out, frame, &got_picture) < 0)
{
printf("can not decoder a frame");
}
av_frame_free(&frame);
if (got_picture)
{
pkt_out.stream_index = AudioIndex;
pkt_out.pts = AudioFrameIndex * pFormatCtx_Out->streams[AudioIndex]->codec->frame_size;
pkt_out.dts = AudioFrameIndex * pFormatCtx_Out->streams[AudioIndex]->codec->frame_size;
pkt_out.duration = pFormatCtx_Out->streams[AudioIndex]->codec->frame_size;
cur_pts_a = pkt_out.pts;
int ret = av_interleaved_write_frame(pFormatCtx_Out, &pkt_out);
if (ret<0)
{
printf("Error muxing packet\n");
break;
}
av_free_packet(&pkt_out);
}
AudioFrameIndex++;
}
}
}
delete[] picture_buf;
av_fifo_free(fifo_video);
av_audio_fifo_free(fifo_audio);
av_write_trailer(pFormatCtx_Out);
avio_close(pFormatCtx_Out->pb);
avformat_free_context(pFormatCtx_Out);
if (pFormatCtx_Video != NULL)
{
avformat_close_input(&pFormatCtx_Video);
pFormatCtx_Video = NULL;
}
if (pFormatCtx_Audio != NULL)
{
avformat_close_input(&pFormatCtx_Audio);
pFormatCtx_Audio = NULL;
}
return 0;
}
DWORD WINAPI ScreenCapThreadProc( LPVOID lpParam )
{
AVPacket packet;
int got_picture;
AVFrame *pFrame;
pFrame=av_frame_alloc();
AVFrame *picture = av_frame_alloc();
int size = avpicture_get_size(pFormatCtx_Out->streams[VideoIndex]->codec->pix_fmt,
pFormatCtx_Out->streams[VideoIndex]->codec->width,
pFormatCtx_Out->streams[VideoIndex]->codec->height);
avpicture_fill((AVPicture *)picture, picture_buf,
pFormatCtx_Out->streams[VideoIndex]->codec->pix_fmt,
pFormatCtx_Out->streams[VideoIndex]->codec->width,
pFormatCtx_Out->streams[VideoIndex]->codec->height);
av_init_packet(&packet);
int height = pFormatCtx_Out->streams[VideoIndex]->codec->height;
int width = pFormatCtx_Out->streams[VideoIndex]->codec->width;
int y_size=height*width;
while(bCap)
{
packet.data = NULL;
packet.size = 0;
if (av_read_frame(pFormatCtx_Video, &packet) < 0)
{
continue;
}
if(packet.stream_index == 0)
{
if (avcodec_decode_video2(pCodecCtx_Video, pFrame, &got_picture, &packet) < 0)
{
printf("Decode Error.(解码错误)\n");
continue;
}
if (got_picture)
{
sws_scale(img_convert_ctx,
(const uint8_t* const*)pFrame->data,
pFrame->linesize,
0,
pFormatCtx_Out->streams[VideoIndex]->codec->height,
picture->data,
picture->linesize);
if (av_fifo_space(fifo_video) >= size)
{
EnterCriticalSection(&VideoSection);
av_fifo_generic_write(fifo_video, picture->data[0], y_size, NULL);
av_fifo_generic_write(fifo_video, picture->data[1], y_size/4, NULL);
av_fifo_generic_write(fifo_video, picture->data[2], y_size/4, NULL);
LeaveCriticalSection(&VideoSection);
}
}
}
av_free_packet(&packet);
}
av_frame_free(&pFrame);
av_frame_free(&picture);
return 0;
}
DWORD WINAPI AudioCapThreadProc( LPVOID lpParam )
{
AVPacket pkt;
AVFrame *frame;
frame = av_frame_alloc();
int gotframe;
while(bCap)
{
pkt.data = NULL;
pkt.size = 0;
if(av_read_frame(pFormatCtx_Audio,&pkt) < 0)
{
continue;
}
if (avcodec_decode_audio4(pFormatCtx_Audio->streams[0]->codec, frame, &gotframe, &pkt) < 0)
{
av_frame_free(&frame);
printf("can not decoder a frame");
break;
}
av_free_packet(&pkt);
if (!gotframe)
{
printf("没有获取到数据,继续下一次");
continue;
}
if (NULL == fifo_audio)
{
fifo_audio = av_audio_fifo_alloc(pFormatCtx_Audio->streams[0]->codec->sample_fmt,
pFormatCtx_Audio->streams[0]->codec->channels, 30 * frame->nb_samples);
}
int buf_space = av_audio_fifo_space(fifo_audio);
if (av_audio_fifo_space(fifo_audio) >= frame->nb_samples)
{
EnterCriticalSection(&AudioSection);
av_audio_fifo_write(fifo_audio, (void **)frame->data, frame->nb_samples);
LeaveCriticalSection(&AudioSection);
}
}
av_frame_free(&frame);
return 0;
}

Increasing the framerate of this Processing script

I'm currently working on this script for a project, and I'm not happy with the current frame rate. The FPS bounces around, and I need at least 30 so I can use it in a video installation without lag.
Since my math is terrible, and I didn't quite understand how the original author did the math, I can't figure out where the redundant code is.
I've been working on it for a whole day with no results.
Holler if you need the original .aif or the svg.
I think you can substitute any kind of audio track, because as far as I can gather, these calculations are run separately.
import ddf.minim.*;
import ddf.minim.analysis.*;
import ddf.minim.effects.*;
import ddf.minim.signals.*;
import ddf.minim.spi.*;
import ddf.minim.ugens.*;
Minim minim;
AudioPlayer data1;
AudioPlayer data2;
AudioPlayer data3;
AudioPlayer data4;
/* OpenProcessing Tweak of *#*http://www.openprocessing.org/sketch/6598*#* */
/* !do not delete the line above, required for linking your tweak if you upload again */
float grid[][][];
PGraphics boundary;
int page = 0;
int sides = 3;
float w = PI/10;
PShape world;
public void setup() {
size(1920, 1080, FX2D);
//fullScreen(FX2D, 0);
frameRate(30);
grid = new float[2][width-1][height-1];
boundary = createGraphics(width, height);
world = loadShape("world.svg");
makeBoundary(sides);
loadPixels();
minim = new Minim (this);
data1 = minim.loadFile ("indo.aif");
data1.loop();
data1.play();
data2 = minim.loadFile ("chile.aif");
data2.loop();
data2.play();
data3 = minim.loadFile ("fuku.aif");
data3.loop();
data3.play();
data4 = minim.loadFile ("samoa.aif");
data4.loop();
data4.play();
}
void makeBoundary(int sides) {
for (int i=0; i<width-1; i++) {
for (int j=0; j<height-1; j++) {
grid[page][i][j] = 0;
grid[page^1][i][j] = 0;
}
}
boundary.beginDraw();
boundary.background(0);
boundary.fill(255);
boundary.shape(world, 0, 0, 1860, 1020);
boundary.endDraw();
}
public void draw() {
println(frameRate);
float thresh = map(mouseX, 0, width, 0, 255);
oscillator(data1, 380, 700, 32); // Indonesian wave source
oscillator(data2, 1470, 780, 32);
oscillator(data3, 570, 400, 32);
oscillator(data4, 700, 750, 32);
for (int i=1; i<width-2; i++) {
for (int j=1; j<height-2; j++) {
int wa = boundary.pixels[i+width*j];
float dx = (wa == 0xffffffff) ? 0: (grid[page][i-1][j] + grid[page][i+1][j]);
float dy = (wa == 0xffffffff) ? 0: (grid[page][i][j-1] + grid[page][i][j+1]);
float value = (dx+dy )/2 - grid[page^1][i][j] ;
grid[page^1][i][j] = value * 0.992;
int val = (int) (abs(value) * 512);
val = val > 255 ? 255 : val;
pixels[i+j*width] = 0xFF000000 | val << 16 | val << 8 | val;
}
}
page ^= 1;
updatePixels();
fill(0);
ellipse(380, 700, 64, 64);
ellipse(1470, 780, 64, 64);
ellipse( 570, 400, 64, 64);
ellipse(700, 750, 64, 64);
}
public void oscillator(AudioPlayer data, int x, int y, int r) {
//grid[page][cx][cy] = sou.mix.level() * 60; // this factor changes the number of circles/waves
//w += PI/16;
int r2 = r * r;
int area = r2 << 2;
int rr = r << 1;
for (int i = 0; i < area; i++)
{
int tx = (i % rr) - r;
int ty = (i / rr) - r;
if (tx * tx + ty * ty <= r2)
grid[page][x + tx][y + ty] = data.mix.level() * 30;
//SetPixel(x + tx, y + ty, c);
}
}
public void mousePressed() {
makeBoundary(++sides);
}
public void keyPressed() {
if (key == ' ') {
sides = 3;
makeBoundary(sides);
}
//saveFrame();
}

Problems while using threading with arguments

I have been trying to get this working for a while, and I am not very skilled at C++/CLI. I am trying to loop through a 2D array and, when it contains certain numbers, create a thread, but I keep getting errors while compiling.
Here is the thread creation:
if (map[x][y] == 8)
{
Pos^ p = gcnew Pos(x, y, map);
Thread^ t = gcnew Thread(gcnew ParameterizedThreadStart(p, &Pos::moverX));
t->Start(p);
}
else if (map[x][y] == 9)
{
Pos^ p = gcnew Pos(x, y, map);
Thread^ t = gcnew Thread(gcnew ParameterizedThreadStart(p, &Pos::moverY));
t->Start(p);
}
Here is the Pos class:
public ref class Pos
{
public:
static int px, py;
static int ** mapa;
Pos(int x, int y, int ** map)
{
px = x;
py = y;
mapa = map;
}
int getX(){ return px; }
int getY(){ return py; }
int** getMap(){ return mapa; }
static void moverX(Pos p)
{
int dy = 1;
while (true)
{
if (mapa[p.getX()+dy][p.getY()] == 1){ dy *= -1; }
Console::BackgroundColor = ConsoleColor::Black;
Console::SetCursorPosition(p.getY() + 30, p.getX() + 5);
cout << " ";
Console::SetCursorPosition(p.getY() + 30, p.getX() + 5+dy);
cout << (char)164;
Thread::Sleep(1000);
}
}
static void moverY(Pos p)
{
int dy = 1;
while (true)
{
if (mapa[p.getX()][p.getY() + dy] == 1){ dy *= -1; }
Console::BackgroundColor = ConsoleColor::Black;
Console::SetCursorPosition(p.getY() + 30, p.getX() + 5);
cout << " ";
Console::SetCursorPosition(p.getY() + 30 + dy, p.getX() + 5);
cout << (char)164;
Thread::Sleep(1000);
}
}
};
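For reference, a ParameterizedThreadStart target must have the signature void (Object^), and since Pos is a ref class it is passed by handle rather than by value. A minimal sketch of a matching entry point (only the signature and the cast change; the body is as above):

// Sketch: thread entry point compatible with ParameterizedThreadStart.
static void moverX(Object^ obj)
{
    Pos^ p = safe_cast<Pos^>(obj);  // recover the Pos^ passed to t->Start(p)
    int dy = 1;
    while (true)
    {
        if (mapa[p->getX() + dy][p->getY()] == 1) { dy *= -1; }
        Console::BackgroundColor = ConsoleColor::Black;
        Console::SetCursorPosition(p->getY() + 30, p->getX() + 5);
        Console::Write(" ");
        Console::SetCursorPosition(p->getY() + 30, p->getX() + 5 + dy);
        Console::Write((wchar_t)164);
        Thread::Sleep(1000);
    }
}

moverY follows the same pattern. Since moverX is static, the delegate is then created without the instance argument: gcnew ParameterizedThreadStart(&Pos::moverX).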

Performance difference between DrawLine and DrawLines?

I'm using GDI+ in C++ to draw a chart control. I want to know if there is any performance difference between the two functions above. It isn't that I'm too lazy to write the code for DrawLines(); it's that doing so makes my code more complex. So I'm weighing whether to make execution faster at the expense of readability and of potentially introducing errors and bugs.
Any help would be appreciated.
Eraj.
There shouldn't be a significant difference between the two for most drawing activities, but to be sure, I wrote up a test project to compare the difference between them (well, actually 3 of them).
For a very large number of lines (x25000) on my machine, DrawLines() (420ms) was about 50% faster than DrawLine() (640ms). To be honest here, I also misread the question the first time around and wrote my initial test in C#. Performance was about the same between the two, which is to be expected, as .NET graphics are based upon GDI+.
Just out of curiosity, I tried regular GDI, which I expected would be faster. Using the win32 Polyline() (530ms) function was about 20% faster, with 45000 lines. This is 116% faster than using GDI+ DrawLines(). Even more stunning, perhaps, is that using win32 LineTo() instead of GDI+ DrawLine() results in times under 125ms. With an assumed time of 125ms and 45000 lines, this method is at least 800% faster. (Timer resolution and thread timing make it difficult to measure performance in this threshold without resorting to QueryPerformanceCounter and other higher-frequency timing methods.)
However, I should caution against assuming that this is a significant bottleneck in drawing code. Many of the performance improvements that can be made will have nothing to do with what objects have to be drawn. I would guess that your requirements will dictate that only a few hundred items need to be drawn in normal operation for your control. In that case, I would recommend you write your drawing code to be as straightforward and bug-free as you can, as debugging drawing issues can be an expensive use of time and potentially less beneficial than improving the rest of your control or your application.
Also, if you need to actively update thousands of items, you will see much higher performance gains by moving to a back-buffered solution (a minimal sketch follows below). This should also make it easier to develop code to draw your control, aside from managing the off-screen buffer.
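To illustrate that last point, here is a minimal back-buffered OnPaint sketch in plain GDI (not part of the test projects below): render everything into a memory DC, then blit the finished frame once.

void CChildView::OnPaint()
{
    CPaintDC dc(this);
    RECT rc;
    ::GetClientRect(m_hWnd, &rc);
    int w = rc.right - rc.left;
    int h = rc.bottom - rc.top;
    // Draw the whole scene off-screen first...
    HDC memDC = ::CreateCompatibleDC(dc.GetSafeHdc());
    HBITMAP bmp = ::CreateCompatibleBitmap(dc.GetSafeHdc(), w, h);
    HGDIOBJ oldBmp = ::SelectObject(memDC, bmp);
    ::FillRect(memDC, &rc, (HBRUSH)::GetStockObject(BLACK_BRUSH));
    // ... all line drawing goes against memDC here ...
    // ...then copy the finished frame to the screen in a single blit.
    ::BitBlt(dc.GetSafeHdc(), 0, 0, w, h, memDC, 0, 0, SRCCOPY);
    ::SelectObject(memDC, oldBmp);
    ::DeleteObject(bmp);
    ::DeleteDC(memDC);
}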
Here are my source code examples. Each of them handles mouse clicks to alternate between using bulk drawing versus itemized drawing.
GDI+, hosted in a barebones MFC SDI App
This assumes that someone has already declared GDI+ headers and written code to initialize/teardown GDI+.
In ChildView.h
// Attributes
public:
bool m_bCompositeMode;
// Operations
public:
void RedrawScene(Graphics &g, int lineCount, int width, int height);
PointF *CreatePoints(int lineCount, int width, int height);
void ReportTime(Graphics &g, int lineCount, DWORD tickSpan);
public:
afx_msg void OnLButtonUp(UINT nFlags, CPoint point);
In ChildView.cpp, added to PreCreateWindow()
m_bCompositeMode = false;
Remainder of ChildView.cpp, including OnPaint() and Message Map changes.
BEGIN_MESSAGE_MAP(CChildView, CWnd)
ON_WM_PAINT()
ON_WM_LBUTTONUP()
END_MESSAGE_MAP()
void CChildView::OnPaint()
{
CPaintDC dc(this); // device context for painting
RECT rcClient;
::GetClientRect(this->GetSafeHwnd(), &rcClient);
Graphics g(dc.GetSafeHdc());
g.Clear(Color(0, 0, 0));
RedrawScene(g, 25000, rcClient.right - rcClient.left, rcClient.bottom - rcClient.top);
}
void CChildView::RedrawScene(Graphics &g, int lineCount, int width, int height)
{
DWORD tickStart = 0;
DWORD tickEnd = 0;
Pen p(Color(0, 0, 0x7F));
PointF *pts = CreatePoints(lineCount, width, height);
tickStart = GetTickCount();
if (m_bCompositeMode)
{
g.DrawLines(&p, pts, lineCount);
}
else
{
int i = 0;
int imax = lineCount - 1;
for (i = 0; i < imax; i++)
{
g.DrawLine(&p, pts[i], pts[i + 1]);
}
}
tickEnd = GetTickCount();
delete[] pts;
ReportTime(g, lineCount, tickEnd - tickStart);
}
void CChildView::ReportTime(Graphics &g, int lineCount, DWORD tickSpan)
{
CString strDisp;
if(m_bCompositeMode)
{
strDisp.Format(_T("Graphics::DrawLines(Pen *, PointF *, INT) x%d took %dms"), lineCount, tickSpan);
}
else
{
strDisp.Format(_T("Graphics::DrawLine(Pen *, PointF, PointF) x%d took %dms"), lineCount, tickSpan);
}
// Note: sloppy, but simple.
Font font(L"Arial", 14.0f);
PointF ptOrigin(0.0f, 0.0f);
SolidBrush br(Color(255, 255, 255));
Status s = g.DrawString(strDisp, -1, &font, ptOrigin, &br);
}
PointF* CChildView::CreatePoints(int lineCount, int width, int height)
{
if(lineCount <= 0)
{
PointF *ptEmpty = new PointF[2];
ptEmpty[0].X = 0;
ptEmpty[0].Y = 0;
ptEmpty[1].X = 0;
ptEmpty[1].Y = 0;
return ptEmpty;
}
PointF *pts = new PointF[lineCount + 1];
pts[0].X = 0.0f; // the loop below starts at 1, so set the first point explicitly
pts[0].Y = 0.0f;
int i = 1;
while(i < lineCount)
{
pts[i].X = (float)(rand() % width);
pts[i].Y = (float)(rand() % height);
i++;
}
return pts;
}
void CChildView::OnLButtonUp(UINT nFlags, CPoint point)
{
m_bCompositeMode = !m_bCompositeMode;
this->Invalidate();
CWnd::OnLButtonUp(nFlags, point);
}
C#.NET, hosted in a barebones WinForms App, with default class Form1
Set a default size for the form, equal to the size of the MFC version if you are comparing the two. A size-change handler could be added as well.
public Form1()
{
InitializeComponent();
bCompositeMode = false;
}
bool bCompositeMode;
private void Form1_Paint(object sender, PaintEventArgs e)
{
e.Graphics.Clear(Color.Black);
RedrawScene(e.Graphics, 25000, this.ClientRectangle.Width, this.ClientRectangle.Height);
}
private void RedrawScene(Graphics g, int lineCount, int width, int height)
{
DateTime dtStart = DateTime.MinValue;
DateTime dtEnd = DateTime.MinValue;
using (Pen p = new Pen(Color.Navy))
{
Point[] pts = CreatePoints(lineCount, width, height);
dtStart = DateTime.Now;
if (bCompositeMode)
{
g.DrawLines(p, pts);
}
else
{
int i = 0;
int imax = pts.Length - 1;
for (i = 0; i < imax; i++)
{
g.DrawLine(p, pts[i], pts[i + 1]);
}
}
dtEnd = DateTime.Now;
}
ReportTime(g, lineCount, dtEnd - dtStart);
}
private void ReportTime(Graphics g, int lineCount, TimeSpan ts)
{
string strDisp = null;
if (bCompositeMode)
{
strDisp = string.Format("DrawLines(Pen, Point[]) x{0} took {1}ms", lineCount, ts.Milliseconds);
}
else
{
strDisp = string.Format("DrawLine(Pen, Point, Point) x{0} took {1}ms", lineCount, ts.Milliseconds);
}
// Note: sloppy, but simple.
using (Font font = new Font(FontFamily.GenericSansSerif, 14.0f, FontStyle.Regular))
{
g.DrawString(strDisp, font, Brushes.White, 0.0f, 0.0f);
}
}
private Point[] CreatePoints(int count, int width, int height)
{
Random rnd = new Random();
if (count <= 0) { return new Point[] { new Point(0,0), new Point(0,0)}; }
Point[] pts = new Point[count + 1];
pts[0] = new Point(0, 0);
int i = 1;
while (i <= count)
{
pts[i] = new Point(rnd.Next(width), rnd.Next(height));
i++;
}
return pts;
}
private void Form1_Click(object sender, EventArgs e)
{
bCompositeMode = !bCompositeMode;
Invalidate();
}
Regular GDI, hosted in a barebones MFC SDI App
In ChildView.h
// Attributes
public:
bool m_bCompositeMode;
// Operations
public:
void RedrawScene(HDC hdc, int lineCount, int width, int height);
POINT *CreatePoints(int lineCount, int width, int height);
void ReportTime(HDC hdc, int lineCount, DWORD tickSpan);
public:
afx_msg void OnLButtonUp(UINT nFlags, CPoint point);
In ChildView.cpp
Update PreCreateWindow() just as in the GDI+ sample.
BEGIN_MESSAGE_MAP(CChildView, CWnd)
ON_WM_PAINT()
ON_WM_LBUTTONUP()
END_MESSAGE_MAP()
void CChildView::OnPaint()
{
CPaintDC dc(this); // device context for painting
HDC hdc = dc.GetSafeHdc();
HBRUSH brClear = (HBRUSH)::GetStockObject(BLACK_BRUSH);
RECT rcClient;
::GetClientRect(this->m_hWnd, &rcClient);
::FillRect(hdc, &rcClient, brClear);
::DeleteObject(brClear);
RedrawScene(hdc, 45000, rcClient.right - rcClient.left, rcClient.bottom - rcClient.top);
}
void CChildView::RedrawScene(HDC hdc, int lineCount, int width, int height)
{
DWORD tickStart = 0;
DWORD tickEnd = 0;
HPEN p = ::CreatePen(PS_SOLID, 1, RGB(0, 0, 0x7F));
POINT *pts = CreatePoints(lineCount, width, height);
HGDIOBJ prevPen = SelectObject(hdc, p);
tickStart = GetTickCount();
if(m_bCompositeMode)
{
::Polyline(hdc, pts, lineCount);
}
else
{
::MoveToEx(hdc, pts[0].x, pts[0].y, &(pts[0]));
int i = 0;
int imax = lineCount;
for(i = 1; i < imax; i++)
{
::LineTo(hdc, pts[i].x, pts[i].y);
}
}
tickEnd = GetTickCount();
::SelectObject(hdc, prevPen);
delete[] pts;
::DeleteObject(p);
ReportTime(hdc, lineCount, tickEnd - tickStart);
}
POINT *CChildView::CreatePoints(int lineCount, int width, int height)
{
if(lineCount <= 0)
{
POINT *ptEmpty = new POINT[2];
memset(&ptEmpty, 0, sizeof(POINT) * 2);
return ptEmpty;
}
POINT *pts = new POINT[lineCount + 1];
pts[0].x = 0; // the loop below starts at 1, so set the first point explicitly
pts[0].y = 0;
int i = 1;
while(i < lineCount)
{
pts[i].x = rand() % width;
pts[i].y = rand() % height;
i++;
}
return pts;
}
void CChildView::ReportTime(HDC hdc, int lineCount, DWORD tickSpan)
{
CString strDisp;
if(m_bCompositeMode)
{
strDisp.Format(_T("PolyLine(HDC, POINT *, int) x%d took %dms"), lineCount, tickSpan);
}
else
{
strDisp.Format(_T("LineTo(HDC, HPEN, int, int) x%d took %dms"), lineCount, tickSpan);
}
HFONT font = (HFONT)::GetStockObject(SYSTEM_FONT);
HFONT fontPrev = (HFONT)::SelectObject(hdc, font);
RECT rcClient;
::GetClientRect(this->m_hWnd, &rcClient);
::ExtTextOut(hdc, 0, 0, ETO_CLIPPED, &rcClient, strDisp.GetString(), strDisp.GetLength(), NULL);
::SelectObject(hdc, fontPrev);
::DeleteObject(font);
}
void CChildView::OnLButtonUp(UINT nFlags, CPoint point)
{
m_bCompositeMode = !m_bCompositeMode;
this->Invalidate();
CWnd::OnLButtonUp(nFlags, point);
}
