Media Foundation EVR no video displaying - visual-c++

I've been trying in vain to come up with a no frills example of displaying video using Microsoft's Media Foundation Enhanced Video Renderer (EVR). I'm testing on Windows 7 with Visual Studio 2013.
I'm pretty sure I've got the media types configured correctly, as I can export the buffer from the IMFSample in my read loop and save it to a bitmap. I can also get the video to render if I let MF generate the topology automatically, but in this case I need to wire up the source reader and sink writer manually so I can get access to the different parts of the pipeline.
I have used mftrace to see if I can spot anything different between the automatically generated topology and the manually wired up example but nothing obvious jumps out.
The code is below (full sample project at https://github.com/sipsorcery/mediafoundationsamples/tree/master/MFVideoEVR).
Is there a step I've missed to get the IMFSample from the SinkWriter to display on the video window? I've been looking at a few examples that go deeper into the DirectX pipeline, but should that be necessary, or is the EVR meant to abstract those mechanics away?
#include <stdio.h>
#include <tchar.h>
#include <evr.h>
#include <mfapi.h>
#include <mfplay.h>
#include <mfreadwrite.h>
#include <mferror.h>
#include "..\Common\MFUtility.h"
#include <windows.h>
#include <windowsx.h>
#pragma comment(lib, "mf.lib")
#pragma comment(lib, "evr.lib")
#pragma comment(lib, "mfplat.lib")
#pragma comment(lib, "mfplay.lib")
#pragma comment(lib, "mfreadwrite.lib")
#pragma comment(lib, "mfuuid.lib")
#pragma comment(lib, "Strmiids")
#pragma comment(lib, "wmcodecdspuuid.lib")
#define CHECK_HR(hr, msg) if (hr != S_OK) { printf(msg); printf("Error: %.2X.\n", hr); goto done; }
void InitializeWindow();
// Constants
const WCHAR CLASS_NAME[] = L"MFVideoEVR Window Class";
const WCHAR WINDOW_NAME[] = L"MFVideoEVR";
// Globals.
HWND _hwnd;
using namespace System::Threading::Tasks;
int main()
{
CoInitializeEx(NULL, COINIT_APARTMENTTHREADED | COINIT_DISABLE_OLE1DDE);
MFStartup(MF_VERSION);
IMFMediaSource *videoSource = NULL;
UINT32 videoDeviceCount = 0;
IMFAttributes *videoConfig = NULL;
IMFActivate **videoDevices = NULL;
IMFSourceReader *videoReader = NULL;
WCHAR *webcamFriendlyName;
IMFMediaType *videoSourceOutputType = NULL, *pvideoSourceModType = NULL, *pSrcOutMediaType = NULL;
IMFSourceResolver *pSourceResolver = NULL;
IUnknown* uSource = NULL;
IMFMediaSource *mediaFileSource = NULL;
IMFAttributes *pVideoReaderAttributes = NULL;
IMFMediaType *pVideoOutType = NULL;
MF_OBJECT_TYPE ObjectType = MF_OBJECT_INVALID;
IMFMediaSink *pVideoSink = NULL;
IMFStreamSink *pStreamSink = NULL;
IMFMediaTypeHandler *pMediaTypeHandler = NULL;
IMFMediaType *pMediaType = NULL;
IMFMediaType *pSinkMediaType = NULL;
IMFSinkWriter *pSinkWriter = NULL;
IMFVideoRenderer *pVideoRenderer = NULL;
IMFVideoPresenter *pVideoPresenter = nullptr;
IMFVideoDisplayControl *pVideoDisplayControl = nullptr;
IMFGetService *pService = nullptr;
IMFActivate* pActive = NULL;
MFVideoNormalizedRect nrcDest = { 0.5f, 0.5f, 1.0f, 1.0f };
IMFPresentationTimeSource *pSystemTimeSource = nullptr;
IMFMediaType *sinkPreferredType = nullptr;
IMFPresentationClock *pClock = NULL;
IMFPresentationTimeSource *pTimeSource = NULL;
CHECK_HR(MFTRegisterLocalByCLSID(
__uuidof(CColorConvertDMO),
MFT_CATEGORY_VIDEO_PROCESSOR,
L"",
MFT_ENUM_FLAG_SYNCMFT,
0,
NULL,
0,
NULL
), "Error registering colour converter DSP.\n");
Task::Factory->StartNew(gcnew Action(InitializeWindow));
Sleep(1000);
if (_hwnd == nullptr)
{
printf("Failed to initialise video window.\n");
goto done;
}
// Set up the reader for the file.
CHECK_HR(MFCreateSourceResolver(&pSourceResolver), "MFCreateSourceResolver failed.\n");
CHECK_HR(pSourceResolver->CreateObjectFromURL(
L"..\\..\\MediaFiles\\big_buck_bunny.mp4", // URL of the source.
MF_RESOLUTION_MEDIASOURCE, // Create a source object.
NULL, // Optional property store.
&ObjectType, // Receives the created object type.
&uSource // Receives a pointer to the media source.
), "Failed to create media source resolver for file.\n");
CHECK_HR(uSource->QueryInterface(IID_PPV_ARGS(&mediaFileSource)), "Failed to create media file source.\n");
CHECK_HR(MFCreateAttributes(&pVideoReaderAttributes, 2), "Failed to create attributes object for video reader.\n");
CHECK_HR(pVideoReaderAttributes->SetGUID(MF_DEVSOURCE_ATTRIBUTE_SOURCE_TYPE, MF_DEVSOURCE_ATTRIBUTE_SOURCE_TYPE_VIDCAP_GUID), "Failed to set dev source attribute type for reader config.\n");
CHECK_HR(pVideoReaderAttributes->SetUINT32(MF_SOURCE_READER_ENABLE_VIDEO_PROCESSING, 1), "Failed to set enable video processing attribute type for reader config.\n");
CHECK_HR(MFCreateSourceReaderFromMediaSource(mediaFileSource, pVideoReaderAttributes, &videoReader),
"Error creating media source reader.\n");
CHECK_HR(videoReader->GetCurrentMediaType((DWORD)MF_SOURCE_READER_FIRST_VIDEO_STREAM, &videoSourceOutputType),
"Error retrieving current media type from first video stream.\n");
Console::WriteLine("Default output media type for source reader:");
Console::WriteLine(GetMediaTypeDescription(videoSourceOutputType));
Console::WriteLine();
// Set the video output type on the source reader.
CHECK_HR(MFCreateMediaType(&pvideoSourceModType), "Failed to create video output media type.\n");
CHECK_HR(pvideoSourceModType->SetGUID(MF_MT_MAJOR_TYPE, MFMediaType_Video), "Failed to set video output media major type.\n");
CHECK_HR(pvideoSourceModType->SetGUID(MF_MT_SUBTYPE, MFVideoFormat_RGB32), "Failed to set video sub-type attribute on EVR input media type.\n");
CHECK_HR(pvideoSourceModType->SetUINT32(MF_MT_INTERLACE_MODE, MFVideoInterlace_Progressive), "Failed to set interlace mode attribute on EVR input media type.\n");
CHECK_HR(pvideoSourceModType->SetUINT32(MF_MT_ALL_SAMPLES_INDEPENDENT, TRUE), "Failed to set independent samples attribute on EVR input media type.\n");
CHECK_HR(MFSetAttributeRatio(pvideoSourceModType, MF_MT_PIXEL_ASPECT_RATIO, 1, 1), "Failed to set pixel aspect ratio attribute on EVR input media type.\n");
CHECK_HR(CopyAttribute(videoSourceOutputType, pvideoSourceModType, MF_MT_FRAME_SIZE), "Failed to copy video frame size attribute from input file to output sink.\n");
CHECK_HR(CopyAttribute(videoSourceOutputType, pvideoSourceModType, MF_MT_FRAME_RATE), "Failed to copy video frame rate attribute from input file to output sink.\n");
CHECK_HR(videoReader->SetCurrentMediaType((DWORD)MF_SOURCE_READER_FIRST_VIDEO_STREAM, NULL, pvideoSourceModType), "Failed to set media type on source reader.\n");
Console::WriteLine("Output media type set on source reader:");
Console::WriteLine(GetMediaTypeDescription(pvideoSourceModType));
Console::WriteLine();
// Create EVR sink .
//CHECK_HR(MFCreateVideoRenderer(__uuidof(IMFMediaSink), (void**)&pVideoSink), "Failed to create video sink.\n");
CHECK_HR(MFCreateVideoRendererActivate(_hwnd, &pActive), "Failed to create video renderer activation context.\n");
CHECK_HR(pActive->ActivateObject(IID_IMFMediaSink, (void**)&pVideoSink), "Failed to activate IMFMediaSink interface on video sink.\n");
// Initialize the renderer before doing anything else including querying for other interfaces (https://msdn.microsoft.com/en-us/library/windows/desktop/ms704667(v=vs.85).aspx).
CHECK_HR(pVideoSink->QueryInterface(__uuidof(IMFVideoRenderer), (void**)&pVideoRenderer), "Failed to get video Renderer interface from EVR media sink.\n");
CHECK_HR(pVideoRenderer->InitializeRenderer(NULL, NULL), "Failed to initialise the video renderer.\n");
CHECK_HR(pVideoSink->QueryInterface(__uuidof(IMFGetService), (void**)&pService), "Failed to get service interface from EVR media sink.\n");
CHECK_HR(pService->GetService(MR_VIDEO_RENDER_SERVICE, __uuidof(IMFVideoDisplayControl), (void**)&pVideoDisplayControl), "Failed to get video display control interface from service interface.\n");
CHECK_HR(pVideoSink->GetStreamSinkByIndex(0, &pStreamSink), "Failed to get video renderer stream by index.\n");
CHECK_HR(pStreamSink->GetMediaTypeHandler(&pMediaTypeHandler), "Failed to get media type handler.\n");
// Set the video output type on the source reader.
CHECK_HR(MFCreateMediaType(&pVideoOutType), "Failed to create video output media type.\n");
CHECK_HR(pVideoOutType->SetGUID(MF_MT_MAJOR_TYPE, MFMediaType_Video), "Failed to set video output media major type.\n");
CHECK_HR(pVideoOutType->SetGUID(MF_MT_SUBTYPE, MFVideoFormat_RGB32), "Failed to set video sub-type attribute on EVR input media type.\n");
CHECK_HR(pVideoOutType->SetUINT32(MF_MT_INTERLACE_MODE, MFVideoInterlace_Progressive), "Failed to set interlace mode attribute on EVR input media type.\n");
CHECK_HR(pVideoOutType->SetUINT32(MF_MT_ALL_SAMPLES_INDEPENDENT, TRUE), "Failed to set independent samples attribute on EVR input media type.\n");
CHECK_HR(MFSetAttributeRatio(pVideoOutType, MF_MT_PIXEL_ASPECT_RATIO, 1, 1), "Failed to set pixel aspect ratio attribute on EVR input media type.\n");
CHECK_HR(CopyAttribute(videoSourceOutputType, pVideoOutType, MF_MT_FRAME_SIZE), "Failed to copy video frame size attribute from input file to output sink.\n");
CHECK_HR(CopyAttribute(videoSourceOutputType, pVideoOutType, MF_MT_FRAME_RATE), "Failed to copy video frame rate attribute from input file to output sink.\n");
//CHECK_HR(pMediaTypeHandler->GetMediaTypeByIndex(0, &pSinkMediaType), "Failed to get sink media type.\n");
CHECK_HR(pMediaTypeHandler->SetCurrentMediaType(pVideoOutType), "Failed to set current media type.\n");
Console::WriteLine("Input media type set on EVR:");
Console::WriteLine(GetMediaTypeDescription(pVideoOutType));
Console::WriteLine();
CHECK_HR(MFCreatePresentationClock(&pClock), "Failed to create presentation clock.\n");
CHECK_HR(MFCreateSystemTimeSource(&pTimeSource), "Failed to create system time source.\n");
CHECK_HR(pClock->SetTimeSource(pTimeSource), "Failed to set time source.\n");
//CHECK_HR(pClock->Start(0), "Error starting presentation clock.\n");
CHECK_HR(pVideoSink->SetPresentationClock(pClock), "Failed to set presentation clock on video sink.\n");
Console::WriteLine("Press any key to start video sampling...");
Console::ReadLine();
IMFSample *videoSample = NULL;
DWORD streamIndex, flags;
LONGLONG llTimeStamp;
while (true)
{
CHECK_HR(videoReader->ReadSample(
MF_SOURCE_READER_FIRST_VIDEO_STREAM,
0, // Flags.
&streamIndex, // Receives the actual stream index.
&flags, // Receives status flags.
&llTimeStamp, // Receives the time stamp.
&videoSample // Receives the sample or NULL.
), "Error reading video sample.");
if (flags & MF_SOURCE_READERF_ENDOFSTREAM)
{
printf("End of stream.\n");
break;
}
if (flags & MF_SOURCE_READERF_STREAMTICK)
{
printf("Stream tick.\n");
}
if (!videoSample)
{
printf("Null video sample.\n");
}
else
{
printf("Attempting to write sample to stream sink.\n");
CHECK_HR(videoSample->SetSampleTime(llTimeStamp), "Error setting the video sample time.\n");
//CHECK_HR(videoSample->SetSampleDuration(41000000), "Error setting the video sample duration.\n");
CHECK_HR(pStreamSink->ProcessSample(videoSample), "Streamsink process sample failed.\n");
}
SafeRelease(&videoSample);
}
done:
printf("finished.\n");
getchar();
return 0;
}
LRESULT CALLBACK WindowProc(HWND hwnd, UINT uMsg, WPARAM wParam, LPARAM lParam)
{
return DefWindowProc(hwnd, uMsg, wParam, lParam);
}
void InitializeWindow()
{
WNDCLASS wc = { 0 };
wc.lpfnWndProc = WindowProc;
wc.hInstance = GetModuleHandle(NULL);
wc.hCursor = LoadCursor(NULL, IDC_ARROW);
wc.lpszClassName = CLASS_NAME;
if (RegisterClass(&wc))
{
_hwnd = CreateWindow(
CLASS_NAME,
WINDOW_NAME,
WS_OVERLAPPEDWINDOW,
CW_USEDEFAULT,
CW_USEDEFAULT,
640,
480,
NULL,
NULL,
GetModuleHandle(NULL),
NULL
);
if (_hwnd)
{
ShowWindow(_hwnd, SW_SHOWDEFAULT);
MSG msg = { 0 };
while (true)
{
if (PeekMessage(&msg, NULL, 0, 0, PM_REMOVE))
{
TranslateMessage(&msg);
DispatchMessage(&msg);
}
else
{
Sleep(1);
}
}
}
}
}

Yes, it seems that the EVR needs video samples:
The video sample object is a specialized implementation of the IMFSample interface for use with the Enhanced Video Renderer (EVR).
But it seems you don't need to provide the video samples manually if the upstream decoder supports DXVA (see IMFVideoSampleAllocator):
The Media Session uses this interface to allocate samples for the EVR, unless the upstream decoder supports DirectX Video Acceleration (DXVA).
I think you should investigate IMFVideoSampleAllocator, because your software is acting as the Media Session here.
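For reference, a minimal sketch of the allocator usage, using the variable names from the listings in this question (the full listing in the EDIT below does the same thing with error checking):
// Sketch only; error handling omitted. pStreamSink, pVideoSink and
// pVideoOutType are the variables from the listings in this question.
IMFVideoSampleAllocator *pAllocator = NULL;
IDirect3DDeviceManager9 *pD3DMgr = NULL;
IMFSample *pD3DSample = NULL;
// The stream sink exposes the allocator, the media sink the D3D device manager.
MFGetService(pStreamSink, MR_VIDEO_ACCELERATION_SERVICE, IID_PPV_ARGS(&pAllocator));
MFGetService(pVideoSink, MR_VIDEO_ACCELERATION_SERVICE, IID_PPV_ARGS(&pD3DMgr));
pAllocator->SetDirectXManager(pD3DMgr);
pAllocator->InitializeSampleAllocator(1, pVideoOutType);
pAllocator->AllocateSample(&pD3DSample);
// Copy each decoded frame's buffer and timestamps into pD3DSample,
// then call pStreamSink->ProcessSample(pD3DSample).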
You also need to start the presentation clock just before the video-processing while loop.
You also need to release all the interfaces, stop the clock, and call the appropriate shutdown methods when you're done.
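A minimal sketch of that teardown, using the variable names from the listings (stop the clock, shut down the sink and source, then release):
// Sketch only: teardown for the 'done:' label, using the listing's variables.
if (pClock) pClock->Stop();                       // stop the presentation clock
if (pVideoSink) pVideoSink->Shutdown();           // IMFMediaSink::Shutdown
if (mediaFileSource) mediaFileSource->Shutdown(); // IMFMediaSource::Shutdown
SafeRelease(&pStreamSink);
SafeRelease(&pVideoSink);
SafeRelease(&pClock);
// ...release the remaining interfaces the same way...
MFShutdown();
CoUninitialize();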
As David Wohlferd said, you should also handle the media sink's events, which will improve your program.
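For example, a sketch (untested here) of pulling pending events from the EVR's stream sink, which inherits IMFMediaEventGenerator, instead of pushing samples blindly:
// Sketch only: drain queued stream sink events without blocking.
IMFMediaEvent *pEvent = NULL;
MediaEventType evType = MEUnknown;
// MF_EVENT_FLAG_NO_WAIT makes GetEvent return immediately when the queue is empty.
while (SUCCEEDED(pStreamSink->GetEvent(MF_EVENT_FLAG_NO_WAIT, &pEvent)))
{
    pEvent->GetType(&evType);
    if (evType == MEStreamSinkRequestSample)
    {
        // The sink wants another sample: read the next frame, copy it into
        // the D3D sample and call pStreamSink->ProcessSample here.
    }
    SafeRelease(&pEvent);
}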
EDIT :
#include <stdio.h>
#include <tchar.h>
#include <evr.h>
#include <mfapi.h>
#include <mfplay.h>
#include <mfreadwrite.h>
#include <mferror.h>
#include "MFUtility.h"
#include <windows.h>
#include <windowsx.h>
#include <d3d9.h>
#include <mfobjects.h>
#include <Dxva2api.h>
#pragma comment(lib, "mf.lib")
#pragma comment(lib, "evr.lib")
#pragma comment(lib, "mfplat.lib")
#pragma comment(lib, "mfplay.lib")
#pragma comment(lib, "mfreadwrite.lib")
#pragma comment(lib, "mfuuid.lib")
#pragma comment(lib, "Strmiids")
#pragma comment(lib, "wmcodecdspuuid.lib")
#pragma comment(lib, "d3d9.lib")
#pragma comment(lib, "Dxva2.lib")
#define CHECK_HR(hr, msg) if (hr != S_OK) { printf(msg); printf("Error: %.2X.\n", hr); goto done; }
void InitializeWindow();
HRESULT CreateD3DSample(IDirect3DSwapChain9 *pSwapChain, IMFSample **ppVideoSample);
// Constants
const WCHAR CLASS_NAME[] = L"MFVideoEVR Window Class";
const WCHAR WINDOW_NAME[] = L"MFVideoEVR";
// Globals.
HWND _hwnd;
LPDIRECT3D9 _d3d; // the pointer to our Direct3D interface
LPDIRECT3DDEVICE9 _d3ddev; // the pointer to the device class
IDirect3DSwapChain9 * _pSwapChain;
IDirect3DTexture9 *_pd3dTexture;
#define VIDEO_WIDTH 320
#define VIDEO_HEIGHT 240
using namespace System::Threading::Tasks;
int main()
{
CoInitializeEx(NULL, COINIT_APARTMENTTHREADED | COINIT_DISABLE_OLE1DDE);
MFStartup(MF_VERSION);
IMFMediaSource *videoSource = NULL;
UINT32 videoDeviceCount = 0;
IMFAttributes *videoConfig = NULL;
IMFActivate **videoDevices = NULL;
IMFSourceReader *videoReader = NULL;
WCHAR *webcamFriendlyName;
IMFMediaType *videoSourceOutputType = NULL, *pvideoSourceModType = NULL, *pSrcOutMediaType = NULL;
IMFSourceResolver *pSourceResolver = NULL;
IUnknown* uSource = NULL;
IMFMediaSource *mediaFileSource = NULL;
IMFAttributes *pVideoReaderAttributes = NULL;
IMFMediaType *pVideoOutType = NULL;
MF_OBJECT_TYPE ObjectType = MF_OBJECT_INVALID;
IMFMediaSink *pVideoSink = NULL;
IMFStreamSink *pStreamSink = NULL;
IMFMediaTypeHandler *pMediaTypeHandler = NULL;
IMFMediaType *pMediaType = NULL;
IMFMediaType *pSinkMediaType = NULL;
IMFSinkWriter *pSinkWriter = NULL;
IMFVideoRenderer *pVideoRenderer = NULL;
IMFVideoPresenter *pVideoPresenter = nullptr;
IMFVideoDisplayControl *pVideoDisplayControl = nullptr;
IMFGetService *pService = nullptr;
IMFActivate* pActive = NULL;
MFVideoNormalizedRect nrcDest = { 0.5f, 0.5f, 1.0f, 1.0f };
IMFPresentationTimeSource *pSystemTimeSource = nullptr;
IMFMediaType *sinkPreferredType = nullptr;
IMFPresentationClock *pClock = NULL;
IMFPresentationTimeSource *pTimeSource = NULL;
IDirect3DDeviceManager9 * pD3DManager = NULL;
IMFVideoSampleAllocator* pEvrSampleAllocator = nullptr;
// Add
IMFVideoSampleAllocator* pVideoSampleAllocator = NULL;
IMFSample* pD3DVideoSample = NULL;
RECT rc = { 0, 0, VIDEO_WIDTH, VIDEO_HEIGHT };
CHECK_HR(MFTRegisterLocalByCLSID(
__uuidof(CColorConvertDMO),
MFT_CATEGORY_VIDEO_PROCESSOR,
L"",
MFT_ENUM_FLAG_SYNCMFT,
0,
NULL,
0,
NULL
), "Error registering colour converter DSP.\n");
Task::Factory->StartNew(gcnew Action(InitializeWindow));
Sleep(1000);
if (_hwnd == nullptr)
{
printf("Failed to initialise video window.\n");
goto done;
}
// Create EVR sink .
//CHECK_HR(MFCreateVideoRenderer(__uuidof(IMFMediaSink), (void**)&pVideoSink), "Failed to create video sink.\n");
CHECK_HR(MFCreateVideoRendererActivate(_hwnd, &pActive), "Failed to create video renderer activation context.\n");
CHECK_HR(pActive->ActivateObject(IID_IMFMediaSink, (void**)&pVideoSink), "Failed to activate IMFMediaSink interface on video sink.\n");
// Initialize the renderer before doing anything else including querying for other interfaces (https://msdn.microsoft.com/en-us/library/windows/desktop/ms704667(v=vs.85).aspx).
CHECK_HR(pVideoSink->QueryInterface(__uuidof(IMFVideoRenderer), (void**)&pVideoRenderer), "Failed to get video Renderer interface from EVR media sink.\n");
CHECK_HR(pVideoRenderer->InitializeRenderer(NULL, NULL), "Failed to initialise the video renderer.\n");
CHECK_HR(pVideoSink->QueryInterface(__uuidof(IMFGetService), (void**)&pService), "Failed to get service interface from EVR media sink.\n");
CHECK_HR(pService->GetService(MR_VIDEO_RENDER_SERVICE, __uuidof(IMFVideoDisplayControl), (void**)&pVideoDisplayControl), "Failed to get video display control interface from service interface.\n");
CHECK_HR(pVideoDisplayControl->SetVideoWindow(_hwnd), "Failed to SetVideoWindow.\n");
CHECK_HR(pVideoDisplayControl->SetVideoPosition(NULL, &rc), "Failed to SetVideoPosition.\n");
CHECK_HR(MFGetService(pVideoSink, MR_VIDEO_ACCELERATION_SERVICE, IID_PPV_ARGS(&pD3DManager)), "Failed to get Direct3D manager from EVR media sink.\n");
//CHECK_HR(MFGetService(pVideoSink, MR_VIDEO_ACCELERATION_SERVICE, IID_PPV_ARGS(&pEvrSampleAllocator)), "Failed to get sample allocator from EVR media sink.\n");
//CHECK_HR(pService->GetService(MR_VIDEO_ACCELERATION_SERVICE, __uuidof(IMFVideoSampleAllocator), (void**)pEvrSampleAllocator), "Failed to get sample allocator from EVR media sink.\n");
// Set up the reader for the file.
CHECK_HR(MFCreateSourceResolver(&pSourceResolver), "MFCreateSourceResolver failed.\n");
CHECK_HR(pSourceResolver->CreateObjectFromURL(
L"big_buck_bunny_240p_5mb.mp4", // URL of the source.
MF_RESOLUTION_MEDIASOURCE, // Create a source object.
NULL, // Optional property store.
&ObjectType, // Receives the created object type.
&uSource // Receives a pointer to the media source.
), "Failed to create media source resolver for file.\n");
CHECK_HR(uSource->QueryInterface(IID_PPV_ARGS(&mediaFileSource)), "Failed to create media file source.\n");
CHECK_HR(MFCreateAttributes(&pVideoReaderAttributes, 2), "Failed to create attributes object for video reader.\n");
//CHECK_HR(pVideoReaderAttributes->SetGUID(MF_DEVSOURCE_ATTRIBUTE_SOURCE_TYPE, MF_DEVSOURCE_ATTRIBUTE_SOURCE_TYPE_VIDCAP_GUID), "Failed to set dev source attribute type for reader config.\n");
CHECK_HR(pVideoReaderAttributes->SetUINT32(MF_SOURCE_READER_ENABLE_VIDEO_PROCESSING, 1), "Failed to set enable video processing attribute type for reader config.\n");
//CHECK_HR(pVideoReaderAttributes->SetUINT32(MF_SOURCE_READER_ENABLE_ADVANCED_VIDEO_PROCESSING, 1), "Failed to set enable advanced video processing attribute type for reader config.\n");
//CHECK_HR(pVideoReaderAttributes->SetUnknown(MF_SOURCE_READER_D3D_MANAGER, pD3DManager), "Failed to set D3D manager attribute type for reader config.\n");
CHECK_HR(MFCreateSourceReaderFromMediaSource(mediaFileSource, pVideoReaderAttributes, &videoReader),
"Error creating media source reader.\n");
CHECK_HR(videoReader->GetCurrentMediaType((DWORD)MF_SOURCE_READER_FIRST_VIDEO_STREAM, &videoSourceOutputType),
"Error retrieving current media type from first video stream.\n");
Console::WriteLine("Default output media type for source reader:");
Console::WriteLine(GetMediaTypeDescription(videoSourceOutputType));
Console::WriteLine();
// Set the video output type on the source reader.
CHECK_HR(MFCreateMediaType(&pvideoSourceModType), "Failed to create video output media type.\n");
CHECK_HR(pvideoSourceModType->SetGUID(MF_MT_MAJOR_TYPE, MFMediaType_Video), "Failed to set video output media major type.\n");
CHECK_HR(pvideoSourceModType->SetGUID(MF_MT_SUBTYPE, MFVideoFormat_RGB32), "Failed to set video sub-type attribute on EVR input media type.\n");
CHECK_HR(pvideoSourceModType->SetUINT32(MF_MT_INTERLACE_MODE, MFVideoInterlace_Progressive), "Failed to set interlace mode attribute on EVR input media type.\n");
CHECK_HR(pvideoSourceModType->SetUINT32(MF_MT_ALL_SAMPLES_INDEPENDENT, TRUE), "Failed to set independent samples attribute on EVR input media type.\n");
CHECK_HR(MFSetAttributeRatio(pvideoSourceModType, MF_MT_PIXEL_ASPECT_RATIO, 1, 1), "Failed to set pixel aspect ratio attribute on EVR input media type.\n");
CHECK_HR(CopyAttribute(videoSourceOutputType, pvideoSourceModType, MF_MT_FRAME_SIZE), "Failed to copy video frame size attribute from input file to output sink.\n");
CHECK_HR(CopyAttribute(videoSourceOutputType, pvideoSourceModType, MF_MT_FRAME_RATE), "Failed to copy video frame rate attribute from input file to output sink.\n");
//CHECK_HR(pvideoSourceModType->SetUnknown(MF_SOURCE_READER_D3D_MANAGER, pD3DManager), "Failed to set D3D manager attribute type on EVR input media type.\n");
CHECK_HR(videoReader->SetCurrentMediaType((DWORD)MF_SOURCE_READER_FIRST_VIDEO_STREAM, NULL, pvideoSourceModType), "Failed to set media type on source reader.\n");
Console::WriteLine("Output media type set on source reader:");
Console::WriteLine(GetMediaTypeDescription(pvideoSourceModType));
Console::WriteLine();
CHECK_HR(pVideoSink->GetStreamSinkByIndex(0, &pStreamSink), "Failed to get video renderer stream by index.\n");
CHECK_HR(pStreamSink->GetMediaTypeHandler(&pMediaTypeHandler), "Failed to get media type handler.\n");
// Set the video output type on the source reader.
CHECK_HR(MFCreateMediaType(&pVideoOutType), "Failed to create video output media type.\n");
CHECK_HR(pVideoOutType->SetGUID(MF_MT_MAJOR_TYPE, MFMediaType_Video), "Failed to set video output media major type.\n");
CHECK_HR(pVideoOutType->SetGUID(MF_MT_SUBTYPE, MFVideoFormat_RGB32), "Failed to set video sub-type attribute on EVR input media type.\n");
CHECK_HR(pVideoOutType->SetUINT32(MF_MT_INTERLACE_MODE, MFVideoInterlace_Progressive), "Failed to set interlace mode attribute on EVR input media type.\n");
CHECK_HR(pVideoOutType->SetUINT32(MF_MT_ALL_SAMPLES_INDEPENDENT, TRUE), "Failed to set independent samples attribute on EVR input media type.\n");
CHECK_HR(MFSetAttributeRatio(pVideoOutType, MF_MT_PIXEL_ASPECT_RATIO, 1, 1), "Failed to set pixel aspect ratio attribute on EVR input media type.\n");
CHECK_HR(CopyAttribute(videoSourceOutputType, pVideoOutType, MF_MT_FRAME_SIZE), "Failed to copy video frame size attribute from input file to output sink.\n");
CHECK_HR(CopyAttribute(videoSourceOutputType, pVideoOutType, MF_MT_FRAME_RATE), "Failed to copy video frame rate attribute from input file to output sink.\n");
//CHECK_HR(pVideoOutType->SetUnknown(MF_SOURCE_READER_D3D_MANAGER, pD3DManager), "Failed to set D3D manager attribute on EVR input media type.\n");
//CHECK_HR(pMediaTypeHandler->GetMediaTypeByIndex(0, &pSinkMediaType), "Failed to get sink media type.\n");
CHECK_HR(pMediaTypeHandler->SetCurrentMediaType(pVideoOutType), "Failed to set current media type.\n");
Console::WriteLine("Input media type set on EVR:");
Console::WriteLine(GetMediaTypeDescription(pVideoOutType));
Console::WriteLine();
// https://msdn.microsoft.com/fr-fr/library/windows/desktop/aa473823(v=vs.85).aspx
CHECK_HR(MFGetService(pStreamSink, MR_VIDEO_ACCELERATION_SERVICE, IID_PPV_ARGS(&pVideoSampleAllocator)), "Failed to get IMFVideoSampleAllocator.\n");
CHECK_HR(MFGetService(pVideoSink, MR_VIDEO_ACCELERATION_SERVICE, IID_PPV_ARGS(&pD3DManager)), "Failed to get D3DManager.\n");
CHECK_HR(pVideoSampleAllocator->SetDirectXManager(pD3DManager), "Failed to set D3DManager.\n");
CHECK_HR(pVideoSampleAllocator->InitializeSampleAllocator(1, pVideoOutType), "Failed to InitializeSampleAllocator.\n");
CHECK_HR(pVideoSampleAllocator->AllocateSample(&pD3DVideoSample), "Failed to AllocateSample.\n");
CHECK_HR(MFCreatePresentationClock(&pClock), "Failed to create presentation clock.\n");
CHECK_HR(MFCreateSystemTimeSource(&pTimeSource), "Failed to create system time source.\n");
CHECK_HR(pClock->SetTimeSource(pTimeSource), "Failed to set time source.\n");
CHECK_HR(pVideoSink->SetPresentationClock(pClock), "Failed to set presentation clock on video sink.\n");
CHECK_HR(pClock->Start(0), "Error starting presentation clock.\n");
// Wait for the Sink to start
Sleep(1000);
//_d3d = Direct3DCreate9(D3D_SDK_VERSION); // create the Direct3D interface
//D3DPRESENT_PARAMETERS d3dpp; // create a struct to hold various device information
//ZeroMemory(&d3dpp, sizeof(d3dpp)); // clear out the struct for use
//d3dpp.Windowed = TRUE; // program windowed, not fullscreen
//d3dpp.SwapEffect = D3DSWAPEFFECT_DISCARD; // discard old frames
//d3dpp.hDeviceWindow = _hwnd; // set the window to be used by Direct3D
//d3dpp.BackBufferFormat = D3DFMT_X8R8G8B8; // set the back buffer format to 32-bit
//d3dpp.BackBufferWidth = 640; // set the width of the buffer
//d3dpp.BackBufferHeight = 360; // set the height of the buffer
//// create a device class using this information and information from the d3dpp stuct
//_d3d->CreateDevice(D3DADAPTER_DEFAULT,
// D3DDEVTYPE_HAL,
// _hwnd,
// D3DCREATE_SOFTWARE_VERTEXPROCESSING,
// &d3dpp,
// &_d3ddev);
//CHECK_HR(_d3ddev->GetSwapChain(0, &_pSwapChain), "Failed to get swap chain from D3D device.\n");
////_d3ddev->CreateTexture(640, 360, 0, 0, D3DFMT_X8R8G8B8, D3DPOOL_DEFAULT, &_pd3dTexture, NULL);
//// clear the window to a deep blue
//_d3ddev->Clear(0, NULL, D3DCLEAR_TARGET, D3DCOLOR_XRGB(0, 40, 100), 1.0f, 0);
//_d3ddev->BeginScene(); // begins the 3D scene
//_d3ddev->EndScene(); // ends the 3D scene
//_d3ddev->Present(NULL, NULL, NULL, NULL); // displays the created frame
Console::WriteLine("Press any key to start video sampling...");
//Console::ReadLine();
IMFSample *videoSample = NULL;
DWORD streamIndex, flags;
LONGLONG llTimeStamp;
bool clockStarted = false;
IMFSample *d3dSample = nullptr;
LONGLONG llVideoTimeStamp = 0;
UINT32 uiAttribute = 0;
IMFMediaBuffer* pSrcBuffer = NULL;
IMFMediaBuffer* pDstBuffer = NULL;
IMF2DBuffer* p2DBuffer = NULL;
BYTE* pbBuffer = NULL;
DWORD dwBuffer = 0;
while (true)
{
CHECK_HR(videoReader->ReadSample(
MF_SOURCE_READER_FIRST_VIDEO_STREAM,
0, // Flags.
&streamIndex, // Receives the actual stream index.
&flags, // Receives status flags.
&llTimeStamp, // Receives the time stamp.
&videoSample // Receives the sample or NULL.
), "Error reading video sample.");
if (flags & MF_SOURCE_READERF_ENDOFSTREAM)
{
printf("End of stream.\n");
break;
}
if (flags & MF_SOURCE_READERF_STREAMTICK)
{
printf("Stream tick.\n");
}
if (!videoSample)
{
printf("Null video sample.\n");
}
else
{
/*if (!clockStarted)
{
clockStarted = true;
CHECK_HR(pClock->Start(llTimeStamp), "Error starting the presentation clock.\n");
}*/
printf("Attempting to write sample to stream sink.\n");
//CHECK_HR(videoSample->SetSampleTime(llTimeStamp), "Error setting the video sample time.\n");
//CHECK_HR(videoSample->SetSampleDuration(41000000), "Error setting the video sample duration.\n");
/*CHECK_HR(CreateD3DSample(_pSwapChain, &d3dSample), "Failed to create 3D sample.\n");
CHECK_HR(d3dSample->SetSampleTime(llTimeStamp), "Error setting the 3D sample time.\n");*/
//CHECK_HR(pStreamSink->ProcessSample(videoSample), "Streamsink process sample failed.\n");
//CHECK_HR(pStreamSink->ProcessSample(d3dSample), "Streamsink process sample failed.\n");
CHECK_HR(videoSample->GetSampleTime(&llVideoTimeStamp), ".\n");
CHECK_HR(pD3DVideoSample->SetSampleTime(llVideoTimeStamp), ".\n");
CHECK_HR(videoSample->GetSampleDuration(&llVideoTimeStamp), ".\n");
CHECK_HR(pD3DVideoSample->SetSampleDuration(llVideoTimeStamp), ".\n");
CHECK_HR(videoSample->ConvertToContiguousBuffer(&pSrcBuffer), ".\n");
CHECK_HR(pSrcBuffer->Lock(&pbBuffer, NULL, &dwBuffer), ".\n");
CHECK_HR(pD3DVideoSample->GetBufferByIndex(0, &pDstBuffer), ".\n");
CHECK_HR(pDstBuffer->QueryInterface(IID_PPV_ARGS(&p2DBuffer)), ".\n");
CHECK_HR(p2DBuffer->ContiguousCopyFrom(pbBuffer, dwBuffer), ".\n");
CHECK_HR(pSrcBuffer->Unlock(), ".\n");
CHECK_HR(videoSample->GetUINT32(MFSampleExtension_FrameCorruption, &uiAttribute), ".\n");
CHECK_HR(pD3DVideoSample->SetUINT32(MFSampleExtension_FrameCorruption, uiAttribute), ".\n");
CHECK_HR(videoSample->GetUINT32(MFSampleExtension_Discontinuity, &uiAttribute), ".\n");
CHECK_HR(pD3DVideoSample->SetUINT32(MFSampleExtension_Discontinuity, uiAttribute), ".\n");
CHECK_HR(videoSample->GetUINT32(MFSampleExtension_CleanPoint, &uiAttribute), ".\n");
CHECK_HR(pD3DVideoSample->SetUINT32(MFSampleExtension_CleanPoint, uiAttribute), ".\n");
CHECK_HR(pStreamSink->ProcessSample(pD3DVideoSample), "Streamsink process sample failed.\n");
Sleep(75);
}
SafeRelease(&p2DBuffer);
SafeRelease(&pDstBuffer);
SafeRelease(&pSrcBuffer);
SafeRelease(&videoSample);
}
done:
// Todo : stop clock - shutdown object - release object
SafeRelease(&_d3ddev); // close and release the 3D device
SafeRelease(&_d3d); // close and release Direct3D
printf("finished.\n");
getchar();
return 0;
}
HRESULT CreateD3DSample(
IDirect3DSwapChain9 *pSwapChain,
IMFSample **ppVideoSample
)
{
// Caller holds the object lock.
D3DCOLOR clrBlack = D3DCOLOR_ARGB(0xFF, 0x00, 0x00, 0x00);
IDirect3DSurface9* pSurface = NULL;
IMFSample* pSample = NULL;
// Get the back buffer surface.
HRESULT hr = pSwapChain->GetBackBuffer(
0, D3DBACKBUFFER_TYPE_MONO, &pSurface);
if (FAILED(hr))
{
goto done;
}
// Fill it with black.
hr = _d3ddev->ColorFill(pSurface, NULL, clrBlack);
if (FAILED(hr))
{
goto done;
}
// Create the sample.
hr = MFCreateVideoSampleFromSurface(pSurface, &pSample);
if (FAILED(hr))
{
goto done;
}
// Return the pointer to the caller.
*ppVideoSample = pSample;
(*ppVideoSample)->AddRef();
done:
SafeRelease(&pSurface);
SafeRelease(&pSample);
return hr;
}
LRESULT CALLBACK WindowProc(HWND hwnd, UINT uMsg, WPARAM wParam, LPARAM lParam)
{
return DefWindowProc(hwnd, uMsg, wParam, lParam);
}
void InitializeWindow()
{
WNDCLASS wc = { 0 };
wc.lpfnWndProc = WindowProc;
wc.hInstance = GetModuleHandle(NULL);
wc.hCursor = LoadCursor(NULL, IDC_ARROW);
wc.lpszClassName = CLASS_NAME;
if (RegisterClass(&wc))
{
_hwnd = CreateWindow(
CLASS_NAME,
WINDOW_NAME,
WS_OVERLAPPEDWINDOW,
CW_USEDEFAULT,
CW_USEDEFAULT,
VIDEO_WIDTH,
VIDEO_HEIGHT,
NULL,
NULL,
GetModuleHandle(NULL),
NULL
);
if (_hwnd)
{
ShowWindow(_hwnd, SW_SHOWDEFAULT);
MSG msg = { 0 };
while (true)
{
if (PeekMessage(&msg, NULL, 0, 0, PM_REMOVE))
{
TranslateMessage(&msg);
DispatchMessage(&msg);
}
else
{
Sleep(1);
}
}
}
}
}

I happened across a post on the MSDN forums which mentions that the EVR requires a D3D surface sample rather than a standard IMFSample. I did suspect the problem might be something like that. I don't know exactly how to supply the D3D sample to the EVR, and I've already had a few goes at doing so, but at least I now know which direction to follow.

Related

std::future_error when using std::promise

I'm trying to make a video player. I have added a thread to time how long a frame should be shown on the screen. I'm trying to decode the video and update the window in the main thread; the second thread will get the packets, see how long each packet should be displayed, send the packet to the main thread, and then wait for that time to elapse.
For some reason I get this error:
terminate called after throwing an instance of 'std::future_error'
what(): std::future_error: No associated state
What's causing the error?
My Code:
extern "C"{
//FFmpeg libraries
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>
//SDL2 libraries
#include <SDL2/SDL.h>
}
// compatibility with newer API
#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(55,28,1)
#define av_frame_alloc avcodec_alloc_frame
#define av_frame_free avcodec_free_frame
#endif
//C++ libraries
#include <memory>
#include <stdio.h>
#include <iostream>
#include <chrono>
#include <thread>
#include <mutex>
#include <condition_variable>
#include <future>
#include <atomic>
typedef struct PacketQueue {
AVPacketList *first_pkt, *last_pkt;
} PacketQueue;
std::atomic<bool> quitting;
std::mutex mutex;
std::condition_variable convar;
int packet_queue_put(PacketQueue *q, AVPacket *pkt){
AVPacketList *pkt1;
if(av_dup_packet(pkt) < 0){
return -1;
}
pkt1 = (AVPacketList*) av_malloc(sizeof(AVPacketList));
if(!pkt1){
return -1;
}
pkt1->pkt = *pkt;
pkt1->next = NULL;
std::lock_guard<std::mutex> lock(mutex);
if (!q->last_pkt){
q->first_pkt = pkt1;
}else{
q->last_pkt->next = pkt1;
}
q->last_pkt = pkt1;
convar.notify_all();
return 0;
}
static int packet_queue_get(PacketQueue *q, AVPacket *pkt){
AVPacketList *pkt1;
int ret;
std::unique_lock<std::mutex> lk(mutex);
while(1){
if(quitting){
ret = -1;
break;
}
pkt1 = q->first_pkt;
if(pkt1){
q->first_pkt = pkt1->next;
if(!q->first_pkt){
q->last_pkt = NULL;
}
*pkt = pkt1->pkt;
av_free(pkt1);
ret = 1;
break;
}else {
convar.wait_for(lk, std::chrono::milliseconds(1));
}
}
return ret;
}
void videoTimerFunc(AVRational time_base, PacketQueue* videoq, std::promise<AVPacket> prms){
AVPacket pkt;
int64_t last_pts = 0;
int64_t frameDelay;
AVRational microseconds = {1, 1000000};
while(!quitting){
// Getting packet and check if there are more packets
if(!packet_queue_get(videoq, &pkt)){
// Close programme
quitting = true;
}else {
// Send packet and create timer
frameDelay = av_rescale_q(pkt.dts, time_base, microseconds) - last_pts;
last_pts = av_rescale_q(pkt.dts, time_base, microseconds);
prms.set_value(pkt);
std::this_thread::sleep_for(std::chrono::microseconds(frameDelay));
}
}
}
int main(int argc, char *argv[]){
AVFormatContext* FormatCtx = nullptr;
AVCodecContext* CodecCtxOrig = nullptr;
AVCodecContext* CodecCtx = nullptr;
AVCodec* Codec = nullptr;
int videoStream;
AVFrame* Frame = nullptr;
AVPacket packet;
struct SwsContext* SwsCtx = nullptr;
PacketQueue videoq;
std::promise<AVPacket> pktprms;
std::future<AVPacket> pktftr = pktprms.get_future();
int frameFinished;
int64_t lastPTS;
SDL_Event event;
SDL_Window* screen;
SDL_Renderer* renderer;
SDL_Texture* texture;
std::shared_ptr<Uint8> yPlane, uPlane, vPlane;
int uvPitch;
if (argc != 2) {
fprintf(stderr, "Usage: %s <file>\n", argv[0]);
return -1;
}
// Register all formats and codecs
av_register_all();
// Initialise SDL2
if (SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)) {
fprintf(stderr, "Couldn't initialise SDL - %s\n", SDL_GetError());
return -1;
}
// Setting things up
quitting = false;
memset(&videoq, 0, sizeof(PacketQueue));
// Open video file
if(avformat_open_input(&FormatCtx, argv[1], NULL, NULL) != 0){
fprintf(stderr, "Couldn't open file\n");
return -1; // Couldn't open file
}
// Retrieve stream information
if(avformat_find_stream_info(FormatCtx, NULL) < 0){
fprintf(stderr, "Couldn't find stream information\n");
// Close the video file
avformat_close_input(&FormatCtx);
return -1; // Couldn't find stream information
}
// Find the video stream
videoStream = av_find_best_stream(FormatCtx, AVMEDIA_TYPE_VIDEO, -1, -1, NULL, 0);
if(videoStream < 0){
fprintf(stderr, "Couldn't find video stream\n");
// Close the video file
avformat_close_input(&FormatCtx);
return -1; // Didn't find a video stream
}
// Get a pointer to the codec context for the video stream
CodecCtxOrig = FormatCtx->streams[videoStream]->codec;
// Find the decoder for the video stream
Codec = avcodec_find_decoder(CodecCtxOrig->codec_id);
if(Codec == NULL){
fprintf(stderr, "Unsupported codec\n");
// Close the codec
avcodec_close(CodecCtxOrig);
// Close the video file
avformat_close_input(&FormatCtx);
return -1; // Codec not found
}
// Copy context
CodecCtx = avcodec_alloc_context3(Codec);
if(avcodec_copy_context(CodecCtx, CodecCtxOrig) != 0){
fprintf(stderr, "Couldn't copy codec context");
// Close the codec
avcodec_close(CodecCtxOrig);
// Close the video file
avformat_close_input(&FormatCtx);
return -1; // Error copying codec context
}
// Open codec
if(avcodec_open2(CodecCtx, Codec, NULL) < 0){
fprintf(stderr, "Couldn't open codec\n");
// Close the codec
avcodec_close(CodecCtx);
avcodec_close(CodecCtxOrig);
// Close the video file
avformat_close_input(&FormatCtx);
return -1; // Could not open codec
}
// Allocate video frame
Frame = av_frame_alloc();
// Make a screen to put our video
screen = SDL_CreateWindow("Video Player", SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED, CodecCtx->width, CodecCtx->height, 0);
if(!screen){
fprintf(stderr, "SDL: could not create window - exiting\n");
quitting = true;
// Clean up SDL2
SDL_Quit();
// Free the YUV frame
av_frame_free(&Frame);
// Close the codec
avcodec_close(CodecCtx);
avcodec_close(CodecCtxOrig);
// Close the video file
avformat_close_input(&FormatCtx);
return -1;
}
renderer = SDL_CreateRenderer(screen, -1, 0);
if(!renderer){
fprintf(stderr, "SDL: could not create renderer - exiting\n");
quitting = true;
// Clean up SDL2
SDL_DestroyWindow(screen);
SDL_Quit();
// Free the YUV frame
av_frame_free(&Frame);
// Close the codec
avcodec_close(CodecCtx);
avcodec_close(CodecCtxOrig);
// Close the video file
avformat_close_input(&FormatCtx);
return -1;
}
// Allocate a place to put our YUV image on that screen
texture = SDL_CreateTexture(renderer, SDL_PIXELFORMAT_YV12, SDL_TEXTUREACCESS_STREAMING, CodecCtx->width, CodecCtx->height);
if(!texture){
fprintf(stderr, "SDL: could not create texture - exiting\n");
quitting = true;
// Clean up SDL2
SDL_DestroyRenderer(renderer);
SDL_DestroyWindow(screen);
SDL_Quit();
// Free the YUV frame
av_frame_free(&Frame);
// Close the codec
avcodec_close(CodecCtx);
avcodec_close(CodecCtxOrig);
// Close the video file
avformat_close_input(&FormatCtx);
return -1;
}
// Initialise SWS context for software scaling
SwsCtx = sws_getContext(CodecCtx->width, CodecCtx->height, CodecCtx->pix_fmt,
CodecCtx->width, CodecCtx->height, PIX_FMT_YUV420P, SWS_BILINEAR, NULL, NULL, NULL);
if(!SwsCtx){
fprintf(stderr, "Couldn't create sws context\n");
quitting = true;
// Clean up SDL2
SDL_DestroyTexture(texture);
SDL_DestroyRenderer(renderer);
SDL_DestroyWindow(screen);
SDL_Quit();
// Free the YUV frame
av_frame_free(&Frame);
// Close the codec
avcodec_close(CodecCtx);
avcodec_close(CodecCtxOrig);
// Close the video file
avformat_close_input(&FormatCtx);
return -1;
}
// set up YV12 pixel array (12 bits per pixel)
yPlane = std::shared_ptr<Uint8>((Uint8 *)::operator new (CodecCtx->width * CodecCtx->height, std::nothrow));
uPlane = std::shared_ptr<Uint8>((Uint8 *)::operator new (CodecCtx->width * CodecCtx->height / 4, std::nothrow));
vPlane = std::shared_ptr<Uint8>((Uint8 *)::operator new (CodecCtx->width * CodecCtx->height / 4, std::nothrow));
uvPitch = CodecCtx->width / 2;
if (!yPlane || !uPlane || !vPlane) {
fprintf(stderr, "Could not allocate pixel buffers - exiting\n");
quitting = true;
// Clean up SDL2
SDL_DestroyTexture(texture);
SDL_DestroyRenderer(renderer);
SDL_DestroyWindow(screen);
SDL_Quit();
// Free the YUV frame
av_frame_free(&Frame);
// Close the codec
avcodec_close(CodecCtx);
avcodec_close(CodecCtxOrig);
// Close the video file
avformat_close_input(&FormatCtx);
return -1;
}
std::thread videoTimerThread(videoTimerFunc, FormatCtx->streams[videoStream]->time_base, &videoq, std::move(pktprms));
while (!quitting) {
// Check for more packets
if(av_read_frame(FormatCtx, &packet) >= 0){
// Check what stream it belongs to
if (packet.stream_index == videoStream) {
packet_queue_put(&videoq, &packet);
}else{
// Free the packet that was allocated by av_read_frame
av_free_packet(&packet);
}
}
// Check if its time to update
if(pktftr.wait_for(std::chrono::milliseconds(1)) == std::future_status::ready){
// Getting packet
packet = pktftr.get();
// Decode video frame
avcodec_decode_video2(CodecCtx, Frame, &frameFinished, &packet);
// Did we get a video frame?
if (frameFinished) {
AVPicture pict;
pict.data[0] = yPlane.get();
pict.data[1] = uPlane.get();
pict.data[2] = vPlane.get();
pict.linesize[0] = CodecCtx->width;
pict.linesize[1] = uvPitch;
pict.linesize[2] = uvPitch;
// Convert the image into YUV format that SDL uses
sws_scale(SwsCtx, (uint8_t const * const *) Frame->data, Frame->linesize, 0, CodecCtx->height, pict.data, pict.linesize);
SDL_UpdateYUVTexture(texture, NULL, yPlane.get(), CodecCtx->width, uPlane.get(), uvPitch, vPlane.get(), uvPitch);
SDL_RenderClear(renderer);
SDL_RenderCopy(renderer, texture, NULL, NULL);
SDL_RenderPresent(renderer);
}
// Free the packet that was allocated by av_read_frame
av_free_packet(&packet);
}
SDL_PollEvent(&event);
switch (event.type) {
case SDL_QUIT:
quitting = true;
break;
default:
break;
}
}
videoTimerThread.join();
//SDL2 clean up
SDL_DestroyTexture(texture);
SDL_DestroyRenderer(renderer);
SDL_DestroyWindow(screen);
SDL_Quit();
// Free the YUV frame
av_frame_free(&Frame);
// Free Sws
sws_freeContext(SwsCtx);
// Close the codec
avcodec_close(CodecCtx);
avcodec_close(CodecCtxOrig);
// Close the video file
avformat_close_input(&FormatCtx);
return 0;
}
You can't reuse a std::promise/std::future pair in a loop: each pair can deliver exactly one value, so the second call to set_value()/get() throws std::future_error. Thanks to Igor Tandetnik for pointing that out in the comments.
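A minimal standalone sketch of the constraint and the simplest workaround, creating a fresh pair each iteration (a thread-safe queue of decoded packets would be the more idiomatic fix for the player above):
#include <cstdio>
#include <future>
#include <thread>

int main()
{
    for (int i = 0; i < 3; ++i)
    {
        std::promise<int> prms;                   // a fresh pair each iteration
        std::future<int> ftr = prms.get_future();
        std::thread producer([&prms, i] { prms.set_value(i); }); // set_value is single-use
        std::printf("got %d\n", ftr.get());       // get() is also single-use
        producer.join();
    }
    return 0;
}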

Saving a JPG from a BMP using GDI+ with a set DPI

I have an image in BMP form and I want a C++ program to save it as a JPG using GDI+. After reading some GDI+ documentation I came up with this program:
#include <windows.h>
#include <objidl.h>
#include <gdiplus.h>
#include "GdiplusHelperFunctions.h"
#pragma comment (lib,"Gdiplus.lib")
VOID SaveFile()
{
// Initialize GDI+.
Gdiplus::GdiplusStartupInput gdiplusStartupInput;
ULONG_PTR gdiplusToken;
GdiplusStartup(&gdiplusToken, &gdiplusStartupInput, NULL);
CLSID encoderClsid;
Status stat;
EncoderParameters encoderParameters;
ULONG quality;
Image* image = new Gdiplus::Image(L"plot.bmp");
// Get the CLSID of the JPEG encoder.
GetEncoderClsid(L"image/jpeg", &encoderClsid);
encoderParameters.Count = 1;
encoderParameters.Parameter[0].Guid = EncoderQuality;
encoderParameters.Parameter[0].Type = EncoderParameterValueTypeLong;
encoderParameters.Parameter[0].NumberOfValues = 1;
quality = 100;
encoderParameters.Parameter[0].Value = &quality;
stat = image->Save(L"plot100.jpg", &encoderClsid, &encoderParameters);
if (stat == Ok)
printf("plot.jpg was saved successfully\n");
else
printf("Failure: stat = %d\n", stat);
delete image;
GdiplusShutdown(gdiplusToken);
return;
}
int main()
{
SaveFile();
return 0;
}
But the image is saved with a horizontal and vertical resolution of 7 dpi no matter what the value of "quality" is. I need to save the JPG at 96 dpi; how can I set that?
Thank you in advance.
A modified version of the function SaveFile() solved the problem:
VOID SaveFile()
{
// Initialize GDI+.
Gdiplus::GdiplusStartupInput gdiplusStartupInput;
ULONG_PTR gdiplusToken;
GdiplusStartup(&gdiplusToken, &gdiplusStartupInput, NULL);
CLSID encoderClsid;
Status stat;
EncoderParameters encoderParameters;
ULONG quality;
Gdiplus::Bitmap* bitmap = new Gdiplus::Bitmap(L"plot.bmp");
Gdiplus::REAL dpi = 72;
bitmap->SetResolution(dpi,dpi);
// Get the CLSID of the JPEG encoder.
GetEncoderClsid(L"image/jpeg", &encoderClsid);
encoderParameters.Count = 1;
encoderParameters.Parameter[0].Guid = EncoderQuality;
encoderParameters.Parameter[0].Type = EncoderParameterValueTypeLong;
encoderParameters.Parameter[0].NumberOfValues = 1;
quality = 100;
encoderParameters.Parameter[0].Value = &quality;
stat = bitmap->Save(L"plot.jpg", &encoderClsid, &encoderParameters);
if (stat == Ok)
printf("plot.jpg was saved successfully\n");
else
printf("Failure: stat = %d\n", stat);
delete bitmap;
GdiplusShutdown(gdiplusToken);
return;
}
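Note that the EncoderQuality parameter only controls the JPEG compression level; the DPI stored in the file comes from Bitmap::SetResolution. So to get the 96 dpi the question asks for, pass 96 rather than 72:
bitmap->SetResolution(96.0f, 96.0f);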

app crashed at avformat_find_stream_info

I wrote a native method to simply get the codec ID of a video, but my app never gets past the line 'avformat_find_stream_info(pFormatCtx, NULL)'.
Here is my native method:
int get_video_info(JNIEnv *env, jobject thiz, jstring strInFile){
AVFormatContext *pFmtCtx;
AVCodecContext *pCCtx;
AVCodec *pCd;
AVFrame *picture;
int i, videoStream = -1, error = 0;
const char *in_file_name = (*env)->GetStringUTFChars(env, strInFile, NULL);
av_register_all();
LOGI(1, "HERE 0 %s", in_file_name); // app passed here
/*Open video file*/
pFmtCtx = avformat_alloc_context();
if((error=avformat_open_input(&pFmtCtx, in_file_name, NULL, NULL)) < 0){
LOGE(1, "Couldn't open file, error-code: %d with file url: %s", error, in_file_name);
return -1;
}
LOGI(1, "HERE 1 Duration: %d", pFmtCtx->duration); //app passed here
/*Retrieve the stream information, APP CRASH RIGHT HERE*/
if(avformat_find_stream_info(pFormatCtx, NULL)<0){
LOGE(1, "Couldn't retrieve stream information");
avformat_free_context(pFmtCtx);
return -1; // Couldn’t find stream information
}
LOGI(1, "HERE 2");
//Find the first video stream
videoStream=-1;
for(i=0; i<pFormatCtx->nb_streams; i++) {
if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO){
videoStream=i;
break;
}
}
if(videoStream==-1){
avformat_free_context(pFmtCtx);
LOGE(1, "Didn't find a video stream");
return -1; // Didn’t find a video stream
}
// Get a pointer to the codec context for the video stream
pCCtx=pFormatCtx->streams[videoStream]->codec;
avformat_free_context(pFmtCtx);
(*env)->ReleaseStringUTFChars(env, strInFile, in_file_name);
return pCCtx->codec_id;
}
Question: why does finding the stream info always fail like this? Please help me fix it. Thanks.
You are using pFormatCtx (which was not initialized) instead of pFmtCtx in several places in your code!
You could set pFormatCtx = pFmtCtx after avformat_alloc_context():
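AVFormatContext *pFormatCtx = NULL; /* add this declaration */
...
pFmtCtx = avformat_alloc_context();
pFormatCtx = pFmtCtx; /* now both names refer to the initialized context */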

DirectShow, capture Still Image

I'm not experienced in Windows programming; I'm trying to capture a still image from a webcam using DirectShow. I wrote a small application based on CommandCam.cpp, which can be found here:
http://batchloaf.wordpress.com/commandcam/
I basically added the code that can be found here:
http://msdn.microsoft.com/en-us/library/windows/desktop/dd318622%28v=vs.85%29.aspx
But the call to pBuilder->FindPin(pCap, PINDIR_OUTPUT, &PIN_CATEGORY_STILL, ... fails and I can't get the pin for the still image.
I doubt the camera simply lacks one, because other webcam programs can take a still image from my Microsoft LifeCam Studio.
I wonder what I am doing wrong? I tried placing the call at different points in the application, but it never succeeded.
Thanks for any hints,
Torsten.
The relevant part of the code is this:
// Get video input device name
hr = pMoniker->BindToStorage(0, 0, IID_PPV_ARGS(&pPropBag));
VariantInit(&var);
hr = pPropBag->Read(L"FriendlyName", &var, 0);
fprintf(stderr, "Capture device: %ls\n", var.bstrVal);
VariantClear(&var);
// Create capture filter and add to graph
hr = pMoniker->BindToObject(0, 0, IID_IBaseFilter, (void**)&pCap);
if (hr != S_OK) exit_message("Could not create capture filter", 1);
// Add capture filter to graph
hr = pGraph->AddFilter(pCap, L"Capture Filter");
if (hr != S_OK) exit_message("Could not add capture filter to graph", 1);
// Create sample grabber filter
hr = CoCreateInstance(CLSID_SampleGrabber, NULL,
CLSCTX_INPROC_SERVER, IID_IBaseFilter,
(void**)&pSampleGrabberFilter);
if (hr != S_OK)
exit_message("Could not create Sample Grabber filter", 1);
// Query the ISampleGrabber interface of the sample grabber filter
hr = pSampleGrabberFilter->QueryInterface(
DexterLib::IID_ISampleGrabber, (void**)&pSampleGrabber);
if (hr != S_OK)
exit_message("Could not get ISampleGrabber interface to sample grabber filter", 1);
// Enable sample buffering in the sample grabber filter
hr = pSampleGrabber->SetBufferSamples(TRUE);
if (hr != S_OK)
exit_message("Could not enable sample buffering in the sample grabber", 1);
// Set media type in sample grabber filter
AM_MEDIA_TYPE mt;
ZeroMemory(&mt, sizeof(AM_MEDIA_TYPE));
mt.majortype = MEDIATYPE_Video;
mt.subtype = MEDIASUBTYPE_RGB24;
hr = pSampleGrabber->SetMediaType((DexterLib::_AMMediaType *)&mt);
if (hr != S_OK)
exit_message("Could not set media type in sample grabber", 1);
// Add sample grabber filter to filter graph
hr = pGraph->AddFilter(pSampleGrabberFilter, L"SampleGrab");
if (hr != S_OK)
exit_message("Could not add Sample Grabber to filter graph", 1);
// Create Null Renderer filter
hr = CoCreateInstance(CLSID_NullRenderer, NULL,
CLSCTX_INPROC_SERVER, IID_IBaseFilter,
(void**)&pNullRenderer);
if (hr != S_OK)
exit_message("Could not create Null Renderer filter", 1);
// Add Null Renderer filter to filter graph
hr = pGraph->AddFilter(pNullRenderer, L"NullRender");
if (hr != S_OK)
exit_message("Could not add Null Renderer to filter graph", 1);
// Connect up the filter graph's capture stream
hr = pBuilder->RenderStream(
&PIN_CATEGORY_CAPTURE, &MEDIATYPE_Video,
pCap, pSampleGrabberFilter, pNullRenderer);
if (hr != S_OK)
exit_message("Could not render capture video stream", 1);
hr = pBuilder->RenderStream(
&PIN_CATEGORY_PREVIEW, &MEDIATYPE_Video,
pCap, NULL, NULL);
if (hr != S_OK && hr != VFW_S_NOPREVIEWPIN)
exit_message("Could not render preview video stream", 1);
// Get media control interfaces to graph builder object
hr = pGraph->QueryInterface(IID_IMediaControl,
(void**)&pMediaControl);
if (hr != S_OK) exit_message("Could not get media control interface", 1);
// Run graph
while(1)
{
hr = pMediaControl->Run();
// Hopefully, the return value was S_OK or S_FALSE
if (hr == S_OK) break; // graph is now running
if (hr == S_FALSE) continue; // graph still preparing to run
// If the Run function returned something else,
// there must be a problem
fprintf(stderr, "Error: %u\n", hr);
exit_message("Could not run filter graph", 1);
}
Sleep(2000);
// get the StillImage Pin
hr = pCap->QueryInterface(IID_IAMVideoControl, (void**)&pAMVidControl);
if (hr != S_OK) exit_message("Could not get IAMVideoControl", 1);
hr = pBuilder->FindPin(pCap, PINDIR_OUTPUT, &PIN_CATEGORY_STILL, NULL, FALSE, 0, &pPin);
if (hr != S_OK)
exit_message("Could not get Pin of category StillImage", 1);
hr = pAMVidControl->SetMode(pPin, VideoControlFlag_Trigger);
if (hr != S_OK) exit_message("Could set mode VideoControlFlag_Trigger", 1);
Some video capture device source filters do not expose a still image capture pin.
Did you try using the EnumPins method to find whether it actually has one, or if there is just a preview pin?
You could also use GraphEdit to have a look at the filter's pins.
If the device only has a preview pin, you will have to use that to grab your image. You can use the Smart Tee Filter to split your graph into preview and capture.
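For example, a minimal sketch of the pin check, assuming pCap is the capture filter from the code above (AMPROPSETID_Pin/AMPROPERTY_PIN_CATEGORY is the documented way to read a pin's category; link with strmiids.lib for the GUIDs):
#include <stdio.h>
#include <dshow.h>

// Sketch only: print the category of each output pin on the capture filter.
void ListPinCategories(IBaseFilter *pCap)
{
    IEnumPins *pEnum = NULL;
    if (FAILED(pCap->EnumPins(&pEnum))) return;
    IPin *pPin = NULL;
    while (pEnum->Next(1, &pPin, NULL) == S_OK)
    {
        PIN_DIRECTION dir;
        pPin->QueryDirection(&dir);
        IKsPropertySet *pKs = NULL;
        if (dir == PINDIR_OUTPUT && SUCCEEDED(pPin->QueryInterface(IID_PPV_ARGS(&pKs))))
        {
            GUID category = GUID_NULL;
            DWORD cbReturned = 0;
            if (SUCCEEDED(pKs->Get(AMPROPSETID_Pin, AMPROPERTY_PIN_CATEGORY,
                NULL, 0, &category, sizeof(category), &cbReturned)))
            {
                if (category == PIN_CATEGORY_STILL) printf("still image pin\n");
                else if (category == PIN_CATEGORY_PREVIEW) printf("preview pin\n");
                else if (category == PIN_CATEGORY_CAPTURE) printf("capture pin\n");
            }
            pKs->Release();
        }
        pPin->Release();
    }
    pEnum->Release();
}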

Capture screenshot of OpenGL/Direct3D 3rd party application

I want to capture the contents (client area) of a window in my VS2008 MFC C++ project. I have tried using the PrintWindow technique described here:
How to get screenshot of a window as bitmap object in C++?
I have also tried blitting the contents of the window using the following code:
void captureWindow(int winId)
{
HDC handle(::GetDC(HWND(winId)));
CDC sourceContext;
CBitmap bm;
CDC destContext;
if( !sourceContext.Attach(handle) )
{
printf("Failed to attach to window\n");
goto cleanup;
}
RECT winRect;
sourceContext.GetWindow()->GetWindowRect(&winRect);
int width = winRect.right-winRect.left;
int height = winRect.bottom-winRect.top;
destContext.CreateCompatibleDC( &sourceContext );
if(!bm.CreateCompatibleBitmap(&sourceContext, width, height)) {
printf("Failed to create bm\n");
goto cleanup;
}
{
//show a message in the window to enable us to visually confirm we got the right window
CRect rcText( 0, 0, 0 ,0 );
CPen pen(PS_SOLID, 5, 0x00ffff);
sourceContext.SelectObject(&pen);
const char *msg = "Window Captured!";
sourceContext.DrawText( msg, &rcText, DT_CALCRECT );
sourceContext.DrawText( msg, &rcText, DT_CENTER );
HGDIOBJ hOldDest = destContext.SelectObject(bm);
if( hOldDest==NULL )
{
printf("SelectObject failed with error %d\n", GetLastError());
goto cleanup;
}
if ( !destContext.BitBlt( 0, 0, width, height, &sourceContext, 0, 0, SRCCOPY ) ){
printf("Failed to blit\n");
goto cleanup;
}
//assume this function saves the bitmap to a file
saveImage(bm, "capture.bmp");
destContext.SelectObject(hOldDest);
}
cleanup:
destContext.DeleteDC();
sourceContext.Detach();
::ReleaseDC(HWND(winId), handle);
}
The code works fine for most applications. The specific application where I need to capture the screenshot, however, has a window that I think is rendered using OpenGL or Direct3D. Both methods will capture most of the app just fine, but the "3d" area is left black or garbled.
I do not have access to the application code, so I cannot change it in any way.
Is there any way to capture all the contents, including the "3d" window?
The data in your 3D area is generated by the graphics adapter further down the graphics pipeline and may not be available for your application to read from the rendering context. In OpenGL you can use glReadPixels() to pull that data back up the pipeline into your application's memory. See here for usage:
http://www.opengl.org/sdk/docs/man/xhtml/glReadPixels.xml
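For reference, a minimal sketch of the call (note that it has to run inside the process that owns the GL context, so capturing a third-party application this way requires injecting code into it):
#include <windows.h> // required before GL/gl.h on Windows
#include <GL/gl.h>
#include <vector>

// Sketch only: read the currently bound framebuffer into client memory.
void CaptureFramebuffer(int width, int height)
{
    std::vector<unsigned char> pixels(width * height * 4);
    glPixelStorei(GL_PACK_ALIGNMENT, 1); // rows tightly packed
    glReadPixels(0, 0, width, height, GL_RGBA, GL_UNSIGNED_BYTE, pixels.data());
    // Rows come back bottom-up; flip vertically before saving as a BMP.
}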
