I'm trying to create an OpenGL (GLX) context after the Xlib window has already been created; that is, to separate Xlib window creation and OpenGL context creation into two distinct phases.
The Win32 window/OpenGL context creation was rather simple, but I couldn't find any resource that illustrates the same process with Xlib and GLX on Linux.
This is how it's done for Xlib on Linux:
GLint glxAttribs[] = {
GLX_RGBA,
GLX_DOUBLEBUFFER,
GLX_DEPTH_SIZE, 24,
GLX_STENCIL_SIZE, 8,
GLX_RED_SIZE, 8,
GLX_GREEN_SIZE, 8,
GLX_BLUE_SIZE, 8,
GLX_SAMPLE_BUFFERS, 0,
GLX_SAMPLES, 0,
None
};
XVisualInfo* visual = glXChooseVisual(display, screenId, glxAttribs);
XSetWindowAttributes windowAttribs;
windowAttribs.border_pixel = BlackPixel(display, screenId);
windowAttribs.background_pixel = WhitePixel(display, screenId);
windowAttribs.override_redirect = True;
windowAttribs.colormap = XCreateColormap(display, RootWindow(display, screenId), visual->visual, AllocNone);
windowAttribs.event_mask = ExposureMask;
window = XCreateWindow(display, RootWindow(display, screenId), 0, 0, 320, 200, 0, visual->depth, InputOutput, visual->visual, CWBackPixel | CWColormap | CWBorderPixel | CWEventMask, &windowAttribs);
This is how it's done on Windows:
const WindowsWindow* pWin32Window = (const WindowsWindow*)pOwnerWindow;
HWND windowHandle = pWin32Window->GetWin32WindowHandle();
HDC windowDeviceContext = pWin32Window->GetWin32WindowDeviceContext();
/*
* Create pixel format
*/
PIXELFORMATDESCRIPTOR pfd = { sizeof(pfd),1 };
memset(&pfd, 0, sizeof(PIXELFORMATDESCRIPTOR));
pfd.nSize = sizeof(PIXELFORMATDESCRIPTOR);
pfd.dwFlags = PFD_DRAW_TO_WINDOW | PFD_SUPPORT_OPENGL | PFD_DOUBLEBUFFER;
pfd.iPixelType = PFD_TYPE_RGBA;
pfd.nVersion = 1;
pfd.cColorBits = OpenGLDeviceUtilsWin32::GetColorBits(desc.SwapchainBufferFormat);
pfd.cAlphaBits = OpenGLDeviceUtilsWin32::GetAlphaBits(desc.SwapchainBufferFormat);
pfd.cDepthBits = OpenGLDeviceUtilsWin32::GetDepthBits(desc.SwapchainDepthStencilBufferFormat);
pfd.cStencilBits = OpenGLDeviceUtilsWin32::GetStencilBits(desc.SwapchainDepthStencilBufferFormat);
pfd.cAuxBuffers = 3;
pfd.iLayerType = PFD_MAIN_PLANE;
const int pixelFormatIndex = ChoosePixelFormat(windowDeviceContext, &pfd);
ASSERT(pixelFormatIndex != 0,"OpenGLDevice","Invalid pixel format");
ASSERT(SetPixelFormat(windowDeviceContext, pixelFormatIndex, &pfd), "OpenGLDevice", "Win32 window rejected the specified pixel format");
HGLRC tempContext = wglCreateContext(windowDeviceContext);
ASSERT(tempContext != NULL, "OpenGLDevice", "Creation of wgl dummy context failed!");
wglMakeCurrent(windowDeviceContext, tempContext);
PFNWGLCREATECONTEXTATTRIBSARBPROC wglCreateContextAttribsARB = NULL;
wglCreateContextAttribsARB = (PFNWGLCREATECONTEXTATTRIBSARBPROC)wglGetProcAddress("wglCreateContextAttribsARB");
ASSERT(wglCreateContextAttribsARB != NULL, "OpenGLDevice", "WGL get proc address failed!");
But I would expect something like this:
Create xlib window
Check for glx attribs if the window can support that pixel format
Create glx context using pixel format
But instead it goes as:
Create window with your specific glx attribs
Create glx context
I wonder if there is a way to create the window without letting Xlib know we are going to use it for OpenGL, and to implement the OpenGL-specific setup separately from the window creation process.
I'm trying to create OpenGLx context after the Xlib's window creation.
I don't really see your problem. On Win32 the usual stanza is:
Create window
Select pixelformat
Set pixelformat on window
Get HDC from window and use it to create context
On GLX the stanza is:
Select visual for window
Create window that's compatible with visual
Create OpenGL context with the selected visual
Take note that in both Win32 and GLX there is no hard tie between the window and the OpenGL context. As long as the pixelformat/visual of an OpenGL context and a window are compatible, you can use them with each other.
The only difference between GLX and Win32 is how the pixelformat/visual is communicated to OpenGL context creation. In GLX it's done directly, while in Win32 the pixelformat is communicated in a rather convoluted way by means of the HDC of a window. And take note that in order to obtain a modern OpenGL context you actually have to go the route of OpenGL context creation with attributes, which works exactly the same in Win32 and GLX (with Win32 needing the added step of creating a dummy OpenGL context first in order to obtain the function pointer to wglCreateContextAttribsARB, which is directly available in GLX).
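For concreteness, a minimal sketch of the GLX stanza described above (a sketch only, reusing the display, screenId, glxAttribs and windowAttribs variables from the question's snippet, legacy glXCreateContext path, error handling omitted):
XVisualInfo* visual = glXChooseVisual(display, screenId, glxAttribs);     // 1. select the visual
Window window = XCreateWindow(display, RootWindow(display, screenId),     // 2. create a window
                              0, 0, 320, 200, 0, visual->depth,           //    compatible with
                              InputOutput, visual->visual,                //    that visual
                              CWColormap | CWBorderPixel, &windowAttribs);
GLXContext ctx = glXCreateContext(display, visual, nullptr, GL_TRUE);     // 3. context from the
glXMakeCurrent(display, window, ctx);                                     //    same visual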
Honestly, I do not understand your motivation.
Many implementations such as GLFW get the Visual from the GLX/EGL APIs (including glXChooseFBConfig) and use it when creating the window. The GLX/EGL part can be abstracted away by writing wrappers, so I don't see the need to go to the trouble of avoiding it.
That being said, it is still possible to avoid it, so I wrote the sample code for you.
// To build, execute the command below.
// c++ -Wall -Wextra -std=c++17 -o main main.cpp -lX11 -lGLX -lGL
#include <cstdio>
#include <chrono>
#include <thread>
#include <sys/time.h>
#include <unistd.h>
#include <X11/Xlib.h>
#include <X11/Xutil.h>
#include <GL/glx.h>
#include <GL/glxext.h>
#define OGL_MAJOR_VERSION 3
#define OGL_MINOR_VERSION 3
#define WINDOW_WIDTH 640
#define WINDOW_HEIGHT 360
#define FPS 60
static double get_time() {
static timeval s_tTimeVal;
gettimeofday(&s_tTimeVal, NULL);
double time = s_tTimeVal.tv_sec * 1000.0; // sec to ms
time += s_tTimeVal.tv_usec / 1000.0; // us to ms
return time;
}
struct TestWindowConfig {
int width = 640;
int height = 360;
};
class TestWindow final {
public:
explicit TestWindow(const TestWindowConfig& config) : m_config(config) {}
virtual ~TestWindow() {
if (m_display) {
if (m_xid) {
XDestroyWindow(m_display, m_xid);
}
XCloseDisplay(m_display);
}
}
bool create() {
m_display = XOpenDisplay(NULL);
if (!m_display) {
fprintf(stderr, "XOpenDisplay() failed\n");
return false;
}
XSetWindowAttributes x_attr;
x_attr.override_redirect = False;
x_attr.border_pixel = 0;
m_xid = XCreateWindow(m_display, DefaultRootWindow(m_display), 0, 0, m_config.width,
m_config.height, 0, CopyFromParent, InputOutput, CopyFromParent,
CWOverrideRedirect | CWBorderPixel, &x_attr);
if (!m_xid) {
fprintf(stderr, "XOpenDisplay() failed\n");
return false;
}
XStoreName(m_display, m_xid, "X11-GLX Sample");
XMapWindow(m_display, m_xid);
m_wm_delete_window = XInternAtom(m_display, "WM_DELETE_WINDOW", True);
XSetWMProtocols(m_display, m_xid, &m_wm_delete_window, 1);
return true;
}
void show() const {
if (m_display && m_xid) {
XMapRaised(m_display, m_xid);
}
}
bool poll_events() {
if (!m_display) {
fprintf(stderr, "Display is null\n");
return false;
}
while (XPending(m_display) > 0) {
XEvent ev;
XNextEvent(m_display, &ev);
if (ev.type == ClientMessage) {
if ((Atom)ev.xclient.data.l[0] == m_wm_delete_window) {
m_should_close = true;
}
}
}
return true;
}
bool should_close() const { return m_should_close; }
Display* display() const { return m_display; }
Window xid() const { return m_xid; }
int screen_id() const { return DefaultScreen(m_display); }
private:
TestWindowConfig m_config;
Display* m_display = nullptr;
Window m_xid = 0;
Atom m_wm_delete_window;
bool m_should_close = false;
};
class TestGLContext final {
public:
explicit TestGLContext() = default;
virtual ~TestGLContext() = default;
bool create(const TestWindow& window) {
// clang-format off
int visual_attr[] = {
GLX_DRAWABLE_TYPE, GLX_WINDOW_BIT,
GLX_RENDER_TYPE, GLX_RGBA_BIT,
GLX_RED_SIZE, 8,
GLX_GREEN_SIZE, 8,
GLX_BLUE_SIZE, 8,
GLX_ALPHA_SIZE, 8,
GLX_DEPTH_SIZE, 0,
GLX_STENCIL_SIZE, 0,
GLX_DOUBLEBUFFER, True,
None
};
// clang-format on
int cfg_count;
auto fb_configs =
glXChooseFBConfig(window.display(), window.screen_id(), visual_attr, &cfg_count);
if (!fb_configs || (cfg_count < 1)) {
fprintf(stderr, "glXChooseFBConfig(): No config found\n");
return false;
}
PFNGLXCREATECONTEXTATTRIBSARBPROC glXCreateContextAttribsARB =
(PFNGLXCREATECONTEXTATTRIBSARBPROC)glXGetProcAddressARB(
(const GLubyte*)"glXCreateContextAttribsARB");
if (!glXCreateContextAttribsARB) {
fprintf(stderr, "Failed to load glXCreateContextAttribsARB\n");
return false;
}
// clang-format off
int ctx_attr[] = {
GLX_CONTEXT_PROFILE_MASK_ARB, GLX_CONTEXT_CORE_PROFILE_BIT_ARB,
GLX_CONTEXT_MAJOR_VERSION_ARB, OGL_MAJOR_VERSION,
GLX_CONTEXT_MINOR_VERSION_ARB, OGL_MINOR_VERSION,
0, 0
};
// clang-format on
m_ctx = glXCreateContextAttribsARB(window.display(), fb_configs[0], NULL, True, ctx_attr);
if (!m_ctx) {
fprintf(stderr, "Failed to create GLX Context\n");
return false;
}
m_should_destroy = true;
return true;
}
bool make_current(const TestWindow& window) {
if (glXMakeCurrent(window.display(), window.xid(), m_ctx) != True) {
fprintf(stderr, "glXMakeCurrent() Failed\n");
return false;
}
return true;
}
void swap_buffers(const TestWindow& window) { glXSwapBuffers(window.display(), window.xid()); }
static void* get_proc_address(const char* name) {
return reinterpret_cast<void*>(glXGetProcAddress((const GLubyte*)name));
}
void destroy(const TestWindow& window) {
glXDestroyContext(window.display(), m_ctx);
m_should_destroy = false;
}
bool should_destroy() const { return m_should_destroy; }
private:
GLXContext m_ctx;
bool m_should_destroy = false;
};
int main() {
// 1. Prepare the window and the OpenGL context.
// In a typical design, TestWindow would own its GL context,
// but to fit your needs I separated them explicitly.
TestWindowConfig config{.width = WINDOW_WIDTH, .height = WINDOW_HEIGHT};
TestWindow window{config};
TestGLContext glctx{};
if (!window.create()) {
return 1;
}
if (!glctx.create(window) || !glctx.make_current(window)) {
if (glctx.should_destroy()) {
glctx.destroy(window);
}
return 1;
}
// 2. Load OpenGL functions
// In normal cases you are recommended to use a loader library like glad.
// In this example, I omitted the loading part.
//
// if (!gladLoadGLLoader((GLADloadproc)glctx.get_proc_address)) {
// fprintf(stderr, "Failed to load OpenGL functions\n");
// return 1;
// }
// 3. Show the window and call OpenGL APIs
// As noted above, this implementation has various problems for real use.
window.show();
double last_time = get_time();
while (true) {
if (!window.poll_events() || window.should_close()) {
break;
}
auto delta_ms = get_time() - last_time;
if (auto diff = (1000.0 / FPS) - delta_ms; diff > 0) {
std::this_thread::sleep_for(std::chrono::milliseconds((long)diff));
continue;
}
// fprintf(stderr, "delta: %f\n", delta_ms);
glViewport(0, 0, config.width, config.height);
glClearColor(0.0f, 0.0f, 1.0f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT);
glctx.swap_buffers(window);
last_time = get_time();
}
glctx.destroy(window);
return 0;
}
I've been struggling with a resource leak seemingly caused by NVIDIA's H.264 encoder MFT. Each time a frame is submitted to the encoder, the reference count of my D3D device is incremented by 1, and this reference is not given up even after shutting down the MFT. A bunch of threads are leaked as well.
I'm almost ready to bring this up with NVIDIA, but I'd like to first make sure there's nothing obvious I have missed. Please see my implementation below - I've tried to keep it as concise and clear as possible.
Arguments for why this might be a problem with NVIDIA's encoder:
This only happens with NVIDIA's encoder. No leak is observed when running on e.g. Intel's QuickSync.
Arguments for why this might be a problem in my code:
I've tried using a SinkWriter to write DXGI surfaces to a file in a similar fashion, and here the leak is not present. Unfortunately I don't have access to the source code of SinkWriter. I would be very happy if anyone could point me to some working sample code that I could compare against.
#pragma comment(lib, "D3D11.lib")
#pragma comment(lib, "mfplat.lib")
#pragma comment(lib, "mf.lib")
#pragma comment(lib, "evr.lib")
#pragma comment(lib, "mfuuid.lib")
#pragma comment(lib, "Winmm.lib")
// std
#include <iostream>
#include <string>
// Windows
#include <windows.h>
#include <atlbase.h>
// DirectX
#include <d3d11.h>
// Media Foundation
#include <mfapi.h>
#include <mfplay.h>
#include <mfreadwrite.h>
#include <mferror.h>
#include <Codecapi.h>
// Error handling
#define CHECK(x) if (!(x)) { printf("%s(%d) %s was false\n", __FILE__, __LINE__, #x); throw std::exception(); }
#define CHECK_HR(x) { HRESULT hr_ = (x); if (FAILED(hr_)) { printf("%s(%d) %s failed with 0x%x\n", __FILE__, __LINE__, #x, hr_); throw std::exception(); } }
// Constants
constexpr UINT ENCODE_WIDTH = 1920;
constexpr UINT ENCODE_HEIGHT = 1080;
constexpr UINT ENCODE_FRAMES = 120;
void runEncode();
int main()
{
CHECK_HR(CoInitializeEx(NULL, COINIT_APARTMENTTHREADED));
CHECK_HR(MFStartup(MF_VERSION));
for (;;)
{
runEncode();
if (getchar() == 'q')
break;
}
CHECK_HR(MFShutdown());
return 0;
}
void runEncode()
{
CComPtr<ID3D11Device> device;
CComPtr<ID3D11DeviceContext> context;
CComPtr<IMFDXGIDeviceManager> deviceManager;
CComPtr<IMFVideoSampleAllocatorEx> allocator;
CComPtr<IMFTransform> transform;
CComPtr<IMFAttributes> transformAttrs;
CComQIPtr<IMFMediaEventGenerator> eventGen;
DWORD inputStreamID;
DWORD outputStreamID;
// ------------------------------------------------------------------------
// Initialize D3D11
// ------------------------------------------------------------------------
CHECK_HR(D3D11CreateDevice(NULL, D3D_DRIVER_TYPE_HARDWARE, NULL, D3D11_CREATE_DEVICE_VIDEO_SUPPORT | D3D11_CREATE_DEVICE_DEBUG, NULL, 0, D3D11_SDK_VERSION, &device, NULL, &context));
{
// Probably not necessary in this application, but maybe the MFT requires it?
CComQIPtr<ID3D10Multithread> mt(device);
CHECK(mt);
mt->SetMultithreadProtected(TRUE);
}
// Create device manager
UINT resetToken;
CHECK_HR(MFCreateDXGIDeviceManager(&resetToken, &deviceManager));
CHECK_HR(deviceManager->ResetDevice(device, resetToken));
// ------------------------------------------------------------------------
// Initialize hardware encoder MFT
// ------------------------------------------------------------------------
{
// Find the encoder
CComHeapPtr<IMFActivate*> activateRaw;
UINT32 activateCount = 0;
// Input & output types
MFT_REGISTER_TYPE_INFO inInfo = { MFMediaType_Video, MFVideoFormat_NV12 };
MFT_REGISTER_TYPE_INFO outInfo = { MFMediaType_Video, MFVideoFormat_H264 };
// Query for the adapter LUID to get a matching encoder for the device.
CComQIPtr<IDXGIDevice> dxgiDevice(device);
CHECK(dxgiDevice);
CComPtr<IDXGIAdapter> adapter;
CHECK_HR(dxgiDevice->GetAdapter(&adapter));
DXGI_ADAPTER_DESC adapterDesc;
CHECK_HR(adapter->GetDesc(&adapterDesc));
CComPtr<IMFAttributes> enumAttrs;
CHECK_HR(MFCreateAttributes(&enumAttrs, 1));
CHECK_HR(enumAttrs->SetBlob(MFT_ENUM_ADAPTER_LUID, (BYTE*)&adapterDesc.AdapterLuid, sizeof(LUID)));
CHECK_HR(MFTEnum2(MFT_CATEGORY_VIDEO_ENCODER, MFT_ENUM_FLAG_HARDWARE | MFT_ENUM_FLAG_SORTANDFILTER, &inInfo, &outInfo, enumAttrs, &activateRaw, &activateCount));
CHECK(activateCount != 0);
// Choose the first returned encoder
CComPtr<IMFActivate> activate = activateRaw[0];
// Memory management
for (UINT32 i = 0; i < activateCount; i++)
activateRaw[i]->Release();
// Activate
CHECK_HR(activate->ActivateObject(IID_PPV_ARGS(&transform)));
// Get attributes
CHECK_HR(transform->GetAttributes(&transformAttrs));
}
// ------------------------------------------------------------------------
// Query encoder name (not necessary, but nice) and unlock for async use
// ------------------------------------------------------------------------
{
UINT32 nameLength = 0;
std::wstring name;
CHECK_HR(transformAttrs->GetStringLength(MFT_FRIENDLY_NAME_Attribute, &nameLength));
// IMFAttributes::GetString returns a null-terminated wide string
name.resize((size_t)nameLength + 1);
CHECK_HR(transformAttrs->GetString(MFT_FRIENDLY_NAME_Attribute, &name[0], (UINT32)name.size(), &nameLength));
name.resize(nameLength);
printf("Using %ls\n", name.c_str());
// Unlock the transform for async use and get event generator
CHECK_HR(transformAttrs->SetUINT32(MF_TRANSFORM_ASYNC_UNLOCK, TRUE));
CHECK(eventGen = transform);
}
// Get stream IDs (expect 1 input and 1 output stream)
{
HRESULT hr = transform->GetStreamIDs(1, &inputStreamID, 1, &outputStreamID);
if (hr == E_NOTIMPL)
{
inputStreamID = 0;
outputStreamID = 0;
hr = S_OK;
}
CHECK_HR(hr);
}
// ------------------------------------------------------------------------
// Configure hardware encoder MFT
// ------------------------------------------------------------------------
// Set D3D manager
CHECK_HR(transform->ProcessMessage(MFT_MESSAGE_SET_D3D_MANAGER, reinterpret_cast<ULONG_PTR>(deviceManager.p)));
// Set output type
CComPtr<IMFMediaType> outputType;
CHECK_HR(MFCreateMediaType(&outputType));
CHECK_HR(outputType->SetGUID(MF_MT_MAJOR_TYPE, MFMediaType_Video));
CHECK_HR(outputType->SetGUID(MF_MT_SUBTYPE, MFVideoFormat_H264));
CHECK_HR(outputType->SetUINT32(MF_MT_AVG_BITRATE, 30000000));
CHECK_HR(MFSetAttributeSize(outputType, MF_MT_FRAME_SIZE, ENCODE_WIDTH, ENCODE_HEIGHT));
CHECK_HR(MFSetAttributeRatio(outputType, MF_MT_FRAME_RATE, 60, 1));
CHECK_HR(outputType->SetUINT32(MF_MT_INTERLACE_MODE, 2));
CHECK_HR(outputType->SetUINT32(MF_MT_ALL_SAMPLES_INDEPENDENT, TRUE));
CHECK_HR(transform->SetOutputType(outputStreamID, outputType, 0));
// Set input type
CComPtr<IMFMediaType> inputType;
CHECK_HR(transform->GetInputAvailableType(inputStreamID, 0, &inputType));
CHECK_HR(inputType->SetGUID(MF_MT_MAJOR_TYPE, MFMediaType_Video));
CHECK_HR(inputType->SetGUID(MF_MT_SUBTYPE, MFVideoFormat_NV12));
CHECK_HR(MFSetAttributeSize(inputType, MF_MT_FRAME_SIZE, ENCODE_WIDTH, ENCODE_HEIGHT));
CHECK_HR(MFSetAttributeRatio(inputType, MF_MT_FRAME_RATE, 60, 1));
CHECK_HR(transform->SetInputType(inputStreamID, inputType, 0));
// ------------------------------------------------------------------------
// Create sample allocator
// ------------------------------------------------------------------------
{
MFCreateVideoSampleAllocatorEx(IID_PPV_ARGS(&allocator));
CHECK(allocator);
CComPtr<IMFAttributes> allocAttrs;
MFCreateAttributes(&allocAttrs, 2);
CHECK_HR(allocAttrs->SetUINT32(MF_SA_D3D11_BINDFLAGS, D3D11_BIND_RENDER_TARGET));
CHECK_HR(allocAttrs->SetUINT32(MF_SA_D3D11_USAGE, D3D11_USAGE_DEFAULT));
CHECK_HR(allocator->SetDirectXManager(deviceManager));
CHECK_HR(allocator->InitializeSampleAllocatorEx(1, 2, allocAttrs, inputType));
}
// ------------------------------------------------------------------------
// Start encoding
// ------------------------------------------------------------------------
CHECK_HR(transform->ProcessMessage(MFT_MESSAGE_COMMAND_FLUSH, NULL));
CHECK_HR(transform->ProcessMessage(MFT_MESSAGE_NOTIFY_BEGIN_STREAMING, NULL));
CHECK_HR(transform->ProcessMessage(MFT_MESSAGE_NOTIFY_START_OF_STREAM, NULL));
// Encode loop
for (int i = 0; i < ENCODE_FRAMES; i++)
{
// Get next event
CComPtr<IMFMediaEvent> event;
CHECK_HR(eventGen->GetEvent(0, &event));
MediaEventType eventType;
CHECK_HR(event->GetType(&eventType));
switch (eventType)
{
case METransformNeedInput:
{
CComPtr<IMFSample> sample;
CHECK_HR(allocator->AllocateSample(&sample));
CHECK_HR(transform->ProcessInput(inputStreamID, sample, 0));
// Dereferencing the device once after feeding each frame "fixes" the leak.
//device.p->Release();
break;
}
case METransformHaveOutput:
{
DWORD status;
MFT_OUTPUT_DATA_BUFFER outputBuffer = {};
outputBuffer.dwStreamID = outputStreamID;
CHECK_HR(transform->ProcessOutput(0, 1, &outputBuffer, &status));
DWORD bufCount;
DWORD bufLength;
CHECK_HR(outputBuffer.pSample->GetBufferCount(&bufCount));
CComPtr<IMFMediaBuffer> outBuffer;
CHECK_HR(outputBuffer.pSample->GetBufferByIndex(0, &outBuffer));
CHECK_HR(outBuffer->GetCurrentLength(&bufLength));
printf("METransformHaveOutput buffers=%d, bytes=%d\n", bufCount, bufLength);
// Release the sample as it is not processed further.
if (outputBuffer.pSample)
outputBuffer.pSample->Release();
if (outputBuffer.pEvents)
outputBuffer.pEvents->Release();
break;
}
}
}
// ------------------------------------------------------------------------
// Finish encoding
// ------------------------------------------------------------------------
CHECK_HR(transform->ProcessMessage(MFT_MESSAGE_NOTIFY_END_OF_STREAM, NULL));
CHECK_HR(transform->ProcessMessage(MFT_MESSAGE_NOTIFY_END_STREAMING, NULL));
CHECK_HR(transform->ProcessMessage(MFT_MESSAGE_COMMAND_DRAIN, NULL));
// Shutdown
printf("Finished encoding\n");
// I've tried all kinds of things...
//CHECK_HR(transform->ProcessMessage(MFT_MESSAGE_SET_D3D_MANAGER, reinterpret_cast<ULONG_PTR>(nullptr)));
//transform->SetInputType(inputStreamID, NULL, 0);
//transform->SetOutputType(outputStreamID, NULL, 0);
//transform->DeleteInputStream(inputStreamID);
//deviceManager->ResetDevice(NULL, resetToken);
CHECK_HR(MFShutdownObject(transform));
}
I think the answer is “yes”.
I saw the problem before: Is it possible to shut down a D3D device?
As a workaround, I stopped re-creating D3D devices. Instead I'm using a global CAtlMap collection. The keys are uint64_t values containing the LUID of the GPU, from the DXGI_ADAPTER_DESC::AdapterLuid field. The values are structures with two fields, CComPtr<ID3D11Device> and CComPtr<IMFDXGIDeviceManager>.
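A rough sketch of what such a cache might look like; the names DeviceEntry, packLuid and getDeviceForAdapter are illustrative (not from the original post), and it assumes the same D3D11/Media Foundation headers as the code above plus <atlcoll.h> and <cstdint>:
#include <atlcoll.h>   // CAtlMap
#include <cstdint>
// Hypothetical cache: one D3D11 device + DXGI device manager per GPU,
// keyed by the adapter LUID packed into a uint64_t.
struct DeviceEntry
{
    CComPtr<ID3D11Device> device;
    CComPtr<IMFDXGIDeviceManager> manager;
};
static CAtlMap<uint64_t, DeviceEntry> g_devices;
static uint64_t packLuid(const LUID& luid)
{
    return (uint64_t(uint32_t(luid.HighPart)) << 32) | luid.LowPart;
}
// Return a cached device for the adapter, creating and caching it on first use
// instead of re-creating a device for every encode session.
HRESULT getDeviceForAdapter(IDXGIAdapter* adapter, DeviceEntry& entry)
{
    DXGI_ADAPTER_DESC desc;
    HRESULT hr = adapter->GetDesc(&desc);
    if (FAILED(hr)) return hr;
    const uint64_t key = packLuid(desc.AdapterLuid);
    if (auto pair = g_devices.Lookup(key))
    {
        entry = pair->m_value;              // reuse the cached device, never destroy it
        return S_OK;
    }
    DeviceEntry fresh;
    hr = D3D11CreateDevice(adapter, D3D_DRIVER_TYPE_UNKNOWN, NULL,
                           D3D11_CREATE_DEVICE_VIDEO_SUPPORT, NULL, 0,
                           D3D11_SDK_VERSION, &fresh.device, NULL, NULL);
    if (FAILED(hr)) return hr;
    UINT resetToken;
    hr = MFCreateDXGIDeviceManager(&resetToken, &fresh.manager);
    if (FAILED(hr)) return hr;
    hr = fresh.manager->ResetDevice(fresh.device, resetToken);
    if (FAILED(hr)) return hr;
    g_devices[key] = fresh;                 // cached for the lifetime of the process
    entry = fresh;
    return S_OK;
}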
I have created an MFC dialog-based application to study the tab control. In a tab control it is possible to attach application-specific data to each tab.
I am trying to understand how to set/retrieve the data for individual tabs of the tab control.
Here is a sample application I am creating. Each tab of the control is supposed to store some GPU info.
As I understand it, there are 3 steps to add application-specific data.
Create a user-defined structure whose first member is of type TCITEMHEADER.
struct GPU {
std::wstring name;
int busid;
};
struct tabData {
TCITEMHEADER tabItemHeader;
GPU gpu;
};
Tell the tab control how many extra bytes the user-defined structure is going to take. I am doing this in DoDataExchange().
int extraBytes = sizeof(tabData) - sizeof(TCITEMHEADER);
auto status = tabCtrl1.SetItemExtra(extraBytes);
Set the user-defined data while adding tabs.
static int tabCtr = 0;
tabData td;
td.tabItemHeader.pszText = _T("TabX");
td.tabItemHeader.mask = TCIF_TEXT;
td.gpu.name = L"AMD NVIDIA";
td.gpu.busid = 101;
TabCtrl_InsertItem(tabCtrl1.GetSafeHwnd(), tabCtr, &td);
Now to get the data, we simply have to call TabCtrl_GetItem().
tabData td2;
td2.tabItemHeader.pszText = new TCHAR[20];
td2.tabItemHeader.cchTextMax = 20;
td2.tabItemHeader.mask = TCIF_TEXT;
td2.gpu.busid = 0;
TabCtrl_GetItem(tabCtrl1.GetSafeHwnd(), 0, &td2);
But as we can see in the following image, I do get the tab text (the pszText member, data item 1 in the image), but not the extra data that I had associated with it previously (data items 2 and 3 in the image).
Which step am I missing?
Why is the structure associated with application defined data not getting populated?
Additional Info
Here is the complete code for the application.
CPP File:
// tabCtrlStackOverflowDlg.cpp : implementation file
//
#include "stdafx.h"
#include "tabCtrlStackOverflow.h"
#include "tabCtrlStackOverflowDlg.h"
#include "afxdialogex.h"
#include <string>
#ifdef _DEBUG
#define new DEBUG_NEW
#endif
struct GPU {
std::wstring name;
int busid;
};
struct tabData
{
TCITEMHEADER tabItemHeader;
GPU gpu;
};
CtabCtrlStackOverflowDlg::CtabCtrlStackOverflowDlg(CWnd* pParent /*=NULL*/)
: CDialogEx(IDD_TABCTRLSTACKOVERFLOW_DIALOG, pParent)
{
m_hIcon = AfxGetApp()->LoadIcon(IDR_MAINFRAME);
}
void CtabCtrlStackOverflowDlg::DoDataExchange(CDataExchange* pDX)
{
CDialogEx::DoDataExchange(pDX);
DDX_Control(pDX, IDC_TAB1, tabCtrl1);
int extraBytes = sizeof(tabData) - sizeof(TCITEMHEADER);
auto status = tabCtrl1.SetItemExtra(extraBytes);
wchar_t *t = status ? L"SetItemExtra() success" : L"SetItemExtra() fail";
GetDlgItem(IDC_STATUSTEXT)->SetWindowTextW(t);
}
BEGIN_MESSAGE_MAP(CtabCtrlStackOverflowDlg, CDialogEx)
ON_WM_PAINT()
ON_WM_QUERYDRAGICON()
ON_BN_CLICKED(IDADDTAB, &CtabCtrlStackOverflowDlg::OnBnClickedAddtab)
ON_BN_CLICKED(IDC_GETITEM0, &CtabCtrlStackOverflowDlg::OnBnClickedGetitem0)
ON_BN_CLICKED(IDCLOSE, &CtabCtrlStackOverflowDlg::OnBnClickedClose)
END_MESSAGE_MAP()
// CtabCtrlStackOverflowDlg message handlers
BOOL CtabCtrlStackOverflowDlg::OnInitDialog()
{
CDialogEx::OnInitDialog();
// Set the icon for this dialog. The framework does this automatically
// when the application's main window is not a dialog
SetIcon(m_hIcon, TRUE); // Set big icon
SetIcon(m_hIcon, FALSE); // Set small icon
// TODO: Add extra initialization here
return TRUE; // return TRUE unless you set the focus to a control
}
// If you add a minimize button to your dialog, you will need the code below
// to draw the icon. For MFC applications using the document/view model,
// this is automatically done for you by the framework.
void CtabCtrlStackOverflowDlg::OnPaint()
{
if (IsIconic())
{
CPaintDC dc(this); // device context for painting
SendMessage(WM_ICONERASEBKGND, reinterpret_cast<WPARAM>(dc.GetSafeHdc()), 0);
// Center icon in client rectangle
int cxIcon = GetSystemMetrics(SM_CXICON);
int cyIcon = GetSystemMetrics(SM_CYICON);
CRect rect;
GetClientRect(&rect);
int x = (rect.Width() - cxIcon + 1) / 2;
int y = (rect.Height() - cyIcon + 1) / 2;
// Draw the icon
dc.DrawIcon(x, y, m_hIcon);
}
else
{
CDialogEx::OnPaint();
}
}
// The system calls this function to obtain the cursor to display while the user drags
// the minimized window.
HCURSOR CtabCtrlStackOverflowDlg::OnQueryDragIcon()
{
return static_cast<HCURSOR>(m_hIcon);
}
void CtabCtrlStackOverflowDlg::OnBnClickedAddtab()
{
static int tabCtr = 0;
tabData td;
td.tabItemHeader.pszText = _T("TabX");
td.tabItemHeader.mask = TCIF_TEXT;
td.gpu.name = L"AMD NVIDIA";
td.gpu.busid = 101;
int status = TabCtrl_InsertItem(tabCtrl1.GetSafeHwnd(), tabCtr, &td);
wchar_t *t = L"";
if (status == -1)
{
t = L"TabCtrl_InsertItem() Fail";
}
else
{
t = L"TabCtrl_InsertItem() success";
}
GetDlgItem(IDC_STATUSTEXT)->SetWindowTextW(t);
tabCtr++;
}
void CtabCtrlStackOverflowDlg::OnBnClickedGetitem0()
{
tabData td2;
td2.tabItemHeader.pszText = new TCHAR[20];
td2.tabItemHeader.cchTextMax = 20;
td2.tabItemHeader.mask = TCIF_TEXT;
td2.gpu.busid = 0;
if (TabCtrl_GetItem(tabCtrl1.GetSafeHwnd(), 0, &td2) == TRUE)
{
std::wstring text = td2.tabItemHeader.pszText;
text += std::wstring(L" ") + td2.gpu.name;
GetDlgItem(IDC_STATUSTEXT)->SetWindowTextW(text.c_str());
}
else
{
GetDlgItem(IDC_STATUSTEXT)->SetWindowTextW(_T("TabCtrl_GetItem()
error"));
}
}
void CtabCtrlStackOverflowDlg::OnBnClickedClose()
{
CDialog::OnCancel();
}
Header File:
// tabCtrlStackOverflowDlg.h : header file
//
#pragma once
#include "afxcmn.h"
// CtabCtrlStackOverflowDlg dialog
class CtabCtrlStackOverflowDlg : public CDialogEx
{
// Construction
public:
CtabCtrlStackOverflowDlg(CWnd* pParent = NULL); // standard constructor
// Dialog Data
#ifdef AFX_DESIGN_TIME
enum { IDD = IDD_TABCTRLSTACKOVERFLOW_DIALOG };
#endif
protected:
virtual void DoDataExchange(CDataExchange* pDX); // DDX/DDV support
// Implementation
protected:
HICON m_hIcon;
// Generated message map functions
virtual BOOL OnInitDialog();
afx_msg void OnPaint();
afx_msg HCURSOR OnQueryDragIcon();
DECLARE_MESSAGE_MAP()
public:
CTabCtrl tabCtrl1;
afx_msg void OnBnClickedAddtab();
afx_msg void OnBnClickedGetitem0();
afx_msg void OnBnClickedClose();
};
Solution Summary
From Barmak Shemirani's answer, here are the 3 reasons my code wasn't working. Read his answer for a better understanding.
TCIF_PARAM must be set in the mask for both TCM_INSERTITEM and TCM_GETITEM.
I was using local variables created on the stack (the tabData td2 object). The reference to this variable became invalid as soon as it went out of scope.
I was using std::wstring in the structure passed to TCM_INSERTITEM. It is better to use data types whose size can be accurately determined (i.e. plain old data types).
As Barmak Shemirani points out in comments, the documentation for TCITEMHEADER is scarce. His answer provides a thorough explanation.
Conflict with documentation
The documentation for TCITEMHEADER does not mention using the TCIF_PARAM flag. Maybe that's a mistake in the documentation!
It's better if SetItemExtra is moved to OnInitDialog, after the default procedure is called. This ensures SetItemExtra is called only once, while the control is still empty.
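For example, a minimal sketch of that change, assuming the same tabCtrl1 member and tabData structure used in the question:
BOOL CtabCtrlStackOverflowDlg::OnInitDialog()
{
    CDialogEx::OnInitDialog();
    // Reserve room for the application data that follows TCITEMHEADER,
    // once, while the tab control is still empty.
    int extraBytes = sizeof(tabData) - sizeof(TCITEMHEADER);
    VERIFY(tabCtrl1.SetItemExtra(extraBytes));
    return TRUE;
}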
The structure GPU has a std::wstring member whose data size is unknown at the start. TCM_INSERTITEM cannot make a copy of this data unless you have a simple POD structure.
To store the data in the tab, replace std::wstring with wchar_t name[100] so that the data is a simple POD structure with a fixed size.
struct GPU
{
//std::wstring name;
wchar_t name[100];
int busid;
};
struct tabData
{
TCITEMHEADER tabItemHeader;
GPU gpu;
};
void CMyDialog::OnBnClickedAddtab()
{
int index = tab.GetItemCount();
wchar_t tabname[50];
wsprintf(tabname, L"Tab %d", index);
tabData sdata = { 0 };
sdata.tabItemHeader.mask = TCIF_TEXT | TCIF_PARAM;
sdata.tabItemHeader.pszText = tabname;
wsprintf(sdata.gpu.name, L"AMD NVIDIA %d", index);
sdata.gpu.busid = 101;
tab.SendMessage(TCM_INSERTITEM, index, (LPARAM)(TCITEMHEADER*)(&sdata));
}
void CMyDialog::OnBnClickedGetitem0()
{
int index = tab.GetCurSel();
tabData data = { 0 };
wchar_t buf[20] = { 0 };
data.tabItemHeader.pszText = buf;
data.tabItemHeader.cchTextMax = sizeof(buf)/sizeof(wchar_t);
data.tabItemHeader.mask = TCIF_TEXT | TCIF_PARAM;
if(tab.SendMessage(TCM_GETITEM, index, (LPARAM)(TCITEMHEADER*)(&data)))
{
CString str;
str.Format(L"%d %s", data.gpu.busid, data.gpu.name);
GetDlgItem(IDC_STATIC1)->SetWindowText(str);
}
}
Alternative method:
If std::wstring name; cannot be replaced with a wchar_t buffer, we have to define separate permanent storage, for example using a std::vector. Then we use the lParam value in TCITEM to point to the vector's elements.
This method only needs the lParam field, so it doesn't require TCITEMHEADER and SetItemExtra. You can even define std::vector<GPU>. Example:
std::vector<tabData> m_data;
BOOL CMyDialog::OnInitDialog()
{
CDialogEx::OnInitDialog();
tabData data;
data.gpu.name = L"AMD NVIDIA1";
data.gpu.busid = 101;
m_data.push_back(data);
data.gpu.name = L"AMD NVIDIA2";
data.gpu.busid = 102;
m_data.push_back(data);
return TRUE;
}
void CMyDialog::OnBnClickedAddtab()
{
static int tabCtr = 0;
if(tabCtr >= (int)m_data.size())
return;
TCITEM item = { 0 };
item.pszText = _T("TabX");
item.mask = TCIF_TEXT | TCIF_PARAM;
item.lParam = (LPARAM)&m_data[tabCtr];
tab.InsertItem(tabCtr, &item);
tabCtr++;
}
void CMyDialog::OnBnClickedGetitem0()
{
TCITEM item = { 0 };
item.mask = TCIF_TEXT | TCIF_PARAM;
if(tab.GetItem(tab.GetCurSel(), &item) == TRUE)
{
tabData* ptr = (tabData*)item.lParam;
CString str;
str.Format(L"%d %s", ptr->gpu.busid, ptr->gpu.name.c_str());
GetDlgItem(IDC_STATIC1)->SetWindowText(str);
}
}
I am trying to output a string from the PIC's USART and have it display in Tera Term. I am using:
PIC18F4331
Sparkfun Bluesmirf RN-42
MPLAB v8.85
Tera Term
I've been working at this code for a couple of days and I am not seeing a single response. A couple of things that I think may be causing the issue are the baud rate and/or the lack of an interrupt routine. But is there a need for an interrupt if I am only attempting to transmit? Please, can someone guide me? Also, when using printf, I do see a response over Bluetooth, but in strange symbolic form, for example þþþ.
The code is a modification of one found online.
// Libraries
#include <p18f4331.h>
#include <stdio.h>
// Configurations
#pragma config OSC = XT
#pragma config WDTEN = OFF
#pragma config PWRTEN = OFF
#pragma config FCMEN = OFF
#pragma config IESO = OFF
#pragma config BOREN = ON
#pragma config BORV = 27
#pragma config WDPS = 128
#pragma config T1OSCMX = ON
#pragma config PWMPIN = ON
#pragma config MCLRE = ON
#pragma config LVP = OFF
#pragma config STVREN = OFF
#pragma config PWM4MX = RD5
// Definitions
#define _XTAL_FREQ 4000000
#define BAUDRATE 9600
void EUSART(void)
{
TRISC = 0b10000000;
SPBRG = 25;
TXSTAbits.CSRC = 0; // Baud Rate Generated Externally
TXSTAbits.TX9 = 0; // 8-Bit Transmission
TXSTAbits.TXEN = 1; // Transmit Enabled
TXSTAbits.SYNC = 0; // Asynchronous Mode
TXSTAbits.BRGH = 1; // High Baud Rate
TXSTAbits.TRMT = 0; // Transmit Shift Register When TSR Is Full
RCSTAbits.SPEN = 1; // Serial Port Enabled
RCSTAbits.RX9 = 0; // 8-Bit Reception
RCSTAbits.CREN = 1; // Enables Receive
}
void SendByteSerially(unsigned char Byte) // Writes a character to the serial port
{
while(!PIR1bits.TXIF) ; // wait for previous transmission to finish
TXREG = Byte;
}
unsigned char ReceiveByteSerially(void) // Reads a character from the serial port
{
while(!PIR1bits.RCIF) continue; // Wait until a character has been received
return RCREG;
}
void SendStringSerially(const rom unsigned char* st)
{
while(*st) SendByteSerially(*st++);
}
void delayMS(unsigned int x)
{
unsigned char y;
for(;x > 0; x--) for(y=0; y< 82;y++);
}
void main(void)
{
unsigned char SerialData;
EUSART();
SendStringSerially("Hello World");
while(1)
{
SerialData = ReceiveByteSerially();
SendByteSerially(SerialData);
delayMS(1000);
}
}
You are using a PIC18, so be sure that BRG16 equals 0 since you're using BRGH:
BAUDCTL.BRG16 = 0;
because SPBRGH is the high byte of the SPBRG pair (the 16-bit baud rate generator), and leaving BRG16 set may change the baud rate of your USART.
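For reference, a sketch of the arithmetic this implies for the question's setup (4 MHz oscillator, 9600 baud, BRGH = 1, BRG16 = 0), assuming the usual 8-bit formula Baud = Fosc / (16 * (SPBRG + 1)):
/* SPBRG = Fosc / (16 * Baud) - 1
 *       = 4000000 / (16 * 9600) - 1
 *       = 26.04 - 1 = 25.04  ->  25   (the value used in EUSART() above)
 * Actual rate = 4000000 / (16 * (25 + 1)) = 9615 baud, roughly 0.16% error. */
#define SPBRG_9600_4MHZ  ((4000000UL / (16UL * 9600UL)) - 1)  /* evaluates to 25 */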
Plus, be sure you're in the PIR1 bank. In MPLAB assembly, that would be
banksel PIR1; //Not sure if there's an ending comma
My two functions to transmit via UART, once it is properly initialized (in MikroC):
void vTx232 (UC ucSend)
{
STATUS.RP0 = PIR1; //Sure we're in PIR1
while (PIR1.TXIF == 0);//While last TX not done
TXREG = ucSend; //Input param into TXREG
}
void vTxString(UC *ucpString)
{
while (*ucpString!= 0x00) //While string not at its end
{
vTx232(*ucpString); //Send string character
ucpString++; //Increm string pointer
}
}
I have previously used MiniZip (zlib wrapper) to unzip archives. MiniZip cannot be used for Metro applications as it uses deprecated APIs in "iowin32.c" -- CreateFile() and SetFilePointer().
I thought that would be an easy fix and created "iowinrt.c" with CreateFile() and SetFilePointer() replaced by CreateFile2() and SetFilePointerEx(). While this gave me a version of MiniZip that uses only approved Win8 APIs, it still turned out to be useless -- I forgot about sandboxing. If I pick a file using FileOpenPicker() and pass its path to my modified MiniZip, I still cannot open it -- CreateFile2() fails with an "Access is denied" message.
So it appears that the old C API for file access is now mostly useless; it is my understanding that in order to fix this I would need to reimplement my "iowinrt" in C++/CX using the new async file access. Are there any other options? I think I saw somewhere that WinRT does have compress/uncompress functionality, but that it only works on individual files, not archives.
An additional requirement is that I need this to work in memory.
For a moment I thought I had a solution via .NET Framework 4.5:
I found this piece of info about how to create .NET classes that can be used from C++/CX:
http://social.msdn.microsoft.com/Forums/en-US/winappswithnativecode/thread/3ff383d0-0c9f-4a30-8987-ff2b23957f01
.NET Framework 4.5 contains ZipArchive and ZipArchiveEntry classes in System.IO.Compression:
http://msdn.microsoft.com/en-us/library/system.io.compression.ziparchive%28v=vs.110%29.aspx#Y0
http://msdn.microsoft.com/en-us/library/system.io.compression.ziparchiveentry%28v=vs.110%29.aspx#Y0
I thought I could create a C# Metro class library with WinMD output type exposing ZipArchive and ZipArchiveEntry, then use that in my C++/CX project. However, even if it worked, it would not work in memory; it appears that ZipArchive and ZipArchiveEntry work only with files.
Got reading from the archive working. Explanation and code below, but it is really just a hack at this point, to see whether it's possible at all. I just kept modifying things until I got something working; this is just an example of what works and by no means production-quality code (it's not re-entrant, for a start). There are undoubtedly many things that are bad/unnecessary/wtf, so feel free to use the comments to help with the cleanup.
As mentioned previously, it is no longer enough to pass a path to the library -- unless the file is in one of the KnownFolders (documents, home, media, music, pictures, removable storage or videos) you end up with an "access is denied" message. Instead, the library must be able to accept a StorageFile^, as returned from FileOpenPicker. At least I haven't found any other way to do it; maybe someone knows better?
MiniZip provides a Windows filesystem access layer for zlib via iowin32.h/.c. This still works in desktop mode for old-style apps, but does not work for Metro apps, as it uses deprecated APIs and relies on paths. To get MiniZip going on Windows 8, a complete rewrite of iowin32 is required.
To get things working again, the first thing was to find a way to pass the StorageFile^ all the way down to iowinrt (the Windows 8 replacement for iowin32). Fortunately, that was not a problem, as MiniZip provides two styles of open-file functions -- ones that accept a pointer to char, and others accepting a pointer to void. Since ^ is still just a pointer, casting StorageFile^ to void* and then back to StorageFile^ works fine.
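In other words, a minimal sketch of the two casts (the same trick the iowinrt code further down uses; storageFile and ffunc are the caller's variables):
// Caller side: smuggle the StorageFile^ through MiniZip's void* "path" parameter.
unzFile zf = unzOpen2_64(reinterpret_cast<const void*>(storageFile), &ffunc);
// iowinrt side: recover the StorageFile^ from the opaque pointer it was given.
StorageFile^ file = reinterpret_cast<StorageFile^>(const_cast<void*>(storageFile));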
Now that I was able to pass the StorageFile^ to my new iowinrt, the next problem was how to make the new async C++ file access API work with zlib. In order to support very old C compilers, zlib is written in old K&R-style C. The Visual Studio compiler will refuse to compile this as C++, so it has to be compiled as C, while the new iowinrt must of course be compiled as C++ -- keep that in mind when creating your project. Another thing to note about the VS project is that I built it as a Visual C++ Windows Metro style Static Library, although a DLL should also work, but then you must also define a macro to export the MiniZip API (I haven't tried this, so I don't know which macro you have to use). I think I also had to set "Consume Windows Runtime Extension" (/ZW), set "Not Using Precompiled Headers" and add _CRT_SECURE_NO_WARNINGS and _CRT_NONSTDC_NO_WARNINGS to the Preprocessor Definitions.
As for iowinrt itself, I've split it into two files. One holds two sealed ref classes -- reader and writer objects; they accept a StorageFile^. The reader implements Read, Tell, SeekFromBeginning, SeekFromCurrent and SeekFromEnd (the reason for three Seek methods is that ref sealed classes have to stick to WinRT types, which apparently excludes enums, so I just took the easy route). The writer implements just Write at the moment; I haven't used it yet.
This is FileReader code:
#include "pch.h"
#include "FileAccess.h" // FileReader and FileWriter
using namespace Concurrency;
using namespace Windows::Security::Cryptography;
using namespace CFileAccess;
FileReader::FileReader(StorageFile^ archive)
{
if (nullptr != archive)
{
create_task(archive->OpenReadAsync()).then([this](IRandomAccessStreamWithContentType^ archiveStream)
{
if (nullptr != archiveStream)
{
_readStream = archiveStream;
}
}).wait();
}
} // end of constructor
int32 FileReader::Read(WriteOnlyArray<byte>^ fileData)
{
int32 bytesRead = 0;
if ((nullptr != _readStream) && (fileData->Length > 0))
{
try
{
auto inputStreamReader = ref new DataReader(_readStream);
create_task(inputStreamReader->LoadAsync(fileData->Length)).then([&](task<unsigned int> dataRead)
{
try
{
bytesRead = dataRead.get();
if (bytesRead)
{
inputStreamReader->ReadBytes(fileData);
}
}
catch (Exception^ e)
{
bytesRead = -1;
}
inputStreamReader->DetachStream();
}).wait();
}
catch (Exception^ e)
{
bytesRead = -1;
}
}
return (bytesRead);
} // end of method Read()
int64 FileReader::Tell(void)
{
int64 ret = -1;
if (nullptr != _readStream)
{
ret = _readStream->Position;
}
return (ret);
} // end of method Tell()
int64 FileReader::SeekFromBeginning(uint64 offset)
{
int64 ret = -1;
if ((nullptr != _readStream) && (offset < _readStream->Size))
{
_readStream->Seek(offset);
ret = 0;
}
return (ret);
} // end of method SeekFromBeginning()
int64 FileReader::SeekFromCurrent(uint64 offset)
{
int64 ret = -1;
if ((nullptr != _readStream) && ((_readStream->Position + offset) < _readStream->Size))
{
_readStream->Seek(_readStream->Position + offset);
ret = 0;
}
return (ret);
} // end of method SeekFromCurrent()
int64 FileReader::SeekFromEnd(uint64 offset)
{
int64 ret = -1;
if ((nullptr != _readStream) && ((_readStream->Size - offset) >= 0))
{
_readStream->Seek(_readStream->Size - offset);
ret = 0;
}
return (ret);
} // end of method SeekFromEnd()
iowinrt sits between MiniZip and FileReader (and FileWriter). It's too long to give everything here, but this should be sufficient to reconstruct the rest, since it's mostly just more of the same with different function names, plus a bunch of fill_winRT_filefuncxxx() functions which are obvious:
#include "zlib.h"
#include "ioapi.h"
#include "iowinrt.h"
#include "FileAccess.h"
using namespace Windows::Security::Cryptography;
using namespace Platform;
using namespace CFileAccess;
static FileReader^ g_fileReader = nullptr;
static FileWriter^ g_fileWriter = nullptr;
static StorageFile^ g_storageFile = nullptr;
[...]
static voidpf winRT_translate_open_mode(int mode)
{
if (nullptr != g_storageFile)
{
if ((mode & ZLIB_FILEFUNC_MODE_READWRITEFILTER)==ZLIB_FILEFUNC_MODE_READ)
{
g_fileWriter = nullptr;
g_fileReader = ref new FileReader(g_storageFile);
}
else if (mode & ZLIB_FILEFUNC_MODE_EXISTING)
{
g_fileReader = nullptr;
g_fileWriter = ref new FileWriter(g_storageFile);
}
else if (mode & ZLIB_FILEFUNC_MODE_CREATE)
{
g_fileReader = nullptr;
g_fileWriter = ref new FileWriter(g_storageFile);
}
}
return (nullptr != g_fileReader ? reinterpret_cast<voidpf>(g_fileReader) : reinterpret_cast<voidpf>(g_fileWriter));
}
voidpf ZCALLBACK winRT_open64_file_func (voidpf opaque,const void* storageFile,int mode)
{
g_storageFile = reinterpret_cast<StorageFile^>(const_cast<void*>(storageFile));
return (winRT_translate_open_mode(mode));
}
[...]
Long ZCALLBACK winRT_read_file_func (voidpf opaque, voidpf stream, void* buf,uLong size)
{
uLong bytesRead = 0;
if (nullptr != g_fileReader)
{
auto fileData = ref new Platform::Array<byte>(size);
bytesRead = g_fileReader->Read(fileData);
memcpy(buf, fileData->Data, fileData->Length);
}
return (bytesRead);
}
uLong ZCALLBACK winRT_write_file_func (voidpf opaque,voidpf stream,const void* buf,uLong size)
{
uLong bytesWritten = 0;
if (nullptr != g_fileWriter)
{
auto bytes = ref new Array<uint8>(reinterpret_cast<uint8*>(const_cast<void*>(buf)), size);
IBuffer ^writeBuffer = CryptographicBuffer::CreateFromByteArray(bytes);
bytesWritten = g_fileWriter->Write(writeBuffer);
}
return (bytesWritten);
}
long ZCALLBACK winRT_tell_file_func (voidpf opaque,voidpf stream)
{
long long ret = 0;
if (nullptr != g_fileReader)
{
ret = g_fileReader->Tell();
}
return (static_cast<long>(ret));
}
ZPOS64_T ZCALLBACK winRT_tell64_file_func (voidpf opaque, voidpf stream)
{
ZPOS64_T ret = 0;
if (nullptr != g_fileReader)
{
ret = g_fileReader->Tell();
}
return (ret);
}
[...]
long ZCALLBACK winRT_seek64_file_func (voidpf opaque, voidpf stream,ZPOS64_T offset,int origin)
{
long long ret = -1;
if (nullptr != g_fileReader)
{
switch (origin)
{
case ZLIB_FILEFUNC_SEEK_CUR :
ret = g_fileReader->SeekFromCurrent(offset);
break;
case ZLIB_FILEFUNC_SEEK_END :
ret = g_fileReader->SeekFromEnd(offset);
break;
case ZLIB_FILEFUNC_SEEK_SET :
ret = g_fileReader->SeekFromBeginning(offset);
break;
default:
// should never happen!
ret = -1;
break;
}
}
return (static_cast<long>(ret));
}
int ZCALLBACK winRT_close_file_func (voidpf opaque, voidpf stream)
{
g_fileWriter = nullptr;
g_fileReader = nullptr;
return (0);
}
int ZCALLBACK winRT_error_file_func (voidpf opaque,voidpf stream)
{
/// #todo Get errors from FileAccess
return (0);
}
This is enough to get MiniZip going (at least for reading), but you have to take care how you call the MiniZip functions -- since Metro is all about async, and blocking the UI thread will end in an exception, you must wrap the access in tasks:
FileOpenPicker^ openPicker = ref new FileOpenPicker();
openPicker->ViewMode = PickerViewMode::List;
openPicker->SuggestedStartLocation = PickerLocationId::ComputerFolder;
openPicker->FileTypeFilter->Append(".zip");
task<IVectorView<StorageFile^>^>(openPicker->PickMultipleFilesAsync()).then([this](IVectorView<StorageFile^>^ files)
{
if (files->Size > 0)
{
std::for_each(begin(files), end(files), [this](StorageFile ^file)
{ // open selected zip archives
create_task([this, file]()
{
OpenArchive(file);
[...]
});
});
}
else
{
rootPage->NotifyUserBackgroundThread("No files were returned.", NotifyType::ErrorMessage);
}
});
[...]
bool OpenArchive(StorageFile^ archive)
{
bool isArchiveOpened = false;
if (nullptr != archive)
{ // open ZIP archive
zlib_filefunc64_def ffunc;
fill_winRT_filefunc64(&ffunc);
unzFile archiveObject = NULL;
create_task([this, &ffunc, archive]()
{
archiveObject = unzOpen2_64(reinterpret_cast<const void*>(archive), &ffunc);
}).wait();
if (NULL != archiveObject)
{
[...]