Direct3D renders to wrong texture - Visual C++

In the code below I try to render a sphere to a texture (renderTargetView); that texture is then supposed to be rendered to the back buffer on a quad. I'm experimenting with this to use for glow later. The problem right now is that the sphere is rendered straight to the back buffer, where it is visible; it doesn't even matter if I comment out the lower half of the code, the result is the same. I clear the texture with the background color D3DXCOLOR(0.4f, 0.2f, 0.0f, 1.0f), but that is not what appears on screen; what shows is the back buffer's clear color, devcon->ClearRenderTargetView(backbuffer, D3DXCOLOR(0.0f, 0.2f, 0.4f, 1.0f));.
// Set and clear render target
devcon->OMSetRenderTargets(1, &renderTargetView, zbuffer);
devcon->ClearRenderTargetView(renderTargetView, D3DXCOLOR(0.4f, 0.2f, 0.0f, 1.0f));
devcon->PSSetShaderResources(0, 1, &pTextureSun);
devcon->UpdateSubresource(pCBuffer, 0, 0, &cBuffer, 0, 0);
devcon->Draw(sizeOfVertexVector, 0);
// set the shader objects
/*
devcon->VSSetShader(pVS_glow, 0, 0);
devcon->PSSetShader(pPS_glow, 0, 0);
// set the render target as the back buffer
devcon->OMSetRenderTargets(1, &backbuffer, zbuffer);
devcon->IASetVertexBuffers(0, 1, &pVBufferQuad, &stride, &offset);
devcon->UpdateSubresource(pCBuffer, 0, 0, &cBuffer, 0, 0);
devcon->PSSetShaderResources(0, 1, &shaderResourceView);
devcon->Draw(6, 0);*/
Here is where the back and depth buffers are set up (I have tried different depth-buffer formats, such as DXGI_FORMAT_D24_UNORM_S8_UINT and others, but no luck):
// create a struct to hold information about the swap chain
DXGI_SWAP_CHAIN_DESC scd;
// clear out the struct for use
ZeroMemory(&scd, sizeof(DXGI_SWAP_CHAIN_DESC));
// fill the swap chain description struct
scd.BufferCount = 1; // one back buffer
scd.BufferDesc.Format = DXGI_FORMAT_R8G8B8A8_UNORM; // use 32-bit color
scd.BufferDesc.Width = SCREEN_WIDTH; // set the back buffer width
scd.BufferDesc.Height = SCREEN_HEIGHT; // set the back buffer height
scd.BufferUsage = DXGI_USAGE_RENDER_TARGET_OUTPUT; // how swap chain is to be used
scd.OutputWindow = hWnd; // the window to be used
scd.SampleDesc.Count = 4; // how many multisamples
scd.Windowed = TRUE; // windowed/full-screen mode
scd.Flags = DXGI_SWAP_CHAIN_FLAG_ALLOW_MODE_SWITCH; // allow full-screen switching
// create a device, device context and swap chain using the information in the scd struct
D3D11CreateDeviceAndSwapChain(NULL,
D3D_DRIVER_TYPE_HARDWARE,
NULL,
NULL,
NULL,
NULL,
D3D11_SDK_VERSION,
&scd,
&swapchain,
&dev,
NULL,
&devcon);
// create the depth buffer texture
D3D11_TEXTURE2D_DESC texd;
ZeroMemory(&texd, sizeof(texd));
texd.Width = SCREEN_WIDTH;
texd.Height = SCREEN_HEIGHT;
texd.ArraySize = 1;
texd.MipLevels = 1;
texd.SampleDesc.Count = 4;
texd.Format = DXGI_FORMAT_D24_UNORM_S8_UINT;
texd.BindFlags = D3D11_BIND_DEPTH_STENCIL;
ID3D11Texture2D *pDepthBuffer;
dev->CreateTexture2D(&texd, NULL, &pDepthBuffer);
// create the depth buffer
D3D11_DEPTH_STENCIL_VIEW_DESC dsvd;
ZeroMemory(&dsvd, sizeof(dsvd));
dsvd.Format = DXGI_FORMAT_D24_UNORM_S8_UINT;
dsvd.ViewDimension = D3D11_DSV_DIMENSION_TEXTURE2DMS;
dev->CreateDepthStencilView(pDepthBuffer, &dsvd, &zbuffer);
pDepthBuffer->Release();
// get the address of the back buffer
ID3D11Texture2D *pBackBuffer;
swapchain->GetBuffer(0, __uuidof(ID3D11Texture2D), (LPVOID*)&pBackBuffer);
// use the back buffer address to create the render target
dev->CreateRenderTargetView(pBackBuffer, NULL, &backbuffer);
pBackBuffer->Release();
// set the render target as the back buffer
devcon->OMSetRenderTargets(1, &backbuffer, zbuffer);
// Set the viewport
D3D11_VIEWPORT viewport;
ZeroMemory(&viewport, sizeof(D3D11_VIEWPORT));
viewport.TopLeftX = 0; // set the left to 0
viewport.TopLeftY = 0; // set the top to 0
viewport.Width = SCREEN_WIDTH; // set the width to the window's width
viewport.Height = SCREEN_HEIGHT; // set the height to the window's height
viewport.MinDepth = 0; // the closest an object can be on the depth buffer is 0.0
viewport.MaxDepth = 1; // the farthest an object can be on the depth buffer is 1.0
devcon->RSSetViewports(1, &viewport);
And here is the render-to-texture setup:
D3D11_TEXTURE2D_DESC textureDesc;
D3D11_RENDER_TARGET_VIEW_DESC renderTargetViewDesc;
D3D11_SHADER_RESOURCE_VIEW_DESC shaderResourceViewDesc;
// Initialize the render target texture description.
ZeroMemory(&textureDesc, sizeof(textureDesc));
// Setup the render target texture description.
textureDesc.Width = SCREEN_WIDTH;
textureDesc.Height = SCREEN_HEIGHT;
textureDesc.MipLevels = 1;
textureDesc.ArraySize = 1;
textureDesc.Format = DXGI_FORMAT_R32G32B32A32_FLOAT;
textureDesc.SampleDesc.Count = 1;
textureDesc.Usage = D3D11_USAGE_DEFAULT;
textureDesc.BindFlags = D3D11_BIND_RENDER_TARGET | D3D11_BIND_SHADER_RESOURCE;
textureDesc.CPUAccessFlags = 0;
textureDesc.MiscFlags = 0;
dev->CreateTexture2D(&textureDesc, NULL, &renderTargetTexture);
// Setup the description of the render target view.
renderTargetViewDesc.Format = textureDesc.Format;
renderTargetViewDesc.ViewDimension = D3D11_RTV_DIMENSION_TEXTURE2D;
renderTargetViewDesc.Texture2D.MipSlice = 0;
dev->CreateRenderTargetView(renderTargetTexture, &renderTargetViewDesc, &renderTargetView);
// Setup the description of the shader resource view.
shaderResourceViewDesc.Format = textureDesc.Format;
shaderResourceViewDesc.ViewDimension = D3D11_SRV_DIMENSION_TEXTURE2D;
shaderResourceViewDesc.Texture2D.MostDetailedMip = 0;
shaderResourceViewDesc.Texture2D.MipLevels = 1;
// Create the shader resource view.
dev->CreateShaderResourceView(renderTargetTexture, &shaderResourceViewDesc, &shaderResourceView);
Any ideas?
UPDATE
This code is executed before rendering to the texture; if I comment it out...
// set the render target as the back buffer
devcon->OMSetRenderTargets(1, &backbuffer, zbuffer);
cBuffer.doBlur = 0;
devcon->PSSetShaderResources(0, 1, &pTextureEarth);
devcon->UpdateSubresource(pCBuffer, 0, 0, &cBuffer, 0, 0);
devcon->Draw(sizeOfVertexVector, 0);
...the scene now renders to the texture, but currently the quad just shows the texture's background color, not the sphere.
// Set and clear render target
devcon->OMSetRenderTargets(1, &renderTargetView, zbuffer);
devcon->ClearRenderTargetView(renderTargetView, D3DXCOLOR(0.4f, 0.2f, 0.0f, 0.3f));
devcon->PSSetShaderResources(0, 1, &pTextureSun);
devcon->UpdateSubresource(pCBuffer, 0, 0, &cBuffer, 0, 0);
devcon->Draw(sizeOfVertexVector, 0);
// set the shader objects
devcon->IASetInputLayout(pLayoutGlow);
devcon->VSSetShader(pVS_glow, 0, 0);
devcon->PSSetShader(pPS_glow, 0, 0);
// set the render target as the back buffer
devcon->OMSetRenderTargets(1, &backbuffer, zbuffer);
devcon->IASetVertexBuffers(0, 1, &pVBufferQuad, &stride, &offset);
devcon->UpdateSubresource(pCBuffer, 0, 0, &cBuffer, 0, 0);
devcon->PSSetShaderResources(0, 1, &shaderResourceView);
devcon->Draw(6, 0);

The problem was that blending wasn't enabled.
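For reference, enabling alpha blending in D3D11 looks roughly like this. This is only a sketch: it reuses the dev and devcon names from the question, and the blend factors are the standard source-alpha ones, which may not be exactly what the glow effect needs.
// Hypothetical blend-state setup (not from the original post):
// standard source-alpha over inverse-source-alpha blending.
D3D11_BLEND_DESC bd;
ZeroMemory(&bd, sizeof(bd));
bd.RenderTarget[0].BlendEnable = TRUE;
bd.RenderTarget[0].SrcBlend = D3D11_BLEND_SRC_ALPHA;
bd.RenderTarget[0].DestBlend = D3D11_BLEND_INV_SRC_ALPHA;
bd.RenderTarget[0].BlendOp = D3D11_BLEND_OP_ADD;
bd.RenderTarget[0].SrcBlendAlpha = D3D11_BLEND_ONE;
bd.RenderTarget[0].DestBlendAlpha = D3D11_BLEND_ZERO;
bd.RenderTarget[0].BlendOpAlpha = D3D11_BLEND_OP_ADD;
bd.RenderTarget[0].RenderTargetWriteMask = D3D11_COLOR_WRITE_ENABLE_ALL;
ID3D11BlendState *pBlendState = NULL;
dev->CreateBlendState(&bd, &pBlendState);
// Bind it before drawing the glow quad; passing NULL instead restores the default (no blending).
float blendFactor[4] = { 0.0f, 0.0f, 0.0f, 0.0f };
devcon->OMSetBlendState(pBlendState, blendFactor, 0xFFFFFFFF);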

I had problems with render-to-texture before because I bound a shader resource view to the same texture that was currently bound as the render target, which is forbidden. If you use Visual Studio 2012, try running your code with the debug layer enabled. Aren't you making the same mistake I did?
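A quick way to avoid that hazard is to unbind the shader resource view before binding its texture as a render target again, and to create the device with the debug layer so Direct3D reports such conflicts. A sketch, again borrowing the variable names from the question:
// Unbind the SRV from the pixel shader before re-binding its texture as a render target.
ID3D11ShaderResourceView *nullSRV[1] = { NULL };
devcon->PSSetShaderResources(0, 1, nullSRV);
devcon->OMSetRenderTargets(1, &renderTargetView, zbuffer);
// Passing D3D11_CREATE_DEVICE_DEBUG as the Flags argument of
// D3D11CreateDeviceAndSwapChain makes these binding hazards show up
// in the debugger's output window.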

Related

Why are my StretchBlt images losing colours?

I am having some difficulties in correctly populating a CListCtrl with thumbnails of monitor displays.
On the right of my CDialog I have a static control and I render the image on a white canvas like this:
void CCenterCursorOnScreenDlg::OnDrawItem(int nIDCtl, LPDRAWITEMSTRUCT lpDrawItemStruct)
{
if (nIDCtl == IDC_STATIC_MONITOR && !m_imgPreview.IsNull())
{
// Set the mode
SetStretchBltMode(lpDrawItemStruct->hDC, HALFTONE);
// Wipe the canvas
FillRect(lpDrawItemStruct->hDC, &lpDrawItemStruct->rcItem, static_cast<HBRUSH>(GetStockObject(WHITE_BRUSH)));
// Get canvas rectangle
const CRect rectCanvas(lpDrawItemStruct->rcItem);
// Calculate ratio factors
const float nRatioImage = m_imgPreview.GetWidth() / static_cast<float>(m_imgPreview.GetHeight());
const float nRatioCanvas = rectCanvas.Width() / static_cast<float>(rectCanvas.Height());
// Calculate new rectangle size
// Account for portrait images (negative values)
CRect rectDraw = rectCanvas;
if (nRatioImage > nRatioCanvas)
rectDraw.SetRect(0, 0, rectDraw.right, static_cast<int>(rectDraw.right / nRatioImage));
else if (nRatioImage < nRatioCanvas)
rectDraw.SetRect(0, 0, static_cast<int>((rectDraw.bottom * nRatioImage)), rectDraw.bottom);
// Add a margin
rectDraw.DeflateRect(5, 5);
// Move to center
const CSize ptOffset = rectCanvas.CenterPoint() - rectDraw.CenterPoint();
rectDraw.OffsetRect(ptOffset);
// Add a black frame
FrameRect(lpDrawItemStruct->hDC, &lpDrawItemStruct->rcItem, static_cast<HBRUSH>(GetStockObject(BLACK_BRUSH)));
// Draw
m_imgPreview.Draw(lpDrawItemStruct->hDC, rectDraw);
return;
}
CDialogEx::OnDrawItem(nIDCtl, lpDrawItemStruct);
}
The above works beautifully:
But I have problems with the CListCtrl versions of the images. For instance, I am losing the colouring as you can see.
My CImageList is created like this:
m_ImageListThumb.Create(THUMBNAIL_WIDTH, THUMBNAIL_HEIGHT, ILC_COLOR32, 0, 1);
m_ListThumbnail.SetImageList(&m_ImageListThumb, LVSIL_NORMAL);
I then create all the thumbnails by calling DrawThumbnails() in OnInitDialog:
void CCenterCursorOnScreenDlg::DrawThumbnails()
{
int monitorIndex = 0;
m_ListThumbnail.SetRedraw(FALSE);
for (auto& strMonitor : m_monitors.strMonitorNames)
{
CImage img;
CreateMonitorThumbnail(monitorIndex, img, true);
CBitmap* pImage = new CBitmap();
pImage->Attach((HBITMAP)img);
m_ImageListThumb.Add(pImage, nullptr);
CString strMonitorDesc = m_monitors.strMonitorNames.at(monitorIndex);
strMonitorDesc.AppendFormat(L" (Screen %d)", monitorIndex + 1);
m_ListThumbnail.InsertItem(monitorIndex, strMonitorDesc, monitorIndex);
monitorIndex++;
delete pImage;
}
m_ListThumbnail.SetRedraw(TRUE);
}
The CreateMonitorThumbnail function:
BOOL CCenterCursorOnScreenDlg::CreateMonitorThumbnail(const int iMonitorIndex, CImage &rImage, bool bSmall)
{
const CRect rcCapture = m_monitors.rcMonitors.at(iMonitorIndex);
// destroy the currently contained bitmap to create a new one
rImage.Destroy();
auto nWidth = rcCapture.Width();
auto nHeight = rcCapture.Height();
if (bSmall)
{
nWidth = THUMBNAIL_WIDTH;
nHeight = THUMBNAIL_HEIGHT;
}
// create bitmap and attach it to this object
if (!rImage.Create(nWidth, nHeight, 32, 0))
{
AfxMessageBox(L"Cannot create image!", MB_ICONERROR);
return FALSE;
}
// create virtual screen DC
CDC dcScreen;
dcScreen.CreateDC(_T("DISPLAY"), nullptr, nullptr, nullptr);
// copy the contents from the virtual screen DC
BOOL bRet = FALSE;
if (bSmall)
{
CRect rt(0, 0, nWidth, nHeight);
//::FillRect(rImage.GetDC(), rt, static_cast<HBRUSH>(GetStockObject(WHITE_BRUSH)));
bRet = ::StretchBlt(rImage.GetDC(), 0, 0,
nWidth,
nHeight,
dcScreen.m_hDC,
rcCapture.left,
rcCapture.top,
rcCapture.Width(),
rcCapture.Height(), SRCCOPY | CAPTUREBLT);
}
else
{
bRet = ::BitBlt(rImage.GetDC(), 0, 0,
rcCapture.Width(),
rcCapture.Height(),
dcScreen.m_hDC,
rcCapture.left,
rcCapture.top, SRCCOPY | CAPTUREBLT);
}
// do cleanup and return
dcScreen.DeleteDC();
rImage.ReleaseDC();
return bRet;
}
Ideally I want to have exactly the same kind of visual image as on the right, but obviously resized down. How do I fix this?
I simplified the conversion from CImage to CBitmap, but it made no difference:
void CCenterCursorOnScreenDlg::DrawThumbnails()
{
int monitorIndex = 0;
// Stop redrawing the CListCtrl
m_ListThumbnail.SetRedraw(FALSE);
// Loop monitor info
for (auto& strMonitor : m_monitors.strMonitorNames)
{
// Create the thumbnail image
CImage monitorThumbnail;
CreateMonitorThumbnail(monitorIndex, monitorThumbnail, true);
// Convert it to a CBitmap
CBitmap* pMonitorThumbnailBitmap = CBitmap::FromHandle(monitorThumbnail);
// Add the CBitmap to the CImageList
m_ImageListThumb.Add(pMonitorThumbnailBitmap, nullptr);
// Build the caption description
CString strMonitorDesc = m_monitors.strMonitorNames.at(monitorIndex);
strMonitorDesc.AppendFormat(L" (Screen %d)", monitorIndex + 1);
// Add the item to the CListCtrl
m_ListThumbnail.InsertItem(monitorIndex, strMonitorDesc, monitorIndex);
monitorIndex++;
}
// Start redrawing the CListCtrl again
m_ListThumbnail.SetRedraw(TRUE);
}
If I change my code to pass false for the last parameter, so that it uses the original captured images without scaling down:
The colours are good there, so it is when I do:
if (bSmall)
{
CRect rt(0, 0, nWidth, nHeight);
//::FillRect(rImage.GetDC(), rt, static_cast<HBRUSH>(GetStockObject(WHITE_BRUSH)));
bRet = ::StretchBlt(rImage.GetDC(), 0, 0,
nWidth,
nHeight,
dcScreen.m_hDC,
rcCapture.left,
rcCapture.top,
rcCapture.Width(),
rcCapture.Height(), SRCCOPY | CAPTUREBLT);
}
that it messes up.
My issue did not have anything to do with OnDrawItem. I simply included that to show how the image on the right was being rendered; I thought it might help as background information, but it has probably confused the question and I may take it out in the long run!
Based on the comments I was reminded about SetStretchBltMode, which was missing from CreateMonitorThumbnail. So I now have this function:
BOOL CCenterCursorOnScreenDlg::CreateMonitorThumbnail(const int iMonitorIndex, CImage &rImage, bool bResizeAsThumbnail)
{
const CRect rcCapture = m_monitors.rcMonitors.at(iMonitorIndex);
// Destroy the currently contained bitmap to create a new one
rImage.Destroy();
// Massage the dimensions as we want a thumbnail
auto nWidth = rcCapture.Width();
auto nHeight = rcCapture.Height();
if (bResizeAsThumbnail)
{
nWidth = m_iThumbnailWidth;
auto dRatio = rcCapture.Width() / nWidth;
//nHeight = m_iThumbnailHeight;
nHeight = static_cast<int>(rcCapture.Height() / dRatio);
if (nHeight > m_iThumbnailHeight)
{
AfxMessageBox(L"Need to investigate!");
}
}
// Create bitmap and attach it to this object
if (!rImage.Create(nWidth, nHeight, 32, 0))
{
AfxMessageBox(L"Cannot create image!", MB_ICONERROR);
return FALSE;
}
// Create virtual screen DC
CDC dcScreen;
dcScreen.CreateDC(L"DISPLAY", nullptr, nullptr, nullptr);
// Copy (or resize) the contents from the virtual screen DC
BOOL bRet = FALSE;
auto dcImage = rImage.GetDC();
if (bResizeAsThumbnail)
{
// Set the mode first!
SetStretchBltMode(dcImage, COLORONCOLOR);
CPen penBlack;
penBlack.CreatePen(PS_SOLID, 3, RGB(0, 0, 0));
::Rectangle(dcImage, 0, 0, m_iThumbnailWidth, m_iThumbnailHeight);
int iTop = (m_iThumbnailHeight - nHeight) / 2;
// Copy (and resize)
bRet = ::StretchBlt(dcImage, 0, iTop,
nWidth,
nHeight,
dcScreen.m_hDC,
rcCapture.left,
rcCapture.top,
rcCapture.Width(),
rcCapture.Height(), SRCCOPY | CAPTUREBLT);
}
else
{
// Copy
bRet = ::BitBlt(dcImage, 0, 0,
rcCapture.Width(),
rcCapture.Height(),
dcScreen.m_hDC,
rcCapture.left,
rcCapture.top, SRCCOPY | CAPTUREBLT);
}
// Do cleanup and return
dcScreen.DeleteDC();
rImage.ReleaseDC();
return bRet;
}
That was the key to getting the thumbnails showing with the right colours.
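As an aside, if thumbnail quality matters more than speed, the HALFTONE stretch mode (already used in OnDrawItem above) resamples more smoothly than COLORONCOLOR, and the documentation says to reset the brush origin after selecting it. A minimal sketch using the dcImage handle from the function above:
// Optional higher-quality alternative to COLORONCOLOR:
SetStretchBltMode(dcImage, HALFTONE);
SetBrushOrgEx(dcImage, 0, 0, NULL); // required after switching to HALFTONE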

Mismatch between print preview and real print output

I want to print a bitmap. To avoid printing a small bitmap, I set the CScrollView mapping mode to MM_LOMETRIC with a total size of 3830 x 1995. I created the bitmap and BitBlt it to the screen. Everything looked just as I wanted on the screen and in the print preview, but when I printed the document I got a very bad result.
The same picture on print preview.
It seems to me that the printer does not see the bitmap the same way the print preview does.
Note that the first rectangle is drawn directly on the DC, and the memDC content is blitted into it.
Any ideas how to fix this mismatch between the print preview and the real printout?
Project files
void OnDraw(CDC* pDC)
{
CPen pen;
pen.CreatePen(PS_SOLID, 1, RGB(0, 0, 0));
CPen* OldPen = pDC->SelectObject(&pen);
CRect rcView;
GetClientRect(rcView);
int iClientWidth = rcView.right;
int iClientHeight = rcView.bottom;
int iMemWidth = 1900;
int iMemHeight = 950;
CDC memDC;
CBitmap memBitmap;
memDC.CreateCompatibleDC(pDC);
memBitmap.CreateCompatibleBitmap(pDC, iMemWidth, iMemHeight);
memDC.SelectObject(&memBitmap);
memDC.SetMapMode(MM_LOMETRIC);
CPen pen1;
pen1.CreatePen(PS_SOLID, 3, RGB(0, 0, 0));
memDC.SelectObject(&pen1);
CBrush brBK;
brBK.CreateSolidBrush(RGB(255, 255, 255));
memDC.SelectObject(&brBK);
RECT rc;
rc.left = 0;
rc.top = 0;
rc.right = iMemWidth;
rc.bottom = iMemHeight;
memDC.FillRect(&rc, &brBK);
memDC.Rectangle(rc.left, rc.top, rc.right, -rc.bottom);
memDC.MoveTo(0, 0);
memDC.LineTo(1900, -950);
memDC.MoveTo(0, -950);
memDC.LineTo(200, -750);
CFont font;
font.CreateFont(
50, // nHeight
0, // nWidth
0, // nEscapement
0, // nOrientation
FW_NORMAL, // nWeight
FALSE, // bItalic
FALSE, // bUnderline
0, // cStrikeOut
ANSI_CHARSET, // nCharSet
OUT_DEFAULT_PRECIS, // nOutPrecision
CLIP_DEFAULT_PRECIS, // nClipPrecision
DEFAULT_QUALITY, // nQuality
DEFAULT_PITCH | FF_SWISS, // nPitchAndFamily
_T("Arial"));
memDC.SelectObject(&font);
memDC.TextOut(100, -100, _T("Hello"));
pDC->BitBlt(10, -10, iMemWidth, -iMemHeight, &memDC, 0, 0, SRCCOPY);
font.DeleteObject();
brBK.DeleteObject();
memDC.DeleteDC();
memBitmap.DeleteObject();
pen.DeleteObject();
pen1.DeleteObject();
}
void OnInitialUpdate()
{
CScrollView::OnInitialUpdate();
CSize sizeTotal;
sizeTotal.cx = 3830;
sizeTotal.cy = 1995;
SetScrollSizes(MM_LOMETRIC, sizeTotal);
}

How to apply a gradient effect on an image (GDI)

How can I apply a gradient effect to an image, like the image below, in C#? I have a transparent image with a black drawing on it, and I want to apply a two-color gradient to the image. Is this possible in GDI?
Here is the effect I want to achieve:
http://postimg.org/image/ikz1ie7ip/
You create a PathGradientBrush and then you draw your text with that brush.
To create a bitmap filled with a gradient brush (a LinearGradientBrush here) you could do something like:
public Bitmap GradientImage(int width, int height, Color color1, Color color2, float angle)
{
var r = new Rectangle(0, 0, width, height);
var bmp = new Bitmap(width, height);
using (var brush = new LinearGradientBrush(r, color1, color2, angle, true))
using (var g = Graphics.FromImage(bmp))
g.FillRectangle(brush, r);
return bmp;
}
So now that you have an image with the gradient in it, all you have to do is to bring over the alpha channel from your original image into the newly created image. We can take the transferOneARGBChannelFromOneBitmapToAnother function from a blog post I once wrote:
public enum ChannelARGB
{
Blue = 0,
Green = 1,
Red = 2,
Alpha = 3
}
public static void transferOneARGBChannelFromOneBitmapToAnother(
Bitmap source,
Bitmap dest,
ChannelARGB sourceChannel,
ChannelARGB destChannel )
{
if ( source.Size!=dest.Size )
throw new ArgumentException();
Rectangle r = new Rectangle( Point.Empty, source.Size );
BitmapData bdSrc = source.LockBits( r, ImageLockMode.ReadOnly, PixelFormat.Format32bppArgb );
BitmapData bdDst = dest.LockBits( r, ImageLockMode.ReadWrite, PixelFormat.Format32bppArgb );
unsafe
{
byte* bpSrc = (byte*)bdSrc.Scan0.ToPointer();
byte* bpDst = (byte*)bdDst.Scan0.ToPointer();
bpSrc += (int)sourceChannel;
bpDst += (int)destChannel;
for ( int i = r.Height * r.Width; i > 0; i-- )
{
*bpDst = *bpSrc;
bpSrc += 4;
bpDst += 4;
}
}
source.UnlockBits( bdSrc );
dest.UnlockBits( bdDst );
}
Now you could do something like:
var newImage = GradientImage( original.Width, original.Height, Color.Yellow, Color.Blue, 45 );
transferOneARGBChannelFromOneBitmapToAnother( original, newImage, ChannelARGB.Alpha, ChannelARGB.Alpha );
And there you are. :-)

How to update Texture2D in pixel shader every frame (in D3D10)?

Using D3D10, I am drawing a 2d rectangle and want to fill it with a texture (bitmap) that should change a few times every second (like displaying video).
I am using a shader effect, with a Texture2D variable, and trying to update a ID3D10EffectShaderResourceVariable and redraw the mesh.
My actual usage will be copying bitmaps from memory and updating the texture with UpdateSubresource.
But it did not work, so I reduced it to test switching between two DDS images.
The result is that it draws the first image as expected, but keeps drawing it instead of switching between the two images.
I am new to D3D. Can you explain whether this method can work at all, or suggest the right way to do it?
The shader effect:
Texture2D txDiffuse;
SamplerState samLinear
{
Filter = MIN_MAG_MIP_LINEAR;
AddressU = Wrap;
AddressV = Wrap;
};
struct VS_INPUT
{
float4 Pos : POSITION;
float2 Tex : TEXCOORD;
};
struct PS_INPUT
{
float4 Pos : SV_POSITION;
float2 Tex : TEXCOORD0;
};
PS_INPUT VS( VS_INPUT input )
{
PS_INPUT output = (PS_INPUT)0;
output.Pos = input.Pos;
output.Tex = input.Tex;
return output;
}
float4 PS( PS_INPUT input) : SV_Target
{
return txDiffuse.Sample( samLinear, input.Tex );
}
technique10 Render
{
pass P0
{
SetVertexShader( CompileShader( vs_4_0, VS() ) );
SetGeometryShader( NULL );
SetPixelShader( CompileShader( ps_4_0, PS() ) );
}
}
Code (skipped many parts):
ID3D10ShaderResourceView* g_pTextureRV = NULL;
ID3D10EffectShaderResourceVariable* g_pDiffuseVariable = NULL;
D3DX10CreateEffectFromResource(gInstance, MAKEINTRESOURCE(IDR_RCDATA1), NULL, NULL, NULL, "fx_4_0", dwShaderFlags, 0, device, NULL, NULL, &g_pEffect, NULL, NULL);
g_pTechnique = g_pEffect->GetTechniqueByName( "Render" );
g_pDiffuseVariable = g_pEffect->GetVariableByName( "txDiffuse" )->AsShaderResource();
// this part is called on Frame render:
device->CreateRenderTargetView( backBuffer, NULL, &rtView);
device->ClearRenderTargetView( rtView, ClearColor );
if(g_pTextureRV != NULL) {
g_pTextureRV->Release();
g_pTextureRV = NULL;
}
D3DX10CreateShaderResourceViewFromFile(device, pCurrentDDSFilePath, NULL, NULL, &g_pTextureRV, NULL );
g_pDiffuseVariable->SetResource( g_pTextureRV );
D3D10_TECHNIQUE_DESC techDesc;
g_pTechnique->GetDesc( &techDesc );
for( UINT p = 0; p < techDesc.Passes; ++p )
{
g_pTechnique->GetPassByIndex( p )->Apply( 0 );
direct2dDrawingContext->dev->Draw( 6, 0 );
}
// ... present the current back buffer
One solution, not necessarily the best, but one that doesn't use custom shaders, follows (I wrote it in C# / Managed DirectX but it should be easy to transcode.)
Bitmap bmp; //the bitmap that you will use to update the texture
Texture tex; //the texture that DirectX will render
void Render()
{
//render some stuff
bmp = GetNextTextureFrame(); //whatever you do to update your bitmap
Surface s = tex.GetSurfaceLevel(0);
Graphics g = s.GetGraphics();
//IntPtr hdc = g.GetHdc();
//BitBlt(hdc, 0, 0, bmp.Width, bmp.Height, bmpHdc, 0, 0, 0xcc0020);
g.DrawImageUnscaled(bmp, 0, 0);
//g.ReleaseHdc(hdc); // only needed with the commented-out GetHdc variant above
s.ReleaseGraphics();
device.SetTexture(0, tex);
//now render your primitives
//render some more stuff
//present
}
The commented-out lines show the way I actually did it, using an HBITMAP and a DC with BitBlt, because it's faster than GDI+. A lot of people will probably tell you that the above is a bad way to do it, because of all the memory locking that has to occur, and they're probably right. But I was able to achieve 30 fps with multiple 1920x1080 textures, so regardless of whether it's proper, it works.
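For the UpdateSubresource route the question mentions, a minimal D3D10 sketch might look like the following. This assumes a texture created once with D3D10_USAGE_DEFAULT and format DXGI_FORMAT_R8G8B8A8_UNORM, with a shader resource view created from it up front; pTexture, pixels, and width are placeholder names, not from the original code.
// Each frame: copy the new bitmap bits into the existing texture,
// then draw as before. SrcRowPitch is the byte size of one source row.
UINT rowPitch = width * 4; // 4 bytes per pixel for R8G8B8A8
device->UpdateSubresource(pTexture, 0, NULL, pixels, rowPitch, 0);
g_pDiffuseVariable->SetResource(g_pTextureRV); // SRV was created once from pTexture
This avoids creating and releasing a shader resource view every frame, which is what the file-loading test code above does.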

Screen-space square looking distorted in PIX

I have a simple function that creates a square covering the entire screen; I use it for applying post-processing effects. However, as far as I can tell it has been the cause of countless errors.
When I run my code in PIX I get the following mesh, but the square should be straight and covering the screen, shouldn't it?
My vertex shader does no transformation and simply passes position information to the pixel shader.
The function that creates the square is as follows:
private void InitializeGeometry()
{
meshes = new Dictionary<Vector3, Mesh>();
//build array of vertices for one square
ppVertex[] vertexes = new ppVertex[4];
//vertexes[0].Position = new Vector3(-1f, -1f, 0.25f);
vertexes[0].Position = new Vector3(-1, -1, 1f);
vertexes[1].Position = new Vector3(-1, 1, 1f);
vertexes[2].Position = new Vector3(1, -1, 1f);
vertexes[3].Position = new Vector3(1, 1, 1f);
vertexes[0].TexCoords = new Vector2(0, 0);
vertexes[1].TexCoords = new Vector2(0, 1);
vertexes[2].TexCoords = new Vector2(1, 0);
vertexes[3].TexCoords = new Vector2(1, 1);
//build index array for the vertices to build a quad from two triangles
short[] indexes = { 0, 1, 2, 1, 3, 2 };
//create the data stream to push the vertex data into the buffer
DataStream vertices = new DataStream(Marshal.SizeOf(typeof(Vertex)) * 4, true, true);
//load the data stream
vertices.WriteRange(vertexes);
//reset the data position
vertices.Position = 0;
//create the data stream to push the index data into the buffer
DataStream indices = new DataStream(sizeof(short) * 6, true, true);
//load the data stream
indices.WriteRange(indexes);
//reset the data position
indices.Position = 0;
//create the mesh object
Mesh mesh = new Mesh();
//create the description of the vertex buffer
D3D.BufferDescription vbd = new BufferDescription();
vbd.BindFlags = D3D.BindFlags.VertexBuffer;
vbd.CpuAccessFlags = D3D.CpuAccessFlags.None;
vbd.OptionFlags = ResourceOptionFlags.None;
vbd.SizeInBytes = Marshal.SizeOf(typeof(Vertex)) * 4;
vbd.Usage = ResourceUsage.Default;
//create and assign the vertex buffer to the mesh, filling it with data
mesh.VertexBuffer = new D3D.Buffer(device, vertices, vbd);
//create the description of the index buffer
D3D.BufferDescription ibd = new BufferDescription();
ibd.BindFlags = D3D.BindFlags.IndexBuffer;
ibd.CpuAccessFlags = D3D.CpuAccessFlags.None;
ibd.OptionFlags = ResourceOptionFlags.None;
ibd.SizeInBytes = sizeof(short) * 6;
ibd.Usage = ResourceUsage.Default;
//create and assign the index buffer to the mesh, filling it with data
mesh.IndexBuffer = new D3D.Buffer(device, indices, ibd);
//get vertex and index counts
mesh.vertices = vertexes.GetLength(0);
mesh.indices = indexes.Length;
//close the data streams
indices.Close();
vertices.Close();
meshes.Add(new Vector3(0), mesh);
}
and when I render the square:
private void DrawScene()
{
lock (meshes)
{
foreach (Mesh mesh in meshes.Values)
{
if (mesh.indices > 0)
{
try
{
//if (camera.SphereInFrustum(mesh.BoundingSphere, sphereRadius))
//{
context.InputAssembler.SetVertexBuffers(0, new VertexBufferBinding(mesh.VertexBuffer, Marshal.SizeOf(typeof(Vertex)), 0));
context.InputAssembler.SetIndexBuffer(mesh.IndexBuffer, Format.R16_UInt, 0);
context.DrawIndexed(mesh.indices, 0, 0);
//}
}
catch (Exception err)
{
MessageBox.Show(err.Message);
}
}
}
}
}
EDIT: I've added the vertex shader being run
cbuffer EveryFrame : register(cb0)
{
float3 diffuseColor : packoffset(c0);
float3 lightdir : packoffset(c1);
};
cbuffer EveryMotion : register(cb1)
{
float4x4 WorldViewProjection : packoffset(c0);
float4x4 LightWorldViewProjection : packoffset(c4);
};
struct VS_IN
{
float3 position : POSITION;
float3 normal : NORMAL;
float4 col : TEXCOORD;
};
struct PS_IN
{
float4 position : SV_POSITION;
float4 col : TEXCOORD;
float3 normal : NORMAL;
};
PS_IN VS(VS_IN input)
{
PS_IN output;
output.position = float4(input.position,1);
output.col = input.col;
output.normal = input.normal;
return output;
}
Here's PIX's vertex output.
PreVS:
PostVS:
And here's the disassembly PIX generated when I chose to debug vertex 0:
//
// Generated by Microsoft (R) HLSL Shader Compiler 9.29.952.3111
//
//
//
// Input signature:
//
// Name Index Mask Register SysValue Format Used
// ---------------- ----- ------ -------- -------- ------ ------
// POSITION 0 xyz 0 NONE float xyz
// NORMAL 0 xyz 1 NONE float xyz
// TEXCOORD 0 xyzw 2 NONE float
//
//
// Output signature:
//
// Name Index Mask Register SysValue Format Used
// ---------------- ----- ------ -------- -------- ------ ------
// SV_POSITION 0 xyzw 0 POS float xyzw
// TEXCOORD 0 xyzw 1 NONE float xyzw
// NORMAL 0 xyz 2 NONE float xyz
//
vs_4_0
dcl_input v0.xyz
dcl_input v1.xyz
dcl_output_siv o0.xyzw , position
dcl_output o1.xyzw
dcl_output o2.xyz
mov o0.xyz, v0.xyzx
mov o0.w, l(1.000000)
mov o1.xyzw, l(1.000000, 1.000000, 1.000000, 1.000000)
mov o2.xyz, v1.xyzx
ret
// Approximately 5 instruction slots used
I've also added the input assembler:
private void SetPPInputAssembler(Shader shader)
{
InputElement[] elements = new[] {
new InputElement("POSITION",0,Format.R32G32B32_Float,0),
new InputElement("NORMAL",0,Format.R32G32B32_Float,12,0),
new InputElement("TEXCOORD",0,Format.R32G32_Float,24,0),
};
InputLayout layout = new InputLayout(device, shader.InputSignature, elements);
context.InputAssembler.InputLayout = layout;
context.InputAssembler.PrimitiveTopology = PrimitiveTopology.TriangleList;
}
Obviously your vertex input values don't match what you intend to pass in.
For the first vertex the values look good until the z-coordinate of the texture coordinates.
You are defining a Vector2 in your program's Vertex struct, but a float4 in the vertex shader's VS_IN struct, so things get mixed up.
Just change VS_IN to this:
struct VS_IN
{
float3 position : POSITION;
float3 normal : NORMAL;
float2 col : TEXCOORD; // float2 instead of float4
};
I'm not sure, though, whether you really want colors or rather texcoords. If you really want colors, float4 would be right, but then you would have to change
vertexes[0].TexCoords = new Vector2(0, 0);
into
vertexes[0].TexCoords = new Vector4(0, 0, 0, 0);
Either way, one of those variables is misnamed and probably the reason for the confusion.
