I'm working on a path/ray tracer in C++ and I'm currently working on loading OBJ files. Some objects have flipped normals after being loaded, and I can't figure out where this behaviour comes from or how to fix it.
See image for better understanding of the issue.
image showing current behaviour:
Link to full GitHub page
At first I thought it was an issue with the normals behind the surface, but after rendering the color based on the surface normal it's obvious that the normals are flipped in some cases.
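For reference, this is roughly the debug shading I used: each component of the unit normal is mapped from [-1,1] to [0,1], so flipped normals show up as the "opposite" color. The Color type and the Vect3 member names here are assumptions, not the exact types from my project:

// Sketch of normal visualization; Color and the x/y/z members are assumed.
Color normalToColor(const Vect3& n)
{
    // Assumes n is normalized; a flipped normal maps to the complementary color.
    return Color(0.5f * (n.x + 1.0f),
                 0.5f * (n.y + 1.0f),
                 0.5f * (n.z + 1.0f));
}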
Here is my very basic code for loading the model.
//OBJ Loader object.
bool OBJLoader::loadMesh (std::string filePath){
// If the file is not an .obj file return false
if (filePath.substr(filePath.size() - 4, 4) != ".obj"){
std::cout << "No .obj file found at given file location: "<<filePath << std::endl;
return false;
}
//Open file stream
std::ifstream file(filePath);
//check if file is open.
if (!file.is_open()){
std::cout << "File was not opened!" << std::endl;
return false;
}
//Do file loading.
std::cout << "Parsing obj-file: "<<filePath << std::endl;
//construct mesh data.
bool smoothShading = false;
std::string obj_name;
std::vector<Vertex> vertices;
std::vector<Vect3> Positions;
std::vector<Vect3> Normals;
std::vector<Vect2> UVs;
std::vector<unsigned int> V_indices;
//the current line
std::string currentLine;
//loop over each line and parse the needed data.
while(std::getline(file, currentLine)){
//for now we just print the line
//std::cout << currentLine << std::endl;
if(algorithm::startsWith(currentLine, "s ")){
std::vector<std::string> line_split = algorithm::split(currentLine,' ');
if( line_split[1] == std::string("off")){
smoothShading = false;
}else if(line_split[1] == std::string("1")){
//enable smooth shading.
smoothShading = true;
}
}
//check if the line starts with o -> object name.
if(algorithm::startsWith(currentLine, "o ")){
//store the object name.
std::vector<std::string> line_split = algorithm::split(currentLine,' ');
obj_name = line_split[1];
}
//check if the line starts with v -> vertex.
if(algorithm::startsWith(currentLine, "v ")){
//construct new vertex position.
std::vector<std::string> line_split = algorithm::split(currentLine,' ');
float x = std::stof(line_split[1]);
float y = std::stof(line_split[2]);
float z = std::stof(line_split[3]);
Vect3 pos = Vect3(x,y,z);
Positions.push_back(pos);
}
//check if the line starts with vt -> vertex uv.
if(algorithm::startsWith(currentLine, "vt ")){
//construct new vertex uv.
std::vector<std::string> line_split = algorithm::split(currentLine,' ');
float u = std::stof(line_split[1]);
float v = std::stof(line_split[2]);
Vect2 uv = Vect2(u,v);
UVs.push_back(uv);
}
//check if the line starts with vn -> vertex normals.
if(algorithm::startsWith(currentLine, "vn ")){
//construct new vertex normal.
std::vector<std::string> line_split = algorithm::split(currentLine,' ');
float x = std::stof(line_split[1]);
float y = std::stof(line_split[2]);
float z = std::stof(line_split[3]);
Vect3 normal = Vect3(x,y,z);
Normals.push_back(normal);
}
//check if the line starts with f -> construct faces.
if(algorithm::startsWith(currentLine, "f ")){
//split the face definition into its vertex tokens.
std::vector<std::string> line_split = algorithm::split(currentLine,' ');
//#NOTE: this only works when mesh is already triangulated.
//Parse all vertices.
std::vector<std::string> vertex1 = algorithm::split(line_split[1],'/');
std::vector<std::string> vertex2 = algorithm::split(line_split[2],'/');
std::vector<std::string> vertex3 = algorithm::split(line_split[3],'/');
if(vertex1.size() <= 1){
//VERTEX 1
Vect3 position = Positions[std::stoi(vertex1[0])-1];
Vertex v1(position);
vertices.push_back(v1);
//VERTEX 2
position = Positions[std::stoi(vertex2[0])-1];
Vertex v2(position);
vertices.push_back(v2);
//VERTEX 3
position = Positions[std::stoi(vertex3[0])-1];
Vertex v3(position);
vertices.push_back(v3);
//Add to Indices array.
//calculate the index number
//The 3 comes from 3 vertices per face.
unsigned int index = vertices.size() - 3;
V_indices.push_back(index);
V_indices.push_back(index+1);
V_indices.push_back(index+2);
}
//check if the T (uv) index exists.
else if(vertex1[1] == ""){
//No UV: format V//N.
//V -> index in the positions array.
//N -> index in the normals array.
//VERTEX 1
Vect3 position = Positions[std::stoi(vertex1[0])-1];
Vect3 normal = Normals[std::stoi(vertex1[2])-1];
Vertex v1(position,normal);
vertices.push_back(v1);
//VERTEX 2
position = Positions[std::stoi(vertex2[0])-1];
normal = Normals[std::stoi(vertex2[2])-1];
Vertex v2(position,normal);
vertices.push_back(v2);
//VERTEX 3
position = Positions[std::stoi(vertex3[0])-1];
normal = Normals[std::stoi(vertex3[2])-1];
Vertex v3(position,normal);
vertices.push_back(v3);
//Add to Indices array.
//calculate the index number
//The 3 comes from 3 vertices per face.
unsigned int index = vertices.size() - 3;
V_indices.push_back(index);
V_indices.push_back(index+1);
V_indices.push_back(index+2);
}else if (vertex1[1] != ""){
//We have UV
//V -> index in the positions array.
//T -> index of UV
//N -> index in the normals array.
//VERTEX 1
Vect3 position = Positions[std::stoi(vertex1[0])-1];
Vect2 uv = UVs[std::stoi(vertex1[1])-1];
Vect3 normal = Normals[std::stoi(vertex1[2])-1];
Vertex v1(position,normal,uv);
vertices.push_back(v1);
//VERTEX 2
position = Positions[std::stoi(vertex2[0])-1];
uv = UVs[std::stoi(vertex2[1])-1];
normal = Normals[std::stoi(vertex2[2])-1];
Vertex v2(position,normal,uv);
vertices.push_back(v2);
//VERTEX 3
position = Positions[std::stoi(vertex3[0])-1];
uv = UVs[std::stoi(vertex3[1])-1];
normal = Normals[std::stoi(vertex3[2])-1];
Vertex v3(position,normal,uv);
vertices.push_back(v3);
//Add to Indices array.
//calculate the index number
//The 3 comes from 3 vertices per face.
unsigned int index = vertices.size() - 3;
V_indices.push_back(index);
V_indices.push_back(index+1);
V_indices.push_back(index+2);
}
//We can check here in which format. V/T/N, V//N, V//, ...
//For now we ignore this and use V//N.
}
}
//close stream
file.close();
Positions.clear();
Normals.clear();
UVs.clear();
//reorder the arrays so the corresponding index matches the position, uv and normal.
for (Vertex v: vertices) {
Positions.push_back(v.getPosition());
Normals.push_back(v.getNormal());
UVs.push_back(v.getUV());
}
//Load mesh data.
_mesh = Mesh(smoothShading,obj_name, Positions, Normals, UVs, V_indices);
//return true, success.
return true;
}
After this, the model is inserted into a grid structure for faster intersection tests.
for(int i= 0;i<mesh._indices.size();i=i+3){
Triangle* tri;
if(mesh.smoothShading){
tri = new SmoothTriangle(Point3(mesh._positions[mesh._indices[i]]),
Point3(mesh._positions[mesh._indices[i+1]]),
Point3(mesh._positions[mesh._indices[i+2]]),
Normal(mesh._normals[mesh._indices[i]]),
Normal(mesh._normals[mesh._indices[i+1]]),
Normal(mesh._normals[mesh._indices[i+2]]),material);
}else{
tri = new Triangle(Point3(mesh._positions[mesh._indices[i]]),
Point3(mesh._positions[mesh._indices[i+1]]),Point3(mesh._positions[mesh._indices[i+2]]),Normal(mesh._normals[mesh._indices[i]]),material);
}
add_object(tri);
}
constructCells();
It may also be useful to add the code for interpolating the normals:
Normal SmoothTriangle::calculate_normal(double gamma, double beta){
return (Normal((1 - beta - gamma) * n0 + beta * n1 + gamma * n2)).normalize();
}
FIXED
I fixed the issue. It was not in my OBJ loader. The model was exported from Blender, and the export applied all modifiers; the Solidify modifier caused some back faces to clip with the front faces in the exported .obj file. After removing this modifier everything was back to "normal" (just a funny pun to finish this answer).
There might be nothing wrong in your code. I assume the obj is corrupted, as some obj models have flipped normals ...
The Wavefront obj format does not specify the normal direction at all. I have even seen models without consistency, so some normals point out and others in. You cannot even be sure the faces follow a single winding rule. So it is safer to use bidirectional normals (that is, using
|dot(normal,light)|
instead of
dot(normal,light)
and no face culling), or recompute the normals and even the winding rule on your own after load.
Bidirectional normals/lighting are sometimes set by different material settings for each side of a face (FRONT and BACK, or FRONT_AND_BACK, DOUBLE_SIDED, etc., or their configuration)... just look in your gfx API for such stuff. To turn off face culling, look for things like CULL_FACE.
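As a rough illustration of the last option, here is a minimal C++ sketch that recomputes a geometric normal from the winding order and flips any stored vertex normal that disagrees with it. The Vect3 member names and the cross()/dot() helpers are assumptions, not taken from the code above:

#include <vector>

// Geometric normal implied by the triangle's winding (CCW assumed front-facing).
Vect3 faceNormalFromWinding(const Vect3& p0, const Vect3& p1, const Vect3& p2)
{
    return cross(p1 - p0, p2 - p0);
}

// Flip any stored vertex normal that points against its face's winding.
void fixFlippedNormals(const std::vector<Vect3>& positions,
                       const std::vector<unsigned int>& indices,
                       std::vector<Vect3>& normals)
{
    for (size_t i = 0; i + 2 < indices.size(); i += 3)
    {
        Vect3 geo = faceNormalFromWinding(positions[indices[i]],
                                          positions[indices[i + 1]],
                                          positions[indices[i + 2]]);
        for (size_t k = 0; k < 3; ++k)
        {
            Vect3& n = normals[indices[i + k]];
            if (dot(n, geo) < 0.0f)
                n = Vect3(-n.x, -n.y, -n.z);
        }
    }
}

For the bidirectional-lighting option the change is even smaller: take the absolute value of the dot product in the shading code and disable back-face culling.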
Related
I've been trying to learn screen-space techniques, specifically ray-marching ones, but I have been struggling to get a single working example to continue learning from and solidify my knowledge. I'm implementing screen-space shadows following this article, but my result just seems to be a white image and I cannot understand why. The code makes sense to me but the result does not seem right. I can't see where I might have gone wrong while attempting this screen-space ray-marching technique and would appreciate any insight that will help me continue learning.
Using Vulkan + GLSL
Full shader: screen_space_shadows.glsl
// calculate screen space shadows
float computeScreenSpaceShadow()
{
vec3 FragPos = texture(gPosition, uvCoords).rgb;
vec4 ViewSpaceLightPosition = camera.view * light.LightPosition;
vec3 LightDirection = ViewSpaceLightPosition.xyz - FragPos.xyz;
// Ray position and direction in view-space.
vec3 RayPos = texture(gPosition, uvCoords).xyz; // ray start position
vec3 RayDirection = normalize(-LightDirection.xyz);
// Save original depth of the position
float DepthOriginal = RayPos.z;
// Ray step
vec3 RayStep = RayDirection * STEP_LENGTH;
float occlusion = 0.0;
for(uint i = 0; i < MAX_STEPS; i++)
{
RayPos += RayStep;
vec2 Ray_UV = ViewToScreen(RayPos);
// Make sure the UV is inside screen-space
if(!ValidRay(Ray_UV)){
return 1.0;
}
// Compute difference between ray and cameras depth
float DepthZ = linearize_depth(texture(depthMap, Ray_UV).x);
float DepthDelta = RayPos.z - DepthZ;
// Check if camera cannot see the ray. Ray depth must be larger than camera depth = positive delta
bool canCameraSeeRay = (DepthDelta > 0.0) && (DepthDelta < THICKNESS);
bool occludedByOriginalPixel = abs(RayPos.z - DepthOriginal) < MAX_DELTA_FROM_ORIGINAL_DEPTH;
if(canCameraSeeRay && occludedByOriginalPixel)
{
// Mark as occluded
occlusion = 1.0;
break;
}
}
return 1.0 - occlusion;
}
Output
I am using the following code to convert a Bitmap to Complex and vice versa.
Even though these were copied directly from the Accord.NET framework, while testing these static methods I have discovered that repeated use of them causes data loss. As a result, the end output becomes distorted.
public partial class ImageDataConverter
{
#region private static Complex[,] FromBitmapData(BitmapData bmpData)
private static Complex[,] ToComplex(BitmapData bmpData)
{
Complex[,] comp = null;
if (bmpData.PixelFormat == PixelFormat.Format8bppIndexed)
{
int width = bmpData.Width;
int height = bmpData.Height;
int offset = bmpData.Stride - (width * 1);//1 === 1 byte per pixel.
if ((!Tools.IsPowerOf2(width)) || (!Tools.IsPowerOf2(height)))
{
throw new Exception("Imager width and height should be n of 2.");
}
comp = new Complex[width, height];
unsafe
{
byte* src = (byte*)bmpData.Scan0.ToPointer();
for (int y = 0; y < height; y++)
{
for (int x = 0; x < width; x++, src++)
{
comp[y, x] = new Complex((float)*src / 255,
comp[y, x].Imaginary);
}
src += offset;
}
}
}
else
{
throw new Exception("EightBppIndexedImageRequired");
}
return comp;
}
#endregion
public static Complex[,] ToComplex(Bitmap bmp)
{
Complex[,] comp = null;
if (bmp.PixelFormat == PixelFormat.Format8bppIndexed)
{
BitmapData bmpData = bmp.LockBits( new Rectangle(0, 0, bmp.Width, bmp.Height),
ImageLockMode.ReadOnly,
PixelFormat.Format8bppIndexed);
try
{
comp = ToComplex(bmpData);
}
finally
{
bmp.UnlockBits(bmpData);
}
}
else
{
throw new Exception("EightBppIndexedImageRequired");
}
return comp;
}
public static Bitmap ToBitmap(Complex[,] image, bool fourierTransformed)
{
int width = image.GetLength(0);
int height = image.GetLength(1);
Bitmap bmp = Imager.CreateGrayscaleImage(width, height);
BitmapData bmpData = bmp.LockBits(
new Rectangle(0, 0, width, height),
ImageLockMode.ReadWrite,
PixelFormat.Format8bppIndexed);
int offset = bmpData.Stride - width;
double scale = (fourierTransformed) ? Math.Sqrt(width * height) : 1;
unsafe
{
byte* address = (byte*)bmpData.Scan0.ToPointer();
for (int y = 0; y < height; y++)
{
for (int x = 0; x < width; x++, address++)
{
double min = System.Math.Min(255, image[y, x].Magnitude * scale * 255);
*address = (byte)System.Math.Max(0, min);
}
address += offset;
}
}
bmp.UnlockBits(bmpData);
return bmp;
}
}
(The DotNetFiddle link of the complete source code)
(ImageDataConverter)
Output:
As you can see, FFT is working correctly, but I-FFT isn't.
That is because the bitmap-to-complex conversion and vice versa aren't working as expected.
What could be done to correct the ToComplex() and ToBitmap() functions so that they don't lose data?
I do not code in C# so handle this answer with extreme prejudice!
Just from a quick look I spotted a few problems:
ToComplex()
This converts a BMP into a 2D complex matrix. When converting, you leave the imaginary part unchanged, but at the start of the same function you have:
Complex[,] complex2D = null;
complex2D = new Complex[width, height];
So the imaginary parts are either undefined or zero, depending on your complex class constructor. This means you are missing half of the data needed for reconstruction!!! You should restore the original complex matrix from 2 images: one for the real part and a second for the imaginary part of the result.
ToBitmap()
You are saving the magnitude, which is I think sqrt( Re*Re + Im*Im ), so it is a power spectrum, not the original complex values, and so you cannot reconstruct back... You should store Re and Im in 2 separate images.
8bit per pixel
That is not much and can cause significant round-off errors after FFT/IFFT, so the reconstruction can be really distorted.
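To put a number on that, here is a tiny stand-alone C++ sketch (not tied to the code above) that mimics the value -> byte -> value round trip used by the conversion:

#include <cmath>
#include <cstdio>

int main()
{
    // Mimic the round trip: value in [0,1] -> 8-bit byte -> value in [0,1].
    double worst = 0.0;
    for (double v = 0.0; v <= 1.0; v += 0.0001)
    {
        unsigned char q = (unsigned char)std::fmin(255.0, v * 255.0); // truncates, like the (byte) cast
        double back = q / 255.0;
        worst = std::fmax(worst, std::fabs(v - back));
    }
    std::printf("worst round-trip error: %f\n", worst); // roughly 1/255 ~ 0.0039 per pass
    return 0;
}

That error is added on every Bitmap -> Complex -> Bitmap pass, which is why repeated use of the conversions distorts the result.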
[Edit1] Remedy
There are more options to repair this, for example:
use a floating-point complex matrix for computations and bitmaps only for visualization.
This is the safest way because you avoid additional conversion round-offs. This approach has the best precision, but you need to rewrite your DIP/CV algorithms to support complex-domain matrices instead of bitmaps, which requires no small amount of work.
rewrite your conversions to support real and imaginary part images
Your conversion is really bad, as it does not store/restore the Real and Imaginary parts as it should, and it also does not account for negative values (at least I do not see it; instead they are cut down to zero, which is WRONG). I would rewrite the conversion to this:
// conversion scales
float Re_ofset=256.0,Re_scale=512.0/255.0;
float Im_ofset=256.0,Im_scale=512.0/255.0;
private static Complex[,] ToComplex(BitmapData bmpRe,BitmapData bmpIm)
{
//...
byte* srcRe = (byte*)bmpRe.Scan0.ToPointer();
byte* srcIm = (byte*)bmpIm.Scan0.ToPointer();
Complex c = new Complex(0.0,0.0);
// for each line
for (int y = 0; y < height; y++)
{
// for each pixel
for (int x = 0; x < width; x++, srcRe++, srcIm++)
{
c.Real = ((float)*srcRe * Re_scale) - Re_ofset;
c.Imaginary = ((float)*srcIm * Im_scale) - Im_ofset;
complex2D[y, x] = c;
}
srcRe += offset; srcIm += offset;
}
//...
}
public static Bitmap ToBitmapRe(Complex[,] complex2D)
{
//...
float Re = (complex2D[y, x].Real+Re_ofset)/Re_scale;
Re = min(Re,255.0);
Re = max(Re, 0.0);
*address = (byte)Re;
//...
}
public static Bitmap ToBitmapIm(Complex[,] complex2D)
{
//...
float Im = (complex2D[y, x].Imaginary+Im_ofset)/Im_scale;
Im = min(Im,255.0);
Im = max(Im, 0.0);
*address = (byte)Im;
//...
}
Where:
Re_ofset = -min(complex2D[,].Real);
Im_ofset = -min(complex2D[,].Imaginary);
Re_scale = (max(complex2D[,].Real )-min(complex2D[,].Real ))/255.0;
Im_scale = (max(complex2D[,].Imaginary)-min(complex2D[,].Imaginary))/255.0;
or cover a bigger interval than the complex matrix values.
You can also encode both the Real and Imaginary parts into a single image: for example, the first half of the image could be the Real part and the second half the Imaginary part. In that case you do not need to change the function headers or names at all... but you would need to handle the images as 2 joined squares, each with a different meaning...
You can also use RGB images where R = Real, B = Imaginary, or any other encoding that suits you.
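For example, a minimal sketch of the "two joined squares" idea (plain C++ with float buffers assumed, not the C# API above):

// Pack Re into the left half and Im into the right half of one 2*width image.
void packComplexSideBySide(const float* re, const float* im,
                           int width, int height, float* packed /* 2*width x height */)
{
    for (int y = 0; y < height; ++y)
        for (int x = 0; x < width; ++x)
        {
            packed[y * 2 * width + x]         = re[y * width + x]; // left square: real part
            packed[y * 2 * width + width + x] = im[y * width + x]; // right square: imaginary part
        }
}

Unpacking is just the reverse indexing, so the existing function signatures taking a single image can stay unchanged.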
[Edit2] some examples to make my points clearer
example of approach #1
The image is in the form of a floating-point 2D complex matrix, and the images are created only for visualization. There is little rounding error this way. The values are not normalized, so the range is <0.0,255.0> per pixel/cell at first, but after transforms and scaling it can change greatly.
As you can see, I added scaling so all pixels are multiplied by 315 to actually see anything, because the FFT output values are small except for a few cells. But that is only for visualization; the complex matrix is unchanged.
example of approach #2
Well, as I mentioned before, you do not handle negative values, you normalize values to the range <0,1> and back by scaling and rounding off, and you use only 8 bits per pixel to store the sub-results. I tried to simulate that with my code and here is what I got (using the complex domain instead of the wrongly used power spectrum like you did). This is C++ source only, as a template example, since you do not have the functions and classes behind it:
transform t;
cplx_2D c;
rgb2i(bmp0);
c.ld(bmp0,bmp0);
null_im(c);
c.mul(1.0/255.0);
c.mul(255.0); c.st(bmp0,bmp1); c.ld(bmp0,bmp1); i2iii(bmp0); i2iii(bmp1); c.mul(1.0/255.0);
bmp0->SaveToFile("_out0_Re.bmp");
bmp1->SaveToFile("_out0_Im.bmp");
t. DFFT(c,c);
c.wrap();
c.mul(255.0); c.st(bmp0,bmp1); c.ld(bmp0,bmp1); i2iii(bmp0); i2iii(bmp1); c.mul(1.0/255.0);
bmp0->SaveToFile("_out1_Re.bmp");
bmp1->SaveToFile("_out1_Im.bmp");
c.wrap();
t.iDFFT(c,c);
c.mul(255.0); c.st(bmp0,bmp1); c.ld(bmp0,bmp1); i2iii(bmp0); i2iii(bmp1); c.mul(1.0/255.0);
bmp0->SaveToFile("_out2_Re.bmp");
bmp1->SaveToFile("_out2_Im.bmp");
And here are the sub-results:
As you can see, after the DFFT and wrap the image is really dark and most of the values are rounded off, so the result after unwrap and IDFFT is really poor.
Here are some explanations of the code:
c.st(bmpre,bmpim) is the same as your ToBitmap
c.ld(bmpre,bmpim) is the same as your ToComplex
c.mul(scale) multiplies complex matrix c by scale
rgb2i converts RGB to grayscale intensity <0,255>
i2iii converts grayscale intensity to a grayscale RGB image
I'm not really good at these puzzles, but double-check this division.
comp[y, x] = new Complex((float)*src / 255, comp[y, x].Imaginary);
You can lose precision, as described here in the
Complex class definition, in the Remarks section.
Maybe this happens in your case.
Hope this helps.
I am trying to write a simple shader to draw a 3D line with thickness, just to learn geometry shaders in Unity. However, I am facing a problem with the output from the shader when setting the input of the geometry shader to lineadj topology, which I suspect has something to do with the "weird" values of the third and fourth vertices taken in by the geometry shader.
This is how I generate my mesh from a C# script:
public static GameObject DrawShaderLine( Vector3[] posCont, float thickness, Material mat)
{
GameObject line = CreateObject ("Line", mat);
line.GetComponent<Renderer>().material.SetFloat("_Thickness", thickness);
int posContLen = posCont.Length;
int newVerticeLen = posContLen + 2;
Vector3[] newVertices = new Vector3[newVerticeLen];
newVertices[0] = posCont[0] + (posCont[0]-posCont[1]);
for(int i =0; i < posContLen; ++i)
{
newVertices[i+1] = posCont[i];
}
newVertices[newVerticeLen-1] = posCont[posContLen-1] + ( posCont[posContLen-1] - posCont[posContLen-2]);
List<int> newIndices = new List<int>();
for(int i = 1; i< newVerticeLen-2; ++i)
{
newIndices.Add(i-1);
newIndices.Add(i);
newIndices.Add(i+1);
newIndices.Add(i+2);
}
Mesh mesh = (line.GetComponent (typeof(MeshFilter)) as MeshFilter).mesh;
mesh.Clear ();
mesh.vertices = newVertices;
//mesh.triangles = newTriangles;
mesh.SetIndices(newIndices.ToArray(), MeshTopology.LineStrip, 0);
return line;
}
And this is the GS that is running in the shader program
v2g vert(appdata_base v)
{
v2g OUT;
OUT.pos = v.vertex;
return OUT;
}
[maxvertexcount(2)]
void geom(lineadj v2g p[4], inout LineStream<g2f> triStream)
{
float4x4 vp = mul(UNITY_MATRIX_MVP, _World2Object);
g2f OUT;
float4 pos0 = mul(vp, p[0].pos);
float4 pos1 = mul(vp, p[1].pos);
float4 pos2 = mul(vp, p[2].pos);
float4 pos3 = mul(vp, p[3].pos);
OUT.pos = pos1;
OUT.c = half4(1,0,0,1);
triStream.Append(OUT);
OUT.pos = pos2;
OUT.c = half4(0,1,0,1);
triStream.Append(OUT);
triStream.RestartStrip();
}
From my understanding, lineadj takes in 4 vertices, with vertex[0] and vertex[3] being the adjacent vertices. So by drawing vertex 1 and vertex 2 I am supposed to get my line drawn. However, this is the output I get:
The input vertex positions are (-20,0,0) and (0,-20,0), which are marked by the center 2 squares. The top-left and bottom-right cubes are the positions of the adjacent vertices generated by the C# function. As you can see, the line seems to be connecting to position (0,0,0) and the lines are flickering rapidly, which makes me suspect that a vertex in the GS is corrupted. The start of the line is colored red and the end of the line is colored green.
If I edit the GS to output pos0 and pos1 instead of pos1 and pos2, I get this,
with no flickering lines.
And if I plot pos2 and pos3, the result is way crazier (pos2 and pos3 seem to be rubbish values).
I have been trying to debug this for the whole day but with no progress, so I need some help here! Thanks in advance
When I try to sum up the N previous frames stored in a list and then divide by the number of frames, the background model produced is not as expected. I can tell because I've tried the algorithm in Matlab earlier on the same video.
class TemporalMeanFilter {
private:
int learningCount;
list<Mat> history;
int height, width;
Mat buildModel(){
if(history.size() == 0)
return Mat();
Mat image_avg(height, width, CV_8U, Scalar(0));
double alpha = (1.0/history.size());
list<Mat>::iterator it = history.begin();
cout << "History size: " << history.size() << " Weight per cell: " << alpha << endl;
while(it != history.end()){
image_avg += (*it * alpha);
it++;
}
return image_avg;
}
public:
TemporalMeanFilter(int height, int width, int learningCount){
this->learningCount = learningCount;
this->height = height;
this->width = width;
}
void applyFrameDifference(Mat& frame_diff, Mat& bg_model, Mat& input_frame){
if(history.size() == learningCount)
history.pop_front();
history.push_back(input_frame);
bg_model = buildModel();
frame_diff = bg_model - input_frame;
}
};
//The main looks like this
// ... reading video from file
TemporalMeanFilter meanFilter(height, width, 50); //background subtraction algorithm
meanFilter.applyFrameDifference(diff_frame, bg_model, curr_frame);
//... displaying on screen ... prog ends
Image:
http://imagegur.com/images/untitled.png
The left one is the bg_model, the middle is the curr_frame, and the right one is the output.
Maybe it's because of the rounding off done on CV_8U? I tried changing to CV_32FC1, but then the program just crashed because for some reason it couldn't add two CV_32FC1 matrices.
Any insight would be greatly appreciated. Thanks!
More Info:
Inside the class, I now keep the average in a CV_16UC1 Mat to prevent clipping; however, it results in an error after successive additions.
The add function / operator+ both change the type of the result from CV_16UC1 to CV_8UC1, and this error is caused by that. Any suggestion on how to ask it to preserve the original datatype? (PS: I asked politely... didn't work)
background_model += *it;
OpenCV Error: Bad argument (When the input arrays in add/subtract/multiply/divide functions have different types, the output array type must be explicitly specified) in unknown function, file C:\buildslave64\win64_amdocl\2_4_PackSlave-win32-vc11-shared\opencv\modules\core\src\arithm.cpp, line 1313
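From the message it sounds like the output type has to be named explicitly. A minimal sketch of what that might look like with cv::add and its dtype parameter (a hypothetical helper, not yet wired into the class above):

#include <opencv2/core/core.hpp>

// Accumulate an 8-bit frame into a 16-bit running sum with an explicit output type.
void accumulate16U(cv::Mat& background_model /* CV_16UC1 */, const cv::Mat& frame /* CV_8UC1 */)
{
    cv::Mat sum;
    cv::add(background_model, frame, sum, cv::noArray(), CV_16UC1); // dtype given explicitly
    background_model = sum;
}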
You're right that it's almost certainly the rounding errors you get by accumulating scaled greyscale values. There's no reason why it should crash using floating point pixels though, so you should try something like this:
Mat buildModel()
{
if (history.size() == 0)
return Mat();
Mat image_avg = Mat::zeros(height, width, CV_32FC1);
double alpha = (1.0 / history.size());
list<Mat>::iterator it = history.begin();
while (it != history.end())
{
Mat image_temp;
(*it).convertTo(image_temp, CV_32FC1);
image_avg += image_temp;
it++;
}
image_avg *= alpha;
return image_avg;
}
Depending on what you want to do with the result, you may need to normalize it or rescale it or convert back to greyscale before display etc.
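If all you need at that point is an 8-bit image for display, one possibility (a sketch, assuming the averaged values are already in the 0-255 range) is cv::Mat::convertTo, which saturates instead of wrapping:

#include <opencv2/core/core.hpp>

// Convert the CV_32FC1 average returned by buildModel() to 8-bit for display.
cv::Mat toDisplayable(const cv::Mat& image_avg)
{
    cv::Mat display;
    image_avg.convertTo(display, CV_8UC1); // add a scale factor here if you rescaled elsewhere
    return display;
}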
I'm learning DirectX using the book "Sherrod A., Jones W. - Beginning DirectX 11 Game Programming - 2011". Now I'm exploring the 4th chapter, about drawing text.
Please help me fix the function that I'm using to draw a string on the screen. I've already loaded the font texture, and in the function I create some sprites with letters and define texture coordinates for them. This compiles correctly but doesn't draw anything. What's wrong?
bool DirectXSpriteGame :: DrawString(char* StringToDraw, float StartX, float StartY)
{
//VAR
HRESULT D3DResult; //The result of D3D functions
int i; //Counters
const int IndexA = static_cast<char>('A'); //ASCII index of letter A
const int IndexZ = static_cast<char>('Z'); //ASCII index of letter Z
int StringLenth = strlen(StringToDraw); //Lenth of drawing string
float ScreenCharWidth = static_cast<float>(LETTER_WIDTH) / static_cast<float>(SCREEN_WIDTH); //Width of the single char on the screen(in %)
float ScreenCharHeight = static_cast<float>(LETTER_HEIGHT) / static_cast<float>(SCREEN_HEIGHT); //Height of the single char on the screen(in %)
float TexelCharWidth = 1.0f / static_cast<float>(LETTERS_NUM); //Width of the char texel(in the texture %)
float ThisStartX; //The start x of the current letter, drawing
float ThisStartY; //The start y of the current letter, drawing
float ThisEndX; //The end x of the current letter, drawing
float ThisEndY; //The end y of the current letter, drawing
int LetterNum; //Letter number in the loaded font
int ThisLetter; //The current letter
D3D11_MAPPED_SUBRESOURCE MapResource; //Map resource
VertexPos* ThisSprite; //Vertecies of the current sprite, drawing
//VAR
//Clamping string, if too long
if(StringLenth > LETTERS_NUM)
{
StringLenth = LETTERS_NUM;
}
//Mapping resource
D3DResult = _DeviceContext -> Map(_vertexBuffer, 0, D3D11_MAP_WRITE_DISCARD, 0, &MapResource);
if(FAILED(D3DResult))
{
throw("Failed to map resource");
}
ThisSprite = (VertexPos*)MapResource.pData;
for(i = 0; i < StringLenth; i++)
{
//Creating geometry for the letter sprite
ThisStartX = StartX + ScreenCharWidth * static_cast<float>(i);
ThisStartY = StartY;
ThisEndX = ThisStartX + ScreenCharWidth;
ThisEndY = StartY + ScreenCharHeight;
ThisSprite[0].Position = XMFLOAT3(ThisEndX, ThisEndY, 1.0f);
ThisSprite[1].Position = XMFLOAT3(ThisEndX, ThisStartY, 1.0f);
ThisSprite[2].Position = XMFLOAT3(ThisStartX, ThisStartY, 1.0f);
ThisSprite[3].Position = XMFLOAT3(ThisStartX, ThisStartY, 1.0f);
ThisSprite[4].Position = XMFLOAT3(ThisStartX, ThisEndY, 1.0f);
ThisSprite[5].Position = XMFLOAT3(ThisEndX, ThisEndY, 1.0f);
ThisLetter = static_cast<char>(StringToDraw[i]);
//Defining the letter place(number) in the font
if(ThisLetter < IndexA || ThisLetter > IndexZ)
{
//Invalid character, the last character in the font, loaded
LetterNum = IndexZ - IndexA + 1;
}
else
{
LetterNum = ThisLetter - IndexA;
}
//Unwraping texture on the geometry
ThisStartX = TexelCharWidth * static_cast<float>(LetterNum);
ThisStartY = 0.0f;
ThisEndY = 1.0f;
ThisEndX = ThisStartX + TexelCharWidth;
ThisSprite[0].TextureCoords = XMFLOAT2(ThisEndX, ThisEndY);
ThisSprite[1].TextureCoords = XMFLOAT2(ThisEndX, ThisStartY);
ThisSprite[2].TextureCoords = XMFLOAT2(ThisStartX, ThisStartY);
ThisSprite[3].TextureCoords = XMFLOAT2(ThisStartX, ThisStartY);
ThisSprite[4].TextureCoords = XMFLOAT2(ThisStartX, ThisEndY);
ThisSprite[5].TextureCoords = XMFLOAT2(ThisEndX, ThisEndY);
ThisSprite += VERTEX_IN_RECT_NUM;
}
for(i = 0; i < StringLenth; i++, ThisSprite -= VERTEX_IN_RECT_NUM);
_DeviceContext -> Unmap(_vertexBuffer, 0);
_DeviceContext -> Draw(VERTEX_IN_RECT_NUM * StringLenth, 0);
return true;
}
Although the piece of code constructing the vertex array seems correct to me at first glance, it looks like you are trying to draw your vertices with a shader that has not been set yet!
It is difficult to answer you precisely without looking at the whole code, but I can guess that you will need to do something like this:
1) Create Vertex and Pixel Shaders by compiling them first from their respective buffers
2) Create the Input Layout description, which describes the Input Buffers that will be read by the Input Assembler stage. It will have to match your VertexPos structure and your shader structure.
3) Set the Shader parameters.
4) Only now can you set the shader rendering parameters: set the InputLayout, as well as the Vertex and Pixel Shaders that will be used to render your triangles, with something like:
_DeviceContext -> Unmap(_vertexBuffer, 0);
_DeviceContext->IASetInputLayout(myInputLayout);
_DeviceContext->VSSetShader(myVertexShader, NULL, 0); // Set Vertex shader
_DeviceContext->PSSetShader(myPixelShader, NULL, 0); // Set Pixel shader
_DeviceContext -> Draw(VERTEX_IN_RECT_NUM * StringLenth, 0);
This link should help you achieve what you want to do : http://www.rastertek.com/dx11tut12.html
Also, I recommend you set an IndexBuffer and use the DrawIndexed method to render your triangles, for performance reasons: it allows the graphics adapter to store vertices in a vertex cache, so recently used vertices can be fetched from the cache instead of being read from the vertex buffer.
More about this concern can be found on MSDN : http://msdn.microsoft.com/en-us/library/windows/desktop/bb147325(v=vs.85).aspx
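As a rough sketch of that index-buffer path (myDevice, myIndices and IndexCount are placeholder names, not from your code):

// Creating the index buffer (done once, after filling myIndices with UINTs):
ID3D11Buffer* myIndexBuffer = nullptr;
D3D11_BUFFER_DESC ibDesc = {};
ibDesc.Usage = D3D11_USAGE_DEFAULT;
ibDesc.ByteWidth = sizeof(UINT) * IndexCount;
ibDesc.BindFlags = D3D11_BIND_INDEX_BUFFER;
D3D11_SUBRESOURCE_DATA ibData = {};
ibData.pSysMem = myIndices; // array of UINT indices describing the triangles
myDevice->CreateBuffer(&ibDesc, &ibData, &myIndexBuffer);
// At draw time, bind it and call DrawIndexed instead of Draw:
_DeviceContext->IASetIndexBuffer(myIndexBuffer, DXGI_FORMAT_R32_UINT, 0);
_DeviceContext->IASetPrimitiveTopology(D3D11_PRIMITIVE_TOPOLOGY_TRIANGLELIST);
_DeviceContext->DrawIndexed(IndexCount, 0, 0);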
Hope this helps!
P.S : Also, don't forget to release the resources after using them by calling Release().