I am trying to save patches from OpenSubdiv subdivision using Open Cascade, for an app I am building around subdivision surfaces. This is pretty similar to this thread: https://groups.google.com/g/opensubdiv/c/ujdolv75a4Q/m/djyq6xIqAAAJ
But I am not quite getting the right result and I am not sure what I am missing.
The overall shape of the output looks correct, but the surfaces protrude into each other and are not "smooth" at the boundaries. So it looks like I am either not querying the patches correctly or doing something wrong when converting the patches into Open Cascade's "Geom_BezierSurface". If you could just point me to where I should start looking, that would be really helpful. I have spent some time on this but am not getting anywhere.
Here is what my output looks like right now with 160 faces created.
If I just render the first two faces from the output, they look like this
And here is my code:
#include "Geom_BezierSurface.hxx"
#include <opensubdiv/far/topologyDescriptor.h>
#include <opensubdiv/far/primvarRefiner.h>
#include <opensubdiv/far/patchTableFactory.h>
#include <opensubdiv/far/patchMap.h>
#include <opensubdiv/far/ptexIndices.h>
#include <cassert>
#include <cstdio>
#include <cstring>
#include <cfloat>
#include <vector>
using namespace OpenSubdiv;
typedef double Real;
static int const g_nverts = 5;
static double const g_verts[15] = { 0.0f, 0.0f, 20.0f,
0.0f, -20.0f, 0.0f,
20.0f, 0.0f, 0.0f,
0.0f, 20.0f, 0.0f,
-20.0f, 0.0f, 0.0f, };
static int const g_vertsperface[5] = { 3, 3, 3, 3, 4 };
static int const g_nfaces = 5;
static int const g_faceverts[16] = { 0, 1, 2,
0, 2, 3,
0, 3, 4,
0, 4, 1,
4, 3, 2, 1 };
static int const g_ncreases = 4;
static int const g_creaseverts[8] = { 4, 3, 3, 2, 2, 1, 1, 4 };
static float const g_creaseweights[4] = { 3.0f, 3.0f, 3.0f, 3.0f };
// Creates a Far::TopologyRefiner from the pyramid shape above
static Far::TopologyRefiner* createTopologyRefiner();
static Far::TopologyRefiner* createTopologyRefiner()
{
typedef Far::TopologyDescriptor Descriptor;
Sdc::SchemeType type = OpenSubdiv::Sdc::SCHEME_CATMARK;
Sdc::Options options;
options.SetVtxBoundaryInterpolation(Sdc::Options::VTX_BOUNDARY_EDGE_ONLY);
Descriptor desc;
desc.numVertices = g_nverts;
desc.numFaces = g_nfaces;
desc.numVertsPerFace = g_vertsperface;
desc.vertIndicesPerFace = g_faceverts;
desc.numCreases = g_ncreases;
desc.creaseVertexIndexPairs = g_creaseverts;
desc.creaseWeights = g_creaseweights;
// Instantiate a FarTopologyRefiner from the descriptor.
Far::TopologyRefiner* refiner =
Far::TopologyRefinerFactory<Descriptor>::Create(desc,
Far::TopologyRefinerFactory<Descriptor>::Options(type, options));
return refiner;
}
//------------------------------------------------------------------------------
// Vertex container implementation.
//
struct Vertex {
// Minimal required interface ----------------------
Vertex() { }
void Clear(void* = 0) {
point[0] = point[1] = point[2] = 0.0f;
}
void AddWithWeight(Vertex const& src, Real weight) {
point[0] += weight * src.point[0];
point[1] += weight * src.point[1];
point[2] += weight * src.point[2];
}
Real point[3];
};
void CModelingDoc::OnFace()
{
// Generate a FarTopologyRefiner (see far_tutorial_0 for details).
Far::TopologyRefiner* refiner = createTopologyRefiner();
// Adaptively refine the topology with an isolation level capped at 3
// because the sharpest crease in the shape is 3.0f (in g_creaseweights[])
int maxIsolation = 3;
refiner->RefineAdaptive(
Far::TopologyRefiner::AdaptiveOptions(maxIsolation));
// Generate a set of Far::PatchTable that we will use to evaluate the
// surface limit
Far::PatchTableFactory::Options patchOptions;
patchOptions.endCapType =
Far::PatchTableFactory::Options::ENDCAP_GREGORY_BASIS;
Far::PatchTable const* patchTable =
Far::PatchTableFactory::Create(*refiner, patchOptions);
// Compute the total number of points we need to evaluate patchtable.
// we use local points around extraordinary features.
int nRefinerVertices = refiner->GetNumVerticesTotal();
int nLocalPoints = patchTable->GetNumLocalPoints();
// Create a buffer to hold the position of the refined verts and
// local points, then copy the coarse positions at the beginning.
std::vector<Vertex> verts(nRefinerVertices + nLocalPoints);
memcpy(&verts[0], g_verts, g_nverts * 3 * sizeof(double));
// Adaptive refinement may result in fewer levels than maxIsolation.
int nRefinedLevels = refiner->GetNumLevels();
// Interpolate vertex primvar data : they are the control vertices
// of the limit patches (see far_tutorial_0 for details)
Vertex* src = &verts[0];
for (int level = 1; level < nRefinedLevels; ++level)
{
Vertex* dst = src + refiner->GetLevel(level - 1).GetNumVertices();
Far::PrimvarRefiner(*refiner).Interpolate(level, src, dst);
src = dst;
}
// Evaluate local points from interpolated vertex primvars.
patchTable->ComputeLocalPointValues(&verts[0], &verts[nRefinerVertices]);
std::vector<TopoDS_Face> mySurfaces;
// Loop through each patch and save out 4x4 vertices each
int na = patchTable->GetNumPatchArrays();
bool error;
for (int i = 0; i < na; i++)
{
Far::PatchDescriptor pd = patchTable->GetPatchArrayDescriptor(i);
if (pd.GetType() == Far::PatchDescriptor::REGULAR)
{
Far::ConstIndexArray arraycvs = patchTable->GetPatchArrayVertices(i);
int np = patchTable->GetNumPatches(i);
for (int patch = 0; patch < np; patch++)
{
Far::ConstIndexArray cvs = patchTable->GetPatchVertices(i, patch);
int cvCount = cvs.size();
TColgp_Array2OfPnt surfVerts(1, 4, 1, 4);
for (int cv = 0; cv < cvCount; cv++)
{
int division = (int)((cv + 1) / 4);
int remainder = (cv + 1) % 4;
int firstIndex = remainder == 0 ? division : division + 1;
int secondIndex = remainder == 0 ? 4 : remainder;
surfVerts.SetValue(firstIndex, secondIndex, gp_Pnt(verts[cvs[cv]].point[0], verts[cvs[cv]].point[1], verts[cvs[cv]].point[2]));
}
Handle(Geom_BezierSurface) BZ1 =
new Geom_BezierSurface(surfVerts);
TopoDS_Face newFace = BRepBuilderAPI_MakeFace(BZ1, Precision::Confusion());
mySurfaces.push_back(newFace);
}
}
}
for (int i = 0; i < mySurfaces.size(); i++)
{
Quantity_NameOfColor myColor = static_cast<Quantity_NameOfColor>((i % 505) + 1);
Handle(AIS_Shape) myFace = new AIS_Shape(mySurfaces[i]);
myAISContext->SetColor(myFace, myColor, Standard_False);
myAISContext->SetMaterial(myFace, Graphic3d_NOM_PLASTIC, Standard_False);
myAISContext->Display(myFace, Standard_False);
}
}
Somewhere in that code you have the wrong knot vector (I posted the original question in the thread you referenced). From the Google Groups thread:
Ah, figured out that my knot vector was the issue:
{ 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0 };
What I needed was:
{ 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0 };
In other words, the 16 control vertices of a REGULAR patch are uniform bicubic B-spline control points, not Bezier control points. Geom_BezierSurface implies the clamped knot vector above, which is why each face has roughly the right shape but bulges into its neighbours and loses smoothness at the patch boundaries. Build each patch as a B-spline surface with the uniform knot vector instead, or convert the B-spline control points to Bezier form before constructing the Geom_BezierSurface.
So I am creating an endless terrain.
I can create the terrain, but my chunks have gaps between them and they don't align properly.
I think the problem might be caused by my Noise Generation script, but I am not sure.
This is my Noise Generation script:
public static class Noise_GENERATOR
{
public static float[,] GenerateNoise(int chunkSize, int octaves, int seed, float noiseScale, float persistence, float lacunarity, Vector2 offset)
{
float[,] noiseMap = new float[chunkSize, chunkSize];
System.Random prng = new System.Random(seed);
Vector2[] octaveOffsets = new Vector2[octaves];
float maxPossibleHeight = 0;
float amplitude = 1;
float frequency = 1;
for (int i = 0; i < octaves; i++)
{
float offsetX = prng.Next(-100000, 100000) + offset.x;
float offsetY = prng.Next(-100000, 100000) + offset.y;
octaveOffsets[i] = new Vector2(offsetX, offsetY);
maxPossibleHeight += amplitude;
amplitude *= persistence;
}
if (noiseScale <= 0)
{
noiseScale = 0.0001f;
}
float maxLocalNoiseHeight = float.MinValue;
float minLocalNoiseHeight = float.MaxValue;
float halfWidth = chunkSize / 2f;
float halfHeight = chunkSize / 2f;
for (int y = 0; y < chunkSize; y++)
{
for (int x = 0; x < chunkSize; x++)
{
amplitude = 1;
frequency = 1;
float noiseHeight = 0;
for (int i = 0; i < octaves; i++)
{
float sampleX = (x-halfWidth + octaveOffsets[i].x) / noiseScale * frequency;
float sampleY = (y-halfHeight + octaveOffsets[i].y) / noiseScale * frequency;
float perlinValue = Mathf.PerlinNoise(sampleX, sampleY) * 2 - 1;
noiseHeight += perlinValue * amplitude;
amplitude *= persistence;
frequency *= lacunarity;
}
if (noiseHeight > maxLocalNoiseHeight)
{
maxLocalNoiseHeight = noiseHeight;
}
else if (noiseHeight < minLocalNoiseHeight)
{
minLocalNoiseHeight = noiseHeight;
}
noiseMap[x, y] = noiseHeight;
float normalizedHeight = (noiseMap[x, y] + 1) / (maxPossibleHeight / 0.9f);
noiseMap[x, y] = Mathf.Clamp(normalizedHeight, 0, int.MaxValue);
}
}
return noiseMap;
}
}
To generate the height of the mesh, I am using an AnimationCurve and multiplying its value by the elevationScale variable.
float height = heightCurve.Evaluate(noiseMap[x, y]) * elevationScale;
I thought about accessing each terrain chunk, getting the heights of its edges and matching them together, but that would look really weird and I don't know how to do it properly.
EDIT: Here, just in case, are my Mesh generator script and how I am creating the terrain chunk.
public static class Mesh_GENERATOR
{
public static MeshData GenerateChunkMesh(int chunkSize,float[,] noiseMapData,float elevationScale,AnimationCurve terrainCurve,int LODlevel )
{
float[,] noiseMap = noiseMapData;
AnimationCurve heightCurve = new AnimationCurve(terrainCurve.keys);
//Setup variables
Vector3[] vertices = new Vector3[chunkSize * chunkSize];
int[] triangles = new int[(chunkSize - 1) * (chunkSize - 1) * 6];
Vector2[] uvs = new Vector2[chunkSize * chunkSize];
int triangle = 0;
int levelOfDetailIncrement = (LODlevel == 0) ? 1 : LODlevel * 2;
int numberOfVerticesPerRow = (chunkSize) / levelOfDetailIncrement + 1;
for (int y = 0; y < chunkSize; y++)
{
for (int x = 0; x < chunkSize; x++)
{
int i = y * chunkSize + x;
//Create vertices at position and center mesh
float height = heightCurve.Evaluate(noiseMap[x, y]) * elevationScale;
Vector2 percentPosition = new Vector2(x / (chunkSize - 1f), y / (chunkSize -1f ));
Vector3 vertPosition = new Vector3(percentPosition.x * 2 - 1, 0, percentPosition.y * 2 - 1) * chunkSize/2;
vertPosition.y = height;
vertices[i] = vertPosition;
uvs[i] = new Vector2((float)x / chunkSize, (float)y / chunkSize);
//Construct triangles
if (x != chunkSize - 1 && y != chunkSize - 1)
{
triangles[triangle + 0] = i + chunkSize;
triangles[triangle + 1] = i + chunkSize + 1;
triangles[triangle + 2] = i;
triangles[triangle + 3] = i + chunkSize + 1;
triangles[triangle + 4] = i + 1;
triangles[triangle + 5] = i;
triangle += 6;
}
}
}
MeshData meshData = new MeshData(chunkSize, vertices, triangles, uvs);
return meshData;
}
}
public class MeshData
{
public int chunkSize;
public Vector3[] vertices;
public int[] triangles;
public Vector2[] uvs;
public Mesh mesh;
public MeshData(int chunkSize,Vector3[] vertices,int[] triangles, Vector2[] uvs)
{
this.chunkSize = chunkSize;
this.vertices = vertices;
this.triangles = triangles;
this.uvs = uvs;
}
public Mesh CreateMesh()
{
if(mesh == null) { mesh = new Mesh(); } else { mesh.Clear(); }
mesh.indexFormat = UnityEngine.Rendering.IndexFormat.UInt32;
mesh.vertices = vertices;
mesh.triangles = triangles;
mesh.uv = uvs;
mesh.RecalculateNormals();
return mesh;
}
}
And here is my TerrainChunk
public class TerrainChunk
{
GameObject meshObject;
Vector2 position;
Bounds bounds;
MeshRenderer meshRenderer;
MeshFilter meshFilter;
public TerrainChunk(Vector2 coord, int chunkSize, Transform parent,Material terrainMaterial)
{
position = coord * chunkSize;
bounds = new Bounds(position, Vector2.one * chunkSize);
Vector3 positionV3 = new Vector3(position.x , 0, position.y );
Debug.Log("CHUNK: COORD" + coord + "POSITION" + position + "POSITION3V" + positionV3);
meshObject = new GameObject("Terrain Chunk");
meshFilter = meshObject.AddComponent<MeshFilter>();
meshRenderer = meshObject.AddComponent<MeshRenderer>();
meshRenderer.material = terrainMaterial;
meshObject.transform.position = positionV3;
meshObject.transform.parent = parent;
SetVisible(false);
worldGenerator.RequestMapData(position,OnNoiseDataReceived);
}
void OnNoiseDataReceived(MapData mapData)
{
worldGenerator.RequestMeshData(mapData, OnMeshDataReceived);
}
void OnMeshDataReceived(MeshData meshData)
{
meshFilter.mesh = meshData.CreateMesh();
}
public void UpdateTerrainChunk(Vector2 viewerPosition, int maxRenderDistance)
{
float viewerDstFromNearestEdge = Mathf.Sqrt(bounds.SqrDistance(viewerPosition));
bool visible = viewerDstFromNearestEdge <= maxRenderDistance;
SetVisible(visible);
}
public void SetVisible(bool visible)
{
meshObject.SetActive(visible);
}
public bool IsVisible()
{
return meshObject.activeSelf;
}
}
If I understand all your values and variables correctly, the problem might lie in the Noise Generator.
You need to make chunkSize bigger by 1, so if you are passing 250 you will need to pass 251, because the for loop in the Noise Generator stops at 249 and not 250 (I might be wrong about this). If you do this, the mesh generator will have the right values for its calculations.
So your chunkSize variable should look like this:
chunkSize = chunkSize + 1;
There will still be smaller gaps and the meshes will clip through each other, so to fix this you need to reposition the chunk, like this (assuming your coord serves as a direction in which the chunk is created from your World generator object, for example chunks pointing north are x:0 y:1, chunks pointing west are x:-1 y:0, north-west chunks are x:-1 y:-1, and so on). You may need to change the 0.5f to your own values so the chunks align properly:
Vector3 positionV3 = new Vector3(position.x + (coord.x + 0.5f), 0, position.y + (coord.y + 0.5f) );
There may still be some smaller gaps visible in the terrain, but those can be fixed by playing with the values, or you can try to access each chunk, get its edge vertices and their heights, and connect the chunks together that way.
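Putting the two changes together, here is a minimal sketch. It reuses the names from your code (coord, chunkSize) and the 0.5f offset from above, which is an assumption you may still need to tune for your layout:
using UnityEngine;

public static class ChunkLayout
{
    // One extra noise sample per axis so adjacent chunks share their edge row/column of heights.
    public static int NoiseMapSize(int chunkSize)
    {
        return chunkSize + 1;
    }

    // World-space position for the chunk at grid coordinate 'coord'.
    // The 0.5f shift is a guess that compensates for the centered mesh; adjust it to your setup.
    public static Vector3 ChunkPosition(Vector2 coord, int chunkSize)
    {
        Vector2 position = coord * chunkSize;
        return new Vector3(position.x + (coord.x + 0.5f), 0f, position.y + (coord.y + 0.5f));
    }
}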
I am new to Direct3D 11 and I am currently trying to create a texture programmatically within my code, using this code I found online:
// Some Constants
int w = 256;
int h = 256;
int bpp = 4;
int *buf = new int[w*h];
//declarations
ID3D11Texture2D* tex;
D3D11_TEXTURE2D_DESC sTexDesc;
D3D11_SUBRESOURCE_DATA tbsd;
// filling the image
for (int i = 0; i<h; i++)
for (int j = 0; j<w; j++)
{
if ((i & 32) == (j & 32))
buf[i*w + j] = 0x00000000;
else
buf[i*w + j] = 0xffffffff;
}
// setting up D3D11_SUBRESOURCE_DATA
tbsd.pSysMem = (void *)buf;
tbsd.SysMemPitch = w*bpp;
tbsd.SysMemSlicePitch = w*h*bpp; // Not needed since this is a 2d texture
// initializing sTexDesc
sTexDesc.Width = w;
sTexDesc.Height = h;
sTexDesc.MipLevels = 1;
sTexDesc.ArraySize = 1;
sTexDesc.Format = DXGI_FORMAT_R8G8B8A8_UNORM;
sTexDesc.SampleDesc.Count = 1;
sTexDesc.SampleDesc.Quality = 0;
sTexDesc.Usage = D3D11_USAGE_DEFAULT;
sTexDesc.BindFlags = D3D11_BIND_SHADER_RESOURCE;
sTexDesc.CPUAccessFlags = 0;
sTexDesc.MiscFlags = 0;
hr = m_pd3dDevice->CreateTexture2D(&sTexDesc, &tbsd, &tex);
and that's all fine and dandy, but I am a bit confused about how to actually load this into the shader. Below I initialized this ID3D11ShaderResourceView:
ID3D11ShaderResourceView* m_pTextureRV = nullptr;
I found in the Microsoft tutorials that I need to use CreateShaderResourceView. But how exactly do I use it? I tried this:
hr = m_pd3dDevice->CreateShaderResourceView(tex, NULL , m_pTextureRV);
but it gives me an error, telling me that m_pTextureRV is not a valid argument for the function. What am I doing wrong here?
The correct way to call that function is:
hr = m_pd3dDevice->CreateShaderResourceView(tex, nullptr, &m_pTextureRV);
Remember that ID3D11ShaderResourceView* is a pointer to an interface. You need a pointer-to-a-pointer to get a new instance of one back.
You should really consider using a COM smart-pointer like Microsoft::WRL::ComPtr instead of raw pointers for these interfaces.
Once you have created the shader resource view for your texture object, then you need to associate it with whatever slot the HLSL expects to find it in. So, for example, if you were to write an HLSL source file as:
Texture2D txDiffuse : register( t0 );
SamplerState samLinear : register( s0 );
float4 PS(float2 tex : TEXCOORD0) : SV_Target
{
return txDiffuse.Sample( samLinear, tex );
}
Then compile it as a Pixel Shader, and bind it to the render pipeline via PSSetShader. Then you'd need to call:
ID3D11ShaderResourceView* srv[1] = { m_pTextureRV };
m_pImmediateContext->PSSetShaderResources( 0, 1, srv );
Of course you also need an ID3D11SamplerState* sampler bound as well:
ID3D11SamplerState* m_pSamplerLinear = nullptr;
D3D11_SAMPLER_DESC sampDesc = {};
sampDesc.Filter = D3D11_FILTER_MIN_MAG_MIP_LINEAR;
sampDesc.AddressU = D3D11_TEXTURE_ADDRESS_WRAP;
sampDesc.AddressV = D3D11_TEXTURE_ADDRESS_WRAP;
sampDesc.AddressW = D3D11_TEXTURE_ADDRESS_WRAP;
sampDesc.ComparisonFunc = D3D11_COMPARISON_NEVER;
sampDesc.MinLOD = 0;
sampDesc.MaxLOD = D3D11_FLOAT32_MAX;
hr = m_pd3dDevice->CreateSamplerState( &sampDesc, &m_pSamplerLinear );
Then when you are about to draw:
m_pImmediateContext->PSSetSamplers( 0, 1, &m_pSamplerLinear );
I strongly recommend you check out the DirectX Tool Kit and the tutorials there.
Related post: Stride of the BitmapData is different than the original dimension.
I have taken the source code from here and modified it.
The code generates a variety of exceptions on different occasions.
Error in BitmapLocker.cs
At the following line in Lock(),
// Copy data from IntegerPointer to _imageData
Marshal.Copy(IntegerPointer, _imageData, 0, _imageData.Length);
The following exception is being generated:
An unhandled exception of type 'System.AccessViolationException'
occurred in mscorlib.dll
Additional information: Attempted to read or write protected memory.
This is often an indication that other memory is corrupt.
For the following driver code,
double[,] mask = new double[,]
{
{ .11, .11, .11, },
{ .11, .11, .11, },
{ .11, .11, .11, },
};
Bitmap bitmap = ImageDataConverter.ToBitmap(mask);
BitmapLocker locker = new BitmapLocker(bitmap);
locker.Lock();
for (int i = 0; i < bitmap.Width; i++)
{
for (int j = 0; j < bitmap.Height; j++)
{
Color c = locker.GetPixel(i, j);
locker.SetPixel(i, j, c);
}
}
locker.Unlock();
At the following line in GetPixel(),
if (i > dataLength)
{
throw new IndexOutOfRangeException();
}
An unhandled exception of type 'System.IndexOutOfRangeException'
occurred in Simple.ImageProcessing.Framework.dll
Additional information: Index was outside the bounds of the array.
At the following line in SetPixel(),
if (ColorDepth == 8)
{
_imageData[i] = color.B;
}
An unhandled exception of type 'System.Exception' occurred in
Simple.ImageProcessing.Framework.dll
Additional information: (0, 0), 262144, Index was outside the bounds
of the array., i=262144
Error in Driver program
At the line,
Color c = bmp.GetPixel(i, j);
An unhandled exception of type 'System.InvalidOperationException'
occurred in System.Drawing.dll
Additional information: Bitmap region is already locked.
Source Code:
public class BitmapLocker : IDisposable
{
//private properties
Bitmap _bitmap = null;
bool _isLocked = false;
BitmapData _bitmapData = null;
private byte[] _imageData = null;
//public properties
public IntPtr IntegerPointer { get; private set; }
public int Width { get { return _bitmap.Width; } }
public int Height { get { return _bitmap.Height; } }
public int Stride { get { return _bitmapData.Stride; } }
public int ColorDepth { get { return Bitmap.GetPixelFormatSize(_bitmap.PixelFormat); } }
public int Channels { get { return ColorDepth / 8; } }
public int PaddingOffset { get { return _bitmapData.Stride - (_bitmap.Width * Channels); } }
public PixelFormat ImagePixelFormat { get { return _bitmap.PixelFormat; } }
public bool IsGrayscale { get { return Grayscale.IsGrayscale(_bitmap); } }
//Constructor
public BitmapLocker(Bitmap source)
{
IntegerPointer = IntPtr.Zero;
this._bitmap = source;
}
/// Lock bitmap
public void Lock()
{
if (_isLocked == false)
{
try
{
// Lock bitmap (so that no movement of data by .NET framework) and return bitmap data
_bitmapData = _bitmap.LockBits(
new Rectangle(0, 0, _bitmap.Width, _bitmap.Height),
ImageLockMode.ReadWrite,
_bitmap.PixelFormat);
// Create byte array to copy pixel values
int noOfBitsNeededForStorage = _bitmapData.Stride * _bitmapData.Height;
int noOfBytesNeededForStorage = noOfBitsNeededForStorage / 8;
_imageData = new byte[noOfBytesNeededForStorage * ColorDepth];//# of bytes needed for storage
IntegerPointer = _bitmapData.Scan0;
// Copy data from IntegerPointer to _imageData
Marshal.Copy(IntegerPointer, _imageData, 0, _imageData.Length);
_isLocked = true;
}
catch (Exception)
{
throw;
}
}
else
{
throw new Exception("Bitmap is already locked.");
}
}
/// Unlock bitmap
public void Unlock()
{
if (_isLocked == true)
{
try
{
// Copy data from _imageData to IntegerPointer
Marshal.Copy(_imageData, 0, IntegerPointer, _imageData.Length);
// Unlock bitmap data
_bitmap.UnlockBits(_bitmapData);
_isLocked = false;
}
catch (Exception)
{
throw;
}
}
else
{
throw new Exception("Bitmap is not locked.");
}
}
public Color GetPixel(int x, int y)
{
Color clr = Color.Empty;
// Get color components count
int channels = ColorDepth / 8;
// Get start index of the specified pixel
int i = (Height - y - 1) * Stride + x * channels;
int dataLength = _imageData.Length - channels;
if (i > dataLength)
{
throw new IndexOutOfRangeException();
}
if (ColorDepth == 32) // For 32 bpp get Red, Green, Blue and Alpha
{
byte b = _imageData[i];
byte g = _imageData[i + 1];
byte r = _imageData[i + 2];
byte a = _imageData[i + 3]; // a
clr = Color.FromArgb(a, r, g, b);
}
if (ColorDepth == 24) // For 24 bpp get Red, Green and Blue
{
byte b = _imageData[i];
byte g = _imageData[i + 1];
byte r = _imageData[i + 2];
clr = Color.FromArgb(r, g, b);
}
if (ColorDepth == 8)
// For 8 bpp get color value (Red, Green and Blue values are the same)
{
byte c = _imageData[i];
clr = Color.FromArgb(c, c, c);
}
return clr;
}
public void SetPixel(int x, int y, Color color)
{
// Get color components count
int cCount = ColorDepth / 8;
// Get start index of the specified pixel
int i = ((Height - y -1) * Stride + x * cCount);
try
{
if (ColorDepth == 32) // For 32 bpp set Red, Green, Blue and Alpha
{
_imageData[i] = color.B;
_imageData[i + 1] = color.G;
_imageData[i + 2] = color.R;
_imageData[i + 3] = color.A;
}
if (ColorDepth == 24) // For 24 bpp set Red, Green and Blue
{
_imageData[i] = color.B;
_imageData[i + 1] = color.G;
_imageData[i + 2] = color.R;
}
if (ColorDepth == 8)
// For 8 bpp set color value (Red, Green and Blue values are the same)
{
_imageData[i] = color.B;
}
}
catch(Exception ex)
{
throw new Exception("("+x+", "+y+"), "+_imageData.Length+", "+ ex.Message+", i=" + i);
}
}
public void Dispose()
{
Dispose(true);
GC.SuppressFinalize(this);
}
protected virtual void Dispose(bool disposing)
{
if (disposing)
{
// free managed resources
_bitmap = null;
_bitmapData = null;_imageData = null;IntegerPointer = IntPtr.Zero;
}
// free native resources if there are any.
//private properties
//public properties
}
}
ImageDataConverter.cs
public static Bitmap ToBitmap(double[,] input)
{
int width = input.GetLength(0);
int height = input.GetLength(1);
Bitmap output = Grayscale.CreateGrayscaleImage(width, height);
BitmapData data = output.LockBits(new Rectangle(0, 0, width, height),
ImageLockMode.WriteOnly,
output.PixelFormat);
int pixelSize = System.Drawing.Image.GetPixelFormatSize(PixelFormat.Format8bppIndexed) / 8;
int offset = data.Stride - width * pixelSize;
double Min = 0.0;
double Max = 255.0;
unsafe
{
byte* address = (byte*)data.Scan0.ToPointer();
for (int y = 0; y < height; y++)
{
for (int x = 0; x < width; x++)
{
double v = 255 * (input[x, y] - Min) / (Max - Min);
byte value = unchecked((byte)v);
for (int c = 0; c < pixelSize; c++, address++)
{
*address = value;
}
}
address += offset;
}
}
output.UnlockBits(data);
return output;
}
Here is the picture I used for the test,
int noOfBitsNeededForStorage = _bitmapData.Stride * _bitmapData.Height;
That is the most essential bug in the code. Stride * Height is the number of bytes needed for storage, so this calculation doesn't make the _imageData array large enough and an IndexOutOfRangeException is the expected outcome.
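In other words, the allocation should simply be (keeping the field names from the question):
_imageData = new byte[_bitmapData.Stride * _bitmapData.Height];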
int pixelSize = System.Drawing.Image.GetPixelFormatSize(PixelFormat.Format8bppIndexed) / 8;
Lots of possible mishaps come from this statement. It hard-codes the pixel format to 8bpp, but that is not the actual pixel format that the LockBits() call used, which was output.PixelFormat. Notably fatal on the sample image: although it is not clear how the value is used in the code, 8bpp is a very awkward pixel format since it requires a palette. The PNG codec will create a 32bpp image in memory, even though the original file uses 8bpp. You must use output.PixelFormat here to get a match with the locked data, and adjust the pixel-writing code accordingly. It is not clear why this conversion exists at all; the SetPixel() method provided by the library code should already be good enough.
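A minimal version of that change, keeping the rest of ToBitmap() as posted:
// Use the pixel format that LockBits() actually locked, not a hard-coded 8bpp value.
int pixelSize = System.Drawing.Image.GetPixelFormatSize(output.PixelFormat) / 8;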
int dataLength = _imageData.Length - channels;
It is unclear what that statement tries to do; subtracting the number of channels is not a sensible operation, and it will generate a spurious IndexOutOfRangeException. There is no obvious way this check helps, since the CLR already provides index checking on the _imageData array. So just delete that code.
Additional information: Bitmap region is already locked.
The exception handling in the code does not guard against this, which is a likely reason for this exception. In general, note that the underlying bitmap is completely inaccessible, other than through _imageData, after the Lock() method has been called and before Unlock() has been called. The best way to handle this is a try/finally with the Unlock() call in the finally block, so you can always be sure that the bitmap doesn't remain locked by accident.
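For example, the driver code from the question can be wrapped like this (a sketch, using the question's own types):
Bitmap bitmap = ImageDataConverter.ToBitmap(mask);
BitmapLocker locker = new BitmapLocker(bitmap);
locker.Lock();
try
{
    for (int i = 0; i < bitmap.Width; i++)
    {
        for (int j = 0; j < bitmap.Height; j++)
        {
            Color c = locker.GetPixel(i, j);
            locker.SetPixel(i, j, c);
        }
    }
}
finally
{
    // Always runs, even if GetPixel/SetPixel throws, so the bitmap never stays locked.
    locker.Unlock();
}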
byte c = _imageData[i];
This is not correct, except in the corner case of an 8bpp image whose palette was explicitly created for grayscale images. The default palette of an 8bpp image does not meet that requirement, nor is it something you can blindly rely on when loading images from a file. Indexed pixel formats were a dreadful hack that was necessary in the early 1990s because video adapters were not yet powerful enough; they no longer make any sense today. Note that SetPixel() also doesn't handle 16-bit pixel formats, and that the PNG codec will never create an 8bpp memory image and cannot encode an 8bpp file. The best advice is to eliminate 8bpp support completely to arrive at more reliable code.
In fact, the point of directly accessing pixel data is to make image manipulation fast. There is only one pixel format that consistently produces fast code: Format32bppArgb. The pixels can then be accessed with an int* instead of a byte*, moving pixels roughly four times faster, and no special tweaks are necessary to deal with stride or to special-case the code in methods like SetPixel(). So pass that format into LockBits(); the codec will do the conversion work if the actual image format is not 32bpp.
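As an illustration of that approach (this is not part of the question's library, just a sketch; it needs an unsafe compilation context):
using System.Drawing;
using System.Drawing.Imaging;

static class FastPixels
{
    // Example operation: invert the RGB channels of every pixel, keeping alpha.
    public static unsafe void InvertRgb(Bitmap bmp)
    {
        Rectangle rect = new Rectangle(0, 0, bmp.Width, bmp.Height);
        // Request 32bpp ARGB regardless of the bitmap's own format; GDI+ converts on lock.
        BitmapData data = bmp.LockBits(rect, ImageLockMode.ReadWrite, PixelFormat.Format32bppArgb);
        try
        {
            for (int y = 0; y < data.Height; y++)
            {
                // Stride is in bytes; every 32bpp row holds Stride / 4 ints, one int per pixel.
                int* row = (int*)((byte*)data.Scan0 + y * data.Stride);
                for (int x = 0; x < data.Width; x++)
                {
                    int argb = row[x];
                    row[x] = (argb & unchecked((int)0xFF000000)) | (~argb & 0x00FFFFFF);
                }
            }
        }
        finally
        {
            bmp.UnlockBits(data);
        }
    }
}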
I should note that Format32bppPArgb is the fast pixel format for displaying images on the screen since it is compatible with the pixel format used by all modern video adapters. But that isn't the point of this code and dealing with the pre-multiplied alpha is awkward.
Note: I am giving the background from my previous question once again, so that all the related information is in one place.
I'm capturing an image from an Android mobile device and it's in JPEG format. The image is 72x72 DPI and 24-bit. When I try to convert this JPEG image to TIFF using LibTiff.Net and set the tag Photometric Interpretation = 0 (MinIsWhite), the image turns negative (white becomes black and black becomes white). The environment is Windows 8.1 64-bit, Visual Studio 2012. The tag must have value 0, where 0 = white is zero.
I absolutely must use Photometric.MINISWHITE in these images, so I tried inverting the image data before writing it to TIFF, as in the code below. But then the compression changes to LZW instead of CCITT4, Photometric changes from MINISWHITE to MINISBLACK, the FillOrder tag is removed, the PlanarConfig tag is removed, a new Predictor tag is added with value 1, and the image turns negative again.
public partial class Form1 : Form
{
private const TiffTag TIFFTAG_ASCIITAG = (TiffTag)666;
private const TiffTag TIFFTAG_LONGTAG = (TiffTag)667;
private const TiffTag TIFFTAG_SHORTTAG = (TiffTag)668;
private const TiffTag TIFFTAG_RATIONALTAG = (TiffTag)669;
private const TiffTag TIFFTAG_FLOATTAG = (TiffTag)670;
private const TiffTag TIFFTAG_DOUBLETAG = (TiffTag)671;
private const TiffTag TIFFTAG_BYTETAG = (TiffTag)672;
public Form1()
{
InitializeComponent();
}
private void button1_Click(object sender, EventArgs e)
{
using (Bitmap bmp = new Bitmap(@"D:\Projects\ITests\images\IMG_2.jpg"))
{
// convert jpg image to tiff
byte[] tiffBytes = GetTiffImageBytes(bmp, false);
File.WriteAllBytes(@"D:\Projects\ITests\images\output.tif", tiffBytes);
//Invert the tiff image
Bitmap bmpTiff = new Bitmap(@"D:\Projects\ITests\images\output.tif");
Bitmap FBitmap = Transform(bmpTiff);
FBitmap.Save(@"D:\Projects\ITests\images\invOutput1.tif");
}
}
public static byte[] GetTiffImageBytes(Bitmap img, bool byScanlines)
{
try
{
byte[] raster = GetImageRasterBytes(img);
using (MemoryStream ms = new MemoryStream())
{
using (Tiff tif = Tiff.ClientOpen("InMemory", "w", ms, new TiffStream()))
{
if (tif == null)
return null;
tif.SetField(TiffTag.IMAGEWIDTH, img.Width);
tif.SetField(TiffTag.IMAGELENGTH, img.Height);
tif.SetField(TiffTag.COMPRESSION, Compression.CCITTFAX4);
tif.SetField(TiffTag.PHOTOMETRIC, Photometric.MINISWHITE);
tif.SetField(TiffTag.ROWSPERSTRIP, img.Height);
tif.SetField(TiffTag.XRESOLUTION, 200);
tif.SetField(TiffTag.YRESOLUTION, 200);
tif.SetField(TiffTag.SUBFILETYPE, 0);
tif.SetField(TiffTag.BITSPERSAMPLE, 1);
tif.SetField(TiffTag.FILLORDER, FillOrder.LSB2MSB);
tif.SetField(TiffTag.ORIENTATION, BitMiracle.LibTiff.Classic.Orientation.TOPLEFT);
tif.SetField(TiffTag.SAMPLESPERPIXEL, 1);
tif.SetField(TiffTag.RESOLUTIONUNIT, ResUnit.INCH);
tif.SetField(TiffTag.PLANARCONFIG, PlanarConfig.CONTIG);
int tiffStride = tif.ScanlineSize();
int stride = raster.Length / img.Height;
if (byScanlines)
{
// raster stride MAY be bigger than TIFF stride (due to padding in raster bits)
for (int i = 0, offset = 0; i < img.Height; i++)
{
bool res = tif.WriteScanline(raster, offset, i, 0);
if (!res)
return null;
offset += stride;
}
}
else
{
if (tiffStride < stride)
{
// raster stride is bigger than TIFF stride
// this is due to padding in raster bits
// we need to create correct TIFF strip and write it into TIFF
byte[] stripBits = new byte[tiffStride * img.Height];
for (int i = 0, rasterPos = 0, stripPos = 0; i < img.Height; i++)
{
System.Buffer.BlockCopy(raster, rasterPos, stripBits, stripPos, tiffStride);
rasterPos += stride;
stripPos += tiffStride;
}
// Write the information to the file
int n = tif.WriteEncodedStrip(0, stripBits, stripBits.Length);
if (n <= 0)
return null;
}
else
{
// Write the information to the file
int n = tif.WriteEncodedStrip(0, raster, raster.Length);
if (n <= 0)
return null;
}
}
}
return ms.GetBuffer();
}
}
catch (Exception)
{
return null;
}
}
public static byte[] GetImageRasterBytes(Bitmap img)
{
// Specify full image
Rectangle rect = new Rectangle(0, 0, img.Width, img.Height);
Bitmap bmp = img;
byte[] bits = null;
try
{
// Lock the managed memory
if (img.PixelFormat != PixelFormat.Format1bppIndexed)
bmp = convertToBitonal(img);
BitmapData bmpdata = bmp.LockBits(rect, ImageLockMode.ReadOnly, PixelFormat.Format1bppIndexed);
// Declare an array to hold the bytes of the bitmap.
bits = new byte[bmpdata.Stride * bmpdata.Height];
// Copy the sample values into the array.
Marshal.Copy(bmpdata.Scan0, bits, 0, bits.Length);
// Release managed memory
bmp.UnlockBits(bmpdata);
}
finally
{
if (bmp != img)
bmp.Dispose();
}
return bits;
}
private static Bitmap convertToBitonal(Bitmap original)
{
int sourceStride;
byte[] sourceBuffer = extractBytes(original, out sourceStride);
// Create destination bitmap
Bitmap destination = new Bitmap(original.Width, original.Height,
PixelFormat.Format1bppIndexed);
destination.SetResolution(original.HorizontalResolution, original.VerticalResolution);
// Lock destination bitmap in memory
BitmapData destinationData = destination.LockBits(
new Rectangle(0, 0, destination.Width, destination.Height),
ImageLockMode.WriteOnly, PixelFormat.Format1bppIndexed);
// Create buffer for destination bitmap bits
int imageSize = destinationData.Stride * destinationData.Height;
byte[] destinationBuffer = new byte[imageSize];
int sourceIndex = 0;
int destinationIndex = 0;
int pixelTotal = 0;
byte destinationValue = 0;
int pixelValue = 128;
int height = destination.Height;
int width = destination.Width;
int threshold = 500;
for (int y = 0; y < height; y++)
{
sourceIndex = y * sourceStride;
destinationIndex = y * destinationData.Stride;
destinationValue = 0;
pixelValue = 128;
for (int x = 0; x < width; x++)
{
// Compute pixel brightness (i.e. total of Red, Green, and Blue values)
pixelTotal = sourceBuffer[sourceIndex + 1] + sourceBuffer[sourceIndex + 2] +
sourceBuffer[sourceIndex + 3];
if (pixelTotal > threshold)
destinationValue += (byte)pixelValue;
if (pixelValue == 1)
{
destinationBuffer[destinationIndex] = destinationValue;
destinationIndex++;
destinationValue = 0;
pixelValue = 128;
}
else
{
pixelValue >>= 1;
}
sourceIndex += 4;
}
if (pixelValue != 128)
destinationBuffer[destinationIndex] = destinationValue;
}
Marshal.Copy(destinationBuffer, 0, destinationData.Scan0, imageSize);
destination.UnlockBits(destinationData);
return destination;
}
private static byte[] extractBytes(Bitmap original, out int stride)
{
Bitmap source = null;
try
{
// If original bitmap is not already in 32 BPP, ARGB format, then convert
if (original.PixelFormat != PixelFormat.Format32bppArgb)
{
source = new Bitmap(original.Width, original.Height, PixelFormat.Format32bppArgb);
source.SetResolution(original.HorizontalResolution, original.VerticalResolution);
using (Graphics g = Graphics.FromImage(source))
{
g.DrawImageUnscaled(original, 0, 0);
}
}
else
{
source = original;
}
// Lock source bitmap in memory
BitmapData sourceData = source.LockBits(
new Rectangle(0, 0, source.Width, source.Height),
ImageLockMode.ReadOnly, PixelFormat.Format32bppArgb);
// Copy image data to binary array
int imageSize = sourceData.Stride * sourceData.Height;
byte[] sourceBuffer = new byte[imageSize];
Marshal.Copy(sourceData.Scan0, sourceBuffer, 0, imageSize);
// Unlock source bitmap
source.UnlockBits(sourceData);
stride = sourceData.Stride;
return sourceBuffer;
}
finally
{
if (source != original)
source.Dispose();
}
}
public Bitmap Transform(Bitmap bitmapImage)
{
var bitmapRead = bitmapImage.LockBits(new Rectangle(0, 0, bitmapImage.Width, bitmapImage.Height), ImageLockMode.ReadOnly, PixelFormat.Format32bppPArgb);
var bitmapLength = bitmapRead.Stride * bitmapRead.Height;
var bitmapBGRA = new byte[bitmapLength];
Marshal.Copy(bitmapRead.Scan0, bitmapBGRA, 0, bitmapLength);
bitmapImage.UnlockBits(bitmapRead);
for (int i = 0; i < bitmapLength; i += 4)
{
bitmapBGRA[i] = (byte)(255 - bitmapBGRA[i]);
bitmapBGRA[i + 1] = (byte)(255 - bitmapBGRA[i + 1]);
bitmapBGRA[i + 2] = (byte)(255 - bitmapBGRA[i + 2]);
// [i + 3] = ALPHA.
}
var bitmapWrite = bitmapImage.LockBits(new Rectangle(0, 0, bitmapImage.Width, bitmapImage.Height), ImageLockMode.WriteOnly, PixelFormat.Format32bppPArgb);
Marshal.Copy(bitmapBGRA, 0, bitmapWrite.Scan0, bitmapLength);
bitmapImage.UnlockBits(bitmapWrite);
return bitmapImage;
}
}
You should invert the image bytes in the GetTiffImageBytes method, before writing them to TIFF. Also, the Transform method converts the bi-level image to a 32bpp one, and that is why you get an LZW-compressed image in the end.
So, add the following code
for (int k = 0; k < raster.Length; k++)
raster[k] = (byte)(~raster[k]);
after byte[] raster = GetImageRasterBytes(img); in the GetTiffImageBytes method. This will invert the image bytes. And don't use the following code:
//Invert the tiff image
Bitmap bmpTiff = new Bitmap(@"D:\Projects\ITests\images\output.tif");
Bitmap FBitmap = Transform(bmpTiff);
FBitmap.Save(@"D:\Projects\ITests\images\invOutput1.tif");
My code takes an image and creates a pointillist version of it by drawing ellipses in the sampled pixels' colors.
After a while the image is fully 'painted', and I want to automatically switch to another image in my sketch folder.
I would like to be able to count the number of ellipses generated. Once 'z' ellipses have been generated, I want my code to erase all the ellipses and start over with a new image.
CODE:
PImage img;
int smallPoint, largePoint;
void setup() {
size(1920, 1080);
img = loadImage("rio.jpg");
smallPoint = 12;
largePoint = 12;
imageMode(CENTER);
noStroke();
background(255);
}
void draw() {
for (int i = 0; i < 1000; i++)
{
drawADot();
}
}
void drawADot()
{
int imageWidth = img.width;
int imageHeight = img.height;
int ptSize = int(random(100)) + 4;
float pointillize = map(mouseX, 0, width, smallPoint, largePoint); //not used right now but for controlling ellipse size
int x = int(random(0, imageWidth/8));
int y = int(random(0, imageHeight/8));
color pix = img.get(x*8, y*8);
fill(pix, 255);
ellipse(x*8, y*8, pointillize, pointillize);
}
Store the images in an array, count the dots added, and conditionally (based on the number of dots) switch the image being used to the next one in the array. You can pass the image to the drawADot() function as a parameter. Something like:
PImage img[] = new PImage[2];
int smallPoint, largePoint;
final int DOTSPERDRAW = 500;
int numberOfDots = 0;
final int MAXDOTS = DOTSPERDRAW * 100;
PImage workingImage ;
int index;
void setup() {
size(810, 455);
img[0] = loadImage("http://assets2.exame.abril.com.br/assets/images/2014/8/506584/size_810_16_9_rio.jpg");
img[1] = loadImage("http://upload.wikimedia.org/wikipedia/commons/1/1e/Pilcomayo_rio.jpg");
img[1].resize(810, 0);
smallPoint = 12;
largePoint = 12;
imageMode(CENTER);
noStroke();
background(255);
workingImage = img[0];
}
void draw() {
if (numberOfDots > MAXDOTS) {
index = (index + 1) % img.length;
workingImage = img[index];
numberOfDots = 0;
background(255); // clear the previous dots so the new image starts on a blank canvas
}
for (int i = 0; i < DOTSPERDRAW; i++)
{
drawADot(workingImage);
}
numberOfDots += DOTSPERDRAW;
}
void drawADot(PImage theImage)
{
int imageWidth = theImage.width;
int imageHeight = theImage.height;
int ptSize = int(random(100)) + 4;
float pointillize = map(mouseX, 0, width, smallPoint, largePoint); //not used right now but for controlling ellipse size
int x = int(random(0, imageWidth/8));
int y = int(random(0, imageHeight/8));
color pix = theImage.get(x*8, y*8);
fill(pix, 255);
ellipse(x*8, y*8, pointillize, pointillize);
}