Object Detection in OpenCV - visual-c++

I have a problem with my program, written in Visual C++ using OpenCV:
I have to capture frames from a webcam and find all the rectangles in them (the color doesn't matter).
I tried to modify the C sample, squares.c, but it doesn't work well, because the program waits for a key press (anything other than 'q') before continuing.
This is the code. Can someone tell me where the problem is?
Thank you in advance.
//
// Object Detection of squares
// Take images from webcam and find the square in them
//
//
#include "stdafx.h"
#include <stdio.h>
#include <math.h>
#include <string.h>
int thresh = 50;
IplImage* img = 0;
IplImage* img0 = 0;
CvMemStorage* storage = 0;
//const char* wndname = "Square Detection Demo with Webcam";
// helper function:
// finds a cosine of angle between vectors
// from pt0->pt1 and from pt0->pt2
double angle( CvPoint* pt1, CvPoint* pt2, CvPoint* pt0 )
{
double dx1 = pt1->x - pt0->x;
double dy1 = pt1->y - pt0->y;
double dx2 = pt2->x - pt0->x;
double dy2 = pt2->y - pt0->y;
return (dx1*dx2 + dy1*dy2)/sqrt((dx1*dx1 + dy1*dy1)*(dx2*dx2 + dy2*dy2) + 1e-10);
}
// returns sequence of squares detected on the image.
// the sequence is stored in the specified memory storage
CvSeq* findSquares4( IplImage* img, CvMemStorage* storage )
{
CvSeq* contours;
int i, c, l, N = 11;
CvSize sz = cvSize( img->width & -2, img->height & -2 );
IplImage* timg = cvCloneImage( img ); // make a copy of input image
IplImage* gray = cvCreateImage( sz, 8, 1 );
IplImage* pyr = cvCreateImage( cvSize(sz.width/2, sz.height/2), 8, 3 );
IplImage* tgray;
CvSeq* result;
double s, t;
// create empty sequence that will contain points -
// 4 points per square (the square's vertices)
CvSeq* squares = cvCreateSeq( 0, sizeof(CvSeq), sizeof(CvPoint), storage );
// select the maximum ROI in the image
// with the width and height divisible by 2
cvSetImageROI( timg, cvRect( 0, 0, sz.width, sz.height ));
// down-scale and upscale the image to filter out the noise
cvPyrDown( timg, pyr, 7 );
cvPyrUp( pyr, timg, 7 );
tgray = cvCreateImage( sz, 8, 1 );
// find squares in every color plane of the image
for( c = 0; c < 3; c++ )
{
// extract the c-th color plane
cvSetImageCOI( timg, c+1 );
cvCopy( timg, tgray, 0 );
// try several threshold levels
for( l = 0; l < N; l++ )
{
// hack: use Canny instead of zero threshold level.
// Canny helps to catch squares with gradient shading
if( l == 0 )
{
// apply Canny. Take the upper threshold from slider
// and set the lower to 0 (which forces edges merging)
cvCanny( tgray, gray, 0, thresh, 5 );
// dilate canny output to remove potential
// holes between edge segments
cvDilate( gray, gray, 0, 1 );
}
else
{
// apply threshold if l!=0:
// tgray(x,y) = gray(x,y) < (l+1)*255/N ? 255 : 0
cvThreshold( tgray, gray, (l+1)*255/N, 255, CV_THRESH_BINARY );
}
// find contours and store them all as a list
cvFindContours( gray, storage, &contours, sizeof(CvContour),
CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE, cvPoint(0,0) );
// test each contour
while( contours )
{
// approximate contour with accuracy proportional
// to the contour perimeter
result = cvApproxPoly( contours, sizeof(CvContour), storage,
CV_POLY_APPROX_DP, cvContourPerimeter(contours)*0.02, 0 );
// square contours should have 4 vertices after approximation
// relatively large area (to filter out noisy contours)
// and be convex.
// Note: absolute value of an area is used because
// area may be positive or negative - in accordance with the
// contour orientation
if( result->total == 4 &&
fabs(cvContourArea(result,CV_WHOLE_SEQ)) > 1000 &&
cvCheckContourConvexity(result) )
{
s = 0;
printf("ciclo for annidato fino a 5\t\n");
for( i = 0; i < 5; i++ )
{
// find minimum angle between joint
// edges (maximum of cosine)
if( i >= 2 )
{
t = fabs(angle(
(CvPoint*)cvGetSeqElem( result, i ),
(CvPoint*)cvGetSeqElem( result, i-2 ),
(CvPoint*)cvGetSeqElem( result, i-1 )));
s = s > t ? s : t;
}
}
// if cosines of all angles are small
// (all angles are ~90 degrees) then write quadrangle
// vertices to resultant sequence
if( s < 0.3 )
for( i = 0; i < 4; i++ )
cvSeqPush( squares,
(CvPoint*)cvGetSeqElem( result, i ));
}
// take the next contour
contours = contours->h_next;
}
}
}
// release all the temporary images
cvReleaseImage( &gray );
cvReleaseImage( &pyr );
cvReleaseImage( &tgray );
cvReleaseImage( &timg );
return squares;
}
// the function draws all the squares in the image
void drawSquares( IplImage* img, CvSeq* squares )
{
CvSeqReader reader;
IplImage* cpy = cvCloneImage( img );
int i;
// initialize reader of the sequence
cvStartReadSeq( squares, &reader, 0 );
// read 4 sequence elements at a time (all vertices of a square)
for( i = 0; i < squares->total; i += 4 )
{
CvPoint pt[4], *rect = pt;
int count = 4;
// read 4 vertices
CV_READ_SEQ_ELEM( pt[0], reader );
CV_READ_SEQ_ELEM( pt[1], reader );
CV_READ_SEQ_ELEM( pt[2], reader );
CV_READ_SEQ_ELEM( pt[3], reader );
// draw the square as a closed polyline
cvPolyLine( cpy, &rect, &count, 1, 1, CV_RGB(0,255,0), 3, CV_AA, 0 );
}
cvSaveImage("squares.jpg",cpy);
//show the resultant image
//cvShowImage( wndname, cpy );
cvReleaseImage( &cpy );
//return cpy;
}
int _tmain(int argc, _TCHAR* argv[])
{
int key = 0;
IplImage* frame =0;
IplImage* squares=0;
// create memory storage that will contain all the dynamic data
storage = cvCreateMemStorage(0);
CvCapture *camera = cvCreateCameraCapture(CV_CAP_ANY); /* use USB camera */
frame = cvQueryFrame(camera);
frame = cvQueryFrame(camera);
frame = cvQueryFrame(camera);
while(key!='q'){
frame = cvQueryFrame(camera);
frame = cvQueryFrame(camera);
if(frame!=NULL){
printf("Got frame\t\n");
cvSaveImage("frame.jpg", frame);
/*img0*/ img = cvLoadImage("frame.jpg");
//img = cvCloneImage( img0 );
cvNamedWindow( "img0", CV_WINDOW_AUTOSIZE);
cvShowImage("img0",/*img0*/img);
// find and draw the squares
drawSquares( img, findSquares4( img, storage ) );
squares = cvLoadImage("squares.jpg");
// create window and a trackbar (slider)
//with parent "image" and set callback
//(the slider regulates upper threshold,
//passed to Canny edge detector)
cvNamedWindow( "main", CV_WINDOW_AUTOSIZE);
cvShowImage("main", squares);
/* wait for key.
Also the function cvWaitKey takes care of event processing */
key = cvWaitKey(0);
}
}
// release both images
cvReleaseImage( &img );
cvReleaseImage( &img0 );
cvReleaseCapture(&camera);
cvDestroyWindow("main");
cvDestroyWindow("img0");
// clear memory storage - reset free space position
cvClearMemStorage( storage );
return 0;
}

I believe your problem is here:
/* wait for key.
Also the function cvWaitKey takes care of event processing */
key = cvWaitKey(0);
Try changing the 0 to 10, so that cvWaitKey() returns after at most 10 ms instead of blocking until a key is pressed.
I see some other problems in your code. For instance, you create windows inside the while loop, which is not good. Try moving the cvNamedWindow() calls outside your while loop. Also, I'm not sure why you query the camera several times per iteration and never process those extra frames; each extra cvQueryFrame() just discards a frame.
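A minimal sketch of the suggested loop structure (windows created once, one frame grabbed per iteration, non-blocking wait; it also skips the save/load round trip through frame.jpg, so treat it as an illustration rather than a drop-in replacement):
cvNamedWindow("img0", CV_WINDOW_AUTOSIZE);
cvNamedWindow("main", CV_WINDOW_AUTOSIZE);
while( key != 'q' ){
    frame = cvQueryFrame(camera);   // one grab per iteration, nothing discarded
    if( frame == NULL ) break;
    drawSquares( frame, findSquares4( frame, storage ) );
    cvClearMemStorage( storage );   // reuse the storage every frame, as squares.c does
    cvShowImage("img0", frame);
    key = cvWaitKey(10);            // waits at most 10 ms, returns -1 on timeout
}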

If your problem is that the window disappears without waiting for any keyboard input, you can add a cvWaitKey(0) at the end of the code.
A getch() at the end would also help; make sure you include <conio.h> in the headers.
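For example, at the end of main (a sketch):
cvWaitKey(0);  // blocks until a key is pressed in an OpenCV window
getch();       // blocks until a key is pressed in the console; needs <conio.h> (Windows-only)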

Related

Vulkan: Vertex Buffer doesn't get sent to vertex shader

I am learning Vulkan and started having a problem where no vertices would get displayed.
After analyzing my program with RenderDoc (https://renderdoc.org/builds), I saw that the buffer containing the vertex and index information holds the right values, with the index data at the end of that same buffer.
The problem is that the data that reaches the vertex shader is empty.
Here is the command-buffer section that is supposed to send the data to the shader:
VkDeviceSize indicesOffset = sizeof(Vertex) * this->nbVertices;
VkDeviceSize offsets[] = {0};
vkCmdBindVertexBuffers(commandBuffers[i], 0, 1, &this->vertexBuffer, offsets);
vkCmdBindIndexBuffer(commandBuffers[i], this->vertexBuffer, indicesOffset, VK_INDEX_TYPE_UINT32);
for(size_t j = 0 ; j < this->models.size() ; j++){
Model *model = this->models[j];
uint32_t modelDynamicOffset = j * static_cast<uint32_t>(this->uniformDynamicAlignment);
VkDescriptorSet* modelDescriptorSet = model->getDescriptorSet(i);
vkCmdBindDescriptorSets(this->commandBuffers[i], VK_PIPELINE_BIND_POINT_GRAPHICS, pipelineLayout, 0, 1, modelDescriptorSet, 1, &modelDynamicOffset);
vkCmdDrawIndexed(commandBuffers[i], this->nbIndices, 1, 0, indicesOffset, 0);
}
Also, here is how I create the vertex buffer:
void Application::createVertexBuffers() {
for(Model *model : this->models){
for(Vertex vertex : model->getVertices()){
vertices.push_back(vertex);
}
for(uint32_t index : model->getIndices()){
indices.push_back(index);
}
}
VkDeviceSize vertexBufferSize = sizeof(vertices[0]) * vertices.size();
VkDeviceSize indexBufferSize = sizeof(uint32_t) * indices.size();
this->nbVertices = vertices.size();
this->nbIndices = indices.size();
VkBuffer stagingBuffer;
VkDeviceMemory stagingBufferMemory;
//To CPU
this->createBuffer(vertexBufferSize + indexBufferSize,
VK_BUFFER_USAGE_TRANSFER_SRC_BIT,
VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
stagingBuffer,
stagingBufferMemory);
void *data;
vkMapMemory(device, stagingBufferMemory, 0, vertexBufferSize, 0, &data);
memcpy(data, vertices.data(), (size_t)vertexBufferSize);
vkUnmapMemory(device, stagingBufferMemory);
//Add the index data after vertex data
vkMapMemory(device, stagingBufferMemory, vertexBufferSize, indexBufferSize, 0, &data);
memcpy(data, indices.data(), (size_t)indexBufferSize);
vkUnmapMemory(device, stagingBufferMemory);
//To GPU
this->createBuffer(vertexBufferSize + indexBufferSize,
VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_INDEX_BUFFER_BIT,
VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT,
this->vertexBuffer,
this->vertexBufferMemory);
this->copyBuffer(stagingBuffer, this->vertexBuffer, vertexBufferSize + indexBufferSize);
vkDestroyBuffer(device, stagingBuffer, nullptr);
vkFreeMemory(device, stagingBufferMemory, nullptr);
}
If you need more information to help me solve my problem, please tell me.
Thank you.
The indices that RenderDoc reports for the render are a bit high.
You pass indicesOffset as the vertexOffset parameter of your draw command, which the spec defines as:
vertexOffset is the value added to the vertex index before indexing into the vertex buffer.
So replace it with 0 and you should get your proper vertices again.
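For illustration, only the fifth argument (vertexOffset) of the draw call changes:
vkCmdDrawIndexed(commandBuffers[i], this->nbIndices, 1, 0, 0, 0); // vertexOffset = 0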

Using 2D metaballs to draw an outline with a constant thickness

I'm applying the concept of metaballs to a game I'm making, in order to show that the player has selected a few ships, like so: http://prntscr.com/klgktf
However, my goal is to keep a constant thickness for this outline, and that's not what I'm getting with the current code.
I'm using a GLSL shader to do this, and I pass the fragment shader a uniform array of positions for the ships (u_metaballs).
Vertex shader:
#version 120
void main() {
gl_Position = gl_ModelViewProjectionMatrix * gl_Vertex;
}
Fragment shader:
#version 120
uniform vec2 u_metaballs[128];
void main() {
float intensity = 0;
for(int i = 0; i < 128 && u_metaballs[i].x != 0; i++){
float r = length(u_metaballs[i] - gl_FragCoord.xy);
intensity += 1 / r;
}
gl_FragColor = vec4(0, 0, 0, 0);
if(intensity > .2 && intensity < .21)
gl_FragColor = vec4(.5, 1, .7, .2);
}
I've tried playing around with the intensity ranges, and even changing 1 / r to 10000 / (r ^ 4) which (although it makes no sense) helps a bit, though it does not fix the problem.
Any help or suggestions would be greatly appreciated.
After some more thought it is doable even in a single pass ... you just compute the distance to the nearest metaball and, if it is within the boundary thickness, render the fragment; otherwise discard it ... Here is an example (assuming a single quad <-1,+1> is rendered covering the whole screen):
Vertex:
// Vertex
varying vec2 pos; // fragment position in world space
void main()
{
pos=gl_Vertex.xy;
gl_Position=ftransform();
}
Fragment:
// Fragment
#version 120
varying vec2 pos;
const float r=0.3; // metabal radius
const float w=0.02; // border line thickness
uniform vec2 u_metaballs[5]=vec2[5] // GLSL array constructor; C-style {} initializers are not valid GLSL
(
vec2(-0.25,-0.25),
vec2(+0.25,-0.25),
vec2( 0.00,+0.05),
vec2(+0.30,+0.35),
vec2(-1000.1,-1000.1) // end of metaballs
);
void main()
{
int i;
float d;
// d = min distance to any metaball
for (d=r+r+w+w,i=0;u_metaballs[i].x>-1000.0;i++)
d=min(d,length(pos-u_metaballs[i].xy));
// if outside range ignore fragment
if ((d<r)||(d>r+w)) discard;
// otherwise render it
gl_FragColor=vec4(1.0,1.0,1.0,1.0);
}
Preview:

User input displayed at wrong coordinates in OpenCV

I'm trying to display circles at user-selected input points (usually centers), using OpenCV 2.4.3 (VS 2010). On the output image (displayed using namedWindow), the circle seems to shift column-wise as one marks points along columns. I'm not sure how I should correct this.
Code:
struct OPTIONS{
OPTIONS(): X(-1), Y(-1), drawing_dot(false){}
int X;
int Y;
bool drawing_dot;
};
OPTIONS options;
void my_mouse_callback( int event, int x, int y, int flags, void* param ){
IplImage* image = (IplImage*) param;
switch( event ){
case CV_EVENT_LBUTTONDOWN:
options.X = x;
options.Y = y;
options.drawing_dot = true;
break;
default:
break;
}
}
int main( void ){
IplImage* image = cvLoadImage("Images/TestRealData/img1.bmp");
Mat frame = imread("Images/TestRealData/img1.bmp");
namedWindow("Test", CV_WINDOW_KEEPRATIO);
cvSetMouseCallback("Test", my_mouse_callback, (void*) image);
while( cvWaitKey(15) != 27 ){
if( options.drawing_dot ){
circle(frame, Point(options.X,options.Y), 3, CV_RGB(0,0,255), 2);
options.drawing_dot = false;
}
imshow("Test", frame);
waitKey(10);
}
cvReleaseImage(&image);
return 0;
}
I think the circle does not shift; the mouse cursor may trick our eyes. You may simply check it by increasing the radius and reducing the thickness of the circle outline, like:
circle(frame, Point(options.X, options.Y/2), 15, CV_RGB(0, 0, 255), 1);
By the way, if you want to draw the circle at the point you click on, options.Y should not be divided by 2.
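That is, the corrected call would be:
circle(frame, Point(options.X, options.Y), 15, CV_RGB(0, 0, 255), 1);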
Found the answer after much time lost: make sure to specify the flags argument in namedWindow(). Changing the flag to CV_WINDOW_KEEPRATIO did the trick for me. Hope this helps somebody out there.

OpenCL image2d_t writing mostly zeros

I am trying to use OpenCL and image2d_t objects to speed up image convolution. When I noticed that the output was a blank image of all zeros, I simplified the OpenCL kernel to a basic read from the input and write to the output (shown below). With a little bit of tweaking, I got it to write a few scattered pixels of the image into the output image.
I have verified that the image is intact up until the call to read_imageui() in the OpenCL kernel. I wrote the image to GPU memory with CommandQueue::enqueueWriteImage() and immediately read it back into a brand new buffer in CPU memory with CommandQueue::enqueueReadImage(). The result of this call matched the original input image. However, when I retrieve the pixels with read_imageui() in the kernel, the vast majority of the pixels are set to 0.
C++ source:
int height = 112;
int width = 9216;
unsigned int numPixels = height * width;
unsigned int numInputBytes = numPixels * sizeof(uint16_t);
unsigned int numDuplicatedInputBytes = numInputBytes * 4;
unsigned int numOutputBytes = numPixels * sizeof(int32_t);
cl::size_t<3> origin;
origin.push_back(0);
origin.push_back(0);
origin.push_back(0);
cl::size_t<3> region;
region.push_back(width);
region.push_back(height);
region.push_back(1);
std::ifstream imageFile("hri_vis_scan.dat", std::ifstream::binary);
checkErr(imageFile.is_open() ? CL_SUCCESS : -1, "hri_vis_scan.dat");
uint16_t *image = new uint16_t[numPixels];
imageFile.read((char *) image, numInputBytes);
imageFile.close();
// duplicate our single channel image into all 4 channels for Image2D
cl_ushort4 *imageDuplicated = new cl_ushort4[numPixels];
for (int i = 0; i < numPixels; i++)
for (int j = 0; j < 4; j++)
imageDuplicated[i].s[j] = image[i];
cl::Buffer imageBufferOut(context, CL_MEM_WRITE_ONLY, numOutputBytes, NULL, &err);
checkErr(err, "Buffer::Buffer()");
cl::ImageFormat inFormat;
inFormat.image_channel_data_type = CL_UNSIGNED_INT16;
inFormat.image_channel_order = CL_RGBA;
cl::Image2D bufferIn(context, CL_MEM_READ_ONLY | CL_MEM_COPY_HOST_PTR, inFormat, width, height, 0, imageDuplicated, &err);
checkErr(err, "Image2D::Image2D()");
cl::ImageFormat outFormat;
outFormat.image_channel_data_type = CL_UNSIGNED_INT16;
outFormat.image_channel_order = CL_RGBA;
cl::Image2D bufferOut(context, CL_MEM_WRITE_ONLY, outFormat, width, height, 0, NULL, &err);
checkErr(err, "Image2D::Image2D()");
int32_t *imageResult = new int32_t[numPixels];
memset(imageResult, 0, numOutputBytes);
cl_int4 *imageResultDuplicated = new cl_int4[numPixels];
for (int i = 0; i < numPixels; i++)
for (int j = 0; j < 4; j++)
imageResultDuplicated[i].s[j] = 0;
std::ifstream kernelFile("convolutionKernel.cl");
checkErr(kernelFile.is_open() ? CL_SUCCESS : -1, "convolutionKernel.cl");
std::string imageProg(std::istreambuf_iterator<char>(kernelFile), (std::istreambuf_iterator<char>()));
cl::Program::Sources imageSource(1, std::make_pair(imageProg.c_str(), imageProg.length() + 1));
cl::Program imageProgram(context, imageSource);
err = imageProgram.build(devices, "");
checkErr(err, "Program::build()");
cl::Kernel basic(imageProgram, "basic", &err);
checkErr(err, "Kernel::Kernel()");
basic.setArg(0, bufferIn);
basic.setArg(1, bufferOut);
basic.setArg(2, imageBufferOut);
queue.finish();
cl_ushort4 *imageDuplicatedTest = new cl_ushort4[numPixels];
for (int i = 0; i < numPixels; i++)
{
imageDuplicatedTest[i].s[0] = 0;
imageDuplicatedTest[i].s[1] = 0;
imageDuplicatedTest[i].s[2] = 0;
imageDuplicatedTest[i].s[3] = 0;
}
double gpuTimer = clock();
err = queue.enqueueReadImage(bufferIn, CL_FALSE, origin, region, 0, 0, imageDuplicatedTest, NULL, NULL);
checkErr(err, "CommandQueue::enqueueReadImage()");
// Output from above matches input image
err = queue.enqueueNDRangeKernel(basic, cl::NullRange, cl::NDRange(height, width), cl::NDRange(1, 1), NULL, NULL);
checkErr(err, "CommandQueue::enqueueNDRangeKernel()");
queue.flush();
err = queue.enqueueReadImage(bufferOut, CL_TRUE, origin, region, 0, 0, imageResultDuplicated, NULL, NULL);
checkErr(err, "CommandQueue::enqueueReadImage()");
queue.flush();
err = queue.enqueueReadBuffer(imageBufferOut, CL_TRUE, 0, numOutputBytes, imageResult, NULL, NULL);
checkErr(err, "CommandQueue::enqueueReadBuffer()");
queue.finish();
OpenCL kernel:
__kernel void basic(__read_only image2d_t input, __write_only image2d_t output, __global int *result)
{
const sampler_t smp = CLK_NORMALIZED_COORDS_TRUE | //Natural coordinates
CLK_ADDRESS_NONE | //Clamp to zeros
CLK_FILTER_NEAREST; //Don't interpolate
int2 coord = (get_global_id(1), get_global_id(0));
uint4 pixel = read_imageui(input, smp, coord);
result[coord.s0 + coord.s1 * 9216] = pixel.s0;
write_imageui(output, coord, pixel);
}
The coordinates in the kernel are currently mapped to (x, y) = (width, height).
The input image is a single-channel greyscale image with 16 bits per pixel, which is why I had to duplicate the channels to fit into OpenCL's Image2D. The output after convolution will be 32 bits per pixel, which is why numOutputBytes is set to that. Also, although the width and height look weird, the input image's dimensions are 9216x7824, so I'm only taking a portion of it to test the code first, to keep the run time down.
I added in a write to global memory after reading from the image in the kernel to see if the issue was reading the image or writing the image. After the kernel executes, this section of global memory also contains mostly zeros.
Any help would be greatly appreciated!
The documentation for read_imageui states:
Furthermore, the read_imagei and read_imageui calls that take integer coordinates must use a sampler with normalized coordinates set to CLK_NORMALIZED_COORDS_FALSE and addressing mode set to CLK_ADDRESS_CLAMP_TO_EDGE, CLK_ADDRESS_CLAMP or CLK_ADDRESS_NONE; otherwise the values returned are undefined.
But you're creating a sampler with CLK_NORMALIZED_COORDS_TRUE, while (it seems) passing in non-normalized coordinates.
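A sketch of a sampler that satisfies that requirement, matching the integer coordinates the kernel already uses:
const sampler_t smp = CLK_NORMALIZED_COORDS_FALSE | // integer (unnormalized) coordinates
                      CLK_ADDRESS_CLAMP_TO_EDGE |   // one of the three allowed addressing modes
                      CLK_FILTER_NEAREST;           // don't interpolate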

How do I determine means of image blocks using opencv histogram

I'd like to determine the block means of my image using a histogram. Let's say my image has 64-by-64 dimensions; I need to divide it into 4-by-4 blocks and then determine each block's mean (in other words, I will then have 4 blocks).
Using OpenCV, how can I utilize my IplImage to determine the block means using histogram bins?
The code below uses an OpenCV histogram to determine the mean of the whole image:
int i, hist_size = 256;
float max_value,min_value;
float min_idx,max_idx;
float bin_w;
float mean =0, low_mean =0, high_mean =0, variance =0;
float range_0[]={0,256};
float *ranges[]={range_0};
IplImage* im = cvLoadImage("killerbee.jpg");
//Create a single planed image of the same size as the original
IplImage* grayImage = cvCreateImage(cvSize(im->width,im->height),IPL_DEPTH_8U, 1);
//convert the original image to gray
cvCvtColor(im, grayImage, CV_BGR2GRAY);
/* Commented out, since we want to evaluate the whole area.
//create a rectangular area to evaluate
CvRect rect = cvRect(0, 0, 500, 600 );
//apply the rectangle to the image and establish a region of interest
cvSetImageROI(grayImage, rect);
End remark*/
//create an image to hold the histogram
IplImage* histImage = cvCreateImage(cvSize(320,200), 8, 1);
//create a histogram to store the information from the image
CvHistogram* hist = cvCreateHist(1, &hist_size, CV_HIST_ARRAY, ranges, 1);
//calculate the histogram and apply to hist
cvCalcHist( &grayImage, hist, 0, NULL );
//grab the min and max values and their indeces
cvGetMinMaxHistValue( hist, &min_value, &max_value, 0, 0);
//scale the bin values so that they will fit in the image representation
cvScale( hist->bins, hist->bins, ((double)histImage->height)/max_value, 0 );
//set all histogram values to 255
cvSet( histImage, cvScalarAll(255), 0 );
//create a factor for scaling along the width
bin_w = cvRound((double)histImage->width/hist_size);
for( i = 0; i < hist_size; i++ ) {
//draw the histogram data onto the histogram image
cvRectangle( histImage, cvPoint(i*bin_w, histImage->height),cvPoint((i+1)*bin_w,histImage->height - cvRound(cvGetReal1D(hist->bins,i))),cvScalarAll(0), -1, 8, 0 );
//get the value at the current histogram bucket
float* bins = cvGetHistValue_1D(hist,i);
//increment the mean value
mean += bins[0];
}
//finish mean calculation
mean /= hist_size;
//display mean value onto output window
cout<<"MEAN VALUE of THIS IMAGE : "<<mean<<"\n";
//go back through now that mean has been calculated in order to calculate variance
for( i = 0; i < hist_size; i++ ) {
float* bins = cvGetHistValue_1D(hist,i);
variance += pow((bins[0] - mean),2);
}
//finish variance calculation
variance /= hist_size;
cvNamedWindow("Original", 0);
cvShowImage("Original", im );
cvNamedWindow("Gray", 0);
cvShowImage("Gray", grayImage );
cvNamedWindow("Histogram", 0);
cvShowImage("Histogram", histImage );
//hold the images until a key is pressed
cvWaitKey(0);
//clean up images
cvReleaseImage(&histImage);
cvReleaseImage(&grayImage);
cvReleaseImage(&im);
//remove windows
cvDestroyWindow("Original");
cvDestroyWindow("Gray");
cvDestroyWindow("Histogram");
Really thanks in advance.
You can do that with histograms, but a much more effective way is an integral image, which does almost exactly what you want.
Read http://en.wikipedia.org/wiki/Summed_area_table, then use it to calculate the sum of all the pixels in every block, and divide by the number of pixels in each block (4x4 = 16). Isn't it nice?
OpenCV has a function to calculate the integral image, with the difficult name cv::integral()
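As a sketch (gray, x and y are hypothetical names for the grayscale image and a block's top-left corner), the mean of the 4x4 block at (x, y) can then be read off with four lookups:
cv::Mat sum;
cv::integral(gray, sum, CV_32S);   // sum is (rows + 1) x (cols + 1)
int b = 4;                         // block size
int blockSum = sum.at<int>(y + b, x + b) - sum.at<int>(y, x + b)
             - sum.at<int>(y + b, x) + sum.at<int>(y, x);
float blockMean = blockSum / float(b * b);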
And an even easier way to do it is the humble resize().
Call resize(image64_64, image_16_16, Size(16, 16), 0, 0, INTER_AREA), and the result will be a smaller image whose pixel values are exactly the block means you're looking for. Isn't it great? (Note that the interpolation flag is the sixth argument of resize(); the fourth and fifth are the fx/fy scale factors.)
Just do not forget the INTER_AREA flag. It selects the correct averaging algorithm.
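Putting that together, a minimal sketch (using the killerbee.jpg file from the question and assuming a 64x64 grayscale input):
#include <iostream>
#include <opencv2/opencv.hpp>
using namespace cv;

int main() {
    Mat gray = imread("killerbee.jpg", 0);           // 0 = load as grayscale
    Mat blockMeans;
    // INTER_AREA averages each 4x4 source block into one destination pixel.
    resize(gray, blockMeans, Size(16, 16), 0, 0, INTER_AREA);
    std::cout << blockMeans << std::endl;            // 16x16 matrix of block means
    return 0;
}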
