After background subtraction, how to detect the segmented object? - visual-c++

I am working on object detection using OpenCV 2.4.2. Below is my code for moving-foreground subtraction. Now I want to detect the moving object in the original frame and draw a bounding box around it.
Can anybody please tell me how to do that?
#include "stdafx.h"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/contrib/contrib.hpp"
#include "conio.h"
#include "time.h"
#include "opencv/cvaux.hpp"
#include "opencv2/core/core.hpp"
#include "opencv2/calib3d/calib3d.hpp"
using namespace std;
using namespace cv;
int main(int argc, char *argv[])
{
int key = 0;
CvSize imgSize;
CvCapture* capture = cvCaptureFromFile( "S:\\offline object detection database\\TwoEnterShop2cor.MPG" );
IplImage* frame = cvQueryFrame( capture );
imgSize = cvGetSize(frame);
IplImage* grayImage = cvCreateImage( imgSize, IPL_DEPTH_8U, 1);
IplImage* currframe = cvCreateImage(imgSize,IPL_DEPTH_8U,3);
IplImage* destframe = cvCreateImage(imgSize,IPL_DEPTH_8U,3);
if ( !capture )
{
fprintf( stderr, "Cannot open AVI!\n" );
return 1;
}
int fps = ( int )cvGetCaptureProperty( capture, CV_CAP_PROP_FPS );
cvNamedWindow( "dest", CV_WINDOW_AUTOSIZE );
while( key != 'y' )
{
frame = cvQueryFrame( capture );
currframe = cvCloneImage( frame );// copy frame to current
frame = cvQueryFrame( capture );// grab frame
cvAbsDiff(frame,currframe,destframe);
cvCvtColor(destframe,grayImage,CV_RGB2GRAY);
cvSmooth(grayImage,grayImage,CV_MEDIAN,3,3,0);
cvAdaptiveThreshold(grayImage,grayImage,230,CV_THRESH_BINARY,CV_ADAPTIVE_THRESH_GAUSSIAN_C,3,5);
cvDilate(grayImage, grayImage, 0,1);
cvErode(grayImage,grayImage, 0, 0);
if(key==27 )break;
cvShowImage( "fram",currframe);
cvShowImage( "dest",grayImage);
key = cvWaitKey( 100 );
}
cvDestroyWindow( "dest" );
cvReleaseCapture( &capture );
return 0;
}

Frame differencing is the simplest method of background subtraction, but it is very sensitive to the threshold you use and may not give good results. A better way is to estimate a background model and compare each frame against that model to determine the moving objects. Once you have a binary foreground mask, you can find its contours and draw a bounding box around each blob; see the sketch after the links below.
You can find further information at the following links:
Background subtraction in OpenCV(C++)
Background Subtraction with OpenCV 2
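
For the bounding-box part of the question, a minimal sketch using the OpenCV 2.4 C++ API could look like the following (this is not the poster's code; the fixed threshold of 30 and the minimum blob area of 500 are placeholder assumptions). It builds a frame-differenced binary mask much like the program above, then finds the external contours in the mask and draws a rectangle around each sufficiently large blob:

#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <vector>
using namespace cv;

int main()
{
    VideoCapture cap("S:\\offline object detection database\\TwoEnterShop2cor.MPG"); // placeholder path
    Mat frame, prev, diff, gray, mask;
    while (cap.read(frame))
    {
        if (prev.empty()) { frame.copyTo(prev); continue; }
        absdiff(frame, prev, diff);                        // frame differencing, as in the question
        cvtColor(diff, gray, CV_BGR2GRAY);
        threshold(gray, mask, 30, 255, CV_THRESH_BINARY);  // example threshold, tune for your video
        medianBlur(mask, mask, 3);
        dilate(mask, mask, Mat());

        // find the blobs in the mask and box them on the original frame
        std::vector<std::vector<Point> > contours;
        findContours(mask.clone(), contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);
        for (size_t i = 0; i < contours.size(); ++i)
        {
            if (contourArea(contours[i]) < 500)            // skip small noise blobs (arbitrary area)
                continue;
            rectangle(frame, boundingRect(contours[i]), Scalar(0, 255, 0), 2);
        }

        imshow("detections", frame);
        frame.copyTo(prev);
        if (waitKey(30) == 27)
            break;
    }
    return 0;
}

If you later switch to a proper background model such as BackgroundSubtractorMOG (see the links above), only the way the mask is produced changes; the contour and bounding-box step stays the same.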

Related

Unhandled exception, access violation in OpenCV cvCaptureFromCAM using Visual C++

I am trying to build a program in OpenCV that captures a video and saves it to a file. However, after repeated efforts I keep getting this error: Unhandled exception at 0x201e8efb in basic.exe: 0xC0000005 Access violation (basic.cpp is my file name).
The same thing happens when trying to capture a single image.
I have already tried changing the cvCaptureFromCAM parameter to CV_CAP_ANY/0/-1/-2/1/2, but none of these worked. My webcam works perfectly well in other applications and I have tested it online.
Any help would be greatly appreciated. Thanks in advance.
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include "C:\opencv\build\include\opencv2\core\core.hpp"
#include "C:\opencv\build\include\opencv2\highgui\highgui.hpp"
#include <iostream>
using namespace cv;
using namespace std;
void main( ){
CvCapture *capture = cvCaptureFromCAM( 0 );
int width = ( int )cvGetCaptureProperty( capture, CV_CAP_PROP_FRAME_WIDTH );
int height = ( int )cvGetCaptureProperty( capture, CV_CAP_PROP_FRAME_HEIGHT );
CvVideoWriter *writer = cvCreateVideoWriter( "myCamCapture.avi", -1, 30, cvSize( width, height ) );
cvNamedWindow("camopen", CV_WINDOW_AUTOSIZE);
IplImage *frame = 0;
while( 1 )
{
frame = cvQueryFrame( capture );
cvShowImage("d",frame);
cvWriteFrame( writer, frame );
char c = cvWaitKey( 30 );
if( c == 27 ) break;
}
}
Please, since you're a beginner, don't start with the deprecated C API; use the C++ one instead:
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/core/core.hpp"
using namespace cv;
int main()
{
VideoCapture cap(0);
while( cap.isOpened() )
{
Mat frame;
if ( ! cap.read(frame) )
break;
imshow("lalala",frame);
int k = waitKey(10);
if ( k==27 )
break;
}
return 0;
}
Regarding your error: please triple-check the libraries you are linking against (compiler version, debug/release, 32/64 bit). You are not allowed to mix different settings there; see the example below.
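For illustration, a sketch of what that looks like with #pragma comment (the 242 suffix assumes OpenCV 2.4.2; adjust it to the version you actually built against): the "d"-suffixed debug libraries must only be linked in a debug configuration, and the release libraries only in a release configuration.

// link the debug ("d"-suffixed) libraries only in a debug build,
// and the release libraries only in a release build
#ifdef _DEBUG
#pragma comment(lib, "opencv_core242d.lib")
#pragma comment(lib, "opencv_highgui242d.lib")
#else
#pragma comment(lib, "opencv_core242.lib")
#pragma comment(lib, "opencv_highgui242.lib")
#endif

The same rule applies when the libraries are listed in the project's linker settings instead of in code.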

OpenCV: IplImage is undefined

I am very new to OpenCV. I want to display a picture. Here is my code:
#include "stdafx.h"
#include <cv.h>
#include <cvaux.h>
#include <highgui.h>
int main( int argc, char** argv ) {
IplImage* img = cvLoadImage( "C:\Users\Cagin\Desktop\New.jpg" );
cvNamedWindow( “Example1”, CV_WINDOW_AUTOSIZE );
cvShowImage( “Example1”, img );
cvWaitKey(0);
cvReleaseImage( &img );
cvDestroyWindow( “Example1” );
}
It seems like it doesn't recognize the OpenCV library at all; my solution window (screenshot not included here) shows the errors.
As I said before, I am very new to OpenCV. Where is my mistake?
You are dealing with some legacy C code here; are you doing this on purpose? Under the latest builds, this would work for you:
#include "opencv2/core/core.hpp"
#include "opencv2/highgui/highgui.hpp"
using namespace cv;

int main(int argc, char** argv) {
    // note the escaped backslashes in the path (forward slashes also work)
    Mat img = imread("C:\\Users\\Cagin\\Desktop\\New.jpg");
    namedWindow("Example1", CV_WINDOW_AUTOSIZE);
    imshow("Example1", img);
    waitKey(0);
}
If this doesn't work, it means you haven't configured Visual Studio properly. Try following the instructions here: http://jepsonsblog.blogspot.com/2012/07/installation-guide-opencv-24-with.html

How to play and detect an object using captured video in background subtractor model?

Hi everyone! I am using OpenCV 2.4.2 and working on an object detection project. I tried using the BackgroundSubtractorMOG model.
But I am not able to load a video file from my computer; when running in real time (from the camera), the code below for segmentation works fine.
I have implemented object detection using the frame differencing method. Now I want to segment the whole object from the background, which is static. So can anybody tell me how to segment the object from a captured video in the code below, and also how to load a video file?
Thank you.
#include "stdafx.h"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/contrib/contrib.hpp"
#include "conio.h"
#include "time.h"
#include "opencv/cvaux.hpp"
#include "opencv2/core/core.hpp"
#include "opencv2/calib3d/calib3d.hpp"
using namespace std;
using namespace cv;
int main(int argc, char** argv)
{
//IplImage* tmp_frame;
//std::string arg = argv[1];
//VideoCapture capture();
cv::VideoCapture cap;
/*CvCapture *cap =cvCaptureFromFile("S:\\offline object detection database\\SINGLE PERSON Database\\video4.avi");
if(!cap){
printf("Capture failure\n");
return -1;
}
IplImage* frame=0;
frame = cvQueryFrame(cap);
if(!frame)
return -1;*/
bool update_bg_model = true;
if( argc < 2 )
cap.open(0);
else
cap.open(std::string(argv[1]));
if( !cap.isOpened() )
{
printf("can not open camera or video file\n");
return -1;
}
Mat tmp_frame, bgmask;
cap >> tmp_frame;
if(!tmp_frame.data)
{
printf("can not read data from the video source\n");
return -1;
}
namedWindow("video", 1);
namedWindow("segmented", 1);
BackgroundSubtractorMOG bgsubtractor;
for(;;)
{
//double t = (double)cvGetTickCount();
cap >> tmp_frame;
if( !tmp_frame.data )
break;
bgsubtractor(tmp_frame, bgmask, update_bg_model ? -1 : 0);
//t = (double)cvGetTickCount() - t;
//printf( "%d. %.1f\n", fr, t/(cvGetTickFrequency()*1000.) );
imshow("video", tmp_frame);
imshow("segmented", bgmask);
char keycode = waitKey(30);
if( keycode == 27 ) break;
if( keycode == ' ' )
update_bg_model = !update_bg_model;
}
return 0;
}
Video loading in OpenCV works for me. To load a video you can try something like this. Once you have captured a frame, you can either do the processing inside the loop or call a separate function.
std::cout<<"Video File "<<argv[1]<<std::endl;
cv::VideoCapture input_video(argv[1]);
namedWindow("My_Win",1);
Mat cap_img;
while(input_video.grab())
{
if(input_video.retrieve(cap_img))
{
imshow("My_Win", cap_img);
/* Once you have the image do all the processing here */
/* Or Call your image processing function */
waitKey(1);
}
}
Or you can do something like this:
int main(int argc, char* argv[])
{
    const char* my_file = "C:\\vid_an2\\desp_me.avi";
    std::cout << "Video File " << my_file << std::endl;
    cv::VideoCapture input_video;
    if(input_video.open(my_file))
    {
        std::cout << "Video file open " << std::endl;
    }
    else
    {
        std::cout << "Not able to open video file " << std::endl;
    }
    namedWindow("My_Win", 1);
    namedWindow("Segmented", 1);
    Mat cap_img;
    for(;;)
    {
        input_video >> cap_img;
        if(cap_img.empty())   // stop at the end of the video
            break;
        imshow("My_Win", cap_img);
        waitKey(0);
    }
    return 0;
}
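
To tie this back to the original question: once the file opens, the segmentation part does not change. Below is a minimal sketch of my own (not from either snippet above) that feeds a video file straight into BackgroundSubtractorMOG; the hard-coded path is the one from the commented-out block in the question and is only a placeholder.

#include "opencv2/highgui/highgui.hpp"
#include "opencv2/video/background_segm.hpp"
#include <cstdio>
using namespace cv;

int main()
{
    // open the file directly instead of passing it via argv[1]
    VideoCapture cap("S:\\offline object detection database\\SINGLE PERSON Database\\video4.avi"); // placeholder path
    if( !cap.isOpened() )
    {
        printf("can not open video file\n");
        return -1;
    }
    BackgroundSubtractorMOG bgsubtractor;     // mixture-of-Gaussians background model
    Mat frame, fgmask;
    for(;;)
    {
        if( !cap.read(frame) )
            break;                            // end of the video
        bgsubtractor(frame, fgmask, 0.01);    // small learning rate, since the background is static
        imshow("video", frame);
        imshow("segmented", fgmask);
        if( waitKey(30) == 27 )
            break;
    }
    return 0;
}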

Getting frame from video

#include "opencv2/opencv.hpp"
#pragma comment (lib , "opencv_core244d.lib")
#pragma comment (lib ,"opencv_highgui244d.lib")
#pragma comment(lib , "opencv_imgproc244d.lib")
int main(int argc, char* argv[])
{
CvCapture* capture = cvCaptureFromFile("try.avi");
IplImage* frame = NULL;
do
{
frame = skipNFrames(capture, 1);
cvNamedWindow("frame", CV_WINDOW_AUTOSIZE);
cvShowImage("frame", frame);
cvWaitKey(0);
} while( frame != NULL );
cvReleaseCapture(&capture);
cvDestroyWindow("frame");
cvReleaseImage(&frame);
return 0;
}
This is my program to get frames from a video. When I run it, it works and shows me the video, but it does not automatically save the frames into my directory (without any button or mouse interaction), which is what I want.
To see each frame of the video individually, use cvWaitKey(0). It shows the current frame and waits for a key press indefinitely, so press a key to see the next frame.
To save each frame individually:
#include <stdio.h>
Declare a global variable:
int flag = 0;
and add the following code just below cvWaitKey(0):
char str[50];
flag++;
sprintf(str, "%d frame.jpg", flag);   // produces "1 frame.jpg", "2 frame.jpg", ...
Mat image(frame);                     // wrap the IplImage* in a cv::Mat (no copy)
imwrite(str, image);
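As a side note, since the rest of that program uses the C API, calling cvSaveImage(str, frame) at the same spot should also work, without converting to cv::Mat.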
#include"stdafx.h"
#include<cv.h>
#include<highgui.h>
#include<cxcore.h>
int main(int argc, char* argv[]) {
int c=1;
IplImage* img=0;
char buffer[1000];
CvCapture* cv_cap = cvCaptureFromFile("try.avi");
cvNamedWindow("Video",CV_WINDOW_AUTOSIZE);
while(1) {
img=cvQueryFrame(cv_cap);
cvShowImage("Video",img);
sprintf(buffer,"D:/image%u.jpg",c);
cvSaveImage(buffer,img);
c++;
if (cvWaitKey(100)== 27) break;
}
cvDestroyWindow("Video");
return 0;
}
Try this; it will work.
You need to use cvSaveImage() to explicitly save each frame.
This should be done inside your loop, wherever you want to save the frame.
Obviously, if you want to save each frame under a different name, you have to generate a different name for each call; @baban's answer shows one way to do it.

Qt with XComposite problem

I'm trying to write a simple program which redirects all the windows to the back buffer (as a composite manager does), then writes them to pixmaps and saves them to disk. But I get these errors:
(.text.startup+0x5e):-1: error: undefined reference to `XCompositeRedirectSubwindows'
(.text.startup+0x171):-1: error: undefined reference to `XCompositeNameWindowPixmap'
:-1: error: collect2: ld returned 1 exit status
Here is the code:
#include <QApplication>
#include <QDebug>
#include <X11/Xlib.h>
#include <QPaintDevice>
#include <QX11Info>
#include <X11/Xutil.h>
#include <X11/Xatom.h>
#include <X11/extensions/Xcomposite.h>
#include <X11/extensions/Xrender.h>
#include <X11/extensions/Xdamage.h>
#include <QPixmap>
#include <QWidget>

int main( int argc, char *argv[] )
{
    QApplication app( argc, argv );
    app.setGraphicsSystem("native");

    Picture frontBuffer;
    XRenderPictFormat *format;
    Window rootWindow;
    int depth;

    Display *dpy = XOpenDisplay( getenv("DISPLAY") );
    rootWindow = XRootWindow( dpy, XDefaultScreen( dpy ) );
    depth = DefaultDepth( dpy, DefaultScreen(dpy) );

    // Redirect all the windows
    XCompositeRedirectSubwindows( dpy, rootWindow, CompositeRedirectManual );

    // Get the format
    format = XRenderFindVisualFormat( dpy, DefaultVisual( dpy, DefaultScreen(dpy) ) );
    XRenderPictureAttributes pa;
    pa.subwindow_mode = IncludeInferiors;

    // Creating front buffer
    frontBuffer = XRenderCreatePicture( dpy, rootWindow, format, CPSubwindowMode, &pa );

    uint nwindows;
    Window root_return, parent_return, *windows;
    XQueryTree( dpy, rootWindow, &root_return,
                &parent_return, &windows, &nwindows );
    for ( uint i = 0; i < nwindows; i++ ) {
        XWindowAttributes attr;
        if ( !XGetWindowAttributes( dpy, windows[i], &attr ) )
            continue;
        Pixmap pix = XCompositeNameWindowPixmap( dpy, windows[i] );
        Picture pic = XRenderCreatePicture( dpy, pix, format, 0, 0 );
        QPixmap pixmap(540, 900);
        XRenderComposite( dpy, PictOpSrc, pic, None, pixmap.x11PictureHandle(),
                          0, 0, 0, 0, 0, 0, 540, 900 );
        pixmap.save( QString::number( i ) + ".png", "PNG" );
    }
    XFree( windows );
    return app.exec();
}
Did you link your program against libXcomposite? That is the library which defines those functions.
Compile with -lXcomposite, or use pkg-config --libs xcomposite; see the example below.
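For instance, assuming the project is built with qmake (it uses QApplication; the file and target names below are made up), the library can be added like this:

# in the qmake .pro file
LIBS += -lXcomposite

# or on a plain command line
g++ main.cpp -o grab-windows `pkg-config --cflags --libs QtGui x11 xcomposite xrender`

Because the code also calls XRender functions directly (XRenderCreatePicture, XRenderComposite), -lXrender may be needed as well if it is not already pulled in.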
