I want to capture images from a webcam without any post-processing — that is, NO auto focus, exposure correction, white balance, and so on. Basically I want to capture continuous frames from the webcam, compare each frame with the previous one, and save them to disk only when there is an actual change. Because of the post-processing, almost every frame is being returned as different for me.
code so far
using namespace cv;
bool identical(cv::Mat m1, cv::Mat m2)
{
if ( m1.cols != m2.cols || m1.rows != m2.rows || m1.channels() != m2.channels() || m1.type() != m2.type() )
{
return false;
}
for ( int i = 0; i < m1.rows; i++ )
{
for ( int j = 0; j < m1.cols; j++ )
{
if ( m1.at<Vec3b>(i, j) != m2.at<Vec3b>(i, j) )
{
return false;
}
}
}
return true;
}
int main() {
CvCapture* capture = cvCaptureFromCAM( 1);
int i=0,firsttime=0;
char filename[40];
Mat img1,img2;
if ( !capture ) {
fprintf( stderr, "ERROR: capture is NULL \n" );
getchar();
return -1;
}
cvNamedWindow( "img1", CV_WINDOW_AUTOSIZE );
cvNamedWindow( "img2", CV_WINDOW_AUTOSIZE );
while ( 1 ) {
IplImage* frame = cvQueryFrame( capture );
img1=frame;
if ( !frame ) {
fprintf( stderr, "ERROR: frame is null...\n" );
getchar();
break;
}
if(firsttime==0){
img2=frame;
fprintf( stderr, "firtstime\n" );
}
if ( (cvWaitKey(10) & 255) == 27 ) break;
i++;
sprintf(filename, "D:\\testimg\\img%d.jpg", i);
cv::cvtColor(img1, img1, CV_BGR2GRAY);
imshow( "img1", img1);
imshow( "img2", img2);
imwrite(filename,img1);
if(identical(img1,img2))
{
//write to diff path
}
img2=imread(filename,1);
firsttime=1;
}
// Release the capture device housekeeping
cvReleaseCapture( &capture );
return 0;
}
While you're at it, I'd be grateful if you could suggest an alternative frame-comparison approach as well :)
I had this problem, and the only solution I found was to write a program based on DirectShow (in case you're using Windows), so no OpenCV code at all.
with a bit of luck, you can get the properties page of your camera, and switch things off there:
VideoCapture cap(0);
cap.set(CV_CAP_PROP_SETTINGS,1);
and please, skip the c-api in favour of c++. it'll go away soon.
Forgot to mention: you can change the cam settings from VLC as well.
#Prince, sorry — I have been looking for my DirectShow code but I didn't find it, and I don't think it would help anyway, because I used it with the DeckLink (Blackmagic Design) card. Since I had never done that before it was pretty hard; my suggestion would be to try GraphEditPlus:
http://www.infognition.com/GraphEditPlus/
it helps a lot, and it's easy to use !
good luck !
If you just wish to capture frames when there is an actual change, try background subtraction algorithms. Also, instead of just subtracting subsequent frames, use one of the many algorithms already implemented for you in OpenCV - they are much more robust to changes in lightning conditions etc than vanilla background subtraction.
In Python :
backsub = cv2.BackgroundSubtractorMOG2(history=10000,varThreshold=100)
fgmask = backsub.apply(frame, None, 0.01)
Frame is the stream of pictures read from your webcam.
Google for the corresponding function in Cpp.
Related
I've been trying to make an RPG game in SFML and now I'm kind of struggling with the collisions. My problem is quite simple: I have two layers, Background and Foreground. Background just acts as a background image, and the Foreground PNG image is supposed to provide the collisions, with the area where the character is supposed to walk completely free (transparent) and the rest of the structures kept to collide against (Background image here: https://imgur.com/gallery/DA3zGtD — I'm making the floor transparent while keeping the rest). I've tried using getGlobalBounds().intersects with the character sprite without any success. I want to keep it as simple as possible; here is what I have so far:
#include <iostream>
#include "Piso1.h"
using namespace std;
// Default constructor: intentionally empty — all resources (textures,
// sounds, sprites) are loaded inside Draw().
Piso1::Piso1(){
};
// Runs the first-floor scene loop: loads assets, processes movement input,
// detects (coarse, bounding-box) collisions against the foreground layer,
// and rolls for random encounters on each key press.
//
// Returns 1 when the player presses C (leave scene), 0 when a random
// encounter triggers or the window is closed.
int Piso1::Draw(RenderWindow &window, Event &evento)
{
    srand(time(nullptr));
    Soundtrack.openFromFile("../Scenes/Piso1/Sounds/Neon District.wav");
    Soundtrack.setLoop(true);
    Soundtrack.play();

    Texture BGTexture;
    BGTexture.loadFromFile("../Scenes/Piso1/Graphics/piso1background.png");
    Sprite Background;
    Background.setTexture(BGTexture);
    Background.setScale(8, 7.5);
    Background.setPosition(BackX, BackY);

    Texture FGTexture;
    FGTexture.loadFromFile("../Scenes/Piso1/Graphics/piso1foreground.png");
    Sprite Foreground;
    Foreground.setTexture(FGTexture);
    Foreground.setScale(8, 7.5);
    Foreground.setPosition(BackX, BackY);

    Texture ProtaTextura;
    ProtaTextura.loadFromFile("../Scenes/Piso1/Graphics/pSprite.png");
    IntRect SpriteBx(0, 0, 34, 47);
    Sprite Protagonista(ProtaTextura, SpriteBx);
    Protagonista.setPosition((window.getSize().x) / 2.35, (window.getSize().y) / 3);
    Protagonista.setScale(3, 3);

    // Advances the 4-frame walk cycle on sprite-sheet row `row` (the pixel
    // offset of that direction's strip), at most once every 64 ms. This
    // replaces four identical copies of the same frame-stepping code.
    auto advanceWalkFrame = [&](int row) {
        if (clock1.getElapsedTime().asMilliseconds() > 64) {
            SpriteBx.top = row;
            if (SpriteBx.left == 0)
                SpriteBx.left = 34;
            else if (SpriteBx.left == 34)
                SpriteBx.left = 68;
            else if (SpriteBx.left == 68)
                SpriteBx.left = 102;
            else
                SpriteBx.left = 0;
            Protagonista.setTextureRect(SpriteBx);
            clock1.restart();
        }
    };

    while (window.isOpen()) {
        while (window.pollEvent(evento)) {
            switch (evento.type) {
            case Event::Closed:
                window.close();
                break;
            case Event::KeyPressed:
                EncounterValue = rand() % 1000;
                if (EncounterValue > 5) {
                    if (evento.key.code == Keyboard::Down) {
                        BackY -= 10;
                        Background.move(0, -10);
                        Foreground.move(0, -10);
                        // NOTE(review): getLocalBounds() ignores position,
                        // so the original check compared rectangles both
                        // anchored at (0,0). getGlobalBounds() puts both
                        // rects in world coordinates. The rect still covers
                        // the WHOLE foreground PNG, not just its opaque
                        // structures -- see the answer below for why
                        // bounding-box intersection alone is insufficient.
                        if (Protagonista.getGlobalBounds().intersects(Foreground.getGlobalBounds()))
                        {
                            Collision.openFromFile("../Scenes/Piso1/Sounds/oof.ogg");
                            Collision.play();
                            // undo the move
                            BackY += 10;
                            Background.move(0, 10);
                            Foreground.move(0, 10);
                        }
                        advanceWalkFrame(0);
                        break;
                    }
                    else if (evento.key.code == Keyboard::Up) {
                        BackY += 10;
                        Background.move(0, 10);
                        Foreground.move(0, 10);
                        advanceWalkFrame(152);
                        break;
                    }
                    else if (evento.key.code == Keyboard::Left) {
                        BackX += 10;
                        Background.move(10, 0);
                        Foreground.move(10, 0);
                        advanceWalkFrame(53);
                        break;
                    }
                    else if (evento.key.code == Keyboard::Right) {
                        BackX -= 10;
                        Background.move(-10, 0);
                        Foreground.move(-10, 0);
                        advanceWalkFrame(104);
                        break;
                    }
                    else if (evento.key.code == Keyboard::C) {
                        Soundtrack.stop();
                        return 1;
                    }
                }
                else {
                    // Random encounter triggered.
                    Soundtrack.stop();
                    return 0;
                }
            }
        }
        window.clear();
        // Draw back-to-front: the original drew Background AFTER
        // Foreground, which painted the foreground layer over.
        window.draw(Background);
        window.draw(Foreground);
        window.draw(Protagonista);
        window.display();
    }
    // Window was closed via the Closed event: the original fell off the
    // end of a non-void function here, which is undefined behaviour.
    Soundtrack.stop();
    return 0;
}
I think what's happening is that when I do the check it takes the whole PNG instead of just the parts I want as collision. I also wanted to try pixel/color-based collision but I really don't know how to implement it; I'm kind of new to SFML. Thanks in advance!
If you wish to have proper collision detection between the player and other objects, .intersects() alone is not good enough. It will either make your character permanently stuck in the structure or make it pass through without player control, depending on what behavior you code for this condition.
What would work is detecting from which direction the player is approaching the object, and based on that either make him be moved back a bit or set the speed to 0 if current velocity would take him deeper into the structure.
For example, using intersect() as you are, for case of player colliding with a wall from the left:
if(velocity.x > 0 && player.getGlobalBounds().intersects(wall.getGlobalBounds()) && player.getGlobalBounds().left > wall.getGlobalBounds().left)
Then you can use this condition to either stop the player (set the velocity.x to 0), set reverse it so he backs off or do any other kind of behavior you'd wish in the event of a collision.
(Note this is not an ideal solution, but it should work and follow similar logic you've implemented.)
I want to make a soundboard in the Processing language that plays sounds so the computer handles the sounds as if they were inputs from my microphone. This is my only problem about doing a soundboard. How do I make the sounds play as if they were recorded by the microphone?
I have spent an hour searching and trying to get help, but I have nothing to work with.
Minim provides the class AudioInput for monitoring the user’s current record source (this is often set in the sound card control panel), such as the microphone or the line-in
from
http://code.compartmental.net/tools/minim/quickstart/
EDIT:
Have you seen this?
import ddf.minim.*;
import ddf.minim.ugens.*;
Minim minim;
// for recording
AudioInput in;
AudioRecorder recorder;
// for playing back
AudioOutput out;
FilePlayer player;
// Processing entry point: runs once at sketch start. Initializes Minim,
// opens the line-in, wires an AudioRecorder to it, and opens a line-out
// for later playback. Order matters: minim must exist before getLineIn(),
// and `in` before createRecorder().
void setup()
{
size(512, 200, P3D);
minim = new Minim(this);
// get a stereo line-in: sample buffer length of 2048
// default sample rate is 44100, default bit depth is 16
in = minim.getLineIn(Minim.STEREO, 2048);
// create an AudioRecorder that will record from in to the filename specified.
// the file will be located in the sketch's main folder.
recorder = minim.createRecorder(in, "myrecording.wav");
// get an output we can playback the recording on
out = minim.getLineOut( Minim.STEREO );
// font for the on-screen recording-status text drawn in draw()
textFont(createFont("Arial", 12));
}
// Renders one animation frame: both input-channel waveforms plus the
// current recording status.
void draw()
{
  background(0);
  stroke(255);
  // Samples from left.get()/right.get() are in [-1, 1]; scale by 50 px
  // around the two channel baselines (y = 50 left, y = 150 right).
  int sampleCount = in.left.size();
  for (int s = 0; s + 1 < sampleCount; s++)
  {
    line(s, 50 + in.left.get(s) * 50, s + 1, 50 + in.left.get(s + 1) * 50);
    line(s, 150 + in.right.get(s) * 50, s + 1, 150 + in.right.get(s + 1) * 50);
  }
  String status = recorder.isRecording() ? "Now recording..." : "Not recording.";
  text(status, 5, 15);
}
// Keyboard handler: 'r' toggles recording, 's' saves the captured audio
// and immediately plays it back.
void keyReleased()
{
  switch ( key )
  {
    case 'r':
      // Toggle capture. You can start and stop as many times as you like;
      // each record pass is appended to the end of the file.
      if ( recorder.isRecording() )
        recorder.endRecord();
      else
        recorder.beginRecord();
      break;

    case 's':
      // Write the recorded buffer to the file type chosen in setup().
      // With buffered recording this can briefly freeze the sketch while
      // the buffer is written; with streamed recording the data is already
      // on disk and only the file is closed. save() returns the recorded
      // audio as an AudioRecordingStream we can play with a FilePlayer.
      if ( player != null )
      {
        // tear down the previous playback chain before replacing it
        player.unpatch( out );
        player.close();
      }
      player = new FilePlayer( recorder.save() );
      player.patch( out );
      player.play();
      break;
  }
}
It's from here:
http://code.compartmental.net/minim/audiorecorder_class_audiorecorder.html
I'm having a lot of trouble with playing audio clips. I suspect it has something to do with update playing my clip from the start every time it executes the block of code that sends the clip to the audio source. Below is a 'solution' but the fact that it works doesn't even make sense to me. If I just put the audio.play() in the main if statement without checking whether it's less than the audio clip length, I hear nothing but a quiet distortion.
// Per-frame weather dispatch. Only Rain currently has behaviour attached;
// Sunny, Snow and ThunderStorm are placeholders that do nothing, so a
// simple equality check replaces the switch.
void Update()
{
    if (weatherState == WeatherStates.Rain)
    {
        Rain();
    }
}
// Runs the rain weather event: once the scheduled hour arrives, shows the
// rain effect and starts the ambience clip, then counts the event duration
// down and returns the weather to Sunny.
void Rain()
{
    // Guard clauses: no event scheduled, or its hour hasn't arrived yet.
    if (timeScript.hourOfWeatherEvent == 0)
        return;
    if (timeScript.hourCount < timeScript.hourOfWeatherEvent)
        return;

    rain.SetActive(true);

    // Start the ambience once and let it run. The original assigned the
    // clip and called Play() on (almost) every frame, restarting the clip
    // each Update -- which is why only a quiet distortion was audible.
    if (audio.clip != weatherSounds[0])
    {
        audio.clip = weatherSounds[0];
    }
    if (!audio.isPlaying)
    {
        audio.Play();
    }

    timeScript.durationOfWeatherEvent -= Time.deltaTime * timeScript.timeSpeed;
    if (timeScript.durationOfWeatherEvent <= 0)
    {
        // Event over: hide the effect, reset the schedule, silence the
        // ambience, and go back to sunny weather.
        rain.SetActive(false);
        timeScript.durationOfWeatherEvent = 0;
        timeScript.hourOfWeatherEvent = 0;
        weatherState = WeatherStates.Sunny;
        audio.Stop();
    }
}
What is the simplest way to capture audio from the built in audio input and be able to read the raw sampled values (as in a .wav) in real time as they come in when requested, like reading from a socket.
Hopefully code that uses one of Apple's frameworks (Audio Queues). Documentation is not very clear, and what I need is very basic.
Try the AudioQueue Framework for this. You mainly have to perform 3 steps:
setup an audio format how to sample the incoming analog audio
start a new recording AudioQueue with AudioQueueNewInput()
Register a callback routine which handles the incoming audio data packages
In step 3 you have a chance to analyze the incoming audio data with AudioQueueGetProperty()
It's roughly like this:
// AudioQueue input callback: the queue invokes this each time a capture
// buffer has been filled with incoming audio (see AudioQueueNewInput in
// StartRecording below, which registers it).
//   aqData       - the user-data pointer passed to AudioQueueNewInput()
//   inAQ         - the recording queue that produced the data
//   inBuffer     - buffer holding the newly captured samples
//   inStartTime  - timestamp of the first sample in the buffer
//   inNumPackets - number of packet descriptions in inPacketDesc
//   inPacketDesc - per-packet descriptions (may be NULL for constant
//                  bit-rate formats -- see Apple's AudioQueue docs)
static void HandleAudioCallback (void *aqData,
AudioQueueRef inAQ,
AudioQueueBufferRef inBuffer,
const AudioTimeStamp *inStartTime,
UInt32 inNumPackets,
const AudioStreamPacketDescription *inPacketDesc) {
// Here you examine your audio data
}
static void StartRecording() {
// now let's start the recording
AudioQueueNewInput (&aqData.mDataFormat, // The sampling format how to record
HandleAudioCallback, // Your callback routine
&aqData, // e.g. AudioStreamBasicDescription
NULL,
kCFRunLoopCommonModes,
0,
&aqData.mQueue); // Your fresh created AudioQueue
AudioQueueStart(aqData.mQueue,
NULL);
}
I suggest the Apple AudioQueue Services Programming Guide for detailled information about how to start and stop the AudioQueue and how to setup correctly all ther required objects.
You may also have a closer look into Apple's demo prog SpeakHere. But this is IMHO a bit confusing to start with.
It depends on how "real-time" you need it.
if you need it very crisp, go down right at the bottom level and use audio units. that means setting up an INPUT callback. remember, when this fires you need to allocate your own buffers and then request the audio from the microphone.
ie don't get fooled by the presence of a buffer pointer in the parameters... it is only there because Apple are using the same function declaration for the input and render callbacks.
here is a paste out of one of my projects:
// Remote IO *input* callback: fires when the microphone has new samples.
// The trailing AudioBufferList parameter is deliberately ignored -- Apple
// reuses one function signature for input and render callbacks, and on the
// input side we must allocate our own buffers and pull the audio with
// AudioUnitRender().
OSStatus dataArrivedFromMic(
                    void *                        inRefCon,
                    AudioUnitRenderActionFlags *  ioActionFlags,
                    const AudioTimeStamp *        inTimeStamp,
                    UInt32                        inBusNumber,
                    UInt32                        inNumberFrames,
                    AudioBufferList *             dummy_notused )
{
    OSStatus status;

    RemoteIOAudioUnit* unitClass = (RemoteIOAudioUnit *)inRefCon;
    AudioComponentInstance myUnit = unitClass.myAudioUnit;

    // Build a one-buffer (mono) buffer list sized for this batch of frames.
    AudioBufferList ioData;
    {
        int kNumChannels = 1; // one channel...
        enum {
            kMono = 1,
            kStereo = 2
        };
        ioData.mNumberBuffers = kNumChannels;
        for (int i = 0; i < kNumChannels; i++)
        {
            int bytesNeeded = inNumberFrames * sizeof( Float32 );
            ioData.mBuffers[i].mNumberChannels = kMono;
            ioData.mBuffers[i].mDataByteSize = bytesNeeded;
            ioData.mBuffers[i].mData = malloc( bytesNeeded );
        }
    }

    // actually GET the data that arrived
    status = AudioUnitRender( (void *)myUnit,
                              ioActionFlags,
                              inTimeStamp,
                              inBusNumber,
                              inNumberFrames,
                              & ioData );

    // take MONO from mic
    const int channel = 0;
    Float32 * outBuffer = (Float32 *) ioData.mBuffers[channel].mData;

    // get a handle to our game object (lazily cached; assumes a single
    // audio callback thread -- TODO confirm)
    static KPRing* kpRing = nil;
    if ( ! kpRing )
    {
        kpRing = [Game singleton].kpRing;
        assert( kpRing );
    }

    // ... and send it the data we just got from the mic
    [ kpRing floatsArrivedFromMic: outBuffer
                            count: inNumberFrames ];

    // Release the buffers malloc'ed above -- the original version leaked
    // them on every callback. (Assumes floatsArrivedFromMic: copies the
    // samples it needs rather than retaining the pointer -- verify.)
    for (UInt32 i = 0; i < ioData.mNumberBuffers; i++)
    {
        free( ioData.mBuffers[i].mData );
    }

    return status;
}
I have a video capture card with SDK for Visual C++. Color frames (640 x 480) become available to me at 30 fps in a callback from the SDK. Currently, I am writing the entire image sequence out one at a time as individual bmp files in a separate thread -- that's 108,000 files in an hour, or about 100 GB per hour, which is not manageable. I would like to push these incoming frames to one AVI file instead, with optional compression. Where do I even start? Wading through the MSDN DirectShow documentation has confused me so far. Are there better examples out there? Is OpenCV the answer? I've looked at some examples, but I'm not sure OpenCV would even recognize the card as a capture device, nor do I understand how it even recognizes capture devices in the first place. Also, I'm already getting the frames in, I just need to put them out to AVI in some consumer thread that does not back up my producer thread. Thanks for any help.
I've used CAviFile before. It works pretty well, I had to tweak it a bit to allow the user to pick the codec. I took that code from CAviGenerator. The interface for CAviFile is very simple, here's some sample code:
// CAviFile usage: ctor args are (path, codec fourcc [0 = prompt/default], fps).
CAviFile* aviWriter = new CAviFile(fileName.c_str(), 0, 10);
HRESULT appendResult = aviWriter->AppendNewFrame(Width, Height, ImageBuffer, BitsPerPixel);
if (FAILED(appendResult))
    std::cout << "Error recording AVI: " << aviWriter->GetLastErrorMessage() << std::endl;
delete aviWriter;
Obviously you have to ensure your ImageBuffer contains data in the right format etc. But once I got that kind of stuff all sorted out it worked great.
You can either use Video for Windows or DirectShow. Each comes with its own set of codecs. (and can be extended)
Though Microsoft considers VfW deprecated it is still perfectly usable, and is easier to setup than DirectShow.
Well you need to attach an AVI Mux (CLSID_AviDest) to your capture card. You then need to attach a File Writer (CLSID_FileWriter) and it will write out everything for you.
Admittedly, setting up the capture graph is not necessarily easy, as DirectShow makes you jump through a million and one hoops.
Its much easier using the ICaptureGraphBuilder2 interface. Thankfully Microsoft have given a really nice rundown of how to do this ...
http://msdn.microsoft.com/en-us/library/dd318627.aspx
Adding an encoder is not easy though and, conveniently, glossed over in that link.
Here is an example of how to enumerate all the video compressors in a system that I wrote for an MFC app of mine.
// Fills the IDC_COMBO_VIDEOCOMPRESSION combo box with every video
// compressor registered on the system. Each entry's item data stores the
// compressor's IMoniker*, whose reference is deliberately kept alive (not
// Released here) so the chosen filter can be created later; release those
// monikers when the dialog is torn down.
// Returns FALSE on any COM failure, TRUE otherwise.
BOOL LiveInputDlg::EnumerateVideoCompression()
{
    CComboBox* pVideoCompression = (CComboBox*)GetDlgItem( IDC_COMBO_VIDEOCOMPRESSION );
    pVideoCompression->SetExtendedUI( TRUE );
    pVideoCompression->SetCurSel( pVideoCompression->AddString( _T( "<None>" ) ) );

    ICreateDevEnum* pDevEnum = NULL;
    IEnumMoniker* pEnum = NULL;
    HRESULT hr = S_OK;

    hr = CoCreateInstance( CLSID_SystemDeviceEnum, NULL, CLSCTX_INPROC_SERVER, IID_ICreateDevEnum, (void**)&pDevEnum );
    if ( FAILED( hr ) )
    {
        return FALSE;
    }

    hr = pDevEnum->CreateClassEnumerator( CLSID_VideoCompressorCategory, &pEnum, 0 );
    pDevEnum->Release();
    if ( FAILED( hr ) )
    {
        return FALSE;
    }

    // CreateClassEnumerator returns S_FALSE with a NULL enumerator when
    // the category is empty, hence the pointer check.
    if ( pEnum )
    {
        IMoniker* pMoniker = NULL;
        hr = pEnum->Next( 1, &pMoniker, NULL );
        while( hr == S_OK )
        {
            IPropertyBag* pPropertyBag = NULL;
            hr = pMoniker->BindToStorage( NULL, NULL, IID_IPropertyBag, (void**)&pPropertyBag );
            if ( FAILED( hr ) )
            {
                pMoniker->Release();
                pEnum->Release();
                return FALSE;
            }

            // Prefer the long description; fall back to the friendly name.
            VARIANT varName;
            VariantInit( &varName );
            hr = pPropertyBag->Read( L"Description", &varName, NULL );
            if ( FAILED( hr ) )
            {
                hr = pPropertyBag->Read( L"FriendlyName", &varName, NULL );
                if ( FAILED( hr ) )
                {
                    pPropertyBag->Release();
                    pMoniker->Release();
                    pEnum->Release();
                    return FALSE;
                }
            }

            IBaseFilter* pBaseFilter = NULL;
            HRESULT hrBind = pMoniker->BindToObject( NULL, NULL, IID_IBaseFilter, (void**)&pBaseFilter );
            {
                USES_CONVERSION;
                TCHAR* pName = OLE2T( varName.bstrVal );
                int index = pVideoCompression->AddString( pName );
                // Ownership of pMoniker's reference passes to the combo item.
                pVideoCompression->SetItemDataPtr( index, pMoniker );
                VariantClear( &varName );
                pPropertyBag->Release();
            }
            // The bound filter object is never used here; the original code
            // leaked this COM reference on every iteration (and ignored the
            // BindToObject HRESULT).
            if ( SUCCEEDED( hrBind ) && pBaseFilter )
            {
                pBaseFilter->Release();
            }

            hr = pEnum->Next( 1, &pMoniker, NULL );
        }
        pEnum->Release();
    }
    return TRUE;
}
Good Luck! :)