// opencvlkdemo.cpp — OpenCV Lucas-Kanade optical-flow demo (VideoMan + GLUT)
#include <windows.h>
#include <GL/glut.h>
#include "cv.h"
#include <iostream>
#include <string>
#include "VideoManControl.h"
#include "VideoManInputFormat.h"

using namespace std;

/*This is the opencv example using optical flow.If no argument is passed, a camera is initialized.You can pass the path to a video file (you need the VMDirectshow module)*/

// VideoMan controller: owns the capture inputs and the GL-based renderer.
VideoManControl videoMan;
// Current window/viewport rectangle, kept up to date by glutResize().
int screenLeft, screenUp, screenWidth, screenHeight;
// Index of the initialized video input inside videoMan.
size_t videoInput;
// True while the GLUT window is fullscreen (toggled with F1).
bool fullScreened;
// Length of the video file in seconds (only meaningful for file input).
double videoLength;
// Format (resolution, fps, channels, ...) of the active video input.
VideoManInputFormat format;
// Optional path to a video file, taken from the command line.
std::string fileName;
//LKdemo sample data
// frame wraps the raw buffer returned by VideoMan each frame; image is the
// drawable BGR copy; grey/prev_grey hold the current/previous greyscale
// frames; pyramid/prev_pyramid are the LK pyramid buffers; swap_temp is
// scratch storage for CV_SWAP.
IplImage *image = 0, *grey = 0, *prev_grey = 0, *pyramid = 0, *prev_pyramid = 0, *swap_temp;
IplImage *frame = 0;
// Side of the search window for cvFindCornerSubPix / cvCalcOpticalFlowPyrLK.
int win_size = 10;
// Maximum number of simultaneously tracked features.
const int MAX_COUNT = 500;
// points[0]: feature positions in the previous frame; points[1]: current frame.
CvPoint2D32f* points[2] = {0,0}, *swap_points;
// Per-feature tracking status filled by cvCalcOpticalFlowPyrLK.
char* status = 0;
// Number of currently tracked features.
int count = 0;
// Nonzero requests feature re-detection on the next frame ('r' key).
int need_to_init = 0;
// Nonzero blanks the image so only motion vectors are visible ('n' key).
int night_mode = 0;
// Flags for cvCalcOpticalFlowPyrLK (CV_LKFLOW_PYR_A_READY after first frame).
int flags = 0;
// Set by the mouse callback to request adding a feature at pt.
int add_remove_pt = 0;
CvPoint pt;
void glutResize(int width, int height){ screenLeft = 0; screenUp = 0; screenWidth = width; screenHeight = height; //Notify to VideoMan the change of the screen size videoMan.changeScreenSize( screenLeft, screenUp, screenWidth, screenHeight );}void glutKeyboard(unsigned char key, int x, int y){ switch (key) { case 27: { exit(0); } case 'r':
need_to_init = 1;
break;
case 'c':
count = 0;
break;
case 'n':
night_mode ^= 1;
break;
default: break; }}void glutSpecialKeyboard(int value, int x, int y){ switch (value) { case GLUT_KEY_F1: { if ( !fullScreened ) glutFullScreen(); else { glutPositionWindow( 0, 20 ); glutReshapeWindow( 640, 480 ); } fullScreened = !fullScreened; break; } }}void InitializeOpenGL(){}bool InitializeVideoMan(){
inputIdentification device;
if ( !fileName.empty() )
{
//Initialize one input from a video file
device.fileName = fileName; //file name
device.identifier = "DSHOW_VIDEO_FILE"; //using directshow
format.timeFormat = SECONDS; //We want the time format in seconds
if ( ( videoInput = videoMan.addVideoInput( device, &format ) ) != -1 )
{
printf("Loaded video file: %s\n", device.fileName.c_str() );
printf("resolution: %d %d\n", format.width, format.height );
//get the length of the video
videoLength = videoMan.getLength( videoInput );
videoMan.playVideo( videoInput );
printf("duration: %f seconds\n\n", videoLength );
}
}
else
{
//Initialize one input from a camera
std::vector<inputIdentification> list; videoMan.getAvailableDevices( list ); //list all the available devices if ( list.size()>0 ) device = list[0]; //take the first
format.showDlg = true;
if ( list.size()>0 && ( videoInput = videoMan.addVideoInput( device, &format ) ) != -1 )
{
videoMan.showPropertyPage( videoInput );
videoMan.getFormat( videoInput, format );
printf("Initilized camera: %s\n", device.friendlyName.c_str() );
printf("resolution: %d %d\n", format.width, format.height );
printf("FPS: %f\n\n", format.fps );
} else { printf("There is no available camera\n"); return false; } } //We want to display all the intialized video inputs videoMan.activateAllVideoInputs(); return true;}void glutDisplay(void){ glClear(GL_COLOR_BUFFER_BIT);
char *frameData = videoMan.getFrame( videoInput );
if ( frameData != NULL )
{
cvSetImageData( frame, frameData, frame->widthStep );
cvCopy( frame, image, 0 );
cvCvtColor( image, grey, CV_BGR2GRAY );
videoMan.releaseFrame( videoInput );
if( night_mode )
cvZero( image );
if( need_to_init )
{
/* automatic initialization */
IplImage* eig = cvCreateImage( cvGetSize(grey), 32, 1 );
IplImage* temp = cvCreateImage( cvGetSize(grey), 32, 1 );
double quality = 0.01;
double min_distance = 10;
count = MAX_COUNT;
cvGoodFeaturesToTrack( grey, eig, temp, points[1], &count,
quality, min_distance, 0, 3, 0, 0.04 );
cvFindCornerSubPix( grey, points[1], count,
cvSize(win_size,win_size), cvSize(-1,-1),
cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS,20,0.03));
cvReleaseImage( &eig );
cvReleaseImage( &temp );
add_remove_pt = 0;
}
else if( count > 0 )
{
cvCalcOpticalFlowPyrLK( prev_grey, grey, prev_pyramid, pyramid,
points[0], points[1], count, cvSize(win_size,win_size), 3, status, 0,
cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS,20,0.03), flags );
flags |= CV_LKFLOW_PYR_A_READY;
int i,k;
glMatrixMode( GL_PROJECTION ); glLoadIdentity(); glOrtho( 0, format.width, 0, format.height, -1.0, 1.0 ); glMatrixMode( GL_MODELVIEW ); glLoadIdentity();
for( i = k = 0; i < count; i++ )
{
if( add_remove_pt )
{
double dx = pt.x - points[1][i].x;
double dy = pt.y - points[1][i].y;
if( dx*dx + dy*dy <= 25 )
{
add_remove_pt = 0;
continue;
}
}
if( !status[i] )
continue;
points[1][k++] = points[1][i];
//cvCircle( image, cvPointFrom32f(points[1][i]), 3, CV_RGB(0,255,0), -1, 8,0);
cvLine( image, cvPointFrom32f(points[1][i]) , cvPointFrom32f(points[0][i]), CV_RGB(0,255,0), 1, 4 );
}
count = k;
}
if( add_remove_pt && count < MAX_COUNT )
{
points[1][count++] = cvPointTo32f(pt);
cvFindCornerSubPix( grey, points[1] + count - 1, 1,
cvSize(win_size,win_size), cvSize(-1,-1),
cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS,20,0.03));
add_remove_pt = 0;
}
CV_SWAP( prev_grey, grey, swap_temp );
CV_SWAP( prev_pyramid, pyramid, swap_temp );
CV_SWAP( points[0], points[1], swap_points );
need_to_init = 0;
}
videoMan.updateTexture( videoInput, image->imageData ); //Update the texture of the renderer
videoMan.renderFrame( videoInput ); //render the image in the screen //Check if the video file (input number 0) has reached the end if ( videoMan.getPosition( videoInput ) == videoLength ) videoMan.goToFrame( videoInput, 0 ); //restart from the begining glFlush(); glutSwapBuffers();}void showHelp(){ printf("========\n"); printf("keys:\n"); printf("Esc->Exit\n"); printf("F1->Fullscreen\n"); printf("r->Detected features\n"); printf("c->Delete features\n"); printf("n->Night mode Y/N\n"); printf("Left Button->Create feature\n"); printf("========\n");}void clear(){ cvReleaseImageHeader( &frame );
cvReleaseImage( &image );
cvReleaseImage( &grey );
cvReleaseImage( &prev_grey );
cvReleaseImage( &pyramid );
cvReleaseImage( &prev_pyramid );
delete [] points[0];
delete [] points[1];
delete status;}void glutMouseFunc( int button, int state, int x, int y ){ if ( state == GLUT_DOWN && button == GLUT_LEFT_BUTTON ) {
float xn = x;
float yn = screenHeight - y;
videoMan.screenToImageCoords( xn, yn );
pt = cvPoint(xn,yn);
add_remove_pt = 1; }}int main(int argc, char** argv){
cout << "This is the opencv example using optical flow. One video input is initialized" << endl;
cout << "Usage: VMwithDirectShow.exe filePath(string)" << endl;
cout << "Example: VMwithDirectShow.exe c:\\video.avi" << endl; cout << "If no argument is passed, a camera is initialized" << endl; if ( argc > 1 ) { fileName = argv[1]; } glutInitDisplayMode( GLUT_DOUBLE | GLUT_DEPTH | GLUT_RGBA | GLUT_MULTISAMPLE ); glutInitWindowPosition( 0, 0 ); glutInitWindowSize( 640, 480 ); glutInit( &argc, argv ); glutCreateWindow("OpenCV LKdemo with DirectShow"); glutReshapeFunc(glutResize); glutDisplayFunc(glutDisplay); glutIdleFunc(glutDisplay); glutKeyboardFunc(glutKeyboard); glutSpecialFunc(glutSpecialKeyboard); glutMouseFunc(glutMouseFunc); InitializeOpenGL(); if ( !InitializeVideoMan() ) { fprintf(stderr,"Could not initialize capturing...\n"); return -1; } /* allocate all the buffers */ frame = cvCreateImageHeader( cvSize( format.width, format.height ), 8, (int)format.nChannels );
image = cvCreateImage( cvGetSize(frame), 8, 3 );
image->origin = frame->origin;
grey = cvCreateImage( cvGetSize(frame), 8, 1 );
prev_grey = cvCreateImage( cvGetSize(frame), 8, 1 );
pyramid = cvCreateImage( cvGetSize(frame), 8, 1 );
prev_pyramid = cvCreateImage( cvGetSize(frame), 8, 1 );
points[0] = (CvPoint2D32f*)cvAlloc(MAX_COUNT*sizeof(points[0][0]));
points[1] = (CvPoint2D32f*)cvAlloc(MAX_COUNT*sizeof(points[0][0]));
status = (char*)cvAlloc(MAX_COUNT);
flags = 0; fullScreened = false; showHelp(); glutMainLoop(); clear(); return 0;}
/*
 * Trailing boilerplate from the code-sharing web page (the viewer's own
 * keyboard shortcuts), neutralized as a comment so it cannot break the build:
 * Shortcut help — Copy code: Ctrl+C; Search code: Ctrl+F; Fullscreen: F11;
 * Switch theme: Ctrl+Shift+D; Show shortcuts: ?; Increase font size: Ctrl+=;
 * Decrease font size: Ctrl+-
 */