some samples updated to use the new CommandLineParser class

pull/13383/head
Kirill Kornyakov 14 years ago
parent 4ba6793568
commit aadb1669a7
   9  modules/core/src/cmdparser.cpp
  57  samples/c/adaptiveskindetector.cpp
  55  samples/c/bgfg_codebook.cpp
  11  samples/c/facedetect.cpp
  29  samples/c/find_obj.cpp
  26  samples/c/find_obj_ferns.cpp
  34  samples/c/latentsvmdetect.cpp
  40  samples/c/mser_sample.cpp
  28  samples/c/one_way_sample.cpp

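For quick reference, the pattern these hunks converge on looks roughly like the sketch below. It assumes CommandLineParser lives in the cv namespace and is declared through opencv2/core/core.hpp, as the new includes and using-directives in the samples suggest; options are passed as --name=value on the command line and read back with get<T>(name, default).

#include <iostream>
#include <string>
#include <opencv2/core/core.hpp>

using namespace cv;
using namespace std;

int main(int argc, const char** argv)
{
    // The parser tokenizes "--name=value" pairs; names are looked up without the dashes.
    CommandLineParser parser(argc, argv);

    // get<T>(name, default) returns the default when the option was not supplied.
    string input = parser.get<string>("input", "0");
    int nframes  = parser.get<int>("nframes", 300);

    cout << "input=" << input << ", nframes=" << nframes << endl;
    return 0;
}

Invoked, for example, as ./sample --input=video.avi --nframes=100; with no arguments the defaults above are used.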
modules/core/src/cmdparser.cpp
@ -35,6 +35,7 @@ void PreprocessArgs(int _argc, const char* _argv[], int& argc, char**& argv)
else if (find_symbol == 0 || find_symbol == ((int)buffer_string.length() - 1))
{
buffer_string.erase(find_symbol, (find_symbol + 1));
if(!buffer_string.empty())
buffer_vector.push_back(buffer_string);
}
else
@ -77,7 +78,14 @@ CommandLineParser::CommandLineParser(int _argc, const char* _argv[])
{
data[cur_name].push_back("");
}
cur_name=argv[i];
while (cur_name.find('-') == 0)
{
cur_name.erase(0,1);
}
was_pushed=false;
if (data.find(cur_name) != data.end())
@ -170,3 +178,4 @@ cv::Size CommandLineParser::fromStringsVec<cv::Size>(const std::vector<std::stri
return res;
}
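The constructor change above strips every leading '-' from an option token before the token is used as a key, which is why the updated samples below call get<>("nframes") rather than get<>("--nframes"). A standalone sketch of that normalization step, with normalizeOptionName as a hypothetical helper introduced purely for illustration:

#include <iostream>
#include <string>

// Hypothetical helper mirroring the dash-stripping loop in the constructor hunk above:
// "--nframes", "-nframes" and "nframes" all collapse to the same key.
static std::string normalizeOptionName(std::string name)
{
    while (name.find('-') == 0)
        name.erase(0, 1);
    return name;
}

int main()
{
    std::cout << normalizeOptionName("--nframes") << std::endl;  // prints "nframes"
    std::cout << normalizeOptionName("-input") << std::endl;     // prints "input"
    return 0;
}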

samples/c/adaptiveskindetector.cpp
@ -35,27 +35,17 @@
//M*/
#include <opencv2/core/core.hpp>
#include <opencv2/contrib/contrib.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <iostream>
#include <cstdio>
#include <cstring>
#include <ctime>
#include <opencv2/contrib/contrib.hpp>
#include <opencv2/highgui/highgui.hpp>
void help(char **argv)
{
std::cout << "\nThis program demonstrates the contributed flesh detector CvAdaptiveSkinDetector which can be found in contrib.cpp\n"
<< "Usage: " << std::endl <<
argv[0] << " fileMask firstFrame lastFrame" << std::endl << std::endl <<
"Example: " << std::endl <<
argv[0] << " C:\\VideoSequences\\sample1\\right_view\\temp_%05d.jpg 0 1000" << std::endl <<
" iterates through temp_00000.jpg to temp_01000.jpg" << std::endl << std::endl <<
"If no parameter specified, this application will try to capture from the default Webcam." << std::endl <<
"Please note: Background should not contain large surfaces with skin tone." <<
"\n\n ESC will stop\n"
"Using OpenCV version %s\n" << CV_VERSION << "\n"
<< std::endl;
}
using namespace std;
using namespace cv;
class ASDFrameHolder
{
@ -159,7 +149,6 @@ void ASDFrameHolder::setImage(IplImage *sourceImage)
//-------------------- ASDFrameSequencer -----------------------//
ASDFrameSequencer::~ASDFrameSequencer()
{
close();
@ -215,7 +204,6 @@ bool ASDCVFrameSequencer::isOpen()
//-------------------- ASDFrameSequencerWebCam -----------------------//
bool ASDFrameSequencerWebCam::open(int cameraIndex)
{
close();
@ -335,8 +323,31 @@ void displayBuffer(IplImage *rgbDestImage, IplImage *buffer, int rValue, int gVa
}
};
int main(int argc, char** argv )
void help(const char *exe_name)
{
std::cout << "\nThis program demonstrates the contributed flesh detector CvAdaptiveSkinDetector which can be found in contrib.cpp\n"
<< "Usage: " << std::endl <<
exe_name << " --fileMask --firstFrame --lastFrame" << std::endl << std::endl <<
"Example: " << std::endl <<
exe_name << " --fileMask=C:\\VideoSequences\\sample1\\right_view\\temp_%05d.jpg --firstFrame=0 --lastFrame=1000" << std::endl <<
" iterates through temp_00000.jpg to temp_01000.jpg" << std::endl << std::endl <<
"If no parameter specified, this application will try to capture from the default Webcam." << std::endl <<
"Please note: Background should not contain large surfaces with skin tone." <<
"\n\n ESC will stop\n"
"Using OpenCV version %s\n" << CV_VERSION << "\n"
<< std::endl;
}
int main(int argc, const char** argv )
{
help(argv[0]);
CommandLineParser parser(argc, argv);
string fileMask = parser.get<string>("fileMask");
int firstFrame = parser.get<int>("firstFrame", 0);
int lastFrame = parser.get<int>("lastFrame", 0);
IplImage *img, *filterMask = NULL;
CvAdaptiveSkinDetector filter(1, CvAdaptiveSkinDetector::MORPHING_METHOD_ERODE_DILATE);
ASDFrameSequencer *sequencer;
@ -347,7 +358,6 @@ int main(int argc, char** argv )
if (argc < 4)
{
help(argv);
sequencer = new ASDFrameSequencerWebCam();
(dynamic_cast<ASDFrameSequencerWebCam*>(sequencer))->open(-1);
@ -358,8 +368,9 @@ int main(int argc, char** argv )
}
else
{
// A sequence of images captured from video source, is stored here
sequencer = new ASDFrameSequencerImageFile();
(dynamic_cast<ASDFrameSequencerImageFile*>(sequencer))->open(argv[1], std::atoi(argv[2]), std::atoi(argv[3]) ); // A sequence of images captured from video source, is stored here
(dynamic_cast<ASDFrameSequencerImageFile*>(sequencer))->open(fileMask.c_str(), firstFrame, lastFrame );
}
std::sprintf(windowName, "%s", "Adaptive Skin Detection Algorithm for Video Sequences");
@ -367,10 +378,6 @@ int main(int argc, char** argv )
cvNamedWindow(windowName, CV_WINDOW_AUTOSIZE);
cvInitFont( &base_font, CV_FONT_VECTOR0, 0.5, 0.5);
// Usage:
// c:\>CvASDSample "C:\VideoSequences\sample1\right_view\temp_%05d.jpg" 0 1000
std::cout << "Press ESC to stop." << std::endl << std::endl;
while ((img = sequencer->getNextImage()) != 0)
{
numFrames++;

samples/c/bgfg_codebook.cpp
@ -25,10 +25,14 @@
#include <stdlib.h>
#include <ctype.h>
#include <opencv2/core/core.hpp>
#include <opencv2/video/background_segm.hpp>
#include <opencv2/imgproc/imgproc_c.h>
#include <opencv2/highgui/highgui.hpp>
using namespace std;
using namespace cv;
//VARIABLES for CODEBOOK METHOD:
CvBGCodeBookModel* model = 0;
const int NCHANNELS = 3;
@ -38,7 +42,9 @@ void help(void)
{
printf("\nLearn background and find foreground using simple average and average difference learning method:\n"
"Originally from the book: Learning OpenCV by O'Reilly press\n"
"\nUSAGE:\nbgfg_codebook [--nframes=300] [movie filename, else from camera]\n"
"\nUSAGE:\n"
"./bgfg_codebook [--nframes=300] \n"
" [--input = movie filename or camera index]\n"
"***Keep the focus on the video windows, NOT the consol***\n\n"
"INTERACTIVE PARAMETERS:\n"
"\tESC,q,Q - quit the program\n"
@ -65,15 +71,20 @@ void help(void)
//USAGE: ch9_background startFrameCollection# endFrameCollection# [movie filename, else from camera]
//If from AVI, then optionally add HighAvg, LowAvg, HighCB_Y LowCB_Y HighCB_U LowCB_U HighCB_V LowCB_V
//
int main(int argc, char** argv)
int main(int argc, const char** argv)
{
const char* filename = 0;
help();
CommandLineParser parser(argc, argv);
string inputName = parser.get<string>("input", "0");
int nframesToLearnBG = parser.get<int>("nframes", 300);
IplImage* rawImage = 0, *yuvImage = 0; //yuvImage is for codebook method
IplImage *ImaskCodeBook = 0,*ImaskCodeBookCC = 0;
CvCapture* capture = 0;
int c, n, nframes = 0;
int nframesToLearnBG = 300;
model = cvCreateBGCodeBookModel();
@ -87,39 +98,31 @@ int main(int argc, char** argv)
bool pause = false;
bool singlestep = false;
for( n = 1; n < argc; n++ )
{
static const char* nframesOpt = "--nframes=";
if( strncmp(argv[n], nframesOpt, strlen(nframesOpt))==0 )
if( inputName.empty() || (isdigit(inputName.c_str()[0]) && inputName.c_str()[1] == '\0') )
{
if( sscanf(argv[n] + strlen(nframesOpt), "%d", &nframesToLearnBG) == 0 )
printf("Capture from camera\n");
capture = cvCaptureFromCAM( inputName.empty() ? 0 : inputName.c_str()[0] - '0' );
int c = inputName.empty() ? 0 : inputName.c_str()[0] - '0' ;
if( !capture)
{
help();
return -1;
printf ("Capture from CAM %d", c);
printf (" didn't work\n");
}
}
else
filename = argv[n];
}
if( !filename )
{
printf("Capture from camera\n");
capture = cvCaptureFromCAM( 0 );
}
else
{
printf("Capture from file %s\n",filename);
capture = cvCreateFileCapture( filename );
}
printf("Capture from file %s\n",inputName.c_str());
capture = cvCreateFileCapture(inputName.c_str());
if( !capture)
{
printf( "Can not initialize video capturing\n\n" );
printf ("Capture from file %s", inputName.c_str());
printf (" didn't work\n");
help();
return -1;
}
}
//MAIN PROCESSING LOOP:
for(;;)
{
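A condensed sketch of the input-selection branch introduced above: a --input value consisting of a single digit is treated as a camera index, anything else as a video file name. openInput is a hypothetical helper used only for illustration; the legacy capture calls are the same ones the sample uses.

#include <cctype>
#include <string>
#include <opencv2/highgui/highgui.hpp>

// Hypothetical helper: a single-digit name selects a camera, otherwise open a file.
static CvCapture* openInput(const std::string& inputName)
{
    if (inputName.empty() || (std::isdigit((unsigned char)inputName[0]) && inputName.size() == 1))
        return cvCaptureFromCAM(inputName.empty() ? 0 : inputName[0] - '0');
    return cvCreateFileCapture(inputName.c_str());
}

int main()
{
    CvCapture* capture = openInput("0");   // "0" -> default webcam; "movie.avi" -> file
    if (!capture)
        return -1;
    cvReleaseCapture(&capture);
    return 0;
}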

samples/c/facedetect.cpp
@ -34,18 +34,17 @@ int main( int argc, const char** argv )
CommandLineParser parser(argc, argv);
string cascadeName = parser.get<string>("--cascade", "../../data/haarcascades/haarcascade_frontalface_alt.xml");
string cascadeName = parser.get<string>("cascade", "../../data/haarcascades/haarcascade_frontalface_alt.xml");
string nestedCascadeName = parser.get<string>("nested-cascade", "../../data/haarcascades/haarcascade_eye_tree_eyeglasses.xml");
double scale = parser.get<double>("scale", 1.0);
string inputName = parser.get<string>("input", "0"); //read from camera by default
if (!cascadeName.empty())
cout << " from which we have cascadeName= " << cascadeName << endl;
string nestedCascadeName = parser.get<string>("--nested-cascade", "../../data/haarcascades/haarcascade_eye_tree_eyeglasses.xml");
if (!nestedCascadeName.empty())
cout << " from which we have nestedCascadeName= " << nestedCascadeName << endl;
double scale = parser.get<double>("--scale", 1.0);
string inputName = parser.get<string>("--input", "0"); //read from camera by default
CvCapture* capture = 0;
Mat frame, frameCopy, image;
CascadeClassifier cascade, nestedCascade;

samples/c/find_obj.cpp
@ -4,6 +4,7 @@
* Author: Liu Liu
* liuliu.1987+opencv@gmail.com
*/
#include <opencv2/core/core.hpp>
#include <opencv2/objdetect/objdetect.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/highgui/highgui.hpp>
@ -14,16 +15,17 @@
#include <vector>
using namespace std;
using namespace cv;
void help()
{
printf(
"This program demonstrated the use of the SURF Detector and Descriptor using\n"
printf( "This program demonstrated the use of the SURF Detector and Descriptor using\n"
"either FLANN (fast approx nearst neighbor classification) or brute force matching\n"
"on planar objects.\n"
"Call:\n"
"./find_obj [<object_filename default box.png> <scene_filename default box_in_scene.png>]\n\n"
"./find_obj [--object_filename]=<object_filename, box.png as default> \n"
"[--scene_filename]=<scene_filename box_in_scene.png as default>]\n\n"
);
}
// define whether to use approximate nearest-neighbor search
@ -209,13 +211,16 @@ locatePlanarObject( const CvSeq* objectKeypoints, const CvSeq* objectDescriptors
return 1;
}
int main(int argc, char** argv)
int main(int argc, const char** argv)
{
const char* object_filename = argc == 3 ? argv[1] : "box.png";
const char* scene_filename = argc == 3 ? argv[2] : "box_in_scene.png";
help();
CommandLineParser parser(argc, argv);
string objectFileName = parser.get<string>("object_filename", "box.png");
string sceneFileName = parser.get<string>("scene_filename", "box_in_scene.png");
CvMemStorage* storage = cvCreateMemStorage(0);
help();
cvNamedWindow("Object", 1);
cvNamedWindow("Object Correspond", 1);
@ -232,13 +237,11 @@ int main(int argc, char** argv)
{{255,255,255}}
};
IplImage* object = cvLoadImage( object_filename, CV_LOAD_IMAGE_GRAYSCALE );
IplImage* image = cvLoadImage( scene_filename, CV_LOAD_IMAGE_GRAYSCALE );
IplImage* object = cvLoadImage( objectFileName.c_str(), CV_LOAD_IMAGE_GRAYSCALE );
IplImage* image = cvLoadImage( sceneFileName.c_str(), CV_LOAD_IMAGE_GRAYSCALE );
if( !object || !image )
{
fprintf( stderr, "Can not load %s and/or %s\n"
"Usage: find_obj [<object_filename> <scene_filename>]\n",
object_filename, scene_filename );
fprintf( stderr, "Can not load %s and/or %s\n", objectFileName.c_str(), sceneFileName.c_str() );
exit(-1);
}
IplImage* object_color = cvCreateImage(cvGetSize(object), 8, 3);

samples/c/find_obj_ferns.cpp
@ -9,30 +9,35 @@
#include <vector>
using namespace cv;
void help()
{
printf( "This program shows the use of the \"fern\" plannar PlanarObjectDetector point\n"
"descriptor classifier"
"Usage:\n"
"./find_obj_ferns [<object_filename default: box.png> <scene_filename default:box_in_scene.png>]\n"
"\n");
"./find_obj_ferns [--object_filename]=<object_filename, box.png as default> \n"
"[--scene_filename]=<scene_filename box_in_scene.png as default>]\n\n");
}
int main(int argc, char** argv)
int main(int argc, const char** argv)
{
const char* object_filename = argc > 1 ? argv[1] : "box.png";
const char* scene_filename = argc > 2 ? argv[2] : "box_in_scene.png";
int i;
help();
CommandLineParser parser(argc, argv);
string objectFileName = parser.get<string>("object_filename", "box.png");
string sceneFileName = parser.get<string>("scene_filename", "box_in_scene.png");
cvNamedWindow("Object", 1);
cvNamedWindow("Image", 1);
cvNamedWindow("Object Correspondence", 1);
Mat object = imread( object_filename, CV_LOAD_IMAGE_GRAYSCALE );
Mat object = imread( objectFileName.c_str(), CV_LOAD_IMAGE_GRAYSCALE );
Mat image;
double imgscale = 1;
Mat _image = imread( scene_filename, CV_LOAD_IMAGE_GRAYSCALE );
Mat _image = imread( sceneFileName.c_str(), CV_LOAD_IMAGE_GRAYSCALE );
resize(_image, image, Size(), 1./imgscale, 1./imgscale, INTER_CUBIC);
@ -40,7 +45,7 @@ int main(int argc, char** argv)
{
fprintf( stderr, "Can not load %s and/or %s\n"
"Usage: find_obj_ferns [<object_filename> <scene_filename>]\n",
object_filename, scene_filename );
objectFileName.c_str(), sceneFileName.c_str() );
exit(-1);
}
@ -60,7 +65,7 @@ int main(int argc, char** argv)
vector<KeyPoint> objKeypoints, imgKeypoints;
PatchGenerator gen(0,256,5,true,0.8,1.2,-CV_PI/2,CV_PI/2,-CV_PI/2,CV_PI/2);
string model_filename = format("%s_model.xml.gz", object_filename);
string model_filename = format("%s_model.xml.gz", objectFileName.c_str());
printf("Trying to load %s ...\n", model_filename.c_str());
FileStorage fs(model_filename, FileStorage::READ);
if( fs.isOpened() )
@ -106,6 +111,7 @@ int main(int argc, char** argv)
t = (double)getTickCount() - t;
printf("%gms\n", t*1000/getTickFrequency());
int i = 0;
if( found )
{
for( i = 0; i < 4; i++ )

samples/c/latentsvmdetect.cpp
@ -1,5 +1,7 @@
#include "opencv2/core/core.hpp"
#include "opencv2/objdetect/objdetect.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <stdio.h>
#ifdef HAVE_CONFIG_H
@ -16,14 +18,13 @@ void help()
printf( "This program demonstrated the use of the latentSVM detector.\n"
"It reads in a trained object model and then uses that to detect the object in an image\n"
"Call:\n"
"./latentsvmdetect [<image_filename> <model_filename> [<threads_number>]]\n"
"./latentsvmdetect [--image_filename]=<image_filename, cat.jpg as default> \n"
" [--model_filename] = <model_filename, cat.xml as default> \n"
" [--threads_number] = <number of threads, -1 as default>\n"
" The defaults for image_filename and model_filename are cat.jpg and cat.xml respectively\n"
" Press any key to quit.\n");
}
const char* model_filename = "cat.xml";
const char* image_filename = "cat.jpg";
int tbbNumThreads = -1;
void detect_and_draw_objects( IplImage* image, CvLatentSvmDetector* detector, int numThreads = -1)
{
@ -44,7 +45,6 @@ void detect_and_draw_objects( IplImage* image, CvLatentSvmDetector* detector, in
return;
}
#endif
start = cvGetTickCount();
detections = cvLatentSvmDetectObjects(image, detector, storage, 0.5f, numThreads);
finish = cvGetTickCount();
@ -65,26 +65,24 @@ void detect_and_draw_objects( IplImage* image, CvLatentSvmDetector* detector, in
cvReleaseMemStorage( &storage );
}
int main(int argc, char* argv[])
int main(int argc, const char* argv[])
{
help();
if (argc > 2)
{
image_filename = argv[1];
model_filename = argv[2];
if (argc > 3)
{
tbbNumThreads = atoi(argv[3]);
}
}
IplImage* image = cvLoadImage(image_filename);
CommandLineParser parser(argc, argv);
string imageFileName = parser.get<string>("image_filename", "cat.jpg");
string modelFileName = parser.get<string>("model_filename", "cat.xml");
int tbbNumThreads = parser.get<int>("threads_number", -1);
IplImage* image = cvLoadImage(imageFileName.c_str());
if (!image)
{
printf( "Unable to load the image\n"
"Pass it as the first parameter: latentsvmdetect <path to cat.jpg> <path to cat.xml>\n" );
return -1;
}
CvLatentSvmDetector* detector = cvLoadLatentSvmDetector(model_filename);
CvLatentSvmDetector* detector = cvLoadLatentSvmDetector(modelFileName.c_str());
if (!detector)
{
printf( "Unable to load the model\n"
@ -92,7 +90,9 @@ int main(int argc, char* argv[])
cvReleaseImage( &image );
return -1;
}
detect_and_draw_objects( image, detector, tbbNumThreads );
cvNamedWindow( "test", 0 );
cvShowImage( "test", image );
cvWaitKey(0);

samples/c/mser_sample.cpp
@ -2,17 +2,22 @@
* Copyright© 2009, Liu Liu All rights reserved.
*/
#include <opencv2/core/core.hpp>
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/imgproc/imgproc_c.h"
#include <iostream>
using namespace std;
using namespace cv;
void help()
{
printf("\nThis program demonstrates the Maximal Extremal Region interest point detector.\n"
"It finds the most stable (in size) dark and white regions as a threshold is increased.\n"
"\nCall:\n"
"./mser_sample <path_and_image_filename, Default is 'puzzle.png'>\n\n");
"./mser_sample [--image_filename] <path_and_image_filename, default is 'puzzle.png'>\n\n");
}
static CvScalar colors[] =
@ -44,33 +49,25 @@ static uchar bcolors[][3] =
};
int main( int argc, char** argv )
int main( int argc, const char** argv )
{
char path[1024];
IplImage* img;
help();
if (argc!=2)
{
strcpy(path,"puzzle.png");
img = cvLoadImage( path, CV_LOAD_IMAGE_GRAYSCALE );
if (!img)
{
printf("\nUsage: mser_sample <path_to_image>\n");
return 0;
}
}
else
{
strcpy(path,argv[1]);
img = cvLoadImage( path, CV_LOAD_IMAGE_GRAYSCALE );
}
CommandLineParser parser(argc, argv);
string imageFileName = parser.get<string>("image_filename", "puzzle.png");
IplImage* img;
img = cvLoadImage( imageFileName.c_str(), CV_LOAD_IMAGE_GRAYSCALE );
if (!img)
{
printf("Unable to load image %s\n",path);
printf("Unable to load image %s\n",imageFileName.c_str());
help();
return 0;
}
IplImage* rsp = cvLoadImage( path, CV_LOAD_IMAGE_COLOR );
IplImage* rsp = cvLoadImage( imageFileName.c_str(), CV_LOAD_IMAGE_COLOR );
IplImage* ellipses = cvCloneImage(rsp);
cvCvtColor(img,ellipses,CV_GRAY2BGR);
CvSeq* contours;
@ -129,5 +126,4 @@ int main( int argc, char** argv )
cvReleaseImage(&rsp);
cvReleaseImage(&img);
cvReleaseImage(&ellipses);
}

samples/c/one_way_sample.cpp
@ -7,18 +7,20 @@
*
*/
#include <opencv2/core/core.hpp>
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc_c.h"
#include <string>
void help()
{
printf("\nThis program demonstrates the one way interest point descriptor found in features2d.hpp\n"
"Correspondences are drawn\n");
printf("Format: \n./one_way_sample [path_to_samples] [image1] [image2]\n");
printf("For example: ./one_way_sample ../../../opencv/samples/c scene_l.bmp scene_r.bmp\n");
printf("Format: \n./one_way_sample <path_to_samples> <image1> <image2>\n");
printf("For example: ./one_way_sample --path=../../../opencv/samples/c --first_image=scene_l.bmp --second_image=scene_r.bmp\n");
}
using namespace cv;
@ -26,21 +28,19 @@ using namespace cv;
IplImage* DrawCorrespondences(IplImage* img1, const vector<KeyPoint>& features1, IplImage* img2,
const vector<KeyPoint>& features2, const vector<int>& desc_idx);
int main(int argc, char** argv)
{
const char images_list[] = "one_way_train_images.txt";
const CvSize patch_size = cvSize(24, 24);
const int pose_count = 50;
if (argc != 3 && argc != 4)
int main(int argc, const char** argv)
{
help();
return 0;
}
std::string path_name = argv[1];
std::string img1_name = path_name + "/" + std::string(argv[2]);
std::string img2_name = path_name + "/" + std::string(argv[3]);
CommandLineParser parser(argc, argv);
std::string path_name = parser.get<string>("path", "../../../opencv/samples/c");
std::string img1_name = path_name + "/" + parser.get<string>("first_image", "scene_l.bmp");
std::string img2_name = path_name + "/" + parser.get<string>("second_image", "scene_r.bmp");
const char images_list[] = "one_way_train_images.txt";
const CvSize patch_size = cvSize(24, 24);
const int pose_count = 1; //50
printf("Reading the images...\n");
IplImage* img1 = cvLoadImage(img1_name.c_str(), CV_LOAD_IMAGE_GRAYSCALE);
