mirror of https://github.com/opencv/opencv.git
Merge pull request #25075 from mshabunin:cleanup-imgproc-1
C-API cleanup: apps, imgproc_c and some constants #25075

Merge with https://github.com/opencv/opencv_contrib/pull/3642

* Removed the obsolete traincascade and createsamples apps (please use older OpenCV versions if you need them); these apps relied heavily on the C-API.
* Removed all mentions of the imgproc C-API headers (imgproc_c.h, types_c.h): they were empty and only included the core C-API headers.
* Replaced usage of several C constants with their C++ counterparts (error codes, norm modes, RNG modes, PCA modes, ...). This is the largest part of the PR and is split into two parts (all modules, and calib+3d) for easier backporting.
* Removed the imgproc C-API headers themselves (as a separate commit, so that the other changes can be backported to 4.x).

Most of these changes can be backported to 4.x.

pull/25216/head
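The constant and header migration that makes up most of this PR follows a simple mechanical pattern. The snippet below is a minimal illustrative sketch, not copied from the diff itself; it uses cv::Error::StsBadArg, cv::NORM_L2 and cv::RNG as stand-ins for the error codes, norm modes and RNG modes mentioned above.

```cpp
// Illustrative migration sketch (not taken from this diff): the legacy C constants
// named in the comments are replaced by their C++ counterparts.
#include "opencv2/core.hpp"
#include "opencv2/imgproc.hpp"   // instead of "opencv2/imgproc/imgproc_c.h"

static double checkAndMeasure(const cv::Mat& img)
{
    if (img.empty())
        CV_Error(cv::Error::StsBadArg, "empty input");   // error codes: was CV_StsBadArg

    double n = cv::norm(img, cv::NORM_L2);                // norm modes: was CV_L2

    cv::RNG rng(12345);
    return rng.uniform(0.0, n);                           // RNG modes: cv::RNG::UNIFORM vs CV_RAND_UNI
}
```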
parent 1d1faaabef
commit 8cbdd0c833
152 changed files with 820 additions and 18160 deletions
@@ -1,4 +0,0 @@
file(GLOB SRCS *.cpp)
ocv_add_application(opencv_createsamples
                    MODULES opencv_core opencv_imgproc opencv_objdetect opencv_imgcodecs opencv_highgui opencv_3d opencv_features2d opencv_videoio
                    SRCS ${SRCS})
@@ -1,258 +0,0 @@
||||
/*M///////////////////////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
|
||||
//
|
||||
// By downloading, copying, installing or using the software you agree to this license.
|
||||
// If you do not agree to this license, do not download, install,
|
||||
// copy or use the software.
|
||||
//
|
||||
//
|
||||
// Intel License Agreement
|
||||
// For Open Source Computer Vision Library
|
||||
//
|
||||
// Copyright (C) 2000, Intel Corporation, all rights reserved.
|
||||
// Third party copyrights are property of their respective owners.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without modification,
|
||||
// are permitted provided that the following conditions are met:
|
||||
//
|
||||
// * Redistribution's of source code must retain the above copyright notice,
|
||||
// this list of conditions and the following disclaimer.
|
||||
//
|
||||
// * Redistribution's in binary form must reproduce the above copyright notice,
|
||||
// this list of conditions and the following disclaimer in the documentation
|
||||
// and/or other materials provided with the distribution.
|
||||
//
|
||||
// * The name of Intel Corporation may not be used to endorse or promote products
|
||||
// derived from this software without specific prior written permission.
|
||||
//
|
||||
// This software is provided by the copyright holders and contributors "as is" and
|
||||
// any express or implied warranties, including, but not limited to, the implied
|
||||
// warranties of merchantability and fitness for a particular purpose are disclaimed.
|
||||
// In no event shall the Intel Corporation or contributors be liable for any direct,
|
||||
// indirect, incidental, special, exemplary, or consequential damages
|
||||
// (including, but not limited to, procurement of substitute goods or services;
|
||||
// loss of use, data, or profits; or business interruption) however caused
|
||||
// and on any theory of liability, whether in contract, strict liability,
|
||||
// or tort (including negligence or otherwise) arising in any way out of
|
||||
// the use of this software, even if advised of the possibility of such damage.
|
||||
//
|
||||
//M*/
|
||||
|
||||
/*
|
||||
* createsamples.cpp |
||||
* |
||||
* Create test/training samples |
||||
*/ |
||||
|
||||
#include "opencv2/core.hpp" |
||||
#include "utility.hpp" |
||||
#include <cstdio> |
||||
#include <cstring> |
||||
#include <cstdlib> |
||||
#include <cmath> |
||||
|
||||
using namespace std; |
||||
|
||||
int main( int argc, char* argv[] ) |
||||
{ |
||||
int i = 0; |
||||
char* nullname = (char*)"(NULL)"; |
||||
char* vecname = NULL; /* .vec file name */ |
||||
char* infoname = NULL; /* file name with marked up image descriptions */ |
||||
char* imagename = NULL; /* single sample image */ |
||||
char* bgfilename = NULL; /* background */ |
||||
int num = 1000; |
||||
int bgcolor = 0; |
||||
int bgthreshold = 80; |
||||
int invert = 0; |
||||
int maxintensitydev = 40; |
||||
double maxxangle = 1.1; |
||||
double maxyangle = 1.1; |
||||
double maxzangle = 0.5; |
||||
int showsamples = 0; |
||||
/* the samples are adjusted to this scale in the sample preview window */ |
||||
double scale = 4.0; |
||||
int width = 24; |
||||
int height = 24; |
||||
double maxscale = -1.0; |
||||
int rngseed = 12345; |
||||
|
||||
if( argc == 1 ) |
||||
{ |
||||
printf( "Usage: %s\n [-info <collection_file_name>]\n" |
||||
" [-img <image_file_name>]\n" |
||||
" [-vec <vec_file_name>]\n" |
||||
" [-bg <background_file_name>]\n [-num <number_of_samples = %d>]\n" |
||||
" [-bgcolor <background_color = %d>]\n" |
||||
" [-inv] [-randinv] [-bgthresh <background_color_threshold = %d>]\n" |
||||
" [-maxidev <max_intensity_deviation = %d>]\n" |
||||
" [-maxxangle <max_x_rotation_angle = %f>]\n" |
||||
" [-maxyangle <max_y_rotation_angle = %f>]\n" |
||||
" [-maxzangle <max_z_rotation_angle = %f>]\n" |
||||
" [-show [<scale = %f>]]\n" |
||||
" [-w <sample_width = %d>]\n [-h <sample_height = %d>]\n" |
||||
" [-maxscale <max sample scale = %f>]\n" |
||||
" [-rngseed <rng seed = %d>]\n", |
||||
argv[0], num, bgcolor, bgthreshold, maxintensitydev, |
||||
maxxangle, maxyangle, maxzangle, scale, width, height, maxscale, rngseed ); |
||||
|
||||
return 0; |
||||
} |
||||
|
||||
for( i = 1; i < argc; ++i ) |
||||
{ |
||||
if( !strcmp( argv[i], "-info" ) ) |
||||
{ |
||||
infoname = argv[++i]; |
||||
} |
||||
else if( !strcmp( argv[i], "-img" ) ) |
||||
{ |
||||
imagename = argv[++i]; |
||||
} |
||||
else if( !strcmp( argv[i], "-vec" ) ) |
||||
{ |
||||
vecname = argv[++i]; |
||||
} |
||||
else if( !strcmp( argv[i], "-bg" ) ) |
||||
{ |
||||
bgfilename = argv[++i]; |
||||
} |
||||
else if( !strcmp( argv[i], "-num" ) ) |
||||
{ |
||||
num = atoi( argv[++i] ); |
||||
} |
||||
else if( !strcmp( argv[i], "-bgcolor" ) ) |
||||
{ |
||||
bgcolor = atoi( argv[++i] ); |
||||
} |
||||
else if( !strcmp( argv[i], "-bgthresh" ) ) |
||||
{ |
||||
bgthreshold = atoi( argv[++i] ); |
||||
} |
||||
else if( !strcmp( argv[i], "-inv" ) ) |
||||
{ |
||||
invert = 1; |
||||
} |
||||
else if( !strcmp( argv[i], "-randinv" ) ) |
||||
{ |
||||
invert = CV_RANDOM_INVERT; |
||||
} |
||||
else if( !strcmp( argv[i], "-maxidev" ) ) |
||||
{ |
||||
maxintensitydev = atoi( argv[++i] ); |
||||
} |
||||
else if( !strcmp( argv[i], "-maxxangle" ) ) |
||||
{ |
||||
maxxangle = atof( argv[++i] ); |
||||
} |
||||
else if( !strcmp( argv[i], "-maxyangle" ) ) |
||||
{ |
||||
maxyangle = atof( argv[++i] ); |
||||
} |
||||
else if( !strcmp( argv[i], "-maxzangle" ) ) |
||||
{ |
||||
maxzangle = atof( argv[++i] ); |
||||
} |
||||
else if( !strcmp( argv[i], "-show" ) ) |
||||
{ |
||||
showsamples = 1; |
||||
if( i+1 < argc && strlen( argv[i+1] ) > 0 && argv[i+1][0] != '-' ) |
||||
{ |
||||
double d; |
||||
d = strtod( argv[i+1], 0 ); |
||||
if( d != -HUGE_VAL && d != HUGE_VAL && d > 0 ) scale = d; |
||||
++i; |
||||
} |
||||
} |
||||
else if( !strcmp( argv[i], "-w" ) ) |
||||
{ |
||||
width = atoi( argv[++i] ); |
||||
} |
||||
else if( !strcmp( argv[i], "-h" ) ) |
||||
{ |
||||
height = atoi( argv[++i] ); |
||||
} |
||||
else if( !strcmp( argv[i], "-maxscale" ) ) |
||||
{ |
||||
maxscale = atof( argv[++i] ); |
||||
} |
||||
else if (!strcmp(argv[i], "-rngseed")) |
||||
{ |
||||
rngseed = atoi(argv[++i]); |
||||
} |
||||
} |
||||
|
||||
cv::setRNGSeed( rngseed ); |
||||
|
||||
printf( "Info file name: %s\n", ((infoname == NULL) ? nullname : infoname ) ); |
||||
printf( "Img file name: %s\n", ((imagename == NULL) ? nullname : imagename ) ); |
||||
printf( "Vec file name: %s\n", ((vecname == NULL) ? nullname : vecname ) ); |
||||
printf( "BG file name: %s\n", ((bgfilename == NULL) ? nullname : bgfilename ) ); |
||||
printf( "Num: %d\n", num ); |
||||
printf( "BG color: %d\n", bgcolor ); |
||||
printf( "BG threshold: %d\n", bgthreshold ); |
||||
printf( "Invert: %s\n", (invert == CV_RANDOM_INVERT) ? "RANDOM" |
||||
: ( (invert) ? "TRUE" : "FALSE" ) ); |
||||
printf( "Max intensity deviation: %d\n", maxintensitydev ); |
||||
printf( "Max x angle: %g\n", maxxangle ); |
||||
printf( "Max y angle: %g\n", maxyangle ); |
||||
printf( "Max z angle: %g\n", maxzangle ); |
||||
printf( "Show samples: %s\n", (showsamples) ? "TRUE" : "FALSE" ); |
||||
if( showsamples ) |
||||
{ |
||||
printf( "Scale: %g\n", scale ); |
||||
} |
||||
printf( "Width: %d\n", width ); |
||||
printf( "Height: %d\n", height ); |
||||
printf( "Max Scale: %g\n", maxscale ); |
||||
printf( "RNG Seed: %d\n", rngseed ); |
||||
|
||||
/* determine action */ |
||||
if( imagename && vecname ) |
||||
{ |
||||
printf( "Create training samples from single image applying distortions...\n" ); |
||||
|
||||
cvCreateTrainingSamples( vecname, imagename, bgcolor, bgthreshold, bgfilename, |
||||
num, invert, maxintensitydev, |
||||
maxxangle, maxyangle, maxzangle, |
||||
showsamples, width, height ); |
||||
|
||||
printf( "Done\n" ); |
||||
} |
||||
else if( imagename && bgfilename && infoname ) |
||||
{ |
||||
printf( "Create test samples from single image applying distortions...\n" ); |
||||
|
||||
cvCreateTestSamples( infoname, imagename, bgcolor, bgthreshold, bgfilename, num, |
||||
invert, maxintensitydev, |
||||
maxxangle, maxyangle, maxzangle, showsamples, width, height, maxscale); |
||||
|
||||
printf( "Done\n" ); |
||||
} |
||||
else if( infoname && vecname ) |
||||
{ |
||||
int total; |
||||
|
||||
printf( "Create training samples from images collection...\n" ); |
||||
|
||||
total = cvCreateTrainingSamplesFromInfo( infoname, vecname, num, showsamples, |
||||
width, height ); |
||||
|
||||
printf( "Done. Created %d samples\n", total ); |
||||
} |
||||
else if( vecname ) |
||||
{ |
||||
printf( "View samples from vec file (press ESC to exit)...\n" ); |
||||
|
||||
cvShowVecSamples( vecname, width, height, scale ); |
||||
|
||||
printf( "Done\n" ); |
||||
} |
||||
else |
||||
{ |
||||
printf( "Nothing to do\n" ); |
||||
} |
||||
|
||||
return 0; |
||||
} |
File diff suppressed because it is too large
@@ -1,124 +0,0 @@
||||
/*M///////////////////////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
|
||||
//
|
||||
// By downloading, copying, installing or using the software you agree to this license.
|
||||
// If you do not agree to this license, do not download, install,
|
||||
// copy or use the software.
|
||||
//
|
||||
//
|
||||
// Intel License Agreement
|
||||
// For Open Source Computer Vision Library
|
||||
//
|
||||
// Copyright (C) 2000, Intel Corporation, all rights reserved.
|
||||
// Third party copyrights are property of their respective owners.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without modification,
|
||||
// are permitted provided that the following conditions are met:
|
||||
//
|
||||
// * Redistribution's of source code must retain the above copyright notice,
|
||||
// this list of conditions and the following disclaimer.
|
||||
//
|
||||
// * Redistribution's in binary form must reproduce the above copyright notice,
|
||||
// this list of conditions and the following disclaimer in the documentation
|
||||
// and/or other materials provided with the distribution.
|
||||
//
|
||||
// * The name of Intel Corporation may not be used to endorse or promote products
|
||||
// derived from this software without specific prior written permission.
|
||||
//
|
||||
// This software is provided by the copyright holders and contributors "as is" and
|
||||
// any express or implied warranties, including, but not limited to, the implied
|
||||
// warranties of merchantability and fitness for a particular purpose are disclaimed.
|
||||
// In no event shall the Intel Corporation or contributors be liable for any direct,
|
||||
// indirect, incidental, special, exemplary, or consequential damages
|
||||
// (including, but not limited to, procurement of substitute goods or services;
|
||||
// loss of use, data, or profits; or business interruption) however caused
|
||||
// and on any theory of liability, whether in contract, strict liability,
|
||||
// or tort (including negligence or otherwise) arising in any way out of
|
||||
// the use of this software, even if advised of the possibility of such damage.
|
||||
//
|
||||
//M*/
|

#ifndef __CREATESAMPLES_UTILITY_HPP__
#define __CREATESAMPLES_UTILITY_HPP__

#define CV_VERBOSE 1

/*
 * cvCreateTrainingSamples
 *
 * Create training samples applying random distortions to a sample image and
 * store them in a .vec file
 *
 * filename        - .vec file name
 * imgfilename     - sample image file name
 * bgcolor         - background color for the sample image
 * bgthreshold     - background color threshold. Pixels whose colors are in range
 *                   [bgcolor-bgthreshold, bgcolor+bgthreshold] are considered transparent
 * bgfilename      - background description file name. If not NULL, samples
 *                   will be put on an arbitrary background
 * count           - desired number of samples
 * invert          - if not 0, sample foreground pixels will be inverted;
 *                   if invert == CV_RANDOM_INVERT then samples will be inverted randomly
 * maxintensitydev - desired max intensity deviation of foreground sample pixels
 * maxxangle       - max rotation angles
 * maxyangle
 * maxzangle
 * showsamples     - if not 0, samples will be shown
 * winwidth        - desired sample width
 * winheight       - desired sample height
 */
#define CV_RANDOM_INVERT 0x7FFFFFFF

void cvCreateTrainingSamples( const char* filename,
                              const char* imgfilename, int bgcolor, int bgthreshold,
                              const char* bgfilename, int count,
                              int invert = 0, int maxintensitydev = 40,
                              double maxxangle = 1.1,
                              double maxyangle = 1.1,
                              double maxzangle = 0.5,
                              int showsamples = 0,
                              int winwidth = 24, int winheight = 24 );

void cvCreateTestSamples( const char* infoname,
                          const char* imgfilename, int bgcolor, int bgthreshold,
                          const char* bgfilename, int count,
                          int invert, int maxintensitydev,
                          double maxxangle, double maxyangle, double maxzangle,
                          int showsamples,
                          int winwidth, int winheight, double maxscale );

/*
 * cvCreateTrainingSamplesFromInfo
 *
 * Create training samples from a set of marked up images and store them into a .vec file
 *
 * infoname    - file in which marked up image descriptions are stored
 * num         - desired number of samples
 * showsamples - if not 0, samples will be shown
 * winwidth    - sample width
 * winheight   - sample height
 *
 * Returns the number of successfully created samples
 */
int cvCreateTrainingSamplesFromInfo( const char* infoname, const char* vecfilename,
                                     int num,
                                     int showsamples,
                                     int winwidth, int winheight );

/*
 * cvShowVecSamples
 *
 * Shows samples stored in a .vec file
 *
 * filename  - .vec file name
 * winwidth  - sample width
 * winheight - sample height
 * scale     - the scale each sample is adjusted to
 */
void cvShowVecSamples( const char* filename, int winwidth, int winheight, double scale );

#endif //__CREATESAMPLES_UTILITY_HPP__
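For readers who still need the removed createsamples pipeline, the header above is its entire public surface. Below is a minimal usage sketch, assuming an older OpenCV checkout that still ships apps/createsamples; the file names and parameter values are hypothetical.

```cpp
// Hypothetical driver for the removed createsamples helpers (builds only against
// older OpenCV trees that still contain apps/createsamples).
#include "opencv2/core.hpp"
#include "utility.hpp"   // the header shown in the hunk above

int main()
{
    cv::setRNGSeed( 12345 );

    // Write 1000 distorted 24x24 samples of object.png, composited over the
    // backgrounds listed in bg.txt, into samples.vec (mirrors "-img -vec -bg").
    cvCreateTrainingSamples( "samples.vec", "object.png",
                             /*bgcolor*/ 0, /*bgthreshold*/ 80,
                             "bg.txt", /*count*/ 1000 );

    // Preview the generated samples at 4x scale (ESC to exit).
    cvShowVecSamples( "samples.vec", 24, 24, 4.0 );
    return 0;
}
```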
@@ -1,5 +0,0 @@
ocv_warnings_disable(CMAKE_CXX_FLAGS -Woverloaded-virtual -Winconsistent-missing-override -Wsuggest-override)
file(GLOB SRCS *.cpp)
ocv_add_application(opencv_traincascade
                    MODULES opencv_core opencv_imgproc opencv_objdetect opencv_imgcodecs opencv_highgui opencv_3d opencv_features2d
                    SRCS ${SRCS})
@@ -1,250 +0,0 @@
||||
#include "opencv2/core.hpp" |
||||
#include "opencv2/imgproc.hpp" |
||||
|
||||
#include "HOGfeatures.h" |
||||
#include "cascadeclassifier.h" |
||||
|
||||
using namespace std; |
||||
using namespace cv; |
||||
|
||||
CvHOGFeatureParams::CvHOGFeatureParams() |
||||
{ |
||||
maxCatCount = 0; |
||||
name = HOGF_NAME; |
||||
featSize = N_BINS * N_CELLS; |
||||
} |
||||
|
||||
void CvHOGEvaluator::init(const CvFeatureParams *_featureParams, int _maxSampleCount, Size _winSize) |
||||
{ |
||||
CV_Assert( _maxSampleCount > 0); |
||||
int cols = (_winSize.width + 1) * (_winSize.height + 1); |
||||
for (int bin = 0; bin < N_BINS; bin++) |
||||
{ |
||||
hist.push_back(Mat(_maxSampleCount, cols, CV_32FC1)); |
||||
} |
||||
normSum.create( (int)_maxSampleCount, cols, CV_32FC1 ); |
||||
CvFeatureEvaluator::init( _featureParams, _maxSampleCount, _winSize ); |
||||
} |
||||
|
||||
void CvHOGEvaluator::setImage(const Mat &img, uchar clsLabel, int idx) |
||||
{ |
||||
CV_DbgAssert( !hist.empty()); |
||||
CvFeatureEvaluator::setImage( img, clsLabel, idx ); |
||||
vector<Mat> integralHist; |
||||
for (int bin = 0; bin < N_BINS; bin++) |
||||
{ |
||||
integralHist.push_back( Mat(winSize.height + 1, winSize.width + 1, hist[bin].type(), hist[bin].ptr<float>((int)idx)) ); |
||||
} |
||||
Mat integralNorm(winSize.height + 1, winSize.width + 1, normSum.type(), normSum.ptr<float>((int)idx)); |
||||
integralHistogram(img, integralHist, integralNorm, (int)N_BINS); |
||||
} |
||||
|
||||
//void CvHOGEvaluator::writeFeatures( FileStorage &fs, const Mat& featureMap ) const
|
||||
//{
|
||||
// _writeFeatures( features, fs, featureMap );
|
||||
//}
|
||||
|
||||
void CvHOGEvaluator::writeFeatures( FileStorage &fs, const Mat& featureMap ) const |
||||
{ |
||||
int featIdx; |
||||
int componentIdx; |
||||
const Mat_<int>& featureMap_ = (const Mat_<int>&)featureMap; |
||||
fs << FEATURES << "["; |
||||
for ( int fi = 0; fi < featureMap.cols; fi++ ) |
||||
if ( featureMap_(0, fi) >= 0 ) |
||||
{ |
||||
fs << "{"; |
||||
featIdx = fi / getFeatureSize(); |
||||
componentIdx = fi % getFeatureSize(); |
||||
features[featIdx].write( fs, componentIdx ); |
||||
fs << "}"; |
||||
} |
||||
fs << "]"; |
||||
} |
||||
|
||||
void CvHOGEvaluator::generateFeatures() |
||||
{ |
||||
int offset = winSize.width + 1; |
||||
Size blockStep; |
||||
int x, y, t, w, h; |
||||
|
||||
for (t = 8; t <= winSize.width/2; t+=8) //t = size of a cell. blocksize = 4*cellSize
|
||||
{ |
||||
blockStep = Size(4,4); |
||||
w = 2*t; //width of a block
|
||||
h = 2*t; //height of a block
|
||||
for (x = 0; x <= winSize.width - w; x += blockStep.width) |
||||
{ |
||||
for (y = 0; y <= winSize.height - h; y += blockStep.height) |
||||
{ |
||||
features.push_back(Feature(offset, x, y, t, t)); |
||||
} |
||||
} |
||||
w = 2*t; |
||||
h = 4*t; |
||||
for (x = 0; x <= winSize.width - w; x += blockStep.width) |
||||
{ |
||||
for (y = 0; y <= winSize.height - h; y += blockStep.height) |
||||
{ |
||||
features.push_back(Feature(offset, x, y, t, 2*t)); |
||||
} |
||||
} |
||||
w = 4*t; |
||||
h = 2*t; |
||||
for (x = 0; x <= winSize.width - w; x += blockStep.width) |
||||
{ |
||||
for (y = 0; y <= winSize.height - h; y += blockStep.height) |
||||
{ |
||||
features.push_back(Feature(offset, x, y, 2*t, t)); |
||||
} |
||||
} |
||||
} |
||||
|
||||
numFeatures = (int)features.size(); |
||||
} |
||||
|
||||
CvHOGEvaluator::Feature::Feature() |
||||
{ |
||||
for (int i = 0; i < N_CELLS; i++) |
||||
{ |
||||
rect[i] = Rect(0, 0, 0, 0); |
||||
} |
||||
} |
||||
|
||||
CvHOGEvaluator::Feature::Feature( int offset, int x, int y, int cellW, int cellH ) |
||||
{ |
||||
rect[0] = Rect(x, y, cellW, cellH); //cell0
|
||||
rect[1] = Rect(x+cellW, y, cellW, cellH); //cell1
|
||||
rect[2] = Rect(x, y+cellH, cellW, cellH); //cell2
|
||||
rect[3] = Rect(x+cellW, y+cellH, cellW, cellH); //cell3
|
||||
|
||||
for (int i = 0; i < N_CELLS; i++) |
||||
{ |
||||
CV_SUM_OFFSETS(fastRect[i].p0, fastRect[i].p1, fastRect[i].p2, fastRect[i].p3, rect[i], offset); |
||||
} |
||||
} |
||||
|
||||
void CvHOGEvaluator::Feature::write(FileStorage &fs) const |
||||
{ |
||||
fs << CC_RECTS << "["; |
||||
for( int i = 0; i < N_CELLS; i++ ) |
||||
{ |
||||
fs << "[:" << rect[i].x << rect[i].y << rect[i].width << rect[i].height << "]"; |
||||
} |
||||
fs << "]"; |
||||
} |
||||
|
||||
//cell and bin idx writing
|
||||
//void CvHOGEvaluator::Feature::write(FileStorage &fs, int varIdx) const
|
||||
//{
|
||||
// int featComponent = varIdx % (N_CELLS * N_BINS);
|
||||
// int cellIdx = featComponent / N_BINS;
|
||||
// int binIdx = featComponent % N_BINS;
|
||||
//
|
||||
// fs << CC_RECTS << "[:" << rect[cellIdx].x << rect[cellIdx].y <<
|
||||
// rect[cellIdx].width << rect[cellIdx].height << binIdx << "]";
|
||||
//}
|
||||
|
||||
//cell[0] and featComponent idx writing. By cell[0] it's possible to recover all block
|
||||
//All block is necessary for block normalization
|
||||
void CvHOGEvaluator::Feature::write(FileStorage &fs, int featComponentIdx) const |
||||
{ |
||||
fs << CC_RECT << "[:" << rect[0].x << rect[0].y << |
||||
rect[0].width << rect[0].height << featComponentIdx << "]"; |
||||
} |
||||
|
||||
|
||||
void CvHOGEvaluator::integralHistogram(const Mat &img, vector<Mat> &histogram, Mat &norm, int nbins) const |
||||
{ |
||||
CV_Assert( img.type() == CV_8U || img.type() == CV_8UC3 ); |
||||
int x, y, binIdx; |
||||
|
||||
Size gradSize(img.size()); |
||||
Size histSize(histogram[0].size()); |
||||
Mat grad(gradSize, CV_32F); |
||||
Mat qangle(gradSize, CV_8U); |
||||
|
||||
AutoBuffer<int> mapbuf(gradSize.width + gradSize.height + 4); |
||||
int* xmap = mapbuf.data() + 1; |
||||
int* ymap = xmap + gradSize.width + 2; |
||||
|
||||
const int borderType = (int)BORDER_REPLICATE; |
||||
|
||||
for( x = -1; x < gradSize.width + 1; x++ ) |
||||
xmap[x] = borderInterpolate(x, gradSize.width, borderType); |
||||
for( y = -1; y < gradSize.height + 1; y++ ) |
||||
ymap[y] = borderInterpolate(y, gradSize.height, borderType); |
||||
|
||||
int width = gradSize.width; |
||||
AutoBuffer<float> _dbuf(width*4); |
||||
float* dbuf = _dbuf.data(); |
||||
Mat Dx(1, width, CV_32F, dbuf); |
||||
Mat Dy(1, width, CV_32F, dbuf + width); |
||||
Mat Mag(1, width, CV_32F, dbuf + width*2); |
||||
Mat Angle(1, width, CV_32F, dbuf + width*3); |
||||
|
||||
float angleScale = (float)(nbins/CV_PI); |
||||
|
||||
for( y = 0; y < gradSize.height; y++ ) |
||||
{ |
||||
const uchar* currPtr = img.ptr(ymap[y]); |
||||
const uchar* prevPtr = img.ptr(ymap[y-1]); |
||||
const uchar* nextPtr = img.ptr(ymap[y+1]); |
||||
float* gradPtr = grad.ptr<float>(y); |
||||
uchar* qanglePtr = qangle.ptr(y); |
||||
|
||||
for( x = 0; x < width; x++ ) |
||||
{ |
||||
dbuf[x] = (float)(currPtr[xmap[x+1]] - currPtr[xmap[x-1]]); |
||||
dbuf[width + x] = (float)(nextPtr[xmap[x]] - prevPtr[xmap[x]]); |
||||
} |
||||
cartToPolar( Dx, Dy, Mag, Angle, false ); |
||||
for( x = 0; x < width; x++ ) |
||||
{ |
||||
float mag = dbuf[x+width*2]; |
||||
float angle = dbuf[x+width*3]; |
||||
angle = angle*angleScale - 0.5f; |
||||
int bidx = cvFloor(angle); |
||||
angle -= bidx; |
||||
if( bidx < 0 ) |
||||
bidx += nbins; |
||||
else if( bidx >= nbins ) |
||||
bidx -= nbins; |
||||
|
||||
qanglePtr[x] = (uchar)bidx; |
||||
gradPtr[x] = mag; |
||||
} |
||||
} |
||||
integral(grad, norm, grad.depth()); |
||||
|
||||
float* histBuf; |
||||
const float* magBuf; |
||||
const uchar* binsBuf; |
||||
|
||||
int binsStep = (int)( qangle.step / sizeof(uchar) ); |
||||
int histStep = (int)( histogram[0].step / sizeof(float) ); |
||||
int magStep = (int)( grad.step / sizeof(float) ); |
||||
for( binIdx = 0; binIdx < nbins; binIdx++ ) |
||||
{ |
||||
histBuf = histogram[binIdx].ptr<float>(); |
||||
magBuf = grad.ptr<float>(); |
||||
binsBuf = qangle.ptr(); |
||||
|
||||
memset( histBuf, 0, histSize.width * sizeof(histBuf[0]) ); |
||||
histBuf += histStep + 1; |
||||
for( y = 0; y < qangle.rows; y++ ) |
||||
{ |
||||
histBuf[-1] = 0.f; |
||||
float strSum = 0.f; |
||||
for( x = 0; x < qangle.cols; x++ ) |
||||
{ |
||||
if( binsBuf[x] == binIdx ) |
||||
strSum += magBuf[x]; |
||||
histBuf[x] = histBuf[-histStep + x] + strSum; |
||||
} |
||||
histBuf += histStep; |
||||
binsBuf += binsStep; |
||||
magBuf += magStep; |
||||
} |
||||
} |
||||
} |
@@ -1,78 +0,0 @@
||||
#ifndef _OPENCV_HOGFEATURES_H_ |
||||
#define _OPENCV_HOGFEATURES_H_ |
||||
|
||||
#include "traincascade_features.h" |
||||
|
||||
//#define TEST_INTHIST_BUILD
|
||||
//#define TEST_FEAT_CALC
|
||||
|
||||
#define N_BINS 9 |
||||
#define N_CELLS 4 |
||||
|
||||
#define HOGF_NAME "HOGFeatureParams" |
||||
struct CvHOGFeatureParams : public CvFeatureParams |
||||
{ |
||||
CvHOGFeatureParams(); |
||||
}; |
||||
|
||||
class CvHOGEvaluator : public CvFeatureEvaluator |
||||
{ |
||||
public: |
||||
virtual ~CvHOGEvaluator() {} |
||||
virtual void init(const CvFeatureParams *_featureParams, |
||||
int _maxSampleCount, cv::Size _winSize ); |
||||
virtual void setImage(const cv::Mat& img, uchar clsLabel, int idx); |
||||
virtual float operator()(int varIdx, int sampleIdx) const; |
||||
virtual void writeFeatures( cv::FileStorage &fs, const cv::Mat& featureMap ) const; |
||||
protected: |
||||
virtual void generateFeatures(); |
||||
virtual void integralHistogram(const cv::Mat &img, std::vector<cv::Mat> &histogram, cv::Mat &norm, int nbins) const; |
||||
class Feature |
||||
{ |
||||
public: |
||||
Feature(); |
||||
Feature( int offset, int x, int y, int cellW, int cellH ); |
||||
float calc( const std::vector<cv::Mat> &_hists, const cv::Mat &_normSum, size_t y, int featComponent ) const; |
||||
void write( cv::FileStorage &fs ) const; |
||||
void write( cv::FileStorage &fs, int varIdx ) const; |
||||
|
||||
cv::Rect rect[N_CELLS]; //cells
|
||||
|
||||
struct |
||||
{ |
||||
int p0, p1, p2, p3; |
||||
} fastRect[N_CELLS]; |
||||
}; |
||||
std::vector<Feature> features; |
||||
|
||||
cv::Mat normSum; //for normalization calculation (L1 or L2)
|
||||
std::vector<cv::Mat> hist; |
||||
}; |
||||
|
||||
inline float CvHOGEvaluator::operator()(int varIdx, int sampleIdx) const |
||||
{ |
||||
int featureIdx = varIdx / (N_BINS * N_CELLS); |
||||
int componentIdx = varIdx % (N_BINS * N_CELLS); |
||||
//return features[featureIdx].calc( hist, sampleIdx, componentIdx);
|
||||
return features[featureIdx].calc( hist, normSum, sampleIdx, componentIdx); |
||||
} |
||||
|
||||
inline float CvHOGEvaluator::Feature::calc( const std::vector<cv::Mat>& _hists, const cv::Mat& _normSum, size_t y, int featComponent ) const |
||||
{ |
||||
float normFactor; |
||||
float res; |
||||
|
||||
int binIdx = featComponent % N_BINS; |
||||
int cellIdx = featComponent / N_BINS; |
||||
|
||||
const float *phist = _hists[binIdx].ptr<float>((int)y); |
||||
res = phist[fastRect[cellIdx].p0] - phist[fastRect[cellIdx].p1] - phist[fastRect[cellIdx].p2] + phist[fastRect[cellIdx].p3]; |
||||
|
||||
const float *pnormSum = _normSum.ptr<float>((int)y); |
||||
normFactor = (float)(pnormSum[fastRect[0].p0] - pnormSum[fastRect[1].p1] - pnormSum[fastRect[2].p2] + pnormSum[fastRect[3].p3]); |
||||
res = (res > 0.001f) ? ( res / (normFactor + 0.001f) ) : 0.f; //for cutting negative values, which appear due to floating precision
|
||||
|
||||
return res; |
||||
} |
||||
|
||||
#endif // _OPENCV_HOGFEATURES_H_
|
File diff suppressed because it is too large
@@ -1,86 +0,0 @@
||||
#ifndef _OPENCV_BOOST_H_ |
||||
#define _OPENCV_BOOST_H_ |
||||
|
||||
#include "traincascade_features.h" |
||||
#include "old_ml.hpp" |
||||
|
||||
struct CvCascadeBoostParams : CvBoostParams |
||||
{ |
||||
float minHitRate; |
||||
float maxFalseAlarm; |
||||
|
||||
CvCascadeBoostParams(); |
||||
CvCascadeBoostParams( int _boostType, float _minHitRate, float _maxFalseAlarm, |
||||
double _weightTrimRate, int _maxDepth, int _maxWeakCount ); |
||||
virtual ~CvCascadeBoostParams() {} |
||||
void write( cv::FileStorage &fs ) const; |
||||
bool read( const cv::FileNode &node ); |
||||
virtual void printDefaults() const; |
||||
virtual void printAttrs() const; |
||||
virtual bool scanAttr( const std::string prmName, const std::string val); |
||||
}; |
||||
|
||||
struct CvCascadeBoostTrainData : CvDTreeTrainData |
||||
{ |
||||
CvCascadeBoostTrainData( const CvFeatureEvaluator* _featureEvaluator, |
||||
const CvDTreeParams& _params ); |
||||
CvCascadeBoostTrainData( const CvFeatureEvaluator* _featureEvaluator, |
||||
int _numSamples, int _precalcValBufSize, int _precalcIdxBufSize, |
||||
const CvDTreeParams& _params = CvDTreeParams() ); |
||||
virtual void setData( const CvFeatureEvaluator* _featureEvaluator, |
||||
int _numSamples, int _precalcValBufSize, int _precalcIdxBufSize, |
||||
const CvDTreeParams& _params=CvDTreeParams() ); |
||||
void precalculate(); |
||||
|
||||
virtual CvDTreeNode* subsample_data( const CvMat* _subsample_idx ); |
||||
|
||||
virtual const int* get_class_labels( CvDTreeNode* n, int* labelsBuf ); |
||||
virtual const int* get_cv_labels( CvDTreeNode* n, int* labelsBuf); |
||||
virtual const int* get_sample_indices( CvDTreeNode* n, int* indicesBuf ); |
||||
|
||||
virtual void get_ord_var_data( CvDTreeNode* n, int vi, float* ordValuesBuf, int* sortedIndicesBuf, |
||||
const float** ordValues, const int** sortedIndices, int* sampleIndicesBuf ); |
||||
virtual const int* get_cat_var_data( CvDTreeNode* n, int vi, int* catValuesBuf ); |
||||
virtual float getVarValue( int vi, int si ); |
||||
virtual void free_train_data(); |
||||
|
||||
const CvFeatureEvaluator* featureEvaluator; |
||||
cv::Mat valCache; // precalculated feature values (CV_32FC1)
|
||||
CvMat _resp; // for casting
|
||||
int numPrecalcVal, numPrecalcIdx; |
||||
}; |
||||
|
||||
class CvCascadeBoostTree : public CvBoostTree |
||||
{ |
||||
public: |
||||
virtual CvDTreeNode* predict( int sampleIdx ) const; |
||||
void write( cv::FileStorage &fs, const cv::Mat& featureMap ); |
||||
void read( const cv::FileNode &node, CvBoost* _ensemble, CvDTreeTrainData* _data ); |
||||
void markFeaturesInMap( cv::Mat& featureMap ); |
||||
protected: |
||||
virtual void split_node_data( CvDTreeNode* n ); |
||||
}; |
||||
|
||||
class CvCascadeBoost : public CvBoost |
||||
{ |
||||
public: |
||||
virtual bool train( const CvFeatureEvaluator* _featureEvaluator, |
||||
int _numSamples, int _precalcValBufSize, int _precalcIdxBufSize, |
||||
const CvCascadeBoostParams& _params=CvCascadeBoostParams() ); |
||||
virtual float predict( int sampleIdx, bool returnSum = false ) const; |
||||
|
||||
float getThreshold() const { return threshold; } |
||||
void write( cv::FileStorage &fs, const cv::Mat& featureMap ) const; |
||||
bool read( const cv::FileNode &node, const CvFeatureEvaluator* _featureEvaluator, |
||||
const CvCascadeBoostParams& _params ); |
||||
void markUsedFeaturesInMap( cv::Mat& featureMap ); |
||||
protected: |
||||
virtual bool set_params( const CvBoostParams& _params ); |
||||
virtual void update_weights( CvBoostTree* tree ); |
||||
virtual bool isErrDesired(); |
||||
|
||||
float threshold; |
||||
float minHitRate, maxFalseAlarm; |
||||
}; |
||||
|
||||
#endif |
@@ -1,570 +0,0 @@
||||
#include "opencv2/core.hpp" |
||||
|
||||
#include "cascadeclassifier.h" |
||||
#include <queue> |
||||
|
||||
using namespace std; |
||||
using namespace cv; |
||||
|
||||
static const char* stageTypes[] = { CC_BOOST }; |
||||
static const char* featureTypes[] = { CC_HAAR, CC_LBP, CC_HOG }; |
||||
|
||||
CvCascadeParams::CvCascadeParams() : stageType( defaultStageType ), |
||||
featureType( defaultFeatureType ), winSize( cvSize(24, 24) ) |
||||
{ |
||||
name = CC_CASCADE_PARAMS; |
||||
} |
||||
CvCascadeParams::CvCascadeParams( int _stageType, int _featureType ) : stageType( _stageType ), |
||||
featureType( _featureType ), winSize( cvSize(24, 24) ) |
||||
{ |
||||
name = CC_CASCADE_PARAMS; |
||||
} |
||||
|
||||
//---------------------------- CascadeParams --------------------------------------
|
||||
|
||||
void CvCascadeParams::write( FileStorage &fs ) const |
||||
{ |
||||
string stageTypeStr = stageType == BOOST ? CC_BOOST : string(); |
||||
CV_Assert( !stageTypeStr.empty() ); |
||||
fs << CC_STAGE_TYPE << stageTypeStr; |
||||
string featureTypeStr = featureType == CvFeatureParams::HAAR ? CC_HAAR : |
||||
featureType == CvFeatureParams::LBP ? CC_LBP : |
||||
featureType == CvFeatureParams::HOG ? CC_HOG : |
||||
0; |
||||
CV_Assert( !stageTypeStr.empty() ); |
||||
fs << CC_FEATURE_TYPE << featureTypeStr; |
||||
fs << CC_HEIGHT << winSize.height; |
||||
fs << CC_WIDTH << winSize.width; |
||||
} |
||||
|
||||
bool CvCascadeParams::read( const FileNode &node ) |
||||
{ |
||||
if ( node.empty() ) |
||||
return false; |
||||
string stageTypeStr, featureTypeStr; |
||||
FileNode rnode = node[CC_STAGE_TYPE]; |
||||
if ( !rnode.isString() ) |
||||
return false; |
||||
rnode >> stageTypeStr; |
||||
stageType = !stageTypeStr.compare( CC_BOOST ) ? BOOST : -1; |
||||
if (stageType == -1) |
||||
return false; |
||||
rnode = node[CC_FEATURE_TYPE]; |
||||
if ( !rnode.isString() ) |
||||
return false; |
||||
rnode >> featureTypeStr; |
||||
featureType = !featureTypeStr.compare( CC_HAAR ) ? CvFeatureParams::HAAR : |
||||
!featureTypeStr.compare( CC_LBP ) ? CvFeatureParams::LBP : |
||||
!featureTypeStr.compare( CC_HOG ) ? CvFeatureParams::HOG : |
||||
-1; |
||||
if (featureType == -1) |
||||
return false; |
||||
node[CC_HEIGHT] >> winSize.height; |
||||
node[CC_WIDTH] >> winSize.width; |
||||
return winSize.height > 0 && winSize.width > 0; |
||||
} |
||||
|
||||
void CvCascadeParams::printDefaults() const |
||||
{ |
||||
CvParams::printDefaults(); |
||||
cout << " [-stageType <"; |
||||
for( int i = 0; i < (int)(sizeof(stageTypes)/sizeof(stageTypes[0])); i++ ) |
||||
{ |
||||
cout << (i ? " | " : "") << stageTypes[i]; |
||||
if ( i == defaultStageType ) |
||||
cout << "(default)"; |
||||
} |
||||
cout << ">]" << endl; |
||||
|
||||
cout << " [-featureType <{"; |
||||
for( int i = 0; i < (int)(sizeof(featureTypes)/sizeof(featureTypes[0])); i++ ) |
||||
{ |
||||
cout << (i ? ", " : "") << featureTypes[i]; |
||||
if ( i == defaultStageType ) |
||||
cout << "(default)"; |
||||
} |
||||
cout << "}>]" << endl; |
||||
cout << " [-w <sampleWidth = " << winSize.width << ">]" << endl; |
||||
cout << " [-h <sampleHeight = " << winSize.height << ">]" << endl; |
||||
} |
||||
|
||||
void CvCascadeParams::printAttrs() const |
||||
{ |
||||
cout << "stageType: " << stageTypes[stageType] << endl; |
||||
cout << "featureType: " << featureTypes[featureType] << endl; |
||||
cout << "sampleWidth: " << winSize.width << endl; |
||||
cout << "sampleHeight: " << winSize.height << endl; |
||||
} |
||||
|
||||
bool CvCascadeParams::scanAttr( const string prmName, const string val ) |
||||
{ |
||||
bool res = true; |
||||
if( !prmName.compare( "-stageType" ) ) |
||||
{ |
||||
for( int i = 0; i < (int)(sizeof(stageTypes)/sizeof(stageTypes[0])); i++ ) |
||||
if( !val.compare( stageTypes[i] ) ) |
||||
stageType = i; |
||||
} |
||||
else if( !prmName.compare( "-featureType" ) ) |
||||
{ |
||||
for( int i = 0; i < (int)(sizeof(featureTypes)/sizeof(featureTypes[0])); i++ ) |
||||
if( !val.compare( featureTypes[i] ) ) |
||||
featureType = i; |
||||
} |
||||
else if( !prmName.compare( "-w" ) ) |
||||
{ |
||||
winSize.width = atoi( val.c_str() ); |
||||
} |
||||
else if( !prmName.compare( "-h" ) ) |
||||
{ |
||||
winSize.height = atoi( val.c_str() ); |
||||
} |
||||
else |
||||
res = false; |
||||
return res; |
||||
} |
||||
|
||||
//---------------------------- CascadeClassifier --------------------------------------
|
||||
|
||||
bool CvCascadeClassifier::train( const string _cascadeDirName, |
||||
const string _posFilename, |
||||
const string _negFilename, |
||||
int _numPos, int _numNeg, |
||||
int _precalcValBufSize, int _precalcIdxBufSize, |
||||
int _numStages, |
||||
const CvCascadeParams& _cascadeParams, |
||||
const CvFeatureParams& _featureParams, |
||||
const CvCascadeBoostParams& _stageParams, |
||||
bool baseFormatSave, |
||||
double acceptanceRatioBreakValue ) |
||||
{ |
||||
// Start recording clock ticks for training time output
|
||||
double time = (double)getTickCount(); |
||||
|
||||
if( _cascadeDirName.empty() || _posFilename.empty() || _negFilename.empty() ) |
||||
CV_Error( CV_StsBadArg, "_cascadeDirName or _bgfileName or _vecFileName is NULL" ); |
||||
|
||||
string dirName; |
||||
if (_cascadeDirName.find_last_of("/\\") == (_cascadeDirName.length() - 1) ) |
||||
dirName = _cascadeDirName; |
||||
else |
||||
dirName = _cascadeDirName + '/'; |
||||
|
||||
numPos = _numPos; |
||||
numNeg = _numNeg; |
||||
numStages = _numStages; |
||||
if ( !imgReader.create( _posFilename, _negFilename, _cascadeParams.winSize ) ) |
||||
{ |
||||
cout << "Image reader can not be created from -vec " << _posFilename |
||||
<< " and -bg " << _negFilename << "." << endl; |
||||
return false; |
||||
} |
||||
if ( !load( dirName ) ) |
||||
{ |
||||
cascadeParams = _cascadeParams; |
||||
featureParams = CvFeatureParams::create(cascadeParams.featureType); |
||||
featureParams->init(_featureParams); |
||||
stageParams = makePtr<CvCascadeBoostParams>(); |
||||
*stageParams = _stageParams; |
||||
featureEvaluator = CvFeatureEvaluator::create(cascadeParams.featureType); |
||||
featureEvaluator->init( featureParams, numPos + numNeg, cascadeParams.winSize ); |
||||
stageClassifiers.reserve( numStages ); |
||||
}else{ |
||||
// Make sure that if model parameters are preloaded, that people are aware of this,
|
||||
// even when passing other parameters to the training command
|
||||
cout << "---------------------------------------------------------------------------------" << endl; |
||||
cout << "Training parameters are pre-loaded from the parameter file in data folder!" << endl; |
||||
cout << "Please empty this folder if you want to use a NEW set of training parameters." << endl; |
||||
cout << "---------------------------------------------------------------------------------" << endl; |
||||
} |
||||
cout << "PARAMETERS:" << endl; |
||||
cout << "cascadeDirName: " << _cascadeDirName << endl; |
||||
cout << "vecFileName: " << _posFilename << endl; |
||||
cout << "bgFileName: " << _negFilename << endl; |
||||
cout << "numPos: " << _numPos << endl; |
||||
cout << "numNeg: " << _numNeg << endl; |
||||
cout << "numStages: " << numStages << endl; |
||||
cout << "precalcValBufSize[Mb] : " << _precalcValBufSize << endl; |
||||
cout << "precalcIdxBufSize[Mb] : " << _precalcIdxBufSize << endl; |
||||
cout << "acceptanceRatioBreakValue : " << acceptanceRatioBreakValue << endl; |
||||
cascadeParams.printAttrs(); |
||||
stageParams->printAttrs(); |
||||
featureParams->printAttrs(); |
||||
cout << "Number of unique features given windowSize [" << _cascadeParams.winSize.width << "," << _cascadeParams.winSize.height << "] : " << featureEvaluator->getNumFeatures() << "" << endl; |
||||
|
||||
int startNumStages = (int)stageClassifiers.size(); |
||||
if ( startNumStages > 1 ) |
||||
cout << endl << "Stages 0-" << startNumStages-1 << " are loaded" << endl; |
||||
else if ( startNumStages == 1) |
||||
cout << endl << "Stage 0 is loaded" << endl; |
||||
|
||||
double requiredLeafFARate = pow( (double) stageParams->maxFalseAlarm, (double) numStages ) / |
||||
(double)stageParams->max_depth; |
||||
double tempLeafFARate; |
||||
|
||||
for( int i = startNumStages; i < numStages; i++ ) |
||||
{ |
||||
cout << endl << "===== TRAINING " << i << "-stage =====" << endl; |
||||
cout << "<BEGIN" << endl; |
||||
|
||||
if ( !updateTrainingSet( requiredLeafFARate, tempLeafFARate ) ) |
||||
{ |
||||
cout << "Train dataset for temp stage can not be filled. " |
||||
"Branch training terminated." << endl; |
||||
break; |
||||
} |
||||
if( tempLeafFARate <= requiredLeafFARate ) |
||||
{ |
||||
cout << "Required leaf false alarm rate achieved. " |
||||
"Branch training terminated." << endl; |
||||
break; |
||||
} |
||||
if( (tempLeafFARate <= acceptanceRatioBreakValue) && (acceptanceRatioBreakValue >= 0) ){ |
||||
cout << "The required acceptanceRatio for the model has been reached to avoid overfitting of trainingdata. " |
||||
"Branch training terminated." << endl; |
||||
break; |
||||
} |
||||
|
||||
Ptr<CvCascadeBoost> tempStage = makePtr<CvCascadeBoost>(); |
||||
bool isStageTrained = tempStage->train( featureEvaluator, |
||||
curNumSamples, _precalcValBufSize, _precalcIdxBufSize, |
||||
*stageParams ); |
||||
cout << "END>" << endl; |
||||
|
||||
if(!isStageTrained) |
||||
break; |
||||
|
||||
stageClassifiers.push_back( tempStage ); |
||||
|
||||
// save params
|
||||
if( i == 0) |
||||
{ |
||||
std::string paramsFilename = dirName + CC_PARAMS_FILENAME; |
||||
FileStorage fs( paramsFilename, FileStorage::WRITE); |
||||
if ( !fs.isOpened() ) |
||||
{ |
||||
cout << "Parameters can not be written, because file " << paramsFilename |
||||
<< " can not be opened." << endl; |
||||
return false; |
||||
} |
||||
fs << FileStorage::getDefaultObjectName(paramsFilename) << "{"; |
||||
writeParams( fs ); |
||||
fs << "}"; |
||||
} |
||||
// save current stage
|
||||
char buf[32]; |
||||
snprintf(buf, sizeof(buf), "%s%d", "stage", i ); |
||||
string stageFilename = dirName + buf + ".xml"; |
||||
FileStorage fs( stageFilename, FileStorage::WRITE ); |
||||
if ( !fs.isOpened() ) |
||||
{ |
||||
cout << "Current stage can not be written, because file " << stageFilename |
||||
<< " can not be opened." << endl; |
||||
return false; |
||||
} |
||||
fs << FileStorage::getDefaultObjectName(stageFilename) << "{"; |
||||
tempStage->write( fs, Mat() ); |
||||
fs << "}"; |
||||
|
||||
// Output training time up till now
|
||||
double seconds = ( (double)getTickCount() - time)/ getTickFrequency(); |
||||
int days = int(seconds) / 60 / 60 / 24; |
||||
int hours = (int(seconds) / 60 / 60) % 24; |
||||
int minutes = (int(seconds) / 60) % 60; |
||||
int seconds_left = int(seconds) % 60; |
||||
cout << "Training until now has taken " << days << " days " << hours << " hours " << minutes << " minutes " << seconds_left <<" seconds." << endl; |
||||
} |
||||
|
||||
if(stageClassifiers.size() == 0) |
||||
{ |
||||
cout << "Cascade classifier can't be trained. Check the used training parameters." << endl; |
||||
return false; |
||||
} |
||||
|
||||
save( dirName + CC_CASCADE_FILENAME, baseFormatSave ); |
||||
|
||||
return true; |
||||
} |
||||
|
||||
int CvCascadeClassifier::predict( int sampleIdx ) |
||||
{ |
||||
CV_DbgAssert( sampleIdx < numPos + numNeg ); |
||||
for (vector< Ptr<CvCascadeBoost> >::iterator it = stageClassifiers.begin(); |
||||
it != stageClassifiers.end();++it ) |
||||
{ |
||||
if ( (*it)->predict( sampleIdx ) == 0.f ) |
||||
return 0; |
||||
} |
||||
return 1; |
||||
} |
||||
|
||||
bool CvCascadeClassifier::updateTrainingSet( double minimumAcceptanceRatio, double& acceptanceRatio) |
||||
{ |
||||
int64 posConsumed = 0, negConsumed = 0; |
||||
imgReader.restart(); |
||||
int posCount = fillPassedSamples( 0, numPos, true, 0, posConsumed ); |
||||
if( !posCount ) |
||||
return false; |
||||
cout << "POS count : consumed " << posCount << " : " << (int)posConsumed << endl; |
||||
|
||||
int proNumNeg = cvRound( ( ((double)numNeg) * ((double)posCount) ) / numPos ); // apply only a fraction of negative samples. double is required since overflow is possible
|
||||
int negCount = fillPassedSamples( posCount, proNumNeg, false, minimumAcceptanceRatio, negConsumed ); |
||||
if ( !negCount ) |
||||
if ( !(negConsumed > 0 && ((double)negCount+1)/(double)negConsumed <= minimumAcceptanceRatio) ) |
||||
return false; |
||||
|
||||
curNumSamples = posCount + negCount; |
||||
acceptanceRatio = negConsumed == 0 ? 0 : ( (double)negCount/(double)(int64)negConsumed ); |
||||
cout << "NEG count : acceptanceRatio " << negCount << " : " << acceptanceRatio << endl; |
||||
return true; |
||||
} |
||||
|
||||
int CvCascadeClassifier::fillPassedSamples( int first, int count, bool isPositive, double minimumAcceptanceRatio, int64& consumed ) |
||||
{ |
||||
int getcount = 0; |
||||
Mat img(cascadeParams.winSize, CV_8UC1); |
||||
for( int i = first; i < first + count; i++ ) |
||||
{ |
||||
for( ; ; ) |
||||
{ |
||||
if( consumed != 0 && ((double)getcount+1)/(double)(int64)consumed <= minimumAcceptanceRatio ) |
||||
return getcount; |
||||
|
||||
bool isGetImg = isPositive ? imgReader.getPos( img ) : |
||||
imgReader.getNeg( img ); |
||||
if( !isGetImg ) |
||||
return getcount; |
||||
consumed++; |
||||
|
||||
featureEvaluator->setImage( img, isPositive ? 1 : 0, i ); |
||||
if( predict( i ) == 1 ) |
||||
{ |
||||
getcount++; |
||||
printf("%s current samples: %d\r", isPositive ? "POS":"NEG", getcount); |
||||
fflush(stdout); |
||||
break; |
||||
} |
||||
} |
||||
} |
||||
return getcount; |
||||
} |
||||
|
||||
void CvCascadeClassifier::writeParams( FileStorage &fs ) const |
||||
{ |
||||
cascadeParams.write( fs ); |
||||
fs << CC_STAGE_PARAMS << "{"; stageParams->write( fs ); fs << "}"; |
||||
fs << CC_FEATURE_PARAMS << "{"; featureParams->write( fs ); fs << "}"; |
||||
} |
||||
|
||||
void CvCascadeClassifier::writeFeatures( FileStorage &fs, const Mat& featureMap ) const |
||||
{ |
||||
featureEvaluator->writeFeatures( fs, featureMap ); |
||||
} |
||||
|
||||
void CvCascadeClassifier::writeStages( FileStorage &fs, const Mat& featureMap ) const |
||||
{ |
||||
char cmnt[30]; |
||||
int i = 0; |
||||
fs << CC_STAGES << "["; |
||||
for( vector< Ptr<CvCascadeBoost> >::const_iterator it = stageClassifiers.begin(); |
||||
it != stageClassifiers.end();++it, ++i ) |
||||
{ |
||||
snprintf( cmnt, sizeof(cmnt), "stage %d", i ); |
||||
fs.writeComment(cmnt); |
||||
fs << "{"; |
||||
(*it)->write( fs, featureMap ); |
||||
fs << "}"; |
||||
} |
||||
fs << "]"; |
||||
} |
||||
|
||||
bool CvCascadeClassifier::readParams( const FileNode &node ) |
||||
{ |
||||
if ( !node.isMap() || !cascadeParams.read( node ) ) |
||||
return false; |
||||
|
||||
stageParams = makePtr<CvCascadeBoostParams>(); |
||||
FileNode rnode = node[CC_STAGE_PARAMS]; |
||||
if ( !stageParams->read( rnode ) ) |
||||
return false; |
||||
|
||||
featureParams = CvFeatureParams::create(cascadeParams.featureType); |
||||
rnode = node[CC_FEATURE_PARAMS]; |
||||
if ( !featureParams->read( rnode ) ) |
||||
return false; |
||||
return true; |
||||
} |
||||
|
||||
bool CvCascadeClassifier::readStages( const FileNode &node) |
||||
{ |
||||
FileNode rnode = node[CC_STAGES]; |
||||
if (!rnode.empty() || !rnode.isSeq()) |
||||
return false; |
||||
stageClassifiers.reserve(numStages); |
||||
FileNodeIterator it = rnode.begin(); |
||||
for( int i = 0; i < min( (int)rnode.size(), numStages ); i++, it++ ) |
||||
{ |
||||
Ptr<CvCascadeBoost> tempStage = makePtr<CvCascadeBoost>(); |
||||
if ( !tempStage->read( *it, featureEvaluator, *stageParams) ) |
||||
return false; |
||||
stageClassifiers.push_back(tempStage); |
||||
} |
||||
return true; |
||||
} |
||||
|
||||
// For old Haar Classifier file saving
|
||||
#define ICV_HAAR_TYPE_ID "opencv-haar-classifier" |
||||
#define ICV_HAAR_SIZE_NAME "size" |
||||
#define ICV_HAAR_STAGES_NAME "stages" |
||||
#define ICV_HAAR_TREES_NAME "trees" |
||||
#define ICV_HAAR_FEATURE_NAME "feature" |
||||
#define ICV_HAAR_RECTS_NAME "rects" |
||||
#define ICV_HAAR_TILTED_NAME "tilted" |
||||
#define ICV_HAAR_THRESHOLD_NAME "threshold" |
||||
#define ICV_HAAR_LEFT_NODE_NAME "left_node" |
||||
#define ICV_HAAR_LEFT_VAL_NAME "left_val" |
||||
#define ICV_HAAR_RIGHT_NODE_NAME "right_node" |
||||
#define ICV_HAAR_RIGHT_VAL_NAME "right_val" |
||||
#define ICV_HAAR_STAGE_THRESHOLD_NAME "stage_threshold" |
||||
#define ICV_HAAR_PARENT_NAME "parent" |
||||
#define ICV_HAAR_NEXT_NAME "next" |
||||
|
||||
void CvCascadeClassifier::save( const string filename, bool baseFormat ) |
||||
{ |
||||
FileStorage fs( filename, FileStorage::WRITE ); |
||||
|
||||
if ( !fs.isOpened() ) |
||||
return; |
||||
|
||||
fs << FileStorage::getDefaultObjectName(filename); |
||||
if ( !baseFormat ) |
||||
{ |
||||
Mat featureMap; |
||||
getUsedFeaturesIdxMap( featureMap ); |
||||
fs << "{"; |
||||
writeParams( fs ); |
||||
fs << CC_STAGE_NUM << (int)stageClassifiers.size(); |
||||
writeStages( fs, featureMap ); |
||||
writeFeatures( fs, featureMap ); |
||||
} |
||||
else |
||||
{ |
||||
//char buf[256];
|
||||
CvSeq* weak; |
||||
if ( cascadeParams.featureType != CvFeatureParams::HAAR ) |
||||
CV_Error( CV_StsBadFunc, "old file format is used for Haar-like features only"); |
||||
fs << "{:" ICV_HAAR_TYPE_ID; |
||||
fs << ICV_HAAR_SIZE_NAME << "[:" << cascadeParams.winSize.width << |
||||
cascadeParams.winSize.height << "]"; |
||||
fs << ICV_HAAR_STAGES_NAME << "["; |
||||
for( size_t si = 0; si < stageClassifiers.size(); si++ ) |
||||
{ |
||||
fs << "{"; //stage
|
||||
/*snprintf( buf, sizeof(buf), "stage %d", si );
|
||||
CV_CALL( cvWriteComment( fs, buf, 1 ) );*/ |
||||
weak = stageClassifiers[si]->get_weak_predictors(); |
||||
fs << ICV_HAAR_TREES_NAME << "["; |
||||
for( int wi = 0; wi < weak->total; wi++ ) |
||||
{ |
||||
int total_inner_node_idx = -1; |
||||
queue<const CvDTreeNode*> inner_nodes_queue; |
||||
CvCascadeBoostTree* tree = *((CvCascadeBoostTree**) cvGetSeqElem( weak, wi )); |
||||
|
||||
fs << "["; |
||||
/*snprintf( buf, sizeof(buf), "tree %d", wi );
|
||||
CV_CALL( cvWriteComment( fs, buf, 1 ) );*/ |
||||
|
||||
const CvDTreeNode* tempNode; |
||||
|
||||
inner_nodes_queue.push( tree->get_root() ); |
||||
total_inner_node_idx++; |
||||
|
||||
while (!inner_nodes_queue.empty()) |
||||
{ |
||||
tempNode = inner_nodes_queue.front(); |
||||
|
||||
fs << "{"; |
||||
fs << ICV_HAAR_FEATURE_NAME << "{"; |
||||
((CvHaarEvaluator*)featureEvaluator.get())->writeFeature( fs, tempNode->split->var_idx ); |
||||
fs << "}"; |
||||
|
||||
fs << ICV_HAAR_THRESHOLD_NAME << tempNode->split->ord.c; |
||||
|
||||
if( tempNode->left->left || tempNode->left->right ) |
||||
{ |
||||
inner_nodes_queue.push( tempNode->left ); |
||||
total_inner_node_idx++; |
||||
fs << ICV_HAAR_LEFT_NODE_NAME << total_inner_node_idx; |
||||
} |
||||
else |
||||
fs << ICV_HAAR_LEFT_VAL_NAME << tempNode->left->value; |
||||
|
||||
if( tempNode->right->left || tempNode->right->right ) |
||||
{ |
||||
inner_nodes_queue.push( tempNode->right ); |
||||
total_inner_node_idx++; |
||||
fs << ICV_HAAR_RIGHT_NODE_NAME << total_inner_node_idx; |
||||
} |
||||
else |
||||
fs << ICV_HAAR_RIGHT_VAL_NAME << tempNode->right->value; |
||||
fs << "}"; // ICV_HAAR_FEATURE_NAME
|
||||
inner_nodes_queue.pop(); |
||||
} |
||||
fs << "]"; |
||||
} |
||||
fs << "]"; //ICV_HAAR_TREES_NAME
|
||||
fs << ICV_HAAR_STAGE_THRESHOLD_NAME << stageClassifiers[si]->getThreshold(); |
||||
fs << ICV_HAAR_PARENT_NAME << (int)si-1 << ICV_HAAR_NEXT_NAME << -1; |
||||
fs << "}"; //stage
|
||||
} /* for each stage */ |
||||
fs << "]"; //ICV_HAAR_STAGES_NAME
|
||||
} |
||||
fs << "}"; |
||||
} |
||||
|
||||
bool CvCascadeClassifier::load( const string cascadeDirName ) |
||||
{ |
||||
FileStorage fs( cascadeDirName + CC_PARAMS_FILENAME, FileStorage::READ ); |
||||
if ( !fs.isOpened() ) |
||||
return false; |
||||
FileNode node = fs.getFirstTopLevelNode(); |
||||
if ( !readParams( node ) ) |
||||
return false; |
||||
featureEvaluator = CvFeatureEvaluator::create(cascadeParams.featureType); |
||||
featureEvaluator->init( featureParams, numPos + numNeg, cascadeParams.winSize ); |
||||
fs.release(); |
||||
|
||||
char buf[5+10+1] = {0}; |
||||
for ( int si = 0; si < numStages; si++ ) |
||||
{ |
||||
snprintf( buf, sizeof(buf), "%s%d", "stage", si); |
||||
fs.open( cascadeDirName + buf + ".xml", FileStorage::READ ); |
||||
node = fs.getFirstTopLevelNode(); |
||||
if ( !fs.isOpened() ) |
||||
break; |
||||
Ptr<CvCascadeBoost> tempStage = makePtr<CvCascadeBoost>(); |
||||
|
||||
if ( !tempStage->read( node, featureEvaluator, *stageParams )) |
||||
{ |
||||
fs.release(); |
||||
break; |
||||
} |
||||
stageClassifiers.push_back(tempStage); |
||||
} |
||||
return true; |
||||
} |
||||
|
||||
void CvCascadeClassifier::getUsedFeaturesIdxMap( Mat& featureMap ) |
||||
{ |
||||
int varCount = featureEvaluator->getNumFeatures() * featureEvaluator->getFeatureSize(); |
||||
featureMap.create( 1, varCount, CV_32SC1 ); |
||||
featureMap.setTo(Scalar(-1)); |
||||
|
||||
for( vector< Ptr<CvCascadeBoost> >::const_iterator it = stageClassifiers.begin(); |
||||
it != stageClassifiers.end();++it ) |
||||
(*it)->markUsedFeaturesInMap( featureMap ); |
||||
|
||||
for( int fi = 0, idx = 0; fi < varCount; fi++ ) |
||||
if ( featureMap.at<int>(0, fi) >= 0 ) |
||||
featureMap.ptr<int>(0)[fi] = idx++; |
||||
} |
@@ -1,125 +0,0 @@
||||
#ifndef _OPENCV_CASCADECLASSIFIER_H_ |
||||
#define _OPENCV_CASCADECLASSIFIER_H_ |
||||
|
||||
#include <ctime> |
||||
#include "traincascade_features.h" |
||||
#include "haarfeatures.h" |
||||
#include "lbpfeatures.h" |
||||
#include "HOGfeatures.h" //new |
||||
#include "boost.h" |
||||
|
||||
#define CC_CASCADE_FILENAME "cascade.xml" |
||||
#define CC_PARAMS_FILENAME "params.xml" |
||||
|
||||
#define CC_CASCADE_PARAMS "cascadeParams" |
||||
#define CC_STAGE_TYPE "stageType" |
||||
#define CC_FEATURE_TYPE "featureType" |
||||
#define CC_HEIGHT "height" |
||||
#define CC_WIDTH "width" |
||||
|
||||
#define CC_STAGE_NUM "stageNum" |
||||
#define CC_STAGES "stages" |
||||
#define CC_STAGE_PARAMS "stageParams" |
||||
|
||||
#define CC_BOOST "BOOST" |
||||
#define CC_BOOST_TYPE "boostType" |
||||
#define CC_DISCRETE_BOOST "DAB" |
||||
#define CC_REAL_BOOST "RAB" |
||||
#define CC_LOGIT_BOOST "LB" |
||||
#define CC_GENTLE_BOOST "GAB" |
||||
#define CC_MINHITRATE "minHitRate" |
||||
#define CC_MAXFALSEALARM "maxFalseAlarm" |
||||
#define CC_TRIM_RATE "weightTrimRate" |
||||
#define CC_MAX_DEPTH "maxDepth" |
||||
#define CC_WEAK_COUNT "maxWeakCount" |
||||
#define CC_STAGE_THRESHOLD "stageThreshold" |
||||
#define CC_WEAK_CLASSIFIERS "weakClassifiers" |
||||
#define CC_INTERNAL_NODES "internalNodes" |
||||
#define CC_LEAF_VALUES "leafValues" |
||||
|
||||
#define CC_FEATURES FEATURES |
||||
#define CC_FEATURE_PARAMS "featureParams" |
||||
#define CC_MAX_CAT_COUNT "maxCatCount" |
||||
#define CC_FEATURE_SIZE "featSize" |
||||
|
||||
#define CC_HAAR "HAAR" |
||||
#define CC_MODE "mode" |
||||
#define CC_MODE_BASIC "BASIC" |
||||
#define CC_MODE_CORE "CORE" |
||||
#define CC_MODE_ALL "ALL" |
||||
#define CC_RECTS "rects" |
||||
#define CC_TILTED "tilted" |
||||
|
||||
#define CC_LBP "LBP" |
||||
#define CC_RECT "rect" |
||||
|
||||
#define CC_HOG "HOG" |
||||
|
||||
#ifdef _WIN32 |
||||
#define TIME( arg ) (((double) clock()) / CLOCKS_PER_SEC) |
||||
#else |
||||
#define TIME( arg ) (time( arg )) |
||||
#endif |
||||
|
||||
class CvCascadeParams : public CvParams |
||||
{ |
||||
public: |
||||
enum { BOOST = 0 }; |
||||
static const int defaultStageType = BOOST; |
||||
static const int defaultFeatureType = CvFeatureParams::HAAR; |
||||
|
||||
CvCascadeParams(); |
||||
CvCascadeParams( int _stageType, int _featureType ); |
||||
void write( cv::FileStorage &fs ) const; |
||||
bool read( const cv::FileNode &node ); |
||||
|
||||
void printDefaults() const; |
||||
void printAttrs() const; |
||||
bool scanAttr( const std::string prmName, const std::string val ); |
||||
|
||||
int stageType; |
||||
int featureType; |
||||
cv::Size winSize; |
||||
}; |
||||
|
||||
class CvCascadeClassifier |
||||
{ |
||||
public: |
||||
bool train( const std::string _cascadeDirName, |
||||
const std::string _posFilename, |
||||
const std::string _negFilename, |
||||
int _numPos, int _numNeg, |
||||
int _precalcValBufSize, int _precalcIdxBufSize, |
||||
int _numStages, |
||||
const CvCascadeParams& _cascadeParams, |
||||
const CvFeatureParams& _featureParams, |
||||
const CvCascadeBoostParams& _stageParams, |
||||
bool baseFormatSave = false, |
||||
double acceptanceRatioBreakValue = -1.0 ); |
||||
private: |
||||
int predict( int sampleIdx ); |
||||
void save( const std::string cascadeDirName, bool baseFormat = false ); |
||||
bool load( const std::string cascadeDirName ); |
||||
bool updateTrainingSet( double minimumAcceptanceRatio, double& acceptanceRatio ); |
||||
int fillPassedSamples( int first, int count, bool isPositive, double requiredAcceptanceRatio, int64& consumed ); |
||||
|
||||
void writeParams( cv::FileStorage &fs ) const; |
||||
void writeStages( cv::FileStorage &fs, const cv::Mat& featureMap ) const; |
||||
void writeFeatures( cv::FileStorage &fs, const cv::Mat& featureMap ) const; |
||||
bool readParams( const cv::FileNode &node ); |
||||
bool readStages( const cv::FileNode &node ); |
||||
|
||||
void getUsedFeaturesIdxMap( cv::Mat& featureMap ); |
||||
|
||||
CvCascadeParams cascadeParams; |
||||
cv::Ptr<CvFeatureParams> featureParams; |
||||
cv::Ptr<CvCascadeBoostParams> stageParams; |
||||
|
||||
cv::Ptr<CvFeatureEvaluator> featureEvaluator; |
||||
std::vector< cv::Ptr<CvCascadeBoost> > stageClassifiers; |
||||
CvCascadeImageReader imgReader; |
||||
int numStages, curNumSamples; |
||||
int numPos, numNeg; |
||||
}; |
||||
|
||||
#endif |
@ -1,93 +0,0 @@ |
||||
#include "opencv2/core.hpp" |
||||
|
||||
#include "traincascade_features.h" |
||||
#include "cascadeclassifier.h" |
||||
|
||||
using namespace std; |
||||
using namespace cv; |
||||
|
||||
float calcNormFactor( const Mat& sum, const Mat& sqSum ) |
||||
{ |
||||
CV_DbgAssert( sum.cols > 3 && sqSum.rows > 3 ); |
||||
Rect normrect( 1, 1, sum.cols - 3, sum.rows - 3 ); |
||||
size_t p0, p1, p2, p3; |
||||
CV_SUM_OFFSETS( p0, p1, p2, p3, normrect, sum.step1() ) |
||||
double area = normrect.width * normrect.height; |
||||
const int *sp = sum.ptr<int>(); |
||||
int valSum = sp[p0] - sp[p1] - sp[p2] + sp[p3]; |
||||
const double *sqp = sqSum.ptr<double>(); |
||||
double valSqSum = sqp[p0] - sqp[p1] - sqp[p2] + sqp[p3]; |
||||
return (float) sqrt( (double) (area * valSqSum - (double)valSum * valSum) ); |
||||
} |
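
The value returned above is area × standard deviation of the window, i.e. sqrt(area·Σp² − (Σp)²); the trainer later divides Haar feature responses by it to compensate for illumination and contrast changes. A minimal standalone sketch of the same computation using cv::integral directly (the function name and the assumption of a CV_8UC1 window are illustrative, not from the original sources):

#include <cmath>
#include "opencv2/core.hpp"
#include "opencv2/imgproc.hpp"

// Sketch: the same normalization factor, computed from scratch for one window.
static float normFactorSketch( const cv::Mat& window /* CV_8UC1 */ )
{
    cv::Mat sum, sqSum;
    cv::integral( window, sum, sqSum, CV_32S, CV_64F );
    // Same region as above: the window shrunk by one pixel on every side.
    cv::Rect r( 1, 1, window.cols - 2, window.rows - 2 );
    double area = (double)r.width * r.height;
    double s  =  sum.at<int>(r.y, r.x)              -  sum.at<int>(r.y, r.x + r.width)
              -  sum.at<int>(r.y + r.height, r.x)   +  sum.at<int>(r.y + r.height, r.x + r.width);
    double sq = sqSum.at<double>(r.y, r.x)            - sqSum.at<double>(r.y, r.x + r.width)
              - sqSum.at<double>(r.y + r.height, r.x) + sqSum.at<double>(r.y + r.height, r.x + r.width);
    return (float)std::sqrt( area * sq - s * s );   // == area * stddev of the region
}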
||||
|
||||
CvParams::CvParams() : name( "params" ) {} |
||||
void CvParams::printDefaults() const |
||||
{ cout << "--" << name << "--" << endl; } |
||||
void CvParams::printAttrs() const {} |
||||
bool CvParams::scanAttr( const string, const string ) { return false; } |
||||
|
||||
|
||||
//---------------------------- FeatureParams --------------------------------------
|
||||
|
||||
CvFeatureParams::CvFeatureParams() : maxCatCount( 0 ), featSize( 1 ) |
||||
{ |
||||
name = CC_FEATURE_PARAMS; |
||||
} |
||||
|
||||
void CvFeatureParams::init( const CvFeatureParams& fp ) |
||||
{ |
||||
maxCatCount = fp.maxCatCount; |
||||
featSize = fp.featSize; |
||||
} |
||||
|
||||
void CvFeatureParams::write( FileStorage &fs ) const |
||||
{ |
||||
fs << CC_MAX_CAT_COUNT << maxCatCount; |
||||
fs << CC_FEATURE_SIZE << featSize; |
||||
} |
||||
|
||||
bool CvFeatureParams::read( const FileNode &node ) |
||||
{ |
||||
if ( node.empty() ) |
||||
return false; |
||||
maxCatCount = node[CC_MAX_CAT_COUNT]; |
||||
featSize = node[CC_FEATURE_SIZE]; |
||||
return ( maxCatCount >= 0 && featSize >= 1 ); |
||||
} |
||||
|
||||
Ptr<CvFeatureParams> CvFeatureParams::create( int featureType ) |
||||
{ |
||||
return featureType == HAAR ? Ptr<CvFeatureParams>(new CvHaarFeatureParams) : |
||||
featureType == LBP ? Ptr<CvFeatureParams>(new CvLBPFeatureParams) : |
||||
featureType == HOG ? Ptr<CvFeatureParams>(new CvHOGFeatureParams) : |
||||
Ptr<CvFeatureParams>(); |
||||
} |
||||
|
||||
//------------------------------------- FeatureEvaluator ---------------------------------------
|
||||
|
||||
void CvFeatureEvaluator::init(const CvFeatureParams *_featureParams, |
||||
int _maxSampleCount, Size _winSize ) |
||||
{ |
||||
CV_Assert(_maxSampleCount > 0); |
||||
featureParams = (CvFeatureParams *)_featureParams; |
||||
winSize = _winSize; |
||||
numFeatures = 0; |
||||
cls.create( (int)_maxSampleCount, 1, CV_32FC1 ); |
||||
generateFeatures(); |
||||
} |
||||
|
||||
void CvFeatureEvaluator::setImage(const Mat &img, uchar clsLabel, int idx) |
||||
{ |
||||
CV_Assert(img.cols == winSize.width); |
||||
CV_Assert(img.rows == winSize.height); |
||||
CV_Assert(idx < cls.rows); |
||||
cls.ptr<float>(idx)[0] = clsLabel; |
||||
} |
||||
|
||||
Ptr<CvFeatureEvaluator> CvFeatureEvaluator::create(int type) |
||||
{ |
||||
return type == CvFeatureParams::HAAR ? Ptr<CvFeatureEvaluator>(new CvHaarEvaluator) : |
||||
type == CvFeatureParams::LBP ? Ptr<CvFeatureEvaluator>(new CvLBPEvaluator) : |
||||
type == CvFeatureParams::HOG ? Ptr<CvFeatureEvaluator>(new CvHOGEvaluator) : |
||||
Ptr<CvFeatureEvaluator>(); |
||||
} |
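
A hedged sketch of how the two factories above are wired together; HAAR, the sample count and the 24x24 window are illustrative assumptions, not values mandated by the original code:

cv::Ptr<CvFeatureParams>    params    = CvFeatureParams::create( CvFeatureParams::HAAR );
cv::Ptr<CvFeatureEvaluator> evaluator = CvFeatureEvaluator::create( CvFeatureParams::HAAR );
evaluator->init( params.get(), /* _maxSampleCount = */ 1000, cv::Size( 24, 24 ) );
// afterwards: evaluator->setImage(...) once per sample, then (*evaluator)(featureIdx, sampleIdx)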
@ -1,312 +0,0 @@ |
||||
#include "opencv2/core.hpp" |
||||
#include "opencv2/imgproc.hpp" |
||||
|
||||
#include "haarfeatures.h" |
||||
#include "cascadeclassifier.h" |
||||
|
||||
using namespace std; |
||||
using namespace cv; |
||||
|
||||
CvHaarFeatureParams::CvHaarFeatureParams() : mode(BASIC) |
||||
{ |
||||
name = HFP_NAME; |
||||
} |
||||
|
||||
CvHaarFeatureParams::CvHaarFeatureParams( int _mode ) : mode( _mode ) |
||||
{ |
||||
name = HFP_NAME; |
||||
} |
||||
|
||||
void CvHaarFeatureParams::init( const CvFeatureParams& fp ) |
||||
{ |
||||
CvFeatureParams::init( fp ); |
||||
mode = ((const CvHaarFeatureParams&)fp).mode; |
||||
} |
||||
|
||||
void CvHaarFeatureParams::write( FileStorage &fs ) const |
||||
{ |
||||
CvFeatureParams::write( fs ); |
||||
string modeStr = mode == BASIC ? CC_MODE_BASIC : |
||||
mode == CORE ? CC_MODE_CORE : |
||||
mode == ALL ? CC_MODE_ALL : string(); |
||||
CV_Assert( !modeStr.empty() ); |
||||
fs << CC_MODE << modeStr; |
||||
} |
||||
|
||||
bool CvHaarFeatureParams::read( const FileNode &node ) |
||||
{ |
||||
if( !CvFeatureParams::read( node ) ) |
||||
return false; |
||||
|
||||
FileNode rnode = node[CC_MODE]; |
||||
if( !rnode.isString() ) |
||||
return false; |
||||
string modeStr; |
||||
rnode >> modeStr; |
||||
mode = !modeStr.compare( CC_MODE_BASIC ) ? BASIC : |
||||
!modeStr.compare( CC_MODE_CORE ) ? CORE : |
||||
!modeStr.compare( CC_MODE_ALL ) ? ALL : -1; |
||||
return (mode >= 0); |
||||
} |
||||
|
||||
void CvHaarFeatureParams::printDefaults() const |
||||
{ |
||||
CvFeatureParams::printDefaults(); |
||||
cout << " [-mode <" CC_MODE_BASIC << "(default) | " |
||||
<< CC_MODE_CORE << " | " << CC_MODE_ALL << ">]" << endl; |
||||
} |
||||
|
||||
void CvHaarFeatureParams::printAttrs() const |
||||
{ |
||||
CvFeatureParams::printAttrs(); |
||||
string mode_str = mode == BASIC ? CC_MODE_BASIC : |
||||
mode == CORE ? CC_MODE_CORE : |
||||
mode == ALL ? CC_MODE_ALL : string(); |
||||
cout << "mode: " << mode_str << endl; |
||||
} |
||||
|
||||
bool CvHaarFeatureParams::scanAttr( const string prmName, const string val) |
||||
{ |
||||
if ( !CvFeatureParams::scanAttr( prmName, val ) ) |
||||
{ |
||||
if( !prmName.compare("-mode") ) |
||||
{ |
||||
mode = !val.compare( CC_MODE_CORE ) ? CORE : |
||||
!val.compare( CC_MODE_ALL ) ? ALL : |
||||
!val.compare( CC_MODE_BASIC ) ? BASIC : -1; |
||||
if (mode == -1) |
||||
return false; |
||||
} |
||||
return false; |
||||
} |
||||
return true; |
||||
} |
||||
|
||||
//--------------------- HaarFeatureEvaluator ----------------
|
||||
|
||||
void CvHaarEvaluator::init(const CvFeatureParams *_featureParams, |
||||
int _maxSampleCount, Size _winSize ) |
||||
{ |
||||
CV_Assert(_maxSampleCount > 0); |
||||
int cols = (_winSize.width + 1) * (_winSize.height + 1); |
||||
sum.create((int)_maxSampleCount, cols, CV_32SC1); |
||||
tilted.create((int)_maxSampleCount, cols, CV_32SC1); |
||||
normfactor.create(1, (int)_maxSampleCount, CV_32FC1); |
||||
CvFeatureEvaluator::init( _featureParams, _maxSampleCount, _winSize ); |
||||
} |
||||
|
||||
void CvHaarEvaluator::setImage(const Mat& img, uchar clsLabel, int idx) |
||||
{ |
||||
CV_DbgAssert( !sum.empty() && !tilted.empty() && !normfactor.empty() ); |
||||
CvFeatureEvaluator::setImage( img, clsLabel, idx); |
||||
Mat innSum(winSize.height + 1, winSize.width + 1, sum.type(), sum.ptr<int>((int)idx)); |
||||
Mat innSqSum; |
||||
if (((const CvHaarFeatureParams*)featureParams)->mode == CvHaarFeatureParams::ALL) |
||||
{ |
||||
Mat innTilted(winSize.height + 1, winSize.width + 1, tilted.type(), tilted.ptr<int>((int)idx)); |
||||
integral(img, innSum, innSqSum, innTilted); |
||||
} |
||||
else |
||||
integral(img, innSum, innSqSum); |
||||
normfactor.ptr<float>(0)[idx] = calcNormFactor( innSum, innSqSum ); |
||||
} |
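
Each sample owns one row of `sum` (and of `tilted`), which is reinterpreted above as a (h+1)x(w+1) integral-image header, so nothing is copied; the real work is a single cv::integral call. A small standalone sketch of its three outputs (the 24x24 sample size is an assumption):

cv::Mat sample = cv::Mat::zeros( 24, 24, CV_8UC1 );   // hypothetical training sample
cv::Mat isum, isqsum, itilted;
cv::integral( sample, isum, isqsum, itilted );        // sum, squared sum, 45-degree tilted sum
CV_Assert( isum.size() == cv::Size( 25, 25 ) );       // (w+1) x (h+1); CV_32S for 8-bit input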
||||
|
||||
void CvHaarEvaluator::writeFeatures( FileStorage &fs, const Mat& featureMap ) const |
||||
{ |
||||
_writeFeatures( features, fs, featureMap ); |
||||
} |
||||
|
||||
void CvHaarEvaluator::writeFeature(FileStorage &fs, int fi) const |
||||
{ |
||||
CV_DbgAssert( fi < (int)features.size() ); |
||||
features[fi].write(fs); |
||||
} |
||||
|
||||
void CvHaarEvaluator::generateFeatures() |
||||
{ |
||||
int mode = ((const CvHaarFeatureParams*)((CvFeatureParams*)featureParams))->mode; |
||||
int offset = winSize.width + 1; |
||||
for( int x = 0; x < winSize.width; x++ ) |
||||
{ |
||||
for( int y = 0; y < winSize.height; y++ ) |
||||
{ |
||||
for( int dx = 1; dx <= winSize.width; dx++ ) |
||||
{ |
||||
for( int dy = 1; dy <= winSize.height; dy++ ) |
||||
{ |
||||
// haar_x2
|
||||
if ( (x+dx*2 <= winSize.width) && (y+dy <= winSize.height) ) |
||||
{ |
||||
features.push_back( Feature( offset, false, |
||||
x, y, dx*2, dy, -1, |
||||
x+dx, y, dx , dy, +2 ) ); |
||||
} |
||||
// haar_y2
|
||||
if ( (x+dx <= winSize.width) && (y+dy*2 <= winSize.height) ) |
||||
{ |
||||
features.push_back( Feature( offset, false, |
||||
x, y, dx, dy*2, -1, |
||||
x, y+dy, dx, dy, +2 ) ); |
||||
} |
||||
// haar_x3
|
||||
if ( (x+dx*3 <= winSize.width) && (y+dy <= winSize.height) ) |
||||
{ |
||||
features.push_back( Feature( offset, false, |
||||
x, y, dx*3, dy, -1, |
||||
x+dx, y, dx , dy, +2 ) ); |
||||
} |
||||
// haar_y3
|
||||
if ( (x+dx <= winSize.width) && (y+dy*3 <= winSize.height) ) |
||||
{ |
||||
features.push_back( Feature( offset, false, |
||||
x, y, dx, dy*3, -1, |
||||
x, y+dy, dx, dy, +2 ) ); |
||||
} |
||||
if( mode != CvHaarFeatureParams::BASIC ) |
||||
{ |
||||
// haar_x4
|
||||
if ( (x+dx*4 <= winSize.width) && (y+dy <= winSize.height) ) |
||||
{ |
||||
features.push_back( Feature( offset, false, |
||||
x, y, dx*4, dy, -1, |
||||
x+dx, y, dx*2, dy, +2 ) ); |
||||
} |
||||
// haar_y4
|
||||
if ( (x+dx <= winSize.width ) && (y+dy*4 <= winSize.height) ) |
||||
{ |
||||
features.push_back( Feature( offset, false, |
||||
x, y, dx, dy*4, -1, |
||||
x, y+dy, dx, dy*2, +2 ) ); |
||||
} |
||||
} |
||||
// x2_y2
|
||||
if ( (x+dx*2 <= winSize.width) && (y+dy*2 <= winSize.height) ) |
||||
{ |
||||
features.push_back( Feature( offset, false, |
||||
x, y, dx*2, dy*2, -1, |
||||
x, y, dx, dy, +2, |
||||
x+dx, y+dy, dx, dy, +2 ) ); |
||||
} |
||||
if (mode != CvHaarFeatureParams::BASIC) |
||||
{ |
||||
if ( (x+dx*3 <= winSize.width) && (y+dy*3 <= winSize.height) ) |
||||
{ |
||||
features.push_back( Feature( offset, false, |
||||
x , y , dx*3, dy*3, -1, |
||||
x+dx, y+dy, dx , dy , +9) ); |
||||
} |
||||
} |
||||
if (mode == CvHaarFeatureParams::ALL) |
||||
{ |
||||
// tilted haar_x2
|
||||
if ( (x+2*dx <= winSize.width) && (y+2*dx+dy <= winSize.height) && (x-dy>= 0) ) |
||||
{ |
||||
features.push_back( Feature( offset, true, |
||||
x, y, dx*2, dy, -1, |
||||
x, y, dx, dy, +2 ) ); |
||||
} |
||||
// tilted haar_y2
|
||||
if ( (x+dx <= winSize.width) && (y+dx+2*dy <= winSize.height) && (x-2*dy>= 0) ) |
||||
{ |
||||
features.push_back( Feature( offset, true, |
||||
x, y, dx, 2*dy, -1, |
||||
x, y, dx, dy, +2 ) ); |
||||
} |
||||
// tilted haar_x3
|
||||
if ( (x+3*dx <= winSize.width) && (y+3*dx+dy <= winSize.height) && (x-dy>= 0) ) |
||||
{ |
||||
features.push_back( Feature( offset, true, |
||||
x, y, dx*3, dy, -1, |
||||
x+dx, y+dx, dx, dy, +3 ) ); |
||||
} |
||||
// tilted haar_y3
|
||||
if ( (x+dx <= winSize.width) && (y+dx+3*dy <= winSize.height) && (x-3*dy>= 0) ) |
||||
{ |
||||
features.push_back( Feature( offset, true, |
||||
x, y, dx, 3*dy, -1, |
||||
x-dy, y+dy, dx, dy, +3 ) ); |
||||
} |
||||
// tilted haar_x4
|
||||
if ( (x+4*dx <= winSize.width) && (y+4*dx+dy <= winSize.height) && (x-dy>= 0) ) |
||||
{ |
||||
features.push_back( Feature( offset, true, |
||||
x, y, dx*4, dy, -1, |
||||
x+dx, y+dx, dx*2, dy, +2 ) ); |
||||
} |
||||
// tilted haar_y4
|
||||
if ( (x+dx <= winSize.width) && (y+dx+4*dy <= winSize.height) && (x-4*dy>= 0) ) |
||||
{ |
||||
features.push_back( Feature( offset, true, |
||||
x, y, dx, 4*dy, -1, |
||||
x-dy, y+dy, dx, 2*dy, +2 ) ); |
||||
} |
||||
} |
||||
} |
||||
} |
||||
} |
||||
} |
||||
numFeatures = (int)features.size(); |
||||
} |
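
For the default 24x24 window these loops produce a large but easily counted pool: haar_x2 alone contributes 144 valid (x, dx) pairs times 300 valid (y, dy) pairs = 43,200 candidates, and the five BASIC templates (haar_x2, haar_y2, haar_x3, haar_y3, x2_y2) together come to 162,336. A short check for the first template (24x24 is an assumption, matching the tool's default window size):

int n = 0;
for ( int x = 0; x < 24; x++ )
    for ( int y = 0; y < 24; y++ )
        for ( int dx = 1; dx <= 24; dx++ )
            for ( int dy = 1; dy <= 24; dy++ )
                if ( x + dx * 2 <= 24 && y + dy <= 24 )
                    n++;                 // same condition as the haar_x2 branch above
CV_Assert( n == 43200 );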
||||
|
||||
CvHaarEvaluator::Feature::Feature() |
||||
{ |
||||
tilted = false; |
||||
rect[0].r = rect[1].r = rect[2].r = Rect(0,0,0,0); |
||||
rect[0].weight = rect[1].weight = rect[2].weight = 0; |
||||
} |
||||
|
||||
CvHaarEvaluator::Feature::Feature( int offset, bool _tilted, |
||||
int x0, int y0, int w0, int h0, float wt0, |
||||
int x1, int y1, int w1, int h1, float wt1, |
||||
int x2, int y2, int w2, int h2, float wt2 ) |
||||
{ |
||||
tilted = _tilted; |
||||
|
||||
rect[0].r.x = x0; |
||||
rect[0].r.y = y0; |
||||
rect[0].r.width = w0; |
||||
rect[0].r.height = h0; |
||||
rect[0].weight = wt0; |
||||
|
||||
rect[1].r.x = x1; |
||||
rect[1].r.y = y1; |
||||
rect[1].r.width = w1; |
||||
rect[1].r.height = h1; |
||||
rect[1].weight = wt1; |
||||
|
||||
rect[2].r.x = x2; |
||||
rect[2].r.y = y2; |
||||
rect[2].r.width = w2; |
||||
rect[2].r.height = h2; |
||||
rect[2].weight = wt2; |
||||
|
||||
if( !tilted ) |
||||
{ |
||||
for( int j = 0; j < CV_HAAR_FEATURE_MAX; j++ ) |
||||
{ |
||||
if( rect[j].weight == 0.0F ) |
||||
break; |
||||
CV_SUM_OFFSETS( fastRect[j].p0, fastRect[j].p1, fastRect[j].p2, fastRect[j].p3, rect[j].r, offset ) |
||||
} |
||||
} |
||||
else |
||||
{ |
||||
for( int j = 0; j < CV_HAAR_FEATURE_MAX; j++ ) |
||||
{ |
||||
if( rect[j].weight == 0.0F ) |
||||
break; |
||||
CV_TILTED_OFFSETS( fastRect[j].p0, fastRect[j].p1, fastRect[j].p2, fastRect[j].p3, rect[j].r, offset ) |
||||
} |
||||
} |
||||
} |
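
CV_SUM_OFFSETS and CV_TILTED_OFFSETS are defined in the removed traincascade_features.h header, which is not part of this hunk; a sketch of the upright case, to be read as an assumption about what the macro computes: four linear indices into a row-major integral image, so that every later feature evaluation is just lookups and adds.

// p0..p3 index into an integral image stored row-major with `step` values per row;
// the rectangle sum is then I[p0] - I[p1] - I[p2] + I[p3].
static void sumOffsetsSketch( const cv::Rect& r, int step, int& p0, int& p1, int& p2, int& p3 )
{
    p0 = r.y * step + r.x;                           // top-left corner
    p1 = r.y * step + r.x + r.width;                 // top-right corner
    p2 = ( r.y + r.height ) * step + r.x;            // bottom-left corner
    p3 = ( r.y + r.height ) * step + r.x + r.width;  // bottom-right corner
}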
||||
|
||||
void CvHaarEvaluator::Feature::write( FileStorage &fs ) const |
||||
{ |
||||
fs << CC_RECTS << "["; |
||||
for( int ri = 0; ri < CV_HAAR_FEATURE_MAX && rect[ri].r.width != 0; ++ri ) |
||||
{ |
||||
fs << "[:" << rect[ri].r.x << rect[ri].r.y << |
||||
rect[ri].r.width << rect[ri].r.height << rect[ri].weight << "]"; |
||||
} |
||||
fs << "]" << CC_TILTED << tilted; |
||||
} |
@ -1,89 +0,0 @@ |
||||
#ifndef _OPENCV_HAARFEATURES_H_ |
||||
#define _OPENCV_HAARFEATURES_H_ |
||||
|
||||
#include "traincascade_features.h" |
||||
|
||||
#define CV_HAAR_FEATURE_MAX 3 |
||||
|
||||
#define HFP_NAME "haarFeatureParams" |
||||
class CvHaarFeatureParams : public CvFeatureParams |
||||
{ |
||||
public: |
||||
enum { BASIC = 0, CORE = 1, ALL = 2 }; |
||||
/* 0 - BASIC = Viola
|
||||
* 1 - CORE = All upright |
||||
* 2 - ALL = All features */ |
||||
|
||||
CvHaarFeatureParams(); |
||||
CvHaarFeatureParams( int _mode ); |
||||
|
||||
virtual void init( const CvFeatureParams& fp ); |
||||
virtual void write( cv::FileStorage &fs ) const; |
||||
virtual bool read( const cv::FileNode &node ); |
||||
|
||||
virtual void printDefaults() const; |
||||
virtual void printAttrs() const; |
||||
virtual bool scanAttr( const std::string prm, const std::string val); |
||||
|
||||
int mode; |
||||
}; |
||||
|
||||
class CvHaarEvaluator : public CvFeatureEvaluator |
||||
{ |
||||
public: |
||||
virtual void init(const CvFeatureParams *_featureParams, |
||||
int _maxSampleCount, cv::Size _winSize ); |
||||
virtual void setImage(const cv::Mat& img, uchar clsLabel, int idx); |
||||
virtual float operator()(int featureIdx, int sampleIdx) const; |
||||
virtual void writeFeatures( cv::FileStorage &fs, const cv::Mat& featureMap ) const; |
||||
void writeFeature( cv::FileStorage &fs, int fi ) const; // for old file format
|
||||
protected: |
||||
virtual void generateFeatures(); |
||||
|
||||
class Feature |
||||
{ |
||||
public: |
||||
Feature(); |
||||
Feature( int offset, bool _tilted, |
||||
int x0, int y0, int w0, int h0, float wt0, |
||||
int x1, int y1, int w1, int h1, float wt1, |
||||
int x2 = 0, int y2 = 0, int w2 = 0, int h2 = 0, float wt2 = 0.0F ); |
||||
float calc( const cv::Mat &sum, const cv::Mat &tilted, size_t y) const; |
||||
void write( cv::FileStorage &fs ) const; |
||||
|
||||
bool tilted; |
||||
struct |
||||
{ |
||||
cv::Rect r; |
||||
float weight; |
||||
} rect[CV_HAAR_FEATURE_MAX]; |
||||
|
||||
struct |
||||
{ |
||||
int p0, p1, p2, p3; |
||||
} fastRect[CV_HAAR_FEATURE_MAX]; |
||||
}; |
||||
|
||||
std::vector<Feature> features; |
||||
cv::Mat sum; /* sum images (each row represents image) */ |
||||
cv::Mat tilted; /* tilted sum images (each row represents image) */ |
||||
cv::Mat normfactor; /* normalization factor */ |
||||
}; |
||||
|
||||
inline float CvHaarEvaluator::operator()(int featureIdx, int sampleIdx) const |
||||
{ |
||||
float nf = normfactor.at<float>(0, sampleIdx); |
||||
return !nf ? 0.0f : (features[featureIdx].calc( sum, tilted, sampleIdx)/nf); |
||||
} |
||||
|
||||
inline float CvHaarEvaluator::Feature::calc( const cv::Mat &_sum, const cv::Mat &_tilted, size_t y) const |
||||
{ |
||||
const int* img = tilted ? _tilted.ptr<int>((int)y) : _sum.ptr<int>((int)y); |
||||
float ret = rect[0].weight * (img[fastRect[0].p0] - img[fastRect[0].p1] - img[fastRect[0].p2] + img[fastRect[0].p3] ) + |
||||
rect[1].weight * (img[fastRect[1].p0] - img[fastRect[1].p1] - img[fastRect[1].p2] + img[fastRect[1].p3] ); |
||||
if( rect[2].weight != 0.0f ) |
||||
ret += rect[2].weight * (img[fastRect[2].p0] - img[fastRect[2].p1] - img[fastRect[2].p2] + img[fastRect[2].p3] ); |
||||
return ret; |
||||
} |
||||
|
||||
#endif |
@ -1,186 +0,0 @@ |
||||
#include "opencv2/core.hpp" |
||||
#include "opencv2/core/core_c.h" |
||||
#include "opencv2/imgproc.hpp" |
||||
#include "opencv2/imgcodecs.hpp" |
||||
|
||||
#include "imagestorage.h" |
||||
#include <stdio.h> |
||||
#include <iostream> |
||||
#include <fstream> |
||||
|
||||
using namespace std; |
||||
using namespace cv; |
||||
|
||||
bool CvCascadeImageReader::create( const string _posFilename, const string _negFilename, Size _winSize ) |
||||
{ |
||||
return posReader.create(_posFilename) && negReader.create(_negFilename, _winSize); |
||||
} |
||||
|
||||
CvCascadeImageReader::NegReader::NegReader() |
||||
{ |
||||
src.create( 0, 0 , CV_8UC1 ); |
||||
img.create( 0, 0, CV_8UC1 ); |
||||
point = offset = Point( 0, 0 ); |
||||
scale = 1.0F; |
||||
scaleFactor = 1.4142135623730950488016887242097F; |
||||
stepFactor = 0.5F; |
||||
} |
||||
|
||||
bool CvCascadeImageReader::NegReader::create( const string _filename, Size _winSize ) |
||||
{ |
||||
string str; |
||||
std::ifstream file(_filename.c_str()); |
||||
if ( !file.is_open() ) |
||||
return false; |
||||
|
||||
while( !file.eof() ) |
||||
{ |
||||
std::getline(file, str); |
||||
str.erase(str.find_last_not_of(" \n\r\t")+1); |
||||
if (str.empty()) break; |
||||
if (str.at(0) == '#' ) continue; /* comment */ |
||||
imgFilenames.push_back(str); |
||||
} |
||||
file.close(); |
||||
|
||||
winSize = _winSize; |
||||
last = round = 0; |
||||
return true; |
||||
} |
||||
|
||||
bool CvCascadeImageReader::NegReader::nextImg() |
||||
{ |
||||
Point _offset = Point(0,0); |
||||
size_t count = imgFilenames.size(); |
||||
for( size_t i = 0; i < count; i++ ) |
||||
{ |
||||
src = imread( imgFilenames[last++], IMREAD_GRAYSCALE ); |
||||
if( src.empty() ){ |
||||
last %= count; |
||||
continue; |
||||
} |
||||
round += last / count; |
||||
round = round % (winSize.width * winSize.height); |
||||
last %= count; |
||||
|
||||
_offset.x = std::min( (int)round % winSize.width, src.cols - winSize.width ); |
||||
_offset.y = std::min( (int)round / winSize.width, src.rows - winSize.height ); |
||||
if( !src.empty() && src.type() == CV_8UC1 |
||||
&& _offset.x >= 0 && _offset.y >= 0 ) |
||||
break; |
||||
} |
||||
|
||||
if( src.empty() ) |
||||
return false; // no appropriate image
|
||||
point = offset = _offset; |
||||
scale = max( ((float)winSize.width + point.x) / ((float)src.cols), |
||||
((float)winSize.height + point.y) / ((float)src.rows) ); |
||||
|
||||
Size sz( (int)(scale*src.cols + 0.5F), (int)(scale*src.rows + 0.5F) ); |
||||
resize( src, img, sz, 0, 0, INTER_LINEAR_EXACT ); |
||||
return true; |
||||
} |
||||
|
||||
bool CvCascadeImageReader::NegReader::get( Mat& _img ) |
||||
{ |
||||
CV_Assert( !_img.empty() ); |
||||
CV_Assert( _img.type() == CV_8UC1 ); |
||||
CV_Assert( _img.cols == winSize.width ); |
||||
CV_Assert( _img.rows == winSize.height ); |
||||
|
||||
if( img.empty() ) |
||||
if ( !nextImg() ) |
||||
return false; |
||||
|
||||
Mat mat( winSize.height, winSize.width, CV_8UC1, |
||||
(void*)(img.ptr(point.y) + point.x * img.elemSize()), img.step ); |
||||
mat.copyTo(_img); |
||||
|
||||
if( (int)( point.x + (1.0F + stepFactor ) * winSize.width ) < img.cols ) |
||||
point.x += (int)(stepFactor * winSize.width); |
||||
else |
||||
{ |
||||
point.x = offset.x; |
||||
if( (int)( point.y + (1.0F + stepFactor ) * winSize.height ) < img.rows ) |
||||
point.y += (int)(stepFactor * winSize.height); |
||||
else |
||||
{ |
||||
point.y = offset.y; |
||||
scale *= scaleFactor; |
||||
if( scale <= 1.0F ) |
||||
resize( src, img, Size( (int)(scale*src.cols), (int)(scale*src.rows) ), 0, 0, INTER_LINEAR_EXACT ); |
||||
else |
||||
{ |
||||
if ( !nextImg() ) |
||||
return false; |
||||
} |
||||
} |
||||
} |
||||
return true; |
||||
} |
||||
|
||||
CvCascadeImageReader::PosReader::PosReader() |
||||
{ |
||||
file = 0; |
||||
vec = 0; |
||||
} |
||||
|
||||
bool CvCascadeImageReader::PosReader::create( const string _filename ) |
||||
{ |
||||
if ( file ) |
||||
fclose( file ); |
||||
file = fopen( _filename.c_str(), "rb" ); |
||||
|
||||
if( !file ) |
||||
return false; |
||||
short tmp = 0; |
||||
if( fread( &count, sizeof( count ), 1, file ) != 1 || |
||||
fread( &vecSize, sizeof( vecSize ), 1, file ) != 1 || |
||||
fread( &tmp, sizeof( tmp ), 1, file ) != 1 || |
||||
fread( &tmp, sizeof( tmp ), 1, file ) != 1 ) |
||||
CV_Error_( CV_StsParseError, ("wrong file format for %s\n", _filename.c_str()) ); |
||||
base = sizeof( count ) + sizeof( vecSize ) + 2*sizeof( tmp ); |
||||
if( feof( file ) ) |
||||
return false; |
||||
last = 0; |
||||
vec = (short*) cvAlloc( sizeof( *vec ) * vecSize ); |
||||
CV_Assert( vec ); |
||||
return true; |
||||
} |
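
The four reads above are the whole .vec header; a sketch of its layout (the struct and field names are descriptive inventions for illustration, and byte order is whatever fwrite produced, since the reader uses raw fread):

struct VecFileHeaderSketch
{
    int   sampleCount;   // number of positive samples stored in the file
    int   vecSize;       // pixels per sample, i.e. sample width * height
    short unused1;       // two legacy fields, read above and ignored
    short unused2;
};
// Each sample then follows as one uchar separator byte plus vecSize 16-bit pixel
// values, which is exactly what PosReader::get() below consumes.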
||||
|
||||
bool CvCascadeImageReader::PosReader::get( Mat &_img ) |
||||
{ |
||||
CV_Assert( _img.rows * _img.cols == vecSize ); |
||||
uchar tmp = 0; |
||||
size_t elements_read = fread( &tmp, sizeof( tmp ), 1, file ); |
||||
if( elements_read != 1 ) |
||||
CV_Error( CV_StsBadArg, "Can not get new positive sample. The most possible reason is " |
||||
"insufficient count of samples in given vec-file.\n"); |
||||
elements_read = fread( vec, sizeof( vec[0] ), vecSize, file ); |
||||
if( elements_read != (size_t)(vecSize) ) |
||||
CV_Error( CV_StsBadArg, "Can not get new positive sample. Seems that vec-file has incorrect structure.\n"); |
||||
|
||||
if( feof( file ) || last++ >= count ) |
||||
CV_Error( CV_StsBadArg, "Can not get new positive sample. vec-file is over.\n"); |
||||
|
||||
for( int r = 0; r < _img.rows; r++ ) |
||||
{ |
||||
for( int c = 0; c < _img.cols; c++ ) |
||||
_img.ptr(r)[c] = (uchar)vec[r * _img.cols + c]; |
||||
} |
||||
return true; |
||||
} |
||||
|
||||
void CvCascadeImageReader::PosReader::restart() |
||||
{ |
||||
CV_Assert( file ); |
||||
last = 0; |
||||
fseek( file, base, SEEK_SET ); |
||||
} |
||||
|
||||
CvCascadeImageReader::PosReader::~PosReader() |
||||
{ |
||||
if (file) |
||||
fclose( file ); |
||||
cvFree( &vec ); |
||||
} |
@ -1,50 +0,0 @@ |
||||
#ifndef _OPENCV_IMAGESTORAGE_H_ |
||||
#define _OPENCV_IMAGESTORAGE_H_ |
||||
|
||||
|
||||
class CvCascadeImageReader |
||||
{ |
||||
public: |
||||
bool create( const std::string _posFilename, const std::string _negFilename, cv::Size _winSize ); |
||||
void restart() { posReader.restart(); } |
||||
bool getNeg(cv::Mat &_img) { return negReader.get( _img ); } |
||||
bool getPos(cv::Mat &_img) { return posReader.get( _img ); } |
||||
|
||||
private: |
||||
class PosReader |
||||
{ |
||||
public: |
||||
PosReader(); |
||||
virtual ~PosReader(); |
||||
bool create( const std::string _filename ); |
||||
bool get( cv::Mat &_img ); |
||||
void restart(); |
||||
|
||||
short* vec; |
||||
FILE* file; |
||||
int count; |
||||
int vecSize; |
||||
int last; |
||||
int base; |
||||
} posReader; |
||||
|
||||
class NegReader |
||||
{ |
||||
public: |
||||
NegReader(); |
||||
bool create( const std::string _filename, cv::Size _winSize ); |
||||
bool get( cv::Mat& _img ); |
||||
bool nextImg(); |
||||
|
||||
cv::Mat src, img; |
||||
std::vector<std::string> imgFilenames; |
||||
cv::Point offset, point; |
||||
float scale; |
||||
float scaleFactor; |
||||
float stepFactor; |
||||
size_t last, round; |
||||
cv::Size winSize; |
||||
} negReader; |
||||
}; |
||||
|
||||
#endif |
@ -1,67 +0,0 @@ |
||||
#include "opencv2/core.hpp" |
||||
#include "opencv2/imgproc.hpp" |
||||
|
||||
#include "lbpfeatures.h" |
||||
#include "cascadeclassifier.h" |
||||
|
||||
using namespace cv; |
||||
|
||||
CvLBPFeatureParams::CvLBPFeatureParams() |
||||
{ |
||||
maxCatCount = 256; |
||||
name = LBPF_NAME; |
||||
} |
||||
|
||||
void CvLBPEvaluator::init(const CvFeatureParams *_featureParams, int _maxSampleCount, Size _winSize) |
||||
{ |
||||
CV_Assert( _maxSampleCount > 0); |
||||
sum.create((int)_maxSampleCount, (_winSize.width + 1) * (_winSize.height + 1), CV_32SC1); |
||||
CvFeatureEvaluator::init( _featureParams, _maxSampleCount, _winSize ); |
||||
} |
||||
|
||||
void CvLBPEvaluator::setImage(const Mat &img, uchar clsLabel, int idx) |
||||
{ |
||||
CV_DbgAssert( !sum.empty() ); |
||||
CvFeatureEvaluator::setImage( img, clsLabel, idx ); |
||||
Mat innSum(winSize.height + 1, winSize.width + 1, sum.type(), sum.ptr<int>((int)idx)); |
||||
integral( img, innSum ); |
||||
} |
||||
|
||||
void CvLBPEvaluator::writeFeatures( FileStorage &fs, const Mat& featureMap ) const |
||||
{ |
||||
_writeFeatures( features, fs, featureMap ); |
||||
} |
||||
|
||||
void CvLBPEvaluator::generateFeatures() |
||||
{ |
||||
int offset = winSize.width + 1; |
||||
for( int x = 0; x < winSize.width; x++ ) |
||||
for( int y = 0; y < winSize.height; y++ ) |
||||
for( int w = 1; w <= winSize.width / 3; w++ ) |
||||
for( int h = 1; h <= winSize.height / 3; h++ ) |
||||
if ( (x+3*w <= winSize.width) && (y+3*h <= winSize.height) ) |
||||
features.push_back( Feature(offset, x, y, w, h ) ); |
||||
numFeatures = (int)features.size(); |
||||
} |
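
Worked example for the default 24x24 window (an assumption): block widths and heights run from 1 to 8 so that a 3w x 3h cell grid fits, giving (sum over w=1..8 of (25-3w)) squared = 92 * 92 = 8464 LBP features.

int count = 0;
for ( int w = 1; w <= 24 / 3; w++ )
    for ( int h = 1; h <= 24 / 3; h++ )
        count += ( 24 - 3 * w + 1 ) * ( 24 - 3 * h + 1 );   // valid top-left positions
CV_Assert( count == 8464 );   // matches the enumeration above for winSize == 24x24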
||||
|
||||
CvLBPEvaluator::Feature::Feature() |
||||
{ |
||||
rect = cvRect(0, 0, 0, 0); |
||||
} |
||||
|
||||
CvLBPEvaluator::Feature::Feature( int offset, int x, int y, int _blockWidth, int _blockHeight ) |
||||
{ |
||||
Rect tr = rect = cvRect(x, y, _blockWidth, _blockHeight); |
||||
CV_SUM_OFFSETS( p[0], p[1], p[4], p[5], tr, offset ) |
||||
tr.x += 2*rect.width; |
||||
CV_SUM_OFFSETS( p[2], p[3], p[6], p[7], tr, offset ) |
||||
tr.y +=2*rect.height; |
||||
CV_SUM_OFFSETS( p[10], p[11], p[14], p[15], tr, offset ) |
||||
tr.x -= 2*rect.width; |
||||
CV_SUM_OFFSETS( p[8], p[9], p[12], p[13], tr, offset ) |
||||
} |
||||
|
||||
void CvLBPEvaluator::Feature::write(FileStorage &fs) const |
||||
{ |
||||
fs << CC_RECT << "[:" << rect.x << rect.y << rect.width << rect.height << "]"; |
||||
} |
@ -1,57 +0,0 @@ |
||||
#ifndef _OPENCV_LBPFEATURES_H_ |
||||
#define _OPENCV_LBPFEATURES_H_ |
||||
|
||||
#include "traincascade_features.h" |
||||
|
||||
#define LBPF_NAME "lbpFeatureParams" |
||||
struct CvLBPFeatureParams : CvFeatureParams |
||||
{ |
||||
CvLBPFeatureParams(); |
||||
|
||||
}; |
||||
|
||||
class CvLBPEvaluator : public CvFeatureEvaluator |
||||
{ |
||||
public: |
||||
virtual ~CvLBPEvaluator() {} |
||||
virtual void init(const CvFeatureParams *_featureParams, |
||||
int _maxSampleCount, cv::Size _winSize ); |
||||
virtual void setImage(const cv::Mat& img, uchar clsLabel, int idx); |
||||
virtual float operator()(int featureIdx, int sampleIdx) const |
||||
{ return (float)features[featureIdx].calc( sum, sampleIdx); } |
||||
virtual void writeFeatures( cv::FileStorage &fs, const cv::Mat& featureMap ) const; |
||||
protected: |
||||
virtual void generateFeatures(); |
||||
|
||||
class Feature |
||||
{ |
||||
public: |
||||
Feature(); |
||||
Feature( int offset, int x, int y, int _block_w, int _block_h ); |
||||
uchar calc( const cv::Mat& _sum, size_t y ) const; |
||||
void write( cv::FileStorage &fs ) const; |
||||
|
||||
cv::Rect rect; |
||||
int p[16]; |
||||
}; |
||||
std::vector<Feature> features; |
||||
|
||||
cv::Mat sum; |
||||
}; |
||||
|
||||
inline uchar CvLBPEvaluator::Feature::calc(const cv::Mat &_sum, size_t y) const |
||||
{ |
||||
const int* psum = _sum.ptr<int>((int)y); |
||||
int cval = psum[p[5]] - psum[p[6]] - psum[p[9]] + psum[p[10]]; |
||||
|
||||
return (uchar)((psum[p[0]] - psum[p[1]] - psum[p[4]] + psum[p[5]] >= cval ? 128 : 0) | // 0
|
||||
(psum[p[1]] - psum[p[2]] - psum[p[5]] + psum[p[6]] >= cval ? 64 : 0) | // 1
|
||||
(psum[p[2]] - psum[p[3]] - psum[p[6]] + psum[p[7]] >= cval ? 32 : 0) | // 2
|
||||
(psum[p[6]] - psum[p[7]] - psum[p[10]] + psum[p[11]] >= cval ? 16 : 0) | // 5
|
||||
(psum[p[10]] - psum[p[11]] - psum[p[14]] + psum[p[15]] >= cval ? 8 : 0) | // 8
|
||||
(psum[p[9]] - psum[p[10]] - psum[p[13]] + psum[p[14]] >= cval ? 4 : 0) | // 7
|
||||
(psum[p[8]] - psum[p[9]] - psum[p[12]] + psum[p[13]] >= cval ? 2 : 0) | // 6
|
||||
(psum[p[4]] - psum[p[5]] - psum[p[8]] + psum[p[9]] >= cval ? 1 : 0)); // 3
|
||||
} |
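
Reading aid, not taken from the original sources: p[0..15] are the corners of a 4x4 lattice over a 3x3 grid of equal cells; numbering the cells 0..8 row-major, the expression above compares each neighbour cell's sum against the centre cell's sum `cval` and packs the eight results into one byte. An equivalent formulation over precomputed cell sums:

// c[0..8] are the nine cell sums in row-major order, c[4] being the centre;
// the bit weight per neighbour matches the numbered comments above.
static uchar lbpFromCellSums( const int c[9] )
{
    const int cval = c[4];
    return (uchar)( ( c[0] >= cval ? 128 : 0 ) | ( c[1] >= cval ? 64 : 0 ) |
                    ( c[2] >= cval ? 32 : 0 )  | ( c[5] >= cval ? 16 : 0 ) |
                    ( c[8] >= cval ? 8 : 0 )   | ( c[7] >= cval ? 4 : 0 )  |
                    ( c[6] >= cval ? 2 : 0 )   | ( c[3] >= cval ? 1 : 0 ) );
}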
||||
|
||||
#endif |
File diff suppressed because it is too large
File diff suppressed because it is too large
@ -1,792 +0,0 @@ |
||||
/*M///////////////////////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
|
||||
//
|
||||
// By downloading, copying, installing or using the software you agree to this license.
|
||||
// If you do not agree to this license, do not download, install,
|
||||
// copy or use the software.
|
||||
//
|
||||
//
|
||||
// Intel License Agreement
|
||||
//
|
||||
// Copyright (C) 2000, Intel Corporation, all rights reserved.
|
||||
// Third party copyrights are property of their respective owners.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without modification,
|
||||
// are permitted provided that the following conditions are met:
|
||||
//
|
||||
// * Redistribution's of source code must retain the above copyright notice,
|
||||
// this list of conditions and the following disclaimer.
|
||||
//
|
||||
// * Redistribution's in binary form must reproduce the above copyright notice,
|
||||
// this list of conditions and the following disclaimer in the documentation
|
||||
// and/or other materials provided with the distribution.
|
||||
//
|
||||
// * The name of Intel Corporation may not be used to endorse or promote products
|
||||
// derived from this software without specific prior written permission.
|
||||
//
|
||||
// This software is provided by the copyright holders and contributors "as is" and
|
||||
// any express or implied warranties, including, but not limited to, the implied
|
||||
// warranties of merchantability and fitness for a particular purpose are disclaimed.
|
||||
// In no event shall the Intel Corporation or contributors be liable for any direct,
|
||||
// indirect, incidental, special, exemplary, or consequential damages
|
||||
// (including, but not limited to, procurement of substitute goods or services;
|
||||
// loss of use, data, or profits; or business interruption) however caused
|
||||
// and on any theory of liability, whether in contract, strict liability,
|
||||
// or tort (including negligence or otherwise) arising in any way out of
|
||||
// the use of this software, even if advised of the possibility of such damage.
|
||||
//
|
||||
//M*/
|
||||
|
||||
#include "old_ml_precomp.hpp" |
||||
#include <ctype.h> |
||||
|
||||
#define MISS_VAL FLT_MAX |
||||
#define CV_VAR_MISS 0 |
||||
|
||||
CvTrainTestSplit::CvTrainTestSplit() |
||||
{ |
||||
train_sample_part_mode = CV_COUNT; |
||||
train_sample_part.count = -1; |
||||
mix = false; |
||||
} |
||||
|
||||
CvTrainTestSplit::CvTrainTestSplit( int _train_sample_count, bool _mix ) |
||||
{ |
||||
train_sample_part_mode = CV_COUNT; |
||||
train_sample_part.count = _train_sample_count; |
||||
mix = _mix; |
||||
} |
||||
|
||||
CvTrainTestSplit::CvTrainTestSplit( float _train_sample_portion, bool _mix ) |
||||
{ |
||||
train_sample_part_mode = CV_PORTION; |
||||
train_sample_part.portion = _train_sample_portion; |
||||
mix = _mix; |
||||
} |
||||
|
||||
////////////////
|
||||
|
||||
CvMLData::CvMLData() |
||||
{ |
||||
values = missing = var_types = var_idx_mask = response_out = var_idx_out = var_types_out = 0; |
||||
train_sample_idx = test_sample_idx = 0; |
||||
header_lines_number = 0; |
||||
sample_idx = 0; |
||||
response_idx = -1; |
||||
|
||||
train_sample_count = -1; |
||||
|
||||
delimiter = ','; |
||||
miss_ch = '?'; |
||||
//flt_separator = '.';
|
||||
|
||||
rng = &cv::theRNG(); |
||||
} |
||||
|
||||
CvMLData::~CvMLData() |
||||
{ |
||||
clear(); |
||||
} |
||||
|
||||
void CvMLData::free_train_test_idx() |
||||
{ |
||||
cvReleaseMat( &train_sample_idx ); |
||||
cvReleaseMat( &test_sample_idx ); |
||||
sample_idx = 0; |
||||
} |
||||
|
||||
void CvMLData::clear() |
||||
{ |
||||
class_map.clear(); |
||||
|
||||
cvReleaseMat( &values ); |
||||
cvReleaseMat( &missing ); |
||||
cvReleaseMat( &var_types ); |
||||
cvReleaseMat( &var_idx_mask ); |
||||
|
||||
cvReleaseMat( &response_out ); |
||||
cvReleaseMat( &var_idx_out ); |
||||
cvReleaseMat( &var_types_out ); |
||||
|
||||
free_train_test_idx(); |
||||
|
||||
total_class_count = 0; |
||||
|
||||
response_idx = -1; |
||||
|
||||
train_sample_count = -1; |
||||
} |
||||
|
||||
|
||||
void CvMLData::set_header_lines_number( int idx ) |
||||
{ |
||||
header_lines_number = std::max(0, idx); |
||||
} |
||||
|
||||
int CvMLData::get_header_lines_number() const |
||||
{ |
||||
return header_lines_number; |
||||
} |
||||
|
||||
static char *fgets_chomp(char *str, int n, FILE *stream) |
||||
{ |
||||
char *head = fgets(str, n, stream); |
||||
if( head ) |
||||
{ |
||||
for(char *tail = head + strlen(head) - 1; tail >= head; --tail) |
||||
{ |
||||
if( *tail != '\r' && *tail != '\n' ) |
||||
break; |
||||
*tail = '\0'; |
||||
} |
||||
} |
||||
return head; |
||||
} |
||||
|
||||
|
||||
int CvMLData::read_csv(const char* filename) |
||||
{ |
||||
const int M = 1000000; |
||||
const char str_delimiter[3] = { ' ', delimiter, '\0' }; |
||||
FILE* file = 0; |
||||
CvMemStorage* storage; |
||||
CvSeq* seq; |
||||
char *ptr; |
||||
float* el_ptr; |
||||
CvSeqReader reader; |
||||
int cols_count = 0; |
||||
uchar *var_types_ptr = 0; |
||||
|
||||
clear(); |
||||
|
||||
file = fopen( filename, "rt" ); |
||||
|
||||
if( !file ) |
||||
return -1; |
||||
|
||||
std::vector<char> _buf(M); |
||||
char* buf = &_buf[0]; |
||||
|
||||
// skip header lines
|
||||
for( int i = 0; i < header_lines_number; i++ ) |
||||
{ |
||||
if( fgets( buf, M, file ) == 0 ) |
||||
{ |
||||
fclose(file); |
||||
return -1; |
||||
} |
||||
} |
||||
|
||||
// read the first data line and determine the number of variables
|
||||
if( !fgets_chomp( buf, M, file )) |
||||
{ |
||||
fclose(file); |
||||
return -1; |
||||
} |
||||
|
||||
ptr = buf; |
||||
while( *ptr == ' ' ) |
||||
ptr++; |
||||
for( ; *ptr != '\0'; ) |
||||
{ |
||||
if(*ptr == delimiter || *ptr == ' ') |
||||
{ |
||||
cols_count++; |
||||
ptr++; |
||||
while( *ptr == ' ' ) ptr++; |
||||
} |
||||
else |
||||
ptr++; |
||||
} |
||||
|
||||
cols_count++; |
||||
|
||||
if ( cols_count == 0) |
||||
{ |
||||
fclose(file); |
||||
return -1; |
||||
} |
||||
|
||||
// create temporary memory storage to store the whole database
|
||||
el_ptr = new float[cols_count]; |
||||
storage = cvCreateMemStorage(); |
||||
seq = cvCreateSeq( 0, sizeof(*seq), cols_count*sizeof(float), storage ); |
||||
|
||||
var_types = cvCreateMat( 1, cols_count, CV_8U ); |
||||
cvZero( var_types ); |
||||
var_types_ptr = var_types->data.ptr; |
||||
|
||||
for(;;) |
||||
{ |
||||
char *token = NULL; |
||||
int type; |
||||
token = strtok(buf, str_delimiter); |
||||
if (!token) |
||||
break; |
||||
for (int i = 0; i < cols_count-1; i++) |
||||
{ |
||||
str_to_flt_elem( token, el_ptr[i], type); |
||||
var_types_ptr[i] |= type; |
||||
token = strtok(NULL, str_delimiter); |
||||
if (!token) |
||||
{ |
||||
fclose(file); |
||||
delete [] el_ptr; |
||||
return -1; |
||||
} |
||||
} |
||||
str_to_flt_elem( token, el_ptr[cols_count-1], type); |
||||
var_types_ptr[cols_count-1] |= type; |
||||
cvSeqPush( seq, el_ptr ); |
||||
if( !fgets_chomp( buf, M, file ) ) |
||||
break; |
||||
} |
||||
fclose(file); |
||||
|
||||
values = cvCreateMat( seq->total, cols_count, CV_32FC1 ); |
||||
missing = cvCreateMat( seq->total, cols_count, CV_8U ); |
||||
var_idx_mask = cvCreateMat( 1, values->cols, CV_8UC1 ); |
||||
cvSet( var_idx_mask, cvRealScalar(1) ); |
||||
train_sample_count = seq->total; |
||||
|
||||
cvStartReadSeq( seq, &reader ); |
||||
for(int i = 0; i < seq->total; i++ ) |
||||
{ |
||||
const float* sdata = (float*)reader.ptr; |
||||
float* ddata = values->data.fl + cols_count*i; |
||||
uchar* dm = missing->data.ptr + cols_count*i; |
||||
|
||||
for( int j = 0; j < cols_count; j++ ) |
||||
{ |
||||
ddata[j] = sdata[j]; |
||||
dm[j] = ( fabs( MISS_VAL - sdata[j] ) <= FLT_EPSILON ); |
||||
} |
||||
CV_NEXT_SEQ_ELEM( seq->elem_size, reader ); |
||||
} |
||||
|
||||
if ( cvNorm( missing, 0, CV_L1 ) <= FLT_EPSILON ) |
||||
cvReleaseMat( &missing ); |
||||
|
||||
cvReleaseMemStorage( &storage ); |
||||
delete []el_ptr; |
||||
return 0; |
||||
} |
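
A hedged usage sketch of this (removed) CSV loader; the file name and column layout are assumptions for illustration only:

CvMLData data;
data.set_delimiter( ',' );                        // ',' is the default delimiter anyway
data.set_miss_ch( '?' );                          // '?' marks missing values (the default)
if ( data.read_csv( "samples.csv" ) == 0 )        // hypothetical file
{
    data.set_response_idx( 0 );                       // treat the first column as the response
    data.change_var_type( 0, CV_VAR_CATEGORICAL );    // ... and as a class label
    const CvMat* values  = data.get_values();         // all samples as a CV_32FC1 matrix
    const CvMat* missing = data.get_missing();        // missing-value mask (may be 0 if none)
    (void)values; (void)missing;
}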
||||
|
||||
const CvMat* CvMLData::get_values() const |
||||
{ |
||||
return values; |
||||
} |
||||
|
||||
const CvMat* CvMLData::get_missing() const |
||||
{ |
||||
CV_FUNCNAME( "CvMLData::get_missing" ); |
||||
__BEGIN__; |
||||
|
||||
if ( !values ) |
||||
CV_ERROR( CV_StsInternal, "data is empty" ); |
||||
|
||||
__END__; |
||||
|
||||
return missing; |
||||
} |
||||
|
||||
const std::map<cv::String, int>& CvMLData::get_class_labels_map() const |
||||
{ |
||||
return class_map; |
||||
} |
||||
|
||||
void CvMLData::str_to_flt_elem( const char* token, float& flt_elem, int& type) |
||||
{ |
||||
|
||||
char* stopstring = NULL; |
||||
flt_elem = (float)strtod( token, &stopstring ); |
||||
assert( stopstring ); |
||||
type = CV_VAR_ORDERED; |
||||
if ( *stopstring == miss_ch && strlen(stopstring) == 1 ) // missed value
|
||||
{ |
||||
flt_elem = MISS_VAL; |
||||
type = CV_VAR_MISS; |
||||
} |
||||
else |
||||
{ |
||||
if ( (*stopstring != 0) && (*stopstring != '\n') && (strcmp(stopstring, "\r\n") != 0) ) // class label
|
||||
{ |
||||
int idx = class_map[token]; |
||||
if ( idx == 0) |
||||
{ |
||||
total_class_count++; |
||||
idx = total_class_count; |
||||
class_map[token] = idx; |
||||
} |
||||
flt_elem = (float)idx; |
||||
type = CV_VAR_CATEGORICAL; |
||||
} |
||||
} |
||||
} |
||||
|
||||
void CvMLData::set_delimiter(char ch) |
||||
{ |
||||
CV_FUNCNAME( "CvMLData::set_delimiter" ); |
||||
__BEGIN__; |
||||
|
||||
if (ch == miss_ch /*|| ch == flt_separator*/) |
||||
CV_ERROR(CV_StsBadArg, "delimiter, miss_character and flt_separator must be different"); |
||||
|
||||
delimiter = ch; |
||||
|
||||
__END__; |
||||
} |
||||
|
||||
char CvMLData::get_delimiter() const |
||||
{ |
||||
return delimiter; |
||||
} |
||||
|
||||
void CvMLData::set_miss_ch(char ch) |
||||
{ |
||||
CV_FUNCNAME( "CvMLData::set_miss_ch" ); |
||||
__BEGIN__; |
||||
|
||||
if (ch == delimiter/* || ch == flt_separator*/) |
||||
CV_ERROR(CV_StsBadArg, "delimiter, miss_character and flt_separator must be different"); |
||||
|
||||
miss_ch = ch; |
||||
|
||||
__END__; |
||||
} |
||||
|
||||
char CvMLData::get_miss_ch() const |
||||
{ |
||||
return miss_ch; |
||||
} |
||||
|
||||
void CvMLData::set_response_idx( int idx ) |
||||
{ |
||||
CV_FUNCNAME( "CvMLData::set_response_idx" ); |
||||
__BEGIN__; |
||||
|
||||
if ( !values ) |
||||
CV_ERROR( CV_StsInternal, "data is empty" ); |
||||
|
||||
if ( idx >= values->cols) |
||||
CV_ERROR( CV_StsBadArg, "idx value is not correct" ); |
||||
|
||||
if ( response_idx >= 0 ) |
||||
chahge_var_idx( response_idx, true ); |
||||
if ( idx >= 0 ) |
||||
chahge_var_idx( idx, false ); |
||||
response_idx = idx; |
||||
|
||||
__END__; |
||||
} |
||||
|
||||
int CvMLData::get_response_idx() const |
||||
{ |
||||
CV_FUNCNAME( "CvMLData::get_response_idx" ); |
||||
__BEGIN__; |
||||
|
||||
if ( !values ) |
||||
CV_ERROR( CV_StsInternal, "data is empty" ); |
||||
__END__; |
||||
return response_idx; |
||||
} |
||||
|
||||
void CvMLData::change_var_type( int var_idx, int type ) |
||||
{ |
||||
CV_FUNCNAME( "CvMLData::change_var_type" ); |
||||
__BEGIN__; |
||||
|
||||
int var_count = 0; |
||||
|
||||
if ( !values ) |
||||
CV_ERROR( CV_StsInternal, "data is empty" ); |
||||
|
||||
var_count = values->cols; |
||||
|
||||
if ( var_idx < 0 || var_idx >= var_count) |
||||
CV_ERROR( CV_StsBadArg, "var_idx is not correct" ); |
||||
|
||||
if ( type != CV_VAR_ORDERED && type != CV_VAR_CATEGORICAL) |
||||
CV_ERROR( CV_StsBadArg, "type is not correct" ); |
||||
|
||||
assert( var_types ); |
||||
if ( var_types->data.ptr[var_idx] == CV_VAR_CATEGORICAL && type == CV_VAR_ORDERED) |
||||
CV_ERROR( CV_StsBadArg, "it`s impossible to assign CV_VAR_ORDERED type to categorical variable" ); |
||||
var_types->data.ptr[var_idx] = (uchar)type; |
||||
|
||||
__END__; |
||||
|
||||
return; |
||||
} |
||||
|
||||
void CvMLData::set_var_types( const char* str ) |
||||
{ |
||||
CV_FUNCNAME( "CvMLData::set_var_types" ); |
||||
__BEGIN__; |
||||
|
||||
const char* ord = 0, *cat = 0; |
||||
int var_count = 0, set_var_type_count = 0; |
||||
if ( !values ) |
||||
CV_ERROR( CV_StsInternal, "data is empty" ); |
||||
|
||||
var_count = values->cols; |
||||
|
||||
assert( var_types ); |
||||
|
||||
ord = strstr( str, "ord" ); |
||||
cat = strstr( str, "cat" ); |
||||
if ( !ord && !cat ) |
||||
CV_ERROR( CV_StsBadArg, "types string is not correct" ); |
||||
|
||||
if ( !ord && strlen(cat) == 3 ) // str == "cat"
|
||||
{ |
||||
cvSet( var_types, cvScalarAll(CV_VAR_CATEGORICAL) ); |
||||
return; |
||||
} |
||||
|
||||
if ( !cat && strlen(ord) == 3 ) // str == "ord"
|
||||
{ |
||||
cvSet( var_types, cvScalarAll(CV_VAR_ORDERED) ); |
||||
return; |
||||
} |
||||
|
||||
if ( ord ) // parse ord str
|
||||
{ |
||||
char* stopstring = NULL; |
||||
if ( ord[3] != '[') |
||||
CV_ERROR( CV_StsBadArg, "types string is not correct" ); |
||||
|
||||
ord += 4; // pass "ord["
|
||||
do |
||||
{ |
||||
int b1 = (int)strtod( ord, &stopstring ); |
||||
if ( *stopstring == 0 || (*stopstring != ',' && *stopstring != ']' && *stopstring != '-') ) |
||||
CV_ERROR( CV_StsBadArg, "types string is not correct" ); |
||||
ord = stopstring + 1; |
||||
if ( (stopstring[0] == ',') || (stopstring[0] == ']')) |
||||
{ |
||||
if ( var_types->data.ptr[b1] == CV_VAR_CATEGORICAL) |
||||
CV_ERROR( CV_StsBadArg, "it`s impossible to assign CV_VAR_ORDERED type to categorical variable" ); |
||||
var_types->data.ptr[b1] = CV_VAR_ORDERED; |
||||
set_var_type_count++; |
||||
} |
||||
else |
||||
{ |
||||
if ( stopstring[0] == '-') |
||||
{ |
||||
int b2 = (int)strtod( ord, &stopstring); |
||||
if ( (*stopstring == 0) || (*stopstring != ',' && *stopstring != ']') ) |
||||
CV_ERROR( CV_StsBadArg, "types string is not correct" ); |
||||
ord = stopstring + 1; |
||||
for (int i = b1; i <= b2; i++) |
||||
{ |
||||
if ( var_types->data.ptr[i] == CV_VAR_CATEGORICAL) |
||||
CV_ERROR( CV_StsBadArg, "it`s impossible to assign CV_VAR_ORDERED type to categorical variable" ); |
||||
var_types->data.ptr[i] = CV_VAR_ORDERED; |
||||
} |
||||
set_var_type_count += b2 - b1 + 1; |
||||
} |
||||
else |
||||
CV_ERROR( CV_StsBadArg, "types string is not correct" ); |
||||
|
||||
} |
||||
} |
||||
while (*stopstring != ']'); |
||||
|
||||
if ( stopstring[1] != '\0' && stopstring[1] != ',') |
||||
CV_ERROR( CV_StsBadArg, "types string is not correct" ); |
||||
} |
||||
|
||||
if ( cat ) // parse cat str
|
||||
{ |
||||
char* stopstring = NULL; |
||||
if ( cat[3] != '[') |
||||
CV_ERROR( CV_StsBadArg, "types string is not correct" ); |
||||
|
||||
cat += 4; // pass "cat["
|
||||
do |
||||
{ |
||||
int b1 = (int)strtod( cat, &stopstring ); |
||||
if ( *stopstring == 0 || (*stopstring != ',' && *stopstring != ']' && *stopstring != '-') ) |
||||
CV_ERROR( CV_StsBadArg, "types string is not correct" ); |
||||
cat = stopstring + 1; |
||||
if ( (stopstring[0] == ',') || (stopstring[0] == ']')) |
||||
{ |
||||
var_types->data.ptr[b1] = CV_VAR_CATEGORICAL; |
||||
set_var_type_count++; |
||||
} |
||||
else |
||||
{ |
||||
if ( stopstring[0] == '-') |
||||
{ |
||||
int b2 = (int)strtod( cat, &stopstring); |
||||
if ( (*stopstring == 0) || (*stopstring != ',' && *stopstring != ']') ) |
||||
CV_ERROR( CV_StsBadArg, "types string is not correct" ); |
||||
cat = stopstring + 1; |
||||
for (int i = b1; i <= b2; i++) |
||||
var_types->data.ptr[i] = CV_VAR_CATEGORICAL; |
||||
set_var_type_count += b2 - b1 + 1; |
||||
} |
||||
else |
||||
CV_ERROR( CV_StsBadArg, "types string is not correct" ); |
||||
|
||||
} |
||||
} |
||||
while (*stopstring != ']'); |
||||
|
||||
if ( stopstring[1] != '\0' && stopstring[1] != ',') |
||||
CV_ERROR( CV_StsBadArg, "types string is not correct" ); |
||||
} |
||||
|
||||
if (set_var_type_count != var_count) |
||||
CV_ERROR( CV_StsBadArg, "types string is not correct" ); |
||||
|
||||
__END__; |
||||
} |
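
Example of the specification string this parser accepts, continuing the CvMLData sketch above (illustrative; it assumes the loaded CSV has exactly six columns, since every column must end up typed):

data.set_var_types( "ord[0-3],cat[4,5]" );   // columns 0..3 ordered, columns 4 and 5 categorical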
||||
|
||||
const CvMat* CvMLData::get_var_types() |
||||
{ |
||||
CV_FUNCNAME( "CvMLData::get_var_types" ); |
||||
__BEGIN__; |
||||
|
||||
uchar *var_types_out_ptr = 0; |
||||
int avcount, vt_size; |
||||
if ( !values ) |
||||
CV_ERROR( CV_StsInternal, "data is empty" ); |
||||
|
||||
assert( var_idx_mask ); |
||||
|
||||
avcount = cvFloor( cvNorm( var_idx_mask, 0, CV_L1 ) ); |
||||
vt_size = avcount + (response_idx >= 0); |
||||
|
||||
if ( avcount == values->cols || (avcount == values->cols-1 && response_idx == values->cols-1) ) |
||||
return var_types; |
||||
|
||||
if ( !var_types_out || ( var_types_out && var_types_out->cols != vt_size ) ) |
||||
{ |
||||
cvReleaseMat( &var_types_out ); |
||||
var_types_out = cvCreateMat( 1, vt_size, CV_8UC1 ); |
||||
} |
||||
|
||||
var_types_out_ptr = var_types_out->data.ptr; |
||||
for( int i = 0; i < var_types->cols; i++) |
||||
{ |
||||
if (i == response_idx || !var_idx_mask->data.ptr[i]) continue; |
||||
*var_types_out_ptr = var_types->data.ptr[i]; |
||||
var_types_out_ptr++; |
||||
} |
||||
if ( response_idx >= 0 ) |
||||
*var_types_out_ptr = var_types->data.ptr[response_idx]; |
||||
|
||||
__END__; |
||||
|
||||
return var_types_out; |
||||
} |
||||
|
||||
int CvMLData::get_var_type( int var_idx ) const |
||||
{ |
||||
return var_types->data.ptr[var_idx]; |
||||
} |
||||
|
||||
const CvMat* CvMLData::get_responses() |
||||
{ |
||||
CV_FUNCNAME( "CvMLData::get_responses_ptr" ); |
||||
__BEGIN__; |
||||
|
||||
int var_count = 0; |
||||
|
||||
if ( !values ) |
||||
CV_ERROR( CV_StsInternal, "data is empty" ); |
||||
var_count = values->cols; |
||||
|
||||
if ( response_idx < 0 || response_idx >= var_count ) |
||||
return 0; |
||||
if ( !response_out ) |
||||
response_out = cvCreateMatHeader( values->rows, 1, CV_32FC1 ); |
||||
else |
||||
cvInitMatHeader( response_out, values->rows, 1, CV_32FC1); |
||||
cvGetCol( values, response_out, response_idx ); |
||||
|
||||
__END__; |
||||
|
||||
return response_out; |
||||
} |
||||
|
||||
void CvMLData::set_train_test_split( const CvTrainTestSplit * spl) |
||||
{ |
||||
CV_FUNCNAME( "CvMLData::set_division" ); |
||||
__BEGIN__; |
||||
|
||||
int sample_count = 0; |
||||
|
||||
if ( !values ) |
||||
CV_ERROR( CV_StsInternal, "data is empty" ); |
||||
|
||||
sample_count = values->rows; |
||||
|
||||
float train_sample_portion; |
||||
|
||||
if (spl->train_sample_part_mode == CV_COUNT) |
||||
{ |
||||
train_sample_count = spl->train_sample_part.count; |
||||
if (train_sample_count > sample_count) |
||||
CV_ERROR( CV_StsBadArg, "train samples count is not correct" ); |
||||
train_sample_count = train_sample_count<=0 ? sample_count : train_sample_count; |
||||
} |
||||
else // dtype.train_sample_part_mode == CV_PORTION
|
||||
{ |
||||
train_sample_portion = spl->train_sample_part.portion; |
||||
if ( train_sample_portion > 1) |
||||
CV_ERROR( CV_StsBadArg, "train samples count is not correct" ); |
||||
train_sample_portion = train_sample_portion <= FLT_EPSILON || |
||||
1 - train_sample_portion <= FLT_EPSILON ? 1 : train_sample_portion; |
||||
train_sample_count = std::max(1, cvFloor( train_sample_portion * sample_count )); |
||||
} |
||||
|
||||
if ( train_sample_count == sample_count ) |
||||
{ |
||||
free_train_test_idx(); |
||||
return; |
||||
} |
||||
|
||||
if ( train_sample_idx && train_sample_idx->cols != train_sample_count ) |
||||
free_train_test_idx(); |
||||
|
||||
if ( !sample_idx) |
||||
{ |
||||
int test_sample_count = sample_count- train_sample_count; |
||||
sample_idx = (int*)cvAlloc( sample_count * sizeof(sample_idx[0]) ); |
||||
for (int i = 0; i < sample_count; i++ ) |
||||
sample_idx[i] = i; |
||||
train_sample_idx = cvCreateMatHeader( 1, train_sample_count, CV_32SC1 ); |
||||
*train_sample_idx = cvMat( 1, train_sample_count, CV_32SC1, &sample_idx[0] ); |
||||
|
||||
CV_Assert(test_sample_count > 0); |
||||
test_sample_idx = cvCreateMatHeader( 1, test_sample_count, CV_32SC1 ); |
||||
*test_sample_idx = cvMat( 1, test_sample_count, CV_32SC1, &sample_idx[train_sample_count] ); |
||||
} |
||||
|
||||
mix = spl->mix; |
||||
if ( mix ) |
||||
mix_train_and_test_idx(); |
||||
|
||||
__END__; |
||||
} |
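
Usage sketch for the split API above, again continuing the earlier CvMLData sketch (the counts and portion are illustrative):

CvTrainTestSplit byCount( 100, true );      // 100 training samples, shuffled assignment
CvTrainTestSplit byPortion( 0.8f, false );  // or: 80% of the rows for training, in file order
data.set_train_test_split( &byCount );
const CvMat* trainIdx = data.get_train_sample_idx();   // indices of the training rows
const CvMat* testIdx  = data.get_test_sample_idx();    // indices of the held-out rows
(void)byPortion; (void)trainIdx; (void)testIdx;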
||||
|
||||
const CvMat* CvMLData::get_train_sample_idx() const |
||||
{ |
||||
CV_FUNCNAME( "CvMLData::get_train_sample_idx" ); |
||||
__BEGIN__; |
||||
|
||||
if ( !values ) |
||||
CV_ERROR( CV_StsInternal, "data is empty" ); |
||||
__END__; |
||||
|
||||
return train_sample_idx; |
||||
} |
||||
|
||||
const CvMat* CvMLData::get_test_sample_idx() const |
||||
{ |
||||
CV_FUNCNAME( "CvMLData::get_test_sample_idx" ); |
||||
__BEGIN__; |
||||
|
||||
if ( !values ) |
||||
CV_ERROR( CV_StsInternal, "data is empty" ); |
||||
__END__; |
||||
|
||||
return test_sample_idx; |
||||
} |
||||
|
||||
void CvMLData::mix_train_and_test_idx() |
||||
{ |
||||
CV_FUNCNAME( "CvMLData::mix_train_and_test_idx" ); |
||||
__BEGIN__; |
||||
|
||||
if ( !values ) |
||||
CV_ERROR( CV_StsInternal, "data is empty" ); |
||||
__END__; |
||||
|
||||
if ( !sample_idx) |
||||
return; |
||||
|
||||
if ( train_sample_count > 0 && train_sample_count < values->rows ) |
||||
{ |
||||
int n = values->rows; |
||||
for (int i = 0; i < n; i++) |
||||
{ |
||||
int a = (*rng)(n); |
||||
int b = (*rng)(n); |
||||
int t; |
||||
CV_SWAP( sample_idx[a], sample_idx[b], t ); |
||||
} |
||||
} |
||||
} |
||||
|
||||
const CvMat* CvMLData::get_var_idx() |
||||
{ |
||||
CV_FUNCNAME( "CvMLData::get_var_idx" ); |
||||
__BEGIN__; |
||||
|
||||
int avcount = 0; |
||||
|
||||
if ( !values ) |
||||
CV_ERROR( CV_StsInternal, "data is empty" ); |
||||
|
||||
assert( var_idx_mask ); |
||||
|
||||
avcount = cvFloor( cvNorm( var_idx_mask, 0, CV_L1 ) ); |
||||
int* vidx; |
||||
|
||||
if ( avcount == values->cols ) |
||||
return 0; |
||||
|
||||
if ( !var_idx_out || ( var_idx_out && var_idx_out->cols != avcount ) ) |
||||
{ |
||||
cvReleaseMat( &var_idx_out ); |
||||
var_idx_out = cvCreateMat( 1, avcount, CV_32SC1); |
||||
if ( response_idx >=0 ) |
||||
var_idx_mask->data.ptr[response_idx] = 0; |
||||
} |
||||
|
||||
vidx = var_idx_out->data.i; |
||||
|
||||
for(int i = 0; i < var_idx_mask->cols; i++) |
||||
if ( var_idx_mask->data.ptr[i] ) |
||||
{ |
||||
*vidx = i; |
||||
vidx++; |
||||
} |
||||
|
||||
__END__; |
||||
|
||||
return var_idx_out; |
||||
} |
||||
|
||||
void CvMLData::chahge_var_idx( int vi, bool state ) |
||||
{ |
||||
change_var_idx( vi, state ); |
||||
} |
||||
|
||||
void CvMLData::change_var_idx( int vi, bool state ) |
||||
{ |
||||
CV_FUNCNAME( "CvMLData::change_var_idx" ); |
||||
__BEGIN__; |
||||
|
||||
int var_count = 0; |
||||
|
||||
if ( !values ) |
||||
CV_ERROR( CV_StsInternal, "data is empty" ); |
||||
|
||||
var_count = values->cols; |
||||
|
||||
if ( vi < 0 || vi >= var_count) |
||||
CV_ERROR( CV_StsBadArg, "variable index is not correct" ); |
||||
|
||||
assert( var_idx_mask ); |
||||
var_idx_mask->data.ptr[vi] = state; |
||||
|
||||
__END__; |
||||
} |
||||
|
||||
/* End of file. */ |
File diff suppressed because it is too large
@ -1,376 +0,0 @@ |
||||
/*M///////////////////////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
|
||||
//
|
||||
// By downloading, copying, installing or using the software you agree to this license.
|
||||
// If you do not agree to this license, do not download, install,
|
||||
// copy or use the software.
|
||||
//
|
||||
//
|
||||
// Intel License Agreement
|
||||
//
|
||||
// Copyright (C) 2000, Intel Corporation, all rights reserved.
|
||||
// Third party copyrights are property of their respective owners.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without modification,
|
||||
// are permitted provided that the following conditions are met:
|
||||
//
|
||||
// * Redistribution's of source code must retain the above copyright notice,
|
||||
// this list of conditions and the following disclaimer.
|
||||
//
|
||||
// * Redistribution's in binary form must reproduce the above copyright notice,
|
||||
// this list of conditions and the following disclaimer in the documentation
|
||||
// and/or other materials provided with the distribution.
|
||||
//
|
||||
// * The name of Intel Corporation may not be used to endorse or promote products
|
||||
// derived from this software without specific prior written permission.
|
||||
//
|
||||
// This software is provided by the copyright holders and contributors "as is" and
|
||||
// any express or implied warranties, including, but not limited to, the implied
|
||||
// warranties of merchantability and fitness for a particular purpose are disclaimed.
|
||||
// In no event shall the Intel Corporation or contributors be liable for any direct,
|
||||
// indirect, incidental, special, exemplary, or consequential damages
|
||||
// (including, but not limited to, procurement of substitute goods or services;
|
||||
// loss of use, data, or profits; or business interruption) however caused
|
||||
// and on any theory of liability, whether in contract, strict liability,
|
||||
// or tort (including negligence or otherwise) arising in any way out of
|
||||
// the use of this software, even if advised of the possibility of such damage.
|
||||
//
|
||||
//M*/
|
||||
|
||||
#ifndef OPENCV_PRECOMP_H
#define OPENCV_PRECOMP_H

#include "opencv2/core.hpp"
#include "old_ml.hpp"
#include "opencv2/core/core_c.h"
#include "opencv2/core/utility.hpp"

#include "opencv2/core/private.hpp"

#include <assert.h>
#include <float.h>
#include <limits.h>
#include <math.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <time.h>

#define ML_IMPL CV_IMPL
#define __BEGIN__ __CV_BEGIN__
#define __END__ __CV_END__
#define EXIT __CV_EXIT__

#define CV_MAT_ELEM_FLAG( mat, type, comp, vect, tflag )   \
    (( tflag == CV_ROW_SAMPLE )                            \
    ? (CV_MAT_ELEM( mat, type, comp, vect ))               \
    : (CV_MAT_ELEM( mat, type, vect, comp )))

/* Convert matrix to vector */
#define ICV_MAT2VEC( mat, vdata, vstep, num )       \
    if( MIN( (mat).rows, (mat).cols ) != 1 )        \
        CV_ERROR( CV_StsBadArg, "" );               \
    (vdata) = ((mat).data.ptr);                     \
    if( (mat).rows == 1 )                           \
    {                                               \
        (vstep) = CV_ELEM_SIZE( (mat).type );       \
        (num) = (mat).cols;                         \
    }                                               \
    else                                            \
    {                                               \
        (vstep) = (mat).step;                       \
        (num) = (mat).rows;                         \
    }

/* get raw data */
#define ICV_RAWDATA( mat, flags, rdata, sstep, cstep, m, n )    \
    (rdata) = (mat).data.ptr;                                   \
    if( CV_IS_ROW_SAMPLE( flags ) )                             \
    {                                                           \
        (sstep) = (mat).step;                                   \
        (cstep) = CV_ELEM_SIZE( (mat).type );                   \
        (m) = (mat).rows;                                       \
        (n) = (mat).cols;                                       \
    }                                                           \
    else                                                        \
    {                                                           \
        (cstep) = (mat).step;                                   \
        (sstep) = CV_ELEM_SIZE( (mat).type );                   \
        (n) = (mat).rows;                                       \
        (m) = (mat).cols;                                       \
    }

#define ICV_IS_MAT_OF_TYPE( mat, mat_type)                          \
    (CV_IS_MAT( mat ) && CV_MAT_TYPE( mat->type ) == (mat_type) &&  \
    (mat)->cols > 0 && (mat)->rows > 0)

/*
    uchar* data; int sstep, cstep;      - trainData->data
    uchar* classes; int clstep; int ncl;- trainClasses
    uchar* tmask; int tmstep; int ntm;  - typeMask
    uchar* missed;int msstep, mcstep;   -missedMeasurements...
    int mm, mn; == m,n == size,dim
    uchar* sidx;int sistep;             - sampleIdx
    uchar* cidx;int cistep;             - compIdx
    int k, l; == n,m == dim,size (length of cidx, sidx)
    int m, n; == size,dim
*/
#define ICV_DECLARE_TRAIN_ARGS()                                                    \
    uchar* data;                                                                    \
    int sstep, cstep;                                                               \
    uchar* classes;                                                                 \
    int clstep;                                                                     \
    int ncl;                                                                        \
    uchar* tmask;                                                                   \
    int tmstep;                                                                     \
    int ntm;                                                                        \
    uchar* missed;                                                                  \
    int msstep, mcstep;                                                             \
    int mm, mn;                                                                     \
    uchar* sidx;                                                                    \
    int sistep;                                                                     \
    uchar* cidx;                                                                    \
    int cistep;                                                                     \
    int k, l;                                                                       \
    int m, n;                                                                       \
                                                                                    \
    data = classes = tmask = missed = sidx = cidx = NULL;                           \
    sstep = cstep = clstep = ncl = tmstep = ntm = msstep = mcstep = mm = mn = 0;    \
    sistep = cistep = k = l = m = n = 0;

#define ICV_TRAIN_DATA_REQUIRED( param, flags )                     \
    if( !ICV_IS_MAT_OF_TYPE( (param), CV_32FC1 ) )                  \
    {                                                               \
        CV_ERROR( CV_StsBadArg, "Invalid " #param " parameter" );   \
    }                                                               \
    else                                                            \
    {                                                               \
        ICV_RAWDATA( *(param), (flags), data, sstep, cstep, m, n ); \
        k = n;                                                      \
        l = m;                                                      \
    }

#define ICV_TRAIN_CLASSES_REQUIRED( param )                         \
    if( !ICV_IS_MAT_OF_TYPE( (param), CV_32FC1 ) )                  \
    {                                                               \
        CV_ERROR( CV_StsBadArg, "Invalid " #param " parameter" );   \
    }                                                               \
    else                                                            \
    {                                                               \
        ICV_MAT2VEC( *(param), classes, clstep, ncl );              \
        if( m != ncl )                                              \
        {                                                           \
            CV_ERROR( CV_StsBadArg, "Unmatched sizes" );            \
        }                                                           \
    }

#define ICV_ARG_NULL( param )                                       \
    if( (param) != NULL )                                           \
    {                                                               \
        CV_ERROR( CV_StsBadArg, #param " parameter must be NULL" ); \
    }

#define ICV_MISSED_MEASUREMENTS_OPTIONAL( param, flags )                        \
    if( param )                                                                 \
    {                                                                           \
        if( !ICV_IS_MAT_OF_TYPE( param, CV_8UC1 ) )                             \
        {                                                                       \
            CV_ERROR( CV_StsBadArg, "Invalid " #param " parameter" );           \
        }                                                                       \
        else                                                                    \
        {                                                                       \
            ICV_RAWDATA( *(param), (flags), missed, msstep, mcstep, mm, mn );   \
            if( mm != m || mn != n )                                            \
            {                                                                   \
                CV_ERROR( CV_StsBadArg, "Unmatched sizes" );                    \
            }                                                                   \
        }                                                                       \
    }

#define ICV_COMP_IDX_OPTIONAL( param )                                          \
    if( param )                                                                 \
    {                                                                           \
        if( !ICV_IS_MAT_OF_TYPE( param, CV_32SC1 ) )                            \
        {                                                                       \
            CV_ERROR( CV_StsBadArg, "Invalid " #param " parameter" );           \
        }                                                                       \
        else                                                                    \
        {                                                                       \
            ICV_MAT2VEC( *(param), cidx, cistep, k );                           \
            if( k > n )                                                         \
                CV_ERROR( CV_StsBadArg, "Invalid " #param " parameter" );       \
        }                                                                       \
    }

#define ICV_SAMPLE_IDX_OPTIONAL( param )                                        \
    if( param )                                                                 \
    {                                                                           \
        if( !ICV_IS_MAT_OF_TYPE( param, CV_32SC1 ) )                            \
        {                                                                       \
            CV_ERROR( CV_StsBadArg, "Invalid " #param " parameter" );           \
        }                                                                       \
        else                                                                    \
        {                                                                       \
            ICV_MAT2VEC( *sampleIdx, sidx, sistep, l );                         \
            if( l > m )                                                         \
                CV_ERROR( CV_StsBadArg, "Invalid " #param " parameter" );       \
        }                                                                       \
    }

/****************************************************************************************/
#define ICV_CONVERT_FLOAT_ARRAY_TO_MATRICE( array, matrice )        \
{                                                                   \
    CvMat a, b;                                                     \
    int dims = (matrice)->cols;                                     \
    int nsamples = (matrice)->rows;                                 \
    int type = CV_MAT_TYPE((matrice)->type);                        \
    int i, offset = dims;                                           \
                                                                    \
    CV_ASSERT( type == CV_32FC1 || type == CV_64FC1 );              \
    offset *= ((type == CV_32FC1) ? sizeof(float) : sizeof(double));\
                                                                    \
    b = cvMat( 1, dims, CV_32FC1 );                                 \
    cvGetRow( matrice, &a, 0 );                                     \
    for( i = 0; i < nsamples; i++, a.data.ptr += offset )           \
    {                                                               \
        b.data.fl = (float*)array[i];                               \
        CV_CALL( cvConvert( &b, &a ) );                             \
    }                                                               \
}

/****************************************************************************************\
*                       Auxiliary functions declarations                                *
\****************************************************************************************/

/* Generates a set of classes centers in quantity <num_of_clusters> that are generated as
   uniform random vectors in parallelepiped, where <data> is concentrated. Vectors in
   <data> should have horizontal orientation. If <centers> != NULL, the function doesn't
   allocate any memory and stores generated centers in <centers>, returns <centers>.
   If <centers> == NULL, the function allocates memory and creates the matrice. Centers
   are supposed to be oriented horizontally. */
CvMat* icvGenerateRandomClusterCenters( int seed,
                                        const CvMat* data,
                                        int num_of_clusters,
                                        CvMat* centers CV_DEFAULT(0));

/* Fills the <labels> using <probs> by choosing the maximal probability. Outliers are
   fixed by <oulier_tresh> and have cluster label (-1). Function also controls that there
   weren't "empty" clusters by filling empty clusters with the maximal probability vector.
   If probs_sums != NULL, fills it with the sums of probabilities for each sample (it is
   useful for normalizing probabilities' matrice of FCM) */
void icvFindClusterLabels( const CvMat* probs, float outlier_thresh, float r,
                           const CvMat* labels );

typedef struct CvSparseVecElem32f
{
    int idx;
    float val;
}
CvSparseVecElem32f;

/* Prepare training data and related parameters */
#define CV_TRAIN_STATMODEL_DEFRAGMENT_TRAIN_DATA    1
#define CV_TRAIN_STATMODEL_SAMPLES_AS_ROWS          2
#define CV_TRAIN_STATMODEL_SAMPLES_AS_COLUMNS       4
#define CV_TRAIN_STATMODEL_CATEGORICAL_RESPONSE     8
#define CV_TRAIN_STATMODEL_ORDERED_RESPONSE         16
#define CV_TRAIN_STATMODEL_RESPONSES_ON_OUTPUT      32
#define CV_TRAIN_STATMODEL_ALWAYS_COPY_TRAIN_DATA   64
#define CV_TRAIN_STATMODEL_SPARSE_AS_SPARSE         128

int
cvPrepareTrainData( const char* /*funcname*/,
                    const CvMat* train_data, int tflag,
                    const CvMat* responses, int response_type,
                    const CvMat* var_idx,
                    const CvMat* sample_idx,
                    bool always_copy_data,
                    const float*** out_train_samples,
                    int* _sample_count,
                    int* _var_count,
                    int* _var_all,
                    CvMat** out_responses,
                    CvMat** out_response_map,
                    CvMat** out_var_idx,
                    CvMat** out_sample_idx=0 );

void
cvSortSamplesByClasses( const float** samples, const CvMat* classes,
                        int* class_ranges, const uchar** mask CV_DEFAULT(0) );

void
cvCombineResponseMaps (CvMat*  _responses,
                 const CvMat*  old_response_map,
                       CvMat*  new_response_map,
                       CvMat** out_response_map);

void
cvPreparePredictData( const CvArr* sample, int dims_all, const CvMat* comp_idx,
                      int class_count, const CvMat* prob, float** row_sample,
                      int as_sparse CV_DEFAULT(0) );

/* copies clustering [or batch "predict"] results
   (labels and/or centers and/or probs) back to the output arrays */
void
cvWritebackLabels( const CvMat* labels, CvMat* dst_labels,
                   const CvMat* centers, CvMat* dst_centers,
                   const CvMat* probs, CvMat* dst_probs,
                   const CvMat* sample_idx, int samples_all,
                   const CvMat* comp_idx, int dims_all );
#define cvWritebackResponses cvWritebackLabels

#define XML_FIELD_NAME "_name"
cv::FileNode icvFileNodeGetChild( cv::FileNode& father, const char* name );
cv::FileNode icvFileNodeGetChildArrayElem( cv::FileNode& father, const char* name, int index );
cv::FileNode icvFileNodeGetNext( cv::FileNode& n, const char* name );


void cvCheckTrainData( const CvMat* train_data, int tflag,
                       const CvMat* missing_mask,
                       int* var_all, int* sample_all );

CvMat* cvPreprocessIndexArray( const CvMat* idx_arr, int data_arr_size, bool check_for_duplicates=false );

CvMat* cvPreprocessVarType( const CvMat* type_mask, const CvMat* var_idx,
                            int var_all, int* response_type );

CvMat* cvPreprocessOrderedResponses( const CvMat* responses,
                                     const CvMat* sample_idx, int sample_all );

CvMat* cvPreprocessCategoricalResponses( const CvMat* responses,
                                         const CvMat* sample_idx, int sample_all,
                                         CvMat** out_response_map, CvMat** class_counts=0 );

const float** cvGetTrainSamples( const CvMat* train_data, int tflag,
                                 const CvMat* var_idx, const CvMat* sample_idx,
                                 int* _var_count, int* _sample_count,
                                 bool always_copy_data=false );

namespace cv
{
    struct DTreeBestSplitFinder
    {
        DTreeBestSplitFinder(){ splitSize = 0, tree = 0; node = 0; }
        DTreeBestSplitFinder( CvDTree* _tree, CvDTreeNode* _node);
        DTreeBestSplitFinder( const DTreeBestSplitFinder& finder, Split );
        virtual ~DTreeBestSplitFinder() {}
        virtual void operator()(const BlockedRange& range);
        void join( DTreeBestSplitFinder& rhs );
        Ptr<CvDTreeSplit> bestSplit;
        Ptr<CvDTreeSplit> split;
        int splitSize;
        CvDTree* tree;
        CvDTreeNode* node;
    };

    struct ForestTreeBestSplitFinder : DTreeBestSplitFinder
    {
        ForestTreeBestSplitFinder() : DTreeBestSplitFinder() {}
        ForestTreeBestSplitFinder( CvForestTree* _tree, CvDTreeNode* _node );
        ForestTreeBestSplitFinder( const ForestTreeBestSplitFinder& finder, Split );
        virtual void operator()(const BlockedRange& range);
    };
}

#endif // OPENCV_PRECOMP_H
File diff suppressed because it is too large
@ -1,129 +0,0 @@ |
#include "opencv2/core.hpp"
#include "cascadeclassifier.h"

using namespace std;
using namespace cv;

/*
traincascade.cpp is the source file of the program used for cascade training.
User has to provide training input in form of positive and negative training images,
and other data related to training in form of command line argument.
*/
int main( int argc, char* argv[] )
{
    CvCascadeClassifier classifier;
    string cascadeDirName, vecName, bgName;
    int numPos    = 2000;
    int numNeg    = 1000;
    int numStages = 20;
    int numThreads = getNumThreads();
    int precalcValBufSize = 1024,
        precalcIdxBufSize = 1024;
    bool baseFormatSave = false;
    double acceptanceRatioBreakValue = -1.0;

    CvCascadeParams cascadeParams;
    CvCascadeBoostParams stageParams;
    Ptr<CvFeatureParams> featureParams[] = { makePtr<CvHaarFeatureParams>(),
                                             makePtr<CvLBPFeatureParams>(),
                                             makePtr<CvHOGFeatureParams>()
                                           };
    int fc = sizeof(featureParams)/sizeof(featureParams[0]);
    if( argc == 1 )
    {
        cout << "Usage: " << argv[0] << endl;
        cout << "  -data <cascade_dir_name>" << endl;
        cout << "  -vec <vec_file_name>" << endl;
        cout << "  -bg <background_file_name>" << endl;
        cout << "  [-numPos <number_of_positive_samples = " << numPos << ">]" << endl;
        cout << "  [-numNeg <number_of_negative_samples = " << numNeg << ">]" << endl;
        cout << "  [-numStages <number_of_stages = " << numStages << ">]" << endl;
        cout << "  [-precalcValBufSize <precalculated_vals_buffer_size_in_Mb = " << precalcValBufSize << ">]" << endl;
        cout << "  [-precalcIdxBufSize <precalculated_idxs_buffer_size_in_Mb = " << precalcIdxBufSize << ">]" << endl;
        cout << "  [-baseFormatSave]" << endl;
        cout << "  [-numThreads <max_number_of_threads = " << numThreads << ">]" << endl;
        cout << "  [-acceptanceRatioBreakValue <value> = " << acceptanceRatioBreakValue << ">]" << endl;
        cascadeParams.printDefaults();
        stageParams.printDefaults();
        for( int fi = 0; fi < fc; fi++ )
            featureParams[fi]->printDefaults();
        return 0;
    }

    for( int i = 1; i < argc; i++ )
    {
        bool set = false;
        if( !strcmp( argv[i], "-data" ) )
        {
            cascadeDirName = argv[++i];
        }
        else if( !strcmp( argv[i], "-vec" ) )
        {
            vecName = argv[++i];
        }
        else if( !strcmp( argv[i], "-bg" ) )
        {
            bgName = argv[++i];
        }
        else if( !strcmp( argv[i], "-numPos" ) )
        {
            numPos = atoi( argv[++i] );
        }
        else if( !strcmp( argv[i], "-numNeg" ) )
        {
            numNeg = atoi( argv[++i] );
        }
        else if( !strcmp( argv[i], "-numStages" ) )
        {
            numStages = atoi( argv[++i] );
        }
        else if( !strcmp( argv[i], "-precalcValBufSize" ) )
        {
            precalcValBufSize = atoi( argv[++i] );
        }
        else if( !strcmp( argv[i], "-precalcIdxBufSize" ) )
        {
            precalcIdxBufSize = atoi( argv[++i] );
        }
        else if( !strcmp( argv[i], "-baseFormatSave" ) )
        {
            baseFormatSave = true;
        }
        else if( !strcmp( argv[i], "-numThreads" ) )
        {
            numThreads = atoi(argv[++i]);
        }
        else if( !strcmp( argv[i], "-acceptanceRatioBreakValue" ) )
        {
            acceptanceRatioBreakValue = atof(argv[++i]);
        }
        else if ( cascadeParams.scanAttr( argv[i], argv[i+1] ) ) { i++; }
        else if ( stageParams.scanAttr( argv[i], argv[i+1] ) ) { i++; }
        else if ( !set )
        {
            for( int fi = 0; fi < fc; fi++ )
            {
                set = featureParams[fi]->scanAttr(argv[i], argv[i+1]);
                if ( set )  /* attribute recognized by this feature type: consume its value */
                {
                    i++;
                    break;
                }
            }
        }
    }

    setNumThreads( numThreads );
    classifier.train( cascadeDirName,
                      vecName,
                      bgName,
                      numPos, numNeg,
                      precalcValBufSize, precalcIdxBufSize,
                      numStages,
                      cascadeParams,
                      *featureParams[cascadeParams.featureType],
                      stageParams,
                      baseFormatSave,
                      acceptanceRatioBreakValue );
    return 0;
}
@ -1,101 +0,0 @@ |
#ifndef _OPENCV_FEATURES_H_
#define _OPENCV_FEATURES_H_

#include "imagestorage.h"
#include <stdio.h>

#define FEATURES "features"

#define CV_SUM_OFFSETS( p0, p1, p2, p3, rect, step )                      \
    /* (x, y) */                                                          \
    (p0) = (rect).x + (step) * (rect).y;                                  \
    /* (x + w, y) */                                                      \
    (p1) = (rect).x + (rect).width + (step) * (rect).y;                   \
    /* (x, y + h) */                                                      \
    (p2) = (rect).x + (step) * ((rect).y + (rect).height);                \
    /* (x + w, y + h) */                                                  \
    (p3) = (rect).x + (rect).width + (step) * ((rect).y + (rect).height);

#define CV_TILTED_OFFSETS( p0, p1, p2, p3, rect, step )                        \
    /* (x, y) */                                                               \
    (p0) = (rect).x + (step) * (rect).y;                                       \
    /* (x - h, y + h) */                                                       \
    (p1) = (rect).x - (rect).height + (step) * ((rect).y + (rect).height);     \
    /* (x + w, y + w) */                                                       \
    (p2) = (rect).x + (rect).width + (step) * ((rect).y + (rect).width);       \
    /* (x + w - h, y + w + h) */                                               \
    (p3) = (rect).x + (rect).width - (rect).height                             \
           + (step) * ((rect).y + (rect).width + (rect).height);

float calcNormFactor( const cv::Mat& sum, const cv::Mat& sqSum );

template<class Feature>
void _writeFeatures( const std::vector<Feature> features, cv::FileStorage &fs, const cv::Mat& featureMap )
{
    fs << FEATURES << "[";
    const cv::Mat_<int>& featureMap_ = (const cv::Mat_<int>&)featureMap;
    for ( int fi = 0; fi < featureMap.cols; fi++ )
        if ( featureMap_(0, fi) >= 0 )
        {
            fs << "{";
            features[fi].write( fs );
            fs << "}";
        }
    fs << "]";
}

class CvParams
{
public:
    CvParams();
    virtual ~CvParams() {}
    // from|to file
    virtual void write( cv::FileStorage &fs ) const = 0;
    virtual bool read( const cv::FileNode &node ) = 0;
    // from|to screen
    virtual void printDefaults() const;
    virtual void printAttrs() const;
    virtual bool scanAttr( const std::string prmName, const std::string val );
    std::string name;
};

class CvFeatureParams : public CvParams
{
public:
    enum { HAAR = 0, LBP = 1, HOG = 2 };
    CvFeatureParams();
    virtual void init( const CvFeatureParams& fp );
    virtual void write( cv::FileStorage &fs ) const;
    virtual bool read( const cv::FileNode &node );
    static cv::Ptr<CvFeatureParams> create( int featureType );
    int maxCatCount; // 0 in case of numerical features
    int featSize;    // 1 in case of simple features (HAAR, LBP) and N_BINS(9)*N_CELLS(4) in case of Dalal's HOG features
};

class CvFeatureEvaluator
{
public:
    virtual ~CvFeatureEvaluator() {}
    virtual void init(const CvFeatureParams *_featureParams,
                      int _maxSampleCount, cv::Size _winSize );
    virtual void setImage(const cv::Mat& img, uchar clsLabel, int idx);
    virtual void writeFeatures( cv::FileStorage &fs, const cv::Mat& featureMap ) const = 0;
    virtual float operator()(int featureIdx, int sampleIdx) const = 0;
    static cv::Ptr<CvFeatureEvaluator> create(int type);

    int getNumFeatures() const { return numFeatures; }
    int getMaxCatCount() const { return featureParams->maxCatCount; }
    int getFeatureSize() const { return featureParams->featSize; }
    const cv::Mat& getCls() const { return cls; }
    float getCls(int si) const { return cls.at<float>(si, 0); }
protected:
    virtual void generateFeatures() = 0;

    int npos, nneg;
    int numFeatures;
    cv::Size winSize;
    CvFeatureParams *featureParams;
    cv::Mat cls;
};

#endif
@ -1,224 +0,0 @@ |
Cascade Classifier Training {#tutorial_traincascade}
===========================

@tableofcontents

@prev_tutorial{tutorial_cascade_classifier}
@next_tutorial{tutorial_barcode_detect_and_decode}

Introduction
------------

Working with a boosted cascade of weak classifiers includes two major stages: the training stage and the detection stage. The detection stage, using either HAAR or LBP based models, is described in the @ref tutorial_cascade_classifier "object detection tutorial". This documentation gives an overview of the functionality needed to train your own boosted cascade of weak classifiers. The current guide walks through all the different stages: collecting training data, preparing the training data and executing the actual model training.

To support this tutorial, several official OpenCV applications will be used: [opencv_createsamples](https://github.com/opencv/opencv/tree/5.x/apps/createsamples), [opencv_annotation](https://github.com/opencv/opencv/tree/5.x/apps/annotation), [opencv_traincascade](https://github.com/opencv/opencv/tree/5.x/apps/traincascade) and [opencv_visualisation](https://github.com/opencv/opencv/tree/5.x/apps/visualisation).

@note The createsamples and traincascade applications are disabled since OpenCV 4.0. Consider using the 3.4 branch of these apps for cascade classifier training. The model format is the same between 3.4 and 4.x.

### Important notes

- If you come across any tutorial mentioning the old opencv_haartraining tool <i>(which is deprecated and still uses the OpenCV 1.x interface)</i>, please ignore that tutorial and stick to the opencv_traincascade tool. This tool is the newer version, written in C++ in accordance with the OpenCV 2.x and OpenCV 3.x API. opencv_traincascade supports both HAAR-like wavelet features @cite Viola01 and LBP (Local Binary Patterns) @cite Liao2007 features. LBP features are integer-valued, in contrast to HAAR features, which are floating-point, so both training and detection with LBP are several times faster than with HAAR features. As for LBP versus HAAR detection quality, it mainly depends on the training data used and the training parameters selected. It is possible to train an LBP-based classifier that provides almost the same quality as a HAAR-based one, in a fraction of the training time.

- The newer cascade classifier detection interface from OpenCV 2.x and OpenCV 3.x (@ref cv::CascadeClassifier) supports working with both old and new model formats. opencv_traincascade can even save (export) a trained cascade in the older format if for some reason you are stuck using the old interface. At least training the model can then be done with the most stable interface.

- The opencv_traincascade application can use TBB for multi-threading. To use it in multicore mode OpenCV must be built with TBB support enabled.

Preparation of the training data
--------------------------------

For training a boosted cascade of weak classifiers we need a set of positive samples (containing the actual objects you want to detect) and a set of negative images (containing everything you do not want to detect). The set of negative samples must be prepared manually, whereas the set of positive samples is created using the opencv_createsamples application.

### Negative Samples

Negative samples are taken from arbitrary images that do not contain the objects you want to detect. The negative images, from which the samples are generated, should be listed in a special negative image file containing one image path per line <i>(absolute or relative)</i>. Note that negative samples and sample images are also called background samples or background images, and these terms are used interchangeably in this document.

The described images may be of different sizes. However, each image should be equal to or larger than the desired training window size <i>(which corresponds to the model dimensions, most of the time the average size of your object)</i>, because these images are used to subsample a given negative image into several image samples having this training window size.

An example of such a negative description file:

Directory structure:
@code{.text}
/img
  img1.jpg
  img2.jpg
bg.txt
@endcode

File bg.txt:
@code{.text}
img/img1.jpg
img/img2.jpg
@endcode

Your set of negative window samples will be used to tell the machine learning step, boosting in this case, what not to look for when trying to find your objects of interest.

### Positive Samples

Positive samples are created by the opencv_createsamples application. They are used by the boosting process to define what the model should actually look for when trying to find your objects of interest. The application supports two ways of generating a positive sample dataset:

1. You can generate a bunch of positives from a single positive object image.
2. You can supply all the positives yourself and only use the tool to cut them out, resize them and put them into the binary format needed by OpenCV.

While the first approach works decently for fixed objects, like very rigid logos, it tends to fail rather soon for less rigid objects. In that case we suggest using the second approach. Many tutorials on the web even state that 100 real object images can lead to a better model than 1000 artificially generated positives created with the opencv_createsamples application. If you do decide to take the first approach, keep a few things in mind:

- Please note that you need more than a single positive sample before you give it to the mentioned application, because it only applies perspective transformations.
- If you want a robust model, take samples that cover the wide range of varieties that can occur within your object class. For example, in the case of faces you should consider different races and age groups, emotions and perhaps beard styles. This also applies when using the second approach.

The first approach takes a single object image, for example a company logo, and creates a large set of positive samples from it by randomly rotating the object, changing the image intensity and placing the image on arbitrary backgrounds. The amount and range of randomness can be controlled by command line arguments of the opencv_createsamples application.

Command line arguments:

- `-vec <vec_file_name>` : Name of the output file containing the positive samples for training.

- `-img <image_file_name>` : Source object image (e.g., a company logo).

- `-bg <background_file_name>` : Background description file; contains a list of images which are used as a background for randomly distorted versions of the object.

- `-num <number_of_samples>` : Number of positive samples to generate.

- `-bgcolor <background_color>` : Background color (currently grayscale images are assumed); the background color denotes the transparent color. Since there might be compression artifacts, the amount of color tolerance can be specified by `-bgthresh`. All pixels within the bgcolor-bgthresh and bgcolor+bgthresh range are interpreted as transparent.

- `-bgthresh <background_color_threshold>`
- `-inv` : If specified, colors will be inverted.
- `-randinv` : If specified, colors will be inverted randomly.
- `-maxidev <max_intensity_deviation>` : Maximal intensity deviation of pixels in foreground samples.
- `-maxxangle <max_x_rotation_angle>` : Maximal rotation angle towards x-axis, must be given in radians.
- `-maxyangle <max_y_rotation_angle>` : Maximal rotation angle towards y-axis, must be given in radians.
- `-maxzangle <max_z_rotation_angle>` : Maximal rotation angle towards z-axis, must be given in radians.
- `-show` : Useful debugging option. If specified, each sample will be shown. Pressing Esc will continue the sample creation process without showing each sample.
- `-w <sample_width>` : Width (in pixels) of the output samples.
- `-h <sample_height>` : Height (in pixels) of the output samples.

When running opencv_createsamples in this way, the following procedure is used to create a sample object instance: the given source image is rotated randomly around all three axes. The chosen angle is limited by `-maxxangle`, `-maxyangle` and `-maxzangle`. Then pixels having an intensity in the [bg_color-bg_color_threshold; bg_color+bg_color_threshold] range are interpreted as transparent. White noise is added to the intensities of the foreground. If the `-inv` key is specified then foreground pixel intensities are inverted. If the `-randinv` key is specified then the algorithm randomly selects whether inversion should be applied to this sample. Finally, the obtained image is placed onto an arbitrary background from the background description file, resized to the desired size specified by `-w` and `-h` and stored to the vec-file specified by the `-vec` command line option. A sketch of such an invocation is shown below.

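As a concrete sketch (the file names `logo.png`, `bg.txt` and `samples.vec` are placeholders, not files shipped with OpenCV), such an invocation could look like:

@code{.text}
opencv_createsamples -img logo.png -bg bg.txt -vec samples.vec -num 1000 \
    -bgcolor 0 -bgthresh 8 -maxxangle 1.1 -maxyangle 1.1 -maxzangle 0.5 -w 24 -h 24
@endcode

All flags used here are the ones documented above; the particular values are only an illustration and should be tuned to your object.
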
Positive samples may also be obtained from a collection of previously marked up images, which is the preferred way when building robust object models. This collection is described by a text file similar to the background description file. Each line of this file corresponds to an image. The first element of the line is the filename, followed by the number of object annotations, followed by numbers describing the coordinates of the object bounding rectangles (x, y, width, height).

An example of such a description file:

Directory structure:
@code{.text}
/img
  img1.jpg
  img2.jpg
info.dat
@endcode
File info.dat:
@code{.text}
img/img1.jpg 1 140 100 45 45
img/img2.jpg 2 100 200 50 50 50 30 25 25
@endcode
Image img1.jpg contains a single object instance with the following coordinates of its bounding rectangle:
(140, 100, 45, 45). Image img2.jpg contains two object instances.

In order to create positive samples from such a collection, the `-info` argument should be specified instead of `-img`:

- `-info <collection_file_name>` : Description file of the marked up image collection.

Note that in this case, parameters like `-bg, -bgcolor, -bgthreshold, -inv, -randinv, -maxxangle, -maxyangle, -maxzangle` are simply ignored and not used anymore. The scheme of sample creation in this case is as follows: the object instances are taken from the given images by cutting out the supplied bounding boxes from the original images. Then they are resized to the target sample size (defined by `-w` and `-h`) and stored in the output vec-file, defined by the `-vec` parameter. No distortion is applied, so the only affecting arguments are `-w`, `-h`, `-show` and `-num`; see the sketch after this paragraph.

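For example, packing the annotated objects from `info.dat` into a vec-file of 24x24 samples might look like the following sketch (the file names are again placeholders):

@code{.text}
opencv_createsamples -info info.dat -vec samples.vec -num 1000 -w 24 -h 24
@endcode
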
The manual process of creating the `-info` file can also be done by using the opencv_annotation tool. This is an open source tool for visually selecting the regions of interest of your object instances in any given images. The following subsection discusses in more detail how to use this application.

#### Extra remarks

- The opencv_createsamples utility may also be used for examining the samples stored in any given positive samples file. In order to do this, only the `-vec`, `-w` and `-h` parameters should be specified; a sketch of such a call follows this list.
- An example vec-file is available at `opencv/data/vec_files/trainingfaces_24-24.vec`. It can be used to train a face detector with the following window size: `-w 24 -h 24`.

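For instance, assuming a previously created `samples.vec` (a placeholder name), inspecting its contents reduces to:

@code{.text}
opencv_createsamples -vec samples.vec -w 24 -h 24
@endcode
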
### Using OpenCV's integrated annotation tool

Since OpenCV 3.x the community has been supplying and maintaining an open source annotation tool, used for generating the `-info` file. The tool can be accessed through the command opencv_annotation if the OpenCV applications were built.

Using the tool is quite straightforward. It accepts several required and some optional parameters:

- `--annotations` <b>(required)</b> : path to the annotations txt file, where you want to store your annotations, which is then passed to the `-info` parameter [example - /data/annotations.txt]
- `--images` <b>(required)</b> : path to the folder containing the images with your objects [example - /data/testimages/]
- `--maxWindowHeight` <i>(optional)</i> : if the input image is larger in height than the resolution given here, resize the image for easier annotation, using `--resizeFactor`.
- `--resizeFactor` <i>(optional)</i> : factor used to resize the input image when using the `--maxWindowHeight` parameter.

Note that the optional parameters can only be used together. An example of a command that could be used can be seen below:

@code{.text}
opencv_annotation --annotations=/path/to/annotations/file.txt --images=/path/to/image/folder/
@endcode

This command will fire up a window containing the first image and your mouse cursor, which are used for annotation. A video on how to use the annotation tool can be found [here](https://www.youtube.com/watch?v=EV5gmvoCTSk). Basically there are several keystrokes that trigger an action. The left mouse button is used to select the first corner of your object; the drawing then continues until you are satisfied and a second left mouse button click is registered. After each selection you have the following choices:

- Pressing `c` : confirm the annotation, turning the annotation green and confirming it is stored
- Pressing `d` : delete the last annotation from the list of annotations (easy for removing wrong annotations)
- Pressing `n` : continue to the next image
- Pressing `ESC` : exit the annotation software

Finally you will end up with a usable annotation file that can be passed to the `-info` argument of opencv_createsamples.

Cascade Training
----------------

The next step is the actual training of the boosted cascade of weak classifiers, based on the positive and negative datasets that were prepared beforehand.

Command line arguments of the opencv_traincascade application, grouped by purpose (a complete example invocation is sketched after the list):

- Common arguments:
    - `-data <cascade_dir_name>` : Where the trained classifier should be stored. This folder should be created manually beforehand.
    - `-vec <vec_file_name>` : vec-file with positive samples (created by the opencv_createsamples utility).
    - `-bg <background_file_name>` : Background description file. This is the file containing the negative sample images.
    - `-numPos <number_of_positive_samples>` : Number of positive samples used in training for every classifier stage.
    - `-numNeg <number_of_negative_samples>` : Number of negative samples used in training for every classifier stage.
    - `-numStages <number_of_stages>` : Number of cascade stages to be trained.
    - `-precalcValBufSize <precalculated_vals_buffer_size_in_Mb>` : Size of the buffer for precalculated feature values (in Mb). The more memory you assign the faster the training process, but keep in mind that `-precalcValBufSize` and `-precalcIdxBufSize` combined should not exceed your available system memory.
    - `-precalcIdxBufSize <precalculated_idxs_buffer_size_in_Mb>` : Size of the buffer for precalculated feature indices (in Mb). The more memory you assign the faster the training process, but keep in mind that `-precalcValBufSize` and `-precalcIdxBufSize` combined should not exceed your available system memory.
    - `-baseFormatSave` : This argument is only relevant for Haar-like features. If it is specified, the cascade will be saved in the old format. This is only available for backwards compatibility reasons and to allow users stuck with the old deprecated interface to at least train models using the newer interface.
    - `-numThreads <max_number_of_threads>` : Maximum number of threads to use during training. Notice that the actual number of used threads may be lower, depending on your machine and compilation options. By default, the maximum available number of threads is selected if you built OpenCV with TBB support, which is needed for this optimization.
    - `-acceptanceRatioBreakValue <break_value>` : This argument is used to determine how precisely your model should keep learning and when to stop. A good guideline is to train not further than 10e-5, to ensure the model does not overtrain on your training data. By default this value is set to -1 to disable this feature.

- Cascade parameters:
    - `-stageType <BOOST(default)>` : Type of stages. Only boosted classifiers are supported as a stage type at the moment.
    - `-featureType<{HAAR(default), LBP}>` : Type of features: HAAR - Haar-like features, LBP - local binary patterns.
    - `-w <sampleWidth>` : Width of training samples (in pixels). Must have exactly the same value as used during training sample creation (opencv_createsamples utility).
    - `-h <sampleHeight>` : Height of training samples (in pixels). Must have exactly the same value as used during training sample creation (opencv_createsamples utility).

- Boosted classifier parameters:
    - `-bt <{DAB, RAB, LB, GAB(default)}>` : Type of boosted classifiers: DAB - Discrete AdaBoost, RAB - Real AdaBoost, LB - LogitBoost, GAB - Gentle AdaBoost.
    - `-minHitRate <min_hit_rate>` : Minimal desired hit rate for each stage of the classifier. The overall hit rate may be estimated as (min_hit_rate ^ number_of_stages), @cite Viola04 §4.1.
    - `-maxFalseAlarmRate <max_false_alarm_rate>` : Maximal desired false alarm rate for each stage of the classifier. The overall false alarm rate may be estimated as (max_false_alarm_rate ^ number_of_stages), @cite Viola04 §4.1.
    - `-weightTrimRate <weight_trim_rate>` : Specifies whether trimming should be used and its weight. A decent choice is 0.95.
    - `-maxDepth <max_depth_of_weak_tree>` : Maximal depth of a weak tree. A decent choice is 1, that is the case of stumps.
    - `-maxWeakCount <max_weak_tree_count>` : Maximal count of weak trees for every cascade stage. The boosted classifier (stage) will have as many weak trees (<= maxWeakCount) as needed to achieve the given `-maxFalseAlarmRate`.

- Haar-like feature parameters:
    - `-mode <BASIC (default) | CORE | ALL>` : Selects the type of Haar feature set used in training. BASIC uses only upright features, while ALL uses the full set of upright and 45 degree rotated features. See @cite Lienhart02 for more details.

- Local Binary Patterns parameters: Local Binary Patterns don't have parameters.

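Putting it together, a hypothetical training run on the vec-file and background file prepared earlier (the directory and file names are placeholders, and the numeric values are only an illustration) might look like:

@code{.text}
opencv_traincascade -data cascade_dir -vec samples.vec -bg bg.txt \
    -numPos 900 -numNeg 500 -numStages 20 -featureType HAAR -w 24 -h 24 \
    -precalcValBufSize 1024 -precalcIdxBufSize 1024 -numThreads 4
@endcode

Note that `-numPos` is usually chosen somewhat below the total number of samples in the vec-file, because later stages consume additional positives to replace those rejected by earlier stages.
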
After the opencv_traincascade application has finished its work, the trained cascade will be saved as `cascade.xml` in the `-data` folder. The other files in this folder are created to allow resuming an interrupted training, so you may delete them after training has completed.

Training is finished and you can test your cascade classifier!

Visualising Cascade Classifiers
-------------------------------

From time to time it can be useful to visualise the trained cascade, to see which features it selected and how complex its stages are. For this OpenCV supplies an opencv_visualisation application. This application has the following commands:

- `--image` <b>(required)</b> : path to a reference image for your object model. This should be an annotation with dimensions [`-w`,`-h`] as passed to both the opencv_createsamples and opencv_traincascade applications.
- `--model` <b>(required)</b> : path to the trained model, which should be in the folder supplied to the `-data` parameter of the opencv_traincascade application.
- `--data` <i>(optional)</i> : if a data folder is supplied, which has to be created manually beforehand, stage output and a video of the features will be stored there.

An example command can be seen below:

@code{.text}
opencv_visualisation --image=/data/object.png --model=/data/model.xml --data=/data/result/
@endcode

Some limitations of the current visualisation tool:
- It only handles cascade classifier models trained with the opencv_traincascade tool, containing __stumps__ as decision trees [default settings].
- The image provided needs to be a sample window with the original model dimensions, passed to the `--image` parameter.

Example of the HAAR/LBP face model run on a given window of Angelina Jolie, which had the same preprocessing as the cascade classifier files --> 24x24 pixel image, grayscale conversion and histogram equalisation:

_A video is made in which each feature of each stage is visualised:_

![](images/visualisation_video.png)

_Each stage is stored as an image for future validation of the features:_

![](images/visualisation_single_stage.png)

_This work was created for [OpenCV 3 Blueprints](https://www.packtpub.com/application-development/opencv-3-blueprints) by StevenPuttemans but Packt Publishing agreed integration into OpenCV._
@ -1,48 +0,0 @@ |
#ifndef OPENCV_IMGPROC_IMGPROC_C_H
#define OPENCV_IMGPROC_IMGPROC_C_H

#include "opencv2/imgproc/types_c.h"

#endif
@ -1,48 +0,0 @@ |
#ifndef OPENCV_IMGPROC_TYPES_C_H
#define OPENCV_IMGPROC_TYPES_C_H

#include "opencv2/core/core_c.h"

#endif
Some files were not shown because too many files have changed in this diff.