@@ -1,11 +1,13 @@
#include "opencv2/core/core_c.h"
#include "opencv2/core/core.hpp"
#include "opencv2/ml/ml.hpp"

#include <cstdio>
#include <vector>
/*
#include <iostream>
*/
using namespace std;
using namespace cv;
using namespace cv::ml;

static void help()
{
@@ -33,142 +35,101 @@ static void help()
}

// This function reads data and responses from the file <filename>
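// Each line of that file holds one sample: the first field is the class
// label (a capital letter) and the following var_count fields are the
// comma-separated numeric attributes of the sample.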
static int
read_num_class_data( const char* filename, int var_count,
                     CvMat** data, CvMat** responses )
static bool
read_num_class_data( const string& filename, int var_count,
                     Mat* _data, Mat* _responses )
{
    const int M = 1024;
    FILE* f = fopen( filename, "rt" );
    CvMemStorage* storage;
    CvSeq* seq;
    char buf[M+2];
    float* el_ptr;
    CvSeqReader reader;
    int i, j;

    if( !f )
        return 0;
    Mat el_ptr(1, var_count, CV_32F);
    int i;
    vector<int> responses;

    el_ptr = new float[var_count+1];
    storage = cvCreateMemStorage();
    seq = cvCreateSeq( 0, sizeof(*seq), (var_count+1)*sizeof(float), storage );
    _data->release();
    _responses->release();

    FILE* f = fopen( filename.c_str(), "rt" );
    if( !f )
    {
        cout << "Could not read the database " << filename << endl;
        return false;
    }

    for(;;)
    {
        char* ptr;
        if( !fgets( buf, M, f ) || !strchr( buf, ',' ) )
            break;
        el_ptr[0] = buf[0];
        responses.push_back((int)buf[0]);
        ptr = buf+2;
        for( i = 1; i <= var_count; i++ )
        for( i = 0; i < var_count; i++ )
        {
            int n = 0;
            sscanf( ptr, "%f%n", el_ptr + i, &n );
            sscanf( ptr, "%f%n", &el_ptr.at<float>(i), &n );
            ptr += n + 1;
        }
        if( i <= var_count )
        if( i < var_count )
            break;
        cvSeqPush( seq, el_ptr );
        _data->push_back(el_ptr);
    }
    fclose(f);
    Mat(responses).copyTo(*_responses);

    *data = cvCreateMat( seq->total, var_count, CV_32F );
    *responses = cvCreateMat( seq->total, 1, CV_32F );

    cvStartReadSeq( seq, &reader );

    for( i = 0; i < seq->total; i++ )
    {
        const float* sdata = (float*)reader.ptr + 1;
        float* ddata = data[0]->data.fl + var_count*i;
        float* dr = responses[0]->data.fl + i;

        for( j = 0; j < var_count; j++ )
            ddata[j] = sdata[j];
        *dr = sdata[-1];
        CV_NEXT_SEQ_ELEM( seq->elem_size, reader );
    }
    cout << "The database " << filename << " is loaded.\n";

    cvReleaseMemStorage( &storage );
    delete[] el_ptr;
    return 1;
    return true;
}

static
int build_rtrees_classifier( char* data_filename,
    char* filename_to_save, char* filename_to_load )
template<typename T>
static Ptr<T> load_classifier(const string& filename_to_load)
{
    CvMat* data = 0;
    CvMat* responses = 0;
    CvMat* var_type = 0;
    CvMat* sample_idx = 0;

    int ok = read_num_class_data( data_filename, 16, &data, &responses );
    int nsamples_all = 0, ntrain_samples = 0;
    int i = 0;
    double train_hr = 0, test_hr = 0;
    CvRTrees forest;
    CvMat* var_importance = 0;

    if( !ok )
    {
        printf( "Could not read the database %s\n", data_filename );
        return -1;
    }
    // load classifier from the specified file
    Ptr<T> model = StatModel::load<T>( filename_to_load );
    if( model.empty() )
        cout << "Could not read the classifier " << filename_to_load << endl;
    else
        cout << "The classifier " << filename_to_load << " is loaded.\n";

    printf( "The database %s is loaded.\n", data_filename );
    nsamples_all = data->rows;
    ntrain_samples = (int)(nsamples_all*0.8);
    return model;
}

    // Create or load Random Trees classifier
    if( filename_to_load )
    {
        // load classifier from the specified file
        forest.load( filename_to_load );
        ntrain_samples = 0;
        if( forest.get_tree_count() == 0 )
        {
            printf( "Could not read the classifier %s\n", filename_to_load );
            return -1;
        }
        printf( "The classifier %s is loaded.\n", filename_to_load );
    }
    else
    {
        // create classifier by using <data> and <responses>
        printf( "Training the classifier ...\n");
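
// prepare_train_data() marks the first ntrain_samples rows as the training
// subset via the sample_idx mask, declares all inputs as ordered variables
// with a categorical response, and wraps everything in a TrainData object.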
static Ptr<TrainData>
prepare_train_data(const Mat& data, const Mat& responses, int ntrain_samples)
{
    Mat sample_idx = Mat::zeros( 1, data.rows, CV_8U );
    Mat train_samples = sample_idx.colRange(0, ntrain_samples);
    train_samples.setTo(Scalar::all(1));

        // 1. create type mask
        var_type = cvCreateMat( data->cols + 1, 1, CV_8U );
        cvSet( var_type, cvScalarAll(CV_VAR_ORDERED) );
        cvSetReal1D( var_type, data->cols, CV_VAR_CATEGORICAL );
    int nvars = data.cols;
    Mat var_type( nvars + 1, 1, CV_8U );
    var_type.setTo(Scalar::all(VAR_ORDERED));
    var_type.at<uchar>(nvars) = VAR_CATEGORICAL;

        // 2. create sample_idx
        sample_idx = cvCreateMat( 1, nsamples_all, CV_8UC1 );
        {
            CvMat mat;
            cvGetCols( sample_idx, &mat, 0, ntrain_samples );
            cvSet( &mat, cvRealScalar(1) );
    return TrainData::create(data, ROW_SAMPLE, responses,
                             noArray(), sample_idx, noArray(), var_type);
}

            cvGetCols( sample_idx, &mat, ntrain_samples, nsamples_all );
            cvSetZero( &mat );
        }
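
// TC() is a small helper that builds the TermCriteria used below: stop after
// 'iters' iterations and, when eps > 0, also when the change drops below 'eps'.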
inline TermCriteria TC(int iters, double eps)
{
    return TermCriteria(TermCriteria::MAX_ITER + (eps > 0 ? TermCriteria::EPS : 0), iters, eps);
}

        // 3. train classifier
        forest.train( data, CV_ROW_SAMPLE, responses, 0, sample_idx, var_type, 0,
            CvRTParams(10,10,0,false,15,0,true,4,100,0.01f,CV_TERMCRIT_ITER));
        printf( "\n");
    }
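
// test_and_save_classifier() predicts every sample with the given model,
// accumulates the hit rate separately over the training and test parts of
// the set, prints both recognition rates, and writes the model to
// filename_to_save when that string is non-empty.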
static void test_and_save_classifier(const Ptr<StatModel>& model,
                                     const Mat& data, const Mat& responses,
                                     int ntrain_samples, int rdelta,
                                     const string& filename_to_save)
{
    int i, nsamples_all = data.rows;
    double train_hr = 0, test_hr = 0;

    // compute prediction error on train and test data
    for( i = 0; i < nsamples_all; i++ )
    {
        double r;
        CvMat sample;
        cvGetRow( data, &sample, i );
        Mat sample = data.row(i);

        r = forest.predict( &sample );
        r = fabs((double)r - responses->data.fl[i]) <= FLT_EPSILON ? 1 : 0;
        float r = model->predict( sample );
        r = std::abs(r + rdelta - responses.at<int>(i)) <= FLT_EPSILON ? 1 : 0;

        if( i < ntrain_samples )
            train_hr += r;
@@ -176,93 +137,101 @@ int build_rtrees_classifier( char* data_filename,
            test_hr += r;
    }

    test_hr /= (double)(nsamples_all-ntrain_samples);
    train_hr /= (double)ntrain_samples;
    test_hr /= nsamples_all - ntrain_samples;
    train_hr = ntrain_samples > 0 ? train_hr/ntrain_samples : 1.;

    printf( "Recognition rate: train = %.1f%%, test = %.1f%%\n",
            train_hr*100., test_hr*100. );

    printf( "Number of trees: %d\n", forest.get_tree_count() );

    // Print variable importance
    var_importance = (CvMat*)forest.get_var_importance();
    if( var_importance )
    if( !filename_to_save.empty() )
    {
        double rt_imp_sum = cvSum( var_importance ).val[0];
        printf("var#\timportance (in %%):\n");
        for( i = 0; i < var_importance->cols; i++ )
            printf( "%-2d\t%-4.1f\n", i,
                    100.f*var_importance->data.fl[i]/rt_imp_sum);
        model->save( filename_to_save );
    }
}

    // Print some proximities
    printf( "Proximities between some samples corresponding to the letter 'T':\n" );
static bool
build_rtrees_classifier( const string& data_filename,
                         const string& filename_to_save,
                         const string& filename_to_load )
{
    Mat data;
    Mat responses;
    bool ok = read_num_class_data( data_filename, 16, &data, &responses );
    if( !ok )
        return ok;

    Ptr<RTrees> model;

    int nsamples_all = data.rows;
    int ntrain_samples = (int)(nsamples_all*0.8);

    // Create or load Random Trees classifier
    if( !filename_to_load.empty() )
    {
        CvMat sample1, sample2;
        const int pairs[][2] = {{0,103}, {0,106}, {106,103}, {-1,-1}};
        model = load_classifier<RTrees>(filename_to_load);
        if( model.empty() )
            return false;
        ntrain_samples = 0;
    }
    else
    {
        // create classifier by using <data> and <responses>
        cout << "Training the classifier ...\n";
        Ptr<TrainData> tdata = prepare_train_data(data, responses, ntrain_samples);

        for( i = 0; pairs[i][0] >= 0; i++ )
        {
            cvGetRow( data, &sample1, pairs[i][0] );
            cvGetRow( data, &sample2, pairs[i][1] );
            printf( "proximity(%d,%d) = %.1f%%\n", pairs[i][0], pairs[i][1],
                    forest.get_proximity( &sample1, &sample2 )*100. );
        }
        // 3. train classifier
        model = RTrees::create(RTrees::Params(10,10,0,false,15,Mat(),true,4,TC(100,0.01f)));
        model->train( tdata );
        cout << endl;
    }

    // Save Random Trees classifier to file if needed
    if( filename_to_save )
        forest.save( filename_to_save );
    test_and_save_classifier(model, data, responses, ntrain_samples, 0, filename_to_save);
    cout << "Number of trees: " << model->getRoots().size() << endl;

    cvReleaseMat( &sample_idx );
    cvReleaseMat( &var_type );
    cvReleaseMat( &data );
    cvReleaseMat( &responses );
    // Print variable importance
    Mat var_importance = model->getVarImportance();
    if( !var_importance.empty() )
    {
        double rt_imp_sum = sum( var_importance )[0];
        printf("var#\timportance (in %%):\n");
        int i, n = (int)var_importance.total();
        for( i = 0; i < n; i++ )
            printf( "%-2d\t%-4.1f\n", i, 100.f*var_importance.at<float>(i)/rt_imp_sum);
    }

    return 0;
    return true;
}

static
int build_boost_classifier( char* data_filename,
    char* filename_to_save, char* filename_to_load )
static bool
build_boost_classifier( const string& data_filename,
                        const string& filename_to_save,
                        const string& filename_to_load )
{
    const int class_count = 26;
    CvMat* data = 0;
    CvMat* responses = 0;
    CvMat* var_type = 0;
    CvMat* temp_sample = 0;
    CvMat* weak_responses = 0;

    int ok = read_num_class_data( data_filename, 16, &data, &responses );
    int nsamples_all = 0, ntrain_samples = 0;
    int var_count;
    int i, j, k;
    double train_hr = 0, test_hr = 0;
    CvBoost boost;
    Mat data;
    Mat responses;
    Mat weak_responses;

    bool ok = read_num_class_data( data_filename, 16, &data, &responses );
    if( !ok )
    {
        printf( "Could not read the database %s\n", data_filename );
        return -1;
    }
        return ok;

    printf( "The database %s is loaded.\n", data_filename );
    nsamples_all = data->rows;
    ntrain_samples = (int)(nsamples_all*0.5);
    var_count = data->cols;
    int i, j, k;
    Ptr<Boost> model;

    int nsamples_all = data.rows;
    int ntrain_samples = (int)(nsamples_all*0.5);
    int var_count = data.cols;

    // Create or load Boosted Tree classifier
    if( filename_to_load )
    if( !filename_to_load.empty() )
    {
        // load classifier from the specified file
        boost.load( filename_to_load );
        model = load_classifier<Boost>(filename_to_load);
        if( model.empty() )
            return false;
        ntrain_samples = 0;
        if( !boost.get_weak_predictors() )
        {
            printf( "Could not read the classifier %s\n", filename_to_load );
            return -1;
        }
        printf( "The classifier %s is loaded.\n", filename_to_load );
    }
    else
    {
@@ -275,135 +244,108 @@ int build_boost_classifier( char* data_filename,
        //
        // !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
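        //
        // Boost in ml is a two-class classifier, so it cannot separate the 26
        // letters directly. The training set is therefore "unrolled": every
        // sample is replicated class_count times, an extra feature holding the
        // candidate class index j is appended, and the new binary response
        // states whether j matches the sample's true letter.
        //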

        CvMat* new_data = cvCreateMat( ntrain_samples*class_count, var_count + 1, CV_32F );
        CvMat* new_responses = cvCreateMat( ntrain_samples*class_count, 1, CV_32S );
        Mat new_data( ntrain_samples*class_count, var_count + 1, CV_32F );
        Mat new_responses( ntrain_samples*class_count, 1, CV_32S );

        // 1. unroll the database type mask
        printf( "Unrolling the database...\n");
        for( i = 0; i < ntrain_samples; i++ )
        {
            float* data_row = (float*)(data->data.ptr + data->step*i);
            const float* data_row = data.ptr<float>(i);
            for( j = 0; j < class_count; j++ )
            {
                float* new_data_row = (float*)(new_data->data.ptr +
                    new_data->step*(i*class_count+j));
                for( k = 0; k < var_count; k++ )
                    new_data_row[k] = data_row[k];
                float* new_data_row = (float*)new_data.ptr<float>(i*class_count+j);
                memcpy(new_data_row, data_row, var_count*sizeof(data_row[0]));
                new_data_row[var_count] = (float)j;
                new_responses->data.i[i*class_count + j] = responses->data.fl[i] == j+'A';
                new_responses.at<int>(i*class_count + j) = responses.at<int>(i) == j+'A';
            }
        }

        // 2. create type mask
        var_type = cvCreateMat( var_count + 2, 1, CV_8U );
        cvSet( var_type, cvScalarAll(CV_VAR_ORDERED) );
        // the last indicator variable, as well
        // as the new (binary) response are categorical
        cvSetReal1D( var_type, var_count, CV_VAR_CATEGORICAL );
        cvSetReal1D( var_type, var_count+1, CV_VAR_CATEGORICAL );
        Mat var_type( 1, var_count + 2, CV_8U );
        var_type.setTo(Scalar::all(VAR_ORDERED));
        var_type.at<uchar>(var_count) = var_type.at<uchar>(var_count+1) = VAR_CATEGORICAL;

        // 3. train classifier
        printf( "Training the classifier (may take a few minutes)...\n");
        boost.train( new_data, CV_ROW_SAMPLE, new_responses, 0, 0, var_type, 0,
            CvBoostParams(CvBoost::REAL, 100, 0.95, 5, false, 0 ));
        cvReleaseMat( &new_data );
        cvReleaseMat( &new_responses );
        printf("\n");
        Ptr<TrainData> tdata = TrainData::create(new_data, ROW_SAMPLE, new_responses,
                                                 noArray(), noArray(), noArray(), var_type);
        model = Boost::create(Boost::Params(Boost::REAL, 100, 0.95, 5, false, Mat() ));

        cout << "Training the classifier (may take a few minutes)...\n";
        model->train(tdata);
        cout << endl;
    }

    temp_sample = cvCreateMat( 1, var_count + 1, CV_32F );
    weak_responses = cvCreateMat( 1, boost.get_weak_predictors()->total, CV_32F );
    Mat temp_sample( 1, var_count + 1, CV_32F );
    float* tptr = temp_sample.ptr<float>();

    // compute prediction error on train and test data
    double train_hr = 0, test_hr = 0;
    for( i = 0; i < nsamples_all; i++ )
    {
        int best_class = 0;
        double max_sum = -DBL_MAX;
        double r;
        CvMat sample;
        cvGetRow( data, &sample, i );
        const float* ptr = data.ptr<float>(i);
        for( k = 0; k < var_count; k++ )
            temp_sample->data.fl[k] = sample.data.fl[k];
            tptr[k] = ptr[k];

        for( j = 0; j < class_count; j++ )
        {
            temp_sample->data.fl[var_count] = (float)j;
            boost.predict( temp_sample, 0, weak_responses );
            double sum = cvSum( weak_responses ).val[0];
            if( max_sum < sum )
            tptr[var_count] = (float)j;
            float s = model->predict( temp_sample, noArray(), StatModel::RAW_OUTPUT );
            if( max_sum < s )
            {
                max_sum = sum;
                max_sum = s;
                best_class = j + 'A';
            }
        }

        r = fabs(best_class - responses->data.fl[i]) < FLT_EPSILON ? 1 : 0;

        double r = std::abs(best_class - responses.at<int>(i)) < FLT_EPSILON ? 1 : 0;
        if( i < ntrain_samples )
            train_hr += r;
        else
            test_hr += r;
    }

    test_hr /= (double)(nsamples_all-ntrain_samples);
    train_hr /= (double)ntrain_samples;
    test_hr /= nsamples_all-ntrain_samples;
    train_hr = ntrain_samples > 0 ? train_hr/ntrain_samples : 1.;
    printf( "Recognition rate: train = %.1f%%, test = %.1f%%\n",
            train_hr*100., test_hr*100. );

    printf( "Number of trees: %d\n", boost.get_weak_predictors()->total );
    cout << "Number of trees: " << model->getRoots().size() << endl;

    // Save classifier to file if needed
    if( filename_to_save )
        boost.save( filename_to_save );
    if( !filename_to_save.empty() )
        model->save( filename_to_save );

    cvReleaseMat( &temp_sample );
    cvReleaseMat( &weak_responses );
    cvReleaseMat( &var_type );
    cvReleaseMat( &data );
    cvReleaseMat( &responses );

    return 0;
    return true;
}

static
int build_mlp_classifier( char* data_filename,
    char* filename_to_save, char* filename_to_load )
static bool
build_mlp_classifier( const string& data_filename,
                      const string& filename_to_save,
                      const string& filename_to_load )
{
    const int class_count = 26;
    CvMat* data = 0;
    CvMat train_data;
    CvMat* responses = 0;
    CvMat* mlp_response = 0;

    int ok = read_num_class_data( data_filename, 16, &data, &responses );
    int nsamples_all = 0, ntrain_samples = 0;
    int i, j;
    double train_hr = 0, test_hr = 0;
    CvANN_MLP mlp;
    Mat data;
    Mat responses;

    bool ok = read_num_class_data( data_filename, 16, &data, &responses );
    if( !ok )
    {
        printf( "Could not read the database %s\n", data_filename );
        return -1;
    }
        return ok;

    printf( "The database %s is loaded.\n", data_filename );
    nsamples_all = data->rows;
    ntrain_samples = (int)(nsamples_all*0.8);
    int i, j;
    Ptr<ANN_MLP> model;

    int nsamples_all = data.rows;
    int ntrain_samples = (int)(nsamples_all*0.8);

    // Create or load MLP classifier
    if( filename_to_load )
    if( !filename_to_load.empty() )
    {
        // load classifier from the specified file
        mlp.load( filename_to_load );
        model = load_classifier<ANN_MLP>(filename_to_load);
        if( model.empty() )
            return false;
        ntrain_samples = 0;
        if( !mlp.get_layer_count() )
        {
            printf( "Could not read the classifier %s\n", filename_to_load );
            return -1;
        }
        printf( "The classifier %s is loaded.\n", filename_to_load );
    }
    else
    {
@@ -417,45 +359,44 @@ int build_mlp_classifier( char* data_filename,
        //
        // !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
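        //
        // The MLP cannot output a categorical label directly, so each response
        // is unrolled into a binary vector of class_count components: the
        // component matching the sample's letter is set to 1 and the network
        // is trained to reproduce that vector.
        //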

        CvMat* new_responses = cvCreateMat( ntrain_samples, class_count, CV_32F );
        Mat train_data = data.rowRange(0, ntrain_samples);
        Mat new_responses = Mat::zeros( ntrain_samples, class_count, CV_32F );

        // 1. unroll the responses
        printf( "Unrolling the responses...\n");
        cout << "Unrolling the responses...\n";
        for( i = 0; i < ntrain_samples; i++ )
        {
            int cls_label = cvRound(responses->data.fl[i]) - 'A';
            float* bit_vec = (float*)(new_responses->data.ptr + i*new_responses->step);
            for( j = 0; j < class_count; j++ )
                bit_vec[j] = 0.f;
            bit_vec[cls_label] = 1.f;
            int cls_label = responses.at<int>(i) - 'A';
            new_responses.at<float>(i, cls_label) = 1.f;
        }
        cvGetRows( data, &train_data, 0, ntrain_samples );

        // 2. train classifier
        int layer_sz[] = { data->cols, 100, 100, class_count };
        CvMat layer_sizes =
            cvMat( 1, (int)(sizeof(layer_sz)/sizeof(layer_sz[0])), CV_32S, layer_sz );
        mlp.create( &layer_sizes );
        printf( "Training the classifier (may take a few minutes)...\n");
        int layer_sz[] = { data.cols, 100, 100, class_count };
        int nlayers = (int)(sizeof(layer_sz)/sizeof(layer_sz[0]));
        Mat layer_sizes( 1, nlayers, CV_32S, layer_sz );

#if 1
        int method = CvANN_MLP_TrainParams::BACKPROP;
        int method = ANN_MLP::Params::BACKPROP;
        double method_param = 0.001;
        int max_iter = 300;
#else
        int method = CvANN_MLP_TrainParams::RPROP;
        int method = ANN_MLP::Params::RPROP;
        double method_param = 0.1;
        int max_iter = 1000;
#endif

        mlp.train( &train_data, new_responses, 0, 0,
            CvANN_MLP_TrainParams(cvTermCriteria(CV_TERMCRIT_ITER,max_iter,0.01),
            method, method_param));
            ANN_MLP::Params(TC(max_iter,0), method, method_param));

        model = ANN_MLP::create() mlp.create( &layer_sizes );
        printf( "Training the classifier (may take a few minutes)...\n");

        cvReleaseMat( &new_responses );
        printf("\n");
    }

    mlp_response = cvCreateMat( 1, class_count, CV_32F );
    Mat mlp_response;

    // compute prediction error on train and test data
    for( i = 0; i < nsamples_all; i++ )
@@ -481,38 +422,26 @@ int build_mlp_classifier( char* data_filename,
    printf( "Recognition rate: train = %.1f%%, test = %.1f%%\n",
            train_hr*100., test_hr*100. );

    // Save classifier to file if needed
    if( filename_to_save )
        mlp.save( filename_to_save );
    if( !filename_to_save.empty() )
        model->save( filename_to_save );

    cvReleaseMat( &mlp_response );
    cvReleaseMat( &data );
    cvReleaseMat( &responses );

    return 0;
    return true;
}

static
int build_knearest_classifier( char* data_filename, int K )
static bool
build_knearest_classifier( const string& data_filename, int K )
{
    const int var_count = 16;
    CvMat* data = 0;
    Mat data;
    CvMat train_data;
    CvMat* responses;

    int ok = read_num_class_data( data_filename, 16, &data, &responses );
    int nsamples_all = 0, ntrain_samples = 0;
    //int i, j;
    //double /*train_hr = 0,*/ test_hr = 0;
    CvANN_MLP mlp;
    Mat responses;

    bool ok = read_num_class_data( data_filename, 16, &data, &responses );
    if( !ok )
    {
        printf( "Could not read the database %s\n", data_filename );
        return -1;
    }
    return ok;

    int nsamples_all = 0, ntrain_samples = 0;

    printf( "The database %s is loaded.\n", data_filename );
    nsamples_all = data->rows;
    ntrain_samples = (int)(nsamples_all*0.8);

@@ -521,12 +450,13 @@ int build_knearest_classifier( char* data_filename, int K )
    cvGetRows( data, &train_data, 0, ntrain_samples );

    // 2. train classifier
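    // KNearest simply stores the training samples and their labels; at
    // prediction time a test sample gets the most frequent label among its
    // K nearest training samples (K comes from the command line).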
    CvMat* train_resp = cvCreateMat( ntrain_samples, 1, CV_32FC1);
    Mat train_resp = cvCreateMat( ntrain_samples, 1, CV_32FC1);
    for (int i = 0; i < ntrain_samples; i++)
        train_resp->data.fl[i] = responses->data.fl[i];
    CvKNearest knearest(&train_data, train_resp);
    Ptr<KNearest> model = KNearest::create(true);
    model->train(train_data, train_resp);

    CvMat* nearests = cvCreateMat( (nsamples_all - ntrain_samples), K, CV_32FC1);
    Mat nearests = cvCreateMat( (nsamples_all - ntrain_samples), K, CV_32FC1);
    float* _sample = new float[var_count * (nsamples_all - ntrain_samples)];
    CvMat sample = cvMat( nsamples_all - ntrain_samples, 16, CV_32FC1, _sample );
    float* true_results = new float[nsamples_all - ntrain_samples];
@@ -569,27 +499,20 @@ int build_knearest_classifier( char* data_filename, int K )
    return 0;
}

static
int build_nbayes_classifier( char* data_filename )
static bool
build_nbayes_classifier( const string& data_filename )
{
    const int var_count = 16;
    CvMat* data = 0;
    Mat data;
    CvMat train_data;
    CvMat* responses;

    int ok = read_num_class_data( data_filename, 16, &data, &responses );
    int nsamples_all = 0, ntrain_samples = 0;
    //int i, j;
    //double /*train_hr = 0, */test_hr = 0;
    CvANN_MLP mlp;
    Mat responses;

    bool ok = read_num_class_data( data_filename, 16, &data, &responses );
    if( !ok )
    {
        printf( "Could not read the database %s\n", data_filename );
        return -1;
    }
    return ok;

    int nsamples_all = 0, ntrain_samples = 0;

    printf( "The database %s is loaded.\n", data_filename );
    nsamples_all = data->rows;
    ntrain_samples = (int)(nsamples_all*0.5);

@@ -598,7 +521,7 @@ int build_nbayes_classifier( char* data_filename )
    cvGetRows( data, &train_data, 0, ntrain_samples );

    // 2. train classifier
    CvMat* train_resp = cvCreateMat( ntrain_samples, 1, CV_32FC1);
    Mat train_resp = cvCreateMat( ntrain_samples, 1, CV_32FC1);
    for (int i = 0; i < ntrain_samples; i++)
        train_resp->data.fl[i] = responses->data.fl[i];
    CvNormalBayesClassifier nbayes(&train_data, train_resp);
@@ -638,23 +561,23 @@ int build_nbayes_classifier( char* data_filename )
    return 0;
}

static
int build_svm_classifier( char* data_filename, const char* filename_to_save, const char* filename_to_load )
static bool
build_svm_classifier( const string& data_filename,
                      const string& filename_to_save,
                      const string& filename_to_load )
{
    CvMat* data = 0;
    CvMat* responses = 0;
    CvMat* train_resp = 0;
    Mat data;
    Mat responses;
    Mat train_resp;
    CvMat train_data;
    int nsamples_all = 0, ntrain_samples = 0;
    int var_count;
    CvSVM svm;
    Ptr<SVM> model;

    int ok = read_num_class_data( data_filename, 16, &data, &responses );
    bool ok = read_num_class_data( data_filename, 16, &data, &responses );
    if( !ok )
    {
        printf( "Could not read the database %s\n", data_filename );
        return -1;
    }
    return ok;

    ////////// SVM parameters ///////////////////////////////
    CvSVMParams param;
    param.kernel_type=CvSVM::LINEAR;
@@ -722,15 +645,10 @@ int build_svm_classifier( char* data_filename, const char* filename_to_save, con

    printf("true_resp = %f%%\n", (float)true_resp / (nsamples_all - ntrain_samples) * 100);

    if( filename_to_save )
        svm.save( filename_to_save );
    if( !filename_to_save.empty() )
        model->save( filename_to_save );

    cvReleaseMat( &train_resp );
    cvReleaseMat( &result );
    cvReleaseMat( &data );
    cvReleaseMat( &responses );

    return 0;
    return true;
}

int main( int argc, char *argv[] )
@@ -768,17 +686,17 @@ int main( int argc, char *argv[] )
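        // 'method' selects which classifier to build: the options parsed here
        // map -knearest to 3, -nbayes to 4 and -svm to 5; the lower values are
        // set by the options handled just above.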
            method = 2;
        }
        else if ( strcmp(argv[i], "-knearest") == 0)
        {
            method = 3;
        }
        else if ( strcmp(argv[i], "-nbayes") == 0)
        {
            method = 4;
        }
        else if ( strcmp(argv[i], "-svm") == 0)
        {
            method = 5;
        }
        {
            method = 3;
        }
        else if ( strcmp(argv[i], "-nbayes") == 0)
        {
            method = 4;
        }
        else if ( strcmp(argv[i], "-svm") == 0)
        {
            method = 5;
        }
        else
            break;
    }