Made FLANN (and the code that depends on it) build fine on Windows. Most of the changes add CV_EXPORTS to the class declarations, and extern variables are turned into external functions (as a nice side effect, the variables are now read-only).

pull/13383/head
Vadim Pisarevsky 15 years ago
parent d611fb61fc
commit f678c8f07b
  1. modules/flann/include/opencv2/flann/allocator.h (2 lines changed)
  2. modules/flann/include/opencv2/flann/autotuned_index.h (54 lines changed)
  3. modules/flann/include/opencv2/flann/composite_index.h (16 lines changed)
  4. modules/flann/include/opencv2/flann/dist.h (13 lines changed)
  5. modules/flann/include/opencv2/flann/flann.hpp (2 lines changed)
  6. modules/flann/include/opencv2/flann/flann_base.hpp (18 lines changed)
  7. modules/flann/include/opencv2/flann/general.h (24 lines changed)
  8. modules/flann/include/opencv2/flann/index_testing.h (34 lines changed)
  9. modules/flann/include/opencv2/flann/kdtree_index.h (6 lines changed)
  10. modules/flann/include/opencv2/flann/kmeans_index.h (22 lines changed)
  11. modules/flann/include/opencv2/flann/linear_index.h (4 lines changed)
  12. modules/flann/include/opencv2/flann/logger.h (4 lines changed)
  13. modules/flann/include/opencv2/flann/object_factory.h (1 line changed)
  14. modules/flann/include/opencv2/flann/random.h (8 lines changed)
  15. modules/flann/include/opencv2/flann/result_set.h (2 lines changed)
  16. modules/flann/include/opencv2/flann/saving.h (14 lines changed)
  17. modules/flann/include/opencv2/flann/simplex_downhill.h (10 lines changed)
  18. modules/flann/include/opencv2/flann/timer.h (2 lines changed)
  19. modules/flann/src/flann.cpp (45 lines changed)
  20. modules/flann/src/precomp.cpp (1 line changed)
  21. modules/flann/src/precomp.hpp (18 lines changed)
  22. modules/objdetect/src/_lsvm_error.h (4 lines changed)
  23. samples/c/find_obj.cpp (2 lines changed)

modules/flann/include/opencv2/flann/allocator.h
@@ -70,7 +70,7 @@ T* allocate(size_t count = 1)
 const size_t WORDSIZE=16;
 const size_t BLOCKSIZE=8192;
-class PooledAllocator
+class CV_EXPORTS PooledAllocator
 {
 /* We maintain memory alignment to word boundaries by requiring that all
 allocations be in multiples of the machine wordsize. */

modules/flann/include/opencv2/flann/autotuned_index.h
@@ -59,11 +59,11 @@ struct AutotunedIndexParams : public IndexParams {
 void print() const
 {
-logger.info("Index type: %d\n",(int)algorithm);
+logger().info("Index type: %d\n",(int)algorithm);
-logger.info("Target precision: %g\n", target_precision);
+logger().info("Target precision: %g\n", target_precision);
-logger.info("Build weight: %g\n", build_weight);
+logger().info("Build weight: %g\n", build_weight);
-logger.info("Memory weight: %g\n", memory_weight);
+logger().info("Memory weight: %g\n", memory_weight);
-logger.info("Sample fraction: %g\n", sample_fraction);
+logger().info("Sample fraction: %g\n", sample_fraction);
 }
 };
@@ -117,10 +117,10 @@ public:
 virtual void buildIndex()
 {
 bestParams = estimateBuildParams();
-logger.info("----------------------------------------------------\n");
+logger().info("----------------------------------------------------\n");
-logger.info("Autotuned parameters:\n");
+logger().info("Autotuned parameters:\n");
 bestParams->print();
-logger.info("----------------------------------------------------\n");
+logger().info("----------------------------------------------------\n");
 flann_algorithm_t index_type = bestParams->getIndexType();
 switch (index_type) {
 case LINEAR:
@@ -234,7 +234,7 @@ private:
 int checks;
 const int nn = 1;
-logger.info("KMeansTree using params: max_iterations=%d, branching=%d\n", kmeans_params.iterations, kmeans_params.branching);
+logger().info("KMeansTree using params: max_iterations=%d, branching=%d\n", kmeans_params.iterations, kmeans_params.branching);
 KMeansIndex<ELEM_TYPE> kmeans(sampledDataset, kmeans_params);
 // measure index build time
 t.start();
@@ -250,7 +250,7 @@ private:
 cost.searchTimeCost = searchTime;
 cost.buildTimeCost = buildTime;
 cost.timeCost = (buildTime*index_params.build_weight+searchTime);
-logger.info("KMeansTree buildTime=%g, searchTime=%g, timeCost=%g, buildTimeFactor=%g\n",buildTime, searchTime, cost.timeCost, index_params.build_weight);
+logger().info("KMeansTree buildTime=%g, searchTime=%g, timeCost=%g, buildTimeFactor=%g\n",buildTime, searchTime, cost.timeCost, index_params.build_weight);
 }
@@ -260,7 +260,7 @@ private:
 int checks;
 const int nn = 1;
-logger.info("KDTree using params: trees=%d\n",kdtree_params.trees);
+logger().info("KDTree using params: trees=%d\n",kdtree_params.trees);
 KDTreeIndex<ELEM_TYPE> kdtree(sampledDataset, kdtree_params);
 t.start();
@@ -276,7 +276,7 @@ private:
 cost.searchTimeCost = searchTime;
 cost.buildTimeCost = buildTime;
 cost.timeCost = (buildTime*index_params.build_weight+searchTime);
-logger.info("KDTree buildTime=%g, searchTime=%g, timeCost=%g\n",buildTime, searchTime, cost.timeCost);
+logger().info("KDTree buildTime=%g, searchTime=%g, timeCost=%g\n",buildTime, searchTime, cost.timeCost);
 }
@@ -330,7 +330,7 @@ private:
 KMeansCostData optimizeKMeans()
 {
-logger.info("KMEANS, Step 1: Exploring parameter space\n");
+logger().info("KMEANS, Step 1: Exploring parameter space\n");
 // explore kmeans parameters space using combinations of the parameters below
 int maxIterations[] = { 1, 5, 10, 15 };
@@ -363,7 +363,7 @@ private:
 }
 }
-// logger.info("KMEANS, Step 2: simplex-downhill optimization\n");
+// logger().info("KMEANS, Step 2: simplex-downhill optimization\n");
 //
 // const int n = 2;
 // // choose initial simplex points as the best parameters so far
@@ -397,7 +397,7 @@ private:
 }
 // display the costs obtained
 for (int i=0;i<kmeansParamSpaceSize;++i) {
-logger.info("KMeans, branching=%d, iterations=%d, time_cost=%g[%g] (build=%g, search=%g), memory_cost=%g, cost=%g\n",
+logger().info("KMeans, branching=%d, iterations=%d, time_cost=%g[%g] (build=%g, search=%g), memory_cost=%g, cost=%g\n",
 kmeansCosts[i].second.branching, kmeansCosts[i].second.iterations,
 kmeansCosts[i].first.timeCost,kmeansCosts[i].first.timeCost/optTimeCost,
 kmeansCosts[i].first.buildTimeCost, kmeansCosts[i].first.searchTimeCost,
@@ -411,7 +411,7 @@ private:
 KDTreeCostData optimizeKDTree()
 {
-logger.info("KD-TREE, Step 1: Exploring parameter space\n");
+logger().info("KD-TREE, Step 1: Exploring parameter space\n");
 // explore kd-tree parameters space using the parameters below
 int testTrees[] = { 1, 4, 8, 16, 32 };
@@ -435,7 +435,7 @@ private:
 ++cnt;
 }
-// logger.info("KD-TREE, Step 2: simplex-downhill optimization\n");
+// logger().info("KD-TREE, Step 2: simplex-downhill optimization\n");
 //
 // const int n = 1;
 // // choose initial simplex points as the best parameters so far
@@ -467,7 +467,7 @@ private:
 }
 // display costs obtained
 for (size_t i=0;i<kdtreeParamSpaceSize;++i) {
-logger.info("kd-tree, trees=%d, time_cost=%g[%g] (build=%g, search=%g), memory_cost=%g, cost=%g\n",
+logger().info("kd-tree, trees=%d, time_cost=%g[%g] (build=%g, search=%g), memory_cost=%g, cost=%g\n",
 kdtreeCosts[i].second.trees,kdtreeCosts[i].first.timeCost,kdtreeCosts[i].first.timeCost/optTimeCost,
 kdtreeCosts[i].first.buildTimeCost, kdtreeCosts[i].first.searchTimeCost,
 kdtreeCosts[i].first.memoryCost,kdtreeCosts[i].first.totalCost);
@@ -486,12 +486,12 @@ private:
 int sampleSize = int(index_params.sample_fraction*dataset.rows);
 int testSampleSize = min(sampleSize/10, 1000);
-logger.info("Entering autotuning, dataset size: %d, sampleSize: %d, testSampleSize: %d\n",dataset.rows, sampleSize, testSampleSize);
+logger().info("Entering autotuning, dataset size: %d, sampleSize: %d, testSampleSize: %d\n",dataset.rows, sampleSize, testSampleSize);
 // For a very small dataset, it makes no sense to build any fancy index, just
 // use linear search
 if (testSampleSize<10) {
-logger.info("Choosing linear, dataset too small\n");
+logger().info("Choosing linear, dataset too small\n");
 return new LinearIndexParams();
 }
@@ -501,7 +501,7 @@ private:
 testDataset = random_sample(sampledDataset,testSampleSize,true);
 // We compute the ground truth using linear search
-logger.info("Computing ground truth... \n");
+logger().info("Computing ground truth... \n");
 gt_matches = Matrix<int>(new int[testDataset.rows],testDataset.rows, 1);
 StartStopTimer t;
 t.start();
@@ -511,7 +511,7 @@ private:
 IndexParams* bestParams = new LinearIndexParams();
 // Start parameter autotune process
-logger.info("Autotuning parameters...\n");
+logger().info("Autotuning parameters...\n");
 KMeansCostData kmeansCost = optimizeKMeans();
@@ -554,7 +554,7 @@ private:
 if (samples>0) {
 Matrix<ELEM_TYPE> testDataset = random_sample(dataset,samples);
-logger.info("Computing ground truth\n");
+logger().info("Computing ground truth\n");
 // we need to compute the ground truth first
 Matrix<int> gt_matches(new int[testDataset.rows],testDataset.rows,1);
@@ -565,12 +565,12 @@ private:
 float linear = t.value;
 int checks;
-logger.info("Estimating number of checks\n");
+logger().info("Estimating number of checks\n");
 float searchTime;
 float cb_index;
 if (bestIndex->getType() == KMEANS) {
-logger.info("KMeans algorithm, estimating cluster border factor\n");
+logger().info("KMeans algorithm, estimating cluster border factor\n");
 KMeansIndex<ELEM_TYPE>* kmeans = (KMeansIndex<ELEM_TYPE>*)bestIndex;
 float bestSearchTime = -1;
 float best_cb_index = -1;
@@ -589,14 +589,14 @@ private:
 checks = best_checks;
 kmeans->set_cb_index(best_cb_index);
-logger.info("Optimum cb_index: %g\n",cb_index);
+logger().info("Optimum cb_index: %g\n",cb_index);
 ((KMeansIndexParams*)bestParams)->cb_index = cb_index;
 }
 else {
 searchTime = test_index_precision(*bestIndex, dataset, testDataset, gt_matches, index_params.target_precision, checks, nn, 1);
 }
-logger.info("Required number of checks: %d \n",checks);;
+logger().info("Required number of checks: %d \n",checks);;
 searchParams.checks = checks;
 speedup = linear/searchTime;

modules/flann/include/opencv2/flann/composite_index.h
@@ -58,12 +58,12 @@ struct CompositeIndexParams : public IndexParams {
 void print() const
 {
-logger.info("Index type: %d\n",(int)algorithm);
+logger().info("Index type: %d\n",(int)algorithm);
-logger.info("Trees: %d\n", trees);
+logger().info("Trees: %d\n", trees);
-logger.info("Branching: %d\n", branching);
+logger().info("Branching: %d\n", branching);
-logger.info("Iterations: %d\n", iterations);
+logger().info("Iterations: %d\n", iterations);
-logger.info("Centres initialisation: %d\n", centers_init);
+logger().info("Centres initialisation: %d\n", centers_init);
-logger.info("Cluster boundary weight: %g\n", cb_index);
+logger().info("Cluster boundary weight: %g\n", cb_index);
 }
 };
@@ -124,9 +124,9 @@ public:
 void buildIndex()
 {
-logger.info("Building kmeans tree...\n");
+logger().info("Building kmeans tree...\n");
 kmeans->buildIndex();
-logger.info("Building kdtree tree...\n");
+logger().info("Building kdtree tree...\n");
 kdtree->buildIndex();
 }

modules/flann/include/opencv2/flann/dist.h
@@ -82,7 +82,7 @@ double euclidean_dist(Iterator1 first1, Iterator1 last1, Iterator2 first2, doubl
 return distsq;
 }
-double euclidean_dist(const unsigned char* first1, const unsigned char* last1, unsigned char* first2, double acc);
+CV_EXPORTS double euclidean_dist(const unsigned char* first1, const unsigned char* last1, unsigned char* first2, double acc);
 /**
@@ -117,7 +117,7 @@ double manhattan_dist(Iterator1 first1, Iterator1 last1, Iterator2 first2, doubl
 }
-extern int flann_minkowski_order;
+CV_EXPORTS int flann_minkowski_order();
 /**
 * Compute the Minkowski (L_p) distance between two vectors.
 *
@@ -134,7 +134,7 @@ double minkowski_dist(Iterator1 first1, Iterator1 last1, Iterator2 first2, doubl
 double diff0, diff1, diff2, diff3;
 Iterator1 lastgroup = last1 - 3;
-int p = flann_minkowski_order;
+int p = flann_minkowski_order();
 /* Process 4 items with each loop for efficiency. */
 while (first1 < lastgroup) {
@@ -293,7 +293,7 @@ double kl_divergence(Iterator1 first1, Iterator1 last1, Iterator2 first2, double
-extern flann_distance_t flann_distance_type;
+CV_EXPORTS flann_distance_t flann_distance_type();
 /**
 * Custom distance function. The distance computed is dependent on the value
 * of the 'flann_distance_type' global variable.
@@ -304,7 +304,7 @@ extern flann_distance_t flann_distance_type;
 template <typename Iterator1, typename Iterator2>
 double custom_dist(Iterator1 first1, Iterator1 last1, Iterator2 first2, double acc = 0)
 {
-switch (flann_distance_type) {
+switch (flann_distance_type()) {
 case EUCLIDEAN:
 return euclidean_dist(first1, last1, first2, acc);
 case MANHATTAN:
@@ -353,7 +353,8 @@ struct ZeroIterator {
 }
 };
-extern ZeroIterator<float> zero;
+CV_EXPORTS ZeroIterator<float>& zero();
 } // namespace cvflann

modules/flann/include/opencv2/flann/flann.hpp
@@ -74,7 +74,7 @@ using ::cvflann::SearchParams;
 template <typename T>
-class Index_ {
+class CV_EXPORTS Index_ {
 ::cvflann::Index<T>* nnIndex;
 public:

modules/flann/include/opencv2/flann/flann_base.hpp
@@ -55,7 +55,7 @@ Sets the log level used for all flann functions
 Params:
 level = verbosity level
 */
-void log_verbosity(int level);
+CV_EXPORTS void log_verbosity(int level);
 /**
@@ -63,10 +63,10 @@ void log_verbosity(int level);
 * If distance type specified is MINKOWSKI, the second argument
 * specifies which order the minkowski distance should have.
 */
-void set_distance_type(flann_distance_t distance_type, int order);
+CV_EXPORTS void set_distance_type(flann_distance_t distance_type, int order);
-struct SavedIndexParams : public IndexParams {
+struct CV_EXPORTS SavedIndexParams : public IndexParams {
 SavedIndexParams(std::string filename_) : IndexParams(SAVED), filename(filename_) {}
 std::string filename; // filename of the stored index
@@ -75,13 +75,13 @@ struct SavedIndexParams : public IndexParams {
 void print() const
 {
-logger.info("Index type: %d\n",(int)algorithm);
+logger().info("Index type: %d\n",(int)algorithm);
-logger.info("Filename: %s\n", filename.c_str());
+logger().info("Filename: %s\n", filename.c_str());
 }
 };
 template<typename T>
-class Index {
+class CV_EXPORTS Index {
 NNIndex<T>* nnIndex;
 bool built;
@@ -178,7 +178,7 @@ void Index<T>::knnSearch(const Matrix<T>& queries, Matrix<int>& indices, Matrix<
 for (size_t i = 0; i < queries.rows; i++) {
 T* target = queries[i];
-resultSet.init(target, queries.cols);
+resultSet.init(target, (int)queries.cols);
 nnIndex->findNeighbors(resultSet, target, searchParams);
@@ -202,7 +202,7 @@ int Index<T>::radiusSearch(const Matrix<T>& query, Matrix<int>& indices, Matrix<
 assert(query.cols==nnIndex->veclen());
 RadiusResultSet<T> resultSet(radius);
-resultSet.init(query.data, query.cols);
+resultSet.init(query.data, (int)query.cols);
 nnIndex->findNeighbors(resultSet,query.data,searchParams);
 // TODO: optimise here
@@ -217,7 +217,7 @@ int Index<T>::radiusSearch(const Matrix<T>& query, Matrix<int>& indices, Matrix<
 dists[0][i] = distances[i];
 }
-return count_nn;
+return (int)count_nn;
 }

modules/flann/include/opencv2/flann/general.h
@@ -31,9 +31,16 @@
 #ifndef CONSTANTS_H
 #define CONSTANTS_H
-#define ARRAY_LEN(a) (sizeof(a)/sizeof(a[0]))
+#ifdef __cplusplus
+#include <stdexcept>
+#include <cassert>
+#include "opencv2/flann/object_factory.h"
+namespace cvflann {
+#undef ARRAY_LEN
+#define ARRAY_LEN(a) (sizeof(a)/sizeof(a[0]))
 /* Nearest neighbour index algorithms */
 enum flann_algorithm_t {
@@ -85,17 +92,6 @@ enum flann_datatype_t {
 FLOAT64 = 9
 };
-#ifdef __cplusplus
-#include <stdexcept>
-#include <cassert>
-#include "opencv2/flann/object_factory.h"
-namespace cvflann {
 template <typename ELEM_TYPE>
 struct DistType
 {
@@ -123,7 +119,7 @@ class FLANNException : public std::runtime_error {
 };
-struct IndexParams {
+struct CV_EXPORTS IndexParams {
 protected:
 IndexParams(flann_algorithm_t algorithm_) : algorithm(algorithm_) {};
@@ -139,7 +135,7 @@ public:
 typedef ObjectFactory<IndexParams, flann_algorithm_t> ParamsFactory;
-struct SearchParams {
+struct CV_EXPORTS SearchParams {
 SearchParams(int checks_ = 32) :
 checks(checks_) {};

modules/flann/include/opencv2/flann/index_testing.h
@@ -46,7 +46,7 @@ using namespace std;
 namespace cvflann
 {
-int countCorrectMatches(int* neighbors, int* groundTruth, int n);
+CV_EXPORTS int countCorrectMatches(int* neighbors, int* groundTruth, int n);
 template <typename ELEM_TYPE>
@@ -73,7 +73,7 @@ template <typename ELEM_TYPE>
 float search_with_ground_truth(NNIndex<ELEM_TYPE>& index, const Matrix<ELEM_TYPE>& inputData, const Matrix<ELEM_TYPE>& testData, const Matrix<int>& matches, int nn, int checks, float& time, float& dist, int skipMatches)
 {
 if (matches.cols<size_t(nn)) {
-logger.info("matches.cols=%d, nn=%d\n",matches.cols,nn);
+logger().info("matches.cols=%d, nn=%d\n",matches.cols,nn);
 throw FLANNException("Ground truth is not computed for as many neighbors as requested");
 }
@@ -109,7 +109,7 @@ float search_with_ground_truth(NNIndex<ELEM_TYPE>& index, const Matrix<ELEM_TYPE
 dist = distR/(testData.rows*nn);
-logger.info("%8d %10.4g %10.5g %10.5g %10.5g\n",
+logger().info("%8d %10.4g %10.5g %10.5g %10.5g\n",
 checks, precicion, time, 1000.0 * time / testData.rows, dist);
 return precicion;
@@ -120,8 +120,8 @@ template <typename ELEM_TYPE>
 float test_index_checks(NNIndex<ELEM_TYPE>& index, const Matrix<ELEM_TYPE>& inputData, const Matrix<ELEM_TYPE>& testData, const Matrix<int>& matches,
 int checks, float& precision, int nn = 1, int skipMatches = 0)
 {
-logger.info(" Nodes Precision(%) Time(s) Time/vec(ms) Mean dist\n");
+logger().info(" Nodes Precision(%) Time(s) Time/vec(ms) Mean dist\n");
-logger.info("---------------------------------------------------------\n");
+logger().info("---------------------------------------------------------\n");
 float time = 0;
 float dist = 0;
@@ -136,8 +136,8 @@ float test_index_precision(NNIndex<ELEM_TYPE>& index, const Matrix<ELEM_TYPE>& i
 {
 const float SEARCH_EPS = 0.001;
-logger.info(" Nodes Precision(%) Time(s) Time/vec(ms) Mean dist\n");
+logger().info(" Nodes Precision(%) Time(s) Time/vec(ms) Mean dist\n");
-logger.info("---------------------------------------------------------\n");
+logger().info("---------------------------------------------------------\n");
 int c2 = 1;
 float p2;
@@ -149,7 +149,7 @@ float test_index_precision(NNIndex<ELEM_TYPE>& index, const Matrix<ELEM_TYPE>& i
 p2 = search_with_ground_truth(index, inputData, testData, matches, nn, c2, time, dist, skipMatches);
 if (p2>precision) {
-logger.info("Got as close as I can\n");
+logger().info("Got as close as I can\n");
 checks = c2;
 return time;
 }
@@ -164,7 +164,7 @@ float test_index_precision(NNIndex<ELEM_TYPE>& index, const Matrix<ELEM_TYPE>& i
 int cx;
 float realPrecision;
 if (fabs(p2-precision)>SEARCH_EPS) {
-logger.info("Start linear estimation\n");
+logger().info("Start linear estimation\n");
 // after we got to values in the vecinity of the desired precision
 // use linear approximation get a better estimation
@@ -180,7 +180,7 @@ float test_index_precision(NNIndex<ELEM_TYPE>& index, const Matrix<ELEM_TYPE>& i
 }
 cx = (c1+c2)/2;
 if (cx==c1) {
-logger.info("Got as close as I can\n");
+logger().info("Got as close as I can\n");
 break;
 }
 realPrecision = search_with_ground_truth(index, inputData, testData, matches, nn, cx, time, dist, skipMatches);
@@ -190,7 +190,7 @@ float test_index_precision(NNIndex<ELEM_TYPE>& index, const Matrix<ELEM_TYPE>& i
 p2 = realPrecision;
 } else {
-logger.info("No need for linear estimation\n");
+logger().info("No need for linear estimation\n");
 cx = c2;
 realPrecision = p2;
 }
@@ -212,8 +212,8 @@ float test_index_precisions(NNIndex<ELEM_TYPE>& index, const Matrix<ELEM_TYPE>&
 int pindex = 0;
 float precision = precisions[pindex];
-logger.info(" Nodes Precision(%) Time(s) Time/vec(ms) Mean dist");
+logger().info(" Nodes Precision(%) Time(s) Time/vec(ms) Mean dist");
-logger.info("---------------------------------------------------------");
+logger().info("---------------------------------------------------------");
 int c2 = 1;
 float p2;
@@ -234,7 +234,7 @@ float test_index_precisions(NNIndex<ELEM_TYPE>& index, const Matrix<ELEM_TYPE>&
 }
 if (pindex==precisions_length) {
-logger.info("Got as close as I can\n");
+logger().info("Got as close as I can\n");
 return time;
 }
@@ -252,7 +252,7 @@ float test_index_precisions(NNIndex<ELEM_TYPE>& index, const Matrix<ELEM_TYPE>&
 int cx;
 float realPrecision;
 if (fabs(p2-precision)>SEARCH_EPS) {
-logger.info("Start linear estimation\n");
+logger().info("Start linear estimation\n");
 // after we got to values in the vecinity of the desired precision
 // use linear approximation get a better estimation
@@ -268,7 +268,7 @@ float test_index_precisions(NNIndex<ELEM_TYPE>& index, const Matrix<ELEM_TYPE>&
 }
 cx = (c1+c2)/2;
 if (cx==c1) {
-logger.info("Got as close as I can\n");
+logger().info("Got as close as I can\n");
 break;
 }
 realPrecision = search_with_ground_truth(index, inputData, testData, matches, nn, cx, time, dist, skipMatches);
@@ -278,7 +278,7 @@ float test_index_precisions(NNIndex<ELEM_TYPE>& index, const Matrix<ELEM_TYPE>&
 p2 = realPrecision;
 } else {
-logger.info("No need for linear estimation\n");
+logger().info("No need for linear estimation\n");
 cx = c2;
 realPrecision = p2;
 }

modules/flann/include/opencv2/flann/kdtree_index.h
@@ -51,7 +51,7 @@ using namespace std;
 namespace cvflann
 {
-struct KDTreeIndexParams : public IndexParams {
+struct CV_EXPORTS KDTreeIndexParams : public IndexParams {
 KDTreeIndexParams(int trees_ = 4) : IndexParams(KDTREE), trees(trees_) {};
 int trees; // number of randomized trees to use (for kdtree)
@@ -60,8 +60,8 @@ struct KDTreeIndexParams : public IndexParams {
 void print() const
 {
-logger.info("Index type: %d\n",(int)algorithm);
+logger().info("Index type: %d\n",(int)algorithm);
-logger.info("Trees: %d\n", trees);
+logger().info("Trees: %d\n", trees);
 }
 };

modules/flann/include/opencv2/flann/kmeans_index.h
@@ -52,7 +52,7 @@ namespace cvflann
 {
-struct KMeansIndexParams : public IndexParams {
+struct CV_EXPORTS KMeansIndexParams : public IndexParams {
 KMeansIndexParams(int branching_ = 32, int iterations_ = 11,
 flann_centers_init_t centers_init_ = CENTERS_RANDOM, float cb_index_ = 0.2 ) :
 IndexParams(KMEANS),
@@ -70,11 +70,11 @@ struct KMeansIndexParams : public IndexParams {
 void print() const
 {
-logger.info("Index type: %d\n",(int)algorithm);
+logger().info("Index type: %d\n",(int)algorithm);
-logger.info("Branching: %d\n", branching);
+logger().info("Branching: %d\n", branching);
-logger.info("Iterations: %d\n", iterations);
+logger().info("Iterations: %d\n", iterations);
-logger.info("Centres initialisation: %d\n", centers_init);
+logger().info("Centres initialisation: %d\n", centers_init);
-logger.info("Cluster boundary weight: %g\n", cb_index);
+logger().info("Cluster boundary weight: %g\n", cb_index);
 }
 };
@@ -578,7 +578,7 @@ public:
 int clusterCount = getMinVarianceClusters(root, clusters, numClusters, variance);
-// logger.info("Clusters requested: %d, returning %d\n",numClusters, clusterCount);
+// logger().info("Clusters requested: %d, returning %d\n",numClusters, clusterCount);
 for (int i=0;i<clusterCount;++i) {
@@ -671,13 +671,13 @@ private:
 for (size_t j=0;j<veclen_;++j) {
 mean[j] += vec[j];
 }
-variance += flann_dist(vec,vec+veclen_,zero);
+variance += flann_dist(vec,vec+veclen_,zero());
 }
 for (size_t j=0;j<veclen_;++j) {
 mean[j] /= size_;
 }
 variance /= size_;
-variance -= flann_dist(mean,mean+veclen_,zero);
+variance -= flann_dist(mean,mean+veclen_,zero());
 DIST_TYPE tmp = 0;
 for (int i=0;i<indices_length;++i) {
@@ -856,7 +856,7 @@ private:
 float mean_radius =0;
 for (int i=0;i<indices_length;++i) {
 if (belongs_to[i]==c) {
-float d = flann_dist(dataset[indices[i]],dataset[indices[i]]+veclen_,zero);
+float d = flann_dist(dataset[indices[i]],dataset[indices[i]]+veclen_,zero());
 variance += d;
 mean_radius += sqrt(d);
 swap(indices[i],indices[end]);
@@ -866,7 +866,7 @@ private:
 }
 variance /= s;
 mean_radius /= s;
-variance -= flann_dist(centers[c],centers[c]+veclen_,zero);
+variance -= flann_dist(centers[c],centers[c]+veclen_,zero());
 node->childs[c] = pool.allocate<KMeansNodeSt>();
 node->childs[c]->radius = radiuses[c];

modules/flann/include/opencv2/flann/linear_index.h
@@ -38,14 +38,14 @@
 namespace cvflann
 {
-struct LinearIndexParams : public IndexParams {
+struct CV_EXPORTS LinearIndexParams : public IndexParams {
 LinearIndexParams() : IndexParams(LINEAR) {};
 flann_algorithm_t getIndexType() const { return algorithm; }
 void print() const
 {
-logger.info("Index type: %d\n",(int)algorithm);
+logger().info("Index type: %d\n",(int)algorithm);
 }
 };

modules/flann/include/opencv2/flann/logger.h
@@ -40,7 +40,7 @@ using namespace std;
 namespace cvflann
 {
-class Logger
+class CV_EXPORTS Logger
 {
 FILE* stream;
 int logLevel;
@@ -84,7 +84,7 @@ public:
 int info(const char* fmt, ...);
 };
-extern Logger logger;
+CV_EXPORTS Logger& logger();
 } // namespace cvflann

modules/flann/include/opencv2/flann/object_factory.h
@@ -31,6 +31,7 @@
 #ifndef OBJECT_FACTORY_H_
 #define OBJECT_FACTORY_H_
+#include "opencv2/core/types_c.h"
 #include <map>
 namespace cvflann

modules/flann/include/opencv2/flann/random.h
@@ -43,17 +43,17 @@ namespace cvflann
 /**
 * Seeds the random number generator
 */
-void seed_random(unsigned int seed);
+CV_EXPORTS void seed_random(unsigned int seed);
 /*
 * Generates a random double value.
 */
-double rand_double(double high = 1.0, double low=0);
+CV_EXPORTS double rand_double(double high = 1.0, double low=0);
 /*
 * Generates a random integer value.
 */
-int rand_int(int high = RAND_MAX, int low = 0);
+CV_EXPORTS int rand_int(int high = RAND_MAX, int low = 0);
 /**
@@ -63,7 +63,7 @@ int rand_int(int high = RAND_MAX, int low = 0);
 * TODO: improve on this to use a generator function instead of an
 * array of randomly permuted numbers
 */
-class UniqueRandom
+class CV_EXPORTS UniqueRandom
 {
 int* vals;
 int size;

modules/flann/include/opencv2/flann/result_set.h
@@ -161,7 +161,7 @@ public:
 for (int i=0;i<count;++i) {
 if (indices[i]==index) return false;
 }
-float dist = flann_dist(target, target_end, point);
+float dist = (float)flann_dist(target, target_end, point);
 if (count<capacity) {
 indices[count] = index;

modules/flann/include/opencv2/flann/saving.h
@@ -47,13 +47,13 @@ template<> struct Datatype<float> { static flann_datatype_t type() { return FLOA
 template<> struct Datatype<double> { static flann_datatype_t type() { return FLOAT64; } };
-extern const char FLANN_SIGNATURE[];
+CV_EXPORTS const char* FLANN_SIGNATURE();
-extern const char FLANN_VERSION[];
+CV_EXPORTS const char* FLANN_VERSION();
 /**
 * Structure representing the index header.
 */
-struct IndexHeader
+struct CV_EXPORTS IndexHeader
 {
 char signature[16];
 char version[16];
@@ -74,12 +74,12 @@ void save_header(FILE* stream, const NNIndex<ELEM_TYPE>& index)
 {
 IndexHeader header;
 memset(header.signature, 0 , sizeof(header.signature));
-strcpy(header.signature, FLANN_SIGNATURE);
+strcpy(header.signature, FLANN_SIGNATURE());
 memset(header.version, 0 , sizeof(header.version));
-strcpy(header.version, FLANN_VERSION);
+strcpy(header.version, FLANN_VERSION());
 header.data_type = Datatype<ELEM_TYPE>::type();
 header.index_type = index.getType();
-header.rows = index.size();
+header.rows = (int)index.size();
 header.cols = index.veclen();
 std::fwrite(&header, sizeof(header),1,stream);
@@ -91,7 +91,7 @@ void save_header(FILE* stream, const NNIndex<ELEM_TYPE>& index)
 * @param stream - Stream to load from
 * @return Index header
 */
-IndexHeader load_header(FILE* stream);
+CV_EXPORTS IndexHeader load_header(FILE* stream);
 template<typename T>

modules/flann/include/opencv2/flann/simplex_downhill.h
@@ -123,7 +123,7 @@ float optimizeSimplexDownhill(T* points, int n, F func, float* vals = NULL )
 if (val_r>=vals[0] && val_r<vals[n]) {
 // reflection between second highest and lowest
 // add it to the simplex
-logger.info("Choosing reflection\n");
+logger().info("Choosing reflection\n");
 addValue(n, val_r,vals, p_r, points, n);
 continue;
 }
@@ -138,11 +138,11 @@ float optimizeSimplexDownhill(T* points, int n, F func, float* vals = NULL )
 float val_e = func(p_e);
 if (val_e<val_r) {
-logger.info("Choosing reflection and expansion\n");
+logger().info("Choosing reflection and expansion\n");
 addValue(n, val_e,vals,p_e,points,n);
 }
 else {
-logger.info("Choosing reflection\n");
+logger().info("Choosing reflection\n");
 addValue(n, val_r,vals,p_r,points,n);
 }
 continue;
@@ -154,13 +154,13 @@ float optimizeSimplexDownhill(T* points, int n, F func, float* vals = NULL )
 float val_e = func(p_e);
 if (val_e<vals[n]) {
-logger.info("Choosing contraction\n");
+logger().info("Choosing contraction\n");
 addValue(n,val_e,vals,p_e,points,n);
 continue;
 }
 }
 {
-logger.info("Full contraction\n");
+logger().info("Full contraction\n");
 for (int j=1;j<=n;++j) {
 for (int i=0;i<n;++i) {
 points[j*n+i] = (points[j*n+i]+points[i])/2;

modules/flann/include/opencv2/flann/timer.h
@@ -42,7 +42,7 @@ namespace cvflann
 *
 * Can be used to time portions of code.
 */
-class StartStopTimer
+class CV_EXPORTS StartStopTimer
 {
 clock_t startTime;

modules/flann/src/flann.cpp
@@ -26,19 +26,7 @@
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *************************************************************************/
-#include <cstdio>
+#include "precomp.hpp"
-#include <cstdarg>
-#include <sstream>
-#include "opencv2/flann/dist.h"
-#include "opencv2/flann/index_testing.h"
-#include "opencv2/flann/logger.h"
-#include "opencv2/flann/logger.h"
-#include "opencv2/flann/saving.h"
-#include "opencv2/flann/general.h"
-// index types
-#include "opencv2/flann/all_indices.h"
 namespace cvflann
 {
@@ -47,17 +35,20 @@ namespace cvflann
 /** Global variable indicating the distance metric
 * to be used.
 */
-flann_distance_t flann_distance_type = EUCLIDEAN;
+flann_distance_t flann_distance_type_ = EUCLIDEAN;
+flann_distance_t flann_distance_type() { return flann_distance_type_; }
 /**
 * Zero iterator that emulates a zero feature.
 */
-ZeroIterator<float> zero;
+ZeroIterator<float> zero_;
+ZeroIterator<float>& zero() { return zero_; }
 /**
 * Order of Minkowski distance to use.
 */
-int flann_minkowski_order;
+int flann_minkowski_order_;
+int flann_minkowski_order() { return flann_minkowski_order_; }
 double euclidean_dist(const unsigned char* first1, const unsigned char* last1, unsigned char* first2, double acc)
@@ -98,9 +89,11 @@ int countCorrectMatches(int* neighbors, int* groundTruth, int n)
 return count;
 }
-// ----------------------- logger.cpp ---------------------------
+// ----------------------- logger().cpp ---------------------------
+Logger logger_;
-Logger logger;
+Logger& logger() { return logger_; }
 int Logger::log(int level, const char* fmt, ...)
 {
@@ -163,20 +156,22 @@ int rand_int(int high, int low)
 // ----------------------- saving.cpp ---------------------------
-const char FLANN_SIGNATURE[] = "FLANN_INDEX";
+const char FLANN_SIGNATURE_[] = "FLANN_INDEX";
-const char FLANN_VERSION[] = "1.5.0";
+const char FLANN_VERSION_[] = "1.5.0";
+const char* FLANN_SIGNATURE() { return FLANN_SIGNATURE_; }
+const char* FLANN_VERSION() { return FLANN_VERSION_; }
 IndexHeader load_header(FILE* stream)
 {
 IndexHeader header;
-int read_size = fread(&header,sizeof(header),1,stream);
+size_t read_size = fread(&header,sizeof(header),1,stream);
 if (read_size!=1) {
 throw FLANNException("Invalid index file, cannot read");
 }
-if (strcmp(header.signature,FLANN_SIGNATURE)!=0) {
+if (strcmp(header.signature,FLANN_SIGNATURE())!=0) {
 throw FLANNException("Invalid index file, wrong signature");
 }
@@ -190,14 +185,14 @@ IndexHeader load_header(FILE* stream)
 void log_verbosity(int level)
 {
 if (level>=0) {
-logger.setLevel(level);
+logger().setLevel(level);
 }
 }
 void set_distance_type(flann_distance_t distance_type, int order)
 {
-flann_distance_type = distance_type;
+flann_distance_type_ = distance_type;
-flann_minkowski_order = order;
+flann_minkowski_order_ = order;
 }
 class StaticInit

modules/flann/src/precomp.cpp
@@ -0,0 +1 @@
+#include "precomp.hpp"

modules/flann/src/precomp.hpp
@@ -0,0 +1,18 @@
+#ifndef _OPENCV_FLANN_PRECOMP_HPP_
+#define _OPENCV_FLANN_PRECOMP_HPP_
+#include <cstdio>
+#include <cstdarg>
+#include <sstream>
+#include "opencv2/flann/dist.h"
+#include "opencv2/flann/index_testing.h"
+#include "opencv2/flann/logger.h"
+#include "opencv2/flann/saving.h"
+#include "opencv2/flann/general.h"
+// index types
+#include "opencv2/flann/all_indices.h"
+#endif

modules/objdetect/src/_lsvm_error.h
@@ -1,5 +1,5 @@
-#ifndef SVM_ERROR
+#ifndef LSVM_ERROR
-#define SVM_ERROR
+#define LSVM_ERROR
 #define LATENT_SVM_OK 0
 #define DISTANCE_TRANSFORM_OK 1

samples/c/find_obj.cpp
@@ -171,7 +171,7 @@ locatePlanarObject( const CvSeq* objectKeypoints, const CvSeq* objectDescriptors
 findPairs( objectKeypoints, objectDescriptors, imageKeypoints, imageDescriptors, ptpairs );
 #endif
-n = ptpairs.size()/2;
+n = (int)(ptpairs.size()/2);
 if( n < 4 )
 return 0;
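A note on the extern-to-function rewrites visible in dist.h, logger.h, saving.h and flann.cpp above: instead of exporting a mutable global object, the commit keeps the object inside one translation unit and exports only an accessor function, so clients reach the state through a call and can no longer reassign the global directly. Below is a self-contained sketch of the same pattern with illustrative names (mylib, minkowski_order), not the actual FLANN identifiers.

#include <iostream>

namespace mylib {

// The definition stays file-local (in a real library it would live in the .cpp),
// so no data symbol has to be exported from the DLL.
static int minkowski_order_ = 2;

// Read access goes through an exported function; callers get a copy, not the variable.
int minkowski_order() { return minkowski_order_; }

// Writes happen only through an explicit setter, mirroring set_distance_type() in the diff above.
void set_minkowski_order(int order) { minkowski_order_ = order; }

} // namespace mylib

int main()
{
    mylib::set_minkowski_order(3);
    std::cout << "order = " << mylib::minkowski_order() << "\n"; // prints: order = 3
    // mylib::minkowski_order() = 4;   // would not compile: the global is effectively read-only
    return 0;
}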
