From da383e65e2c8397e100b4a054a63a9d6192d6b46 Mon Sep 17 00:00:00 2001
From: Maksim Shabunin
Date: Thu, 5 Feb 2015 17:40:15 +0300
Subject: [PATCH 1/7] Remove deprecated methods from cv::Algorithm

---
 apps/traincascade/old_ml.hpp                   |   97 --
 doc/Doxyfile.in                                |    5 +-
 modules/calib3d/src/levmarq.cpp                |    9 -
 modules/calib3d/src/ptsetreg.cpp               |   16 -
 modules/calib3d/src/stereobm.cpp               |    2 -
 modules/calib3d/src/stereosgbm.cpp             |    2 -
 modules/core/include/opencv2/core.hpp          |  358 +----
 .../core/include/opencv2/core/operations.hpp   |   78 --
 modules/core/include/opencv2/core/private.hpp  |   34 -
 modules/core/src/algorithm.cpp                 | 1167 -----------------
 modules/cudaimgproc/src/histogram.cpp          |    7 -
 modules/cudaoptflow/perf/perf_optflow.cpp      |    8 +-
 modules/cudaoptflow/test/test_optflow.cpp      |   10 +-
 modules/imgproc/src/clahe.cpp                  |    7 -
 modules/shape/include/opencv2/shape.hpp        |    5 -
 modules/shape/src/aff_trans.cpp                |    2 -
 modules/shape/src/haus_dis.cpp                 |    2 -
 modules/shape/src/hist_cost.cpp                |    8 -
 modules/shape/src/sc_dis.cpp                   |    2 -
 modules/shape/src/tps_trans.cpp                |    2 -
 modules/superres/include/opencv2/superres.hpp  |   34 +-
 .../include/opencv2/superres/optical_flow.hpp  |   68 +-
 modules/superres/perf/perf_superres.cpp        |   24 +-
 modules/superres/src/btv_l1.cpp                |   38 +-
 modules/superres/src/btv_l1_cuda.cpp           |   35 +-
 modules/superres/src/optical_flow.cpp          |  239 ++--
 modules/superres/src/super_resolution.cpp      |    5 -
 modules/superres/test/test_superres.cpp        |    8 +-
 .../video/include/opencv2/video/tracking.hpp   |   53 +-
 .../perf/opencl/perf_optflow_dualTVL1.cpp      |   10 +-
 modules/video/src/bgfg_KNN.cpp                 |    2 -
 modules/video/src/bgfg_gaussmix2.cpp           |    2 -
 modules/video/src/tvl1flow.cpp                 |   43 +-
 .../video/test/ocl/test_optflow_tvl1flow.cpp   |   10 +-
 .../AKAZE_tracking/planar_tracking.cpp         |    4 +-
 samples/gpu/super_resolution.cpp               |   26 +-
 samples/gpu/surf_keypoint_matcher.cpp          |    8 +-
 37 files changed, 402 insertions(+), 2028 deletions(-)
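
The thrust of the series: parameters move from the string-keyed Algorithm::get/set machinery to typed, per-class accessors. A minimal before/after sketch (the SIFT factory and the setContrastThreshold() accessor come from the xfeatures2d example in the core.hpp hunks below; treat the exact accessor names as illustrative):

@code
// Before: untyped, string-keyed access through AlgorithmInfo (removed by this patch).
//     sift->set("contrastThreshold", 0.01);
//     double t = sift->get<double>("contrastThreshold");
// After: explicit factory plus typed accessors.
Ptr<cv::xfeatures2d::SIFT> sift = cv::xfeatures2d::SIFT::create();
sift->setContrastThreshold(0.01);
@endcode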

diff --git a/apps/traincascade/old_ml.hpp b/apps/traincascade/old_ml.hpp
index 6ec31a025d..bf0cd15f97 100644
--- a/apps/traincascade/old_ml.hpp
+++ b/apps/traincascade/old_ml.hpp
@@ -122,7 +122,6 @@ CV_INLINE CvParamLattice cvDefaultParamLattice( void )
 #define CV_TYPE_NAME_ML_SVM         "opencv-ml-svm"
 #define CV_TYPE_NAME_ML_KNN         "opencv-ml-knn"
 #define CV_TYPE_NAME_ML_NBAYES      "opencv-ml-bayesian"
-#define CV_TYPE_NAME_ML_EM          "opencv-ml-em"
 #define CV_TYPE_NAME_ML_BOOSTING    "opencv-ml-boost-tree"
 #define CV_TYPE_NAME_ML_TREE        "opencv-ml-tree"
 #define CV_TYPE_NAME_ML_ANN_MLP     "opencv-ml-ann-mlp"
@@ -562,100 +561,6 @@ private:
     CvSVM& operator = (const CvSVM&);
 };
 
-/****************************************************************************************\
-*                              Expectation - Maximization                                *
-\****************************************************************************************/
-namespace cv
-{
-class EM : public Algorithm
-{
-public:
-    // Type of covariation matrices
-    enum {COV_MAT_SPHERICAL=0, COV_MAT_DIAGONAL=1, COV_MAT_GENERIC=2, COV_MAT_DEFAULT=COV_MAT_DIAGONAL};
-
-    // Default parameters
-    enum {DEFAULT_NCLUSTERS=5, DEFAULT_MAX_ITERS=100};
-
-    // The initial step
-    enum {START_E_STEP=1, START_M_STEP=2, START_AUTO_STEP=0};
-
-    CV_WRAP EM(int nclusters=EM::DEFAULT_NCLUSTERS, int covMatType=EM::COV_MAT_DIAGONAL,
-               const TermCriteria& termCrit=TermCriteria(TermCriteria::COUNT+TermCriteria::EPS,
-                                                         EM::DEFAULT_MAX_ITERS, FLT_EPSILON));
-
-    virtual ~EM();
-    CV_WRAP virtual void clear();
-
-    CV_WRAP virtual bool train(InputArray samples,
-                               OutputArray logLikelihoods=noArray(),
-                               OutputArray labels=noArray(),
-                               OutputArray probs=noArray());
-
-    CV_WRAP virtual bool trainE(InputArray samples,
-                                InputArray means0,
-                                InputArray covs0=noArray(),
-                                InputArray weights0=noArray(),
-                                OutputArray logLikelihoods=noArray(),
-                                OutputArray labels=noArray(),
-                                OutputArray probs=noArray());
-
-    CV_WRAP virtual bool trainM(InputArray samples,
-                                InputArray probs0,
-                                OutputArray logLikelihoods=noArray(),
-                                OutputArray labels=noArray(),
-                                OutputArray probs=noArray());
-
-    CV_WRAP Vec2d predict(InputArray sample,
-                          OutputArray probs=noArray()) const;
-
-    CV_WRAP bool isTrained() const;
-
-    AlgorithmInfo* info() const;
-    virtual void read(const FileNode& fn);
-
-protected:
-
-    virtual void setTrainData(int startStep, const Mat& samples,
-                              const Mat* probs0,
-                              const Mat* means0,
-                              const std::vector<Mat>* covs0,
-                              const Mat* weights0);
-
-    bool doTrain(int startStep,
-                 OutputArray logLikelihoods,
-                 OutputArray labels,
-                 OutputArray probs);
-    virtual void eStep();
-    virtual void mStep();
-
-    void clusterTrainSamples();
-    void decomposeCovs();
-    void computeLogWeightDivDet();
-
-    Vec2d computeProbabilities(const Mat& sample, Mat* probs) const;
-
-    // all inner matrices have type CV_64FC1
-    CV_PROP_RW int nclusters;
-    CV_PROP_RW int covMatType;
-    CV_PROP_RW int maxIters;
-    CV_PROP_RW double epsilon;
-
-    Mat trainSamples;
-    Mat trainProbs;
-    Mat trainLogLikelihoods;
-    Mat trainLabels;
-
-    CV_PROP Mat weights;
-    CV_PROP Mat means;
-    CV_PROP std::vector<Mat> covs;
-
-    std::vector<Mat> covsEigenValues;
-    std::vector<Mat> covsRotateMats;
-    std::vector<Mat> invCovsEigenValues;
-    Mat logWeightDivDet;
-};
-} // namespace cv
-
 /****************************************************************************************\
 *                                      Decision Tree                                     *
 \****************************************************************************************/
 
@@ -2155,8 +2060,6 @@ typedef CvGBTreesParams GradientBoostingTreeParams;
 typedef CvGBTrees GradientBoostingTrees;
 
 template<> void DefaultDeleter<CvDTreeSplit>::operator ()(CvDTreeSplit* obj) const;
-
-bool initModule_ml(void);
 }
 
 #endif // __cplusplus
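
The EM class deleted above is not lost functionality: the trainable model already lives in the new ml module. A minimal sketch of the replacement path, assuming the cv::ml::EM interface of the 3.0 ml API:

@code
#include "opencv2/ml.hpp"

cv::Mat samples(100, 2, CV_32FC1);                 // one sample per row
cv::randu(samples, cv::Scalar(0), cv::Scalar(10));

cv::Ptr<cv::ml::EM> em = cv::ml::EM::create();
em->setClustersNumber(5);                          // typed accessor instead of set("nclusters", 5)
cv::Mat labels;
em->trainEM(samples, cv::noArray(), labels, cv::noArray());
@endcode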

diff --git a/doc/Doxyfile.in b/doc/Doxyfile.in
index 8b310a2e39..79af5ac359 100644
--- a/doc/Doxyfile.in
+++ b/doc/Doxyfile.in
@@ -100,7 +100,7 @@ RECURSIVE              = YES
 EXCLUDE                =
 EXCLUDE_SYMLINKS       = NO
 EXCLUDE_PATTERNS       = *.inl.hpp *.impl.hpp *_detail.hpp */cudev/**/detail/*.hpp
-EXCLUDE_SYMBOLS        = cv::DataType<*> int
+EXCLUDE_SYMBOLS        = cv::DataType<*> int void
 EXAMPLE_PATH           = @CMAKE_DOXYGEN_EXAMPLE_PATH@
 EXAMPLE_PATTERNS       = *
 EXAMPLE_RECURSIVE      = YES
@@ -243,7 +243,8 @@ PREDEFINED             = __cplusplus=1 \
                          CV_NORETURN= \
                          CV_DEFAULT(x)=" = x" \
                          CV_NEON=1 \
-                         FLANN_DEPRECATED=
+                         FLANN_DEPRECATED= \
+                         "CV_PURE_PROPERTY(type, name)= /**\@{*/ virtual type get##name() const = 0; virtual void set##name(type _##name) = 0; /**\@}*/"
 EXPAND_AS_DEFINED      =
 SKIP_FUNCTION_MACROS   = YES
 TAGFILES               =
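
The new PREDEFINED entry makes Doxygen expand the property macro, so the generated accessors are documented instead of the macro call. For a hypothetical CV_PURE_PROPERTY(int, MaxIters) the rendered declarations are:

@code
virtual int getMaxIters() const = 0;
virtual void setMaxIters(int _MaxIters) = 0;
@endcode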

diff --git a/modules/calib3d/src/levmarq.cpp b/modules/calib3d/src/levmarq.cpp
index d3eb7b556f..bcf08e091a 100644
--- a/modules/calib3d/src/levmarq.cpp
+++ b/modules/calib3d/src/levmarq.cpp
@@ -200,8 +200,6 @@ public:
 
     void setCallback(const Ptr<LMSolver::Callback>& _cb) { cb = _cb; }
 
-    AlgorithmInfo* info() const;
-
     Ptr<LMSolver::Callback> cb;
 
     double epsx;
@@ -211,15 +209,8 @@ public:
 };
 
 
-CV_INIT_ALGORITHM(LMSolverImpl, "LMSolver",
-                  obj.info()->addParam(obj, "epsx", obj.epsx);
-                  obj.info()->addParam(obj, "epsf", obj.epsf);
-                  obj.info()->addParam(obj, "maxIters", obj.maxIters);
-                  obj.info()->addParam(obj, "printInterval", obj.printInterval))
-
 Ptr<LMSolver> createLMSolver(const Ptr<LMSolver::Callback>& cb, int maxIters)
 {
-    CV_Assert( !LMSolverImpl_info_auto.name().empty() );
     return makePtr<LMSolverImpl>(cb, maxIters);
 }
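
With the registration macro gone there is no name-table entry to assert on; creation is a direct makePtr. A usage sketch, where MyCallback stands in for a user-supplied LMSolver::Callback implementation (hypothetical name):

@code
cv::Ptr<cv::LMSolver> solver = cv::createLMSolver(cv::makePtr<MyCallback>(), 100);
cv::Mat param = cv::Mat::zeros(6, 1, CV_64F);  // initial parameter vector, refined in place
solver->run(param);
@endcode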

diff --git a/modules/calib3d/src/ptsetreg.cpp b/modules/calib3d/src/ptsetreg.cpp
index 713f8b90f2..2a81a33ff5 100644
--- a/modules/calib3d/src/ptsetreg.cpp
+++ b/modules/calib3d/src/ptsetreg.cpp
@@ -256,8 +256,6 @@ public:
 
     void setCallback(const Ptr<PointSetRegistrator::Callback>& _cb) { cb = _cb; }
 
-    AlgorithmInfo* info() const;
-
     Ptr<PointSetRegistrator::Callback> cb;
     int modelPoints;
     bool checkPartialSubsets;
@@ -378,25 +376,12 @@ public:
 
         return result;
     }
 
-    AlgorithmInfo* info() const;
 };
 
-
-CV_INIT_ALGORITHM(RANSACPointSetRegistrator, "PointSetRegistrator.RANSAC",
-                  obj.info()->addParam(obj, "threshold", obj.threshold);
-                  obj.info()->addParam(obj, "confidence", obj.confidence);
-                  obj.info()->addParam(obj, "maxIters", obj.maxIters))
-
-CV_INIT_ALGORITHM(LMeDSPointSetRegistrator, "PointSetRegistrator.LMeDS",
-                  obj.info()->addParam(obj, "confidence", obj.confidence);
-                  obj.info()->addParam(obj, "maxIters", obj.maxIters))
-
-
 Ptr<PointSetRegistrator> createRANSACPointSetRegistrator(const Ptr<PointSetRegistrator::Callback>& _cb,
                                                          int _modelPoints, double _threshold,
                                                          double _confidence, int _maxIters)
 {
-    CV_Assert( !RANSACPointSetRegistrator_info_auto.name().empty() );
     return Ptr<PointSetRegistrator>(
         new RANSACPointSetRegistrator(_cb, _modelPoints, _threshold, _confidence, _maxIters));
 }
@@ -405,7 +390,6 @@ Ptr<PointSetRegistrator> createRANSACPointSetRegistrator(const Ptr<PointSetRegis
 Ptr<PointSetRegistrator> createLMeDSPointSetRegistrator(const Ptr<PointSetRegistrator::Callback>& _cb,
                                                         int _modelPoints, double _confidence, int _maxIters)
 {
-    CV_Assert( !LMeDSPointSetRegistrator_info_auto.name().empty() );
     return Ptr<PointSetRegistrator>(
         new LMeDSPointSetRegistrator(_cb, _modelPoints, _confidence, _maxIters));
 }

diff --git a/modules/calib3d/src/stereobm.cpp b/modules/calib3d/src/stereobm.cpp
index 72802b624f..f093f2b2ba 100644
--- a/modules/calib3d/src/stereobm.cpp
+++ b/modules/calib3d/src/stereobm.cpp
@@ -1010,8 +1010,6 @@ public:
         disp.convertTo(disp0, disp0.type(), 1./(1 << DISPARITY_SHIFT), 0);
     }
 
-    AlgorithmInfo* info() const { return 0; }
-
     int getMinDisparity() const { return params.minDisparity; }
     void setMinDisparity(int minDisparity) { params.minDisparity = minDisparity; }
 
diff --git a/modules/calib3d/src/stereosgbm.cpp b/modules/calib3d/src/stereosgbm.cpp
index 1793ee0945..4b0aa5a25b 100644
--- a/modules/calib3d/src/stereosgbm.cpp
+++ b/modules/calib3d/src/stereosgbm.cpp
@@ -865,8 +865,6 @@ public:
                       StereoMatcher::DISP_SCALE*params.speckleRange, buffer);
     }
 
-    AlgorithmInfo* info() const { return 0; }
-
     int getMinDisparity() const { return params.minDisparity; }
     void setMinDisparity(int minDisparity) { params.minDisparity = minDisparity; }
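
Dropping the stub info() does not change how the block matchers are driven; the typed accessors kept by these hunks are the whole interface:

@code
cv::Mat left  = cv::imread("left.png",  cv::IMREAD_GRAYSCALE);
cv::Mat right = cv::imread("right.png", cv::IMREAD_GRAYSCALE);

cv::Ptr<cv::StereoBM> bm = cv::StereoBM::create(64, 21);  // numDisparities, blockSize
bm->setMinDisparity(0);                                   // accessor kept above
cv::Mat disparity;
bm->compute(left, right, disparity);
@endcode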

diff --git a/modules/core/include/opencv2/core.hpp b/modules/core/include/opencv2/core.hpp
index 76fb3fd520..701c0e3a07 100644
--- a/modules/core/include/opencv2/core.hpp
+++ b/modules/core/include/opencv2/core.hpp
@@ -2745,8 +2745,6 @@ public:
 //////////////////////////////////////// Algorithm ////////////////////////////////////
 
 class CV_EXPORTS Algorithm;
-class CV_EXPORTS AlgorithmInfo;
-struct CV_EXPORTS AlgorithmInfoData;
 
 template<typename _Tp> struct ParamType {};
 
@@ -2759,32 +2757,13 @@
 matching, graph-cut etc.), background subtraction (which can be done using mixture-of-gaussians
 models, codebook-based algorithm etc.), optical flow (block matching, Lucas-Kanade, Horn-Schunck
 etc.).
 
-The class provides the following features for all derived classes:
-
-- so called "virtual constructor". That is, each Algorithm derivative is registered at program
-  start and you can get the list of registered algorithms and create instance of a particular
-  algorithm by its name (see Algorithm::create). If you plan to add your own algorithms, it is
-  good practice to add a unique prefix to your algorithms to distinguish them from other
-  algorithms.
-- setting/retrieving algorithm parameters by name. If you used video capturing functionality
-  from OpenCV videoio module, you are probably familar with cvSetCaptureProperty(),
-  cvGetCaptureProperty(), VideoCapture::set() and VideoCapture::get(). Algorithm provides
-  similar method where instead of integer id's you specify the parameter names as text strings.
-  See Algorithm::set and Algorithm::get for details.
-- reading and writing parameters from/to XML or YAML files. Every Algorithm derivative can store
-  all its parameters and then read them back. There is no need to re-implement it each time.
-
 Here is example of SIFT use in your application via Algorithm interface:
 @code
 #include "opencv2/opencv.hpp"
 #include "opencv2/xfeatures2d.hpp"
-
 using namespace cv::xfeatures2d;
-...
-Ptr<SIFT> sift = SIFT::create();
-
+Ptr<SIFT> sift = SIFT::create();
 FileStorage fs("sift_params.xml", FileStorage::READ);
 if( fs.isOpened() ) // if we have file with parameters, read them
 {
@@ -2794,323 +2773,80 @@ Here is example of SIFT use in your application via Algorithm interface:
 else // else modify the parameters and store them; user can later edit the file to use different parameters
 {
     sift->setContrastThreshold(0.01f); // lower the contrast threshold, compared to the default value
-    {
-        WriteStructContext ws(fs, "sift_params", CV_NODE_MAP);
-        sift->write(fs);
+    WriteStructContext ws(fs, "sift_params", CV_NODE_MAP);
+    sift->write(fs);
     }
 }
-
 Mat image = imread("myimage.png", 0), descriptors;
 vector<KeyPoint> keypoints;
 sift->detectAndCompute(image, noArray(), keypoints, descriptors);
 @endcode
-
-Creating Own Algorithms
------------------------
-If you want to make your own algorithm, derived from Algorithm, you should basically follow a few
-conventions and add a little semi-standard piece of code to your class:
-- Make a class and specify Algorithm as its base class.
-- The algorithm parameters should be the class members. See Algorithm::get() for the list of
-  possible types of the parameters.
-- Add public virtual method `AlgorithmInfo* info() const;` to your class.
-- Add constructor function, AlgorithmInfo instance and implement the info() method. The simplest
-  way is to take as
-  the reference and modify it according to the list of your parameters.
-- Add some public function (e.g. `initModule_()`) that calls info() of your algorithm
-  and put it into the same source file as info() implementation. This is to force C++ linker to
-  include this object file into the target application. See Algorithm::create() for details.
 */
 class CV_EXPORTS_W Algorithm
-    {
-    public:
+{
+public:
     Algorithm();
     virtual ~Algorithm();
 
-    /**Returns the algorithm name*/
-    String name() const;
-
-    /** @brief returns the algorithm parameter
-
-    The method returns value of the particular parameter. Since the compiler can not deduce the
-    type of the returned parameter, you should specify it explicitly in angle brackets. Here are
-    the allowed forms of get:
-
-    - myalgo.get\<int\>("param_name")
-    - myalgo.get\<double\>("param_name")
-    - myalgo.get\<bool\>("param_name")
-    - myalgo.get\<String\>("param_name")
-    - myalgo.get\<Mat\>("param_name")
-    - myalgo.get\<vector\<Mat\> \>("param_name")
-    - myalgo.get\<Algorithm\>("param_name") (it returns Ptr\<Algorithm\>).
-
-    In some cases the actual type of the parameter can be cast to the specified type, e.g. integer
-    parameter can be cast to double, bool can be cast to int. But "dangerous" transformations
-    (string\<-\>number, double-\>int, 1x1 Mat\<-\>number, ...) are not performed and the method
-    will throw an exception. In the case of Mat or vector\<Mat\> parameters the method does not
-    clone the matrix data, so do not modify the matrices. Use Algorithm::set instead - slower, but
-    more safe.
-    @param name The parameter name.
-    */
-    template<typename _Tp> typename ParamType<_Tp>::member_type get(const String& name) const;
-    /** @overload */
-    template<typename _Tp> typename ParamType<_Tp>::member_type get(const char* name) const;
-
-    CV_WRAP int getInt(const String& name) const;
-    CV_WRAP double getDouble(const String& name) const;
-    CV_WRAP bool getBool(const String& name) const;
-    CV_WRAP String getString(const String& name) const;
-    CV_WRAP Mat getMat(const String& name) const;
-    CV_WRAP std::vector<Mat> getMatVector(const String& name) const;
-    CV_WRAP Ptr<Algorithm> getAlgorithm(const String& name) const;
-
-    /** @brief Sets the algorithm parameter
-
-    The method sets value of the particular parameter. Some of the algorithm
-    parameters may be declared as read-only. If you try to set such a
-    parameter, you will get exception with the corresponding error message.
-    @param name The parameter name.
-    @param value The parameter value.
-    */
-    void set(const String& name, int value);
-    void set(const String& name, double value);
-    void set(const String& name, bool value);
-    void set(const String& name, const String& value);
-    void set(const String& name, const Mat& value);
-    void set(const String& name, const std::vector<Mat>& value);
-    void set(const String& name, const Ptr<Algorithm>& value);
-    template<typename _Tp> void set(const String& name, const Ptr<_Tp>& value);
-
-    CV_WRAP void setInt(const String& name, int value);
-    CV_WRAP void setDouble(const String& name, double value);
-    CV_WRAP void setBool(const String& name, bool value);
-    CV_WRAP void setString(const String& name, const String& value);
-    CV_WRAP void setMat(const String& name, const Mat& value);
-    CV_WRAP void setMatVector(const String& name, const std::vector<Mat>& value);
-    CV_WRAP void setAlgorithm(const String& name, const Ptr<Algorithm>& value);
-    template<typename _Tp> void setAlgorithm(const String& name, const Ptr<_Tp>& value);
-
-    void set(const char* name, int value);
-    void set(const char* name, double value);
-    void set(const char* name, bool value);
-    void set(const char* name, const String& value);
-    void set(const char* name, const Mat& value);
-    void set(const char* name, const std::vector<Mat>& value);
-    void set(const char* name, const Ptr<Algorithm>& value);
-    template<typename _Tp> void set(const char* name, const Ptr<_Tp>& value);
-
-    void setInt(const char* name, int value);
-    void setDouble(const char* name, double value);
-    void setBool(const char* name, bool value);
-    void setString(const char* name, const String& value);
-    void setMat(const char* name, const Mat& value);
-    void setMatVector(const char* name, const std::vector<Mat>& value);
-    void setAlgorithm(const char* name, const Ptr<Algorithm>& value);
-    template<typename _Tp> void setAlgorithm(const char* name, const Ptr<_Tp>& value);
-
-    CV_WRAP String paramHelp(const String& name) const;
-    int paramType(const char* name) const;
-    CV_WRAP int paramType(const String& name) const;
-    CV_WRAP void getParams(CV_OUT std::vector<String>& names) const;
 
     /** @brief Stores algorithm parameters in a file storage
-
-    The method stores all the algorithm parameters (in alphabetic order) to
-    the file storage. The method is virtual. If you define your own
-    Algorithm derivative, your can override the method and store some extra
-    information. However, it's rarely needed. Here are some examples:
-    - SIFT feature detector (from xfeatures2d module). The class only
-      stores algorithm parameters and no keypoints or their descriptors.
-      Therefore, it's enough to store the algorithm parameters, which is
-      what Algorithm::write() does. Therefore, there is no dedicated
-      SIFT::write().
-    - Background subtractor (from video module). It has the algorithm
-      parameters and also it has the current background model. However,
-      the background model is not stored. First, it's rather big. Then,
-      if you have stored the background model, it would likely become
-      irrelevant on the next run (because of shifted camera, changed
-      background, different lighting etc.). Therefore,
-      BackgroundSubtractorMOG and BackgroundSubtractorMOG2 also rely on
-      the standard Algorithm::write() to store just the algorithm
-      parameters.
-    - Expectation Maximization (from ml module). The algorithm finds
-      mixture of gaussians that approximates user data best of all. In
-      this case the model may be re-used on the next run to test new
-      data against the trained statistical model. So EM needs to store
-      the model. However, since the model is described by a few
-      parameters that are available as read-only algorithm parameters
-      (i.e. they are available via EM::get()), EM also relies on
-      Algorithm::write() to store both EM parameters and the model
-      (represented by read-only algorithm parameters).
     @param fs File storage.
     */
-    virtual void write(FileStorage& fs) const;
+    virtual void write(FileStorage& fs) const { (void)fs; }
 
     /** @brief Reads algorithm parameters from a file storage
-
-    The method reads all the algorithm parameters from the specified node of
-    a file storage. Similarly to Algorithm::write(), if you implement an
-    algorithm that needs to read some extra data and/or re-compute some
-    internal data, you may override the method.
     @param fn File node of the file storage.
     */
-    virtual void read(const FileNode& fn);
+    virtual void read(const FileNode& fn) { (void)fn; }
+};
 
-    typedef Algorithm* (*Constructor)(void);
-    typedef int (Algorithm::*Getter)() const;
-    typedef void (Algorithm::*Setter)(int);
+// define properties
 
-    /** @brief Returns the list of registered algorithms
+#define CV_PURE_PROPERTY(type, name) \
+    CV_WRAP virtual type get##name() const = 0; \
+    CV_WRAP virtual void set##name(type _##name) = 0;
 
-    This static method returns the list of registered algorithms in
-    alphabetical order. Here is how to use it :
-    @code{.cpp}
-    vector<String> algorithms;
-    Algorithm::getList(algorithms);
-    cout << "Algorithms: " << algorithms.size() << endl;
-    for (size_t i=0; i < algorithms.size(); i++)
-        cout << algorithms[i] << endl;
-    @endcode
-    @param algorithms The output vector of algorithm names.
-    */
-    CV_WRAP static void getList(CV_OUT std::vector<String>& algorithms);
-    CV_WRAP static Ptr<Algorithm> _create(const String& name);
+#define CV_PURE_PROPERTY_S(type, name) \
+    CV_WRAP virtual type get##name() const = 0; \
+    CV_WRAP virtual void set##name(const type & _##name) = 0;
 
-    /** @brief Creates algorithm instance by name
+#define CV_PURE_PROPERTY_RO(type, name) \
+    CV_WRAP virtual type get##name() const = 0;
 
-    This static method creates a new instance of the specified algorithm. If
-    there is no such algorithm, the method will silently return a null
-    pointer. Also, you should specify the particular Algorithm subclass as
-    _Tp (or simply Algorithm if you do not know it at that point). :
-    @code{.cpp}
-    Ptr<BackgroundSubtractor> bgfg = Algorithm::create<BackgroundSubtractor>("BackgroundSubtractor.MOG2");
-    @endcode
-    @note This is important note about seemingly mysterious behavior of
-    Algorithm::create() when it returns NULL while it should not. The reason
-    is simple - Algorithm::create() resides in OpenCV's core module and the
-    algorithms are implemented in other modules. If you create algorithms
-    dynamically, C++ linker may decide to throw away the modules where the
-    actual algorithms are implemented, since you do not call any functions
-    from the modules. To avoid this problem, you need to call
-    initModule_\<modulename\>(); somewhere in the beginning of the program
-    before Algorithm::create(). For example, call initModule_xfeatures2d()
-    in order to use SURF/SIFT, call initModule_ml() to use expectation
-    maximization etc.
-    @param name The algorithm name, one of the names returned by Algorithm::getList().
-    */
-    template<typename _Tp> static Ptr<_Tp> create(const String& name);
+// basic property implementation
 
-    virtual AlgorithmInfo* info() const /* TODO: make it = 0;*/ { return 0; }
-};
+#define CV_IMPL_PROPERTY(type, name, member) \
+    type get##name() const \
+    { \
+        return member; \
+    } \
+    void set##name(type val) \
+    { \
+        member = val; \
+    }
 
-/** @todo document */
-class CV_EXPORTS AlgorithmInfo
-{
-public:
-    friend class Algorithm;
-    AlgorithmInfo(const String& name, Algorithm::Constructor create);
-    ~AlgorithmInfo();
-    void get(const Algorithm* algo, const char* name, int argType, void* value) const;
-    void addParam_(Algorithm& algo, const char* name, int argType,
-                   void* value, bool readOnly,
-                   Algorithm::Getter getter, Algorithm::Setter setter,
-                   const String& help=String());
-    String paramHelp(const char* name) const;
-    int paramType(const char* name) const;
-    void getParams(std::vector<String>& names) const;
-
-    void write(const Algorithm* algo, FileStorage& fs) const;
-    void read(Algorithm* algo, const FileNode& fn) const;
-    String name() const;
-
-    void addParam(Algorithm& algo, const char* name,
-                  int& value, bool readOnly=false,
-                  int (Algorithm::*getter)()=0,
-                  void (Algorithm::*setter)(int)=0,
-                  const String& help=String());
-    void addParam(Algorithm& algo, const char* name,
-                  bool& value, bool readOnly=false,
-                  int (Algorithm::*getter)()=0,
-                  void (Algorithm::*setter)(int)=0,
-                  const String& help=String());
-    void addParam(Algorithm& algo, const char* name,
-                  double& value, bool readOnly=false,
-                  double (Algorithm::*getter)()=0,
-                  void (Algorithm::*setter)(double)=0,
-                  const String& help=String());
-    void addParam(Algorithm& algo, const char* name,
-                  String& value, bool readOnly=false,
-                  String (Algorithm::*getter)()=0,
-                  void (Algorithm::*setter)(const String&)=0,
-                  const String& help=String());
-    void addParam(Algorithm& algo, const char* name,
-                  Mat& value, bool readOnly=false,
-                  Mat (Algorithm::*getter)()=0,
-                  void (Algorithm::*setter)(const Mat&)=0,
-                  const String& help=String());
-    void addParam(Algorithm& algo, const char* name,
-                  std::vector<Mat>& value, bool readOnly=false,
-                  std::vector<Mat> (Algorithm::*getter)()=0,
-                  void (Algorithm::*setter)(const std::vector<Mat>&)=0,
-                  const String& help=String());
-    void addParam(Algorithm& algo, const char* name,
-                  Ptr<Algorithm>& value, bool readOnly=false,
-                  Ptr<Algorithm> (Algorithm::*getter)()=0,
-                  void (Algorithm::*setter)(const Ptr<Algorithm>&)=0,
-                  const String& help=String());
-    void addParam(Algorithm& algo, const char* name,
-                  float& value, bool readOnly=false,
-                  float (Algorithm::*getter)()=0,
-                  void (Algorithm::*setter)(float)=0,
-                  const String& help=String());
-    void addParam(Algorithm& algo, const char* name,
-                  unsigned int& value, bool readOnly=false,
-                  unsigned int (Algorithm::*getter)()=0,
-                  void (Algorithm::*setter)(unsigned int)=0,
-                  const String& help=String());
-    void addParam(Algorithm& algo, const char* name,
-                  uint64& value, bool readOnly=false,
-                  uint64 (Algorithm::*getter)()=0,
-                  void (Algorithm::*setter)(uint64)=0,
-                  const String& help=String());
-    void addParam(Algorithm& algo, const char* name,
-                  uchar& value, bool readOnly=false,
-                  uchar (Algorithm::*getter)()=0,
-                  void (Algorithm::*setter)(uchar)=0,
-                  const String& help=String());
-    template<typename _Tp, typename _Base> void addParam(Algorithm& algo, const char* name,
-                  Ptr<_Tp>& value, bool readOnly=false,
-                  Ptr<_Tp> (Algorithm::*getter)()=0,
-                  void (Algorithm::*setter)(const Ptr<_Tp>&)=0,
-                  const String& help=String());
-    template<typename _Tp> void addParam(Algorithm& algo, const char* name,
-                  Ptr<_Tp>& value, bool readOnly=false,
-                  Ptr<_Tp> (Algorithm::*getter)()=0,
-                  void (Algorithm::*setter)(const Ptr<_Tp>&)=0,
-                  const String& help=String());
-protected:
-    AlgorithmInfoData* data;
-    void set(Algorithm* algo, const char* name, int argType,
-             const void* value, bool force=false) const;
-};
+#define CV_IMPL_PROPERTY_S(type, name, member) \
+    type get##name() const \
+    { \
+        return member; \
+    } \
+    void set##name(const type &val) \
+    { \
+        member = val; \
+    }
 
-/** @todo document */
-struct CV_EXPORTS Param
-{
-    enum { INT=0, BOOLEAN=1, REAL=2, STRING=3, MAT=4, MAT_VECTOR=5, ALGORITHM=6, FLOAT=7, UNSIGNED_INT=8, UINT64=9, UCHAR=11 };
-
-    Param();
-    Param(int _type, bool _readonly, int _offset,
-          Algorithm::Getter _getter=0,
-          Algorithm::Setter _setter=0,
-          const String& _help=String());
-    int type;
-    int offset;
-    bool readonly;
-    Algorithm::Getter getter;
-    Algorithm::Setter setter;
-    String help;
+#define CV_IMPL_PROPERTY_RO(type, name, member) \
+    type get##name() const \
+    { \
+        return member; \
+    }
+
+
+struct Param {
+    enum { INT=0, BOOLEAN=1, REAL=2, STRING=3, MAT=4, MAT_VECTOR=5, ALGORITHM=6, FLOAT=7,
+           UNSIGNED_INT=8, UINT64=9, UCHAR=11 };
 };
 
+
+
 template<> struct ParamType<bool>
 {
     typedef bool const_param_type;
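
A sketch of how the new macro pair is meant to be combined, with MyFilter/MyFilterImpl as hypothetical names (CV_PURE_PROPERTY declares the abstract accessors, CV_IMPL_PROPERTY generates the trivial implementation over a member):

@code
class MyFilter : public cv::Algorithm
{
public:
    CV_PURE_PROPERTY(double, Sigma)          // getSigma()/setSigma(double), pure virtual
};

class MyFilterImpl : public MyFilter
{
public:
    CV_IMPL_PROPERTY(double, Sigma, sigma_)  // get/set forwarding to the member
private:
    double sigma_;
};
@endcode

CV_PURE_PROPERTY_S passes the value by const reference (for Mat, String and similar types), and CV_PURE_PROPERTY_RO declares the getter only.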

diff --git a/modules/core/include/opencv2/core/operations.hpp b/modules/core/include/opencv2/core/operations.hpp
index dd9fe1e7bd..067140abb3 100644
--- a/modules/core/include/opencv2/core/operations.hpp
+++ b/modules/core/include/opencv2/core/operations.hpp
@@ -412,84 +412,6 @@ int print(const Matx<_Tp, m, n>& matx, FILE* stream = stdout)
     return print(Formatter::get()->format(cv::Mat(matx)), stream);
 }
 
-
-
-////////////////////////////////////////// Algorithm //////////////////////////////////////////
-
-template<typename _Tp> inline
-Ptr<_Tp> Algorithm::create(const String& name)
-{
-    return _create(name).dynamicCast<_Tp>();
-}
-
-template<typename _Tp> inline
-void Algorithm::set(const char* _name, const Ptr<_Tp>& value)
-{
-    Ptr<Algorithm> algo_ptr = value. template dynamicCast<Algorithm>();
-    if (!algo_ptr) {
-        CV_Error( Error::StsUnsupportedFormat, "unknown/unsupported Ptr type of the second parameter of the method Algorithm::set");
-    }
-    info()->set(this, _name, ParamType<Algorithm>::type, &algo_ptr);
-}
-
-template<typename _Tp> inline
-void Algorithm::set(const String& _name, const Ptr<_Tp>& value)
-{
-    this->set<_Tp>(_name.c_str(), value);
-}
-
-template<typename _Tp> inline
-void Algorithm::setAlgorithm(const char* _name, const Ptr<_Tp>& value)
-{
-    Ptr<Algorithm> algo_ptr = value. template ptr<Algorithm>();
-    if (!algo_ptr) {
-        CV_Error( Error::StsUnsupportedFormat, "unknown/unsupported Ptr type of the second parameter of the method Algorithm::set");
-    }
-    info()->set(this, _name, ParamType<Algorithm>::type, &algo_ptr);
-}
-
-template<typename _Tp> inline
-void Algorithm::setAlgorithm(const String& _name, const Ptr<_Tp>& value)
-{
-    this->set<_Tp>(_name.c_str(), value);
-}
-
-template<typename _Tp> inline
-typename ParamType<_Tp>::member_type Algorithm::get(const String& _name) const
-{
-    typename ParamType<_Tp>::member_type value;
-    info()->get(this, _name.c_str(), ParamType<_Tp>::type, &value);
-    return value;
-}
-
-template<typename _Tp> inline
-typename ParamType<_Tp>::member_type Algorithm::get(const char* _name) const
-{
-    typename ParamType<_Tp>::member_type value;
-    info()->get(this, _name, ParamType<_Tp>::type, &value);
-    return value;
-}
-
-template<typename _Tp, typename _Base> inline
-void AlgorithmInfo::addParam(Algorithm& algo, const char* parameter, Ptr<_Tp>& value, bool readOnly,
-                             Ptr<_Tp> (Algorithm::*getter)(), void (Algorithm::*setter)(const Ptr<_Tp>&),
-                             const String& help)
-{
-    //TODO: static assert: _Tp inherits from _Base
-    addParam_(algo, parameter, ParamType<_Base>::type, &value, readOnly,
-              (Algorithm::Getter)getter, (Algorithm::Setter)setter, help);
-}
-
-template<typename _Tp> inline
-void AlgorithmInfo::addParam(Algorithm& algo, const char* parameter, Ptr<_Tp>& value, bool readOnly,
-                             Ptr<_Tp> (Algorithm::*getter)(), void (Algorithm::*setter)(const Ptr<_Tp>&),
-                             const String& help)
-{
-    //TODO: static assert: _Tp inherits from Algorithm
-    addParam_(algo, parameter, ParamType<Algorithm>::type, &value, readOnly,
-              (Algorithm::Getter)getter, (Algorithm::Setter)setter, help);
-}
-
 //! @endcond
 
 /****************************************************************************************\
diff --git a/modules/core/include/opencv2/core/private.hpp b/modules/core/include/opencv2/core/private.hpp
index c9b2bf66d9..38d18e3000 100644
--- a/modules/core/include/opencv2/core/private.hpp
+++ b/modules/core/include/opencv2/core/private.hpp
@@ -129,40 +129,6 @@ namespace cv
 CV_EXPORTS const char* currentParallelFramework();
 } //namespace cv
 
-#define CV_INIT_ALGORITHM(classname, algname, memberinit) \
-    static inline ::cv::Algorithm* create##classname##_hidden() \
-    { \
-        return new classname; \
-    } \
-    \
-    static inline ::cv::Ptr< ::cv::Algorithm> create##classname##_ptr_hidden() \
-    { \
-        return ::cv::makePtr<classname>(); \
-    } \
-    \
-    static inline ::cv::AlgorithmInfo& classname##_info() \
-    { \
-        static ::cv::AlgorithmInfo classname##_info_var(algname, create##classname##_hidden); \
-        return classname##_info_var; \
-    } \
-    \
-    static ::cv::AlgorithmInfo& classname##_info_auto = classname##_info(); \
-    \
-    ::cv::AlgorithmInfo* classname::info() const \
-    { \
-        static volatile bool initialized = false; \
-        \
-        if( !initialized ) \
-        { \
-            initialized = true; \
-            classname obj; \
-            memberinit; \
-        } \
-        return &classname##_info(); \
-    }
-
-
-
 /****************************************************************************************\
 *                                  Common declarations                                   *
 \****************************************************************************************/
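
What used to require CV_INIT_ALGORITHM now reduces to a plain factory; a minimal sketch for hypothetical Foo/FooImpl classes:

@code
cv::Ptr<Foo> createFoo()
{
    return cv::makePtr<FooImpl>();  // no AlgorithmInfo bookkeeping, nothing for the linker to drop
}
@endcode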

diff --git a/modules/core/src/algorithm.cpp b/modules/core/src/algorithm.cpp
index f817a987b4..b10a28988a 100644
--- a/modules/core/src/algorithm.cpp
+++ b/modules/core/src/algorithm.cpp
@@ -45,127 +45,6 @@
 namespace cv
 {
 
-template<typename _KeyTp, typename _ValueTp> struct sorted_vector
-{
-    sorted_vector() {}
-    void clear() { vec.clear(); }
-    size_t size() const { return vec.size(); }
-    _ValueTp& operator [](size_t idx) { return vec[idx]; }
-    const _ValueTp& operator [](size_t idx) const { return vec[idx]; }
-
-    void add(const _KeyTp& k, const _ValueTp& val)
-    {
-        std::pair<_KeyTp, _ValueTp> p(k, val);
-        vec.push_back(p);
-        size_t i = vec.size()-1;
-        for( ; i > 0 && vec[i].first < vec[i-1].first; i-- )
-            std::swap(vec[i-1], vec[i]);
-        CV_Assert( i == 0 || vec[i].first != vec[i-1].first );
-    }
-
-    bool find(const _KeyTp& key, _ValueTp& value) const
-    {
-        size_t a = 0, b = vec.size();
-        while( b > a )
-        {
-            size_t c = (a + b)/2;
-            if( vec[c].first < key )
-                a = c+1;
-            else
-                b = c;
-        }
-
-        if( a < vec.size() && vec[a].first == key )
-        {
-            value = vec[a].second;
-            return true;
-        }
-        return false;
-    }
-
-    void get_keys(std::vector<_KeyTp>& keys) const
-    {
-        size_t i = 0, n = vec.size();
-        keys.resize(n);
-
-        for( i = 0; i < n; i++ )
-            keys[i] = vec[i].first;
-    }
-
-    std::vector<std::pair<_KeyTp, _ValueTp> > vec;
-};
-
-
-template<typename _ValueTp> inline const _ValueTp* findstr(const sorted_vector<String, _ValueTp>& vec,
-                                                           const char* key)
-{
-    if( !key )
-        return 0;
-
-    size_t a = 0, b = vec.vec.size();
-    while( b > a )
-    {
-        size_t c = (a + b)/2;
-        if( strcmp(vec.vec[c].first.c_str(), key) < 0 )
-            a = c+1;
-        else
-            b = c;
-    }
-
-    if( ( a < vec.vec.size() ) && ( strcmp(vec.vec[a].first.c_str(), key) == 0 ))
-        return &vec.vec[a].second;
-    return 0;
-}
-
-
-Param::Param()
-{
-    type = 0;
-    offset = 0;
-    readonly = false;
-    getter = 0;
-    setter = 0;
-}
-
-
-Param::Param(int _type, bool _readonly, int _offset,
-             Algorithm::Getter _getter, Algorithm::Setter _setter,
-             const String& _help)
-{
-    type = _type;
-    readonly = _readonly;
-    offset = _offset;
-    getter = _getter;
-    setter = _setter;
-    help = _help;
-}
-
-struct CV_EXPORTS AlgorithmInfoData
-{
-    sorted_vector<String, Param> params;
-    String _name;
-};
-
-
-static sorted_vector<String, Algorithm::Constructor>& alglist()
-{
-    static sorted_vector<String, Algorithm::Constructor> alglist_var;
-    return alglist_var;
-}
-
-void Algorithm::getList(std::vector<String>& algorithms)
-{
-    alglist().get_keys(algorithms);
-}
-
-Ptr<Algorithm> Algorithm::_create(const String& name)
-{
-    Algorithm::Constructor c = 0;
-    if( !alglist().find(name, c) )
-        return Ptr<Algorithm>();
-    return Ptr<Algorithm>(c());
-}
-
 Algorithm::Algorithm()
 {
 }
@@ -174,1052 +53,6 @@ Algorithm::~Algorithm()
 {
 }
 
-String Algorithm::name() const
-{
-    return info()->name();
-}
-
-void Algorithm::set(const String& parameter, int value)
-{
-    info()->set(this, parameter.c_str(), ParamType<int>::type, &value);
-}
-
-void Algorithm::set(const String& parameter, double value)
-{
-    info()->set(this, parameter.c_str(), ParamType<double>::type, &value);
-}
-
-void Algorithm::set(const String& parameter, bool value)
-{
-    info()->set(this, parameter.c_str(), ParamType<bool>::type, &value);
-}
-
-void Algorithm::set(const String& parameter, const String& value)
-{
-    info()->set(this, parameter.c_str(), ParamType<String>::type, &value);
-}
-
-void Algorithm::set(const String& parameter, const Mat& value)
-{
-    info()->set(this, parameter.c_str(), ParamType<Mat>::type, &value);
-}
-
-void Algorithm::set(const String& parameter, const std::vector<Mat>& value)
-{
-    info()->set(this, parameter.c_str(), ParamType<std::vector<Mat> >::type, &value);
-}
-
-void Algorithm::set(const String& parameter, const Ptr<Algorithm>& value)
-{
-    info()->set(this, parameter.c_str(), ParamType<Algorithm>::type, &value);
-}
-
-void Algorithm::set(const char* parameter, int value)
-{
-    info()->set(this, parameter, ParamType<int>::type, &value);
-}
-
-void Algorithm::set(const char* parameter, double value)
-{
-    info()->set(this, parameter, ParamType<double>::type, &value);
-}
-
-void Algorithm::set(const char* parameter, bool value)
-{
-    info()->set(this, parameter, ParamType<bool>::type, &value);
-}
-
-void Algorithm::set(const char* parameter, const String& value)
-{
-    info()->set(this, parameter, ParamType<String>::type, &value);
-}
-
-void Algorithm::set(const char* parameter, const Mat& value)
-{
-    info()->set(this, parameter, ParamType<Mat>::type, &value);
-}
-
-void Algorithm::set(const char* parameter, const std::vector<Mat>& value)
-{
-    info()->set(this, parameter, ParamType<std::vector<Mat> >::type, &value);
-}
-
-void Algorithm::set(const char* parameter, const Ptr<Algorithm>& value)
-{
-    info()->set(this, parameter, ParamType<Algorithm>::type, &value);
-}
-
-
-void Algorithm::setInt(const String& parameter, int value)
-{
-    info()->set(this, parameter.c_str(), ParamType<int>::type, &value);
-}
-
-void Algorithm::setDouble(const String& parameter, double value)
-{
-    info()->set(this, parameter.c_str(), ParamType<double>::type, &value);
-}
-
-void Algorithm::setBool(const String& parameter, bool value)
-{
-    info()->set(this, parameter.c_str(), ParamType<bool>::type, &value);
-}
-
-void Algorithm::setString(const String& parameter, const String& value)
-{
-    info()->set(this, parameter.c_str(), ParamType<String>::type, &value);
-}
-
-void Algorithm::setMat(const String& parameter, const Mat& value)
-{
-    info()->set(this, parameter.c_str(), ParamType<Mat>::type, &value);
-}
-
-void Algorithm::setMatVector(const String& parameter, const std::vector<Mat>& value)
-{
-    info()->set(this, parameter.c_str(), ParamType<std::vector<Mat> >::type, &value);
-}
-
-void Algorithm::setAlgorithm(const String& parameter, const Ptr<Algorithm>& value)
-{
-    info()->set(this, parameter.c_str(), ParamType<Algorithm>::type, &value);
-}
-
-void Algorithm::setInt(const char* parameter, int value)
-{
-    info()->set(this, parameter, ParamType<int>::type, &value);
-}
-
-void Algorithm::setDouble(const char* parameter, double value)
-{
-    info()->set(this, parameter, ParamType<double>::type, &value);
-}
-
-void Algorithm::setBool(const char* parameter, bool value)
-{
-    info()->set(this, parameter, ParamType<bool>::type, &value);
-}
-
-void Algorithm::setString(const char* parameter, const String& value)
-{
-    info()->set(this, parameter, ParamType<String>::type, &value);
-}
-
-void Algorithm::setMat(const char* parameter, const Mat& value)
-{
-    info()->set(this, parameter, ParamType<Mat>::type, &value);
-}
-
-void Algorithm::setMatVector(const char* parameter, const std::vector<Mat>& value)
-{
-    info()->set(this, parameter, ParamType<std::vector<Mat> >::type, &value);
-}
-
-void Algorithm::setAlgorithm(const char* parameter, const Ptr<Algorithm>& value)
-{
-    info()->set(this, parameter, ParamType<Algorithm>::type, &value);
-}
-
-
-
-int Algorithm::getInt(const String& parameter) const
-{
-    return get<int>(parameter);
-}
-
-double Algorithm::getDouble(const String& parameter) const
-{
-    return get<double>(parameter);
-}
-
-bool Algorithm::getBool(const String& parameter) const
-{
-    return get<bool>(parameter);
-}
-
-String Algorithm::getString(const String& parameter) const
-{
-    return get<String>(parameter);
-}
-
-Mat Algorithm::getMat(const String& parameter) const
-{
-    return get<Mat>(parameter);
-}
-
-std::vector<Mat> Algorithm::getMatVector(const String& parameter) const
-{
-    return get<std::vector<Mat> >(parameter);
-}
-
-Ptr<Algorithm> Algorithm::getAlgorithm(const String& parameter) const
-{
-    return get<Algorithm>(parameter);
-}
-
-String Algorithm::paramHelp(const String& parameter) const
-{
-    return info()->paramHelp(parameter.c_str());
-}
-
-int Algorithm::paramType(const String& parameter) const
-{
-    return info()->paramType(parameter.c_str());
-}
-
-int Algorithm::paramType(const char* parameter) const
-{
-    return info()->paramType(parameter);
-}
-
-void Algorithm::getParams(std::vector<String>& names) const
-{
-    info()->getParams(names);
-}
-
-void Algorithm::write(FileStorage& fs) const
-{
-    info()->write(this, fs);
-}
-
-void Algorithm::read(const FileNode& fn)
-{
-    info()->read(this, fn);
-}
-
-
-AlgorithmInfo::AlgorithmInfo(const String& _name, Algorithm::Constructor create)
-{
-    data = new AlgorithmInfoData;
-    data->_name = _name;
-    if (!alglist().find(_name, create))
-        alglist().add(_name, create);
-}
-
-AlgorithmInfo::~AlgorithmInfo()
-{
-    delete data;
-}
-
-void AlgorithmInfo::write(const Algorithm* algo, FileStorage& fs) const
-{
-    size_t i = 0, nparams = data->params.vec.size();
-    cv::write(fs, "name", algo->name());
-    for( i = 0; i < nparams; i++ )
-    {
-        const Param& p = data->params.vec[i].second;
-        const String& pname = data->params.vec[i].first;
-        if( p.type == Param::INT )
-            cv::write(fs, pname, algo->get<int>(pname));
-        else if( p.type == Param::BOOLEAN )
-            cv::write(fs, pname, (int)algo->get<bool>(pname));
-        else if( p.type == Param::REAL )
-            cv::write(fs, pname, algo->get<double>(pname));
-        else if( p.type == Param::STRING )
-            cv::write(fs, pname, algo->get<String>(pname));
-        else if( p.type == Param::MAT )
-            cv::write(fs, pname, algo->get<Mat>(pname));
-        else if( p.type == Param::MAT_VECTOR )
-            cv::write(fs, pname, algo->get<std::vector<Mat> >(pname));
-        else if( p.type == Param::ALGORITHM )
-        {
-            cv::internal::WriteStructContext ws(fs, pname, CV_NODE_MAP);
-            Ptr<Algorithm> nestedAlgo = algo->get<Algorithm>(pname);
-            nestedAlgo->write(fs);
-        }
-        else if( p.type == Param::FLOAT)
-            cv::write(fs, pname, algo->getDouble(pname));
-        else if( p.type == Param::UNSIGNED_INT)
-            cv::write(fs, pname, algo->getInt(pname));//TODO: implement cv::write(, , unsigned int)
-        else if( p.type == Param::UINT64)
-            cv::write(fs, pname, algo->getInt(pname));//TODO: implement cv::write(, , uint64)
-        else if( p.type == Param::UCHAR)
-            cv::write(fs, pname, algo->getInt(pname));
-        else
-        {
-            String msg = format("unknown/unsupported type of '%s' parameter == %d", pname.c_str(), p.type);
-            CV_Error( CV_StsUnsupportedFormat, msg.c_str());
-        }
-    }
-}
-
-void AlgorithmInfo::read(Algorithm* algo, const FileNode& fn) const
-{
-    size_t i = 0, nparams = data->params.vec.size();
-    AlgorithmInfo* info = algo->info();
-
-    for( i = 0; i < nparams; i++ )
-    {
-        const Param& p = data->params.vec[i].second;
-        const String& pname = data->params.vec[i].first;
-        const FileNode n = fn[pname];
-        if( n.empty() )
-            continue;
-        if( p.type == Param::INT )
-        {
-            int val = (int)n;
-            info->set(algo, pname.c_str(), p.type, &val, true);
-        }
-        else if( p.type == Param::BOOLEAN )
-        {
-            bool val = (int)n != 0;
-            info->set(algo, pname.c_str(), p.type, &val, true);
-        }
-        else if( p.type == Param::REAL )
-        {
-            double val = (double)n;
-            info->set(algo, pname.c_str(), p.type, &val, true);
-        }
-        else if( p.type == Param::STRING )
-        {
-            String val = (String)n;
-            info->set(algo, pname.c_str(), p.type, &val, true);
-        }
-        else if( p.type == Param::MAT )
-        {
-            Mat m;
-            cv::read(n, m);
-            info->set(algo, pname.c_str(), p.type, &m, true);
-        }
-        else if( p.type == Param::MAT_VECTOR )
-        {
-            std::vector<Mat> mv;
-            cv::read(n, mv);
-            info->set(algo, pname.c_str(), p.type, &mv, true);
-        }
-        else if( p.type == Param::ALGORITHM )
-        {
-            Ptr<Algorithm> nestedAlgo = Algorithm::_create((String)n["name"]);
-            CV_Assert( nestedAlgo );
-            nestedAlgo->read(n);
-            info->set(algo, pname.c_str(), p.type, &nestedAlgo, true);
-        }
-        else if( p.type == Param::FLOAT )
-        {
-            float val = (float)n;
-            info->set(algo, pname.c_str(), p.type, &val, true);
-        }
-        else if( p.type == Param::UNSIGNED_INT )
-        {
-            unsigned int val = (unsigned int)((int)n);//TODO: implement conversion (unsigned int)FileNode
-            info->set(algo, pname.c_str(), p.type, &val, true);
-        }
-        else if( p.type == Param::UINT64)
-        {
-            uint64 val = (uint64)((int)n);//TODO: implement conversion (uint64)FileNode
-            info->set(algo, pname.c_str(), p.type, &val, true);
-        }
-        else if( p.type == Param::UCHAR)
-        {
-            uchar val = (uchar)((int)n);
-            info->set(algo, pname.c_str(), p.type, &val, true);
-        }
-        else
-        {
-            String msg = format("unknown/unsupported type of '%s' parameter == %d", pname.c_str(), p.type);
-            CV_Error( CV_StsUnsupportedFormat, msg.c_str());
-        }
-    }
-}
-
-String AlgorithmInfo::name() const
-{
-    return data->_name;
-}
-
-union GetSetParam
-{
-    int (Algorithm::*get_int)() const;
-    bool (Algorithm::*get_bool)() const;
-    double (Algorithm::*get_double)() const;
-    String (Algorithm::*get_string)() const;
-    Mat (Algorithm::*get_mat)() const;
-    std::vector<Mat> (Algorithm::*get_mat_vector)() const;
-    Ptr<Algorithm> (Algorithm::*get_algo)() const;
-    float (Algorithm::*get_float)() const;
-    unsigned int (Algorithm::*get_uint)() const;
-    uint64 (Algorithm::*get_uint64)() const;
-    uchar (Algorithm::*get_uchar)() const;
-
-    void (Algorithm::*set_int)(int);
-    void (Algorithm::*set_bool)(bool);
-    void (Algorithm::*set_double)(double);
-    void (Algorithm::*set_string)(const String&);
-    void (Algorithm::*set_mat)(const Mat&);
-    void (Algorithm::*set_mat_vector)(const std::vector<Mat>&);
-    void (Algorithm::*set_algo)(const Ptr<Algorithm>&);
-    void (Algorithm::*set_float)(float);
-    void (Algorithm::*set_uint)(unsigned int);
-    void (Algorithm::*set_uint64)(uint64);
-    void (Algorithm::*set_uchar)(uchar);
-};
-
-static String getNameOfType(int argType);
-
-static String getNameOfType(int argType)
-{
-    switch(argType)
-    {
-        case Param::INT: return "integer";
-        case Param::BOOLEAN: return "boolean";
-        case Param::REAL: return "double";
-        case Param::STRING: return "string";
-        case Param::MAT: return "cv::Mat";
-        case Param::MAT_VECTOR: return "std::vector<cv::Mat>";
-        case Param::ALGORITHM: return "algorithm";
-        case Param::FLOAT: return "float";
-        case Param::UNSIGNED_INT: return "unsigned int";
-        case Param::UINT64: return "unsigned int64";
-        case Param::UCHAR: return "unsigned char";
-        default: CV_Error(CV_StsBadArg, "Wrong argument type");
-    }
-    return "";
-}
-
-static String getErrorMessageForWrongArgumentInSetter(String algoName, String paramName, int paramType, int argType)
-{
-    String message = String("Argument error: the setter")
-        + " method was called for the parameter '" + paramName + "' of the algorithm '" + algoName
-        +"', the parameter has " + getNameOfType(paramType) + " type, ";
-
-    if (paramType == Param::INT || paramType == Param::BOOLEAN || paramType == Param::REAL
-            || paramType == Param::FLOAT || paramType == Param::UNSIGNED_INT || paramType == Param::UINT64 || paramType == Param::UCHAR)
-    {
-        message = message + "so it should be set by integer, unsigned integer, uint64, unsigned char, boolean, float or double value, ";
-    }
-    message = message + "but the setter was called with " + getNameOfType(argType) + " value";
-
-    return message;
-}
-
-static String getErrorMessageForWrongArgumentInGetter(String algoName, String paramName, int paramType, int argType)
-{
-    String message = String("Argument error: the getter")
-        + " method was called for the parameter '" + paramName + "' of the algorithm '" + algoName
-        +"', the parameter has " + getNameOfType(paramType) + " type, ";
-
-    if (paramType == Param::BOOLEAN)
-    {
-        message = message + "so it should be get as integer, unsigned integer, uint64, boolean, unsigned char, float or double value, ";
-    }
-    else if (paramType == Param::INT || paramType == Param::UNSIGNED_INT || paramType == Param::UINT64 || paramType == Param::UCHAR)
-    {
-        message = message + "so it should be get as integer, unsigned integer, uint64, unsigned char, float or double value, ";
-    }
-    message = message + "but the getter was called to get a " + getNameOfType(argType) + " value";
-
-    return message;
-}
-
-void AlgorithmInfo::set(Algorithm* algo, const char* parameter, int argType, const void* value, bool force) const
-{
-    const Param* p = findstr(data->params, parameter);
-
-    if( !p )
-        CV_Error_( CV_StsBadArg, ("No parameter '%s' is found", parameter ? parameter : "<NULL>") );
-
-    if( !force && p->readonly )
-        CV_Error_( CV_StsError, ("Parameter '%s' is readonly", parameter));
-
-    GetSetParam f;
-    f.set_int = p->setter;
-
-    if( argType == Param::INT || argType == Param::BOOLEAN || argType == Param::REAL
-        || argType == Param::FLOAT || argType == Param::UNSIGNED_INT || argType == Param::UINT64 || argType == Param::UCHAR)
-    {
-        if ( !( p->type == Param::INT || p->type == Param::REAL || p->type == Param::BOOLEAN
-                || p->type == Param::UNSIGNED_INT || p->type == Param::UINT64 || p->type == Param::FLOAT || p->type == Param::UCHAR) )
-        {
-            String message = getErrorMessageForWrongArgumentInSetter(algo->name(), parameter, p->type, argType);
-            CV_Error(CV_StsBadArg, message);
-        }
-
-        if( p->type == Param::INT )
-        {
-            bool is_ok = true;
-            int val = argType == Param::INT ? *(const int*)value :
-                      argType == Param::BOOLEAN ? (int)*(const bool*)value :
-                      argType == Param::REAL ? saturate_cast<int>(*(const double*)value) :
-                      argType == Param::FLOAT ? saturate_cast<int>(*(const float*)value) :
-                      argType == Param::UNSIGNED_INT ? (int)*(const unsigned int*)value :
-                      argType == Param::UINT64 ? (int)*(const uint64*)value :
-                      argType == Param::UCHAR ? (int)*(const uchar*)value :
-                      (int)(is_ok = false);
-
-            if (!is_ok)
-            {
-                CV_Error(CV_StsBadArg, "Wrong argument type in the setter");
-            }
-
-            if( p->setter )
-                (algo->*f.set_int)(val);
-            else
-                *(int*)((uchar*)algo + p->offset) = val;
-        }
-        else if( p->type == Param::BOOLEAN )
-        {
-            bool is_ok = true;
-            bool val = argType == Param::INT ? *(const int*)value != 0 :
-                       argType == Param::BOOLEAN ? *(const bool*)value :
-                       argType == Param::REAL ? (*(const double*)value != 0) :
-                       argType == Param::FLOAT ? (*(const float*)value != 0) :
-                       argType == Param::UNSIGNED_INT ? (*(const unsigned int*)value != 0):
-                       argType == Param::UINT64 ? (*(const uint64*)value != 0):
-                       argType == Param::UCHAR ? (*(const uchar*)value != 0):
-                       (int)(is_ok = false);
-
-            if (!is_ok)
-            {
-                CV_Error(CV_StsBadArg, "Wrong argument type in the setter");
-            }
-
-            if( p->setter )
-                (algo->*f.set_bool)(val);
-            else
-                *(bool*)((uchar*)algo + p->offset) = val;
-        }
-        else if( p->type == Param::REAL )
-        {
-            bool is_ok = true;
-            double val = argType == Param::INT ? (double)*(const int*)value :
-                         argType == Param::BOOLEAN ? (double)*(const bool*)value :
-                         argType == Param::REAL ? (double)(*(const double*)value ) :
-                         argType == Param::FLOAT ? (double)(*(const float*)value ) :
-                         argType == Param::UNSIGNED_INT ? (double)(*(const unsigned int*)value ) :
-                         argType == Param::UINT64 ? (double)(*(const uint64*)value ) :
-                         argType == Param::UCHAR ? (double)(*(const uchar*)value ) :
-                         (double)(is_ok = false);
-
-            if (!is_ok)
-            {
-                CV_Error(CV_StsBadArg, "Wrong argument type in the setter");
-            }
-            if( p->setter )
-                (algo->*f.set_double)(val);
-            else
-                *(double*)((uchar*)algo + p->offset) = val;
-        }
-        else if( p->type == Param::FLOAT )
-        {
-            bool is_ok = true;
-            double val = argType == Param::INT ? (double)*(const int*)value :
-                         argType == Param::BOOLEAN ? (double)*(const bool*)value :
-                         argType == Param::REAL ? (double)(*(const double*)value ) :
-                         argType == Param::FLOAT ? (double)(*(const float*)value ) :
-                         argType == Param::UNSIGNED_INT ? (double)(*(const unsigned int*)value ) :
-                         argType == Param::UINT64 ? (double)(*(const uint64*)value ) :
-                         argType == Param::UCHAR ? (double)(*(const uchar*)value ) :
-                         (double)(is_ok = false);
-
-            if (!is_ok)
-            {
-                CV_Error(CV_StsBadArg, "Wrong argument type in the setter");
-            }
-            if( p->setter )
-                (algo->*f.set_float)((float)val);
-            else
-                *(float*)((uchar*)algo + p->offset) = (float)val;
-        }
-        else if( p->type == Param::UNSIGNED_INT )
-        {
-            bool is_ok = true;
-            unsigned int val = argType == Param::INT ? (unsigned int)*(const int*)value :
-                               argType == Param::BOOLEAN ? (unsigned int)*(const bool*)value :
-                               argType == Param::REAL ? saturate_cast<unsigned int>(*(const double*)value ) :
-                               argType == Param::FLOAT ? saturate_cast<unsigned int>(*(const float*)value ) :
-                               argType == Param::UNSIGNED_INT ? (unsigned int)(*(const unsigned int*)value ) :
-                               argType == Param::UINT64 ? (unsigned int)(*(const uint64*)value ) :
-                               argType == Param::UCHAR ? (unsigned int)(*(const uchar*)value ) :
-                               (int)(is_ok = false);
-
-            if (!is_ok)
-            {
-                CV_Error(CV_StsBadArg, "Wrong argument type in the setter");
-            }
-            if( p->setter )
-                (algo->*f.set_uint)(val);
-            else
-                *(unsigned int*)((uchar*)algo + p->offset) = val;
-        }
-        else if( p->type == Param::UINT64 )
-        {
-            bool is_ok = true;
-            uint64 val = argType == Param::INT ? (uint64)*(const int*)value :
-                         argType == Param::BOOLEAN ? (uint64)*(const bool*)value :
-                         argType == Param::REAL ? saturate_cast<uint64>(*(const double*)value ) :
-                         argType == Param::FLOAT ? saturate_cast<uint64>(*(const float*)value ) :
-                         argType == Param::UNSIGNED_INT ? (uint64)(*(const unsigned int*)value ) :
-                         argType == Param::UINT64 ? (uint64)(*(const uint64*)value ) :
-                         argType == Param::UCHAR ? (uint64)(*(const uchar*)value ) :
-                         (int)(is_ok = false);
-
-            if (!is_ok)
-            {
-                CV_Error(CV_StsBadArg, "Wrong argument type in the setter");
-            }
-            if( p->setter )
-                (algo->*f.set_uint64)(val);
-            else
-                *(uint64*)((uchar*)algo + p->offset) = val;
-        }
-        else if( p->type == Param::UCHAR )
-        {
-            bool is_ok = true;
-            uchar val = argType == Param::INT ? (uchar)*(const int*)value :
-                        argType == Param::BOOLEAN ? (uchar)*(const bool*)value :
-                        argType == Param::REAL ? saturate_cast<uchar>(*(const double*)value ) :
-                        argType == Param::FLOAT ? saturate_cast<uchar>(*(const float*)value ) :
-                        argType == Param::UNSIGNED_INT ? (uchar)(*(const unsigned int*)value ) :
-                        argType == Param::UINT64 ? (uchar)(*(const uint64*)value ) :
-                        argType == Param::UCHAR ? (uchar)(*(const uchar*)value ) :
-                        (int)(is_ok = false);
-
-            if (!is_ok)
-            {
-                CV_Error(CV_StsBadArg, "Wrong argument type in the setter");
-            }
-            if( p->setter )
-                (algo->*f.set_uchar)(val);
-            else
-                *(uchar*)((uchar*)algo + p->offset) = val;
-        }
-        else
-            CV_Error(CV_StsBadArg, "Wrong parameter type in the setter");
-    }
-    else if( argType == Param::STRING )
-    {
-        if( p->type != Param::STRING )
-        {
-            String message = getErrorMessageForWrongArgumentInSetter(algo->name(), parameter, p->type, argType);
-            CV_Error(CV_StsBadArg, message);
-        }
-
-        const String& val = *(const String*)value;
-        if( p->setter )
-            (algo->*f.set_string)(val);
-        else
-            *(String*)((uchar*)algo + p->offset) = val;
-    }
-    else if( argType == Param::MAT )
-    {
-        if( p->type != Param::MAT )
-        {
-            String message = getErrorMessageForWrongArgumentInSetter(algo->name(), parameter, p->type, argType);
-            CV_Error(CV_StsBadArg, message);
-        }
-
-        const Mat& val = *(const Mat*)value;
-        if( p->setter )
-            (algo->*f.set_mat)(val);
-        else
-            *(Mat*)((uchar*)algo + p->offset) = val;
-    }
-    else if( argType == Param::MAT_VECTOR )
-    {
-        if( p->type != Param::MAT_VECTOR )
-        {
-            String message = getErrorMessageForWrongArgumentInSetter(algo->name(), parameter, p->type, argType);
-            CV_Error(CV_StsBadArg, message);
-        }
-
-        const std::vector<Mat>& val = *(const std::vector<Mat>*)value;
-        if( p->setter )
-            (algo->*f.set_mat_vector)(val);
-        else
-            *(std::vector<Mat>*)((uchar*)algo + p->offset) = val;
-    }
-    else if( argType == Param::ALGORITHM )
-    {
-        if( p->type != Param::ALGORITHM )
-        {
-            String message = getErrorMessageForWrongArgumentInSetter(algo->name(), parameter, p->type, argType);
-            CV_Error(CV_StsBadArg, message);
-        }
-
-        const Ptr<Algorithm>& val = *(const Ptr<Algorithm>*)value;
-        if( p->setter )
-            (algo->*f.set_algo)(val);
-        else
-            *(Ptr<Algorithm>*)((uchar*)algo + p->offset) = val;
-    }
-    else
-        CV_Error(CV_StsBadArg, "Unknown/unsupported parameter type");
-}
(algo->*f.get_uint)() : *(unsigned int*)((uchar*)algo + p->offset); - - if( argType == Param::INT ) - *(int*)value = (int)val; - else if ( argType == Param::REAL ) - *(double*)value = (double)val; - else if ( argType == Param::FLOAT) - *(float*)value = (float)val; - else if ( argType == Param::UNSIGNED_INT ) - *(unsigned int*)value = (unsigned int)val; - else if ( argType == Param::UINT64 ) - *(uint64*)value = (uint64)val; - else if ( argType == Param::UCHAR) - *(uchar*)value = (uchar)val; - else - CV_Error(CV_StsBadArg, "Wrong argument type"); - } - else if( p->type == Param::UINT64 ) - { - if (!( argType == Param::INT || argType == Param::REAL || argType == Param::FLOAT || argType == Param::UNSIGNED_INT || argType == Param::UINT64 || argType == Param::UCHAR)) - { - String message = getErrorMessageForWrongArgumentInGetter(algo->name(), parameter, p->type, argType); - CV_Error(CV_StsBadArg, message); - } - uint64 val = p->getter ? (algo->*f.get_uint64)() : *(uint64*)((uchar*)algo + p->offset); - - if( argType == Param::INT ) - *(int*)value = (int)val; - else if ( argType == Param::REAL ) - *(double*)value = (double)val; - else if ( argType == Param::FLOAT) - *(float*)value = (float)val; - else if ( argType == Param::UNSIGNED_INT ) - *(unsigned int*)value = (unsigned int)val; - else if ( argType == Param::UINT64 ) - *(uint64*)value = (uint64)val; - else if ( argType == Param::UCHAR) - *(uchar*)value = (uchar)val; - else - CV_Error(CV_StsBadArg, "Wrong argument type"); - } - else if( p->type == Param::UCHAR ) - { - if (!( argType == Param::INT || argType == Param::REAL || argType == Param::FLOAT || argType == Param::UNSIGNED_INT || argType == Param::UINT64 || argType == Param::UCHAR)) - { - String message = getErrorMessageForWrongArgumentInGetter(algo->name(), parameter, p->type, argType); - CV_Error(CV_StsBadArg, message); - } - uchar val = p->getter ? (algo->*f.get_uchar)() : *(uchar*)((uchar*)algo + p->offset); - - if( argType == Param::INT ) - *(int*)value = val; - else if ( argType == Param::REAL ) - *(double*)value = val; - else if ( argType == Param::FLOAT) - *(float*)value = val; - else if ( argType == Param::UNSIGNED_INT ) - *(unsigned int*)value = val; - else if ( argType == Param::UINT64 ) - *(uint64*)value = val; - else if ( argType == Param::UCHAR) - *(uchar*)value = val; - else - CV_Error(CV_StsBadArg, "Wrong argument type"); - - } - else - CV_Error(CV_StsBadArg, "Unknown/unsupported parameter type"); - } - else if( argType == Param::STRING ) - { - if( p->type != Param::STRING ) - { - String message = getErrorMessageForWrongArgumentInGetter(algo->name(), parameter, p->type, argType); - CV_Error(CV_StsBadArg, message); - } - - *(String*)value = p->getter ? (algo->*f.get_string)() : - *(String*)((uchar*)algo + p->offset); - } - else if( argType == Param::MAT ) - { - if( p->type != Param::MAT ) - { - String message = getErrorMessageForWrongArgumentInGetter(algo->name(), parameter, p->type, argType); - CV_Error(CV_StsBadArg, message); - } - - *(Mat*)value = p->getter ? (algo->*f.get_mat)() : - *(Mat*)((uchar*)algo + p->offset); - } - else if( argType == Param::MAT_VECTOR ) - { - if( p->type != Param::MAT_VECTOR ) - { - String message = getErrorMessageForWrongArgumentInGetter(algo->name(), parameter, p->type, argType); - CV_Error(CV_StsBadArg, message); - } - - *(std::vector*)value = p->getter ? 
(algo->*f.get_mat_vector)() :
-            *(std::vector<Mat>*)((uchar*)algo + p->offset);
-    }
-    else if( argType == Param::ALGORITHM )
-    {
-        if( p->type != Param::ALGORITHM )
-        {
-            String message = getErrorMessageForWrongArgumentInGetter(algo->name(), parameter, p->type, argType);
-            CV_Error(CV_StsBadArg, message);
-        }
-
-        *(Ptr<Algorithm>*)value = p->getter ? (algo->*f.get_algo)() :
-            *(Ptr<Algorithm>*)((uchar*)algo + p->offset);
-    }
-    else
-    {
-        String message = getErrorMessageForWrongArgumentInGetter(algo->name(), parameter, p->type, argType);
-        CV_Error(CV_StsBadArg, message);
-    }
-}
-
-
-int AlgorithmInfo::paramType(const char* parameter) const
-{
-    const Param* p = findstr(data->params, parameter);
-    if( !p )
-        CV_Error_( CV_StsBadArg, ("No parameter '%s' is found", parameter ? parameter : "") );
-    return p->type;
-}
-
-
-String AlgorithmInfo::paramHelp(const char* parameter) const
-{
-    const Param* p = findstr(data->params, parameter);
-    if( !p )
-        CV_Error_( CV_StsBadArg, ("No parameter '%s' is found", parameter ? parameter : "") );
-    return p->help;
-}
-
-
-void AlgorithmInfo::getParams(std::vector<String>& names) const
-{
-    data->params.get_keys(names);
-}
-
-
-void AlgorithmInfo::addParam_(Algorithm& algo, const char* parameter, int argType,
-                              void* value, bool readOnly,
-                              Algorithm::Getter getter, Algorithm::Setter setter,
-                              const String& help)
-{
-    CV_Assert( argType == Param::INT || argType == Param::BOOLEAN ||
-               argType == Param::REAL || argType == Param::STRING ||
-               argType == Param::MAT || argType == Param::MAT_VECTOR ||
-               argType == Param::ALGORITHM
-               || argType == Param::FLOAT || argType == Param::UNSIGNED_INT || argType == Param::UINT64
-               || argType == Param::UCHAR);
-    data->params.add(String(parameter), Param(argType, readOnly,
-                     (int)((size_t)value - (size_t)(void*)&algo),
-                     getter, setter, help));
-}
-
-
-void AlgorithmInfo::addParam(Algorithm& algo, const char* parameter,
-                             int& value, bool readOnly,
-                             int (Algorithm::*getter)(),
-                             void (Algorithm::*setter)(int),
-                             const String& help)
-{
-    addParam_(algo, parameter, ParamType<int>::type, &value, readOnly,
-              (Algorithm::Getter)getter, (Algorithm::Setter)setter, help);
-}
-
-void AlgorithmInfo::addParam(Algorithm& algo, const char* parameter,
-                             bool& value, bool readOnly,
-                             int (Algorithm::*getter)(),
-                             void (Algorithm::*setter)(int),
-                             const String& help)
-{
-    addParam_(algo, parameter, ParamType<bool>::type, &value, readOnly,
-              (Algorithm::Getter)getter, (Algorithm::Setter)setter, help);
-}
-
-void AlgorithmInfo::addParam(Algorithm& algo, const char* parameter,
-                             double& value, bool readOnly,
-                             double (Algorithm::*getter)(),
-                             void (Algorithm::*setter)(double),
-                             const String& help)
-{
-    addParam_(algo, parameter, ParamType<double>::type, &value, readOnly,
-              (Algorithm::Getter)getter, (Algorithm::Setter)setter, help);
-}
-
-void AlgorithmInfo::addParam(Algorithm& algo, const char* parameter,
-                             String& value, bool readOnly,
-                             String (Algorithm::*getter)(),
-                             void (Algorithm::*setter)(const String&),
-                             const String& help)
-{
-    addParam_(algo, parameter, ParamType<String>::type, &value, readOnly,
-              (Algorithm::Getter)getter, (Algorithm::Setter)setter, help);
-}
-
-void AlgorithmInfo::addParam(Algorithm& algo, const char* parameter,
-                             Mat& value, bool readOnly,
-                             Mat (Algorithm::*getter)(),
-                             void (Algorithm::*setter)(const Mat&),
-                             const String& help)
-{
-    addParam_(algo, parameter, ParamType<Mat>::type, &value, readOnly,
-              (Algorithm::Getter)getter, (Algorithm::Setter)setter, help);
-}
-
-void AlgorithmInfo::addParam(Algorithm& algo, const char* parameter,
-                             std::vector<Mat>& value, bool readOnly,
-                             std::vector<Mat> (Algorithm::*getter)(),
-                             void (Algorithm::*setter)(const std::vector<Mat>&),
-                             const String& help)
-{
-    addParam_(algo, parameter, ParamType<std::vector<Mat> >::type, &value, readOnly,
-              (Algorithm::Getter)getter, (Algorithm::Setter)setter, help);
-}
-
-void AlgorithmInfo::addParam(Algorithm& algo, const char* parameter,
-                             Ptr<Algorithm>& value, bool readOnly,
-                             Ptr<Algorithm> (Algorithm::*getter)(),
-                             void (Algorithm::*setter)(const Ptr<Algorithm>&),
-                             const String& help)
-{
-    addParam_(algo, parameter, ParamType<Algorithm>::type, &value, readOnly,
-              (Algorithm::Getter)getter, (Algorithm::Setter)setter, help);
-}
-
-void AlgorithmInfo::addParam(Algorithm& algo, const char* parameter,
-                             float& value, bool readOnly,
-                             float (Algorithm::*getter)(),
-                             void (Algorithm::*setter)(float),
-                             const String& help)
-{
-    addParam_(algo, parameter, ParamType<float>::type, &value, readOnly,
-              (Algorithm::Getter)getter, (Algorithm::Setter)setter, help);
-}
-
-void AlgorithmInfo::addParam(Algorithm& algo, const char* parameter,
-                             unsigned int& value, bool readOnly,
-                             unsigned int (Algorithm::*getter)(),
-                             void (Algorithm::*setter)(unsigned int),
-                             const String& help)
-{
-    addParam_(algo, parameter, ParamType<unsigned int>::type, &value, readOnly,
-              (Algorithm::Getter)getter, (Algorithm::Setter)setter, help);
-}
-
-void AlgorithmInfo::addParam(Algorithm& algo, const char* parameter,
-                             uint64& value, bool readOnly,
-                             uint64 (Algorithm::*getter)(),
-                             void (Algorithm::*setter)(uint64),
-                             const String& help)
-{
-    addParam_(algo, parameter, ParamType<uint64>::type, &value, readOnly,
-              (Algorithm::Getter)getter, (Algorithm::Setter)setter, help);
-}
-
-void AlgorithmInfo::addParam(Algorithm& algo, const char* parameter,
-                             uchar& value, bool readOnly,
-                             uchar (Algorithm::*getter)(),
-                             void (Algorithm::*setter)(uchar),
-                             const String& help)
-{
-    addParam_(algo, parameter, ParamType<uchar>::type, &value, readOnly,
-              (Algorithm::Getter)getter, (Algorithm::Setter)setter, help);
-}
-
 }
 
 /* End of file. */
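With the AlgorithmInfo tables gone, parameters are no longer registered and looked up by string at run time; the rest of this series exposes each parameter through a typed getter/setter pair generated by the CV_PURE_PROPERTY / CV_IMPL_PROPERTY macros (see the Doxyfile predefines in PATCH 2/7). As a rough sketch of the pattern, using a hypothetical MyFilter class with a Scale parameter (this is not the literal macro expansion):

    #include <opencv2/core.hpp>

    class MyFilter : public cv::Algorithm
    {
    public:
        MyFilter() : scale_(2) {}

        // roughly what CV_IMPL_PROPERTY(int, Scale, scale_) generates:
        int getScale() const { return scale_; }    // replaces algo->getInt("scale")
        void setScale(int val) { scale_ = val; }   // replaces algo->set("scale", val)

    private:
        int scale_;
    };

The key benefit is that parameter names and types are now checked by the compiler instead of failing with CV_StsBadArg at run time.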
diff --git a/modules/cudaimgproc/src/histogram.cpp b/modules/cudaimgproc/src/histogram.cpp
index e942e9eb86..0ccce205a1 100644
--- a/modules/cudaimgproc/src/histogram.cpp
+++ b/modules/cudaimgproc/src/histogram.cpp
@@ -140,8 +140,6 @@ namespace
     public:
         CLAHE_Impl(double clipLimit = 40.0, int tilesX = 8, int tilesY = 8);
 
-        cv::AlgorithmInfo* info() const;
-
         void apply(cv::InputArray src, cv::OutputArray dst);
         void apply(InputArray src, OutputArray dst, Stream& stream);
 
@@ -167,11 +165,6 @@ namespace
     {
     }
 
-    CV_INIT_ALGORITHM(CLAHE_Impl, "CLAHE_CUDA",
-                      obj.info()->addParam(obj, "clipLimit", obj.clipLimit_);
-                      obj.info()->addParam(obj, "tilesX", obj.tilesX_);
-                      obj.info()->addParam(obj, "tilesY", obj.tilesY_))
-
     void CLAHE_Impl::apply(cv::InputArray _src, cv::OutputArray _dst)
     {
         apply(_src, _dst, Stream::Null());
diff --git a/modules/cudaoptflow/perf/perf_optflow.cpp b/modules/cudaoptflow/perf/perf_optflow.cpp
index 32040f282c..8480425cce 100644
--- a/modules/cudaoptflow/perf/perf_optflow.cpp
+++ b/modules/cudaoptflow/perf/perf_optflow.cpp
@@ -310,10 +310,10 @@ PERF_TEST_P(ImagePair, OpticalFlowDual_TVL1,
    {
        cv::Mat flow;

-        cv::Ptr<cv::DenseOpticalFlow> alg = cv::createOptFlow_DualTVL1();
-        alg->set("medianFiltering", 1);
-        alg->set("innerIterations", 1);
-        alg->set("outerIterations", 300);
+        cv::Ptr<cv::DualTVL1OpticalFlow> alg = cv::createOptFlow_DualTVL1();
+        alg->setMedianFiltering(1);
+        alg->setInnerIterations(1);
+        alg->setOuterIterations(300);
        TEST_CYCLE() alg->calc(frame0, frame1, flow);

        CPU_SANITY_CHECK(flow);
diff --git a/modules/cudaoptflow/test/test_optflow.cpp b/modules/cudaoptflow/test/test_optflow.cpp
index c5b2ad8478..63bc461bb0 100644
--- a/modules/cudaoptflow/test/test_optflow.cpp
+++ b/modules/cudaoptflow/test/test_optflow.cpp
@@ -369,11 +369,11 @@ CUDA_TEST_P(OpticalFlowDual_TVL1, Accuracy)
    cv::cuda::GpuMat d_flow;
    d_alg->calc(loadMat(frame0), loadMat(frame1), d_flow);

-    cv::Ptr<cv::DenseOpticalFlow> alg = cv::createOptFlow_DualTVL1();
-    alg->set("medianFiltering", 1);
-    alg->set("innerIterations", 1);
-    alg->set("outerIterations", d_alg->getNumIterations());
-    alg->set("gamma", gamma);
+    cv::Ptr<cv::DualTVL1OpticalFlow> alg = cv::createOptFlow_DualTVL1();
+    alg->setMedianFiltering(1);
+    alg->setInnerIterations(1);
+    alg->setOuterIterations(d_alg->getNumIterations());
+    alg->setGamma(gamma);

    cv::Mat flow;
    alg->calc(frame0, frame1, flow);
diff --git a/modules/imgproc/src/clahe.cpp b/modules/imgproc/src/clahe.cpp
index 06fc73153f..75edd18e4a 100644
--- a/modules/imgproc/src/clahe.cpp
+++ b/modules/imgproc/src/clahe.cpp
@@ -320,8 +320,6 @@ namespace
    public:
        CLAHE_Impl(double clipLimit = 40.0, int tilesX = 8, int tilesY = 8);

-        cv::AlgorithmInfo* info() const;
-
        void apply(cv::InputArray src, cv::OutputArray dst);

        void setClipLimit(double clipLimit);
@@ -351,11 +349,6 @@ namespace
    {
    }

-    CV_INIT_ALGORITHM(CLAHE_Impl, "CLAHE",
-                      obj.info()->addParam(obj, "clipLimit", obj.clipLimit_);
-                      obj.info()->addParam(obj, "tilesX", obj.tilesX_);
-                      obj.info()->addParam(obj, "tilesY", obj.tilesY_))
-
    void CLAHE_Impl::apply(cv::InputArray _src, cv::OutputArray _dst)
    {
        CV_Assert( _src.type() == CV_8UC1 || _src.type() == CV_16UC1 );
diff --git a/modules/shape/include/opencv2/shape.hpp b/modules/shape/include/opencv2/shape.hpp
index 093d8575d3..6999476a7d 100644
--- a/modules/shape/include/opencv2/shape.hpp
+++ b/modules/shape/include/opencv2/shape.hpp
@@ -52,11 +52,6 @@
 @defgroup shape Shape Distance and Matching
 */

-namespace cv
-{
-CV_EXPORTS bool initModule_shape();
-}
-
 #endif

 /* End of file.
*/ diff --git a/modules/shape/src/aff_trans.cpp b/modules/shape/src/aff_trans.cpp index a309d85fcf..ec2342f479 100644 --- a/modules/shape/src/aff_trans.cpp +++ b/modules/shape/src/aff_trans.cpp @@ -66,8 +66,6 @@ public: { } - virtual AlgorithmInfo* info() const { return 0; } - //! the main operator virtual void estimateTransformation(InputArray transformingShape, InputArray targetShape, std::vector &matches); virtual float applyTransformation(InputArray input, OutputArray output=noArray()); diff --git a/modules/shape/src/haus_dis.cpp b/modules/shape/src/haus_dis.cpp index ff5bd8c3d1..6f2679f1a2 100644 --- a/modules/shape/src/haus_dis.cpp +++ b/modules/shape/src/haus_dis.cpp @@ -60,8 +60,6 @@ public: { } - virtual AlgorithmInfo* info() const { return 0; } - //! the main operator virtual float computeDistance(InputArray contour1, InputArray contour2); diff --git a/modules/shape/src/hist_cost.cpp b/modules/shape/src/hist_cost.cpp index 4e18687ad8..53c2c68ec0 100644 --- a/modules/shape/src/hist_cost.cpp +++ b/modules/shape/src/hist_cost.cpp @@ -62,8 +62,6 @@ public: { } - virtual AlgorithmInfo* info() const { return 0; } - //! the main operator virtual void buildCostMatrix(InputArray descriptors1, InputArray descriptors2, OutputArray costMatrix); @@ -189,8 +187,6 @@ public: { } - virtual AlgorithmInfo* info() const { return 0; } - //! the main operator virtual void buildCostMatrix(InputArray descriptors1, InputArray descriptors2, OutputArray costMatrix); @@ -327,8 +323,6 @@ public: { } - virtual AlgorithmInfo* info() const { return 0; } - //! the main operator virtual void buildCostMatrix(InputArray descriptors1, InputArray descriptors2, OutputArray costMatrix); @@ -445,8 +439,6 @@ public: { } - virtual AlgorithmInfo* info() const { return 0; } - //! the main operator virtual void buildCostMatrix(InputArray descriptors1, InputArray descriptors2, OutputArray costMatrix); diff --git a/modules/shape/src/sc_dis.cpp b/modules/shape/src/sc_dis.cpp index d67907494c..3f11e8b1bb 100644 --- a/modules/shape/src/sc_dis.cpp +++ b/modules/shape/src/sc_dis.cpp @@ -79,8 +79,6 @@ public: { } - virtual AlgorithmInfo* info() const { return 0; } - //! the main operator virtual float computeDistance(InputArray contour1, InputArray contour2); diff --git a/modules/shape/src/tps_trans.cpp b/modules/shape/src/tps_trans.cpp index 61758bd0b2..038fd2a678 100644 --- a/modules/shape/src/tps_trans.cpp +++ b/modules/shape/src/tps_trans.cpp @@ -68,8 +68,6 @@ public: { } - virtual AlgorithmInfo* info() const { return 0; } - //! the main operators virtual void estimateTransformation(InputArray transformingShape, InputArray targetShape, std::vector &matches); virtual float applyTransformation(InputArray inPts, OutputArray output=noArray()); diff --git a/modules/superres/include/opencv2/superres.hpp b/modules/superres/include/opencv2/superres.hpp index e5bca4b31f..0639b10422 100644 --- a/modules/superres/include/opencv2/superres.hpp +++ b/modules/superres/include/opencv2/superres.hpp @@ -44,6 +44,7 @@ #define __OPENCV_SUPERRES_HPP__ #include "opencv2/core.hpp" +#include "opencv2/superres/optical_flow.hpp" /** @defgroup superres Super Resolution @@ -62,8 +63,6 @@ namespace cv //! @addtogroup superres //! @{ - CV_EXPORTS bool initModule_superres(); - class CV_EXPORTS FrameSource { public: @@ -105,6 +104,36 @@ namespace cv */ virtual void collectGarbage(); + //! @name Scale factor + CV_PURE_PROPERTY(int, Scale) + + //! @name Iterations count + CV_PURE_PROPERTY(int, Iterations) + + //! 
@name Asymptotic value of steepest descent method
+        CV_PURE_PROPERTY(double, Tau)
+
+        //! @name Weight parameter to balance data term and smoothness term
+        CV_PURE_PROPERTY(double, Labmda)
+
+        //! @name Parameter of spacial distribution in Bilateral-TV
+        CV_PURE_PROPERTY(double, Alpha)
+
+        //! @name Kernel size of Bilateral-TV filter
+        CV_PURE_PROPERTY(int, KernelSize)
+
+        //! @name Gaussian blur kernel size
+        CV_PURE_PROPERTY(int, BlurKernelSize)
+
+        //! @name Gaussian blur sigma
+        CV_PURE_PROPERTY(double, BlurSigma)
+
+        //! @name Radius of the temporal search area
+        CV_PURE_PROPERTY(int, TemporalAreaRadius)
+
+        //! @name Dense optical flow algorithm
+        CV_PURE_PROPERTY_S(Ptr<cv::superres::DenseOpticalFlowExt>, OpticalFlow)
+
    protected:
        SuperResolution();

@@ -139,7 +168,6 @@ namespace cv
     */
    CV_EXPORTS Ptr<SuperResolution> createSuperResolution_BTVL1();
    CV_EXPORTS Ptr<SuperResolution> createSuperResolution_BTVL1_CUDA();
-    CV_EXPORTS Ptr<SuperResolution> createSuperResolution_BTVL1_OCL();

    //! @} superres
diff --git a/modules/superres/include/opencv2/superres/optical_flow.hpp b/modules/superres/include/opencv2/superres/optical_flow.hpp
index d4362c4fe7..7bc64782cb 100644
--- a/modules/superres/include/opencv2/superres/optical_flow.hpp
+++ b/modules/superres/include/opencv2/superres/optical_flow.hpp
@@ -60,20 +60,68 @@ namespace cv
            virtual void collectGarbage() = 0;
        };

-        CV_EXPORTS Ptr<DenseOpticalFlowExt> createOptFlow_Farneback();
-        CV_EXPORTS Ptr<DenseOpticalFlowExt> createOptFlow_Farneback_CUDA();
-        CV_EXPORTS Ptr<DenseOpticalFlowExt> createOptFlow_Farneback_OCL();
-
-        CV_EXPORTS Ptr<DenseOpticalFlowExt> createOptFlow_Simple();
+        class CV_EXPORTS FarnebackOpticalFlow : public virtual DenseOpticalFlowExt
+        {
+        public:
+            CV_PURE_PROPERTY(double, PyrScale)
+            CV_PURE_PROPERTY(int, LevelsNumber)
+            CV_PURE_PROPERTY(int, WindowSize)
+            CV_PURE_PROPERTY(int, Iterations)
+            CV_PURE_PROPERTY(int, PolyN)
+            CV_PURE_PROPERTY(double, PolySigma)
+            CV_PURE_PROPERTY(int, Flags)
+        };
+        CV_EXPORTS Ptr<FarnebackOpticalFlow> createOptFlow_Farneback();
+        CV_EXPORTS Ptr<FarnebackOpticalFlow> createOptFlow_Farneback_CUDA();
+
+
+//        CV_EXPORTS Ptr<DenseOpticalFlowExt> createOptFlow_Simple();

-        CV_EXPORTS Ptr<DenseOpticalFlowExt> createOptFlow_DualTVL1();
-        CV_EXPORTS Ptr<DenseOpticalFlowExt> createOptFlow_DualTVL1_CUDA();
-        CV_EXPORTS Ptr<DenseOpticalFlowExt> createOptFlow_DualTVL1_OCL();
-
-        CV_EXPORTS Ptr<DenseOpticalFlowExt> createOptFlow_Brox_CUDA();
+        class CV_EXPORTS DualTVL1OpticalFlow : public virtual DenseOpticalFlowExt
+        {
+        public:
+            CV_PURE_PROPERTY(double, Tau)
+            CV_PURE_PROPERTY(double, Lambda)
+            CV_PURE_PROPERTY(double, Theta)
+            CV_PURE_PROPERTY(int, ScalesNumber)
+            CV_PURE_PROPERTY(int, WarpingsNumber)
+            CV_PURE_PROPERTY(double, Epsilon)
+            CV_PURE_PROPERTY(int, Iterations)
+            CV_PURE_PROPERTY(bool, UseInitialFlow)
+        };
+        CV_EXPORTS Ptr<DualTVL1OpticalFlow> createOptFlow_DualTVL1();
+        CV_EXPORTS Ptr<DualTVL1OpticalFlow> createOptFlow_DualTVL1_CUDA();
+
-        CV_EXPORTS Ptr<DenseOpticalFlowExt> createOptFlow_PyrLK_CUDA();
-        CV_EXPORTS Ptr<DenseOpticalFlowExt> createOptFlow_PyrLK_OCL();
+        class CV_EXPORTS BroxOpticalFlow : public virtual DenseOpticalFlowExt
+        {
+        public:
+            //! @name Flow smoothness
+            CV_PURE_PROPERTY(double, Alpha)
+            //! @name Gradient constancy importance
+            CV_PURE_PROPERTY(double, Gamma)
+            //! @name Pyramid scale factor
+            CV_PURE_PROPERTY(double, ScaleFactor)
+            //! @name Number of lagged non-linearity iterations (inner loop)
+            CV_PURE_PROPERTY(int, InnerIterations)
+            //! @name Number of warping iterations (number of pyramid levels)
+            CV_PURE_PROPERTY(int, OuterIterations)
+            //! @name Number of linear system solver iterations
+            CV_PURE_PROPERTY(int, SolverIterations)
+        };
+        CV_EXPORTS Ptr<BroxOpticalFlow> createOptFlow_Brox_CUDA();
+
+
+        class PyrLKOpticalFlow : public virtual DenseOpticalFlowExt
+        {
+        public:
+            CV_PURE_PROPERTY(int, WindowSize)
+            CV_PURE_PROPERTY(int, MaxLevel)
+            CV_PURE_PROPERTY(int, Iterations)
+        };
+        CV_EXPORTS Ptr<PyrLKOpticalFlow> createOptFlow_PyrLK_CUDA();

    //! @}
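Call sites migrate accordingly: instead of tuning these algorithms through Algorithm::set with string keys, client code uses the typed interface declared above. A minimal sketch (the parameter values are illustrative, not recommended defaults):

    #include <opencv2/superres/optical_flow.hpp>

    using cv::Ptr;
    using namespace cv::superres;

    Ptr<DualTVL1OpticalFlow> makeTunedFlow()
    {
        Ptr<DualTVL1OpticalFlow> flow = createOptFlow_DualTVL1();
        flow->setTau(0.25);           // was: flow->set("tau", 0.25)
        flow->setScalesNumber(5);     // was: flow->set("nscales", 5)
        flow->setWarpingsNumber(5);   // was: flow->set("warps", 5)
        return flow;
    }

A misspelled parameter name is now a compile error rather than a CV_StsBadArg failure at run time.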
diff --git a/modules/superres/perf/perf_superres.cpp b/modules/superres/perf/perf_superres.cpp
index e8b3ef7545..f9c881aff7 100644
--- a/modules/superres/perf/perf_superres.cpp
+++ b/modules/superres/perf/perf_superres.cpp
@@ -138,10 +138,10 @@ PERF_TEST_P(Size_MatType, SuperResolution_BTVL1,
    {
        Ptr<SuperResolution> superRes = createSuperResolution_BTVL1_CUDA();

-        superRes->set("scale", scale);
-        superRes->set("iterations", iterations);
-        superRes->set("temporalAreaRadius", temporalAreaRadius);
-        superRes->set("opticalFlow", opticalFlow);
+        superRes->setScale(scale);
+        superRes->setIterations(iterations);
+        superRes->setTemporalAreaRadius(temporalAreaRadius);
+        superRes->setOpticalFlow(opticalFlow);

        superRes->setInput(makePtr<OneFrameSource_CUDA>(GpuMat(frame)));

@@ -156,10 +156,10 @@ PERF_TEST_P(Size_MatType, SuperResolution_BTVL1,
    {
        Ptr<SuperResolution> superRes = createSuperResolution_BTVL1();

-        superRes->set("scale", scale);
-        superRes->set("iterations", iterations);
-        superRes->set("temporalAreaRadius", temporalAreaRadius);
-        superRes->set("opticalFlow", opticalFlow);
+        superRes->setScale(scale);
+        superRes->setIterations(iterations);
+        superRes->setTemporalAreaRadius(temporalAreaRadius);
+        superRes->setOpticalFlow(opticalFlow);

        superRes->setInput(makePtr<OneFrameSource_CPU>(frame));

@@ -198,10 +198,10 @@ OCL_PERF_TEST_P(SuperResolution_BTVL1 ,BTVL1,
    Ptr<DenseOpticalFlowExt> opticalFlow(new ZeroOpticalFlow);
    Ptr<SuperResolution> superRes = createSuperResolution_BTVL1();

-    superRes->set("scale", scale);
-    superRes->set("iterations", iterations);
-    superRes->set("temporalAreaRadius", temporalAreaRadius);
-    superRes->set("opticalFlow", opticalFlow);
+    superRes->setScale(scale);
+    superRes->setIterations(iterations);
+    superRes->setTemporalAreaRadius(temporalAreaRadius);
+    superRes->setOpticalFlow(opticalFlow);

    superRes->setInput(makePtr<OneFrameSource_CPU>(frame));

diff --git a/modules/superres/src/btv_l1.cpp b/modules/superres/src/btv_l1.cpp
index 6b6c3c3e7c..291fa1bcbe 100644
--- a/modules/superres/src/btv_l1.cpp
+++ b/modules/superres/src/btv_l1.cpp
@@ -460,7 +460,7 @@ namespace
            func(_src, _dst, btvKernelSize, btvWeights);
        }

-    class BTVL1_Base
+    class BTVL1_Base : public cv::superres::SuperResolution
    {
    public:
        BTVL1_Base();
@@ -470,6 +470,17 @@ namespace

        void collectGarbage();

+        CV_IMPL_PROPERTY(int, Scale, scale_)
+        CV_IMPL_PROPERTY(int, Iterations, iterations_)
+        CV_IMPL_PROPERTY(double, Tau, tau_)
+        CV_IMPL_PROPERTY(double, Labmda, lambda_)
+        CV_IMPL_PROPERTY(double, Alpha, alpha_)
+        CV_IMPL_PROPERTY(int, KernelSize, btvKernelSize_)
+        CV_IMPL_PROPERTY(int, BlurKernelSize, blurKernelSize_)
+        CV_IMPL_PROPERTY(double, BlurSigma, blurSigma_)
+        CV_IMPL_PROPERTY(int, TemporalAreaRadius, temporalAreaRadius_)
+        CV_IMPL_PROPERTY_S(Ptr<cv::superres::DenseOpticalFlowExt>, OpticalFlow, opticalFlow_)
+
    protected:
        int scale_;
        int iterations_;
@@ -479,7 +490,8 @@ namespace
        int btvKernelSize_;
        int blurKernelSize_;
        double blurSigma_;
-        Ptr<DenseOpticalFlowExt> opticalFlow_;
+        int temporalAreaRadius_; // not used in some implementations
+        Ptr<DenseOpticalFlowExt> opticalFlow_;

    private:
        bool ocl_process(InputArrayOfArrays src, OutputArray dst, InputArrayOfArrays forwardMotions,
@@ -539,6 +551,7 @@ namespace
        btvKernelSize_ = 7;
        blurKernelSize_ = 5;
        blurSigma_ = 0.0;
+        temporalAreaRadius_ = 0;
        opticalFlow_ = createOptFlow_Farneback();
curBlurKernelSize_ = -1; @@ -781,12 +794,9 @@ namespace //////////////////////////////////////////////////////////////////// - class BTVL1 : - public SuperResolution, private BTVL1_Base + class BTVL1 : public BTVL1_Base { public: - AlgorithmInfo* info() const; - BTVL1(); void collectGarbage(); @@ -799,8 +809,6 @@ namespace bool ocl_processImpl(Ptr& frameSource, OutputArray output); private: - int temporalAreaRadius_; - void readNextFrame(Ptr& frameSource); bool ocl_readNextFrame(Ptr& frameSource); @@ -841,18 +849,6 @@ namespace #endif }; - CV_INIT_ALGORITHM(BTVL1, "SuperResolution.BTVL1", - obj.info()->addParam(obj, "scale", obj.scale_, false, 0, 0, "Scale factor."); - obj.info()->addParam(obj, "iterations", obj.iterations_, false, 0, 0, "Iteration count."); - obj.info()->addParam(obj, "tau", obj.tau_, false, 0, 0, "Asymptotic value of steepest descent method."); - obj.info()->addParam(obj, "lambda", obj.lambda_, false, 0, 0, "Weight parameter to balance data term and smoothness term."); - obj.info()->addParam(obj, "alpha", obj.alpha_, false, 0, 0, "Parameter of spacial distribution in Bilateral-TV."); - obj.info()->addParam(obj, "btvKernelSize", obj.btvKernelSize_, false, 0, 0, "Kernel size of Bilateral-TV filter."); - obj.info()->addParam(obj, "blurKernelSize", obj.blurKernelSize_, false, 0, 0, "Gaussian blur kernel size."); - obj.info()->addParam(obj, "blurSigma", obj.blurSigma_, false, 0, 0, "Gaussian blur sigma."); - obj.info()->addParam(obj, "temporalAreaRadius", obj.temporalAreaRadius_, false, 0, 0, "Radius of the temporal search area."); - obj.info()->addParam(obj, "opticalFlow", obj.opticalFlow_, false, 0, 0, "Dense optical flow algorithm.")) - BTVL1::BTVL1() { temporalAreaRadius_ = 4; @@ -1101,7 +1097,7 @@ namespace } } -Ptr cv::superres::createSuperResolution_BTVL1() +Ptr cv::superres::createSuperResolution_BTVL1() { return makePtr(); } diff --git a/modules/superres/src/btv_l1_cuda.cpp b/modules/superres/src/btv_l1_cuda.cpp index f72e3846e8..b8d3eace12 100644 --- a/modules/superres/src/btv_l1_cuda.cpp +++ b/modules/superres/src/btv_l1_cuda.cpp @@ -207,7 +207,7 @@ namespace funcs[src.channels()](src, dst, ksize); } - class BTVL1_CUDA_Base + class BTVL1_CUDA_Base : public cv::superres::SuperResolution { public: BTVL1_CUDA_Base(); @@ -218,6 +218,17 @@ namespace void collectGarbage(); + CV_IMPL_PROPERTY(int, Scale, scale_) + CV_IMPL_PROPERTY(int, Iterations, iterations_) + CV_IMPL_PROPERTY(double, Tau, tau_) + CV_IMPL_PROPERTY(double, Labmda, lambda_) + CV_IMPL_PROPERTY(double, Alpha, alpha_) + CV_IMPL_PROPERTY(int, KernelSize, btvKernelSize_) + CV_IMPL_PROPERTY(int, BlurKernelSize, blurKernelSize_) + CV_IMPL_PROPERTY(double, BlurSigma, blurSigma_) + CV_IMPL_PROPERTY(int, TemporalAreaRadius, temporalAreaRadius_) + CV_IMPL_PROPERTY_S(Ptr, OpticalFlow, opticalFlow_) + protected: int scale_; int iterations_; @@ -227,7 +238,8 @@ namespace int btvKernelSize_; int blurKernelSize_; double blurSigma_; - Ptr opticalFlow_; + int temporalAreaRadius_; + Ptr opticalFlow_; private: std::vector > filters_; @@ -272,6 +284,7 @@ namespace #else opticalFlow_ = createOptFlow_Farneback(); #endif + temporalAreaRadius_ = 0; curBlurKernelSize_ = -1; curBlurSigma_ = -1.0; @@ -401,11 +414,9 @@ namespace //////////////////////////////////////////////////////////// - class BTVL1_CUDA : public SuperResolution, private BTVL1_CUDA_Base + class BTVL1_CUDA : public BTVL1_CUDA_Base { public: - AlgorithmInfo* info() const; - BTVL1_CUDA(); void collectGarbage(); @@ -415,8 +426,6 @@ namespace void processImpl(Ptr& 
frameSource, OutputArray output); private: - int temporalAreaRadius_; - void readNextFrame(Ptr& frameSource); void processFrame(int idx); @@ -438,18 +447,6 @@ namespace GpuMat finalOutput_; }; - CV_INIT_ALGORITHM(BTVL1_CUDA, "SuperResolution.BTVL1_CUDA", - obj.info()->addParam(obj, "scale", obj.scale_, false, 0, 0, "Scale factor."); - obj.info()->addParam(obj, "iterations", obj.iterations_, false, 0, 0, "Iteration count."); - obj.info()->addParam(obj, "tau", obj.tau_, false, 0, 0, "Asymptotic value of steepest descent method."); - obj.info()->addParam(obj, "lambda", obj.lambda_, false, 0, 0, "Weight parameter to balance data term and smoothness term."); - obj.info()->addParam(obj, "alpha", obj.alpha_, false, 0, 0, "Parameter of spacial distribution in Bilateral-TV."); - obj.info()->addParam(obj, "btvKernelSize", obj.btvKernelSize_, false, 0, 0, "Kernel size of Bilateral-TV filter."); - obj.info()->addParam(obj, "blurKernelSize", obj.blurKernelSize_, false, 0, 0, "Gaussian blur kernel size."); - obj.info()->addParam(obj, "blurSigma", obj.blurSigma_, false, 0, 0, "Gaussian blur sigma."); - obj.info()->addParam(obj, "temporalAreaRadius", obj.temporalAreaRadius_, false, 0, 0, "Radius of the temporal search area."); - obj.info()->addParam(obj, "opticalFlow", obj.opticalFlow_, false, 0, 0, "Dense optical flow algorithm.")); - BTVL1_CUDA::BTVL1_CUDA() { temporalAreaRadius_ = 4; diff --git a/modules/superres/src/optical_flow.cpp b/modules/superres/src/optical_flow.cpp index 52fc2648e2..a08a58bd9e 100644 --- a/modules/superres/src/optical_flow.cpp +++ b/modules/superres/src/optical_flow.cpp @@ -53,7 +53,7 @@ using namespace cv::superres::detail; namespace { - class CpuOpticalFlow : public DenseOpticalFlowExt + class CpuOpticalFlow : public virtual cv::superres::DenseOpticalFlowExt { public: explicit CpuOpticalFlow(int work_type); @@ -173,12 +173,20 @@ namespace namespace { - class Farneback : public CpuOpticalFlow + class Farneback : public CpuOpticalFlow, public cv::superres::FarnebackOpticalFlow { public: - AlgorithmInfo* info() const; - Farneback(); + void calc(InputArray frame0, InputArray frame1, OutputArray flow1, OutputArray flow2); + void collectGarbage(); + + CV_IMPL_PROPERTY(double, PyrScale, pyrScale_) + CV_IMPL_PROPERTY(int, LevelsNumber, numLevels_) + CV_IMPL_PROPERTY(int, WindowSize, winSize_) + CV_IMPL_PROPERTY(int, Iterations, numIters_) + CV_IMPL_PROPERTY(int, PolyN, polyN_) + CV_IMPL_PROPERTY(double, PolySigma, polySigma_) + CV_IMPL_PROPERTY(int, Flags, flags_) protected: void impl(InputArray input0, InputArray input1, OutputArray dst); @@ -193,15 +201,6 @@ namespace int flags_; }; - CV_INIT_ALGORITHM(Farneback, "DenseOpticalFlowExt.Farneback", - obj.info()->addParam(obj, "pyrScale", obj.pyrScale_); - obj.info()->addParam(obj, "numLevels", obj.numLevels_); - obj.info()->addParam(obj, "winSize", obj.winSize_); - obj.info()->addParam(obj, "numIters", obj.numIters_); - obj.info()->addParam(obj, "polyN", obj.polyN_); - obj.info()->addParam(obj, "polySigma", obj.polySigma_); - obj.info()->addParam(obj, "flags", obj.flags_)) - Farneback::Farneback() : CpuOpticalFlow(CV_8UC1) { pyrScale_ = 0.5; @@ -213,6 +212,16 @@ namespace flags_ = 0; } + void Farneback::calc(InputArray frame0, InputArray frame1, OutputArray flow1, OutputArray flow2) + { + CpuOpticalFlow::calc(frame0, frame1, flow1, flow2); + } + + void Farneback::collectGarbage() + { + CpuOpticalFlow::collectGarbage(); + } + void Farneback::impl(InputArray input0, InputArray input1, OutputArray dst) { calcOpticalFlowFarneback(input0, 
input1, (InputOutputArray)dst, pyrScale_, @@ -221,7 +230,7 @@ namespace } } -Ptr cv::superres::createOptFlow_Farneback() +Ptr cv::superres::createOptFlow_Farneback() { return makePtr(); } @@ -319,65 +328,53 @@ Ptr cv::superres::createOptFlow_Simple() namespace { - class DualTVL1 : public CpuOpticalFlow + #define CV_WRAP_PROPERTY(type, name, internal_name, internal_obj) \ + type get##name() const \ + { \ + return internal_obj->get##internal_name(); \ + } \ + void set##name(type _name) \ + { \ + internal_obj->set##internal_name(_name); \ + } + + #define CV_WRAP_SAME_PROPERTY(type, name, internal_obj) CV_WRAP_PROPERTY(type, name, name, internal_obj) + + class DualTVL1 : public CpuOpticalFlow, public virtual cv::superres::DualTVL1OpticalFlow { public: - AlgorithmInfo* info() const; - DualTVL1(); - + void calc(InputArray frame0, InputArray frame1, OutputArray flow1, OutputArray flow2); void collectGarbage(); + CV_WRAP_SAME_PROPERTY(double, Tau, alg_) + CV_WRAP_SAME_PROPERTY(double, Lambda, alg_) + CV_WRAP_SAME_PROPERTY(double, Theta, alg_) + CV_WRAP_SAME_PROPERTY(int, ScalesNumber, alg_) + CV_WRAP_SAME_PROPERTY(int, WarpingsNumber, alg_) + CV_WRAP_SAME_PROPERTY(double, Epsilon, alg_) + CV_WRAP_PROPERTY(int, Iterations, OuterIterations, alg_) + CV_WRAP_SAME_PROPERTY(bool, UseInitialFlow, alg_) + protected: void impl(InputArray input0, InputArray input1, OutputArray dst); private: - double tau_; - double lambda_; - double theta_; - int nscales_; - int warps_; - double epsilon_; - int iterations_; - bool useInitialFlow_; - - Ptr alg_; + Ptr alg_; }; - CV_INIT_ALGORITHM(DualTVL1, "DenseOpticalFlowExt.DualTVL1", - obj.info()->addParam(obj, "tau", obj.tau_); - obj.info()->addParam(obj, "lambda", obj.lambda_); - obj.info()->addParam(obj, "theta", obj.theta_); - obj.info()->addParam(obj, "nscales", obj.nscales_); - obj.info()->addParam(obj, "warps", obj.warps_); - obj.info()->addParam(obj, "epsilon", obj.epsilon_); - obj.info()->addParam(obj, "iterations", obj.iterations_); - obj.info()->addParam(obj, "useInitialFlow", obj.useInitialFlow_)) - DualTVL1::DualTVL1() : CpuOpticalFlow(CV_8UC1) { alg_ = cv::createOptFlow_DualTVL1(); - tau_ = alg_->getDouble("tau"); - lambda_ = alg_->getDouble("lambda"); - theta_ = alg_->getDouble("theta"); - nscales_ = alg_->getInt("nscales"); - warps_ = alg_->getInt("warps"); - epsilon_ = alg_->getDouble("epsilon"); - iterations_ = alg_->getInt("iterations"); - useInitialFlow_ = alg_->getBool("useInitialFlow"); } - void DualTVL1::impl(InputArray input0, InputArray input1, OutputArray dst) + void DualTVL1::calc(InputArray frame0, InputArray frame1, OutputArray flow1, OutputArray flow2) { - alg_->set("tau", tau_); - alg_->set("lambda", lambda_); - alg_->set("theta", theta_); - alg_->set("nscales", nscales_); - alg_->set("warps", warps_); - alg_->set("epsilon", epsilon_); - alg_->set("iterations", iterations_); - alg_->set("useInitialFlow", useInitialFlow_); + CpuOpticalFlow::calc(frame0, frame1, flow1, flow2); + } + void DualTVL1::impl(InputArray input0, InputArray input1, OutputArray dst) + { alg_->calc(input0, input1, (InputOutputArray)dst); } @@ -388,7 +385,7 @@ namespace } } -Ptr cv::superres::createOptFlow_DualTVL1() +Ptr cv::superres::createOptFlow_DualTVL1() { return makePtr(); } @@ -398,35 +395,35 @@ Ptr cv::superres::createOptFlow_DualTVL1() #ifndef HAVE_OPENCV_CUDAOPTFLOW -Ptr cv::superres::createOptFlow_Farneback_CUDA() +Ptr cv::superres::createOptFlow_Farneback_CUDA() { CV_Error(cv::Error::StsNotImplemented, "The called functionality is disabled for current build 
or platform"); - return Ptr(); + return Ptr(); } -Ptr cv::superres::createOptFlow_DualTVL1_CUDA() +Ptr cv::superres::createOptFlow_DualTVL1_CUDA() { CV_Error(cv::Error::StsNotImplemented, "The called functionality is disabled for current build or platform"); - return Ptr(); + return Ptr(); } -Ptr cv::superres::createOptFlow_Brox_CUDA() +Ptr cv::superres::createOptFlow_Brox_CUDA() { CV_Error(cv::Error::StsNotImplemented, "The called functionality is disabled for current build or platform"); - return Ptr(); + return Ptr(); } -Ptr cv::superres::createOptFlow_PyrLK_CUDA() +Ptr cv::superres::createOptFlow_PyrLK_CUDA() { CV_Error(cv::Error::StsNotImplemented, "The called functionality is disabled for current build or platform"); - return Ptr(); + return Ptr(); } #else // HAVE_OPENCV_CUDAOPTFLOW namespace { - class GpuOpticalFlow : public DenseOpticalFlowExt + class GpuOpticalFlow : public virtual cv::superres::DenseOpticalFlowExt { public: explicit GpuOpticalFlow(int work_type); @@ -494,15 +491,20 @@ namespace namespace { - class Brox_CUDA : public GpuOpticalFlow + class Brox_CUDA : public GpuOpticalFlow, public virtual cv::superres::BroxOpticalFlow { public: - AlgorithmInfo* info() const; - Brox_CUDA(); - + void calc(InputArray frame0, InputArray frame1, OutputArray flow1, OutputArray flow2); void collectGarbage(); + CV_IMPL_PROPERTY(double, Alpha, alpha_) + CV_IMPL_PROPERTY(double, Gamma, gamma_) + CV_IMPL_PROPERTY(double, ScaleFactor, scaleFactor_) + CV_IMPL_PROPERTY(int, InnerIterations, innerIterations_) + CV_IMPL_PROPERTY(int, OuterIterations, outerIterations_) + CV_IMPL_PROPERTY(int, SolverIterations, solverIterations_) + protected: void impl(const GpuMat& input0, const GpuMat& input1, GpuMat& dst1, GpuMat& dst2); @@ -517,14 +519,6 @@ namespace Ptr alg_; }; - CV_INIT_ALGORITHM(Brox_CUDA, "DenseOpticalFlowExt.Brox_CUDA", - obj.info()->addParam(obj, "alpha", obj.alpha_, false, 0, 0, "Flow smoothness"); - obj.info()->addParam(obj, "gamma", obj.gamma_, false, 0, 0, "Gradient constancy importance"); - obj.info()->addParam(obj, "scaleFactor", obj.scaleFactor_, false, 0, 0, "Pyramid scale factor"); - obj.info()->addParam(obj, "innerIterations", obj.innerIterations_, false, 0, 0, "Number of lagged non-linearity iterations (inner loop)"); - obj.info()->addParam(obj, "outerIterations", obj.outerIterations_, false, 0, 0, "Number of warping iterations (number of pyramid levels)"); - obj.info()->addParam(obj, "solverIterations", obj.solverIterations_, false, 0, 0, "Number of linear system solver iterations")) - Brox_CUDA::Brox_CUDA() : GpuOpticalFlow(CV_32FC1) { alg_ = cuda::BroxOpticalFlow::create(0.197f, 50.0f, 0.8f, 10, 77, 10); @@ -537,6 +531,11 @@ namespace solverIterations_ = alg_->getSolverIterations(); } + void Brox_CUDA::calc(InputArray frame0, InputArray frame1, OutputArray flow1, OutputArray flow2) + { + GpuOpticalFlow::calc(frame0, frame1, flow1, flow2); + } + void Brox_CUDA::impl(const GpuMat& input0, const GpuMat& input1, GpuMat& dst1, GpuMat& dst2) { alg_->setFlowSmoothness(alpha_); @@ -563,7 +562,7 @@ namespace } } -Ptr cv::superres::createOptFlow_Brox_CUDA() +Ptr cv::superres::createOptFlow_Brox_CUDA() { return makePtr(); } @@ -573,15 +572,17 @@ Ptr cv::superres::createOptFlow_Brox_CUDA() namespace { - class PyrLK_CUDA : public GpuOpticalFlow + class PyrLK_CUDA : public GpuOpticalFlow, public cv::superres::PyrLKOpticalFlow { public: - AlgorithmInfo* info() const; - PyrLK_CUDA(); - + void calc(InputArray frame0, InputArray frame1, OutputArray flow1, OutputArray flow2); void 
collectGarbage(); + CV_IMPL_PROPERTY(int, WindowSize, winSize_) + CV_IMPL_PROPERTY(int, MaxLevel, maxLevel_) + CV_IMPL_PROPERTY(int, Iterations, iterations_) + protected: void impl(const GpuMat& input0, const GpuMat& input1, GpuMat& dst1, GpuMat& dst2); @@ -593,11 +594,6 @@ namespace Ptr alg_; }; - CV_INIT_ALGORITHM(PyrLK_CUDA, "DenseOpticalFlowExt.PyrLK_CUDA", - obj.info()->addParam(obj, "winSize", obj.winSize_); - obj.info()->addParam(obj, "maxLevel", obj.maxLevel_); - obj.info()->addParam(obj, "iterations", obj.iterations_)) - PyrLK_CUDA::PyrLK_CUDA() : GpuOpticalFlow(CV_8UC1) { alg_ = cuda::DensePyrLKOpticalFlow::create(); @@ -607,6 +603,11 @@ namespace iterations_ = alg_->getNumIters(); } + void PyrLK_CUDA::calc(InputArray frame0, InputArray frame1, OutputArray flow1, OutputArray flow2) + { + GpuOpticalFlow::calc(frame0, frame1, flow1, flow2); + } + void PyrLK_CUDA::impl(const GpuMat& input0, const GpuMat& input1, GpuMat& dst1, GpuMat& dst2) { alg_->setWinSize(Size(winSize_, winSize_)); @@ -630,7 +631,7 @@ namespace } } -Ptr cv::superres::createOptFlow_PyrLK_CUDA() +Ptr cv::superres::createOptFlow_PyrLK_CUDA() { return makePtr(); } @@ -640,15 +641,21 @@ Ptr cv::superres::createOptFlow_PyrLK_CUDA() namespace { - class Farneback_CUDA : public GpuOpticalFlow + class Farneback_CUDA : public GpuOpticalFlow, public cv::superres::FarnebackOpticalFlow { public: - AlgorithmInfo* info() const; - Farneback_CUDA(); - + void calc(InputArray frame0, InputArray frame1, OutputArray flow1, OutputArray flow2); void collectGarbage(); + CV_IMPL_PROPERTY(double, PyrScale, pyrScale_) + CV_IMPL_PROPERTY(int, LevelsNumber, numLevels_) + CV_IMPL_PROPERTY(int, WindowSize, winSize_) + CV_IMPL_PROPERTY(int, Iterations, numIters_) + CV_IMPL_PROPERTY(int, PolyN, polyN_) + CV_IMPL_PROPERTY(double, PolySigma, polySigma_) + CV_IMPL_PROPERTY(int, Flags, flags_) + protected: void impl(const GpuMat& input0, const GpuMat& input1, GpuMat& dst1, GpuMat& dst2); @@ -664,15 +671,6 @@ namespace Ptr alg_; }; - CV_INIT_ALGORITHM(Farneback_CUDA, "DenseOpticalFlowExt.Farneback_CUDA", - obj.info()->addParam(obj, "pyrScale", obj.pyrScale_); - obj.info()->addParam(obj, "numLevels", obj.numLevels_); - obj.info()->addParam(obj, "winSize", obj.winSize_); - obj.info()->addParam(obj, "numIters", obj.numIters_); - obj.info()->addParam(obj, "polyN", obj.polyN_); - obj.info()->addParam(obj, "polySigma", obj.polySigma_); - obj.info()->addParam(obj, "flags", obj.flags_)) - Farneback_CUDA::Farneback_CUDA() : GpuOpticalFlow(CV_8UC1) { alg_ = cuda::FarnebackOpticalFlow::create(); @@ -686,6 +684,11 @@ namespace flags_ = alg_->getFlags(); } + void Farneback_CUDA::calc(InputArray frame0, InputArray frame1, OutputArray flow1, OutputArray flow2) + { + GpuOpticalFlow::calc(frame0, frame1, flow1, flow2); + } + void Farneback_CUDA::impl(const GpuMat& input0, const GpuMat& input1, GpuMat& dst1, GpuMat& dst2) { alg_->setPyrScale(pyrScale_); @@ -713,7 +716,7 @@ namespace } } -Ptr cv::superres::createOptFlow_Farneback_CUDA() +Ptr cv::superres::createOptFlow_Farneback_CUDA() { return makePtr(); } @@ -723,15 +726,22 @@ Ptr cv::superres::createOptFlow_Farneback_CUDA() namespace { - class DualTVL1_CUDA : public GpuOpticalFlow + class DualTVL1_CUDA : public GpuOpticalFlow, public cv::superres::DualTVL1OpticalFlow { public: - AlgorithmInfo* info() const; - DualTVL1_CUDA(); - + void calc(InputArray frame0, InputArray frame1, OutputArray flow1, OutputArray flow2); void collectGarbage(); + CV_IMPL_PROPERTY(double, Tau, tau_) + CV_IMPL_PROPERTY(double, Lambda, 
lambda_) + CV_IMPL_PROPERTY(double, Theta, theta_) + CV_IMPL_PROPERTY(int, ScalesNumber, nscales_) + CV_IMPL_PROPERTY(int, WarpingsNumber, warps_) + CV_IMPL_PROPERTY(double, Epsilon, epsilon_) + CV_IMPL_PROPERTY(int, Iterations, iterations_) + CV_IMPL_PROPERTY(bool, UseInitialFlow, useInitialFlow_) + protected: void impl(const GpuMat& input0, const GpuMat& input1, GpuMat& dst1, GpuMat& dst2); @@ -748,16 +758,6 @@ namespace Ptr alg_; }; - CV_INIT_ALGORITHM(DualTVL1_CUDA, "DenseOpticalFlowExt.DualTVL1_CUDA", - obj.info()->addParam(obj, "tau", obj.tau_); - obj.info()->addParam(obj, "lambda", obj.lambda_); - obj.info()->addParam(obj, "theta", obj.theta_); - obj.info()->addParam(obj, "nscales", obj.nscales_); - obj.info()->addParam(obj, "warps", obj.warps_); - obj.info()->addParam(obj, "epsilon", obj.epsilon_); - obj.info()->addParam(obj, "iterations", obj.iterations_); - obj.info()->addParam(obj, "useInitialFlow", obj.useInitialFlow_)) - DualTVL1_CUDA::DualTVL1_CUDA() : GpuOpticalFlow(CV_8UC1) { alg_ = cuda::OpticalFlowDual_TVL1::create(); @@ -772,6 +772,11 @@ namespace useInitialFlow_ = alg_->getUseInitialFlow(); } + void DualTVL1_CUDA::calc(InputArray frame0, InputArray frame1, OutputArray flow1, OutputArray flow2) + { + GpuOpticalFlow::calc(frame0, frame1, flow1, flow2); + } + void DualTVL1_CUDA::impl(const GpuMat& input0, const GpuMat& input1, GpuMat& dst1, GpuMat& dst2) { alg_->setTau(tau_); @@ -800,7 +805,7 @@ namespace } } -Ptr cv::superres::createOptFlow_DualTVL1_CUDA() +Ptr cv::superres::createOptFlow_DualTVL1_CUDA() { return makePtr(); } diff --git a/modules/superres/src/super_resolution.cpp b/modules/superres/src/super_resolution.cpp index 215416dd70..3eae5a6fd2 100644 --- a/modules/superres/src/super_resolution.cpp +++ b/modules/superres/src/super_resolution.cpp @@ -45,11 +45,6 @@ using namespace cv; using namespace cv::superres; -bool cv::superres::initModule_superres() -{ - return !createSuperResolution_BTVL1().empty(); -} - cv::superres::SuperResolution::SuperResolution() { frameSource_ = createFrameSource_Empty(); diff --git a/modules/superres/test/test_superres.cpp b/modules/superres/test/test_superres.cpp index 980c8ed601..74a90bdc2d 100644 --- a/modules/superres/test/test_superres.cpp +++ b/modules/superres/test/test_superres.cpp @@ -222,11 +222,11 @@ void SuperResolution::RunTest(cv::Ptr superRes) ASSERT_FALSE( superRes.empty() ); - const int btvKernelSize = superRes->getInt("btvKernelSize"); + const int btvKernelSize = superRes->getKernelSize(); - superRes->set("scale", scale); - superRes->set("iterations", iterations); - superRes->set("temporalAreaRadius", temporalAreaRadius); + superRes->setScale(scale); + superRes->setIterations(iterations); + superRes->setTemporalAreaRadius(temporalAreaRadius); cv::Ptr goldSource(new AllignedFrameSource(cv::superres::createFrameSource_Video(inputVideoName), scale)); cv::Ptr lowResSource(new DegradeFrameSource( diff --git a/modules/video/include/opencv2/video/tracking.hpp b/modules/video/include/opencv2/video/tracking.hpp index d54547ef71..40e9ffab88 100644 --- a/modules/video/include/opencv2/video/tracking.hpp +++ b/modules/video/include/opencv2/video/tracking.hpp @@ -380,6 +380,21 @@ public: }; +class CV_EXPORTS_W DenseOpticalFlow : public Algorithm +{ +public: + /** @brief Calculates an optical flow. + + @param I0 first 8-bit single-channel input image. + @param I1 second input image of the same size and the same type as prev. + @param flow computed flow image that has the same size as prev and type CV_32FC2. 
+     */
+    CV_WRAP virtual void calc( InputArray I0, InputArray I1, InputOutputArray flow ) = 0;
+    /** @brief Releases all inner buffers.
+     */
+    CV_WRAP virtual void collectGarbage() = 0;
+};
+
 /** @brief "Dual TV L1" Optical Flow Algorithm.

 The class implements the "Dual TV L1" optical flow algorithm described in @cite Zach2007 and
@@ -422,24 +437,38 @@ constructing the class instance:

 C. Zach, T. Pock and H. Bischof, "A Duality Based Approach for Realtime TV-L1 Optical Flow".
 Javier Sanchez, Enric Meinhardt-Llopis and Gabriele Facciolo. "TV-L1 Optical Flow Estimation".
 */
-class CV_EXPORTS_W DenseOpticalFlow : public Algorithm
+class CV_EXPORTS_W DualTVL1OpticalFlow : public DenseOpticalFlow
 {
 public:
-    /** @brief Calculates an optical flow.
-
-    @param I0 first 8-bit single-channel input image.
-    @param I1 second input image of the same size and the same type as prev.
-    @param flow computed flow image that has the same size as prev and type CV_32FC2.
-     */
-    CV_WRAP virtual void calc( InputArray I0, InputArray I1, InputOutputArray flow ) = 0;
-    /** @brief Releases all inner buffers.
-     */
-    CV_WRAP virtual void collectGarbage() = 0;
+    //! @name Time step of the numerical scheme
+    CV_PURE_PROPERTY(double, Tau)
+    //! @name Weight parameter for the data term, attachment parameter
+    CV_PURE_PROPERTY(double, Lambda)
+    //! @name Weight parameter for (u - v)^2, tightness parameter
+    CV_PURE_PROPERTY(double, Theta)
+    //! @name coefficient for additional illumination variation term
+    CV_PURE_PROPERTY(double, Gamma)
+    //! @name Number of scales used to create the pyramid of images
+    CV_PURE_PROPERTY(int, ScalesNumber)
+    //! @name Number of warpings per scale
+    CV_PURE_PROPERTY(int, WarpingsNumber)
+    //! @name Stopping criterion threshold used in the numerical scheme, which is a trade-off between precision and running time
+    CV_PURE_PROPERTY(double, Epsilon)
+    //! @name Inner iterations (between outlier filtering) used in the numerical scheme
+    CV_PURE_PROPERTY(int, InnerIterations)
+    //! @name Outer iterations (number of inner loops) used in the numerical scheme
+    CV_PURE_PROPERTY(int, OuterIterations)
+    //! @name Use initial flow
+    CV_PURE_PROPERTY(bool, UseInitialFlow)
+    //! @name Step between scales (<1)
+    CV_PURE_PROPERTY(double, ScaleStep)
+    //! @name Median filter kernel size (1 = no filter) (3 or 5)
+    CV_PURE_PROPERTY(int, MedianFiltering)
 };

 /** @brief Creates instance of cv::DenseOpticalFlow
 */
-CV_EXPORTS_W Ptr<DenseOpticalFlow> createOptFlow_DualTVL1();
+CV_EXPORTS_W Ptr<DualTVL1OpticalFlow> createOptFlow_DualTVL1();

 //! @} video_track
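Client code in the video module follows the same migration; a minimal sketch of the updated calling convention (the frame names and parameter values are illustrative):

    #include <opencv2/video/tracking.hpp>

    void computeFlow(const cv::Mat& I0, const cv::Mat& I1, cv::Mat& flow)
    {
        // I0 and I1 must be 8-bit single-channel images of the same size
        cv::Ptr<cv::DualTVL1OpticalFlow> alg = cv::createOptFlow_DualTVL1();
        alg->setMedianFiltering(5);      // was: alg->setInt("medianFiltering", 5)
        alg->setUseInitialFlow(false);   // was: alg->setBool("useInitialFlow", false)
        alg->calc(I0, I1, flow);         // flow: CV_32FC2, same size as I0
    }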
diff --git a/modules/video/perf/opencl/perf_optflow_dualTVL1.cpp b/modules/video/perf/opencl/perf_optflow_dualTVL1.cpp
index 90e656d81e..4f862f04bf 100644
--- a/modules/video/perf/opencl/perf_optflow_dualTVL1.cpp
+++ b/modules/video/perf/opencl/perf_optflow_dualTVL1.cpp
@@ -87,11 +87,11 @@ OCL_PERF_TEST_P(OpticalFlowDualTVL1Fixture, OpticalFlowDualTVL1,
        declare.in(uFrame0, uFrame1, WARMUP_READ).out(uFlow, WARMUP_READ);

        //create algorithm
-        cv::Ptr<cv::DenseOpticalFlow> alg = cv::createOptFlow_DualTVL1();
+        cv::Ptr<cv::DualTVL1OpticalFlow> alg = cv::createOptFlow_DualTVL1();

        //set parameters
-        alg->set("scaleStep", scaleStep);
-        alg->setInt("medianFiltering", medianFiltering);
+        alg->setScaleStep(scaleStep);
+        alg->setMedianFiltering(medianFiltering);

        if (useInitFlow)
        {
@@ -100,7 +100,7 @@ OCL_PERF_TEST_P(OpticalFlowDualTVL1Fixture, OpticalFlowDualTVL1,
            }

            //set flag to use initial flow
-            alg->setBool("useInitialFlow", useInitFlow);
+            alg->setUseInitialFlow(useInitFlow);

            OCL_TEST_CYCLE() alg->calc(uFrame0, uFrame1, uFlow);

@@ -109,4 +109,4 @@ OCL_PERF_TEST_P(OpticalFlowDualTVL1Fixture, OpticalFlowDualTVL1,
 }
 } // namespace cvtest::ocl
-#endif // HAVE_OPENCL
\ No newline at end of file
+#endif // HAVE_OPENCL
diff --git a/modules/video/src/bgfg_KNN.cpp b/modules/video/src/bgfg_KNN.cpp
index 63ef300e04..c551ce4c9e 100755
--- a/modules/video/src/bgfg_KNN.cpp
+++ b/modules/video/src/bgfg_KNN.cpp
@@ -160,8 +160,6 @@ public:
        nNextLongUpdate = Scalar::all(0);
    }

-    virtual AlgorithmInfo* info() const { return 0; }
-
    virtual int getHistory() const { return history; }
    virtual void setHistory(int _nframes) { history = _nframes; }

diff --git a/modules/video/src/bgfg_gaussmix2.cpp b/modules/video/src/bgfg_gaussmix2.cpp
index e2d875517d..226af9dc02 100644
--- a/modules/video/src/bgfg_gaussmix2.cpp
+++ b/modules/video/src/bgfg_gaussmix2.cpp
@@ -230,8 +230,6 @@ public:
        }
    }

-    virtual AlgorithmInfo* info() const { return 0; }
-
    virtual int getHistory() const { return history; }
    virtual void setHistory(int _nframes) { history = _nframes; }

diff --git a/modules/video/src/tvl1flow.cpp b/modules/video/src/tvl1flow.cpp
index 8a865a1d2e..90fe48f474 100644
--- a/modules/video/src/tvl1flow.cpp
+++ b/modules/video/src/tvl1flow.cpp
@@ -86,7 +86,7 @@ using namespace cv;

 namespace {

-class OpticalFlowDual_TVL1 : public DenseOpticalFlow
+class OpticalFlowDual_TVL1 : public DualTVL1OpticalFlow
 {
 public:
    OpticalFlowDual_TVL1();
@@ -94,7 +94,18 @@ public:
    void calc(InputArray I0, InputArray I1, InputOutputArray flow);
    void collectGarbage();

-    AlgorithmInfo* info() const;
+    CV_IMPL_PROPERTY(double, Tau, tau)
+    CV_IMPL_PROPERTY(double, Lambda, lambda)
+    CV_IMPL_PROPERTY(double, Theta, theta)
+    CV_IMPL_PROPERTY(double, Gamma, gamma)
+    CV_IMPL_PROPERTY(int, ScalesNumber, nscales)
+    CV_IMPL_PROPERTY(int, WarpingsNumber, warps)
+    CV_IMPL_PROPERTY(double, Epsilon, epsilon)
+    CV_IMPL_PROPERTY(int, InnerIterations, innerIterations)
+    CV_IMPL_PROPERTY(int, OuterIterations, outerIterations)
+    CV_IMPL_PROPERTY(bool, UseInitialFlow, useInitialFlow)
+    CV_IMPL_PROPERTY(double, ScaleStep, scaleStep)
+    CV_IMPL_PROPERTY(int, MedianFiltering, medianFiltering)

 protected:
    double tau;
@@ -1416,35 +1427,9 @@ void OpticalFlowDual_TVL1::collectGarbage()
    dum.norm_buf.release();
 }

-
-CV_INIT_ALGORITHM(OpticalFlowDual_TVL1, "DenseOpticalFlow.DualTVL1",
-                  obj.info()->addParam(obj, "tau", obj.tau, false, 0, 0,
-                                       "Time step of the numerical scheme");
-                  obj.info()->addParam(obj, "lambda", obj.lambda, false, 0, 0,
-                                       "Weight parameter for the data term, attachment parameter");
-                  obj.info()->addParam(obj, "theta", obj.theta, false, 0, 0,
-                                       "Weight parameter for (u - v)^2, tightness parameter");
-                  obj.info()->addParam(obj, "nscales", obj.nscales, false, 0, 0,
-                                       "Number of scales used to create the pyramid of images");
-                  obj.info()->addParam(obj, "warps", obj.warps, false, 0, 0,
-                                       "Number of warpings per scale");
-                  obj.info()->addParam(obj, "medianFiltering", obj.medianFiltering, false, 0, 0,
-                                       "Median filter kernel size (1 = no filter) (3 or 5)");
-                  obj.info()->addParam(obj, "scaleStep", obj.scaleStep, false, 0, 0,
-                                       "Step between scales (<1)");
-                  obj.info()->addParam(obj, "epsilon", obj.epsilon, false, 0, 0,
-                                       "Stopping criterion threshold used in the numerical scheme, which is a trade-off between precision and running time");
-                  obj.info()->addParam(obj, "innerIterations", obj.innerIterations, false, 0, 0,
-                                       "inner iterations (between outlier filtering) used in the numerical scheme");
-                  obj.info()->addParam(obj, "outerIterations", obj.outerIterations, false, 0, 0,
-                                       "outer iterations (number of inner loops) used in the numerical scheme");
-                  obj.info()->addParam(obj, "gamma", obj.gamma, false, 0, 0,
-                                       "coefficient for additional illumination variation term");
-                  obj.info()->addParam(obj, "useInitialFlow", obj.useInitialFlow))
-
 } // namespace

-Ptr<DenseOpticalFlow> cv::createOptFlow_DualTVL1()
+Ptr<DualTVL1OpticalFlow> cv::createOptFlow_DualTVL1()
 {
    return makePtr<OpticalFlowDual_TVL1>();
 }
diff --git a/modules/video/test/ocl/test_optflow_tvl1flow.cpp b/modules/video/test/ocl/test_optflow_tvl1flow.cpp
index bd32252b27..67b4a1fd6e 100644
--- a/modules/video/test/ocl/test_optflow_tvl1flow.cpp
+++ b/modules/video/test/ocl/test_optflow_tvl1flow.cpp
@@ -82,11 +82,11 @@ OCL_TEST_P(OpticalFlowTVL1, Mat)
    cv::Mat flow; cv::UMat uflow;

    //create algorithm
-    cv::Ptr<cv::DenseOpticalFlow> alg = cv::createOptFlow_DualTVL1();
+    cv::Ptr<cv::DualTVL1OpticalFlow> alg = cv::createOptFlow_DualTVL1();

    //set parameters
-    alg->set("scaleStep", scaleStep);
-    alg->setInt("medianFiltering", medianFiltering);
+    alg->setScaleStep(scaleStep);
+    alg->setMedianFiltering(medianFiltering);

    //create initial flow as result of algorithm calculation
    if (useInitFlow)
    {
@@ -96,7 +96,7 @@ OCL_TEST_P(OpticalFlowTVL1, Mat)
    }

    //set flag to use initial flow as it is ready to use
-    alg->setBool("useInitialFlow", useInitFlow);
+    alg->setUseInitialFlow(useInitFlow);

    OCL_OFF(alg->calc(frame0, frame1, flow));
    OCL_ON(alg->calc(frame0, frame1, uflow));
@@ -114,4 +114,4 @@ OCL_INSTANTIATE_TEST_CASE_P(Video, OpticalFlowTVL1,
 }
 } // namespace cvtest::ocl
-#endif // HAVE_OPENCL
\ No newline at end of file
+#endif // HAVE_OPENCL
diff --git a/samples/cpp/tutorial_code/features2D/AKAZE_tracking/planar_tracking.cpp b/samples/cpp/tutorial_code/features2D/AKAZE_tracking/planar_tracking.cpp
index 6ffc77cdc4..ba845dc6df 100755
--- a/samples/cpp/tutorial_code/features2D/AKAZE_tracking/planar_tracking.cpp
+++ b/samples/cpp/tutorial_code/features2D/AKAZE_tracking/planar_tracking.cpp
@@ -138,7 +138,7 @@ int main(int argc, char **argv)
    Stats stats, akaze_stats, orb_stats;

    Ptr<AKAZE> akaze = AKAZE::create();
-    akaze->set("threshold", akaze_thresh);
+    akaze->setThreshold(akaze_thresh);
    Ptr<ORB> orb = ORB::create();
    orb->setMaxFeatures(stats.keypoints);
    Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create("BruteForce-Hamming");
@@ -163,7 +163,7 @@ int main(int argc, char **argv)
        akaze_draw_stats = stats;
    }

-    orb_tracker.getDetector()->set("nFeatures", stats.keypoints);
+    orb->setMaxFeatures(stats.keypoints);
    orb_res = orb_tracker.process(frame, stats);
    orb_stats += stats;
    if(update_stats) {
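The features2d samples above show the same pattern for detector parameters; a minimal standalone sketch (the threshold and feature-count values are illustrative):

    #include <opencv2/features2d.hpp>

    void configureDetectors()
    {
        cv::Ptr<cv::AKAZE> akaze = cv::AKAZE::create();
        akaze->setThreshold(3e-4);   // was: akaze->set("threshold", 3e-4)

        cv::Ptr<cv::ORB> orb = cv::ORB::create();
        orb->setMaxFeatures(400);    // was: orb->set("nFeatures", 400)
    }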
diff --git a/samples/gpu/super_resolution.cpp b/samples/gpu/super_resolution.cpp
index 95147a6d07..f106f76d32 100644
--- a/samples/gpu/super_resolution.cpp
+++ b/samples/gpu/super_resolution.cpp
@@ -26,32 +26,32 @@ using namespace cv::superres;
        cout << tm.getTimeSec() << " sec" << endl; \
    }

-static Ptr<DenseOpticalFlowExt> createOptFlow(const string& name, bool useGpu)
+static Ptr<cv::superres::DenseOpticalFlowExt> createOptFlow(const string& name, bool useGpu)
 {
    if (name == "farneback")
    {
        if (useGpu)
-            return createOptFlow_Farneback_CUDA();
+            return cv::superres::createOptFlow_Farneback_CUDA();
        else
-            return createOptFlow_Farneback();
+            return cv::superres::createOptFlow_Farneback();
    }
    /*else if (name == "simple")
        return createOptFlow_Simple();*/
    else if (name == "tvl1")
    {
        if (useGpu)
-            return createOptFlow_DualTVL1_CUDA();
+            return cv::superres::createOptFlow_DualTVL1_CUDA();
        else
-            return createOptFlow_DualTVL1();
+            return cv::superres::createOptFlow_DualTVL1();
    }
    else if (name == "brox")
-        return createOptFlow_Brox_CUDA();
+        return cv::superres::createOptFlow_Brox_CUDA();
    else if (name == "pyrlk")
-        return createOptFlow_PyrLK_CUDA();
+        return cv::superres::createOptFlow_PyrLK_CUDA();
    else
        cerr << "Incorrect Optical Flow algorithm - " << name << endl;

-    return Ptr<DenseOpticalFlowExt>();
+    return Ptr<cv::superres::DenseOpticalFlowExt>();
 }

 int main(int argc, const char* argv[])
@@ -92,15 +92,15 @@ int main(int argc, const char* argv[])
    else
        superRes = createSuperResolution_BTVL1();

-    Ptr<DenseOpticalFlowExt> of = createOptFlow(optFlow, useCuda);
+    Ptr<cv::superres::DenseOpticalFlowExt> of = createOptFlow(optFlow, useCuda);

    if (of.empty())
        return EXIT_FAILURE;
-    superRes->set("opticalFlow", of);
+    superRes->setOpticalFlow(of);

-    superRes->set("scale", scale);
-    superRes->set("iterations", iterations);
-    superRes->set("temporalAreaRadius", temporalAreaRadius);
+    superRes->setScale(scale);
+    superRes->setIterations(iterations);
+    superRes->setTemporalAreaRadius(temporalAreaRadius);

    Ptr<FrameSource> frameSource;
    if (useCuda)
diff --git a/samples/gpu/surf_keypoint_matcher.cpp b/samples/gpu/surf_keypoint_matcher.cpp
index 0a8554d71d..522c8a1e14 100644
--- a/samples/gpu/surf_keypoint_matcher.cpp
+++ b/samples/gpu/surf_keypoint_matcher.cpp
@@ -62,19 +62,17 @@ int main(int argc, char* argv[])
    cout << "FOUND " << keypoints2GPU.cols << " keypoints on second image" << endl;

    // matching descriptors
-    BFMatcher_CUDA matcher(surf.defaultNorm());
-    GpuMat trainIdx, distance;
-    matcher.matchSingle(descriptors1GPU, descriptors2GPU, trainIdx, distance);
+    Ptr<cv::cuda::DescriptorMatcher> matcher = cv::cuda::DescriptorMatcher::createBFMatcher(surf.defaultNorm());
+    vector<DMatch> matches;
+    matcher->match(descriptors1GPU, descriptors2GPU, matches);

    // downloading results
    vector<KeyPoint> keypoints1, keypoints2;
    vector<float> descriptors1, descriptors2;
-    vector<DMatch> matches;
    surf.downloadKeypoints(keypoints1GPU, keypoints1);
    surf.downloadKeypoints(keypoints2GPU, keypoints2);
    surf.downloadDescriptors(descriptors1GPU, descriptors1);
    surf.downloadDescriptors(descriptors2GPU, descriptors2);
-    BFMatcher_CUDA::matchDownload(trainIdx, distance, matches);

    // drawing the results
    Mat img_matches;

From 79e8f0680c609472fd7e3e932fbdcc5924339686 Mon Sep 17 00:00:00 2001
From: Maksim Shabunin
Date: Wed, 11 Feb 2015 13:24:14 +0300
Subject: [PATCH 2/7] Updated ml module interfaces and documentation

---
 doc/Doxyfile.in                               |   5 +-
 .../introduction_to_svm.markdown              | 103 +--
 .../non_linear_svms/non_linear_svms.markdown  | 171 ++--
 modules/core/include/opencv2/core.hpp         |  43 +-
 modules/ml/doc/ml_intro.markdown              |  63 +-
 modules/ml/include/opencv2/ml.hpp             | 801 ++++++++----------
 modules/ml/src/ann_mlp.cpp                    | 162 ++--
 modules/ml/src/boost.cpp                      |  66 +-
 modules/ml/src/em.cpp                         | 138 ++-
 modules/ml/src/knearest.cpp                   | 265
+++--- modules/ml/src/lr.cpp | 60 +- modules/ml/src/nbayes.cpp | 6 +- modules/ml/src/precomp.hpp | 100 ++- modules/ml/src/rtrees.cpp | 69 +- modules/ml/src/svm.cpp | 232 ++--- modules/ml/src/tree.cpp | 118 +-- modules/ml/test/test_emknearestkmeans.cpp | 32 +- modules/ml/test/test_lr.cpp | 30 +- modules/ml/test/test_mltests2.cpp | 89 +- modules/ml/test/test_save_load.cpp | 3 +- modules/superres/include/opencv2/superres.hpp | 20 +- .../include/opencv2/superres/optical_flow.hpp | 12 +- modules/superres/src/optical_flow.cpp | 28 +- .../video/include/opencv2/video/tracking.hpp | 24 +- samples/cpp/em.cpp | 8 +- samples/cpp/letter_recog.cpp | 58 +- samples/cpp/logistic_regression.cpp | 20 +- samples/cpp/points_classifier.cpp | 120 +-- samples/cpp/train_HOG.cpp | 26 +- samples/cpp/tree_engine.cpp | 30 +- .../introduction_to_svm.cpp | 30 +- .../ml/non_linear_svms/non_linear_svms.cpp | 29 +- 32 files changed, 1418 insertions(+), 1543 deletions(-) diff --git a/doc/Doxyfile.in b/doc/Doxyfile.in index 79af5ac359..93ccafaae8 100644 --- a/doc/Doxyfile.in +++ b/doc/Doxyfile.in @@ -244,7 +244,10 @@ PREDEFINED = __cplusplus=1 \ CV_DEFAULT(x)=" = x" \ CV_NEON=1 \ FLANN_DEPRECATED= \ - "CV_PURE_PROPERTY(type, name)= /**\@{*/ virtual type get##name() const = 0; virtual void set##name(type _##name) = 0; /**\@}*/" + "CV_PURE_PROPERTY(type, name)= /** \@see set##name */ virtual type get##name() const = 0; /** \@copybrief get##name \@see get##name */ virtual void set##name(type val) = 0;" \ + "CV_IMPL_PROPERTY(type, name, x)= /** \@see set##name */ virtual type get##name() const = 0; /** \@copybrief get##name \@see get##name */ virtual void set##name(type val) = 0;" \ + "CV_IMPL_PROPERTY_S(type, name, x)= /** \@see set##name */ virtual type get##name() const = 0; /** \@copybrief get##name \@see get##name */ virtual void set##name(const type & val);" \ + "CV_IMPL_PROPERTY_RO(type, name, x)= virtual type get##name() const;" EXPAND_AS_DEFINED = SKIP_FUNCTION_MACROS = YES TAGFILES = diff --git a/doc/tutorials/ml/introduction_to_svm/introduction_to_svm.markdown b/doc/tutorials/ml/introduction_to_svm/introduction_to_svm.markdown index 9b2de2c1e3..50f19b6fd2 100644 --- a/doc/tutorials/ml/introduction_to_svm/introduction_to_svm.markdown +++ b/doc/tutorials/ml/introduction_to_svm/introduction_to_svm.markdown @@ -1,8 +1,6 @@ Introduction to Support Vector Machines {#tutorial_introduction_to_svm} ======================================= -@todo update this tutorial - Goal ---- @@ -31,13 +29,11 @@ understand that this is done only because our intuition is better built from exa to imagine. However, the same concepts apply to tasks where the examples to classify lie in a space whose dimension is higher than two. -In the above picture you can see that there exists multiple -lines that offer a solution to the problem. Is any of them better than the others? We can -intuitively define a criterion to estimate the worth of the lines: - -- A line is bad if it passes too close to the points because it will be noise sensitive and it will - not generalize correctly. Therefore, our goal should be to find the line passing as far as - possible from all points. +In the above picture you can see that there exist multiple lines that offer a solution to the +problem. Is any of them better than the others? We can intuitively define a criterion to estimate +the worth of the lines: A line is bad if it passes too close to the points because it will be +noise sensitive and it will not generalize correctly.
Therefore, our goal should be to find +the line passing as far as possible from all points. Then, the operation of the SVM algorithm is based on finding the hyperplane that gives the largest minimum distance to the training examples. Twice this distance receives the important name of @@ -57,7 +53,7 @@ where \f$\beta\f$ is known as the *weight vector* and \f$\beta_{0}\f$ as the *bi @sa A more in-depth description of this and hyperplanes can be found in section 4.5 (*Separating Hyperplanes*) of the book: *Elements of Statistical Learning* by T. Hastie, R. Tibshirani and J. H. -Friedman. +Friedman (@cite HTF01). The optimal hyperplane can be represented in an infinite number of different ways by scaling of \f$\beta\f$ and \f$\beta_{0}\f$. As a matter of convention, among all the possible @@ -107,17 +103,14 @@ Explanation The training data of this exercise is formed by a set of labeled 2D-points that belong to one of two different classes; one of the classes consists of one point and the other of three points. - @code{.cpp} - float labels[4] = {1.0, -1.0, -1.0, -1.0}; - float trainingData[4][2] = {{501, 10}, {255, 10}, {501, 255}, {10, 501}}; - @endcode + + @snippet cpp/tutorial_code/ml/introduction_to_svm/introduction_to_svm.cpp setup1 + The function @ref cv::ml::SVM::train that will be used afterwards requires the training data to be stored as @ref cv::Mat objects of floats. Therefore, we create these objects from the arrays defined above: - @code{.cpp} - Mat trainingDataMat(4, 2, CV_32FC1, trainingData); - Mat labelsMat (4, 1, CV_32FC1, labels); - @endcode + + @snippet cpp/tutorial_code/ml/introduction_to_svm/introduction_to_svm.cpp setup2 -# **Set up SVM's parameters** In this tutorial we have introduced the theory of SVMs in the most simple case, when the training examples are spread into two classes that are linearly separable. However, SVMs can be used in a wide variety of problems (e.g. problems with non-linearly separable data, an SVM using a kernel function to raise the dimensionality of the examples, etc). As a consequence of this, we have to define some parameters before training the SVM. These parameters are stored in an - object of the class @ref cv::ml::SVM::Params . - @code{.cpp} - ml::SVM::Params params; - params.svmType = ml::SVM::C_SVC; - params.kernelType = ml::SVM::LINEAR; - params.termCrit = TermCriteria(TermCriteria::MAX_ITER, 100, 1e-6); - @endcode - - *Type of SVM*. We choose here the type **ml::SVM::C_SVC** that can be used for n-class - classification (n \f$\geq\f$ 2). This parameter is defined in the attribute - *ml::SVM::Params.svmType*. - - The important feature of the type of SVM **CvSVM::C_SVC** deals with imperfect separation of classes (i.e. when the training data is non-linearly separable). This feature is not important here since the data is linearly separable and we chose this SVM type only for being the most commonly used. + object of the class @ref cv::ml::SVM. + + @snippet cpp/tutorial_code/ml/introduction_to_svm/introduction_to_svm.cpp init + + Here: + - *Type of SVM*. We choose here the type @ref cv::ml::SVM::C_SVC "C_SVC" that can be used for + n-class classification (n \f$\geq\f$ 2). The important feature of this type is that it deals + with imperfect separation of classes (i.e. when the training data is non-linearly separable). + This feature is not important here since the data is linearly separable and we chose this SVM + type only for being the most commonly used. - *Type of SVM kernel*. We have not talked about kernel functions since they are not - interesting for the training data we are dealing with. Nevertheless, let's explain briefly - now the main idea behind a kernel function.
It is a mapping done to the training data to - improve its resemblance to a linearly separable set of data. This mapping consists of - increasing the dimensionality of the data and is done efficiently using a kernel function. - We choose here the type **ml::SVM::LINEAR** which means that no mapping is done. This - parameter is defined in the attribute *ml::SVMParams.kernel_type*. + interesting for the training data we are dealing with. Nevertheless, let's explain briefly now + the main idea behind a kernel function. It is a mapping done to the training data to improve + its resemblance to a linearly separable set of data. This mapping consists of increasing the + dimensionality of the data and is done efficiently using a kernel function. We choose here the + type @ref cv::ml::SVM::LINEAR "LINEAR" which means that no mapping is done. This parameter is + defined using cv::ml::SVM::setKernel. - *Termination criteria of the algorithm*. The SVM training procedure is implemented solving a constrained quadratic optimization problem in an **iterative** fashion. Here we specify a maximum number of iterations and a tolerance error so we allow the algorithm to finish in a smaller number of steps even if the optimal hyperplane has not been computed yet. This - parameter is defined in a structure @ref cv::cvTermCriteria . + parameter is defined in a structure @ref cv::TermCriteria . -# **Train the SVM** + We call the method @ref cv::ml::SVM::train to build the SVM model. - We call the method - [CvSVM::train](http://docs.opencv.org/modules/ml/doc/support_vector_machines.html#cvsvm-train) - to build the SVM model. - @code{.cpp} - CvSVM SVM; - SVM.train(trainingDataMat, labelsMat, Mat(), Mat(), params); - @endcode + @snippet cpp/tutorial_code/ml/introduction_to_svm/introduction_to_svm.cpp train -# **Regions classified by the SVM** The method @ref cv::ml::SVM::predict is used to classify an input sample using a trained SVM. In this example we have used this method in order to color the space depending on the prediction done by the SVM. In other words, an image is traversed interpreting its pixels as points of the Cartesian plane. Each of the points is colored depending on the class predicted by the SVM; in green if it is the class with label 1 and in blue if it is the class with label -1. - @code{.cpp} - Vec3b green(0,255,0), blue (255,0,0); - - for (int i = 0; i < image.rows; ++i) - for (int j = 0; j < image.cols; ++j) - { - Mat sampleMat = (Mat_<float>(1,2) << i,j); - float response = SVM.predict(sampleMat); - - if (response == 1) - image.at<Vec3b>(j, i) = green; - else - if (response == -1) - image.at<Vec3b>(j, i) = blue; - } - @endcode + + @snippet cpp/tutorial_code/ml/introduction_to_svm/introduction_to_svm.cpp show -# **Support vectors** The method @ref cv::ml::SVM::getSupportVectors obtains all of the support vectors. We have used this method here to find the training examples that are support vectors and highlight them.
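(Editor's note: the @snippet markers in this hunk pull code from the samples tree, which is not visible inside this patch. Below is a hedged sketch of what the new-style support-vector listing roughly looks like, assuming a trained model in `Ptr<SVM> svm` and the same `image`, `thickness` and `lineType` variables as the removed listing; it is illustrative only, not the shipped sample.)

@code{.cpp}
// Sketch only: getSupportVectors() now returns all support vectors
// packed row-wise into a single Mat, one vector per row.
Mat sv = svm->getSupportVectors();
for (int i = 0; i < sv.rows; ++i)
{
    const float* v = sv.ptr<float>(i); // get and then highlight with grayscale
    circle(image, Point((int)v[0], (int)v[1]), 6, Scalar(128, 128, 128), thickness, lineType);
}
@endcode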
- @code{.cpp} - int c = SVM.get_support_vector_count(); - - for (int i = 0; i < c; ++i) - { - const float* v = SVM.get_support_vector(i); // get and then highlight with grayscale - circle( image, Point( (int) v[0], (int) v[1]), 6, Scalar(128, 128, 128), thickness, lineType); - } - @endcode + + @snippet cpp/tutorial_code/ml/introduction_to_svm/introduction_to_svm.cpp show_vectors Results ------- diff --git a/doc/tutorials/ml/non_linear_svms/non_linear_svms.markdown b/doc/tutorials/ml/non_linear_svms/non_linear_svms.markdown index 5c36e425b3..eb171b94a3 100644 --- a/doc/tutorials/ml/non_linear_svms/non_linear_svms.markdown +++ b/doc/tutorials/ml/non_linear_svms/non_linear_svms.markdown @@ -1,8 +1,6 @@ Support Vector Machines for Non-Linearly Separable Data {#tutorial_non_linear_svms} ======================================================= -@todo update this tutorial - Goal ---- @@ -10,21 +8,20 @@ In this tutorial you will learn how to: - Define the optimization problem for SVMs when it is not possible to separate linearly the training data. -- How to configure the parameters in @ref cv::ml::SVM::Params to adapt your SVM for this class of - problems. +- How to configure the parameters to adapt your SVM for this class of problems. Motivation ---------- Why is it interesting to extend the SVM optimization problem in order to handle non-linearly separable training data? Most of the applications in which SVMs are used in computer vision require a more -powerful tool than a simple linear classifier. This stems from the fact that in these tasks **the -training data can be rarely separated using an hyperplane**. +powerful tool than a simple linear classifier. This stems from the fact that in these tasks __the +training data can rarely be separated using a hyperplane__. Consider one of these tasks, for example, face detection. The training data in this case is composed -by a set of images that are faces and another set of images that are non-faces (*every other thing -in the world except from faces*). This training data is too complex so as to find a representation -of each sample (*feature vector*) that could make the whole set of faces linearly separable from the +by a set of images that are faces and another set of images that are non-faces (_every other thing +in the world except faces_). This training data is too complex for us to find a representation +of each sample (_feature vector_) that could make the whole set of faces linearly separable from the whole set of non-faces. Extension of the Optimization Problem @@ -32,13 +29,13 @@ Extension of the Optimization Problem Remember that using SVMs we obtain a separating hyperplane. Therefore, since the training data is now non-linearly separable, we must admit that the hyperplane found will misclassify some of the -samples. This *misclassification* is a new variable in the optimization that must be taken into +samples. This _misclassification_ is a new variable in the optimization that must be taken into account. The new model has to include both the old requirement of finding the hyperplane that gives the biggest margin and the new one of generalizing the training data correctly by not allowing too many classification errors.
We start here from the formulation of the optimization problem of finding the hyperplane which -maximizes the __margin__ (this is explained in the previous tutorial @ref tutorial_introduction_to_svm): \f[\min_{\beta, \beta_{0}} L(\beta) = \frac{1}{2}||\beta||^{2} \text{ subject to } y_{i}(\beta^{T} x_{i} + \beta_{0}) \geq 1 \text{ } \forall i\f] @@ -50,8 +47,8 @@ constant times the number of misclassification errors in the training data, i.e. However, this one is not a very good solution since, among some other reasons, we do not distinguish between samples that are misclassified with a small distance to their appropriate decision region or -samples that are not. Therefore, a better solution will take into account the *distance of the -misclassified samples to their correct decision regions*, i.e.: +samples that are not. Therefore, a better solution will take into account the _distance of the +misclassified samples to their correct decision regions_, i.e.: \f[\min ||\beta||^{2} + C \text{(distance of misclassified samples to their correct regions)}\f] @@ -68,7 +65,7 @@ distances of the rest of the samples are zero since they lay already in their co region. The red and blue lines that appear on the picture are the margins to each one of the -decision regions. It is very **important** to realize that each of the \f$\xi_{i}\f$ goes from a +decision regions. It is very __important__ to realize that each of the \f$\xi_{i}\f$ goes from a misclassified training sample to the margin of its appropriate region. Finally, the new formulation for the optimization problem is: @@ -79,26 +76,25 @@ How should the parameter C be chosen? It is obvious that the answer to this ques the training data is distributed. Although there is no general answer, it is useful to take into account these rules: -- Large values of C give solutions with *less misclassification errors* but a *smaller margin*. +- Large values of C give solutions with _fewer misclassification errors_ but a _smaller margin_. Consider that in this case it is expensive to make misclassification errors. Since the aim of the optimization is to minimize the argument, few misclassification errors are allowed. -- Small values of C give solutions with *bigger margin* and *more classification errors*. In this +- Small values of C give solutions with _bigger margin_ and _more classification errors_. In this case the minimization does not consider that much the term of the sum so it focuses more on finding a hyperplane with big margin. Source Code ----------- -You may also find the source code and these video file in the -`samples/cpp/tutorial_code/gpu/non_linear_svms/non_linear_svms` folder of the OpenCV source library -or [download it from here ](https://github.com/Itseez/opencv/tree/master/samples/cpp/tutorial_code/ml/non_linear_svms/non_linear_svms.cpp). +You may also find the source code in the `samples/cpp/tutorial_code/ml/non_linear_svms` folder of the OpenCV source library or +[download it from here](https://github.com/Itseez/opencv/tree/master/samples/cpp/tutorial_code/ml/non_linear_svms/non_linear_svms.cpp). @includelineno cpp/tutorial_code/ml/non_linear_svms/non_linear_svms.cpp Explanation ----------- --# **Set up the training data** +-# __Set up the training data__ The training data of this exercise is formed by a set of labeled 2D-points that belong to one of two different classes.
To make the exercise more appealing, the training data is generated @@ -107,136 +103,67 @@ Explanation We have divided the generation of the training data into two main parts. In the first part we generate data for both classes that is linearly separable. - @code{.cpp} - // Generate random points for the class 1 - Mat trainClass = trainData.rowRange(0, nLinearSamples); - // The x coordinate of the points is in [0, 0.4) - Mat c = trainClass.colRange(0, 1); - rng.fill(c, RNG::UNIFORM, Scalar(1), Scalar(0.4 * WIDTH)); - // The y coordinate of the points is in [0, 1) - c = trainClass.colRange(1,2); - rng.fill(c, RNG::UNIFORM, Scalar(1), Scalar(HEIGHT)); - - // Generate random points for the class 2 - trainClass = trainData.rowRange(2*NTRAINING_SAMPLES-nLinearSamples, 2*NTRAINING_SAMPLES); - // The x coordinate of the points is in [0.6, 1] - c = trainClass.colRange(0 , 1); - rng.fill(c, RNG::UNIFORM, Scalar(0.6*WIDTH), Scalar(WIDTH)); - // The y coordinate of the points is in [0, 1) - c = trainClass.colRange(1,2); - rng.fill(c, RNG::UNIFORM, Scalar(1), Scalar(HEIGHT)); - @endcode + @snippet cpp/tutorial_code/ml/non_linear_svms/non_linear_svms.cpp setup1 + In the second part we create data for both classes that is non-linearly separable, data that overlaps. - @code{.cpp} - // Generate random points for the classes 1 and 2 - trainClass = trainData.rowRange( nLinearSamples, 2*NTRAINING_SAMPLES-nLinearSamples); - // The x coordinate of the points is in [0.4, 0.6) - c = trainClass.colRange(0,1); - rng.fill(c, RNG::UNIFORM, Scalar(0.4*WIDTH), Scalar(0.6*WIDTH)); - // The y coordinate of the points is in [0, 1) - c = trainClass.colRange(1,2); - rng.fill(c, RNG::UNIFORM, Scalar(1), Scalar(HEIGHT)); - @endcode - --# **Set up SVM's parameters** - - @sa - In the previous tutorial @ref tutorial_introduction_to_svm there is an explanation of the atributes of the - class @ref cv::ml::SVM::Params that we configure here before training the SVM. - - @code{.cpp} - CvSVMParams params; - params.svm_type = SVM::C_SVC; - params.C = 0.1; - params.kernel_type = SVM::LINEAR; - params.term_crit = TermCriteria(TermCriteria::ITER, (int)1e7, 1e-6); - @endcode + @snippet cpp/tutorial_code/ml/non_linear_svms/non_linear_svms.cpp setup2 + +-# __Set up SVM's parameters__ + + @note In the previous tutorial @ref tutorial_introduction_to_svm there is an explanation of the + attributes of the class @ref cv::ml::SVM that we configure here before training the SVM. + + @snippet cpp/tutorial_code/ml/non_linear_svms/non_linear_svms.cpp init + There are just two differences between the configuration we do here and the one that was done in - the previous tutorial (tutorial_introduction_to_svm) that we use as reference. + the previous tutorial (@ref tutorial_introduction_to_svm) that we use as reference. - - *CvSVM::C_SVC*. We chose here a small value of this parameter in order not to punish too much - the misclassification errors in the optimization. The idea of doing this stems from the will - of obtaining a solution close to the one intuitively expected. However, we recommend to get a + - _C_. We chose here a small value of this parameter in order not to punish too much the + misclassification errors in the optimization. The idea of doing this stems from the will of + obtaining a solution close to the one intuitively expected. However, we recommend getting + better insight into the problem by making adjustments to this parameter.
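(Editor's note: the @snippet init marker above stands in for code in the samples tree. As a hedged sketch of the equivalent property-based setup, using only the setters this patch series introduces and the values from the removed CvSVMParams listing:)

@code{.cpp}
// Sketch only: parameters are now set directly on the SVM object
// instead of being collected in a CvSVMParams structure.
Ptr<SVM> svm = SVM::create();
svm->setType(SVM::C_SVC);
svm->setC(0.1);
svm->setKernel(SVM::LINEAR);
svm->setTermCriteria(TermCriteria(TermCriteria::ITER, (int)1e7, 1e-6));
@endcode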
- @note Here there are just very few points in the overlapping region between classes, giving a smaller value to **FRAC_LINEAR_SEP** the density of points can be incremented and the impact of the parameter **CvSVM::C_SVC** explored deeply. + @note In this case there are just very few points in the overlapping region between classes. + By giving a smaller value to __FRAC_LINEAR_SEP__ the density of points can be increased and the + impact of the parameter _C_ explored more deeply. - - *Termination Criteria of the algorithm*. The maximum number of iterations has to be + - _Termination Criteria of the algorithm_. The maximum number of iterations has to be increased considerably in order to solve correctly a problem with non-linearly separable training data. In particular, we have increased in five orders of magnitude this value. --# **Train the SVM** +-# __Train the SVM__ We call the method @ref cv::ml::SVM::train to build the SVM model (see the sketch after this step). Watch out that the training process may take quite a long time. Have patience when you run the program. - @code{.cpp} - CvSVM svm; - svm.train(trainData, labels, Mat(), Mat(), params); - @endcode --# **Show the Decision Regions** + @snippet cpp/tutorial_code/ml/non_linear_svms/non_linear_svms.cpp train + +-# __Show the Decision Regions__ The method @ref cv::ml::SVM::predict is used to classify an input sample using a trained SVM. In this example we have used this method in order to color the space depending on the prediction done by the SVM. In other words, an image is traversed interpreting its pixels as points of the Cartesian plane. Each of the points is colored depending on the class predicted by the SVM; in dark green if it is the class with label 1 and in dark blue if it is the class with label 2. - @code{.cpp} - Vec3b green(0,100,0), blue (100,0,0); - for (int i = 0; i < I.rows; ++i) - for (int j = 0; j < I.cols; ++j) - { - Mat sampleMat = (Mat_<float>(1,2) << i, j); - float response = svm.predict(sampleMat); - if (response == 1) I.at<Vec3b>(j, i) = green; - else if (response == 2) I.at<Vec3b>(j, i) = blue; - } - @endcode + @snippet cpp/tutorial_code/ml/non_linear_svms/non_linear_svms.cpp show --# **Show the training data** +-# __Show the training data__ The method @ref cv::circle is used to show the samples that compose the training data. The samples of the class labeled with 1 are shown in light green, and the samples of the class labeled with 2 in light blue. - @code{.cpp} - int thick = -1; - int lineType = 8; - float px, py; - // Class 1 - for (int i = 0; i < NTRAINING_SAMPLES; ++i) - { - px = trainData.at<float>(i,0); - py = trainData.at<float>(i,1); - circle(I, Point( (int) px, (int) py ), 3, Scalar(0, 255, 0), thick, lineType); - } - // Class 2 - for (int i = NTRAINING_SAMPLES; i <2*NTRAINING_SAMPLES; ++i) - { - px = trainData.at<float>(i,0); - py = trainData.at<float>(i,1); - circle(I, Point( (int) px, (int) py ), 3, Scalar(255, 0, 0), thick, lineType); - } - @endcode - --# **Support vectors** + + @snippet cpp/tutorial_code/ml/non_linear_svms/non_linear_svms.cpp show_data + +-# __Support vectors__ We use here a couple of methods to obtain information about the support vectors. The method - @ref cv::ml::SVM::getSupportVectors obtain all support vectors. - We have used this methods here to find the training examples that are - support vectors and highlight them.
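(Editor's note: for the training step described above, a minimal sketch of the new-style call, assuming the same `trainData` and `labels` matrices as the removed listing and the `TrainData` factory documented by this patch in ml.hpp; illustrative only:)

@code{.cpp}
// Sketch only: StatModel::train now takes a TrainData object rather than
// a CvSVMParams structure; ROW_SAMPLE means each row is one sample.
svm->train(TrainData::create(trainData, ROW_SAMPLE, labels));
@endcode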
- @code{.cpp} - thick = 2; - lineType = 8; - int x = svm.get_support_vector_count(); - - for (int i = 0; i < x; ++i) - { - const float* v = svm.get_support_vector(i); - circle( I, Point( (int) v[0], (int) v[1]), 6, Scalar(128, 128, 128), thick, lineType); - } - @endcode + @ref cv::ml::SVM::getSupportVectors obtains all support vectors. We have used this method here + to find the training examples that are support vectors and highlight them. + + @snippet cpp/tutorial_code/ml/non_linear_svms/non_linear_svms.cpp show_vectors Results ------- diff --git a/modules/core/include/opencv2/core.hpp b/modules/core/include/opencv2/core.hpp index 701c0e3a07..77a8c503e7 100644 --- a/modules/core/include/opencv2/core.hpp +++ b/modules/core/include/opencv2/core.hpp @@ -2802,43 +2802,36 @@ public: #define CV_PURE_PROPERTY(type, name) \ CV_WRAP virtual type get##name() const = 0; \ - CV_WRAP virtual void set##name(type _##name) = 0; + CV_WRAP virtual void set##name(type val) = 0; #define CV_PURE_PROPERTY_S(type, name) \ CV_WRAP virtual type get##name() const = 0; \ - CV_WRAP virtual void set##name(const type & _##name) = 0; + CV_WRAP virtual void set##name(const type & val) = 0; #define CV_PURE_PROPERTY_RO(type, name) \ CV_WRAP virtual type get##name() const = 0; // basic property implementation -#define CV_IMPL_PROPERTY(type, name, member) \ - type get##name() const \ { \ - return member; \ } \ - void set##name(type val) \ { \ - member = val; \ } +#define CV_IMPL_PROPERTY_RO(type, name, member) \ + inline type get##name() const { return member; } -#define CV_IMPL_PROPERTY_S(type, name, member) \ - type get##name() const \ { \ - return member; \ } \ - void set##name(const type &val) \ { \ - member = val; \ } +#define CV_HELP_IMPL_PROPERTY(r_type, w_type, name, member) \ + CV_IMPL_PROPERTY_RO(r_type, name, member) \ + inline void set##name(w_type val) { member = val; } -#define CV_IMPL_PROPERTY_RO(type, name, member) \ - type get##name() const \ { \ - return member; \ } +#define CV_HELP_WRAP_PROPERTY(r_type, w_type, name, internal_name, internal_obj) \ + r_type get##name() const { return internal_obj.get##internal_name(); } \ + void set##name(w_type val) { internal_obj.set##internal_name(val); } + +#define CV_IMPL_PROPERTY(type, name, member) CV_HELP_IMPL_PROPERTY(type, type, name, member) +#define CV_IMPL_PROPERTY_S(type, name, member) CV_HELP_IMPL_PROPERTY(type, const type &, name, member) + +#define CV_WRAP_PROPERTY(type, name, internal_name, internal_obj) CV_HELP_WRAP_PROPERTY(type, type, name, internal_name, internal_obj) +#define CV_WRAP_PROPERTY_S(type, name, internal_name, internal_obj) CV_HELP_WRAP_PROPERTY(type, const type &, name, internal_name, internal_obj) +#define CV_WRAP_SAME_PROPERTY(type, name, internal_obj) CV_WRAP_PROPERTY(type, name, name, internal_obj) +#define CV_WRAP_SAME_PROPERTY_S(type, name, internal_obj) CV_WRAP_PROPERTY_S(type, name, name, internal_obj) struct Param { enum { INT=0, BOOLEAN=1, REAL=2, STRING=3, MAT=4, MAT_VECTOR=5, ALGORITHM=6, FLOAT=7, diff --git a/modules/ml/doc/ml_intro.markdown b/modules/ml/doc/ml_intro.markdown index 5e3c3d2cf8..b7a3d4059f 100644 --- a/modules/ml/doc/ml_intro.markdown +++ b/modules/ml/doc/ml_intro.markdown @@ -449,40 +449,33 @@ classes 0 and 1, one can determine that the given data instance belongs to class \geq 0.5\f$ or class 0 if \f$h_\theta(x) < 0.5\f$ . In Logistic Regression, choosing the right parameters is of utmost importance for reducing the -training error and ensuring high training accuracy.
cv::ml::LogisticRegression::Params is the -structure that defines parameters that are required to train a Logistic Regression classifier. - -The learning rate is determined by cv::ml::LogisticRegression::Params.alpha. It determines how fast -we approach the solution. It is a positive real number. - -Optimization algorithms like Batch Gradient Descent and Mini-Batch Gradient Descent are supported in -LogisticRegression. It is important that we mention the number of iterations these optimization -algorithms have to run. The number of iterations are mentioned by -cv::ml::LogisticRegression::Params.num_iters. The number of iterations can be thought as number of -steps taken and learning rate specifies if it is a long step or a short step. These two parameters -define how fast we arrive at a possible solution. - -In order to compensate for overfitting regularization is performed, which can be enabled by setting -cv::ml::LogisticRegression::Params.regularized to a positive integer (greater than zero). One can -specify what kind of regularization has to be performed by setting -cv::ml::LogisticRegression::Params.norm to REG_L1 or REG_L2 values. - -LogisticRegression provides a choice of 2 training methods with Batch Gradient Descent or the Mini- -Batch Gradient Descent. To specify this, set cv::ml::LogisticRegression::Params::train_method to -either BATCH or MINI_BATCH. If training method is set to MINI_BATCH, the size of the mini batch has -to be to a postive integer using cv::ml::LogisticRegression::Params::mini_batch_size. - -A sample set of training parameters for the Logistic Regression classifier can be initialized as -follows: -@code{.cpp} -using namespace cv::ml; -LogisticRegression::Params params; -params.alpha = 0.5; -params.num_iters = 10000; -params.norm = LogisticRegression::REG_L2; -params.regularized = 1; -params.train_method = LogisticRegression::MINI_BATCH; -params.mini_batch_size = 10; -@endcode +training error and ensuring high training accuracy: + +- The learning rate can be set with the @ref cv::ml::LogisticRegression::setLearningRate "setLearningRate" + method. It determines how fast we approach the solution. It is a positive real number. + +- Optimization algorithms like Batch Gradient Descent and Mini-Batch Gradient Descent are supported + in LogisticRegression. It is important that we mention the number of iterations these optimization + algorithms have to run. The number of iterations can be set with @ref + cv::ml::LogisticRegression::setIterations "setIterations". This parameter can be thought + of as the number of steps taken, while the learning rate specifies whether each is a long step or a + short step. This and the previous parameter define how fast we arrive at a possible solution. + +- In order to compensate for overfitting, regularization is performed, which can be enabled with + @ref cv::ml::LogisticRegression::setRegularization "setRegularization". One can specify what + kind of regularization has to be performed by passing one of @ref + cv::ml::LogisticRegression::RegKinds "regularization kinds" to this method. + +- The logistic regression implementation provides a choice of 2 training methods, Batch Gradient + Descent or Mini-Batch Gradient Descent. To specify this, call @ref + cv::ml::LogisticRegression::setTrainMethod "setTrainMethod" with either @ref + cv::ml::LogisticRegression::BATCH "LogisticRegression::BATCH" or @ref + cv::ml::LogisticRegression::MINI_BATCH "LogisticRegression::MINI_BATCH".
If training method is + set to @ref cv::ml::LogisticRegression::MINI_BATCH "MINI_BATCH", the size of the mini batch has + to be set to a positive integer with @ref cv::ml::LogisticRegression::setMiniBatchSize + "setMiniBatchSize". + +A sample set of training parameters for the Logistic Regression classifier can be initialized as follows: +@snippet samples/cpp/logistic_regression.cpp init @sa cv::ml::LogisticRegression diff --git a/modules/ml/include/opencv2/ml.hpp b/modules/ml/include/opencv2/ml.hpp index 9dca486af4..c7559aea52 100644 --- a/modules/ml/include/opencv2/ml.hpp +++ b/modules/ml/include/opencv2/ml.hpp @@ -381,43 +381,22 @@ public: return model->isTrained() ? model : Ptr<_Tp>(); } + /** @brief Create and train model with default parameters - /** @brief Creates new statistical model and trains it - - @param data training data that can be loaded from file using TrainData::loadFromCSV or - created with TrainData::create. - @param p model parameters - @param flags optional flags, depending on the model. Some of the models can be updated with the - new training samples, not completely overwritten (such as NormalBayesClassifier or ANN_MLP). - */ - template<typename _Tp> static Ptr<_Tp> train(const Ptr<TrainData>& data, const typename _Tp::Params& p, int flags=0) - { - Ptr<_Tp> model = _Tp::create(p); - return !model.empty() && model->train(data, flags) ? model : Ptr<_Tp>(); - } - - /** @brief Creates new statistical model and trains it - - @param samples training samples - @param layout See ml::SampleTypes. - @param responses vector of responses associated with the training samples. - @param p model parameters - @param flags optional flags, depending on the model. Some of the models can be updated with the - new training samples, not completely overwritten (such as NormalBayesClassifier or ANN_MLP). + The class must implement a static `create()` method with no parameters or with all default parameter values */ - template<typename _Tp> static Ptr<_Tp> train(InputArray samples, int layout, InputArray responses, - const typename _Tp::Params& p, int flags=0) + template<typename _Tp> static Ptr<_Tp> train(const Ptr<TrainData>& data, int flags=0) { - Ptr<_Tp> model = _Tp::create(p); - return !model.empty() && model->train(TrainData::create(samples, layout, responses), flags) ? model : Ptr<_Tp>(); + Ptr<_Tp> model = _Tp::create(); + return !model.empty() && model->train(data, flags) ? model : Ptr<_Tp>(); } - /** @brief Saves the model to a file. - - In order to make this method work, the derived class must overwrite - Algorithm::write(FileStorage& fs). - */ + /** Saves the model to a file. + In order to make this method work, the derived class must implement Algorithm::write(FileStorage& fs). */ virtual void save(const String& filename) const; + + /** Returns model string identifier. + This string is used as top level xml/yml node tag when model is saved to a file or string. */ virtual String getDefaultModelName() const = 0; }; /****************************************************************************************\ * Normal Bayes Classifier * \****************************************************************************************/ class CV_EXPORTS_W NormalBayesClassifier : public StatModel { public: - class CV_EXPORTS_W Params - { - public: - Params(); - }; /** @brief Predicts the response for sample(s). The method estimates the most probable classes for input vectors. Input vectors (one or more) @@ -447,21 +421,10 @@ public: */ virtual float predictProb( InputArray inputs, OutputArray outputs, OutputArray outputProbs, int flags=0 ) const = 0; - virtual void setParams(const Params& params) = 0; - virtual Params getParams() const = 0; - /** @brief Creates empty model - - @param params The model parameters.
There is none so far, the structure is used as a placeholder - for possible extensions. - - Use StatModel::train to train the model: - @code - StatModel::train<NormalBayesClassifier>(traindata, params); // to create and train the model - StatModel::load<NormalBayesClassifier>(filename); // load the pre-trained model - @endcode - */ - static Ptr<NormalBayesClassifier> create(const Params& params=Params()); + /** Creates empty model + Use StatModel::train to train the model after creation. */ + static Ptr<NormalBayesClassifier> create(); }; /****************************************************************************************\ * K-Nearest Neighbour Classifier * \****************************************************************************************/ class CV_EXPORTS_W KNearest : public StatModel { public: - class CV_EXPORTS_W_MAP Params - { - public: - /** @brief Constructor with parameters */ - Params(int defaultK=10, bool isclassifier_=true, int Emax_=INT_MAX, int algorithmType_=BRUTE_FORCE); - CV_PROP_RW int defaultK; //!< default number of neighbors to use in predict method - CV_PROP_RW bool isclassifier; //!< whether classification or regression model should be trained - CV_PROP_RW int Emax; //!< for implementation with KDTree - CV_PROP_RW int algorithmType; //!< See KNearest::Types - }; - virtual void setParams(const Params& p) = 0; - virtual Params getParams() const = 0; + /** Default number of neighbors to use in predict method. */ + CV_PURE_PROPERTY(int, DefaultK) + + /** Whether classification or regression model should be trained. */ + CV_PURE_PROPERTY(bool, IsClassifier) + + /** Parameter for KDTree implementation. */ + CV_PURE_PROPERTY(int, Emax) + + /** %Algorithm type, one of KNearest::Types. */ + CV_PURE_PROPERTY(int, AlgorithmType) /** @brief Finds the neighbors and predicts responses for input vectors. @@ -520,17 +482,19 @@ public: OutputArray neighborResponses=noArray(), OutputArray dist=noArray() ) const = 0; - enum Types { BRUTE_FORCE=1, KDTREE=2 }; + /** @brief Implementations of KNearest algorithm + */ + enum Types + { + BRUTE_FORCE=1, + KDTREE=2 + }; /** @brief Creates the empty model - @param params The model parameters - - The static method creates empty %KNearest classifier. It should be then trained using train - method (see StatModel::train). Alternatively, you can load boost model from file using: - `StatModel::load<KNearest>(filename)` + The static method creates empty %KNearest classifier. It should be then trained using StatModel::train method. */ - static Ptr<KNearest> create(const Params& params=Params()); + static Ptr<KNearest> create(); }; /****************************************************************************************\ * Support Vector Machines * \****************************************************************************************/ class CV_EXPORTS_W SVM : public StatModel { public: - /** @brief %SVM training parameters. - - The structure must be initialized and passed to the training method of %SVM. - */ - class CV_EXPORTS_W_MAP Params - { - public: - /** @brief Default constructor */ - Params(); - /** @brief Constructor with parameters */ - Params( int svm_type, int kernel_type, - double degree, double gamma, double coef0, - double Cvalue, double nu, double p, - const Mat& classWeights, TermCriteria termCrit ); - - /** Type of a %SVM formulation. See SVM::Types. Default value is SVM::C_SVC. */ - CV_PROP_RW int svmType; - /** Type of a %SVM kernel. See SVM::KernelTypes. Default value is SVM::RBF. */ - CV_PROP_RW int kernelType; - /** Parameter \f$\gamma\f$ of a kernel function (SVM::POLY / SVM::RBF / SVM::SIGMOID / - SVM::CHI2). Default value is 1. */ - CV_PROP_RW double gamma; - /** Parameter coef0 of a kernel function (SVM::POLY / SVM::SIGMOID). Default value is 0.
*/ - CV_PROP_RW double coef0; - /** Parameter degree of a kernel function (SVM::POLY). Default value is 0. */ - CV_PROP_RW double degree; - - /** Parameter C of a %SVM optimization problem (SVM::C_SVC / SVM::EPS_SVR / SVM::NU_SVR). - Default value is 0. */ - CV_PROP_RW double C; - /** Parameter \f$\nu\f$ of a %SVM optimization problem (SVM::NU_SVC / SVM::ONE_CLASS / - SVM::NU_SVR). Default value is 0. */ - CV_PROP_RW double nu; - /** Parameter \f$\epsilon\f$ of a %SVM optimization problem (SVM::EPS_SVR). Default value is 0. */ - CV_PROP_RW double p; - - /** Optional weights in the SVM::C_SVC problem , assigned to particular classes. They are - multiplied by C so the parameter C of class \#i becomes classWeights(i) \* C. Thus these - weights affect the misclassification penalty for different classes. The larger weight, the - larger penalty on misclassification of data from the corresponding class. Default value is - empty Mat.*/ - CV_PROP_RW Mat classWeights; - /** Termination criteria of the iterative %SVM training procedure which solves a partial - case of constrained quadratic optimization problem. You can specify tolerance and/or the - maximum number of iterations. Default value is TermCriteria( - TermCriteria::MAX_ITER + TermCriteria::EPS, 1000, FLT_EPSILON );*/ - CV_PROP_RW TermCriteria termCrit; - }; class CV_EXPORTS Kernel : public Algorithm { @@ -600,6 +516,59 @@ public: virtual void calc( int vcount, int n, const float* vecs, const float* another, float* results ) = 0; }; + /** Type of a %SVM formulation. + See SVM::Types. Default value is SVM::C_SVC. */ + CV_PURE_PROPERTY(int, Type) + + /** Parameter \f$\gamma\f$ of a kernel function. + For SVM::POLY, SVM::RBF, SVM::SIGMOID or SVM::CHI2. Default value is 1. */ + CV_PURE_PROPERTY(double, Gamma) + + /** Parameter _coef0_ of a kernel function. + For SVM::POLY or SVM::SIGMOID. Default value is 0.*/ + CV_PURE_PROPERTY(double, Coef0) + + /** Parameter _degree_ of a kernel function. + For SVM::POLY. Default value is 0. */ + CV_PURE_PROPERTY(double, Degree) + + /** Parameter _C_ of a %SVM optimization problem. + For SVM::C_SVC, SVM::EPS_SVR or SVM::NU_SVR. Default value is 0. */ + CV_PURE_PROPERTY(double, C) + + /** Parameter \f$\nu\f$ of a %SVM optimization problem. + For SVM::NU_SVC, SVM::ONE_CLASS or SVM::NU_SVR. Default value is 0. */ + CV_PURE_PROPERTY(double, Nu) + + /** Parameter \f$\epsilon\f$ of a %SVM optimization problem. + For SVM::EPS_SVR. Default value is 0. */ + CV_PURE_PROPERTY(double, P) + + /** Optional weights in the SVM::C_SVC problem, assigned to particular classes. + They are multiplied by _C_ so the parameter _C_ of class _i_ becomes `classWeights(i) * C`. Thus + these weights affect the misclassification penalty for different classes. The larger weight, + the larger penalty on misclassification of data from the corresponding class. Default value is + empty Mat. */ + CV_PURE_PROPERTY_S(cv::Mat, ClassWeights) + + /** Termination criteria of the iterative %SVM training procedure which solves a partial + case of constrained quadratic optimization problem. + You can specify tolerance and/or the maximum number of iterations. Default value is + `TermCriteria( TermCriteria::MAX_ITER + TermCriteria::EPS, 1000, FLT_EPSILON )`; */ + CV_PURE_PROPERTY_S(cv::TermCriteria, TermCriteria) + + /** Type of a %SVM kernel. + See SVM::KernelTypes. Default value is SVM::RBF. */ + virtual int getKernelType() const = 0; + + /** Initialize with one of predefined kernels. + See SVM::KernelTypes. 
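+
+    Example (editor's illustrative sketch, not part of the original patch;
+    it assumes only the SVM::create and setKernel members declared in this hunk):
+    @code
+    Ptr<SVM> svm = SVM::create();
+    svm->setKernel(SVM::RBF); // select one of SVM::KernelTypes
+    @endcode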
*/ + virtual void setKernel(int kernelType) = 0; + /** Initialize with custom kernel. + See SVM::Kernel class for implementation details */ + virtual void setCustomKernel(const Ptr<Kernel> &_kernel) = 0; //! %SVM type enum Types { /** C-Support Vector Classification. n-class classification (n \f$\geq\f$ 2), allows @@ -631,6 +600,7 @@ public: ![image](pics/SVM_Comparison.png) */ enum KernelTypes { + /** Returned by SVM::getKernelType in case when custom kernel has been set */ CUSTOM=-1, /** Linear kernel. No mapping is done, linear discrimination (or regression) is done in the original feature space. It is the fastest option. \f$K(x_i, x_j) = x_i^T x_j\f$. */ @@ -678,13 +648,13 @@ public: to such proportion in the whole train dataset. The method trains the %SVM model automatically by choosing the optimal parameters C, gamma, p, - nu, coef0, degree from SVM::Params. Parameters are considered optimal when the cross-validation + nu, coef0, degree. Parameters are considered optimal when the cross-validation estimate of the test set error is minimal. If there is no need to optimize a parameter, the corresponding grid step should be set to any value less than or equal to 1. For example, to avoid optimization in gamma, set `gammaGrid.step = 0`, `gammaGrid.minVal`, `gammaGrid.maxVal` as arbitrary numbers. In this case, the value - `params.gamma` is taken for gamma. + `Gamma` is taken for gamma. And, finally, if the optimization in a parameter is required but the corresponding grid is unknown, you may call the function SVM::getDefaultGrid. To generate a grid, for example, for @@ -710,16 +680,6 @@ public: */ CV_WRAP virtual Mat getSupportVectors() const = 0; - virtual void setParams(const Params& p, const Ptr<Kernel>& customKernel=Ptr<Kernel>()) = 0; - - /** @brief Returns the current %SVM parameters. - - This function may be used to get the optimal parameters obtained while automatically training - SVM::trainAuto. - */ - virtual Params getParams() const = 0; - virtual Ptr<Kernel> getKernel() const = 0; - /** @brief Retrieves the decision function @param i the index of the decision function. If the problem solved is regression, 1-class or @@ -740,28 +700,17 @@ public: /** @brief Generates a grid for %SVM parameters. @param param_id %SVM parameters IDs that must be one of the SVM::ParamTypes. The grid is - generated for the parameter with this ID. + generated for the parameter with this ID. The function generates a grid for the specified parameter of the %SVM algorithm. The grid may be passed to the function SVM::trainAuto. */ static ParamGrid getDefaultGrid( int param_id ); - /** @brief Creates empty model - - @param p %SVM parameters - @param customKernel the optional custom kernel to use. It must implement SVM::Kernel interface. - - Use StatModel::train to train the model: - @code - StatModel::train<SVM>(traindata, params); // to create and train the model - // or - StatModel::load<SVM>(filename); // to load the pre-trained model. - @endcode - Since %SVM has several parameters, you may want to find the best parameters for your problem. It - can be done with SVM::trainAuto. - */ - static Ptr<SVM> create(const Params& p=Params(), const Ptr<Kernel>& customKernel=Ptr<Kernel>()); + /** Creates empty model. + Use StatModel::train to train the model. Since %SVM has several parameters, you may want to + find the best parameters for your problem, it can be done with SVM::trainAuto. */ + static Ptr<SVM> create(); }; /****************************************************************************************\ * Expectation - Maximization * \****************************************************************************************/ @@ -802,34 +751,22 @@ public: //!
The initial step enum {START_E_STEP=1, START_M_STEP=2, START_AUTO_STEP=0}; - /** @brief The class describes %EM training parameters. - */ - class CV_EXPORTS_W_MAP Params - { - public: - /** @brief The constructor - - @param nclusters The number of mixture components in the Gaussian mixture model. Default - value of the parameter is EM::DEFAULT_NCLUSTERS=5. Some of %EM implementation could - determine the optimal number of mixtures within a specified value range, but that is not - the case in ML yet. - @param covMatType Constraint on covariance matrices which defines type of matrices. See - EM::Types. - @param termCrit The termination criteria of the %EM algorithm. The %EM algorithm can be - terminated by the number of iterations termCrit.maxCount (number of M-steps) or when - relative change of likelihood logarithm is less than termCrit.epsilon. Default maximum - number of iterations is EM::DEFAULT_MAX_ITERS=100. - */ - explicit Params(int nclusters=DEFAULT_NCLUSTERS, int covMatType=EM::COV_MAT_DIAGONAL, - const TermCriteria& termCrit=TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, - EM::DEFAULT_MAX_ITERS, 1e-6)); - CV_PROP_RW int nclusters; - CV_PROP_RW int covMatType; - CV_PROP_RW TermCriteria termCrit; - }; + /** The number of mixture components in the Gaussian mixture model. + Default value of the parameter is EM::DEFAULT_NCLUSTERS=5. Some %EM implementations could + determine the optimal number of mixtures within a specified value range, but that is not the + case in ML yet. */ + CV_PURE_PROPERTY(int, ClustersNumber) + + /** Constraint on covariance matrices which defines type of matrices. + See EM::Types. */ + CV_PURE_PROPERTY(int, CovarianceMatrixType) + + /** The termination criteria of the %EM algorithm. + The %EM algorithm can be terminated by the number of iterations termCrit.maxCount (number of + M-steps) or when relative change of likelihood logarithm is less than termCrit.epsilon. Default + maximum number of iterations is EM::DEFAULT_MAX_ITERS=100. */ + CV_PURE_PROPERTY_S(TermCriteria, TermCriteria) - virtual void setParams(const Params& p) = 0; - virtual Params getParams() const = 0; /** @brief Returns weights of the mixtures Returns vector with the number of elements equal to the number of mixtures. @@ -862,9 +799,7 @@ public: */ CV_WRAP virtual Vec2d predict2(InputArray sample, OutputArray probs) const = 0; - virtual bool train( const Ptr<TrainData>& trainData, int flags=0 ) = 0; - - /** @brief Static method that estimate the Gaussian mixture parameters from a samples set + /** @brief Estimate the Gaussian mixture parameters from a samples set.
This variation starts with Expectation step. You need to provide initial means \f$a_k\f$ of mixture components. Optionally you can pass initial weights \f$\pi_k\f$ and covariance matrices \f$S_k\f$ of mixture components. @param samples Samples from which the Gaussian mixture model will be estimated. It should be a one-channel matrix, each row of which is a sample. If the matrix does not have CV_64F type it will be converted to the inner matrix of such type for the further computing. @param means0 Initial means \f$a_k\f$ of mixture components. It is a one-channel matrix of \f$nclusters \times dims\f$ size. If the matrix does not have CV_64F type it will be converted to the inner matrix of such type for the further computing. @param covs0 The vector of initial covariance matrices \f$S_k\f$ of mixture components. Each of covariance matrices is a one-channel matrix of \f$dims \times dims\f$ size. If the matrices do not have CV_64F type they will be converted to the inner matrices of such type for the further computing. @param weights0 Initial weights \f$\pi_k\f$ of mixture components. It should be a one-channel floating-point matrix with \f$1 \times nclusters\f$ or \f$nclusters \times 1\f$ size. @param logLikelihoods The optional output matrix that contains a likelihood logarithm value for each sample. It has \f$nsamples \times 1\f$ size and CV_64FC1 type. @param labels The optional output "class label" for each sample: \f$\texttt{labels}_i=\texttt{arg max}_k(p_{i,k}), i=1..N\f$ (indices of the most probable mixture component for each sample). It has \f$nsamples \times 1\f$ size and CV_32SC1 type. @param probs The optional output matrix that contains posterior probabilities of each Gaussian mixture component given each sample. It has \f$nsamples \times nclusters\f$ size and CV_64FC1 type. - @param params The Gaussian mixture params, see EM::Params description */ - static Ptr<EM> train_startWithE(InputArray samples, InputArray means0, - InputArray covs0=noArray(), - InputArray weights0=noArray(), - OutputArray logLikelihoods=noArray(), - OutputArray labels=noArray(), - OutputArray probs=noArray(), - const Params& params=Params()); + virtual bool trainE(InputArray samples, InputArray means0, + InputArray covs0=noArray(), + InputArray weights0=noArray(), + OutputArray logLikelihoods=noArray(), + OutputArray labels=noArray(), + OutputArray probs=noArray()) = 0; - /** @brief Static method that estimate the Gaussian mixture parameters from a samples set + /** @brief Estimate the Gaussian mixture parameters from a samples set. This variation starts with Maximization step. You need to provide initial probabilities \f$p_{i,k}\f$ to use this option. @param samples Samples from which the Gaussian mixture model will be estimated. It should be a one-channel matrix, each row of which is a sample. If the matrix does not have CV_64F type it will be converted to the inner matrix of such type for the further computing. @param probs0 @param logLikelihoods The optional output matrix that contains a likelihood logarithm value for each sample. It has \f$nsamples \times 1\f$ size and CV_64FC1 type. @param labels The optional output "class label" for each sample: \f$\texttt{labels}_i=\texttt{arg max}_k(p_{i,k}), i=1..N\f$ (indices of the most probable mixture component for each sample). It has \f$nsamples \times 1\f$ size and CV_32SC1 type. @param probs The optional output matrix that contains posterior probabilities of each Gaussian mixture component given each sample. It has \f$nsamples \times nclusters\f$ size and CV_64FC1 type. - @param params The Gaussian mixture params, see EM::Params description */ - static Ptr<EM> train_startWithM(InputArray samples, InputArray probs0, - OutputArray logLikelihoods=noArray(), - OutputArray labels=noArray(), - OutputArray probs=noArray(), - const Params& params=Params()); - - /** @brief Creates empty %EM model - - @param params %EM parameters + virtual bool trainM(InputArray samples, InputArray probs0, + OutputArray logLikelihoods=noArray(), + OutputArray labels=noArray(), + OutputArray probs=noArray()) = 0; + /** Creates empty %EM model. The model should be trained then using StatModel::train(traindata, flags) method. Alternatively, you can use one of the EM::train\* methods or load it from file using StatModel::load\<EM\>(filename). */ - static Ptr<EM> create(const Params& params=Params()); + static Ptr<EM> create(); }; /****************************************************************************************\ * Decision Tree * \****************************************************************************************/ @@ -989,85 +915,74 @@ public: /** Predict options */ enum Flags { PREDICT_AUTO=0, PREDICT_SUM=(1<<8), PREDICT_MAX_VOTE=(2<<8), PREDICT_MASK=(3<<8) }; - /** @brief The structure contains all the decision tree training parameters. - - You can initialize it by default constructor and then override any parameters directly before - training, or the structure may be fully initialized using the advanced variant of the - constructor. - */ - class CV_EXPORTS_W_MAP Params - { - public: - /** @brief Default constructor. */ - Params(); - /** @brief Constructor with parameters */ - Params( int maxDepth, int minSampleCount, - double regressionAccuracy, bool useSurrogates, - int maxCategories, int CVFolds, - bool use1SERule, bool truncatePrunedTree, - const Mat& priors ); - - /** @brief Cluster possible values of a categorical variable into K\<=maxCategories clusters - to find a suboptimal split. - - If a discrete variable, on which the training procedure tries to make a split, takes more - than maxCategories values, the precise best subset estimation may take a very long time - because the algorithm is exponential.
Instead, many decision trees engines (including our - implementation) try to find sub-optimal split in this case by clustering all the samples - into maxCategories clusters that is some categories are merged together. The clustering is - applied only in n \> 2-class classification problems for categorical variables with N \> - max_categories possible values. In case of regression and 2-class classification the optimal - split can be found efficiently without employing clustering, thus the parameter is not used - in these cases. Default value is 10.*/ - CV_PROP_RW int maxCategories; - /** @brief The maximum possible depth of the tree. - - That is the training algorithms attempts to split a node while its depth is less than - maxDepth. The root node has zero depth. The actual depth may be smaller if the other - termination criteria are met (see the outline of the training procedure @ref ml_intro_trees - "here"), and/or if the tree is pruned. Default value is INT_MAX.*/ - CV_PROP_RW int maxDepth; - /** If the number of samples in a node is less than this parameter then the node will not be - split. Default value is 10.*/ - CV_PROP_RW int minSampleCount; - /** If CVFolds \> 1 then algorithms prunes the built decision tree using K-fold - cross-validation procedure where K is equal to CVFolds. Default value is 10.*/ - CV_PROP_RW int CVFolds; - /** @brief If true then surrogate splits will be built. - - These splits allow to work with missing data and compute variable importance correctly. - @note currently it's not implemented. Default value is false.*/ - CV_PROP_RW bool useSurrogates; - /** If true then a pruning will be harsher. This will make a tree more compact and more - resistant to the training data noise but a bit less accurate. Default value is true.*/ - CV_PROP_RW bool use1SERule; - /** If true then pruned branches are physically removed from the tree. Otherwise they are - retained and it is possible to get results from the original unpruned (or pruned less - aggressively) tree. Default value is true.*/ - CV_PROP_RW bool truncatePrunedTree; - /** @brief Termination criteria for regression trees. - - If all absolute differences between an estimated value in a node and values of train samples - in this node are less than this parameter then the node will not be split further. Default - value is 0.01f*/ - CV_PROP_RW float regressionAccuracy; - /** @brief The array of a priori class probabilities, sorted by the class label value. - - The parameter can be used to tune the decision tree preferences toward a certain class. For - example, if you want to detect some rare anomaly occurrence, the training base will likely - contain much more normal cases than anomalies, so a very good classification performance - will be achieved just by considering every case as normal. To avoid this, the priors can be - specified, where the anomaly probability is artificially increased (up to 0.5 or even - greater), so the weight of the misclassified anomalies becomes much bigger, and the tree is - adjusted properly. - - You can also think about this parameter as weights of prediction categories which determine - relative weights that you give to misclassification. That is, if the weight of the first - category is 1 and the weight of the second category is 10, then each mistake in predicting - the second category is equivalent to making 10 mistakes in predicting the first category. 
- Default value is empty Mat.*/ - CV_PROP_RW Mat priors; - }; + /** Cluster possible values of a categorical variable into K\<=maxCategories clusters to + find a suboptimal split. + If a discrete variable, on which the training procedure tries to make a split, takes more than + maxCategories values, the precise best subset estimation may take a very long time because the + algorithm is exponential. Instead, many decision tree engines (including our implementation) + try to find sub-optimal split in this case by clustering all the samples into maxCategories + clusters, that is, some categories are merged together. The clustering is applied only in n \> + 2-class classification problems for categorical variables with N \> max_categories possible + values. In case of regression and 2-class classification the optimal split can be found + efficiently without employing clustering, thus the parameter is not used in these cases. + Default value is 10.*/ + CV_PURE_PROPERTY(int, MaxCategories) + + /** The maximum possible depth of the tree. + That is, the training algorithm attempts to split a node while its depth is less than maxDepth. + The root node has zero depth. The actual depth may be smaller if the other termination criteria + are met (see the outline of the training procedure @ref ml_intro_trees "here"), and/or if the + tree is pruned. Default value is INT_MAX.*/ + CV_PURE_PROPERTY(int, MaxDepth) + /** If the number of samples in a node is less than this parameter then the node will not be split. + + Default value is 10.*/ + CV_PURE_PROPERTY(int, MinSampleCount) + + /** If CVFolds \> 1 then the algorithm prunes the built decision tree using K-fold + cross-validation procedure where K is equal to CVFolds. + Default value is 10.*/ + CV_PURE_PROPERTY(int, CVFolds) + + /** If true then surrogate splits will be built. + These splits allow working with missing data and computing variable importance correctly. + Default value is false. + @note currently it's not implemented.*/ + CV_PURE_PROPERTY(bool, UseSurrogates) + + /** If true then pruning will be harsher. + This will make a tree more compact and more resistant to the training data noise but a bit less + accurate. Default value is true.*/ + CV_PURE_PROPERTY(bool, Use1SERule) + + /** If true then pruned branches are physically removed from the tree. + Otherwise they are retained and it is possible to get results from the original unpruned (or + pruned less aggressively) tree. Default value is true.*/ + CV_PURE_PROPERTY(bool, TruncatePrunedTree) + + /** Termination criteria for regression trees. + If all absolute differences between an estimated value in a node and values of train samples + in this node are less than this parameter then the node will not be split further. Default + value is 0.01f*/ + CV_PURE_PROPERTY(float, RegressionAccuracy) + + /** @brief The array of a priori class probabilities, sorted by the class label value. + + The parameter can be used to tune the decision tree preferences toward a certain class. For + example, if you want to detect some rare anomaly occurrence, the training base will likely + contain much more normal cases than anomalies, so a very good classification performance + will be achieved just by considering every case as normal. To avoid this, the priors can be + specified, where the anomaly probability is artificially increased (up to 0.5 or even + greater), so the weight of the misclassified anomalies becomes much bigger, and the tree is + adjusted properly.
+
+ You can also think about this parameter as weights of prediction categories which determine
+ relative weights that you give to misclassification. That is, if the weight of the first
+ category is 1 and the weight of the second category is 10, then each mistake in predicting
+ the second category is equivalent to making 10 mistakes in predicting the first category.
+ Default value is empty Mat.*/
+ CV_PURE_PROPERTY_S(cv::Mat, Priors)

 /** @brief The class represents a decision tree node.
  */
@@ -1114,13 +1029,6 @@ public:
 @endcode
  */
 };

- /** @brief Sets the training parameters
- */
- virtual void setDParams(const Params& p);
- /** @brief Returns the training parameters
- */
- virtual Params getDParams() const;
-
 /** @brief Returns indices of root nodes
 */
 virtual const std::vector<int>& getRoots() const = 0;
@@ -1146,7 +1054,7 @@ public:
 trained using train method (see StatModel::train). Alternatively, you can load the model from
 file using StatModel::load\<DTrees\>(filename).
  */
- static Ptr<DTrees> create(const Params& params=Params());
+ static Ptr<DTrees> create();
 };
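With DTrees::Params removed, a tree is configured through the generated property
setters and trained via the usual StatModel interface. A minimal sketch of the new
calling convention (cv::ml namespace assumed; the CSV file name and its layout are
illustrative only, not part of this patch):

    using namespace cv::ml;

    // Load training data; "samples.csv" is a placeholder file name.
    Ptr<TrainData> data = TrainData::loadFromCSV("samples.csv", 0);

    Ptr<DTrees> dtree = DTrees::create();
    dtree->setMaxDepth(8);        // was DTrees::Params::maxDepth
    dtree->setMinSampleCount(2);  // was DTrees::Params::minSampleCount
    dtree->setCVFolds(0);         // 0 disables cross-validation pruning
    dtree->train(data);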
 /****************************************************************************************\
@@ -1160,58 +1068,38 @@ public:
 class CV_EXPORTS_W RTrees : public DTrees
 {
 public:
- /** @brief The set of training parameters for the forest is a superset of the training
- parameters for a single tree.
-
- However, random trees do not need all the functionality/features of decision trees. Most
- noticeably, the trees are not pruned, so the cross-validation parameters are not used.
- */
- class CV_EXPORTS_W_MAP Params : public DTrees::Params
- {
- public:
- /** @brief Default constructor. */
- Params();
- /** @brief Constructor with parameters. */
- Params( int maxDepth, int minSampleCount,
- double regressionAccuracy, bool useSurrogates,
- int maxCategories, const Mat& priors,
- bool calcVarImportance, int nactiveVars,
- TermCriteria termCrit );
-
- /** If true then variable importance will be calculated and then it can be retrieved by
- RTrees::getVarImportance. Default value is false.*/
- CV_PROP_RW bool calcVarImportance;
- /** The size of the randomly selected subset of features at each tree node and that are used
- to find the best split(s). If you set it to 0 then the size will be set to the square root
- of the total number of features. Default value is 0.*/
- CV_PROP_RW int nactiveVars;
- /** The termination criteria that specifies when the training algorithm stops - either when
- the specified number of trees is trained and added to the ensemble or when sufficient
- accuracy (measured as OOB error) is achieved. Typically the more trees you have the better
- the accuracy. However, the improvement in accuracy generally diminishes and asymptotes pass
- a certain number of trees. Also to keep in mind, the number of tree increases the prediction
- time linearly. Default value is TermCriteria(TermCriteria::MAX_ITERS + TermCriteria::EPS,
- 50, 0.1)*/
- CV_PROP_RW TermCriteria termCrit;
- };
-
- virtual void setRParams(const Params& p) = 0;
- virtual Params getRParams() const = 0;
-
- /** @brief Returns the variable importance array.
+ /** If true then variable importance will be calculated and then it can be retrieved by RTrees::getVarImportance.
+ Default value is false.*/
+ CV_PURE_PROPERTY(bool, CalculateVarImportance)
+
+ /** The size of the randomly selected subset of features at each tree node, used to find the best split(s).
+ If you set it to 0 then the size will be set to the square root of the total number of
+ features. Default value is 0.*/
+ CV_PURE_PROPERTY(int, ActiveVarCount)
+
+ /** The termination criteria that specifies when the training algorithm stops.
+ Either when the specified number of trees is trained and added to the ensemble or when
+ sufficient accuracy (measured as OOB error) is achieved. Typically the more trees you have the
+ better the accuracy. However, the improvement in accuracy generally diminishes and asymptotes
+ past a certain number of trees. Also keep in mind that the number of trees increases the
+ prediction time linearly. Default value is TermCriteria(TermCriteria::MAX_ITERS +
+ TermCriteria::EPS, 50, 0.1)*/
+ CV_PURE_PROPERTY_S(TermCriteria, TermCriteria)
+
+ /** Returns the variable importance array.
 The method returns the variable importance vector, computed at the training stage when
- Params::calcVarImportance is set to true. If this flag was set to false, the empty matrix is
+ CalculateVarImportance is set to true. If this flag was set to false, the empty matrix is
 returned.
  */
 virtual Mat getVarImportance() const = 0;

- /** @brief Creates the empty model
-
+ /** Creates the empty model.
 Use StatModel::train to train the model, StatModel::train\<RTrees\>(traindata, params) to create
 and train the model, StatModel::load\<RTrees\>(filename) to load the pre-trained model.
  */
- static Ptr<RTrees> create(const Params& params=Params());
+ static Ptr<RTrees> create();
 };

 /****************************************************************************************\
@@ -1225,36 +1113,21 @@ public:
 class CV_EXPORTS_W Boost : public DTrees
 {
 public:
- /** @brief Parameters of Boost trees.
+ /** Type of the boosting algorithm.
+ See Boost::Types. Default value is Boost::REAL. */
+ CV_PURE_PROPERTY(int, BoostType)

- The structure is derived from DTrees::Params but not all of the decision tree parameters are
- supported. In particular, cross-validation is not supported.
+ /** The number of weak classifiers.
+ Default value is 100. */
+ CV_PURE_PROPERTY(int, WeakCount)

- All parameters are public. You can initialize them by a constructor and then override some of
- them directly if you want.
- */
- class CV_EXPORTS_W_MAP Params : public DTrees::Params
- {
- public:
- CV_PROP_RW int boostType; //!< Type of the boosting algorithm. See Boost::Types.
- //!< Default value is Boost::REAL.
- CV_PROP_RW int weakCount; //!< The number of weak classifiers. Default value is 100.
- /** A threshold between 0 and 1 used to save computational time. Samples with summary weight
- \f$\leq 1 - weight_trim_rate\f$ do not participate in the *next* iteration of training. Set
- this parameter to 0 to turn off this functionality. Default value is 0.95.*/
- CV_PROP_RW double weightTrimRate;
-
- /** @brief Default constructor */
- Params();
- /** @brief Constructor with parameters */
- Params( int boostType, int weakCount, double weightTrimRate,
- int maxDepth, bool useSurrogates, const Mat& priors );
- };
-
- /** @brief Boosting type
+ /** A threshold between 0 and 1 used to save computational time.
+ Samples with summary weight \f$\leq 1 - weight_trim_rate\f$ do not participate in the *next*
+ iteration of training. Set this parameter to 0 to turn off this functionality. Default value is 0.95.*/
+ CV_PURE_PROPERTY(double, WeightTrimRate)

- Gentle AdaBoost and Real AdaBoost are often the preferable choices.
- */
+ /** Boosting type.
+ Gentle AdaBoost and Real AdaBoost are often the preferable choices. */
 enum Types {
 DISCRETE=0, //!< Discrete AdaBoost.
 REAL=1, //!< Real AdaBoost. It is a technique that utilizes confidence-rated predictions
@@ -1264,17 +1137,9 @@ public:
 //!< reason is often good with regression data.
 };

- /** @brief Creates the empty model
-
- Use StatModel::train to train the model, StatModel::train\<Boost\>(traindata, params) to create
- and train the model, StatModel::load\<Boost\>(filename) to load the pre-trained model.
- */
- static Ptr<Boost> create(const Params& params=Params());
+ /** Creates the empty model.
+ Use StatModel::train to train the model, StatModel::load\<Boost\>(filename) to load the pre-trained model. */
+ static Ptr<Boost> create();
 };
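Boost keeps the same pattern: the boosting-specific properties declared above, plus the
DTrees properties it inherits, are all set through individual accessors. A short sketch
(the TrainData object "data" is assumed to be prepared as in the previous example):

    Ptr<Boost> boost = Boost::create();
    boost->setBoostType(Boost::REAL);  // see Boost::Types
    boost->setWeakCount(100);
    boost->setWeightTrimRate(0.95);
    boost->setMaxDepth(2);             // inherited DTrees property
    boost->train(data);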
 /****************************************************************************************\
@@ -1327,68 +1192,78 @@ Additional flags for StatModel::train are available: ANN_MLP::TrainFlags.
 class CV_EXPORTS_W ANN_MLP : public StatModel
 {
 public:
- /** @brief Parameters of the MLP and of the training algorithm.
- */
- struct CV_EXPORTS_W_MAP Params
- {
- /** @brief Default constructor */
- Params();
- /** @brief Constructor with parameters
- @note param1 sets Params::rp_dw0 for RPROP and Params::bp_dw_scale for BACKPROP.
- @note param2 sets Params::rp_dw_min for RPROP and Params::bp_moment_scale for BACKPROP.
- */
- Params( const Mat& layerSizes, int activateFunc, double fparam1, double fparam2,
- TermCriteria termCrit, int trainMethod, double param1, double param2=0 );
-
- /** Available training methods */
- enum TrainingMethods {
- BACKPROP=0, //!< The back-propagation algorithm.
- RPROP=1 //!< The RPROP algorithm. See @cite RPROP93 for details.
- };
-
- /** Integer vector specifying the number of neurons in each layer including the input and
- output layers. The very first element specifies the number of elements in the input layer.
- The last element - number of elements in the output layer. Default value is empty Mat.*/
- CV_PROP_RW Mat layerSizes;
- /** The activation function for each neuron. Currently the default and the only fully
- supported activation function is ANN_MLP::SIGMOID_SYM. See ANN_MLP::ActivationFunctions.*/
- CV_PROP_RW int activateFunc;
- /** The first parameter of the activation function, \f$\alpha\f$. Default value is 0. */
- CV_PROP_RW double fparam1;
- /** The second parameter of the activation function, \f$\beta\f$. Default value is 0. */
- CV_PROP_RW double fparam2;
-
- /** Termination criteria of the training algorithm. You can specify the maximum number of
- iterations (maxCount) and/or how much the error could change between the iterations to make
- the algorithm continue (epsilon). Default value is TermCriteria(TermCriteria::MAX_ITER +
- TermCriteria::EPS, 1000, 0.01).*/
- CV_PROP_RW TermCriteria termCrit;
- /** Training method. Default value is Params::RPROP. See ANN_MLP::Params::TrainingMethods.*/
- CV_PROP_RW int trainMethod;
-
- // backpropagation parameters
- /** BPROP: Strength of the weight gradient term. The recommended value is about 0.1. Default
- value is 0.1.*/
- CV_PROP_RW double bpDWScale;
- /** BPROP: Strength of the momentum term (the difference between weights on the 2 previous
- iterations). This parameter provides some inertia to smooth the random fluctuations of the
- weights. It can vary from 0 (the feature is disabled) to 1 and beyond. The value 0.1 or so
- is good enough. Default value is 0.1.*/
- CV_PROP_RW double bpMomentScale;
-
- // rprop parameters
- /** RPROP: Initial value \f$\Delta_0\f$ of update-values \f$\Delta_{ij}\f$. Default value is 0.1.*/
- CV_PROP_RW double rpDW0;
- /** RPROP: Increase factor \f$\eta^+\f$. It must be \>1. Default value is 1.2.*/
- CV_PROP_RW double rpDWPlus;
- /** RPROP: Decrease factor \f$\eta^-\f$. It must be \<1. Default value is 0.5.*/
- CV_PROP_RW double rpDWMinus;
- /** RPROP: Update-values lower limit \f$\Delta_{min}\f$. It must be positive. Default value is FLT_EPSILON.*/
- CV_PROP_RW double rpDWMin;
- /** RPROP: Update-values upper limit \f$\Delta_{max}\f$. It must be \>1. Default value is 50.*/
- CV_PROP_RW double rpDWMax;
- };
+ /** Available training methods */
+ enum TrainingMethods {
+ BACKPROP=0, //!< The back-propagation algorithm.
+ RPROP=1 //!< The RPROP algorithm. See @cite RPROP93 for details.
 };

+ /** Sets training method and common parameters.
+ @param method Default value is ANN_MLP::RPROP. See ANN_MLP::TrainingMethods.
+ @param param1 passed to setRpropDW0 for ANN_MLP::RPROP and to setBackpropWeightScale for ANN_MLP::BACKPROP.
+ @param param2 passed to setRpropDWMin for ANN_MLP::RPROP and to setBackpropMomentumScale for ANN_MLP::BACKPROP.
+ */
+ virtual void setTrainMethod(int method, double param1 = 0, double param2 = 0) = 0;
+
+ /** Returns current training method */
+ virtual int getTrainMethod() const = 0;
+
+ /** Initialize the activation function for each neuron.
+ Currently the default and the only fully supported activation function is ANN_MLP::SIGMOID_SYM.
+ @param type The type of activation function. See ANN_MLP::ActivationFunctions.
+ @param param1 The first parameter of the activation function, \f$\alpha\f$. Default value is 0.
+ @param param2 The second parameter of the activation function, \f$\beta\f$. Default value is 0.
+ */
+ virtual void setActivationFunction(int type, double param1 = 0, double param2 = 0) = 0;
+
+ /** Integer vector specifying the number of neurons in each layer including the input and output layers.
+ The very first element specifies the number of elements in the input layer.
+ The last element specifies the number of elements in the output layer. Default value is empty Mat.
+ @sa getLayerSizes */
+ virtual void setLayerSizes(InputArray _layer_sizes) = 0;
+
+ /** Integer vector specifying the number of neurons in each layer including the input and output layers.
+ The very first element specifies the number of elements in the input layer.
+ The last element specifies the number of elements in the output layer.
+ @sa setLayerSizes */
+ virtual cv::Mat getLayerSizes() const = 0;
+
+ /** Termination criteria of the training algorithm.
+ You can specify the maximum number of iterations (maxCount) and/or how much the error could
+ change between the iterations to make the algorithm continue (epsilon). Default value is
+ TermCriteria(TermCriteria::MAX_ITER + TermCriteria::EPS, 1000, 0.01).*/
+ CV_PURE_PROPERTY(TermCriteria, TermCriteria)
+
+ /** BPROP: Strength of the weight gradient term.
+ The recommended value is about 0.1. Default value is 0.1.*/
+ CV_PURE_PROPERTY(double, BackpropWeightScale)
+
+ /** BPROP: Strength of the momentum term (the difference between weights on the 2 previous iterations).
+ This parameter provides some inertia to smooth the random fluctuations of the weights. It can
+ vary from 0 (the feature is disabled) to 1 and beyond. The value 0.1 or so is good enough.
+ Default value is 0.1.*/
+ CV_PURE_PROPERTY(double, BackpropMomentumScale)
+
+ /** RPROP: Initial value \f$\Delta_0\f$ of update-values \f$\Delta_{ij}\f$.
+ Default value is 0.1.*/
+ CV_PURE_PROPERTY(double, RpropDW0)
+
+ /** RPROP: Increase factor \f$\eta^+\f$.
+ It must be \>1. Default value is 1.2.*/
+ CV_PURE_PROPERTY(double, RpropDWPlus)
+
+ /** RPROP: Decrease factor \f$\eta^-\f$.
+ It must be \<1. Default value is 0.5.*/
+ CV_PURE_PROPERTY(double, RpropDWMinus)
+
+ /** RPROP: Update-values lower limit \f$\Delta_{min}\f$.
+ It must be positive. Default value is FLT_EPSILON.*/
+ CV_PURE_PROPERTY(double, RpropDWMin)
+
+ /** RPROP: Update-values upper limit \f$\Delta_{max}\f$.
+ It must be \>1. Default value is 50.*/
+ CV_PURE_PROPERTY(double, RpropDWMax)
+
 /** possible activation functions */
 enum ActivationFunctions {
 /** Identity function: \f$f(x)=x\f$ */
@@ -1422,19 +1297,12 @@ public:
 virtual Mat getWeights(int layerIdx) const = 0;

- /** @brief Sets the new network parameters */
- virtual void setParams(const Params& p) = 0;
-
- /** @brief Retrieves the current network parameters */
- virtual Params getParams() const = 0;
-
 /** @brief Creates empty model

- Use StatModel::train to train the model, StatModel::train\<ANN_MLP\>(traindata, params) to
- create and train the model, StatModel::load\<ANN_MLP\>(filename) to load the pre-trained model.
+ Use StatModel::train to train the model, StatModel::load\<ANN_MLP\>(filename) to load the pre-trained model.
 Note that the train method has optional flags: ANN_MLP::TrainFlags.
  */
- static Ptr<ANN_MLP> create(const Params& params=Params());
+ static Ptr<ANN_MLP> create();
 };
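For ANN_MLP the constructor-time parameters become explicit calls; note that the meaning
of param1/param2 in setTrainMethod depends on the chosen method, as documented above. A
minimal sketch (the layer sizes and the TrainData object "data" are illustrative):

    Ptr<ANN_MLP> mlp = ANN_MLP::create();
    Mat layers = (Mat_<int>(1, 3) << 16, 10, 2); // input, hidden, output neuron counts
    mlp->setLayerSizes(layers);
    mlp->setActivationFunction(ANN_MLP::SIGMOID_SYM, 0, 0);
    mlp->setTrainMethod(ANN_MLP::BACKPROP, 0.1, 0.1); // param1 = weight scale, param2 = momentum
    mlp->setTermCriteria(TermCriteria(TermCriteria::MAX_ITER + TermCriteria::EPS, 1000, 0.01));
    mlp->train(data);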
 /****************************************************************************************\
@@ -1448,43 +1316,38 @@ public:
 class CV_EXPORTS LogisticRegression : public StatModel
 {
 public:
- class CV_EXPORTS Params
- {
- public:
- /** @brief Constructor */
- Params(double learning_rate = 0.001,
- int iters = 1000,
- int method = LogisticRegression::BATCH,
- int normalization = LogisticRegression::REG_L2,
- int reg = 1,
- int batch_size = 1);
- double alpha; //!< learning rate.
- int num_iters; //!< number of iterations.
- /** Kind of regularization to be applied. See LogisticRegression::RegKinds. */
- int norm;
- /** Enable or disable regularization. Set to positive integer (greater than zero) to enable
- and to 0 to disable. */
- int regularized;
- /** Kind of training method used. See LogisticRegression::Methods. */
- int train_method;
- /** Specifies the number of training samples taken in each step of Mini-Batch Gradient
- Descent. Will only be used if using LogisticRegression::MINI_BATCH training algorithm. It
- has to take values less than the total number of training samples. */
- int mini_batch_size;
- /** Termination criteria of the algorithm */
- TermCriteria term_crit;
- };
+
+ /** Learning rate. */
+ CV_PURE_PROPERTY(double, LearningRate)
+
+ /** Number of iterations. */
+ CV_PURE_PROPERTY(int, Iterations)
+
+ /** Kind of regularization to be applied. See LogisticRegression::RegKinds. */
+ CV_PURE_PROPERTY(int, Regularization)
+
+ /** Kind of training method used. See LogisticRegression::Methods. */
+ CV_PURE_PROPERTY(int, TrainMethod)
+
+ /** Specifies the number of training samples taken in each step of Mini-Batch Gradient
+ Descent. It is used only with the LogisticRegression::MINI_BATCH training algorithm and must
+ be less than the total number of training samples. */
+ CV_PURE_PROPERTY(int, MiniBatchSize)
+
+ /** Termination criteria of the algorithm. */
+ CV_PURE_PROPERTY(TermCriteria, TermCriteria)

 //! Regularization kinds
 enum RegKinds {
+ REG_NONE = -1, //!< Regularization disabled
 REG_L1 = 0, //!< %L1 norm
- REG_L2 = 1 //!< %L2 norm. Set Params::regularized \> 0 when using this kind
+ REG_L2 = 1 //!< %L2 norm
 };

 //! Training methods
 enum Methods {
 BATCH = 0,
- MINI_BATCH = 1 //!< Set Params::mini_batch_size to a positive integer when using this method.
+ MINI_BATCH = 1 //!< Set MiniBatchSize to a positive integer when using this method. }; /** @brief Predicts responses for input samples and returns a float type. @@ -1505,11 +1368,9 @@ public: /** @brief Creates empty model. - @param params The training parameters for the classifier of type LogisticRegression::Params. - Creates Logistic Regression model with parameters given. */ - static Ptr create( const Params& params = Params() ); + static Ptr create(); }; /****************************************************************************************\ diff --git a/modules/ml/src/ann_mlp.cpp b/modules/ml/src/ann_mlp.cpp index 3bc173513b..2b29519cef 100644 --- a/modules/ml/src/ann_mlp.cpp +++ b/modules/ml/src/ann_mlp.cpp @@ -42,84 +42,57 @@ namespace cv { namespace ml { -ANN_MLP::Params::Params() +struct AnnParams { - layerSizes = Mat(); - activateFunc = SIGMOID_SYM; - fparam1 = fparam2 = 0; - termCrit = TermCriteria( TermCriteria::COUNT + TermCriteria::EPS, 1000, 0.01 ); - trainMethod = RPROP; - bpDWScale = bpMomentScale = 0.1; - rpDW0 = 0.1; rpDWPlus = 1.2; rpDWMinus = 0.5; - rpDWMin = FLT_EPSILON; rpDWMax = 50.; -} + AnnParams() + { + termCrit = TermCriteria( TermCriteria::COUNT + TermCriteria::EPS, 1000, 0.01 ); + trainMethod = ANN_MLP::RPROP; + bpDWScale = bpMomentScale = 0.1; + rpDW0 = 0.1; rpDWPlus = 1.2; rpDWMinus = 0.5; + rpDWMin = FLT_EPSILON; rpDWMax = 50.; + } + + TermCriteria termCrit; + int trainMethod; + + double bpDWScale; + double bpMomentScale; + double rpDW0; + double rpDWPlus; + double rpDWMinus; + double rpDWMin; + double rpDWMax; +}; -ANN_MLP::Params::Params( const Mat& _layerSizes, int _activateFunc, double _fparam1, double _fparam2, - TermCriteria _termCrit, int _trainMethod, double _param1, double _param2 ) +template +inline T inBounds(T val, T min_val, T max_val) { - layerSizes = _layerSizes; - activateFunc = _activateFunc; - fparam1 = _fparam1; - fparam2 = _fparam2; - termCrit = _termCrit; - trainMethod = _trainMethod; - bpDWScale = bpMomentScale = 0.1; - rpDW0 = 1.; rpDWPlus = 1.2; rpDWMinus = 0.5; - rpDWMin = FLT_EPSILON; rpDWMax = 50.; - - if( trainMethod == RPROP ) - { - rpDW0 = _param1; - if( rpDW0 < FLT_EPSILON ) - rpDW0 = 1.; - rpDWMin = _param2; - rpDWMin = std::max( rpDWMin, 0. ); - } - else if( trainMethod == BACKPROP ) - { - bpDWScale = _param1; - if( bpDWScale <= 0 ) - bpDWScale = 0.1; - bpDWScale = std::max( bpDWScale, 1e-3 ); - bpDWScale = std::min( bpDWScale, 1. ); - bpMomentScale = _param2; - if( bpMomentScale < 0 ) - bpMomentScale = 0.1; - bpMomentScale = std::min( bpMomentScale, 1. 
); - } - else - trainMethod = RPROP; + return std::min(std::max(val, min_val), max_val); } - class ANN_MLPImpl : public ANN_MLP { public: ANN_MLPImpl() { clear(); - } - - ANN_MLPImpl( const Params& p ) - { - clear(); - setParams(p); + setActivationFunction( SIGMOID_SYM, 0, 0 ); + setLayerSizes(Mat()); + setTrainMethod(ANN_MLP::RPROP, 0.1, FLT_EPSILON); } virtual ~ANN_MLPImpl() {} - void setParams(const Params& p) - { - params = p; - create( params.layerSizes ); - set_activ_func( params.activateFunc, params.fparam1, params.fparam2 ); - } - - Params getParams() const - { - return params; - } + CV_IMPL_PROPERTY(TermCriteria, TermCriteria, params.termCrit) + CV_IMPL_PROPERTY(double, BackpropWeightScale, params.bpDWScale) + CV_IMPL_PROPERTY(double, BackpropMomentumScale, params.bpMomentScale) + CV_IMPL_PROPERTY(double, RpropDW0, params.rpDW0) + CV_IMPL_PROPERTY(double, RpropDWPlus, params.rpDWPlus) + CV_IMPL_PROPERTY(double, RpropDWMinus, params.rpDWMinus) + CV_IMPL_PROPERTY(double, RpropDWMin, params.rpDWMin) + CV_IMPL_PROPERTY(double, RpropDWMax, params.rpDWMax) void clear() { @@ -132,7 +105,35 @@ public: int layer_count() const { return (int)layer_sizes.size(); } - void set_activ_func( int _activ_func, double _f_param1, double _f_param2 ) + void setTrainMethod(int method, double param1, double param2) + { + if (method != ANN_MLP::RPROP && method != ANN_MLP::BACKPROP) + method = ANN_MLP::RPROP; + params.trainMethod = method; + if(method == ANN_MLP::RPROP ) + { + if( param1 < FLT_EPSILON ) + param1 = 1.; + params.rpDW0 = param1; + params.rpDWMin = std::max( param2, 0. ); + } + else if(method == ANN_MLP::BACKPROP ) + { + if( param1 <= 0 ) + param1 = 0.1; + params.bpDWScale = inBounds(param1, 1e-3, 1.); + if( param2 < 0 ) + param2 = 0.1; + params.bpMomentScale = std::min( param2, 1. ); + } + } + + int getTrainMethod() const + { + return params.trainMethod; + } + + void setActivationFunction(int _activ_func, double _f_param1, double _f_param2 ) { if( _activ_func < 0 || _activ_func > GAUSSIAN ) CV_Error( CV_StsOutOfRange, "Unknown activation function" ); @@ -201,7 +202,12 @@ public: } } - void create( InputArray _layer_sizes ) + Mat getLayerSizes() const + { + return Mat_(layer_sizes, true); + } + + void setLayerSizes( InputArray _layer_sizes ) { clear(); @@ -700,7 +706,7 @@ public: termcrit.maxCount = std::max((params.termCrit.type & CV_TERMCRIT_ITER ? params.termCrit.maxCount : MAX_ITER), 1); termcrit.epsilon = std::max((params.termCrit.type & CV_TERMCRIT_EPS ? params.termCrit.epsilon : DEFAULT_EPSILON), DBL_EPSILON); - int iter = params.trainMethod == Params::BACKPROP ? + int iter = params.trainMethod == ANN_MLP::BACKPROP ? 
train_backprop( inputs, outputs, sw, termcrit ) : train_rprop( inputs, outputs, sw, termcrit ); @@ -1113,13 +1119,13 @@ public: fs << "min_val" << min_val << "max_val" << max_val << "min_val1" << min_val1 << "max_val1" << max_val1; fs << "training_params" << "{"; - if( params.trainMethod == Params::BACKPROP ) + if( params.trainMethod == ANN_MLP::BACKPROP ) { fs << "train_method" << "BACKPROP"; fs << "dw_scale" << params.bpDWScale; fs << "moment_scale" << params.bpMomentScale; } - else if( params.trainMethod == Params::RPROP ) + else if( params.trainMethod == ANN_MLP::RPROP ) { fs << "train_method" << "RPROP"; fs << "dw0" << params.rpDW0; @@ -1186,7 +1192,7 @@ public: f_param1 = (double)fn["f_param1"]; f_param2 = (double)fn["f_param2"]; - set_activ_func( activ_func, f_param1, f_param2 ); + setActivationFunction( activ_func, f_param1, f_param2 ); min_val = (double)fn["min_val"]; max_val = (double)fn["max_val"]; @@ -1194,7 +1200,7 @@ public: max_val1 = (double)fn["max_val1"]; FileNode tpn = fn["training_params"]; - params = Params(); + params = AnnParams(); if( !tpn.empty() ) { @@ -1202,13 +1208,13 @@ public: if( tmethod_name == "BACKPROP" ) { - params.trainMethod = Params::BACKPROP; + params.trainMethod = ANN_MLP::BACKPROP; params.bpDWScale = (double)tpn["dw_scale"]; params.bpMomentScale = (double)tpn["moment_scale"]; } else if( tmethod_name == "RPROP" ) { - params.trainMethod = Params::RPROP; + params.trainMethod = ANN_MLP::RPROP; params.rpDW0 = (double)tpn["dw0"]; params.rpDWPlus = (double)tpn["dw_plus"]; params.rpDWMinus = (double)tpn["dw_minus"]; @@ -1244,7 +1250,7 @@ public: vector _layer_sizes; readVectorOrMat(fn["layer_sizes"], _layer_sizes); - create( _layer_sizes ); + setLayerSizes( _layer_sizes ); int i, l_count = layer_count(); read_params(fn); @@ -1267,11 +1273,6 @@ public: trained = true; } - Mat getLayerSizes() const - { - return Mat_(layer_sizes, true); - } - Mat getWeights(int layerIdx) const { CV_Assert( 0 <= layerIdx && layerIdx < (int)weights.size() ); @@ -1304,17 +1305,16 @@ public: double min_val, max_val, min_val1, max_val1; int activ_func; int max_lsize, max_buf_sz; - Params params; + AnnParams params; RNG rng; Mutex mtx; bool trained; }; -Ptr ANN_MLP::create(const ANN_MLP::Params& params) +Ptr ANN_MLP::create() { - Ptr ann = makePtr(params); - return ann; + return makePtr(); } }} diff --git a/modules/ml/src/boost.cpp b/modules/ml/src/boost.cpp index 236cd97a2d..5694ff1051 100644 --- a/modules/ml/src/boost.cpp +++ b/modules/ml/src/boost.cpp @@ -54,47 +54,32 @@ log_ratio( double val ) } -Boost::Params::Params() +BoostTreeParams::BoostTreeParams() { boostType = Boost::REAL; weakCount = 100; weightTrimRate = 0.95; - CVFolds = 0; - maxDepth = 1; } - -Boost::Params::Params( int _boostType, int _weak_count, - double _weightTrimRate, int _maxDepth, - bool _use_surrogates, const Mat& _priors ) +BoostTreeParams::BoostTreeParams( int _boostType, int _weak_count, + double _weightTrimRate) { boostType = _boostType; weakCount = _weak_count; weightTrimRate = _weightTrimRate; - CVFolds = 0; - maxDepth = _maxDepth; - useSurrogates = _use_surrogates; - priors = _priors; } - class DTreesImplForBoost : public DTreesImpl { public: - DTreesImplForBoost() {} - virtual ~DTreesImplForBoost() {} - - bool isClassifier() const { return true; } - - void setBParams(const Boost::Params& p) + DTreesImplForBoost() { - bparams = p; + params.setCVFolds(0); + params.setMaxDepth(1); } + virtual ~DTreesImplForBoost() {} - Boost::Params getBParams() const - { - return bparams; - } + bool isClassifier() 
const { return true; } void clear() { @@ -199,10 +184,6 @@ public: bool train( const Ptr& trainData, int flags ) { - Params dp(bparams.maxDepth, bparams.minSampleCount, bparams.regressionAccuracy, - bparams.useSurrogates, bparams.maxCategories, 0, - false, false, bparams.priors); - setDParams(dp); startTraining(trainData, flags); int treeidx, ntrees = bparams.weakCount >= 0 ? bparams.weakCount : 10000; vector sidx = w->sidx; @@ -426,12 +407,6 @@ public: void readParams( const FileNode& fn ) { DTreesImpl::readParams(fn); - bparams.maxDepth = params0.maxDepth; - bparams.minSampleCount = params0.minSampleCount; - bparams.regressionAccuracy = params0.regressionAccuracy; - bparams.useSurrogates = params0.useSurrogates; - bparams.maxCategories = params0.maxCategories; - bparams.priors = params0.priors; FileNode tparams_node = fn["training_params"]; // check for old layout @@ -465,7 +440,7 @@ public: } } - Boost::Params bparams; + BoostTreeParams bparams; vector sumResult; }; @@ -476,6 +451,20 @@ public: BoostImpl() {} virtual ~BoostImpl() {} + CV_IMPL_PROPERTY(int, BoostType, impl.bparams.boostType) + CV_IMPL_PROPERTY(int, WeakCount, impl.bparams.weakCount) + CV_IMPL_PROPERTY(double, WeightTrimRate, impl.bparams.weightTrimRate) + + CV_WRAP_SAME_PROPERTY(int, MaxCategories, impl.params) + CV_WRAP_SAME_PROPERTY(int, MaxDepth, impl.params) + CV_WRAP_SAME_PROPERTY(int, MinSampleCount, impl.params) + CV_WRAP_SAME_PROPERTY(int, CVFolds, impl.params) + CV_WRAP_SAME_PROPERTY(bool, UseSurrogates, impl.params) + CV_WRAP_SAME_PROPERTY(bool, Use1SERule, impl.params) + CV_WRAP_SAME_PROPERTY(bool, TruncatePrunedTree, impl.params) + CV_WRAP_SAME_PROPERTY(float, RegressionAccuracy, impl.params) + CV_WRAP_SAME_PROPERTY_S(cv::Mat, Priors, impl.params) + String getDefaultModelName() const { return "opencv_ml_boost"; } bool train( const Ptr& trainData, int flags ) @@ -498,9 +487,6 @@ public: impl.read(fn); } - void setBParams(const Params& p) { impl.setBParams(p); } - Params getBParams() const { return impl.getBParams(); } - int getVarCount() const { return impl.getVarCount(); } bool isTrained() const { return impl.isTrained(); } @@ -515,11 +501,9 @@ public: }; -Ptr Boost::create(const Params& params) +Ptr Boost::create() { - Ptr p = makePtr(); - p->setBParams(params); - return p; + return makePtr(); } }} diff --git a/modules/ml/src/em.cpp b/modules/ml/src/em.cpp index 351ca39fc7..c84be84b9c 100644 --- a/modules/ml/src/em.cpp +++ b/modules/ml/src/em.cpp @@ -48,37 +48,49 @@ namespace ml const double minEigenValue = DBL_EPSILON; -EM::Params::Params(int _nclusters, int _covMatType, const TermCriteria& _termCrit) -{ - nclusters = _nclusters; - covMatType = _covMatType; - termCrit = _termCrit; -} - class CV_EXPORTS EMImpl : public EM { public: - EMImpl(const Params& _params) + + int nclusters; + int covMatType; + TermCriteria termCrit; + + CV_IMPL_PROPERTY_S(TermCriteria, TermCriteria, termCrit) + + void setClustersNumber(int val) { - setParams(_params); + nclusters = val; + CV_Assert(nclusters > 1); } - virtual ~EMImpl() {} + int getClustersNumber() const + { + return nclusters; + } - void setParams(const Params& _params) + void setCovarianceMatrixType(int val) { - params = _params; - CV_Assert(params.nclusters > 1); - CV_Assert(params.covMatType == COV_MAT_SPHERICAL || - params.covMatType == COV_MAT_DIAGONAL || - params.covMatType == COV_MAT_GENERIC); + covMatType = val; + CV_Assert(covMatType == COV_MAT_SPHERICAL || + covMatType == COV_MAT_DIAGONAL || + covMatType == COV_MAT_GENERIC); } - Params getParams() const + 
int getCovarianceMatrixType() const { - return params; + return covMatType; } + EMImpl() + { + nclusters = DEFAULT_NCLUSTERS; + covMatType=EM::COV_MAT_DIAGONAL; + termCrit = TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, EM::DEFAULT_MAX_ITERS, 1e-6); + } + + virtual ~EMImpl() {} + void clear() { trainSamples.release(); @@ -100,10 +112,10 @@ public: bool train(const Ptr& data, int) { Mat samples = data->getTrainSamples(), labels; - return train_(samples, labels, noArray(), noArray()); + return trainEM(samples, labels, noArray(), noArray()); } - bool train_(InputArray samples, + bool trainEM(InputArray samples, OutputArray logLikelihoods, OutputArray labels, OutputArray probs) @@ -157,7 +169,7 @@ public: { if( _outputs.fixedType() ) ptype = _outputs.type(); - _outputs.create(samples.rows, params.nclusters, ptype); + _outputs.create(samples.rows, nclusters, ptype); } else nsamples = std::min(nsamples, 1); @@ -193,7 +205,7 @@ public: { if( _probs.fixedType() ) ptype = _probs.type(); - _probs.create(1, params.nclusters, ptype); + _probs.create(1, nclusters, ptype); probs = _probs.getMat(); } @@ -311,7 +323,6 @@ public: const std::vector* covs0, const Mat* weights0) { - int nclusters = params.nclusters, covMatType = params.covMatType; clear(); checkTrainData(startStep, samples, nclusters, covMatType, probs0, means0, covs0, weights0); @@ -350,7 +361,6 @@ public: void decomposeCovs() { - int nclusters = params.nclusters, covMatType = params.covMatType; CV_Assert(!covs.empty()); covsEigenValues.resize(nclusters); if(covMatType == COV_MAT_GENERIC) @@ -383,7 +393,6 @@ public: void clusterTrainSamples() { - int nclusters = params.nclusters; int nsamples = trainSamples.rows; // Cluster samples, compute/update means @@ -443,7 +452,6 @@ public: void computeLogWeightDivDet() { - int nclusters = params.nclusters; CV_Assert(!covsEigenValues.empty()); Mat logWeights; @@ -458,7 +466,7 @@ public: double logDetCov = 0.; const int evalCount = static_cast(covsEigenValues[clusterIndex].total()); for(int di = 0; di < evalCount; di++) - logDetCov += std::log(covsEigenValues[clusterIndex].at(params.covMatType != COV_MAT_SPHERICAL ? di : 0)); + logDetCov += std::log(covsEigenValues[clusterIndex].at(covMatType != COV_MAT_SPHERICAL ? di : 0)); logWeightDivDet.at(clusterIndex) = logWeights.at(clusterIndex) - 0.5 * logDetCov; } @@ -466,7 +474,6 @@ public: bool doTrain(int startStep, OutputArray logLikelihoods, OutputArray labels, OutputArray probs) { - int nclusters = params.nclusters; int dim = trainSamples.cols; // Precompute the empty initial train data in the cases of START_E_STEP and START_AUTO_STEP if(startStep != START_M_STEP) @@ -488,9 +495,9 @@ public: mStep(); double trainLogLikelihood, prevTrainLogLikelihood = 0.; - int maxIters = (params.termCrit.type & TermCriteria::MAX_ITER) ? - params.termCrit.maxCount : DEFAULT_MAX_ITERS; - double epsilon = (params.termCrit.type & TermCriteria::EPS) ? params.termCrit.epsilon : 0.; + int maxIters = (termCrit.type & TermCriteria::MAX_ITER) ? + termCrit.maxCount : DEFAULT_MAX_ITERS; + double epsilon = (termCrit.type & TermCriteria::EPS) ? 
termCrit.epsilon : 0.; for(int iter = 0; ; iter++) { @@ -521,12 +528,12 @@ public: covs.resize(nclusters); for(int clusterIndex = 0; clusterIndex < nclusters; clusterIndex++) { - if(params.covMatType == COV_MAT_SPHERICAL) + if(covMatType == COV_MAT_SPHERICAL) { covs[clusterIndex].create(dim, dim, CV_64FC1); setIdentity(covs[clusterIndex], Scalar(covsEigenValues[clusterIndex].at(0))); } - else if(params.covMatType == COV_MAT_DIAGONAL) + else if(covMatType == COV_MAT_DIAGONAL) { covs[clusterIndex] = Mat::diag(covsEigenValues[clusterIndex]); } @@ -555,7 +562,6 @@ public: // see Alex Smola's blog http://blog.smola.org/page/2 for // details on the log-sum-exp trick - int nclusters = params.nclusters, covMatType = params.covMatType; int stype = sample.type(); CV_Assert(!means.empty()); CV_Assert((stype == CV_32F || stype == CV_64F) && (ptype == CV_32F || ptype == CV_64F)); @@ -621,7 +627,7 @@ public: void eStep() { // Compute probs_ik from means_k, covs_k and weights_k. - trainProbs.create(trainSamples.rows, params.nclusters, CV_64FC1); + trainProbs.create(trainSamples.rows, nclusters, CV_64FC1); trainLabels.create(trainSamples.rows, 1, CV_32SC1); trainLogLikelihoods.create(trainSamples.rows, 1, CV_64FC1); @@ -642,8 +648,6 @@ public: void mStep() { // Update means_k, covs_k and weights_k from probs_ik - int nclusters = params.nclusters; - int covMatType = params.covMatType; int dim = trainSamples.cols; // Update weights @@ -755,12 +759,12 @@ public: void write_params(FileStorage& fs) const { - fs << "nclusters" << params.nclusters; - fs << "cov_mat_type" << (params.covMatType == COV_MAT_SPHERICAL ? String("spherical") : - params.covMatType == COV_MAT_DIAGONAL ? String("diagonal") : - params.covMatType == COV_MAT_GENERIC ? String("generic") : - format("unknown_%d", params.covMatType)); - writeTermCrit(fs, params.termCrit); + fs << "nclusters" << nclusters; + fs << "cov_mat_type" << (covMatType == COV_MAT_SPHERICAL ? String("spherical") : + covMatType == COV_MAT_DIAGONAL ? String("diagonal") : + covMatType == COV_MAT_GENERIC ? String("generic") : + format("unknown_%d", covMatType)); + writeTermCrit(fs, termCrit); } void write(FileStorage& fs) const @@ -781,15 +785,13 @@ public: void read_params(const FileNode& fn) { - Params _params; - _params.nclusters = (int)fn["nclusters"]; + nclusters = (int)fn["nclusters"]; String s = (String)fn["cov_mat_type"]; - _params.covMatType = s == "spherical" ? COV_MAT_SPHERICAL : + covMatType = s == "spherical" ? COV_MAT_SPHERICAL : s == "diagonal" ? COV_MAT_DIAGONAL : s == "generic" ? 
COV_MAT_GENERIC : -1; - CV_Assert(_params.covMatType >= 0); - _params.termCrit = readTermCrit(fn); - setParams(_params); + CV_Assert(covMatType >= 0); + termCrit = readTermCrit(fn); } void read(const FileNode& fn) @@ -820,8 +822,6 @@ public: std::copy(covs.begin(), covs.end(), _covs.begin()); } - Params params; - // all inner matrices have type CV_64FC1 Mat trainSamples; Mat trainProbs; @@ -838,41 +838,9 @@ public: Mat logWeightDivDet; }; - -Ptr EM::train(InputArray samples, OutputArray logLikelihoods, - OutputArray labels, OutputArray probs, - const EM::Params& params) -{ - Ptr em = makePtr(params); - if(!em->train_(samples, logLikelihoods, labels, probs)) - em.release(); - return em; -} - -Ptr EM::train_startWithE(InputArray samples, InputArray means0, - InputArray covs0, InputArray weights0, - OutputArray logLikelihoods, OutputArray labels, - OutputArray probs, const EM::Params& params) -{ - Ptr em = makePtr(params); - if(!em->trainE(samples, means0, covs0, weights0, logLikelihoods, labels, probs)) - em.release(); - return em; -} - -Ptr EM::train_startWithM(InputArray samples, InputArray probs0, - OutputArray logLikelihoods, OutputArray labels, - OutputArray probs, const EM::Params& params) -{ - Ptr em = makePtr(params); - if(!em->trainM(samples, probs0, logLikelihoods, labels, probs)) - em.release(); - return em; -} - -Ptr EM::create(const Params& params) +Ptr EM::create() { - return makePtr(params); + return makePtr(); } } diff --git a/modules/ml/src/knearest.cpp b/modules/ml/src/knearest.cpp index 4bf40758f2..70e178e6e2 100644 --- a/modules/ml/src/knearest.cpp +++ b/modules/ml/src/knearest.cpp @@ -50,46 +50,33 @@ namespace cv { namespace ml { -KNearest::Params::Params(int k, bool isclassifier_, int Emax_, int algorithmType_) : - defaultK(k), - isclassifier(isclassifier_), - Emax(Emax_), - algorithmType(algorithmType_) -{ -} +const String NAME_BRUTE_FORCE = "opencv_ml_knn"; +const String NAME_KDTREE = "opencv_ml_knn_kd"; -class KNearestImpl : public KNearest +class Impl { public: - KNearestImpl(const Params& p) - { - params = p; - } - - virtual ~KNearestImpl() {} - - Params getParams() const { return params; } - void setParams(const Params& p) { params = p; } - - bool isClassifier() const { return params.isclassifier; } - bool isTrained() const { return !samples.empty(); } - - String getDefaultModelName() const { return "opencv_ml_knn"; } - - void clear() + Impl() { - samples.release(); - responses.release(); + defaultK = 10; + isclassifier = true; + Emax = INT_MAX; } - int getVarCount() const { return samples.cols; } + virtual ~Impl() {} + virtual String getModelName() const = 0; + virtual int getType() const = 0; + virtual float findNearest( InputArray _samples, int k, + OutputArray _results, + OutputArray _neighborResponses, + OutputArray _dists ) const = 0; bool train( const Ptr& data, int flags ) { Mat new_samples = data->getTrainSamples(ROW_SAMPLE); Mat new_responses; data->getTrainResponses().convertTo(new_responses, CV_32F); - bool update = (flags & UPDATE_MODEL) != 0 && !samples.empty(); + bool update = (flags & ml::KNearest::UPDATE_MODEL) != 0 && !samples.empty(); CV_Assert( new_samples.type() == CV_32F ); @@ -106,9 +93,53 @@ public: samples.push_back(new_samples); responses.push_back(new_responses); + doTrain(samples); + return true; } + virtual void doTrain(InputArray points) { (void)points; } + + void clear() + { + samples.release(); + responses.release(); + } + + void read( const FileNode& fn ) + { + clear(); + isclassifier = (int)fn["is_classifier"] != 0; + defaultK = 
(int)fn["default_k"]; + + fn["samples"] >> samples; + fn["responses"] >> responses; + } + + void write( FileStorage& fs ) const + { + fs << "is_classifier" << (int)isclassifier; + fs << "default_k" << defaultK; + + fs << "samples" << samples; + fs << "responses" << responses; + } + +public: + int defaultK; + bool isclassifier; + int Emax; + + Mat samples; + Mat responses; +}; + +class BruteForceImpl : public Impl +{ +public: + String getModelName() const { return NAME_BRUTE_FORCE; } + int getType() const { return ml::KNearest::BRUTE_FORCE; } + void findNearestCore( const Mat& _samples, int k0, const Range& range, Mat* results, Mat* neighbor_responses, Mat* dists, float* presult ) const @@ -199,7 +230,7 @@ public: if( results || testidx+range.start == 0 ) { - if( !params.isclassifier || k == 1 ) + if( !isclassifier || k == 1 ) { float s = 0.f; for( j = 0; j < k; j++ ) @@ -251,7 +282,7 @@ public: struct findKNearestInvoker : public ParallelLoopBody { - findKNearestInvoker(const KNearestImpl* _p, int _k, const Mat& __samples, + findKNearestInvoker(const BruteForceImpl* _p, int _k, const Mat& __samples, Mat* __results, Mat* __neighbor_responses, Mat* __dists, float* _presult) { p = _p; @@ -273,7 +304,7 @@ public: } } - const KNearestImpl* p; + const BruteForceImpl* p; int k; const Mat* _samples; Mat* _results; @@ -324,88 +355,18 @@ public: //invoker(Range(0, testcount)); return result; } - - float predict(InputArray inputs, OutputArray outputs, int) const - { - return findNearest( inputs, params.defaultK, outputs, noArray(), noArray() ); - } - - void write( FileStorage& fs ) const - { - fs << "is_classifier" << (int)params.isclassifier; - fs << "default_k" << params.defaultK; - - fs << "samples" << samples; - fs << "responses" << responses; - } - - void read( const FileNode& fn ) - { - clear(); - params.isclassifier = (int)fn["is_classifier"] != 0; - params.defaultK = (int)fn["default_k"]; - - fn["samples"] >> samples; - fn["responses"] >> responses; - } - - Mat samples; - Mat responses; - Params params; }; -class KNearestKDTreeImpl : public KNearest +class KDTreeImpl : public Impl { public: - KNearestKDTreeImpl(const Params& p) - { - params = p; - } - - virtual ~KNearestKDTreeImpl() {} - - Params getParams() const { return params; } - void setParams(const Params& p) { params = p; } - - bool isClassifier() const { return params.isclassifier; } - bool isTrained() const { return !samples.empty(); } + String getModelName() const { return NAME_KDTREE; } + int getType() const { return ml::KNearest::KDTREE; } - String getDefaultModelName() const { return "opencv_ml_knn_kd"; } - - void clear() - { - samples.release(); - responses.release(); - } - - int getVarCount() const { return samples.cols; } - - bool train( const Ptr& data, int flags ) + void doTrain(InputArray points) { - Mat new_samples = data->getTrainSamples(ROW_SAMPLE); - Mat new_responses; - data->getTrainResponses().convertTo(new_responses, CV_32F); - bool update = (flags & UPDATE_MODEL) != 0 && !samples.empty(); - - CV_Assert( new_samples.type() == CV_32F ); - - if( !update ) - { - clear(); - } - else - { - CV_Assert( new_samples.cols == samples.cols && - new_responses.cols == responses.cols ); - } - - samples.push_back(new_samples); - responses.push_back(new_responses); - - tr.build(samples); - - return true; + tr.build(points); } float findNearest( InputArray _samples, int k, @@ -460,51 +421,97 @@ public: { _d = d.row(i); } - tr.findNearest(test_samples.row(i), k, params.Emax, _res, _nr, _d, noArray()); + 
tr.findNearest(test_samples.row(i), k, Emax, _res, _nr, _d, noArray()); } return result; // currently always 0 } - float predict(InputArray inputs, OutputArray outputs, int) const + KDTree tr; +}; + +//================================================================ + +class KNearestImpl : public KNearest +{ + CV_IMPL_PROPERTY(int, DefaultK, impl->defaultK) + CV_IMPL_PROPERTY(bool, IsClassifier, impl->isclassifier) + CV_IMPL_PROPERTY(int, Emax, impl->Emax) + +public: + int getAlgorithmType() const + { + return impl->getType(); + } + void setAlgorithmType(int val) { - return findNearest( inputs, params.defaultK, outputs, noArray(), noArray() ); + if (val != BRUTE_FORCE && val != KDTREE) + val = BRUTE_FORCE; + initImpl(val); } - void write( FileStorage& fs ) const +public: + KNearestImpl() + { + initImpl(BRUTE_FORCE); + } + ~KNearestImpl() { - fs << "is_classifier" << (int)params.isclassifier; - fs << "default_k" << params.defaultK; + } - fs << "samples" << samples; - fs << "responses" << responses; + bool isClassifier() const { return impl->isclassifier; } + bool isTrained() const { return !impl->samples.empty(); } + + int getVarCount() const { return impl->samples.cols; } + + void write( FileStorage& fs ) const + { + impl->write(fs); } void read( const FileNode& fn ) { - clear(); - params.isclassifier = (int)fn["is_classifier"] != 0; - params.defaultK = (int)fn["default_k"]; + int algorithmType = BRUTE_FORCE; + if (fn.name() == NAME_KDTREE) + algorithmType = KDTREE; + initImpl(algorithmType); + impl->read(fn); + } - fn["samples"] >> samples; - fn["responses"] >> responses; + float findNearest( InputArray samples, int k, + OutputArray results, + OutputArray neighborResponses=noArray(), + OutputArray dist=noArray() ) const + { + return impl->findNearest(samples, k, results, neighborResponses, dist); } - KDTree tr; + float predict(InputArray inputs, OutputArray outputs, int) const + { + return impl->findNearest( inputs, impl->defaultK, outputs, noArray(), noArray() ); + } - Mat samples; - Mat responses; - Params params; -}; + bool train( const Ptr& data, int flags ) + { + return impl->train(data, flags); + } -Ptr KNearest::create(const Params& p) -{ - if (KDTREE==p.algorithmType) + String getDefaultModelName() const { return impl->getModelName(); } + +protected: + void initImpl(int algorithmType) { - return makePtr(p); + if (algorithmType != KDTREE) + impl = makePtr(); + else + impl = makePtr(); } + Ptr impl; +}; - return makePtr(p); +Ptr KNearest::create() +{ + return makePtr(); } } diff --git a/modules/ml/src/lr.cpp b/modules/ml/src/lr.cpp index 2cff9003c4..e621009981 100644 --- a/modules/ml/src/lr.cpp +++ b/modules/ml/src/lr.cpp @@ -60,31 +60,41 @@ using namespace std; namespace cv { namespace ml { -LogisticRegression::Params::Params(double learning_rate, - int iters, - int method, - int normlization, - int reg, - int batch_size) +class LrParams { - alpha = learning_rate; - num_iters = iters; - norm = normlization; - regularized = reg; - train_method = method; - mini_batch_size = batch_size; - term_crit = TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, num_iters, alpha); -} +public: + LrParams() + { + alpha = 0.001; + num_iters = 1000; + norm = LogisticRegression::REG_L2; + train_method = LogisticRegression::BATCH; + mini_batch_size = 1; + term_crit = TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, num_iters, alpha); + } + + double alpha; //!< learning rate. + int num_iters; //!< number of iterations. 
+ int norm; + int train_method; + int mini_batch_size; + TermCriteria term_crit; +}; class LogisticRegressionImpl : public LogisticRegression { public: - LogisticRegressionImpl(const Params& pms) - : params(pms) - { - } + + LogisticRegressionImpl() { } virtual ~LogisticRegressionImpl() {} + CV_IMPL_PROPERTY(double, LearningRate, params.alpha) + CV_IMPL_PROPERTY(int, Iterations, params.num_iters) + CV_IMPL_PROPERTY(int, Regularization, params.norm) + CV_IMPL_PROPERTY(int, TrainMethod, params.train_method) + CV_IMPL_PROPERTY(int, MiniBatchSize, params.mini_batch_size) + CV_IMPL_PROPERTY(TermCriteria, TermCriteria, params.term_crit) + virtual bool train( const Ptr& trainData, int=0 ); virtual float predict(InputArray samples, OutputArray results, int) const; virtual void clear(); @@ -103,7 +113,7 @@ protected: bool set_label_map(const Mat& _labels_i); Mat remap_labels(const Mat& _labels_i, const map& lmap) const; protected: - Params params; + LrParams params; Mat learnt_thetas; map forward_mapper; map reverse_mapper; @@ -111,9 +121,9 @@ protected: Mat labels_n; }; -Ptr LogisticRegression::create(const Params& params) +Ptr LogisticRegression::create() { - return makePtr(params); + return makePtr(); } bool LogisticRegressionImpl::train(const Ptr& trainData, int) @@ -312,7 +322,7 @@ double LogisticRegressionImpl::compute_cost(const Mat& _data, const Mat& _labels theta_b = _init_theta(Range(1, n), Range::all()); multiply(theta_b, theta_b, theta_c, 1); - if(this->params.regularized > 0) + if(params.norm != REG_NONE) { llambda = 1; } @@ -367,7 +377,7 @@ Mat LogisticRegressionImpl::compute_batch_gradient(const Mat& _data, const Mat& m = _data.rows; n = _data.cols; - if(this->params.regularized > 0) + if(params.norm != REG_NONE) { llambda = 1; } @@ -439,7 +449,7 @@ Mat LogisticRegressionImpl::compute_mini_batch_gradient(const Mat& _data, const Mat data_d; Mat labels_l; - if(this->params.regularized > 0) + if(params.norm != REG_NONE) { lambda_l = 1; } @@ -570,7 +580,6 @@ void LogisticRegressionImpl::write(FileStorage& fs) const fs<<"alpha"<params.alpha; fs<<"iterations"<params.num_iters; fs<<"norm"<params.norm; - fs<<"regularized"<params.regularized; fs<<"train_method"<params.train_method; if(this->params.train_method == LogisticRegression::MINI_BATCH) { @@ -592,7 +601,6 @@ void LogisticRegressionImpl::read(const FileNode& fn) this->params.alpha = (double)fn["alpha"]; this->params.num_iters = (int)fn["iterations"]; this->params.norm = (int)fn["norm"]; - this->params.regularized = (int)fn["regularized"]; this->params.train_method = (int)fn["train_method"]; if(this->params.train_method == LogisticRegression::MINI_BATCH) diff --git a/modules/ml/src/nbayes.cpp b/modules/ml/src/nbayes.cpp index 425e337398..9fc0d833ba 100644 --- a/modules/ml/src/nbayes.cpp +++ b/modules/ml/src/nbayes.cpp @@ -43,7 +43,6 @@ namespace cv { namespace ml { -NormalBayesClassifier::Params::Params() {} class NormalBayesClassifierImpl : public NormalBayesClassifier { @@ -53,9 +52,6 @@ public: nallvars = 0; } - void setParams(const Params&) {} - Params getParams() const { return Params(); } - bool train( const Ptr& trainData, int flags ) { const float min_variation = FLT_EPSILON; @@ -455,7 +451,7 @@ public: }; -Ptr NormalBayesClassifier::create(const Params&) +Ptr NormalBayesClassifier::create() { Ptr p = makePtr(); return p; diff --git a/modules/ml/src/precomp.hpp b/modules/ml/src/precomp.hpp index 69ff03047e..77700a05a2 100644 --- a/modules/ml/src/precomp.hpp +++ b/modules/ml/src/precomp.hpp @@ -120,6 +120,91 @@ namespace ml return 
termCrit; } + struct TreeParams + { + TreeParams(); + TreeParams( int maxDepth, int minSampleCount, + double regressionAccuracy, bool useSurrogates, + int maxCategories, int CVFolds, + bool use1SERule, bool truncatePrunedTree, + const Mat& priors ); + + inline void setMaxCategories(int val) + { + if( val < 2 ) + CV_Error( CV_StsOutOfRange, "max_categories should be >= 2" ); + maxCategories = std::min(val, 15 ); + } + inline void setMaxDepth(int val) + { + if( val < 0 ) + CV_Error( CV_StsOutOfRange, "max_depth should be >= 0" ); + maxDepth = std::min( val, 25 ); + } + inline void setMinSampleCount(int val) + { + minSampleCount = std::max(val, 1); + } + inline void setCVFolds(int val) + { + if( val < 0 ) + CV_Error( CV_StsOutOfRange, + "params.CVFolds should be =0 (the tree is not pruned) " + "or n>0 (tree is pruned using n-fold cross-validation)" ); + if( val == 1 ) + val = 0; + CVFolds = val; + } + inline void setRegressionAccuracy(float val) + { + if( val < 0 ) + CV_Error( CV_StsOutOfRange, "params.regression_accuracy should be >= 0" ); + regressionAccuracy = val; + } + + inline int getMaxCategories() const { return maxCategories; } + inline int getMaxDepth() const { return maxDepth; } + inline int getMinSampleCount() const { return minSampleCount; } + inline int getCVFolds() const { return CVFolds; } + inline float getRegressionAccuracy() const { return regressionAccuracy; } + + CV_IMPL_PROPERTY(bool, UseSurrogates, useSurrogates) + CV_IMPL_PROPERTY(bool, Use1SERule, use1SERule) + CV_IMPL_PROPERTY(bool, TruncatePrunedTree, truncatePrunedTree) + CV_IMPL_PROPERTY_S(cv::Mat, Priors, priors) + + public: + bool useSurrogates; + bool use1SERule; + bool truncatePrunedTree; + Mat priors; + + protected: + int maxCategories; + int maxDepth; + int minSampleCount; + int CVFolds; + float regressionAccuracy; + }; + + struct RTreeParams + { + RTreeParams(); + RTreeParams(bool calcVarImportance, int nactiveVars, TermCriteria termCrit ); + bool calcVarImportance; + int nactiveVars; + TermCriteria termCrit; + }; + + struct BoostTreeParams + { + BoostTreeParams(); + BoostTreeParams(int boostType, int weakCount, double weightTrimRate); + int boostType; + int weakCount; + double weightTrimRate; + }; + class DTreesImpl : public DTrees { public: @@ -191,6 +276,16 @@ namespace ml int maxSubsetSize; }; + CV_WRAP_SAME_PROPERTY(int, MaxCategories, params) + CV_WRAP_SAME_PROPERTY(int, MaxDepth, params) + CV_WRAP_SAME_PROPERTY(int, MinSampleCount, params) + CV_WRAP_SAME_PROPERTY(int, CVFolds, params) + CV_WRAP_SAME_PROPERTY(bool, UseSurrogates, params) + CV_WRAP_SAME_PROPERTY(bool, Use1SERule, params) + CV_WRAP_SAME_PROPERTY(bool, TruncatePrunedTree, params) + CV_WRAP_SAME_PROPERTY(float, RegressionAccuracy, params) + CV_WRAP_SAME_PROPERTY_S(cv::Mat, Priors, params) + DTreesImpl(); virtual ~DTreesImpl(); virtual void clear(); @@ -202,8 +297,7 @@ namespace ml int getCatCount(int vi) const { return catOfs[vi][1] - catOfs[vi][0]; } int getSubsetSize(int vi) const { return (getCatCount(vi) + 31)/32; } - virtual void setDParams(const Params& _params); - virtual Params getDParams() const; + virtual void setDParams(const TreeParams& _params); virtual void startTraining( const Ptr& trainData, int flags ); virtual void endTraining(); virtual void initCompVarIdx(); @@ -250,7 +344,7 @@ namespace ml virtual const std::vector& getSplits() const { return splits; } virtual const std::vector& getSubsets() const { return subsets; } - Params params0, params; + TreeParams params; vector varIdx; vector compVarIdx; diff --git 
a/modules/ml/src/rtrees.cpp b/modules/ml/src/rtrees.cpp index 7441faac17..f5e2b21bdb 100644 --- a/modules/ml/src/rtrees.cpp +++ b/modules/ml/src/rtrees.cpp @@ -48,21 +48,16 @@ namespace ml { ////////////////////////////////////////////////////////////////////////////////////////// // Random trees // ////////////////////////////////////////////////////////////////////////////////////////// -RTrees::Params::Params() - : DTrees::Params(5, 10, 0.f, false, 10, 0, false, false, Mat()) +RTreeParams::RTreeParams() { calcVarImportance = false; nactiveVars = 0; termCrit = TermCriteria(TermCriteria::EPS + TermCriteria::COUNT, 50, 0.1); } -RTrees::Params::Params( int _maxDepth, int _minSampleCount, - double _regressionAccuracy, bool _useSurrogates, - int _maxCategories, const Mat& _priors, - bool _calcVarImportance, int _nactiveVars, - TermCriteria _termCrit ) - : DTrees::Params(_maxDepth, _minSampleCount, _regressionAccuracy, _useSurrogates, - _maxCategories, 0, false, false, _priors) +RTreeParams::RTreeParams(bool _calcVarImportance, + int _nactiveVars, + TermCriteria _termCrit ) { calcVarImportance = _calcVarImportance; nactiveVars = _nactiveVars; @@ -73,18 +68,19 @@ RTrees::Params::Params( int _maxDepth, int _minSampleCount, class DTreesImplForRTrees : public DTreesImpl { public: - DTreesImplForRTrees() {} - virtual ~DTreesImplForRTrees() {} - - void setRParams(const RTrees::Params& p) - { - rparams = p; - } - - RTrees::Params getRParams() const + DTreesImplForRTrees() { - return rparams; + params.setMaxDepth(5); + params.setMinSampleCount(10); + params.setRegressionAccuracy(0.f); + params.useSurrogates = false; + params.setMaxCategories(10); + params.setCVFolds(0); + params.use1SERule = false; + params.truncatePrunedTree = false; + params.priors = Mat(); } + virtual ~DTreesImplForRTrees() {} void clear() { @@ -129,10 +125,6 @@ public: bool train( const Ptr& trainData, int flags ) { - Params dp(rparams.maxDepth, rparams.minSampleCount, rparams.regressionAccuracy, - rparams.useSurrogates, rparams.maxCategories, rparams.CVFolds, - rparams.use1SERule, rparams.truncatePrunedTree, rparams.priors); - setDParams(dp); startTraining(trainData, flags); int treeidx, ntrees = (rparams.termCrit.type & TermCriteria::COUNT) != 0 ? 
rparams.termCrit.maxCount : 10000; @@ -326,12 +318,6 @@ public: void readParams( const FileNode& fn ) { DTreesImpl::readParams(fn); - rparams.maxDepth = params0.maxDepth; - rparams.minSampleCount = params0.minSampleCount; - rparams.regressionAccuracy = params0.regressionAccuracy; - rparams.useSurrogates = params0.useSurrogates; - rparams.maxCategories = params0.maxCategories; - rparams.priors = params0.priors; FileNode tparams_node = fn["training_params"]; rparams.nactiveVars = (int)tparams_node["nactive_vars"]; @@ -361,7 +347,7 @@ public: } } - RTrees::Params rparams; + RTreeParams rparams; double oobError; vector varImportance; vector allVars, activeVars; @@ -372,6 +358,20 @@ public: class RTreesImpl : public RTrees { public: + CV_IMPL_PROPERTY(bool, CalculateVarImportance, impl.rparams.calcVarImportance) + CV_IMPL_PROPERTY(int, ActiveVarCount, impl.rparams.nactiveVars) + CV_IMPL_PROPERTY_S(TermCriteria, TermCriteria, impl.rparams.termCrit) + + CV_WRAP_SAME_PROPERTY(int, MaxCategories, impl.params) + CV_WRAP_SAME_PROPERTY(int, MaxDepth, impl.params) + CV_WRAP_SAME_PROPERTY(int, MinSampleCount, impl.params) + CV_WRAP_SAME_PROPERTY(int, CVFolds, impl.params) + CV_WRAP_SAME_PROPERTY(bool, UseSurrogates, impl.params) + CV_WRAP_SAME_PROPERTY(bool, Use1SERule, impl.params) + CV_WRAP_SAME_PROPERTY(bool, TruncatePrunedTree, impl.params) + CV_WRAP_SAME_PROPERTY(float, RegressionAccuracy, impl.params) + CV_WRAP_SAME_PROPERTY_S(cv::Mat, Priors, impl.params) + RTreesImpl() {} virtual ~RTreesImpl() {} @@ -397,9 +397,6 @@ public: impl.read(fn); } - void setRParams(const Params& p) { impl.setRParams(p); } - Params getRParams() const { return impl.getRParams(); } - Mat getVarImportance() const { return Mat_(impl.varImportance, true); } int getVarCount() const { return impl.getVarCount(); } @@ -415,11 +412,9 @@ public: }; -Ptr RTrees::create(const Params& params) +Ptr RTrees::create() { - Ptr p = makePtr(); - p->setRParams(params); - return p; + return makePtr(); } }} diff --git a/modules/ml/src/svm.cpp b/modules/ml/src/svm.cpp index a0df44f78b..8bed117639 100644 --- a/modules/ml/src/svm.cpp +++ b/modules/ml/src/svm.cpp @@ -103,54 +103,60 @@ static void checkParamGrid(const ParamGrid& pg) } // SVM training parameters -SVM::Params::Params() +struct SvmParams { - svmType = SVM::C_SVC; - kernelType = SVM::RBF; - degree = 0; - gamma = 1; - coef0 = 0; - C = 1; - nu = 0; - p = 0; - termCrit = TermCriteria( CV_TERMCRIT_ITER+CV_TERMCRIT_EPS, 1000, FLT_EPSILON ); -} + int svmType; + int kernelType; + double gamma; + double coef0; + double degree; + double C; + double nu; + double p; + Mat classWeights; + TermCriteria termCrit; + SvmParams() + { + svmType = SVM::C_SVC; + kernelType = SVM::RBF; + degree = 0; + gamma = 1; + coef0 = 0; + C = 1; + nu = 0; + p = 0; + termCrit = TermCriteria( CV_TERMCRIT_ITER+CV_TERMCRIT_EPS, 1000, FLT_EPSILON ); + } -SVM::Params::Params( int _svmType, int _kernelType, - double _degree, double _gamma, double _coef0, - double _Con, double _nu, double _p, - const Mat& _classWeights, TermCriteria _termCrit ) -{ - svmType = _svmType; - kernelType = _kernelType; - degree = _degree; - gamma = _gamma; - coef0 = _coef0; - C = _Con; - nu = _nu; - p = _p; - classWeights = _classWeights; - termCrit = _termCrit; -} + SvmParams( int _svmType, int _kernelType, + double _degree, double _gamma, double _coef0, + double _Con, double _nu, double _p, + const Mat& _classWeights, TermCriteria _termCrit ) + { + svmType = _svmType; + kernelType = _kernelType; + degree = _degree; + gamma = _gamma; + coef0 = 
_coef0; + C = _Con; + nu = _nu; + p = _p; + classWeights = _classWeights; + termCrit = _termCrit; + } + +}; /////////////////////////////////////// SVM kernel /////////////////////////////////////// class SVMKernelImpl : public SVM::Kernel { public: - SVMKernelImpl() - { - } - - SVMKernelImpl( const SVM::Params& _params ) + SVMKernelImpl( const SvmParams& _params = SvmParams() ) { params = _params; } - virtual ~SVMKernelImpl() - { - } - int getType() const { return params.kernelType; @@ -327,7 +333,7 @@ public: } } - SVM::Params params; + SvmParams params; }; @@ -1185,7 +1191,7 @@ public: int cache_size; int max_cache_size; Mat samples; - SVM::Params params; + SvmParams params; vector lru_cache; int lru_first; int lru_last; @@ -1215,6 +1221,7 @@ public: SVMImpl() { clear(); + checkParams(); } ~SVMImpl() @@ -1235,32 +1242,68 @@ public: return sv; } - void setParams( const Params& _params, const Ptr& _kernel ) + CV_IMPL_PROPERTY(int, Type, params.svmType) + CV_IMPL_PROPERTY(double, Gamma, params.gamma) + CV_IMPL_PROPERTY(double, Coef0, params.coef0) + CV_IMPL_PROPERTY(double, Degree, params.degree) + CV_IMPL_PROPERTY(double, C, params.C) + CV_IMPL_PROPERTY(double, Nu, params.nu) + CV_IMPL_PROPERTY(double, P, params.p) + CV_IMPL_PROPERTY_S(cv::Mat, ClassWeights, params.classWeights) + CV_IMPL_PROPERTY_S(cv::TermCriteria, TermCriteria, params.termCrit) + + int getKernelType() const { - params = _params; + return params.kernelType; + } - int kernelType = params.kernelType; - int svmType = params.svmType; + void setKernel(int kernelType) + { + params.kernelType = kernelType; + if (kernelType != CUSTOM) + kernel = makePtr(params); + } - if( kernelType != LINEAR && kernelType != POLY && - kernelType != SIGMOID && kernelType != RBF && - kernelType != INTER && kernelType != CHI2) - CV_Error( CV_StsBadArg, "Unknown/unsupported kernel type" ); + void setCustomKernel(const Ptr &_kernel) + { + params.kernelType = CUSTOM; + kernel = _kernel; + } - if( kernelType == LINEAR ) - params.gamma = 1; - else if( params.gamma <= 0 ) - CV_Error( CV_StsOutOfRange, "gamma parameter of the kernel must be positive" ); + void checkParams() + { + int kernelType = params.kernelType; + if (kernelType != CUSTOM) + { + if( kernelType != LINEAR && kernelType != POLY && + kernelType != SIGMOID && kernelType != RBF && + kernelType != INTER && kernelType != CHI2) + CV_Error( CV_StsBadArg, "Unknown/unsupported kernel type" ); + + if( kernelType == LINEAR ) + params.gamma = 1; + else if( params.gamma <= 0 ) + CV_Error( CV_StsOutOfRange, "gamma parameter of the kernel must be positive" ); + + if( kernelType != SIGMOID && kernelType != POLY ) + params.coef0 = 0; + else if( params.coef0 < 0 ) + CV_Error( CV_StsOutOfRange, "The kernel parameter must be positive or zero" ); + + if( kernelType != POLY ) + params.degree = 0; + else if( params.degree <= 0 ) + CV_Error( CV_StsOutOfRange, "The kernel parameter must be positive" ); - if( kernelType != SIGMOID && kernelType != POLY ) - params.coef0 = 0; - else if( params.coef0 < 0 ) - CV_Error( CV_StsOutOfRange, "The kernel parameter must be positive or zero" ); + kernel = makePtr(params); + } + else + { + if (!kernel) + CV_Error( CV_StsBadArg, "Custom kernel is not set" ); + } - if( kernelType != POLY ) - params.degree = 0; - else if( params.degree <= 0 ) - CV_Error( CV_StsOutOfRange, "The kernel parameter must be positive" ); + int svmType = params.svmType; if( svmType != C_SVC && svmType != NU_SVC && svmType != ONE_CLASS && svmType != EPS_SVR && @@ -1285,28 +1328,18 @@ public: if( svmType 
!= C_SVC ) params.classWeights.release(); - termCrit = params.termCrit; - if( !(termCrit.type & TermCriteria::EPS) ) - termCrit.epsilon = DBL_EPSILON; - termCrit.epsilon = std::max(termCrit.epsilon, DBL_EPSILON); - if( !(termCrit.type & TermCriteria::COUNT) ) - termCrit.maxCount = INT_MAX; - termCrit.maxCount = std::max(termCrit.maxCount, 1); - - if( _kernel ) - kernel = _kernel; - else - kernel = makePtr<SVMKernelImpl>(params); + if( !(params.termCrit.type & TermCriteria::EPS) ) + params.termCrit.epsilon = DBL_EPSILON; + params.termCrit.epsilon = std::max(params.termCrit.epsilon, DBL_EPSILON); + if( !(params.termCrit.type & TermCriteria::COUNT) ) + params.termCrit.maxCount = INT_MAX; + params.termCrit.maxCount = std::max(params.termCrit.maxCount, 1); } - Params getParams() const + void setParams( const SvmParams& _params) { - return params; - } - - Ptr<Kernel> getKernel() const - { - return kernel; + params = _params; + checkParams(); } int getSVCount(int i) const @@ -1335,9 +1368,9 @@ public: _responses.convertTo(_yf, CV_32F); bool ok = - svmType == ONE_CLASS ? Solver::solve_one_class( _samples, params.nu, kernel, _alpha, sinfo, termCrit ) : - svmType == EPS_SVR ? Solver::solve_eps_svr( _samples, _yf, params.p, params.C, kernel, _alpha, sinfo, termCrit ) : - svmType == NU_SVR ? Solver::solve_nu_svr( _samples, _yf, params.nu, params.C, kernel, _alpha, sinfo, termCrit ) : false; + svmType == ONE_CLASS ? Solver::solve_one_class( _samples, params.nu, kernel, _alpha, sinfo, params.termCrit ) : + svmType == EPS_SVR ? Solver::solve_eps_svr( _samples, _yf, params.p, params.C, kernel, _alpha, sinfo, params.termCrit ) : + svmType == NU_SVR ? Solver::solve_nu_svr( _samples, _yf, params.nu, params.C, kernel, _alpha, sinfo, params.termCrit ) : false; if( !ok ) return false; @@ -1397,7 +1430,7 @@ public: //check that while cross-validation there were the samples from all the classes if( class_ranges[class_count] <= 0 ) CV_Error( CV_StsBadArg, "While cross-validation one or more of the classes have " - "been fell out of the sample. Try to enlarge " ); + "fallen out of the sample. Try to enlarge " ); if( svmType == NU_SVC ) { @@ -1448,10 +1481,10 @@ public: DecisionFunc df; bool ok = params.svmType == C_SVC ? Solver::solve_c_svc( temp_samples, temp_y, Cp, Cn, - kernel, _alpha, sinfo, termCrit ) : + kernel, _alpha, sinfo, params.termCrit ) : params.svmType == NU_SVC ? Solver::solve_nu_svc( temp_samples, temp_y, params.nu, - kernel, _alpha, sinfo, termCrit ) : + kernel, _alpha, sinfo, params.termCrit ) : false; if( !ok ) return false; @@ -1557,6 +1590,8 @@ public: { clear(); + checkParams(); + int svmType = params.svmType; Mat samples = data->getTrainSamples(); Mat responses; @@ -1586,6 +1621,8 @@ public: ParamGrid nu_grid, ParamGrid coef_grid, ParamGrid degree_grid, bool balanced ) { + checkParams(); + int svmType = params.svmType; RNG rng((uint64)-1); @@ -1708,7 +1745,7 @@ public: int test_sample_count = (sample_count + k_fold/2)/k_fold; int train_sample_count = sample_count - test_sample_count; - Params best_params = params; + SvmParams best_params = params; double min_error = FLT_MAX; int rtype = responses.type(); @@ -1729,7 +1766,7 @@ public: FOR_IN_GRID(degree, degree_grid) { // make sure we updated the kernel and other parameters - setParams(params, Ptr<SVM::Kernel>() ); + setParams(params); double error = 0; for( k = 0; k < k_fold; k++ ) @@ -1919,7 +1956,9 @@ public: kernelType == LINEAR ? "LINEAR" : kernelType == POLY ? "POLY" : kernelType == RBF ? "RBF" : - kernelType == SIGMOID ? 
"SIGMOID" : format("Unknown_%d", kernelType); + kernelType == SIGMOID ? "SIGMOID" : + kernelType == CHI2 ? "CHI2" : + kernelType == INTER ? "INTER" : format("Unknown_%d", kernelType); fs << "svmType" << svm_type_str; @@ -2036,7 +2075,7 @@ public: void read_params( const FileNode& fn ) { - Params _params; + SvmParams _params; // check for old naming String svm_type_str = (String)(fn["svm_type"].empty() ? fn["svmType"] : fn["svm_type"]); @@ -2059,10 +2098,12 @@ public: kernel_type_str == "LINEAR" ? LINEAR : kernel_type_str == "POLY" ? POLY : kernel_type_str == "RBF" ? RBF : - kernel_type_str == "SIGMOID" ? SIGMOID : -1; + kernel_type_str == "SIGMOID" ? SIGMOID : + kernel_type_str == "CHI2" ? CHI2 : + kernel_type_str == "INTER" ? INTER : CUSTOM; - if( kernelType < 0 ) - CV_Error( CV_StsParseError, "Missing of invalid SVM kernel type" ); + if( kernelType == CUSTOM ) + CV_Error( CV_StsParseError, "Invalid SVM kernel type (or custom kernel)" ); _params.svmType = svmType; _params.kernelType = kernelType; @@ -2086,7 +2127,7 @@ public: else _params.termCrit = TermCriteria( TermCriteria::EPS + TermCriteria::COUNT, 1000, FLT_EPSILON ); - setParams( _params, Ptr() ); + setParams( _params ); } void read( const FileNode& fn ) @@ -2154,8 +2195,7 @@ public: optimize_linear_svm(); } - Params params; - TermCriteria termCrit; + SvmParams params; Mat class_labels; int var_count; Mat sv; @@ -2167,11 +2207,9 @@ public: }; -Ptr SVM::create(const Params& params, const Ptr& kernel) +Ptr SVM::create() { - Ptr p = makePtr(); - p->setParams(params, kernel); - return p; + return makePtr(); } } diff --git a/modules/ml/src/tree.cpp b/modules/ml/src/tree.cpp index 64f66169b0..537728336d 100644 --- a/modules/ml/src/tree.cpp +++ b/modules/ml/src/tree.cpp @@ -48,18 +48,7 @@ namespace ml { using std::vector; -void DTrees::setDParams(const DTrees::Params&) -{ - CV_Error(CV_StsNotImplemented, ""); -} - -DTrees::Params DTrees::getDParams() const -{ - CV_Error(CV_StsNotImplemented, ""); - return DTrees::Params(); -} - -DTrees::Params::Params() +TreeParams::TreeParams() { maxDepth = INT_MAX; minSampleCount = 10; @@ -72,11 +61,11 @@ DTrees::Params::Params() priors = Mat(); } -DTrees::Params::Params( int _maxDepth, int _minSampleCount, - double _regressionAccuracy, bool _useSurrogates, - int _maxCategories, int _CVFolds, - bool _use1SERule, bool _truncatePrunedTree, - const Mat& _priors ) +TreeParams::TreeParams(int _maxDepth, int _minSampleCount, + double _regressionAccuracy, bool _useSurrogates, + int _maxCategories, int _CVFolds, + bool _use1SERule, bool _truncatePrunedTree, + const Mat& _priors) { maxDepth = _maxDepth; minSampleCount = _minSampleCount; @@ -248,7 +237,7 @@ const vector& DTreesImpl::getActiveVars() int DTreesImpl::addTree(const vector& sidx ) { - size_t n = (params.maxDepth > 0 ? (1 << params.maxDepth) : 1024) + w->wnodes.size(); + size_t n = (params.getMaxDepth() > 0 ? 
(1 << params.getMaxDepth()) : 1024) + w->wnodes.size(); w->wnodes.reserve(n); w->wsplits.reserve(n); @@ -257,7 +246,7 @@ int DTreesImpl::addTree(const vector& sidx ) w->wsplits.clear(); w->wsubsets.clear(); - int cv_n = params.CVFolds; + int cv_n = params.getCVFolds(); if( cv_n > 0 ) { @@ -347,34 +336,9 @@ int DTreesImpl::addTree(const vector& sidx ) return root; } -DTrees::Params DTreesImpl::getDParams() const -{ - return params0; -} - -void DTreesImpl::setDParams(const Params& _params) +void DTreesImpl::setDParams(const TreeParams& _params) { - params0 = params = _params; - if( params.maxCategories < 2 ) - CV_Error( CV_StsOutOfRange, "params.max_categories should be >= 2" ); - params.maxCategories = std::min( params.maxCategories, 15 ); - - if( params.maxDepth < 0 ) - CV_Error( CV_StsOutOfRange, "params.max_depth should be >= 0" ); - params.maxDepth = std::min( params.maxDepth, 25 ); - - params.minSampleCount = std::max(params.minSampleCount, 1); - - if( params.CVFolds < 0 ) - CV_Error( CV_StsOutOfRange, - "params.CVFolds should be =0 (the tree is not pruned) " - "or n>0 (tree is pruned using n-fold cross-validation)" ); - - if( params.CVFolds == 1 ) - params.CVFolds = 0; - - if( params.regressionAccuracy < 0 ) - CV_Error( CV_StsOutOfRange, "params.regression_accuracy should be >= 0" ); + params = _params; } int DTreesImpl::addNodeAndTrySplit( int parent, const vector& sidx ) @@ -385,7 +349,7 @@ int DTreesImpl::addNodeAndTrySplit( int parent, const vector& sidx ) node.parent = parent; node.depth = parent >= 0 ? w->wnodes[parent].depth + 1 : 0; - int nfolds = params.CVFolds; + int nfolds = params.getCVFolds(); if( nfolds > 0 ) { @@ -400,7 +364,7 @@ int DTreesImpl::addNodeAndTrySplit( int parent, const vector& sidx ) calcValue( nidx, sidx ); - if( n <= params.minSampleCount || node.depth >= params.maxDepth ) + if( n <= params.getMinSampleCount() || node.depth >= params.getMaxDepth() ) can_split = false; else if( _isClassifier ) { @@ -415,7 +379,7 @@ int DTreesImpl::addNodeAndTrySplit( int parent, const vector& sidx ) } else { - if( sqrt(node.node_risk) < params.regressionAccuracy ) + if( sqrt(node.node_risk) < params.getRegressionAccuracy() ) can_split = false; } @@ -493,7 +457,7 @@ int DTreesImpl::findBestSplit( const vector& _sidx ) void DTreesImpl::calcValue( int nidx, const vector& _sidx ) { WNode* node = &w->wnodes[nidx]; - int i, j, k, n = (int)_sidx.size(), cv_n = params.CVFolds; + int i, j, k, n = (int)_sidx.size(), cv_n = params.getCVFolds(); int m = (int)classLabels.size(); cv::AutoBuffer buf(std::max(m, 3)*(cv_n+1)); @@ -841,8 +805,8 @@ DTreesImpl::WSplit DTreesImpl::findSplitCatClass( int vi, const vector& _si int m = (int)classLabels.size(); int base_size = m*(3 + mi) + mi + 1; - if( m > 2 && mi > params.maxCategories ) - base_size += m*std::min(params.maxCategories, n) + mi; + if( m > 2 && mi > params.getMaxCategories() ) + base_size += m*std::min(params.getMaxCategories(), n) + mi; else base_size += mi; AutoBuffer buf(base_size + n); @@ -880,9 +844,9 @@ DTreesImpl::WSplit DTreesImpl::findSplitCatClass( int vi, const vector& _si if( m > 2 ) { - if( mi > params.maxCategories ) + if( mi > params.getMaxCategories() ) { - mi = std::min(params.maxCategories, n); + mi = std::min(params.getMaxCategories(), n); cjk = c_weights + _mi; cluster_labels = (int*)(cjk + m*mi); clusterCategories( _cjk, _mi, m, cjk, mi, cluster_labels ); @@ -1228,7 +1192,7 @@ int DTreesImpl::pruneCV( int root ) // 2. choose the best tree index (if need, apply 1SE rule). // 3. 
store the best index and cut the branches. - int ti, tree_count = 0, j, cv_n = params.CVFolds, n = w->wnodes[root].sample_count; + int ti, tree_count = 0, j, cv_n = params.getCVFolds(), n = w->wnodes[root].sample_count; // currently, 1SE for regression is not implemented bool use_1se = params.use1SERule != 0 && _isClassifier; double min_err = 0, min_err_se = 0; @@ -1294,7 +1258,7 @@ int DTreesImpl::pruneCV( int root ) double DTreesImpl::updateTreeRNC( int root, double T, int fold ) { - int nidx = root, pidx = -1, cv_n = params.CVFolds; + int nidx = root, pidx = -1, cv_n = params.getCVFolds(); double min_alpha = DBL_MAX; for(;;) @@ -1350,7 +1314,7 @@ double DTreesImpl::updateTreeRNC( int root, double T, int fold ) bool DTreesImpl::cutTree( int root, double T, int fold, double min_alpha ) { - int cv_n = params.CVFolds, nidx = root, pidx = -1; + int cv_n = params.getCVFolds(), nidx = root, pidx = -1; WNode* node = &w->wnodes[root]; if( node->left < 0 ) return true; @@ -1560,19 +1524,19 @@ float DTreesImpl::predict( InputArray _samples, OutputArray _results, int flags void DTreesImpl::writeTrainingParams(FileStorage& fs) const { - fs << "use_surrogates" << (params0.useSurrogates ? 1 : 0); - fs << "max_categories" << params0.maxCategories; - fs << "regression_accuracy" << params0.regressionAccuracy; + fs << "use_surrogates" << (params.useSurrogates ? 1 : 0); + fs << "max_categories" << params.getMaxCategories(); + fs << "regression_accuracy" << params.getRegressionAccuracy(); - fs << "max_depth" << params0.maxDepth; - fs << "min_sample_count" << params0.minSampleCount; - fs << "cross_validation_folds" << params0.CVFolds; + fs << "max_depth" << params.getMaxDepth(); + fs << "min_sample_count" << params.getMinSampleCount(); + fs << "cross_validation_folds" << params.getCVFolds(); - if( params0.CVFolds > 1 ) - fs << "use_1se_rule" << (params0.use1SERule ? 1 : 0); + if( params.getCVFolds() > 1 ) + fs << "use_1se_rule" << (params.use1SERule ? 1 : 0); - if( !params0.priors.empty() ) - fs << "priors" << params0.priors; + if( !params.priors.empty() ) + fs << "priors" << params.priors; } void DTreesImpl::writeParams(FileStorage& fs) const @@ -1724,18 +1688,18 @@ void DTreesImpl::readParams( const FileNode& fn ) FileNode tparams_node = fn["training_params"]; - params0 = Params(); + TreeParams params0 = TreeParams(); if( !tparams_node.empty() ) // training parameters are not necessary { params0.useSurrogates = (int)tparams_node["use_surrogates"] != 0; - params0.maxCategories = (int)(tparams_node["max_categories"].empty() ? 16 : tparams_node["max_categories"]); - params0.regressionAccuracy = (float)tparams_node["regression_accuracy"]; - params0.maxDepth = (int)tparams_node["max_depth"]; - params0.minSampleCount = (int)tparams_node["min_sample_count"]; - params0.CVFolds = (int)tparams_node["cross_validation_folds"]; + params0.setMaxCategories((int)(tparams_node["max_categories"].empty() ? 
16 : tparams_node["max_categories"])); + params0.setRegressionAccuracy((float)tparams_node["regression_accuracy"]); + params0.setMaxDepth((int)tparams_node["max_depth"]); + params0.setMinSampleCount((int)tparams_node["min_sample_count"]); + params0.setCVFolds((int)tparams_node["cross_validation_folds"]); - if( params0.CVFolds > 1 ) + if( params0.getCVFolds() > 1 ) { params.use1SERule = (int)tparams_node["use_1se_rule"] != 0; } @@ -1964,11 +1928,9 @@ void DTreesImpl::read( const FileNode& fn ) readTree(fnodes); } -Ptr DTrees::create(const DTrees::Params& params) +Ptr DTrees::create() { - Ptr p = makePtr(); - p->setDParams(params); - return p; + return makePtr(); } } diff --git a/modules/ml/test/test_emknearestkmeans.cpp b/modules/ml/test/test_emknearestkmeans.cpp index 121b34d184..a079be22f2 100644 --- a/modules/ml/test/test_emknearestkmeans.cpp +++ b/modules/ml/test/test_emknearestkmeans.cpp @@ -330,7 +330,8 @@ void CV_KNearestTest::run( int /*start_from*/ ) } // KNearest KDTree implementation - Ptr knearestKdt = KNearest::create(ml::KNearest::Params(10, true, INT_MAX, ml::KNearest::KDTREE)); + Ptr knearestKdt = KNearest::create(); + knearestKdt->setAlgorithmType(KNearest::KDTREE); knearestKdt->train(trainData, ml::ROW_SAMPLE, trainLabels); knearestKdt->findNearest(testData, 4, bestLabels); if( !calcErr( bestLabels, testLabels, sizes, err, true ) ) @@ -394,16 +395,18 @@ int CV_EMTest::runCase( int caseIndex, const EM_Params& params, cv::Mat labels; float err; - Ptr em; - EM::Params emp(params.nclusters, params.covMatType, params.termCrit); + Ptr em = EM::create(); + em->setClustersNumber(params.nclusters); + em->setCovarianceMatrixType(params.covMatType); + em->setTermCriteria(params.termCrit); if( params.startStep == EM::START_AUTO_STEP ) - em = EM::train( trainData, noArray(), labels, noArray(), emp ); + em->trainEM( trainData, noArray(), labels, noArray() ); else if( params.startStep == EM::START_E_STEP ) - em = EM::train_startWithE( trainData, *params.means, *params.covs, - *params.weights, noArray(), labels, noArray(), emp ); + em->trainE( trainData, *params.means, *params.covs, + *params.weights, noArray(), labels, noArray() ); else if( params.startStep == EM::START_M_STEP ) - em = EM::train_startWithM( trainData, *params.probs, - noArray(), labels, noArray(), emp ); + em->trainM( trainData, *params.probs, + noArray(), labels, noArray() ); // check train error if( !calcErr( labels, trainLabels, sizes, err , false, false ) ) @@ -543,7 +546,9 @@ protected: Mat labels; - Ptr em = EM::train(samples, noArray(), labels, noArray(), EM::Params(nclusters)); + Ptr em = EM::create(); + em->setClustersNumber(nclusters); + em->trainEM(samples, noArray(), labels, noArray()); Mat firstResult(samples.rows, 1, CV_32SC1); for( int i = 0; i < samples.rows; i++) @@ -644,8 +649,13 @@ protected: samples1.push_back(sample); } } - Ptr model0 = EM::train(samples0, noArray(), noArray(), noArray(), EM::Params(3)); - Ptr model1 = EM::train(samples1, noArray(), noArray(), noArray(), EM::Params(3)); + Ptr model0 = EM::create(); + model0->setClustersNumber(3); + model0->trainEM(samples0, noArray(), noArray(), noArray()); + + Ptr model1 = EM::create(); + model1->setClustersNumber(3); + model1->trainEM(samples1, noArray(), noArray(), noArray()); Mat trainConfusionMat(2, 2, CV_32SC1, Scalar(0)), testConfusionMat(2, 2, CV_32SC1, Scalar(0)); diff --git a/modules/ml/test/test_lr.cpp b/modules/ml/test/test_lr.cpp index 18de0825dc..e0da01cfb9 100644 --- a/modules/ml/test/test_lr.cpp +++ b/modules/ml/test/test_lr.cpp @@ 
-95,16 +95,13 @@ void CV_LRTest::run( int /*start_from*/ ) string dataFileName = ts->get_data_path() + "iris.data"; Ptr tdata = TrainData::loadFromCSV(dataFileName, 0); - LogisticRegression::Params params = LogisticRegression::Params(); - params.alpha = 1.0; - params.num_iters = 10001; - params.norm = LogisticRegression::REG_L2; - params.regularized = 1; - params.train_method = LogisticRegression::BATCH; - params.mini_batch_size = 10; - // run LR classifier train classifier - Ptr p = LogisticRegression::create(params); + Ptr p = LogisticRegression::create(); + p->setLearningRate(1.0); + p->setIterations(10001); + p->setRegularization(LogisticRegression::REG_L2); + p->setTrainMethod(LogisticRegression::BATCH); + p->setMiniBatchSize(10); p->train(tdata); // predict using the same data @@ -157,20 +154,17 @@ void CV_LRTest_SaveLoad::run( int /*start_from*/ ) Mat responses1, responses2; Mat learnt_mat1, learnt_mat2; - LogisticRegression::Params params1 = LogisticRegression::Params(); - params1.alpha = 1.0; - params1.num_iters = 10001; - params1.norm = LogisticRegression::REG_L2; - params1.regularized = 1; - params1.train_method = LogisticRegression::BATCH; - params1.mini_batch_size = 10; - // train and save the classifier String filename = tempfile(".xml"); try { // run LR classifier train classifier - Ptr lr1 = LogisticRegression::create(params1); + Ptr lr1 = LogisticRegression::create(); + lr1->setLearningRate(1.0); + lr1->setIterations(10001); + lr1->setRegularization(LogisticRegression::REG_L2); + lr1->setTrainMethod(LogisticRegression::BATCH); + lr1->setMiniBatchSize(10); lr1->train(tdata); lr1->predict(tdata->getSamples(), responses1); learnt_mat1 = lr1->get_learnt_thetas(); diff --git a/modules/ml/test/test_mltests2.cpp b/modules/ml/test/test_mltests2.cpp index b7c5f46c6e..cfaf0f2491 100644 --- a/modules/ml/test/test_mltests2.cpp +++ b/modules/ml/test/test_mltests2.cpp @@ -73,30 +73,14 @@ int str_to_svm_kernel_type( String& str ) return -1; } -Ptr svm_train_auto( Ptr _data, SVM::Params _params, - int k_fold, ParamGrid C_grid, ParamGrid gamma_grid, - ParamGrid p_grid, ParamGrid nu_grid, ParamGrid coef_grid, - ParamGrid degree_grid ) -{ - Mat _train_data = _data->getSamples(); - Mat _responses = _data->getResponses(); - Mat _var_idx = _data->getVarIdx(); - Mat _sample_idx = _data->getTrainSampleIdx(); - - Ptr svm = SVM::create(_params); - if( svm->trainAuto( _data, k_fold, C_grid, gamma_grid, p_grid, nu_grid, coef_grid, degree_grid ) ) - return svm; - return Ptr(); -} - // 4. em // 5. 
ann int str_to_ann_train_method( String& str ) { if( !str.compare("BACKPROP") ) - return ANN_MLP::Params::BACKPROP; + return ANN_MLP::BACKPROP; if( !str.compare("RPROP") ) - return ANN_MLP::Params::RPROP; + return ANN_MLP::RPROP; CV_Error( CV_StsBadArg, "incorrect ann train method string" ); return -1; } @@ -343,16 +327,16 @@ int CV_MLBaseTest::train( int testCaseIdx ) String svm_type_str, kernel_type_str; modelParamsNode["svm_type"] >> svm_type_str; modelParamsNode["kernel_type"] >> kernel_type_str; - SVM::Params params; - params.svmType = str_to_svm_type( svm_type_str ); - params.kernelType = str_to_svm_kernel_type( kernel_type_str ); - modelParamsNode["degree"] >> params.degree; - modelParamsNode["gamma"] >> params.gamma; - modelParamsNode["coef0"] >> params.coef0; - modelParamsNode["C"] >> params.C; - modelParamsNode["nu"] >> params.nu; - modelParamsNode["p"] >> params.p; - model = SVM::create(params); + Ptr m = SVM::create(); + m->setType(str_to_svm_type( svm_type_str )); + m->setKernel(str_to_svm_kernel_type( kernel_type_str )); + m->setDegree(modelParamsNode["degree"]); + m->setGamma(modelParamsNode["gamma"]); + m->setCoef0(modelParamsNode["coef0"]); + m->setC(modelParamsNode["C"]); + m->setNu(modelParamsNode["nu"]); + m->setP(modelParamsNode["p"]); + model = m; } else if( modelName == CV_EM ) { @@ -371,9 +355,13 @@ int CV_MLBaseTest::train( int testCaseIdx ) data->getVarIdx(), data->getTrainSampleIdx()); int layer_sz[] = { data->getNAllVars(), 100, 100, (int)cls_map.size() }; Mat layer_sizes( 1, (int)(sizeof(layer_sz)/sizeof(layer_sz[0])), CV_32S, layer_sz ); - model = ANN_MLP::create(ANN_MLP::Params(layer_sizes, ANN_MLP::SIGMOID_SYM, 0, 0, - TermCriteria(TermCriteria::COUNT,300,0.01), - str_to_ann_train_method(train_method_str), param1, param2)); + Ptr m = ANN_MLP::create(); + m->setLayerSizes(layer_sizes); + m->setActivationFunction(ANN_MLP::SIGMOID_SYM, 0, 0); + m->setTermCriteria(TermCriteria(TermCriteria::COUNT,300,0.01)); + m->setTrainMethod(str_to_ann_train_method(train_method_str), param1, param2); + model = m; + } else if( modelName == CV_DTREE ) { @@ -386,8 +374,18 @@ int CV_MLBaseTest::train( int testCaseIdx ) modelParamsNode["max_categories"] >> MAX_CATEGORIES; modelParamsNode["cv_folds"] >> CV_FOLDS; modelParamsNode["is_pruned"] >> IS_PRUNED; - model = DTrees::create(DTrees::Params(MAX_DEPTH, MIN_SAMPLE_COUNT, REG_ACCURACY, USE_SURROGATE, - MAX_CATEGORIES, CV_FOLDS, false, IS_PRUNED, Mat() )); + + Ptr m = DTrees::create(); + m->setMaxDepth(MAX_DEPTH); + m->setMinSampleCount(MIN_SAMPLE_COUNT); + m->setRegressionAccuracy(REG_ACCURACY); + m->setUseSurrogates(USE_SURROGATE); + m->setMaxCategories(MAX_CATEGORIES); + m->setCVFolds(CV_FOLDS); + m->setUse1SERule(false); + m->setTruncatePrunedTree(IS_PRUNED); + m->setPriors(Mat()); + model = m; } else if( modelName == CV_BOOST ) { @@ -401,7 +399,15 @@ int CV_MLBaseTest::train( int testCaseIdx ) modelParamsNode["weight_trim_rate"] >> WEIGHT_TRIM_RATE; modelParamsNode["max_depth"] >> MAX_DEPTH; //modelParamsNode["use_surrogate"] >> USE_SURROGATE; - model = Boost::create( Boost::Params(BOOST_TYPE, WEAK_COUNT, WEIGHT_TRIM_RATE, MAX_DEPTH, USE_SURROGATE, Mat()) ); + + Ptr m = Boost::create(); + m->setBoostType(BOOST_TYPE); + m->setWeakCount(WEAK_COUNT); + m->setWeightTrimRate(WEIGHT_TRIM_RATE); + m->setMaxDepth(MAX_DEPTH); + m->setUseSurrogates(USE_SURROGATE); + m->setPriors(Mat()); + model = m; } else if( modelName == CV_RTREES ) { @@ -416,9 +422,18 @@ int CV_MLBaseTest::train( int testCaseIdx ) modelParamsNode["is_pruned"] >> 
IS_PRUNED; modelParamsNode["nactive_vars"] >> NACTIVE_VARS; modelParamsNode["max_trees_num"] >> MAX_TREES_NUM; - model = RTrees::create(RTrees::Params( MAX_DEPTH, MIN_SAMPLE_COUNT, REG_ACCURACY, - USE_SURROGATE, MAX_CATEGORIES, Mat(), true, // (calc_var_importance == true) <=> RF processes variable importance - NACTIVE_VARS, TermCriteria(TermCriteria::COUNT, MAX_TREES_NUM, OOB_EPS))); + + Ptr<RTrees> m = RTrees::create(); + m->setMaxDepth(MAX_DEPTH); + m->setMinSampleCount(MIN_SAMPLE_COUNT); + m->setRegressionAccuracy(REG_ACCURACY); + m->setUseSurrogates(USE_SURROGATE); + m->setMaxCategories(MAX_CATEGORIES); + m->setPriors(Mat()); + m->setCalculateVarImportance(true); + m->setActiveVarCount(NACTIVE_VARS); + m->setTermCriteria(TermCriteria(TermCriteria::COUNT, MAX_TREES_NUM, OOB_EPS)); + model = m; } if( !model.empty() ) diff --git a/modules/ml/test/test_save_load.cpp b/modules/ml/test/test_save_load.cpp index 74e8eef0df..606079b818 100644 --- a/modules/ml/test/test_save_load.cpp +++ b/modules/ml/test/test_save_load.cpp @@ -149,9 +149,8 @@ int CV_SLMLTest::validate_test_results( int testCaseIdx ) } TEST(ML_NaiveBayes, save_load) { CV_SLMLTest test( CV_NBAYES ); test.safe_run(); } -//CV_SLMLTest lsmlknearest( CV_KNEAREST, "slknearest" ); // does not support save! +TEST(ML_KNearest, save_load) { CV_SLMLTest test( CV_KNEAREST ); test.safe_run(); } TEST(ML_SVM, save_load) { CV_SLMLTest test( CV_SVM ); test.safe_run(); } -//CV_SLMLTest lsmlem( CV_EM, "slem" ); // does not support save! TEST(ML_ANN, save_load) { CV_SLMLTest test( CV_ANN ); test.safe_run(); } TEST(ML_DTree, save_load) { CV_SLMLTest test( CV_DTREE ); test.safe_run(); } TEST(ML_Boost, save_load) { CV_SLMLTest test( CV_BOOST ); test.safe_run(); } diff --git a/modules/superres/include/opencv2/superres.hpp b/modules/superres/include/opencv2/superres.hpp index 0639b10422..acc067302a 100644 --- a/modules/superres/include/opencv2/superres.hpp +++ b/modules/superres/include/opencv2/superres.hpp @@ -104,34 +104,34 @@ namespace cv */ virtual void collectGarbage(); - //! @name Scale factor + //! @brief Scale factor CV_PURE_PROPERTY(int, Scale) - //! @name Iterations count + //! @brief Iterations count CV_PURE_PROPERTY(int, Iterations) - //! @name Asymptotic value of steepest descent method + //! @brief Asymptotic value of steepest descent method CV_PURE_PROPERTY(double, Tau) - //! @name Weight parameter to balance data term and smoothness term + //! @brief Weight parameter to balance data term and smoothness term CV_PURE_PROPERTY(double, Labmda) - //! @name Parameter of spacial distribution in Bilateral-TV + //! @brief Parameter of spatial distribution in Bilateral-TV CV_PURE_PROPERTY(double, Alpha) - //! @name Kernel size of Bilateral-TV filter + //! @brief Kernel size of Bilateral-TV filter CV_PURE_PROPERTY(int, KernelSize) - //! @name Gaussian blur kernel size + //! @brief Gaussian blur kernel size CV_PURE_PROPERTY(int, BlurKernelSize) - //! @name Gaussian blur sigma + //! @brief Gaussian blur sigma CV_PURE_PROPERTY(double, BlurSigma) - //! @name Radius of the temporal search area + //! @brief Radius of the temporal search area CV_PURE_PROPERTY(int, TemporalAreaRadius) - //! @name Dense optical flow algorithm + //! 
@brief Dense optical flow algorithm CV_PURE_PROPERTY_S(Ptr, OpticalFlow) protected: diff --git a/modules/superres/include/opencv2/superres/optical_flow.hpp b/modules/superres/include/opencv2/superres/optical_flow.hpp index 7bc64782cb..add606c02b 100644 --- a/modules/superres/include/opencv2/superres/optical_flow.hpp +++ b/modules/superres/include/opencv2/superres/optical_flow.hpp @@ -98,17 +98,17 @@ namespace cv class CV_EXPORTS BroxOpticalFlow : public virtual DenseOpticalFlowExt { public: - //! @name Flow smoothness + //! @brief Flow smoothness CV_PURE_PROPERTY(double, Alpha) - //! @name Gradient constancy importance + //! @brief Gradient constancy importance CV_PURE_PROPERTY(double, Gamma) - //! @name Pyramid scale factor + //! @brief Pyramid scale factor CV_PURE_PROPERTY(double, ScaleFactor) - //! @name Number of lagged non-linearity iterations (inner loop) + //! @brief Number of lagged non-linearity iterations (inner loop) CV_PURE_PROPERTY(int, InnerIterations) - //! @name Number of warping iterations (number of pyramid levels) + //! @brief Number of warping iterations (number of pyramid levels) CV_PURE_PROPERTY(int, OuterIterations) - //! @name Number of linear system solver iterations + //! @brief Number of linear system solver iterations CV_PURE_PROPERTY(int, SolverIterations) }; CV_EXPORTS Ptr createOptFlow_Brox_CUDA(); diff --git a/modules/superres/src/optical_flow.cpp b/modules/superres/src/optical_flow.cpp index a08a58bd9e..df6725b72b 100644 --- a/modules/superres/src/optical_flow.cpp +++ b/modules/superres/src/optical_flow.cpp @@ -328,18 +328,6 @@ Ptr cv::superres::createOptFlow_Simple() namespace { - #define CV_WRAP_PROPERTY(type, name, internal_name, internal_obj) \ - type get##name() const \ - { \ - return internal_obj->get##internal_name(); \ - } \ - void set##name(type _name) \ - { \ - internal_obj->set##internal_name(_name); \ - } - - #define CV_WRAP_SAME_PROPERTY(type, name, internal_obj) CV_WRAP_PROPERTY(type, name, name, internal_obj) - class DualTVL1 : public CpuOpticalFlow, public virtual cv::superres::DualTVL1OpticalFlow { public: @@ -347,14 +335,14 @@ namespace void calc(InputArray frame0, InputArray frame1, OutputArray flow1, OutputArray flow2); void collectGarbage(); - CV_WRAP_SAME_PROPERTY(double, Tau, alg_) - CV_WRAP_SAME_PROPERTY(double, Lambda, alg_) - CV_WRAP_SAME_PROPERTY(double, Theta, alg_) - CV_WRAP_SAME_PROPERTY(int, ScalesNumber, alg_) - CV_WRAP_SAME_PROPERTY(int, WarpingsNumber, alg_) - CV_WRAP_SAME_PROPERTY(double, Epsilon, alg_) - CV_WRAP_PROPERTY(int, Iterations, OuterIterations, alg_) - CV_WRAP_SAME_PROPERTY(bool, UseInitialFlow, alg_) + CV_WRAP_SAME_PROPERTY(double, Tau, (*alg_)) + CV_WRAP_SAME_PROPERTY(double, Lambda, (*alg_)) + CV_WRAP_SAME_PROPERTY(double, Theta, (*alg_)) + CV_WRAP_SAME_PROPERTY(int, ScalesNumber, (*alg_)) + CV_WRAP_SAME_PROPERTY(int, WarpingsNumber, (*alg_)) + CV_WRAP_SAME_PROPERTY(double, Epsilon, (*alg_)) + CV_WRAP_PROPERTY(int, Iterations, OuterIterations, (*alg_)) + CV_WRAP_SAME_PROPERTY(bool, UseInitialFlow, (*alg_)) protected: void impl(InputArray input0, InputArray input1, OutputArray dst); diff --git a/modules/video/include/opencv2/video/tracking.hpp b/modules/video/include/opencv2/video/tracking.hpp index 40e9ffab88..90be72ea2d 100644 --- a/modules/video/include/opencv2/video/tracking.hpp +++ b/modules/video/include/opencv2/video/tracking.hpp @@ -440,29 +440,29 @@ Javier Sanchez, Enric Meinhardt-Llopis and Gabriele Facciolo. 
"TV-L1 Optical Flo class CV_EXPORTS_W DualTVL1OpticalFlow : public DenseOpticalFlow { public: - //! @name Time step of the numerical scheme + //! @brief Time step of the numerical scheme CV_PURE_PROPERTY(double, Tau) - //! @name Weight parameter for the data term, attachment parameter + //! @brief Weight parameter for the data term, attachment parameter CV_PURE_PROPERTY(double, Lambda) - //! @name Weight parameter for (u - v)^2, tightness parameter + //! @brief Weight parameter for (u - v)^2, tightness parameter CV_PURE_PROPERTY(double, Theta) - //! @name coefficient for additional illumination variation term + //! @brief coefficient for additional illumination variation term CV_PURE_PROPERTY(double, Gamma) - //! @name Number of scales used to create the pyramid of images + //! @brief Number of scales used to create the pyramid of images CV_PURE_PROPERTY(int, ScalesNumber) - //! @name Number of warpings per scale + //! @brief Number of warpings per scale CV_PURE_PROPERTY(int, WarpingsNumber) - //! @name Stopping criterion threshold used in the numerical scheme, which is a trade-off between precision and running time + //! @brief Stopping criterion threshold used in the numerical scheme, which is a trade-off between precision and running time CV_PURE_PROPERTY(double, Epsilon) - //! @name Inner iterations (between outlier filtering) used in the numerical scheme + //! @brief Inner iterations (between outlier filtering) used in the numerical scheme CV_PURE_PROPERTY(int, InnerIterations) - //! @name Outer iterations (number of inner loops) used in the numerical scheme + //! @brief Outer iterations (number of inner loops) used in the numerical scheme CV_PURE_PROPERTY(int, OuterIterations) - //! @name Use initial flow + //! @brief Use initial flow CV_PURE_PROPERTY(bool, UseInitialFlow) - //! @name Step between scales (<1) + //! @brief Step between scales (<1) CV_PURE_PROPERTY(double, ScaleStep) - //! @name Median filter kernel size (1 = no filter) (3 or 5) + //! 
@brief Median filter kernel size (1 = no filter) (3 or 5) CV_PURE_PROPERTY(int, MedianFiltering) }; diff --git a/samples/cpp/em.cpp b/samples/cpp/em.cpp index bb777fcc86..f5310740f4 100644 --- a/samples/cpp/em.cpp +++ b/samples/cpp/em.cpp @@ -36,9 +36,11 @@ int main( int /*argc*/, char** /*argv*/ ) samples = samples.reshape(1, 0); // cluster the data - Ptr em_model = EM::train( samples, noArray(), labels, noArray(), - EM::Params(N, EM::COV_MAT_SPHERICAL, - TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 300, 0.1))); + Ptr em_model = EM::create(); + em_model->setClustersNumber(N); + em_model->setCovarianceMatrixType(EM::COV_MAT_SPHERICAL); + em_model->setTermCriteria(TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 300, 0.1)); + em_model->trainEM( samples, noArray(), labels, noArray() ); // classify every image pixel for( i = 0; i < img.rows; i++ ) diff --git a/samples/cpp/letter_recog.cpp b/samples/cpp/letter_recog.cpp index 66a59318cc..174e7f9839 100644 --- a/samples/cpp/letter_recog.cpp +++ b/samples/cpp/letter_recog.cpp @@ -178,8 +178,23 @@ build_rtrees_classifier( const string& data_filename, { // create classifier by using and cout << "Training the classifier ...\n"; +// Params( int maxDepth, int minSampleCount, +// double regressionAccuracy, bool useSurrogates, +// int maxCategories, const Mat& priors, +// bool calcVarImportance, int nactiveVars, +// TermCriteria termCrit ); Ptr tdata = prepare_train_data(data, responses, ntrain_samples); - model = StatModel::train(tdata, RTrees::Params(10,10,0,false,15,Mat(),true,4,TC(100,0.01f))); + model = RTrees::create(); + model->setMaxDepth(10); + model->setMinSampleCount(10); + model->setRegressionAccuracy(0); + model->setUseSurrogates(false); + model->setMaxCategories(15); + model->setPriors(Mat()); + model->setCalculateVarImportance(true); + model->setActiveVarCount(4); + model->setTermCriteria(TC(100,0.01f)); + model->train(tdata); cout << endl; } @@ -269,7 +284,14 @@ build_boost_classifier( const string& data_filename, priors[1] = 26; cout << "Training the classifier (may take a few minutes)...\n"; - model = StatModel::train(tdata, Boost::Params(Boost::GENTLE, 100, 0.95, 5, false, Mat(priors) )); + model = Boost::create(); + model->setBoostType(Boost::GENTLE); + model->setWeakCount(100); + model->setWeightTrimRate(0.95); + model->setMaxDepth(5); + model->setUseSurrogates(false); + model->setPriors(Mat(priors)); + model->train(tdata); cout << endl; } @@ -374,11 +396,11 @@ build_mlp_classifier( const string& data_filename, Mat layer_sizes( 1, nlayers, CV_32S, layer_sz ); #if 1 - int method = ANN_MLP::Params::BACKPROP; + int method = ANN_MLP::BACKPROP; double method_param = 0.001; int max_iter = 300; #else - int method = ANN_MLP::Params::RPROP; + int method = ANN_MLP::RPROP; double method_param = 0.1; int max_iter = 1000; #endif @@ -386,7 +408,12 @@ build_mlp_classifier( const string& data_filename, Ptr tdata = TrainData::create(train_data, ROW_SAMPLE, train_responses); cout << "Training the classifier (may take a few minutes)...\n"; - model = StatModel::train(tdata, ANN_MLP::Params(layer_sizes, ANN_MLP::SIGMOID_SYM, 0, 0, TC(max_iter,0), method, method_param)); + model = ANN_MLP::create(); + model->setLayerSizes(layer_sizes); + model->setActivationFunction(ANN_MLP::SIGMOID_SYM, 0, 0); + model->setTermCriteria(TC(max_iter,0)); + model->setTrainMethod(method, method_param); + model->train(tdata); cout << endl; } @@ -403,7 +430,6 @@ build_knearest_classifier( const string& data_filename, int K ) if( !ok ) return ok; - Ptr model; int 
nsamples_all = data.rows; int ntrain_samples = (int)(nsamples_all*0.8); @@ -411,7 +437,10 @@ build_knearest_classifier( const string& data_filename, int K ) // create classifier by using and cout << "Training the classifier ...\n"; Ptr tdata = prepare_train_data(data, responses, ntrain_samples); - model = StatModel::train(tdata, KNearest::Params(K, true)); + Ptr model = KNearest::create(); + model->setDefaultK(K); + model->setIsClassifier(true); + model->train(tdata); cout << endl; test_and_save_classifier(model, data, responses, ntrain_samples, 0, string()); @@ -435,7 +464,8 @@ build_nbayes_classifier( const string& data_filename ) // create classifier by using and cout << "Training the classifier ...\n"; Ptr tdata = prepare_train_data(data, responses, ntrain_samples); - model = StatModel::train(tdata, NormalBayesClassifier::Params()); + model = NormalBayesClassifier::create(); + model->train(tdata); cout << endl; test_and_save_classifier(model, data, responses, ntrain_samples, 0, string()); @@ -471,13 +501,11 @@ build_svm_classifier( const string& data_filename, // create classifier by using and cout << "Training the classifier ...\n"; Ptr tdata = prepare_train_data(data, responses, ntrain_samples); - - SVM::Params params; - params.svmType = SVM::C_SVC; - params.kernelType = SVM::LINEAR; - params.C = 1; - - model = StatModel::train(tdata, params); + model = SVM::create(); + model->setType(SVM::C_SVC); + model->setKernel(SVM::LINEAR); + model->setC(1); + model->train(tdata); cout << endl; } diff --git a/samples/cpp/logistic_regression.cpp b/samples/cpp/logistic_regression.cpp index 1aeb42d925..b567dd2d25 100644 --- a/samples/cpp/logistic_regression.cpp +++ b/samples/cpp/logistic_regression.cpp @@ -132,20 +132,16 @@ int main() showImage(data_train, 28, "train data"); showImage(data_test, 28, "test data"); - // simple case with batch gradient - LogisticRegression::Params params = LogisticRegression::Params( - 0.001, 10, LogisticRegression::BATCH, LogisticRegression::REG_L2, 1, 1); - // simple case with mini-batch gradient - // LogisticRegression::Params params = LogisticRegression::Params( - // 0.001, 10, LogisticRegression::MINI_BATCH, LogisticRegression::REG_L2, 1, 1); - - // mini-batch gradient with higher accuracy - // LogisticRegression::Params params = LogisticRegression::Params( - // 0.000001, 10, LogisticRegression::MINI_BATCH, LogisticRegression::REG_L2, 1, 1); - cout << "training..."; - Ptr lr1 = LogisticRegression::create(params); + //! [init] + Ptr lr1 = LogisticRegression::create(); + lr1->setLearningRate(0.001); + lr1->setIterations(10); + lr1->setRegularization(LogisticRegression::REG_L2); + lr1->setTrainMethod(LogisticRegression::BATCH); + lr1->setMiniBatchSize(1); + //! [init] lr1->train(data_train, ROW_SAMPLE, labels_train); cout << "done!" 
<< endl; diff --git a/samples/cpp/points_classifier.cpp b/samples/cpp/points_classifier.cpp index 9b274bac1a..c0270d084c 100644 --- a/samples/cpp/points_classifier.cpp +++ b/samples/cpp/points_classifier.cpp @@ -102,7 +102,7 @@ static void predict_and_paint(const Ptr& model, Mat& dst) static void find_decision_boundary_NBC() { // learn classifier - Ptr normalBayesClassifier = StatModel::train(prepare_train_data(), NormalBayesClassifier::Params()); + Ptr normalBayesClassifier = StatModel::train(prepare_train_data()); predict_and_paint(normalBayesClassifier, imgDst); } @@ -112,15 +112,29 @@ static void find_decision_boundary_NBC() #if _KNN_ static void find_decision_boundary_KNN( int K ) { - Ptr knn = StatModel::train(prepare_train_data(), KNearest::Params(K, true)); + + Ptr knn = KNearest::create(); + knn->setDefaultK(K); + knn->setIsClassifier(true); + knn->train(prepare_train_data()); predict_and_paint(knn, imgDst); } #endif #if _SVM_ -static void find_decision_boundary_SVM( SVM::Params params ) +static void find_decision_boundary_SVM( double C ) { - Ptr svm = StatModel::train(prepare_train_data(), params); + Ptr svm = SVM::create(); + svm->setType(SVM::C_SVC); + svm->setKernel(SVM::POLY); //SVM::LINEAR; + svm->setDegree(0.5); + svm->setGamma(1); + svm->setCoef0(1); + svm->setNu(0.5); + svm->setP(0); + svm->setTermCriteria(TermCriteria(TermCriteria::MAX_ITER+TermCriteria::EPS, 1000, 0.01)); + svm->setC(C); + svm->train(prepare_train_data()); predict_and_paint(svm, imgDst); Mat sv = svm->getSupportVectors(); @@ -135,16 +149,14 @@ static void find_decision_boundary_SVM( SVM::Params params ) #if _DT_ static void find_decision_boundary_DT() { - DTrees::Params params; - params.maxDepth = 8; - params.minSampleCount = 2; - params.useSurrogates = false; - params.CVFolds = 0; // the number of cross-validation folds - params.use1SERule = false; - params.truncatePrunedTree = false; - - Ptr dtree = StatModel::train(prepare_train_data(), params); - + Ptr dtree = DTrees::create(); + dtree->setMaxDepth(8); + dtree->setMinSampleCount(2); + dtree->setUseSurrogates(false); + dtree->setCVFolds(0); // the number of cross-validation folds + dtree->setUse1SERule(false); + dtree->setTruncatePrunedTree(false); + dtree->train(prepare_train_data()); predict_and_paint(dtree, imgDst); } #endif @@ -152,15 +164,14 @@ static void find_decision_boundary_DT() #if _BT_ static void find_decision_boundary_BT() { - Boost::Params params( Boost::DISCRETE, // boost_type - 100, // weak_count - 0.95, // weight_trim_rate - 2, // max_depth - false, //use_surrogates - Mat() // priors - ); - - Ptr boost = StatModel::train(prepare_train_data(), params); + Ptr boost = Boost::create(); + boost->setBoostType(Boost::DISCRETE); + boost->setWeakCount(100); + boost->setWeightTrimRate(0.95); + boost->setMaxDepth(2); + boost->setUseSurrogates(false); + boost->setPriors(Mat()); + boost->train(prepare_train_data()); predict_and_paint(boost, imgDst); } @@ -185,18 +196,17 @@ static void find_decision_boundary_GBT() #if _RF_ static void find_decision_boundary_RF() { - RTrees::Params params( 4, // max_depth, - 2, // min_sample_count, - 0.f, // regression_accuracy, - false, // use_surrogates, - 16, // max_categories, - Mat(), // priors, - false, // calc_var_importance, - 1, // nactive_vars, - TermCriteria(TermCriteria::MAX_ITER, 5, 0) // max_num_of_trees_in_the_forest, - ); - - Ptr rtrees = StatModel::train(prepare_train_data(), params); + Ptr rtrees = RTrees::create(); + rtrees->setMaxDepth(4); + rtrees->setMinSampleCount(2); + 
rtrees->setRegressionAccuracy(0.f); + rtrees->setUseSurrogates(false); + rtrees->setMaxCategories(16); + rtrees->setPriors(Mat()); + rtrees->setCalculateVarImportance(false); + rtrees->setActiveVarCount(1); + rtrees->setTermCriteria(TermCriteria(TermCriteria::MAX_ITER, 5, 0)); + rtrees->train(prepare_train_data()); predict_and_paint(rtrees, imgDst); } @@ -205,9 +215,6 @@ static void find_decision_boundary_RF() #if _ANN_ static void find_decision_boundary_ANN( const Mat& layer_sizes ) { - ANN_MLP::Params params(layer_sizes, ANN_MLP::SIGMOID_SYM, 1, 1, TermCriteria(TermCriteria::MAX_ITER+TermCriteria::EPS, 300, FLT_EPSILON), - ANN_MLP::Params::BACKPROP, 0.001); - Mat trainClasses = Mat::zeros( (int)trainedPoints.size(), (int)classColors.size(), CV_32FC1 ); for( int i = 0; i < trainClasses.rows; i++ ) { @@ -217,7 +224,12 @@ static void find_decision_boundary_ANN( const Mat& layer_sizes ) Mat samples = prepare_train_samples(trainedPoints); Ptr<TrainData> tdata = TrainData::create(samples, ROW_SAMPLE, trainClasses); - Ptr<ANN_MLP> ann = StatModel::train<ANN_MLP>(tdata, params); + Ptr<ANN_MLP> ann = ANN_MLP::create(); + ann->setLayerSizes(layer_sizes); + ann->setActivationFunction(ANN_MLP::SIGMOID_SYM, 1, 1); + ann->setTermCriteria(TermCriteria(TermCriteria::MAX_ITER+TermCriteria::EPS, 300, FLT_EPSILON)); + ann->setTrainMethod(ANN_MLP::BACKPROP, 0.001); + ann->train(tdata); predict_and_paint(ann, imgDst); } #endif @@ -247,8 +259,11 @@ static void find_decision_boundary_EM() // learn models if( !modelSamples.empty() ) { - em_models[i] = EM::train(modelSamples, noArray(), noArray(), noArray(), - EM::Params(componentCount, EM::COV_MAT_DIAGONAL)); + Ptr<EM> em = EM::create(); + em->setClustersNumber(componentCount); + em->setCovarianceMatrixType(EM::COV_MAT_DIAGONAL); + em->trainEM(modelSamples, noArray(), noArray(), noArray()); + em_models[i] = em; } } @@ -332,33 +347,20 @@ int main() imshow( "NormalBayesClassifier", imgDst ); #endif #if _KNN_ - int K = 3; - find_decision_boundary_KNN( K ); + find_decision_boundary_KNN( 3 ); imshow( "kNN", imgDst ); - K = 15; - find_decision_boundary_KNN( K ); + find_decision_boundary_KNN( 15 ); imshow( "kNN2", imgDst ); #endif #if _SVM_ //(1)-(2)separable and not sets - SVM::Params params; - params.svmType = SVM::C_SVC; - params.kernelType = SVM::POLY; //CvSVM::LINEAR; - params.degree = 0.5; - params.gamma = 1; - params.coef0 = 1; - params.C = 1; - params.nu = 0.5; - params.p = 0; - params.termCrit = TermCriteria(TermCriteria::MAX_ITER+TermCriteria::EPS, 1000, 0.01); - - find_decision_boundary_SVM( params ); + + find_decision_boundary_SVM( 1 ); imshow( "classificationSVM1", imgDst ); - params.C = 10; - find_decision_boundary_SVM( params ); + find_decision_boundary_SVM( 10 ); imshow( "classificationSVM2", imgDst ); #endif diff --git a/samples/cpp/train_HOG.cpp b/samples/cpp/train_HOG.cpp index dfecb0e03f..68c7f56aae 100644 --- a/samples/cpp/train_HOG.cpp +++ b/samples/cpp/train_HOG.cpp @@ -141,7 +141,7 @@ Mat get_hogdescriptor_visu(const Mat& color_origImg, vector<float>& descriptorValues int cellSize = 8; int gradientBinSize = 9; - float radRangeForOneBin = (float)(CV_PI/(float)gradientBinSize); // dividing 180° into 9 bins, how large (in rad) is one bin? + float radRangeForOneBin = (float)(CV_PI/(float)gradientBinSize); // dividing 180 degrees into 9 bins, how large (in rad) is one bin? 
// prepare data structure: 9 orientation / gradient strenghts for each cell int cells_in_x_dir = DIMX / cellSize; @@ -313,23 +313,23 @@ void compute_hog( const vector< Mat > & img_lst, vector< Mat > & gradient_lst, c void train_svm( const vector< Mat > & gradient_lst, const vector< int > & labels ) { - /* Default values to train SVM */ - SVM::Params params; - params.coef0 = 0.0; - params.degree = 3; - params.termCrit.epsilon = 1e-3; - params.gamma = 0; - params.kernelType = SVM::LINEAR; - params.nu = 0.5; - params.p = 0.1; // for EPSILON_SVR, epsilon in loss function? - params.C = 0.01; // From paper, soft classifier - params.svmType = SVM::EPS_SVR; // C_SVC; // EPSILON_SVR; // may be also NU_SVR; // do regression task Mat train_data; convert_to_ml( gradient_lst, train_data ); clog << "Start training..."; - Ptr svm = StatModel::train(train_data, ROW_SAMPLE, Mat(labels), params); + Ptr svm = SVM::create(); + /* Default values to train SVM */ + svm->setCoef0(0.0); + svm->setDegree(3); + svm->setTermCriteria(TermCriteria( CV_TERMCRIT_ITER+CV_TERMCRIT_EPS, 1000, 1e-3 )); + svm->setGamma(0); + svm->setKernel(SVM::LINEAR); + svm->setNu(0.5); + svm->setP(0.1); // for EPSILON_SVR, epsilon in loss function? + svm->setC(0.01); // From paper, soft classifier + svm->setType(SVM::EPS_SVR); // C_SVC; // EPSILON_SVR; // may be also NU_SVR; // do regression task + svm->train(train_data, ROW_SAMPLE, Mat(labels)); clog << "...[done]" << endl; svm->save( "my_people_detector.yml" ); diff --git a/samples/cpp/tree_engine.cpp b/samples/cpp/tree_engine.cpp index 6defc31c50..2d6824d24d 100644 --- a/samples/cpp/tree_engine.cpp +++ b/samples/cpp/tree_engine.cpp @@ -73,18 +73,42 @@ int main(int argc, char** argv) data->setTrainTestSplitRatio(train_test_split_ratio); printf("======DTREE=====\n"); - Ptr dtree = DTrees::create(DTrees::Params( 10, 2, 0, false, 16, 0, false, false, Mat() )); + Ptr dtree = DTrees::create(); + dtree->setMaxDepth(10); + dtree->setMinSampleCount(2); + dtree->setRegressionAccuracy(0); + dtree->setUseSurrogates(false); + dtree->setMaxCategories(16); + dtree->setCVFolds(0); + dtree->setUse1SERule(false); + dtree->setTruncatePrunedTree(false); + dtree->setPriors(Mat()); train_and_print_errs(dtree, data); if( (int)data->getClassLabels().total() <= 2 ) // regression or 2-class classification problem { printf("======BOOST=====\n"); - Ptr boost = Boost::create(Boost::Params(Boost::GENTLE, 100, 0.95, 2, false, Mat())); + Ptr boost = Boost::create(); + boost->setBoostType(Boost::GENTLE); + boost->setWeakCount(100); + boost->setWeightTrimRate(0.95); + boost->setMaxDepth(2); + boost->setUseSurrogates(false); + boost->setPriors(Mat()); train_and_print_errs(boost, data); } printf("======RTREES=====\n"); - Ptr rtrees = RTrees::create(RTrees::Params(10, 2, 0, false, 16, Mat(), false, 0, TermCriteria(TermCriteria::MAX_ITER, 100, 0))); + Ptr rtrees = RTrees::create(); + rtrees->setMaxDepth(10); + rtrees->setMinSampleCount(2); + rtrees->setRegressionAccuracy(0); + rtrees->setUseSurrogates(false); + rtrees->setMaxCategories(16); + rtrees->setPriors(Mat()); + rtrees->setCalculateVarImportance(false); + rtrees->setActiveVarCount(0); + rtrees->setTermCriteria(TermCriteria(TermCriteria::MAX_ITER, 100, 0)); train_and_print_errs(rtrees, data); return 0; diff --git a/samples/cpp/tutorial_code/ml/introduction_to_svm/introduction_to_svm.cpp b/samples/cpp/tutorial_code/ml/introduction_to_svm/introduction_to_svm.cpp index e90882b67a..0513e367d6 100644 --- 
a/samples/cpp/tutorial_code/ml/introduction_to_svm/introduction_to_svm.cpp +++ b/samples/cpp/tutorial_code/ml/introduction_to_svm/introduction_to_svm.cpp @@ -14,23 +14,30 @@ int main(int, char**) Mat image = Mat::zeros(height, width, CV_8UC3); // Set up training data + //! [setup1] int labels[4] = {1, -1, -1, -1}; - Mat labelsMat(4, 1, CV_32SC1, labels); - float trainingData[4][2] = { {501, 10}, {255, 10}, {501, 255}, {10, 501} }; + //! [setup1] + //! [setup2] Mat trainingDataMat(4, 2, CV_32FC1, trainingData); + Mat labelsMat(4, 1, CV_32SC1, labels); + //! [setup2] - // Set up SVM's parameters - SVM::Params params; - params.svmType = SVM::C_SVC; - params.kernelType = SVM::LINEAR; - params.termCrit = TermCriteria(TermCriteria::MAX_ITER, 100, 1e-6); // Train the SVM - Ptr svm = StatModel::train(trainingDataMat, ROW_SAMPLE, labelsMat, params); + //! [init] + Ptr svm = SVM::create(); + svm->setType(SVM::C_SVC); + svm->setKernel(SVM::LINEAR); + svm->setTermCriteria(TermCriteria(TermCriteria::MAX_ITER, 100, 1e-6)); + //! [init] + //! [train] + svm->train(trainingDataMat, ROW_SAMPLE, labelsMat); + //! [train] - Vec3b green(0,255,0), blue (255,0,0); // Show the decision regions given by the SVM + //! [show] + Vec3b green(0,255,0), blue (255,0,0); for (int i = 0; i < image.rows; ++i) for (int j = 0; j < image.cols; ++j) { @@ -42,16 +49,20 @@ int main(int, char**) else if (response == -1) image.at(i,j) = blue; } + //! [show] // Show the training data + //! [show_data] int thickness = -1; int lineType = 8; circle( image, Point(501, 10), 5, Scalar( 0, 0, 0), thickness, lineType ); circle( image, Point(255, 10), 5, Scalar(255, 255, 255), thickness, lineType ); circle( image, Point(501, 255), 5, Scalar(255, 255, 255), thickness, lineType ); circle( image, Point( 10, 501), 5, Scalar(255, 255, 255), thickness, lineType ); + //! [show_data] // Show support vectors + //! [show_vectors] thickness = 2; lineType = 8; Mat sv = svm->getSupportVectors(); @@ -61,6 +72,7 @@ int main(int, char**) const float* v = sv.ptr(i); circle( image, Point( (int) v[0], (int) v[1]), 6, Scalar(128, 128, 128), thickness, lineType); } + //! [show_vectors] imwrite("result.png", image); // save the image diff --git a/samples/cpp/tutorial_code/ml/non_linear_svms/non_linear_svms.cpp b/samples/cpp/tutorial_code/ml/non_linear_svms/non_linear_svms.cpp index c40a17e9aa..b221ab5f18 100644 --- a/samples/cpp/tutorial_code/ml/non_linear_svms/non_linear_svms.cpp +++ b/samples/cpp/tutorial_code/ml/non_linear_svms/non_linear_svms.cpp @@ -39,6 +39,7 @@ int main() // Set up the linearly separable part of the training data int nLinearSamples = (int) (FRAC_LINEAR_SEP * NTRAINING_SAMPLES); + //! [setup1] // Generate random points for the class 1 Mat trainClass = trainData.rowRange(0, nLinearSamples); // The x coordinate of the points is in [0, 0.4) @@ -56,9 +57,10 @@ int main() // The y coordinate of the points is in [0, 1) c = trainClass.colRange(1,2); rng.fill(c, RNG::UNIFORM, Scalar(1), Scalar(HEIGHT)); + //! [setup1] //------------------ Set up the non-linearly separable part of the training data --------------- - + //! [setup2] // Generate random points for the classes 1 and 2 trainClass = trainData.rowRange( nLinearSamples, 2*NTRAINING_SAMPLES-nLinearSamples); // The x coordinate of the points is in [0.4, 0.6) @@ -67,24 +69,28 @@ int main() // The y coordinate of the points is in [0, 1) c = trainClass.colRange(1,2); rng.fill(c, RNG::UNIFORM, Scalar(1), Scalar(HEIGHT)); - + //! 
[setup2] //------------------------- Set up the labels for the classes --------------------------------- labels.rowRange( 0, NTRAINING_SAMPLES).setTo(1); // Class 1 labels.rowRange(NTRAINING_SAMPLES, 2*NTRAINING_SAMPLES).setTo(2); // Class 2 //------------------------ 2. Set up the support vector machines parameters -------------------- - SVM::Params params; - params.svmType = SVM::C_SVC; - params.C = 0.1; - params.kernelType = SVM::LINEAR; - params.termCrit = TermCriteria(TermCriteria::MAX_ITER, (int)1e7, 1e-6); - //------------------------ 3. Train the svm ---------------------------------------------------- cout << "Starting training process" << endl; - Ptr svm = StatModel::train(trainData, ROW_SAMPLE, labels, params); + //! [init] + Ptr svm = SVM::create(); + svm->setType(SVM::C_SVC); + svm->setC(0.1); + svm->setKernel(SVM::LINEAR); + svm->setTermCriteria(TermCriteria(TermCriteria::MAX_ITER, (int)1e7, 1e-6)); + //! [init] + //! [train] + svm->train(trainData, ROW_SAMPLE, labels); + //! [train] cout << "Finished training process" << endl; //------------------------ 4. Show the decision regions ---------------------------------------- + //! [show] Vec3b green(0,100,0), blue (100,0,0); for (int i = 0; i < I.rows; ++i) for (int j = 0; j < I.cols; ++j) @@ -95,8 +101,10 @@ int main() if (response == 1) I.at(j, i) = green; else if (response == 2) I.at(j, i) = blue; } + //! [show] //----------------------- 5. Show the training data -------------------------------------------- + //! [show_data] int thick = -1; int lineType = 8; float px, py; @@ -114,8 +122,10 @@ int main() py = trainData.at(i,1); circle(I, Point( (int) px, (int) py ), 3, Scalar(255, 0, 0), thick, lineType); } + //! [show_data] //------------------------- 6. Show support vectors -------------------------------------------- + //! [show_vectors] thick = 2; lineType = 8; Mat sv = svm->getSupportVectors(); @@ -125,6 +135,7 @@ int main() const float* v = sv.ptr(i); circle( I, Point( (int) v[0], (int) v[1]), 6, Scalar(128, 128, 128), thick, lineType); } + //! 
From e57359a3a062d39e9cdff32255357ba2161a9027 Mon Sep 17 00:00:00 2001
From: Maksim Shabunin
Date: Wed, 18 Feb 2015 18:19:36 +0300
Subject: [PATCH 3/7] Documentation: set max-width property for inserted images

---
 doc/stylesheet.css | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/doc/stylesheet.css b/doc/stylesheet.css
index 20b8bc329d..806e03bfd4 100644
--- a/doc/stylesheet.css
+++ b/doc/stylesheet.css
@@ -31,3 +31,7 @@ div.contents {
 span.arrow {
     height: 13px;
 }
+
+div.image img{
+  max-width: 900px;
+}

From e6876fecd34fc3c8d06f86115b536d7a34ec5b48 Mon Sep 17 00:00:00 2001
From: Nisarg Thakkar
Date: Tue, 17 Feb 2015 22:14:57 +0530
Subject: [PATCH 4/7] Fixed doc error in optical flow

---
 .../py_video/py_lucas_kanade/py_lucas_kanade.markdown | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/doc/py_tutorials/py_video/py_lucas_kanade/py_lucas_kanade.markdown b/doc/py_tutorials/py_video/py_lucas_kanade/py_lucas_kanade.markdown
index 1ea6cd69dc..48c8761c76 100644
--- a/doc/py_tutorials/py_video/py_lucas_kanade/py_lucas_kanade.markdown
+++ b/doc/py_tutorials/py_video/py_lucas_kanade/py_lucas_kanade.markdown
@@ -46,7 +46,7 @@ get the following equation:
 
 where:
 
-\f[f_x = \frac{\partial f}{\partial x} \; ; \; f_y = \frac{\partial f}{\partial x}\f]\f[u = \frac{dx}{dt} \; ; \; v = \frac{dy}{dt}\f]
+\f[f_x = \frac{\partial f}{\partial x} \; ; \; f_y = \frac{\partial f}{\partial y}\f]\f[u = \frac{dx}{dt} \; ; \; v = \frac{dy}{dt}\f]
 
 Above equation is called Optical Flow equation. In it, we can find \f$f_x\f$ and \f$f_y\f$, they
 are image gradients. Similarly \f$f_t\f$ is the gradient along time. But \f$(u,v)\f$ is unknown.
 We cannot solve this
From 98a8045aaf07393c3b3fc0b58a3f33511b42f220 Mon Sep 17 00:00:00 2001
From: Dmitry-Me
Date: Mon, 16 Feb 2015 15:39:52 +0300
Subject: [PATCH 5/7] Reduce variable scope

---
 modules/core/src/conjugate_gradient.cpp | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/modules/core/src/conjugate_gradient.cpp b/modules/core/src/conjugate_gradient.cpp
index caf41fc954..90353cc7fd 100644
--- a/modules/core/src/conjugate_gradient.cpp
+++ b/modules/core/src/conjugate_gradient.cpp
@@ -136,7 +136,6 @@ namespace cv
         dprintf(("d first time\n"));print_matrix(d);
         dprintf(("r\n"));print_matrix(r);
 
-        double beta=0;
         for(int count=0;count<_termcrit.maxCount;count++){
             minimizeOnTheLine(_Function,proxy_x,d,minimizeOnTheLine_buf1,minimizeOnTheLine_buf2);
             r.copyTo(r_old);
@@ -147,7 +146,7 @@
                 break;
             }
             r_norm_sq=r_norm_sq*r_norm_sq;
-            beta=MAX(0.0,(r_norm_sq-r.dot(r_old))/r_norm_sq);
+            double beta=MAX(0.0,(r_norm_sq-r.dot(r_old))/r_norm_sq);
             d=r+beta*d;
         }

From 453f384bd7d72506377ca60d41a738b71d71d229 Mon Sep 17 00:00:00 2001
From: theodore
Date: Thu, 19 Feb 2015 17:38:44 +0100
Subject: [PATCH 6/7] adding documentation for the findnonzero() function

---
 modules/core/include/opencv2/core.hpp | 25 +++++++++++++++++++++++--
 1 file changed, 23 insertions(+), 2 deletions(-)

diff --git a/modules/core/include/opencv2/core.hpp b/modules/core/include/opencv2/core.hpp
index 76fb3fd520..cd9cb47206 100644
--- a/modules/core/include/opencv2/core.hpp
+++ b/modules/core/include/opencv2/core.hpp
@@ -545,8 +545,29 @@ The function returns the number of non-zero elements in src :
 */
 CV_EXPORTS_W int countNonZero( InputArray src );
 
-/** @brief returns the list of locations of non-zero pixels
-@todo document
+/** @brief Returns the list of locations of non-zero pixels
+
+The function returns the coordinates of the non-zero pixels in src.
+The result array can be of type Mat or vector<Point>. For example:
+@code{.cpp}
+    cv::Mat binaryImage; // input, binary image
+    cv::Mat locations;   // output, locations of non-zero pixels
+    cv::findNonZero(binaryImage, locations);
+
+    // access pixel coordinates
+    Point pnt = locations.at<Point>(i);
+@endcode
+or
+@code{.cpp}
+    cv::Mat binaryImage;           // input, binary image
+    vector<Point> locations;       // output, locations of non-zero pixels
+    cv::findNonZero(binaryImage, locations);
+
+    // access pixel coordinates
+    Point pnt = locations[i];
+@endcode
+@param src single-channel array
+@param idx output array with the non-zero pixel points
 */
 CV_EXPORTS_W void findNonZero( InputArray src, OutputArray idx );
From cfccdc9b0cdfc3e856a2b81fa51d90cd05949439 Mon Sep 17 00:00:00 2001
From: theodore
Date: Thu, 19 Feb 2015 17:57:52 +0100
Subject: [PATCH 7/7] documenting findnonzero() function

---
 modules/core/include/opencv2/core.hpp | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)

diff --git a/modules/core/include/opencv2/core.hpp b/modules/core/include/opencv2/core.hpp
index cd9cb47206..69baa4d1ab 100644
--- a/modules/core/include/opencv2/core.hpp
+++ b/modules/core/include/opencv2/core.hpp
@@ -547,8 +547,10 @@ CV_EXPORTS_W int countNonZero( InputArray src );
 
 /** @brief Returns the list of locations of non-zero pixels
 
-The function returns the coordinates of the non-zero pixels in src.
-The result array can be of type Mat or vector<Point>. For example:
+Given a binary matrix (likely returned from an operation such
+as threshold(), compare(), >, ==, etc.), returns all of
+the non-zero indices as a cv::Mat or std::vector<cv::Point> (x,y).
+For example:
 @code{.cpp}
     cv::Mat binaryImage; // input, binary image
     cv::Mat locations;   // output, locations of non-zero pixels
     cv::findNonZero(binaryImage, locations);
 
     // access pixel coordinates
     Point pnt = locations.at<Point>(i);
 @endcode
 or
 @code{.cpp}
     cv::Mat binaryImage;           // input, binary image
     vector<Point> locations;       // output, locations of non-zero pixels
     cv::findNonZero(binaryImage, locations);
 
     // access pixel coordinates
     Point pnt = locations[i];
 @endcode
-@param src single-channel array
-@param idx output array with the non-zero pixel points
+@param src single-channel array (type CV_8UC1)
+@param idx the output array, of type cv::Mat or std::vector<Point>, corresponding to non-zero indices in the input
 */
 CV_EXPORTS_W void findNonZero( InputArray src, OutputArray idx );
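Beyond the two access patterns documented above, the returned points plug directly into other point-based APIs. A minimal sketch of a typical findNonZero() workflow, assuming an input path and threshold value that are purely illustrative:

@code{.cpp}
#include <opencv2/core.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/imgproc.hpp>
#include <iostream>
#include <vector>

using namespace cv;

int main()
{
    Mat gray = imread("input.png", IMREAD_GRAYSCALE);
    if (gray.empty())
        return 1;

    // Produce the single-channel binary (CV_8UC1) input the function expects.
    Mat mask;
    threshold(gray, mask, 128, 255, THRESH_BINARY);

    std::vector<Point> locations;
    findNonZero(mask, locations);
    if (locations.empty())
        return 0;

    // The number of returned points agrees with countNonZero().
    CV_Assert((int)locations.size() == countNonZero(mask));

    // The collected coordinates feed other point-based APIs directly,
    // e.g. the tight bounding box of all non-zero pixels.
    Rect box = boundingRect(locations);
    std::cout << "non-zero pixels: " << locations.size()
              << ", bounding box: " << box << std::endl;
    return 0;
}
@endcode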