diff --git a/modules/features2d/include/opencv2/features2d.hpp b/modules/features2d/include/opencv2/features2d.hpp index d54b38ab86..4262f1c565 100644 --- a/modules/features2d/include/opencv2/features2d.hpp +++ b/modules/features2d/include/opencv2/features2d.hpp @@ -49,8 +49,6 @@ namespace cv { -CV_EXPORTS bool initModule_features2d(void); - // //! writes vector of keypoints to the file storage // CV_EXPORTS void write(FileStorage& fs, const String& name, const std::vector& keypoints); // //! reads vector of keypoints from the specified file storage node @@ -94,12 +92,12 @@ public: /************************************ Base Classes ************************************/ /* - * Abstract base class for 2D image feature detectors. + * Abstract base class for 2D image feature detectors and descriptor extractors */ -class CV_EXPORTS_W FeatureDetector : public virtual Algorithm +class CV_EXPORTS_W Feature2D : public virtual Algorithm { public: - virtual ~FeatureDetector(); + virtual ~Feature2D(); /* * Detect keypoints in an image. @@ -108,47 +106,9 @@ public: * mask Mask specifying where to look for keypoints (optional). Must be a char * matrix with non-zero values in the region of interest. */ - CV_WRAP void detect( InputArray image, CV_OUT std::vector& keypoints, InputArray mask=noArray() ) const; - - /* - * Detect keypoints in an image set. - * images Image collection. - * keypoints Collection of keypoints detected in an input images. keypoints[i] is a set of keypoints detected in an images[i]. - * masks Masks for image set. masks[i] is a mask for images[i]. - */ - void detect( InputArrayOfArrays images, std::vector >& keypoints, InputArrayOfArrays masks=noArray() ) const; - - // Return true if detector object is empty - CV_WRAP virtual bool empty() const; - - // Create feature detector by detector name. - CV_WRAP static Ptr create( const String& detectorType ); - -protected: - virtual void detectImpl( InputArray image, std::vector& keypoints, InputArray mask=noArray() ) const = 0; - - /* - * Remove keypoints that are not in the mask. - * Helper function, useful when wrapping a library call for keypoint detection that - * does not support a mask argument. - */ - static void removeInvalidPoints( const Mat & mask, std::vector& keypoints ); -}; - - -/* - * Abstract base class for computing descriptors for image keypoints. - * - * In this interface we assume a keypoint descriptor can be represented as a - * dense, fixed-dimensional vector of some basic type. Most descriptors used - * in practice follow this pattern, as it makes it very easy to compute - * distances between descriptors. Therefore we represent a collection of - * descriptors as a Mat, where each row is one keypoint descriptor. - */ -class CV_EXPORTS_W DescriptorExtractor : public virtual Algorithm -{ -public: - virtual ~DescriptorExtractor(); + CV_WRAP virtual void detect( InputArray image, + CV_OUT std::vector& keypoints, + InputArray mask=noArray() ); /* * Compute the descriptors for a set of keypoints in an image. @@ -156,62 +116,26 @@ public: * keypoints The input keypoints. Keypoints for which a descriptor cannot be computed are removed. * descriptors Copmputed descriptors. Row i is the descriptor for keypoint i. 
*/ - CV_WRAP void compute( InputArray image, CV_OUT CV_IN_OUT std::vector& keypoints, OutputArray descriptors ) const; + CV_WRAP virtual void compute( InputArray image, + CV_OUT CV_IN_OUT std::vector& keypoints, + OutputArray descriptors ); - /* - * Compute the descriptors for a keypoints collection detected in image collection. - * images Image collection. - * keypoints Input keypoints collection. keypoints[i] is keypoints detected in images[i]. - * Keypoints for which a descriptor cannot be computed are removed. - * descriptors Descriptor collection. descriptors[i] are descriptors computed for set keypoints[i]. - */ - void compute( InputArrayOfArrays images, std::vector >& keypoints, OutputArrayOfArrays descriptors ) const; + /* Detects keypoints and computes the descriptors */ + CV_WRAP virtual void detectAndCompute( InputArray image, InputArray mask, + CV_OUT std::vector& keypoints, + OutputArray descriptors, + bool useProvidedKeypoints=false ); - CV_WRAP virtual int descriptorSize() const = 0; - CV_WRAP virtual int descriptorType() const = 0; - CV_WRAP virtual int defaultNorm() const = 0; + CV_WRAP virtual int descriptorSize() const; + CV_WRAP virtual int descriptorType() const; + CV_WRAP virtual int defaultNorm() const; + // Return true if detector object is empty CV_WRAP virtual bool empty() const; - - CV_WRAP static Ptr create( const String& descriptorExtractorType ); - -protected: - virtual void computeImpl( InputArray image, std::vector& keypoints, OutputArray descriptors ) const = 0; - - /* - * Remove keypoints within borderPixels of an image edge. - */ - static void removeBorderKeypoints( std::vector& keypoints, - Size imageSize, int borderSize ); }; - - -/* - * Abstract base class for simultaneous 2D feature detection descriptor extraction. - */ -class CV_EXPORTS_W Feature2D : public FeatureDetector, public DescriptorExtractor -{ -public: - /* - * Detect keypoints in an image. - * image The image. - * keypoints The detected keypoints. - * mask Mask specifying where to look for keypoints (optional). Must be a char - * matrix with non-zero values in the region of interest. - * useProvidedKeypoints If true, the method will skip the detection phase and will compute - * descriptors for the provided keypoints - */ - CV_WRAP_AS(detectAndCompute) virtual void operator()( InputArray image, InputArray mask, - CV_OUT std::vector& keypoints, - OutputArray descriptors, - bool useProvidedKeypoints=false ) const = 0; - - CV_WRAP void compute( InputArray image, CV_OUT CV_IN_OUT std::vector& keypoints, OutputArray descriptors ) const; - - // Create feature detector and descriptor extractor by name. - CV_WRAP static Ptr create( const String& name ); -}; +typedef Feature2D FeatureDetector; +typedef Feature2D DescriptorExtractor; /*! 
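The hunk above folds FeatureDetector and DescriptorExtractor into a single Feature2D base class, turning detect(), compute() and the new detectAndCompute() into ordinary virtuals, and keeps the old class names as typedefs for source compatibility. Below is a minimal usage sketch of that unified interface; it assumes the static factories introduced further down in this diff (ORB::create is used only as an example) and a synthetic input image, so it illustrates the call pattern rather than forming part of the patch.

#include <opencv2/core.hpp>
#include <opencv2/features2d.hpp>
#include <vector>

int main()
{
    // Synthetic 8-bit image so the sketch stays self-contained.
    cv::Mat img(480, 640, CV_8UC1);
    cv::randu(img, cv::Scalar::all(0), cv::Scalar::all(255));

    // One object now plays both roles: keypoint detector and descriptor extractor.
    cv::Ptr<cv::Feature2D> f2d = cv::ORB::create(); // factory form introduced by this patch

    std::vector<cv::KeyPoint> keypoints;
    cv::Mat descriptors;

    // Two-step form: detect, then compute descriptors for the surviving keypoints.
    f2d->detect(img, keypoints);
    f2d->compute(img, keypoints, descriptors);

    // Single-call form added by this change.
    f2d->detectAndCompute(img, cv::noArray(), keypoints, descriptors);

    // The legacy names still compile: FeatureDetector is now a typedef of Feature2D.
    cv::Ptr<cv::FeatureDetector> detector = f2d;
    detector->detect(img, keypoints);
    return 0;
}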
BRISK implementation @@ -219,94 +143,12 @@ public: class CV_EXPORTS_W BRISK : public Feature2D { public: - CV_WRAP explicit BRISK(int thresh=30, int octaves=3, float patternScale=1.0f); - - virtual ~BRISK(); - - // returns the descriptor size in bytes - int descriptorSize() const; - // returns the descriptor type - int descriptorType() const; - // returns the default norm type - int defaultNorm() const; - - // Compute the BRISK features on an image - void operator()(InputArray image, InputArray mask, std::vector& keypoints) const; - - // Compute the BRISK features and descriptors on an image - void operator()( InputArray image, InputArray mask, std::vector& keypoints, - OutputArray descriptors, bool useProvidedKeypoints=false ) const; - - AlgorithmInfo* info() const; - + CV_WRAP static Ptr create(int thresh=30, int octaves=3, float patternScale=1.0f); // custom setup - CV_WRAP explicit BRISK(std::vector &radiusList, std::vector &numberList, - float dMax=5.85f, float dMin=8.2f, std::vector indexChange=std::vector()); - - // call this to generate the kernel: - // circle of radius r (pixels), with n points; - // short pairings with dMax, long pairings with dMin - CV_WRAP void generateKernel(std::vector &radiusList, - std::vector &numberList, float dMax=5.85f, float dMin=8.2f, - std::vector indexChange=std::vector()); - -protected: - - void computeImpl( InputArray image, std::vector& keypoints, OutputArray descriptors ) const; - void detectImpl( InputArray image, std::vector& keypoints, InputArray mask=noArray() ) const; - - void computeKeypointsNoOrientation(InputArray image, InputArray mask, std::vector& keypoints) const; - void computeDescriptorsAndOrOrientation(InputArray image, InputArray mask, std::vector& keypoints, - OutputArray descriptors, bool doDescriptors, bool doOrientation, - bool useProvidedKeypoints) const; - - // Feature parameters - CV_PROP_RW int threshold; - CV_PROP_RW int octaves; - - // some helper structures for the Brisk pattern representation - struct BriskPatternPoint{ - float x; // x coordinate relative to center - float y; // x coordinate relative to center - float sigma; // Gaussian smoothing sigma - }; - struct BriskShortPair{ - unsigned int i; // index of the first pattern point - unsigned int j; // index of other pattern point - }; - struct BriskLongPair{ - unsigned int i; // index of the first pattern point - unsigned int j; // index of other pattern point - int weighted_dx; // 1024.0/dx - int weighted_dy; // 1024.0/dy - }; - inline int smoothedIntensity(const cv::Mat& image, - const cv::Mat& integral,const float key_x, - const float key_y, const unsigned int scale, - const unsigned int rot, const unsigned int point) const; - // pattern properties - BriskPatternPoint* patternPoints_; //[i][rotation][scale] - unsigned int points_; // total number of collocation points - float* scaleList_; // lists the scaling per scale index [scale] - unsigned int* sizeList_; // lists the total pattern size per scale index [scale] - static const unsigned int scales_; // scales discretization - static const float scalerange_; // span of sizes 40->4 Octaves - else, this needs to be adjusted... 
- static const unsigned int n_rot_; // discretization of the rotation look-up - - // pairs - int strings_; // number of uchars the descriptor consists of - float dMax_; // short pair maximum distance - float dMin_; // long pair maximum distance - BriskShortPair* shortPairs_; // d<_dMax - BriskLongPair* longPairs_; // d>_dMin - unsigned int noShortPairs_; // number of shortParis - unsigned int noLongPairs_; // number of longParis - - // general - static const float basicSize_; + CV_WRAP static Ptr create(const std::vector &radiusList, const std::vector &numberList, + float dMax=5.85f, float dMin=8.2f, const std::vector indexChange=std::vector()); }; - /*! ORB implementation. */ @@ -316,44 +158,10 @@ public: // the size of the signature in bytes enum { kBytes = 32, HARRIS_SCORE=0, FAST_SCORE=1 }; - CV_WRAP explicit ORB(int nfeatures = 500, float scaleFactor = 1.2f, int nlevels = 8, int edgeThreshold = 31, + CV_WRAP static Ptr create(int nfeatures = 500, float scaleFactor = 1.2f, int nlevels = 8, int edgeThreshold = 31, int firstLevel = 0, int WTA_K=2, int scoreType=ORB::HARRIS_SCORE, int patchSize=31, int fastThreshold = 20); - - // returns the descriptor size in bytes - int descriptorSize() const; - // returns the descriptor type - int descriptorType() const; - // returns the default norm type - int defaultNorm() const; - - // Compute the ORB features and descriptors on an image - void operator()(InputArray image, InputArray mask, std::vector& keypoints) const; - - // Compute the ORB features and descriptors on an image - void operator()( InputArray image, InputArray mask, std::vector& keypoints, - OutputArray descriptors, bool useProvidedKeypoints=false ) const; - - AlgorithmInfo* info() const; - -protected: - - void computeImpl( InputArray image, std::vector& keypoints, OutputArray descriptors ) const; - void detectImpl( InputArray image, std::vector& keypoints, InputArray mask=noArray() ) const; - - CV_PROP_RW int nfeatures; - CV_PROP_RW double scaleFactor; - CV_PROP_RW int nlevels; - CV_PROP_RW int edgeThreshold; - CV_PROP_RW int firstLevel; - CV_PROP_RW int WTA_K; - CV_PROP_RW int scoreType; - CV_PROP_RW int patchSize; - CV_PROP_RW int fastThreshold; }; -typedef ORB OrbFeatureDetector; -typedef ORB OrbDescriptorExtractor; - /*! Maximal Stable Extremal Regions class. @@ -363,36 +171,19 @@ typedef ORB OrbDescriptorExtractor; It returns the regions, each of those is encoded as a contour. */ -class CV_EXPORTS_W MSER : public FeatureDetector +class CV_EXPORTS_W MSER : public Feature2D { public: //! the full constructor - CV_WRAP explicit MSER( int _delta=5, int _min_area=60, int _max_area=14400, + CV_WRAP static Ptr create( int _delta=5, int _min_area=60, int _max_area=14400, double _max_variation=0.25, double _min_diversity=.2, int _max_evolution=200, double _area_threshold=1.01, double _min_margin=0.003, int _edge_blur_size=5 ); - //! 
the operator that extracts the MSERs from the image or the specific part of it - CV_WRAP_AS(detect) void operator()( InputArray image, CV_OUT std::vector >& msers, - InputArray mask=noArray() ) const; - AlgorithmInfo* info() const; - -protected: - void detectImpl( InputArray image, std::vector& keypoints, InputArray mask=noArray() ) const; - - int delta; - int minArea; - int maxArea; - double maxVariation; - double minDiversity; - int maxEvolution; - double areaThreshold; - double minMargin; - int edgeBlurSize; + CV_WRAP virtual int detectAndLabel( InputArray image, OutputArray label, + OutputArray stats=noArray() ) const = 0; }; -typedef MSER MserFeatureDetector; - //! detects corners using FAST algorithm by E. Rosten CV_EXPORTS void FAST( InputArray image, CV_OUT std::vector& keypoints, int threshold, bool nonmaxSuppression=true ); @@ -400,48 +191,27 @@ CV_EXPORTS void FAST( InputArray image, CV_OUT std::vector& keypoints, CV_EXPORTS void FAST( InputArray image, CV_OUT std::vector& keypoints, int threshold, bool nonmaxSuppression, int type ); -class CV_EXPORTS_W FastFeatureDetector : public FeatureDetector +class CV_EXPORTS_W FastFeatureDetector : public Feature2D { public: enum Type { - TYPE_5_8 = 0, TYPE_7_12 = 1, TYPE_9_16 = 2 + TYPE_5_8 = 0, TYPE_7_12 = 1, TYPE_9_16 = 2 }; - CV_WRAP FastFeatureDetector( int threshold=10, bool nonmaxSuppression=true); - CV_WRAP FastFeatureDetector( int threshold, bool nonmaxSuppression, int type); - AlgorithmInfo* info() const; - -protected: - virtual void detectImpl( InputArray image, std::vector& keypoints, InputArray mask=noArray() ) const; - - int threshold; - bool nonmaxSuppression; - int type; + CV_WRAP static Ptr create( int threshold=10, bool nonmaxSuppression=true, int type=TYPE_9_16 ); }; -class CV_EXPORTS_W GFTTDetector : public FeatureDetector +class CV_EXPORTS_W GFTTDetector : public Feature2D { public: - CV_WRAP GFTTDetector( int maxCorners=1000, double qualityLevel=0.01, double minDistance=1, - int blockSize=3, bool useHarrisDetector=false, double k=0.04 ); - AlgorithmInfo* info() const; - -protected: - virtual void detectImpl( InputArray image, std::vector& keypoints, InputArray mask=noArray() ) const; - - int nfeatures; - double qualityLevel; - double minDistance; - int blockSize; - bool useHarrisDetector; - double k; + CV_WRAP static Ptr create( int maxCorners=1000, double qualityLevel=0.01, double minDistance=1, + int blockSize=3, bool useHarrisDetector=false, double k=0.04 ); }; -typedef GFTTDetector GoodFeaturesToTrackDetector; -class CV_EXPORTS_W SimpleBlobDetector : public FeatureDetector +class CV_EXPORTS_W SimpleBlobDetector : public Feature2D { public: struct CV_EXPORTS_W_SIMPLE Params @@ -472,81 +242,29 @@ public: void write( FileStorage& fs ) const; }; - CV_WRAP SimpleBlobDetector(const SimpleBlobDetector::Params ¶meters = SimpleBlobDetector::Params()); - - virtual void read( const FileNode& fn ); - virtual void write( FileStorage& fs ) const; - -protected: - struct CV_EXPORTS Center - { - Point2d location; - double radius; - double confidence; - }; - - virtual void detectImpl( InputArray image, std::vector& keypoints, InputArray mask=noArray() ) const; - virtual void findBlobs(InputArray image, InputArray binaryImage, std::vector
¢ers) const; - - Params params; - AlgorithmInfo* info() const; + CV_WRAP static Ptr + create(const SimpleBlobDetector::Params ¶meters = SimpleBlobDetector::Params()); }; -// KAZE/AKAZE diffusivity -enum { - DIFF_PM_G1 = 0, - DIFF_PM_G2 = 1, - DIFF_WEICKERT = 2, - DIFF_CHARBONNIER = 3 -}; - -// AKAZE descriptor type -enum { - DESCRIPTOR_KAZE_UPRIGHT = 2, ///< Upright descriptors, not invariant to rotation - DESCRIPTOR_KAZE = 3, - DESCRIPTOR_MLDB_UPRIGHT = 4, ///< Upright descriptors, not invariant to rotation - DESCRIPTOR_MLDB = 5 -}; - /*! KAZE implementation */ class CV_EXPORTS_W KAZE : public Feature2D { public: - CV_WRAP KAZE(); - CV_WRAP explicit KAZE(bool extended, bool upright, float threshold = 0.001f, - int octaves = 4, int sublevels = 4, int diffusivity = DIFF_PM_G2); - - virtual ~KAZE(); - - // returns the descriptor size in bytes - int descriptorSize() const; - // returns the descriptor type - int descriptorType() const; - // returns the default norm type - int defaultNorm() const; - - AlgorithmInfo* info() const; - - // Compute the KAZE features on an image - void operator()(InputArray image, InputArray mask, std::vector& keypoints) const; - - // Compute the KAZE features and descriptors on an image - void operator()(InputArray image, InputArray mask, std::vector& keypoints, - OutputArray descriptors, bool useProvidedKeypoints = false) const; + enum + { + DIFF_PM_G1 = 0, + DIFF_PM_G2 = 1, + DIFF_WEICKERT = 2, + DIFF_CHARBONNIER = 3 + }; -protected: - void detectImpl(InputArray image, std::vector& keypoints, InputArray mask) const; - void computeImpl(InputArray image, std::vector& keypoints, OutputArray descriptors) const; - - CV_PROP bool extended; - CV_PROP bool upright; - CV_PROP float threshold; - CV_PROP int octaves; - CV_PROP int sublevels; - CV_PROP int diffusivity; + CV_WRAP static Ptr create(bool extended=false, bool upright=false, + float threshold = 0.001f, + int octaves = 4, int sublevels = 4, + int diffusivity = KAZE::DIFF_PM_G2); }; /*! 
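At this point KAZE exposes only a static create() factory, and the diffusivity constants that previously sat at namespace scope (DIFF_PM_G1, DIFF_PM_G2, ...) are nested inside the class. A rough sketch of the resulting call pattern is below, with the default parameter values from the declaration above spelled out explicitly; the helper function and its arguments are illustrative only.

#include <opencv2/features2d.hpp>
#include <vector>

// Illustrative wrapper: 'image' is any 8-bit grayscale or BGR input supplied by the caller.
void runKaze(const cv::Mat& image, std::vector<cv::KeyPoint>& keypoints, cv::Mat& descriptors)
{
    // No public constructor any more; the enum is now scoped as KAZE::DIFF_PM_G2.
    cv::Ptr<cv::KAZE> kaze = cv::KAZE::create(
        false,                 // extended: false -> 64-float descriptor, true -> 128
        false,                 // upright: false keeps rotation invariance
        0.001f,                // detector response threshold
        4, 4,                  // octaves, sublevels
        cv::KAZE::DIFF_PM_G2); // diffusivity, class-scoped after this change
    kaze->detectAndCompute(image, cv::noArray(), keypoints, descriptors);
}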
@@ -555,41 +273,21 @@ AKAZE implementation class CV_EXPORTS_W AKAZE : public Feature2D { public: - CV_WRAP AKAZE(); - CV_WRAP explicit AKAZE(int descriptor_type, int descriptor_size = 0, int descriptor_channels = 3, - float threshold = 0.001f, int octaves = 4, int sublevels = 4, int diffusivity = DIFF_PM_G2); - - virtual ~AKAZE(); - - // returns the descriptor size in bytes - int descriptorSize() const; - // returns the descriptor type - int descriptorType() const; - // returns the default norm type - int defaultNorm() const; - - // Compute the AKAZE features on an image - void operator()(InputArray image, InputArray mask, std::vector& keypoints) const; - - // Compute the AKAZE features and descriptors on an image - void operator()(InputArray image, InputArray mask, std::vector& keypoints, - OutputArray descriptors, bool useProvidedKeypoints = false) const; - - AlgorithmInfo* info() const; - -protected: - - void computeImpl(InputArray image, std::vector& keypoints, OutputArray descriptors) const; - void detectImpl(InputArray image, std::vector& keypoints, InputArray mask = noArray()) const; + // AKAZE descriptor type + enum + { + DESCRIPTOR_KAZE_UPRIGHT = 2, ///< Upright descriptors, not invariant to rotation + DESCRIPTOR_KAZE = 3, + DESCRIPTOR_MLDB_UPRIGHT = 4, ///< Upright descriptors, not invariant to rotation + DESCRIPTOR_MLDB = 5 + }; - CV_PROP int descriptor; - CV_PROP int descriptor_channels; - CV_PROP int descriptor_size; - CV_PROP float threshold; - CV_PROP int octaves; - CV_PROP int sublevels; - CV_PROP int diffusivity; + CV_WRAP static Ptr create(int descriptor_type=DESCRIPTOR_MLDB, + int descriptor_size = 0, int descriptor_channels = 3, + float threshold = 0.001f, int octaves = 4, + int sublevels = 4, int diffusivity = KAZE::DIFF_PM_G2); }; + /****************************************************************************************\ * Distance * \****************************************************************************************/ diff --git a/modules/features2d/src/blobdetector.cpp b/modules/features2d/src/blobdetector.cpp index 69e058555d..b9511e9acb 100644 --- a/modules/features2d/src/blobdetector.cpp +++ b/modules/features2d/src/blobdetector.cpp @@ -55,7 +55,32 @@ # endif #endif -using namespace cv; +namespace cv +{ + +class CV_EXPORTS_W SimpleBlobDetectorImpl : public SimpleBlobDetector +{ +public: + + explicit SimpleBlobDetectorImpl(const SimpleBlobDetector::Params ¶meters = SimpleBlobDetector::Params()); + + virtual void read( const FileNode& fn ); + virtual void write( FileStorage& fs ) const; + +protected: + struct CV_EXPORTS Center + { + Point2d location; + double radius; + double confidence; + }; + + virtual void detectImpl( InputArray image, std::vector& keypoints, InputArray mask=noArray() ) const; + virtual void findBlobs(InputArray image, InputArray binaryImage, std::vector
¢ers) const; + + Params params; + AlgorithmInfo* info() const; +}; /* * SimpleBlobDetector diff --git a/modules/features2d/src/brisk.cpp b/modules/features2d/src/brisk.cpp index eb832d6403..3163de70e0 100644 --- a/modules/features2d/src/brisk.cpp +++ b/modules/features2d/src/brisk.cpp @@ -53,6 +53,79 @@ namespace cv { + +class BRISK_Impl : public BRISK +{ +public: + explicit BRISK_Impl(int thresh=30, int octaves=3, float patternScale=1.0f); + // custom setup + explicit BRISK_Impl(const std::vector &radiusList, const std::vector &numberList, + float dMax=5.85f, float dMin=8.2f, const std::vector indexChange=std::vector()); + + // call this to generate the kernel: + // circle of radius r (pixels), with n points; + // short pairings with dMax, long pairings with dMin + void generateKernel(std::vector &radiusList, + std::vector &numberList, float dMax=5.85f, float dMin=8.2f, + std::vector indexChange=std::vector()); + +protected: + + void computeImpl( InputArray image, std::vector& keypoints, OutputArray descriptors ) const; + void detectImpl( InputArray image, std::vector& keypoints, InputArray mask=noArray() ) const; + + void computeKeypointsNoOrientation(InputArray image, InputArray mask, std::vector& keypoints) const; + void computeDescriptorsAndOrOrientation(InputArray image, InputArray mask, std::vector& keypoints, + OutputArray descriptors, bool doDescriptors, bool doOrientation, + bool useProvidedKeypoints) const; + + // Feature parameters + CV_PROP_RW int threshold; + CV_PROP_RW int octaves; + + // some helper structures for the Brisk pattern representation + struct BriskPatternPoint{ + float x; // x coordinate relative to center + float y; // x coordinate relative to center + float sigma; // Gaussian smoothing sigma + }; + struct BriskShortPair{ + unsigned int i; // index of the first pattern point + unsigned int j; // index of other pattern point + }; + struct BriskLongPair{ + unsigned int i; // index of the first pattern point + unsigned int j; // index of other pattern point + int weighted_dx; // 1024.0/dx + int weighted_dy; // 1024.0/dy + }; + inline int smoothedIntensity(const cv::Mat& image, + const cv::Mat& integral,const float key_x, + const float key_y, const unsigned int scale, + const unsigned int rot, const unsigned int point) const; + // pattern properties + BriskPatternPoint* patternPoints_; //[i][rotation][scale] + unsigned int points_; // total number of collocation points + float* scaleList_; // lists the scaling per scale index [scale] + unsigned int* sizeList_; // lists the total pattern size per scale index [scale] + static const unsigned int scales_; // scales discretization + static const float scalerange_; // span of sizes 40->4 Octaves - else, this needs to be adjusted... 
+ static const unsigned int n_rot_; // discretization of the rotation look-up + + // pairs + int strings_; // number of uchars the descriptor consists of + float dMax_; // short pair maximum distance + float dMin_; // long pair maximum distance + BriskShortPair* shortPairs_; // d<_dMax + BriskLongPair* longPairs_; // d>_dMin + unsigned int noShortPairs_; // number of shortParis + unsigned int noLongPairs_; // number of longParis + + // general + static const float basicSize_; +}; + + // a layer in the Brisk detector pyramid class CV_EXPORTS BriskLayer { diff --git a/modules/features2d/src/descriptors.cpp b/modules/features2d/src/descriptors.cpp deleted file mode 100644 index 23d9fbbc9b..0000000000 --- a/modules/features2d/src/descriptors.cpp +++ /dev/null @@ -1,110 +0,0 @@ -/*M/////////////////////////////////////////////////////////////////////////////////////// -// -// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. -// -// By downloading, copying, installing or using the software you agree to this license. -// If you do not agree to this license, do not download, install, -// copy or use the software. -// -// -// Intel License Agreement -// For Open Source Computer Vision Library -// -// Copyright (C) 2000, Intel Corporation, all rights reserved. -// Third party copyrights are property of their respective owners. -// -// Redistribution and use in source and binary forms, with or without modification, -// are permitted provided that the following conditions are met: -// -// * Redistribution's of source code must retain the above copyright notice, -// this list of conditions and the following disclaimer. -// -// * Redistribution's in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// * The name of Intel Corporation may not be used to endorse or promote products -// derived from this software without specific prior written permission. -// -// This software is provided by the copyright holders and contributors "as is" and -// any express or implied warranties, including, but not limited to, the implied -// warranties of merchantability and fitness for a particular purpose are disclaimed. -// In no event shall the Intel Corporation or contributors be liable for any direct, -// indirect, incidental, special, exemplary, or consequential damages -// (including, but not limited to, procurement of substitute goods or services; -// loss of use, data, or profits; or business interruption) however caused -// and on any theory of liability, whether in contract, strict liability, -// or tort (including negligence or otherwise) arising in any way out of -// the use of this software, even if advised of the possibility of such damage. 
-// -//M*/ - -#include "precomp.hpp" -#include - -namespace cv -{ - -/****************************************************************************************\ -* DescriptorExtractor * -\****************************************************************************************/ -/* - * DescriptorExtractor - */ -DescriptorExtractor::~DescriptorExtractor() -{} - -void DescriptorExtractor::compute( InputArray image, std::vector& keypoints, OutputArray descriptors ) const -{ - if( image.empty() || keypoints.empty() ) - { - descriptors.release(); - return; - } - - KeyPointsFilter::runByImageBorder( keypoints, image.size(), 0 ); - KeyPointsFilter::runByKeypointSize( keypoints, std::numeric_limits::epsilon() ); - - computeImpl( image, keypoints, descriptors ); -} - -void DescriptorExtractor::compute( InputArrayOfArrays _imageCollection, std::vector >& pointCollection, OutputArrayOfArrays _descCollection ) const -{ - std::vector imageCollection, descCollection; - _imageCollection.getMatVector(imageCollection); - _descCollection.getMatVector(descCollection); - CV_Assert( imageCollection.size() == pointCollection.size() ); - descCollection.resize( imageCollection.size() ); - for( size_t i = 0; i < imageCollection.size(); i++ ) - compute( imageCollection[i], pointCollection[i], descCollection[i] ); -} - -/*void DescriptorExtractor::read( const FileNode& ) -{} - -void DescriptorExtractor::write( FileStorage& ) const -{}*/ - -bool DescriptorExtractor::empty() const -{ - return false; -} - -void DescriptorExtractor::removeBorderKeypoints( std::vector& keypoints, - Size imageSize, int borderSize ) -{ - KeyPointsFilter::runByImageBorder( keypoints, imageSize, borderSize ); -} - -Ptr DescriptorExtractor::create(const String& descriptorExtractorType) -{ - return Algorithm::create("Feature2D." + descriptorExtractorType); -} - - -CV_WRAP void Feature2D::compute( InputArray image, CV_OUT CV_IN_OUT std::vector& keypoints, OutputArray descriptors ) const -{ - DescriptorExtractor::compute(image, keypoints, descriptors); -} - -} diff --git a/modules/features2d/src/detectors.cpp b/modules/features2d/src/detectors.cpp index 866d24d106..899e2f274a 100644 --- a/modules/features2d/src/detectors.cpp +++ b/modules/features2d/src/detectors.cpp @@ -44,118 +44,65 @@ namespace cv { -/* - * FeatureDetector - */ - -FeatureDetector::~FeatureDetector() -{} - -void FeatureDetector::detect( InputArray image, std::vector& keypoints, InputArray mask ) const +class GFTTDetector_Impl : public GFTTDetector { - keypoints.clear(); - - if( image.empty() ) - return; - - CV_Assert( mask.empty() || (mask.type() == CV_8UC1 && mask.size() == image.size()) ); - - detectImpl( image, keypoints, mask ); -} - -void FeatureDetector::detect(InputArrayOfArrays _imageCollection, std::vector >& pointCollection, - InputArrayOfArrays _masks ) const -{ - if (_imageCollection.isUMatVector()) +public: + GFTTDetector_Impl( int _nfeatures, double _qualityLevel, + double _minDistance, int _blockSize, + bool _useHarrisDetector, double _k ) + : nfeatures(_nfeatures), qualityLevel(_qualityLevel), minDistance(_minDistance), + blockSize(_blockSize), useHarrisDetector(_useHarrisDetector), k(_k) { - std::vector uimageCollection, umasks; - _imageCollection.getUMatVector(uimageCollection); - _masks.getUMatVector(umasks); - - pointCollection.resize( uimageCollection.size() ); - for( size_t i = 0; i < uimageCollection.size(); i++ ) - detect( uimageCollection[i], pointCollection[i], umasks.empty() ? 
noArray() : umasks[i] ); - - return; } - std::vector imageCollection, masks; - _imageCollection.getMatVector(imageCollection); - _masks.getMatVector(masks); - - pointCollection.resize( imageCollection.size() ); - for( size_t i = 0; i < imageCollection.size(); i++ ) - detect( imageCollection[i], pointCollection[i], masks.empty() ? noArray() : masks[i] ); -} - -/*void FeatureDetector::read( const FileNode& ) -{} - -void FeatureDetector::write( FileStorage& ) const -{}*/ - -bool FeatureDetector::empty() const -{ - return false; -} - -void FeatureDetector::removeInvalidPoints( const Mat& mask, std::vector& keypoints ) -{ - KeyPointsFilter::runByPixelsMask( keypoints, mask ); -} - -Ptr FeatureDetector::create( const String& detectorType ) -{ - if( detectorType.compare( "HARRIS" ) == 0 ) + void detect( InputArray _image, std::vector& keypoints, InputArray _mask ) { - Ptr fd = FeatureDetector::create("GFTT"); - fd->set("useHarrisDetector", true); - return fd; - } - - return Algorithm::create("Feature2D." + detectorType); -} - - -GFTTDetector::GFTTDetector( int _nfeatures, double _qualityLevel, - double _minDistance, int _blockSize, - bool _useHarrisDetector, double _k ) - : nfeatures(_nfeatures), qualityLevel(_qualityLevel), minDistance(_minDistance), - blockSize(_blockSize), useHarrisDetector(_useHarrisDetector), k(_k) -{ -} + std::vector corners; + + if (_image.isUMat()) + { + UMat ugrayImage; + if( _image.type() != CV_8U ) + cvtColor( _image, ugrayImage, COLOR_BGR2GRAY ); + else + ugrayImage = _image.getUMat(); + + goodFeaturesToTrack( ugrayImage, corners, nfeatures, qualityLevel, minDistance, _mask, + blockSize, useHarrisDetector, k ); + } + else + { + Mat image = _image.getMat(), grayImage = image; + if( image.type() != CV_8U ) + cvtColor( image, grayImage, COLOR_BGR2GRAY ); -void GFTTDetector::detectImpl( InputArray _image, std::vector& keypoints, InputArray _mask) const -{ - std::vector corners; + goodFeaturesToTrack( grayImage, corners, nfeatures, qualityLevel, minDistance, _mask, + blockSize, useHarrisDetector, k ); + } - if (_image.isUMat()) - { - UMat ugrayImage; - if( _image.type() != CV_8U ) - cvtColor( _image, ugrayImage, COLOR_BGR2GRAY ); - else - ugrayImage = _image.getUMat(); + keypoints.resize(corners.size()); + std::vector::const_iterator corner_it = corners.begin(); + std::vector::iterator keypoint_it = keypoints.begin(); + for( ; corner_it != corners.end(); ++corner_it, ++keypoint_it ) + *keypoint_it = KeyPoint( *corner_it, (float)blockSize ); - goodFeaturesToTrack( ugrayImage, corners, nfeatures, qualityLevel, minDistance, _mask, - blockSize, useHarrisDetector, k ); } - else - { - Mat image = _image.getMat(), grayImage = image; - if( image.type() != CV_8U ) - cvtColor( image, grayImage, COLOR_BGR2GRAY ); - goodFeaturesToTrack( grayImage, corners, nfeatures, qualityLevel, minDistance, _mask, - blockSize, useHarrisDetector, k ); - } + int nfeatures; + double qualityLevel; + double minDistance; + int blockSize; + bool useHarrisDetector; + double k; +}; - keypoints.resize(corners.size()); - std::vector::const_iterator corner_it = corners.begin(); - std::vector::iterator keypoint_it = keypoints.begin(); - for( ; corner_it != corners.end(); ++corner_it, ++keypoint_it ) - *keypoint_it = KeyPoint( *corner_it, (float)blockSize ); +Ptr GFTTDetector::create( int _nfeatures, double _qualityLevel, + double _minDistance, int _blockSize, + bool _useHarrisDetector, double _k ) +{ + return makePtr(_nfeatures, _qualityLevel, + _minDistance, _blockSize, _useHarrisDetector, _k); } } diff --git 
a/modules/features2d/src/fast.cpp b/modules/features2d/src/fast.cpp index 22ec0318d1..bfb9246041 100644 --- a/modules/features2d/src/fast.cpp +++ b/modules/features2d/src/fast.cpp @@ -359,30 +359,39 @@ void FAST(InputArray _img, std::vector& keypoints, int threshold, bool { FAST(_img, keypoints, threshold, nonmax_suppression, FastFeatureDetector::TYPE_9_16); } -/* - * FastFeatureDetector - */ -FastFeatureDetector::FastFeatureDetector( int _threshold, bool _nonmaxSuppression ) - : threshold(_threshold), nonmaxSuppression(_nonmaxSuppression), type(FastFeatureDetector::TYPE_9_16) -{} -FastFeatureDetector::FastFeatureDetector( int _threshold, bool _nonmaxSuppression, int _type ) -: threshold(_threshold), nonmaxSuppression(_nonmaxSuppression), type((short)_type) -{} -void FastFeatureDetector::detectImpl( InputArray _image, std::vector& keypoints, InputArray _mask ) const +class FastFeatureDetector_Impl : public FastFeatureDetector { - Mat mask = _mask.getMat(), grayImage; - UMat ugrayImage; - _InputArray gray = _image; - if( _image.type() != CV_8U ) +public: + FastFeatureDetector_Impl( int _threshold, bool _nonmaxSuppression, int _type ) + : threshold(_threshold), nonmaxSuppression(_nonmaxSuppression), type((short)_type) + {} + + void detect( InputArray _image, std::vector& keypoints, InputArray _mask ) { - _OutputArray ogray = _image.isUMat() ? _OutputArray(ugrayImage) : _OutputArray(grayImage); - cvtColor( _image, ogray, COLOR_BGR2GRAY ); - gray = ogray; + Mat mask = _mask.getMat(), grayImage; + UMat ugrayImage; + _InputArray gray = _image; + if( _image.type() != CV_8U ) + { + _OutputArray ogray = _image.isUMat() ? _OutputArray(ugrayImage) : _OutputArray(grayImage); + cvtColor( _image, ogray, COLOR_BGR2GRAY ); + gray = ogray; + } + FAST( gray, keypoints, threshold, nonmaxSuppression, type ); + KeyPointsFilter::runByPixelsMask( keypoints, mask ); } - FAST( gray, keypoints, threshold, nonmaxSuppression, type ); - KeyPointsFilter::runByPixelsMask( keypoints, mask ); + + int threshold; + bool nonmaxSuppression; + int type; +}; + +Ptr FastFeatureDetector::create( int threshold, bool nonmaxSuppression, int type ) +{ + return makePtr(threshold, nonmaxSuppression, type); } + } diff --git a/modules/features2d/src/features2d_init.cpp b/modules/features2d/src/features2d_init.cpp index 31ca564e49..b23a8899b1 100644 --- a/modules/features2d/src/features2d_init.cpp +++ b/modules/features2d/src/features2d_init.cpp @@ -42,6 +42,8 @@ #include "precomp.hpp" +#if 0 + using namespace cv; Ptr Feature2D::create( const String& feature2DType ) @@ -193,3 +195,5 @@ bool cv::initModule_features2d(void) return all; } + +#endif diff --git a/modules/features2d/src/kaze.cpp b/modules/features2d/src/kaze.cpp index e4ddbe4451..d16e988f6f 100644 --- a/modules/features2d/src/kaze.cpp +++ b/modules/features2d/src/kaze.cpp @@ -52,153 +52,93 @@ http://www.robesafe.com/personal/pablo.alcantarilla/papers/Alcantarilla12eccv.pd namespace cv { - KAZE::KAZE() - : extended(false) - , upright(false) - , threshold(0.001f) - , octaves(4) - , sublevels(4) - , diffusivity(DIFF_PM_G2) - { - } - KAZE::KAZE(bool _extended, bool _upright, float _threshold, int _octaves, - int _sublevels, int _diffusivity) + class KAZE_Impl : public KAZE + { + public: + KAZE_Impl(bool _extended, bool _upright, float _threshold, int _octaves, + int _sublevels, int _diffusivity) : extended(_extended) , upright(_upright) , threshold(_threshold) , octaves(_octaves) , sublevels(_sublevels) , diffusivity(_diffusivity) - { - - } - KAZE::~KAZE() - { - - } - - // returns 
the descriptor size in bytes - int KAZE::descriptorSize() const - { - return extended ? 128 : 64; - } - - // returns the descriptor type - int KAZE::descriptorType() const - { - return CV_32F; - } - - // returns the default norm type - int KAZE::defaultNorm() const - { - return NORM_L2; - } - - void KAZE::operator()(InputArray image, InputArray mask, std::vector& keypoints) const - { - detectImpl(image, keypoints, mask); - } - - void KAZE::operator()(InputArray image, InputArray mask, - std::vector& keypoints, - OutputArray descriptors, - bool useProvidedKeypoints) const - { - cv::Mat img = image.getMat(); - if (img.type() != CV_8UC1) - cvtColor(image, img, COLOR_BGR2GRAY); - - Mat img1_32; - img.convertTo(img1_32, CV_32F, 1.0 / 255.0, 0); - - cv::Mat& desc = descriptors.getMatRef(); - - KAZEOptions options; - options.img_width = img.cols; - options.img_height = img.rows; - options.extended = extended; - options.upright = upright; - options.dthreshold = threshold; - options.omax = octaves; - options.nsublevels = sublevels; - options.diffusivity = diffusivity; + { + } - KAZEFeatures impl(options); - impl.Create_Nonlinear_Scale_Space(img1_32); + virtual ~KAZE_Impl() {} - if (!useProvidedKeypoints) + // returns the descriptor size in bytes + int descriptorSize() const { - impl.Feature_Detection(keypoints); + return extended ? 128 : 64; } - if (!mask.empty()) + // returns the descriptor type + int descriptorType() const { - cv::KeyPointsFilter::runByPixelsMask(keypoints, mask.getMat()); + return CV_32F; } - impl.Feature_Description(keypoints, desc); - - CV_Assert((!desc.rows || desc.cols == descriptorSize())); - CV_Assert((!desc.rows || (desc.type() == descriptorType()))); - } + // returns the default norm type + int defaultNorm() const + { + return NORM_L2; + } - void KAZE::detectImpl(InputArray image, std::vector& keypoints, InputArray mask) const - { - Mat img = image.getMat(); - if (img.type() != CV_8UC1) - cvtColor(image, img, COLOR_BGR2GRAY); - - Mat img1_32; - img.convertTo(img1_32, CV_32F, 1.0 / 255.0, 0); - - KAZEOptions options; - options.img_width = img.cols; - options.img_height = img.rows; - options.extended = extended; - options.upright = upright; - options.dthreshold = threshold; - options.omax = octaves; - options.nsublevels = sublevels; - options.diffusivity = diffusivity; - - KAZEFeatures impl(options); - impl.Create_Nonlinear_Scale_Space(img1_32); - impl.Feature_Detection(keypoints); - - if (!mask.empty()) + void detectAndCompute(InputArray image, InputArray mask, + std::vector& keypoints, + OutputArray descriptors, + bool useProvidedKeypoints) { - cv::KeyPointsFilter::runByPixelsMask(keypoints, mask.getMat()); + cv::Mat img = image.getMat(); + if (img.type() != CV_8UC1) + cvtColor(image, img, COLOR_BGR2GRAY); + + Mat img1_32; + img.convertTo(img1_32, CV_32F, 1.0 / 255.0, 0); + + KAZEOptions options; + options.img_width = img.cols; + options.img_height = img.rows; + options.extended = extended; + options.upright = upright; + options.dthreshold = threshold; + options.omax = octaves; + options.nsublevels = sublevels; + options.diffusivity = diffusivity; + + KAZEFeatures impl(options); + impl.Create_Nonlinear_Scale_Space(img1_32); + + if (!useProvidedKeypoints) + { + impl.Feature_Detection(keypoints); + } + + if (!mask.empty()) + { + cv::KeyPointsFilter::runByPixelsMask(keypoints, mask.getMat()); + } + + if( descriptors.needed() ) + { + Mat& desc = descriptors.getMatRef(); + impl.Feature_Description(keypoints, desc); + + CV_Assert((!desc.rows || desc.cols == descriptorSize())); 
+ CV_Assert((!desc.rows || (desc.type() == descriptorType()))); + } } - } - void KAZE::computeImpl(InputArray image, std::vector& keypoints, OutputArray descriptors) const - { - cv::Mat img = image.getMat(); - if (img.type() != CV_8UC1) - cvtColor(image, img, COLOR_BGR2GRAY); - - Mat img1_32; - img.convertTo(img1_32, CV_32F, 1.0 / 255.0, 0); - - cv::Mat& desc = descriptors.getMatRef(); - - KAZEOptions options; - options.img_width = img.cols; - options.img_height = img.rows; - options.extended = extended; - options.upright = upright; - options.dthreshold = threshold; - options.omax = octaves; - options.nsublevels = sublevels; - options.diffusivity = diffusivity; - - KAZEFeatures impl(options); - impl.Create_Nonlinear_Scale_Space(img1_32); - impl.Feature_Description(keypoints, desc); - - CV_Assert((!desc.rows || desc.cols == descriptorSize())); - CV_Assert((!desc.rows || (desc.type() == descriptorType()))); - } + bool extended; + bool upright; + float threshold; + int octaves; + int sublevels; + int diffusivity; + }; + + } diff --git a/modules/features2d/src/kaze/AKAZEConfig.h b/modules/features2d/src/kaze/AKAZEConfig.h index e2ba51c531..2ea21f3702 100644 --- a/modules/features2d/src/kaze/AKAZEConfig.h +++ b/modules/features2d/src/kaze/AKAZEConfig.h @@ -8,23 +8,8 @@ #ifndef __OPENCV_FEATURES_2D_AKAZE_CONFIG_H__ #define __OPENCV_FEATURES_2D_AKAZE_CONFIG_H__ -/* ************************************************************************* */ -// OpenCV -#include "../precomp.hpp" -#include - -/* ************************************************************************* */ -/// Lookup table for 2d gaussian (sigma = 2.5) where (0,0) is top left and (6,6) is bottom right -const float gauss25[7][7] = { - { 0.02546481f, 0.02350698f, 0.01849125f, 0.01239505f, 0.00708017f, 0.00344629f, 0.00142946f }, - { 0.02350698f, 0.02169968f, 0.01706957f, 0.01144208f, 0.00653582f, 0.00318132f, 0.00131956f }, - { 0.01849125f, 0.01706957f, 0.01342740f, 0.00900066f, 0.00514126f, 0.00250252f, 0.00103800f }, - { 0.01239505f, 0.01144208f, 0.00900066f, 0.00603332f, 0.00344629f, 0.00167749f, 0.00069579f }, - { 0.00708017f, 0.00653582f, 0.00514126f, 0.00344629f, 0.00196855f, 0.00095820f, 0.00039744f }, - { 0.00344629f, 0.00318132f, 0.00250252f, 0.00167749f, 0.00095820f, 0.00046640f, 0.00019346f }, - { 0.00142946f, 0.00131956f, 0.00103800f, 0.00069579f, 0.00039744f, 0.00019346f, 0.00008024f } -}; - +namespace cv +{ /* ************************************************************************* */ /// AKAZE configuration options structure struct AKAZEOptions { @@ -75,4 +60,6 @@ struct AKAZEOptions { int kcontrast_nbins; ///< Number of bins for the contrast factor histogram }; +} + #endif diff --git a/modules/features2d/src/kaze/AKAZEFeatures.cpp b/modules/features2d/src/kaze/AKAZEFeatures.cpp index 72569dad92..59e260fc4d 100644 --- a/modules/features2d/src/kaze/AKAZEFeatures.cpp +++ b/modules/features2d/src/kaze/AKAZEFeatures.cpp @@ -6,6 +6,7 @@ * @author Pablo F. 
Alcantarilla, Jesus Nuevo */ +#include "../precomp.hpp" #include "AKAZEFeatures.h" #include "fed.h" #include "nldiffusion_functions.h" @@ -14,9 +15,9 @@ #include // Namespaces +namespace cv +{ using namespace std; -using namespace cv; -using namespace cv::details::kaze; /* ************************************************************************* */ /** @@ -29,7 +30,7 @@ AKAZEFeatures::AKAZEFeatures(const AKAZEOptions& options) : options_(options) { ncycles_ = 0; reordering_ = true; - if (options_.descriptor_size > 0 && options_.descriptor >= cv::DESCRIPTOR_MLDB_UPRIGHT) { + if (options_.descriptor_size > 0 && options_.descriptor >= AKAZE::DESCRIPTOR_MLDB_UPRIGHT) { generateDescriptorSubsample(descriptorSamples_, descriptorBits_, options_.descriptor_size, options_.descriptor_pattern_size, options_.descriptor_channels); } @@ -264,10 +265,10 @@ void AKAZEFeatures::Find_Scale_Space_Extrema(std::vector& kpts) vector kpts_aux; // Set maximum size - if (options_.descriptor == cv::DESCRIPTOR_MLDB_UPRIGHT || options_.descriptor == cv::DESCRIPTOR_MLDB) { + if (options_.descriptor == AKAZE::DESCRIPTOR_MLDB_UPRIGHT || options_.descriptor == AKAZE::DESCRIPTOR_MLDB) { smax = 10.0f*sqrtf(2.0f); } - else if (options_.descriptor == cv::DESCRIPTOR_KAZE_UPRIGHT || options_.descriptor == cv::DESCRIPTOR_KAZE) { + else if (options_.descriptor == AKAZE::DESCRIPTOR_KAZE_UPRIGHT || options_.descriptor == AKAZE::DESCRIPTOR_KAZE) { smax = 12.0f*sqrtf(2.0f); } @@ -712,7 +713,7 @@ void AKAZEFeatures::Compute_Descriptors(std::vector& kpts, cv::Mat } // Allocate memory for the matrix with the descriptors - if (options_.descriptor < cv::DESCRIPTOR_MLDB_UPRIGHT) { + if (options_.descriptor < AKAZE::DESCRIPTOR_MLDB_UPRIGHT) { desc = cv::Mat::zeros((int)kpts.size(), 64, CV_32FC1); } else { @@ -729,17 +730,17 @@ void AKAZEFeatures::Compute_Descriptors(std::vector& kpts, cv::Mat switch (options_.descriptor) { - case cv::DESCRIPTOR_KAZE_UPRIGHT: // Upright descriptors, not invariant to rotation + case AKAZE::DESCRIPTOR_KAZE_UPRIGHT: // Upright descriptors, not invariant to rotation { cv::parallel_for_(cv::Range(0, (int)kpts.size()), MSURF_Upright_Descriptor_64_Invoker(kpts, desc, evolution_)); } break; - case cv::DESCRIPTOR_KAZE: + case AKAZE::DESCRIPTOR_KAZE: { cv::parallel_for_(cv::Range(0, (int)kpts.size()), MSURF_Descriptor_64_Invoker(kpts, desc, evolution_)); } break; - case cv::DESCRIPTOR_MLDB_UPRIGHT: // Upright descriptors, not invariant to rotation + case AKAZE::DESCRIPTOR_MLDB_UPRIGHT: // Upright descriptors, not invariant to rotation { if (options_.descriptor_size == 0) cv::parallel_for_(cv::Range(0, (int)kpts.size()), Upright_MLDB_Full_Descriptor_Invoker(kpts, desc, evolution_, options_)); @@ -747,7 +748,7 @@ void AKAZEFeatures::Compute_Descriptors(std::vector& kpts, cv::Mat cv::parallel_for_(cv::Range(0, (int)kpts.size()), Upright_MLDB_Descriptor_Subset_Invoker(kpts, desc, evolution_, options_, descriptorSamples_, descriptorBits_)); } break; - case cv::DESCRIPTOR_MLDB: + case AKAZE::DESCRIPTOR_MLDB: { if (options_.descriptor_size == 0) cv::parallel_for_(cv::Range(0, (int)kpts.size()), MLDB_Full_Descriptor_Invoker(kpts, desc, evolution_, options_)); @@ -765,7 +766,20 @@ void AKAZEFeatures::Compute_Descriptors(std::vector& kpts, cv::Mat * @note The orientation is computed using a similar approach as described in the * original SURF method. 
See Bay et al., Speeded Up Robust Features, ECCV 2006 */ -void AKAZEFeatures::Compute_Main_Orientation(cv::KeyPoint& kpt, const std::vector& evolution_) { +void AKAZEFeatures::Compute_Main_Orientation(cv::KeyPoint& kpt, const std::vector& evolution_) +{ + /* ************************************************************************* */ + /// Lookup table for 2d gaussian (sigma = 2.5) where (0,0) is top left and (6,6) is bottom right + static const float gauss25[7][7] = + { + { 0.02546481f, 0.02350698f, 0.01849125f, 0.01239505f, 0.00708017f, 0.00344629f, 0.00142946f }, + { 0.02350698f, 0.02169968f, 0.01706957f, 0.01144208f, 0.00653582f, 0.00318132f, 0.00131956f }, + { 0.01849125f, 0.01706957f, 0.01342740f, 0.00900066f, 0.00514126f, 0.00250252f, 0.00103800f }, + { 0.01239505f, 0.01144208f, 0.00900066f, 0.00603332f, 0.00344629f, 0.00167749f, 0.00069579f }, + { 0.00708017f, 0.00653582f, 0.00514126f, 0.00344629f, 0.00196855f, 0.00095820f, 0.00039744f }, + { 0.00344629f, 0.00318132f, 0.00250252f, 0.00167749f, 0.00095820f, 0.00046640f, 0.00019346f }, + { 0.00142946f, 0.00131956f, 0.00103800f, 0.00069579f, 0.00039744f, 0.00019346f, 0.00008024f } + }; int ix = 0, iy = 0, idx = 0, s = 0, level = 0; float xf = 0.0, yf = 0.0, gweight = 0.0, ratio = 0.0; @@ -1702,3 +1716,6 @@ void generateDescriptorSubsample(cv::Mat& sampleList, cv::Mat& comparisons, int sampleList = samples.rowRange(0, count).clone(); comparisons = comps.rowRange(0, nbits).clone(); } + +} +} diff --git a/modules/features2d/src/kaze/KAZEConfig.h b/modules/features2d/src/kaze/KAZEConfig.h index 546ee36579..5f2f8dcecd 100644 --- a/modules/features2d/src/kaze/KAZEConfig.h +++ b/modules/features2d/src/kaze/KAZEConfig.h @@ -12,12 +12,14 @@ #include "../precomp.hpp" #include +namespace cv +{ //************************************************************************************* struct KAZEOptions { KAZEOptions() - : diffusivity(cv::DIFF_PM_G2) + : diffusivity(KAZE::DIFF_PM_G2) , soffset(1.60f) , omax(4) @@ -49,4 +51,6 @@ struct KAZEOptions { bool extended; }; +} + #endif diff --git a/modules/features2d/src/kaze/KAZEFeatures.h b/modules/features2d/src/kaze/KAZEFeatures.h index b62f94831e..98c8307888 100644 --- a/modules/features2d/src/kaze/KAZEFeatures.h +++ b/modules/features2d/src/kaze/KAZEFeatures.h @@ -17,43 +17,48 @@ #include "fed.h" #include "TEvolution.h" +namespace cv +{ + /* ************************************************************************* */ // KAZE Class Declaration class KAZEFeatures { private: - /// Parameters of the Nonlinear diffusion class - KAZEOptions options_; ///< Configuration options for KAZE - std::vector evolution_; ///< Vector of nonlinear diffusion evolution + /// Parameters of the Nonlinear diffusion class + KAZEOptions options_; ///< Configuration options for KAZE + std::vector evolution_; ///< Vector of nonlinear diffusion evolution - /// Vector of keypoint vectors for finding extrema in multiple threads + /// Vector of keypoint vectors for finding extrema in multiple threads std::vector > kpts_par_; - /// FED parameters - int ncycles_; ///< Number of cycles - bool reordering_; ///< Flag for reordering time steps - std::vector > tsteps_; ///< Vector of FED dynamic time steps - std::vector nsteps_; ///< Vector of number of steps per cycle + /// FED parameters + int ncycles_; ///< Number of cycles + bool reordering_; ///< Flag for reordering time steps + std::vector > tsteps_; ///< Vector of FED dynamic time steps + std::vector nsteps_; ///< Vector of number of steps per cycle public: - /// Constructor + /// 
Constructor KAZEFeatures(KAZEOptions& options); - /// Public methods for KAZE interface + /// Public methods for KAZE interface void Allocate_Memory_Evolution(void); int Create_Nonlinear_Scale_Space(const cv::Mat& img); void Feature_Detection(std::vector& kpts); void Feature_Description(std::vector& kpts, cv::Mat& desc); static void Compute_Main_Orientation(cv::KeyPoint& kpt, const std::vector& evolution_, const KAZEOptions& options); - /// Feature Detection Methods + /// Feature Detection Methods void Compute_KContrast(const cv::Mat& img, const float& kper); void Compute_Multiscale_Derivatives(void); void Compute_Detector_Response(void); - void Determinant_Hessian(std::vector& kpts); + void Determinant_Hessian(std::vector& kpts); void Do_Subpixel_Refinement(std::vector& kpts); }; +} + #endif diff --git a/modules/features2d/src/orb.cpp b/modules/features2d/src/orb.cpp index d1e401ecc8..85b4318b85 100644 --- a/modules/features2d/src/orb.cpp +++ b/modules/features2d/src/orb.cpp @@ -645,38 +645,70 @@ static inline float getScale(int level, int firstLevel, double scaleFactor) return (float)std::pow(scaleFactor, (double)(level - firstLevel)); } -/** Constructor - * @param detector_params parameters to use - */ -ORB::ORB(int _nfeatures, float _scaleFactor, int _nlevels, int _edgeThreshold, - int _firstLevel, int _WTA_K, int _scoreType, int _patchSize, int _fastThreshold) : - nfeatures(_nfeatures), scaleFactor(_scaleFactor), nlevels(_nlevels), - edgeThreshold(_edgeThreshold), firstLevel(_firstLevel), WTA_K(_WTA_K), - scoreType(_scoreType), patchSize(_patchSize), fastThreshold(_fastThreshold) -{} +class ORB_Impl : public ORB +{ +public: + explicit ORB_Impl(int _nfeatures, float _scaleFactor, int _nlevels, int _edgeThreshold, + int _firstLevel, int _WTA_K, int _scoreType, int _patchSize, int _fastThreshold) : + nfeatures(_nfeatures), scaleFactor(_scaleFactor), nlevels(_nlevels), + edgeThreshold(_edgeThreshold), firstLevel(_firstLevel), WTA_K(_WTA_K), + scoreType(_scoreType), patchSize(_patchSize), fastThreshold(_fastThreshold) + {} + + // returns the descriptor size in bytes + int descriptorSize() const; + // returns the descriptor type + int descriptorType() const; + // returns the default norm type + int defaultNorm() const; + + // Compute the ORB_Impl features and descriptors on an image + void operator()(InputArray image, InputArray mask, std::vector& keypoints) const; + + // Compute the ORB_Impl features and descriptors on an image + void operator()( InputArray image, InputArray mask, std::vector& keypoints, + OutputArray descriptors, bool useProvidedKeypoints=false ) const; + + AlgorithmInfo* info() const; + +protected: + + void computeImpl( InputArray image, std::vector& keypoints, OutputArray descriptors ) const; + void detectImpl( InputArray image, std::vector& keypoints, InputArray mask=noArray() ) const; + + int nfeatures; + double scaleFactor; + int nlevels; + int edgeThreshold; + int firstLevel; + int WTA_K; + int scoreType; + int patchSize; + int fastThreshold; +}; -int ORB::descriptorSize() const +int ORB_Impl::descriptorSize() const { return kBytes; } -int ORB::descriptorType() const +int ORB_Impl::descriptorType() const { return CV_8U; } -int ORB::defaultNorm() const +int ORB_Impl::defaultNorm() const { return NORM_HAMMING; } -/** Compute the ORB features and descriptors on an image +/** Compute the ORB_Impl features and descriptors on an image * @param img the image to compute the features and descriptors on * @param mask the mask to apply * @param keypoints the resulting 
keypoints */ -void ORB::operator()(InputArray image, InputArray mask, std::vector& keypoints) const +void ORB_Impl::operator()(InputArray image, InputArray mask, std::vector& keypoints) const { (*this)(image, mask, keypoints, noArray(), false); } @@ -716,7 +748,7 @@ static void uploadORBKeypoints(const std::vector& src, } -/** Compute the ORB keypoints on an image +/** Compute the ORB_Impl keypoints on an image * @param image_pyramid the image pyramid to compute the features and descriptors on * @param mask_pyramid the masks to apply at every level * @param keypoints the resulting keypoints, clustered per level @@ -788,7 +820,7 @@ static void computeKeyPoints(const Mat& imagePyramid, KeyPointsFilter::runByImageBorder(keypoints, img.size(), edgeThreshold); // Keep more points than necessary as FAST does not give amazing corners - KeyPointsFilter::retainBest(keypoints, scoreType == ORB::HARRIS_SCORE ? 2 * featuresNum : featuresNum); + KeyPointsFilter::retainBest(keypoints, scoreType == ORB_Impl::HARRIS_SCORE ? 2 * featuresNum : featuresNum); nkeypoints = (int)keypoints.size(); counters[level] = nkeypoints; @@ -814,7 +846,7 @@ static void computeKeyPoints(const Mat& imagePyramid, UMat ukeypoints, uresponses(1, nkeypoints, CV_32F); // Select best features using the Harris cornerness (better scoring than FAST) - if( scoreType == ORB::HARRIS_SCORE ) + if( scoreType == ORB_Impl::HARRIS_SCORE ) { if( useOCL ) { @@ -888,7 +920,7 @@ static void computeKeyPoints(const Mat& imagePyramid, } -/** Compute the ORB features and descriptors on an image +/** Compute the ORB_Impl features and descriptors on an image * @param img the image to compute the features and descriptors on * @param mask the mask to apply * @param keypoints the resulting keypoints @@ -896,7 +928,7 @@ static void computeKeyPoints(const Mat& imagePyramid, * @param do_keypoints if true, the keypoints are computed, otherwise used as an input * @param do_descriptors if true, also computes the descriptors */ -void ORB::operator()( InputArray _image, InputArray _mask, std::vector& keypoints, +void ORB_Impl::operator()( InputArray _image, InputArray _mask, std::vector& keypoints, OutputArray _descriptors, bool useProvidedKeypoints ) const { CV_Assert(patchSize >= 2); @@ -1127,12 +1159,12 @@ void ORB::operator()( InputArray _image, InputArray _mask, std::vector } } -void ORB::detectImpl( InputArray image, std::vector& keypoints, InputArray mask) const +void ORB_Impl::detectImpl( InputArray image, std::vector& keypoints, InputArray mask) const { (*this)(image.getMat(), mask.getMat(), keypoints, noArray(), false); } -void ORB::computeImpl( InputArray image, std::vector& keypoints, OutputArray descriptors) const +void ORB_Impl::computeImpl( InputArray image, std::vector& keypoints, OutputArray descriptors) const { (*this)(image, Mat(), keypoints, descriptors, true); }
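Every algorithm touched by this patch follows the same split: the public header keeps only a static create() factory, while a *_Impl class inside the .cpp file holds the parameters and overrides the Feature2D virtuals (see FastFeatureDetector_Impl and GFTTDetector_Impl above; ORB_Impl still carries the older operator()/detectImpl plumbing). The sketch below shows that shape for a hypothetical detector; MyDetector, MyDetector_Impl and their threshold parameter are invented for illustration and are not part of OpenCV.

#include <opencv2/features2d.hpp>
#include <vector>

namespace cv {

// Hypothetical public interface: only the factory is exposed.
class MyDetector : public Feature2D
{
public:
    static Ptr<MyDetector> create(int threshold = 10);
};

// Hypothetical implementation class, kept out of the public header.
class MyDetector_Impl : public MyDetector
{
public:
    explicit MyDetector_Impl(int threshold) : threshold_(threshold) {}

    // Override of the Feature2D virtual; assumes an 8-bit grayscale input.
    void detect(InputArray image, std::vector<KeyPoint>& keypoints, InputArray mask)
    {
        keypoints.clear();
        FAST(image, keypoints, threshold_, true);  // reuse the free FAST() kept by this patch
        if (!mask.empty())
            KeyPointsFilter::runByPixelsMask(keypoints, mask.getMat());
    }

private:
    int threshold_;
};

Ptr<MyDetector> MyDetector::create(int threshold)
{
    return makePtr<MyDetector_Impl>(threshold);
}

} // namespace cv

// Call site, mirroring the factories introduced above:
//   cv::Ptr<cv::MyDetector> det = cv::MyDetector::create(25);
//   det->detect(img, kpts);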