Added documentation for ORB (ticket #1835). Updated documentation on features2d (the Feature2D class is still to be documented) (ticket #1819).

pull/13383/head
Vadim Pisarevsky 13 years ago
parent d40dc9a928
commit 7a3b351357
  1. modules/features2d/doc/common_interfaces_of_descriptor_extractors.rst (84 changed lines)
  2. modules/features2d/doc/common_interfaces_of_descriptor_matchers.rst (98 changed lines)
  3. modules/features2d/doc/common_interfaces_of_feature_detectors.rst (81 changed lines)
  4. modules/features2d/doc/feature_detection_and_description.rst (123 changed lines)
  5. modules/nonfree/doc/feature_detection.rst (121 changed lines)

@@ -95,9 +95,9 @@ Creates a descriptor extractor by name.
The current implementation supports the following descriptor extractor types:
* ``"SIFT"`` -- :ocv:class:`SiftDescriptorExtractor`
* ``"SURF"`` -- :ocv:class:`SurfDescriptorExtractor`
* ``"ORB"`` -- :ocv:class:`OrbDescriptorExtractor`
* ``"SIFT"`` -- :ocv:class:`SIFT`
* ``"SURF"`` -- :ocv:class:`SURF`
* ``"ORB"`` -- :ocv:class:`ORB`
* ``"BRIEF"`` -- :ocv:class:`BriefDescriptorExtractor`
A combined format is also supported: descriptor extractor adapter name ( ``"Opponent"`` --
@@ -105,84 +105,6 @@ A combined format is also supported: descriptor extractor adapter name ( ``"Oppo
for example: ``"OpponentSIFT"`` .
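As an illustrative sketch only (not part of this patch), the factory interface can be combined with any keypoint detector; the wrapper function below and its arguments are placeholders: ::

    #include <vector>
    #include <opencv2/features2d/features2d.hpp>
    using namespace cv;

    void computeOrbDescriptors(const Mat& img)           // img: 8-bit grayscale image
    {
        std::vector<KeyPoint> keypoints;
        FAST(img, keypoints, 20);                        // any detector could be used here

        Ptr<DescriptorExtractor> extractor = DescriptorExtractor::create("ORB");
        Mat descriptors;
        extractor->compute(img, keypoints, descriptors); // one row per keypoint
    }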
SiftDescriptorExtractor
-----------------------
.. ocv:class:: SiftDescriptorExtractor
Wrapping class for computing descriptors by using the
:ocv:class:`SIFT` class. ::
class SiftDescriptorExtractor : public DescriptorExtractor
{
public:
SiftDescriptorExtractor(
const SIFT::DescriptorParams& descriptorParams=SIFT::DescriptorParams(),
const SIFT::CommonParams& commonParams=SIFT::CommonParams() );
SiftDescriptorExtractor( double magnification, bool isNormalize=true,
bool recalculateAngles=true, int nOctaves=SIFT::CommonParams::DEFAULT_NOCTAVES,
int nOctaveLayers=SIFT::CommonParams::DEFAULT_NOCTAVE_LAYERS,
int firstOctave=SIFT::CommonParams::DEFAULT_FIRST_OCTAVE,
int angleMode=SIFT::CommonParams::FIRST_ANGLE );
virtual void read (const FileNode &fn);
virtual void write (FileStorage &fs) const;
virtual int descriptorSize() const;
virtual int descriptorType() const;
protected:
...
}
SurfDescriptorExtractor
-----------------------
.. ocv:class:: SurfDescriptorExtractor
Wrapping class for computing descriptors by using the
:ocv:class:`SURF` class. ::
class SurfDescriptorExtractor : public DescriptorExtractor
{
public:
SurfDescriptorExtractor( int nOctaves=4,
int nOctaveLayers=2, bool extended=false );
virtual void read (const FileNode &fn);
virtual void write (FileStorage &fs) const;
virtual int descriptorSize() const;
virtual int descriptorType() const;
protected:
...
}
OrbDescriptorExtractor
---------------------------
.. ocv:class:: OrbDescriptorExtractor
Wrapping class for computing descriptors by using the
:ocv:class:`ORB` class. ::
template<typename T>
class OrbDescriptorExtractor : public DescriptorExtractor
{
public:
OrbDescriptorExtractor( ORB::PatchSize patch_size );
virtual void read( const FileNode &fn );
virtual void write( FileStorage &fs ) const;
virtual int descriptorSize() const;
virtual int descriptorType() const;
protected:
...
}
OpponentColorDescriptorExtractor
--------------------------------
.. ocv:class:: OpponentColorDescriptorExtractor

@@ -248,7 +248,7 @@ Creates a descriptor matcher of a given type with the default parameters (using
*
``BruteForce-Hamming``
*
``BruteForce-HammingLUT``
``BruteForce-Hamming(2)``
*
``FlannBased``
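For illustration only (not part of this patch), a sketch of creating a matcher by name and matching two descriptor sets; ``queryDesc`` and ``trainDesc`` are placeholder matrices of binary descriptors (e.g. ORB or BRIEF), which is why the Hamming-based matcher is chosen, and the wrapper function name is hypothetical: ::

    #include <vector>
    #include <opencv2/features2d/features2d.hpp>
    using namespace cv;

    void matchBinaryDescriptors(const Mat& queryDesc, const Mat& trainDesc)
    {
        Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create("BruteForce-Hamming");
        std::vector<DMatch> matches;
        matcher->match(queryDesc, trainDesc, matches);   // best match for each query descriptor
    }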
@@ -256,102 +256,22 @@ Creates a descriptor matcher of a given type with the default parameters (using
BruteForceMatcher
BFMatcher
-----------------
.. ocv:class:: BruteForceMatcher
.. ocv:class:: BFMatcher
Brute-force descriptor matcher. For each descriptor in the first set, this matcher finds the closest descriptor in the second set by trying each one. This descriptor matcher supports masking permissible matches of descriptor sets. ::
template<class Distance>
class BruteForceMatcher : public DescriptorMatcher
{
public:
BruteForceMatcher( Distance d = Distance() );
virtual ~BruteForceMatcher();
virtual bool isMaskSupported() const;
virtual Ptr<DescriptorMatcher> clone( bool emptyTrainData=false ) const;
protected:
...
}
BFMatcher::BFMatcher
--------------------
Brute-force matcher constructor.
For efficiency, ``BruteForceMatcher`` is used as a template parameterized with the distance type. For float descriptors, ``L2<float>`` is a common choice. The following distances are supported: ::
.. ocv:function:: BFMatcher::BFMatcher( int distanceType, bool crossCheck=false )
template<typename T>
struct Accumulator
{
typedef T Type;
};
template<> struct Accumulator<unsigned char> { typedef unsigned int Type; };
template<> struct Accumulator<unsigned short> { typedef unsigned int Type; };
template<> struct Accumulator<char> { typedef int Type; };
template<> struct Accumulator<short> { typedef int Type; };
/*
* Euclidean distance functor
*/
template<class T>
struct L2
{
typedef T ValueType;
typedef typename Accumulator<T>::Type ResultType;
ResultType operator()( const T* a, const T* b, int size ) const;
};
:param distanceType: One of ``NORM_L1``, ``NORM_L2``, ``NORM_HAMMING``, ``NORM_HAMMING2``. ``NORM_L1`` and ``NORM_L2`` are preferable choices for SIFT and SURF descriptors; ``NORM_HAMMING`` should be used with ORB and BRIEF; ``NORM_HAMMING2`` should be used with ORB when ``WTA_K`` is 3 or 4 (see the ``ORB::ORB`` constructor description).
/*
* Squared Euclidean distance functor
*/
template<class T>
struct SL2
{
typedef T ValueType;
typedef typename Accumulator<T>::Type ResultType;
ResultType operator()( const T* a, const T* b, int size ) const;
};
// Note: in case of SL2 distance a parameter maxDistance in the method DescriptorMatcher::radiusMatch
// is a squared maximum distance in L2.
/*
* Manhattan distance (city block distance) functor
*/
template<class T>
struct CV_EXPORTS L1
{
typedef T ValueType;
typedef typename Accumulator<T>::Type ResultType;
ResultType operator()( const T* a, const T* b, int size ) const;
};
/*
* Hamming distance functor
*/
struct HammingLUT
{
typedef unsigned char ValueType;
typedef int ResultType;
ResultType operator()( const unsigned char* a, const unsigned char* b,
int size ) const;
...
};
struct Hamming
{
typedef unsigned char ValueType;
typedef int ResultType;
ResultType operator()( const unsigned char* a, const unsigned char* b,
int size ) const;
};
:param crossCheck: If it is false, the default ``BFMatcher`` behaviour is used: the matcher finds the k nearest neighbors for each query descriptor. If ``crossCheck==true``, then the ``knnMatch()`` method with ``k=1`` will only return pairs ``(i,j)`` such that for the ``i``-th query descriptor the ``j``-th descriptor in the matcher's collection is the nearest and vice versa, i.e. the ``BFMatcher`` will only return consistent pairs. This technique usually produces the best results with a minimal number of outliers when there are enough matches. It is an alternative to the ratio test used by D. Lowe in the SIFT paper.
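A minimal usage sketch (assuming ORB descriptors computed with the default ``WTA_K==2``, hence ``NORM_HAMMING``; the descriptor matrices and the wrapper function are placeholders): ::

    #include <vector>
    #include <opencv2/features2d/features2d.hpp>
    using namespace cv;

    void crossCheckedMatch(const Mat& queryDescriptors, const Mat& trainDescriptors)
    {
        BFMatcher matcher(NORM_HAMMING, true);           // cross-check enabled
        std::vector<DMatch> matches;
        matcher.match(queryDescriptors, trainDescriptors, matches);  // only consistent pairs survive
    }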
FlannBasedMatcher

@@ -140,10 +140,10 @@ The following detector types are supported:
* ``"FAST"`` -- :ocv:class:`FastFeatureDetector`
* ``"STAR"`` -- :ocv:class:`StarFeatureDetector`
* ``"SIFT"`` -- :ocv:class:`SiftFeatureDetector`
* ``"SURF"`` -- :ocv:class:`SurfFeatureDetector`
* ``"ORB"`` -- :ocv:class:`OrbFeatureDetector`
* ``"MSER"`` -- :ocv:class:`MserFeatureDetector`
* ``"SIFT"`` -- :ocv:class:`SIFT` (nonfree module)
* ``"SURF"`` -- :ocv:class:`SURF` (nonfree module)
* ``"ORB"`` -- :ocv:class:`ORB`
* ``"MSER"`` -- :ocv:class:`MSER`
* ``"GFTT"`` -- :ocv:class:`GoodFeaturesToTrackDetector`
* ``"HARRIS"`` -- :ocv:class:`GoodFeaturesToTrackDetector` with Harris detector enabled
* ``"Dense"`` -- :ocv:class:`DenseFeatureDetector`
@@ -250,67 +250,6 @@ Wrapping class for feature detection using the
...
};
SiftFeatureDetector
-------------------
.. ocv:class:: SiftFeatureDetector
Wrapping class for feature detection using the
:ocv:class:`SIFT` class. ::
class SiftFeatureDetector : public FeatureDetector
{
public:
SiftFeatureDetector(
const SIFT::DetectorParams& detectorParams=SIFT::DetectorParams(),
const SIFT::CommonParams& commonParams=SIFT::CommonParams() );
SiftFeatureDetector( double threshold, double edgeThreshold,
int nOctaves=SIFT::CommonParams::DEFAULT_NOCTAVES,
int nOctaveLayers=SIFT::CommonParams::DEFAULT_NOCTAVE_LAYERS,
int firstOctave=SIFT::CommonParams::DEFAULT_FIRST_OCTAVE,
int angleMode=SIFT::CommonParams::FIRST_ANGLE );
virtual void read( const FileNode& fn );
virtual void write( FileStorage& fs ) const;
protected:
...
};
SurfFeatureDetector
-------------------
.. ocv:class:: SurfFeatureDetector
Wrapping class for feature detection using the
:ocv:class:`SURF` class. ::
class SurfFeatureDetector : public FeatureDetector
{
public:
SurfFeatureDetector( double hessianThreshold = 400., int octaves = 3,
int octaveLayers = 4 );
virtual void read( const FileNode& fn );
virtual void write( FileStorage& fs ) const;
protected:
...
};
OrbFeatureDetector
-------------------
.. ocv:class:: OrbFeatureDetector
Wrapping class for feature detection using the
:ocv:class:`ORB` class. ::
class OrbFeatureDetector : public FeatureDetector
{
public:
OrbFeatureDetector( size_t n_features );
virtual void read( const FileNode& fn );
virtual void write( FileStorage& fs ) const;
protected:
...
};
DenseFeatureDetector
--------------------
.. ocv:class:: DenseFeatureDetector
@@ -605,15 +544,3 @@ StarAdjuster
StarAdjuster(double initial_thresh = 30.0);
...
};
SurfAdjuster
------------
.. ocv:class:: SurfAdjuster
:ocv:class:`AdjusterAdapter` for :ocv:class:`SurfFeatureDetector`. This class adjusts the ``hessianThreshold`` of ``SurfFeatureDetector``. ::
class SurfAdjuster: public SurfAdjuster
{
SurfAdjuster();
...
};

@@ -7,7 +7,7 @@ FAST
--------
Detects corners using the FAST algorithm
.. ocv:function:: void FAST( const Mat& image, vector<KeyPoint>& keypoints, int threshold, bool nonmaxSupression=true )
.. ocv:function:: void FAST( InputArray image, vector<KeyPoint>& keypoints, int threshold, bool nonmaxSupression=true )
:param image: Image where keypoints (corners) are detected.
@@ -17,7 +17,9 @@ Detects corners using the FAST algorithm
:param nonmaxSupression: If it is true, non-maximum suppression is applied to detected corners (keypoints).
Detects corners using the FAST algorithm by E. Rosten (*Machine Learning for High-speed Corner Detection*, 2006).
Detects corners using the FAST algorithm by [Rosten06]_.
.. [Rosten06] E. Rosten and T. Drummond. Machine Learning for High-speed Corner Detection, ECCV 2006.
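A minimal sketch of calling the function directly (the threshold value and the wrapper function are illustrative placeholders): ::

    #include <vector>
    #include <opencv2/features2d/features2d.hpp>
    using namespace cv;

    void detectFastCorners(const Mat& image)             // image: 8-bit grayscale
    {
        std::vector<KeyPoint> corners;
        FAST(image, corners, 30, true);                  // threshold 30, non-maximum suppression on
    }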
MSER
@@ -46,102 +48,51 @@ The class encapsulates all the parameters of the MSER extraction algorithm (see
http://en.wikipedia.org/wiki/Maximally_stable_extremal_regions). Also see http://opencv.willowgarage.com/wiki/documentation/cpp/features2d/MSER for useful comments and a description of the parameters.
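As an illustrative sketch only (assuming the 2.x C++ interface; the wrapper function is a placeholder): ::

    #include <vector>
    #include <opencv2/features2d/features2d.hpp>
    using namespace cv;

    void detectMserRegions(const Mat& img)               // img: 8-bit input image
    {
        MSER mser;                                       // default parameters
        std::vector<std::vector<Point> > regions;
        mser(img, regions, Mat());                       // each region is a list of pixel coordinates
    }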
StarDetector
------------
.. ocv:class:: StarDetector
Class implementing the ``Star`` keypoint detector, a modified version of the ``CenSurE`` keypoint detector described in [Agrawal08]_.
ORB
---
.. ocv:class:: ORB
.. [Agrawal08] Agrawal, M. and Konolige, K. and Blas, M.R. "CenSurE: Center Surround Extremas for Realtime Feature Detection and Matching", ECCV08, 2008
Class implementing the ORB (*oriented BRIEF*) keypoint detector and descriptor extractor, described in [RRKB11]_. The algorithm uses FAST in pyramids to detect stable keypoints, selects the strongest features using FAST or Harris response, finds their orientation using first-order moments and computes the descriptors using BRIEF (where the coordinates of random point pairs (or k-tuples) are rotated according to the measured orientation).
StarDetector::StarDetector
--------------------------
The Star Detector constructor
.. [RRKB11] Ethan Rublee, Vincent Rabaud, Kurt Konolige, Gary R. Bradski: ORB: An efficient alternative to SIFT or SURF. ICCV 2011: 2564-2571.
.. ocv:function:: StarDetector::StarDetector()
ORB::ORB
--------
The ORB constructor
.. ocv:function:: StarDetector::StarDetector(int maxSize, int responseThreshold, int lineThresholdProjected, int lineThresholdBinarized, int suppressNonmaxSize)
.. ocv:function:: ORB::ORB()
.. ocv:pyfunction:: cv2.StarDetector(maxSize, responseThreshold, lineThresholdProjected, lineThresholdBinarized, suppressNonmaxSize) -> <StarDetector object>
.. ocv:function:: ORB::ORB(int nfeatures = 500, float scaleFactor = 1.2f, int nlevels = 8, int edgeThreshold = 31, int firstLevel = 0, int WTA_K=2, int scoreType=HARRIS_SCORE, int patchSize=31)
:param maxSize: maximum size of the features. The following values are supported: 4, 6, 8, 11, 12, 16, 22, 23, 32, 45, 46, 64, 90, 128. In the case of a different value the result is undefined.
:param nfeatures: The maximum number of features to retain.
:param responseThreshold: threshold for the approximated Laplacian, used to eliminate weak features. The larger it is, the fewer features will be retrieved
:param scaleFactor: Pyramid decimation ratio, greater than 1. ``scaleFactor==2`` means the classical pyramid, where each next level has 4x fewer pixels than the previous one, but such a big scale factor degrades feature matching scores dramatically. On the other hand, a scale factor too close to 1 means that covering a certain scale range requires more pyramid levels, so speed suffers.
:param lineThresholdProjected: another threshold for the laplacian to eliminate edges
:param lineThresholdBinarized: yet another threshold for the feature size to eliminate edges. The larger the 2nd threshold, the more points you get.
:param nlevels: The number of pyramid levels. The smallest level will have linear size equal to ``input_image_linear_size/pow(scaleFactor, nlevels)``.
:param edgeThreshold: This is size of the border where the features are not detected. It should roughly match the ``patchSize`` parameter.
:param firstLevel: It should be 0 in the current implementation.
:param WTA_K: The number of points that produce each element of the oriented BRIEF descriptor. The default value 2 means BRIEF, where a random point pair is taken and their brightnesses compared, giving a 0/1 response. Other possible values are 3 and 4. For example, 3 means that 3 random points are taken (the point coordinates are random, but they are generated from a pre-defined seed, so each element of the BRIEF descriptor is computed deterministically from the pixel rectangle), the point of maximum brightness is found, and the index of the winner (0, 1 or 2) is output. Such output occupies 2 bits, and therefore it needs a special variant of Hamming distance, denoted ``NORM_HAMMING2`` (2 bits per bin). When ``WTA_K=4``, 4 random points are taken to compute each bin (which also occupies 2 bits, with possible values 0, 1, 2 or 3).
:param scoreType: The default ``HARRIS_SCORE`` means that the Harris algorithm is used to rank features (the score is written to ``KeyPoint::response`` and is used to retain the best ``nfeatures`` features); ``FAST_SCORE`` is an alternative value that produces slightly less stable keypoints but is a little faster to compute.
:param patchSize: size of the patch used by the oriented BRIEF descriptor. Of course, on smaller pyramid layers the perceived image area covered by a feature will be larger.
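For illustration only (assuming ``opencv2/features2d/features2d.hpp`` is included and ``using namespace cv``; the non-default values below are arbitrary examples, not recommendations): ::

    ORB orbDefault;                                      // all defaults: 500 features, scaleFactor 1.2, 8 levels
    ORB orbTuned(1000, 1.5f, 4, 31, 0, 2, ORB::HARRIS_SCORE, 31);
    ORB orbK3(500, 1.2f, 8, 31, 0, 3);                   // WTA_K==3: match with NORM_HAMMING2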
StarDetector::operator()
------------------------
Finds keypoints in an image
ORB::operator()
---------------
Finds keypoints in an image and computes their descriptors
.. ocv:function:: void StarDetector::operator()(const Mat& image, vector<KeyPoint>& keypoints)
.. ocv:pyfunction:: cv2.StarDetector.detect(image) -> keypoints
.. ocv:cfunction:: CvSeq* cvGetStarKeypoints( const CvArr* image, CvMemStorage* storage, CvStarDetectorParams params=cvStarDetectorParams() )
.. ocv:pyoldfunction:: cv.GetStarKeypoints(image, storage, params)-> keypoints
.. ocv:function:: void ORB::operator()(InputArray image, InputArray mask, vector<KeyPoint>& keypoints, OutputArray descriptors, bool useProvidedKeypoints=false ) const
:param image: The input 8-bit grayscale image
:param image: The input 8-bit grayscale image.
:param keypoints: The output vector of keypoints
:param mask: The operation mask.
:param storage: The memory storage used to store the keypoints (OpenCV 1.x API only)
:param keypoints: The output vector of keypoints.
:param params: The algorithm parameters stored in ``CvStarDetectorParams`` (OpenCV 1.x API only)
ORB
----
.. ocv:class:: ORB
Class for extracting ORB features and descriptors from an image. ::
class ORB
{
public:
/** The patch sizes that can be used (only one right now) */
struct CommonParams
{
enum { DEFAULT_N_LEVELS = 3, DEFAULT_FIRST_LEVEL = 0};
/** default constructor */
CommonParams(float scale_factor = 1.2f, unsigned int n_levels = DEFAULT_N_LEVELS,
int edge_threshold = 31, unsigned int first_level = DEFAULT_FIRST_LEVEL);
void read(const FileNode& fn);
void write(FileStorage& fs) const;
/** Coefficient by which we divide the dimensions from one scale pyramid level to the next */
float scale_factor_;
/** The number of levels in the scale pyramid */
unsigned int n_levels_;
/** The level at which the image is given
* if 1, that means we will also look at the image scale_factor_ times bigger
*/
unsigned int first_level_;
/** How far from the boundary the points should be */
int edge_threshold_;
};
// constructor that initializes all the algorithm parameters
// n_features is the number of desired features
ORB(size_t n_features = 500, const CommonParams & detector_params = CommonParams());
// returns the number of elements in each descriptor (32 bytes)
int descriptorSize() const;
// detects keypoints using ORB
void operator()(const Mat& img, const Mat& mask,
vector<KeyPoint>& keypoints) const;
// detects ORB keypoints and computes the ORB descriptors for them;
// output vector "descriptors" stores elements of descriptors and has size
// equal descriptorSize()*keypoints.size() as each descriptor is
// descriptorSize() elements of this vector.
void operator()(const Mat& img, const Mat& mask,
vector<KeyPoint>& keypoints,
cv::Mat& descriptors,
bool useProvidedKeypoints=false) const;
};
The class implements ORB.
:param descriptors: The output descriptors. Pass ``cv::noArray()`` if you do not need them.
:param useProvidedKeypoints: If it is true, then the method will use the provided vector of keypoints instead of detecting them.
..
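A minimal detection-plus-description sketch (the wrapper function is a placeholder; ``img`` is assumed to be an 8-bit grayscale ``Mat`` loaded elsewhere): ::

    #include <vector>
    #include <opencv2/features2d/features2d.hpp>
    using namespace cv;

    void runOrb(const Mat& img)
    {
        ORB orb(500);
        std::vector<KeyPoint> keypoints;
        Mat descriptors;
        orb(img, noArray(), keypoints, descriptors);     // detect keypoints and compute descriptors
    }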

@@ -5,90 +5,45 @@ SIFT
----
.. ocv:class:: SIFT
Class for extracting keypoints and computing descriptors using the Scale Invariant Feature Transform (SIFT) approach. ::
class CV_EXPORTS SIFT
{
public:
struct CommonParams
{
static const int DEFAULT_NOCTAVES = 4;
static const int DEFAULT_NOCTAVE_LAYERS = 3;
static const int DEFAULT_FIRST_OCTAVE = -1;
enum{ FIRST_ANGLE = 0, AVERAGE_ANGLE = 1 };
CommonParams();
CommonParams( int _nOctaves, int _nOctaveLayers, int _firstOctave,
int _angleMode );
int nOctaves, nOctaveLayers, firstOctave;
int angleMode;
};
struct DetectorParams
{
static double GET_DEFAULT_THRESHOLD()
{ return 0.04 / SIFT::CommonParams::DEFAULT_NOCTAVE_LAYERS / 2.0; }
static double GET_DEFAULT_EDGE_THRESHOLD() { return 10.0; }
DetectorParams();
DetectorParams( double _threshold, double _edgeThreshold );
double threshold, edgeThreshold;
};
struct DescriptorParams
{
static double GET_DEFAULT_MAGNIFICATION() { return 3.0; }
static const bool DEFAULT_IS_NORMALIZE = true;
static const int DESCRIPTOR_SIZE = 128;
DescriptorParams();
DescriptorParams( double _magnification, bool _isNormalize,
bool _recalculateAngles );
double magnification;
bool isNormalize;
bool recalculateAngles;
};
SIFT();
//! sift-detector constructor
SIFT( double _threshold, double _edgeThreshold,
int _nOctaves=CommonParams::DEFAULT_NOCTAVES,
int _nOctaveLayers=CommonParams::DEFAULT_NOCTAVE_LAYERS,
int _firstOctave=CommonParams::DEFAULT_FIRST_OCTAVE,
int _angleMode=CommonParams::FIRST_ANGLE );
//! sift-descriptor constructor
SIFT( double _magnification, bool _isNormalize=true,
bool _recalculateAngles = true,
int _nOctaves=CommonParams::DEFAULT_NOCTAVES,
int _nOctaveLayers=CommonParams::DEFAULT_NOCTAVE_LAYERS,
int _firstOctave=CommonParams::DEFAULT_FIRST_OCTAVE,
int _angleMode=CommonParams::FIRST_ANGLE );
SIFT( const CommonParams& _commParams,
const DetectorParams& _detectorParams = DetectorParams(),
const DescriptorParams& _descriptorParams = DescriptorParams() );
//! returns the descriptor size in floats (128)
int descriptorSize() const { return DescriptorParams::DESCRIPTOR_SIZE; }
//! finds the keypoints using the SIFT algorithm
void operator()(const Mat& img, const Mat& mask,
vector<KeyPoint>& keypoints) const;
//! finds the keypoints and computes descriptors for them using SIFT algorithm.
//! Optionally it can compute descriptors for the user-provided keypoints
void operator()(const Mat& img, const Mat& mask,
vector<KeyPoint>& keypoints,
Mat& descriptors,
bool useProvidedKeypoints=false) const;
CommonParams getCommonParams () const { return commParams; }
DetectorParams getDetectorParams () const { return detectorParams; }
DescriptorParams getDescriptorParams () const { return descriptorParams; }
protected:
...
};
Class for extracting keypoints and computing descriptors using the Scale Invariant Feature Transform (SIFT) algorithm by D. Lowe [Lowe04]_.
.. [Lowe04] Lowe, D. G., “Distinctive Image Features from Scale-Invariant Keypoints”, International Journal of Computer Vision, 60, 2, pp. 91-110, 2004.
SIFT::SIFT
----------
The SIFT constructors.
.. ocv:function:: SIFT::SIFT( int nfeatures=0, int nOctaveLayers=3, double contrastThreshold=0.04, double edgeThreshold=10, double sigma=1.6)
:param nfeatures: The number of best features to retain. The features are ranked by their scores (measured in the SIFT algorithm as the local contrast).
:param nOctaveLayers: The number of layers in each octave. 3 is the value used in D. Lowe's paper. The number of octaves is computed automatically from the image resolution.
:param contrastThreshold: The contrast threshold used to filter out weak features in semi-uniform (low-contrast) regions. The larger the threshold, the fewer features are produced by the detector.
:param edgeThreshold: The threshold used to filter out edge-like features. Note that its meaning is different from ``contrastThreshold``: the larger the ``edgeThreshold``, the fewer features are filtered out (more features are retained).
:param sigma: The sigma of the Gaussian applied to the input image at octave #0. If your image is captured with a weak camera with soft lenses, you might want to reduce this value.
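For illustration only (the nonfree module must be linked; the second constructor's ``contrastThreshold`` of 0.08 is an arbitrary example): ::

    #include <opencv2/nonfree/features2d.hpp>            // SIFT lives in the nonfree module

    cv::SIFT siftDefault;                                // equivalent to cv::SIFT(0, 3, 0.04, 10, 1.6)
    cv::SIFT siftStrict(0, 3, 0.08, 10, 1.6);            // higher contrastThreshold -> fewer keypoints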
SIFT::operator ()
-----------------
Extracts features and computes their descriptors using the SIFT algorithm
.. ocv:function:: void SIFT::operator()(InputArray image, InputArray mask, vector<KeyPoint>& keypoints, OutputArray descriptors, bool useProvidedKeypoints=false)
:param image: Input 8-bit grayscale image
:param mask: Optional input mask that marks the regions where we should detect features.
:param keypoints: The input/output vector of keypoints
:param descriptors: The output matrix of descriptors. Pass ``cv::noArray()`` if you do not need them.
:param useProvidedKeypoints: Boolean flag. If it is true, the keypoint detector is not run. Instead, the provided vector of keypoints is used and the algorithm just computes their descriptors.
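A minimal usage sketch (the wrapper function is a placeholder; ``img`` is assumed to be an 8-bit grayscale ``Mat``): ::

    #include <vector>
    #include <opencv2/nonfree/features2d.hpp>

    void runSift(const cv::Mat& img)
    {
        cv::SIFT sift;
        std::vector<cv::KeyPoint> keypoints;
        cv::Mat descriptors;
        sift(img, cv::noArray(), keypoints, descriptors);  // 128-element float descriptor per keypoint
    }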
SURF
----
.. ocv:class:: SURF
@@ -146,8 +101,8 @@ SURF::operator()
----------------
Detects keypoints and computes SURF descriptors for them.
.. ocv:function:: void SURF::operator()(const Mat& image, const Mat& mask, vector<KeyPoint>& keypoints)
.. ocv:function:: void SURF::operator()(const Mat& image, const Mat& mask, vector<KeyPoint>& keypoints, vector<float>& descriptors, bool useProvidedKeypoints=false)
.. ocv:function:: void SURF::operator()(InputArray image, InputArray mask, vector<KeyPoint>& keypoints) const
.. ocv:function:: void SURF::operator()(InputArray image, InputArray mask, vector<KeyPoint>& keypoints, OutputArray descriptors, bool useProvidedKeypoints=false)
.. ocv:pyfunction:: cv2.SURF.detect(img, mask) -> keypoints
.. ocv:pyfunction:: cv2.SURF.detect(img, mask[, useProvidedKeypoints]) -> keypoints, descriptors
@@ -162,7 +117,7 @@ Detects keypoints and computes SURF descriptors for them.
:param keypoints: The input/output vector of keypoints
:param descriptors: The output concatenated vectors of descriptors. Each descriptor is 64- or 128-element vector, as returned by ``SURF::descriptorSize()``. So the total size of ``descriptors`` will be ``keypoints.size()*descriptorSize()``.
:param descriptors: The output matrix of descriptors. Pass ``cv::noArray()`` if you do not need them.
:param useProvidedKeypoints: Boolean flag. If it is true, the keypoint detector is not run. Instead, the provided vector of keypoints is used and the algorithm just computes their descriptors.
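A minimal usage sketch (the wrapper function is a placeholder; the ``hessianThreshold`` of 400 is an arbitrary but common starting point, and ``img`` is assumed to be an 8-bit grayscale ``Mat``): ::

    #include <vector>
    #include <opencv2/nonfree/features2d.hpp>

    void runSurf(const cv::Mat& img)
    {
        cv::SURF surf(400.);
        std::vector<cv::KeyPoint> keypoints;
        cv::Mat descriptors;
        surf(img, cv::noArray(), keypoints, descriptors);  // 64- or 128-element descriptors
    }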
