From 5d863f75160c345c1dbc15d00a88762f68c8b60f Mon Sep 17 00:00:00 2001
From: abidrahmank
Date: Thu, 11 Jul 2013 09:28:37 +0530
Subject: [PATCH 1/5] drawMatches python bindings

---
 modules/features2d/include/opencv2/features2d.hpp | 8 ++++----
 modules/python/src2/cv2.cpp                       | 9 ++++++---
 2 files changed, 10 insertions(+), 7 deletions(-)

diff --git a/modules/features2d/include/opencv2/features2d.hpp b/modules/features2d/include/opencv2/features2d.hpp
index 1ba9f9b902..1f0a92c188 100644
--- a/modules/features2d/include/opencv2/features2d.hpp
+++ b/modules/features2d/include/opencv2/features2d.hpp
@@ -1404,15 +1404,15 @@ CV_EXPORTS_W void drawKeypoints( const Mat& image, const std::vector<KeyPoint>&
                                const Scalar& color=Scalar::all(-1), int flags=DrawMatchesFlags::DEFAULT );

 // Draws matches of keypints from two images on output image.
-CV_EXPORTS void drawMatches( const Mat& img1, const std::vector<KeyPoint>& keypoints1,
+CV_EXPORTS_W void drawMatches( const Mat& img1, const std::vector<KeyPoint>& keypoints1,
                              const Mat& img2, const std::vector<KeyPoint>& keypoints2,
-                             const std::vector<DMatch>& matches1to2, Mat& outImg,
+                             const std::vector<DMatch>& matches1to2, CV_OUT Mat& outImg,
                              const Scalar& matchColor=Scalar::all(-1), const Scalar& singlePointColor=Scalar::all(-1),
                              const std::vector<char>& matchesMask=std::vector<char>(), int flags=DrawMatchesFlags::DEFAULT );

-CV_EXPORTS void drawMatches( const Mat& img1, const std::vector<KeyPoint>& keypoints1,
+CV_EXPORTS_AS(drawMatchesKnn) void drawMatches( const Mat& img1, const std::vector<KeyPoint>& keypoints1,
                              const Mat& img2, const std::vector<KeyPoint>& keypoints2,
-                             const std::vector<std::vector<DMatch> >& matches1to2, Mat& outImg,
+                             const std::vector<std::vector<DMatch> >& matches1to2, CV_OUT Mat& outImg,
                              const Scalar& matchColor=Scalar::all(-1), const Scalar& singlePointColor=Scalar::all(-1),
                              const std::vector<std::vector<char> >& matchesMask=std::vector<std::vector<char> >(), int flags=DrawMatchesFlags::DEFAULT );

diff --git a/modules/python/src2/cv2.cpp b/modules/python/src2/cv2.cpp
index e68da59cf8..43fd536950 100644
--- a/modules/python/src2/cv2.cpp
+++ b/modules/python/src2/cv2.cpp
@@ -97,6 +97,7 @@ using namespace cv;
 typedef cv::softcascade::ChannelFeatureBuilder softcascade_ChannelFeatureBuilder;

 typedef std::vector<uchar> vector_uchar;
+typedef std::vector<char> vector_char;
 typedef std::vector<int> vector_int;
 typedef std::vector<float> vector_float;
 typedef std::vector<double> vector_double;
@@ -112,6 +113,8 @@ typedef std::vector<KeyPoint> vector_KeyPoint;
 typedef std::vector<Mat> vector_Mat;
 typedef std::vector<DMatch> vector_DMatch;
 typedef std::vector<String> vector_String;
+
+typedef std::vector<std::vector<char> > vector_vector_char;
 typedef std::vector<std::vector<Point> > vector_vector_Point;
 typedef std::vector<std::vector<Point2f> > vector_vector_Point2f;
 typedef std::vector<std::vector<Point3f> > vector_vector_Point3f;
@@ -830,7 +833,7 @@ template<typename _Tp> struct pyopencvVecConverter
     }
 };
-template
+template
 bool pyopencv_to(PyObject* obj, std::vector<_Tp>& value, const ArgInfo info)
 {
     return pyopencvVecConverter<_Tp>::to(obj, value, info);
 }
@@ -888,9 +891,9 @@ template<typename _Tp> static inline PyObject* pyopencv_from_generic_vec(const s

 template<typename _Tp> struct pyopencvVecConverter<std::vector<_Tp> >
 {
-    static bool to(PyObject* obj, std::vector<std::vector<_Tp> >& value, const char* name="")
+    static bool to(PyObject* obj, std::vector<std::vector<_Tp> >& value, const ArgInfo info)
     {
-        return pyopencv_to_generic_vec(obj, value, name);
+        return pyopencv_to_generic_vec(obj, value, info);
     }

     static PyObject* from(const std::vector<std::vector<_Tp> >& value)
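With ``CV_EXPORTS_W`` the first overload becomes ``cv2.drawMatches``, and ``CV_EXPORTS_AS(drawMatchesKnn)`` publishes the ``vector<vector<DMatch>>`` overload under a separate Python name, since Python cannot dispatch on the match-list type. A minimal usage sketch of the resulting bindings (assuming a build that includes this patch; the image filenames are placeholders)::

    import cv2

    img1 = cv2.imread('box.png', 0)            # query image, grayscale
    img2 = cv2.imread('box_in_scene.png', 0)   # train image, grayscale

    orb = cv2.ORB()
    kp1, des1 = orb.detectAndCompute(img1, None)
    kp2, des2 = orb.detectAndCompute(img2, None)

    bf = cv2.BFMatcher(cv2.NORM_HAMMING)

    # vector<DMatch> overload -> cv2.drawMatches
    matches = sorted(bf.match(des1, des2), key=lambda m: m.distance)
    out = cv2.drawMatches(img1, kp1, img2, kp2, matches[:20])

    # vector<vector<DMatch>> overload -> cv2.drawMatchesKnn
    knn_matches = bf.knnMatch(des1, des2, 2)
    out_knn = cv2.drawMatchesKnn(img1, kp1, img2, kp2, knn_matches)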
From 031c77f4eb87ba7ee7ec16415dcabf8d69c4cbc6 Mon Sep 17 00:00:00 2001
From: abidrahmank
Date: Thu, 11 Jul 2013 09:30:21 +0530
Subject: [PATCH 2/5] SIFT+SURF PyDocs

---
 modules/nonfree/doc/feature_detection.rst | 13 ++++++++++++-
 1 file changed, 12 insertions(+), 1 deletion(-)

diff --git a/modules/nonfree/doc/feature_detection.rst b/modules/nonfree/doc/feature_detection.rst
index 0b79560808..3286a91a44 100644
--- a/modules/nonfree/doc/feature_detection.rst
+++ b/modules/nonfree/doc/feature_detection.rst
@@ -16,6 +16,8 @@ The SIFT constructors.

 .. ocv:function:: SIFT::SIFT( int nfeatures=0, int nOctaveLayers=3, double contrastThreshold=0.04, double edgeThreshold=10, double sigma=1.6)

+.. ocv:pyfunction:: cv2.SIFT([, nfeatures[, nOctaveLayers[, contrastThreshold[, edgeThreshold[, sigma]]]]]) -> <SIFT object>
+
     :param nfeatures: The number of best features to retain. The features are ranked by their scores (measured in SIFT algorithm as the local contrast)

     :param nOctaveLayers: The number of layers in each octave. 3 is the value used in D. Lowe paper. The number of octaves is computed automatically from the image resolution.
@@ -33,6 +35,12 @@ Extract features and computes their descriptors using SIFT algorithm

 .. ocv:function:: void SIFT::operator()(InputArray img, InputArray mask, vector<KeyPoint>& keypoints, OutputArray descriptors, bool useProvidedKeypoints=false)

+.. ocv:pyfunction:: cv2.SIFT.detect(image[, mask]) -> keypoints
+
+.. ocv:pyfunction:: cv2.SIFT.compute(image, keypoints[, descriptors]) -> keypoints, descriptors
+
+.. ocv:pyfunction:: cv2.SIFT.detectAndCompute(image, mask[, descriptors[, useProvidedKeypoints]]) -> keypoints, descriptors
+
     :param img: Input 8-bit grayscale image

     :param mask: Optional input mask that marks the regions where we should detect features.
@@ -43,6 +51,7 @@ Extract features and computes their descriptors using SIFT algorithm

     :param useProvidedKeypoints: Boolean flag. If it is true, the keypoint detector is not run. Instead, the provided vector of keypoints is used and the algorithm just computes their descriptors.

+.. note:: The Python API provides three functions. The first finds keypoints only; the second computes descriptors for the keypoints you provide; the third detects keypoints and computes their descriptors in one call. If you want both keypoints and descriptors, use the third function directly, as in ``kp, des = cv2.SIFT.detectAndCompute(image, None)``.

 SURF
 ----
@@ -105,6 +114,8 @@ Detects keypoints and computes SURF descriptors for them.

 .. ocv:function:: void SURF::operator()(InputArray img, InputArray mask, vector<KeyPoint>& keypoints, OutputArray descriptors, bool useProvidedKeypoints=false)

 .. ocv:pyfunction:: cv2.SURF.detect(image[, mask]) -> keypoints
+.. ocv:pyfunction:: cv2.SURF.compute(image, keypoints[, descriptors]) -> keypoints, descriptors
+.. ocv:pyfunction:: cv2.SURF.detectAndCompute(image, mask[, descriptors[, useProvidedKeypoints]]) -> keypoints, descriptors

 .. ocv:cfunction:: void cvExtractSURF( const CvArr* image, const CvArr* mask, CvSeq** keypoints, CvSeq** descriptors, CvMemStorage* storage, CvSURFParams params )
@@ -325,4 +336,4 @@ The ``descriptors`` matrix is :math:`\texttt{nFeatures} \times \texttt{descripto

 The class ``SURF_OCL`` uses some buffers and provides access to it. All buffers can be safely released between function calls.

-.. seealso:: :ocv:class:`SURF`
\ No newline at end of file
+.. seealso:: :ocv:class:`SURF`
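A usage sketch of the Python interface documented above (assuming the nonfree module is built with these bindings; the filename is a placeholder and the Hessian threshold of 400 is an arbitrary choice)::

    import cv2

    img = cv2.imread('butterfly.jpg', 0)          # grayscale input

    sift = cv2.SIFT()
    kp = sift.detect(img, None)                   # keypoints only
    kp, des = sift.compute(img, kp)               # descriptors for the keypoints we pass in
    kp2, des2 = sift.detectAndCompute(img, None)  # detect and describe in one call

    surf = cv2.SURF(400)                          # Hessian threshold
    kp3, des3 = surf.detectAndCompute(img, None)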
From 204783810636e72502d51f3dca40d997465297c7 Mon Sep 17 00:00:00 2001
From: abidrahmank
Date: Thu, 11 Jul 2013 09:31:25 +0530
Subject: [PATCH 3/5] feature2d drawing functions PyDocs

---
 .../doc/drawing_function_of_keypoints_and_matches.rst | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/modules/features2d/doc/drawing_function_of_keypoints_and_matches.rst b/modules/features2d/doc/drawing_function_of_keypoints_and_matches.rst
index 2669ab9f27..cc9850b9b5 100644
--- a/modules/features2d/doc/drawing_function_of_keypoints_and_matches.rst
+++ b/modules/features2d/doc/drawing_function_of_keypoints_and_matches.rst
@@ -11,6 +11,10 @@ Draws the found matches of keypoints from two images.

 .. ocv:function:: void drawMatches( const Mat& img1, const vector<KeyPoint>& keypoints1, const Mat& img2, const vector<KeyPoint>& keypoints2, const vector<vector<DMatch> >& matches1to2, Mat& outImg, const Scalar& matchColor=Scalar::all(-1), const Scalar& singlePointColor=Scalar::all(-1), const vector<vector<char> >& matchesMask=vector<vector<char> >(), int flags=DrawMatchesFlags::DEFAULT )

+.. ocv:pyfunction:: cv2.drawMatches(img1, keypoints1, img2, keypoints2, matches1to2[, outImg[, matchColor[, singlePointColor[, matchesMask[, flags]]]]]) -> outImg
+
+.. ocv:pyfunction:: cv2.drawMatchesKnn(img1, keypoints1, img2, keypoints2, matches1to2[, outImg[, matchColor[, singlePointColor[, matchesMask[, flags]]]]]) -> outImg
+
     :param img1: First source image.
@@ -67,6 +71,8 @@ Draws keypoints.

 .. ocv:function:: void drawKeypoints( const Mat& image, const vector<KeyPoint>& keypoints, Mat& outImage, const Scalar& color=Scalar::all(-1), int flags=DrawMatchesFlags::DEFAULT )

+.. ocv:pyfunction:: cv2.drawKeypoints(image, keypoints[, outImage[, color[, flags]]]) -> outImage
+
     :param image: Source image.

     :param keypoints: Keypoints from the source image.
@@ -77,3 +83,4 @@ Draws keypoints.

     :param flags: Flags setting drawing features. Possible ``flags`` bit values are defined by ``DrawMatchesFlags``. See details above in :ocv:func:`drawMatches` .

+.. note:: In the Python API, the flags are exposed as ``cv2.DRAW_MATCHES_FLAGS_DEFAULT``, ``cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS``, ``cv2.DRAW_MATCHES_FLAGS_DRAW_OVER_OUTIMG`` and ``cv2.DRAW_MATCHES_FLAGS_NOT_DRAW_SINGLE_POINTS``.
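A short sketch of the drawing API from Python (assuming these bindings; the filename, the green colour and the rich-keypoints flag are arbitrary choices)::

    import cv2

    img = cv2.imread('butterfly.jpg', 0)   # grayscale input
    orb = cv2.ORB()
    kp = orb.detect(img, None)

    # DRAW_RICH_KEYPOINTS also draws the size and orientation of each keypoint.
    out = cv2.drawKeypoints(img, kp, None, (0, 255, 0),
                            cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
    cv2.imwrite('keypoints.jpg', out)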
From f91f369788d061963a77bc0acc17177795def3dc Mon Sep 17 00:00:00 2001
From: abidrahmank
Date: Thu, 11 Jul 2013 09:32:49 +0530
Subject: [PATCH 4/5] PyDocs for FAST, ORB etc

---
 .../doc/feature_detection_and_description.rst | 25 +++++++++++++++++++
 1 file changed, 25 insertions(+)

diff --git a/modules/features2d/doc/feature_detection_and_description.rst b/modules/features2d/doc/feature_detection_and_description.rst
index f265ab3c4f..7c283b016d 100644
--- a/modules/features2d/doc/feature_detection_and_description.rst
+++ b/modules/features2d/doc/feature_detection_and_description.rst
@@ -10,6 +10,11 @@ Detects corners using the FAST algorithm

 .. ocv:function:: void FAST( InputArray image, vector<KeyPoint>& keypoints, int threshold, bool nonmaxSupression=true )
 .. ocv:function:: void FAST( InputArray image, vector<KeyPoint>& keypoints, int threshold, bool nonmaxSupression, int type )

+.. ocv:pyfunction:: cv2.FastFeatureDetector([, threshold[, nonmaxSuppression]]) -> <FastFeatureDetector object>
+.. ocv:pyfunction:: cv2.FastFeatureDetector(threshold, nonmaxSuppression, type) -> <FastFeatureDetector object>
+.. ocv:pyfunction:: cv2.FastFeatureDetector.detect(image[, mask]) -> keypoints
+
+
     :param image: grayscale image where keypoints (corners) are detected.

     :param keypoints: keypoints detected on the image.
@@ -22,6 +27,9 @@ Detects corners using the FAST algorithm

 Detects corners using the FAST algorithm by [Rosten06]_.

+.. note:: In the Python API, the detector types are given as ``cv2.FAST_FEATURE_DETECTOR_TYPE_5_8``, ``cv2.FAST_FEATURE_DETECTOR_TYPE_7_12`` and ``cv2.FAST_FEATURE_DETECTOR_TYPE_9_16``. For corner detection, use the ``cv2.FAST.detect()`` method.
+
+
 .. [Rosten06] E. Rosten. Machine Learning for High-speed Corner Detection, 2006.
@@ -65,6 +73,9 @@ The ORB constructor

 .. ocv:function:: ORB::ORB(int nfeatures = 500, float scaleFactor = 1.2f, int nlevels = 8, int edgeThreshold = 31, int firstLevel = 0, int WTA_K=2, int scoreType=ORB::HARRIS_SCORE, int patchSize=31)

+.. ocv:pyfunction:: cv2.ORB([, nfeatures[, scaleFactor[, nlevels[, edgeThreshold[, firstLevel[, WTA_K[, scoreType[, patchSize]]]]]]]]) -> <ORB object>
+
+
     :param nfeatures: The maximum number of features to retain.

     :param scaleFactor: Pyramid decimation ratio, greater than 1. ``scaleFactor==2`` means the classical pyramid, where each next level has 4x less pixels than the previous, but such a big scale factor will degrade feature matching scores dramatically. On the other hand, too close to 1 scale factor will mean that to cover certain scale range you will need more pyramid levels and so the speed will suffer.
@@ -87,6 +98,11 @@ Finds keypoints in an image and computes their descriptors

 .. ocv:function:: void ORB::operator()(InputArray image, InputArray mask, vector<KeyPoint>& keypoints, OutputArray descriptors, bool useProvidedKeypoints=false ) const

+.. ocv:pyfunction:: cv2.ORB.detect(image[, mask]) -> keypoints
+.. ocv:pyfunction:: cv2.ORB.compute(image, keypoints[, descriptors]) -> keypoints, descriptors
+.. ocv:pyfunction:: cv2.ORB.detectAndCompute(image, mask[, descriptors[, useProvidedKeypoints]]) -> keypoints, descriptors
+
+
     :param image: The input 8-bit grayscale image.

     :param mask: The operation mask.
@@ -96,6 +112,7 @@ Finds keypoints in an image and computes their descriptors
     :param descriptors: The output descriptors. Pass ``cv::noArray()`` if you do not need it.

     :param useProvidedKeypoints: If it is true, then the method will use the provided vector of keypoints instead of detecting them.
+

 BRISK
 -----
@@ -111,6 +128,8 @@ The BRISK constructor

 .. ocv:function:: BRISK::BRISK(int thresh=30, int octaves=3, float patternScale=1.0f)

+.. ocv:pyfunction:: cv2.BRISK([, thresh[, octaves[, patternScale]]]) -> <BRISK object>
+
     :param thresh: FAST/AGAST detection threshold score.

     :param octaves: detection octaves. Use 0 to do single scale.
@@ -123,6 +142,8 @@ The BRISK constructor for a custom pattern

 .. ocv:function:: BRISK::BRISK(std::vector<float> &radiusList, std::vector<int> &numberList, float dMax=5.85f, float dMin=8.2f, std::vector<int> indexChange=std::vector<int>())

+.. ocv:pyfunction:: cv2.BRISK(radiusList, numberList[, dMax[, dMin[, indexChange]]]) -> <BRISK object>
+
     :param radiusList: defines the radii (in pixels) where the samples around a keypoint are taken (for keypoint scale 1).

     :param numberList: defines the number of sampling points on the sampling circle. Must be the same size as radiusList..
@@ -139,6 +160,10 @@ Finds keypoints in an image and computes their descriptors

 .. ocv:function:: void BRISK::operator()(InputArray image, InputArray mask, vector<KeyPoint>& keypoints, OutputArray descriptors, bool useProvidedKeypoints=false ) const

+.. ocv:pyfunction:: cv2.BRISK.detect(image[, mask]) -> keypoints
+.. ocv:pyfunction:: cv2.BRISK.compute(image, keypoints[, descriptors]) -> keypoints, descriptors
+.. ocv:pyfunction:: cv2.BRISK.detectAndCompute(image, mask[, descriptors[, useProvidedKeypoints]]) -> keypoints, descriptors
+
     :param image: The input 8-bit grayscale image.

     :param mask: The operation mask.
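A usage sketch of these detectors from Python (assuming these bindings are built; the filename and parameter values are placeholders)::

    import cv2

    img = cv2.imread('blox.jpg', 0)            # grayscale input

    # FAST is a detector only; it produces no descriptors.
    fast = cv2.FastFeatureDetector(25, True)   # threshold, nonmaxSuppression
    kp_fast = fast.detect(img, None)

    # The three-argument form also selects the detector type.
    fast916 = cv2.FastFeatureDetector(25, True, cv2.FAST_FEATURE_DETECTOR_TYPE_9_16)

    # ORB and BRISK detect keypoints and compute binary descriptors.
    orb = cv2.ORB(500)                         # nfeatures
    kp_orb, des_orb = orb.detectAndCompute(img, None)

    brisk = cv2.BRISK()
    kp_brisk, des_brisk = brisk.detectAndCompute(img, None)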
From 1923d87f61c7677aa2492f3cf0ae7fab729c8a46 Mon Sep 17 00:00:00 2001
From: abidrahmank
Date: Thu, 11 Jul 2013 09:33:32 +0530
Subject: [PATCH 5/5] PyDocs for common interface of feature2d

---
 .../doc/common_interfaces_of_descriptor_extractors.rst        | 4 ++++
 .../features2d/doc/common_interfaces_of_feature_detectors.rst | 4 ++++
 2 files changed, 8 insertions(+)

diff --git a/modules/features2d/doc/common_interfaces_of_descriptor_extractors.rst b/modules/features2d/doc/common_interfaces_of_descriptor_extractors.rst
index a1ac7b95e6..33f90a2fbe 100644
--- a/modules/features2d/doc/common_interfaces_of_descriptor_extractors.rst
+++ b/modules/features2d/doc/common_interfaces_of_descriptor_extractors.rst
@@ -57,6 +57,8 @@ Computes the descriptors for a set of keypoints detected in an image (first vari

 .. ocv:function:: void DescriptorExtractor::compute( const vector<Mat>& images, vector<vector<KeyPoint> >& keypoints, vector<Mat>& descriptors ) const

+.. ocv:pyfunction:: cv2.DescriptorExtractor_create.compute(image, keypoints[, descriptors]) -> keypoints, descriptors
+
     :param image: Image.

     :param images: Image set.
@@ -72,6 +74,8 @@ Creates a descriptor extractor by name.

 .. ocv:function:: Ptr<DescriptorExtractor> DescriptorExtractor::create( const String& descriptorExtractorType )

+.. ocv:pyfunction:: cv2.DescriptorExtractor_create(descriptorExtractorType) -> retval
+
     :param descriptorExtractorType: Descriptor extractor type.

 The current implementation supports the following types of a descriptor extractor:
diff --git a/modules/features2d/doc/common_interfaces_of_feature_detectors.rst b/modules/features2d/doc/common_interfaces_of_feature_detectors.rst
index 3bbaa8aca1..50b0883a03 100644
--- a/modules/features2d/doc/common_interfaces_of_feature_detectors.rst
+++ b/modules/features2d/doc/common_interfaces_of_feature_detectors.rst
@@ -44,6 +44,8 @@ Detects keypoints in an image (first variant) or image set (second variant).

 .. ocv:function:: void FeatureDetector::detect( const vector<Mat>& images, vector<vector<KeyPoint> >& keypoints, const vector<Mat>& masks=vector<Mat>() ) const

+.. ocv:pyfunction:: cv2.FeatureDetector_create.detect(image[, mask]) -> keypoints
+
     :param image: Image.

     :param images: Image set.
@@ -60,6 +62,8 @@ Creates a feature detector by its name.

 .. ocv:function:: Ptr<FeatureDetector> FeatureDetector::create( const String& detectorType )

+.. ocv:pyfunction:: cv2.FeatureDetector_create(detectorType) -> retval
+
     :param detectorType: Feature detector type.

 The following detector types are supported: