Merge remote-tracking branch 'upstream/3.4' into merge-3.4

pull/2510/head
Alexander Alekhin 5 years ago
commit 69c8689338
  1. modules/README.md (2 lines changed)
  2. modules/aruco/include/opencv2/aruco.hpp (22 lines changed)
  3. modules/xfeatures2d/README.md (2 lines changed)
  4. modules/xfeatures2d/doc/xfeatures2d.bib (11 lines changed)
  5. modules/xfeatures2d/include/opencv2/xfeatures2d.hpp (33 lines changed)
  6. modules/xfeatures2d/misc/python/pyopencv_sift.hpp (2 lines changed)
  7. modules/xfeatures2d/misc/python/shadow_sift.hpp (20 lines changed)
  8. modules/xfeatures2d/misc/python/test/test_sift_compatibility.py (24 lines changed)
  9. modules/xfeatures2d/perf/perf_sift.cpp (72 lines changed)
  10. modules/xfeatures2d/src/precomp.hpp (2 lines changed)
  11. modules/xfeatures2d/src/sift.cpp (1232 lines changed)
  12. modules/xfeatures2d/test/test_features2d.cpp (40 lines changed)
  13. modules/xfeatures2d/test/test_keypoints.cpp (6 lines changed)
  14. modules/xfeatures2d/test/test_rotation_and_scale_invariance.cpp (17 lines changed)

@@ -68,7 +68,7 @@ $ cmake -D OPENCV_EXTRA_MODULES_PATH=<opencv_contrib>/modules -D BUILD_opencv_<r
 - **tracking**: Vision Based Object Tracking -- Use and/or evaluate one of 5 different visual object tracking techniques.
-- **xfeatures2d**: Features2D extra -- Extra 2D Features Framework containing experimental and non-free 2D feature detector/descriptor algorithms. SURF, SIFT, BRIEF, Censure, Freak, LUCID, Daisy, Self-similar.
+- **xfeatures2d**: Features2D extra -- Extra 2D Features Framework containing experimental and non-free 2D feature detector/descriptor algorithms. SURF, BRIEF, Censure, Freak, LUCID, Daisy, Self-similar.
 - **ximgproc**: Extended Image Processing -- Structured Forests / Domain Transform Filter / Guided Filter / Adaptive Manifold Filter / Joint Bilateral Filter / Superpixels / Ridge Detection Filter.

@@ -99,7 +99,7 @@ enum CornerRefineMethod{
 * - maxMarkerPerimeterRate: determine maximum perimeter for marker contour to be detected. This
 * is defined as a rate with respect to the maximum dimension of the input image (default 4.0).
 * - polygonalApproxAccuracyRate: minimum accuracy during the polygonal approximation process to
-* determine which contours are squares.
+* determine which contours are squares. (default 0.03)
 * - minCornerDistanceRate: minimum distance between corners for detected markers relative to their
 * perimeter (default 0.05)
 * - minDistanceToBorder: minimum distance of any corner to the image border for detected markers
@@ -109,7 +109,7 @@ enum CornerRefineMethod{
 * of the two markers (default 0.05).
 * - cornerRefinementMethod: corner refinement method. (CORNER_REFINE_NONE, no refinement.
 * CORNER_REFINE_SUBPIX, do subpixel refinement. CORNER_REFINE_CONTOUR use contour points,
-* CORNER_REFINE_APRILTAG use the AprilTag2 approach)
+* CORNER_REFINE_APRILTAG use the AprilTag2 approach). (default CORNER_REFINE_NONE)
 * - cornerRefinementWinSize: window size for the corner refinement process (in pixels) (default 5).
 * - cornerRefinementMaxIterations: maximum number of iterations for stop criteria of the corner
 * refinement process (default 30).
@@ -117,7 +117,7 @@ enum CornerRefineMethod{
 * process (default: 0.1)
 * - markerBorderBits: number of bits of the marker border, i.e. marker border width (default 1).
 * - perspectiveRemovePixelPerCell: number of bits (per dimension) for each cell of the marker
-* when removing the perspective (default 8).
+* when removing the perspective (default 4).
 * - perspectiveRemoveIgnoredMarginPerCell: width of the margin of pixels on each cell not
 * considered for the determination of the cell bit. Represents the rate with respect to the total
 * size of the cell, i.e. perspectiveRemovePixelPerCell (default 0.13)
@@ -129,21 +129,21 @@ enum CornerRefineMethod{
 * than 128 or not) (default 5.0)
 * - errorCorrectionRate: error correction rate with respect to the maximum error correction capability
 * for each dictionary (default 0.6).
-* - aprilTagMinClusterPixels: reject quads containing too few pixels.
-* - aprilTagMaxNmaxima: how many corner candidates to consider when segmenting a group of pixels into a quad.
+* - aprilTagMinClusterPixels: reject quads containing too few pixels. (default 5)
+* - aprilTagMaxNmaxima: how many corner candidates to consider when segmenting a group of pixels into a quad. (default 10)
 * - aprilTagCriticalRad: Reject quads where pairs of edges have angles that are close to straight or close to
-* 180 degrees. Zero means that no quads are rejected. (In radians).
+* 180 degrees. Zero means that no quads are rejected. (In radians) (default 10*PI/180)
 * - aprilTagMaxLineFitMse: When fitting lines to the contours, what is the maximum mean squared error
 * allowed? This is useful in rejecting contours that are far from being quad shaped; rejecting
-* these quads "early" saves expensive decoding processing.
+* these quads "early" saves expensive decoding processing. (default 10.0)
 * - aprilTagMinWhiteBlackDiff: When we build our model of black & white pixels, we add an extra check that
-* the white model must be (overall) brighter than the black model. How much brighter? (in pixel values, [0,255]).
-* - aprilTagDeglitch: should the thresholded image be deglitched? Only useful for very noisy images
+* the white model must be (overall) brighter than the black model. How much brighter? (in pixel values, [0,255]). (default 5)
+* - aprilTagDeglitch: should the thresholded image be deglitched? Only useful for very noisy images. (default 0)
 * - aprilTagQuadDecimate: Detection of quads can be done on a lower-resolution image, improving speed at a
 * cost of pose accuracy and a slight decrease in detection rate. Decoding the binary payload is still
-* done at full resolution.
+* done at full resolution. (default 0.0)
 * - aprilTagQuadSigma: What Gaussian blur should be applied to the segmented image (used for quad detection)?
-* Parameter is the standard deviation in pixels. Very noisy images benefit from non-zero values (e.g. 0.8).
+* Parameter is the standard deviation in pixels. Very noisy images benefit from non-zero values (e.g. 0.8). (default 0.0)
 * - detectInvertedMarker: to check if there is a white marker. In order to generate a "white" marker just
 * invert a normal marker by using a tilde, ~markerImage. (default false)
 */
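A minimal sketch of how these parameters are set from Python, assuming the contrib-era cv.aruco API (cv.aruco.DetectorParameters_create) and a hypothetical input image "marker_scene.png"; any field not overridden keeps the defaults quoted above:

```python
import cv2 as cv

# Sketch: tune a few of the DetectorParameters documented above.
# "marker_scene.png" is a hypothetical input image.
dictionary = cv.aruco.Dictionary_get(cv.aruco.DICT_4X4_50)
params = cv.aruco.DetectorParameters_create()
params.polygonalApproxAccuracyRate = 0.03                # default noted above
params.cornerRefinementMethod = cv.aruco.CORNER_REFINE_APRILTAG
params.aprilTagQuadDecimate = 2.0                        # faster, slightly less accurate poses

img = cv.imread("marker_scene.png", cv.IMREAD_GRAYSCALE)
corners, ids, rejected = cv.aruco.detectMarkers(img, dictionary, parameters=params)
print(0 if ids is None else len(ids), "markers detected")
```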

@@ -5,4 +5,4 @@ Extra 2D Features Framework
 2. Non-free 2D feature algorithms
 Extra 2D Features Framework containing experimental and non-free 2D feature detector/descriptor algorithms:
-SURF, SIFT, BRIEF, Censure, Freak, LUCID, Daisy, Self-similar.
+SURF, BRIEF, Censure, Freak, LUCID, Daisy, Self-similar.

@@ -60,17 +60,6 @@
 year = {2016}
 }
-@article{Lowe04,
-title = {Distinctive image features from scale-invariant keypoints},
-author = {Lowe, David G},
-journal = {International journal of computer vision},
-volume = {60},
-number = {2},
-pages = {91--110},
-year = {2004},
-publisher = {Kluwer Academic Publishers}
-}
 @article{Lowry2018LOGOSLG,
 title = {LOGOS: Local Geometric Support for High-Outlier Spatial Verification},
 author = {Stephanie Lowry and Henrik Andreasson},

@@ -67,39 +67,6 @@ namespace cv
 namespace xfeatures2d
 {
-/** @brief Class for extracting keypoints and computing descriptors using the Scale Invariant Feature Transform
-(SIFT) algorithm by D. Lowe @cite Lowe04 .
-*/
-class CV_EXPORTS_W SIFT : public Feature2D
-{
-public:
-    /**
-    @param nfeatures The number of best features to retain. The features are ranked by their scores
-    (measured in the SIFT algorithm as the local contrast)
-    @param nOctaveLayers The number of layers in each octave. 3 is the value used in D. Lowe's paper. The
-    number of octaves is computed automatically from the image resolution.
-    @param contrastThreshold The contrast threshold used to filter out weak features in semi-uniform
-    (low-contrast) regions. The larger the threshold, the fewer features are produced by the detector.
-    @param edgeThreshold The threshold used to filter out edge-like features. Note that its meaning
-    is different from the contrastThreshold, i.e. the larger the edgeThreshold, the fewer features are
-    filtered out (more features are retained).
-    @param sigma The sigma of the Gaussian applied to the input image at octave \#0. If your image
-    is captured with a weak camera with soft lenses, you might want to reduce the number.
-    */
-    CV_WRAP static Ptr<SIFT> create(int nfeatures = 0, int nOctaveLayers = 3,
-        double contrastThreshold = 0.04, double edgeThreshold = 10,
-        double sigma = 1.6);
-};
-
-typedef SIFT SiftFeatureDetector;
-typedef SIFT SiftDescriptorExtractor;
-
 //! @addtogroup xfeatures2d_experiment
 //! @{
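This block is removed because the SIFT patent expired in March 2020 and the implementation moved to the main repository (see the issue linked in shadow_sift.hpp below), where the same factory signature is exposed as cv::SIFT. A minimal sketch of the post-move call, assuming OpenCV >= 4.4 (or 3.4.11 on the 3.4 branch):

```python
import cv2 as cv

# Post-migration usage: same parameters and defaults as the removed
# xfeatures2d::SIFT::create() above, now provided by the core features2d
# module (assumes OpenCV >= 4.4 / 3.4.11).
sift = cv.SIFT_create(nfeatures=0, nOctaveLayers=3,
                      contrastThreshold=0.04, edgeThreshold=10, sigma=1.6)
```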

@@ -0,0 +1,2 @@
+// Compatibility
+#include "shadow_sift.hpp"

@@ -0,0 +1,20 @@
+// Compatibility
+// SIFT is moved to the main repository
+
+namespace cv {
+namespace xfeatures2d {
+
+/** Use cv.SIFT_create() instead */
+CV_WRAP static inline
+Ptr<cv::SIFT> SIFT_create(int nfeatures = 0, int nOctaveLayers = 3,
+    double contrastThreshold = 0.04, double edgeThreshold = 10,
+    double sigma = 1.6)
+{
+    CV_LOG_ONCE_WARNING(NULL, "DEPRECATED: cv.xfeatures2d.SIFT_create() is deprecated due to SIFT transfer to the main repository. "
+        "https://github.com/opencv/opencv/issues/16736"
+    );
+    return SIFT::create(nfeatures, nOctaveLayers, contrastThreshold, edgeThreshold, sigma);
+}
+
+}} // namespace
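From Python, this shim keeps old contrib-style code working while steering users to the new name; a small sketch, assuming a build that includes opencv_contrib:

```python
import cv2 as cv

# Both calls construct the same core SIFT object; the xfeatures2d spelling
# goes through the shim above and logs a one-time deprecation warning.
sift_old = cv.xfeatures2d.SIFT_create()   # deprecated, kept for compatibility
sift_new = cv.SIFT_create()               # preferred spelling after the move
```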

@@ -0,0 +1,24 @@
+#!/usr/bin/env python
+
+# Python 2/3 compatibility
+from __future__ import print_function
+
+import os
+import numpy as np
+import cv2 as cv
+
+from tests_common import NewOpenCVTests
+
+class sift_compatibility_test(NewOpenCVTests):
+
+    def test_create(self):
+        sift = cv.xfeatures2d.SIFT_create()
+        self.assertFalse(sift is None)
+
+        img1 = np.zeros((100, 100, 3), dtype=np.uint8)
+        kp1_, des1_ = sift.detectAndCompute(img1, None)
+
+if __name__ == '__main__':
+    NewOpenCVTests.bootstrap()
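The committed test only checks construction and a detect-and-compute call on a blank image. A hypothetical extra assertion, not part of this commit, would also pin down the descriptor shape:

```python
import numpy as np
import cv2 as cv

# Hypothetical extension (not in the committed test): SIFT descriptors are
# 128-dimensional float32 vectors; a blank image may yield no keypoints,
# hence the None guard.
kp, des = cv.SIFT_create().detectAndCompute(np.zeros((100, 100, 3), np.uint8), None)
assert des is None or (des.dtype == np.float32 and des.shape[1] == 128)
```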

@ -1,72 +0,0 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
#include "perf_precomp.hpp"
namespace opencv_test { namespace {
typedef perf::TestBaseWithParam<std::string> sift;
#define SIFT_IMAGES \
"cv/detectors_descriptors_evaluation/images_datasets/leuven/img1.png",\
"stitching/a3.png"
PERF_TEST_P(sift, detect, testing::Values(SIFT_IMAGES))
{
string filename = getDataPath(GetParam());
Mat frame = imread(filename, IMREAD_GRAYSCALE);
ASSERT_FALSE(frame.empty()) << "Unable to load source image " << filename;
Mat mask;
declare.in(frame).time(90);
Ptr<SIFT> detector = SIFT::create();
vector<KeyPoint> points;
PERF_SAMPLE_BEGIN();
detector->detect(frame, points, mask);
PERF_SAMPLE_END();
SANITY_CHECK_NOTHING();
}
PERF_TEST_P(sift, extract, testing::Values(SIFT_IMAGES))
{
string filename = getDataPath(GetParam());
Mat frame = imread(filename, IMREAD_GRAYSCALE);
ASSERT_FALSE(frame.empty()) << "Unable to load source image " << filename;
Mat mask;
declare.in(frame).time(90);
Ptr<SIFT> detector = SIFT::create();
vector<KeyPoint> points;
Mat descriptors;
detector->detect(frame, points, mask);
PERF_SAMPLE_BEGIN();
detector->compute(frame, points, descriptors);
PERF_SAMPLE_END();
SANITY_CHECK_NOTHING();
}
PERF_TEST_P(sift, full, testing::Values(SIFT_IMAGES))
{
string filename = getDataPath(GetParam());
Mat frame = imread(filename, IMREAD_GRAYSCALE);
ASSERT_FALSE(frame.empty()) << "Unable to load source image " << filename;
Mat mask;
declare.in(frame).time(90);
Ptr<SIFT> detector = SIFT::create();
vector<KeyPoint> points;
Mat descriptors;
PERF_SAMPLE_BEGIN();
detector->detectAndCompute(frame, mask, points, descriptors, false);
PERF_SAMPLE_END();
SANITY_CHECK_NOTHING();
}
}} // namespace
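The three removed perf cases time keypoint detection, descriptor extraction, and the fused path separately. A rough Python analogue of the same three call patterns (on a synthetic image; this is not the C++ perf framework, just the API shape it exercises):

```python
import numpy as np
import cv2 as cv

# Rough analogue of the removed perf cases: "detect", "extract", and "full"
# on a synthetic noise image (assumes cv.SIFT_create from OpenCV >= 4.4).
img = np.random.randint(0, 256, (480, 640), dtype=np.uint8)
sift = cv.SIFT_create()

kp = sift.detect(img, None)                    # detect only
kp, des = sift.compute(img, kp)                # describe given keypoints
kp2, des2 = sift.detectAndCompute(img, None)   # fused detect + describe
```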

@@ -61,6 +61,4 @@
 #include "opencv2/core/private.hpp"
 #define USE_AVX2 (cv::checkHardwareSupport(CV_CPU_AVX2))
 #endif

File diff suppressed because it is too large (modules/xfeatures2d/src/sift.cpp, 1232 lines changed).

@@ -53,12 +53,6 @@ const string IMAGE_FILENAME = "tsukuba.png";
 namespace opencv_test { namespace {
-TEST( Features2d_Detector_SIFT, regression )
-{
-    CV_FeatureDetectorTest test( "detector-sift", SIFT::create() );
-    test.safe_run();
-}
 #ifdef OPENCV_ENABLE_NONFREE
 TEST( Features2d_Detector_SURF, regression )
 {
@@ -94,12 +88,6 @@ TEST( Features2d_Detector_Harris_Laplace_Affine, regression )
 /*
  * Descriptors
  */
-TEST( Features2d_DescriptorExtractor_SIFT, regression )
-{
-    CV_DescriptorExtractorTest<L1<float> > test( "descriptor-sift", 1.0f,
-        SIFT::create() );
-    test.safe_run();
-}
 #ifdef OPENCV_ENABLE_NONFREE
 TEST( Features2d_DescriptorExtractor_SURF, regression )
@@ -454,34 +442,6 @@ TEST(DISABLED_Features2d_SURF_using_mask, regression)
     FeatureDetectorUsingMaskTest test(SURF::create());
     test.safe_run();
 }
-TEST( XFeatures2d_DescriptorExtractor, batch )
-{
-    string path = string(cvtest::TS::ptr()->get_data_path() + "detectors_descriptors_evaluation/images_datasets/graf");
-    vector<Mat> imgs, descriptors;
-    vector<vector<KeyPoint> > keypoints;
-    int i, n = 6;
-    Ptr<SIFT> sift = SIFT::create();
-
-    for( i = 0; i < n; i++ )
-    {
-        string imgname = format("%s/img%d.png", path.c_str(), i+1);
-        Mat img = imread(imgname, 0);
-        imgs.push_back(img);
-    }
-
-    sift->detect(imgs, keypoints);
-    sift->compute(imgs, keypoints, descriptors);
-
-    ASSERT_EQ((int)keypoints.size(), n);
-    ASSERT_EQ((int)descriptors.size(), n);
-
-    for( i = 0; i < n; i++ )
-    {
-        EXPECT_GT((int)keypoints[i].size(), 100);
-        EXPECT_GT(descriptors[i].rows, 100);
-    }
-}
 #endif // NONFREE
 }} // namespace

@@ -121,12 +121,6 @@ TEST(Features2d_Detector_Keypoints_SURF, validation)
     CV_FeatureDetectorKeypointsTest test(xfeatures2d::SURF::create());
     test.safe_run();
 }
-TEST(Features2d_Detector_Keypoints_SIFT, validation)
-{
-    CV_FeatureDetectorKeypointsTest test(xfeatures2d::SIFT::create());
-    test.safe_run();
-}
 #endif // NONFREE

@@ -20,24 +20,16 @@ INSTANTIATE_TEST_CASE_P(SURF, DetectorRotationInvariance, Values(
     make_tuple(IMAGE_TSUKUBA, SURF::create(), 0.40f, 0.76f)
 ));
-INSTANTIATE_TEST_CASE_P(SIFT, DetectorRotationInvariance, Values(
-    make_tuple(IMAGE_TSUKUBA, SIFT::create(), 0.45f, 0.70f)
-));
 INSTANTIATE_TEST_CASE_P(SURF, DescriptorRotationInvariance, Values(
     make_tuple(IMAGE_TSUKUBA, SURF::create(), SURF::create(), 0.83f)
 ));
-INSTANTIATE_TEST_CASE_P(SIFT, DescriptorRotationInvariance, Values(
-    make_tuple(IMAGE_TSUKUBA, SIFT::create(), SIFT::create(), 0.98f)
-));
+#endif // NONFREE
 INSTANTIATE_TEST_CASE_P(LATCH, DescriptorRotationInvariance, Values(
     make_tuple(IMAGE_TSUKUBA, SIFT::create(), LATCH::create(), 0.98f)
 ));
-#endif // NONFREE
 INSTANTIATE_TEST_CASE_P(DAISY, DescriptorRotationInvariance, Values(
     make_tuple(IMAGE_TSUKUBA,
         BRISK::create(),
BRISK::create(),
@@ -159,16 +151,9 @@ INSTANTIATE_TEST_CASE_P(SURF, DetectorScaleInvariance, Values(
     make_tuple(IMAGE_BIKES, SURF::create(), 0.64f, 0.84f)
 ));
-INSTANTIATE_TEST_CASE_P(SIFT, DetectorScaleInvariance, Values(
-    make_tuple(IMAGE_BIKES, SIFT::create(), 0.55f, 0.99f)
-));
 INSTANTIATE_TEST_CASE_P(SURF, DescriptorScaleInvariance, Values(
     make_tuple(IMAGE_BIKES, SURF::create(), SURF::create(), 0.7f)
 ));
-INSTANTIATE_TEST_CASE_P(SIFT, DescriptorScaleInvariance, Values(
-    make_tuple(IMAGE_BIKES, SIFT::create(), SIFT::create(), 0.3f)
-));
 #endif // NONFREE
