Merge remote-tracking branch 'upstream/3.4' into merge-3.4

pull/19916/head
Alexander Alekhin 4 years ago
commit cfb77091ca
  1. cmake/OpenCVModule.cmake (4 lines changed)
  2. doc/tools/html_functions.py (11 lines changed)
  3. modules/core/include/opencv2/core/types.hpp (26 lines changed)
  4. modules/core/src/precomp.hpp (2 lines changed)
  5. modules/core/src/system.cpp (13 lines changed)
  6. modules/features2d/include/opencv2/features2d.hpp (45 lines changed)
  7. modules/imgproc/include/opencv2/imgproc.hpp (57 lines changed)
  8. modules/ml/src/boost.cpp (1 line changed)
  9. modules/ml/src/rtrees.cpp (1 line changed)
  10. modules/ml/src/tree.cpp (46 lines changed)
  11. modules/ml/test/test_rtrees.cpp (19 lines changed)
  12. modules/python/package/cv2/__init__.py (2 lines changed)

@@ -880,7 +880,9 @@ macro(_ocv_create_module)
   ocv_compiler_optimization_process_sources(OPENCV_MODULE_${the_module}_SOURCES OPENCV_MODULE_${the_module}_DEPS_EXT ${the_module})
   set(__module_headers ${OPENCV_MODULE_${the_module}_HEADERS})
-  list(SORT __module_headers)  # fix headers order, useful for bindings
+  if(__module_headers)
+    list(SORT __module_headers)  # fix headers order, useful for bindings
+  endif()
   set(OPENCV_MODULE_${the_module}_HEADERS ${__module_headers} CACHE INTERNAL "List of header files for ${the_module}")
   set(OPENCV_MODULE_${the_module}_SOURCES ${OPENCV_MODULE_${the_module}_SOURCES} CACHE INTERNAL "List of source files for ${the_module}")

@@ -107,17 +107,10 @@ def add_signature_to_table(soup, table, signature, language, type):
     """ Add a signature to an html table"""
     row = soup.new_tag('tr')
     row.append(soup.new_tag('td', style='width: 20px;'))
-    if 'ret' in signature:
-        row.append(append(soup.new_tag('td'), signature['ret']))
-        row.append(append(soup.new_tag('td'), '='))
-    else:
-        row.append(soup.new_tag('td')) # return values
-        row.append(soup.new_tag('td')) # '='
     row.append(append(soup.new_tag('td'), signature['name'] + '('))
     row.append(append(soup.new_tag('td', **{'class': 'paramname'}), signature['arg']))
-    row.append(append(soup.new_tag('td'), ')'))
+    row.append(append(soup.new_tag('td'), ') -> '))
+    row.append(append(soup.new_tag('td'), signature['ret']))
     table.append(row)
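For reference, a minimal, self-contained sketch of the row this now builds, using bs4 directly. The append helper below is a stand-in (an assumption) for the one defined elsewhere in html_functions.py:

    from bs4 import BeautifulSoup

    def append(tag, content):
        # Assumed behavior of the doc tool's helper: append content
        # to the tag and return the tag itself, so calls can be chained.
        tag.append(content)
        return tag

    soup = BeautifulSoup('<table></table>', 'html.parser')
    signature = {'name': 'cv.add', 'arg': 'src1, src2[, dst]', 'ret': 'dst'}

    row = soup.new_tag('tr')
    row.append(soup.new_tag('td', style='width: 20px;'))
    row.append(append(soup.new_tag('td'), signature['name'] + '('))
    row.append(append(soup.new_tag('td', **{'class': 'paramname'}), signature['arg']))
    row.append(append(soup.new_tag('td'), ') -> '))
    row.append(append(soup.new_tag('td'), signature['ret']))
    soup.table.append(row)

    print(soup.table)  # one row rendering "cv.add(src1, src2[, dst]) -> dst"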

@@ -714,24 +714,24 @@ public:
     //! the default constructor
     CV_WRAP KeyPoint();
     /**
-    @param _pt x & y coordinates of the keypoint
-    @param _size keypoint diameter
-    @param _angle keypoint orientation
-    @param _response keypoint detector response on the keypoint (that is, strength of the keypoint)
-    @param _octave pyramid octave in which the keypoint has been detected
-    @param _class_id object id
+    @param pt x & y coordinates of the keypoint
+    @param size keypoint diameter
+    @param angle keypoint orientation
+    @param response keypoint detector response on the keypoint (that is, strength of the keypoint)
+    @param octave pyramid octave in which the keypoint has been detected
+    @param class_id object id
     */
-    KeyPoint(Point2f _pt, float _size, float _angle=-1, float _response=0, int _octave=0, int _class_id=-1);
+    KeyPoint(Point2f pt, float size, float angle=-1, float response=0, int octave=0, int class_id=-1);
     /**
     @param x x-coordinate of the keypoint
     @param y y-coordinate of the keypoint
-    @param _size keypoint diameter
-    @param _angle keypoint orientation
-    @param _response keypoint detector response on the keypoint (that is, strength of the keypoint)
-    @param _octave pyramid octave in which the keypoint has been detected
-    @param _class_id object id
+    @param size keypoint diameter
+    @param angle keypoint orientation
+    @param response keypoint detector response on the keypoint (that is, strength of the keypoint)
+    @param octave pyramid octave in which the keypoint has been detected
+    @param class_id object id
     */
-    CV_WRAP KeyPoint(float x, float y, float _size, float _angle=-1, float _response=0, int _octave=0, int _class_id=-1);
+    CV_WRAP KeyPoint(float x, float y, float size, float angle=-1, float response=0, int octave=0, int class_id=-1);
     size_t hash() const;
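The visible effect of dropping the underscore prefixes is in the Python bindings, where CV_WRAP parameter names become keyword-argument names. A sketch, assuming a cv2 build that includes this rename:

    import cv2 as cv

    # Keyword arguments now match the documented names; before the rename
    # the generated bindings exposed them as _size, _angle, _response, ...
    kp = cv.KeyPoint(x=10.0, y=20.0, size=3.0, angle=-1, response=0.0,
                     octave=0, class_id=-1)
    print(kp.pt, kp.size)  # (10.0, 20.0) 3.0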

@@ -375,6 +375,8 @@ cv::Mutex& getInitializationMutex();
 #define CV_SINGLETON_LAZY_INIT(TYPE, INITIALIZER) CV_SINGLETON_LAZY_INIT_(TYPE, INITIALIZER, instance)
 #define CV_SINGLETON_LAZY_INIT_REF(TYPE, INITIALIZER) CV_SINGLETON_LAZY_INIT_(TYPE, INITIALIZER, *instance)
 
+CV_EXPORTS void releaseTlsStorageThread();
+
 int cv_snprintf(char* buf, int len, const char* fmt, ...);
 int cv_vsnprintf(char* buf, int len, const char* fmt, va_list args);
 }

@@ -1520,6 +1520,9 @@ struct ThreadData
     size_t idx; // Thread index in TLS storage. This is not OS thread ID!
 };
+
+static bool g_isTlsStorageInitialized = false;
+
 // Main TLS storage class
 class TlsStorage
 {
@@ -1529,6 +1532,7 @@ public:
     {
         tlsSlots.reserve(32);
         threads.reserve(32);
+        g_isTlsStorageInitialized = true;
     }
     ~TlsStorage()
     {
@@ -1747,6 +1751,13 @@ static void WINAPI opencv_fls_destructor(void* pData)
 } // namespace details
 using namespace details;
 
+void releaseTlsStorageThread()
+{
+    if (!g_isTlsStorageInitialized)
+        return; // nothing to release, so prefer to avoid creation of new global structures
+    getTlsStorage().releaseThread();
+}
+
 TLSDataContainer::TLSDataContainer()
 {
     key_ = (int)getTlsStorage().reserveSlot(this); // Reserve key from TLS storage
@@ -1830,7 +1841,7 @@ BOOL WINAPI DllMain(HINSTANCE, DWORD fdwReason, LPVOID lpReserved)
         {
             // Not allowed to free resources if lpReserved is non-null
             // http://msdn.microsoft.com/en-us/library/windows/desktop/ms682583.aspx
-            cv::getTlsStorage().releaseThread();
+            releaseTlsStorageThread();
         }
     }
     return TRUE;
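The fix follows a general pattern: a lazily constructed singleton should not be created during teardown just so it can be released. A minimal sketch of the same guard, written in Python for brevity (all names here are hypothetical):

    _initialized = False  # flipped only when the singleton is actually built
    _storage = None

    def get_storage():
        # Lazy singleton: first use constructs the storage and sets the flag.
        global _storage, _initialized
        if _storage is None:
            _storage = {'threads': []}
            _initialized = True
        return _storage

    def release_thread():
        # Mirror of releaseTlsStorageThread(): if the storage was never
        # created there is nothing to release, and constructing it now,
        # during shutdown, is exactly what the guard avoids.
        if not _initialized:
            return
        get_storage()['threads'].clear()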

@@ -61,25 +61,11 @@ easily switch between different algorithms solving the same problem. This sectio
 matching descriptors that are represented as vectors in a multidimensional space. All objects that
 implement vector descriptor matchers inherit the DescriptorMatcher interface.
 
-@note
-   - An example explaining keypoint matching can be found at
-     opencv_source_code/samples/cpp/descriptor_extractor_matcher.cpp
-   - An example on descriptor matching evaluation can be found at
-     opencv_source_code/samples/cpp/detector_descriptor_matcher_evaluation.cpp
-   - An example on one to many image matching can be found at
-     opencv_source_code/samples/cpp/matching_to_many_images.cpp
-
 @defgroup features2d_draw Drawing Function of Keypoints and Matches
 @defgroup features2d_category Object Categorization
 
 This section describes approaches based on local 2D features and used to categorize objects.
 
-@note
-   - A complete Bag-Of-Words sample can be found at
-     opencv_source_code/samples/cpp/bagofwords_classification.cpp
-   - (Python) An example using the features2D framework to perform object categorization can be
-     found at opencv_source_code/samples/python/find_obj.py
-
 @defgroup feature2d_hal Hardware Acceleration Layer
 @{
     @defgroup features2d_hal_interface Interface
@@ -90,7 +76,7 @@ This section describes approaches based on local 2D features and used to categor
 
 namespace cv
 {
 
-//! @addtogroup features2d
+//! @addtogroup features2d_main
 //! @{
 
 // //! writes vector of keypoints to the file storage
@@ -241,9 +227,6 @@ the vector descriptor extractors inherit the DescriptorExtractor interface.
  */
 typedef Feature2D DescriptorExtractor;
 
-//! @addtogroup features2d_main
-//! @{
-
 /** @brief Class for implementing the wrapper which makes detectors and extractors to be affine invariant,
 described as ASIFT in @cite YM11 .
@@ -490,20 +473,20 @@ class CV_EXPORTS_W MSER : public Feature2D
 public:
     /** @brief Full constructor for %MSER detector
 
-    @param _delta it compares \f$(size_{i}-size_{i-delta})/size_{i-delta}\f$
-    @param _min_area prune the area which smaller than minArea
-    @param _max_area prune the area which bigger than maxArea
-    @param _max_variation prune the area have similar size to its children
-    @param _min_diversity for color image, trace back to cut off mser with diversity less than min_diversity
-    @param _max_evolution for color image, the evolution steps
-    @param _area_threshold for color image, the area threshold to cause re-initialize
-    @param _min_margin for color image, ignore too small margin
-    @param _edge_blur_size for color image, the aperture size for edge blur
+    @param delta it compares \f$(size_{i}-size_{i-delta})/size_{i-delta}\f$
+    @param min_area prune the area which smaller than minArea
+    @param max_area prune the area which bigger than maxArea
+    @param max_variation prune the area have similar size to its children
+    @param min_diversity for color image, trace back to cut off mser with diversity less than min_diversity
+    @param max_evolution for color image, the evolution steps
+    @param area_threshold for color image, the area threshold to cause re-initialize
+    @param min_margin for color image, ignore too small margin
+    @param edge_blur_size for color image, the aperture size for edge blur
     */
-    CV_WRAP static Ptr<MSER> create( int _delta=5, int _min_area=60, int _max_area=14400,
-          double _max_variation=0.25, double _min_diversity=.2,
-          int _max_evolution=200, double _area_threshold=1.01,
-          double _min_margin=0.003, int _edge_blur_size=5 );
+    CV_WRAP static Ptr<MSER> create( int delta=5, int min_area=60, int max_area=14400,
+          double max_variation=0.25, double min_diversity=.2,
+          int max_evolution=200, double area_threshold=1.01,
+          double min_margin=0.003, int edge_blur_size=5 );
/** @brief Detect %MSER regions

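As with KeyPoint above, the rename fixes the keyword-argument names generated for the Python bindings. A sketch, again assuming a build with this change:

    import cv2 as cv
    import numpy as np

    # The create() keywords now match the documented names
    # (previously _delta, _min_area, ... in the generated bindings).
    mser = cv.MSER_create(delta=5, min_area=60, max_area=14400)
    regions, boxes = mser.detectRegions(np.zeros((128, 128), np.uint8))
    print(len(regions))  # 0 on a blank image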
@@ -1220,7 +1220,7 @@ protected:
 struct CV_EXPORTS Vertex
 {
     Vertex();
-    Vertex(Point2f pt, bool _isvirtual, int _firstEdge=0);
+    Vertex(Point2f pt, bool isvirtual, int firstEdge=0);
     bool isvirtual() const;
     bool isfree() const;
@@ -1276,9 +1276,9 @@ public:
     ![image](pics/building_lsd.png)
 
-    @param _image A grayscale (CV_8UC1) input image. If only a roi needs to be selected, use:
+    @param image A grayscale (CV_8UC1) input image. If only a roi needs to be selected, use:
     `lsd_ptr-\>detect(image(roi), lines, ...); lines += Scalar(roi.x, roi.y, roi.x, roi.y);`
-    @param _lines A vector of Vec4i or Vec4f elements specifying the beginning and ending point of a line. Where
+    @param lines A vector of Vec4i or Vec4f elements specifying the beginning and ending point of a line. Where
     Vec4i/Vec4f is (x1, y1, x2, y2), point 1 is the start, point 2 - end. Returned lines are strictly
     oriented depending on the gradient.
     @param width Vector of widths of the regions, where the lines are found. E.g. Width of line.
@@ -1290,26 +1290,26 @@ public:
     - 1 corresponds to 0.1 mean false alarms
     This vector will be calculated only when the objects type is #LSD_REFINE_ADV.
     */
-    CV_WRAP virtual void detect(InputArray _image, OutputArray _lines,
+    CV_WRAP virtual void detect(InputArray image, OutputArray lines,
                         OutputArray width = noArray(), OutputArray prec = noArray(),
                         OutputArray nfa = noArray()) = 0;
 
     /** @brief Draws the line segments on a given image.
-    @param _image The image, where the lines will be drawn. Should be bigger or equal to the image,
+    @param image The image, where the lines will be drawn. Should be bigger or equal to the image,
     where the lines were found.
     @param lines A vector of the lines that needed to be drawn.
     */
-    CV_WRAP virtual void drawSegments(InputOutputArray _image, InputArray lines) = 0;
+    CV_WRAP virtual void drawSegments(InputOutputArray image, InputArray lines) = 0;
 
     /** @brief Draws two groups of lines in blue and red, counting the non overlapping (mismatching) pixels.
 
     @param size The size of the image, where lines1 and lines2 were found.
     @param lines1 The first group of lines that needs to be drawn. It is visualized in blue color.
     @param lines2 The second group of lines. They visualized in red color.
-    @param _image Optional image, where the lines will be drawn. The image should be color(3-channel)
+    @param image Optional image, where the lines will be drawn. The image should be color(3-channel)
     in order for lines1 and lines2 to be drawn in the above mentioned colors.
     */
-    CV_WRAP virtual int compareSegments(const Size& size, InputArray lines1, InputArray lines2, InputOutputArray _image = noArray()) = 0;
+    CV_WRAP virtual int compareSegments(const Size& size, InputArray lines1, InputArray lines2, InputOutputArray image = noArray()) = 0;
 
     virtual ~LineSegmentDetector() { }
 };
@@ -1319,22 +1319,21 @@ public:
 The LineSegmentDetector algorithm is defined using the standard values. Only advanced users may want
 to edit those, as to tailor it for their own application.
 
-@param _refine The way found lines will be refined, see #LineSegmentDetectorModes
-@param _scale The scale of the image that will be used to find the lines. Range (0..1].
-@param _sigma_scale Sigma for Gaussian filter. It is computed as sigma = _sigma_scale/_scale.
-@param _quant Bound to the quantization error on the gradient norm.
-@param _ang_th Gradient angle tolerance in degrees.
-@param _log_eps Detection threshold: -log10(NFA) \> log_eps. Used only when advance refinement
-is chosen.
-@param _density_th Minimal density of aligned region points in the enclosing rectangle.
-@param _n_bins Number of bins in pseudo-ordering of gradient modulus.
+@param refine The way found lines will be refined, see #LineSegmentDetectorModes
+@param scale The scale of the image that will be used to find the lines. Range (0..1].
+@param sigma_scale Sigma for Gaussian filter. It is computed as sigma = sigma_scale/scale.
+@param quant Bound to the quantization error on the gradient norm.
+@param ang_th Gradient angle tolerance in degrees.
+@param log_eps Detection threshold: -log10(NFA) \> log_eps. Used only when advance refinement is chosen.
+@param density_th Minimal density of aligned region points in the enclosing rectangle.
+@param n_bins Number of bins in pseudo-ordering of gradient modulus.
 
 @note Implementation has been removed due original code license conflict
 */
 CV_EXPORTS_W Ptr<LineSegmentDetector> createLineSegmentDetector(
-    int _refine = LSD_REFINE_STD, double _scale = 0.8,
-    double _sigma_scale = 0.6, double _quant = 2.0, double _ang_th = 22.5,
-    double _log_eps = 0, double _density_th = 0.7, int _n_bins = 1024);
+    int refine = LSD_REFINE_STD, double scale = 0.8,
+    double sigma_scale = 0.6, double quant = 2.0, double ang_th = 22.5,
+    double log_eps = 0, double density_th = 0.7, int n_bins = 1024);
 
 //! @} imgproc_feature
@@ -1533,7 +1532,7 @@ The unnormalized square box filter can be useful in computing local image statis
 variance and standard deviation around the neighborhood of a pixel.
 
 @param src input image
-@param dst output image of the same size and type as _src
+@param dst output image of the same size and type as src
 @param ddepth the output image depth (-1 to use src.depth())
 @param ksize kernel size
 @param anchor kernel anchor point. The default value of Point(-1, -1) denotes that the anchor is at the kernel
@@ -2107,8 +2106,8 @@ CV_EXPORTS_W void HoughLinesP( InputArray image, OutputArray lines,
 The function finds lines in a set of points using a modification of the Hough transform.
 
 @include snippets/imgproc_HoughLinesPointSet.cpp
 
-@param _point Input vector of points. Each vector must be encoded as a Point vector \f$(x,y)\f$. Type must be CV_32FC2 or CV_32SC2.
-@param _lines Output vector of found lines. Each vector is encoded as a vector<Vec3d> \f$(votes, rho, theta)\f$.
+@param point Input vector of points. Each vector must be encoded as a Point vector \f$(x,y)\f$. Type must be CV_32FC2 or CV_32SC2.
+@param lines Output vector of found lines. Each vector is encoded as a vector<Vec3d> \f$(votes, rho, theta)\f$.
 The larger the value of 'votes', the higher the reliability of the Hough line.
 @param lines_max Max count of hough lines.
 @param threshold Accumulator threshold parameter. Only those lines are returned that get enough
@@ -2120,7 +2119,7 @@ votes ( \f$>\texttt{threshold}\f$ )
 @param max_theta Maximum angle value of the accumulator in radians.
 @param theta_step Angle resolution of the accumulator in radians.
 */
-CV_EXPORTS_W void HoughLinesPointSet( InputArray _point, OutputArray _lines, int lines_max, int threshold,
+CV_EXPORTS_W void HoughLinesPointSet( InputArray point, OutputArray lines, int lines_max, int threshold,
                                       double min_rho, double max_rho, double rho_step,
                                       double min_theta, double max_theta, double theta_step );
@@ -4109,9 +4108,9 @@ Examples of how intersectConvexConvex works
 
 /** @brief Finds intersection of two convex polygons
 
-@param _p1 First polygon
-@param _p2 Second polygon
-@param _p12 Output polygon describing the intersecting area
+@param p1 First polygon
+@param p2 Second polygon
+@param p12 Output polygon describing the intersecting area
 @param handleNested When true, an intersection is found if one of the polygons is fully enclosed in the other.
 When false, no intersection is found. If the polygons share a side or the vertex of one polygon lies on an edge
 of the other, they are not considered nested and an intersection will be found regardless of the value of handleNested.
@@ -4120,8 +4119,8 @@ of the other, they are not considered nested and an intersection will be found r
 
 @note intersectConvexConvex doesn't confirm that both polygons are convex and will return invalid results if they aren't.
  */
-CV_EXPORTS_W float intersectConvexConvex( InputArray _p1, InputArray _p2,
-                                          OutputArray _p12, bool handleNested = true );
+CV_EXPORTS_W float intersectConvexConvex( InputArray p1, InputArray p2,
+                                          OutputArray p12, bool handleNested = true );
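A quick sketch of the renamed intersectConvexConvex from Python; convexHull is used to guarantee a proper vertex ordering, and the expected area assumes the two axis-aligned squares below:

    import cv2 as cv
    import numpy as np

    # Two overlapping 2x2 squares, offset by (1, 1).
    p1 = cv.convexHull(np.array([[0, 0], [2, 0], [2, 2], [0, 2]], np.float32))
    p2 = cv.convexHull(np.array([[1, 1], [3, 1], [3, 3], [1, 3]], np.float32))
    area, p12 = cv.intersectConvexConvex(p1, p2)
    print(area)  # expected 1.0: the overlap is the unit square [1,2]x[1,2]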
/** @example samples/cpp/fitellipse.cpp
An example using the fitEllipse technique

@@ -490,6 +490,7 @@ public:
     float predict( InputArray samples, OutputArray results, int flags ) const CV_OVERRIDE
     {
+        CV_CheckEQ(samples.cols(), getVarCount(), "");
         return impl.predict(samples, results, flags);
     }

@@ -480,6 +480,7 @@ public:
     float predict( InputArray samples, OutputArray results, int flags ) const CV_OVERRIDE
     {
         CV_TRACE_FUNCTION();
+        CV_CheckEQ(samples.cols(), getVarCount(), "");
         return impl.predict(samples, results, flags);
     }
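Both predict() overrides now validate the sample width before dispatching. A sketch of the effect from Python (the exact error text is not part of the contract):

    import cv2 as cv
    import numpy as np

    rt = cv.ml.RTrees_create()
    X = np.random.rand(10, 5).astype(np.float32)   # 5 features
    y = np.array([0, 0, 0, 0, 0, 1, 1, 1, 1, 1], np.int32)
    rt.train(X, cv.ml.ROW_SAMPLE, y)

    try:
        rt.predict(np.zeros((1, 3), np.float32))   # wrong feature count
    except cv.error as e:
        print('rejected:', e.code)  # raises instead of reading out of bounds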

@@ -43,6 +43,8 @@
 #include "precomp.hpp"
 #include <ctype.h>
 
+#include <opencv2/core/utils/logger.hpp>
+
 namespace cv {
 namespace ml {
@@ -1694,11 +1696,14 @@ void DTreesImpl::write( FileStorage& fs ) const
 void DTreesImpl::readParams( const FileNode& fn )
 {
     _isClassifier = (int)fn["is_classifier"] != 0;
-    /*int var_all = (int)fn["var_all"];
-    int var_count = (int)fn["var_count"];
-    int cat_var_count = (int)fn["cat_var_count"];
+    int varAll = (int)fn["var_all"];
+    int varCount = (int)fn["var_count"];
+    /*int cat_var_count = (int)fn["cat_var_count"];
     int ord_var_count = (int)fn["ord_var_count"];*/
+    if (varAll <= 0)
+        CV_Error(Error::StsParseError, "The field \"var_all\" of DTree classifier is missing or non-positive");
+
 
     FileNode tparams_node = fn["training_params"];
 
     TreeParams params0 = TreeParams();
@@ -1723,11 +1728,38 @@ void DTreesImpl::readParams( const FileNode& fn )
     readVectorOrMat(fn["var_idx"], varIdx);
     fn["var_type"] >> varType;
 
-    int format = 0;
-    fn["format"] >> format;
-    bool isLegacy = format < 3;
+    bool isLegacy = false;
+    if (fn["format"].empty()) // Export bug until OpenCV 3.2: https://github.com/opencv/opencv/pull/6314
+    {
+        if (!fn["cat_ofs"].empty())
+            isLegacy = false; // 2.4 doesn't store "cat_ofs"
+        else if (!fn["missing_subst"].empty())
+            isLegacy = false; // 2.4 doesn't store "missing_subst"
+        else if (!fn["class_labels"].empty())
+            isLegacy = false; // 2.4 doesn't store "class_labels"
+        else if ((int)varType.size() != varAll)
+            isLegacy = true; // 3.0+: https://github.com/opencv/opencv/blame/3.0.0/modules/ml/src/tree.cpp#L1576
+        else if (/*(int)varType.size() == varAll &&*/ varCount == varAll)
+            isLegacy = true;
+        else
+        {
+            // 3.0+:
+            // - https://github.com/opencv/opencv/blame/3.0.0/modules/ml/src/tree.cpp#L1552-L1553
+            // - https://github.com/opencv/opencv/blame/3.0.0/modules/ml/src/precomp.hpp#L296
+            isLegacy = !(varCount + 1 == varAll);
+        }
+        CV_LOG_INFO(NULL, "ML/DTrees: possible missing 'format' field due to bug of OpenCV export implementation. "
+            "Details: https://github.com/opencv/opencv/issues/5412. Consider re-exporting of saved ML model. "
+            "isLegacy = " << isLegacy);
+    }
+    else
+    {
+        int format = 0;
+        fn["format"] >> format;
+        CV_CheckGT(format, 0, "");
+        isLegacy = format < 3;
+    }
 
-    int varAll = (int)fn["var_all"];
     if (isLegacy && (int)varType.size() <= varAll)
     {
         std::vector<uchar> extendedTypes(varAll + 1, 0);

@@ -95,6 +95,25 @@ TEST(ML_RTrees, 11142_sample_weights_classification)
     EXPECT_GE(error_with_weights, error_without_weights);
 }
 
+TEST(ML_RTrees, bug_12974_throw_exception_when_predict_different_feature_count)
+{
+    int numFeatures = 5;
+    // create a 5 feature dataset and train the model
+    cv::Ptr<RTrees> model = RTrees::create();
+    Mat samples(10, numFeatures, CV_32F);
+    randu(samples, 0, 10);
+    Mat labels = (Mat_<int>(10,1) << 0,0,0,0,0,1,1,1,1,1);
+    cv::Ptr<TrainData> trainData = TrainData::create(samples, cv::ml::ROW_SAMPLE, labels);
+    model->train(trainData);
+    // try to predict on data which have fewer features - this should throw an exception
+    for(int i = 1; i < numFeatures - 1; ++i) {
+        Mat test(1, i, CV_32FC1);
+        ASSERT_THROW(model->predict(test), Exception);
+    }
+    // try to predict on data which have more features - this should also throw an exception
+    Mat test(1, numFeatures + 1, CV_32FC1);
+    ASSERT_THROW(model->predict(test), Exception);
+}
 
 }} // namespace

@@ -34,7 +34,7 @@ def bootstrap():
     import platform
     if DEBUG: print('OpenCV loader: os.name="{}" platform.system()="{}"'.format(os.name, str(platform.system())))
 
-    LOADER_DIR=os.path.dirname(os.path.abspath(__file__))
+    LOADER_DIR = os.path.dirname(os.path.abspath(os.path.realpath(__file__)))
 
     PYTHON_EXTENSIONS_PATHS = []
    BINARIES_PATHS = []
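The added os.path.realpath matters when the cv2 package directory is reached through a symlink: abspath alone preserves the link path, so sibling binaries would be located relative to the wrong directory. A small illustration with hypothetical paths:

    import os

    link = '/tmp/cv2'  # hypothetical symlink pointing at /opt/opencv/python/cv2
    print(os.path.abspath(link))                    # /tmp/cv2 (symlink preserved)
    print(os.path.realpath(link))                   # /opt/opencv/python/cv2 (resolved)
    print(os.path.dirname(os.path.realpath(link)))  # /opt/opencv/python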
