Merge remote-tracking branch 'upstream/3.4' into merge-3.4

pull/17273/head
Alexander Alekhin 5 years ago
commit 06bff34a6b
  1. doc/js_tutorials/js_setup/js_setup/js_setup.markdown (2 changed lines)
  2. doc/js_tutorials/js_setup/js_usage/js_usage.markdown (4 changed lines)
  3. doc/py_tutorials/py_setup/py_intro/py_intro.markdown (4 changed lines)
  4. doc/tutorials/calib3d/table_of_content_calib3d.markdown (8 changed lines)
  5. doc/tutorials/core/table_of_content_core.markdown (8 changed lines)
  6. doc/tutorials/dnn/table_of_content_dnn.markdown (14 changed lines)
  7. doc/tutorials/features2d/table_of_content_features2d.markdown (6 changed lines)
  8. doc/tutorials/gpu/table_of_content_gpu.markdown (4 changed lines)
  9. doc/tutorials/imgcodecs/table_of_content_highgui.markdown (2 changed lines)
  10. doc/tutorials/imgproc/table_of_content_imgproc.markdown (4 changed lines)
  11. doc/tutorials/introduction/documenting_opencv/documentation_tutorial.markdown (2 changed lines)
  12. doc/tutorials/introduction/windows_visual_studio_image_watch/windows_visual_studio_image_watch.markdown (4 changed lines)
  13. doc/tutorials/ios/table_of_content_ios.markdown (6 changed lines)
  14. doc/tutorials/stitching/table_of_content_stitching.markdown (2 changed lines)
  15. doc/tutorials/videoio/table_of_content_videoio.markdown (8 changed lines)
  16. modules/calib3d/misc/java/test/Calib3dTest.java (28 changed lines)
  17. modules/calib3d/test/test_fisheye.cpp (45 changed lines)
  18. modules/core/src/matrix_expressions.cpp (2 changed lines)
  19. modules/dnn/src/onnx/onnx_graph_simplifier.cpp (84 changed lines)
  20. modules/dnn/src/onnx/onnx_importer.cpp (19 changed lines)
  21. modules/dnn/src/tensorflow/tf_importer.cpp (10 changed lines)
  22. modules/dnn/test/test_onnx_importer.cpp (7 changed lines)
  23. modules/dnn/test/test_tf_importer.cpp (15 changed lines)
  24. modules/features2d/CMakeLists.txt (3 changed lines)
  25. modules/features2d/src/sift.dispatch.cpp (540 changed lines)
  26. modules/features2d/src/sift.simd.hpp (495 changed lines)
  27. modules/imgproc/doc/pics/colorscale_deepgreen.jpg (binary)
  28. modules/imgproc/include/opencv2/imgproc.hpp (3 changed lines)
  29. modules/imgproc/src/colormap.cpp (23 changed lines)
  30. modules/videoio/src/cap_dshow.cpp (1 changed line)
  31. modules/videoio/src/cap_mfx_common.cpp (43 changed lines)
  32. modules/videoio/src/cap_mfx_common.hpp (32 changed lines)
  33. modules/videoio/src/cap_mfx_reader.cpp (2 changed lines)
  34. modules/videoio/src/cap_mfx_writer.cpp (18 changed lines)

@@ -1,6 +1,8 @@
Build OpenCV.js {#tutorial_js_setup}
===============================
@note
You don't have to build your own copy if you simply want to start using it. Refer to the Using OpenCV.js tutorial for steps on getting a prebuilt copy from our releases or online documentation.
Installing Emscripten
-----------------------------

@@ -4,7 +4,7 @@ Using OpenCV.js {#tutorial_js_usage}
Steps
-----
In this tutorial, you will learn how to include and start to use `opencv.js` inside a web page.
In this tutorial, you will learn how to include and start to use `opencv.js` inside a web page. You can get a copy of `opencv.js` from `opencv-{VERSION_NUMBER}-docs.zip` in each [release](https://github.com/opencv/opencv/releases), or simply download the prebuilt script from the online documentation at "https://docs.opencv.org/{VERSION_NUMBER}/opencv.js" (for example, [https://docs.opencv.org/3.4.0/opencv.js](https://docs.opencv.org/3.4.0/opencv.js); use `master` if you want the latest build). You can also build your own copy by following the tutorial on Build OpenCV.js.
### Create a web page
@@ -44,7 +44,7 @@ To run this web page, copy the content above and save to a local index.html file
Set the URL of `opencv.js` as the `src` attribute of the \<script\> tag.
@note For this tutorial, we host `opencv.js` in the same folder as index.html.
@note For this tutorial, we host `opencv.js` in the same folder as index.html. You can also choose to use the URL of the prebuilt `opencv.js` in our online documentation.
Example for synchronous loading:
@code{.js}

@@ -80,7 +80,7 @@ Additional Resources
--------------------
-# A Quick guide to Python - [A Byte of Python](http://swaroopch.com/notes/python/)
2. [Basic Numpy Tutorials](http://wiki.scipy.org/Tentative_NumPy_Tutorial)
3. [Numpy Examples List](http://wiki.scipy.org/Numpy_Example_List)
2. [NumPy Quickstart tutorial](https://numpy.org/devdocs/user/quickstart.html)
3. [NumPy Reference](https://numpy.org/devdocs/reference/index.html#reference)
4. [OpenCV Documentation](http://docs.opencv.org/)
5. [OpenCV Forum](http://answers.opencv.org/questions/)

@@ -5,6 +5,8 @@ Although we get most of our images in a 2D format they do come from a 3D world.
- @subpage tutorial_camera_calibration_pattern
*Languages:* Python
*Compatibility:* \> OpenCV 2.0
*Author:* Laurent Berger
@@ -13,6 +15,8 @@ Although we get most of our images in a 2D format they do come from a 3D world.
- @subpage tutorial_camera_calibration_square_chess
*Languages:* C++
*Compatibility:* \> OpenCV 2.0
*Author:* Victor Eruhimov
@@ -21,6 +25,8 @@ Although we get most of our images in a 2D format they do come from a 3D world.
- @subpage tutorial_camera_calibration
*Languages:* C++
*Compatibility:* \> OpenCV 4.0
*Author:* Bernát Gábor
@@ -31,6 +37,8 @@ Although we get most of our images in a 2D format they do come from a 3D world.
- @subpage tutorial_real_time_pose
*Languages:* C++
*Compatibility:* \> OpenCV 2.0
*Author:* Edgar Riba

@@ -6,6 +6,8 @@ understanding how to manipulate the images on a pixel level.
- @subpage tutorial_mat_the_basic_image_container
*Languages:* C++
*Compatibility:* \> OpenCV 2.0
*Author:* Bernát Gábor
@@ -15,6 +17,8 @@ understanding how to manipulate the images on a pixel level.
- @subpage tutorial_how_to_scan_images
*Languages:* C++
*Compatibility:* \> OpenCV 2.0
*Author:* Bernát Gábor
@@ -75,6 +79,8 @@ understanding how to manipulate the images on a pixel level.
- @subpage tutorial_file_input_output_with_xml_yml
*Languages:* C++, Python
*Compatibility:* \> OpenCV 2.0
*Author:* Bernát Gábor
@@ -84,6 +90,8 @@ understanding how to manipulate the images on a pixel level.
- @subpage tutorial_how_to_use_OpenCV_parallel_for_
*Languages:* C++
*Compatibility:* \>= OpenCV 2.4.3
You will see how to use the OpenCV parallel_for_ to easily parallelize your code.

@@ -3,6 +3,8 @@ Deep Neural Networks (dnn module) {#tutorial_table_of_content_dnn}
- @subpage tutorial_dnn_googlenet
*Languages:* C++
*Compatibility:* \> OpenCV 3.3
*Author:* Vitaliy Lyudvichenko
@@ -11,6 +13,8 @@ Deep Neural Networks (dnn module) {#tutorial_table_of_content_dnn}
- @subpage tutorial_dnn_halide
*Languages:* Halide
*Compatibility:* \> OpenCV 3.3
*Author:* Dmitry Kurtaev
@@ -19,6 +23,8 @@ Deep Neural Networks (dnn module) {#tutorial_table_of_content_dnn}
- @subpage tutorial_dnn_halide_scheduling
*Languages:* Halide
*Compatibility:* \> OpenCV 3.3
*Author:* Dmitry Kurtaev
@@ -27,6 +33,8 @@ Deep Neural Networks (dnn module) {#tutorial_table_of_content_dnn}
- @subpage tutorial_dnn_android
*Languages:* Java
*Compatibility:* \> OpenCV 3.3
*Author:* Dmitry Kurtaev
@@ -35,6 +43,8 @@ Deep Neural Networks (dnn module) {#tutorial_table_of_content_dnn}
- @subpage tutorial_dnn_yolo
*Languages:* C++, Python
*Compatibility:* \> OpenCV 3.3.1
*Author:* Alessandro de Oliveira Faria
@@ -43,6 +53,8 @@ Deep Neural Networks (dnn module) {#tutorial_table_of_content_dnn}
- @subpage tutorial_dnn_javascript
*Languages:* JavaScript
*Compatibility:* \> OpenCV 3.3.1
*Author:* Dmitry Kurtaev
@@ -51,6 +63,8 @@ Deep Neural Networks (dnn module) {#tutorial_table_of_content_dnn}
- @subpage tutorial_dnn_custom_layers
*Languages:* C++, Python
*Compatibility:* \> OpenCV 3.4.1
*Author:* Dmitry Kurtaev

@@ -89,6 +89,8 @@ OpenCV.
- @subpage tutorial_detection_of_planar_objects
*Languages:* C++
*Compatibility:* \> OpenCV 2.0
*Author:* Victor Eruhimov
@@ -108,6 +110,8 @@ OpenCV.
- @subpage tutorial_akaze_tracking
*Languages:* C++
*Compatibility:* \> OpenCV 3.0
*Author:* Fedor Morozov
@@ -116,6 +120,8 @@ OpenCV.
- @subpage tutorial_homography
*Languages:* C++, Java, Python
*Compatibility:* \> OpenCV 3.0
This tutorial will explain the basic concepts of the homography with some

@@ -7,6 +7,8 @@ run the OpenCV algorithms.
- @subpage tutorial_gpu_basics_similarity
*Languages:* C++
*Compatibility:* \> OpenCV 2.0
*Author:* Bernát Gábor
@@ -17,6 +19,8 @@ run the OpenCV algorithms.
- @subpage tutorial_gpu_thrust_interop
*Languages:* C++
*Compatibility:* \>= OpenCV 3.0
This tutorial will show you how to wrap a GpuMat into a thrust iterator in order to be able to

@@ -5,6 +5,8 @@ This section contains tutorials about how to read/save your image files.
- @subpage tutorial_raster_io_gdal
*Languages:* C++
*Compatibility:* \> OpenCV 2.0
*Author:* Marvin Smith

@@ -15,6 +15,8 @@ In this section you will learn about the image processing (manipulation) functio
- @subpage tutorial_random_generator_and_text
*Languages:* C++
*Compatibility:* \> OpenCV 2.0
*Author:* Ana Huamán
@@ -333,7 +335,7 @@ In this section you will learn about the image processing (manipulation) functio
- @subpage tutorial_anisotropic_image_segmentation_by_a_gst
*Languages:* C++
*Languages:* C++, Python
*Compatibility:* \> OpenCV 2.0

@@ -690,6 +690,6 @@ References {#tutorial_documentation_refs}
[Documenting basics]: http://www.doxygen.nl/manual/docblocks.html
[Markdown support]: http://www.doxygen.nl/manual/markdown.html
[Formulas support]: http://www.doxygen.nl/manual/formulas.html
[Supported formula commands]: http://docs.mathjax.org/en/latest/tex.html#supported-latex-commands
[Supported formula commands]: http://docs.mathjax.org/en/latest/input/tex/macros/index.html
[Command reference]: http://www.doxygen.nl/manual/commands.html
[Google Scholar]: http://scholar.google.ru/

@@ -18,8 +18,8 @@ This tutorial assumes that you have the following available:
Installation
------------
[Download](http://go.microsoft.com/fwlink/?LinkId=285460) the Image Watch installer. The installer
comes in a single file with extension .vsix (*Visual Studio Extension*). To launch it, simply
Download the Image Watch installer. ([Visual Studio 2019](https://marketplace.visualstudio.com/items?itemName=VisualCPPTeam.ImageWatch2019) | [Visual Studio 2017](https://marketplace.visualstudio.com/items?itemName=VisualCPPTeam.ImageWatch2017) | [Visual Studio 2012, 2013, 2015](https://marketplace.visualstudio.com/items?itemName=VisualCPPTeam.ImageWatch))
The installer comes in a single file with extension .vsix (*Visual Studio Extension*). To launch it, simply
double-click on the .vsix file in Windows Explorer. When the installer has finished, make sure to
restart Visual Studio to complete the installation.

@@ -3,6 +3,8 @@ OpenCV iOS {#tutorial_table_of_content_ios}
- @subpage tutorial_hello
*Languages:* Objective-C++
*Compatibility:* \> OpenCV 2.4.3
*Author:* Charu Hans
@@ -11,6 +13,8 @@ OpenCV iOS {#tutorial_table_of_content_ios}
- @subpage tutorial_image_manipulation
*Languages:* Objective-C++
*Compatibility:* \> OpenCV 2.4.3
*Author:* Charu Hans
@@ -19,6 +23,8 @@ OpenCV iOS {#tutorial_table_of_content_ios}
- @subpage tutorial_video_processing
*Languages:* Objective-C++
*Compatibility:* \> OpenCV 2.4.3
*Author:* Eduard Feicho

@@ -7,6 +7,8 @@ create a photo panorama or you want to stitch scans.
- @subpage tutorial_stitcher
*Languages:* C++
*Compatibility:* \>= OpenCV 3.2
*Author:* Jiri Horner

@@ -5,6 +5,8 @@ This section contains tutorials about how to read/save your video files.
- @subpage tutorial_video_input_psnr_ssim
*Languages:* C++, Python
*Compatibility:* \> OpenCV 2.0
*Author:* Bernát Gábor
@@ -14,10 +16,16 @@ This section contains tutorials about how to read/save your video files.
- @subpage tutorial_video_write
*Languages:* C++
*Compatibility:* \> OpenCV 2.0
*Author:* Bernát Gábor
- @subpage tutorial_kinect_openni
*Languages:* C++
- @subpage tutorial_intelperc
*Languages:* C++

@@ -818,4 +818,32 @@ public class Calib3dTest extends OpenCVTestCase {
assertTrue(src.toList().get(i).equals(dst.toList().get(i)));
}
}
public void testEstimateNewCameraMatrixForUndistortRectify() {
Mat K = new Mat().eye(3, 3, CvType.CV_64FC1);
Mat K_new = new Mat().eye(3, 3, CvType.CV_64FC1);
Mat K_new_truth = new Mat().eye(3, 3, CvType.CV_64FC1);
Mat D = new Mat().zeros(4, 1, CvType.CV_64FC1);
K.put(0,0,600.4447738238429);
K.put(1,1,578.9929805505851);
K.put(0,2,992.0642578801213);
K.put(1,2,549.2682624212172);
D.put(0,0,-0.05090103223466704);
D.put(1,0,0.030944413642173308);
D.put(2,0,-0.021509225493198905);
D.put(3,0,0.0043378096628297145);
K_new_truth.put(0,0, 387.4809086880343);
K_new_truth.put(0,2, 1036.669802754649);
K_new_truth.put(1,1, 373.6375700303157);
K_new_truth.put(1,2, 538.8373261247601);
Calib3d.fisheye_estimateNewCameraMatrixForUndistortRectify(K,D,new Size(1920,1080),
new Mat().eye(3, 3, CvType.CV_64F), K_new, 0.0, new Size(1920,1080));
assertMatEqual(K_new, K_new_truth, EPS);
}
}

@@ -656,6 +656,51 @@ TEST_F(fisheyeTest, CalibrationWithDifferentPointsNumber)
cv::noArray(), cv::noArray(), flag, cv::TermCriteria(3, 20, 1e-6));
}
TEST_F(fisheyeTest, estimateNewCameraMatrixForUndistortRectify)
{
cv::Size size(1920, 1080);
cv::Mat K_fullhd(3, 3, cv::DataType<double>::type);
K_fullhd.at<double>(0, 0) = 600.44477382;
K_fullhd.at<double>(0, 1) = 0.0;
K_fullhd.at<double>(0, 2) = 992.06425788;
K_fullhd.at<double>(1, 0) = 0.0;
K_fullhd.at<double>(1, 1) = 578.99298055;
K_fullhd.at<double>(1, 2) = 549.26826242;
K_fullhd.at<double>(2, 0) = 0.0;
K_fullhd.at<double>(2, 1) = 0.0;
K_fullhd.at<double>(2, 2) = 1.0;
cv::Mat K_new_truth(3, 3, cv::DataType<double>::type);
K_new_truth.at<double>(0, 0) = 387.4809086880343;
K_new_truth.at<double>(0, 1) = 0.0;
K_new_truth.at<double>(0, 2) = 1036.669802754649;
K_new_truth.at<double>(1, 0) = 0.0;
K_new_truth.at<double>(1, 1) = 373.6375700303157;
K_new_truth.at<double>(1, 2) = 538.8373261247601;
K_new_truth.at<double>(2, 0) = 0.0;
K_new_truth.at<double>(2, 1) = 0.0;
K_new_truth.at<double>(2, 2) = 1.0;
cv::Mat D_fullhd(4, 1, cv::DataType<double>::type);
D_fullhd.at<double>(0, 0) = -0.05090103223466704;
D_fullhd.at<double>(1, 0) = 0.030944413642173308;
D_fullhd.at<double>(2, 0) = -0.021509225493198905;
D_fullhd.at<double>(3, 0) = 0.0043378096628297145;
cv::Mat E = cv::Mat::eye(3, 3, cv::DataType<double>::type);
cv::Mat K_new(3, 3, cv::DataType<double>::type);
cv::fisheye::estimateNewCameraMatrixForUndistortRectify(K_fullhd, D_fullhd, size, E, K_new, 0.0, size);
EXPECT_MAT_NEAR(K_new, K_new_truth, 1e-6);
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// fisheyeTest::

@@ -1329,7 +1329,7 @@ void MatOp_AddEx::assign(const MatExpr& e, Mat& m, int _type) const
}
else if( e.s.isReal() && (dst.data != m.data || fabs(e.alpha) != 1))
{
if (e.a.channels() > 1)
if (e.a.channels() > 1 && e.s[0] != 0.0)
CV_LOG_ONCE_WARNING(NULL, "OpenCV/MatExpr: processing of multi-channel arrays might be changed in the future: "
"https://github.com/opencv/opencv/issues/16739");
e.a.convertTo(m, _type, e.alpha, e.s[0]);
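A minimal sketch (values illustrative, not from the patch) of why the warning is now gated on e.s[0] != 0.0: convertTo adds the scalar's first component to every channel of a multi-channel matrix, which is the ambiguity tracked in issue 16739; when that component is zero there is nothing to warn about.

#include <opencv2/core.hpp>

int main()
{
    cv::Mat a(2, 2, CV_32FC3, cv::Scalar(1, 2, 3));
    cv::Mat warned = 2 * a + 5; // s = (5,0,0,0): convertTo adds 5 to all three channels
    cv::Mat silent = 2 * a;     // s[0] == 0: no channel ambiguity, so no warning now
    return 0;
}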

@@ -61,27 +61,28 @@ public:
ONNXGraphWrapper(opencv_onnx::GraphProto& _net) : net(_net)
{
numInputs = net.input_size();
numInitializers = net.initializer_size();
}
virtual Ptr<ImportNodeWrapper> getNode(int idx) const CV_OVERRIDE
{
opencv_onnx::NodeProto* node = 0;
if (idx >= numInputs)
node = net.mutable_node(idx - numInputs);
if (idx >= numInputs + numInitializers)
node = net.mutable_node(idx - numInputs - numInitializers);
return makePtr<ONNXNodeWrapper>(node);
}
virtual int getNumNodes() const CV_OVERRIDE
{
return numInputs + net.node_size();
return numInputs + numInitializers + net.node_size();
}
virtual int getNumOutputs(int nodeId) const CV_OVERRIDE
{
if (nodeId < numInputs)
if (nodeId < numInputs + numInitializers)
return 1;
else
return net.node(nodeId - numInputs).output_size();
return net.node(nodeId - numInputs - numInitializers).output_size();
}
virtual std::string getOutputName(int nodeId, int outId) const CV_OVERRIDE
@@ -89,18 +90,20 @@ public:
CV_Assert(outId < getNumOutputs(nodeId));
if (nodeId < numInputs)
return net.input(nodeId).name();
else if (nodeId < numInputs + numInitializers)
return net.initializer(nodeId - numInputs).name();
else
return net.node(nodeId - numInputs).output(outId);
return net.node(nodeId - numInputs - numInitializers).output(outId);
}
virtual void removeNode(int idx) CV_OVERRIDE
{
CV_Assert(idx >= numInputs);
net.mutable_node()->DeleteSubrange(idx - numInputs, 1);
CV_Assert(idx >= numInputs + numInitializers);
net.mutable_node()->DeleteSubrange(idx - numInputs - numInitializers, 1);
}
private:
int numInputs;
int numInputs, numInitializers;
opencv_onnx::GraphProto& net;
};
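As a sketch, the unified node-id space the wrapper exposes after this change (helper name is illustrative, not part of the patch):

// id layout: [0, numInputs) -> graph inputs,
//            [numInputs, numInputs + numInitializers) -> initializers,
//            [numInputs + numInitializers, ...) -> regular graph nodes
static int nodeKind(int idx, int numInputs, int numInitializers)
{
    if (idx < numInputs)
        return 0; // graph input
    if (idx < numInputs + numInitializers)
        return 1; // initializer
    return 2;     // regular graph node
}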
@@ -382,33 +385,63 @@ public:
}
};
class BatchNormalizationSubgraph : public Subgraph
class BatchNormalizationSubgraphBase : public Subgraph
{
public:
BatchNormalizationSubgraph()
BatchNormalizationSubgraphBase()
{
int input = addNodeToMatch("");
int data1 = addNodeToMatch("Constant");
int data2 = addNodeToMatch("Constant");
int data3 = addNodeToMatch("Constant");
int data4 = addNodeToMatch("Constant");
int shape1 = addNodeToMatch("Constant");
int reshape1 = addNodeToMatch("Reshape", data1, shape1);
int shape2 = addNodeToMatch("Constant");
int reshape2 = addNodeToMatch("Reshape", data2, shape2);
input = addNodeToMatch("");
var = addNodeToMatch("");
mean = addNodeToMatch("");
weight = addNodeToMatch("");
bias = addNodeToMatch("");
A = addNodeToMatch("");
shape1 = addNodeToMatch("");
shape2 = addNodeToMatch("");
}
protected:
int input, var, mean, weight, bias, A, shape1, shape2;
};
class BatchNormalizationSubgraph1 : public BatchNormalizationSubgraphBase
{
public:
BatchNormalizationSubgraph1()
{
int reshape1 = addNodeToMatch("Reshape", weight, shape1);
int reshape2 = addNodeToMatch("Reshape", bias, shape2);
int shape3 = addNodeToMatch("Constant");
int reshape3 = addNodeToMatch("Reshape", data3, shape3);
int reshape3 = addNodeToMatch("Reshape", var, shape3);
int shape4 = addNodeToMatch("Constant");
int reshape4 = addNodeToMatch("Reshape", data4, shape4);
int reshape4 = addNodeToMatch("Reshape", mean, shape4);
int sqrtNode = addNodeToMatch("Sqrt", reshape3);
int A = addNodeToMatch("Constant");
int divNode = addNodeToMatch("Div", A, sqrtNode);
int mul1 = addNodeToMatch("Mul", reshape1, divNode);
int mul2 = addNodeToMatch("Mul", reshape4, mul1);
int sub = addNodeToMatch("Sub", reshape2, mul2);
int mul3 = addNodeToMatch("Mul", input, mul1);
addNodeToMatch("Add", mul3, sub);
setFusedNode("BatchNormalization", input, data1, data2, data4 ,data3);
setFusedNode("BatchNormalization", input, weight, bias, mean, var);
}
};
class BatchNormalizationSubgraph2 : public BatchNormalizationSubgraphBase
{
public:
BatchNormalizationSubgraph2()
{
int sqrtNode = addNodeToMatch("Sqrt", var);
int divNode = addNodeToMatch("Div", A, sqrtNode);
int mul1 = addNodeToMatch("Mul", weight, divNode);
int reshape2 = addNodeToMatch("Reshape", mul1, shape2);
int mulMean = addNodeToMatch("Mul", mean, mul1);
int sub = addNodeToMatch("Sub", bias, mulMean);
int reshape1 = addNodeToMatch("Reshape", sub, shape1);
int mulInput = addNodeToMatch("Mul", input, reshape2);
addNodeToMatch("Add", mulInput, reshape1);
setFusedNode("BatchNormalization", input, weight, bias, mean, var);
}
};
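Both subgraphs fold the same batch-norm algebra; a scalar sketch of the arithmetic they match (assumptions: the matched constant A is 1 and eps is already folded into var, as in the graphs above):

#include <cmath>

float bnFused(float x, float weight, float bias, float mean, float var)
{
    float scale = weight / std::sqrt(var); // Mul(weight, Div(1, Sqrt(var)))
    float shift = bias - mean * scale;     // Sub(bias, Mul(mean, scale))
    return x * scale + shift;              // Add(Mul(x, scale), shift)
}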
@@ -424,7 +457,8 @@ void simplifySubgraphs(opencv_onnx::GraphProto& net)
subgraphs.push_back(makePtr<NormalizeSubgraph1>());
subgraphs.push_back(makePtr<NormalizeSubgraph2>());
subgraphs.push_back(makePtr<NormalizeSubgraph3>());
subgraphs.push_back(makePtr<BatchNormalizationSubgraph>());
subgraphs.push_back(makePtr<BatchNormalizationSubgraph1>());
subgraphs.push_back(makePtr<BatchNormalizationSubgraph2>());
simplifySubgraphs(Ptr<ImportGraphWrapper>(new ONNXGraphWrapper(net)), subgraphs);
}

@@ -309,30 +309,11 @@ static void addConstant(const std::string& name,
outShapes.insert(std::make_pair(name, shape(blob)));
}
void addConstantNodesForInitializers(opencv_onnx::GraphProto& graph_proto)
{
int num_initializers = graph_proto.initializer_size();
for (int id = 0; id < num_initializers; id++)
{
opencv_onnx::TensorProto initializer = graph_proto.initializer(id);
opencv_onnx::NodeProto* constant_node = graph_proto.add_node();
constant_node->set_op_type("Constant");
constant_node->set_name(initializer.name());
constant_node->add_output(initializer.name());
opencv_onnx::AttributeProto* value = constant_node->add_attribute();
opencv_onnx::TensorProto* tensor = initializer.New();
tensor->CopyFrom(initializer);
releaseONNXTensor(initializer);
value->set_allocated_t(tensor);
}
}
void ONNXImporter::populateNet(Net dstNet)
{
CV_Assert(model_proto.has_graph());
opencv_onnx::GraphProto graph_proto = model_proto.graph();
addConstantNodesForInitializers(graph_proto);
simplifySubgraphs(graph_proto);
std::map<std::string, Mat> constBlobs = getGraphTensors(graph_proto);

@@ -46,6 +46,14 @@ static int toNCHW(int idx)
else return (4 + idx) % 3 + 1;
}
static int toNCDHW(int idx)
{
CV_Assert(-5 <= idx && idx < 5);
if (idx == 0) return 0;
else if (idx > 0) return idx % 4 + 1;
else return (5 + idx) % 4 + 1;
}
// These values are used to indicate the layer output's data layout where possible.
enum DataLayout
{
@@ -1323,6 +1331,8 @@ void TFImporter::populateNet(Net dstNet)
if (getDataLayout(name, data_layouts) == DATA_LAYOUT_NHWC)
axis = toNCHW(axis);
else if (getDataLayout(name, data_layouts) == DATA_LAYOUT_NDHWC)
axis = toNCDHW(axis);
layerParams.set("axis", axis);
// input(0) or input(n-1) is concat_dim
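A small self-check of the axis remapping the new helper performs (assumption: the convention mirrors toNCHW above; the snippet copies the helper so it compiles standalone):

#include <cassert>

static int toNCDHW(int idx) // local copy of the helper added above
{
    assert(-5 <= idx && idx < 5);
    if (idx == 0) return 0;
    else if (idx > 0) return idx % 4 + 1;
    else return (5 + idx) % 4 + 1;
}

int main()
{
    assert(toNCDHW(0) == 0);                     // batch stays first
    assert(toNCDHW(4) == 1 && toNCDHW(-1) == 1); // channels (NDHWC axis 4 / -1) -> NCDHW axis 1
    assert(toNCDHW(1) == 2 && toNCDHW(2) == 3 && toNCDHW(3) == 4); // D, H, W shift by one
    return 0;
}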

@@ -330,6 +330,13 @@ TEST_P(Test_ONNX_layers, BatchNormalizationUnfused)
testONNXModels("frozenBatchNorm2d");
}
TEST_P(Test_ONNX_layers, BatchNormalizationSubgraph)
{
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
testONNXModels("batch_norm_subgraph");
}
TEST_P(Test_ONNX_layers, Transpose)
{
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)

@@ -222,6 +222,21 @@ TEST_P(Test_TensorFlow_layers, concat_axis_1)
runTensorFlowNet("concat_axis_1");
}
TEST_P(Test_TensorFlow_layers, concat_3d)
{
if (backend == DNN_BACKEND_OPENCV && target != DNN_TARGET_CPU)
{
if (target == DNN_TARGET_OPENCL_FP16) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16);
if (target == DNN_TARGET_OPENCL) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL);
}
if ((backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH ||
backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019) && target == DNN_TARGET_MYRIAD)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
runTensorFlowNet("concat_3d");
}
TEST_P(Test_TensorFlow_layers, batch_norm_1)
{
runTensorFlowNet("batch_norm");

@@ -1,4 +1,7 @@
set(the_description "2D Features Framework")
ocv_add_dispatched_file(sift SSE4_1 AVX2 AVX512_SKX)
set(debug_modules "")
if(DEBUG_opencv_features2d)
list(APPEND debug_modules opencv_highgui)

@@ -0,0 +1,540 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
//
// Copyright (c) 2006-2010, Rob Hess <hess@eecs.oregonstate.edu>
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Copyright (C) 2020, Intel Corporation, all rights reserved.
/**********************************************************************************************\
Implementation of SIFT is based on the code from http://blogs.oregonstate.edu/hess/code/sift/
Below is the original copyright.
Patent US6711293 expired in March 2020.
// Copyright (c) 2006-2010, Rob Hess <hess@eecs.oregonstate.edu>
// All rights reserved.
// The following patent has been issued for methods embodied in this
// software: "Method and apparatus for identifying scale invariant features
// in an image and use of same for locating an object in an image," David
// G. Lowe, US Patent 6,711,293 (March 23, 2004). Provisional application
// filed March 8, 1999. Asignee: The University of British Columbia. For
// further details, contact David Lowe (lowe@cs.ubc.ca) or the
// University-Industry Liaison Office of the University of British
// Columbia.
// Note that restrictions imposed by this patent (and possibly others)
// exist independently of and may be in conflict with the freedoms granted
// in this license, which refers to copyright of the program, not patents
// for any methods that it implements. Both copyright and patent law must
// be obeyed to legally use and redistribute this program and it is not the
// purpose of this license to induce you to infringe any patents or other
// property right claims or to contest validity of any such claims. If you
// redistribute or use the program, then this license merely protects you
// from committing copyright infringement. It does not protect you from
// committing patent infringement. So, before you do anything with this
// program, make sure that you have permission to do so not merely in terms
// of copyright, but also in terms of patent law.
// Please note that this license is not to be understood as a guarantee
// either. If you use the program according to this license, but in
// conflict with patent law, it does not mean that the licensor will refund
// you for any losses that you incur if you are sued for your patent
// infringement.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
// * Redistributions of source code must retain the above copyright and
// patent notices, this list of conditions and the following
// disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in
// the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Oregon State University nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
// TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
// PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// HOLDER BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
\**********************************************************************************************/
#include "precomp.hpp"
#include <opencv2/core/hal/hal.hpp>
#include <opencv2/core/utils/tls.hpp>
#include "sift.simd.hpp"
#include "sift.simd_declarations.hpp" // defines CV_CPU_DISPATCH_MODES_ALL=AVX2,...,BASELINE based on CMakeLists.txt content
namespace cv {
/*!
SIFT implementation.
The class implements SIFT algorithm by D. Lowe.
*/
class SIFT_Impl : public SIFT
{
public:
explicit SIFT_Impl( int nfeatures = 0, int nOctaveLayers = 3,
double contrastThreshold = 0.04, double edgeThreshold = 10,
double sigma = 1.6);
//! returns the descriptor size in floats (128)
int descriptorSize() const CV_OVERRIDE;
//! returns the descriptor type
int descriptorType() const CV_OVERRIDE;
//! returns the default norm type
int defaultNorm() const CV_OVERRIDE;
//! finds the keypoints and computes descriptors for them using SIFT algorithm.
//! Optionally it can compute descriptors for the user-provided keypoints
void detectAndCompute(InputArray img, InputArray mask,
std::vector<KeyPoint>& keypoints,
OutputArray descriptors,
bool useProvidedKeypoints = false) CV_OVERRIDE;
void buildGaussianPyramid( const Mat& base, std::vector<Mat>& pyr, int nOctaves ) const;
void buildDoGPyramid( const std::vector<Mat>& pyr, std::vector<Mat>& dogpyr ) const;
void findScaleSpaceExtrema( const std::vector<Mat>& gauss_pyr, const std::vector<Mat>& dog_pyr,
std::vector<KeyPoint>& keypoints ) const;
protected:
CV_PROP_RW int nfeatures;
CV_PROP_RW int nOctaveLayers;
CV_PROP_RW double contrastThreshold;
CV_PROP_RW double edgeThreshold;
CV_PROP_RW double sigma;
};
Ptr<SIFT> SIFT::create( int _nfeatures, int _nOctaveLayers,
double _contrastThreshold, double _edgeThreshold, double _sigma )
{
CV_TRACE_FUNCTION();
return makePtr<SIFT_Impl>(_nfeatures, _nOctaveLayers, _contrastThreshold, _edgeThreshold, _sigma);
}
static inline void
unpackOctave(const KeyPoint& kpt, int& octave, int& layer, float& scale)
{
octave = kpt.octave & 255;
layer = (kpt.octave >> 8) & 255;
octave = octave < 128 ? octave : (-128 | octave);
scale = octave >= 0 ? 1.f/(1 << octave) : (float)(1 << -octave);
}
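// Illustration (not part of the file): the packing this inverts stores the
// octave as a signed 8-bit value in the low byte and the layer in the next
// byte, i.e. kpt.octave == (layer << 8) | (octave & 255); e.g. octave -1,
// layer 2 packs to 0x2FF and unpacks back to (-1, 2).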
static Mat createInitialImage( const Mat& img, bool doubleImageSize, float sigma )
{
CV_TRACE_FUNCTION();
Mat gray, gray_fpt;
if( img.channels() == 3 || img.channels() == 4 )
{
cvtColor(img, gray, COLOR_BGR2GRAY);
gray.convertTo(gray_fpt, DataType<sift_wt>::type, SIFT_FIXPT_SCALE, 0);
}
else
img.convertTo(gray_fpt, DataType<sift_wt>::type, SIFT_FIXPT_SCALE, 0);
float sig_diff;
if( doubleImageSize )
{
sig_diff = sqrtf( std::max(sigma * sigma - SIFT_INIT_SIGMA * SIFT_INIT_SIGMA * 4, 0.01f) );
Mat dbl;
#if DoG_TYPE_SHORT
resize(gray_fpt, dbl, Size(gray_fpt.cols*2, gray_fpt.rows*2), 0, 0, INTER_LINEAR_EXACT);
#else
resize(gray_fpt, dbl, Size(gray_fpt.cols*2, gray_fpt.rows*2), 0, 0, INTER_LINEAR);
#endif
Mat result;
GaussianBlur(dbl, result, Size(), sig_diff, sig_diff);
return result;
}
else
{
sig_diff = sqrtf( std::max(sigma * sigma - SIFT_INIT_SIGMA * SIFT_INIT_SIGMA, 0.01f) );
Mat result;
GaussianBlur(gray_fpt, result, Size(), sig_diff, sig_diff);
return result;
}
}
void SIFT_Impl::buildGaussianPyramid( const Mat& base, std::vector<Mat>& pyr, int nOctaves ) const
{
CV_TRACE_FUNCTION();
std::vector<double> sig(nOctaveLayers + 3);
pyr.resize(nOctaves*(nOctaveLayers + 3));
// precompute Gaussian sigmas using the following formula:
// \sigma_{total}^2 = \sigma_{i}^2 + \sigma_{i-1}^2
sig[0] = sigma;
double k = std::pow( 2., 1. / nOctaveLayers );
for( int i = 1; i < nOctaveLayers + 3; i++ )
{
double sig_prev = std::pow(k, (double)(i-1))*sigma;
double sig_total = sig_prev*k;
sig[i] = std::sqrt(sig_total*sig_total - sig_prev*sig_prev);
}
for( int o = 0; o < nOctaves; o++ )
{
for( int i = 0; i < nOctaveLayers + 3; i++ )
{
Mat& dst = pyr[o*(nOctaveLayers + 3) + i];
if( o == 0 && i == 0 )
dst = base;
// base of new octave is halved image from end of previous octave
else if( i == 0 )
{
const Mat& src = pyr[(o-1)*(nOctaveLayers + 3) + nOctaveLayers];
resize(src, dst, Size(src.cols/2, src.rows/2),
0, 0, INTER_NEAREST);
}
else
{
const Mat& src = pyr[o*(nOctaveLayers + 3) + i-1];
GaussianBlur(src, dst, Size(), sig[i], sig[i]);
}
}
}
}
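// Worked example of the incremental sigmas above, assuming the defaults
// sigma = 1.6 and nOctaveLayers = 3 (so k = 2^(1/3) ~ 1.2599):
//   sig[1] = sqrt((1.6*k)^2 - 1.6^2) ~ 1.2263
// Blurring layer 0 by sig[1] therefore yields a total blur of 1.6*k, per the
// variance additivity noted in the comment.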
class buildDoGPyramidComputer : public ParallelLoopBody
{
public:
buildDoGPyramidComputer(
int _nOctaveLayers,
const std::vector<Mat>& _gpyr,
std::vector<Mat>& _dogpyr)
: nOctaveLayers(_nOctaveLayers),
gpyr(_gpyr),
dogpyr(_dogpyr) { }
void operator()( const cv::Range& range ) const CV_OVERRIDE
{
CV_TRACE_FUNCTION();
const int begin = range.start;
const int end = range.end;
for( int a = begin; a < end; a++ )
{
const int o = a / (nOctaveLayers + 2);
const int i = a % (nOctaveLayers + 2);
const Mat& src1 = gpyr[o*(nOctaveLayers + 3) + i];
const Mat& src2 = gpyr[o*(nOctaveLayers + 3) + i + 1];
Mat& dst = dogpyr[o*(nOctaveLayers + 2) + i];
subtract(src2, src1, dst, noArray(), DataType<sift_wt>::type);
}
}
private:
int nOctaveLayers;
const std::vector<Mat>& gpyr;
std::vector<Mat>& dogpyr;
};
void SIFT_Impl::buildDoGPyramid( const std::vector<Mat>& gpyr, std::vector<Mat>& dogpyr ) const
{
CV_TRACE_FUNCTION();
int nOctaves = (int)gpyr.size()/(nOctaveLayers + 3);
dogpyr.resize( nOctaves*(nOctaveLayers + 2) );
parallel_for_(Range(0, nOctaves * (nOctaveLayers + 2)), buildDoGPyramidComputer(nOctaveLayers, gpyr, dogpyr));
}
class findScaleSpaceExtremaComputer : public ParallelLoopBody
{
public:
findScaleSpaceExtremaComputer(
int _o,
int _i,
int _threshold,
int _idx,
int _step,
int _cols,
int _nOctaveLayers,
double _contrastThreshold,
double _edgeThreshold,
double _sigma,
const std::vector<Mat>& _gauss_pyr,
const std::vector<Mat>& _dog_pyr,
TLSData<std::vector<KeyPoint> > &_tls_kpts_struct)
: o(_o),
i(_i),
threshold(_threshold),
idx(_idx),
step(_step),
cols(_cols),
nOctaveLayers(_nOctaveLayers),
contrastThreshold(_contrastThreshold),
edgeThreshold(_edgeThreshold),
sigma(_sigma),
gauss_pyr(_gauss_pyr),
dog_pyr(_dog_pyr),
tls_kpts_struct(_tls_kpts_struct) { }
void operator()( const cv::Range& range ) const CV_OVERRIDE
{
CV_TRACE_FUNCTION();
std::vector<KeyPoint>& kpts = tls_kpts_struct.getRef();
CV_CPU_DISPATCH(findScaleSpaceExtrema, (o, i, threshold, idx, step, cols, nOctaveLayers, contrastThreshold, edgeThreshold, sigma, gauss_pyr, dog_pyr, kpts, range),
CV_CPU_DISPATCH_MODES_ALL);
}
private:
int o, i;
int threshold;
int idx, step, cols;
int nOctaveLayers;
double contrastThreshold;
double edgeThreshold;
double sigma;
const std::vector<Mat>& gauss_pyr;
const std::vector<Mat>& dog_pyr;
TLSData<std::vector<KeyPoint> > &tls_kpts_struct;
};
//
// Detects features at extrema in DoG scale space. Bad features are discarded
// based on contrast and ratio of principal curvatures.
void SIFT_Impl::findScaleSpaceExtrema( const std::vector<Mat>& gauss_pyr, const std::vector<Mat>& dog_pyr,
std::vector<KeyPoint>& keypoints ) const
{
CV_TRACE_FUNCTION();
const int nOctaves = (int)gauss_pyr.size()/(nOctaveLayers + 3);
const int threshold = cvFloor(0.5 * contrastThreshold / nOctaveLayers * 255 * SIFT_FIXPT_SCALE);
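    // e.g. with the defaults (contrastThreshold = 0.04, nOctaveLayers = 3,
    // SIFT_FIXPT_SCALE = 1) this is cvFloor(0.5*0.04/3*255) = cvFloor(1.7) = 1,
    // a loose integer prefilter applied before the full contrast check.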
keypoints.clear();
TLSDataAccumulator<std::vector<KeyPoint> > tls_kpts_struct;
for( int o = 0; o < nOctaves; o++ )
for( int i = 1; i <= nOctaveLayers; i++ )
{
const int idx = o*(nOctaveLayers+2)+i;
const Mat& img = dog_pyr[idx];
const int step = (int)img.step1();
const int rows = img.rows, cols = img.cols;
parallel_for_(Range(SIFT_IMG_BORDER, rows-SIFT_IMG_BORDER),
findScaleSpaceExtremaComputer(
o, i, threshold, idx, step, cols,
nOctaveLayers,
contrastThreshold,
edgeThreshold,
sigma,
gauss_pyr, dog_pyr, tls_kpts_struct));
}
std::vector<std::vector<KeyPoint>*> kpt_vecs;
tls_kpts_struct.gather(kpt_vecs);
for (size_t i = 0; i < kpt_vecs.size(); ++i) {
keypoints.insert(keypoints.end(), kpt_vecs[i]->begin(), kpt_vecs[i]->end());
}
}
static
void calcSIFTDescriptor(
const Mat& img, Point2f ptf, float ori, float scl,
int d, int n, float* dst
)
{
CV_TRACE_FUNCTION();
CV_CPU_DISPATCH(calcSIFTDescriptor, (img, ptf, ori, scl, d, n, dst),
CV_CPU_DISPATCH_MODES_ALL);
}
class calcDescriptorsComputer : public ParallelLoopBody
{
public:
calcDescriptorsComputer(const std::vector<Mat>& _gpyr,
const std::vector<KeyPoint>& _keypoints,
Mat& _descriptors,
int _nOctaveLayers,
int _firstOctave)
: gpyr(_gpyr),
keypoints(_keypoints),
descriptors(_descriptors),
nOctaveLayers(_nOctaveLayers),
firstOctave(_firstOctave) { }
void operator()( const cv::Range& range ) const CV_OVERRIDE
{
CV_TRACE_FUNCTION();
const int begin = range.start;
const int end = range.end;
static const int d = SIFT_DESCR_WIDTH, n = SIFT_DESCR_HIST_BINS;
for ( int i = begin; i<end; i++ )
{
KeyPoint kpt = keypoints[i];
int octave, layer;
float scale;
unpackOctave(kpt, octave, layer, scale);
CV_Assert(octave >= firstOctave && layer <= nOctaveLayers+2);
float size=kpt.size*scale;
Point2f ptf(kpt.pt.x*scale, kpt.pt.y*scale);
const Mat& img = gpyr[(octave - firstOctave)*(nOctaveLayers + 3) + layer];
float angle = 360.f - kpt.angle;
if(std::abs(angle - 360.f) < FLT_EPSILON)
angle = 0.f;
calcSIFTDescriptor(img, ptf, angle, size*0.5f, d, n, descriptors.ptr<float>((int)i));
}
}
private:
const std::vector<Mat>& gpyr;
const std::vector<KeyPoint>& keypoints;
Mat& descriptors;
int nOctaveLayers;
int firstOctave;
};
static void calcDescriptors(const std::vector<Mat>& gpyr, const std::vector<KeyPoint>& keypoints,
Mat& descriptors, int nOctaveLayers, int firstOctave )
{
CV_TRACE_FUNCTION();
parallel_for_(Range(0, static_cast<int>(keypoints.size())), calcDescriptorsComputer(gpyr, keypoints, descriptors, nOctaveLayers, firstOctave));
}
//////////////////////////////////////////////////////////////////////////////////////////
SIFT_Impl::SIFT_Impl( int _nfeatures, int _nOctaveLayers,
double _contrastThreshold, double _edgeThreshold, double _sigma )
: nfeatures(_nfeatures), nOctaveLayers(_nOctaveLayers),
contrastThreshold(_contrastThreshold), edgeThreshold(_edgeThreshold), sigma(_sigma)
{
}
int SIFT_Impl::descriptorSize() const
{
return SIFT_DESCR_WIDTH*SIFT_DESCR_WIDTH*SIFT_DESCR_HIST_BINS;
}
int SIFT_Impl::descriptorType() const
{
return CV_32F;
}
int SIFT_Impl::defaultNorm() const
{
return NORM_L2;
}
void SIFT_Impl::detectAndCompute(InputArray _image, InputArray _mask,
std::vector<KeyPoint>& keypoints,
OutputArray _descriptors,
bool useProvidedKeypoints)
{
CV_TRACE_FUNCTION();
int firstOctave = -1, actualNOctaves = 0, actualNLayers = 0;
Mat image = _image.getMat(), mask = _mask.getMat();
if( image.empty() || image.depth() != CV_8U )
CV_Error( Error::StsBadArg, "image is empty or has incorrect depth (!=CV_8U)" );
if( !mask.empty() && mask.type() != CV_8UC1 )
CV_Error( Error::StsBadArg, "mask has incorrect type (!=CV_8UC1)" );
if( useProvidedKeypoints )
{
firstOctave = 0;
int maxOctave = INT_MIN;
for( size_t i = 0; i < keypoints.size(); i++ )
{
int octave, layer;
float scale;
unpackOctave(keypoints[i], octave, layer, scale);
firstOctave = std::min(firstOctave, octave);
maxOctave = std::max(maxOctave, octave);
actualNLayers = std::max(actualNLayers, layer-2);
}
firstOctave = std::min(firstOctave, 0);
CV_Assert( firstOctave >= -1 && actualNLayers <= nOctaveLayers );
actualNOctaves = maxOctave - firstOctave + 1;
}
Mat base = createInitialImage(image, firstOctave < 0, (float)sigma);
std::vector<Mat> gpyr;
int nOctaves = actualNOctaves > 0 ? actualNOctaves : cvRound(std::log( (double)std::min( base.cols, base.rows ) ) / std::log(2.) - 2) - firstOctave;
//double t, tf = getTickFrequency();
//t = (double)getTickCount();
buildGaussianPyramid(base, gpyr, nOctaves);
//t = (double)getTickCount() - t;
//printf("pyramid construction time: %g\n", t*1000./tf);
if( !useProvidedKeypoints )
{
std::vector<Mat> dogpyr;
buildDoGPyramid(gpyr, dogpyr);
//t = (double)getTickCount();
findScaleSpaceExtrema(gpyr, dogpyr, keypoints);
KeyPointsFilter::removeDuplicatedSorted( keypoints );
if( nfeatures > 0 )
KeyPointsFilter::retainBest(keypoints, nfeatures);
//t = (double)getTickCount() - t;
//printf("keypoint detection time: %g\n", t*1000./tf);
if( firstOctave < 0 )
for( size_t i = 0; i < keypoints.size(); i++ )
{
KeyPoint& kpt = keypoints[i];
float scale = 1.f/(float)(1 << -firstOctave);
kpt.octave = (kpt.octave & ~255) | ((kpt.octave + firstOctave) & 255);
kpt.pt *= scale;
kpt.size *= scale;
}
if( !mask.empty() )
KeyPointsFilter::runByPixelsMask( keypoints, mask );
}
else
{
// filter keypoints by mask
//KeyPointsFilter::runByPixelsMask( keypoints, mask );
}
if( _descriptors.needed() )
{
//t = (double)getTickCount();
int dsize = descriptorSize();
_descriptors.create((int)keypoints.size(), dsize, CV_32F);
Mat descriptors = _descriptors.getMat();
calcDescriptors(gpyr, keypoints, descriptors, nOctaveLayers, firstOctave);
//t = (double)getTickCount() - t;
//printf("descriptor extraction time: %g\n", t*1000./tf);
}
}
}

@@ -70,63 +70,13 @@
\**********************************************************************************************/
#include "precomp.hpp"
#include <iostream>
#include <stdarg.h>
#include <opencv2/core/hal/hal.hpp>
#include <opencv2/core/utils/tls.hpp>
namespace cv
{
/*!
SIFT implementation.
#include <opencv2/core/hal/hal.hpp>
#include "opencv2/core/hal/intrin.hpp"
The class implements SIFT algorithm by D. Lowe.
*/
class SIFT_Impl : public SIFT
{
public:
explicit SIFT_Impl( int nfeatures = 0, int nOctaveLayers = 3,
double contrastThreshold = 0.04, double edgeThreshold = 10,
double sigma = 1.6);
//! returns the descriptor size in floats (128)
int descriptorSize() const CV_OVERRIDE;
//! returns the descriptor type
int descriptorType() const CV_OVERRIDE;
//! returns the default norm type
int defaultNorm() const CV_OVERRIDE;
//! finds the keypoints and computes descriptors for them using SIFT algorithm.
//! Optionally it can compute descriptors for the user-provided keypoints
void detectAndCompute(InputArray img, InputArray mask,
std::vector<KeyPoint>& keypoints,
OutputArray descriptors,
bool useProvidedKeypoints = false) CV_OVERRIDE;
void buildGaussianPyramid( const Mat& base, std::vector<Mat>& pyr, int nOctaves ) const;
void buildDoGPyramid( const std::vector<Mat>& pyr, std::vector<Mat>& dogpyr ) const;
void findScaleSpaceExtrema( const std::vector<Mat>& gauss_pyr, const std::vector<Mat>& dog_pyr,
std::vector<KeyPoint>& keypoints ) const;
protected:
CV_PROP_RW int nfeatures;
CV_PROP_RW int nOctaveLayers;
CV_PROP_RW double contrastThreshold;
CV_PROP_RW double edgeThreshold;
CV_PROP_RW double sigma;
};
Ptr<SIFT> SIFT::create( int _nfeatures, int _nOctaveLayers,
double _contrastThreshold, double _edgeThreshold, double _sigma )
{
CV_TRACE_FUNCTION();
return makePtr<SIFT_Impl>(_nfeatures, _nOctaveLayers, _contrastThreshold, _edgeThreshold, _sigma);
}
namespace cv {
#if !defined(CV_CPU_DISPATCH_MODE) || !defined(CV_CPU_OPTIMIZATION_DECLARATIONS_ONLY)
/******************************* Defs and macros *****************************/
// default width of descriptor histogram array
@@ -151,7 +101,7 @@ static const int SIFT_ORI_HIST_BINS = 36;
static const float SIFT_ORI_SIG_FCTR = 1.5f;
// determines the radius of the region used in orientation assignment
static const float SIFT_ORI_RADIUS = 3 * SIFT_ORI_SIG_FCTR;
static const float SIFT_ORI_RADIUS = 4.5f; // 3 * SIFT_ORI_SIG_FCTR;
// orientation magnitude relative to max that results in new feature
static const float SIFT_ORI_PEAK_RATIO = 0.8f;
@@ -176,144 +126,41 @@ typedef float sift_wt;
static const int SIFT_FIXPT_SCALE = 1;
#endif
static inline void
unpackOctave(const KeyPoint& kpt, int& octave, int& layer, float& scale)
{
octave = kpt.octave & 255;
layer = (kpt.octave >> 8) & 255;
octave = octave < 128 ? octave : (-128 | octave);
scale = octave >= 0 ? 1.f/(1 << octave) : (float)(1 << -octave);
}
#endif // definitions and macros
static Mat createInitialImage( const Mat& img, bool doubleImageSize, float sigma )
{
CV_TRACE_FUNCTION();
Mat gray, gray_fpt;
if( img.channels() == 3 || img.channels() == 4 )
{
cvtColor(img, gray, COLOR_BGR2GRAY);
gray.convertTo(gray_fpt, DataType<sift_wt>::type, SIFT_FIXPT_SCALE, 0);
}
else
img.convertTo(gray_fpt, DataType<sift_wt>::type, SIFT_FIXPT_SCALE, 0);
CV_CPU_OPTIMIZATION_NAMESPACE_BEGIN
float sig_diff;
void findScaleSpaceExtrema(
int octave,
int layer,
int threshold,
int idx,
int step,
int cols,
int nOctaveLayers,
double contrastThreshold,
double edgeThreshold,
double sigma,
const std::vector<Mat>& gauss_pyr,
const std::vector<Mat>& dog_pyr,
std::vector<KeyPoint>& kpts,
const cv::Range& range);
if( doubleImageSize )
{
sig_diff = sqrtf( std::max(sigma * sigma - SIFT_INIT_SIGMA * SIFT_INIT_SIGMA * 4, 0.01f) );
Mat dbl;
#if DoG_TYPE_SHORT
resize(gray_fpt, dbl, Size(gray_fpt.cols*2, gray_fpt.rows*2), 0, 0, INTER_LINEAR_EXACT);
#else
resize(gray_fpt, dbl, Size(gray_fpt.cols*2, gray_fpt.rows*2), 0, 0, INTER_LINEAR);
#endif
Mat result;
GaussianBlur(dbl, result, Size(), sig_diff, sig_diff);
return result;
}
else
{
sig_diff = sqrtf( std::max(sigma * sigma - SIFT_INIT_SIGMA * SIFT_INIT_SIGMA, 0.01f) );
Mat result;
GaussianBlur(gray_fpt, result, Size(), sig_diff, sig_diff);
return result;
}
}
void SIFT_Impl::buildGaussianPyramid( const Mat& base, std::vector<Mat>& pyr, int nOctaves ) const
{
CV_TRACE_FUNCTION();
void calcSIFTDescriptor(
const Mat& img, Point2f ptf, float ori, float scl,
int d, int n, float* dst
);
std::vector<double> sig(nOctaveLayers + 3);
pyr.resize(nOctaves*(nOctaveLayers + 3));
// precompute Gaussian sigmas using the following formula:
// \sigma_{total}^2 = \sigma_{i}^2 + \sigma_{i-1}^2
sig[0] = sigma;
double k = std::pow( 2., 1. / nOctaveLayers );
for( int i = 1; i < nOctaveLayers + 3; i++ )
{
double sig_prev = std::pow(k, (double)(i-1))*sigma;
double sig_total = sig_prev*k;
sig[i] = std::sqrt(sig_total*sig_total - sig_prev*sig_prev);
}
for( int o = 0; o < nOctaves; o++ )
{
for( int i = 0; i < nOctaveLayers + 3; i++ )
{
Mat& dst = pyr[o*(nOctaveLayers + 3) + i];
if( o == 0 && i == 0 )
dst = base;
// base of new octave is halved image from end of previous octave
else if( i == 0 )
{
const Mat& src = pyr[(o-1)*(nOctaveLayers + 3) + nOctaveLayers];
resize(src, dst, Size(src.cols/2, src.rows/2),
0, 0, INTER_NEAREST);
}
else
{
const Mat& src = pyr[o*(nOctaveLayers + 3) + i-1];
GaussianBlur(src, dst, Size(), sig[i], sig[i]);
}
}
}
}
class buildDoGPyramidComputer : public ParallelLoopBody
{
public:
buildDoGPyramidComputer(
int _nOctaveLayers,
const std::vector<Mat>& _gpyr,
std::vector<Mat>& _dogpyr)
: nOctaveLayers(_nOctaveLayers),
gpyr(_gpyr),
dogpyr(_dogpyr) { }
void operator()( const cv::Range& range ) const CV_OVERRIDE
{
CV_TRACE_FUNCTION();
const int begin = range.start;
const int end = range.end;
for( int a = begin; a < end; a++ )
{
const int o = a / (nOctaveLayers + 2);
const int i = a % (nOctaveLayers + 2);
const Mat& src1 = gpyr[o*(nOctaveLayers + 3) + i];
const Mat& src2 = gpyr[o*(nOctaveLayers + 3) + i + 1];
Mat& dst = dogpyr[o*(nOctaveLayers + 2) + i];
subtract(src2, src1, dst, noArray(), DataType<sift_wt>::type);
}
}
private:
int nOctaveLayers;
const std::vector<Mat>& gpyr;
std::vector<Mat>& dogpyr;
};
void SIFT_Impl::buildDoGPyramid( const std::vector<Mat>& gpyr, std::vector<Mat>& dogpyr ) const
{
CV_TRACE_FUNCTION();
int nOctaves = (int)gpyr.size()/(nOctaveLayers + 3);
dogpyr.resize( nOctaves*(nOctaveLayers + 2) );
parallel_for_(Range(0, nOctaves * (nOctaveLayers + 2)), buildDoGPyramidComputer(nOctaveLayers, gpyr, dogpyr));
}
#ifndef CV_CPU_OPTIMIZATION_DECLARATIONS_ONLY
// Computes a gradient orientation histogram at a specified pixel
static float calcOrientationHist( const Mat& img, Point pt, int radius,
float sigma, float* hist, int n )
static
float calcOrientationHist(
const Mat& img, Point pt, int radius,
float sigma, float* hist, int n
)
{
CV_TRACE_FUNCTION();
@@ -449,9 +296,12 @@ static float calcOrientationHist( const Mat& img, Point pt, int radius,
// Interpolates a scale-space extremum's location and scale to subpixel
// accuracy to form an image feature. Rejects features with low contrast.
// Based on Section 4 of Lowe's paper.
static bool adjustLocalExtrema( const std::vector<Mat>& dog_pyr, KeyPoint& kpt, int octv,
int& layer, int& r, int& c, int nOctaveLayers,
float contrastThreshold, float edgeThreshold, float sigma )
static
bool adjustLocalExtrema(
const std::vector<Mat>& dog_pyr, KeyPoint& kpt, int octv,
int& layer, int& r, int& c, int nOctaveLayers,
float contrastThreshold, float edgeThreshold, float sigma
)
{
CV_TRACE_FUNCTION();
@@ -553,11 +403,12 @@ static bool adjustLocalExtrema( const std::vector<Mat>& dog_pyr, KeyPoint& kpt,
return true;
}
namespace {
class findScaleSpaceExtremaComputer : public ParallelLoopBody
class findScaleSpaceExtremaT
{
public:
findScaleSpaceExtremaComputer(
findScaleSpaceExtremaT(
int _o,
int _i,
int _threshold,
@@ -570,7 +421,7 @@ public:
double _sigma,
const std::vector<Mat>& _gauss_pyr,
const std::vector<Mat>& _dog_pyr,
TLSData<std::vector<KeyPoint> > &_tls_kpts_struct)
std::vector<KeyPoint>& kpts)
: o(_o),
i(_i),
@@ -584,8 +435,11 @@ public:
sigma(_sigma),
gauss_pyr(_gauss_pyr),
dog_pyr(_dog_pyr),
tls_kpts_struct(_tls_kpts_struct) { }
void operator()( const cv::Range& range ) const CV_OVERRIDE
kpts_(kpts)
{
// nothing
}
void process(const cv::Range& range)
{
CV_TRACE_FUNCTION();
@@ -593,15 +447,12 @@ public:
const int end = range.end;
static const int n = SIFT_ORI_HIST_BINS;
float hist[n];
float CV_DECL_ALIGNED(CV_SIMD_WIDTH) hist[n];
const Mat& img = dog_pyr[idx];
const Mat& prev = dog_pyr[idx-1];
const Mat& next = dog_pyr[idx+1];
std::vector<KeyPoint> *tls_kpts = tls_kpts_struct.get();
KeyPoint kpt;
for( int r = begin; r < end; r++)
{
const sift_wt* currptr = img.ptr<sift_wt>(r);
@@ -635,6 +486,7 @@ public:
{
CV_TRACE_REGION("pixel_candidate");
KeyPoint kpt;
int r1 = r, c1 = c, layer = i;
if( !adjustLocalExtrema(dog_pyr, kpt, o, layer, r1, c1,
nOctaveLayers, (float)contrastThreshold,
@@ -659,9 +511,8 @@ public:
kpt.angle = 360.f - (float)((360.f/n) * bin);
if(std::abs(kpt.angle - 360.f) < FLT_EPSILON)
kpt.angle = 0.f;
{
tls_kpts->push_back(kpt);
}
kpts_.push_back(kpt);
}
}
}
@@ -678,51 +529,42 @@ private:
double sigma;
const std::vector<Mat>& gauss_pyr;
const std::vector<Mat>& dog_pyr;
TLSData<std::vector<KeyPoint> > &tls_kpts_struct;
std::vector<KeyPoint>& kpts_;
};
//
// Detects features at extrema in DoG scale space. Bad features are discarded
// based on contrast and ratio of principal curvatures.
void SIFT_Impl::findScaleSpaceExtrema( const std::vector<Mat>& gauss_pyr, const std::vector<Mat>& dog_pyr,
std::vector<KeyPoint>& keypoints ) const
} // namespace
void findScaleSpaceExtrema(
int octave,
int layer,
int threshold,
int idx,
int step,
int cols,
int nOctaveLayers,
double contrastThreshold,
double edgeThreshold,
double sigma,
const std::vector<Mat>& gauss_pyr,
const std::vector<Mat>& dog_pyr,
std::vector<KeyPoint>& kpts,
const cv::Range& range)
{
CV_TRACE_FUNCTION();
const int nOctaves = (int)gauss_pyr.size()/(nOctaveLayers + 3);
const int threshold = cvFloor(0.5 * contrastThreshold / nOctaveLayers * 255 * SIFT_FIXPT_SCALE);
keypoints.clear();
TLSDataAccumulator<std::vector<KeyPoint> > tls_kpts_struct;
for( int o = 0; o < nOctaves; o++ )
for( int i = 1; i <= nOctaveLayers; i++ )
{
const int idx = o*(nOctaveLayers+2)+i;
const Mat& img = dog_pyr[idx];
const int step = (int)img.step1();
const int rows = img.rows, cols = img.cols;
parallel_for_(Range(SIFT_IMG_BORDER, rows-SIFT_IMG_BORDER),
findScaleSpaceExtremaComputer(
o, i, threshold, idx, step, cols,
nOctaveLayers,
contrastThreshold,
edgeThreshold,
sigma,
gauss_pyr, dog_pyr, tls_kpts_struct));
}
std::vector<std::vector<KeyPoint>*> kpt_vecs;
tls_kpts_struct.gather(kpt_vecs);
for (size_t i = 0; i < kpt_vecs.size(); ++i) {
keypoints.insert(keypoints.end(), kpt_vecs[i]->begin(), kpt_vecs[i]->end());
}
findScaleSpaceExtremaT(octave, layer, threshold, idx,
step, cols,
nOctaveLayers, contrastThreshold, edgeThreshold, sigma,
gauss_pyr, dog_pyr,
kpts)
.process(range);
}
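// Note: cross-thread keypoint accumulation now lives in sift.dispatch.cpp
// (TLSDataAccumulator); this dispatched entry point only fills the caller's
// vector for the given row range.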
static void calcSIFTDescriptor( const Mat& img, Point2f ptf, float ori, float scl,
int d, int n, float* dst )
void calcSIFTDescriptor(
const Mat& img, Point2f ptf, float ori, float scl,
int d, int n, float* dst
)
{
CV_TRACE_FUNCTION();
@@ -734,7 +576,7 @@ static void calcSIFTDescriptor( const Mat& img, Point2f ptf, float ori, float sc
float hist_width = SIFT_DESCR_SCL_FCTR * scl;
int radius = cvRound(hist_width * 1.4142135623730951f * (d + 1) * 0.5f);
// Clip the radius to the diagonal of the image to avoid autobuffer too large exception
radius = std::min(radius, (int) sqrt(((double) img.cols)*img.cols + ((double) img.rows)*img.rows));
radius = std::min(radius, (int)std::sqrt(((double) img.cols)*img.cols + ((double) img.rows)*img.rows));
cos_t /= hist_width;
sin_t /= hist_width;
@@ -1016,175 +858,6 @@ static void calcSIFTDescriptor( const Mat& img, Point2f ptf, float ori, float sc
#endif
}
class calcDescriptorsComputer : public ParallelLoopBody
{
public:
calcDescriptorsComputer(const std::vector<Mat>& _gpyr,
const std::vector<KeyPoint>& _keypoints,
Mat& _descriptors,
int _nOctaveLayers,
int _firstOctave)
: gpyr(_gpyr),
keypoints(_keypoints),
descriptors(_descriptors),
nOctaveLayers(_nOctaveLayers),
firstOctave(_firstOctave) { }
void operator()( const cv::Range& range ) const CV_OVERRIDE
{
CV_TRACE_FUNCTION();
const int begin = range.start;
const int end = range.end;
static const int d = SIFT_DESCR_WIDTH, n = SIFT_DESCR_HIST_BINS;
for ( int i = begin; i<end; i++ )
{
KeyPoint kpt = keypoints[i];
int octave, layer;
float scale;
unpackOctave(kpt, octave, layer, scale);
CV_Assert(octave >= firstOctave && layer <= nOctaveLayers+2);
float size=kpt.size*scale;
Point2f ptf(kpt.pt.x*scale, kpt.pt.y*scale);
const Mat& img = gpyr[(octave - firstOctave)*(nOctaveLayers + 3) + layer];
float angle = 360.f - kpt.angle;
if(std::abs(angle - 360.f) < FLT_EPSILON)
angle = 0.f;
calcSIFTDescriptor(img, ptf, angle, size*0.5f, d, n, descriptors.ptr<float>((int)i));
}
}
private:
const std::vector<Mat>& gpyr;
const std::vector<KeyPoint>& keypoints;
Mat& descriptors;
int nOctaveLayers;
int firstOctave;
};
static void calcDescriptors(const std::vector<Mat>& gpyr, const std::vector<KeyPoint>& keypoints,
Mat& descriptors, int nOctaveLayers, int firstOctave )
{
CV_TRACE_FUNCTION();
parallel_for_(Range(0, static_cast<int>(keypoints.size())), calcDescriptorsComputer(gpyr, keypoints, descriptors, nOctaveLayers, firstOctave));
}
//////////////////////////////////////////////////////////////////////////////////////////
SIFT_Impl::SIFT_Impl( int _nfeatures, int _nOctaveLayers,
double _contrastThreshold, double _edgeThreshold, double _sigma )
: nfeatures(_nfeatures), nOctaveLayers(_nOctaveLayers),
contrastThreshold(_contrastThreshold), edgeThreshold(_edgeThreshold), sigma(_sigma)
{
}
int SIFT_Impl::descriptorSize() const
{
return SIFT_DESCR_WIDTH*SIFT_DESCR_WIDTH*SIFT_DESCR_HIST_BINS;
}
int SIFT_Impl::descriptorType() const
{
return CV_32F;
}
int SIFT_Impl::defaultNorm() const
{
return NORM_L2;
}
void SIFT_Impl::detectAndCompute(InputArray _image, InputArray _mask,
std::vector<KeyPoint>& keypoints,
OutputArray _descriptors,
bool useProvidedKeypoints)
{
CV_TRACE_FUNCTION();
int firstOctave = -1, actualNOctaves = 0, actualNLayers = 0;
Mat image = _image.getMat(), mask = _mask.getMat();
if( image.empty() || image.depth() != CV_8U )
CV_Error( Error::StsBadArg, "image is empty or has incorrect depth (!=CV_8U)" );
if( !mask.empty() && mask.type() != CV_8UC1 )
CV_Error( Error::StsBadArg, "mask has incorrect type (!=CV_8UC1)" );
if( useProvidedKeypoints )
{
firstOctave = 0;
int maxOctave = INT_MIN;
for( size_t i = 0; i < keypoints.size(); i++ )
{
int octave, layer;
float scale;
unpackOctave(keypoints[i], octave, layer, scale);
firstOctave = std::min(firstOctave, octave);
maxOctave = std::max(maxOctave, octave);
actualNLayers = std::max(actualNLayers, layer-2);
}
firstOctave = std::min(firstOctave, 0);
CV_Assert( firstOctave >= -1 && actualNLayers <= nOctaveLayers );
actualNOctaves = maxOctave - firstOctave + 1;
}
Mat base = createInitialImage(image, firstOctave < 0, (float)sigma);
std::vector<Mat> gpyr;
int nOctaves = actualNOctaves > 0 ? actualNOctaves : cvRound(std::log( (double)std::min( base.cols, base.rows ) ) / std::log(2.) - 2) - firstOctave;
//double t, tf = getTickFrequency();
//t = (double)getTickCount();
buildGaussianPyramid(base, gpyr, nOctaves);
//t = (double)getTickCount() - t;
//printf("pyramid construction time: %g\n", t*1000./tf);
if( !useProvidedKeypoints )
{
std::vector<Mat> dogpyr;
buildDoGPyramid(gpyr, dogpyr);
//t = (double)getTickCount();
findScaleSpaceExtrema(gpyr, dogpyr, keypoints);
KeyPointsFilter::removeDuplicatedSorted( keypoints );
if( nfeatures > 0 )
KeyPointsFilter::retainBest(keypoints, nfeatures);
//t = (double)getTickCount() - t;
//printf("keypoint detection time: %g\n", t*1000./tf);
if( firstOctave < 0 )
for( size_t i = 0; i < keypoints.size(); i++ )
{
KeyPoint& kpt = keypoints[i];
float scale = 1.f/(float)(1 << -firstOctave);
kpt.octave = (kpt.octave & ~255) | ((kpt.octave + firstOctave) & 255);
kpt.pt *= scale;
kpt.size *= scale;
}
if( !mask.empty() )
KeyPointsFilter::runByPixelsMask( keypoints, mask );
}
else
{
// filter keypoints by mask
//KeyPointsFilter::runByPixelsMask( keypoints, mask );
}
if( _descriptors.needed() )
{
//t = (double)getTickCount();
int dsize = descriptorSize();
_descriptors.create((int)keypoints.size(), dsize, CV_32F);
Mat descriptors = _descriptors.getMat();
calcDescriptors(gpyr, keypoints, descriptors, nOctaveLayers, firstOctave);
//t = (double)getTickCount() - t;
//printf("descriptor extraction time: %g\n", t*1000./tf);
}
}
}
#endif
CV_CPU_OPTIMIZATION_NAMESPACE_END
} // namespace

Binary file not shown (new image: colorscale_deepgreen.jpg, 1.4 KiB).

@@ -4247,7 +4247,8 @@ enum ColormapTypes
COLORMAP_CIVIDIS = 17, //!< ![cividis](pics/colormaps/colorscale_cividis.jpg)
COLORMAP_TWILIGHT = 18, //!< ![twilight](pics/colormaps/colorscale_twilight.jpg)
COLORMAP_TWILIGHT_SHIFTED = 19, //!< ![twilight shifted](pics/colormaps/colorscale_twilight_shifted.jpg)
COLORMAP_TURBO = 20 //!< ![turbo](pics/colormaps/colorscale_turbo.jpg)
COLORMAP_TURBO = 20, //!< ![turbo](pics/colormaps/colorscale_turbo.jpg)
COLORMAP_DEEPGREEN = 21 //!< ![deepgreen](pics/colormaps/colorscale_deepgreen.jpg)
};
/** @example samples/cpp/falsecolor.cpp
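A minimal usage sketch for the new map (assumption: any 8-bit input works with applyColorMap; the gradient below is just for illustration):

#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>

int main()
{
    cv::Mat gray(64, 256, CV_8UC1);
    for (int c = 0; c < gray.cols; ++c)
        gray.col(c).setTo(c);  // horizontal 0..255 gradient
    cv::Mat colored;
    cv::applyColorMap(gray, colored, cv::COLORMAP_DEEPGREEN); // new map, id 21
    return 0;
}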

@@ -297,6 +297,28 @@ namespace colormap
}
};
// Equals the colormap "deepgreen".
class DeepGreen : public ColorMap {
public:
DeepGreen() : ColorMap() {
init(256);
}
DeepGreen(int n) : ColorMap() {
init(n);
}
void init(int n) {
static const float r[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.04761904761904762f, 0.09523809523809523f, 0.1428571428571428f, 0.1904761904761905f, 0.2380952380952381f, 0.2857142857142857f, 0.3333333333333333f, 0.3809523809523809f, 0.4285714285714285f, 0.4761904761904762f, 0.5238095238095238f, 0.5714285714285714f, 0.6190476190476191f, 0.6666666666666666f, 0.7142857142857143f, 0.7619047619047619f, 0.8095238095238095f, 0.8571428571428571f, 0.9047619047619048f, 0.9523809523809523f, 1 };
static const float g[] = { 0, 0.01587301587301587f, 0.03174603174603174f, 0.04761904761904762f, 0.06349206349206349f, 0.07936507936507936f, 0.09523809523809523f, 0.1111111111111111f, 0.126984126984127f, 0.1428571428571428f, 0.1587301587301587f, 0.1746031746031746f, 0.1904761904761905f, 0.2063492063492063f, 0.2222222222222222f, 0.2380952380952381f, 0.253968253968254f, 0.2698412698412698f, 0.2857142857142857f, 0.3015873015873016f, 0.3174603174603174f, 0.3333333333333333f, 0.3492063492063492f, 0.3650793650793651f, 0.3809523809523809f, 0.3968253968253968f, 0.4126984126984127f, 0.4285714285714285f, 0.4444444444444444f, 0.4603174603174603f, 0.4761904761904762f, 0.492063492063492f, 0.5079365079365079f, 0.5238095238095238f, 0.5396825396825397f, 0.5555555555555556f, 0.5714285714285714f, 0.5873015873015873f, 0.6031746031746031f, 0.6190476190476191f, 0.6349206349206349f, 0.6507936507936508f, 0.6666666666666666f, 0.6825396825396826f, 0.6984126984126984f, 0.7142857142857143f, 0.7301587301587301f, 0.746031746031746f, 0.7619047619047619f, 0.7777777777777778f, 0.7936507936507936f, 0.8095238095238095f, 0.8253968253968254f, 0.8412698412698413f, 0.8571428571428571f, 0.873015873015873f, 0.8888888888888888f, 0.9047619047619048f, 0.9206349206349206f, 0.9365079365079365f, 0.9523809523809523f, 0.9682539682539683f, 0.9841269841269841f, 1 };
static const float b[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.02380952380952381f, 0.04761904761904762f, 0.07142857142857142f, 0.09523809523809523f, 0.119047619047619f, 0.1428571428571428f, 0.1666666666666667f, 0.1904761904761905f, 0.2142857142857143f, 0.2380952380952381f, 0.2619047619047619f, 0.2857142857142857f, 0.3095238095238095f, 0.3333333333333333f, 0.3571428571428572f, 0.3809523809523809f, 0.4047619047619048f, 0.4285714285714285f, 0.4523809523809524f, 0.4761904761904762f, 0.5f, 0.5238095238095238f, 0.5476190476190477f, 0.5714285714285714f, 0.5952380952380952f, 0.6190476190476191f, 0.6428571428571429f, 0.6666666666666666f, 0.6904761904761905f, 0.7142857142857143f, 0.7380952380952381f, 0.7619047619047619f, 0.7857142857142857f, 0.8095238095238095f, 0.8333333333333334f, 0.8571428571428571f, 0.8809523809523809f, 0.9047619047619048f, 0.9285714285714286f, 0.9523809523809523f, 0.9761904761904762f, 1 };
Mat X = linspace(0, 1, 64);
this->_lut = ColorMap::linear_colormap(X,
Mat(64, 1, CV_32FC1, (void*)r).clone(), // red
Mat(64, 1, CV_32FC1, (void*)g).clone(), // green
Mat(64, 1, CV_32FC1, (void*)b).clone(), // blue
n); // number of sample points
}
};
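For reference: the three 64-entry arrays above are control points sampled at linspace(0, 1, 64); ColorMap::linear_colormap() then interpolates them to n output entries. Green ramps linearly over the whole range, while red stays at zero for the first 43 of 64 entries (about two thirds) and blue for the first 22 (about one third), which is what gives the map its dark-to-light green character.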
// Equals the GNU Octave colormap "ocean".
class Ocean : public ColorMap {
public:
@@ -742,6 +764,7 @@ namespace colormap
colormap == COLORMAP_BONE ? (colormap::ColorMap*)(new colormap::Bone) :
colormap == COLORMAP_CIVIDIS ? (colormap::ColorMap*)(new colormap::Cividis) :
colormap == COLORMAP_COOL ? (colormap::ColorMap*)(new colormap::Cool) :
colormap == COLORMAP_DEEPGREEN ? (colormap::ColorMap*)(new colormap::DeepGreen) :
colormap == COLORMAP_HOT ? (colormap::ColorMap*)(new colormap::Hot) :
colormap == COLORMAP_HSV ? (colormap::ColorMap*)(new colormap::HSV) :
colormap == COLORMAP_INFERNO ? (colormap::ColorMap*)(new colormap::Inferno) :
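A hedged usage sketch for the new constant (file names are placeholders); cv::applyColorMap() is the existing public entry point that reaches the dispatch chain above.

#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/imgcodecs.hpp>

int main()
{
    cv::Mat gray = cv::imread("input.png", cv::IMREAD_GRAYSCALE);
    CV_Assert(!gray.empty());
    cv::Mat colored;
    cv::applyColorMap(gray, colored, cv::COLORMAP_DEEPGREEN); // == 21, added above
    cv::imwrite("deepgreen.png", colored);
    return 0;
}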

@@ -109,6 +109,7 @@ Thanks to:
#include <vector>
// Include DirectShow headers here so we don't need to worry about all the individual .h files.
#define NO_DSHOW_STRSAFE
#include "dshow.h"
#include "strmif.h"
#include "aviriff.h"

@@ -14,10 +14,30 @@
using namespace std;
using namespace cv;
static mfxIMPL getImpl()
{
static const size_t res = utils::getConfigurationParameterSizeT("OPENCV_VIDEOIO_MFX_IMPL", MFX_IMPL_AUTO_ANY);
return (mfxIMPL)res;
}
static size_t getExtraSurfaceNum()
{
static const size_t res = cv::utils::getConfigurationParameterSizeT("OPENCV_VIDEOIO_MFX_EXTRA_SURFACE_NUM", 1);
return res;
}
static size_t getPoolTimeoutSec()
{
static const size_t res = utils::getConfigurationParameterSizeT("OPENCV_VIDEOIO_MFX_POOL_TIMEOUT", 1);
return res;
}
//==================================================================================================
bool DeviceHandler::init(MFXVideoSession &session)
{
mfxStatus res = MFX_ERR_NONE;
mfxIMPL impl = MFX_IMPL_AUTO_ANY;
mfxIMPL impl = getImpl();
mfxVersion ver = { {19, 1} };
res = session.Init(impl, &ver);
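Because each OPENCV_VIDEOIO_MFX_* helper caches its value in a static local on first use, the variables must be set before the first MFX session is created. A sketch, assuming POSIX setenv() and numeric mfxIMPL codes (check mfxdefs.h for the real values):

#include <cstdlib>
#include <opencv2/videoio.hpp>

int main()
{
    setenv("OPENCV_VIDEOIO_MFX_IMPL", "2", 1);              // assumed to be MFX_IMPL_HARDWARE
    setenv("OPENCV_VIDEOIO_MFX_EXTRA_SURFACE_NUM", "4", 1); // enlarge the surface pool
    setenv("OPENCV_VIDEOIO_MFX_POOL_TIMEOUT", "5", 1);      // wait up to 5 s for a free surface
    cv::VideoCapture cap("input.h264", cv::CAP_INTEL_MFX);
    return cap.isOpened() ? 0 : 1;
}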
@@ -114,11 +134,26 @@ SurfacePool::~SurfacePool()
{
}
SurfacePool * SurfacePool::_create(const mfxFrameAllocRequest &request, const mfxVideoParam &params)
{
return new SurfacePool(request.Info.Width,
request.Info.Height,
saturate_cast<ushort>((size_t)request.NumFrameSuggested + getExtraSurfaceNum()),
params.mfx.FrameInfo);
}
mfxFrameSurface1 *SurfacePool::getFreeSurface()
{
for(std::vector<mfxFrameSurface1>::iterator i = surfaces.begin(); i != surfaces.end(); ++i)
if (!i->Data.Locked)
return &(*i);
const int64 start = cv::getTickCount();
do
{
for(std::vector<mfxFrameSurface1>::iterator i = surfaces.begin(); i != surfaces.end(); ++i)
if (!i->Data.Locked)
return &(*i);
sleep_ms(10);
}
while((cv::getTickCount() - start) / cv::getTickFrequency() < getPoolTimeoutSec()); // seconds
DBG(cout << "No free surface!" << std::endl);
return 0;
}
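The loop above is a poll-with-deadline: retry every 10 ms until a surface unlocks or getPoolTimeoutSec() seconds elapse. The same pattern, generalized into a sketch (the helper name is illustrative, not OpenCV API; it reuses the sleep_ms() helper added in this patch):

#include <cstdint>
#include <opencv2/core.hpp>

template <typename Pred>
static bool pollWithDeadline(Pred pred, double timeoutSec)
{
    const int64_t start = cv::getTickCount();
    do
    {
        if (pred())   // e.g. "is any surface unlocked?"
            return true;
        sleep_ms(10); // back off briefly between checks
    }
    while ((cv::getTickCount() - start) / cv::getTickFrequency() < timeoutSec);
    return false;     // deadline passed without success
}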

@@ -6,6 +6,7 @@
#define MFXHELPER_H
#include "opencv2/core.hpp"
#include "opencv2/core/utils/configuration.private.hpp"
#include <iostream>
#include <fstream>
@@ -259,11 +260,10 @@ public:
DBG(std::cout << "MFX QueryIOSurf: " << res << std::endl);
if (res < MFX_ERR_NONE)
return 0;
return new SurfacePool(request.Info.Width,
request.Info.Height,
request.NumFrameSuggested,
params.mfx.FrameInfo);
return _create(request, params);
}
private:
static SurfacePool* _create(const mfxFrameAllocRequest& request, const mfxVideoParam& params);
private:
SurfacePool(const SurfacePool &);
SurfacePool &operator=(const SurfacePool &);
@@ -285,6 +285,29 @@ protected:
};
// TODO: move to core::util?
#ifdef CV_CXX11
#include <chrono>
#include <thread>
static void sleep_ms(int64 ms)
{
std::this_thread::sleep_for(std::chrono::milliseconds(ms));
}
#elif defined(__linux__)
#include <time.h>
static void sleep_ms(int64 ms)
{
    struct timespec ts;
    ts.tv_sec = (time_t)(ms / 1000);
    ts.tv_nsec = (long)((ms % 1000) * 1000 * 1000);
    nanosleep(&ts, NULL); // nanosleep() takes a timespec pointer, not a raw nanosecond count
}
#elif defined _WIN32
static void sleep_ms(int64 ms)
{
    Sleep((DWORD)ms); // WinAPI Sleep() takes milliseconds as a DWORD
}
#else
#error "Cannot detect sleep_ms() implementation"
#endif
// Linux specific
#ifdef __linux__
@@ -310,7 +333,6 @@ private:
#ifdef _WIN32
#include <Windows.h>
inline void sleep(unsigned long sec) { Sleep(1000 * sec); }
class DXHandle : public DeviceHandler {
public:

@@ -215,7 +215,7 @@ bool VideoCapture_IntelMFX::grabFrame()
else if (res == MFX_WRN_DEVICE_BUSY)
{
DBG(cout << "Waiting for device" << endl);
sleep(1);
sleep_ms(1000);
continue;
}
else if (res == MFX_WRN_VIDEO_PARAM_CHANGED)

@@ -11,6 +11,18 @@
using namespace std;
using namespace cv;
static size_t getBitrateDivisor()
{
static const size_t res = utils::getConfigurationParameterSizeT("OPENCV_VIDEOIO_MFX_BITRATE_DIVISOR", 300);
return res;
}
static mfxU32 getWriterTimeoutMS()
{
static const size_t res = utils::getConfigurationParameterSizeT("OPENCV_VIDEOIO_MFX_WRITER_TIMEOUT", 1);
return saturate_cast<mfxU32>(res * 1000); // convert from seconds
}
inline mfxU32 codecIdByFourCC(int fourcc)
{
const int CC_MPG2 = FourCC('M', 'P', 'G', '2').vali32;
@@ -78,7 +90,7 @@ VideoWriter_IntelMFX::VideoWriter_IntelMFX(const String &filename, int _fourcc,
memset(&params, 0, sizeof(params));
params.mfx.CodecId = codecId;
params.mfx.TargetUsage = MFX_TARGETUSAGE_BALANCED;
params.mfx.TargetKbps = (mfxU16)cvRound(frameSize.area() * fps / 500); // TODO: set in options
params.mfx.TargetKbps = saturate_cast<mfxU16>((frameSize.area() * fps) / (42.6666 * getBitrateDivisor())); // TODO: set in options
params.mfx.RateControlMethod = MFX_RATECONTROL_VBR;
params.mfx.FrameInfo.FrameRateExtN = cvRound(fps * 1000);
params.mfx.FrameInfo.FrameRateExtD = 1000;
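Worked example of the new bitrate formula: with the default divisor of 300 the denominator is 42.6666 * 300 ≈ 12800, so a 1920x1080 stream at 30 fps is assigned (1920 * 1080 * 30) / 12800 ≈ 4860 Kbps. The saturate_cast also clamps oversized results to the mfxU16 maximum (65535) instead of letting them wrap, as the old plain cast could.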
@@ -211,7 +223,7 @@ bool VideoWriter_IntelMFX::write_one(cv::InputArray bgr)
res = encoder->EncodeFrameAsync(NULL, workSurface, &bs->stream, &sync);
if (res == MFX_ERR_NONE)
{
res = session->SyncOperation(sync, 1000); // 1 sec, TODO: provide interface to modify timeout
res = session->SyncOperation(sync, getWriterTimeoutMS()); // TODO: provide interface to modify timeout
if (res == MFX_ERR_NONE)
{
// ready to write
@@ -240,7 +252,7 @@ bool VideoWriter_IntelMFX::write_one(cv::InputArray bgr)
else if (res == MFX_WRN_DEVICE_BUSY)
{
DBG(cout << "Waiting for device" << endl);
sleep(1);
sleep_ms(1000);
continue;
}
else
