From d38869eb763d1bf8937e4c8004d6af90d45e32d6 Mon Sep 17 00:00:00 2001 From: Alexander Alekhin Date: Thu, 4 Apr 2019 18:57:22 +0300 Subject: [PATCH 1/3] docs: fix links - replace tutorial links via docs.opencv.org - remove link on OpenCV 2.4 - avoid links on outdated packages --- doc/CMakeLists.txt | 3 +++ doc/Doxyfile.in | 4 ++-- .../py_histogram_begins.markdown | 3 +-- .../py_setup_in_windows.markdown | 21 ++++++++----------- .../camera_calibration_pattern.markdown | 7 ++++++- .../gpu_basics_similarity.markdown | 4 +--- .../android_binary_package/O4A_SDK.markdown | 10 +++------ .../clojure_dev_intro.markdown | 9 ++++---- .../introduction_to_svm.markdown | 2 -- .../non_linear_svms/non_linear_svms.markdown | 3 --- .../CameraCalibrationActivity.java | 4 ++-- samples/cpp/matchmethod_orb_akaze_brisk.cpp | 10 ++++----- 12 files changed, 36 insertions(+), 44 deletions(-) diff --git a/doc/CMakeLists.txt b/doc/CMakeLists.txt index 8d7593e6bf..ed94e46317 100644 --- a/doc/CMakeLists.txt +++ b/doc/CMakeLists.txt @@ -34,6 +34,7 @@ if(DOXYGEN_FOUND) foreach(m ${OPENCV_MODULES_MAIN} ${OPENCV_MODULES_EXTRA}) list(FIND blacklist ${m} _pos) if(${_pos} EQUAL -1) + list(APPEND CMAKE_DOXYGEN_ENABLED_SECTIONS "HAVE_opencv_${m}") # include folder set(header_dir "${OPENCV_MODULE_opencv_${m}_LOCATION}/include") if(EXISTS "${header_dir}") @@ -125,6 +126,8 @@ if(DOXYGEN_FOUND) # set export variables string(REPLACE ";" " \\\n" CMAKE_DOXYGEN_INPUT_LIST "${rootfile} ; ${faqfile} ; ${paths_include} ; ${paths_hal_interface} ; ${paths_doc} ; ${tutorial_path} ; ${tutorial_py_path} ; ${tutorial_js_path} ; ${paths_tutorial} ; ${tutorial_contrib_root}") string(REPLACE ";" " \\\n" CMAKE_DOXYGEN_IMAGE_PATH "${paths_doc} ; ${tutorial_path} ; ${tutorial_py_path} ; ${tutorial_js_path} ; ${paths_tutorial}") + string(REPLACE ";" " \\\n" CMAKE_DOXYGEN_EXCLUDE_LIST "${CMAKE_DOXYGEN_EXCLUDE_LIST}") + string(REPLACE ";" " " CMAKE_DOXYGEN_ENABLED_SECTIONS "${CMAKE_DOXYGEN_ENABLED_SECTIONS}") # TODO: remove paths_doc from EXAMPLE_PATH after face module tutorials/samples moved to separate folders string(REPLACE ";" " \\\n" CMAKE_DOXYGEN_EXAMPLE_PATH "${example_path} ; ${paths_doc} ; ${paths_sample}") string(REPLACE ";" " \\\n" CMAKE_DOXYGEN_INCLUDE_ROOTS "${paths_include}") diff --git a/doc/Doxyfile.in b/doc/Doxyfile.in index 8386b14ff9..456bd00524 100644 --- a/doc/Doxyfile.in +++ b/doc/Doxyfile.in @@ -85,7 +85,7 @@ GENERATE_TODOLIST = YES GENERATE_TESTLIST = YES GENERATE_BUGLIST = YES GENERATE_DEPRECATEDLIST= YES -ENABLED_SECTIONS = +ENABLED_SECTIONS = @CMAKE_DOXYGEN_ENABLED_SECTIONS@ MAX_INITIALIZER_LINES = 30 SHOW_USED_FILES = YES SHOW_FILES = YES @@ -104,7 +104,7 @@ INPUT = @CMAKE_DOXYGEN_INPUT_LIST@ INPUT_ENCODING = UTF-8 FILE_PATTERNS = RECURSIVE = YES -EXCLUDE = +EXCLUDE = @CMAKE_DOXYGEN_EXCLUDE_LIST@ EXCLUDE_SYMLINKS = NO EXCLUDE_PATTERNS = *.inl.hpp *.impl.hpp *_detail.hpp */cudev/**/detail/*.hpp *.m */opencl/runtime/* EXCLUDE_SYMBOLS = cv::DataType<*> cv::traits::* int void CV__* T __CV* diff --git a/doc/py_tutorials/py_imgproc/py_histograms/py_histogram_begins/py_histogram_begins.markdown b/doc/py_tutorials/py_imgproc/py_histograms/py_histogram_begins/py_histogram_begins.markdown index df52e86bf8..c26449cad4 100644 --- a/doc/py_tutorials/py_imgproc/py_histograms/py_histogram_begins/py_histogram_begins.markdown +++ b/doc/py_tutorials/py_imgproc/py_histograms/py_histogram_begins/py_histogram_begins.markdown @@ -41,8 +41,7 @@ you need 256 values to show the above histogram. 
But consider, what if you need of pixels for all pixel values separately, but number of pixels in a interval of pixel values? say for example, you need to find the number of pixels lying between 0 to 15, then 16 to 31, ..., 240 to 255. You will need only 16 values to represent the histogram. And that is what is shown in example -given in [OpenCV Tutorials on -histograms](http://docs.opencv.org/doc/tutorials/imgproc/histograms/histogram_calculation/histogram_calculation.html#histogram-calculation). +given in @ref tutorial_histogram_calculation "OpenCV Tutorials on histograms". So what you do is simply split the whole histogram to 16 sub-parts and value of each sub-part is the sum of all pixel count in it. This each sub-part is called "BIN". In first case, number of bins diff --git a/doc/py_tutorials/py_setup/py_setup_in_windows/py_setup_in_windows.markdown b/doc/py_tutorials/py_setup/py_setup_in_windows/py_setup_in_windows.markdown index 891c51fb6c..0ba1643ee1 100644 --- a/doc/py_tutorials/py_setup/py_setup_in_windows/py_setup_in_windows.markdown +++ b/doc/py_tutorials/py_setup/py_setup_in_windows/py_setup_in_windows.markdown @@ -15,18 +15,18 @@ Installing OpenCV from prebuilt binaries -# Below Python packages are to be downloaded and installed to their default locations. - -# [Python-2.7.x](http://www.python.org/ftp/python/2.7.13/python-2.7.13.msi). + -# Python 3.x (3.4+) or Python 2.7.x from [here](https://www.python.org/downloads/). - -# [Numpy](https://sourceforge.net/projects/numpy/files/NumPy/1.10.2/numpy-1.10.2-win32-superpack-python2.7.exe/download). + -# Numpy package (for example, using `pip install numpy` command). - -# [Matplotlib](https://sourceforge.net/projects/matplotlib/files/matplotlib/matplotlib-1.5.0/windows/matplotlib-1.5.0.win32-py2.7.exe/download) (*Matplotlib is optional, but recommended since we use it a lot in our tutorials*). + -# Matplotlib (`pip install matplotlib`) (*Matplotlib is optional, but recommended since we use it a lot in our tutorials*). --# Install all packages into their default locations. Python will be installed to `C:/Python27/`. +-# Install all packages into their default locations. Python will be installed to `C:/Python27/` in case of Python 2.7. -# After installation, open Python IDLE. Enter **import numpy** and make sure Numpy is working fine. --# Download latest OpenCV release from [sourceforge - site](http://sourceforge.net/projects/opencvlibrary/files/opencv-win/2.4.6/OpenCV-2.4.6.0.exe/download) +-# Download latest OpenCV release from [GitHub](https://github.com/opencv/opencv/releases) or + [SourceForge site](https://sourceforge.net/projects/opencvlibrary/files/) and double-click to extract it. -# Goto **opencv/build/python/2.7** folder. @@ -49,16 +49,13 @@ Building OpenCV from source -# [Visual Studio 2012](http://go.microsoft.com/?linkid=9816768) - -# [CMake](http://www.cmake.org/files/v2.8/cmake-2.8.11.2-win32-x86.exe) + -# [CMake](https://cmake.org/download/) -# Download and install necessary Python packages to their default locations - -# [Python 2.7.x](http://python.org/ftp/python/2.7.5/python-2.7.5.msi) + -# Python - -# [Numpy](http://sourceforge.net/projects/numpy/files/NumPy/1.7.1/numpy-1.7.1-win32-superpack-python2.7.exe/download) - - -# [Matplotlib](https://downloads.sourceforge.net/project/matplotlib/matplotlib/matplotlib-1.3.0/matplotlib-1.3.0.win32-py2.7.exe) - (*Matplotlib is optional, but recommended since we use it a lot in our tutorials.*) + -# Numpy @note In this case, we are using 32-bit binaries of Python packages. 
But if you want to use OpenCV for x64, 64-bit binaries of Python packages are to be installed. Problem is that, there diff --git a/doc/tutorials/calib3d/camera_calibration_pattern/camera_calibration_pattern.markdown b/doc/tutorials/calib3d/camera_calibration_pattern/camera_calibration_pattern.markdown index de219a60f4..fba4f5a4cc 100644 --- a/doc/tutorials/calib3d/camera_calibration_pattern/camera_calibration_pattern.markdown +++ b/doc/tutorials/calib3d/camera_calibration_pattern/camera_calibration_pattern.markdown @@ -30,4 +30,9 @@ If you want to change unit use -u option (mm inches, px, m) If you want to change page size use -w and -h options -If you want to create a ChArUco board read tutorial Detection of ChArUco Corners in opencv_contrib tutorial(https://docs.opencv.org/3.4/df/d4a/tutorial_charuco_detection.html) \ No newline at end of file +@cond HAVE_opencv_aruco +If you want to create a ChArUco board read @ref tutorial_charuco_detection "tutorial Detection of ChArUco Corners" in opencv_contrib tutorial. +@endcond +@cond !HAVE_opencv_aruco +If you want to create a ChArUco board read tutorial Detection of ChArUco Corners in opencv_contrib tutorial. +@endcond diff --git a/doc/tutorials/gpu/gpu-basics-similarity/gpu_basics_similarity.markdown b/doc/tutorials/gpu/gpu-basics-similarity/gpu_basics_similarity.markdown index 1ef0e74311..b511221a79 100644 --- a/doc/tutorials/gpu/gpu-basics-similarity/gpu_basics_similarity.markdown +++ b/doc/tutorials/gpu/gpu-basics-similarity/gpu_basics_similarity.markdown @@ -96,9 +96,7 @@ I1 = gI1; // Download, gI1.download(I1) will work too @endcode Once you have your data up in the GPU memory you may call GPU enabled functions of OpenCV. Most of the functions keep the same name just as on the CPU, with the difference that they only accept -*GpuMat* inputs. A full list of these you will find in the documentation: [online -here](http://docs.opencv.org/modules/gpu/doc/gpu.html) or the OpenCV reference manual that comes -with the source code. +*GpuMat* inputs. Another thing to keep in mind is that not for all channel numbers you can make efficient algorithms on the GPU. Generally, I found that the input images for the GPU images need to be either one or diff --git a/doc/tutorials/introduction/android_binary_package/O4A_SDK.markdown b/doc/tutorials/introduction/android_binary_package/O4A_SDK.markdown index cb61f6dd94..57e4c3fe15 100644 --- a/doc/tutorials/introduction/android_binary_package/O4A_SDK.markdown +++ b/doc/tutorials/introduction/android_binary_package/O4A_SDK.markdown @@ -83,7 +83,7 @@ The structure of package contents looks as follows: - `doc` folder contains various OpenCV documentation in PDF format. It's also available online at . - @note The most recent docs (nightly build) are at . Generally, it's more + @note The most recent docs (nightly build) are at . Generally, it's more up-to-date, but can refer to not-yet-released functionality. @todo I'm not sure that this is the best place to talk about OpenCV Manager @@ -97,10 +97,6 @@ applications developers: - Automatic updates and bug fixes; - Trusted OpenCV library source. 
All packages with OpenCV are published on Google Play; -For additional information on OpenCV Manager see the: - -- [Slides](https://docs.google.com/a/itseez.com/presentation/d/1EO_1kijgBg_BsjNp2ymk-aarg-0K279_1VZRcPplSuk/present#slide=id.p) -- [Reference Manual](http://docs.opencv.org/android/refman.html) Manual OpenCV4Android SDK setup ------------------------------- @@ -108,8 +104,8 @@ Manual OpenCV4Android SDK setup ### Get the OpenCV4Android SDK -# Go to the [OpenCV download page on - SourceForge](http://sourceforge.net/projects/opencvlibrary/files/opencv-android/) and download - the latest available version. Currently it's [OpenCV-2.4.9-android-sdk.zip](http://sourceforge.net/projects/opencvlibrary/files/opencv-android/2.4.9/OpenCV-2.4.9-android-sdk.zip/download). + SourceForge](http://sourceforge.net/projects/opencvlibrary/files/) and download + the latest available version. This tutorial is based on this package: [OpenCV-2.4.9-android-sdk.zip](http://sourceforge.net/projects/opencvlibrary/files/opencv-android/2.4.9/OpenCV-2.4.9-android-sdk.zip/download). -# Create a new folder for Android with OpenCV development. For this tutorial we have unpacked OpenCV SDK to the `C:\Work\OpenCV4Android\` directory. diff --git a/doc/tutorials/introduction/clojure_dev_intro/clojure_dev_intro.markdown b/doc/tutorials/introduction/clojure_dev_intro/clojure_dev_intro.markdown index d85335ed1f..2cd8c74a07 100644 --- a/doc/tutorials/introduction/clojure_dev_intro/clojure_dev_intro.markdown +++ b/doc/tutorials/introduction/clojure_dev_intro/clojure_dev_intro.markdown @@ -27,8 +27,8 @@ lein run Preamble -------- -For detailed instruction on installing OpenCV with desktop Java support refer to the [corresponding -tutorial](http://docs.opencv.org/2.4.4-beta/doc/tutorials/introduction/desktop_java/java_dev_intro.html). +For detailed instruction on installing OpenCV with desktop Java support refer to the @ref tutorial_java_dev_intro "corresponding +tutorial". If you are in hurry, here is a minimum quick start guide to install OpenCV on Mac OS X: @@ -302,7 +302,7 @@ Then you can start interacting with OpenCV by just referencing the fully qualifi classes. @note -[Here](http://docs.opencv.org/java/) you can find the full OpenCV Java API. +[Here](https://docs.opencv.org/3.4/javadoc/index.html) you can find the full OpenCV Java API. @code{.clojure} user=> (org.opencv.core.Point. 0 0) @@ -387,8 +387,7 @@ user=> (javadoc Rect) @endcode ### Mimic the OpenCV Java Tutorial Sample in the REPL -Let's now try to port to Clojure the [opencv java tutorial -sample](http://docs.opencv.org/2.4.4-beta/doc/tutorials/introduction/desktop_java/java_dev_intro.html). +Let's now try to port to Clojure the @ref tutorial_java_dev_intro "OpenCV Java tutorial sample". Instead of writing it in a source file we're going to evaluate it at the REPL. Following is the original Java source code of the cited sample. diff --git a/doc/tutorials/ml/introduction_to_svm/introduction_to_svm.markdown b/doc/tutorials/ml/introduction_to_svm/introduction_to_svm.markdown index 5039285df4..e7bf3f4fb6 100644 --- a/doc/tutorials/ml/introduction_to_svm/introduction_to_svm.markdown +++ b/doc/tutorials/ml/introduction_to_svm/introduction_to_svm.markdown @@ -94,8 +94,6 @@ the weight vector \f$\beta\f$ and the bias \f$\beta_{0}\f$ of the optimal hyperp Source Code ----------- -@note The following code has been implemented with OpenCV 3.0 classes and functions. 
An equivalent version of the code using OpenCV 2.4 can be found in [this page.](http://docs.opencv.org/2.4/doc/tutorials/ml/introduction_to_svm/introduction_to_svm.html#introductiontosvms) - @add_toggle_cpp - **Downloadable code**: Click [here](https://github.com/opencv/opencv/tree/3.4/samples/cpp/tutorial_code/ml/introduction_to_svm/introduction_to_svm.cpp) diff --git a/doc/tutorials/ml/non_linear_svms/non_linear_svms.markdown b/doc/tutorials/ml/non_linear_svms/non_linear_svms.markdown index 9212911b1a..d193e3a751 100644 --- a/doc/tutorials/ml/non_linear_svms/non_linear_svms.markdown +++ b/doc/tutorials/ml/non_linear_svms/non_linear_svms.markdown @@ -89,9 +89,6 @@ Source Code You may also find the source code in `samples/cpp/tutorial_code/ml/non_linear_svms` folder of the OpenCV source library or [download it from here](https://github.com/opencv/opencv/tree/3.4/samples/cpp/tutorial_code/ml/non_linear_svms/non_linear_svms.cpp). -@note The following code has been implemented with OpenCV 3.0 classes and functions. An equivalent version of the code -using OpenCV 2.4 can be found in [this page.](http://docs.opencv.org/2.4/doc/tutorials/ml/non_linear_svms/non_linear_svms.html#nonlinearsvms) - @add_toggle_cpp - **Downloadable code**: Click [here](https://github.com/opencv/opencv/tree/3.4/samples/cpp/tutorial_code/ml/non_linear_svms/non_linear_svms.cpp) diff --git a/samples/android/camera-calibration/src/org/opencv/samples/cameracalibration/CameraCalibrationActivity.java b/samples/android/camera-calibration/src/org/opencv/samples/cameracalibration/CameraCalibrationActivity.java index af0853a7cc..77353a702c 100644 --- a/samples/android/camera-calibration/src/org/opencv/samples/cameracalibration/CameraCalibrationActivity.java +++ b/samples/android/camera-calibration/src/org/opencv/samples/cameracalibration/CameraCalibrationActivity.java @@ -1,8 +1,8 @@ // This sample is based on "Camera calibration With OpenCV" tutorial: -// http://docs.opencv.org/doc/tutorials/calib3d/camera_calibration/camera_calibration.html +// https://docs.opencv.org/3.4/d4/d94/tutorial_camera_calibration.html // // It uses standard OpenCV asymmetric circles grid pattern 11x4: -// https://github.com/opencv/opencv/blob/2.4/doc/acircles_pattern.png. +// https://github.com/opencv/opencv/blob/3.4/doc/acircles_pattern.png // The results are the camera matrix and 5 distortion coefficients. // // Tap on highlighted pattern to capture pattern corners for calibration. 
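For reference, the pattern detection that the Java activity above wraps comes down to one calib3d call per frame. A minimal C++ sketch of that step is shown below; the input file name "pattern_frame.png" and the single-frame setup are illustrative only and are not part of the sample.

@code{.cpp}
#include <opencv2/calib3d.hpp>
#include <opencv2/imgcodecs.hpp>
#include <iostream>
#include <vector>

int main()
{
    // Load one camera frame (illustrative file name).
    cv::Mat frame = cv::imread("pattern_frame.png", cv::IMREAD_GRAYSCALE);
    if (frame.empty()) { std::cerr << "no input image\n"; return 1; }

    // The stock doc/acircles_pattern.png board: 11 rows of 4 circles.
    const cv::Size patternSize(4, 11);
    std::vector<cv::Point2f> centers;
    bool found = cv::findCirclesGrid(frame, patternSize, centers,
                                     cv::CALIB_CB_ASYMMETRIC_GRID);
    std::cout << (found ? "pattern found" : "pattern not found") << '\n';
    return 0;
}
@endcode

Collecting such center sets over several captured views is what feeds cv::calibrateCamera() to produce the camera matrix and the 5 distortion coefficients mentioned in the comment above.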
diff --git a/samples/cpp/matchmethod_orb_akaze_brisk.cpp b/samples/cpp/matchmethod_orb_akaze_brisk.cpp
index 890d673e91..4ae606f2a9 100644
--- a/samples/cpp/matchmethod_orb_akaze_brisk.cpp
+++ b/samples/cpp/matchmethod_orb_akaze_brisk.cpp
@@ -24,11 +24,11 @@ int main(int argc, char *argv[])
     vector<String> typeAlgoMatch;
     vector<String> fileName;
     // This descriptor are going to be detect and compute
-    typeDesc.push_back("AKAZE-DESCRIPTOR_KAZE_UPRIGHT"); // see http://docs.opencv.org/trunk/d8/d30/classcv_1_1AKAZE.html
-    typeDesc.push_back("AKAZE"); // see http://docs.opencv.org/trunk/d8/d30/classcv_1_1AKAZE.html
-    typeDesc.push_back("ORB"); // see http://docs.opencv.org/trunk/de/dbf/classcv_1_1BRISK.html
-    typeDesc.push_back("BRISK"); // see http://docs.opencv.org/trunk/db/d95/classcv_1_1ORB.html
-    // This algorithm would be used to match descriptors see http://docs.opencv.org/trunk/db/d39/classcv_1_1DescriptorMatcher.html#ab5dc5036569ecc8d47565007fa518257
+    typeDesc.push_back("AKAZE-DESCRIPTOR_KAZE_UPRIGHT"); // see https://docs.opencv.org/3.4/d8/d30/classcv_1_1AKAZE.html
+    typeDesc.push_back("AKAZE"); // see https://docs.opencv.org/3.4/d8/d30/classcv_1_1AKAZE.html
+    typeDesc.push_back("ORB"); // see https://docs.opencv.org/3.4/db/d95/classcv_1_1ORB.html
+    typeDesc.push_back("BRISK"); // see https://docs.opencv.org/3.4/de/dbf/classcv_1_1BRISK.html
+    // This algorithm would be used to match descriptors see https://docs.opencv.org/3.4/db/d39/classcv_1_1DescriptorMatcher.html#ab5dc5036569ecc8d47565007fa518257
     typeAlgoMatch.push_back("BruteForce");
     typeAlgoMatch.push_back("BruteForce-L1");
     typeAlgoMatch.push_back("BruteForce-Hamming");

From ec41a4897a7feb1c83afe19078af174368464b07 Mon Sep 17 00:00:00 2001
From: Dmitry Kurtaev
Date: Wed, 3 Apr 2019 13:42:06 +0300
Subject: [PATCH 2/3] Remove Switch and Merge nodes from TensorFlow networks

---
 .../src/tensorflow/tf_graph_simplifier.cpp | 81 ++++++++++++++++++-
 .../src/tensorflow/tf_graph_simplifier.hpp |  2 +
 modules/dnn/src/tensorflow/tf_importer.cpp |  3 +
 modules/dnn/test/test_tf_importer.cpp      | 10 +++
 4 files changed, 95 insertions(+), 1 deletion(-)

diff --git a/modules/dnn/src/tensorflow/tf_graph_simplifier.cpp b/modules/dnn/src/tensorflow/tf_graph_simplifier.cpp
index 59d0d57cc8..37e57505da 100644
--- a/modules/dnn/src/tensorflow/tf_graph_simplifier.cpp
+++ b/modules/dnn/src/tensorflow/tf_graph_simplifier.cpp
@@ -10,6 +10,7 @@
 #ifdef HAVE_PROTOBUF

 #include "tf_graph_simplifier.hpp"
+#include <queue>

 namespace cv { namespace dnn {
 CV__DNN_EXPERIMENTAL_NS_BEGIN
@@ -883,7 +884,6 @@ void sortByExecutionOrder(tensorflow::GraphDef& net)
         nodesToAdd.pop_back();

         permIds.push_back(nodeToAdd);
-        // std::cout << net.node(nodeToAdd).name() << '\n';

         for (int i = 0; i < edges[nodeToAdd].size(); ++i)
         {
@@ -902,6 +902,85 @@ void sortByExecutionOrder(tensorflow::GraphDef& net)
     permute(net.mutable_node(), permIds);
 }

+// Remove training switches (Switch and Merge nodes and corresponding subgraphs).
+void removePhaseSwitches(tensorflow::GraphDef& net)
+{
+    std::vector<int> nodesToRemove;
+    std::map<std::string, int> nodesMap;
+    std::map<std::string, int>::iterator nodesMapIt;
+    std::queue<int> mergeOpSubgraphNodes;
+    for (int i = 0; i < net.node_size(); ++i)
+    {
+        const tensorflow::NodeDef& node = net.node(i);
+        nodesMap.insert(std::make_pair(node.name(), i));
+        if (node.op() == "Switch" || node.op() == "Merge")
+        {
+            CV_Assert(node.input_size() > 0);
+            // Replace consumers' inputs.
+            for (int j = 0; j < net.node_size(); ++j)
+            {
+                tensorflow::NodeDef* consumer = net.mutable_node(j);
+                for (int k = 0; k < consumer->input_size(); ++k)
+                {
+                    std::string inpName = consumer->input(k);
+                    inpName = inpName.substr(0, inpName.rfind(':'));
+                    if (inpName == node.name())
+                    {
+                        consumer->set_input(k, node.input(0));
+                    }
+                }
+            }
+            nodesToRemove.push_back(i);
+            if (node.op() == "Merge")
+                mergeOpSubgraphNodes.push(i);
+        }
+    }
+
+    std::vector<int> numConsumers(net.node_size(), 0);
+    for (int i = 0; i < net.node_size(); ++i)
+    {
+        const tensorflow::NodeDef& node = net.node(i);
+        for (int j = 0; j < node.input_size(); ++j)
+        {
+            std::string inpName = node.input(j);
+            inpName = inpName.substr(1 + (int)inpName.find('^'), inpName.rfind(':'));
+            nodesMapIt = nodesMap.find(inpName);
+            CV_Assert(nodesMapIt != nodesMap.end());
+            numConsumers[nodesMapIt->second] += 1;
+        }
+    }
+
+    // Remove subgraphs of unused nodes which are terminated by Merge nodes.
+    while (!mergeOpSubgraphNodes.empty())
+    {
+        const tensorflow::NodeDef& node = net.node(mergeOpSubgraphNodes.front());
+        mergeOpSubgraphNodes.pop();
+        for (int i = 0; i < node.input_size(); ++i)
+        {
+            std::string inpName = node.input(i);
+            inpName = inpName.substr(1 + (int)inpName.find('^'), inpName.rfind(':'));
+            nodesMapIt = nodesMap.find(inpName);
+            CV_Assert(nodesMapIt != nodesMap.end());
+
+            int inpNodeId = nodesMapIt->second;
+            if (numConsumers[inpNodeId] == 1)
+            {
+                mergeOpSubgraphNodes.push(inpNodeId);
+                nodesToRemove.push_back(inpNodeId);
+            }
+            else if (numConsumers[inpNodeId] > 0)
+                numConsumers[inpNodeId] -= 1;
+        }
+    }
+    std::sort(nodesToRemove.begin(), nodesToRemove.end());
+    for (int i = nodesToRemove.size() - 1; i >= 0; --i)
+    {
+        if (nodesToRemove[i] < net.node_size()) // Ids might be repeated.
+            net.mutable_node()->DeleteSubrange(nodesToRemove[i], 1);
+    }
+}
+
 CV__DNN_EXPERIMENTAL_NS_END
 }} // namespace dnn, namespace cv
diff --git a/modules/dnn/src/tensorflow/tf_graph_simplifier.hpp b/modules/dnn/src/tensorflow/tf_graph_simplifier.hpp
index 24c4bd5a70..5929d1f857 100644
--- a/modules/dnn/src/tensorflow/tf_graph_simplifier.hpp
+++ b/modules/dnn/src/tensorflow/tf_graph_simplifier.hpp
@@ -27,6 +27,8 @@ void releaseTensor(tensorflow::TensorProto* tensor);

 void sortByExecutionOrder(tensorflow::GraphDef& net);

+void removePhaseSwitches(tensorflow::GraphDef& net);
+
 CV__DNN_EXPERIMENTAL_NS_END
 }} // namespace dnn, namespace cv
diff --git a/modules/dnn/src/tensorflow/tf_importer.cpp b/modules/dnn/src/tensorflow/tf_importer.cpp
index 9cca9e9b64..a1628104f7 100644
--- a/modules/dnn/src/tensorflow/tf_importer.cpp
+++ b/modules/dnn/src/tensorflow/tf_importer.cpp
@@ -657,6 +657,9 @@ static int predictOutputDataLayout(const tensorflow::GraphDef& net,

 void TFImporter::populateNet(Net dstNet)
 {
+    if (!netTxt.ByteSize())
+        removePhaseSwitches(netBin);
+
     RemoveIdentityOps(netBin);
     RemoveIdentityOps(netTxt);
diff --git a/modules/dnn/test/test_tf_importer.cpp b/modules/dnn/test/test_tf_importer.cpp
index 395a965ada..ef5206f340 100644
--- a/modules/dnn/test/test_tf_importer.cpp
+++ b/modules/dnn/test/test_tf_importer.cpp
@@ -185,6 +185,16 @@ TEST_P(Test_TensorFlow_layers, batch_norm)
     runTensorFlowNet("mvn_batch_norm_1x1");
 }

+TEST_P(Test_TensorFlow_layers, slim_batch_norm)
+{
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE)
+        throw SkipTestException("Test is disabled for DLIE");
+    // Output values range: [-40.0597, 207.827]
+    double l1 = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.041 : default_l1;
+    double lInf = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.33 : default_lInf;
+    runTensorFlowNet("slim_batch_norm", false, l1, lInf);
+}
+
 TEST_P(Test_TensorFlow_layers, pooling)
 {
     runTensorFlowNet("max_pool_even");

From 0c490accae2464cd4089af42db315e5995635692 Mon Sep 17 00:00:00 2001
From: Alexander Alekhin
Date: Fri, 5 Apr 2019 17:56:48 +0300
Subject: [PATCH 3/3] imgcodecs(tiff): check TIFF tile size

oss-fuzz: https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=13280
oss-fuzz: https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=13283
---
 modules/imgcodecs/src/grfmt_tiff.cpp | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/modules/imgcodecs/src/grfmt_tiff.cpp b/modules/imgcodecs/src/grfmt_tiff.cpp
index b1f3d8192c..2094b1accc 100644
--- a/modules/imgcodecs/src/grfmt_tiff.cpp
+++ b/modules/imgcodecs/src/grfmt_tiff.cpp
@@ -401,6 +401,10 @@ bool TiffDecoder::readData( Mat& img )
            (!is_tiled && tile_height0 == std::numeric_limits<uint32>::max()) )
             tile_height0 = m_height;

+        CV_Assert((int)tile_width0 > 0 && (int)tile_width0 < std::numeric_limits<int>::max());
+        CV_Assert((int)tile_height0 > 0 && (int)tile_height0 < std::numeric_limits<int>::max());
+        CV_Assert(((uint64_t)tile_width0 * tile_height0 * ncn * (bpp / bitsPerByte) < (CV_BIG_UINT(1) << 30)) && "TIFF tile size is too large: >= 1Gb");
+
         if (dst_bpp == 8)
         {
             // we will use TIFFReadRGBA* functions, so allocate temporary buffer for 32bit RGBA
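The three CV_Assert lines added above bound the tile buffer before anything is allocated: both tile dimensions must be positive and fit into an int, and the whole decode buffer must stay under 1 GiB. A standalone C++ sketch of the same guard follows; it is plain C++ without OpenCV, the function and parameter names are illustrative, and an explicit 1 GiB constant stands in for CV_BIG_UINT(1) << 30.

@code{.cpp}
#include <cstdint>
#include <iostream>
#include <limits>

// Mirrors the checks added to TiffDecoder::readData(): positive tile
// dimensions that fit into int, and a decode buffer below 1 GiB.
static bool tileSizeIsSane(uint32_t tileWidth, uint32_t tileHeight,
                           int channels, int bitsPerSample)
{
    const int bitsPerByte = 8;
    if (!((int)tileWidth > 0 && (int)tileWidth < std::numeric_limits<int>::max()))
        return false;
    if (!((int)tileHeight > 0 && (int)tileHeight < std::numeric_limits<int>::max()))
        return false;
    const uint64_t bytes = (uint64_t)tileWidth * tileHeight * channels
                           * (uint64_t)(bitsPerSample / bitsPerByte);
    return bytes < (uint64_t(1) << 30); // "TIFF tile size is too large: >= 1Gb"
}

int main()
{
    std::cout << tileSizeIsSane(256, 256, 3, 8) << '\n';     // typical tile -> 1
    std::cout << tileSizeIsSane(70000, 70000, 3, 8) << '\n'; // ~14.7 GB tile -> 0
    std::cout << tileSizeIsSane(0, 256, 3, 8) << '\n';       // degenerate width -> 0
    return 0;
}
@endcode

Because the guard runs before the temporary buffer is created, a malformed file fails fast in CV_Assert instead of forcing an oversized allocation.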