Merge remote-tracking branch 'upstream/3.4' into merge-3.4

pull/17476/head
Alexander Alekhin 5 years ago
commit 7722a2b8a8
  1. cmake/OpenCVDetectCUDA.cmake (83 changed lines)
  2. doc/tutorials/core/file_input_output_with_xml_yml/file_input_output_with_xml_yml.markdown (5 changed lines)
  3. doc/tutorials/imgproc/table_of_content_imgproc.markdown (60 changed lines)
  4. doc/tutorials/imgproc/table_of_contents_contours.markdown (62 changed lines)
  5. modules/calib3d/include/opencv2/calib3d.hpp (5 changed lines)
  6. modules/core/include/opencv2/core/eigen.hpp (9 changed lines)
  7. modules/core/include/opencv2/core/persistence.hpp (16 changed lines)
  8. modules/core/src/count_non_zero.dispatch.cpp (6 changed lines)
  9. modules/core/test/test_countnonzero.cpp (19 changed lines)
  10. modules/dnn/src/darknet/darknet_io.cpp (7 changed lines)
  11. modules/dnn/src/dnn.cpp (81 changed lines)
  12. modules/dnn/src/onnx/onnx_importer.cpp (4 changed lines)
  13. modules/dnn/test/test_tf_importer.cpp (2 changed lines)
  14. modules/features2d/misc/java/test/BruteForceDescriptorMatcherTest.java (10 changed lines)
  15. modules/features2d/misc/java/test/FlannBasedDescriptorMatcherTest.java (10 changed lines)
  16. modules/imgproc/misc/java/test/ImgprocTest.java (10 changed lines)
  17. modules/imgproc/src/shapedescr.cpp (260 changed lines)
  18. modules/imgproc/test/test_fitellipse.cpp (36 changed lines)
  19. modules/java/generator/gen_java.py (13 changed lines)
  20. modules/js/test/tests.html (13 changed lines)
  21. modules/objdetect/src/cascadedetect_convert.cpp (4 changed lines)
  22. modules/python/test/test_filestorage_io.py (113 changed lines)
  23. modules/videoio/include/opencv2/videoio.hpp (2 changed lines)
  24. modules/videoio/include/opencv2/videoio/legacy/constants_c.h (2 changed lines)
  25. modules/videoio/src/cap_ffmpeg_impl.hpp (15 changed lines)
  26. platforms/winpack_dldt/2020.3.0/20200413-dldt-disable-unused-targets.patch (202 changed lines)
  27. platforms/winpack_dldt/2020.3.0/20200413-dldt-fix-binaries-location.patch (13 changed lines)
  28. platforms/winpack_dldt/2020.3.0/20200413-dldt-pdb.patch (14 changed lines)
  29. platforms/winpack_dldt/2020.3.0/20200604-dldt-disable-multidevice.patch (13 changed lines)
  30. platforms/winpack_dldt/2020.3.0/patch.config.py (4 changed lines)
  31. platforms/winpack_dldt/2020.3.0/sysroot.config.py (57 changed lines)
  32. platforms/winpack_dldt/build_package.py (8 changed lines)
  33. samples/dnn/human_parsing.cpp (104 changed lines)
  34. samples/dnn/virtual_try_on.py (2 changed lines)
  35. samples/python/tutorial_code/core/file_input_output/file_input_output.py (27 changed lines)

@@ -63,6 +63,12 @@ if(CUDA_FOUND)
message(STATUS "CUDA detected: " ${CUDA_VERSION})
set(_generations "Fermi" "Kepler" "Maxwell" "Pascal" "Volta" "Turing")
set(_arch_fermi "2.0")
set(_arch_kepler "3.0;3.5;3.7")
set(_arch_maxwell "5.0;5.2")
set(_arch_pascal "6.0;6.1")
set(_arch_volta "7.0")
set(_arch_turing "7.5")
if(NOT CMAKE_CROSSCOMPILING)
list(APPEND _generations "Auto")
endif()
@@ -86,30 +92,58 @@ if(CUDA_FOUND)
SET(DETECT_ARCHS_COMMAND ${DETECT_ARCHS_COMMAND} "-ccbin" "${host_compiler_bindir}")
endif()
macro(ocv_filter_available_architecture result_list)
if(DEFINED CUDA_SUPPORTED_CC)
set(${result_list} "${CUDA_SUPPORTED_CC}")
else()
set(CC_LIST ${ARGN})
foreach(target_arch ${CC_LIST})
string(REPLACE "." "" target_arch_short ${target_arch})
set(NVCC_OPTION "-gencode;arch=compute_${target_arch_short},code=sm_${target_arch_short}")
execute_process( COMMAND "${CUDA_NVCC_EXECUTABLE}" ${NVCC_OPTION} "${OpenCV_SOURCE_DIR}/cmake/checks/OpenCVDetectCudaArch.cu"
WORKING_DIRECTORY "${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/CMakeTmp/"
RESULT_VARIABLE _nvcc_res OUTPUT_VARIABLE _nvcc_out
ERROR_QUIET OUTPUT_STRIP_TRAILING_WHITESPACE)
if(_nvcc_res EQUAL 0)
set(${result_list} "${${result_list}} ${target_arch}")
endif()
endforeach()
string(STRIP ${${result_list}} ${result_list})
set(CUDA_SUPPORTED_CC ${${result_list}} CACHE INTERNAL "List of supported compute capability")
endif()
endmacro()
macro(ocv_detect_native_cuda_arch status output)
execute_process( COMMAND ${DETECT_ARCHS_COMMAND}
WORKING_DIRECTORY "${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/CMakeTmp/"
RESULT_VARIABLE ${status} OUTPUT_VARIABLE _nvcc_out
ERROR_QUIET OUTPUT_STRIP_TRAILING_WHITESPACE)
string(REGEX REPLACE ".*\n" "" ${output} "${_nvcc_out}") #Strip leading warning messages, if any
endmacro()
macro(ocv_wipeout_deprecated _arch_bin_list)
string(REPLACE "2.1" "2.1(2.0)" ${_arch_bin_list} ${${_arch_bin_list}})
endmacro()
set(__cuda_arch_ptx "")
if(CUDA_GENERATION STREQUAL "Fermi")
set(__cuda_arch_bin "2.0")
set(__cuda_arch_bin ${_arch_fermi})
elseif(CUDA_GENERATION STREQUAL "Kepler")
set(__cuda_arch_bin "3.0 3.5 3.7")
set(__cuda_arch_bin ${_arch_kepler})
elseif(CUDA_GENERATION STREQUAL "Maxwell")
set(__cuda_arch_bin "5.0 5.2")
set(__cuda_arch_bin ${_arch_maxwell})
elseif(CUDA_GENERATION STREQUAL "Pascal")
set(__cuda_arch_bin "6.0 6.1")
set(__cuda_arch_bin ${_arch_pascal})
elseif(CUDA_GENERATION STREQUAL "Volta")
set(__cuda_arch_bin "7.0")
set(__cuda_arch_bin ${_arch_volta})
elseif(CUDA_GENERATION STREQUAL "Turing")
set(__cuda_arch_bin "7.5")
set(__cuda_arch_bin ${_arch_turing})
elseif(CUDA_GENERATION STREQUAL "Auto")
execute_process( COMMAND ${DETECT_ARCHS_COMMAND}
WORKING_DIRECTORY "${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/CMakeTmp/"
RESULT_VARIABLE _nvcc_res OUTPUT_VARIABLE _nvcc_out
ERROR_QUIET OUTPUT_STRIP_TRAILING_WHITESPACE)
string(REGEX REPLACE ".*\n" "" _nvcc_out "${_nvcc_out}") #Strip leading warning messages, if any
ocv_detect_native_cuda_arch(_nvcc_res _nvcc_out)
if(NOT _nvcc_res EQUAL 0)
message(STATUS "Automatic detection of CUDA generation failed. Going to build for all known architectures.")
else()
set(__cuda_arch_bin "${_nvcc_out}")
string(REPLACE "2.1" "2.1(2.0)" __cuda_arch_bin "${__cuda_arch_bin}")
string(REGEX MATCHALL "[0-9]+\\.[0-9]" __cuda_arch_bin "${_nvcc_out}")
endif()
endif()
@@ -118,29 +152,26 @@ if(CUDA_FOUND)
set(__cuda_arch_bin "3.2")
set(__cuda_arch_ptx "")
elseif(AARCH64)
execute_process( COMMAND ${DETECT_ARCHS_COMMAND}
WORKING_DIRECTORY "${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/CMakeTmp/"
RESULT_VARIABLE _nvcc_res OUTPUT_VARIABLE _nvcc_out
ERROR_QUIET OUTPUT_STRIP_TRAILING_WHITESPACE)
string(REGEX REPLACE ".*\n" "" _nvcc_out "${_nvcc_out}") #Strip leading warning messages, if any
ocv_detect_native_cuda_arch(_nvcc_res _nvcc_out)
if(NOT _nvcc_res EQUAL 0)
message(STATUS "Automatic detection of CUDA generation failed. Going to build for all known architectures.")
set(__cuda_arch_bin "5.3 6.2 7.2")
else()
set(__cuda_arch_bin "${_nvcc_out}")
string(REPLACE "2.1" "2.1(2.0)" __cuda_arch_bin "${__cuda_arch_bin}")
endif()
set(__cuda_arch_ptx "")
else()
if(CUDA_VERSION VERSION_LESS "9.0")
set(__cuda_arch_bin "2.0 3.0 3.5 3.7 5.0 5.2 6.0 6.1")
elseif(CUDA_VERSION VERSION_LESS "10.0")
set(__cuda_arch_bin "3.0 3.5 3.7 5.0 5.2 6.0 6.1 7.0")
else()
set(__cuda_arch_bin "3.0 3.5 3.7 5.0 5.2 6.0 6.1 7.0 7.5")
endif()
ocv_filter_available_architecture(__cuda_arch_bin
${_arch_fermi}
${_arch_kepler}
${_arch_maxwell}
${_arch_pascal}
${_arch_volta}
${_arch_turing}
)
endif()
endif()
ocv_wipeout_deprecated(__cuda_arch_bin)
set(CUDA_ARCH_BIN ${__cuda_arch_bin} CACHE STRING "Specify 'real' GPU architectures to build binaries for, BIN(PTX) format is supported")
set(CUDA_ARCH_PTX ${__cuda_arch_ptx} CACHE STRING "Specify 'virtual' PTX architectures to build PTX intermediate code for")

@@ -107,8 +107,9 @@ you may access it. For sequences you need to go through them to query a specific
then we have to specify if our output is either a sequence or map.
For sequence before the first element print the "[" character and after the last one the "]"
character. With Python, the "]" character could be written with the name of the sequence or
the last element of the sequence depending on the number of elements:
character. With Python, call `FileStorage.startWriteStruct(structure_name, struct_type)`,
where `struct_type` is `cv2.FileNode_MAP` or `cv2.FileNode_SEQ` to start writing the structure.
Call `FileStorage.endWriteStruct()` to finish the structure:
@add_toggle_cpp
@snippet cpp/tutorial_code/core/file_input_output/file_input_output.cpp writeStr
@end_toggle
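As a standalone illustration of the Python calls described above (a minimal sketch, not part of this commit; the file name is arbitrary, and it mirrors the sample code updated below):

```python
import cv2 as cv

fs = cv.FileStorage('outputfile.yml', cv.FileStorage_WRITE)
fs.startWriteStruct('strings', cv.FileNode_SEQ)  # begin the sequence node
for s in ['image1.jpg', 'Awesomeness', '../data/baboon.jpg']:
    fs.write('', s)  # sequence elements are written with an empty key
fs.endWriteStruct()  # close the sequence
fs.release()
```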

@@ -243,65 +243,9 @@ In this section you will learn about the image processing (manipulation) functio
Where we learn how to match templates in an image
- @subpage tutorial_find_contours
- @subpage tutorial_table_of_contents_contours
*Languages:* C++, Java, Python
*Compatibility:* \> OpenCV 2.0
*Author:* Ana Huamán
Where we learn how to find contours of objects in our image
- @subpage tutorial_hull
*Languages:* C++, Java, Python
*Compatibility:* \> OpenCV 2.0
*Author:* Ana Huamán
Where we learn how to get hull contours and draw them
- @subpage tutorial_bounding_rects_circles
*Languages:* C++, Java, Python
*Compatibility:* \> OpenCV 2.0
*Author:* Ana Huamán
Where we learn how to obtain bounding boxes and circles for our contours
- @subpage tutorial_bounding_rotated_ellipses
*Languages:* C++, Java, Python
*Compatibility:* \> OpenCV 2.0
*Author:* Ana Huamán
Where we learn how to obtain rotated bounding boxes and ellipses for our contours
- @subpage tutorial_moments
*Languages:* C++, Java, Python
*Compatibility:* \> OpenCV 2.0
*Author:* Ana Huamán
Where we learn to calculate the moments of an image
- @subpage tutorial_point_polygon_test
*Languages:* C++, Java, Python
*Compatibility:* \> OpenCV 2.0
*Author:* Ana Huamán
Where we learn how to calculate distances from the image to contours
Learn how to find contours in images and investigate their properties and features.
- @subpage tutorial_distance_transform

@@ -0,0 +1,62 @@
Contours in OpenCV {#tutorial_table_of_contents_contours}
==================
- @subpage tutorial_find_contours
*Languages:* C++, Java, Python
*Compatibility:* \> OpenCV 2.0
*Author:* Ana Huamán
Where we learn how to find contours of objects in our image
- @subpage tutorial_hull
*Languages:* C++, Java, Python
*Compatibility:* \> OpenCV 2.0
*Author:* Ana Huamán
Where we learn how to get hull contours and draw them
- @subpage tutorial_bounding_rects_circles
*Languages:* C++, Java, Python
*Compatibility:* \> OpenCV 2.0
*Author:* Ana Huamán
Where we learn how to obtain bounding boxes and circles for our contours
- @subpage tutorial_bounding_rotated_ellipses
*Languages:* C++, Java, Python
*Compatibility:* \> OpenCV 2.0
*Author:* Ana Huamán
Where we learn how to obtain rotated bounding boxes and ellipses for our contours
- @subpage tutorial_moments
*Languages:* C++, Java, Python
*Compatibility:* \> OpenCV 2.0
*Author:* Ana Huamán
Where we learn to calculate the moments of an image
- @subpage tutorial_point_polygon_test
*Languages:* C++, Java, Python
*Compatibility:* \> OpenCV 2.0
*Author:* Ana Huamán
Where we learn how to calculate distances from the image to contours

@@ -2458,7 +2458,10 @@ be floating-point (single or double precision).
@param points2 Array of the second image points of the same size and format as points1 .
@param cameraMatrix Camera matrix \f$K = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$ .
Note that this function assumes that points1 and points2 are feature points from cameras with the
same camera matrix.
same camera matrix. If this assumption does not hold for your use case, use
`undistortPoints()` with `P = cv::NoArray()` for both cameras to transform image points
to normalized image coordinates, which are valid for the identity camera matrix. When
passing these coordinates, pass the identity matrix for this parameter.
@param method Method for computing an essential matrix.
- **RANSAC** for the RANSAC algorithm.
- **LMEDS** for the LMedS algorithm.
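A hedged Python sketch of the fallback described in the note above; `pts1`, `pts2` (Nx1x2 float arrays), `K`, and `dist` are assumed to exist:

```python
import cv2 as cv
import numpy as np

# Map pixel coordinates to normalized image coordinates; omitting P
# makes undistortPoints() return coordinates valid for an identity camera matrix.
pts1_n = cv.undistortPoints(pts1, K, dist)
pts2_n = cv.undistortPoints(pts2, K, dist)
# The identity matrix is now the correct camera matrix for both views.
E, mask = cv.findEssentialMat(pts1_n, pts2_n, np.eye(3), method=cv.RANSAC)
```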

@@ -45,9 +45,14 @@
#ifndef OPENCV_CORE_EIGEN_HPP
#define OPENCV_CORE_EIGEN_HPP
#ifndef EIGEN_WORLD_VERSION
#error "Wrong usage of OpenCV's Eigen utility header. Include Eigen's headers first. See https://github.com/opencv/opencv/issues/17366"
#endif
#include "opencv2/core.hpp"
#if EIGEN_WORLD_VERSION == 3 && EIGEN_MAJOR_VERSION >= 3
#if EIGEN_WORLD_VERSION == 3 && EIGEN_MAJOR_VERSION >= 3 \
&& defined(CV_CXX11) && defined(CV_CXX_STD_ARRAY)
#include <unsupported/Eigen/CXX11/Tensor>
#define OPENCV_EIGEN_TENSOR_SUPPORT
#endif // EIGEN_WORLD_VERSION == 3 && EIGEN_MAJOR_VERSION >= 3
@@ -157,7 +162,7 @@ Eigen::TensorMap<Eigen::Tensor<float, 3, Eigen::RowMajor>> a_tensormap = cv2eige
\endcode
*/
template <typename _Tp> static inline
Eigen::TensorMap<Eigen::Tensor<_Tp, 3, Eigen::RowMajor>> cv2eigen_tensormap(const cv::InputArray &src)
Eigen::TensorMap<Eigen::Tensor<_Tp, 3, Eigen::RowMajor>> cv2eigen_tensormap(InputArray src)
{
Mat mat = src.getMat();
CV_CheckTypeEQ(mat.type(), CV_MAKETYPE(traits::Type<_Tp>::value, mat.channels()), "");

@@ -436,12 +436,20 @@ public:
*/
CV_WRAP void writeComment(const String& comment, bool append = false);
void startWriteStruct(const String& name, int flags, const String& typeName);
void endWriteStruct();
/** @brief Starts to write a nested structure (sequence or a mapping).
@param name name of the structure (if it's a member of parent mapping, otherwise it should be empty)
@param flags type of the structure (FileNode::MAP or FileNode::SEQ (both with optional FileNode::FLOW)).
@param typeName usually an empty string
*/
CV_WRAP void startWriteStruct(const String& name, int flags, const String& typeName=String());
/** @brief Finishes writing nested structure (should pair with startWriteStruct())
*/
CV_WRAP void endWriteStruct();
/** @brief Returns the normalized object name for the specified name of a file.
@param filename Name of a file
@returns The normalized object name.
*/
static String getDefaultObjectName(const String& filename);
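Because both methods are CV_WRAP-ed, the same pairing is usable from Python; a minimal sketch mirroring the test added by this commit (file name is arbitrary):

```python
import cv2 as cv

fs = cv.FileStorage('data.json', cv.FileStorage_WRITE)
fs.startWriteStruct('Mapping', cv.FileNode_MAP | cv.FileNode_FLOW)  # compact inline map
fs.write('One', 1)
fs.write('Two', 2)
fs.endWriteStruct()  # pairs with the startWriteStruct() above
fs.release()
```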

@@ -62,6 +62,12 @@ static bool ipp_countNonZero( Mat &src, int &res )
{
CV_INSTRUMENT_REGION_IPP();
#if defined __APPLE__ || (defined _MSC_VER && defined _M_IX86)
// see https://github.com/opencv/opencv/issues/17453
if (src.dims <= 2 && src.step > 520000)
return false;
#endif
#if IPP_VERSION_X100 < 201801
// Poor performance of SSE42
if(cv::ipp::getIppTopFeatures() == ippCPUID_SSE42)

@@ -276,4 +276,23 @@ INSTANTIATE_TEST_CASE_P(Core, CountNonZeroND,
)
);
typedef testing::TestWithParam<tuple<int, cv::Size> > CountNonZeroBig;
TEST_P(CountNonZeroBig, /**/)
{
const int type = get<0>(GetParam());
const Size sz = get<1>(GetParam());
EXPECT_EQ(0, cv::countNonZero(cv::Mat::zeros(sz, type)));
EXPECT_EQ(sz.area(), cv::countNonZero(cv::Mat::ones(sz, type)));
}
INSTANTIATE_TEST_CASE_P(Core, CountNonZeroBig,
testing::Combine(
testing::Values(CV_8UC1, CV_32FC1),
testing::Values(Size(1, 524190), Size(524190, 1), Size(3840, 2160))
)
);
}} // namespace
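The same large-Mat regression (issue 17453) can be reproduced through the Python bindings; a sketch, not part of the commit:

```python
import numpy as np
import cv2 as cv

a = np.zeros((1, 524190), np.uint8)  # row step above the 520000-byte threshold
assert cv.countNonZero(a) == 0
a[:] = 1
assert cv.countNonZero(a) == 524190
```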

@@ -658,6 +658,8 @@ namespace cv {
if (pad)
padding = kernel_size / 2;
// Cannot divide 0
CV_Assert(stride > 0);
CV_Assert(kernel_size > 0 && filters > 0);
CV_Assert(tensor_shape[0] > 0);
CV_Assert(tensor_shape[0] % groups == 0);
@@ -690,6 +692,9 @@ namespace cv {
int kernel_size = getParam<int>(layer_params, "size", 2);
int stride = getParam<int>(layer_params, "stride", 2);
int padding = getParam<int>(layer_params, "padding", kernel_size - 1);
// Cannot divide 0
CV_Assert(stride > 0);
setParams.setMaxpool(kernel_size, padding, stride);
tensor_shape[1] = (tensor_shape[1] - kernel_size + padding) / stride + 1;
@@ -732,6 +737,8 @@ namespace cv {
else if (layer_type == "reorg")
{
int stride = getParam<int>(layer_params, "stride", 2);
// Cannot divide 0
CV_Assert(stride > 0);
tensor_shape[0] = tensor_shape[0] * (stride * stride);
tensor_shape[1] = tensor_shape[1] / stride;
tensor_shape[2] = tensor_shape[2] / stride;

@@ -3508,6 +3508,7 @@ Net Net::Impl::createNetworkFromModelOptimizer(InferenceEngine::CNNNetwork& ieNe
for (auto& it : ieNet.getOutputsInfo())
{
CV_TRACE_REGION("output");
const auto& outputName = it.first;
LayerParams lp;
int lid = cvNet.addLayer(it.first, "", lp);
@@ -3517,37 +3518,60 @@ Net Net::Impl::createNetworkFromModelOptimizer(InferenceEngine::CNNNetwork& ieNe
#ifdef HAVE_DNN_NGRAPH
if (DNN_BACKEND_INFERENCE_ENGINE_NGRAPH == getInferenceEngineBackendTypeParam())
{
const auto& outputName = it.first;
Ptr<Layer> cvLayer(new NgraphBackendLayer(ieNet));
cvLayer->name = outputName;
cvLayer->type = "_unknown_";
if (ngraphFunction)
auto process_layer = [&](const std::string& name) -> bool
{
CV_TRACE_REGION("ngraph_function");
bool found = false;
for (const auto& op : ngraphOperations)
if (ngraphFunction)
{
CV_Assert(op);
if (op->get_friendly_name() == outputName)
CV_TRACE_REGION("ngraph_function");
for (const auto& op : ngraphOperations)
{
const std::string typeName = op->get_type_info().name;
cvLayer->type = typeName;
found = true;
break;
CV_Assert(op);
if (op->get_friendly_name() == name)
{
const std::string typeName = op->get_type_info().name;
cvLayer->type = typeName;
return true;
}
}
return false;
}
if (!found)
CV_LOG_WARNING(NULL, "DNN/IE: Can't determine output layer type: '" << outputName << "'");
}
else
{
CV_TRACE_REGION("legacy_cnn_layer");
InferenceEngine::CNNLayerPtr ieLayer = ieNet.getLayerByName(it.first.c_str());
CV_Assert(ieLayer);
else
{
CV_TRACE_REGION("legacy_cnn_layer");
try
{
InferenceEngine::CNNLayerPtr ieLayer = ieNet.getLayerByName(name.c_str());
CV_Assert(ieLayer);
cvLayer->type = ieLayer->type;
cvLayer->type = ieLayer->type;
return true;
}
catch (const std::exception& e)
{
CV_UNUSED(e);
CV_LOG_DEBUG(NULL, "IE layer extraction failure: '" << name << "' - " << e.what());
return false;
}
}
};
bool found = process_layer(outputName);
if (!found)
{
auto pos = outputName.rfind('.'); // cut port number: ".0"
if (pos != std::string::npos)
{
std::string layerName = outputName.substr(0, pos);
found = process_layer(layerName);
}
}
if (!found)
CV_LOG_WARNING(NULL, "DNN/IE: Can't determine output layer type: '" << outputName << "'");
ld.layerInstance = cvLayer;
ld.backendNodes[DNN_BACKEND_INFERENCE_ENGINE_NGRAPH] = backendNode;
}
@@ -3557,10 +3581,23 @@ Net Net::Impl::createNetworkFromModelOptimizer(InferenceEngine::CNNNetwork& ieNe
#ifdef HAVE_DNN_IE_NN_BUILDER_2019
Ptr<Layer> cvLayer(new InfEngineBackendLayer(ieNet));
InferenceEngine::CNNLayerPtr ieLayer = ieNet.getLayerByName(it.first.c_str());
InferenceEngine::CNNLayerPtr ieLayer;
try
{
ieLayer = ieNet.getLayerByName(outputName.c_str());
}
catch (...)
{
auto pos = outputName.rfind('.'); // cut port number: ".0"
if (pos != std::string::npos)
{
std::string layerName = outputName.substr(0, pos);
ieLayer = ieNet.getLayerByName(layerName.c_str());
}
}
CV_Assert(ieLayer);
cvLayer->name = it.first;
cvLayer->name = outputName;
cvLayer->type = ieLayer->type;
ld.layerInstance = cvLayer;

@@ -806,6 +806,10 @@ void ONNXImporter::populateNet(Net dstNet)
{
layerParams.type = "ELU";
}
else if (layer_type == "Tanh")
{
layerParams.type = "TanH";
}
else if (layer_type == "PRelu")
{
layerParams.type = "PReLU";

@@ -1220,7 +1220,7 @@ TEST_P(Test_TensorFlow_nets, EfficientDet)
}
checkBackend();
std::string proto = findDataFile("dnn/efficientdet-d0.pbtxt");
std::string model = findDataFile("dnn/efficientdet-d0.pb");
std::string model = findDataFile("dnn/efficientdet-d0.pb", false);
Net net = readNetFromTensorflow(model, proto);
Mat img = imread(findDataFile("dnn/dog416.png"));

@@ -12,6 +12,7 @@ import org.opencv.core.Point;
import org.opencv.core.Scalar;
import org.opencv.core.DMatch;
import org.opencv.features2d.DescriptorMatcher;
import org.opencv.features2d.BFMatcher;
import org.opencv.core.KeyPoint;
import org.opencv.test.OpenCVTestCase;
import org.opencv.test.OpenCVTestRunner;
@@ -93,6 +94,15 @@ public class BruteForceDescriptorMatcherTest extends OpenCVTestCase {
};
}
// https://github.com/opencv/opencv/issues/11268
public void testConstructor()
{
BFMatcher self_created_matcher = new BFMatcher();
Mat train = new Mat(1, 1, CvType.CV_8U, new Scalar(123));
self_created_matcher.add(Arrays.asList(train));
assertTrue(!self_created_matcher.empty());
}
public void testAdd() {
matcher.add(Arrays.asList(new Mat()));
assertFalse(matcher.empty());

@@ -12,6 +12,7 @@ import org.opencv.core.Point;
import org.opencv.core.Scalar;
import org.opencv.core.DMatch;
import org.opencv.features2d.DescriptorMatcher;
import org.opencv.features2d.FlannBasedMatcher;
import org.opencv.core.KeyPoint;
import org.opencv.test.OpenCVTestCase;
import org.opencv.test.OpenCVTestRunner;
@@ -168,6 +169,15 @@ public class FlannBasedDescriptorMatcherTest extends OpenCVTestCase {
};
}
// https://github.com/opencv/opencv/issues/11268
public void testConstructor()
{
FlannBasedMatcher self_created_matcher = new FlannBasedMatcher();
Mat train = new Mat(1, 1, CvType.CV_8U, new Scalar(123));
self_created_matcher.add(Arrays.asList(train));
assertTrue(!self_created_matcher.empty());
}
public void testAdd() {
matcher.add(Arrays.asList(new Mat()));
assertFalse(matcher.empty());

@@ -797,9 +797,13 @@ public class ImgprocTest extends OpenCVTestCase {
rrect = Imgproc.fitEllipse(points);
assertPointEquals(new Point(0, 0), rrect.center, EPS);
assertEquals(2.828, rrect.size.width, EPS);
assertEquals(2.828, rrect.size.height, EPS);
double FIT_ELLIPSE_CENTER_EPS = 0.01;
double FIT_ELLIPSE_SIZE_EPS = 0.4;
assertEquals(0.0, rrect.center.x, FIT_ELLIPSE_CENTER_EPS);
assertEquals(0.0, rrect.center.y, FIT_ELLIPSE_CENTER_EPS);
assertEquals(2.828, rrect.size.width, FIT_ELLIPSE_SIZE_EPS);
assertEquals(2.828, rrect.size.height, FIT_ELLIPSE_SIZE_EPS);
}
public void testFitLine() {

@@ -337,8 +337,15 @@ double cv::contourArea( InputArray _contour, bool oriented )
return a00;
}
namespace cv
{
cv::RotatedRect cv::fitEllipse( InputArray _points )
static inline Point2f getOfs(int i, float eps)
{
return Point2f(((i & 1)*2 - 1)*eps, ((i & 2) - 1)*eps);
}
static RotatedRect fitEllipseNoDirect( InputArray _points )
{
CV_INSTRUMENT_REGION();
@@ -354,42 +361,84 @@ cv::RotatedRect cv::fitEllipse( InputArray _points )
// New fitellipse algorithm, contributed by Dr. Daniel Weiss
Point2f c(0,0);
double gfp[5] = {0}, rp[5] = {0}, t;
double gfp[5] = {0}, rp[5] = {0}, t, vd[25]={0}, wd[5]={0};
const double min_eps = 1e-8;
bool is_float = depth == CV_32F;
const Point* ptsi = points.ptr<Point>();
const Point2f* ptsf = points.ptr<Point2f>();
AutoBuffer<double> _Ad(n*5), _bd(n);
double *Ad = _Ad.data(), *bd = _bd.data();
AutoBuffer<double> _Ad(n*12+n);
double *Ad = _Ad.data(), *ud = Ad + n*5, *bd = ud + n*5;
Point2f* ptsf_copy = (Point2f*)(bd + n);
// first fit for parameters A - E
Mat A( n, 5, CV_64F, Ad );
Mat b( n, 1, CV_64F, bd );
Mat x( 5, 1, CV_64F, gfp );
Mat u( n, 1, CV_64F, ud );
Mat vt( 5, 5, CV_64F, vd );
Mat w( 5, 1, CV_64F, wd );
{
const Point* ptsi = points.ptr<Point>();
const Point2f* ptsf = points.ptr<Point2f>();
for( i = 0; i < n; i++ )
{
Point2f p = is_float ? ptsf[i] : Point2f((float)ptsi[i].x, (float)ptsi[i].y);
ptsf_copy[i] = p;
c += p;
}
}
c.x /= n;
c.y /= n;
double s = 0;
for( i = 0; i < n; i++ )
{
Point2f p = is_float ? ptsf[i] : Point2f((float)ptsi[i].x, (float)ptsi[i].y);
Point2f p = ptsf_copy[i];
p -= c;
s += fabs(p.x) + fabs(p.y);
}
double scale = 100./(s > FLT_EPSILON ? s : FLT_EPSILON);
for( i = 0; i < n; i++ )
{
Point2f p = ptsf_copy[i];
p -= c;
double px = p.x*scale;
double py = p.y*scale;
bd[i] = 10000.0; // 1.0?
Ad[i*5] = -(double)p.x * p.x; // A - C signs inverted as proposed by APP
Ad[i*5 + 1] = -(double)p.y * p.y;
Ad[i*5 + 2] = -(double)p.x * p.y;
Ad[i*5 + 3] = p.x;
Ad[i*5 + 4] = p.y;
Ad[i*5] = -px * px; // A - C signs inverted as proposed by APP
Ad[i*5 + 1] = -py * py;
Ad[i*5 + 2] = -px * py;
Ad[i*5 + 3] = px;
Ad[i*5 + 4] = py;
}
solve(A, b, x, DECOMP_SVD);
SVDecomp(A, w, u, vt);
if(wd[0]*FLT_EPSILON > wd[4]) {
float eps = (float)(s/(n*2)*1e-3);
for( i = 0; i < n; i++ )
{
Point2f p = ptsf_copy[i] + getOfs(i, eps);
ptsf_copy[i] = p;
}
for( i = 0; i < n; i++ )
{
Point2f p = ptsf_copy[i];
p -= c;
double px = p.x*scale;
double py = p.y*scale;
bd[i] = 10000.0; // 1.0?
Ad[i*5] = -px * px; // A - C signs inverted as proposed by APP
Ad[i*5 + 1] = -py * py;
Ad[i*5 + 2] = -px * py;
Ad[i*5 + 3] = px;
Ad[i*5 + 4] = py;
}
SVDecomp(A, w, u, vt);
}
SVBackSubst(w, u, vt, b, x);
// now use general-form parameters A - E to find the ellipse center:
// differentiate general form wrt x/y to get two equations for cx and cy
@@ -409,12 +458,14 @@ cv::RotatedRect cv::fitEllipse( InputArray _points )
x = Mat( 3, 1, CV_64F, gfp );
for( i = 0; i < n; i++ )
{
Point2f p = is_float ? ptsf[i] : Point2f((float)ptsi[i].x, (float)ptsi[i].y);
Point2f p = ptsf_copy[i];
p -= c;
double px = p.x*scale;
double py = p.y*scale;
bd[i] = 1.0;
Ad[i * 3] = (p.x - rp[0]) * (p.x - rp[0]);
Ad[i * 3 + 1] = (p.y - rp[1]) * (p.y - rp[1]);
Ad[i * 3 + 2] = (p.x - rp[0]) * (p.y - rp[1]);
Ad[i * 3] = (px - rp[0]) * (px - rp[0]);
Ad[i * 3 + 1] = (py - rp[1]) * (py - rp[1]);
Ad[i * 3 + 2] = (px - rp[0]) * (py - rp[1]);
}
solve(A, b, x, DECOMP_SVD);
@@ -431,10 +482,10 @@ cv::RotatedRect cv::fitEllipse( InputArray _points )
if( rp[3] > min_eps )
rp[3] = std::sqrt(2.0 / rp[3]);
box.center.x = (float)rp[0] + c.x;
box.center.y = (float)rp[1] + c.y;
box.size.width = (float)(rp[2]*2);
box.size.height = (float)(rp[3]*2);
box.center.x = (float)(rp[0]/scale) + c.x;
box.center.y = (float)(rp[1]/scale) + c.y;
box.size.width = (float)(rp[2]*2/scale);
box.size.height = (float)(rp[3]*2/scale);
if( box.size.width > box.size.height )
{
float tmp;
@@ -448,6 +499,16 @@ cv::RotatedRect cv::fitEllipse( InputArray _points )
return box;
}
}
cv::RotatedRect cv::fitEllipse( InputArray _points )
{
CV_INSTRUMENT_REGION();
Mat points = _points.getMat();
int n = points.checkVector(2);
return n == 5 ? fitEllipseDirect(points) : fitEllipseNoDirect(points);
}
cv::RotatedRect cv::fitEllipseAMS( InputArray _points )
{
@@ -483,16 +544,24 @@ cv::RotatedRect cv::fitEllipseAMS( InputArray _points )
c.x /= (float)n;
c.y /= (float)n;
double s = 0;
for( i = 0; i < n; i++ )
{
Point2f p = is_float ? ptsf[i] : Point2f((float)ptsi[i].x, (float)ptsi[i].y);
p -= c;
s += fabs(p.x - c.x) + fabs(p.y - c.y);
}
double scale = 100./(s > FLT_EPSILON ? s : (double)FLT_EPSILON);
A.at<double>(i,0) = (double)(p.x)*(p.x);
A.at<double>(i,1) = (double)(p.x)*(p.y);
A.at<double>(i,2) = (double)(p.y)*(p.y);
A.at<double>(i,3) = (double)p.x;
A.at<double>(i,4) = (double)p.y;
for( i = 0; i < n; i++ )
{
Point2f p = is_float ? ptsf[i] : Point2f((float)ptsi[i].x, (float)ptsi[i].y);
double px = (p.x - c.x)*scale, py = (p.y - c.y)*scale;
A.at<double>(i,0) = px*px;
A.at<double>(i,1) = px*py;
A.at<double>(i,2) = py*py;
A.at<double>(i,3) = px;
A.at<double>(i,4) = py;
A.at<double>(i,5) = 1.0;
}
cv::mulTransposed( A, DM, true, noArray(), 1.0, -1 );
@@ -587,10 +656,10 @@ cv::RotatedRect cv::fitEllipseAMS( InputArray _points )
double p1 = 2.0*pVec(2) *pVec(3) - pVec(1) *pVec(4) ;
double p2 = 2.0*pVec(0) *pVec(4) -(pVec(1) *pVec(3) );
x0 = p1/l3 + c.x;
y0 = p2/l3 + c.y;
a = std::sqrt(2.)*sqrt((u1 - 4.0*u2)/((l1 - l2)*l3));
b = std::sqrt(2.)*sqrt(-1.0*((u1 - 4.0*u2)/((l1 + l2)*l3)));
x0 = p1/l3/scale + c.x;
y0 = p2/l3/scale + c.y;
a = std::sqrt(2.)*sqrt((u1 - 4.0*u2)/((l1 - l2)*l3))/scale;
b = std::sqrt(2.)*sqrt(-1.0*((u1 - 4.0*u2)/((l1 + l2)*l3)))/scale;
if (pVec(1) == 0) {
if (pVec(0) < pVec(2) ) {
theta = 0;
@@ -601,8 +670,8 @@ cv::RotatedRect cv::fitEllipseAMS( InputArray _points )
theta = CV_PI/2. + 0.5*std::atan2(pVec(1) , (pVec(0) - pVec(2) ));
}
box.center.x = (float)x0; // +c.x;
box.center.y = (float)y0; // +c.y;
box.center.x = (float)x0;
box.center.y = (float)y0;
box.size.width = (float)(2.0*a);
box.size.height = (float)(2.0*b);
if( box.size.width > box.size.height )
@@ -619,7 +688,7 @@ cv::RotatedRect cv::fitEllipseAMS( InputArray _points )
box = cv::fitEllipseDirect( points );
}
} else {
box = cv::fitEllipse( points );
box = cv::fitEllipseNoDirect( points );
}
return box;
@@ -630,6 +699,7 @@ cv::RotatedRect cv::fitEllipseDirect( InputArray _points )
Mat points = _points.getMat();
int i, n = points.checkVector(2);
int depth = points.depth();
float eps = 0;
CV_Assert( n >= 0 && (depth == CV_32F || depth == CV_32S));
RotatedRect box;
@@ -637,7 +707,7 @@ cv::RotatedRect cv::fitEllipseDirect( InputArray _points )
if( n < 5 )
CV_Error( CV_StsBadSize, "There should be at least 5 points to fit the ellipse" );
Point2f c(0,0);
Point2d c(0., 0.);
bool is_float = (depth == CV_32F);
const Point* ptsi = points.ptr<Point>();
@@ -649,63 +719,83 @@ cv::RotatedRect cv::fitEllipseDirect( InputArray _points )
Matx<double, 3, 1> pVec;
double x0, y0, a, b, theta, Ts;
double s = 0;
for( i = 0; i < n; i++ )
{
Point2f p = is_float ? ptsf[i] : Point2f((float)ptsi[i].x, (float)ptsi[i].y);
c += p;
c.x += p.x;
c.y += p.y;
}
c.x /= (float)n;
c.y /= (float)n;
c.x /= n;
c.y /= n;
for( i = 0; i < n; i++ )
{
Point2f p = is_float ? ptsf[i] : Point2f((float)ptsi[i].x, (float)ptsi[i].y);
p -= c;
A.at<double>(i,0) = (double)(p.x)*(p.x);
A.at<double>(i,1) = (double)(p.x)*(p.y);
A.at<double>(i,2) = (double)(p.y)*(p.y);
A.at<double>(i,3) = (double)p.x;
A.at<double>(i,4) = (double)p.y;
A.at<double>(i,5) = 1.0;
s += fabs(p.x - c.x) + fabs(p.y - c.y);
}
cv::mulTransposed( A, DM, true, noArray(), 1.0, -1 );
DM *= (1.0/n);
double scale = 100./(s > FLT_EPSILON ? s : (double)FLT_EPSILON);
TM(0,0) = DM(0,5)*DM(3,5)*DM(4,4) - DM(0,5)*DM(3,4)*DM(4,5) - DM(0,4)*DM(3,5)*DM(5,4) + \
DM(0,3)*DM(4,5)*DM(5,4) + DM(0,4)*DM(3,4)*DM(5,5) - DM(0,3)*DM(4,4)*DM(5,5);
TM(0,1) = DM(1,5)*DM(3,5)*DM(4,4) - DM(1,5)*DM(3,4)*DM(4,5) - DM(1,4)*DM(3,5)*DM(5,4) + \
DM(1,3)*DM(4,5)*DM(5,4) + DM(1,4)*DM(3,4)*DM(5,5) - DM(1,3)*DM(4,4)*DM(5,5);
TM(0,2) = DM(2,5)*DM(3,5)*DM(4,4) - DM(2,5)*DM(3,4)*DM(4,5) - DM(2,4)*DM(3,5)*DM(5,4) + \
DM(2,3)*DM(4,5)*DM(5,4) + DM(2,4)*DM(3,4)*DM(5,5) - DM(2,3)*DM(4,4)*DM(5,5);
TM(1,0) = DM(0,5)*DM(3,3)*DM(4,5) - DM(0,5)*DM(3,5)*DM(4,3) + DM(0,4)*DM(3,5)*DM(5,3) - \
DM(0,3)*DM(4,5)*DM(5,3) - DM(0,4)*DM(3,3)*DM(5,5) + DM(0,3)*DM(4,3)*DM(5,5);
TM(1,1) = DM(1,5)*DM(3,3)*DM(4,5) - DM(1,5)*DM(3,5)*DM(4,3) + DM(1,4)*DM(3,5)*DM(5,3) - \
DM(1,3)*DM(4,5)*DM(5,3) - DM(1,4)*DM(3,3)*DM(5,5) + DM(1,3)*DM(4,3)*DM(5,5);
TM(1,2) = DM(2,5)*DM(3,3)*DM(4,5) - DM(2,5)*DM(3,5)*DM(4,3) + DM(2,4)*DM(3,5)*DM(5,3) - \
DM(2,3)*DM(4,5)*DM(5,3) - DM(2,4)*DM(3,3)*DM(5,5) + DM(2,3)*DM(4,3)*DM(5,5);
TM(2,0) = DM(0,5)*DM(3,4)*DM(4,3) - DM(0,5)*DM(3,3)*DM(4,4) - DM(0,4)*DM(3,4)*DM(5,3) + \
DM(0,3)*DM(4,4)*DM(5,3) + DM(0,4)*DM(3,3)*DM(5,4) - DM(0,3)*DM(4,3)*DM(5,4);
TM(2,1) = DM(1,5)*DM(3,4)*DM(4,3) - DM(1,5)*DM(3,3)*DM(4,4) - DM(1,4)*DM(3,4)*DM(5,3) + \
DM(1,3)*DM(4,4)*DM(5,3) + DM(1,4)*DM(3,3)*DM(5,4) - DM(1,3)*DM(4,3)*DM(5,4);
TM(2,2) = DM(2,5)*DM(3,4)*DM(4,3) - DM(2,5)*DM(3,3)*DM(4,4) - DM(2,4)*DM(3,4)*DM(5,3) + \
DM(2,3)*DM(4,4)*DM(5,3) + DM(2,4)*DM(3,3)*DM(5,4) - DM(2,3)*DM(4,3)*DM(5,4);
Ts=(-(DM(3,5)*DM(4,4)*DM(5,3)) + DM(3,4)*DM(4,5)*DM(5,3) + DM(3,5)*DM(4,3)*DM(5,4) - \
DM(3,3)*DM(4,5)*DM(5,4) - DM(3,4)*DM(4,3)*DM(5,5) + DM(3,3)*DM(4,4)*DM(5,5));
M(0,0) = (DM(2,0) + (DM(2,3)*TM(0,0) + DM(2,4)*TM(1,0) + DM(2,5)*TM(2,0))/Ts)/2.;
M(0,1) = (DM(2,1) + (DM(2,3)*TM(0,1) + DM(2,4)*TM(1,1) + DM(2,5)*TM(2,1))/Ts)/2.;
M(0,2) = (DM(2,2) + (DM(2,3)*TM(0,2) + DM(2,4)*TM(1,2) + DM(2,5)*TM(2,2))/Ts)/2.;
M(1,0) = -DM(1,0) - (DM(1,3)*TM(0,0) + DM(1,4)*TM(1,0) + DM(1,5)*TM(2,0))/Ts;
M(1,1) = -DM(1,1) - (DM(1,3)*TM(0,1) + DM(1,4)*TM(1,1) + DM(1,5)*TM(2,1))/Ts;
M(1,2) = -DM(1,2) - (DM(1,3)*TM(0,2) + DM(1,4)*TM(1,2) + DM(1,5)*TM(2,2))/Ts;
M(2,0) = (DM(0,0) + (DM(0,3)*TM(0,0) + DM(0,4)*TM(1,0) + DM(0,5)*TM(2,0))/Ts)/2.;
M(2,1) = (DM(0,1) + (DM(0,3)*TM(0,1) + DM(0,4)*TM(1,1) + DM(0,5)*TM(2,1))/Ts)/2.;
M(2,2) = (DM(0,2) + (DM(0,3)*TM(0,2) + DM(0,4)*TM(1,2) + DM(0,5)*TM(2,2))/Ts)/2.;
// first, try the original pointset.
// if it's singular, try to shift the points a bit
int iter = 0;
for( iter = 0; iter < 2; iter++ ) {
for( i = 0; i < n; i++ )
{
Point2f p = is_float ? ptsf[i] : Point2f((float)ptsi[i].x, (float)ptsi[i].y);
Point2f delta = getOfs(i, eps);
double px = (p.x + delta.x - c.x)*scale, py = (p.y + delta.y - c.y)*scale;
A.at<double>(i,0) = px*px;
A.at<double>(i,1) = px*py;
A.at<double>(i,2) = py*py;
A.at<double>(i,3) = px;
A.at<double>(i,4) = py;
A.at<double>(i,5) = 1.0;
}
cv::mulTransposed( A, DM, true, noArray(), 1.0, -1 );
DM *= (1.0/n);
TM(0,0) = DM(0,5)*DM(3,5)*DM(4,4) - DM(0,5)*DM(3,4)*DM(4,5) - DM(0,4)*DM(3,5)*DM(5,4) + \
DM(0,3)*DM(4,5)*DM(5,4) + DM(0,4)*DM(3,4)*DM(5,5) - DM(0,3)*DM(4,4)*DM(5,5);
TM(0,1) = DM(1,5)*DM(3,5)*DM(4,4) - DM(1,5)*DM(3,4)*DM(4,5) - DM(1,4)*DM(3,5)*DM(5,4) + \
DM(1,3)*DM(4,5)*DM(5,4) + DM(1,4)*DM(3,4)*DM(5,5) - DM(1,3)*DM(4,4)*DM(5,5);
TM(0,2) = DM(2,5)*DM(3,5)*DM(4,4) - DM(2,5)*DM(3,4)*DM(4,5) - DM(2,4)*DM(3,5)*DM(5,4) + \
DM(2,3)*DM(4,5)*DM(5,4) + DM(2,4)*DM(3,4)*DM(5,5) - DM(2,3)*DM(4,4)*DM(5,5);
TM(1,0) = DM(0,5)*DM(3,3)*DM(4,5) - DM(0,5)*DM(3,5)*DM(4,3) + DM(0,4)*DM(3,5)*DM(5,3) - \
DM(0,3)*DM(4,5)*DM(5,3) - DM(0,4)*DM(3,3)*DM(5,5) + DM(0,3)*DM(4,3)*DM(5,5);
TM(1,1) = DM(1,5)*DM(3,3)*DM(4,5) - DM(1,5)*DM(3,5)*DM(4,3) + DM(1,4)*DM(3,5)*DM(5,3) - \
DM(1,3)*DM(4,5)*DM(5,3) - DM(1,4)*DM(3,3)*DM(5,5) + DM(1,3)*DM(4,3)*DM(5,5);
TM(1,2) = DM(2,5)*DM(3,3)*DM(4,5) - DM(2,5)*DM(3,5)*DM(4,3) + DM(2,4)*DM(3,5)*DM(5,3) - \
DM(2,3)*DM(4,5)*DM(5,3) - DM(2,4)*DM(3,3)*DM(5,5) + DM(2,3)*DM(4,3)*DM(5,5);
TM(2,0) = DM(0,5)*DM(3,4)*DM(4,3) - DM(0,5)*DM(3,3)*DM(4,4) - DM(0,4)*DM(3,4)*DM(5,3) + \
DM(0,3)*DM(4,4)*DM(5,3) + DM(0,4)*DM(3,3)*DM(5,4) - DM(0,3)*DM(4,3)*DM(5,4);
TM(2,1) = DM(1,5)*DM(3,4)*DM(4,3) - DM(1,5)*DM(3,3)*DM(4,4) - DM(1,4)*DM(3,4)*DM(5,3) + \
DM(1,3)*DM(4,4)*DM(5,3) + DM(1,4)*DM(3,3)*DM(5,4) - DM(1,3)*DM(4,3)*DM(5,4);
TM(2,2) = DM(2,5)*DM(3,4)*DM(4,3) - DM(2,5)*DM(3,3)*DM(4,4) - DM(2,4)*DM(3,4)*DM(5,3) + \
DM(2,3)*DM(4,4)*DM(5,3) + DM(2,4)*DM(3,3)*DM(5,4) - DM(2,3)*DM(4,3)*DM(5,4);
Ts=(-(DM(3,5)*DM(4,4)*DM(5,3)) + DM(3,4)*DM(4,5)*DM(5,3) + DM(3,5)*DM(4,3)*DM(5,4) - \
DM(3,3)*DM(4,5)*DM(5,4) - DM(3,4)*DM(4,3)*DM(5,5) + DM(3,3)*DM(4,4)*DM(5,5));
M(0,0) = (DM(2,0) + (DM(2,3)*TM(0,0) + DM(2,4)*TM(1,0) + DM(2,5)*TM(2,0))/Ts)/2.;
M(0,1) = (DM(2,1) + (DM(2,3)*TM(0,1) + DM(2,4)*TM(1,1) + DM(2,5)*TM(2,1))/Ts)/2.;
M(0,2) = (DM(2,2) + (DM(2,3)*TM(0,2) + DM(2,4)*TM(1,2) + DM(2,5)*TM(2,2))/Ts)/2.;
M(1,0) = -DM(1,0) - (DM(1,3)*TM(0,0) + DM(1,4)*TM(1,0) + DM(1,5)*TM(2,0))/Ts;
M(1,1) = -DM(1,1) - (DM(1,3)*TM(0,1) + DM(1,4)*TM(1,1) + DM(1,5)*TM(2,1))/Ts;
M(1,2) = -DM(1,2) - (DM(1,3)*TM(0,2) + DM(1,4)*TM(1,2) + DM(1,5)*TM(2,2))/Ts;
M(2,0) = (DM(0,0) + (DM(0,3)*TM(0,0) + DM(0,4)*TM(1,0) + DM(0,5)*TM(2,0))/Ts)/2.;
M(2,1) = (DM(0,1) + (DM(0,3)*TM(0,1) + DM(0,4)*TM(1,1) + DM(0,5)*TM(2,1))/Ts)/2.;
M(2,2) = (DM(0,2) + (DM(0,3)*TM(0,2) + DM(0,4)*TM(1,2) + DM(0,5)*TM(2,2))/Ts)/2.;
double det = fabs(cv::determinant(M));
if (fabs(det) > 1.0e-10)
break;
eps = (float)(s/(n*2)*1e-2);
}
if (fabs(cv::determinant(M)) > 1.0e-10) {
if( iter < 2 ) {
Mat eVal, eVec;
eigenNonSymmetric(M, eVal, eVec);
@@ -740,10 +830,10 @@ cv::RotatedRect cv::fitEllipseDirect( InputArray _points )
double p1 = 2*pVec(2)*Q(0,0) - pVec(1)*Q(0,1);
double p2 = 2*pVec(0)*Q(0,1) - pVec(1)*Q(0,0);
x0 = p1/l3 + c.x;
y0 = p2/l3 + c.y;
a = sqrt(2.)*sqrt((u1 - 4.0*u2)/((l1 - l2)*l3));
b = sqrt(2.)*sqrt(-1.0*((u1 - 4.0*u2)/((l1 + l2)*l3)));
x0 = (p1/l3/scale) + c.x;
y0 = (p2/l3/scale) + c.y;
a = sqrt(2.)*sqrt((u1 - 4.0*u2)/((l1 - l2)*l3))/scale;
b = sqrt(2.)*sqrt(-1.0*((u1 - 4.0*u2)/((l1 + l2)*l3)))/scale;
if (pVec(1) == 0) {
if (pVec(0) < pVec(2) ) {
theta = 0;
@@ -767,7 +857,7 @@ cv::RotatedRect cv::fitEllipseDirect( InputArray _points )
box.angle = (float)(fmod(theta*180/CV_PI,180.0));
};
} else {
box = cv::fitEllipse( points );
box = cv::fitEllipseNoDirect( points );
}
return box;
}

@@ -66,4 +66,40 @@ TEST(Imgproc_FitEllipse_Issue_6544, accuracy) {
EXPECT_TRUE(fit_and_check_ellipse(pts));
}
TEST(Imgproc_FitEllipse_Issue_10270, accuracy) {
vector<Point2f> pts;
float scale = 1;
Point2f shift(0, 0);
pts.push_back(Point2f(0, 1)*scale+shift);
pts.push_back(Point2f(0, 2)*scale+shift);
pts.push_back(Point2f(0, 3)*scale+shift);
pts.push_back(Point2f(2, 3)*scale+shift);
pts.push_back(Point2f(0, 4)*scale+shift);
// check that we get almost vertical ellipse centered around (1, 3)
RotatedRect e = fitEllipse(pts);
EXPECT_LT(std::min(fabs(e.angle-180), fabs(e.angle)), 10.);
EXPECT_NEAR(e.center.x, 1, 1);
EXPECT_NEAR(e.center.y, 3, 1);
EXPECT_LT(e.size.width*3, e.size.height);
}
TEST(Imgproc_FitEllipse_JavaCase, accuracy) {
vector<Point2f> pts;
float scale = 1;
Point2f shift(0, 0);
pts.push_back(Point2f(0, 0)*scale+shift);
pts.push_back(Point2f(1, 1)*scale+shift);
pts.push_back(Point2f(-1, 1)*scale+shift);
pts.push_back(Point2f(-1, -1)*scale+shift);
pts.push_back(Point2f(1, -1)*scale+shift);
// check that we get an almost circular ellipse centered around (0, 0)
RotatedRect e = fitEllipse(pts);
EXPECT_NEAR(e.center.x, 0, 0.01);
EXPECT_NEAR(e.center.y, 0, 0.01);
EXPECT_NEAR(e.size.width, sqrt(2.)*2, 0.4);
EXPECT_NEAR(e.size.height, sqrt(2.)*2, 0.4);
}
}} // namespace
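For reference, the Imgproc_FitEllipse_JavaCase pointset from Python; with exactly five points, fitEllipse() now dispatches to fitEllipseDirect(), so a near-circle is expected (a sketch, not part of the commit):

```python
import numpy as np
import cv2 as cv

pts = np.array([[0, 0], [1, 1], [-1, 1], [-1, -1], [1, -1]], np.float32)
(cx, cy), (w, h), angle = cv.fitEllipse(pts)
# expected: center near (0, 0), width and height near 2*sqrt(2) ~= 2.83
```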

@@ -895,7 +895,10 @@ class JavaWrapperGenerator(object):
ret = ""
default = ""
elif not fi.ctype: # c-tor
ret = "return (jlong) _retval_;"
if self.isSmartClass(ci):
ret = "return (jlong)(new Ptr<%(ctype)s>(_retval_));" % { 'ctype': fi.fullClass(isCPP=True) }
else:
ret = "return (jlong) _retval_;"
elif "v_type" in type_dict[fi.ctype]: # c-tor
if type_dict[fi.ctype]["v_type"] in ("Mat", "vector_Mat"):
ret = "return (jlong) _retval_;"
@@ -940,8 +943,12 @@ class JavaWrapperGenerator(object):
c_epilogue.append("return " + fi.ctype + "_to_List(env, _ret_val_vector_);")
if fi.classname:
if not fi.ctype: # c-tor
retval = fi.fullClass(isCPP=True) + "* _retval_ = "
cvname = "new " + fi.fullClass(isCPP=True)
if self.isSmartClass(ci):
retval = self.smartWrap(ci, fi.fullClass(isCPP=True)) + " _retval_ = "
cvname = "makePtr<" + fi.fullClass(isCPP=True) +">"
else:
retval = fi.fullClass(isCPP=True) + "* _retval_ = "
cvname = "new " + fi.fullClass(isCPP=True)
elif fi.static:
cvname = fi.fullName(isCPP=True)
else:

@@ -49,8 +49,17 @@
postRun: [] ,
onRuntimeInitialized: function() {
console.log("Emscripten runtime is ready, launching QUnit tests...");
//console.log(cv.getBuildInformation());
QUnit.start();
if (window.cv instanceof Promise) {
window.cv.then((target) => {
window.cv = target;
//console.log(cv.getBuildInformation());
QUnit.start();
})
} else {
// for backward compatibility
// console.log(cv.getBuildInformation());
QUnit.start();
}
},
print: (function() {
var element = document.getElementById('output');

@@ -197,8 +197,8 @@ bool convert(const FileNode& oldroot, FileStorage& newfs)
newfs << "cascade" << "{:opencv-cascade-classifier"
<< "stageType" << "BOOST"
<< "featureType" << "HAAR"
<< "height" << cascadesize.width
<< "width" << cascadesize.height
<< "width" << cascadesize.width
<< "height" << cascadesize.height
<< "stageParams" << "{"
<< "maxWeakCount" << (int)maxWeakCount
<< "}"

@@ -0,0 +1,113 @@
#!/usr/bin/env python
"""Algorithm serialization test."""
from __future__ import print_function
import tempfile
import os
import cv2 as cv
import numpy as np
from tests_common import NewOpenCVTests
class MyData:
def __init__(self):
self.A = 97
self.X = np.pi
self.name = 'mydata1234'
def write(self, fs, name):
fs.startWriteStruct(name, cv.FileNode_MAP|cv.FileNode_FLOW)
fs.write('A', self.A)
fs.write('X', self.X)
fs.write('name', self.name)
fs.endWriteStruct()
def read(self, node):
if (not node.empty()):
self.A = int(node.getNode('A').real())
self.X = node.getNode('X').real()
self.name = node.getNode('name').string()
else:
self.A = self.X = 0
self.name = ''
class filestorage_io_test(NewOpenCVTests):
strings_data = ['image1.jpg', 'Awesomeness', '../data/baboon.jpg']
R0 = np.eye(3,3)
T0 = np.zeros((3,1))
def write_data(self, fname):
fs = cv.FileStorage(fname, cv.FileStorage_WRITE)
R = self.R0
T = self.T0
m = MyData()
fs.write('iterationNr', 100)
fs.startWriteStruct('strings', cv.FileNode_SEQ)
for elem in self.strings_data:
fs.write('', elem)
fs.endWriteStruct()
fs.startWriteStruct('Mapping', cv.FileNode_MAP)
fs.write('One', 1)
fs.write('Two', 2)
fs.endWriteStruct()
fs.write('R_MAT', R)
fs.write('T_MAT', T)
m.write(fs, 'MyData')
fs.release()
def read_data_and_check(self, fname):
fs = cv.FileStorage(fname, cv.FileStorage_READ)
n = fs.getNode('iterationNr')
itNr = int(n.real())
self.assertEqual(itNr, 100)
n = fs.getNode('strings')
self.assertTrue(n.isSeq())
self.assertEqual(n.size(), len(self.strings_data))
for i in range(n.size()):
self.assertEqual(n.at(i).string(), self.strings_data[i])
n = fs.getNode('Mapping')
self.assertEqual(int(n.getNode('Two').real()), 2)
self.assertEqual(int(n.getNode('One').real()), 1)
R = fs.getNode('R_MAT').mat()
T = fs.getNode('T_MAT').mat()
self.assertEqual(cv.norm(R, self.R0, cv.NORM_INF), 0)
self.assertEqual(cv.norm(T, self.T0, cv.NORM_INF), 0)
m0 = MyData()
m = MyData()
m.read(fs.getNode('MyData'))
self.assertEqual(m.A, m0.A)
self.assertEqual(m.X, m0.X)
self.assertEqual(m.name, m0.name)
n = fs.getNode('NonExisting')
self.assertTrue(n.isNone())
fs.release()
def run_fs_test(self, ext):
fd, fname = tempfile.mkstemp(prefix="opencv_python_sample_filestorage", suffix=ext)
os.close(fd)
self.write_data(fname)
self.read_data_and_check(fname)
os.remove(fname)
def test_xml(self):
self.run_fs_test(".xml")
def test_yml(self):
self.run_fs_test(".yml")
def test_json(self):
self.run_fs_test(".json")
if __name__ == '__main__':
NewOpenCVTests.bootstrap()

@@ -873,7 +873,7 @@ public:
VideoWriter::fourcc('P','I','M','1') is a MPEG-1 codec, VideoWriter::fourcc('M','J','P','G') is a
motion-jpeg codec etc. List of codes can be obtained at [Video Codecs by
FOURCC](http://www.fourcc.org/codecs.php) page. FFMPEG backend with MP4 container natively uses
other values as fourcc code: see [ObjectType](http://www.mp4ra.org/codecs.html),
other values as fourcc code: see [ObjectType](http://mp4ra.org/#/codecs),
so you may receive a warning message from OpenCV about fourcc code conversion.
@param fps Framerate of the created video stream.
@param frameSize Size of the video frames.
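A minimal Python sketch of the fourcc usage described above (output path and codec are arbitrary; availability depends on the local backend):

```python
import numpy as np
import cv2 as cv

fourcc = cv.VideoWriter_fourcc('M', 'J', 'P', 'G')  # motion-jpeg
writer = cv.VideoWriter('out.avi', fourcc, 30.0, (640, 480))
writer.write(np.zeros((480, 640, 3), np.uint8))  # one black frame
writer.release()
```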

@@ -419,7 +419,7 @@ Simply call it with 4 chars fourcc code like `CV_FOURCC('I', 'Y', 'U', 'V')`
List of codes can be obtained at [Video Codecs by FOURCC](http://www.fourcc.org/codecs.php) page.
FFMPEG backend with MP4 container natively uses other values as fourcc code:
see [ObjectType](http://www.mp4ra.org/codecs.html).
see [ObjectType](http://mp4ra.org/#/codecs).
*/
CV_INLINE int CV_FOURCC(char c1, char c2, char c3, char c4)
{

@@ -1208,9 +1208,20 @@ bool CvCapture_FFMPEG::grabFrame()
#endif
int ret = av_read_frame(ic, &packet);
if (ret == AVERROR(EAGAIN)) continue;
/* else if (ret < 0) break; */
if (ret == AVERROR(EAGAIN))
continue;
if (ret == AVERROR_EOF)
{
if (rawMode)
break;
// flush cached frames from video decoder
packet.data = NULL;
packet.size = 0;
packet.stream_index = video_stream;
}
if( packet.stream_index != video_stream )
{
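The user-visible effect of the EOF flush above is that frames still buffered inside the decoder are no longer dropped at the end of the stream. A hedged sketch of checking this from Python (assumes a local video.mp4; note CAP_PROP_FRAME_COUNT is itself an estimate for some containers):

```python
import cv2 as cv

cap = cv.VideoCapture('video.mp4')
decoded = 0
while True:
    ok, _ = cap.read()
    if not ok:
        break
    decoded += 1
# with the flush, the decoded count should match the container's frame count
print(decoded, int(cap.get(cv.CAP_PROP_FRAME_COUNT)))
cap.release()
```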

@@ -0,0 +1,202 @@
diff --git a/inference-engine/CMakeLists.txt b/inference-engine/CMakeLists.txt
index e7ea6547..7333d19c 100644
--- a/inference-engine/CMakeLists.txt
+++ b/inference-engine/CMakeLists.txt
@@ -72,11 +72,11 @@ if(ENABLE_TESTS)
add_subdirectory(tests)
endif()
-add_subdirectory(tools)
+#add_subdirectory(tools)
# gflags and format_reader targets are kept inside of samples directory and
# they must be built even if samples build is disabled (required for tests and tools).
-add_subdirectory(samples)
+#add_subdirectory(samples)
file(GLOB_RECURSE SAMPLES_SOURCES samples/*.cpp samples/*.hpp samples/*.h)
add_cpplint_target(sample_cpplint
@@ -154,10 +154,10 @@ endif()
# Developer package
#
-ie_developer_export_targets(format_reader)
+#ie_developer_export_targets(format_reader)
ie_developer_export_targets(${NGRAPH_LIBRARIES})
-ie_developer_export()
+#ie_developer_export()
configure_file(
"${IE_MAIN_SOURCE_DIR}/cmake/developer_package_config.cmake.in"
diff --git a/inference-engine/src/legacy_api/CMakeLists.txt b/inference-engine/src/legacy_api/CMakeLists.txt
index a03a5f23..63d4f687 100644
--- a/inference-engine/src/legacy_api/CMakeLists.txt
+++ b/inference-engine/src/legacy_api/CMakeLists.txt
@@ -22,7 +22,7 @@ source_group("include" FILES ${PUBLIC_HEADERS})
# Create object library
-add_library(${TARGET_NAME}_obj OBJECT
+add_library(${TARGET_NAME}_obj OBJECT EXCLUDE_FROM_ALL
${LIBRARY_SRC}
${NN_BUILDER_LIBRARY_SRC}
${PUBLIC_HEADERS})
diff --git a/inference-engine/src/inference_engine/CMakeLists.txt b/inference-engine/src/inference_engine/CMakeLists.txt
index 2071c126..015d8ff8 100644
--- a/inference-engine/src/inference_engine/CMakeLists.txt
+++ b/inference-engine/src/inference_engine/CMakeLists.txt
@@ -98,7 +98,7 @@ add_clang_format_target(${TARGET_NAME}_plugin_api_clang_format FOR_SOURCES ${plu
# Create common base object library
-add_library(${TARGET_NAME}_common_obj OBJECT
+add_library(${TARGET_NAME}_common_obj OBJECT EXCLUDE_FROM_ALL
${IE_BASE_SOURCE_FILES})
target_compile_definitions(${TARGET_NAME}_common_obj PRIVATE IMPLEMENT_INFERENCE_ENGINE_API)
@@ -110,7 +110,7 @@ target_include_directories(${TARGET_NAME}_common_obj SYSTEM PRIVATE
# Create object library
-add_library(${TARGET_NAME}_obj OBJECT
+add_library(${TARGET_NAME}_obj OBJECT EXCLUDE_FROM_ALL
${LIBRARY_SRC}
${LIBRARY_HEADERS}
${PUBLIC_HEADERS})
@@ -200,7 +200,7 @@ add_clang_format_target(${TARGET_NAME}_nn_builder_clang_format FOR_TARGETS ${TAR
# Static library used for unit tests which are always built
-add_library(${TARGET_NAME}_s STATIC
+add_library(${TARGET_NAME}_s STATIC EXCLUDE_FROM_ALL
$<TARGET_OBJECTS:${TARGET_NAME}_obj>
$<TARGET_OBJECTS:${TARGET_NAME}_common_obj>
$<TARGET_OBJECTS:${TARGET_NAME}_legacy_obj>
diff --git a/inference-engine/src/mkldnn_plugin/CMakeLists.txt b/inference-engine/src/mkldnn_plugin/CMakeLists.txt
index 52183e86..4fd6d7d4 100644
--- a/inference-engine/src/mkldnn_plugin/CMakeLists.txt
+++ b/inference-engine/src/mkldnn_plugin/CMakeLists.txt
@@ -163,9 +163,9 @@ add_library(mkldnn_plugin_layers_no_opt OBJECT ${CROSS_COMPILED_SOURCES})
set_ie_threading_interface_for(mkldnn_plugin_layers_no_opt)
target_compile_definitions(mkldnn_plugin_layers_no_opt PRIVATE "IMPLEMENT_INFERENCE_ENGINE_PLUGIN")
-add_library(mkldnn_plugin_layers_no_opt_s OBJECT ${CROSS_COMPILED_SOURCES})
-set_ie_threading_interface_for(mkldnn_plugin_layers_no_opt_s)
-target_compile_definitions(mkldnn_plugin_layers_no_opt_s PRIVATE "USE_STATIC_IE;IMPLEMENT_INFERENCE_ENGINE_PLUGIN")
+#add_library(mkldnn_plugin_layers_no_opt_s OBJECT ${CROSS_COMPILED_SOURCES})
+#set_ie_threading_interface_for(mkldnn_plugin_layers_no_opt_s)
+#target_compile_definitions(mkldnn_plugin_layers_no_opt_s PRIVATE "USE_STATIC_IE;IMPLEMENT_INFERENCE_ENGINE_PLUGIN")
set(object_libraries mkldnn_plugin_layers_no_opt)
set(mkldnn_plugin_object_libraries mkldnn_plugin_layers_no_opt_s)
@@ -190,7 +190,7 @@ if (ENABLE_SSE42)
endfunction()
mkldnn_create_sse42_layers(mkldnn_plugin_layers_sse42)
- mkldnn_create_sse42_layers(mkldnn_plugin_layers_sse42_s)
+ #mkldnn_create_sse42_layers(mkldnn_plugin_layers_sse42_s)
list(APPEND object_libraries mkldnn_plugin_layers_sse42)
list(APPEND mkldnn_plugin_object_libraries mkldnn_plugin_layers_sse42_s)
@@ -216,7 +216,7 @@ if (ENABLE_AVX2)
endfunction()
mkldnn_create_avx2_layers(mkldnn_plugin_layers_avx2)
- mkldnn_create_avx2_layers(mkldnn_plugin_layers_avx2_s)
+ #mkldnn_create_avx2_layers(mkldnn_plugin_layers_avx2_s)
list(APPEND object_libraries mkldnn_plugin_layers_avx2)
list(APPEND mkldnn_plugin_object_libraries mkldnn_plugin_layers_avx2_s)
@@ -242,7 +242,7 @@ if (ENABLE_AVX512F)
endfunction()
mkldnn_create_avx512f_layers(mkldnn_plugin_layers_avx512)
- mkldnn_create_avx512f_layers(mkldnn_plugin_layers_avx512_s)
+ #mkldnn_create_avx512f_layers(mkldnn_plugin_layers_avx512_s)
list(APPEND object_libraries mkldnn_plugin_layers_avx512)
list(APPEND mkldnn_plugin_object_libraries mkldnn_plugin_layers_avx512_s)
@@ -264,7 +264,7 @@ target_link_libraries(${TARGET_NAME} PRIVATE inference_engine inference_engine_l
# add test object library
-add_library(${TARGET_NAME}_obj OBJECT ${SOURCES} ${HEADERS})
+add_library(${TARGET_NAME}_obj OBJECT EXCLUDE_FROM_ALL ${SOURCES} ${HEADERS})
target_include_directories(${TARGET_NAME}_obj PRIVATE $<TARGET_PROPERTY:inference_engine_preproc_s,INTERFACE_INCLUDE_DIRECTORIES>
$<TARGET_PROPERTY:inference_engine_lp_transformations,INTERFACE_INCLUDE_DIRECTORIES>
diff --git a/inference-engine/src/preprocessing/CMakeLists.txt b/inference-engine/src/preprocessing/CMakeLists.txt
index c3ad1e58..b5913840 100644
--- a/inference-engine/src/preprocessing/CMakeLists.txt
+++ b/inference-engine/src/preprocessing/CMakeLists.txt
@@ -124,7 +124,7 @@ endif()
# Create object library
-add_library(${TARGET_NAME}_obj OBJECT
+add_library(${TARGET_NAME}_obj OBJECT EXCLUDE_FROM_ALL
${LIBRARY_SRC}
${LIBRARY_HEADERS})
@@ -167,7 +167,7 @@ endif()
# Static library used for unit tests which are always built
-add_library(${TARGET_NAME}_s STATIC
+add_library(${TARGET_NAME}_s STATIC EXCLUDE_FROM_ALL
$<TARGET_OBJECTS:${TARGET_NAME}_obj>)
set_ie_threading_interface_for(${TARGET_NAME}_s)
diff --git a/inference-engine/src/vpu/common/CMakeLists.txt b/inference-engine/src/vpu/common/CMakeLists.txt
index 65215299..03ba4a4c 100644
--- a/inference-engine/src/vpu/common/CMakeLists.txt
+++ b/inference-engine/src/vpu/common/CMakeLists.txt
@@ -53,7 +53,7 @@ add_common_target("vpu_common_lib" FALSE)
# Unit tests support for graph transformer
if(WIN32)
- add_common_target("vpu_common_lib_test_static" TRUE)
+ #add_common_target("vpu_common_lib_test_static" TRUE)
else()
add_library("vpu_common_lib_test_static" ALIAS "vpu_common_lib")
endif()
diff --git a/inference-engine/src/vpu/graph_transformer/CMakeLists.txt b/inference-engine/src/vpu/graph_transformer/CMakeLists.txt
index 982d3c7f..15fcf3e8 100644
--- a/inference-engine/src/vpu/graph_transformer/CMakeLists.txt
+++ b/inference-engine/src/vpu/graph_transformer/CMakeLists.txt
@@ -64,7 +64,7 @@ add_graph_transformer_target("vpu_graph_transformer" FALSE)
# Unit tests support for graph transformer
if(WIN32)
- add_graph_transformer_target("vpu_graph_transformer_test_static" TRUE)
+ #add_graph_transformer_target("vpu_graph_transformer_test_static" TRUE)
else()
add_library("vpu_graph_transformer_test_static" ALIAS "vpu_graph_transformer")
endif()
diff --git a/inference-engine/thirdparty/CMakeLists.txt b/inference-engine/thirdparty/CMakeLists.txt
index ebf32c71..ad8cb435 100644
--- a/inference-engine/thirdparty/CMakeLists.txt
+++ b/inference-engine/thirdparty/CMakeLists.txt
@@ -36,7 +36,7 @@ function(build_with_lto)
endif()
add_subdirectory(pugixml)
- add_subdirectory(stb_lib)
+ #add_subdirectory(stb_lib)
add_subdirectory(ade)
add_subdirectory(fluid/modules/gapi)
diff --git a/inference-engine/thirdparty/pugixml/CMakeLists.txt b/inference-engine/thirdparty/pugixml/CMakeLists.txt
index 8bcb2801..f7e031c0 100644
--- a/inference-engine/thirdparty/pugixml/CMakeLists.txt
+++ b/inference-engine/thirdparty/pugixml/CMakeLists.txt
@@ -41,7 +41,7 @@ if(BUILD_SHARED_LIBS)
else()
add_library(pugixml STATIC ${SOURCES})
if (MSVC)
- add_library(pugixml_mt STATIC ${SOURCES})
+ #add_library(pugixml_mt STATIC ${SOURCES})
#if (WIN32)
# set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} /MT")
# set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} /MTd")

@@ -0,0 +1,13 @@
diff --git a/cmake/developer_package.cmake b/cmake/developer_package.cmake
index bed73503..5124795a 100644
--- a/cmake/developer_package.cmake
+++ b/cmake/developer_package.cmake
@@ -137,7 +137,7 @@ if("${CMAKE_BUILD_TYPE}" STREQUAL "")
set(CMAKE_BUILD_TYPE "Release")
endif()
-set(OUTPUT_ROOT ${OpenVINO_MAIN_SOURCE_DIR})
+set(OUTPUT_ROOT "${CMAKE_BINARY_DIR}")
# Enable postfixes for Debug/Release builds
set(IE_DEBUG_POSTFIX_WIN "d")

@@ -0,0 +1,14 @@
diff --git a/CMakeLists.txt b/CMakeLists.txt
index edf8233f..cf197376 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -18,6 +18,9 @@ endif()
project(OpenVINO)
+set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} /Zi /FS")
+set(CMAKE_SHARED_LINKER_FLAGS_RELEASE "${CMAKE_SHARED_LINKER_FLAGS_RELEASE} /DEBUG /OPT:REF /OPT:ICF")
+
set(OpenVINO_MAIN_SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR})
set(IE_MAIN_SOURCE_DIR ${OpenVINO_MAIN_SOURCE_DIR}/inference-engine)
set(CMAKE_MODULE_PATH "${OpenVINO_MAIN_SOURCE_DIR}/cmake" ${CMAKE_MODULE_PATH})

@@ -0,0 +1,13 @@
diff --git a/inference-engine/src/CMakeLists.txt b/inference-engine/src/CMakeLists.txt
index 7e9f590..a460c17 100644
--- a/inference-engine/src/CMakeLists.txt
+++ b/inference-engine/src/CMakeLists.txt
@@ -24,7 +24,7 @@ endif()
add_subdirectory(hetero_plugin)
-add_subdirectory(multi_device)
+#add_subdirectory(multi_device)
add_subdirectory(transformations)

@@ -0,0 +1,4 @@
applyPatch('20200413-dldt-disable-unused-targets.patch')
applyPatch('20200413-dldt-fix-binaries-location.patch')
applyPatch('20200413-dldt-pdb.patch')
applyPatch('20200604-dldt-disable-multidevice.patch')

@@ -0,0 +1,57 @@
sysroot_bin_dir = prepare_dir(self.sysrootdir / 'bin')
copytree(self.build_dir / 'install', self.sysrootdir / 'ngraph')
#rm_one(self.sysrootdir / 'ngraph' / 'lib' / 'ngraph.dll')
build_config = 'Release' if not self.config.build_debug else 'Debug'
build_bin_dir = self.build_dir / 'bin' / 'intel64' / build_config
def copy_bin(name):
global build_bin_dir, sysroot_bin_dir
copytree(build_bin_dir / name, sysroot_bin_dir / name)
dll_suffix = 'd' if self.config.build_debug else ''
def copy_dll(name):
global copy_bin, dll_suffix
copy_bin(name + dll_suffix + '.dll')
copy_bin(name + dll_suffix + '.pdb')
copy_bin('cldnn_global_custom_kernels')
copy_bin('cache.json')
copy_dll('clDNNPlugin')
copy_dll('HeteroPlugin')
copy_dll('inference_engine')
copy_dll('inference_engine_legacy')
copy_dll('inference_engine_nn_builder')
copy_dll('inference_engine_transformations') # runtime
copy_dll('inference_engine_lp_transformations') # runtime
copy_dll('MKLDNNPlugin') # runtime
copy_dll('myriadPlugin') # runtime
#copy_dll('MultiDevicePlugin') # runtime, not used
copy_dll('ngraph')
copy_bin('plugins.xml')
copytree(self.build_dir / 'bin' / 'intel64' / 'pcie-ma248x.elf', sysroot_bin_dir / 'pcie-ma248x.elf')
copytree(self.build_dir / 'bin' / 'intel64' / 'usb-ma2x8x.mvcmd', sysroot_bin_dir / 'usb-ma2x8x.mvcmd')
copytree(self.build_dir / 'bin' / 'intel64' / 'usb-ma2450.mvcmd', sysroot_bin_dir / 'usb-ma2450.mvcmd')
copytree(self.srcdir / 'inference-engine' / 'temp' / 'tbb' / 'bin', sysroot_bin_dir)
copytree(self.srcdir / 'inference-engine' / 'temp' / 'tbb', self.sysrootdir / 'tbb')
sysroot_ie_dir = prepare_dir(self.sysrootdir / 'deployment_tools' / 'inference_engine')
sysroot_ie_lib_dir = prepare_dir(sysroot_ie_dir / 'lib' / 'intel64')
copytree(self.srcdir / 'inference-engine' / 'include', sysroot_ie_dir / 'include')
if not self.config.build_debug:
copytree(self.build_dir / 'install' / 'lib' / 'ngraph.lib', sysroot_ie_lib_dir / 'ngraph.lib')
copytree(build_bin_dir / 'inference_engine.lib', sysroot_ie_lib_dir / 'inference_engine.lib')
copytree(build_bin_dir / 'inference_engine_nn_builder.lib', sysroot_ie_lib_dir / 'inference_engine_nn_builder.lib')
copytree(build_bin_dir / 'inference_engine_legacy.lib', sysroot_ie_lib_dir / 'inference_engine_legacy.lib')
else:
copytree(self.build_dir / 'install' / 'lib' / 'ngraphd.lib', sysroot_ie_lib_dir / 'ngraphd.lib')
copytree(build_bin_dir / 'inference_engined.lib', sysroot_ie_lib_dir / 'inference_engined.lib')
copytree(build_bin_dir / 'inference_engine_nn_builderd.lib', sysroot_ie_lib_dir / 'inference_engine_nn_builderd.lib')
copytree(build_bin_dir / 'inference_engine_legacyd.lib', sysroot_ie_lib_dir / 'inference_engine_legacyd.lib')
sysroot_license_dir = prepare_dir(self.sysrootdir / 'etc' / 'licenses')
copytree(self.srcdir / 'LICENSE', sysroot_license_dir / 'dldt-LICENSE')
copytree(self.srcdir / 'ngraph/LICENSE', sysroot_license_dir / 'ngraph-LICENSE')
copytree(self.sysrootdir / 'tbb/LICENSE', sysroot_license_dir / 'tbb-LICENSE')

@@ -208,7 +208,7 @@ class BuilderDLDT:
def do_clone(srcdir, noFetch):
git_checkout(srcdir, self.config.dldt_src_url, self.config.dldt_src_branch, self.config.dldt_src_commit,
['-n', '--depth=100', '--recurse-submodules'] +
['-n', '--depth=100', '--no-single-branch', '--recurse-submodules'] +
(self.config.dldt_src_git_clone_extra or []),
noFetch=noFetch
)
@@ -432,9 +432,9 @@ class Builder:
def main():
dldt_src_url = 'https://github.com/opencv/dldt.git'
dldt_src_commit = '2020.2'
dldt_release = '2020020000'
dldt_src_url = 'https://github.com/openvinotoolkit/openvino'
dldt_src_commit = '2020.3.0'
dldt_release = '2020030000'
build_cache_dir_default = os.environ.get('BUILD_CACHE_DIR', '.build_cache')
build_subst_drive = os.environ.get('BUILD_SUBST_DRIVE', None)

@@ -0,0 +1,104 @@
//
// this sample demonstrates parsing (segmenting) human body parts from an image using opencv's dnn,
// based on https://github.com/Engineering-Course/LIP_JPPNet
//
// get the pretrained model from: https://www.dropbox.com/s/qag9vzambhhkvxr/lip_jppnet_384.pb?dl=0
//
#include <opencv2/dnn.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/imgproc.hpp>
using namespace cv;
static Mat parse_human(const Mat &image, const std::string &model, int backend=dnn::DNN_BACKEND_DEFAULT, int target=dnn::DNN_TARGET_CPU) {
// this network expects an image and a flipped copy as input
Mat flipped;
flip(image, flipped, 1);
std::vector<Mat> batch;
batch.push_back(image);
batch.push_back(flipped);
Mat blob = dnn::blobFromImages(batch, 1.0, Size(), Scalar(104.00698793, 116.66876762, 122.67891434));
dnn::Net net = dnn::readNet(model);
net.setPreferableBackend(backend);
net.setPreferableTarget(target);
net.setInput(blob);
Mat out = net.forward();
// expected output: [2, 20, 384, 384], (2 lists(orig, flipped) of 20 body part heatmaps 384x384)
// LIP classes:
// 0 Background, 1 Hat, 2 Hair, 3 Glove, 4 Sunglasses, 5 UpperClothes, 6 Dress, 7 Coat, 8 Socks, 9 Pants
// 10 Jumpsuits, 11 Scarf, 12 Skirt, 13 Face, 14 LeftArm, 15 RightArm, 16 LeftLeg, 17 RightLeg, 18 LeftShoe, 19 RightShoe
Vec3b colors[] = {
Vec3b(0, 0, 0), Vec3b(128, 0, 0), Vec3b(255, 0, 0), Vec3b(0, 85, 0), Vec3b(170, 0, 51), Vec3b(255, 85, 0),
Vec3b(0, 0, 85), Vec3b(0, 119, 221), Vec3b(85, 85, 0), Vec3b(0, 85, 85), Vec3b(85, 51, 0), Vec3b(52, 86, 128),
Vec3b(0, 128, 0), Vec3b(0, 0, 255), Vec3b(51, 170, 221), Vec3b(0, 255, 255), Vec3b(85, 255, 170),
Vec3b(170, 255, 85), Vec3b(255, 255, 0), Vec3b(255, 170, 0)
};
Mat segm(image.size(), CV_8UC3, Scalar(0,0,0));
Mat maxval(image.size(), CV_32F, Scalar(0));
// iterate over body part heatmaps (LIP classes)
for (int i=0; i<out.size[1]; i++) {
// resize heatmaps to original image size
// "head" is the original image result, "tail" the flipped copy
Mat head, h(out.size[2], out.size[3], CV_32F, out.ptr<float>(0,i));
resize(h, head, image.size());
// we have to swap the last 3 pairs in the "tail" list
static int tail_order[] = {0,1,2,3,4,5,6,7,8,9,10,11,12,13,15,14,17,16,19,18};
Mat tail, t(out.size[2], out.size[3], CV_32F, out.ptr<float>(1,tail_order[i]));
resize(t, tail, image.size());
flip(tail, tail, 1);
// mix original and flipped result
Mat avg = (head + tail) * 0.5;
// write color if prob value > maxval
Mat cmask;
compare(avg, maxval, cmask, CMP_GT);
segm.setTo(colors[i], cmask);
// keep largest values for next iteration
max(avg, maxval, maxval);
}
cvtColor(segm, segm, COLOR_RGB2BGR);
return segm;
}
int main(int argc, char**argv)
{
CommandLineParser parser(argc,argv,
"{help h | | show help screen / args}"
"{image i | | person image to process }"
"{model m |lip_jppnet_384.pb| network model}"
"{backend b | 0 | Choose one of computation backends: "
"0: automatically (by default), "
"1: Halide language (http://halide-lang.org/), "
"2: Intel's Deep Learning Inference Engine (https://software.intel.com/openvino-toolkit), "
"3: OpenCV implementation }"
"{target t | 0 | Choose one of target computation devices: "
"0: CPU target (by default), "
"1: OpenCL, "
"2: OpenCL fp16 (half-float precision), "
"3: VPU }"
);
if (argc == 1 || parser.has("help"))
{
parser.printMessage();
return 0;
}
std::string model = parser.get<std::string>("model");
std::string image = parser.get<std::string>("image");
int backend = parser.get<int>("backend");
int target = parser.get<int>("target");
Mat input = imread(image);
Mat segm = parse_human(input, model, backend, target);
imshow("human parsing", segm);
waitKey();
return 0;
}

@@ -185,7 +185,7 @@ class CpVton(object):
agnostic = np.concatenate((res_shape, img_head, pose_map), axis=0)
agnostic = np.expand_dims(agnostic, axis=0)
return agnostic
return agnostic.astype(np.float32)
def get_warped_cloth(self, cloth_img, agnostic, height=256, width=192):
cloth = cv.dnn.blobFromImage(cloth_img, 1.0 / 127.5, (width, height), mean=(127.5, 127.5, 127.5), swapRB=True)

@@ -29,12 +29,12 @@ class MyData:
return s
## [inside]
def write(self, fs):
fs.write('MyData','{')
def write(self, fs, name):
fs.startWriteStruct(name, cv.FileNode_MAP|cv.FileNode_FLOW)
fs.write('A', self.A)
fs.write('X', self.X)
fs.write('name', self.name)
fs.write('MyData','}')
fs.endWriteStruct()
def read(self, node):
if (not node.empty()):
@@ -74,25 +74,26 @@ def main(argv):
## [writeNum]
## [writeStr]
s.write('strings', '[')
s.write('image1.jpg','Awesomeness')
s.write('../data/baboon.jpg',']')
s.startWriteStruct('strings', cv.FileNode_SEQ)
for elem in ['image1.jpg', 'Awesomeness', '../data/baboon.jpg']:
s.write('', elem)
s.endWriteStruct()
## [writeStr]
## [writeMap]
s.write ('Mapping', '{')
s.write ('One', 1)
s.write ('Two', 2)
s.write ('Mapping', '}')
s.startWriteStruct('Mapping', cv.FileNode_MAP)
s.write('One', 1)
s.write('Two', 2)
s.endWriteStruct()
## [writeMap]
## [iomatw]
s.write ('R_MAT', R)
s.write ('T_MAT', T)
s.write('R_MAT', R)
s.write('T_MAT', T)
## [iomatw]
## [customIOw]
m.write(s)
m.write(s, 'MyData')
## [customIOw]
## [close]
s.release()
