Merge branch 4.x

pull/21651/head
Alexander Alekhin 3 years ago
commit 899b4d1452
  1. 14
      .github/ISSUE_TEMPLATE.md
  2. 6
      .github/PULL_REQUEST_TEMPLATE.md
  3. 114
      CMakeLists.txt
  4. 14
      apps/opencv_stitching_tool/opencv_stitching/blender.py
  5. 149
      apps/opencv_stitching_tool/opencv_stitching/cropper.py
  6. 4
      apps/opencv_stitching_tool/opencv_stitching/feature_matcher.py
  7. 19
      apps/opencv_stitching_tool/opencv_stitching/image_handler.py
  8. 303
      apps/opencv_stitching_tool/opencv_stitching/largest_interior_rectangle.py
  9. 12
      apps/opencv_stitching_tool/opencv_stitching/megapix_downscaler.py
  10. 11
      apps/opencv_stitching_tool/opencv_stitching/megapix_scaler.py
  11. 27
      apps/opencv_stitching_tool/opencv_stitching/panorama_estimation.py
  12. 29
      apps/opencv_stitching_tool/opencv_stitching/seam_finder.py
  13. 137
      apps/opencv_stitching_tool/opencv_stitching/stitcher.py
  14. 5
      apps/opencv_stitching_tool/opencv_stitching/subsetter.py
  15. 5
      apps/opencv_stitching_tool/opencv_stitching/test/test_megapix_scaler.py
  16. 2
      apps/opencv_stitching_tool/opencv_stitching/test/test_performance.py
  17. 4
      apps/opencv_stitching_tool/opencv_stitching/test/test_registration.py
  18. 10
      apps/opencv_stitching_tool/opencv_stitching/test/test_stitcher.py
  19. 76
      apps/opencv_stitching_tool/opencv_stitching/warper.py
  20. 12
      apps/opencv_stitching_tool/opencv_stitching_tool.py
  21. 26
      cmake/OpenCVCompilerOptions.cmake
  22. 147
      cmake/OpenCVDetectInferenceEngine.cmake
  23. 4
      cmake/OpenCVFindLibsGrfmt.cmake
  24. 15
      cmake/OpenCVPluginStandalone.cmake
  25. 1
      cmake/OpenCVUtils.cmake
  26. 14
      doc/tutorials/dnn/dnn_face/dnn_face.markdown
  27. 7
      doc/tutorials/introduction/config_reference/config_reference.markdown
  28. 11
      modules/3d/src/dls.cpp
  29. 53
      modules/3d/src/dls.h
  30. 20
      modules/3d/src/solvepnp.cpp
  31. 6
      modules/core/include/opencv2/core/bindings_utils.hpp
  32. 13
      modules/core/include/opencv2/core/core_c.h
  33. 3
      modules/core/include/opencv2/core/cuda.hpp
  34. 13
      modules/core/include/opencv2/core/mat.hpp
  35. 4
      modules/core/include/opencv2/core/persistence.hpp
  36. 29
      modules/core/include/opencv2/core/utils/fp_control.private.hpp
  37. 69
      modules/core/include/opencv2/core/utils/fp_control_utils.hpp
  38. 3
      modules/core/include/opencv2/core/vsx_utils.hpp
  39. 4
      modules/core/perf/perf_reduce.cpp
  40. 5
      modules/core/src/cuda/gpu_mat.cu
  41. 2
      modules/core/src/cuda_stream.cpp
  42. 22
      modules/core/src/hal_internal.cpp
  43. 2
      modules/core/src/kmeans.cpp
  44. 2
      modules/core/src/matmul.dispatch.cpp
  45. 22
      modules/core/src/matrix.cpp
  46. 32
      modules/core/src/matrix_operations.cpp
  47. 14
      modules/core/src/parallel.cpp
  48. 60
      modules/core/src/persistence.cpp
  49. 28
      modules/core/src/persistence_impl.hpp
  50. 81
      modules/core/src/system.cpp
  51. 71
      modules/core/src/va_intel.cpp
  52. 16
      modules/core/test/ocl/test_arithm.cpp
  53. 24
      modules/core/test/test_io.cpp
  54. 34
      modules/core/test/test_mat.cpp
  55. 2
      modules/core/test/test_math.cpp
  56. 65
      modules/core/test/test_misc.cpp
  57. 3
      modules/core/test/test_precomp.hpp
  58. 4
      modules/core/test/test_umat.cpp
  59. 25
      modules/dnn/CMakeLists.txt
  60. 14
      modules/dnn/include/opencv2/dnn/dict.hpp
  61. 51
      modules/dnn/include/opencv2/dnn/dnn.hpp
  62. 3
      modules/dnn/include/opencv2/dnn/layer.hpp
  63. 10
      modules/dnn/include/opencv2/dnn/shape_utils.hpp
  64. 8
      modules/dnn/include/opencv2/dnn/utils/inference_engine.hpp
  65. 6
      modules/dnn/misc/objc/gen_dict.json
  66. 4
      modules/dnn/src/caffe/caffe_importer.cpp
  67. 3
      modules/dnn/src/darknet/darknet_importer.cpp
  68. 11
      modules/dnn/src/debug_utils.cpp
  69. 937
      modules/dnn/src/dnn.cpp
  70. 8
      modules/dnn/src/dnn_common.hpp
  71. 2
      modules/dnn/src/graph_simplifier.cpp
  72. 47
      modules/dnn/src/ie_ngraph.cpp
  73. 8
      modules/dnn/src/ie_ngraph.hpp
  74. 17
      modules/dnn/src/layers/batch_norm_layer.cpp
  75. 33
      modules/dnn/src/layers/blank_layer.cpp
  76. 18
      modules/dnn/src/layers/concat_layer.cpp
  77. 16
      modules/dnn/src/layers/const_layer.cpp
  78. 187
      modules/dnn/src/layers/convolution_layer.cpp
  79. 26
      modules/dnn/src/layers/detection_output_layer.cpp
  80. 120
      modules/dnn/src/layers/elementwise_layers.cpp
  81. 38
      modules/dnn/src/layers/eltwise_layer.cpp
  82. 28
      modules/dnn/src/layers/flatten_layer.cpp
  83. 26
      modules/dnn/src/layers/fully_connected_layer.cpp
  84. 2
      modules/dnn/src/layers/layers_common.simd.hpp
  85. 26
      modules/dnn/src/layers/lrn_layer.cpp
  86. 19
      modules/dnn/src/layers/mvn_layer.cpp
  87. 60
      modules/dnn/src/layers/normalize_bbox_layer.cpp
  88. 5
      modules/dnn/src/layers/not_implemented_layer.cpp
  89. 28
      modules/dnn/src/layers/padding_layer.cpp
  90. 19
      modules/dnn/src/layers/permute_layer.cpp
  91. 75
      modules/dnn/src/layers/pooling_layer.cpp
  92. 67
      modules/dnn/src/layers/prior_box_layer.cpp
  93. 28
      modules/dnn/src/layers/proposal_layer.cpp
  94. 2
      modules/dnn/src/layers/recurrent_layers.cpp
  95. 18
      modules/dnn/src/layers/reorg_layer.cpp
  96. 18
      modules/dnn/src/layers/reshape_layer.cpp
  97. 34
      modules/dnn/src/layers/resize_layer.cpp
  98. 34
      modules/dnn/src/layers/scale_layer.cpp
  99. 65
      modules/dnn/src/layers/slice_layer.cpp
  100. 17
      modules/dnn/src/layers/softmax_layer.cpp
  101. Some files were not shown because too many files have changed in this diff Show More

@ -34,11 +34,11 @@ This is a template helping you to create an issue which can be processed as quic
- [ ] I report the issue, it's not a question
<!--
OpenCV team works with forum.opencv.org, Stack Overflow and other communities
to discuss problems. Tickets with question without real issue statement will be
to discuss problems. Tickets with questions without a real issue statement will be
closed.
-->
- [ ] I checked the problem with documentation, FAQ, open issues,
forum.opencv.org, Stack Overflow, etc and have not found solution
forum.opencv.org, Stack Overflow, etc and have not found any solution
<!--
Places to check:
* OpenCV documentation: https://docs.opencv.org
@ -47,11 +47,11 @@ This is a template helping you to create an issue which can be processed as quic
* OpenCV issue tracker: https://github.com/opencv/opencv/issues?q=is%3Aissue
* Stack Overflow branch: https://stackoverflow.com/questions/tagged/opencv
-->
- [ ] I updated to latest OpenCV version and the issue is still there
- [ ] I updated to the latest OpenCV version and the issue is still there
<!--
master branch for OpenCV 4.x and 3.4 branch for OpenCV 3.x releases.
OpenCV team supports only latest release for each branch.
The ticket is closed, if the problem is not reproduced with modern version.
OpenCV team supports only the latest release for each branch.
The ticket is closed if the problem is not reproduced with the modern version.
-->
- [ ] There is reproducer code and related data files: videos, images, onnx, etc
<!--
@ -61,9 +61,9 @@ This is a template helping you to create an issue which can be processed as quic
to reduce attachment size
* Use PNG for images, if you report some CV related bug, but not image reader
issue
* Attach the image as archive to the ticket, if you report some reader issue.
* Attach the image as an archive to the ticket, if you report some reader issue.
Image hosting services compress images and it breaks the repro code.
* Provide ONNX file for some public model or ONNX file with with random weights,
* Provide ONNX file for some public model or ONNX file with random weights,
if you report ONNX parsing or handling issue. Architecture details diagram
from netron tool can be very useful too. See https://lutzroeder.github.io/netron/
-->

@ -3,9 +3,9 @@
See details at https://github.com/opencv/opencv/wiki/How_to_contribute#making-a-good-pull-request
- [ ] I agree to contribute to the project under Apache 2 License.
- [ ] To the best of my knowledge, the proposed patch is not based on a code under GPL or other license that is incompatible with OpenCV
- [ ] The PR is proposed to proper branch
- [ ] There is reference to original bug report and related work
- [ ] To the best of my knowledge, the proposed patch is not based on a code under GPL or another license that is incompatible with OpenCV
- [ ] The PR is proposed to the proper branch
- [ ] There is a reference to the original bug report and related work
- [ ] There is accuracy test, performance test and test data in opencv_extra repository, if applicable
Patch to opencv_extra has the same branch name.
- [ ] The feature is well documented and sample code can be built with the project CMake

@ -244,7 +244,7 @@ OCV_OPTION(WITH_AVFOUNDATION "Use AVFoundation for Video I/O (iOS/Mac)" ON
OCV_OPTION(WITH_CAP_IOS "Enable iOS video capture" ON
VISIBLE_IF IOS
VERIFY HAVE_CAP_IOS)
OCV_OPTION(WITH_CAROTENE "Use NVidia carotene acceleration library for ARM platform" ON
OCV_OPTION(WITH_CAROTENE "Use NVidia carotene acceleration library for ARM platform" (NOT CV_DISABLE_OPTIMIZATION)
VISIBLE_IF (ARM OR AARCH64) AND NOT IOS)
OCV_OPTION(WITH_CPUFEATURES "Use cpufeatures Android library" ON
VISIBLE_IF ANDROID
@ -291,12 +291,10 @@ OCV_OPTION(WITH_HALIDE "Include Halide support" OFF
OCV_OPTION(WITH_VULKAN "Include Vulkan support" OFF
VISIBLE_IF TRUE
VERIFY HAVE_VULKAN)
OCV_OPTION(WITH_INF_ENGINE "Include Intel Inference Engine support" OFF
# replacement for deprecated options: WITH_INF_ENGINE, WITH_NGRAPH
OCV_OPTION(WITH_OPENVINO "Include Intel OpenVINO toolkit support" (WITH_INF_ENGINE)
VISIBLE_IF TRUE
VERIFY INF_ENGINE_TARGET)
OCV_OPTION(WITH_NGRAPH "Include nGraph support" WITH_INF_ENGINE
VISIBLE_IF TRUE
VERIFY TARGET ngraph::ngraph)
VERIFY TARGET ocv.3rdparty.openvino)
OCV_OPTION(WITH_WEBNN "Include WebNN support" OFF
VISIBLE_IF TRUE
VERIFY HAVE_WEBNN)
@ -809,7 +807,7 @@ if(WITH_WEBNN)
endif()
# --- Inference Engine ---
if(WITH_INF_ENGINE)
if(WITH_INF_ENGINE OR WITH_OPENVINO)
include(cmake/OpenCVDetectInferenceEngine.cmake)
endif()
@ -1548,55 +1546,61 @@ if(WITH_HALIDE OR HAVE_HALIDE)
status(" Halide:" HAVE_HALIDE THEN "YES (${HALIDE_LIBRARIES} ${HALIDE_INCLUDE_DIRS})" ELSE NO)
endif()
if(WITH_INF_ENGINE OR INF_ENGINE_TARGET)
if(INF_ENGINE_TARGET)
list(GET INF_ENGINE_TARGET 0 ie_target)
set(__msg "YES (${INF_ENGINE_RELEASE} / ${INF_ENGINE_VERSION})")
ocv_get_imported_target(ie_target "${ie_target}")
get_target_property(_lib ${ie_target} IMPORTED_LOCATION)
get_target_property(_lib_imp_rel ${ie_target} IMPORTED_IMPLIB_RELEASE)
get_target_property(_lib_imp_dbg ${ie_target} IMPORTED_IMPLIB_DEBUG)
get_target_property(_lib_rel ${ie_target} IMPORTED_LOCATION_RELEASE)
get_target_property(_lib_dbg ${ie_target} IMPORTED_LOCATION_DEBUG)
ocv_build_features_string(_lib
IF _lib THEN "${_lib}"
IF _lib_imp_rel AND _lib_imp_dbg THEN "${_lib_imp_rel} / ${_lib_imp_dbg}"
IF _lib_rel AND _lib_dbg THEN "${_lib_rel} / ${_lib_dbg}"
IF _lib_rel THEN "${_lib_rel}"
IF _lib_dbg THEN "${_lib_dbg}"
ELSE "unknown"
)
get_target_property(_inc ${ie_target} INTERFACE_INCLUDE_DIRECTORIES)
status(" Inference Engine:" "${__msg}")
status(" * libs:" "${_lib}")
status(" * includes:" "${_inc}")
else()
status(" Inference Engine:" "NO")
if(HAVE_OPENVINO
OR (WITH_OPENVINO AND NOT WITH_INF_ENGINE AND NOT INF_ENGINE_TARGET)
)
status(" OpenVINO:" TARGET openvino::runtime THEN "YES (${OpenVINO_VERSION})" ELSE "NO")
else()
if(WITH_INF_ENGINE OR INF_ENGINE_TARGET)
if(INF_ENGINE_TARGET)
list(GET INF_ENGINE_TARGET 0 ie_target)
set(__msg "YES (${INF_ENGINE_RELEASE} / ${INF_ENGINE_VERSION})")
ocv_get_imported_target(ie_target "${ie_target}")
get_target_property(_lib ${ie_target} IMPORTED_LOCATION)
get_target_property(_lib_imp_rel ${ie_target} IMPORTED_IMPLIB_RELEASE)
get_target_property(_lib_imp_dbg ${ie_target} IMPORTED_IMPLIB_DEBUG)
get_target_property(_lib_rel ${ie_target} IMPORTED_LOCATION_RELEASE)
get_target_property(_lib_dbg ${ie_target} IMPORTED_LOCATION_DEBUG)
ocv_build_features_string(_lib
IF _lib THEN "${_lib}"
IF _lib_imp_rel AND _lib_imp_dbg THEN "${_lib_imp_rel} / ${_lib_imp_dbg}"
IF _lib_rel AND _lib_dbg THEN "${_lib_rel} / ${_lib_dbg}"
IF _lib_rel THEN "${_lib_rel}"
IF _lib_dbg THEN "${_lib_dbg}"
ELSE "unknown"
)
get_target_property(_inc ${ie_target} INTERFACE_INCLUDE_DIRECTORIES)
status(" Inference Engine:" "${__msg}")
status(" * libs:" "${_lib}")
status(" * includes:" "${_inc}")
else()
status(" Inference Engine:" "NO")
endif()
endif()
endif()
if(WITH_NGRAPH OR HAVE_NGRAPH)
if(HAVE_NGRAPH)
ocv_get_imported_target(__target ngraph::ngraph)
set(__msg "YES (${ngraph_VERSION})")
get_target_property(_lib ${__target} IMPORTED_LOCATION)
get_target_property(_lib_imp_rel ${__target} IMPORTED_IMPLIB_RELEASE)
get_target_property(_lib_imp_dbg ${__target} IMPORTED_IMPLIB_DEBUG)
get_target_property(_lib_rel ${__target} IMPORTED_LOCATION_RELEASE)
get_target_property(_lib_dbg ${__target} IMPORTED_LOCATION_DEBUG)
ocv_build_features_string(_lib
IF _lib THEN "${_lib}"
IF _lib_imp_rel AND _lib_imp_dbg THEN "${_lib_imp_rel} / ${_lib_imp_dbg}"
IF _lib_rel AND _lib_dbg THEN "${_lib_rel} / ${_lib_dbg}"
IF _lib_rel THEN "${_lib_rel}"
IF _lib_dbg THEN "${_lib_dbg}"
ELSE "unknown"
)
get_target_property(_inc ${__target} INTERFACE_INCLUDE_DIRECTORIES)
status(" nGraph:" "${__msg}")
status(" * libs:" "${_lib}")
status(" * includes:" "${_inc}")
else()
status(" nGraph:" "NO")
if(WITH_NGRAPH OR HAVE_NGRAPH)
if(HAVE_NGRAPH)
ocv_get_imported_target(__target ngraph::ngraph)
set(__msg "YES (${ngraph_VERSION})")
get_target_property(_lib ${__target} IMPORTED_LOCATION)
get_target_property(_lib_imp_rel ${__target} IMPORTED_IMPLIB_RELEASE)
get_target_property(_lib_imp_dbg ${__target} IMPORTED_IMPLIB_DEBUG)
get_target_property(_lib_rel ${__target} IMPORTED_LOCATION_RELEASE)
get_target_property(_lib_dbg ${__target} IMPORTED_LOCATION_DEBUG)
ocv_build_features_string(_lib
IF _lib THEN "${_lib}"
IF _lib_imp_rel AND _lib_imp_dbg THEN "${_lib_imp_rel} / ${_lib_imp_dbg}"
IF _lib_rel AND _lib_dbg THEN "${_lib_rel} / ${_lib_dbg}"
IF _lib_rel THEN "${_lib_rel}"
IF _lib_dbg THEN "${_lib_dbg}"
ELSE "unknown"
)
get_target_property(_inc ${__target} INTERFACE_INCLUDE_DIRECTORIES)
status(" nGraph:" "${__msg}")
status(" * libs:" "${_lib}")
status(" * includes:" "${_inc}")
else()
status(" nGraph:" "NO")
endif()
endif()
endif()

@ -26,8 +26,8 @@ class Blender:
elif self.blender_type == "multiband":
self.blender = cv.detail_MultiBandBlender()
self.blender.setNumBands((np.log(blend_width) /
np.log(2.) - 1.).astype(np.int))
self.blender.setNumBands(int((np.log(blend_width) /
np.log(2.) - 1.)))
elif self.blender_type == "feather":
self.blender = cv.detail_FeatherBlender()
@ -45,4 +45,12 @@ class Blender:
result_mask = None
result, result_mask = self.blender.blend(result, result_mask)
result = cv.convertScaleAbs(result)
return result
return result, result_mask
@classmethod
def create_panorama(cls, imgs, masks, corners, sizes):
blender = cls("no")
blender.prepare(corners, sizes)
for img, mask, corner in zip(imgs, masks, corners):
blender.feed(img, mask, corner)
return blender.blend()

@ -0,0 +1,149 @@
from collections import namedtuple
import cv2 as cv
from .blender import Blender
from .stitching_error import StitchingError
class Rectangle(namedtuple('Rectangle', 'x y width height')):
    """Axis-aligned rectangle defined by its top-left corner and extent."""
    __slots__ = ()

    @property
    def area(self):
        """Number of pixels covered: width * height."""
        return self.height * self.width

    @property
    def corner(self):
        """Top-left corner as an (x, y) tuple."""
        return self.x, self.y

    @property
    def size(self):
        """Extent as a (width, height) tuple."""
        return self.width, self.height

    @property
    def x2(self):
        """Exclusive right edge coordinate."""
        return self.width + self.x

    @property
    def y2(self):
        """Exclusive bottom edge coordinate."""
        return self.height + self.y

    def times(self, x):
        """Return a new Rectangle with every field scaled by x and rounded to int."""
        scaled_fields = [int(round(field * x)) for field in self]
        return Rectangle(*scaled_fields)

    def draw_on(self, img, color=(0, 0, 255), size=1):
        """Draw this rectangle onto img; grayscale input is promoted to RGB first."""
        if len(img.shape) == 2:
            img = cv.cvtColor(img, cv.COLOR_GRAY2RGB)
        top_left = (self.x, self.y)
        # rectangle() takes inclusive corners, hence the -1 on the far edge
        bottom_right = (self.x2 - 1, self.y2 - 1)
        cv.rectangle(img, top_left, bottom_right, color, size)
        return img
class Cropper:
    """Crops the stitched panorama (and per-image ROIs) to the largest
    interior rectangle of the panorama mask, removing black borders.

    Cropping is optional; with crop=False every method is a pass-through.
    """

    DEFAULT_CROP = False

    def __init__(self, crop=DEFAULT_CROP):
        # crop: whether cropping is performed at all.
        self.do_crop = crop
        self.overlapping_rectangles = []
        self.cropping_rectangles = []

    def prepare(self, imgs, masks, corners, sizes):
        """Estimate the crop geometry from low-resolution warped images.

        Computes the largest interior rectangle (LIR) of the blended
        panorama mask, then derives, for every input image, the part of
        its rectangle that overlaps the LIR and the corresponding
        intersection inside the image itself.
        """
        if self.do_crop:
            mask = self.estimate_panorama_mask(imgs, masks, corners, sizes)
            # numba-backed LIR implementation is imported lazily here
            self.compile_numba_functionality()
            lir = self.estimate_largest_interior_rectangle(mask)
            # shift corners so the panorama origin is (0, 0), matching
            # the coordinate system of the LIR
            corners = self.get_zero_center_corners(corners)
            rectangles = self.get_rectangles(corners, sizes)
            self.overlapping_rectangles = self.get_overlaps(
                rectangles, lir)
            self.intersection_rectangles = self.get_intersections(
                rectangles, self.overlapping_rectangles)

    def crop_images(self, imgs, aspect=1):
        """Yield every image cropped to its intersection rectangle.

        aspect rescales the rectangles estimated in prepare() to the
        resolution of the given images.
        """
        for idx, img in enumerate(imgs):
            yield self.crop_img(img, idx, aspect)

    def crop_img(self, img, idx, aspect=1):
        """Crop the idx-th image; returns the image unchanged when cropping is off."""
        if self.do_crop:
            intersection_rect = self.intersection_rectangles[idx]
            scaled_intersection_rect = intersection_rect.times(aspect)
            cropped_img = self.crop_rectangle(img, scaled_intersection_rect)
            return cropped_img
        return img

    def crop_rois(self, corners, sizes, aspect=1):
        """Return (corners, sizes) of the cropped ROIs, scaled by aspect;
        pass-through when cropping is off."""
        if self.do_crop:
            scaled_overlaps = \
                [r.times(aspect) for r in self.overlapping_rectangles]
            cropped_corners = [r.corner for r in scaled_overlaps]
            cropped_corners = self.get_zero_center_corners(cropped_corners)
            cropped_sizes = [r.size for r in scaled_overlaps]
            return cropped_corners, cropped_sizes
        return corners, sizes

    @staticmethod
    def estimate_panorama_mask(imgs, masks, corners, sizes):
        """Blend all warped masks into one panorama-wide mask."""
        _, mask = Blender.create_panorama(imgs, masks, corners, sizes)
        return mask

    def compile_numba_functionality(self):
        """Lazily import the numba-based LIR implementation.

        Raises StitchingError when numba is not installed.
        """
        # numba functionality is only imported if cropping
        # is explicitly desired
        try:
            import numba
        except ModuleNotFoundError:
            raise StitchingError("Numba is needed for cropping but not installed")
        from .largest_interior_rectangle import largest_interior_rectangle
        self.largest_interior_rectangle = largest_interior_rectangle

    def estimate_largest_interior_rectangle(self, mask):
        """Run the LIR search on the mask and wrap the result as a Rectangle."""
        lir = self.largest_interior_rectangle(mask)
        lir = Rectangle(*lir)
        return lir

    @staticmethod
    def get_zero_center_corners(corners):
        """Translate corners so the minimum x and y become 0."""
        min_corner_x = min([corner[0] for corner in corners])
        min_corner_y = min([corner[1] for corner in corners])
        return [(x - min_corner_x, y - min_corner_y) for x, y in corners]

    @staticmethod
    def get_rectangles(corners, sizes):
        """Combine (x, y) corners and (w, h) sizes into Rectangle objects."""
        rectangles = []
        for corner, size in zip(corners, sizes):
            rectangle = Rectangle(*corner, *size)
            rectangles.append(rectangle)
        return rectangles

    @staticmethod
    def get_overlaps(rectangles, lir):
        """Overlap of every rectangle with the largest interior rectangle."""
        return [Cropper.get_overlap(r, lir) for r in rectangles]

    @staticmethod
    def get_overlap(rectangle1, rectangle2):
        """Intersection of two rectangles in panorama coordinates.

        Raises StitchingError when the rectangles do not overlap.
        """
        x1 = max(rectangle1.x, rectangle2.x)
        y1 = max(rectangle1.y, rectangle2.y)
        x2 = min(rectangle1.x2, rectangle2.x2)
        y2 = min(rectangle1.y2, rectangle2.y2)
        if x2 < x1 or y2 < y1:
            raise StitchingError("Rectangles do not overlap!")
        return Rectangle(x1, y1, x2-x1, y2-y1)

    @staticmethod
    def get_intersections(rectangles, overlapping_rectangles):
        """Express every overlap in the local coordinates of its image rectangle."""
        return [Cropper.get_intersection(r, overlap_r) for r, overlap_r
                in zip(rectangles, overlapping_rectangles)]

    @staticmethod
    def get_intersection(rectangle, overlapping_rectangle):
        """Overlap rectangle translated into the image's own coordinate frame."""
        x = abs(overlapping_rectangle.x - rectangle.x)
        y = abs(overlapping_rectangle.y - rectangle.y)
        width = overlapping_rectangle.width
        height = overlapping_rectangle.height
        return Rectangle(x, y, width, height)

    @staticmethod
    def crop_rectangle(img, rectangle):
        """Slice the rectangle region out of a (H, W[, C]) image array."""
        return img[rectangle.y:rectangle.y2, rectangle.x:rectangle.x2]

@ -19,10 +19,10 @@ class FeatureMatcher:
self.matcher = cv.detail_AffineBestOf2NearestMatcher(**kwargs)
elif range_width == -1:
"""https://docs.opencv.org/5.x/d4/d26/classcv_1_1detail_1_1BestOf2NearestMatcher.html""" # noqa
self.matcher = cv.detail.BestOf2NearestMatcher_create(**kwargs)
self.matcher = cv.detail_BestOf2NearestMatcher(**kwargs)
else:
"""https://docs.opencv.org/5.x/d8/d72/classcv_1_1detail_1_1BestOf2NearestRangeMatcher.html""" # noqa
self.matcher = cv.detail.BestOf2NearestRangeMatcher_create(
self.matcher = cv.detail_BestOf2NearestRangeMatcher(
range_width, **kwargs
)

@ -1,6 +1,6 @@
import cv2 as cv
from .megapix_downscaler import MegapixDownscaler
from .megapix_scaler import MegapixDownscaler
from .stitching_error import StitchingError
class ImageHandler:
@ -35,7 +35,7 @@ class ImageHandler:
def resize_to_low_resolution(self, medium_imgs=None):
if medium_imgs and self.scales_set:
return self.resize_medium_to_low(medium_imgs)
return self.resize_imgs_by_scaler(medium_imgs, self.low_scaler)
return self.read_and_resize_imgs(self.low_scaler)
def resize_to_final_resolution(self):
@ -45,9 +45,9 @@ class ImageHandler:
for img, size in self.input_images():
yield self.resize_img_by_scaler(scaler, size, img)
def resize_medium_to_low(self, medium_imgs):
def resize_imgs_by_scaler(self, medium_imgs, scaler):
for img, size in zip(medium_imgs, self.img_sizes):
yield self.resize_img_by_scaler(self.low_scaler, size, img)
yield self.resize_img_by_scaler(scaler, size, img)
@staticmethod
def resize_img_by_scaler(scaler, size, img):
@ -92,3 +92,14 @@ class ImageHandler:
def get_final_to_low_ratio(self):
return self.low_scaler.scale / self.final_scaler.scale
def get_low_to_final_ratio(self):
return self.final_scaler.scale / self.low_scaler.scale
def get_final_img_sizes(self):
return [self.final_scaler.get_scaled_img_size(sz)
for sz in self.img_sizes]
def get_low_img_sizes(self):
return [self.low_scaler.get_scaled_img_size(sz)
for sz in self.img_sizes]

@ -0,0 +1,303 @@
import numpy as np
import numba as nb
import cv2 as cv
from .stitching_error import StitchingError
def largest_interior_rectangle(cells):
    """Find the largest axis-aligned rectangle [x, y, width, height] lying
    entirely inside the non-zero region of the binary mask `cells`.

    Two candidate searches are combined: spans anchored on the mask
    outline, and spans anchored on interior saddle-candidate cells.
    """
    outline = get_outline(cells)
    adjacencies = adjacencies_all_directions(cells)
    s_map, _, saddle_candidates_map = create_maps(outline, adjacencies)
    lir1 = biggest_span_in_span_map(s_map)

    # second pass over the saddle candidates found during the first pass
    candidate_cells = cells_of_interest(saddle_candidates_map)
    s_map = span_map(adjacencies[0], adjacencies[2], candidate_cells)
    lir2 = biggest_span_in_span_map(s_map)

    lir = biggest_rectangle(lir1, lir2)
    return lir
def get_outline(cells):
    """Extract the single outer contour of the binary mask.

    Returns (x_values, y_values) as C-ordered uint32 arrays.
    Raises StitchingError when the mask does not have exactly one
    top-level contour without holes.
    """
    contours, hierarchy = \
        cv.findContours(cells, cv.RETR_TREE, cv.CHAIN_APPROX_NONE)
    # TODO support multiple contours
    # test that only one regular contour exists
    if not hierarchy.shape == (1, 1, 4) or not np.all(hierarchy == -1):
        raise StitchingError("Invalid Contour. Try without cropping.")
    contour = contours[0][:, 0, :]
    x_values = contour[:, 0].astype("uint32", order="C")
    y_values = contour[:, 1].astype("uint32", order="C")
    return x_values, y_values
@nb.njit('uint32[:,::1](uint8[:,::1], boolean)', parallel=True, cache=True)
def horizontal_adjacency(cells, direction):
    """Per-cell length of the run of consecutive non-zero cells in its row.

    With direction truthy the rows are scanned right-to-left, so each cell
    stores the run extending to its right; otherwise left-to-right, storing
    the run extending to its left. Rows are processed in parallel.
    """
    result = np.zeros(cells.shape, dtype=np.uint32)
    for y in nb.prange(cells.shape[0]):
        span = 0
        if direction:
            iterator = range(cells.shape[1]-1, -1, -1)
        else:
            iterator = range(cells.shape[1])
        for x in iterator:
            # reset the running span on any zero cell
            if cells[y, x] > 0:
                span += 1
            else:
                span = 0
            result[y, x] = span
    return result
@nb.njit('uint32[:,::1](uint8[:,::1], boolean)', parallel=True, cache=True)
def vertical_adjacency(cells, direction):
    """Per-cell length of the run of consecutive non-zero cells in its column.

    With direction truthy the columns are scanned bottom-to-top, so each
    cell stores the run extending downward; otherwise top-to-bottom,
    storing the run extending upward. Columns are processed in parallel.
    """
    result = np.zeros(cells.shape, dtype=np.uint32)
    for x in nb.prange(cells.shape[1]):
        span = 0
        if direction:
            iterator = range(cells.shape[0]-1, -1, -1)
        else:
            iterator = range(cells.shape[0])
        for y in iterator:
            # reset the running span on any zero cell
            if cells[y, x] > 0:
                span += 1
            else:
                span = 0
            result[y, x] = span
    return result
@nb.njit(cache=True)
def adjacencies_all_directions(cells):
    """Compute run-length adjacency maps for all four scan directions."""
    h_left2right = horizontal_adjacency(cells, 1)
    h_right2left = horizontal_adjacency(cells, 0)
    v_top2bottom = vertical_adjacency(cells, 1)
    v_bottom2top = vertical_adjacency(cells, 0)
    return h_left2right, h_right2left, v_top2bottom, v_bottom2top
@nb.njit('uint32(uint32[:])', cache=True)
def predict_vector_size(array):
    """Length of the leading non-zero prefix of array.

    Returns the index of the first zero, or len(array) when the array
    contains no zero (including the empty-array case, which yields 0).
    """
    zero_indices = np.where(array == 0)[0]
    if len(zero_indices) == 0:
        # no zero found — the whole array is the prefix
        if len(array) == 0:
            return 0
        return len(array)
    return zero_indices[0]
@nb.njit('uint32[:](uint32[:,::1], uint32, uint32)', cache=True)
def h_vector_top2bottom(h_adjacency, x, y):
    """Running minima of horizontal spans walking downward from (x, y).

    The result is reduced to unique values in descending order: each
    entry is the widest rectangle width achievable for some height.
    """
    vector_size = predict_vector_size(h_adjacency[y:, x])
    h_vector = np.zeros(vector_size, dtype=np.uint32)
    h = np.Inf
    for p in range(vector_size):
        # width can only shrink as we extend the rectangle downward
        h = np.minimum(h_adjacency[y+p, x], h)
        h_vector[p] = h
    h_vector = np.unique(h_vector)[::-1]
    return h_vector
@nb.njit('uint32[:](uint32[:,::1], uint32, uint32)', cache=True)
def h_vector_bottom2top(h_adjacency, x, y):
    """Running minima of horizontal spans walking upward from (x, y).

    The result is reduced to unique values in descending order.
    """
    vector_size = predict_vector_size(np.flip(h_adjacency[:y+1, x]))
    h_vector = np.zeros(vector_size, dtype=np.uint32)
    h = np.Inf
    for p in range(vector_size):
        # width can only shrink as we extend the rectangle upward
        h = np.minimum(h_adjacency[y-p, x], h)
        h_vector[p] = h
    h_vector = np.unique(h_vector)[::-1]
    return h_vector
@nb.njit(cache=True)
def h_vectors_all_directions(h_left2right, h_right2left, x, y):
    """Horizontal span vectors at (x, y) for all four direction combinations."""
    h_l2r_t2b = h_vector_top2bottom(h_left2right, x, y)
    h_r2l_t2b = h_vector_top2bottom(h_right2left, x, y)
    h_l2r_b2t = h_vector_bottom2top(h_left2right, x, y)
    h_r2l_b2t = h_vector_bottom2top(h_right2left, x, y)
    return h_l2r_t2b, h_r2l_t2b, h_l2r_b2t, h_r2l_b2t
@nb.njit('uint32[:](uint32[:,::1], uint32, uint32)', cache=True)
def v_vector_left2right(v_adjacency, x, y):
    """Running minima of vertical spans walking rightward from (x, y).

    The result is reduced to unique values in descending order.
    """
    vector_size = predict_vector_size(v_adjacency[y, x:])
    v_vector = np.zeros(vector_size, dtype=np.uint32)
    v = np.Inf
    for q in range(vector_size):
        # height can only shrink as we extend the rectangle rightward
        v = np.minimum(v_adjacency[y, x+q], v)
        v_vector[q] = v
    v_vector = np.unique(v_vector)[::-1]
    return v_vector
@nb.njit('uint32[:](uint32[:,::1], uint32, uint32)', cache=True)
def v_vector_right2left(v_adjacency, x, y):
    """Running minima of vertical spans walking leftward from (x, y).

    The result is reduced to unique values in descending order.
    """
    vector_size = predict_vector_size(np.flip(v_adjacency[y, :x+1]))
    v_vector = np.zeros(vector_size, dtype=np.uint32)
    v = np.Inf
    for q in range(vector_size):
        # height can only shrink as we extend the rectangle leftward
        v = np.minimum(v_adjacency[y, x-q], v)
        v_vector[q] = v
    v_vector = np.unique(v_vector)[::-1]
    return v_vector
@nb.njit(cache=True)
def v_vectors_all_directions(v_top2bottom, v_bottom2top, x, y):
    """Vertical span vectors at (x, y) for all four direction combinations."""
    v_l2r_t2b = v_vector_left2right(v_top2bottom, x, y)
    v_r2l_t2b = v_vector_right2left(v_top2bottom, x, y)
    v_l2r_b2t = v_vector_left2right(v_bottom2top, x, y)
    v_r2l_b2t = v_vector_right2left(v_bottom2top, x, y)
    return v_l2r_t2b, v_r2l_t2b, v_l2r_b2t, v_r2l_b2t
@nb.njit('uint32[:,:](uint32[:], uint32[:])', cache=True)
def spans(h_vector, v_vector):
    """Pair each horizontal span with its matching vertical span.

    v_vector is reversed so widest-width pairs with smallest-height;
    returns an (n, 2) array of (w, h) rectangle candidates.
    """
    spans = np.stack((h_vector, v_vector[::-1]), axis=1)
    return spans
@nb.njit('uint32[:](uint32[:,:])', cache=True)
def biggest_span(spans):
    """Return the (w, h) span with the largest area; [0, 0] if empty."""
    if len(spans) == 0:
        return np.array([0, 0], dtype=np.uint32)
    areas = spans[:, 0] * spans[:, 1]
    # first occurrence of the maximum area wins
    biggest_span_index = np.where(areas == np.amax(areas))[0][0]
    return spans[biggest_span_index]
@nb.njit(cache=True)
def spans_all_directions(h_vectors, v_vectors):
    """Candidate (w, h) span arrays for each of the four directions."""
    span_l2r_t2b = spans(h_vectors[0], v_vectors[0])
    span_r2l_t2b = spans(h_vectors[1], v_vectors[1])
    span_l2r_b2t = spans(h_vectors[2], v_vectors[2])
    span_r2l_b2t = spans(h_vectors[3], v_vectors[3])
    return span_l2r_t2b, span_r2l_t2b, span_l2r_b2t, span_r2l_b2t
@nb.njit(cache=True)
def get_n_directions(spans_all_directions):
    """Count 1 plus the number of directions with a non-degenerate span.

    A direction is degenerate when all its spans are 1 along at least
    one axis (rectangle cannot grow in that direction).
    """
    n_directions = 1
    for spans in spans_all_directions:
        all_x_1 = np.all(spans[:, 0] == 1)
        all_y_1 = np.all(spans[:, 1] == 1)
        if not all_x_1 and not all_y_1:
            n_directions += 1
    return n_directions
@nb.njit(cache=True)
def get_xy_array(x, y, spans, mode=0):
    """0 - flip none, 1 - flip x, 2 - flip y, 3 - flip both"""
    # Produce a top-left anchor per span: start at (x, y), then for the
    # flipped modes shift the anchor back by the span extent so the
    # rectangle covers the cells the span was measured over.
    xy = spans.copy()
    xy[:, 0] = x
    xy[:, 1] = y
    if mode == 1:
        xy[:, 0] = xy[:, 0] - spans[:, 0] + 1
    if mode == 2:
        xy[:, 1] = xy[:, 1] - spans[:, 1] + 1
    if mode == 3:
        xy[:, 0] = xy[:, 0] - spans[:, 0] + 1
        xy[:, 1] = xy[:, 1] - spans[:, 1] + 1
    return xy
@nb.njit(cache=True)
def get_xy_arrays(x, y, spans_all_directions):
    """Top-left anchor arrays for the spans of all four directions."""
    xy_l2r_t2b = get_xy_array(x, y, spans_all_directions[0], 0)
    xy_r2l_t2b = get_xy_array(x, y, spans_all_directions[1], 1)
    xy_l2r_b2t = get_xy_array(x, y, spans_all_directions[2], 2)
    xy_r2l_b2t = get_xy_array(x, y, spans_all_directions[3], 3)
    return xy_l2r_t2b, xy_r2l_t2b, xy_l2r_b2t, xy_r2l_b2t
@nb.njit(cache=True)
def point_on_outline(x, y, outline):
    """True if (x, y) is one of the contour points in outline."""
    x_vals, y_vals = outline
    x_true = x_vals == x
    y_true = y_vals == y
    both_true = np.logical_and(x_true, y_true)
    return np.any(both_true)
@nb.njit('Tuple((uint32[:,:,::1], uint8[:,::1], uint8[:,::1]))'
         '(UniTuple(uint32[:], 2), UniTuple(uint32[:,::1], 4))',
         parallel=True, cache=True)
def create_maps(outline, adjacencies):
    """Build the span, direction and saddle-candidate maps from the outline.

    For every outline point, candidate rectangles are generated in all
    four directions; each candidate's (w, h) is written to span_map at
    its top-left anchor if it beats the area already stored there.
    Cells reachable from 3-direction points that are not themselves on
    the outline are flagged as saddle candidates for a second pass.
    """
    x_values, y_values = outline
    h_left2right, h_right2left, v_top2bottom, v_bottom2top = adjacencies

    shape = h_left2right.shape
    span_map = np.zeros(shape + (2,), "uint32")
    direction_map = np.zeros(shape, "uint8")
    saddle_candidates_map = np.zeros(shape, "uint8")

    # outline points are independent — processed in parallel
    for idx in nb.prange(len(x_values)):
        x, y = x_values[idx], y_values[idx]
        h_vectors = h_vectors_all_directions(h_left2right, h_right2left, x, y)
        v_vectors = v_vectors_all_directions(v_top2bottom, v_bottom2top, x, y)
        span_arrays = spans_all_directions(h_vectors, v_vectors)
        n = get_n_directions(span_arrays)
        direction_map[y, x] = n
        xy_arrays = get_xy_arrays(x, y, span_arrays)
        for direction_idx in range(4):
            xy_array = xy_arrays[direction_idx]
            span_array = span_arrays[direction_idx]
            for span_idx in range(span_array.shape[0]):
                # NOTE: x, y are rebound here to the candidate's anchor
                x, y = xy_array[span_idx][0], xy_array[span_idx][1]
                w, h = span_array[span_idx][0], span_array[span_idx][1]
                # keep only the largest-area span per anchor cell
                if w*h > span_map[y, x, 0] * span_map[y, x, 1]:
                    span_map[y, x, :] = np.array([w, h], "uint32")
                if n == 3 and not point_on_outline(x, y, outline):
                    saddle_candidates_map[y, x] = np.uint8(255)

    return span_map, direction_map, saddle_candidates_map
def cells_of_interest(cells):
    """Coordinates of all non-zero cells as C-ordered uint32 (x, y) arrays."""
    rows, cols = cells.nonzero()
    xs = cols.astype("uint32", order="C")
    ys = rows.astype("uint32", order="C")
    return xs, ys
@nb.njit('uint32[:, :, :]'
         '(uint32[:,::1], uint32[:,::1], UniTuple(uint32[:], 2))',
         parallel=True, cache=True)
def span_map(h_adjacency_left2right,
             v_adjacency_top2bottom,
             cells_of_interest):
    """For each cell of interest, store the largest (w, h) span anchored there.

    Only the left-to-right / top-to-bottom direction is considered;
    cells are processed in parallel.
    """
    x_values, y_values = cells_of_interest
    span_map = np.zeros(h_adjacency_left2right.shape + (2,), dtype=np.uint32)

    for idx in nb.prange(len(x_values)):
        x, y = x_values[idx], y_values[idx]
        h_vector = h_vector_top2bottom(h_adjacency_left2right, x, y)
        v_vector = v_vector_left2right(v_adjacency_top2bottom, x, y)
        s = spans(h_vector, v_vector)
        s = biggest_span(s)
        span_map[y, x, :] = s

    return span_map
@nb.njit('uint32[:](uint32[:, :, :])', cache=True)
def biggest_span_in_span_map(span_map):
    """Return [x, y, w, h] of the largest-area span stored in the span map."""
    areas = span_map[:, :, 0] * span_map[:, :, 1]
    # first occurrence (row-major order) of the maximum area wins
    largest_rectangle_indices = np.where(areas == np.amax(areas))
    x = largest_rectangle_indices[1][0]
    y = largest_rectangle_indices[0][0]
    span = span_map[y, x]
    return np.array([x, y, span[0], span[1]], dtype=np.uint32)
def biggest_rectangle(*args):
    """Pick the candidate rectangle [x, y, w, h] with the largest w*h area.

    Starts from a zero-area dummy, so a candidate must have strictly
    positive area to be selected; ties keep the earlier candidate.
    """
    best = np.array([0, 0, 0, 0], dtype=np.uint32)
    best_area = best[2] * best[3]
    for candidate in args:
        area = candidate[2] * candidate[3]
        if area > best_area:
            best, best_area = candidate, area
    return best

@ -1,12 +0,0 @@
from .megapix_scaler import MegapixScaler
class MegapixDownscaler(MegapixScaler):
@staticmethod
def force_downscale(scale):
return min(1.0, scale)
def set_scale(self, scale):
scale = self.force_downscale(scale)
super().set_scale(scale)

@ -25,3 +25,14 @@ class MegapixScaler:
width = int(round(img_size[0] * self.scale))
height = int(round(img_size[1] * self.scale))
return (width, height)
class MegapixDownscaler(MegapixScaler):
    """MegapixScaler variant that never upscales: the scale is capped at 1.0."""

    @staticmethod
    def force_downscale(scale):
        # Clamp so images are only ever shrunk, never enlarged.
        return min(1.0, scale)

    def set_scale(self, scale):
        scale = self.force_downscale(scale)
        super().set_scale(scale)

@ -1,27 +0,0 @@
import statistics
def estimate_final_panorama_dimensions(cameras, warper, img_handler):
    """Estimate the final panorama scale and per-image corners and sizes.

    The warping scale is estimated from the cameras (calibrated on
    medium-resolution images) and converted to the final resolution via
    the handler's medium-to-final ratio. Each image's ROI is then
    obtained by warping its final-resolution size.

    Returns (panorama_scale, panorama_corners, panorama_sizes).
    """
    medium_to_final_ratio = img_handler.get_medium_to_final_ratio()

    panorama_scale_determined_on_medium_img = \
        estimate_panorama_scale(cameras)

    panorama_scale = (panorama_scale_determined_on_medium_img *
                      medium_to_final_ratio)
    panorama_corners = []
    panorama_sizes = []

    for size, camera in zip(img_handler.img_sizes, cameras):
        width, height = img_handler.final_scaler.get_scaled_img_size(size)
        # roi is (x, y, w, h); split into corner and size
        roi = warper.warp_roi(width, height, camera, panorama_scale, medium_to_final_ratio)
        panorama_corners.append(roi[0:2])
        panorama_sizes.append(roi[2:4])

    return panorama_scale, panorama_corners, panorama_sizes
def estimate_panorama_scale(cameras):
focals = [cam.focal for cam in cameras]
panorama_scale = statistics.median(focals)
return panorama_scale

@ -63,7 +63,14 @@ class SeamFinder:
return cv.dilate(seam_lines, kernel)
@staticmethod
def blend_seam_masks(seam_masks, corners, sizes, colors=[
def blend_seam_masks(seam_masks, corners, sizes):
imgs = colored_img_generator(sizes)
blended_seam_masks, _ = \
Blender.create_panorama(imgs, seam_masks, corners, sizes)
return blended_seam_masks
def colored_img_generator(sizes, colors=(
(255, 000, 000), # Blue
(000, 000, 255), # Red
(000, 255, 000), # Green
@ -72,21 +79,13 @@ class SeamFinder:
(128, 128, 255), # Pink
(128, 128, 128), # Gray
(000, 000, 128), # Brown
(000, 128, 255)] # Orange
(000, 128, 255)) # Orange
):
blender = Blender("no")
blender.prepare(corners, sizes)
for idx, (seam_mask, size, corner) in enumerate(
zip(seam_masks, sizes, corners)):
if idx+1 > len(colors):
raise ValueError("Not enough default colors! Pass additional "
"colors to \"colors\" parameter")
one_color_img = create_img_by_size(size, colors[idx])
blender.feed(one_color_img, seam_mask, corner)
return blender.blend()
for idx, size in enumerate(sizes):
if idx+1 > len(colors):
raise ValueError("Not enough default colors! Pass additional "
"colors to \"colors\" parameter")
yield create_img_by_size(size, colors[idx])
def create_img_by_size(size, color=(0, 0, 0)):

@ -8,7 +8,7 @@ from .camera_estimator import CameraEstimator
from .camera_adjuster import CameraAdjuster
from .camera_wave_corrector import WaveCorrector
from .warper import Warper
from .panorama_estimation import estimate_final_panorama_dimensions
from .cropper import Cropper
from .exposure_error_compensator import ExposureErrorCompensator
from .seam_finder import SeamFinder
from .blender import Blender
@ -33,6 +33,7 @@ class Stitcher:
"wave_correct_kind": WaveCorrector.DEFAULT_WAVE_CORRECTION,
"warper_type": Warper.DEFAULT_WARP_TYPE,
"low_megapix": ImageHandler.DEFAULT_LOW_MEGAPIX,
"crop": Cropper.DEFAULT_CROP,
"compensator": ExposureErrorCompensator.DEFAULT_COMPENSATOR,
"nr_feeds": ExposureErrorCompensator.DEFAULT_NR_FEEDS,
"block_size": ExposureErrorCompensator.DEFAULT_BLOCK_SIZE,
@ -68,6 +69,7 @@ class Stitcher:
CameraAdjuster(args.adjuster, args.refinement_mask)
self.wave_corrector = WaveCorrector(args.wave_correct_kind)
self.warper = Warper(args.warper_type)
self.cropper = Cropper(args.crop)
self.compensator = \
ExposureErrorCompensator(args.compensator, args.nr_feeds,
args.block_size)
@ -77,7 +79,6 @@ class Stitcher:
def stitch(self, img_names):
self.initialize_registration(img_names)
imgs = self.resize_medium_resolution()
features = self.find_features(imgs)
matches = self.match_features(features)
@ -85,22 +86,26 @@ class Stitcher:
cameras = self.estimate_camera_parameters(features, matches)
cameras = self.refine_camera_parameters(features, matches, cameras)
cameras = self.perform_wave_correction(cameras)
panorama_scale, panorama_corners, panorama_sizes = \
self.estimate_final_panorama_dimensions(cameras)
self.initialize_composition(panorama_corners, panorama_sizes)
self.estimate_scale(cameras)
imgs = self.resize_low_resolution(imgs)
imgs = self.warp_low_resolution_images(imgs, cameras, panorama_scale)
self.estimate_exposure_errors(imgs)
seam_masks = self.find_seam_masks(imgs)
imgs, masks, corners, sizes = self.warp_low_resolution(imgs, cameras)
self.prepare_cropper(imgs, masks, corners, sizes)
imgs, masks, corners, sizes = \
self.crop_low_resolution(imgs, masks, corners, sizes)
self.estimate_exposure_errors(corners, imgs, masks)
seam_masks = self.find_seam_masks(imgs, corners, masks)
imgs = self.resize_final_resolution()
imgs = self.warp_final_resolution_images(imgs, cameras, panorama_scale)
imgs = self.compensate_exposure_errors(imgs)
imgs, masks, corners, sizes = self.warp_final_resolution(imgs, cameras)
imgs, masks, corners, sizes = \
self.crop_final_resolution(imgs, masks, corners, sizes)
self.set_masks(masks)
imgs = self.compensate_exposure_errors(corners, imgs)
seam_masks = self.resize_seam_masks(seam_masks)
self.blend_images(imgs, seam_masks)
self.initialize_composition(corners, sizes)
self.blend_images(imgs, seam_masks, corners)
return self.create_final_panorama()
def initialize_registration(self, img_names):
@ -132,76 +137,100 @@ class Stitcher:
def perform_wave_correction(self, cameras):
return self.wave_corrector.correct(cameras)
def estimate_final_panorama_dimensions(self, cameras):
return estimate_final_panorama_dimensions(cameras, self.warper,
self.img_handler)
def initialize_composition(self, corners, sizes):
if self.timelapser.do_timelapse:
self.timelapser.initialize(corners, sizes)
else:
self.blender.prepare(corners, sizes)
def estimate_scale(self, cameras):
self.warper.set_scale(cameras)
def resize_low_resolution(self, imgs=None):
return list(self.img_handler.resize_to_low_resolution(imgs))
def warp_low_resolution_images(self, imgs, cameras, final_scale):
def warp_low_resolution(self, imgs, cameras):
sizes = self.img_handler.get_low_img_sizes()
camera_aspect = self.img_handler.get_medium_to_low_ratio()
scale = final_scale * self.img_handler.get_final_to_low_ratio()
return list(self.warp_images(imgs, cameras, scale, camera_aspect))
imgs, masks, corners, sizes = \
self.warp(imgs, cameras, sizes, camera_aspect)
return list(imgs), list(masks), corners, sizes
def warp_final_resolution_images(self, imgs, cameras, scale):
def warp_final_resolution(self, imgs, cameras):
sizes = self.img_handler.get_final_img_sizes()
camera_aspect = self.img_handler.get_medium_to_final_ratio()
return self.warp_images(imgs, cameras, scale, camera_aspect)
return self.warp(imgs, cameras, sizes, camera_aspect)
def warp(self, imgs, cameras, sizes, aspect=1):
imgs = self.warper.warp_images(imgs, cameras, aspect)
masks = self.warper.create_and_warp_masks(sizes, cameras, aspect)
corners, sizes = self.warper.warp_rois(sizes, cameras, aspect)
return imgs, masks, corners, sizes
def prepare_cropper(self, imgs, masks, corners, sizes):
self.cropper.prepare(imgs, masks, corners, sizes)
def warp_images(self, imgs, cameras, scale, aspect=1):
self._masks = []
self._corners = []
for img_warped, mask_warped, corner in \
self.warper.warp_images_and_image_masks(
imgs, cameras, scale, aspect
):
self._masks.append(mask_warped)
self._corners.append(corner)
yield img_warped
def crop_low_resolution(self, imgs, masks, corners, sizes):
imgs, masks, corners, sizes = self.crop(imgs, masks, corners, sizes)
return list(imgs), list(masks), corners, sizes
def estimate_exposure_errors(self, imgs):
self.compensator.feed(self._corners, imgs, self._masks)
def crop_final_resolution(self, imgs, masks, corners, sizes):
lir_aspect = self.img_handler.get_low_to_final_ratio()
return self.crop(imgs, masks, corners, sizes, lir_aspect)
def find_seam_masks(self, imgs):
return self.seam_finder.find(imgs, self._corners, self._masks)
def crop(self, imgs, masks, corners, sizes, aspect=1):
masks = self.cropper.crop_images(masks, aspect)
imgs = self.cropper.crop_images(imgs, aspect)
corners, sizes = self.cropper.crop_rois(corners, sizes, aspect)
return imgs, masks, corners, sizes
def estimate_exposure_errors(self, corners, imgs, masks):
self.compensator.feed(corners, imgs, masks)
def find_seam_masks(self, imgs, corners, masks):
return self.seam_finder.find(imgs, corners, masks)
def resize_final_resolution(self):
return self.img_handler.resize_to_final_resolution()
def compensate_exposure_errors(self, imgs):
for idx, img in enumerate(imgs):
yield self.compensator.apply(idx, self._corners[idx],
img, self._masks[idx])
def compensate_exposure_errors(self, corners, imgs):
for idx, (corner, img) in enumerate(zip(corners, imgs)):
yield self.compensator.apply(idx, corner, img, self.get_mask(idx))
def resize_seam_masks(self, seam_masks):
for idx, seam_mask in enumerate(seam_masks):
yield SeamFinder.resize(seam_mask, self._masks[idx])
yield SeamFinder.resize(seam_mask, self.get_mask(idx))
def set_masks(self, mask_generator):
self.masks = mask_generator
self.mask_index = -1
def get_mask(self, idx):
if idx == self.mask_index + 1:
self.mask_index += 1
self.mask = next(self.masks)
return self.mask
elif idx == self.mask_index:
return self.mask
else:
raise StitchingError("Invalid Mask Index!")
def initialize_composition(self, corners, sizes):
if self.timelapser.do_timelapse:
self.timelapser.initialize(corners, sizes)
else:
self.blender.prepare(corners, sizes)
def blend_images(self, imgs, masks):
for idx, (img, mask) in enumerate(zip(imgs, masks)):
def blend_images(self, imgs, masks, corners):
for idx, (img, mask, corner) in enumerate(zip(imgs, masks, corners)):
if self.timelapser.do_timelapse:
self.timelapser.process_and_save_frame(
self.img_handler.img_names[idx], img, self._corners[idx]
self.img_handler.img_names[idx], img, corner
)
else:
self.blender.feed(img, mask, self._corners[idx])
self.blender.feed(img, mask, corner)
def create_final_panorama(self):
if not self.timelapser.do_timelapse:
return self.blender.blend()
panorama, _ = self.blender.blend()
return panorama
@staticmethod
def validate_kwargs(kwargs):
for arg in kwargs:
if arg not in Stitcher.DEFAULT_SETTINGS:
raise StitchingError("Invalid Argument: " + arg)
def collect_garbage(self):
del self.img_handler.img_names, self.img_handler.img_sizes,
del self._corners, self._masks

@ -44,13 +44,12 @@ class Subsetter:
indices = cv.detail.leaveBiggestComponent(features,
pairwise_matches,
self.confidence_threshold)
indices_as_list = [int(idx) for idx in list(indices[:, 0])]
if len(indices_as_list) < 2:
if len(indices) < 2:
raise StitchingError("No match exceeds the "
"given confidence theshold.")
return indices_as_list
return indices
@staticmethod
def subset_list(list_to_subset, indices):

@ -7,9 +7,8 @@ import cv2 as cv
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__),
'..', '..')))
from opencv_stitching.megapix_scaler import MegapixScaler
from opencv_stitching.megapix_downscaler import MegapixDownscaler
#%%
from opencv_stitching.megapix_scaler import MegapixScaler, MegapixDownscaler
# %%
class TestScaler(unittest.TestCase):

@ -14,6 +14,7 @@ from stitching_detailed import main
class TestStitcher(unittest.TestCase):
@unittest.skip("skip performance test (not needed in every run)")
def test_performance(self):
print("Run new Stitcher class:")
@ -25,7 +26,6 @@ class TestStitcher(unittest.TestCase):
stitcher.stitch(["boat5.jpg", "boat2.jpg",
"boat3.jpg", "boat4.jpg",
"boat1.jpg", "boat6.jpg"])
stitcher.collect_garbage()
_, peak_memory = tracemalloc.get_traced_memory()
tracemalloc.stop()

@ -70,8 +70,8 @@ class TestImageRegistration(unittest.TestCase):
indices_to_delete = subsetter.get_indices_to_delete(len(img_names),
indices)
self.assertEqual(indices, [2, 3, 4])
self.assertEqual(indices_to_delete, [0, 1])
np.testing.assert_array_equal(indices, np.array([2, 3, 4]))
np.testing.assert_array_equal(indices_to_delete, np.array([0, 1]))
subsetted_image_names = subsetter.subset_list(img_names, indices)
self.assertEqual(subsetted_image_names,

@ -15,7 +15,7 @@ from opencv_stitching.stitcher import Stitcher
class TestStitcher(unittest.TestCase):
def test_stitcher_aquaduct(self):
stitcher = Stitcher(n_features=250)
stitcher = Stitcher(nfeatures=250)
result = stitcher.stitch(["s1.jpg", "s2.jpg"])
cv.imwrite("result.jpg", result)
@ -30,7 +30,7 @@ class TestStitcher(unittest.TestCase):
"wave_correct_kind": "no",
"finder": "dp_colorgrad",
"compensator": "no",
"conf_thresh": 0.3}
"confidence_threshold": 0.3}
stitcher = Stitcher(**settings)
result = stitcher.stitch(["boat5.jpg", "boat2.jpg",
@ -49,7 +49,7 @@ class TestStitcher(unittest.TestCase):
settings = {"warper_type": "compressedPlaneA2B1",
"finder": "dp_colorgrad",
"compensator": "channel_blocks",
"conf_thresh": 0.3}
"confidence_threshold": 0.3}
stitcher = Stitcher(**settings)
result = stitcher.stitch(["boat5.jpg", "boat2.jpg",
@ -64,7 +64,7 @@ class TestStitcher(unittest.TestCase):
atol=max_image_shape_derivation)
def test_stitcher_boat_aquaduct_subset(self):
settings = {"final_megapix": 1}
settings = {"final_megapix": 1, "crop": True}
stitcher = Stitcher(**settings)
result = stitcher.stitch(["boat5.jpg",
@ -76,7 +76,7 @@ class TestStitcher(unittest.TestCase):
max_image_shape_derivation = 100
np.testing.assert_allclose(result.shape[:2],
(839, 3384),
(705, 3374),
atol=max_image_shape_derivation)
def test_stitcher_budapest(self):

@ -1,3 +1,5 @@
from statistics import median
import cv2 as cv
import numpy as np
@ -15,48 +17,54 @@ class Warper:
DEFAULT_WARP_TYPE = 'spherical'
def __init__(self, warper_type=DEFAULT_WARP_TYPE, scale=1):
def __init__(self, warper_type=DEFAULT_WARP_TYPE):
self.warper_type = warper_type
self.warper = cv.PyRotationWarper(warper_type, scale)
self.scale = scale
self.scale = None
def set_scale(self, cameras):
focals = [cam.focal for cam in cameras]
self.scale = median(focals)
def warp_images_and_image_masks(self, imgs, cameras, scale=None, aspect=1):
self.update_scale(scale)
def warp_images(self, imgs, cameras, aspect=1):
for img, camera in zip(imgs, cameras):
yield self.warp_image_and_image_mask(img, camera, scale, aspect)
yield self.warp_image(img, camera, aspect)
def warp_image_and_image_mask(self, img, camera, scale=None, aspect=1):
self.update_scale(scale)
corner, img_warped = self.warp_image(img, camera, aspect)
mask = 255 * np.ones((img.shape[0], img.shape[1]), np.uint8)
_, mask_warped = self.warp_image(mask, camera, aspect, mask=True)
return img_warped, mask_warped, corner
def warp_image(self, img, camera, aspect=1):
warper = cv.PyRotationWarper(self.warper_type, self.scale*aspect)
_, warped_image = warper.warp(img,
Warper.get_K(camera, aspect),
camera.R,
cv.INTER_LINEAR,
cv.BORDER_REFLECT)
return warped_image
def warp_image(self, image, camera, aspect=1, mask=False):
if mask:
interp_mode = cv.INTER_NEAREST
border_mode = cv.BORDER_CONSTANT
else:
interp_mode = cv.INTER_LINEAR
border_mode = cv.BORDER_REFLECT
def create_and_warp_masks(self, sizes, cameras, aspect=1):
for size, camera in zip(sizes, cameras):
yield self.create_and_warp_mask(size, camera, aspect)
corner, warped_image = self.warper.warp(image,
Warper.get_K(camera, aspect),
camera.R,
interp_mode,
border_mode)
return corner, warped_image
def create_and_warp_mask(self, size, camera, aspect=1):
warper = cv.PyRotationWarper(self.warper_type, self.scale*aspect)
mask = 255 * np.ones((size[1], size[0]), np.uint8)
_, warped_mask = warper.warp(mask,
Warper.get_K(camera, aspect),
camera.R,
cv.INTER_NEAREST,
cv.BORDER_CONSTANT)
return warped_mask
def warp_roi(self, width, height, camera, scale=None, aspect=1):
self.update_scale(scale)
roi = (width, height)
K = Warper.get_K(camera, aspect)
return self.warper.warpRoi(roi, K, camera.R)
def warp_rois(self, sizes, cameras, aspect=1):
roi_corners = []
roi_sizes = []
for size, camera in zip(sizes, cameras):
roi = self.warp_roi(size, camera, aspect)
roi_corners.append(roi[0:2])
roi_sizes.append(roi[2:4])
return roi_corners, roi_sizes
def update_scale(self, scale):
if scale is not None and scale != self.scale:
self.warper = cv.PyRotationWarper(self.warper_type, scale) # setScale not working: https://docs.opencv.org/5.x/d5/d76/classcv_1_1PyRotationWarper.html#a90b000bb75f95294f9b0b6ec9859eb55
self.scale = scale
def warp_roi(self, size, camera, aspect=1):
warper = cv.PyRotationWarper(self.warper_type, self.scale*aspect)
K = Warper.get_K(camera, aspect)
return self.warper.warpRoi(size, K, camera.R)
@staticmethod
def get_K(camera, aspect=1):

@ -23,6 +23,7 @@ from opencv_stitching.camera_estimator import CameraEstimator
from opencv_stitching.camera_adjuster import CameraAdjuster
from opencv_stitching.camera_wave_corrector import WaveCorrector
from opencv_stitching.warper import Warper
from opencv_stitching.cropper import Cropper
from opencv_stitching.exposure_error_compensator import ExposureErrorCompensator # noqa
from opencv_stitching.seam_finder import SeamFinder
from opencv_stitching.blender import Blender
@ -72,9 +73,7 @@ parser.add_argument(
type=int, dest='range_width'
)
parser.add_argument(
'--try_use_gpu',
action='store',
default=False,
'--try_use_gpu', action='store', default=False,
help="Try to use CUDA. The default value is no. "
"All default values are for CPU mode.",
type=bool, dest='try_use_gpu'
@ -146,6 +145,13 @@ parser.add_argument(
"The default is %s Mpx." % ImageHandler.DEFAULT_LOW_MEGAPIX,
type=float, dest='low_megapix'
)
parser.add_argument(
'--crop', action='store', default=Cropper.DEFAULT_CROP,
help="Crop black borders around images caused by warping using the "
"largest interior rectangle. "
"Default is '%s'." % Cropper.DEFAULT_CROP,
type=bool, dest='crop'
)
parser.add_argument(
'--compensator', action='store',
default=ExposureErrorCompensator.DEFAULT_COMPENSATOR,

@ -119,12 +119,12 @@ if(CV_GCC OR CV_CLANG)
# we want.
add_extra_compiler_option(-Wall)
endif()
add_extra_compiler_option(-Werror=return-type)
add_extra_compiler_option(-Werror=non-virtual-dtor)
add_extra_compiler_option(-Werror=address)
add_extra_compiler_option(-Werror=sequence-point)
add_extra_compiler_option(-Wreturn-type)
add_extra_compiler_option(-Wnon-virtual-dtor)
add_extra_compiler_option(-Waddress)
add_extra_compiler_option(-Wsequence-point)
add_extra_compiler_option(-Wformat)
add_extra_compiler_option(-Werror=format-security -Wformat)
add_extra_compiler_option(-Wformat-security -Wformat)
add_extra_compiler_option(-Wmissing-declarations)
add_extra_compiler_option(-Wmissing-prototypes)
add_extra_compiler_option(-Wstrict-prototypes)
@ -367,6 +367,22 @@ if(NOT OPENCV_SKIP_LINK_AS_NEEDED)
endif()
endif()
# Apply "-Wl,--no-undefined" linker flags: https://github.com/opencv/opencv/pull/21347
if(NOT OPENCV_SKIP_LINK_NO_UNDEFINED)
if(UNIX AND (NOT APPLE OR NOT CMAKE_VERSION VERSION_LESS "3.2"))
set(_option "-Wl,--no-undefined")
set(_saved_CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS}")
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} ${_option}") # requires CMake 3.2+ and CMP0056
ocv_check_compiler_flag(CXX "" HAVE_LINK_NO_UNDEFINED)
set(CMAKE_EXE_LINKER_FLAGS "${_saved_CMAKE_EXE_LINKER_FLAGS}")
if(HAVE_LINK_NO_UNDEFINED)
set(OPENCV_EXTRA_EXE_LINKER_FLAGS "${OPENCV_EXTRA_EXE_LINKER_FLAGS} ${_option}")
set(OPENCV_EXTRA_SHARED_LINKER_FLAGS "${OPENCV_EXTRA_SHARED_LINKER_FLAGS} ${_option}")
set(OPENCV_EXTRA_MODULE_LINKER_FLAGS "${OPENCV_EXTRA_MODULE_LINKER_FLAGS} ${_option}")
endif()
endif()
endif()
# combine all "extra" options
if(NOT OPENCV_SKIP_EXTRA_COMPILER_FLAGS)
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${OPENCV_EXTRA_FLAGS} ${OPENCV_EXTRA_C_FLAGS}")

@ -1,101 +1,38 @@
# The script detects Intel(R) Inference Engine installation
#
# Cache variables:
# INF_ENGINE_RELEASE - a number reflecting IE source interface (linked with OpenVINO release)
#
# Detect parameters:
# 1. Native cmake IE package:
# - environment variable InferenceEngine_DIR is set to location of cmake module
# 2. Custom location:
# - INF_ENGINE_INCLUDE_DIRS - headers search location
# - INF_ENGINE_LIB_DIRS - library search location
# 3. OpenVINO location:
# - environment variable INTEL_OPENVINO_DIR is set to location of OpenVINO installation dir
# - INF_ENGINE_PLATFORM - part of name of library directory representing its platform
# The script detects Intel(R) OpenVINO(TM) runtime installation
#
# Result:
# INF_ENGINE_TARGET - set to name of imported library target representing InferenceEngine
#
# - target ocv.3rdparty.openvino
macro(ocv_ie_find_extra_libraries find_prefix find_suffix)
file(GLOB libraries "${INF_ENGINE_LIB_DIRS}/${find_prefix}inference_engine*${find_suffix}")
foreach(full_path IN LISTS libraries)
get_filename_component(library "${full_path}" NAME_WE)
string(REPLACE "${find_prefix}" "" library "${library}")
if(library STREQUAL "inference_engine" OR library STREQUAL "inference_engined")
# skip
else()
add_library(${library} UNKNOWN IMPORTED)
set_target_properties(${library} PROPERTIES
IMPORTED_LOCATION "${full_path}")
list(APPEND custom_libraries ${library})
endif()
endforeach()
endmacro()
function(add_custom_ie_build _inc _lib _lib_rel _lib_dbg _msg)
if(NOT _inc OR NOT (_lib OR _lib_rel OR _lib_dbg))
if(WITH_OPENVINO)
find_package(OpenVINO QUIET)
if(OpenVINO_FOUND)
message(STATUS "OpenVINO FOUND: ${OpenVINO_VERSION}")
math(EXPR ver "${OpenVINO_VERSION_MAJOR} * 1000000 + ${OpenVINO_VERSION_MINOR} * 10000 + ${OpenVINO_VERSION_PATCH} * 100")
ocv_add_external_target(openvino "" "openvino::runtime" "INF_ENGINE_RELEASE=${ver};HAVE_NGRAPH;HAVE_DNN_NGRAPH;HAVE_INF_ENGINE")
set(HAVE_OPENVINO 1)
return()
endif()
if(NOT _lib)
if(_lib_rel)
set(_lib "${_lib_rel}")
else()
set(_lib "${_lib_dbg}")
endif()
endif()
add_library(inference_engine UNKNOWN IMPORTED)
set_target_properties(inference_engine PROPERTIES
IMPORTED_LOCATION "${_lib}"
IMPORTED_IMPLIB_RELEASE "${_lib_rel}"
IMPORTED_IMPLIB_DEBUG "${_lib_dbg}"
INTERFACE_INCLUDE_DIRECTORIES "${_inc}"
)
endif()
set(custom_libraries "")
set(__prefixes "${CMAKE_FIND_LIBRARY_PREFIXES}")
if(NOT __prefixes)
set(__prefixes "_empty_")
endif()
foreach(find_prefix ${__prefixes})
if(find_prefix STREQUAL "_empty_") # foreach doesn't iterate over empty elements
set(find_prefix "")
endif()
if(NOT DEFINED INFERENCE_ENGINE_FIND_LIBRARY_SUFFIXES) # allow custom override
set(INFERENCE_ENGINE_FIND_LIBRARY_SUFFIXES ${CMAKE_FIND_LIBRARY_SUFFIXES})
if(APPLE)
ocv_list_filterout(INFERENCE_ENGINE_FIND_LIBRARY_SUFFIXES "^.so$") # skip plugins (can't be linked)
endif()
endif()
foreach(find_suffix ${INFERENCE_ENGINE_FIND_LIBRARY_SUFFIXES})
ocv_ie_find_extra_libraries("${find_prefix}" "${find_suffix}")
endforeach()
if(NOT CMAKE_FIND_LIBRARY_SUFFIXES)
ocv_ie_find_extra_libraries("${find_prefix}" "")
endif()
endforeach()
# ======================
if(NOT INF_ENGINE_RELEASE VERSION_GREATER "2018050000")
find_library(INF_ENGINE_OMP_LIBRARY iomp5 PATHS "${INF_ENGINE_OMP_DIR}" NO_DEFAULT_PATH)
if(NOT INF_ENGINE_OMP_LIBRARY)
message(WARNING "OpenMP for IE have not been found. Set INF_ENGINE_OMP_DIR variable if you experience build errors.")
endif()
endif()
if(EXISTS "${INF_ENGINE_OMP_LIBRARY}")
set_target_properties(inference_engine PROPERTIES IMPORTED_LINK_INTERFACE_LIBRARIES "${INF_ENGINE_OMP_LIBRARY}")
if(WITH_OPENVINO)
find_package(OpenVINO QUIET)
if(OpenVINO_FOUND)
message(STATUS "OpenVINO FOUND: ${OpenVINO_VERSION}")
math(EXPR ver "${OpenVINO_VERSION_MAJOR} * 1000000 + ${OpenVINO_VERSION_MINOR} * 10000 + ${OpenVINO_VERSION_PATCH} * 100")
ocv_add_external_target(openvino "" "openvino::runtime" "INF_ENGINE_RELEASE=${ver};HAVE_NGRAPH;HAVE_DNN_NGRAPH;HAVE_INF_ENGINE")
set(HAVE_OPENVINO 1)
return()
endif()
set(INF_ENGINE_VERSION "Unknown" CACHE STRING "")
set(INF_ENGINE_TARGET "inference_engine;${custom_libraries}" PARENT_SCOPE)
message(STATUS "Detected InferenceEngine: ${_msg}")
endfunction()
endif()
# ======================
find_package(InferenceEngine QUIET)
if(InferenceEngine_FOUND)
set(INF_ENGINE_TARGET ${InferenceEngine_LIBRARIES})
set(INF_ENGINE_VERSION "${InferenceEngine_VERSION}" CACHE STRING "")
set(INF_ENGINE_VERSION "${InferenceEngine_VERSION}")
message(STATUS "Detected InferenceEngine: cmake package (${InferenceEngine_VERSION})")
endif()
@ -113,47 +50,19 @@ elseif(DEFINED INF_ENGINE_RELEASE)
endif()
set(INF_ENGINE_RELEASE "${INF_ENGINE_RELEASE_INIT}" CACHE STRING "Force IE version, should be in form YYYYAABBCC (e.g. 2020.1.0.2 -> 2020010002)")
if(NOT INF_ENGINE_TARGET AND INF_ENGINE_LIB_DIRS AND INF_ENGINE_INCLUDE_DIRS)
find_path(ie_custom_inc "inference_engine.hpp" PATHS "${INF_ENGINE_INCLUDE_DIRS}" NO_DEFAULT_PATH)
if(CMAKE_BUILD_TYPE STREQUAL "Debug")
find_library(ie_custom_lib_dbg "inference_engined" PATHS "${INF_ENGINE_LIB_DIRS}" NO_DEFAULT_PATH) # Win32 and MacOSX
endif()
find_library(ie_custom_lib "inference_engine" PATHS "${INF_ENGINE_LIB_DIRS}" NO_DEFAULT_PATH)
find_library(ie_custom_lib_rel "inference_engine" PATHS "${INF_ENGINE_LIB_DIRS}/Release" NO_DEFAULT_PATH)
find_library(ie_custom_lib_dbg "inference_engine" PATHS "${INF_ENGINE_LIB_DIRS}/Debug" NO_DEFAULT_PATH)
add_custom_ie_build("${ie_custom_inc}" "${ie_custom_lib}" "${ie_custom_lib_rel}" "${ie_custom_lib_dbg}" "INF_ENGINE_{INCLUDE,LIB}_DIRS")
endif()
set(_loc "$ENV{INTEL_OPENVINO_DIR}")
if(NOT _loc AND DEFINED ENV{INTEL_CVSDK_DIR})
set(_loc "$ENV{INTEL_CVSDK_DIR}") # OpenVINO 2018.x
endif()
if(NOT INF_ENGINE_TARGET AND _loc)
if(NOT INF_ENGINE_RELEASE VERSION_GREATER "2018050000")
set(INF_ENGINE_PLATFORM_DEFAULT "ubuntu_16.04")
else()
set(INF_ENGINE_PLATFORM_DEFAULT "")
endif()
set(INF_ENGINE_PLATFORM "${INF_ENGINE_PLATFORM_DEFAULT}" CACHE STRING "InferenceEngine platform (library dir)")
find_path(ie_custom_env_inc "inference_engine.hpp" PATHS "${_loc}/deployment_tools/inference_engine/include" NO_DEFAULT_PATH)
if(CMAKE_BUILD_TYPE STREQUAL "Debug")
find_library(ie_custom_env_lib_dbg "inference_engined" PATHS "${_loc}/deployment_tools/inference_engine/lib/${INF_ENGINE_PLATFORM}/intel64" NO_DEFAULT_PATH)
endif()
find_library(ie_custom_env_lib "inference_engine" PATHS "${_loc}/deployment_tools/inference_engine/lib/${INF_ENGINE_PLATFORM}/intel64" NO_DEFAULT_PATH)
find_library(ie_custom_env_lib_rel "inference_engine" PATHS "${_loc}/deployment_tools/inference_engine/lib/intel64/Release" NO_DEFAULT_PATH)
find_library(ie_custom_env_lib_dbg "inference_engine" PATHS "${_loc}/deployment_tools/inference_engine/lib/intel64/Debug" NO_DEFAULT_PATH)
add_custom_ie_build("${ie_custom_env_inc}" "${ie_custom_env_lib}" "${ie_custom_env_lib_rel}" "${ie_custom_env_lib_dbg}" "OpenVINO (${_loc})")
endif()
set(tgts)
set(defs)
# Add more features to the target
if(INF_ENGINE_TARGET)
set_target_properties(${INF_ENGINE_TARGET} PROPERTIES
INTERFACE_COMPILE_DEFINITIONS "HAVE_INF_ENGINE=1;INF_ENGINE_RELEASE=${INF_ENGINE_RELEASE}"
)
list(APPEND tgts ${INF_ENGINE_TARGET})
list(APPEND defs "INF_ENGINE_RELEASE=${INF_ENGINE_RELEASE}" "HAVE_INF_ENGINE")
endif()
if(WITH_NGRAPH)
if(WITH_NGRAPH OR NOT DEFINED WITH_NGRAPH)
find_package(ngraph QUIET)
if(ngraph_FOUND)
ocv_assert(TARGET ngraph::ngraph)
@ -162,5 +71,9 @@ if(WITH_NGRAPH)
endif()
message(STATUS "Detected ngraph: cmake package (${ngraph_VERSION})")
set(HAVE_NGRAPH ON)
list(APPEND tgts ngraph::ngraph)
list(APPEND defs "HAVE_NGRAPH" "HAVE_DNN_NGRAPH")
endif()
endif()
ocv_add_external_target(openvino "" "${tgts}" "${defs}")

@ -17,8 +17,10 @@ else()
unset(_zlib_ORIG_CMAKE_FIND_LIBRARY_SUFFIXES)
endif()
if(ZLIB_FOUND AND ANDROID)
if(ZLIB_LIBRARIES MATCHES "/usr/lib.*/libz.so$")
if(ZLIB_LIBRARY MATCHES "/usr/lib.*/libz.so$")
set(ZLIB_LIBRARY z)
set(ZLIB_LIBRARIES z)
set(ZLIB_LIBRARY_RELEASE z)
endif()
endif()
endif()

@ -78,10 +78,17 @@ function(ocv_create_plugin module default_name dependency_target dependency_targ
set_target_properties(${OPENCV_PLUGIN_NAME} PROPERTIES PREFIX "${OPENCV_PLUGIN_MODULE_PREFIX}")
endif()
if(APPLE)
set_target_properties(${OPENCV_PLUGIN_NAME} PROPERTIES LINK_FLAGS "-undefined dynamic_lookup")
elseif(WIN32)
# Hack for Windows only, Linux/MacOS uses global symbol table (without exact .so binding)
if(WIN32 OR NOT APPLE)
set(OPENCV_PLUGIN_NO_LINK FALSE CACHE BOOL "")
else()
set(OPENCV_PLUGIN_NO_LINK TRUE CACHE BOOL "")
endif()
if(OPENCV_PLUGIN_NO_LINK)
if(APPLE)
set_target_properties(${OPENCV_PLUGIN_NAME} PROPERTIES LINK_FLAGS "-undefined dynamic_lookup")
endif()
else()
find_package(OpenCV REQUIRED ${module} ${OPENCV_PLUGIN_DEPS})
target_link_libraries(${OPENCV_PLUGIN_NAME} PRIVATE ${OpenCV_LIBRARIES})
endif()

@ -1619,6 +1619,7 @@ function(ocv_add_external_target name inc link def)
endif()
endfunction()
# Returns the first non-interface target
function(ocv_get_imported_target imported interface)
set(__result "${interface}")

@ -8,19 +8,19 @@
| | |
| -: | :- |
| Original Author | Chengrui Wang, Yuantao Feng |
| Compatibility | OpenCV >= 4.5.1 |
| Compatibility | OpenCV >= 4.5.4 |
## Introduction
In this section, we introduce the DNN-based module for face detection and face recognition. Models can be obtained in [Models](#Models). The usage of `FaceDetectorYN` and `FaceRecognizerSF` are presented in [Usage](#Usage).
In this section, we introduce cv::FaceDetectorYN class for face detection and cv::FaceRecognizerSF class for face recognition.
## Models
There are two models (ONNX format) pre-trained and required for this module:
- [Face Detection](https://github.com/ShiqiYu/libfacedetection.train/tree/master/tasks/task1/onnx):
- Size: 337KB
- [Face Detection](https://github.com/opencv/opencv_zoo/tree/master/models/face_detection_yunet):
- Size: 338KB
- Results on WIDER Face Val set: 0.830(easy), 0.824(medium), 0.708(hard)
- [Face Recognition](https://drive.google.com/file/d/1ClK9WiB492c5OZFKveF3XiHCejoOxINW/view?usp=sharing)
- [Face Recognition](https://github.com/opencv/opencv_zoo/tree/master/models/face_recognition_sface)
- Size: 36.9MB
- Results:
@ -32,9 +32,7 @@ There are two models (ONNX format) pre-trained and required for this module:
| AgeDB-30 | 94.90% | 1.202 | 0.277 |
| CFP-FP | 94.80% | 1.253 | 0.212 |
## Usage
### DNNFaceDetector
## Code
@add_toggle_cpp
- **Downloadable code**: Click

@ -476,9 +476,10 @@ OpenCV have own DNN inference module which have own build-in engine, but can als
| `BUILD_PROTOBUF` | _ON_ | Build own copy of _protobuf_. Must be disabled if you want to use external library. |
| `PROTOBUF_UPDATE_FILES` | _OFF_ | Re-generate all .proto files. _protoc_ compiler compatible with used version of _protobuf_ must be installed. |
| `OPENCV_DNN_OPENCL` | _ON_ | Enable built-in OpenCL inference backend. |
| `WITH_INF_ENGINE` | _OFF_ | Enables [Intel Inference Engine (IE)](https://github.com/openvinotoolkit/openvino) backend. Allows to execute networks in IE format (.xml + .bin). Inference Engine must be installed either as part of [OpenVINO toolkit](https://en.wikipedia.org/wiki/OpenVINO), either as a standalone library built from sources. |
| `INF_ENGINE_RELEASE` | _2020040000_ | Defines version of Inference Engine library which is tied to OpenVINO toolkit version. Must be a 10-digit string, e.g. _2020040000_ for OpenVINO 2020.4. |
| `WITH_NGRAPH` | _OFF_ | Enables Intel NGraph library support. This library is part of Inference Engine backend which allows executing arbitrary networks read from files in multiple formats supported by OpenCV: Caffe, TensorFlow, PyTorch, Darknet, etc.. NGraph library must be installed, it is included into Inference Engine. |
| `WITH_INF_ENGINE` | _OFF_ | **Deprecated since OpenVINO 2022.1** Enables [Intel Inference Engine (IE)](https://github.com/openvinotoolkit/openvino) backend. Allows to execute networks in IE format (.xml + .bin). Inference Engine must be installed either as part of [OpenVINO toolkit](https://en.wikipedia.org/wiki/OpenVINO), either as a standalone library built from sources. |
| `INF_ENGINE_RELEASE` | _2020040000_ | **Deprecated since OpenVINO 2022.1** Defines version of Inference Engine library which is tied to OpenVINO toolkit version. Must be a 10-digit string, e.g. _2020040000_ for OpenVINO 2020.4. |
| `WITH_NGRAPH` | _OFF_ | **Deprecated since OpenVINO 2022.1** Enables Intel NGraph library support. This library is part of Inference Engine backend which allows executing arbitrary networks read from files in multiple formats supported by OpenCV: Caffe, TensorFlow, PyTorch, Darknet, etc.. NGraph library must be installed, it is included into Inference Engine. |
| `WITH_OPENVINO` | _OFF_ | Enable Intel OpenVINO Toolkit support. Should be used for OpenVINO>=2022.1 instead of `WITH_INF_ENGINE` and `WITH_NGRAPH`. |
| `OPENCV_DNN_CUDA` | _OFF_ | Enable CUDA backend. [CUDA](https://en.wikipedia.org/wiki/CUDA), CUBLAS and [CUDNN](https://developer.nvidia.com/cudnn) must be installed. |
| `WITH_HALIDE` | _OFF_ | Use experimental [Halide](https://en.wikipedia.org/wiki/Halide_(programming_language)) backend which can generate optimized code for dnn-layers at runtime. Halide must be installed. |
| `WITH_VULKAN` | _OFF_ | Enable experimental [Vulkan](https://en.wikipedia.org/wiki/Vulkan_(API)) backend. Does not require additional dependencies, but can use external Vulkan headers (`VULKAN_INCLUDE_DIRS`). |

@ -25,8 +25,7 @@ namespace cv {
dls::dls(const Mat& opoints, const Mat& ipoints)
{
N = std::max(opoints.checkVector(3, CV_32F), opoints.checkVector(3, CV_64F));
N = std::max(opoints.checkVector(3, CV_32F), opoints.checkVector(3, CV_64F));
p = Mat(3, N, CV_64F);
z = Mat(3, N, CV_64F);
mn = Mat::zeros(3, 1, CV_64F);
@ -274,7 +273,7 @@ void dls::build_coeff_matrix(const Mat& pp, Mat& Mtilde, Mat& D)
}
void dls::compute_eigenvec(const Mat& Mtilde, Mat& eigenval_real, Mat& eigenval_imag,
Mat& eigenvec_real, Mat& eigenvec_imag)
Mat& eigenvec_real, Mat& eigenvec_imag)
{
#ifdef HAVE_EIGEN
Eigen::MatrixXd Mtilde_eig, zeros_eig;
@ -606,8 +605,8 @@ Mat dls::skewsymm(const Mat * X1)
{
MatConstIterator_<double> it = X1->begin<double>();
return (Mat_<double>(3,3) << 0, -*(it+2), *(it+1),
*(it+2), 0, -*(it+0),
-*(it+1), *(it+0), 0);
*(it+2), 0, -*(it+0),
-*(it+1), *(it+0), 0);
}
Mat dls::rotx(const double t)
@ -658,4 +657,4 @@ bool dls::positive_eigenvalues(const Mat * eigenvalues)
return *(it) > 0 && *(it+1) > 0 && *(it+2) > 0;
}
}
} // namespace cv

@ -9,16 +9,16 @@ namespace cv {
class dls
{
public:
dls(const cv::Mat& opoints, const cv::Mat& ipoints);
dls(const Mat& opoints, const Mat& ipoints);
~dls();
bool compute_pose(cv::Mat& R, cv::Mat& t);
bool compute_pose(Mat& R, Mat& t);
private:
// initialisation
template <typename OpointType, typename IpointType>
void init_points(const cv::Mat& opoints, const cv::Mat& ipoints)
void init_points(const Mat& opoints, const Mat& ipoints)
{
for(int i = 0; i < N; i++)
{
@ -47,33 +47,33 @@ private:
}
// main algorithm
cv::Mat LeftMultVec(const cv::Mat& v);
void run_kernel(const cv::Mat& pp);
void build_coeff_matrix(const cv::Mat& pp, cv::Mat& Mtilde, cv::Mat& D);
void compute_eigenvec(const cv::Mat& Mtilde, cv::Mat& eigenval_real, cv::Mat& eigenval_imag,
cv::Mat& eigenvec_real, cv::Mat& eigenvec_imag);
void fill_coeff(const cv::Mat * D);
Mat LeftMultVec(const Mat& v);
void run_kernel(const Mat& pp);
void build_coeff_matrix(const Mat& pp, Mat& Mtilde, Mat& D);
void compute_eigenvec(const Mat& Mtilde, Mat& eigenval_real, Mat& eigenval_imag,
Mat& eigenvec_real, Mat& eigenvec_imag);
void fill_coeff(const Mat * D);
// useful functions
cv::Mat cayley_LS_M(const std::vector<double>& a, const std::vector<double>& b,
const std::vector<double>& c, const std::vector<double>& u);
cv::Mat Hessian(const double s[]);
cv::Mat cayley2rotbar(const cv::Mat& s);
cv::Mat skewsymm(const cv::Mat * X1);
Mat cayley_LS_M(const std::vector<double>& a, const std::vector<double>& b,
const std::vector<double>& c, const std::vector<double>& u);
Mat Hessian(const double s[]);
Mat cayley2rotbar(const Mat& s);
Mat skewsymm(const Mat * X1);
// extra functions
cv::Mat rotx(const double t);
cv::Mat roty(const double t);
cv::Mat rotz(const double t);
cv::Mat mean(const cv::Mat& M);
bool is_empty(const cv::Mat * v);
bool positive_eigenvalues(const cv::Mat * eigenvalues);
cv::Mat p, z, mn; // object-image points
Mat rotx(const double t);
Mat roty(const double t);
Mat rotz(const double t);
Mat mean(const Mat& M);
bool is_empty(const Mat * v);
bool positive_eigenvalues(const Mat * eigenvalues);
Mat p, z, mn; // object-image points
int N; // number of input points
std::vector<double> f1coeff, f2coeff, f3coeff, cost_; // coefficient for coefficients matrix
std::vector<cv::Mat> C_est_, t_est_; // optimal candidates
cv::Mat C_est__, t_est__; // optimal found solution
std::vector<Mat> C_est_, t_est_; // optimal candidates
Mat C_est__, t_est__; // optimal found solution
double cost__; // optimal found solution
};
@ -736,7 +736,7 @@ public:
{
/*if(isSymmetric(src)) {
// Fall back to OpenCV for a symmetric matrix!
cv::eigen(src, _eigenvalues, _eigenvectors);
eigen(src, _eigenvalues, _eigenvectors);
} else {*/
Mat tmp;
// Convert the given input matrix to double. Is there any way to
@ -768,6 +768,5 @@ public:
Mat eigenvectors() { return _eigenvectors; }
};
}
} // namespace cv
#endif // DLS_H

@ -103,12 +103,12 @@ void drawFrameAxes(InputOutputArray image, InputArray cameraMatrix, InputArray d
CV_Assert(length > 0);
// project axes points
vector<Point3f> axesPoints;
std::vector<Point3f> axesPoints;
axesPoints.push_back(Point3f(0, 0, 0));
axesPoints.push_back(Point3f(length, 0, 0));
axesPoints.push_back(Point3f(0, length, 0));
axesPoints.push_back(Point3f(0, 0, length));
vector<Point2f> imagePoints;
std::vector<Point2f> imagePoints;
projectPoints(axesPoints, rvec, tvec, cameraMatrix, distCoeffs, imagePoints);
// draw axes lines
@ -123,7 +123,7 @@ bool solvePnP( InputArray opoints, InputArray ipoints,
{
CV_INSTRUMENT_REGION();
vector<Mat> rvecs, tvecs;
std::vector<Mat> rvecs, tvecs;
int solutions = solvePnPGeneric(opoints, ipoints, cameraMatrix, distCoeffs, rvecs, tvecs, useExtrinsicGuess, (SolvePnPMethod)flags, rvec, tvec);
if (solutions > 0)
@ -321,8 +321,8 @@ bool solvePnPRansac(InputArray _opoints, InputArray _ipoints,
return false;
}
vector<Point3d> opoints_inliers;
vector<Point2d> ipoints_inliers;
std::vector<Point3d> opoints_inliers;
std::vector<Point2d> ipoints_inliers;
opoints = opoints.reshape(3);
ipoints = ipoints.reshape(2);
opoints.convertTo(opoints_inliers, CV_64F);
@ -472,7 +472,7 @@ int solveP3P( InputArray _opoints, InputArray _ipoints,
else
imgPts = imgPts.reshape(1, 2*imgPts.rows);
vector<double> reproj_errors(solutions);
std::vector<double> reproj_errors(solutions);
for (size_t i = 0; i < reproj_errors.size(); i++)
{
Mat rvec;
@ -753,7 +753,7 @@ static void solvePnPRefine(InputArray _objectPoints, InputArray _imagePoints,
rvec0.convertTo(rvec, CV_64F);
tvec0.convertTo(tvec, CV_64F);
vector<Point2d> ipoints_normalized;
std::vector<Point2d> ipoints_normalized;
undistortPoints(ipoints, ipoints_normalized, cameraMatrix, distCoeffs);
Mat sd = Mat(ipoints_normalized).reshape(1, npoints*2);
Mat objectPoints0 = opoints.reshape(1, npoints);
@ -847,7 +847,7 @@ int solvePnPGeneric( InputArray _opoints, InputArray _ipoints,
Mat cameraMatrix = Mat_<double>(cameraMatrix0);
Mat distCoeffs = Mat_<double>(distCoeffs0);
vector<Mat> vec_rvecs, vec_tvecs;
std::vector<Mat> vec_rvecs, vec_tvecs;
if (flags == SOLVEPNP_EPNP || flags == SOLVEPNP_DLS || flags == SOLVEPNP_UPNP)
{
if (flags == SOLVEPNP_DLS)
@ -872,7 +872,7 @@ int solvePnPGeneric( InputArray _opoints, InputArray _ipoints,
}
else if (flags == SOLVEPNP_P3P || flags == SOLVEPNP_AP3P)
{
vector<Mat> rvecs, tvecs;
std::vector<Mat> rvecs, tvecs;
solveP3P(opoints, ipoints, _cameraMatrix, _distCoeffs, rvecs, tvecs, flags);
vec_rvecs.insert(vec_rvecs.end(), rvecs.begin(), rvecs.end());
vec_tvecs.insert(vec_tvecs.end(), tvecs.begin(), tvecs.end());
@ -1120,7 +1120,7 @@ int solvePnPGeneric( InputArray _opoints, InputArray _ipoints,
for (size_t i = 0; i < vec_rvecs.size(); i++)
{
vector<Point2d> projectedPoints;
std::vector<Point2d> projectedPoints;
projectPoints(objectPoints, vec_rvecs[i], vec_tvecs[i], cameraMatrix, distCoeffs, projectedPoints);
double rmse = norm(Mat(projectedPoints, false), imagePoints, NORM_L2) / sqrt(2*projectedPoints.size());

@ -219,6 +219,12 @@ AsyncArray testAsyncException()
return p.getArrayResult();
}
namespace nested {
// Echo helper: returns its boolean argument unchanged. CV_WRAP exposes it to
// the generated language bindings — presumably so binding tests can verify
// that bool values round-trip correctly through the wrappers (confirm with
// the bindings test suite).
CV_WRAP static inline bool testEchoBooleanFunction(bool flag) {
return flag;
}
} // namespace nested
namespace fs {
CV_EXPORTS_W cv::String getCacheDirectoryForDownloads();
} // namespace fs

@ -48,16 +48,19 @@
#include "opencv2/core/types_c.h"
#ifdef __cplusplus
# ifdef _MSC_VER
/* disable warning C4190: 'function' has C-linkage specified, but returns UDT 'typename'
which is incompatible with C
/* disable MSVC warning C4190 / clang-cl -Wreturn-type-c-linkage:
'function' has C-linkage specified, but returns UDT 'typename'
which is incompatible with C
It is OK to disable it because we only extend few plain structures with
C++ constructors for simpler interoperability with C++ API of the library
*/
# pragma warning(disable:4190)
# elif defined __clang__ && __clang_major__ >= 3
# if defined(__clang__)
// handle clang on Linux and clang-cl (i. e. clang on Windows) first
# pragma GCC diagnostic ignored "-Wreturn-type-c-linkage"
# elif defined(_MSC_VER)
// then handle MSVC
# pragma warning(disable:4190)
# endif
#endif

@ -924,7 +924,7 @@ public:
INTERPROCESS = 0x04 /**< Event is suitable for interprocess use. DisableTiming must be set */
};
CV_WRAP explicit Event(Event::CreateFlags flags = Event::CreateFlags::DEFAULT);
CV_WRAP explicit Event(const Event::CreateFlags flags = Event::CreateFlags::DEFAULT);
//! records an event
CV_WRAP void record(Stream& stream = Stream::Null());
@ -946,6 +946,7 @@ private:
friend struct EventAccessor;
};
CV_ENUM_FLAGS(Event::CreateFlags)
//! @} cudacore_struct

@ -444,7 +444,16 @@ CV_EXPORTS InputOutputArray noArray();
/////////////////////////////////// MatAllocator //////////////////////////////////////
//! Usage flags for allocator
/** @brief Usage flags for allocator
@warning All flags except `USAGE_DEFAULT` are experimental.
@warning For the OpenCL allocator, `USAGE_ALLOCATE_SHARED_MEMORY` depends on
OpenCV's optional, experimental integration with OpenCL SVM. To enable this
integration, build OpenCV using the `WITH_OPENCL_SVM=ON` CMake option and, at
runtime, call `cv::ocl::Context::getDefault().setUseSVM(true);` or similar
code. Note that SVM is incompatible with OpenCL 1.x.
*/
enum UMatUsageFlags
{
USAGE_DEFAULT = 0,
@ -2076,7 +2085,7 @@ public:
Mat_<Pixel> image = Mat::zeros(3, sizes, CV_8UC3);
image.forEach<Pixel>([&](Pixel& pixel, const int position[]) -> void {
image.forEach<Pixel>([](Pixel& pixel, const int position[]) -> void {
pixel.x = position[0];
pixel.y = position[1];
pixel.z = position[2];

@ -309,8 +309,8 @@ public:
READ = 0, //!< value, open the file for reading
WRITE = 1, //!< value, open the file for writing
APPEND = 2, //!< value, open the file for appending
MEMORY = 4, //!< flag, read data from source or write data to the internal buffer (which is
//!< returned by FileStorage::release)
MEMORY = 4, /**< flag, read data from source or write data to the internal buffer (which is
returned by FileStorage::release) */
FORMAT_MASK = (7<<3), //!< mask for format flags
FORMAT_AUTO = 0, //!< flag, auto format
FORMAT_XML = (1<<3), //!< flag, XML format

@ -0,0 +1,29 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
#ifndef OPENCV_CORE_FP_CONTROL_UTILS_PRIVATE_HPP
#define OPENCV_CORE_FP_CONTROL_UTILS_PRIVATE_HPP
// Private companion of fp_control_utils.hpp: selects the implementation used
// by the FP denormals hint (OPENCV_IMPL_FP_HINTS / OPENCV_IMPL_FP_HINTS_X86).
#include "fp_control_utils.hpp"
#if OPENCV_SUPPORTS_FP_DENORMALS_HINT == 0
// disabled: the public header decided this target has no denormals hint support
#elif defined(OPENCV_IMPL_FP_HINTS)
// custom: implementation macro pre-defined externally (e.g. by a port/build config)
#elif defined(OPENCV_IMPL_FP_HINTS_X86)
// custom: x86 implementation macro pre-defined externally
#elif defined(__SSE__) || defined(__SSE2__) || defined(_M_X64) || (defined(_M_IX86_FP) && _M_IX86_FP >= 1)
// auto-detect: SSE is available, so the MXCSR-based x86 implementation can be used
#include <xmmintrin.h>
#define OPENCV_IMPL_FP_HINTS_X86 1
#define OPENCV_IMPL_FP_HINTS 1
#endif
// Default both implementation switches to 0 so they can always be tested with #if.
#ifndef OPENCV_IMPL_FP_HINTS
#define OPENCV_IMPL_FP_HINTS 0
#endif
#ifndef OPENCV_IMPL_FP_HINTS_X86
#define OPENCV_IMPL_FP_HINTS_X86 0
#endif
#endif // OPENCV_CORE_FP_CONTROL_UTILS_PRIVATE_HPP

@ -0,0 +1,69 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
#ifndef OPENCV_CORE_FP_CONTROL_UTILS_HPP
#define OPENCV_CORE_FP_CONTROL_UTILS_HPP
namespace cv {
namespace details {
// Opaque, fixed-size storage for saved floating-point environment state.
// The 64-byte reserved area keeps the ABI stable across implementations;
// callers must treat the contents as implementation-defined.
struct FPDenormalsModeState
{
uint32_t reserved[16]; // 64-bytes
}; // FPDenormalsModeState
CV_EXPORTS void setFPDenormalsIgnoreHint(bool ignore, CV_OUT FPDenormalsModeState& state);
CV_EXPORTS int saveFPDenormalsState(CV_OUT FPDenormalsModeState& state);
CV_EXPORTS bool restoreFPDenormalsState(const FPDenormalsModeState& state);
// RAII scope guard for the "ignore FP denormals" hint: applies the hint on
// construction and restores the previously saved state on destruction.
class FPDenormalsIgnoreHintScope
{
public:
// Enable (or explicitly disable) the denormals-ignore hint for this scope;
// the prior state is captured into saved_state for restoration.
inline explicit FPDenormalsIgnoreHintScope(bool ignore = true)
{
details::setFPDenormalsIgnoreHint(ignore, saved_state);
}
// Apply a previously captured state for this scope (saving the current one
// first) — used to propagate FP state, e.g. into worker threads.
inline explicit FPDenormalsIgnoreHintScope(const FPDenormalsModeState& state)
{
details::saveFPDenormalsState(saved_state);
details::restoreFPDenormalsState(state);
}
// Restore whatever state was active when this scope was entered.
inline ~FPDenormalsIgnoreHintScope()
{
details::restoreFPDenormalsState(saved_state);
}
protected:
FPDenormalsModeState saved_state; // state captured at scope entry
}; // FPDenormalsIgnoreHintScope
// No-op drop-in replacement for FPDenormalsIgnoreHintScope, used on targets
// without denormals-hint support; mirrors the same constructor signatures.
class FPDenormalsIgnoreHintScopeNOOP
{
public:
inline FPDenormalsIgnoreHintScopeNOOP(bool ignore = true) { CV_UNUSED(ignore); }
inline FPDenormalsIgnoreHintScopeNOOP(const FPDenormalsModeState& state) { CV_UNUSED(state); }
inline ~FPDenormalsIgnoreHintScopeNOOP() { }
}; // FPDenormalsIgnoreHintScopeNOOP
} // namespace details
// Should depend on target compilation architecture only
// Note: previously added archs should NOT be removed to preserve ABI compatibility
#if defined(OPENCV_SUPPORTS_FP_DENORMALS_HINT)
// preserve configuration overloading through ports
#elif defined(__i386__) || defined(__x86_64__) || defined(_M_X64) || defined(_X86_)
typedef details::FPDenormalsIgnoreHintScope FPDenormalsIgnoreHintScope;
#define OPENCV_SUPPORTS_FP_DENORMALS_HINT 1
#else
#define OPENCV_SUPPORTS_FP_DENORMALS_HINT 0
typedef details::FPDenormalsIgnoreHintScopeNOOP FPDenormalsIgnoreHintScope;
#endif
} // namespace cv
#endif // OPENCV_CORE_FP_CONTROL_UTILS_HPP

@ -684,7 +684,8 @@ VSX_IMPL_LOAD_L8(vec_double2, double)
#endif
// absolute difference
#ifndef vec_absd
#ifndef _ARCH_PWR9
# undef vec_absd
# define vec_absd(a, b) vec_sub(vec_max(a, b), vec_min(a, b))
#endif

@ -23,7 +23,7 @@ PERF_TEST_P(Size_MatType_ROp, reduceR,
int reduceOp = get<2>(GetParam());
int ddepth = -1;
if( CV_MAT_DEPTH(matType) < CV_32S && (reduceOp == CV_REDUCE_SUM || reduceOp == CV_REDUCE_AVG) )
if( CV_MAT_DEPTH(matType) < CV_32S && (reduceOp == REDUCE_SUM || reduceOp == REDUCE_AVG) )
ddepth = CV_32S;
Mat src(sz, matType);
@ -51,7 +51,7 @@ PERF_TEST_P(Size_MatType_ROp, reduceC,
int reduceOp = get<2>(GetParam());
int ddepth = -1;
if( CV_MAT_DEPTH(matType)< CV_32S && (reduceOp == CV_REDUCE_SUM || reduceOp == CV_REDUCE_AVG) )
if( CV_MAT_DEPTH(matType)< CV_32S && (reduceOp == REDUCE_SUM || reduceOp == REDUCE_AVG) )
ddepth = CV_32S;
Mat src(sz, matType);

@ -184,11 +184,8 @@ void cv::cuda::GpuMat::create(int _rows, int _cols, int _type)
if (esz * cols == step)
flags |= Mat::CONTINUOUS_FLAG;
int64 _nettosize = static_cast<int64>(step) * rows;
size_t nettosize = static_cast<size_t>(_nettosize);
datastart = data;
dataend = data + nettosize;
dataend = data + step * (rows - 1) + cols * esz;
if (refcount)
*refcount = 1;

@ -811,7 +811,7 @@ Event cv::cuda::EventAccessor::wrapEvent(cudaEvent_t event)
#endif
cv::cuda::Event::Event(CreateFlags flags)
cv::cuda::Event::Event(const Event::CreateFlags flags)
{
#ifndef HAVE_CUDA
CV_UNUSED(flags);

@ -64,6 +64,16 @@
#define HAL_LU_SMALL_MATRIX_THRESH 100
#define HAL_CHOLESKY_SMALL_MATRIX_THRESH 100
/* MemorySanitizer interop: code written by an external (uninstrumented)
   library such as LAPACK is not seen as initialized by MSan, so we must
   explicitly unpoison those buffers. Expands to a no-op in all other
   build configurations.
   Fix: the expansion previously referenced `adresse`, which does not match
   the macro parameter `address` — every use site failed to compile under
   clang builds with -fsanitize=memory. */
#if defined(__clang__) && defined(__has_feature)
#if __has_feature(memory_sanitizer)
#define CV_ANNOTATE_MEMORY_IS_INITIALIZED(address, size) \
__msan_unpoison(address, size)
#endif
#endif
#ifndef CV_ANNOTATE_MEMORY_IS_INITIALIZED
#define CV_ANNOTATE_MEMORY_IS_INITIALIZED(address, size) do { } while(0)
#endif
//lapack stores matrices in column-major order so transposing is needed everywhere
template <typename fptype> static inline void
transpose_square_inplace(fptype *src, size_t src_ld, size_t m)
@ -248,6 +258,17 @@ lapack_SVD(fptype* a, size_t a_step, fptype *w, fptype* u, size_t u_step, fptype
OCV_LAPACK_FUNC(dgesdd)(mode, &m, &n, (double*)a, &lda, (double*)w, (double*)u, &ldu,
(double*)vt, &ldv, (double*)&buffer[0], &lwork, &iworkBuf[0], info);
// Make sure MSAN sees the memory as having been written.
// MSAN does not think it has been written because a different language was called.
CV_ANNOTATE_MEMORY_IS_INITIALIZED(a, a_step * n);
CV_ANNOTATE_MEMORY_IS_INITIALIZED(buffer, sizeof(fptype) * (lwork + 1));
if (u)
CV_ANNOTATE_MEMORY_IS_INITIALIZED(u, u_step * m);
if (vt)
CV_ANNOTATE_MEMORY_IS_INITIALIZED(vt, v_step * n);
if (w)
CV_ANNOTATE_MEMORY_IS_INITIALIZED(w, sizeof(fptype) * std::min(m, n));
if(!(flags & CV_HAL_SVD_NO_UV))
transpose_square_inplace(vt, ldv, n);
@ -359,6 +380,7 @@ lapack_QR(fptype* a, size_t a_step, int m, int n, int k, fptype* b, size_t b_ste
dgeqrf_(&m, &n, (double*)tmpA, &ldtmpA, (double*)dst, (double*)buffer, &lwork, info);
}
CV_ANNOTATE_MEMORY_IS_INITIALIZED(info, sizeof(int));
if (m == n)
transpose_square_inplace(a, lda, m);
else

@ -240,7 +240,7 @@ double cv::kmeans( InputArray _data, int K,
attempts = std::max(attempts, 1);
CV_Assert( data0.dims <= 2 && type == CV_32F && K > 0 );
CV_CheckGE(N, K, "Number of clusters should be more than number of elements");
CV_CheckGE(N, K, "There can't be more clusters than elements");
Mat data(N, dims, CV_32F, data0.ptr(), isrow ? dims * sizeof(float) : static_cast<size_t>(data0.step));

@ -804,7 +804,7 @@ void calcCovarMatrix( InputArray _src, OutputArray _covar, InputOutputArray _mea
else
{
ctype = std::max(CV_MAT_DEPTH(ctype >= 0 ? ctype : type), CV_32F);
reduce( _src, _mean, takeRows ? 0 : 1, CV_REDUCE_AVG, ctype );
reduce( _src, _mean, takeRows ? 0 : 1, REDUCE_AVG, ctype );
mean = _mean.getMat();
}

@ -176,27 +176,23 @@ public:
}
};
namespace
static
MatAllocator*& getDefaultAllocatorMatRef()
{
MatAllocator* volatile g_matAllocator = NULL;
static MatAllocator* g_matAllocator = Mat::getStdAllocator();
return g_matAllocator;
}
MatAllocator* Mat::getDefaultAllocator()
{
if (g_matAllocator == NULL)
{
cv::AutoLock lock(cv::getInitializationMutex());
if (g_matAllocator == NULL)
{
g_matAllocator = getStdAllocator();
}
}
return g_matAllocator;
return getDefaultAllocatorMatRef();
}
void Mat::setDefaultAllocator(MatAllocator* allocator)
{
g_matAllocator = allocator;
getDefaultAllocatorMatRef() = allocator;
}
MatAllocator* Mat::getStdAllocator()
{
CV_SINGLETON_LAZY_INIT(MatAllocator, new StdMatAllocator())
@ -269,7 +265,7 @@ void setSize( Mat& m, int _dims, const int* _sz, const size_t* _steps, bool auto
else if( autoSteps )
{
m.step.p[i] = total;
int64 total1 = (int64)total*s;
uint64 total1 = (uint64)total*s;
if( (uint64)total1 != (size_t)total1 )
CV_Error( CV_StsOutOfRange, "The total matrix size does not fit to \"size_t\" type" );
total = (size_t)total1;

@ -616,7 +616,7 @@ static bool ocl_reduce(InputArray _src, OutputArray _dst,
if (!doubleSupport && (sdepth == CV_64F || ddepth == CV_64F))
return false;
if (op == CV_REDUCE_AVG)
if (op == REDUCE_AVG)
{
if (sdepth < CV_32S && ddepth < CV_32S)
ddepth = CV_32S;
@ -654,7 +654,7 @@ static bool ocl_reduce(InputArray _src, OutputArray _dst,
_dst.create(dsize, dtype);
UMat dst = _dst.getUMat();
if (op0 == CV_REDUCE_AVG)
if (op0 == REDUCE_AVG)
k.args(ocl::KernelArg::ReadOnly(src),
ocl::KernelArg::WriteOnlyNoSize(dst), 1.0f / src.cols);
else
@ -690,7 +690,7 @@ static bool ocl_reduce(InputArray _src, OutputArray _dst,
ocl::KernelArg srcarg = ocl::KernelArg::ReadOnly(src),
temparg = ocl::KernelArg::WriteOnlyNoSize(dst);
if (op0 == CV_REDUCE_AVG)
if (op0 == REDUCE_AVG)
k.args(srcarg, temparg, 1.0f / (dim == 0 ? src.rows : src.cols));
else
k.args(srcarg, temparg);
@ -717,8 +717,8 @@ void cv::reduce(InputArray _src, OutputArray _dst, int dim, int op, int dtype)
int ddepth = CV_MAT_DEPTH(dtype);
CV_Assert( cn == CV_MAT_CN(dtype) );
CV_Assert( op == CV_REDUCE_SUM || op == CV_REDUCE_MAX ||
op == CV_REDUCE_MIN || op == CV_REDUCE_AVG );
CV_Assert( op == REDUCE_SUM || op == REDUCE_MAX ||
op == REDUCE_MIN || op == REDUCE_AVG );
CV_OCL_RUN(_dst.isUMat(),
ocl_reduce(_src, _dst, dim, op, op0, stype, dtype))
@ -732,9 +732,9 @@ void cv::reduce(InputArray _src, OutputArray _dst, int dim, int op, int dtype)
_dst.create(dim == 0 ? 1 : src.rows, dim == 0 ? src.cols : 1, dtype);
Mat dst = _dst.getMat(), temp = dst;
if( op == CV_REDUCE_AVG )
if( op == REDUCE_AVG )
{
op = CV_REDUCE_SUM;
op = REDUCE_SUM;
if( sdepth < CV_32S && ddepth < CV_32S )
{
temp.create(dst.rows, dst.cols, CV_32SC(cn));
@ -745,7 +745,7 @@ void cv::reduce(InputArray _src, OutputArray _dst, int dim, int op, int dtype)
ReduceFunc func = 0;
if( dim == 0 )
{
if( op == CV_REDUCE_SUM )
if( op == REDUCE_SUM )
{
if(sdepth == CV_8U && ddepth == CV_32S)
func = GET_OPTIMIZED(reduceSumR8u32s);
@ -768,7 +768,7 @@ void cv::reduce(InputArray _src, OutputArray _dst, int dim, int op, int dtype)
else if(sdepth == CV_64F && ddepth == CV_64F)
func = reduceSumR64f64f;
}
else if(op == CV_REDUCE_MAX)
else if(op == REDUCE_MAX)
{
if(sdepth == CV_8U && ddepth == CV_8U)
func = GET_OPTIMIZED(reduceMaxR8u);
@ -781,7 +781,7 @@ void cv::reduce(InputArray _src, OutputArray _dst, int dim, int op, int dtype)
else if(sdepth == CV_64F && ddepth == CV_64F)
func = reduceMaxR64f;
}
else if(op == CV_REDUCE_MIN)
else if(op == REDUCE_MIN)
{
if(sdepth == CV_8U && ddepth == CV_8U)
func = GET_OPTIMIZED(reduceMinR8u);
@ -797,7 +797,7 @@ void cv::reduce(InputArray _src, OutputArray _dst, int dim, int op, int dtype)
}
else
{
if(op == CV_REDUCE_SUM)
if(op == REDUCE_SUM)
{
if(sdepth == CV_8U && ddepth == CV_32S)
func = GET_OPTIMIZED(reduceSumC8u32s);
@ -820,7 +820,7 @@ void cv::reduce(InputArray _src, OutputArray _dst, int dim, int op, int dtype)
else if(sdepth == CV_64F && ddepth == CV_64F)
func = reduceSumC64f64f;
}
else if(op == CV_REDUCE_MAX)
else if(op == REDUCE_MAX)
{
if(sdepth == CV_8U && ddepth == CV_8U)
func = GET_OPTIMIZED(reduceMaxC8u);
@ -833,7 +833,7 @@ void cv::reduce(InputArray _src, OutputArray _dst, int dim, int op, int dtype)
else if(sdepth == CV_64F && ddepth == CV_64F)
func = reduceMaxC64f;
}
else if(op == CV_REDUCE_MIN)
else if(op == REDUCE_MIN)
{
if(sdepth == CV_8U && ddepth == CV_8U)
func = GET_OPTIMIZED(reduceMinC8u);
@ -854,7 +854,7 @@ void cv::reduce(InputArray _src, OutputArray _dst, int dim, int op, int dtype)
func( src, temp );
if( op0 == CV_REDUCE_AVG )
if( op0 == REDUCE_AVG )
temp.convertTo(dst, dst.type(), 1./(dim == 0 ? src.rows : src.cols));
}
@ -940,8 +940,8 @@ static bool ipp_sort(const Mat& src, Mat& dst, int flags)
{
CV_INSTRUMENT_REGION_IPP();
bool sortRows = (flags & 1) == CV_SORT_EVERY_ROW;
bool sortDescending = (flags & CV_SORT_DESCENDING) != 0;
bool sortRows = (flags & 1) == SORT_EVERY_ROW;
bool sortDescending = (flags & SORT_DESCENDING) != 0;
bool inplace = (src.data == dst.data);
int depth = src.depth();
IppDataType type = ippiGetDataType(depth);

@ -153,6 +153,9 @@
#include "opencv2/core/detail/exception_ptr.hpp" // CV__EXCEPTION_PTR = 1 if std::exception_ptr is available
#include <opencv2/core/utils/fp_control_utils.hpp>
#include <opencv2/core/utils/fp_control.private.hpp>
using namespace cv;
namespace cv {
@ -203,6 +206,9 @@ namespace {
// propagate main thread state
rng = cv::theRNG();
#if OPENCV_SUPPORTS_FP_DENORMALS_HINT && OPENCV_IMPL_FP_HINTS
details::saveFPDenormalsState(fp_denormals_base_state);
#endif
#ifdef OPENCV_TRACE
traceRootRegion = CV_TRACE_NS::details::getCurrentRegion();
@ -283,6 +289,11 @@ namespace {
}
}
}
#if OPENCV_SUPPORTS_FP_DENORMALS_HINT && OPENCV_IMPL_FP_HINTS
details::FPDenormalsModeState fp_denormals_base_state;
#endif
private:
ParallelLoopBodyWrapperContext(const ParallelLoopBodyWrapperContext&); // disabled
ParallelLoopBodyWrapperContext& operator=(const ParallelLoopBodyWrapperContext&); // disabled
@ -319,6 +330,9 @@ namespace {
// propagate main thread state
cv::theRNG() = ctx.rng;
#if OPENCV_SUPPORTS_FP_DENORMALS_HINT && OPENCV_IMPL_FP_HINTS
FPDenormalsIgnoreHintScope fp_denormals_scope(ctx.fp_denormals_base_state);
#endif
cv::Range r;
cv::Range wholeRange = ctx.wholeRange;

@ -9,6 +9,8 @@
#include <unordered_map>
#include <iterator>
#include <opencv2/core/utils/logger.hpp>
namespace cv
{
@ -499,21 +501,29 @@ bool FileStorage::Impl::open(const char *filename_or_buf, int _flags, const char
if (!isGZ) {
file = fopen(filename.c_str(), !write_mode ? "rt" : !append ? "wt" : "a+t");
if (!file)
{
CV_LOG_ERROR(NULL, "Can't open file: '" << filename << "' in " << (!write_mode ? "read" : !append ? "write" : "append") << " mode");
return false;
}
} else {
#if USE_ZLIB
char mode[] = {write_mode ? 'w' : 'r', 'b', compression ? compression : '3', '\0'};
gzfile = gzopen(filename.c_str(), mode);
if (!gzfile)
{
CV_LOG_ERROR(NULL, "Can't open archive: '" << filename << "' mode=" << mode);
return false;
}
#else
CV_Error(cv::Error::StsNotImplemented, "There is no compressed file storage support in this configuration");
#endif
}
}
// FIXIT release() must do that, use CV_Assert() here instead
roots.clear();
fs_data.clear();
wrap_margin = 71;
fmt = FileStorage::FORMAT_AUTO;
@ -616,14 +626,14 @@ bool FileStorage::Impl::open(const char *filename_or_buf, int _flags, const char
puts("\n");
}
emitter = createXMLEmitter(this);
emitter_do_not_use_direct_dereference = createXMLEmitter(this);
} else if (fmt == FileStorage::FORMAT_YAML) {
if (!append)
puts("%YAML:1.0\n---\n");
else
puts("...\n---\n");
emitter = createYAMLEmitter(this);
emitter_do_not_use_direct_dereference = createYAMLEmitter(this);
} else {
CV_Assert(fmt == FileStorage::FORMAT_JSON);
if (!append)
@ -653,7 +663,7 @@ bool FileStorage::Impl::open(const char *filename_or_buf, int _flags, const char
}
}
write_stack.back().indent = 4;
emitter = createJSONEmitter(this);
emitter_do_not_use_direct_dereference = createJSONEmitter(this);
}
is_opened = true;
} else {
@ -701,20 +711,20 @@ bool FileStorage::Impl::open(const char *filename_or_buf, int _flags, const char
switch (fmt) {
case FileStorage::FORMAT_XML:
parser = createXMLParser(this);
parser_do_not_use_direct_dereference = createXMLParser(this);
break;
case FileStorage::FORMAT_YAML:
parser = createYAMLParser(this);
parser_do_not_use_direct_dereference = createYAMLParser(this);
break;
case FileStorage::FORMAT_JSON:
parser = createJSONParser(this);
parser_do_not_use_direct_dereference = createJSONParser(this);
break;
default:
parser = Ptr<FileStorageParser>();
parser_do_not_use_direct_dereference = Ptr<FileStorageParser>();
}
if (!parser.empty()) {
ok = parser->parse(ptr);
if (!parser_do_not_use_direct_dereference.empty()) {
ok = getParser().parse(ptr);
if (ok) {
finalizeCollection(root_nodes);
@ -728,7 +738,9 @@ bool FileStorage::Impl::open(const char *filename_or_buf, int _flags, const char
}
}
}
catch (...) {
catch (...)
{
// FIXIT log error message
is_opened = true;
release();
throw;
@ -926,7 +938,7 @@ void FileStorage::Impl::endWriteStruct() {
if (fmt == FileStorage::FORMAT_JSON && !FileNode::isFlow(current_struct.flags) && write_stack.size() > 1)
current_struct.indent = write_stack[write_stack.size() - 2].indent;
emitter->endWriteStruct(current_struct);
getEmitter().endWriteStruct(current_struct);
write_stack.pop_back();
if (!write_stack.empty())
@ -945,7 +957,7 @@ void FileStorage::Impl::startWriteStruct_helper(const char *key, int struct_flag
if (type_name && type_name[0] == '\0')
type_name = 0;
FStructData s = emitter->startWriteStruct(write_stack.back(), key, struct_flags, type_name);
FStructData s = getEmitter().startWriteStruct(write_stack.back(), key, struct_flags, type_name);
write_stack.push_back(s);
size_t write_stack_size = write_stack.size();
@ -956,7 +968,7 @@ void FileStorage::Impl::startWriteStruct_helper(const char *key, int struct_flag
flush();
if (fmt == FileStorage::FORMAT_JSON && type_name && type_name[0] && FileNode::isMap(struct_flags)) {
emitter->write("type_id", type_name, false);
getEmitter().write("type_id", type_name, false);
}
}
@ -997,7 +1009,7 @@ void FileStorage::Impl::startWriteStruct(const char *key, int struct_flags,
void FileStorage::Impl::writeComment(const char *comment, bool eol_comment) {
CV_Assert(write_mode);
emitter->writeComment(comment, eol_comment);
getEmitter().writeComment(comment, eol_comment);
}
void FileStorage::Impl::startNextStream() {
@ -1006,7 +1018,7 @@ void FileStorage::Impl::startNextStream() {
while (!write_stack.empty())
endWriteStruct();
flush();
emitter->startNextStream();
getEmitter().startNextStream();
empty_stream = true;
write_stack.push_back(FStructData("", FileNode::EMPTY, 0));
bufofs = 0;
@ -1015,17 +1027,17 @@ void FileStorage::Impl::startNextStream() {
void FileStorage::Impl::write(const String &key, int value) {
CV_Assert(write_mode);
emitter->write(key.c_str(), value);
getEmitter().write(key.c_str(), value);
}
void FileStorage::Impl::write(const String &key, double value) {
CV_Assert(write_mode);
emitter->write(key.c_str(), value);
getEmitter().write(key.c_str(), value);
}
void FileStorage::Impl::write(const String &key, const String &value) {
CV_Assert(write_mode);
emitter->write(key.c_str(), value.c_str(), false);
getEmitter().write(key.c_str(), value.c_str(), false);
}
void FileStorage::Impl::writeRawData(const std::string &dt, const void *_data, size_t len) {
@ -1111,7 +1123,7 @@ void FileStorage::Impl::writeRawData(const std::string &dt, const void *_data, s
return;
}
emitter->writeScalar(0, ptr);
getEmitter().writeScalar(0, ptr);
}
offset = (int) (data - data0);
@ -1597,8 +1609,8 @@ FileStorage::Impl::Base64Decoder::Base64Decoder() {
eos = true;
}
void FileStorage::Impl::Base64Decoder::init(Ptr<FileStorageParser> &_parser, char *_ptr, int _indent) {
parser = _parser;
void FileStorage::Impl::Base64Decoder::init(const Ptr<FileStorageParser> &_parser, char *_ptr, int _indent) {
parser_do_not_use_direct_dereference = _parser;
ptr = _ptr;
indent = _indent;
encoded.clear();
@ -1641,9 +1653,9 @@ bool FileStorage::Impl::Base64Decoder::readMore(int needed) {
decoded.resize(sz);
ofs = 0;
CV_Assert(!parser.empty() && ptr);
CV_Assert(ptr);
char *beg = 0, *end = 0;
bool ok = parser->getBase64Row(ptr, indent, beg, end);
bool ok = getParser().getBase64Row(ptr, indent, beg, end);
ptr = end;
std::copy(beg, end, std::back_inserter(encoded));
totalchars += end - beg;
@ -1730,7 +1742,7 @@ char *FileStorage::Impl::Base64Decoder::getPtr() const { return ptr; }
char *FileStorage::Impl::parseBase64(char *ptr, int indent, FileNode &collection) {
const int BASE64_HDR_SIZE = 24;
char dt[BASE64_HDR_SIZE + 1] = {0};
base64decoder.init(parser, ptr, indent);
base64decoder.init(parser_do_not_use_direct_dereference, ptr, indent);
int i, k;

@ -139,7 +139,7 @@ public:
{
public:
Base64Decoder();
void init(Ptr<FileStorageParser>& _parser, char* _ptr, int _indent);
void init(const Ptr<FileStorageParser>& _parser, char* _ptr, int _indent);
bool readMore(int needed);
@ -155,7 +155,13 @@ public:
char* getPtr() const;
protected:
Ptr<FileStorageParser> parser;
Ptr<FileStorageParser> parser_do_not_use_direct_dereference;
FileStorageParser& getParser() const
{
if (!parser_do_not_use_direct_dereference)
CV_Error(Error::StsNullPtr, "Parser is not available");
return *parser_do_not_use_direct_dereference;
}
char* ptr;
int indent;
std::vector<char> encoded;
@ -205,8 +211,20 @@ public:
std::deque<char> outbuf;
Ptr<FileStorageEmitter> emitter;
Ptr<FileStorageParser> parser;
Ptr<FileStorageEmitter> emitter_do_not_use_direct_dereference;
FileStorageEmitter& getEmitter()
{
if (!emitter_do_not_use_direct_dereference)
CV_Error(Error::StsNullPtr, "Emitter is not available");
return *emitter_do_not_use_direct_dereference;
}
Ptr<FileStorageParser> parser_do_not_use_direct_dereference;
FileStorageParser& getParser() const
{
if (!parser_do_not_use_direct_dereference)
CV_Error(Error::StsNullPtr, "Parser is not available");
return *parser_do_not_use_direct_dereference;
}
Base64Decoder base64decoder;
base64::Base64Writer* base64_writer;
@ -228,4 +246,4 @@ public:
}
#endif
#endif

@ -55,6 +55,9 @@
#include <opencv2/core/utils/filesystem.private.hpp>
#include <opencv2/core/utils/fp_control_utils.hpp>
#include <opencv2/core/utils/fp_control.private.hpp>
#ifndef OPENCV_WITH_THREAD_SANITIZER
#if defined(__clang__) && defined(__has_feature)
#if __has_feature(thread_sanitizer)
@ -630,7 +633,7 @@ struct HWFeatures
}
}
#elif (defined __ppc64__ || defined __PPC64__) && defined __FreeBSD__
unsigned int hwcap = 0;
unsigned long hwcap = 0;
elf_aux_info(AT_HWCAP, &hwcap, sizeof(hwcap));
if (hwcap & PPC_FEATURE_HAS_VSX) {
elf_aux_info(AT_HWCAP2, &hwcap, sizeof(hwcap));
@ -2720,6 +2723,82 @@ void setUseIPP_NotExact(bool flag)
} // namespace ipp
namespace details {
#if OPENCV_IMPL_FP_HINTS_X86
#ifndef _MM_DENORMALS_ZERO_ON // requires pmmintrin.h (SSE3)
#define _MM_DENORMALS_ZERO_ON 0x0040
#endif
#ifndef _MM_DENORMALS_ZERO_MASK // requires pmmintrin.h (SSE3)
#define _MM_DENORMALS_ZERO_MASK 0x0040
#endif
#endif
// Enables/disables the x86 "denormals are zero" FP hints by updating the
// FZ (flush-to-zero) and, when SSE3 is available, DAZ (denormals-are-zero)
// bits of MXCSR. The previously active bits are captured into `state` so a
// later restoreFPDenormalsState() can undo the change.
void setFPDenormalsIgnoreHint(bool ignore, CV_OUT FPDenormalsModeState& state)
{
#if OPENCV_IMPL_FP_HINTS_X86
    unsigned csr_mask = _MM_FLUSH_ZERO_MASK;
    unsigned csr_value = ignore ? _MM_FLUSH_ZERO_ON : 0;
    if (featuresEnabled.have[CPU_SSE3])  // DAZ bit is an SSE3-era extension
    {
        csr_mask |= _MM_DENORMALS_ZERO_MASK;
        if (ignore)
            csr_value |= _MM_DENORMALS_ZERO_ON;
    }
    const unsigned prev_csr = _mm_getcsr();
    const unsigned prev_bits = prev_csr & csr_mask;
    unsigned new_csr = (prev_csr & ~csr_mask) | csr_value;
    CV_LOG_DEBUG(NULL, "core: update FP mxcsr flags = " << cv::format("0x%08x", new_csr));
    // remember which bits were touched and what they were before
    state.reserved[0] = (uint32_t)csr_mask;
    state.reserved[1] = (uint32_t)prev_bits;
    _mm_setcsr(new_csr);
#else
    CV_UNUSED(ignore); CV_UNUSED(state);
#endif
}
// Captures the current FZ/DAZ MXCSR configuration into `state` without
// modifying it. Returns the number of 32-bit values stored in
// state.reserved (0 when the platform has no such FP hints).
int saveFPDenormalsState(CV_OUT FPDenormalsModeState& state)
{
#if OPENCV_IMPL_FP_HINTS_X86
    unsigned csr_mask = _MM_FLUSH_ZERO_MASK;
    if (featuresEnabled.have[CPU_SSE3])  // DAZ bit is an SSE3-era extension
        csr_mask |= _MM_DENORMALS_ZERO_MASK;
    const unsigned csr = _mm_getcsr();
    state.reserved[0] = (uint32_t)csr_mask;         // bits of interest
    state.reserved[1] = (uint32_t)(csr & csr_mask); // their current values
    return 2;  // two slots of state.reserved are in use
#else
    CV_UNUSED(state);
    return 0;
#endif
}
// Re-applies the FZ/DAZ MXCSR bits previously captured by
// setFPDenormalsIgnoreHint() / saveFPDenormalsState().
// Returns true when the state was written back, false when the platform
// has no such FP hints (non-x86 builds).
bool restoreFPDenormalsState(const FPDenormalsModeState& state)
{
#if OPENCV_IMPL_FP_HINTS_X86
const unsigned mask = (unsigned)state.reserved[0];  // bits we are allowed to touch
CV_DbgAssert(mask != 0); // invalid state (ensure that state is properly saved earlier)
const unsigned value = (unsigned)state.reserved[1];  // previous values of those bits
// sanity: saved values must lie within the saved mask
CV_DbgCheck((int)value, value == (value & mask), "invalid SSE FP state");
const unsigned old_flags = _mm_getcsr();
unsigned flags = (old_flags & ~mask) | value;  // touch only the masked bits
CV_LOG_DEBUG(NULL, "core: restore FP mxcsr flags = " << cv::format("0x%08x", flags));
_mm_setcsr(flags);
return true;
#else
CV_UNUSED(state);
return false;
#endif
}
} // namespace details
} // namespace cv
/* End of file. */

@ -606,10 +606,36 @@ void convertToVASurface(VADisplay display, InputArray src, VASurfaceID surface,
if (status != VA_STATUS_SUCCESS)
CV_Error(cv::Error::StsError, "VA-API: vaSyncSurface failed");
bool indirect_buffer = false;
VAImage image;
status = vaDeriveImage(display, surface, &image);
if (status != VA_STATUS_SUCCESS)
CV_Error(cv::Error::StsError, "VA-API: vaDeriveImage failed");
if (status != VA_STATUS_SUCCESS){
//try vaCreateImage + vaPutImage
//pick a format
indirect_buffer = true;
int num_formats = vaMaxNumImageFormats(display);
if (num_formats <= 0)
CV_Error(cv::Error::StsError, "VA-API: vaMaxNumImageFormats failed");
std::vector<VAImageFormat> fmt_list(num_formats);
status = vaQueryImageFormats(display, fmt_list.data(), &num_formats);
if (status != VA_STATUS_SUCCESS)
CV_Error(cv::Error::StsError, "VA-API: vaQueryImageFormats failed");
VAImageFormat *selected_format = nullptr;
for (auto &fmt : fmt_list){
if (fmt.fourcc == VA_FOURCC_NV12 || fmt.fourcc == VA_FOURCC_YV12){
selected_format = &fmt;
break;
}
}
if (selected_format == nullptr)
CV_Error(cv::Error::StsError, "VA-API: vaQueryImageFormats did not return a supported format");
status = vaCreateImage(display, selected_format, size.width, size.height, &image);
if (status != VA_STATUS_SUCCESS)
CV_Error(cv::Error::StsError, "VA-API: vaCreateImage failed");
}
unsigned char* buffer = 0;
status = vaMapBuffer(display, image.buf, (void **)&buffer);
@ -627,6 +653,14 @@ void convertToVASurface(VADisplay display, InputArray src, VASurfaceID surface,
if (status != VA_STATUS_SUCCESS)
CV_Error(cv::Error::StsError, "VA-API: vaUnmapBuffer failed");
if (indirect_buffer){
status = vaPutImage(display, surface, image.image_id, 0, 0, size.width, size.height, 0, 0, size.width, size.height);
if (status != VA_STATUS_SUCCESS){
vaDestroyImage(display, image.image_id);
CV_Error(cv::Error::StsError, "VA-API: vaPutImage failed");
}
}
status = vaDestroyImage(display, image.image_id);
if (status != VA_STATUS_SUCCESS)
CV_Error(cv::Error::StsError, "VA-API: vaDestroyImage failed");
@ -711,8 +745,37 @@ void convertFromVASurface(VADisplay display, VASurfaceID surface, Size size, Out
VAImage image;
status = vaDeriveImage(display, surface, &image);
if (status != VA_STATUS_SUCCESS)
CV_Error(cv::Error::StsError, "VA-API: vaDeriveImage failed");
if (status != VA_STATUS_SUCCESS){
//try vaCreateImage + vaGetImage
//pick a format
int num_formats = vaMaxNumImageFormats(display);
if (num_formats <= 0)
CV_Error(cv::Error::StsError, "VA-API: vaMaxNumImageFormats failed");
std::vector<VAImageFormat> fmt_list(num_formats);
status = vaQueryImageFormats(display, fmt_list.data(), &num_formats);
if (status != VA_STATUS_SUCCESS)
CV_Error(cv::Error::StsError, "VA-API: vaQueryImageFormats failed");
VAImageFormat *selected_format = nullptr;
for (auto &fmt : fmt_list){
if (fmt.fourcc == VA_FOURCC_NV12 || fmt.fourcc == VA_FOURCC_YV12){
selected_format = &fmt;
break;
}
}
if (selected_format == nullptr)
CV_Error(cv::Error::StsError, "VA-API: vaQueryImageFormats did not return a supported format");
status = vaCreateImage(display, selected_format, size.width, size.height, &image);
if (status != VA_STATUS_SUCCESS)
CV_Error(cv::Error::StsError, "VA-API: vaCreateImage failed");
status = vaGetImage(display, surface, 0, 0, size.width, size.height, image.image_id);
if (status != VA_STATUS_SUCCESS){
vaDestroyImage(display, image.image_id);
CV_Error(cv::Error::StsError, "VA-API: vaPutImage failed");
}
}
unsigned char* buffer = 0;
status = vaMapBuffer(display, image.buf, (void **)&buffer);

@ -1819,8 +1819,8 @@ OCL_TEST_P(ReduceSum, Mat)
{
generateTestData();
OCL_OFF(cv::reduce(src_roi, dst_roi, dim, CV_REDUCE_SUM, dtype));
OCL_ON(cv::reduce(usrc_roi, udst_roi, dim, CV_REDUCE_SUM, dtype));
OCL_OFF(cv::reduce(src_roi, dst_roi, dim, REDUCE_SUM, dtype));
OCL_ON(cv::reduce(usrc_roi, udst_roi, dim, REDUCE_SUM, dtype));
double eps = ddepth <= CV_32S ? 1 : 7e-4;
OCL_EXPECT_MATS_NEAR(dst, eps);
@ -1835,8 +1835,8 @@ OCL_TEST_P(ReduceMax, Mat)
{
generateTestData();
OCL_OFF(cv::reduce(src_roi, dst_roi, dim, CV_REDUCE_MAX, dtype));
OCL_ON(cv::reduce(usrc_roi, udst_roi, dim, CV_REDUCE_MAX, dtype));
OCL_OFF(cv::reduce(src_roi, dst_roi, dim, REDUCE_MAX, dtype));
OCL_ON(cv::reduce(usrc_roi, udst_roi, dim, REDUCE_MAX, dtype));
OCL_EXPECT_MATS_NEAR(dst, 0);
}
@ -1850,8 +1850,8 @@ OCL_TEST_P(ReduceMin, Mat)
{
generateTestData();
OCL_OFF(cv::reduce(src_roi, dst_roi, dim, CV_REDUCE_MIN, dtype));
OCL_ON(cv::reduce(usrc_roi, udst_roi, dim, CV_REDUCE_MIN, dtype));
OCL_OFF(cv::reduce(src_roi, dst_roi, dim, REDUCE_MIN, dtype));
OCL_ON(cv::reduce(usrc_roi, udst_roi, dim, REDUCE_MIN, dtype));
OCL_EXPECT_MATS_NEAR(dst, 0);
}
@ -1865,8 +1865,8 @@ OCL_TEST_P(ReduceAvg, Mat)
{
generateTestData();
OCL_OFF(cv::reduce(src_roi, dst_roi, dim, CV_REDUCE_AVG, dtype));
OCL_ON(cv::reduce(usrc_roi, udst_roi, dim, CV_REDUCE_AVG, dtype));
OCL_OFF(cv::reduce(src_roi, dst_roi, dim, REDUCE_AVG, dtype));
OCL_ON(cv::reduce(usrc_roi, udst_roi, dim, REDUCE_AVG, dtype));
double eps = ddepth <= CV_32S ? 1 : 6e-6;
OCL_EXPECT_MATS_NEAR(dst, eps);

@ -1918,5 +1918,29 @@ TEST(Core_InputOutput, FileStorage_16F_json)
test_20279(fs);
}
// Regression test for issue 21448: writing to a FileStorage that failed to
// open (invalid output path, YAML backend) must throw, not crash.
TEST(Core_InputOutput, FileStorage_invalid_path_regression_21448_YAML)
{
FileStorage fs("invalid_path/test.yaml", cv::FileStorage::WRITE);
EXPECT_FALSE(fs.isOpened());
EXPECT_ANY_THROW(fs.write("K", 1));
fs.release();
}
// Regression test for issue 21448: writing to a FileStorage that failed to
// open (invalid output path, XML backend) must throw, not crash.
TEST(Core_InputOutput, FileStorage_invalid_path_regression_21448_XML)
{
FileStorage fs("invalid_path/test.xml", cv::FileStorage::WRITE);
EXPECT_FALSE(fs.isOpened());
EXPECT_ANY_THROW(fs.write("K", 1));
fs.release();
}
// Regression test for issue 21448: writing to a FileStorage that failed to
// open (invalid output path, JSON backend) must throw, not crash.
TEST(Core_InputOutput, FileStorage_invalid_path_regression_21448_JSON)
{
FileStorage fs("invalid_path/test.json", cv::FileStorage::WRITE);
EXPECT_FALSE(fs.isOpened());
EXPECT_ANY_THROW(fs.write("K", 1));
fs.release();
}
}} // namespace

@ -93,7 +93,7 @@ int Core_ReduceTest::checkOp( const Mat& src, int dstType, int opType, const Mat
{
int srcType = src.type();
bool support = false;
if( opType == CV_REDUCE_SUM || opType == CV_REDUCE_AVG )
if( opType == REDUCE_SUM || opType == REDUCE_AVG )
{
if( srcType == CV_8U && (dstType == CV_32S || dstType == CV_32F || dstType == CV_64F) )
support = true;
@ -106,7 +106,7 @@ int Core_ReduceTest::checkOp( const Mat& src, int dstType, int opType, const Mat
if( srcType == CV_64F && dstType == CV_64F)
support = true;
}
else if( opType == CV_REDUCE_MAX )
else if( opType == REDUCE_MAX )
{
if( srcType == CV_8U && dstType == CV_8U )
support = true;
@ -115,7 +115,7 @@ int Core_ReduceTest::checkOp( const Mat& src, int dstType, int opType, const Mat
if( srcType == CV_64F && dstType == CV_64F )
support = true;
}
else if( opType == CV_REDUCE_MIN )
else if( opType == REDUCE_MIN )
{
if( srcType == CV_8U && dstType == CV_8U)
support = true;
@ -128,7 +128,7 @@ int Core_ReduceTest::checkOp( const Mat& src, int dstType, int opType, const Mat
return cvtest::TS::OK;
double eps = 0.0;
if ( opType == CV_REDUCE_SUM || opType == CV_REDUCE_AVG )
if ( opType == REDUCE_SUM || opType == REDUCE_AVG )
{
if ( dstType == CV_32F )
eps = 1.e-5;
@ -152,10 +152,10 @@ int Core_ReduceTest::checkOp( const Mat& src, int dstType, int opType, const Mat
if( check )
{
char msg[100];
const char* opTypeStr = opType == CV_REDUCE_SUM ? "CV_REDUCE_SUM" :
opType == CV_REDUCE_AVG ? "CV_REDUCE_AVG" :
opType == CV_REDUCE_MAX ? "CV_REDUCE_MAX" :
opType == CV_REDUCE_MIN ? "CV_REDUCE_MIN" : "unknown operation type";
const char* opTypeStr = opType == REDUCE_SUM ? "REDUCE_SUM" :
opType == REDUCE_AVG ? "REDUCE_AVG" :
opType == REDUCE_MAX ? "REDUCE_MAX" :
opType == REDUCE_MIN ? "REDUCE_MIN" : "unknown operation type";
string srcTypeStr, dstTypeStr;
getMatTypeStr( src.type(), srcTypeStr );
getMatTypeStr( dstType, dstTypeStr );
@ -195,19 +195,19 @@ int Core_ReduceTest::checkCase( int srcType, int dstType, int dim, Size sz )
CV_Assert( 0 );
// 1. sum
tempCode = checkOp( src, dstType, CV_REDUCE_SUM, sum, dim );
tempCode = checkOp( src, dstType, REDUCE_SUM, sum, dim );
code = tempCode != cvtest::TS::OK ? tempCode : code;
// 2. avg
tempCode = checkOp( src, dstType, CV_REDUCE_AVG, avg, dim );
tempCode = checkOp( src, dstType, REDUCE_AVG, avg, dim );
code = tempCode != cvtest::TS::OK ? tempCode : code;
// 3. max
tempCode = checkOp( src, dstType, CV_REDUCE_MAX, max, dim );
tempCode = checkOp( src, dstType, REDUCE_MAX, max, dim );
code = tempCode != cvtest::TS::OK ? tempCode : code;
// 4. min
tempCode = checkOp( src, dstType, CV_REDUCE_MIN, min, dim );
tempCode = checkOp( src, dstType, REDUCE_MIN, min, dim );
code = tempCode != cvtest::TS::OK ? tempCode : code;
return code;
@ -315,7 +315,7 @@ TEST(Core_PCA, accuracy)
Mat rBackPrjTestPoints = rPCA.backProject( rPrjTestPoints );
Mat avg(1, sz.width, CV_32FC1 );
cv::reduce( rPoints, avg, 0, CV_REDUCE_AVG );
cv::reduce( rPoints, avg, 0, REDUCE_AVG );
Mat Q = rPoints - repeat( avg, rPoints.rows, 1 ), Qt = Q.t(), eval, evec;
Q = Qt * Q;
Q = Q /(float)rPoints.rows;
@ -1559,10 +1559,10 @@ TEST(Reduce, regression_should_fail_bug_4594)
cv::Mat src = cv::Mat::eye(4, 4, CV_8U);
std::vector<int> dst;
EXPECT_THROW(cv::reduce(src, dst, 0, CV_REDUCE_MIN, CV_32S), cv::Exception);
EXPECT_THROW(cv::reduce(src, dst, 0, CV_REDUCE_MAX, CV_32S), cv::Exception);
EXPECT_NO_THROW(cv::reduce(src, dst, 0, CV_REDUCE_SUM, CV_32S));
EXPECT_NO_THROW(cv::reduce(src, dst, 0, CV_REDUCE_AVG, CV_32S));
EXPECT_THROW(cv::reduce(src, dst, 0, REDUCE_MIN, CV_32S), cv::Exception);
EXPECT_THROW(cv::reduce(src, dst, 0, REDUCE_MAX, CV_32S), cv::Exception);
EXPECT_NO_THROW(cv::reduce(src, dst, 0, REDUCE_SUM, CV_32S));
EXPECT_NO_THROW(cv::reduce(src, dst, 0, REDUCE_AVG, CV_32S));
}
TEST(Mat, push_back_vector)

@ -3023,7 +3023,7 @@ TEST(CovariationMatrixVectorOfMatWithMean, accuracy)
cv::randu(src,cv::Scalar(-128), cv::Scalar(128));
cv::Mat goldMean;
cv::reduce(src,goldMean,0 ,CV_REDUCE_AVG, CV_32F);
cv::reduce(src,goldMean,0 ,REDUCE_AVG, CV_32F);
cv::calcCovarMatrix(src,gold,goldMean,singleMatFlags,CV_32F);

@ -4,6 +4,15 @@
#include "test_precomp.hpp"
#include <cmath>
#include "opencv2/core/utils/logger.hpp"
#include <opencv2/core/utils/fp_control_utils.hpp>
#ifdef CV_CXX11
#include <chrono>
#include <thread>
#endif
namespace opencv_test { namespace {
TEST(Core_OutputArrayCreate, _1997)
@ -243,6 +252,62 @@ TEST(Core_Parallel, propagate_exceptions)
}, cv::Exception);
}
// Parallel loop body that verifies the FP denormals hint configured on the
// main thread is propagated to every worker thread of parallel_for_.
// The baseline MXCSR state is captured at construction time (main thread);
// each worker re-reads its own state and compares.
class FPDenormalsHintCheckerParallelLoopBody : public cv::ParallelLoopBody
{
public:
FPDenormalsHintCheckerParallelLoopBody()
: isOK(true)
{
// capture baseline on the constructing (main) thread; also records how
// many reserved[] slots are meaningful (0 on unsupported platforms)
state_values_to_check = cv::details::saveFPDenormalsState(base_state);
}
~FPDenormalsHintCheckerParallelLoopBody() {}
void operator()(const cv::Range& r) const
{
CV_UNUSED(r);
cv::details::FPDenormalsModeState state;
if (cv::details::saveFPDenormalsState(state))
{
// compare the worker thread's FP state against the main-thread baseline
for (int i = 0; i < state_values_to_check; ++i)
{
if (base_state.reserved[i] != state.reserved[i])
{
CV_LOG_ERROR(NULL, cv::format("FP state[%d] mismatch: base=0x%08x thread=0x%08x", i, base_state.reserved[i], state.reserved[i]));
isOK = false;
// bring this thread back in sync so later iterations aren't noisy
cv::details::restoreFPDenormalsState(base_state);
}
}
}
else
{
// FP state is not supported
// no checks
}
#ifdef CV_CXX11
// keep workers alive long enough that the pool actually uses many threads
std::this_thread::sleep_for(std::chrono::milliseconds(100));
#endif
}
cv::details::FPDenormalsModeState base_state;  // main-thread snapshot
int state_values_to_check;                     // number of valid reserved[] entries
// NOTE(review): written concurrently by workers without synchronization;
// only ever flips true->false, and is read after parallel_for_ returns,
// so the race appears intentional/benign here — confirm if refactoring.
mutable bool isOK;
};
// Verifies that FPDenormalsIgnoreHintScope settings (both enabled and
// disabled, alternated across iterations) propagate to parallel_for_ workers.
TEST(Core_Parallel, propagate_fp_denormals_ignore_hint)
{
// oversubscribe so the range is split across all pool threads
int nThreads = std::max(1, cv::getNumThreads()) * 3;
for (int i = 0; i < 4; ++i)
{
SCOPED_TRACE(cv::format("Case=%d: FP denormals ignore hint: %s\n", i, ((i & 1) != 0) ? "enable" : "disable"));
FPDenormalsIgnoreHintScope fp_denormals_scope((i & 1) != 0);
FPDenormalsHintCheckerParallelLoopBody job;
ASSERT_NO_THROW({
parallel_for_(cv::Range(0, nThreads), job);
});
EXPECT_TRUE(job.isOK);
}
}
TEST(Core_Version, consistency)
{
// this test verifies that OpenCV version loaded in runtime

@ -6,9 +6,6 @@
#include "opencv2/ts.hpp"
#include "opencv2/ts/ocl_test.hpp"
#include "opencv2/core/core_c.h"
#include "opencv2/core/cvdef.h"
#include "opencv2/core/private.hpp"
#include "opencv2/core/hal/hal.hpp"

@ -1398,8 +1398,8 @@ TEST(UMat, testTempObjects_Mat_issue_8693)
randu(srcUMat, -1.f, 1.f);
srcUMat.copyTo(srcMat);
reduce(srcUMat, srcUMat, 0, CV_REDUCE_SUM);
reduce(srcMat, srcMat, 0, CV_REDUCE_SUM);
reduce(srcUMat, srcUMat, 0, REDUCE_SUM);
reduce(srcMat, srcMat, 0, REDUCE_SUM);
srcUMat.convertTo(srcUMat, CV_64FC1);
srcMat.convertTo(srcMat, CV_64FC1);

@ -165,24 +165,13 @@ elseif(CMAKE_CXX_COMPILER_ID STREQUAL "Clang")
endif()
set(dnn_runtime_libs "")
if(INF_ENGINE_TARGET)
set(use_nn_builder OFF)
if(TARGET inference_engine_nn_builder OR # custom imported target
TARGET IE::inference_engine_nn_builder OR # default imported target via InferenceEngineConfig.cmake
INF_ENGINE_RELEASE VERSION_LESS "2020000000") # compatibility with older versions on IE
set(use_nn_builder ON)
endif()
ocv_option(OPENCV_DNN_IE_NN_BUILDER_2019 "Build with Inference Engine NN Builder API support" ${use_nn_builder}) # future: NOT HAVE_NGRAPH
if(OPENCV_DNN_IE_NN_BUILDER_2019)
message(STATUS "DNN: Enabling Inference Engine NN Builder API support")
add_definitions(-DHAVE_DNN_IE_NN_BUILDER_2019=1)
ocv_option(OPENCV_DNN_OPENVINO "Build with OpenVINO support (2021.4+)" (TARGET ocv.3rdparty.openvino))
if(TARGET ocv.3rdparty.openvino AND OPENCV_DNN_OPENVINO)
if(NOT HAVE_OPENVINO AND NOT HAVE_NGRAPH)
message(FATAL_ERROR "DNN: Inference Engine is not supported without enabled 'nGraph'. Check build configuration.")
endif()
list(APPEND dnn_runtime_libs ${INF_ENGINE_TARGET})
endif()
if(HAVE_NGRAPH)
message(STATUS "DNN: Enabling Inference Engine nGraph API support")
add_definitions(-DHAVE_DNN_NGRAPH)
list(APPEND dnn_runtime_libs ngraph::ngraph)
list(APPEND dnn_runtime_libs ocv.3rdparty.openvino)
endif()
ocv_glob_module_sources(${sources_options} SOURCES ${fw_srcs} ${webnn_srcs})
@ -193,7 +182,7 @@ ocv_add_accuracy_tests(${dnn_runtime_libs})
set(perf_path "${CMAKE_CURRENT_LIST_DIR}/perf")
file(GLOB_RECURSE perf_srcs "${perf_path}/*.cpp")
file(GLOB_RECURSE perf_hdrs "${perf_path}/*.hpp" "${perf_path}/*.h")
ocv_add_perf_tests(${INF_ENGINE_TARGET}
ocv_add_perf_tests(${dnn_runtime_libs}
FILES test_common "${CMAKE_CURRENT_LIST_DIR}/test/test_common.hpp" "${CMAKE_CURRENT_LIST_DIR}/test/test_common.impl.hpp"
FILES Src ${perf_srcs}
FILES Include ${perf_hdrs}

@ -60,13 +60,13 @@ CV__DNN_INLINE_NS_BEGIN
struct CV_EXPORTS_W DictValue
{
DictValue(const DictValue &r);
DictValue(bool i) : type(Param::INT), pi(new AutoBuffer<int64,1>) { (*pi)[0] = i ? 1 : 0; } //!< Constructs integer scalar
DictValue(int64 i = 0) : type(Param::INT), pi(new AutoBuffer<int64,1>) { (*pi)[0] = i; } //!< Constructs integer scalar
CV_WRAP DictValue(int i) : type(Param::INT), pi(new AutoBuffer<int64,1>) { (*pi)[0] = i; } //!< Constructs integer scalar
DictValue(unsigned p) : type(Param::INT), pi(new AutoBuffer<int64,1>) { (*pi)[0] = p; } //!< Constructs integer scalar
CV_WRAP DictValue(double p) : type(Param::REAL), pd(new AutoBuffer<double,1>) { (*pd)[0] = p; } //!< Constructs floating point scalar
CV_WRAP DictValue(const String &s) : type(Param::STRING), ps(new AutoBuffer<String,1>) { (*ps)[0] = s; } //!< Constructs string scalar
DictValue(const char *s) : type(Param::STRING), ps(new AutoBuffer<String,1>) { (*ps)[0] = s; } //!< @overload
explicit DictValue(bool i) : type(Param::INT), pi(new AutoBuffer<int64,1>) { (*pi)[0] = i ? 1 : 0; } //!< Constructs integer scalar
explicit DictValue(int64 i = 0) : type(Param::INT), pi(new AutoBuffer<int64,1>) { (*pi)[0] = i; } //!< Constructs integer scalar
CV_WRAP explicit DictValue(int i) : type(Param::INT), pi(new AutoBuffer<int64,1>) { (*pi)[0] = i; } //!< Constructs integer scalar
explicit DictValue(unsigned p) : type(Param::INT), pi(new AutoBuffer<int64,1>) { (*pi)[0] = p; } //!< Constructs integer scalar
CV_WRAP explicit DictValue(double p) : type(Param::REAL), pd(new AutoBuffer<double,1>) { (*pd)[0] = p; } //!< Constructs floating point scalar
CV_WRAP explicit DictValue(const String &s) : type(Param::STRING), ps(new AutoBuffer<String,1>) { (*ps)[0] = s; } //!< Constructs string scalar
explicit DictValue(const char *s) : type(Param::STRING), ps(new AutoBuffer<String,1>) { (*ps)[0] = s; } //!< @overload
template<typename TypeIter>
static DictValue arrayInt(TypeIter begin, int size); //!< Constructs integer array

@ -134,7 +134,7 @@ CV__DNN_INLINE_NS_BEGIN
class BackendNode
{
public:
BackendNode(int backendId);
explicit BackendNode(int backendId);
virtual ~BackendNode(); //!< Virtual destructor to make polymorphism.
@ -277,18 +277,18 @@ CV__DNN_INLINE_NS_BEGIN
* Each layer input and output can be labeled to easily identify them using "%<layer_name%>[.output_name]" notation.
* This method maps label of input blob to its index into input vector.
*/
virtual int inputNameToIndex(String inputName);
virtual int inputNameToIndex(String inputName); // FIXIT const
/** @brief Returns index of output blob in output array.
* @see inputNameToIndex()
*/
CV_WRAP virtual int outputNameToIndex(const String& outputName);
CV_WRAP virtual int outputNameToIndex(const String& outputName); // FIXIT const
/**
* @brief Ask layer if it support specific backend for doing computations.
* @param[in] backendId computation backend identifier.
* @see Backend
*/
virtual bool supportBackend(int backendId);
virtual bool supportBackend(int backendId); // FIXIT const
/**
* @brief Returns Halide backend node.
@ -302,8 +302,6 @@ CV__DNN_INLINE_NS_BEGIN
*/
virtual Ptr<BackendNode> initHalide(const std::vector<Ptr<BackendWrapper> > &inputs);
virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> > &inputs);
virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> > &inputs, const std::vector<Ptr<BackendNode> >& nodes);
virtual Ptr<BackendNode> initVkCom(const std::vector<Ptr<BackendWrapper> > &inputs);
@ -495,18 +493,29 @@ CV__DNN_INLINE_NS_BEGIN
/** @brief Converts string name of the layer to the integer identifier.
* @returns id of the layer, or -1 if the layer wasn't found.
*/
CV_WRAP int getLayerId(const String &layer);
CV_WRAP int getLayerId(const String &layer) const;
CV_WRAP std::vector<String> getLayerNames() const;
/** @brief Container for strings and integers. */
/** @brief Container for strings and integers.
*
* @deprecated Use getLayerId() with int result.
*/
typedef DictValue LayerId;
/** @brief Returns pointer to layer with specified id or name which the network use. */
CV_WRAP Ptr<Layer> getLayer(LayerId layerId);
CV_WRAP Ptr<Layer> getLayer(int layerId) const;
/** @overload
* @deprecated Use int getLayerId(const String &layer)
*/
CV_WRAP inline Ptr<Layer> getLayer(const String& layerName) const { return getLayer(getLayerId(layerName)); }
/** @overload
* @deprecated to be removed
*/
CV_WRAP Ptr<Layer> getLayer(const LayerId& layerId) const;
/** @brief Returns pointers to input layers of specific layer. */
std::vector<Ptr<Layer> > getLayerInputs(LayerId layerId); // FIXIT: CV_WRAP
std::vector<Ptr<Layer> > getLayerInputs(int layerId) const; // FIXIT: CV_WRAP
/** @brief Connects output of the first layer to input of the second layer.
* @param outPin descriptor of the first layer output.
@ -531,6 +540,18 @@ CV__DNN_INLINE_NS_BEGIN
*/
void connect(int outLayerId, int outNum, int inpLayerId, int inpNum);
/** @brief Registers network output with name
*
* Function may create additional 'Identity' layer.
*
* @param outputName identifier of the output
* @param layerId identifier of the second layer
* @param outputPort number of the second layer input
*
* @returns index of bound layer (the same as layerId or newly created)
*/
int registerOutput(const std::string& outputName, int layerId, int outputPort);
/** @brief Sets outputs names of the network input pseudo layer.
*
* Each net always has special own the network input pseudo layer with id=0.
@ -662,20 +683,26 @@ CV__DNN_INLINE_NS_BEGIN
* @note If shape of the new blob differs from the previous shape,
* then the following forward pass may fail.
*/
CV_WRAP void setParam(LayerId layer, int numParam, const Mat &blob);
CV_WRAP void setParam(int layer, int numParam, const Mat &blob);
CV_WRAP inline void setParam(const String& layerName, int numParam, const Mat &blob) { return setParam(getLayerId(layerName), numParam, blob); }
/** @brief Returns parameter blob of the layer.
* @param layer name or id of the layer.
* @param numParam index of the layer parameter in the Layer::blobs array.
* @see Layer::blobs
*/
CV_WRAP Mat getParam(LayerId layer, int numParam = 0);
CV_WRAP Mat getParam(int layer, int numParam = 0) const;
CV_WRAP inline Mat getParam(const String& layerName, int numParam = 0) const { return getParam(getLayerId(layerName), numParam); }
/** @brief Returns indexes of layers with unconnected outputs.
*
* FIXIT: Rework API to registerOutput() approach, deprecate this call
*/
CV_WRAP std::vector<int> getUnconnectedOutLayers() const;
/** @brief Returns names of layers with unconnected outputs.
*
* FIXIT: Rework API to registerOutput() approach, deprecate this call
*/
CV_WRAP std::vector<String> getUnconnectedOutLayersNames() const;

@ -66,6 +66,9 @@ public:
//! Unregisters registered layer with specified type name. Thread-safe.
static void unregisterLayer(const String &type);
//! Check if layer is registered.
static bool isLayerRegistered(const std::string& type);
/** @brief Creates instance of registered layer.
* @param type type name of creating layer.
* @param params parameters which will be used for layer initialization.

@ -184,7 +184,8 @@ static inline MatShape concat(const MatShape& a, const MatShape& b)
return c;
}
static inline std::string toString(const MatShape& shape, const String& name = "")
template<typename _Tp>
static inline std::string toString(const std::vector<_Tp>& shape, const String& name = "")
{
std::ostringstream ss;
if (!name.empty())
@ -195,11 +196,14 @@ static inline std::string toString(const MatShape& shape, const String& name = "
ss << " ]";
return ss.str();
}
static inline void print(const MatShape& shape, const String& name = "")
template<typename _Tp>
static inline void print(const std::vector<_Tp>& shape, const String& name = "")
{
std::cout << toString(shape, name) << std::endl;
}
static inline std::ostream& operator<<(std::ostream &out, const MatShape& shape)
template<typename _Tp>
static inline std::ostream& operator<<(std::ostream &out, const std::vector<_Tp>& shape)
{
out << toString(shape);
return out;

@ -15,14 +15,18 @@ CV__DNN_INLINE_NS_BEGIN
/* Values for 'OPENCV_DNN_BACKEND_INFERENCE_ENGINE_TYPE' parameter */
/// @deprecated
#define CV_DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_API "NN_BUILDER"
/// @deprecated
#define CV_DNN_BACKEND_INFERENCE_ENGINE_NGRAPH "NGRAPH"
/** @brief Returns Inference Engine internal backend API.
*
* See values of `CV_DNN_BACKEND_INFERENCE_ENGINE_*` macros.
*
* Default value is controlled through `OPENCV_DNN_BACKEND_INFERENCE_ENGINE_TYPE` runtime parameter (environment variable).
* `OPENCV_DNN_BACKEND_INFERENCE_ENGINE_TYPE` runtime parameter (environment variable) is ignored since 4.6.0.
*
* @deprecated
*/
CV_EXPORTS_W cv::String getInferenceEngineBackendType();
@ -31,6 +35,8 @@ CV_EXPORTS_W cv::String getInferenceEngineBackendType();
* See values of `CV_DNN_BACKEND_INFERENCE_ENGINE_*` macros.
*
* @returns previous value of internal backend API
*
* @deprecated
*/
CV_EXPORTS_W cv::String setInferenceEngineBackendType(const cv::String& newBackendType);

@ -18,8 +18,12 @@
"(long)getFLOPS:(NSArray<IntVector*>*)netInputShapes" : { "getFLOPS" : {"name" : "getFLOPSWithNetInputShapes"} },
"(long)getFLOPS:(int)layerId netInputShape:(IntVector*)netInputShape" : { "getFLOPS" : {"name" : "getFLOPSWithLayerId"} },
"(long)getFLOPS:(int)layerId netInputShapes:(NSArray<IntVector*>*)netInputShapes" : { "getFLOPS" : {"name" : "getFLOPSWithLayerId"} },
"(Layer*)getLayer:(NSString*)layerName" : { "getLayer" : {"name" : "getLayerByName"} },
"(Layer*)getLayer:(DictValue*)layerId" : { "getLayer" : {"name" : "getLayerByDictValue"} },
"(void)getLayersShapes:(IntVector*)netInputShape layersIds:(IntVector*)layersIds inLayersShapes:(NSMutableArray<NSMutableArray<IntVector*>*>*)inLayersShapes outLayersShapes:(NSMutableArray<NSMutableArray<IntVector*>*>*)outLayersShapes" : { "getLayersShapes" : {"name" : "getLayersShapesWithNetInputShape"} },
"(void)getLayersShapes:(NSArray<IntVector*>*)netInputShapes layersIds:(IntVector*)layersIds inLayersShapes:(NSMutableArray<NSMutableArray<IntVector*>*>*)inLayersShapes outLayersShapes:(NSMutableArray<NSMutableArray<IntVector*>*>*)outLayersShapes" : { "getLayersShapes" : {"name" : "getLayersShapesWithNetInputShapes"} }
"(void)getLayersShapes:(NSArray<IntVector*>*)netInputShapes layersIds:(IntVector*)layersIds inLayersShapes:(NSMutableArray<NSMutableArray<IntVector*>*>*)inLayersShapes outLayersShapes:(NSMutableArray<NSMutableArray<IntVector*>*>*)outLayersShapes" : { "getLayersShapes" : {"name" : "getLayersShapesWithNetInputShapes"} },
"(Mat*)getParam:(NSString*)layerName numParam:(int)numParam" : { "getParam" : {"name" : "getParamByName"} },
"(void)setParam:(NSString*)layerName numParam:(int)numParam blob:(Mat*)blob" : { "setParam" : {"name" : "setParamByName"} }
}
},
"type_dict": {

@ -53,6 +53,8 @@
#include "caffe_io.hpp"
#endif
#include <opencv2/core/utils/fp_control_utils.hpp>
namespace cv {
namespace dnn {
CV__DNN_INLINE_NS_BEGIN
@ -88,6 +90,8 @@ MatShape parseBlobShape(const caffe::BlobShape& _input_shape)
class CaffeImporter
{
FPDenormalsIgnoreHintScope fp_denormals_ignore_scope;
caffe::NetParameter net;
caffe::NetParameter netBinary;

@ -51,6 +51,7 @@
#include "darknet_io.hpp"
#include <opencv2/core/utils/fp_control_utils.hpp>
namespace cv {
namespace dnn {
@ -61,6 +62,8 @@ namespace
class DarknetImporter
{
FPDenormalsIgnoreHintScope fp_denormals_ignore_scope;
darknet::NetParameter net;
public:

@ -37,11 +37,8 @@ void skipModelImport(bool skip)
void detail::LayerHandler::addMissing(const std::string& name, const std::string& type)
{
cv::AutoLock lock(getLayerFactoryMutex());
auto& registeredLayers = getLayerFactoryImpl();
// If we didn't add it, but can create it, it's custom and not missing.
if (layers.find(type) == layers.end() && registeredLayers.find(type) != registeredLayers.end())
if (!contains(type) && LayerFactory::isLayerRegistered(type))
{
return;
}
@ -51,17 +48,17 @@ void detail::LayerHandler::addMissing(const std::string& name, const std::string
bool detail::LayerHandler::contains(const std::string& type) const
{
return layers.find(type) != layers.end();
return layers.count(type) != 0;
}
void detail::LayerHandler::printMissing()
void detail::LayerHandler::printMissing() const
{
if (layers.empty())
{
return;
}
std::stringstream ss;
std::ostringstream ss;
ss << "DNN: Not supported types:\n";
for (const auto& type_names : layers)
{

File diff suppressed because it is too large Load Diff

@ -5,8 +5,8 @@
#ifndef __OPENCV_DNN_COMMON_HPP__
#define __OPENCV_DNN_COMMON_HPP__
#include <unordered_set>
#include <unordered_map>
#include <unordered_set>
#include <opencv2/dnn.hpp>
@ -59,7 +59,7 @@ class LayerHandler
public:
void addMissing(const std::string& name, const std::string& type);
bool contains(const std::string& type) const;
void printMissing();
void printMissing() const;
protected:
LayerParams getNotImplementedParams(const std::string& name, const std::string& op);
@ -71,12 +71,12 @@ private:
struct NetImplBase
{
const int networkId; // network global identifier
int networkDumpCounter; // dump counter
mutable int networkDumpCounter; // dump counter
int dumpLevel; // level of information dumps (initialized through OPENCV_DNN_NETWORK_DUMP parameter)
NetImplBase();
std::string getDumpFileNameBase();
std::string getDumpFileNameBase() const;
};
} // namespace detail

@ -108,7 +108,7 @@ bool Subgraph::match(const Ptr<ImportGraphWrapper>& net, int nodeId,
for (int j = 0; j < inputNodes.size(); ++j)
{
if (nodes[inputNodes[j]].empty()) // Unknown input node type.
if (nodes[inputNodes[j]].empty() || node->getInputName(j).empty()) // Unknown input node type.
continue;
nodeId = getInputNodeId(net, node, j);
const Ptr<ImportNodeWrapper> inpNode = net->getNode(nodeId);

@ -330,7 +330,7 @@ public:
InfEngineNgraphNode::InfEngineNgraphNode(std::shared_ptr<ngraph::Node>&& _node)
: BackendNode(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH), node(std::move(_node)) {}
InfEngineNgraphNode::InfEngineNgraphNode(std::shared_ptr<ngraph::Node>& _node)
InfEngineNgraphNode::InfEngineNgraphNode(const std::shared_ptr<ngraph::Node>& _node)
: BackendNode(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH), node(_node) {}
InfEngineNgraphNode::InfEngineNgraphNode(const std::vector<Ptr<BackendNode> >& nodes,
@ -379,16 +379,21 @@ InfEngineNgraphNet::InfEngineNgraphNet(detail::NetImplBase& netImpl, InferenceEn
device_name = "CPU";
}
void InfEngineNgraphNet::addOutput(const std::string& name)
void InfEngineNgraphNet::addOutput(const Ptr<InfEngineNgraphNode>& node)
{
requestedOutputs.push_back(name);
CV_Assert(node);
CV_Assert(node->node);
const std::string& name = node->node->get_friendly_name();
requestedOutputs.insert({name, node});
}
void InfEngineNgraphNet::setNodePtr(std::shared_ptr<ngraph::Node>* ptr) {
all_nodes.emplace((*ptr)->get_friendly_name(), ptr);
}
void InfEngineNgraphNet::release() {
void InfEngineNgraphNet::release()
{
// FIXIT release should not be conditional, release ALL
for (auto& node : components.back()) {
#if INF_ENGINE_VER_MAJOR_GT(INF_ENGINE_RELEASE_2020_4)
if (!(ngraph::op::is_parameter(node) || ngraph::op::is_output(node) || ngraph::op::is_constant(node)) ) {
@ -397,7 +402,6 @@ void InfEngineNgraphNet::setNodePtr(std::shared_ptr<ngraph::Node>* ptr) {
#endif
auto it = all_nodes.find(node->get_friendly_name());
if (it != all_nodes.end()) {
unconnectedNodes.erase(*(it->second));
it->second->reset();
all_nodes.erase(it);
}
@ -422,7 +426,8 @@ void InfEngineNgraphNet::dfs(std::shared_ptr<ngraph::Node>& node,
}
}
int InfEngineNgraphNet::getNumComponents() {
int InfEngineNgraphNet::getNumComponents()
{
if (!components.empty()) {
return components.size();
}
@ -445,17 +450,21 @@ int InfEngineNgraphNet::getNumComponents() {
void InfEngineNgraphNet::createNet(Target targetId) {
if (!hasNetOwner)
{
CV_Assert(!unconnectedNodes.empty());
CV_Assert(!requestedOutputs.empty());
ngraph::ResultVector outs;
for (auto& node : unconnectedNodes)
for (auto output_node_it = requestedOutputs.begin(); output_node_it != requestedOutputs.end(); ++output_node_it)
{
auto out = std::make_shared<ngraph::op::Result>(node);
CV_LOG_DEBUG(NULL, "DNN/NGRAPH: Add 'Result' output: " << output_node_it->first);
CV_Assert(output_node_it->second);
auto out = std::make_shared<ngraph::op::Result>(output_node_it->second->node);
outs.push_back(out);
}
CV_Assert_N(!inputs_vec.empty(), !outs.empty());
ngraph_function = std::make_shared<ngraph::Function>(outs, inputs_vec);
int num_comp = getNumComponents();
CV_LOG_DEBUG(NULL, "DNN/IE: number of subgraphs: " << num_comp);
if (num_comp > 1) {
for (int i = num_comp - 1; i >= 0; --i) {
ngraph::ResultVector outputs;
@ -466,6 +475,7 @@ void InfEngineNgraphNet::createNet(Target targetId) {
#else
if (node->is_parameter()) {
#endif
CV_LOG_DEBUG(NULL, "DNN/IE: subgraph[" << i << "]: +input[" << inps.size() << "] = '" << node->get_friendly_name() << "'");
auto parameter = std::dynamic_pointer_cast<ngraph::op::Parameter>(node);
inps.push_back(parameter);
}
@ -474,10 +484,12 @@ void InfEngineNgraphNet::createNet(Target targetId) {
#else
else if (node->is_output()) {
#endif
CV_LOG_DEBUG(NULL, "DNN/IE: subgraph[" << i << "]: +output[" << outputs.size() << "] = '" << node->get_friendly_name() << "'");
auto result = std::dynamic_pointer_cast<ngraph::op::Result>(node);
outputs.push_back(result);
}
}
CV_LOG_DEBUG(NULL, "DNN/IE: subgraph[" << i << ": nodes=" << components.back().size() << " inputs=" << inps.size() << " outputs=" << outputs.size());
isInit = false;
CV_Assert_N(!inps.empty(), !outputs.empty());
ngraph_function = std::make_shared<ngraph::Function>(outputs, inps);
@ -574,7 +586,7 @@ void InfEngineNgraphNet::init(Target targetId)
auto node = ngraph_function->output(i).get_node();
for (size_t j = 0; j < node->get_input_size(); ++j) {
std::string name = node->input_value(j).get_node()->get_friendly_name();
auto iter = std::find(requestedOutputs.begin(), requestedOutputs.end(), name);
auto iter = requestedOutputs.find(name);
if (iter != requestedOutputs.end()) {
requestedOutputs.erase(iter);
cnn.addOutput(name);
@ -582,10 +594,6 @@ void InfEngineNgraphNet::init(Target targetId)
}
}
}
for (const auto& name : requestedOutputs)
{
cnn.addOutput(name);
}
for (const auto& it : cnn.getInputsInfo())
{
@ -630,9 +638,6 @@ ngraph::ParameterVector InfEngineNgraphNet::setInputs(const std::vector<cv::Mat>
return current_inp;
}
void InfEngineNgraphNet::setUnconnectedNodes(Ptr<InfEngineNgraphNode>& node) {
unconnectedNodes.insert(node->node);
}
void InfEngineNgraphNet::initPlugin(InferenceEngine::CNNNetwork& net)
{
@ -732,10 +737,10 @@ void InfEngineNgraphNet::initPlugin(InferenceEngine::CNNNetwork& net)
}
}
}
if (isHetero)
netExec = ie.LoadNetwork(net, "HETERO:" + device_name + ",CPU", config);
else
netExec = ie.LoadNetwork(net, device_name, config);
std::string ieDevice = isHetero ? ("HETERO:" + device_name + ",CPU") : device_name;
CV_LOG_INFO(NULL, "DNN/IE: Calling LoadNetwork(device=" << ieDevice << ")...");
netExec = ie.LoadNetwork(net, ieDevice, config);
}
catch (const std::exception& ex)
{

@ -37,7 +37,7 @@ public:
InfEngineNgraphNet(detail::NetImplBase& netImpl);
InfEngineNgraphNet(detail::NetImplBase& netImpl, InferenceEngine::CNNNetwork& net);
void addOutput(const std::string& name);
void addOutput(const Ptr<InfEngineNgraphNode>& node);
bool isInitialized();
void init(Target targetId);
@ -47,7 +47,6 @@ public:
void initPlugin(InferenceEngine::CNNNetwork& net);
ngraph::ParameterVector setInputs(const std::vector<cv::Mat>& inputs, const std::vector<std::string>& names);
void setUnconnectedNodes(Ptr<InfEngineNgraphNode>& node);
void addBlobs(const std::vector<cv::Ptr<BackendWrapper> >& ptrs);
void createNet(Target targetId);
@ -88,8 +87,7 @@ public:
InferenceEngine::CNNNetwork cnn;
bool hasNetOwner;
std::vector<std::string> requestedOutputs;
std::unordered_set<std::shared_ptr<ngraph::Node>> unconnectedNodes;
std::unordered_map<std::string, Ptr<InfEngineNgraphNode> > requestedOutputs;
std::map<std::string, InferenceEngine::TensorDesc> outputsDesc;
};
@ -102,7 +100,7 @@ public:
std::vector<Mat>& internals);
InfEngineNgraphNode(std::shared_ptr<ngraph::Node>&& _node);
InfEngineNgraphNode(std::shared_ptr<ngraph::Node>& _node);
InfEngineNgraphNode(const std::shared_ptr<ngraph::Node>& _node);
void setName(const std::string& name);

@ -170,11 +170,14 @@ public:
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
#ifdef HAVE_INF_ENGINE
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
return preferableTarget == DNN_TARGET_CPU || dims == 4;
#endif
return (backendId == DNN_BACKEND_OPENCV) ||
backendId == DNN_BACKEND_CUDA ||
(backendId == DNN_BACKEND_HALIDE && haveHalide()) ||
backendId == DNN_BACKEND_WEBNN ||
((backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) && haveInfEngine() && (preferableTarget == DNN_TARGET_CPU || dims == 4));
backendId == DNN_BACKEND_WEBNN;
}
#ifdef HAVE_OPENCL
@ -382,16 +385,6 @@ public:
}
#endif // HAVE_HALIDE
#ifdef HAVE_DNN_IE_NN_BUILDER_2019
virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
{
InferenceEngine::Builder::Layer ieLayer = InferenceEngine::Builder::ScaleShiftLayer(name);
const size_t numChannels = weights_.total();
addConstantData("weights", wrapToInfEngineBlob(weights_, {numChannels}, InferenceEngine::Layout::C), ieLayer);
addConstantData("biases", wrapToInfEngineBlob(bias_, {numChannels}, InferenceEngine::Layout::C), ieLayer);
return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
}
#endif // HAVE_DNN_IE_NN_BUILDER_2019
#ifdef HAVE_DNN_NGRAPH
virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs, const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE

@ -63,9 +63,12 @@ public:
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
#ifdef HAVE_INF_ENGINE
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
return true;
#endif
return backendId == DNN_BACKEND_OPENCV ||
backendId == DNN_BACKEND_CUDA ||
((backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) && haveInfEngine());
backendId == DNN_BACKEND_CUDA;
}
bool getMemoryShapes(const std::vector<MatShape> &inputs,
@ -116,32 +119,6 @@ public:
}
#ifdef HAVE_DNN_IE_NN_BUILDER_2019
virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >& inputs) CV_OVERRIDE
{
InferenceEngine::DataPtr input = infEngineDataNode(inputs[0]);
std::vector<size_t> dims = input->getDims();
CV_Assert(!dims.empty());
InferenceEngine::Builder::Layer ieLayer(name);
ieLayer.setName(name);
if (preferableTarget == DNN_TARGET_MYRIAD || preferableTarget == DNN_TARGET_HDDL)
{
ieLayer.setType("Copy");
}
else
{
ieLayer.setType("Split");
ieLayer.getParameters()["axis"] = dims.size() - 1;
ieLayer.getParameters()["out_sizes"] = dims[0];
}
ieLayer.setInputPorts({InferenceEngine::Port(dims)});
ieLayer.setOutputPorts(std::vector<InferenceEngine::Port>(1));
return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
}
#endif // HAVE_DNN_IE_NN_BUILDER_2019
#ifdef HAVE_DNN_NGRAPH
virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs,
const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE

@ -113,11 +113,13 @@ public:
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
#ifdef HAVE_INF_ENGINE
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
return true;
#endif
return backendId == DNN_BACKEND_OPENCV ||
backendId == DNN_BACKEND_CUDA ||
(backendId == DNN_BACKEND_HALIDE && haveHalide() && axis == 1 && !padding) || // By channels
(backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && haveInfEngine() && !padding) ||
backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH ||
(backendId == DNN_BACKEND_WEBNN && !padding) ||
(backendId == DNN_BACKEND_VKCOM && haveVulkan() && !padding);
}
@ -343,18 +345,6 @@ public:
return Ptr<BackendNode>();
}
#ifdef HAVE_DNN_IE_NN_BUILDER_2019
virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >& inputs) CV_OVERRIDE
{
InferenceEngine::DataPtr input = infEngineDataNode(inputs[0]);
InferenceEngine::Builder::ConcatLayer ieLayer(name);
ieLayer.setAxis(normalize_axis(axis, input->getDims().size()));
ieLayer.setInputPorts(std::vector<InferenceEngine::Port>(inputs.size()));
return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
}
#endif // HAVE_DNN_IE_NN_BUILDER_2019
#ifdef HAVE_DNN_NGRAPH
virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs,

@ -34,9 +34,11 @@ public:
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
#ifdef HAVE_INF_ENGINE
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
return true;
#endif
return backendId == DNN_BACKEND_OPENCV ||
backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 ||
backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH ||
backendId == DNN_BACKEND_WEBNN ||
backendId == DNN_BACKEND_CUDA;
}
@ -78,16 +80,6 @@ public:
}
#ifdef HAVE_DNN_IE_NN_BUILDER_2019
virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
{
InferenceEngine::Builder::ConstLayer ieLayer(name);
ieLayer.setData(wrapToInfEngineBlob(blobs[0]));
return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
}
#endif // HAVE_DNN_IE_NN_BUILDER_2019
#ifdef HAVE_DNN_NGRAPH
virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs,
const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE

@ -330,7 +330,7 @@ public:
}
#endif
#ifdef HAVE_INF_ENGINE
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
{
bool isArmTarget = preferableTarget == DNN_TARGET_CPU && isArmComputePlugin();
if (isArmTarget && blobs.empty())
@ -340,7 +340,7 @@ public:
if (ksize == 3)
return preferableTarget != DNN_TARGET_MYRIAD && !isArmTarget;
bool isMyriad = preferableTarget == DNN_TARGET_MYRIAD || preferableTarget == DNN_TARGET_HDDL;
if ((backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || !isMyriad) && blobs.empty())
if (!isMyriad && blobs.empty())
return false;
return (!isMyriad || dilation.width == dilation.height);
}
@ -421,7 +421,9 @@ public:
if (!blobs.empty())
{
Mat wm = blobs[0].reshape(1, numOutput);
if( wm.step1() % VEC_ALIGN != 0 )
if ((wm.step1() % VEC_ALIGN != 0) ||
!isAligned<VEC_ALIGN * sizeof(float)>(wm.data)
)
{
int newcols = (int)alignSize(wm.step1(), VEC_ALIGN);
Mat wm_buffer = Mat(numOutput, newcols, wm.type());
@ -759,69 +761,6 @@ public:
return Ptr<BackendNode>();
}
#ifdef HAVE_DNN_IE_NN_BUILDER_2019
virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> > &inputs) CV_OVERRIDE
{
CV_Assert(!blobs.empty());
InferenceEngine::DataPtr input = infEngineDataNode(inputs[0]);
std::vector<size_t> dims = input->getDims();
CV_Assert(dims.size() == 4 || dims.size() == 5);
const int inpCn = dims[1];
const int outCn = blobs[0].size[0];
const int inpGroupCn = blobs[0].size[1];
const int group = inpCn / inpGroupCn;
InferenceEngine::Layout layout = (dims.size() == 4) ? InferenceEngine::Layout::OIHW :
InferenceEngine::Layout::NCDHW;
auto ieWeights = wrapToInfEngineBlob(blobs[0], layout);
if (fusedWeights)
{
if (weightsMat.isContinuous())
{
Mat cvWeights = weightsMat.reshape(1, blobs[0].dims, blobs[0].size);
ieWeights = wrapToInfEngineBlob(cvWeights, layout);
}
else
{
ieWeights = InferenceEngine::make_shared_blob<float>({
InferenceEngine::Precision::FP32,
ieWeights->getTensorDesc().getDims(), layout
});
ieWeights->allocate();
Mat newWeights = infEngineBlobToMat(ieWeights).reshape(1, outCn);
Mat cvWeights = weightsMat.colRange(0, newWeights.cols);
cvWeights.copyTo(newWeights);
}
}
InferenceEngine::Blob::Ptr ieBiases;
if (hasBias() || fusedBias)
{
Mat biasesMat({outCn}, CV_32F, &biasvec[0]);
ieBiases = wrapToInfEngineBlob(biasesMat, {(size_t)outCn}, InferenceEngine::Layout::C);
}
InferenceEngine::Builder::ConvolutionLayer ieLayer(name);
ieLayer.setKernel(kernel_size);
ieLayer.setStrides(strides);
ieLayer.setDilation(dilations);
ieLayer.setPaddingsBegin(pads_begin);
ieLayer.setPaddingsEnd(pads_end);
ieLayer.setGroup((size_t)group);
ieLayer.setOutDepth((size_t)outCn);
InferenceEngine::Builder::Layer l = ieLayer;
addConstantData("weights", ieWeights, l);
if (ieBiases)
addConstantData("biases", ieBiases, l);
if (!padMode.empty())
l.getParameters()["auto_pad"] = padMode == "VALID" ? std::string("valid") : std::string("same_upper");
return Ptr<BackendNode>(new InfEngineBackendNode(l));
}
#endif // HAVE_DNN_IE_NN_BUILDER_2019
#ifdef HAVE_DNN_NGRAPH
virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> > &inputs,
@ -1660,7 +1599,6 @@ public:
}
}
}
// now compute dot product of the weights
// and im2row-transformed part of the tensor
#if CV_TRY_AVX512_SKX
@ -1995,13 +1933,6 @@ public:
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
#if CV_SSE3
uint32_t ftzMode = _MM_GET_FLUSH_ZERO_MODE();
uint32_t dazMode = _MM_GET_DENORMALS_ZERO_MODE();
_MM_SET_FLUSH_ZERO_MODE(_MM_FLUSH_ZERO_ON);
_MM_SET_DENORMALS_ZERO_MODE(_MM_DENORMALS_ZERO_ON);
#endif
CV_OCL_RUN(IS_DNN_OPENCL_TARGET(preferableTarget),
forward_ocl(inputs_arr, outputs_arr, internals_arr))
@ -2138,10 +2069,6 @@ public:
ParallelConv::run(inputs[0], outputs[0], weightsMat, biasvec, reluslope,
kernel_size, strides, pads_begin, pads_end, dilations, activ.get(), ngroups, nstripes);
}
#if CV_SSE3
_MM_SET_FLUSH_ZERO_MODE(ftzMode);
_MM_SET_DENORMALS_ZERO_MODE(dazMode);
#endif
}
#ifdef HAVE_CUDA
@ -2329,52 +2256,6 @@ public:
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) {
return group == 1;
}
#ifdef HAVE_DNN_IE_NN_BUILDER_2019
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
{
if (kernel_size.size() == 3 && preferableTarget != DNN_TARGET_CPU) {
return false;
}
if (std::accumulate(adjust_pads.begin(), adjust_pads.end(), 0, std::plus<size_t>()) > 0)
{
if (padMode.empty())
{
if (preferableTarget != DNN_TARGET_CPU && group != 1)
{
for (int i = 0; i < adjust_pads.size(); i++) {
if (adjust_pads[i] && pads_begin[i])
return false;
}
}
for (int i = 0; i < adjust_pads.size(); i++) {
if (pads_end[i] < adjust_pads[i])
return false;
}
return true;
}
else if (padMode == "SAME")
{
for (int i = 0; i < adjust_pads.size(); i++) {
if (kernel_size[i] < pads_begin[i] + 1 + adjust_pads[i])
return false;
}
return true;
}
else if (padMode == "VALID")
return false;
}
if (group != 1)
{
return preferableTarget == DNN_TARGET_CPU;
}
if (preferableTarget == DNN_TARGET_OPENCL || preferableTarget == DNN_TARGET_OPENCL_FP16)
return std::accumulate(dilations.begin(), dilations.end(), 1, std::multiplies<size_t>()) == 1;
return true;
}
#endif // HAVE_DNN_IE_NN_BUILDER_2019
#endif // HAVE_INF_ENGINE
{
return backendId == DNN_BACKEND_CUDA ||
@ -3032,64 +2913,6 @@ public:
return Ptr<BackendNode>();
}
#ifdef HAVE_DNN_IE_NN_BUILDER_2019
virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> > &) CV_OVERRIDE
{
CV_Assert(!blobs.empty());
InferenceEngine::Layout layout = blobs[0].dims == 5? InferenceEngine::Layout::NCDHW :
InferenceEngine::Layout::OIHW;
auto ieWeights = wrapToInfEngineBlob(blobs[0], layout);
if (fusedWeights)
{
ieWeights = InferenceEngine::make_shared_blob<float>({
InferenceEngine::Precision::FP32,
ieWeights->getTensorDesc().getDims(), layout
});
ieWeights->allocate();
int inpCn = blobs[0].size[0];
Mat newWeights = infEngineBlobToMat(ieWeights).reshape(1, inpCn);
transpose(weightsMat, newWeights);
}
const int outGroupCn = blobs[0].size[1]; // Weights are in IOHW or OIDHW layout
const int group = numOutput / outGroupCn;
InferenceEngine::Builder::DeconvolutionLayer ieLayer(name);
ieLayer.setKernel(kernel_size);
ieLayer.setStrides(strides);
ieLayer.setDilation(dilations);
ieLayer.setPaddingsBegin(pads_begin);
if (padMode.empty())
{
std::vector<size_t> paddings_end;
for (int i = 0; i < pads_end.size(); i++) {
paddings_end.push_back(pads_end[i] - adjust_pads[i]);
}
ieLayer.setPaddingsEnd(paddings_end);
}
else if (padMode == "SAME")
{
std::vector<size_t> paddings_end;
for (int i = 0; i < pads_begin.size(); i++) {
paddings_end.push_back(kernel_size[i] - pads_begin[i] - 1 - adjust_pads[i]);
}
ieLayer.setPaddingsEnd(paddings_end);
}
ieLayer.setGroup((size_t)group);
ieLayer.setOutDepth((size_t)numOutput);
InferenceEngine::Builder::Layer l = ieLayer;
addConstantData("weights", ieWeights, l);
if (hasBias())
addConstantData("biases", wrapToInfEngineBlob(biasesMat, {(size_t)numOutput}, InferenceEngine::Layout::C), l);
return Ptr<BackendNode>(new InfEngineBackendNode(l));
}
#endif // HAVE_DNN_IE_NN_BUILDER_2019
#ifdef HAVE_DNN_NGRAPH
virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> > &inputs,

@ -221,7 +221,7 @@ public:
{
return backendId == DNN_BACKEND_OPENCV ||
(backendId == DNN_BACKEND_CUDA && !_groupByClasses) ||
((backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) && !_locPredTransposed && _bboxesNormalized);
(backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && !_locPredTransposed && _bboxesNormalized);
}
bool getMemoryShapes(const std::vector<MatShape> &inputs,
@ -1001,30 +1001,6 @@ public:
}
#endif
#ifdef HAVE_DNN_IE_NN_BUILDER_2019
virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
{
InferenceEngine::Builder::DetectionOutputLayer ieLayer(name);
ieLayer.setNumClasses(_numClasses);
ieLayer.setShareLocation(_shareLocation);
ieLayer.setBackgroudLabelId(_backgroundLabelId);
ieLayer.setNMSThreshold(_nmsThreshold);
ieLayer.setTopK(_topK > 0 ? _topK : _keepTopK);
ieLayer.setKeepTopK(_keepTopK);
ieLayer.setConfidenceThreshold(_confidenceThreshold);
ieLayer.setVariantEncodedInTarget(_varianceEncodedInTarget);
ieLayer.setCodeType("caffe.PriorBoxParameter." + _codeType);
ieLayer.setInputPorts(std::vector<InferenceEngine::Port>(3));
InferenceEngine::Builder::Layer l = ieLayer;
l.getParameters()["eta"] = std::string("1.0");
l.getParameters()["clip"] = _clip;
return Ptr<BackendNode>(new InfEngineBackendNode(l));
}
#endif // HAVE_DNN_IE_NN_BUILDER_2019
#ifdef HAVE_DNN_NGRAPH
virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs, const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE

@ -186,14 +186,6 @@ public:
return Ptr<BackendNode>();
}
#ifdef HAVE_DNN_IE_NN_BUILDER_2019
virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
{
InferenceEngine::Builder::Layer ieLayer = func.initInfEngineBuilderAPI();
ieLayer.setName(this->name);
return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
}
#endif // HAVE_DNN_IE_NN_BUILDER_2019
#ifdef HAVE_DNN_NGRAPH
virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs, const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
@ -341,10 +333,6 @@ struct ReLUFunctor : public BaseFunctor
bool supportBackend(int backendId, int)
{
#ifdef HAVE_DNN_IE_NN_BUILDER_2019
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
return slope >= 0 || !INF_ENGINE_VER_MAJOR_EQ(INF_ENGINE_RELEASE_2019R1);
#endif
#ifdef HAVE_DNN_NGRAPH
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
return true;
@ -462,13 +450,6 @@ struct ReLUFunctor : public BaseFunctor
}
#endif // HAVE_HALIDE
#ifdef HAVE_DNN_IE_NN_BUILDER_2019
InferenceEngine::Builder::Layer initInfEngineBuilderAPI()
{
return InferenceEngine::Builder::ReLULayer("").setNegativeSlope(slope);
}
#endif // HAVE_DNN_IE_NN_BUILDER_2019
#ifdef HAVE_DNN_NGRAPH
std::shared_ptr<ngraph::Node> initNgraphAPI(const std::shared_ptr<ngraph::Node>& node)
{
@ -534,11 +515,14 @@ struct ReLU6Functor : public BaseFunctor
bool supportBackend(int backendId, int)
{
#ifdef HAVE_INF_ENGINE
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
return true;
#endif
return backendId == DNN_BACKEND_OPENCV ||
backendId == DNN_BACKEND_CUDA ||
backendId == DNN_BACKEND_HALIDE ||
backendId == DNN_BACKEND_WEBNN ||
backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH;
backendId == DNN_BACKEND_WEBNN;
}
void apply(const float* srcptr, float* dstptr, int len, size_t planeSize, int cn0, int cn1) const
@ -620,12 +604,6 @@ struct ReLU6Functor : public BaseFunctor
}
#endif // HAVE_HALIDE
#ifdef HAVE_DNN_IE_NN_BUILDER_2019
InferenceEngine::Builder::Layer initInfEngineBuilderAPI()
{
return InferenceEngine::Builder::ClampLayer("").setMinValue(minValue).setMaxValue(maxValue);
}
#endif // HAVE_DNN_IE_NN_BUILDER_2019
#ifdef HAVE_DNN_NGRAPH
std::shared_ptr<ngraph::Node> initNgraphAPI(const std::shared_ptr<ngraph::Node>& node)
@ -743,12 +721,6 @@ struct BaseDefaultFunctor : public BaseFunctor
}
#endif // HAVE_HALIDE
#ifdef HAVE_DNN_IE_NN_BUILDER_2019
InferenceEngine::Builder::Layer initInfEngineBuilderAPI()
{
CV_Error(Error::StsNotImplemented, "");
}
#endif // HAVE_DNN_IE_NN_BUILDER_2019
#ifdef HAVE_DNN_NGRAPH
std::shared_ptr<ngraph::Node> initNgraphAPI(const std::shared_ptr<ngraph::Node>& node)
@ -782,10 +754,13 @@ struct TanHFunctor : public BaseDefaultFunctor<TanHFunctor>
bool supportBackend(int backendId, int)
{
#ifdef HAVE_INF_ENGINE
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
return true;
#endif
return backendId == DNN_BACKEND_OPENCV ||
backendId == DNN_BACKEND_CUDA ||
backendId == DNN_BACKEND_HALIDE ||
backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH;
backendId == DNN_BACKEND_HALIDE;
}
inline float calculate(float x) const
@ -808,13 +783,6 @@ struct TanHFunctor : public BaseDefaultFunctor<TanHFunctor>
}
#endif // HAVE_HALIDE
#ifdef HAVE_DNN_IE_NN_BUILDER_2019
InferenceEngine::Builder::Layer initInfEngineBuilderAPI()
{
return InferenceEngine::Builder::TanHLayer("");
}
#endif // HAVE_DNN_IE_NN_BUILDER_2019
#ifdef HAVE_DNN_NGRAPH
std::shared_ptr<ngraph::Node> initNgraphAPI(const std::shared_ptr<ngraph::Node>& node)
{
@ -937,10 +905,13 @@ struct SigmoidFunctor : public BaseDefaultFunctor<SigmoidFunctor>
bool supportBackend(int backendId, int)
{
#ifdef HAVE_INF_ENGINE
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
return true;
#endif
return backendId == DNN_BACKEND_OPENCV ||
backendId == DNN_BACKEND_CUDA ||
backendId == DNN_BACKEND_HALIDE ||
backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH;
backendId == DNN_BACKEND_HALIDE;
}
inline float calculate(float x) const
@ -963,12 +934,6 @@ struct SigmoidFunctor : public BaseDefaultFunctor<SigmoidFunctor>
}
#endif // HAVE_HALIDE
#ifdef HAVE_DNN_IE_NN_BUILDER_2019
InferenceEngine::Builder::Layer initInfEngineBuilderAPI()
{
return InferenceEngine::Builder::SigmoidLayer("");
}
#endif // HAVE_DNN_IE_NN_BUILDER_2019
#ifdef HAVE_DNN_NGRAPH
std::shared_ptr<ngraph::Node> initNgraphAPI(const std::shared_ptr<ngraph::Node>& node)
@ -992,10 +957,13 @@ struct ELUFunctor : public BaseDefaultFunctor<ELUFunctor>
bool supportBackend(int backendId, int)
{
#ifdef HAVE_INF_ENGINE
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
return true;
#endif
return backendId == DNN_BACKEND_OPENCV ||
backendId == DNN_BACKEND_CUDA ||
backendId == DNN_BACKEND_HALIDE ||
backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH;
backendId == DNN_BACKEND_HALIDE;
}
inline float calculate(float x) const
@ -1023,13 +991,6 @@ struct ELUFunctor : public BaseDefaultFunctor<ELUFunctor>
}
#endif // HAVE_HALIDE
#ifdef HAVE_DNN_IE_NN_BUILDER_2019
InferenceEngine::Builder::Layer initInfEngineBuilderAPI()
{
return InferenceEngine::Builder::ELULayer("");
}
#endif // HAVE_DNN_IE_NN_BUILDER_2019
#ifdef HAVE_DNN_NGRAPH
std::shared_ptr<ngraph::Node> initNgraphAPI(const std::shared_ptr<ngraph::Node>& node)
{
@ -1050,8 +1011,8 @@ struct AbsValFunctor : public BaseDefaultFunctor<AbsValFunctor>
bool supportBackend(int backendId, int)
{
#ifdef HAVE_INF_ENGINE
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
return !INF_ENGINE_VER_MAJOR_EQ(INF_ENGINE_RELEASE_2019R1);
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
return true;
#endif
return backendId == DNN_BACKEND_OPENCV ||
backendId == DNN_BACKEND_CUDA ||
@ -1078,12 +1039,6 @@ struct AbsValFunctor : public BaseDefaultFunctor<AbsValFunctor>
}
#endif // HAVE_HALIDE
#ifdef HAVE_DNN_IE_NN_BUILDER_2019
InferenceEngine::Builder::Layer initInfEngineBuilderAPI()
{
return InferenceEngine::Builder::ReLULayer("").setNegativeSlope(-0.999999f);
}
#endif // HAVE_DNN_IE_NN_BUILDER_2019
#ifdef HAVE_DNN_NGRAPH
std::shared_ptr<ngraph::Node> initNgraphAPI(const std::shared_ptr<ngraph::Node>& node)
@ -1930,14 +1885,15 @@ struct PowerFunctor : public BaseFunctor
bool supportBackend(int backendId, int targetId)
{
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
return (targetId != DNN_TARGET_OPENCL && targetId != DNN_TARGET_OPENCL_FP16) || power == 1.0 || power == 0.5;
#ifdef HAVE_INF_ENGINE
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
return true;
else
#endif
{
return backendId == DNN_BACKEND_OPENCV ||
backendId == DNN_BACKEND_CUDA ||
backendId == DNN_BACKEND_HALIDE;
}
}
void finalize()
@ -2029,14 +1985,6 @@ struct PowerFunctor : public BaseFunctor
}
#endif // HAVE_HALIDE
#ifdef HAVE_DNN_IE_NN_BUILDER_2019
InferenceEngine::Builder::Layer initInfEngineBuilderAPI()
{
return InferenceEngine::Builder::PowerLayer("").setPower(power)
.setScale(scale)
.setShift(shift);
}
#endif // HAVE_DNN_IE_NN_BUILDER_2019
#ifdef HAVE_DNN_NGRAPH
std::shared_ptr<ngraph::Node> initNgraphAPI(const std::shared_ptr<ngraph::Node>& node)
@ -2189,10 +2137,13 @@ struct ChannelsPReLUFunctor : public BaseFunctor
bool supportBackend(int backendId, int)
{
#ifdef HAVE_INF_ENGINE
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
return true;
#endif
return backendId == DNN_BACKEND_OPENCV ||
backendId == DNN_BACKEND_CUDA ||
backendId == DNN_BACKEND_HALIDE ||
backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH;
backendId == DNN_BACKEND_HALIDE;
}
void apply(const float* srcptr, float* dstptr, int len, size_t planeSize, int cn0, int cn1) const
@ -2282,15 +2233,6 @@ struct ChannelsPReLUFunctor : public BaseFunctor
}
#endif // HAVE_HALIDE
#ifdef HAVE_DNN_IE_NN_BUILDER_2019
InferenceEngine::Builder::Layer initInfEngineBuilderAPI()
{
InferenceEngine::Builder::Layer l = InferenceEngine::Builder::PReLULayer("");
const size_t numChannels = scale.total();
addConstantData("weights", wrapToInfEngineBlob(scale, {numChannels}, InferenceEngine::Layout::C), l);
return l;
}
#endif // HAVE_DNN_IE_NN_BUILDER_2019
#ifdef HAVE_DNN_NGRAPH
std::shared_ptr<ngraph::Node> initNgraphAPI(const std::shared_ptr<ngraph::Node>& node)

@ -164,6 +164,11 @@ public:
if (hasVecInput && ELTWISE_CHANNNELS_SAME)
return backendId == DNN_BACKEND_OPENCV;
#ifdef HAVE_INF_ENGINE
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
return channelsMode == ELTWISE_CHANNNELS_SAME;
#endif
if (backendId == DNN_BACKEND_CUDA)
{
if(channelsModeInput == ELTWISE_CHANNNELS_INPUT_0 || channelsModeInput == ELTWISE_CHANNNELS_INPUT_0_TRUNCATE)
@ -172,9 +177,8 @@ public:
}
return backendId == DNN_BACKEND_OPENCV ||
(backendId == DNN_BACKEND_HALIDE && op != DIV) || // TODO: not implemented, see PR #15811
((((backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && (preferableTarget != DNN_TARGET_OPENCL || coeffs.empty()))
|| backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) && channelsMode == ELTWISE_CHANNNELS_SAME));
(backendId == DNN_BACKEND_HALIDE && op != DIV) // TODO: not implemented, see PR #15811
;
}
bool getMemoryShapes(const std::vector<MatShape> &inputs,
@ -837,34 +841,6 @@ public:
return Ptr<BackendNode>();
}
#ifdef HAVE_DNN_IE_NN_BUILDER_2019
virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >& inputs) CV_OVERRIDE
{
InferenceEngine::Builder::EltwiseLayer ieLayer(name);
ieLayer.setInputPorts(std::vector<InferenceEngine::Port>(inputs.size()));
if (op == SUM)
ieLayer.setEltwiseType(InferenceEngine::Builder::EltwiseLayer::EltwiseType::SUM);
else if (op == PROD)
ieLayer.setEltwiseType(InferenceEngine::Builder::EltwiseLayer::EltwiseType::MUL);
else if (op == DIV)
ieLayer.setEltwiseType(InferenceEngine::Builder::EltwiseLayer::EltwiseType::DIV);
else if (op == MAX)
ieLayer.setEltwiseType(InferenceEngine::Builder::EltwiseLayer::EltwiseType::MAX);
else if (op == MIN)
ieLayer.setEltwiseType(InferenceEngine::Builder::EltwiseLayer::EltwiseType::MIN);
else
CV_Error(Error::StsNotImplemented, "Unsupported eltwise operation");
InferenceEngine::Builder::Layer l = ieLayer;
if (!coeffs.empty())
l.getParameters()["coeff"] = coeffs;
return Ptr<BackendNode>(new InfEngineBackendNode(l));
}
#endif // HAVE_DNN_IE_NN_BUILDER_2019
#ifdef HAVE_DNN_NGRAPH
virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs,

@ -72,9 +72,12 @@ public:
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
#ifdef HAVE_INF_ENGINE
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
return true;
#endif
return backendId == DNN_BACKEND_OPENCV ||
backendId == DNN_BACKEND_CUDA ||
((backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) && haveInfEngine());
backendId == DNN_BACKEND_CUDA;
}
bool getMemoryShapes(const std::vector<MatShape> &inputs,
@ -171,25 +174,10 @@ public:
}
#ifdef HAVE_DNN_IE_NN_BUILDER_2019
virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >& inputs) CV_OVERRIDE
{
InferenceEngine::Builder::Layer ieLayer(name);
ieLayer.setName(name);
ieLayer.setType("Flatten");
ieLayer.getParameters()["axis"] = (size_t)_startAxis;
ieLayer.getParameters()["end_axis"] = _endAxis; // Do not cast to size_t because it might be negative.
ieLayer.setInputPorts(std::vector<InferenceEngine::Port>(1));
ieLayer.setOutputPorts(std::vector<InferenceEngine::Port>(1));
return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
}
#endif // HAVE_DNN_IE_NN_BUILDER_2019
#ifdef HAVE_DNN_NGRAPH
virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs,
const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
{
virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs,
const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
{
auto& ieInpNode = nodes[0].dynamicCast<InfEngineNgraphNode>()->node;
std::vector<size_t> dims = ieInpNode->get_shape();

@ -148,12 +148,15 @@ public:
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
#ifdef HAVE_INF_ENGINE
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
return axis == 1;
#endif
return backendId == DNN_BACKEND_OPENCV ||
backendId == DNN_BACKEND_CUDA ||
(backendId == DNN_BACKEND_HALIDE && haveHalide() && axis == 1) ||
(backendId == DNN_BACKEND_WEBNN && axis == 1) ||
(((backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && !blobs.empty()) ||
backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) && axis == 1);
(backendId == DNN_BACKEND_WEBNN && axis == 1);
}
virtual bool setActivation(const Ptr<ActivationLayer>& layer) CV_OVERRIDE
@ -570,23 +573,6 @@ public:
return Ptr<BackendNode>();
}
#ifdef HAVE_DNN_IE_NN_BUILDER_2019
virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
{
InferenceEngine::Builder::FullyConnectedLayer ieLayer(name);
const int outNum = blobs[0].size[0];
ieLayer.setOutputNum(outNum);
InferenceEngine::Builder::Layer l = ieLayer;
addConstantData("weights", wrapToInfEngineBlob(blobs[0], {(size_t)blobs[0].size[0], (size_t)blobs[0].size[1], 1, 1}, InferenceEngine::Layout::OIHW), l);
if (bias)
addConstantData("biases", wrapToInfEngineBlob(blobs[1], {(size_t)outNum}, InferenceEngine::Layout::C), l);
return Ptr<BackendNode>(new InfEngineBackendNode(l));
}
#endif // HAVE_DNN_IE_NN_BUILDER_2019
#ifdef HAVE_DNN_NGRAPH
virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs,

@ -81,6 +81,8 @@ void fastConv( const float* weights, size_t wstep, const float* bias,
int blockSize, int vecsize, int vecsize_aligned,
const float* relu, bool initOutput )
{
CV_Assert(isAligned<32>(weights));
int outCn = outShape[1];
size_t outPlaneSize = outShape[2]*outShape[3];
float r0 = 1.f, r1 = 1.f, r2 = 1.f;

@ -99,12 +99,10 @@ public:
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019) {
#ifdef HAVE_INF_ENGINE
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
return bias == (int)bias;
}
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) {
return bias == (int)bias;
}
#endif
return backendId == DNN_BACKEND_OPENCV ||
backendId == DNN_BACKEND_CUDA ||
backendId == DNN_BACKEND_HALIDE ||
@ -444,24 +442,6 @@ public:
#endif // HAVE_HALIDE
}
#ifdef HAVE_DNN_IE_NN_BUILDER_2019
virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
{
float alphaSize = alpha;
if (!normBySize)
alphaSize *= (type == SPATIAL_NRM ? size*size : size);
InferenceEngine::Builder::NormLayer ieLayer(name);
ieLayer.setSize(size);
ieLayer.setAlpha(alphaSize);
ieLayer.setBeta(beta);
ieLayer.setAcrossMaps(type == CHANNEL_NRM);
InferenceEngine::Builder::Layer l = ieLayer;
l.getParameters()["k"] = bias;
return Ptr<BackendNode>(new InfEngineBackendNode(l));
}
#endif // HAVE_DNN_IE_NN_BUILDER_2019
#ifdef HAVE_DNN_NGRAPH
virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs, const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE

@ -124,14 +124,7 @@ public:
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
#ifdef HAVE_DNN_IE_NN_BUILDER_2019
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
{
bool isMyriad = preferableTarget == DNN_TARGET_MYRIAD || preferableTarget == DNN_TARGET_HDDL;
return !zeroDev && (!isMyriad || eps <= 1e-7f);
}
#endif
#ifdef HAVE_DNN_NGRAPH
#ifdef HAVE_INF_ENGINE
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
return true;
#endif
@ -387,16 +380,6 @@ public:
}
}
#ifdef HAVE_DNN_IE_NN_BUILDER_2019
virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
{
InferenceEngine::Builder::MVNLayer ieLayer(name);
ieLayer.setAcrossChannels(acrossChannels);
ieLayer.setNormalize(normVariance);
ieLayer.setEpsilon(eps);
return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
}
#endif // HAVE_DNN_IE_NN_BUILDER_2019
#ifdef HAVE_DNN_NGRAPH
virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs,

@ -70,17 +70,15 @@ public:
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
#ifdef HAVE_INF_ENGINE
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
{
if (pnorm != 2)
return false;
bool isMyriad = preferableTarget == DNN_TARGET_MYRIAD || preferableTarget == DNN_TARGET_HDDL;
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && isMyriad)
return !acrossSpatial;
return startAxis == 1;
}
#endif
return backendId == DNN_BACKEND_OPENCV ||
(backendId == DNN_BACKEND_CUDA && (pnorm == 1 || pnorm == 2));
}
@ -270,58 +268,6 @@ public:
}
#ifdef HAVE_DNN_IE_NN_BUILDER_2019
virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >& inputs) CV_OVERRIDE
{
InferenceEngine::DataPtr input = infEngineDataNode(inputs[0]);
std::vector<size_t> dims = input->getDims();
if (dims.size() == 4)
{
InferenceEngine::Builder::NormalizeLayer ieLayer(name);
ieLayer.setChannelShared(false);
ieLayer.setAcrossMaps(acrossSpatial);
ieLayer.setEpsilon(epsilon);
InferenceEngine::Builder::Layer l = ieLayer;
const int numChannels = dims[1];
InferenceEngine::Blob::Ptr weights;
if (blobs.empty())
{
weights = InferenceEngine::make_shared_blob<float>({
InferenceEngine::Precision::FP32,
{(size_t)numChannels}, InferenceEngine::Layout::C
});
weights->allocate();
Mat weightsMat = infEngineBlobToMat(weights).reshape(1, numChannels);
Mat(numChannels, 1, CV_32F, Scalar(1)).copyTo(weightsMat);
l.getParameters()["channel_shared"] = false;
}
else
{
CV_Assert(numChannels == blobs[0].total());
weights = wrapToInfEngineBlob(blobs[0], {(size_t)numChannels}, InferenceEngine::Layout::C);
l.getParameters()["channel_shared"] = blobs[0].total() == 1;
}
addConstantData("weights", weights, l);
l.getParameters()["across_spatial"] = acrossSpatial;
return Ptr<BackendNode>(new InfEngineBackendNode(l));
}
else
{
InferenceEngine::Builder::GRNLayer ieLayer(name);
ieLayer.setBeta(epsilon);
InferenceEngine::Builder::Layer l = ieLayer;
l.getParameters()["bias"] = epsilon;
return Ptr<BackendNode>(new InfEngineBackendNode(l));
}
}
#endif // HAVE_DNN_IE_NN_BUILDER_2019
#ifdef HAVE_DNN_NGRAPH
virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs,
const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE

@ -87,11 +87,6 @@ public:
CV_Error(Error::StsNotImplemented, msg);
}
virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> > &inputs) CV_OVERRIDE
{
CV_Error(Error::StsNotImplemented, msg);
}
virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> > &inputs,
const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
{

@ -102,10 +102,10 @@ public:
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
#ifdef HAVE_INF_ENGINE
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
{
bool isMyriad = preferableTarget == DNN_TARGET_MYRIAD || preferableTarget == DNN_TARGET_HDDL;
if (INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2019R1) && isMyriad)
if (isMyriad)
return dstRanges.size() == 4 && paddings[0].first == 0 && paddings[0].second == 0;
return (dstRanges.size() <= 4 || !isArmComputePlugin());
@ -219,30 +219,6 @@ public:
return Ptr<BackendNode>();
}
#ifdef HAVE_DNN_IE_NN_BUILDER_2019
virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
{
InferenceEngine::Builder::Layer ieLayer(name);
ieLayer.setName(name);
ieLayer.setType("Pad");
std::vector<int> begins(paddings.size(), 0), ends(paddings.size(), 0);
for (int i = 0; i < paddings.size(); ++i)
{
begins[i] = paddings[i].first;
ends[i] = paddings[i].second;
}
ieLayer.getParameters()["pads_begin"] = begins;
ieLayer.getParameters()["pads_end"] = ends;
ieLayer.getParameters()["pad_mode"] = paddingType;
if (paddingType == "constant")
ieLayer.getParameters()["pad_value"] = paddingValue;
ieLayer.setInputPorts(std::vector<InferenceEngine::Port>(1));
ieLayer.setOutputPorts(std::vector<InferenceEngine::Port>(1));
return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
}
#endif
#ifdef HAVE_DNN_NGRAPH
virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs,

@ -115,13 +115,16 @@ public:
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
#ifdef HAVE_INF_ENGINE
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && preferableTarget == DNN_TARGET_CPU)
return _order.size() <= 4 || !isArmComputePlugin();
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
{
if (preferableTarget == DNN_TARGET_CPU)
return _order.size() <= 4 || !isArmComputePlugin();
return true;
}
#endif
return backendId == DNN_BACKEND_OPENCV ||
backendId == DNN_BACKEND_CUDA ||
backendId == DNN_BACKEND_WEBNN ||
((backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) && haveInfEngine()) ||
(backendId == DNN_BACKEND_VKCOM && haveVulkan());
}
@ -418,16 +421,6 @@ public:
}
#ifdef HAVE_DNN_IE_NN_BUILDER_2019
virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
{
InferenceEngine::Builder::PermuteLayer ieLayer(name);
ieLayer.setOrder(_order);
return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
}
#endif // HAVE_DNN_IE_NN_BUILDER_2019
#ifdef HAVE_DNN_NGRAPH
virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs,
const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE

@ -199,34 +199,13 @@ public:
{
return type == MAX || type == AVE || type == ROI;
}
#ifdef HAVE_DNN_IE_NN_BUILDER_2019
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
{
if (computeMaxIdx)
return false;
if (kernel_size.size() == 3)
return preferableTarget == DNN_TARGET_CPU;
if (kernel_size.size() == 1)
return false;
if (preferableTarget == DNN_TARGET_MYRIAD || preferableTarget == DNN_TARGET_HDDL) {
#if INF_ENGINE_VER_MAJOR_LE(INF_ENGINE_RELEASE_2019R1)
if (type == MAX && (pads_begin[1] == 1 && pads_begin[0] == 1) && (strides[0] == 2 && strides[1] == 2)) {
return !isMyriadX();
}
#endif
return type == MAX || type == AVE;
}
else
return type != STOCHASTIC && type != SUM;
}
#endif
#ifdef HAVE_INF_ENGINE
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
{
#ifdef HAVE_DNN_NGRAPH
return !computeMaxIdx && type != STOCHASTIC && kernel_size.size() > 1 && (kernel_size.size() != 3 || !isArmComputePlugin());
#endif
}
else if (backendId == DNN_BACKEND_OPENCV)
#endif
if (backendId == DNN_BACKEND_OPENCV)
{
if (kernel_size.size() == 3)
return preferableTarget == DNN_TARGET_CPU;
@ -550,54 +529,6 @@ public:
return Ptr<BackendNode>();
}
#ifdef HAVE_DNN_IE_NN_BUILDER_2019
virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
{
if (type == MAX || type == AVE)
{
InferenceEngine::Builder::PoolingLayer ieLayer(name);
ieLayer.setKernel(kernel_size);
ieLayer.setStrides(strides);
ieLayer.setPaddingsBegin(pads_begin);
ieLayer.setPaddingsEnd(pads_end);
ieLayer.setPoolingType(type == MAX ?
InferenceEngine::Builder::PoolingLayer::PoolingType::MAX :
InferenceEngine::Builder::PoolingLayer::PoolingType::AVG);
ieLayer.setRoundingType(ceilMode ?
InferenceEngine::Builder::PoolingLayer::RoundingType::CEIL :
InferenceEngine::Builder::PoolingLayer::RoundingType::FLOOR);
ieLayer.setExcludePad(!avePoolPaddedArea);
InferenceEngine::Builder::Layer l = ieLayer;
if (!padMode.empty())
l.getParameters()["auto_pad"] = padMode == "VALID" ? std::string("valid") : std::string("same_upper");
return Ptr<BackendNode>(new InfEngineBackendNode(l));
}
else if (type == ROI)
{
InferenceEngine::Builder::ROIPoolingLayer ieLayer(name);
ieLayer.setSpatialScale(spatialScale);
ieLayer.setPooled({pooledSize.height, pooledSize.width});
ieLayer.setInputPorts(std::vector<InferenceEngine::Port>(2));
return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
}
else if (type == PSROI)
{
InferenceEngine::Builder::PSROIPoolingLayer ieLayer(name);
ieLayer.setSpatialScale(spatialScale);
ieLayer.setOutputDim(psRoiOutChannels);
ieLayer.setGroupSize(pooledSize.width);
ieLayer.setInputPorts(std::vector<InferenceEngine::Port>(2));
return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
}
else
CV_Error(Error::StsNotImplemented, "Unsupported pooling type");
return Ptr<BackendNode>();
}
#endif // HAVE_DNN_IE_NN_BUILDER_2019
#ifdef HAVE_DNN_NGRAPH
virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs,

@ -298,9 +298,7 @@ public:
#endif
return backendId == DNN_BACKEND_OPENCV ||
backendId == DNN_BACKEND_CUDA ||
(backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && haveInfEngine() &&
( _explicitSizes || (_minSize.size() == 1 && _maxSize.size() <= 1)))
|| (backendId == DNN_BACKEND_VKCOM && haveVulkan());
(backendId == DNN_BACKEND_VKCOM && haveVulkan());
}
bool getMemoryShapes(const std::vector<MatShape> &inputs,
@ -510,69 +508,6 @@ public:
}
#ifdef HAVE_DNN_IE_NN_BUILDER_2019
virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
{
if (_explicitSizes)
{
InferenceEngine::Builder::PriorBoxClusteredLayer ieLayer(name);
ieLayer.setSteps({_stepY, _stepX});
CV_CheckEQ(_offsetsX.size(), (size_t)1, ""); CV_CheckEQ(_offsetsY.size(), (size_t)1, ""); CV_CheckEQ(_offsetsX[0], _offsetsY[0], "");
ieLayer.setOffset(_offsetsX[0]);
ieLayer.setClip(_clip);
ieLayer.setFlip(false); // We already flipped aspect ratios.
InferenceEngine::Builder::Layer l = ieLayer;
CV_Assert_N(!_boxWidths.empty(), !_boxHeights.empty(), !_variance.empty());
CV_Assert(_boxWidths.size() == _boxHeights.size());
l.getParameters()["width"] = _boxWidths;
l.getParameters()["height"] = _boxHeights;
l.getParameters()["variance"] = _variance;
return Ptr<BackendNode>(new InfEngineBackendNode(l));
}
else
{
InferenceEngine::Builder::PriorBoxLayer ieLayer(name);
CV_Assert(!_explicitSizes);
ieLayer.setMinSize(_minSize[0]);
if (!_maxSize.empty())
ieLayer.setMaxSize(_maxSize[0]);
CV_CheckEQ(_offsetsX.size(), (size_t)1, ""); CV_CheckEQ(_offsetsY.size(), (size_t)1, ""); CV_CheckEQ(_offsetsX[0], _offsetsY[0], "");
ieLayer.setOffset(_offsetsX[0]);
ieLayer.setClip(_clip);
ieLayer.setFlip(false); // We already flipped aspect ratios.
InferenceEngine::Builder::Layer l = ieLayer;
if (_stepX == _stepY)
{
l.getParameters()["step"] = _stepX;
l.getParameters()["step_h"] = 0.0f;
l.getParameters()["step_w"] = 0.0f;
}
else
{
l.getParameters()["step"] = 0.0f;
l.getParameters()["step_h"] = _stepY;
l.getParameters()["step_w"] = _stepX;
}
if (!_aspectRatios.empty())
{
l.getParameters()["aspect_ratio"] = _aspectRatios;
}
CV_Assert(!_variance.empty());
l.getParameters()["variance"] = _variance;
return Ptr<BackendNode>(new InfEngineBackendNode(l));
}
}
#endif // HAVE_DNN_IE_NN_BUILDER_2019
#ifdef HAVE_DNN_NGRAPH
virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs, const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
{

@ -96,7 +96,7 @@ public:
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
#ifdef HAVE_INF_ENGINE
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
{
bool isMyriad = preferableTarget == DNN_TARGET_MYRIAD || preferableTarget == DNN_TARGET_HDDL;
return !isMyriad;
@ -338,32 +338,6 @@ public:
layerOutputs[0].col(2).copyTo(dst);
}
#ifdef HAVE_DNN_IE_NN_BUILDER_2019
virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
{
InferenceEngine::Builder::ProposalLayer ieLayer(name);
ieLayer.setBaseSize(baseSize);
ieLayer.setFeatStride(featStride);
ieLayer.setMinSize(16);
ieLayer.setNMSThresh(nmsThreshold);
ieLayer.setPostNMSTopN(keepTopAfterNMS);
ieLayer.setPreNMSTopN(keepTopBeforeNMS);
std::vector<float> scalesVec(scales.size());
for (int i = 0; i < scales.size(); ++i)
scalesVec[i] = scales.get<float>(i);
ieLayer.setScale(scalesVec);
std::vector<float> ratiosVec(ratios.size());
for (int i = 0; i < ratios.size(); ++i)
ratiosVec[i] = ratios.get<float>(i);
ieLayer.setRatio(ratiosVec);
return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
}
#endif // HAVE_DNN_IE_NN_BUILDER_2019
#ifdef HAVE_DNN_NGRAPH
virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs,

@ -184,7 +184,7 @@ public:
CV_Assert(!reverse || !bidirectional);
// read activations
DictValue activations = params.get<DictValue>("activations", "");
DictValue activations = params.get<DictValue>("activations", DictValue(String()));
if (activations.size() == 1) // if activations wasn't specified use default
{
f_activation = sigmoid;

@ -151,10 +151,12 @@ public:
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
#ifdef HAVE_INF_ENGINE
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
return true;
#endif
return backendId == DNN_BACKEND_OPENCV ||
backendId == DNN_BACKEND_CUDA ||
backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 ||
backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH;
backendId == DNN_BACKEND_CUDA;
}
#ifdef HAVE_OPENCL
@ -198,16 +200,6 @@ public:
}
#ifdef HAVE_DNN_IE_NN_BUILDER_2019
virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
{
InferenceEngine::Builder::ReorgYoloLayer ieLayer(name);
ieLayer.setStride(reorgStride);
return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
}
#endif // HAVE_DNN_IE_NN_BUILDER_2019
#ifdef HAVE_DNN_NGRAPH
virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> > &inputs,
const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE

@ -202,10 +202,13 @@ public:
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
#ifdef HAVE_INF_ENGINE
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
return true;
#endif
return backendId == DNN_BACKEND_OPENCV ||
backendId == DNN_BACKEND_CUDA ||
backendId == DNN_BACKEND_WEBNN ||
((backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) && haveInfEngine());
backendId == DNN_BACKEND_WEBNN;
}
bool getMemoryShapes(const std::vector<MatShape> &inputs,
@ -306,17 +309,6 @@ public:
}
#ifdef HAVE_DNN_IE_NN_BUILDER_2019
virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >& inputs) CV_OVERRIDE
{
InferenceEngine::Builder::ReshapeLayer ieLayer(name);
CV_Assert(outShapes.size() == 1);
ieLayer.setDims(outShapes[0]);
return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
}
#endif // HAVE_DNN_IE_NN_BUILDER_2019
#ifdef HAVE_DNN_NGRAPH
virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs,
const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE

@ -78,7 +78,7 @@ public:
return interpolation == "nearest" || interpolation == "bilinear" || interpolation == "opencv_linear";
#ifdef HAVE_INF_ENGINE
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
{
return (interpolation == "nearest" && scaleWidth == scaleHeight) ||
(interpolation == "bilinear");
@ -308,38 +308,6 @@ public:
}
#ifdef HAVE_DNN_IE_NN_BUILDER_2019
virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
{
InferenceEngine::Builder::Layer ieLayer(name);
ieLayer.setName(name);
if (interpolation == "nearest")
{
ieLayer.setType("Resample");
ieLayer.getParameters()["type"] = std::string("caffe.ResampleParameter.NEAREST");
ieLayer.getParameters()["antialias"] = false;
if (scaleWidth != scaleHeight)
CV_Error(Error::StsNotImplemented, "resample with sw != sh");
ieLayer.getParameters()["factor"] = 1.0f / scaleWidth;
}
else if (interpolation == "bilinear")
{
ieLayer.setType("Interp");
ieLayer.getParameters()["pad_beg"] = 0;
ieLayer.getParameters()["pad_end"] = 0;
ieLayer.getParameters()["align_corners"] = alignCorners;
}
else
CV_Error(Error::StsNotImplemented, "Unsupported interpolation: " + interpolation);
ieLayer.getParameters()["width"] = outWidth;
ieLayer.getParameters()["height"] = outHeight;
ieLayer.setInputPorts(std::vector<InferenceEngine::Port>(1));
ieLayer.setOutputPorts(std::vector<InferenceEngine::Port>(1));
return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
}
#endif // HAVE_DNN_IE_NN_BUILDER_2019
#ifdef HAVE_DNN_NGRAPH
virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs,
const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE

@ -78,11 +78,13 @@ public:
{
return backendId == DNN_BACKEND_OPENCV;
}
#ifdef HAVE_INF_ENGINE
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
return axis > 0;
#endif
return backendId == DNN_BACKEND_OPENCV ||
backendId == DNN_BACKEND_CUDA ||
backendId == DNN_BACKEND_HALIDE ||
(backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && axis == 1 && !blobs.empty()) ||
(backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && axis > 0) ||
(backendId == DNN_BACKEND_WEBNN && axis >0);
}
@ -314,34 +316,6 @@ public:
}
#endif // HAVE_HALIDE
#ifdef HAVE_DNN_IE_NN_BUILDER_2019
virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
{
InferenceEngine::Builder::Layer l = InferenceEngine::Builder::ScaleShiftLayer(name);
CV_Assert(!blobs.empty());
const size_t numChannels = blobs[0].total();
if (hasWeights)
{
addConstantData("weights", wrapToInfEngineBlob(blobs[0], {numChannels}, InferenceEngine::Layout::C), l);
}
else
{
auto weights = InferenceEngine::make_shared_blob<float>({
InferenceEngine::Precision::FP32, {(size_t)numChannels},
InferenceEngine::Layout::C
});
weights->allocate();
float* buf = weights->buffer().as<float*>();
std::fill(buf, buf + numChannels, 1);
addConstantData("weights", weights, l);
}
if (hasBias)
addConstantData("biases", wrapToInfEngineBlob(blobs.back(), {numChannels}, InferenceEngine::Layout::C), l);
return Ptr<BackendNode>(new InfEngineBackendNode(l));
}
#endif // HAVE_DNN_IE_NN_BUILDER_2019
#ifdef HAVE_DNN_NGRAPH
virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs, const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE

@ -166,12 +166,7 @@ public:
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
#ifdef HAVE_DNN_IE_NN_BUILDER_2019
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
return INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2019R1) &&
sliceRanges.size() == 1 && sliceRanges[0].size() == 4 && !hasSteps;
#endif
#ifdef HAVE_DNN_NGRAPH
#ifdef HAVE_INF_ENGINE
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
return sliceRanges.size() == 1 && !hasSteps;
#endif
@ -573,64 +568,6 @@ public:
}
#ifdef HAVE_DNN_IE_NN_BUILDER_2019
#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2019R1)
virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >& inputs) CV_OVERRIDE
{
CV_Assert_N(finalSliceRanges.size() == 1, inputs.size() <= 2);
std::vector<size_t> axes, offsets, dims;
int from, to, step;
int numDims = finalSliceRanges[0].size();
if (preferableTarget == DNN_TARGET_MYRIAD || preferableTarget == DNN_TARGET_HDDL)
{
from = axis;
to = numDims;
step = 1;
}
else
{
from = numDims - 1;
to = axis - 1;
step = -1;
}
for (int i = from; i != to; i += step)
{
axes.push_back(i);
offsets.push_back(finalSliceRanges[0][i].start);
dims.push_back(finalSliceRanges[0][i].size());
}
InferenceEngine::Builder::Layer ieLayer(name);
ieLayer.setName(name);
ieLayer.setType("Crop");
ieLayer.getParameters()["axis"] = axes;
ieLayer.getParameters()["dim"] = dims;
ieLayer.getParameters()["offset"] = offsets;
ieLayer.setInputPorts(std::vector<InferenceEngine::Port>(2));
ieLayer.setOutputPorts(std::vector<InferenceEngine::Port>(1));
if (inputs.size() != 2)
{
std::vector<size_t> outShape(numDims);
for (int i = 0; i < numDims; ++i)
outShape[i] = finalSliceRanges[0][i].size();
ieLayer.getInputPorts()[1].setParameter("type", "weights");
auto shapeSource = InferenceEngine::make_shared_blob<float>({
InferenceEngine::Precision::FP32, outShape,
InferenceEngine::Layout::ANY
});
shapeSource->allocate();
addConstantData("weights", shapeSource, ieLayer);
}
return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
}
#endif
#endif
#ifdef HAVE_DNN_NGRAPH
virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs,
const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE

@ -99,6 +99,10 @@ public:
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
#ifdef HAVE_INF_ENGINE
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
return true;
#endif
#ifdef HAVE_WEBNN
if (backendId == DNN_BACKEND_WEBNN) {
// TODO: support logSoftMax
@ -112,8 +116,6 @@ public:
return backendId == DNN_BACKEND_OPENCV ||
backendId == DNN_BACKEND_CUDA ||
(backendId == DNN_BACKEND_HALIDE && haveHalide() && axisRaw == 1) ||
backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH ||
(backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && haveInfEngine() && !logSoftMax) ||
(backendId == DNN_BACKEND_VKCOM && haveVulkan());
}
@ -360,17 +362,6 @@ public:
return Ptr<BackendNode>();
}
#ifdef HAVE_DNN_IE_NN_BUILDER_2019
virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >& inputs) CV_OVERRIDE
{
InferenceEngine::DataPtr input = infEngineDataNode(inputs[0]);
InferenceEngine::Builder::SoftMaxLayer ieLayer(name);
ieLayer.setAxis(normalize_axis(axisRaw, input->getDims().size()));
return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
}
#endif // HAVE_DNN_IE_NN_BUILDER_2019
#ifdef HAVE_DNN_NGRAPH
virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs,

Some files were not shown because too many files have changed in this diff Show More

Loading…
Cancel
Save