Merge branch 4.x

pull/26073/head
Alexander Smorkalov 8 months ago
commit 100db1bc0b
Changed files (changed line counts in parentheses). Not all files are shown because too many files changed in this diff.

  1. CMakeLists.txt (4)
  2. cmake/OpenCVFindLibsGrfmt.cmake (24)
  3. cmake/OpenCVInstallLayout.cmake (18)
  4. cmake/OpenCVUtils.cmake (6)
  5. doc/py_tutorials/py_calib3d/py_calibration/py_calibration.markdown (2)
  6. doc/py_tutorials/py_calib3d/py_epipolar_geometry/py_epipolar_geometry.markdown (3)
  7. doc/py_tutorials/py_calib3d/py_pose/py_pose.markdown (6)
  8. doc/py_tutorials/py_core/py_basic_ops/py_basic_ops.markdown (23)
  9. doc/py_tutorials/py_core/py_image_arithmetics/py_image_arithmetics.markdown (3)
  10. doc/py_tutorials/py_core/py_optimization/py_optimization.markdown (3)
  11. doc/py_tutorials/py_feature2d/py_fast/py_fast.markdown (3)
  12. doc/py_tutorials/py_feature2d/py_feature_homography/py_feature_homography.markdown (6)
  13. doc/py_tutorials/py_feature2d/py_features_meaning/py_features_meaning.markdown (6)
  14. doc/py_tutorials/py_feature2d/py_matcher/py_matcher.markdown (6)
  15. doc/py_tutorials/py_feature2d/py_orb/py_orb.markdown (3)
  16. doc/py_tutorials/py_feature2d/py_shi_tomasi/py_shi_tomasi.markdown (6)
  17. doc/py_tutorials/py_feature2d/py_sift_intro/py_sift_intro.markdown (6)
  18. doc/py_tutorials/py_feature2d/py_surf_intro/py_surf_intro.markdown (6)
  19. doc/py_tutorials/py_gui/py_mouse_handling/py_mouse_handling.markdown (2)
  20. doc/py_tutorials/py_gui/py_video_display/py_video_display.markdown (6)
  21. doc/py_tutorials/py_imgproc/py_colorspaces/py_colorspaces.markdown (3)
  22. doc/py_tutorials/py_imgproc/py_contours/py_contour_features/py_contour_features.markdown (6)
  23. doc/py_tutorials/py_imgproc/py_contours/py_contour_properties/py_contour_properties.markdown (3)
  24. doc/py_tutorials/py_imgproc/py_contours/py_contours_begin/py_contours_begin.markdown (6)
  25. doc/py_tutorials/py_imgproc/py_contours/py_contours_hierarchy/py_contours_hierarchy.markdown (6)
  26. doc/py_tutorials/py_imgproc/py_contours/py_contours_more_functions/py_contours_more_functions.markdown (3)
  27. doc/py_tutorials/py_imgproc/py_filtering/py_filtering.markdown (3)
  28. doc/py_tutorials/py_imgproc/py_geometric_transformations/py_geometric_transformations.markdown (3)
  29. doc/py_tutorials/py_imgproc/py_grabcut/py_grabcut.markdown (3)
  30. doc/py_tutorials/py_imgproc/py_gradients/py_gradients.markdown (6)
  31. doc/py_tutorials/py_imgproc/py_histograms/py_2d_histogram/py_2d_histogram.markdown (6)
  32. doc/py_tutorials/py_imgproc/py_histograms/py_histogram_backprojection/py_histogram_backprojection.markdown (3)
  33. doc/py_tutorials/py_imgproc/py_histograms/py_histogram_begins/py_histogram_begins.markdown (3)
  34. doc/py_tutorials/py_imgproc/py_histograms/py_histogram_equalization/py_histogram_equalization.markdown (3)
  35. doc/py_tutorials/py_imgproc/py_houghcircles/py_houghcircles.markdown (6)
  36. doc/py_tutorials/py_imgproc/py_houghlines/py_houghlines.markdown (3)
  37. doc/py_tutorials/py_imgproc/py_morphological_ops/py_morphological_ops.markdown (3)
  38. doc/py_tutorials/py_imgproc/py_pyramids/py_pyramids.markdown (3)
  39. doc/py_tutorials/py_imgproc/py_template_matching/py_template_matching.markdown (6)
  40. doc/py_tutorials/py_imgproc/py_transforms/py_fourier_transform/py_fourier_transform.markdown (3)
  41. doc/py_tutorials/py_ml/py_kmeans/py_kmeans_opencv/py_kmeans_opencv.markdown (6)
  42. doc/py_tutorials/py_ml/py_kmeans/py_kmeans_understanding/py_kmeans_understanding.markdown (3)
  43. doc/py_tutorials/py_photo/py_non_local_means/py_non_local_means.markdown (3)
  44. doc/py_tutorials/py_setup/py_setup_in_fedora/py_setup_in_fedora.markdown (3)
  45. doc/py_tutorials/py_setup/py_setup_in_windows/py_setup_in_windows.markdown (3)
  46. doc/tutorials/imgproc/basic_geometric_drawing/basic_geometric_drawing.markdown (50)
  47. doc/tutorials/introduction/config_reference/config_reference.markdown (4)
  48. doc/tutorials/introduction/env_reference/env_reference.markdown (3)
  49. doc/tutorials/others/stitcher.markdown (22)
  50. modules/calib/src/calibinit.cpp (62)
  51. modules/calib3d/misc/js/gen_dict.json (21)
  52. modules/core/include/opencv2/core/check.hpp (3)
  53. modules/core/include/opencv2/core/types.hpp (32)
  54. modules/core/include/opencv2/core/utils/filesystem.private.hpp (2)
  55. modules/core/misc/js/gen_dict.json (15)
  56. modules/core/perf/opencl/perf_arithm.cpp (18)
  57. modules/core/src/check.cpp (7)
  58. modules/core/src/mathfuncs.cpp (22)
  59. modules/core/src/parallel.cpp (8)
  60. modules/core/src/system.cpp (9)
  61. modules/core/src/utils/filesystem.cpp (8)
  62. modules/core/test/test_misc.cpp (17)
  63. modules/core/test/test_operations.cpp (12)
  64. modules/dnn/include/opencv2/dnn/all_layers.hpp (6)
  65. modules/dnn/misc/js/gen_dict.json (12)
  66. modules/dnn/perf/perf_layer.cpp (63)
  67. modules/dnn/src/cuda4dnn/csl/cublas.hpp (44)
  68. modules/dnn/src/init.cpp (1)
  69. modules/dnn/src/layers/einsum_layer.cpp (13)
  70. modules/dnn/src/layers/elementwise_layers.cpp (6)
  71. modules/dnn/src/layers/topk_layer.cpp (228)
  72. modules/dnn/src/onnx/onnx_importer.cpp (17)
  73. modules/dnn/test/test_onnx_conformance_layer_filter_opencv_ocl_fp16_denylist.inl.hpp (30)
  74. modules/dnn/test/test_onnx_conformance_layer_filter_opencv_ocl_fp32_denylist.inl.hpp (13)
  75. modules/dnn/test/test_onnx_importer.cpp (38)
  76. modules/features2d/misc/js/gen_dict.json (19)
  77. modules/features2d/src/keypoint.cpp (4)
  78. modules/gapi/src/backends/ie/giebackend.cpp (4)
  79. modules/gapi/src/backends/ov/govbackend.cpp (2)
  80. modules/gapi/test/streaming/gapi_streaming_tests.cpp (4)
  81. modules/imgproc/include/opencv2/imgproc.hpp (2)
  82. modules/imgproc/misc/js/gen_dict.json (95)
  83. modules/imgproc/misc/python/test/test_imgproc.py (25)
  84. modules/imgproc/perf/perf_warp.cpp (39)
  85. modules/imgproc/src/color.hpp (8)
  86. modules/imgproc/src/color.simd_helpers.hpp (4)
  87. modules/imgproc/src/demosaicing.cpp (119)
  88. modules/imgproc/src/drawing.cpp (9)
  89. modules/imgproc/src/hal_replacement.hpp (27)
  90. modules/imgproc/src/median_blur.simd.hpp (4)
  91. modules/imgproc/src/pyramids.cpp (85)
  92. modules/imgproc/test/test_color.cpp (36)
  93. modules/imgproc/test/test_cornersubpix.cpp (27)
  94. modules/java/android_sdk/CMakeLists.txt (67)
  95. modules/java/android_sdk/android_gradle_lib/build.gradle (58)
  96. modules/java/android_sdk/build.gradle.in (57)
  97. modules/js/generator/CMakeLists.txt (15)
  98. modules/js/generator/embindgen.py (54)
  99. modules/objdetect/misc/js/gen_dict.json (28)
  100. modules/photo/misc/js/gen_dict.json (25)

@@ -249,7 +249,7 @@ OCV_OPTION(BUILD_CLAPACK "Build CLapack from source" (((WIN3
# Optional 3rd party components
# ===================================================
OCV_OPTION(WITH_1394 "Include IEEE1394 support" ON
OCV_OPTION(WITH_1394 "Include IEEE1394 support" OFF
VISIBLE_IF NOT ANDROID AND NOT IOS AND NOT XROS AND NOT WINRT
VERIFY HAVE_DC1394_2)
OCV_OPTION(WITH_AVFOUNDATION "Use AVFoundation for Video I/O (iOS/visionOS/Mac)" ON
@@ -771,7 +771,7 @@ if(UNIX OR MINGW)
elseif(EMSCRIPTEN)
# no need to link to system libs with emscripten
elseif(QNXNTO)
set(OPENCV_LINKER_LIBS ${OPENCV_LINKER_LIBS} m)
set(OPENCV_LINKER_LIBS ${OPENCV_LINKER_LIBS} m regex)
elseif(MINGW)
set(OPENCV_LINKER_LIBS ${OPENCV_LINKER_LIBS} pthread)
else()

@@ -24,7 +24,13 @@ else()
set(_zlib_ORIG_CMAKE_FIND_LIBRARY_SUFFIXES ${CMAKE_FIND_LIBRARY_SUFFIXES})
set(CMAKE_FIND_LIBRARY_SUFFIXES .so)
endif()
find_package(ZLIB "${MIN_VER_ZLIB}")
if(QNX)
set(ZLIB_FOUND TRUE)
set(ZLIB_LIBRARY z)
set(ZLIB_LIBRARIES z)
else()
find_package(ZLIB "${MIN_VER_ZLIB}")
endif()
if(ANDROID)
set(CMAKE_FIND_LIBRARY_SUFFIXES ${_zlib_ORIG_CMAKE_FIND_LIBRARY_SUFFIXES})
unset(_zlib_ORIG_CMAKE_FIND_LIBRARY_SUFFIXES)
@@ -67,7 +73,13 @@ if(WITH_JPEG)
ocv_clear_vars(JPEG_FOUND)
else()
ocv_clear_internal_cache_vars(JPEG_LIBRARY JPEG_INCLUDE_DIR)
find_package(JPEG)
if(QNX)
set(JPEG_LIBRARY jpeg)
set(JPEG_LIBRARIES jpeg)
set(JPEG_FOUND TRUE)
else()
find_package(JPEG)
endif()
endif()
if(NOT JPEG_FOUND)
@@ -106,7 +118,13 @@ if(WITH_TIFF)
ocv_clear_vars(TIFF_FOUND)
else()
ocv_clear_internal_cache_vars(TIFF_LIBRARY TIFF_INCLUDE_DIR)
include(FindTIFF)
if(QNX)
set(TIFF_LIBRARY tiff)
set(TIFF_LIBRARIES tiff)
set(TIFF_FOUND TRUE)
else()
include(FindTIFF)
endif()
if(TIFF_FOUND)
ocv_parse_header("${TIFF_INCLUDE_DIR}/tiff.h" TIFF_VERSION_LINES TIFF_VERSION_CLASSIC TIFF_VERSION_BIG TIFF_VERSION TIFF_BIGTIFF_VERSION)
endif()

@@ -56,6 +56,24 @@ elseif(WIN32 AND CMAKE_HOST_SYSTEM_NAME MATCHES Windows)
ocv_update(OPENCV_JNI_INSTALL_PATH "java${_jni_suffix}")
ocv_update(OPENCV_JNI_BIN_INSTALL_PATH "${OPENCV_JNI_INSTALL_PATH}")
elseif(QNX)
ocv_update(OPENCV_BIN_INSTALL_PATH "${CPUVARDIR}/usr/bin")
ocv_update(OPENCV_TEST_INSTALL_PATH "${OPENCV_BIN_INSTALL_PATH}")
ocv_update(OPENCV_SAMPLES_BIN_INSTALL_PATH "${OPENCV_BIN_INSTALL_PATH}")
ocv_update(OPENCV_LIB_INSTALL_PATH "${CPUVARDIR}/usr/lib")
ocv_update(OPENCV_LIB_ARCHIVE_INSTALL_PATH "${OPENCV_LIB_INSTALL_PATH}")
ocv_update(OPENCV_3P_LIB_INSTALL_PATH "${CPUVARDIR}/usr/lib")
ocv_update(OPENCV_CONFIG_INSTALL_PATH "${CPUVARDIR}/usr/share/OpenCV")
ocv_update(OPENCV_INCLUDE_INSTALL_PATH "usr/include/OpenCV/opencv4")
ocv_update(OPENCV_OTHER_INSTALL_PATH "usr/share/OpenCV")
ocv_update(OPENCV_SAMPLES_SRC_INSTALL_PATH "samples/native")
ocv_update(OPENCV_LICENSES_INSTALL_PATH "${OPENCV_OTHER_INSTALL_PATH}/licenses")
ocv_update(OPENCV_TEST_DATA_INSTALL_PATH "${OPENCV_OTHER_INSTALL_PATH}/testdata")
ocv_update(OPENCV_DOC_INSTALL_PATH "doc")
ocv_update(OPENCV_JAR_INSTALL_PATH "${CMAKE_INSTALL_DATAROOTDIR}/java/opencv4")
ocv_update(OPENCV_JNI_INSTALL_PATH "${OPENCV_JAR_INSTALL_PATH}")
ocv_update(OPENCV_JNI_BIN_INSTALL_PATH "${OPENCV_JNI_INSTALL_PATH}")
else() # UNIX
include(GNUInstallDirs)

@@ -309,7 +309,11 @@ function(ocv_include_directories)
dir MATCHES "/usr/include$")
# workaround for GCC 6.x bug
else()
include_directories(AFTER SYSTEM "${dir}")
if(${CMAKE_SYSTEM_NAME} MATCHES QNX)
include_directories(AFTER "${dir}")
else()
include_directories(AFTER SYSTEM "${dir}")
endif()
endif()
endforeach()
include_directories(BEFORE ${__add_before})

@@ -216,8 +216,6 @@ for i in range(len(objpoints)):
print( "total error: {}".format(mean_error/len(objpoints)) )
@endcode
Additional Resources
--------------------
Exercises
---------

@@ -158,9 +158,6 @@ side. That meeting point is the epipole.
For better results, images with good resolution and many non-planar points should be used.
Additional Resources
--------------------
Exercises
---------

@@ -119,9 +119,3 @@ And look at the result below:
If you are interested in graphics, augmented reality etc, you can use OpenGL to render more
complicated figures.
Additional Resources
--------------------
Exercises
---------

@@ -51,23 +51,6 @@ You can modify the pixel values the same way.
Numpy is an optimized library for fast array calculations. So simply accessing each and every pixel
value and modifying it will be very slow and it is discouraged.
@note The above method is normally used for selecting a region of an array, say the first 5 rows
and last 3 columns. For individual pixel access, the Numpy array methods, array.item() and
array.itemset() are considered better. They always return a scalar, however, so if you want to access
all the B,G,R values, you will need to call array.item() separately for each value.
Better pixel accessing and editing method :
@code{.py}
# accessing RED value
>>> img.item(10,10,2)
59
# modifying RED value
>>> img.itemset((10,10,2),100)
>>> img.item(10,10,2)
100
@endcode
Accessing Image Properties
--------------------------
@@ -195,9 +178,3 @@ See the result below. (Image is displayed with matplotlib. So RED and BLUE channels will be
interchanged):
![image](images/border.jpg)
Additional Resources
--------------------
Exercises
---------

@@ -110,9 +110,6 @@ img2_fg.
![image](images/overlay.jpg)
Additional Resources
--------------------
Exercises
---------

@@ -163,6 +163,3 @@ Additional Resources
2. Scipy Lecture Notes - [Advanced
Numpy](http://scipy-lectures.github.io/advanced/advanced_numpy/index.html#advanced-numpy)
3. [Timing and Profiling in IPython](http://pynash.org/2013/03/06/timing-and-profiling/)
Exercises
---------

@@ -138,6 +138,3 @@ Additional Resources
2. Edward Rosten, Reid Porter, and Tom Drummond, "Faster and better: a machine learning approach to
corner detection" in IEEE Trans. Pattern Analysis and Machine Intelligence, 2010, vol 32, pp.
105-119.
Exercises
---------

@@ -102,9 +102,3 @@ plt.imshow(img3, 'gray'),plt.show()
See the result below. Object is marked in white color in cluttered image:
![image](images/homography_findobj.jpg)
Additional Resources
--------------------
Exercises
---------

@@ -81,9 +81,3 @@ or do whatever you want.
So in this module, we are looking to different algorithms in OpenCV to find features, describe them,
match them etc.
Additional Resources
--------------------
Exercises
---------

@@ -209,9 +209,3 @@ plt.imshow(img3,),plt.show()
See the result below:
![image](images/matcher_flann.jpg)
Additional Resources
--------------------
Exercises
---------

@@ -93,6 +93,3 @@ Additional Resources
-# Ethan Rublee, Vincent Rabaud, Kurt Konolige, Gary R. Bradski: ORB: An efficient alternative to
SIFT or SURF. ICCV 2011: 2564-2571.
Exercises
---------

@@ -67,9 +67,3 @@ See the result below:
![image](images/shitomasi_block1.jpg)
This function is more appropriate for tracking. We will see that when its time comes.
Additional Resources
--------------------
Exercises
---------

@@ -160,9 +160,3 @@ Here kp will be a list of keypoints and des is a numpy array of shape
So we got keypoints, descriptors etc. Now we want to see how to match keypoints in different images.
That we will learn in coming chapters.
Additional Resources
--------------------
Exercises
---------

@@ -155,9 +155,3 @@ Finally we check the descriptor size and change it to 128 if it is only 64-dim.
(47, 128)
@endcode
Remaining part is matching which we will do in another chapter.
Additional Resources
--------------------
Exercises
---------

@@ -101,8 +101,6 @@ while(1):
cv.destroyAllWindows()
@endcode
Additional Resources
--------------------
Exercises
---------

@@ -152,9 +152,3 @@ cap.release()
out.release()
cv.destroyAllWindows()
@endcode
Additional Resources
--------------------
Exercises
---------

@@ -103,9 +103,6 @@ Now you take [H-10, 100,100] and [H+10, 255, 255] as the lower bound and upper bound respectively. Apart
from this method, you can use any image editing tools like GIMP or any online converters to find
these values, but don't forget to adjust the HSV ranges.
Additional Resources
--------------------
Exercises
---------

@@ -199,9 +199,3 @@ righty = int(((cols-x)*vy/vx)+y)
cv.line(img,(cols-1,righty),(0,lefty),(0,255,0),2)
@endcode
![image](images/fitline.jpg)
Additional Resources
--------------------
Exercises
---------

@@ -114,9 +114,6 @@ For eg, if I apply it to an Indian map, I get the following result :
![image](images/extremepoints.jpg)
Additional Resources
--------------------
Exercises
---------

@@ -88,9 +88,3 @@ the contour array (drawn in blue color). First image shows points I got with cv.
much memory it saves!!!
![image](images/none.jpg)
Additional Resources
--------------------
Exercises
---------

@@ -212,9 +212,3 @@ array([[[ 7, -1, 1, -1],
[ 8, 0, -1, -1],
[-1, 7, -1, -1]]])
@endcode
Additional Resources
--------------------
Exercises
---------

@@ -124,9 +124,6 @@ See, even image rotation doesn't affect much on this comparison.
moments invariant to translation, rotation and scale. Seventh one is skew-invariant. Those values
can be found using **cv.HuMoments()** function.
Additional Resources
====================
Exercises
---------

@@ -150,6 +150,3 @@ Additional Resources
--------------------
-# Details about the [bilateral filtering](http://people.csail.mit.edu/sparis/bf_course/)
Exercises
---------

@@ -163,6 +163,3 @@ Additional Resources
--------------------
-# "Computer Vision: Algorithms and Applications", Richard Szeliski
Exercises
---------

@@ -146,9 +146,6 @@ mark the rectangle area in mask image with 2-pixel or 3-pixel (probable background/probable foreground). Then
mark our sure_foreground with 1-pixel as we did in second example. Then directly apply the grabCut
function with mask mode.
Additional Resources
--------------------
Exercises
---------

@@ -103,9 +103,3 @@ plt.show()
Check the result below:
![image](images/double_edge.jpg)
Additional Resources
--------------------
Exercises
---------

@@ -125,9 +125,3 @@ output of that code for the same image as above:
You can clearly see in the histogram what colors are present, blue is there, yellow is there, and
some white due to chessboard is there. Nice !!!
Additional Resources
--------------------
Exercises
---------

@@ -123,6 +123,3 @@ Additional Resources
-# "Indexing via color histograms", Swain, Michael J. , Third international conference on computer
vision,1990.
Exercises
---------

@@ -197,6 +197,3 @@ Additional Resources
--------------------
-# [Cambridge in Color website](http://www.cambridgeincolour.com/tutorials/histograms1.htm)
Exercises
---------

@@ -151,6 +151,3 @@ Also check these SOF questions regarding contrast adjustment:
C?](http://stackoverflow.com/questions/10549245/how-can-i-adjust-contrast-in-opencv-in-c)
4. [How do I equalize contrast & brightness of images using
opencv?](http://stackoverflow.com/questions/10561222/how-do-i-equalize-contrast-brightness-of-images-using-opencv)
Exercises
---------

@@ -45,9 +45,3 @@ cv.destroyAllWindows()
Result is shown below:
![image](images/houghcircles2.jpg)
Additional Resources
--------------------
Exercises
---------

@@ -103,6 +103,3 @@ Additional Resources
--------------------
-# [Hough Transform on Wikipedia](http://en.wikipedia.org/wiki/Hough_transform)
Exercises
---------

@@ -152,6 +152,3 @@ Additional Resources
--------------------
-# [Morphological Operations](http://homepages.inf.ed.ac.uk/rbf/HIPR2/morops.htm) at HIPR2
Exercises
---------

@@ -139,6 +139,3 @@ Additional Resources
--------------------
-# [Image Blending](http://pages.cs.wisc.edu/~csverma/CS766_09/ImageMosaic/imagemosaic.html)
Exercises
---------

@@ -132,9 +132,3 @@ cv.imwrite('res.png',img_rgb)
Result:
![image](images/res_mario.jpg)
Additional Resources
--------------------
Exercises
---------

@@ -291,6 +291,3 @@ Additional Resources
Theory](http://cns-alumni.bu.edu/~slehar/fourier/fourier.html) by Steven Lehar
2. [Fourier Transform](http://homepages.inf.ed.ac.uk/rbf/HIPR2/fourier.htm) at HIPR
3. [What does frequency domain denote in case of images?](http://dsp.stackexchange.com/q/1637/818)
Exercises
---------

@@ -186,9 +186,3 @@ cv.destroyAllWindows()
See the result below for K=8:
![image](images/oc_color_quantization.jpg)
Additional Resources
--------------------
Exercises
---------

@@ -80,6 +80,3 @@ Additional Resources
-# [Machine Learning Course](https://www.coursera.org/course/ml), Video lectures by Prof. Andrew Ng
(Some of the images are taken from this)
Exercises
---------

@@ -147,6 +147,3 @@ Additional Resources
recommended to visit. Our test image is generated from this link)
2. [Online course at coursera](https://www.coursera.org/course/images) (First image taken from
here)
Exercises
---------

@@ -240,9 +240,6 @@ make doxygen
@endcode
Then open opencv/build/doc/doxygen/html/index.html and bookmark it in the browser.
Additional Resources
--------------------
Exercises
---------

@@ -119,9 +119,6 @@ Building OpenCV from source
@note We have installed with no other support like TBB, Eigen, Qt, Documentation etc. It would be
difficult to explain it here. A more detailed video will be added soon or you can just hack around.
Additional Resources
--------------------
Exercises
---------

@@ -21,15 +21,22 @@ In this tutorial you will learn how to:
- Draw a **circle** by using the OpenCV function **circle()**
- Draw a **filled polygon** by using the OpenCV function **fillPoly()**
@add_toggle_cpp
OpenCV Theory
-------------
@add_toggle_cpp
For this tutorial, we will heavily use two structures: @ref cv::Point and @ref cv::Scalar :
@end_toggle
@add_toggle_java
For this tutorial, we will heavily use two structures: @ref cv::Point and @ref cv::Scalar :
@end_toggle
@add_toggle_python
For this tutorial, we will heavily use tuples in Python instead of @ref cv::Point and @ref cv::Scalar :
@end_toggle
### Point
It represents a 2D point, specified by its image coordinates \f$x\f$ and \f$y\f$. We can define it as:
@add_toggle_cpp
@code{.cpp}
Point pt;
pt.x = 10;
@@ -39,28 +46,8 @@ or
@code{.cpp}
Point pt = Point(10, 8);
@endcode
### Scalar
- Represents a 4-element vector. The type Scalar is widely used in OpenCV for passing pixel
values.
- In this tutorial, we will use it extensively to represent BGR color values (3 parameters). It is
not necessary to define the last argument if it is not going to be used.
- Let's see an example, if we are asked for a color argument and we give:
@code{.cpp}
Scalar( a, b, c )
@endcode
We would be defining a BGR color such as: *Blue = a*, *Green = b* and *Red = c*
@end_toggle
@add_toggle_java
OpenCV Theory
-------------
For this tutorial, we will heavily use two structures: @ref cv::Point and @ref cv::Scalar :
### Point
It represents a 2D point, specified by its image coordinates \f$x\f$ and \f$y\f$. We can define it as:
@code{.java}
Point pt = new Point();
pt.x = 10;
@@ -70,6 +57,12 @@ or
@code{.java}
Point pt = new Point(10, 8);
@endcode
@end_toggle
@add_toggle_python
@code{.python}
pt = (10, 0) # x = 10, y = 0
@endcode
@end_toggle
### Scalar
- Represents a 4-element vector. The type Scalar is widely used in OpenCV for passing pixel
@@ -77,11 +70,22 @@ Point pt = new Point(10, 8);
- In this tutorial, we will use it extensively to represent BGR color values (3 parameters). It is
not necessary to define the last argument if it is not going to be used.
- Let's see an example, if we are asked for a color argument and we give:
@add_toggle_cpp
@code{.cpp}
Scalar( a, b, c )
@endcode
@end_toggle
@add_toggle_java
@code{.java}
Scalar( a, b, c )
@endcode
We would be defining a BGR color such as: *Blue = a*, *Green = b* and *Red = c*
@end_toggle
@add_toggle_python
@code{.python}
( a, b, c )
@endcode
@end_toggle
We would be defining a BGR color such as: *Blue = a*, *Green = b* and *Red = c*
Code
----

@@ -393,7 +393,7 @@ There are multiple less popular frameworks which can be used to read and write videos.
| Option | Default | Description |
| ------ | ------- | ----------- |
| `WITH_1394` | _ON_ | [IIDC IEEE1394](https://en.wikipedia.org/wiki/IEEE_1394#IIDC) support using DC1394 library |
| `WITH_1394` | _OFF_ | [IIDC IEEE1394](https://en.wikipedia.org/wiki/IEEE_1394#IIDC) support using DC1394 library |
| `WITH_OPENNI` | _OFF_ | [OpenNI](https://en.wikipedia.org/wiki/OpenNI) can be used to capture data from depth-sensing cameras. Deprecated. |
| `WITH_OPENNI2` | _OFF_ | [OpenNI2](https://structure.io/openni) can be used to capture data from depth-sensing cameras. |
| `WITH_PVAPI` | _OFF_ | [PVAPI](https://www.alliedvision.com/en/support/software-downloads.html) is legacy SDK for Prosilica GigE cameras. Deprecated. |
@@ -455,6 +455,8 @@ OpenCV relies on various GUI libraries for window drawing.
| `WITH_WIN32UI` | _ON_ | Windows | [WinAPI](https://en.wikipedia.org/wiki/Windows_API) is a standard GUI API in Windows. |
| N/A | _ON_ | macOS | [Cocoa](https://en.wikipedia.org/wiki/Cocoa_(API)) is a framework used in macOS. |
| `WITH_QT` | _OFF_ | Cross-platform | [Qt](https://en.wikipedia.org/wiki/Qt_(software)) is a cross-platform GUI framework. |
| `WITH_FRAMEBUFFER` | _OFF_ | Linux | Experimental backend using [Linux framebuffer](https://en.wikipedia.org/wiki/Linux_framebuffer). Has limited functionality but does not require dependencies. |
| `WITH_FRAMEBUFFER_XVFB` | _OFF_ | Linux | Enables special output mode of the FRAMEBUFFER backend compatible with [xvfb](https://en.wikipedia.org/wiki/Xvfb) tool. Requires some X11 headers. |
@note OpenCV compiled with Qt support enables advanced _highgui_ interface, see @ref highgui_qt for details.

@@ -329,6 +329,9 @@ Some external dependencies can be detached into a dynamic library, which will be
|------|------|---------|-------------|
| OPENCV_LEGACY_WAITKEY | non-null | | switch `waitKey` return result (default behavior: `return code & 0xff` (or -1), legacy behavior: `return code`) |
| $XDG_RUNTIME_DIR | | | Wayland backend specific - create shared memory-mapped file for interprocess communication (named `opencv-shared-??????`) |
| OPENCV_HIGHGUI_FB_MODE | string | `FB` | Selects output mode for the framebuffer backend (`FB` - regular framebuffer, `EMU` - emulation, performs internal checks but outputs nothing, `XVFB` - compatible with the _xvfb_ virtual framebuffer) |
| OPENCV_HIGHGUI_FB_DEVICE | file path | | Path to the framebuffer device to use (checked first) |
| FRAMEBUFFER | file path | `/dev/fb0` | Same as OPENCV_HIGHGUI_FB_DEVICE; a commonly used variable for the same purpose (checked second) |
## imgproc

@@ -23,18 +23,36 @@ In this tutorial you will learn how to:
Code
----
@add_toggle_cpp
This tutorial's code is shown in the lines below. You can download it from [here](https://github.com/opencv/opencv/tree/5.x/samples/cpp/stitching.cpp).
This tutorial's code is shown in the lines below. You can also download it from
[here](https://github.com/opencv/opencv/tree/5.x/samples/cpp/stitching.cpp).
Note: The C++ version includes additional options such as image division (--d3) and more detailed error handling, which are not present in the Python example.
@include samples/cpp/snippets/stitching.cpp
@end_toggle
@add_toggle_python
This tutorial's code is shown in the lines below. You can download it from [here](https://github.com/opencv/opencv/blob/5.x/samples/python/stitching.py).
Note: The C++ version includes additional options such as image division (--d3) and more detailed error handling, which are not present in the Python example.
@include samples/python/snippets/stitching.py
@end_toggle
Explanation
-----------
The most important code part is:
@add_toggle_cpp
@snippet cpp/snippets/stitching.cpp stitching
@end_toggle
@add_toggle_python
@snippet python/snippets/stitching.py stitching
@end_toggle
A new instance of stitcher is created and @ref cv::Stitcher::stitch does all the
hard work.
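A minimal standalone sketch of that flow (the image file names here are illustrative):
@code{.cpp}
#include <opencv2/imgcodecs.hpp>
#include <opencv2/stitching.hpp>
#include <vector>

int main()
{
    std::vector<cv::Mat> imgs = { cv::imread("left.jpg"), cv::imread("right.jpg") };
    cv::Ptr<cv::Stitcher> stitcher = cv::Stitcher::create(cv::Stitcher::PANORAMA);
    cv::Mat pano;
    cv::Stitcher::Status status = stitcher->stitch(imgs, pano);  // does all the hard work
    if (status == cv::Stitcher::OK)
        cv::imwrite("panorama.jpg", pano);
    return 0;
}
@endcode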

@@ -152,7 +152,7 @@ struct ChessBoardQuad
int group_idx; // quad group ID
int row, col; // row and column of this quad
bool ordered; // true if corners/neighbors are ordered counter-clockwise
float edge_len; // quad edge len, in pix^2
float edge_sqr_len; // quad edge squared length, in pix^2
// neighbors and corners are synced, i.e., neighbor 0 shares corner 0
ChessBoardCorner *corners[4]; // Coordinates of quad corners
struct ChessBoardQuad *neighbors[4]; // Pointers of quad neighbors. M.b. sparse.
@@ -163,7 +163,7 @@ struct ChessBoardQuad
group_idx(group_idx_),
row(0), col(0),
ordered(0),
edge_len(0)
edge_sqr_len(0)
{
corners[0] = corners[1] = corners[2] = corners[3] = NULL;
neighbors[0] = neighbors[1] = neighbors[2] = neighbors[3] = NULL;
@@ -221,7 +221,7 @@ public:
int all_quads_count;
struct NeighborsFinder {
const float thresh_scale = sqrt(2.f);
const float thresh_sqr_scale = 2.f;
ChessBoardDetector& detector;
std::vector<int> neighbors_indices;
std::vector<float> neighbors_dists;
@@ -234,8 +234,8 @@ public:
const int quad_idx,
const int corner_idx,
const cv::Point2f& corner_pt,
float& min_dist,
const float radius,
float& min_sqr_dist,
const float sqr_radius,
int& closest_quad_idx,
int& closest_corner_idx,
cv::Point2f& closest_corner_pt);
@@ -531,8 +531,8 @@ bool ChessBoardDetector::NeighborsFinder::findCornerNeighbor(
const int quad_idx,
const int corner_idx,
const cv::Point2f& corner_pt,
float& min_dist,
const float radius,
float& min_sqr_dist,
const float sqr_radius,
int& closest_quad_idx,
int& closest_corner_idx,
cv::Point2f& closest_corner_pt)
@@ -546,7 +546,7 @@ bool ChessBoardDetector::NeighborsFinder::findCornerNeighbor(
// find the closest corner in all other quadrangles
const std::vector<float> query = { corner_pt.x, corner_pt.y };
const cvflann::SearchParams search_params(-1);
const int neighbors_count = all_quads_pts_index.radiusSearch(query, neighbors_indices, neighbors_dists, radius, search_params);
const int neighbors_count = all_quads_pts_index.radiusSearch(query, neighbors_indices, neighbors_dists, sqr_radius, search_params);
for (int neighbor_idx_idx = 0; neighbor_idx_idx < neighbors_count; neighbor_idx_idx++)
{
@@ -561,16 +561,16 @@ bool ChessBoardDetector::NeighborsFinder::findCornerNeighbor(
continue;
const Point2f neighbor_pt = all_quads_pts[neighbor_idx];
const float dist = normL2Sqr<float>(corner_pt - neighbor_pt);
if (dist <= cur_quad.edge_len * thresh_scale &&
dist <= q_k.edge_len * thresh_scale)
const float sqr_dist = normL2Sqr<float>(corner_pt - neighbor_pt);
if (sqr_dist <= cur_quad.edge_sqr_len * thresh_sqr_scale &&
sqr_dist <= q_k.edge_sqr_len * thresh_sqr_scale)
{
// check edge lengths, make sure they're compatible
// edges that are different by more than 1:4 are rejected.
// edge_len is squared edge length, so we compare them
// edge_sqr_len is edge squared length, so we compare them
// with squared constant 16 = 4^2
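// (for non-negative lengths a and b, a > 4*b is equivalent to a*a > 16*b*b,
// so the squared-length test below preserves the original 1:4 rejection rule)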
if (q_k.edge_len > 16 * cur_quad.edge_len ||
cur_quad.edge_len > 16 * q_k.edge_len)
if (q_k.edge_sqr_len > 16 * cur_quad.edge_sqr_len ||
cur_quad.edge_sqr_len > 16 * q_k.edge_sqr_len)
{
DPRINTF("Incompatible edge lengths");
continue;
@@ -590,20 +590,20 @@ bool ChessBoardDetector::NeighborsFinder::findCornerNeighbor(
if (!arePointsOnSameSideFromLine(mid_pt1, mid_pt2, corner_pt, neighbor_pt_diagonal))
continue;
if (!arePointsOnSameSideFromLine(mid_pt3, mid_pt4, neighbor_pt, neighbor_pt_diagonal))
if (!arePointsOnSameSideFromLine(mid_pt3, mid_pt4, corner_pt, neighbor_pt_diagonal))
continue;
closest_neighbor_idx = neighbor_idx;
closest_quad_idx = k;
closest_corner_idx = j;
closest_quad = &q_k;
min_dist = dist;
min_sqr_dist = sqr_dist;
break;
}
}
// we found a matching corner point?
if (closest_neighbor_idx >= 0 && closest_quad_idx >= 0 && closest_corner_idx >= 0 && min_dist < FLT_MAX)
if (closest_neighbor_idx >= 0 && closest_quad_idx >= 0 && closest_corner_idx >= 0 && min_sqr_dist < FLT_MAX)
{
CV_Assert(closest_quad);
@@ -622,7 +622,7 @@ bool ChessBoardDetector::NeighborsFinder::findCornerNeighbor(
if (cur_quad.neighbors[j] == closest_quad)
break;
if (normL2Sqr<float>(closest_corner_pt - all_quads_pts[(quad_idx << 2) + j]) < min_dist)
if (normL2Sqr<float>(closest_corner_pt - all_quads_pts[(quad_idx << 2) + j]) < min_sqr_dist)
break;
}
if (j < 4)
@@ -1169,7 +1169,7 @@ int ChessBoardDetector::addOuterQuad(ChessBoardQuad& quad, std::vector<ChessBoar
q.group_idx = quad.group_idx;
q.count = 1; // number of neighbors
q.ordered = false;
q.edge_len = quad.edge_len;
q.edge_sqr_len = quad.edge_sqr_len;
// make corners of new quad
// same as neighbor quad, but offset
@@ -1815,12 +1815,12 @@ void ChessBoardDetector::findQuadNeighbors()
const cv::Point2f pt = neighborsFinder.all_quads_pts[(idx << 2) + i];
float min_dist = FLT_MAX;
float min_sqr_dist = FLT_MAX;
int closest_quad_idx = -1;
int closest_corner_idx = -1;
float radius = cur_quad.edge_len * neighborsFinder.thresh_scale + 1;
float sqr_radius = cur_quad.edge_sqr_len * neighborsFinder.thresh_sqr_scale + 1;
cv::Point2f closest_corner_pt;
@@ -1828,8 +1828,8 @@ void ChessBoardDetector::findQuadNeighbors()
idx,
i,
pt,
min_dist,
radius,
min_sqr_dist,
sqr_radius,
closest_quad_idx,
closest_corner_idx,
closest_corner_pt);
@@ -1837,8 +1837,8 @@ void ChessBoardDetector::findQuadNeighbors()
if (!found)
continue;
radius = min_dist + 1;
min_dist = FLT_MAX;
sqr_radius = min_sqr_dist + 1;
min_sqr_dist = FLT_MAX;
int closest_closest_quad_idx = -1;
int closest_closest_corner_idx = -1;
@@ -1849,8 +1849,8 @@ void ChessBoardDetector::findQuadNeighbors()
closest_quad_idx,
closest_corner_idx,
closest_corner_pt,
min_dist,
radius,
min_sqr_dist,
sqr_radius,
closest_closest_quad_idx,
closest_closest_corner_idx,
closest_closest_corner_pt);
@@ -1991,15 +1991,15 @@ void ChessBoardDetector::generateQuads(const Mat& image_, int flags, int dilations)
corner = ChessBoardCorner(pt);
q.corners[i] = &corner;
}
q.edge_len = FLT_MAX;
q.edge_sqr_len = FLT_MAX;
for (int i = 0; i < 4; ++i)
{
float d = normL2Sqr<float>(q.corners[i]->pt - q.corners[(i+1)&3]->pt);
q.edge_len = std::min(q.edge_len, d);
float sqr_d = normL2Sqr<float>(q.corners[i]->pt - q.corners[(i+1)&3]->pt);
q.edge_sqr_len = std::min(q.edge_sqr_len, sqr_d);
}
const int edge_len_compensation = 2 * dilations;
q.edge_len += 2 * sqrt(q.edge_len) * edge_len_compensation + edge_len_compensation * edge_len_compensation;
q.edge_sqr_len += 2 * sqrt(q.edge_sqr_len) * edge_len_compensation + edge_len_compensation * edge_len_compensation;
}
all_quads_count = quad_count;
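The compensation applied above is the expansion of a squared binomial: with minimal edge length \f$L\f$ (so \f$L^2\f$ is edge_sqr_len) and compensation \f$c = 2 \cdot dilations\f$,
\f[ (L + c)^2 = L^2 + 2Lc + c^2, \f]
which is exactly the term the code adds to edge_sqr_len.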

@@ -0,0 +1,21 @@
{
"whitelist":
{
"": [
"findHomography",
"calibrateCameraExtended",
"drawFrameAxes",
"estimateAffine2D",
"getDefaultNewCameraMatrix",
"initUndistortRectifyMap",
"Rodrigues",
"solvePnP",
"solvePnPRansac",
"solvePnPRefineLM",
"projectPoints",
"undistort",
"fisheye_initUndistortRectifyMap",
"fisheye_projectPoints"
]
}
}

@@ -135,6 +135,9 @@ CV_EXPORTS void CV_NORETURN check_failed_MatChannels(const int v, const CheckContext& ctx);
/// Example: depth == CV_32F || depth == CV_64F
#define CV_CheckDepth(t, test_expr, msg) CV__CHECK_CUSTOM_TEST(_, MatDepth, t, (test_expr), #t, #test_expr, msg)
/// Example: channel == 1 || channel == 3
#define CV_CheckChannels(t, test_expr, msg) CV__CHECK_CUSTOM_TEST(_, MatChannels, t, (test_expr), #t, #test_expr, msg)
/// Example: v == A || v == B
#define CV_Check(v, test_expr, msg) CV__CHECK_CUSTOM_TEST(_, auto, v, (test_expr), #v, #test_expr, msg)
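A brief usage sketch of the new channels check (the surrounding function is illustrative, not part of the patch); together with the check.cpp change below, a failed check raises cv::Error::BadNumChannels:
@code{.cpp}
void process(const cv::Mat& img)
{
    CV_CheckChannels(img.channels(), img.channels() == 1 || img.channels() == 3,
                     "process() expects a grayscale or BGR image");
}
@endcode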

@@ -478,7 +478,14 @@ public:
template<typename _Tp2> operator Rect_<_Tp2>() const;
//! checks whether the rectangle contains the point
bool contains(const Point_<_Tp>& pt) const;
/*! @warning Since OpenCV 4.11.0, calling Rect.contains() with a cv::Point2f / cv::Point2d point no longer converts/rounds the point to int:
* ```
* Rect_<int> r(0,0,500,500); Point_<float> pt(250.0f, 499.9f);
* r.contains(pt) returns false (OpenCV 4.10.0 and earlier)
* r.contains(pt) returns true  (OpenCV 4.11.0 and later)
* ```
*/
template<typename _Tp2> inline bool contains(const Point_<_Tp2>& pt) const;
_Tp x; //!< x coordinate of the top-left corner
_Tp y; //!< y coordinate of the top-left corner
@@ -1861,12 +1868,29 @@ Rect_<_Tp>::operator Rect_<_Tp2>() const
return Rect_<_Tp2>(saturate_cast<_Tp2>(x), saturate_cast<_Tp2>(y), saturate_cast<_Tp2>(width), saturate_cast<_Tp2>(height));
}
template<typename _Tp> inline
bool Rect_<_Tp>::contains(const Point_<_Tp>& pt) const
template<typename _Tp> template<typename _Tp2> inline
bool Rect_<_Tp>::contains(const Point_<_Tp2>& pt) const
{
return x <= pt.x && pt.x < x + width && y <= pt.y && pt.y < y + height;
}
// See https://github.com/opencv/opencv/issues/26016
template<> template<> inline
bool Rect_<int>::contains(const Point_<double>& pt) const
{
// std::numeric_limits<int>::digits is 31.
// std::numeric_limits<double>::digits is 53.
// So conversion int->double does not lead to accuracy errors.
const Rect_<double> _rect(static_cast<double>(x), static_cast<double>(y), static_cast<double>(width), static_cast<double>(height));
return _rect.contains(pt);
}
template<> template<> inline
bool Rect_<int>::contains(const Point_<float>& _pt) const
{
// std::numeric_limits<float>::digits is 24.
// std::numeric_limits<double>::digits is 53.
// So conversion float->double does not lead to accuracy errors.
return contains(Point_<double>(static_cast<double>(_pt.x), static_cast<double>(_pt.y)));
}
template<typename _Tp> static inline
Rect_<_Tp>& operator += ( Rect_<_Tp>& a, const Point_<_Tp>& b )
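A minimal sketch of the behavioral change documented in the warning above, using the same values as the comment:
@code{.cpp}
cv::Rect r(0, 0, 500, 500);
cv::Point2f pt(250.0f, 499.9f);
// 4.10.0 and earlier: pt was converted to Point_<int>, rounding 499.9 to 500, so contains() was false
// 4.11.0 and later: the Point_<double> specialization tests 499.9 < 500 directly, so contains() is true
bool inside = r.contains(pt);
@endcode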

@@ -13,7 +13,7 @@
/* not supported */
# elif defined __ANDROID__ || defined __linux__ || defined _WIN32 || \
defined __FreeBSD__ || defined __bsdi__ || defined __HAIKU__ || \
defined __GNU__
defined __GNU__ || defined __QNX__
# define OPENCV_HAVE_FILESYSTEM_SUPPORT 1
# elif defined(__APPLE__)
# include <TargetConditionals.h>

@@ -0,0 +1,15 @@
{
"whitelist":
{
"": [
"absdiff", "add", "addWeighted", "bitwise_and", "bitwise_not", "bitwise_or", "bitwise_xor", "cartToPolar",
"compare", "convertScaleAbs", "copyMakeBorder", "countNonZero", "determinant", "dft", "divide", "eigen",
"exp", "flip", "getOptimalDFTSize","gemm", "hconcat", "inRange", "invert", "kmeans", "log", "magnitude",
"max", "mean", "meanStdDev", "merge", "min", "minMaxLoc", "mixChannels", "multiply", "norm", "normalize",
"perspectiveTransform", "polarToCart", "pow", "randn", "randu", "reduce", "repeat", "rotate", "setIdentity", "setRNGSeed",
"solve", "solvePoly", "split", "sqrt", "subtract", "trace", "transform", "transpose", "vconcat",
"setLogLevel", "getLogLevel", "LUT"
],
"Algorithm": []
}
}

@@ -689,6 +689,24 @@ OCL_PERF_TEST_P(PowFixture, Pow, ::testing::Combine(
SANITY_CHECK(dst, 1.5e-6, ERROR_RELATIVE);
}
///////////// iPow ////////////////////////
OCL_PERF_TEST_P(PowFixture, iPow, ::testing::Combine(
OCL_TEST_SIZES, OCL_PERF_ENUM(CV_8UC1, CV_8SC1,CV_16UC1,CV_16SC1,CV_32SC1)))
{
const Size_MatType_t params = GetParam();
const Size srcSize = get<0>(params);
const int type = get<1>(params);
checkDeviceMaxMemoryAllocSize(srcSize, type);
UMat src(srcSize, type), dst(srcSize, type);
randu(src, 0, 100);
declare.in(src).out(dst);
OCL_TEST_CYCLE() cv::pow(src, 7.0, dst);
SANITY_CHECK_NOTHING();
}
///////////// AddWeighted////////////////////////
typedef Size_MatType AddWeightedFixture;

@@ -156,7 +156,12 @@ void check_failed_MatType(const int v, const CheckContext& ctx)
}
void check_failed_MatChannels(const int v, const CheckContext& ctx)
{
check_failed_auto_<int>(v, ctx);
std::stringstream ss;
ss << ctx.message << ":" << std::endl
<< " '" << ctx.p2_str << "'" << std::endl
<< "where" << std::endl
<< " '" << ctx.p1_str << "' is " << v;
cv::error(cv::Error::BadNumChannels, ss.str(), ctx.func, ctx.file, ctx.line);
}
void check_failed_true(const bool v, const CheckContext& ctx)
{

@@ -791,7 +791,7 @@ struct iPow_SIMD
#if (CV_SIMD || CV_SIMD_SCALABLE)
template <>
struct iPow_SIMD<uchar, int>
struct iPow_SIMD<uchar, unsigned>
{
int operator() ( const uchar * src, uchar * dst, int len, int power )
{
@@ -871,7 +871,7 @@ struct iPow_SIMD<schar, int>
};
template <>
struct iPow_SIMD<ushort, int>
struct iPow_SIMD<ushort, unsigned>
{
int operator() ( const ushort * src, ushort * dst, int len, int power)
{
@@ -1203,16 +1203,6 @@ static bool ocl_pow(InputArray _src, double power, OutputArray _dst,
_dst.createSameSize(_src, type);
if (is_ipower)
{
if (ipower == 0)
{
_dst.setTo(Scalar::all(1));
return true;
}
if (ipower == 1)
{
_src.copyTo(_dst);
return true;
}
if( ipower < 0 )
{
if( depth == CV_32F || depth == CV_64F )
@@ -1271,11 +1261,7 @@ void pow( InputArray _src, double power, OutputArray _dst )
bool useOpenCL = _dst.isUMat() && _src.dims() <= 2;
#endif
if( is_ipower
#ifdef HAVE_OPENCL
&& !(useOpenCL && ocl::Device::getDefault().isIntel() && depth != CV_64F)
#endif
)
if (is_ipower)
{
switch( ipower )
{
@@ -1291,8 +1277,6 @@ void pow( InputArray _src, double power, OutputArray _dst )
return;
}
}
else
CV_Assert( depth == CV_32F || depth == CV_64F );
CV_OCL_RUN(useOpenCL, ocl_pow(_src, power, _dst, is_ipower, ipower))
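For reference, a small behavioral sketch of the integer-power path this code dispatches to (results saturate for 8-bit types):
@code{.cpp}
cv::Mat src = (cv::Mat_<uchar>(1, 3) << 2, 3, 4);
cv::Mat dst;
cv::pow(src, 7.0, dst);  // 7.0 has no fractional part, so is_ipower is true
// dst = [128, 255, 255]: 2^7 = 128, while 3^7 = 2187 and 4^7 = 16384 saturate to 255
@endcode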

@@ -72,6 +72,10 @@
#endif
#endif
#if defined (__QNX__)
#include <sys/syspage.h>
#endif
#ifndef OPENCV_DISABLE_THREAD_SUPPORT
#include <thread>
#endif
@@ -1011,7 +1015,9 @@ int getNumberOfCPUs_()
static unsigned cpu_count_sysconf = (unsigned)sysconf( _SC_NPROCESSORS_ONLN );
ncpus = minNonZero(ncpus, cpu_count_sysconf);
#elif defined (__QNX__)
static unsigned cpu_count_sysconf = _syspage_ptr->num_cpu;
ncpus = minNonZero(ncpus, cpu_count_sysconf);
#endif
return ncpus != 0 ? ncpus : 1;

@@ -46,6 +46,15 @@
#include <iostream>
#include <ostream>
#ifdef __QNX__
#include <unistd.h>
#include <sys/neutrino.h>
#include <sys/syspage.h>
#ifdef __aarch64__
#include <aarch64/syspage.h>
#endif
#endif
#include <opencv2/core/utils/configuration.private.hpp>
#include <opencv2/core/utils/trace.private.hpp>

@@ -34,7 +34,7 @@
#include <errno.h>
#include <io.h>
#include <stdio.h>
#elif defined __linux__ || defined __APPLE__ || defined __HAIKU__ || defined __FreeBSD__ || defined __GNU__ || defined __EMSCRIPTEN__
#elif defined __linux__ || defined __APPLE__ || defined __HAIKU__ || defined __FreeBSD__ || defined __GNU__ || defined __EMSCRIPTEN__ || defined __QNX__
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
@@ -194,7 +194,7 @@ cv::String getcwd()
sz = GetCurrentDirectoryA((DWORD)buf.size(), buf.data());
return cv::String(buf.data(), (size_t)sz);
#endif
#elif defined __linux__ || defined __APPLE__ || defined __HAIKU__ || defined __FreeBSD__ || defined __EMSCRIPTEN__
#elif defined __linux__ || defined __APPLE__ || defined __HAIKU__ || defined __FreeBSD__ || defined __EMSCRIPTEN__ || defined __QNX__
for(;;)
{
char* p = ::getcwd(buf.data(), buf.size());
@@ -228,7 +228,7 @@ bool createDirectory(const cv::String& path)
#else
int result = _mkdir(path.c_str());
#endif
#elif defined __linux__ || defined __APPLE__ || defined __HAIKU__ || defined __FreeBSD__ || defined __EMSCRIPTEN__
#elif defined __linux__ || defined __APPLE__ || defined __HAIKU__ || defined __FreeBSD__ || defined __EMSCRIPTEN__ || defined __QNX__
int result = mkdir(path.c_str(), 0777);
#else
int result = -1;
@@ -343,7 +343,7 @@ private:
Impl& operator=(const Impl&); // disabled
};
#elif defined __linux__ || defined __APPLE__ || defined __HAIKU__ || defined __FreeBSD__ || defined __GNU__ || defined __EMSCRIPTEN__
#elif defined __linux__ || defined __APPLE__ || defined __HAIKU__ || defined __FreeBSD__ || defined __GNU__ || defined __EMSCRIPTEN__ || defined __QNX__
struct FileLock::Impl
{

@@ -908,7 +908,22 @@ TYPED_TEST_P(Rect_Test, Overflows) {
EXPECT_EQ(R(), R(20, 0, 10, 10) & R(0, num_lowest, 10, 10));
EXPECT_EQ(R(), R(num_lowest, 0, 10, 10) & R(0, num_lowest, 10, 10));
}
REGISTER_TYPED_TEST_CASE_P(Rect_Test, Overflows);
// See https://github.com/opencv/opencv/issues/26016
// Rect_<int>.contains(Point_<float/double>) needs template specialization.
// This is a test for a point on the edge and its nearest representable points.
template<typename T> T cv_nexttoward(T v, T v2);
template<> int cv_nexttoward<int>(int v, int v2) { CV_UNUSED(v); return v2; }
template<> float cv_nexttoward<float>(float v, float v2) { return std::nextafter(v,v2); }
template<> double cv_nexttoward<double>(double v, double v2) { return std::nexttoward(v,v2); }
TYPED_TEST_P(Rect_Test, OnTheEdge) {
Rect_<int> rect(0,0,500,500);
TypeParam h = static_cast<TypeParam>(rect.height);
ASSERT_TRUE ( rect.contains( Point_<TypeParam>(250, cv_nexttoward(h, h - 1))));
ASSERT_FALSE( rect.contains( Point_<TypeParam>(250, cv_nexttoward(h, h ))));
ASSERT_FALSE( rect.contains( Point_<TypeParam>(250, cv_nexttoward(h, h + 1))));
}
REGISTER_TYPED_TEST_CASE_P(Rect_Test, Overflows, OnTheEdge);
typedef ::testing::Types<int, float, double> RectTypes;
INSTANTIATE_TYPED_TEST_CASE_P(Negative_Test, Rect_Test, RectTypes);

@@ -42,7 +42,7 @@
#include "test_precomp.hpp"
#include "opencv2/ts/ocl_test.hpp" // T-API like tests
#include "opencv2/core/core_c.h"
#include <fenv.h>
namespace opencv_test {
namespace {
@@ -1087,7 +1087,6 @@ bool CV_OperationsTest::operations1()
Size sz(10, 20);
if (sz.area() != 200) throw test_excep();
if (sz.width != 10 || sz.height != 20) throw test_excep();
if (cvSize(sz).width != 10 || cvSize(sz).height != 20) throw test_excep();
Rect r1(0, 0, 10, 20);
Size sz1(5, 10);
@@ -1519,7 +1518,7 @@ TEST(Core_sortIdx, regression_8941)
);
cv::Mat result;
cv::sortIdx(src.col(0), result, CV_SORT_EVERY_COLUMN | CV_SORT_ASCENDING);
cv::sortIdx(src.col(0), result, cv::SORT_EVERY_COLUMN | cv::SORT_ASCENDING);
#if 0
std::cout << src.col(0) << std::endl;
std::cout << result << std::endl;
@@ -1598,9 +1597,12 @@ TEST_P(Core_Arith_Regression24163, test_for_ties_to_even)
const Mat src2(matSize, matType, Scalar(beta, beta, beta, beta));
const Mat result = ( src1 + src2 ) / 2;
// Expected that default is FE_TONEAREST(Ties to Even).
const int rounding = fegetround();
fesetround(FE_TONEAREST);
const int mean = (int)lrint( static_cast<double>(alpha + beta) / 2.0 );
const Mat expected(matSize, matType, Scalar(mean,mean,mean,mean));
fesetround(rounding);
const Mat expected(matSize, matType, Scalar::all(mean));
// Compare result and expected.
ASSERT_EQ(expected.size(), result.size());

@@ -1203,6 +1203,12 @@ CV__DNN_INLINE_NS_BEGIN
static Ptr<SpaceToDepthLayer> create(const LayerParams &params);
};
class CV_EXPORTS TopKLayer : public Layer
{
public:
static Ptr<TopKLayer> create(const LayerParams& params);
};
//! @}
//! @}
CV__DNN_INLINE_NS_END
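A minimal construction sketch for the new layer (k is required; axis, largest and sorted have the defaults shown in the implementation):
@code{.cpp}
cv::dnn::LayerParams lp;
lp.type = "TopK";       // registered in init.cpp via CV_DNN_REGISTER_LAYER_CLASS(TopK, TopKLayer)
lp.name = "topk";
lp.set("k", 5);         // required: number of elements to keep
lp.set("axis", -1);     // optional: defaults to the last axis
lp.set("largest", 1);   // optional: 1 = pick the largest values
cv::Ptr<cv::dnn::Layer> layer = cv::dnn::LayerFactory::createLayerInstance("TopK", lp);
@endcode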

@@ -0,0 +1,12 @@
{
"whitelist":
{
"dnn_Net": ["setInput", "forward", "setPreferableBackend","getUnconnectedOutLayersNames"],
"": ["readNetFromCaffe", "readNetFromTensorflow", "readNetFromTorch", "readNetFromDarknet",
"readNetFromONNX", "readNetFromTFLite", "readNet", "blobFromImage"]
},
"namespace_prefix_override":
{
"dnn": ""
}
}

@@ -1041,4 +1041,67 @@ INSTANTIATE_TEST_CASE_P(/**/, Layer_Elementwise,
/* withWebnn= */ false,
/* withCann= */ false));
struct Layer_TopK : public TestBaseWithParam<tuple<Backend, Target>> {
void test_layer(const std::vector<int> &input_shape, const int K, const int axis) {
int backend_id = get<0>(GetParam());
int target_id = get<1>(GetParam());
Mat input_data(input_shape, CV_32F);
randn(input_data, -1.f, 1.f);
Net net;
LayerParams lp;
lp.type = "TopK";
lp.name = "testLayer";
lp.set("k", K);
lp.set("axis", axis);
net.addLayerToPrev(lp.name, lp.type, lp);
// Warmup
{
net.setInput(input_data);
net.setPreferableBackend(backend_id);
net.setPreferableTarget(target_id);
net.forward();
}
TEST_CYCLE() {
net.forward();
}
SANITY_CHECK_NOTHING();
}
std::vector<int> input_shape_2d{1000, 100};
std::vector<int> input_shape_3d{100, 100, 100};
};
PERF_TEST_P_(Layer_TopK, TopK_2D_Axis0) {
test_layer(input_shape_2d, input_shape_2d[0] / 2, 0);
}
PERF_TEST_P_(Layer_TopK, TopK_2D_Axis0_K5) {
test_layer(input_shape_2d, 5, 0);
}
PERF_TEST_P_(Layer_TopK, TopK_2D_Axis1) {
test_layer(input_shape_2d, input_shape_2d[1] / 2, 1);
}
PERF_TEST_P_(Layer_TopK, TopK_3D_Axis0) {
test_layer(input_shape_3d, input_shape_3d[0] / 2, 0);
}
PERF_TEST_P_(Layer_TopK, TopK_3D_Axis1) {
test_layer(input_shape_3d, input_shape_3d[1] / 2, 1);
}
PERF_TEST_P_(Layer_TopK, TopK_3D_Axis2) {
test_layer(input_shape_3d, input_shape_3d[2] / 2, 2);
}
INSTANTIATE_TEST_CASE_P(/**/, Layer_TopK,
dnnBackendsAndTargets(/* withInferenceEngine= */ false,
/* withHalide= */ false,
/* withCpuOCV= */ true,
/* withVkCom= */ false,
/* withCUDA= */ false,
/* withNgraph= */ false,
/* withWebnn= */ false,
/* withCann= */ false));
} // namespace

@@ -425,8 +425,8 @@ namespace cv { namespace dnn { namespace cuda4dnn { namespace csl { namespace cublas {
const auto batch_count = static_cast<int>(batchCount);
AutoBuffer<half> buffer(3 * batch_count);
auto A_slices = (half**)(buffer.data());
AutoBuffer<half*> buffer(3 * batch_count);
auto A_slices = buffer.data();
auto B_slices = A_slices + batch_count;
auto C_slices = B_slices + batch_count;
// collect A, B and C slices
@@ -438,18 +438,18 @@ namespace cv { namespace dnn { namespace cuda4dnn { namespace csl { namespace cublas {
const half **dev_A_slices = 0, **dev_B_slices = 0;
half **dev_C_slices = 0;
cudaMalloc((void**)&dev_A_slices, batch_count * sizeof(half*));
cudaMalloc((void**)&dev_B_slices, batch_count * sizeof(half*));
cudaMalloc((void**)&dev_C_slices, batch_count * sizeof(half*));
cudaMemcpy(dev_A_slices, A_slices, batch_count * sizeof(half*), cudaMemcpyHostToDevice);
cudaMemcpy(dev_B_slices, B_slices, batch_count * sizeof(half*), cudaMemcpyHostToDevice);
cudaMemcpy(dev_C_slices, C_slices, batch_count * sizeof(half*), cudaMemcpyHostToDevice);
CUDA4DNN_CHECK_CUDA(cudaMalloc((void**)&dev_A_slices, batch_count * sizeof(half*)));
CUDA4DNN_CHECK_CUDA(cudaMalloc((void**)&dev_B_slices, batch_count * sizeof(half*)));
CUDA4DNN_CHECK_CUDA(cudaMalloc((void**)&dev_C_slices, batch_count * sizeof(half*)));
CUDA4DNN_CHECK_CUDA(cudaMemcpy(dev_A_slices, A_slices, batch_count * sizeof(half*), cudaMemcpyHostToDevice));
CUDA4DNN_CHECK_CUDA(cudaMemcpy(dev_B_slices, B_slices, batch_count * sizeof(half*), cudaMemcpyHostToDevice));
CUDA4DNN_CHECK_CUDA(cudaMemcpy(dev_C_slices, C_slices, batch_count * sizeof(half*), cudaMemcpyHostToDevice));
CUDA4DNN_CHECK_CUBLAS(cublasHgemmBatched(handle.get(), opa, opb, iM, iN, iK, &alpha, dev_A_slices, ilda, dev_B_slices, ildb, &beta, dev_C_slices, ildc, batch_count));
cudaFree(dev_A_slices);
cudaFree(dev_B_slices);
cudaFree(dev_C_slices);
CUDA4DNN_CHECK_CUDA(cudaFree(dev_A_slices));
CUDA4DNN_CHECK_CUDA(cudaFree(dev_B_slices));
CUDA4DNN_CHECK_CUDA(cudaFree(dev_C_slices));
}
template <> inline
@@ -475,8 +475,8 @@ namespace cv { namespace dnn { namespace cuda4dnn { namespace csl { namespace cublas {
const auto batch_count = static_cast<int>(batchCount);
AutoBuffer<float> buffer(3 * batch_count);
auto A_slices = (float**)(buffer.data());
AutoBuffer<float*> buffer(3 * batch_count);
auto A_slices = buffer.data();
auto B_slices = A_slices + batch_count;
auto C_slices = B_slices + batch_count;
// collect A, B and C slices
@@ -488,19 +488,19 @@ namespace cv { namespace dnn { namespace cuda4dnn { namespace csl { namespace cublas {
const float **dev_A_slices = 0, **dev_B_slices = 0;
float **dev_C_slices = 0;
cudaMalloc((void**)&dev_A_slices, batch_count * sizeof(float*));
cudaMalloc((void**)&dev_B_slices, batch_count * sizeof(float*));
cudaMalloc((void**)&dev_C_slices, batch_count * sizeof(float*));
cudaMemcpy(dev_A_slices, A_slices, batch_count * sizeof(float*), cudaMemcpyHostToDevice);
cudaMemcpy(dev_B_slices, B_slices, batch_count * sizeof(float*), cudaMemcpyHostToDevice);
cudaMemcpy(dev_C_slices, C_slices, batch_count * sizeof(float*), cudaMemcpyHostToDevice);
CUDA4DNN_CHECK_CUDA(cudaMalloc((void**)&dev_A_slices, batch_count * sizeof(float*)));
CUDA4DNN_CHECK_CUDA(cudaMalloc((void**)&dev_B_slices, batch_count * sizeof(float*)));
CUDA4DNN_CHECK_CUDA(cudaMalloc((void**)&dev_C_slices, batch_count * sizeof(float*)));
CUDA4DNN_CHECK_CUDA(cudaMemcpy(dev_A_slices, A_slices, batch_count * sizeof(float*), cudaMemcpyHostToDevice));
CUDA4DNN_CHECK_CUDA(cudaMemcpy(dev_B_slices, B_slices, batch_count * sizeof(float*), cudaMemcpyHostToDevice));
CUDA4DNN_CHECK_CUDA(cudaMemcpy(dev_C_slices, C_slices, batch_count * sizeof(float*), cudaMemcpyHostToDevice));
// cuBLAS is column-major
CUDA4DNN_CHECK_CUBLAS(cublasSgemmBatched(handle.get(), opa, opb, iM, iN, iK, &alpha, dev_A_slices, ilda, dev_B_slices, ildb, &beta, dev_C_slices, ildc, batch_count));
cudaFree(dev_A_slices);
cudaFree(dev_B_slices);
cudaFree(dev_C_slices);
CUDA4DNN_CHECK_CUDA(cudaFree(dev_A_slices));
CUDA4DNN_CHECK_CUDA(cudaFree(dev_B_slices));
CUDA4DNN_CHECK_CUDA(cudaFree(dev_C_slices));
}
}}}}} /* namespace cv::dnn::cuda4dnn::csl::cublas */
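The change above wraps every raw CUDA runtime call in CUDA4DNN_CHECK_CUDA so that allocation and copy failures surface immediately instead of being silently ignored. A generic sketch of this check-macro pattern (the macro below is illustrative; OpenCV's real macro reports errors through cv::error):
@code{.cpp}
#include <cuda_runtime.h>
#include <cstdio>

#define CHECK_CUDA(call)                                                \
    do {                                                                \
        cudaError_t err_ = (call);                                      \
        if (err_ != cudaSuccess)                                        \
            std::fprintf(stderr, "CUDA error '%s' at %s:%d\n",          \
                         cudaGetErrorString(err_), __FILE__, __LINE__); \
    } while (0)

// usage: CHECK_CUDA(cudaMalloc((void**)&dev_ptr, size));
@endcode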

@@ -200,6 +200,7 @@ void initializeLayerFactory()
CV_DNN_REGISTER_LAYER_CLASS(Scatter, ScatterLayer);
CV_DNN_REGISTER_LAYER_CLASS(ScatterND, ScatterNDLayer);
CV_DNN_REGISTER_LAYER_CLASS(Tile, TileLayer);
CV_DNN_REGISTER_LAYER_CLASS(TopK, TopKLayer);
CV_DNN_REGISTER_LAYER_CLASS(Quantize, QuantizeLayer);
CV_DNN_REGISTER_LAYER_CLASS(Dequantize, DequantizeLayer);

@@ -459,6 +459,7 @@ public:
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
CV_CheckEQ((size_t)inputs_arr.total(), (size_t)numInputs, "Number of inputs in forward and inputs during graph construction do not match");
if (inputs_arr.depth() == CV_16F)
{
@@ -541,7 +542,7 @@ public:
// Use either the preprocessed inputs (if it is available) or the corresponding raw inputs
result = pairwiseOperandProcess(!result.empty() ? result : rawInputs[0],
!result.empty() ? tmpResult : homogenizedInputDims[0],
(!preProcessedInputs.empty() && !preProcessedInputs[input].empty()) ? preProcessedInputs[input] : rawInputs[input],
(!preProcessedInputs[input].empty()) ? preProcessedInputs[input] : rawInputs[input],
homogenizedInputDims[input],
reducedDims,
isFinalPair);
@@ -605,8 +606,8 @@ void LayerEinsumImpl::preProcessInputs(InputArrayOfArrays& inputs_arr)
std::vector<cv::Mat> inputs;
inputs_arr.getMatVector(inputs);
preProcessedInputs.reserve(inputs.size());
homogenizedInputDims.reserve(inputs.size());
preProcessedInputs.resize(inputs.size());
homogenizedInputDims.resize(inputs.size());
int inputIter = 0;
for(const Mat& input : inputs)
@@ -616,7 +617,7 @@ void LayerEinsumImpl::preProcessInputs(InputArrayOfArrays& inputs_arr)
// variable to hold processed version of the original input
MatShape input_dims = shape(input);
if (input_dims.empty()){
homogenizedInputDims.emplace_back(MatShape(numLetterIndices, 1));
homogenizedInputDims[inputIter] = MatShape(numLetterIndices, 1);
++inputIter;
continue;
}
@@ -672,9 +673,9 @@ void LayerEinsumImpl::preProcessInputs(InputArrayOfArrays& inputs_arr)
{
preprocessed = preprocessed.reshape(1, homogenizedInputDims_.size(), homogenizedInputDims_.data());
}
preProcessedInputs[inputIter] = preprocessed;
homogenizedInputDims[inputIter] = homogenizedInputDims_;
preProcessedInputs.emplace_back(preprocessed);
homogenizedInputDims.emplace_back(homogenizedInputDims_);
++inputIter;
}
}
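The switch from reserve() + emplace_back() to resize() + indexed assignment matters because reserve() only allocates capacity: size() stays 0, so indexed accesses such as preProcessedInputs[input] in the forward path would be out of bounds. A minimal illustration of the pitfall:
@code{.cpp}
#include <opencv2/core.hpp>
#include <vector>

int main()
{
    std::vector<cv::Mat> v;
    v.reserve(3);                        // capacity >= 3, but size() is still 0
    // v[0] = cv::Mat();                 // undefined behavior: no element exists yet
    v.resize(3);                         // size() == 3, elements default-constructed
    v[0] = cv::Mat::ones(2, 2, CV_32F);  // OK: assignment into an existing element
    return 0;
}
@endcode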

@@ -1520,10 +1520,10 @@ struct RoundFunctor : public BaseDefaultFunctor<RoundFunctor>
inline float calculate(float x) const
{
// Rounds to even numbers in halfway cases, so 2.5 -> 2, -2.5 -> -2
int old_rounding_direction = std::fegetround();
std::fesetround(FE_TONEAREST);
int old_rounding_direction = fegetround();
fesetround(FE_TONEAREST);
float y = std::nearbyint(x);
std::fesetround(old_rounding_direction);
fesetround(old_rounding_direction);
return y;
}
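Round-half-to-even in action; a small standalone sketch of the same save/set/restore pattern:
@code{.cpp}
#include <cfenv>
#include <cmath>
#include <cstdio>

int main()
{
    const int old_mode = std::fegetround();
    std::fesetround(FE_TONEAREST);
    // halfway cases round to the nearest even integer
    std::printf("%.0f %.0f %.0f\n",
                std::nearbyint(2.5), std::nearbyint(-2.5), std::nearbyint(3.5));  // prints: 2 -2 4
    std::fesetround(old_mode);
    return 0;
}
@endcode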

@@ -0,0 +1,228 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
#include "../precomp.hpp"
#include "layers_common.hpp"
#include <opencv2/dnn/shape_utils.hpp>
namespace cv { namespace dnn {
namespace {
template<typename T>
class ComparatorGreater {
public:
ComparatorGreater(const T* data, size_t step)
: data_(data), step_(step) {}
void addOffset(size_t offset) {
data_ += offset;
}
void minusOffset(size_t offset) {
data_ -= offset;
}
bool operator()(const size_t lhs_idx, const size_t rhs_idx) {
T lhs = *(data_ + lhs_idx * step_),
rhs = *(data_ + rhs_idx * step_);
return (lhs > rhs || (lhs == rhs && lhs_idx < rhs_idx));
}
private:
const T* data_;
size_t step_;
};
template<typename T>
class ComparatorLess {
public:
ComparatorLess(const T* data, size_t step)
: data_(data), step_(step) {}
void addOffset(size_t offset) {
data_ += offset;
}
void minusOffset(size_t offset) {
data_ -= offset;
}
bool operator()(const size_t lhs_idx, const size_t rhs_idx) {
T lhs = *(data_ + lhs_idx * step_),
rhs = *(data_ + rhs_idx * step_);
return (lhs < rhs || (lhs == rhs && lhs_idx < rhs_idx));
}
private:
const T* data_;
size_t step_;
};
}
class TopKLayerImpl CV_FINAL : public TopKLayer
{
public:
TopKLayerImpl(const LayerParams& params)
{
setParamsFrom(params);
axis = params.get<int>("axis", -1);
largest = params.get<int>("largest", 1) == 1;
sorted = params.get<int>("sorted", 1) == 1;
CV_CheckTrue(sorted, "TopK: sorted == false is not supported"); // TODO: support sorted
CV_CheckTrue(params.has("k"), "TopK: parameter k is required but missing");
K = params.get<int>("k");
}
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
return backendId == DNN_BACKEND_OPENCV;
}
virtual bool getMemoryShapes(const std::vector<MatShape> &inputs,
const int requiredOutputs,
std::vector<MatShape> &outputs,
std::vector<MatShape> &internals) const CV_OVERRIDE
{
const auto &input_shape = inputs.front();
int input_dims = input_shape.size();
// Check if axis is valid
CV_CheckGE(axis, -input_dims, "TopK: axis is out of range");
CV_CheckLT(axis, input_dims, "TopK: axis is out of range");
// Normalize axis
int axis_normalized = normalize_axis(axis, input_shape.size());
// Check if K is in range (0, input_shape[axis])
CV_CheckGT(K, 0, "TopK: K needs to be a positive integer");
CV_CheckLT(K, input_shape[axis_normalized], "TopK: K is out of range");
// Assign output shape
auto output_shape = input_shape;
output_shape[axis_normalized] = K;
outputs.assign(1, output_shape);
outputs.assign(2, output_shape); // TODO: support indices of type CV_32S on 5.x
return false;
}
virtual void finalize(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr) CV_OVERRIDE {
std::vector<Mat> inputs;
inputs_arr.getMatVector(inputs);
// Normalize axis
auto input_shape = shape(inputs.front());
axis = normalize_axis(axis, input_shape.size());
}
template<class Comparator>
void FindTopK(const Mat &input, Mat &output_value, Mat &output_index) {
const auto input_shape = shape(input);
size_t loops = std::accumulate(input_shape.begin(), input_shape.begin() + axis, 1, std::multiplies<int>());
size_t step = std::accumulate(input_shape.begin() + axis + 1, input_shape.end(), 1, std::multiplies<int>());
int dim_axis = input_shape[axis];
if (loops == 1) {
auto worker = [&](const Range &r) {
const auto *input_ptr = input.ptr<const float>(); // TODO: support other input type
auto *output_value_ptr = output_value.ptr<float>();
auto *output_index_ptr = output_index.ptr<float>(); // TODO: use CV_32S on 5.x
Comparator cmp(input_ptr, step);
AutoBuffer<int> buffer_index(dim_axis);
auto *buffer_index_ptr = buffer_index.data();
for (int offset = r.start; offset < r.end; offset++) {
const auto *input_offset_ptr = input_ptr + offset;
cmp.addOffset(offset);
std::iota(buffer_index_ptr, buffer_index_ptr + dim_axis, 0);
std::stable_sort(buffer_index_ptr, buffer_index_ptr + dim_axis, cmp);
auto *output_value_offset_ptr = output_value_ptr + offset;
auto *output_index_offset_ptr = output_index_ptr + offset;
for (int i = 0; i < K; i++) {
int source_index = buffer_index_ptr[i];
output_value_offset_ptr[i * step] = *(input_offset_ptr + source_index * step);
output_index_offset_ptr[i * step] = source_index;
}
cmp.minusOffset(offset);
}
};
parallel_for_(Range(0, step), worker);
} else {
auto worker = [&](const Range &r) {
const auto *input_ptr = input.ptr<const float>();
auto *output_value_ptr = output_value.ptr<float>();
auto *output_index_ptr = output_index.ptr<float>();
Comparator cmp(input_ptr, step);
AutoBuffer<int> buffer_index(dim_axis);
auto *buffer_index_ptr = buffer_index.data();
for (int batch_index = r.start; batch_index < r.end; batch_index++) {
for (size_t offset = 0; offset < step; offset++) {
const auto *input_offset_ptr = input_ptr + batch_index * dim_axis * step + offset;
cmp.addOffset(batch_index * dim_axis * step + offset);
std::iota(buffer_index_ptr, buffer_index_ptr + dim_axis, 0);
std::stable_sort(buffer_index_ptr, buffer_index_ptr + dim_axis, cmp);
auto *output_value_offset_ptr = output_value_ptr + batch_index * K * step + offset;
auto *output_index_offset_ptr = output_index_ptr + batch_index * K * step + offset;
for (int i = 0; i < K; i++) {
int source_index = buffer_index_ptr[i];
output_value_offset_ptr[i * step] = *(input_offset_ptr + source_index * step);
output_index_offset_ptr[i * step] = source_index;
}
cmp.minusOffset(batch_index * dim_axis * step + offset);
}
}
};
parallel_for_(Range(0, loops), worker);
}
}
void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
if (inputs_arr.depth() == CV_16F)
{
forward_fallback(inputs_arr, outputs_arr, internals_arr);
return;
}
std::vector<Mat> inputs, outputs;
inputs_arr.getMatVector(inputs);
outputs_arr.getMatVector(outputs);
const auto &input = inputs.front();
auto &output_value = outputs.front();
auto &output_index = outputs.back();
if (largest) {
FindTopK<ComparatorGreater<float>>(input, output_value, output_index);
} else {
FindTopK<ComparatorLess<float>>(input, output_value, output_index);
}
}
private:
int axis;
bool largest;
bool sorted;
int K; // FIXIT: make it layer input once dynamic shape is supported
};
Ptr<TopKLayer> TopKLayer::create(const LayerParams& params)
{
return makePtr<TopKLayerImpl>(params);
}
}} // namespace cv::dnn

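The selection in FindTopK sorts indices rather than values: std::iota seeds 0..n-1, std::stable_sort orders them through the comparator, and the first K entries yield both outputs. A compact standalone sketch of the same idea, without the layer plumbing (ties keep the smaller index, as in ComparatorGreater):

    #include <algorithm>  // std::stable_sort
    #include <cstdio>
    #include <numeric>    // std::iota
    #include <vector>

    int main()
    {
        const std::vector<float> data = {3.f, 1.f, 3.f, 5.f, 2.f};
        const int K = 3;
        std::vector<int> idx(data.size());
        std::iota(idx.begin(), idx.end(), 0);  // 0, 1, 2, 3, 4
        std::stable_sort(idx.begin(), idx.end(), [&](int a, int b) {
            // "largest" ordering; equal values keep the smaller index
            return data[a] > data[b] || (data[a] == data[b] && a < b);
        });
        for (int i = 0; i < K; i++)
            std::printf("value=%g index=%d\n", data[idx[i]], idx[i]);
        // prints 5/3, 3/0, 3/2
        return 0;
    }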
@ -195,6 +195,7 @@ private:
void parseScatter (LayerParams& layerParams, const opencv_onnx::NodeProto& node_proto);
void parseTile (LayerParams& layerParams, const opencv_onnx::NodeProto& node_proto);
void parseLayerNorm (LayerParams& layerParams, const opencv_onnx::NodeProto& node_proto);
void parseTopK (LayerParams& layerParams, const opencv_onnx::NodeProto& node_proto);
void parseSimpleLayers (LayerParams& layerParams, const opencv_onnx::NodeProto& node_proto);
void parseEinsum (LayerParams& layerParams, const opencv_onnx::NodeProto& node_proto);
@ -3162,6 +3163,21 @@ void ONNXImporter::parseLayerNorm(LayerParams& layerParams, const opencv_onnx::N
}
}
void ONNXImporter::parseTopK(LayerParams& layerParams, const opencv_onnx::NodeProto& node_proto)
{
// K needs to be constant in case of being input (since opset 10)
if (node_proto.input_size() == 2) {
bool K_const = constBlobs.find(node_proto.input(1)) != constBlobs.end();
CV_CheckTrue(K_const, "OnnxImporter/TopK: K being non-constant is not supported");
Mat input_K = getBlob(node_proto, 1);
int K = input_K.at<int>(0);
layerParams.set("k", K);
}
addLayer(layerParams, node_proto);
}
void ONNXImporter::parseSimpleLayers(LayerParams& layerParams, const opencv_onnx::NodeProto& node_proto)
{
bool is_all_input_const = true;
@ -3972,6 +3988,7 @@ void ONNXImporter::buildDispatchMap_ONNX_AI(int opset_version)
dispatch["Tile"] = &ONNXImporter::parseTile;
dispatch["LayerNormalization"] = &ONNXImporter::parseLayerNorm;
dispatch["GroupNormalization"] = &ONNXImporter::parseInstanceNormalization;
dispatch["TopK"] = &ONNXImporter::parseTopK;
dispatch["Equal"] = dispatch["Greater"] = dispatch["Less"] = dispatch["Pow"] = dispatch["Add"] =
dispatch["Sub"] = dispatch["Mul"] = dispatch["Div"] = dispatch["GreaterOrEqual"] =

@ -2,20 +2,8 @@
"test_dequantizelinear",
"test_dequantizelinear_axis",
"test_dequantizelinear_blocked",
"test_dropout_default_ratio",
"test_globalmaxpool",
"test_globalmaxpool_precomputed",
"test_logsoftmax_large_number",
"test_logsoftmax_large_number_expanded",
"test_maxpool_1d_default",
"test_maxpool_2d_ceil",
"test_maxpool_2d_default",
"test_maxpool_2d_pads",
"test_maxpool_2d_precomputed_pads",
"test_maxpool_2d_precomputed_same_upper",
"test_maxpool_2d_precomputed_strides",
"test_maxpool_2d_same_upper",
"test_maxpool_2d_strides",
"test_maxpool_3d_default",
"test_pow",
"test_quantizelinear",
@ -23,12 +11,7 @@
"test_quantizelinear_blocked",
"test_softmax_large_number",
"test_softmax_large_number_expanded",
"test_split_equal_parts_1d",
"test_split_equal_parts_2d",
"test_split_equal_parts_default_axis",
"test_tan",
"test_reduce_l2_default_axes_keepdims_example", // Expected: (normL1) <= (l1), actual: 0.00490189 vs 0.004
"test_reduce_log_sum_exp_default_axes_keepdims_example", // Expected: (normL1) <= (l1), actual: 0.00671387 vs 0.004
"test_reduce_prod_default_axes_keepdims_example", // Expected: (normL1) <= (l1), actual: inf vs 0.004
"test_reduce_prod_default_axes_keepdims_random", // Expected: (normL1) <= (l1), actual: 18.6621 vs 0.004, Expected: (normInf) <= (lInf), actual: 18.6621 vs 0.02
"test_reduce_prod_do_not_keepdims_random", // Expected: (normL1) <= (l1), actual: 0.00436729 vs 0.004, Expected: (normInf) <= (lInf), actual: 0.0201836 vs 0.02
@ -38,16 +21,3 @@
"test_reduce_sum_square_do_not_keepdims_random", // Expected: (normL1) <= (l1), actual: 0.010789 vs 0.004, Expected: (normInf) <= (lInf), actual: 0.0290298 vs 0.02
"test_reduce_sum_square_keepdims_random", // Expected: (normL1) <= (l1), actual: 0.010789 vs 0.004, Expected: (normInf) <= (lInf), actual: 0.0290298 vs 0.02
"test_reduce_sum_square_negative_axes_keepdims_random", // Expected: (normL1) <= (l1), actual: 0.010789 vs 0.004, Expected: (normInf) <= (lInf), actual: 0.0290298 vs 0.02
"test_scatter_elements_with_axis",
"test_scatter_elements_with_duplicate_indices",
"test_scatter_elements_with_negative_indices",
"test_scatter_elements_with_reduction_max",
"test_scatter_elements_with_reduction_min",
"test_scatter_elements_without_axis",
"test_scatter_with_axis",
"test_scatter_without_axis",
"test_scatternd",
"test_scatternd_add",
"test_scatternd_max",
"test_scatternd_min",
"test_scatternd_multiply",

@ -6,16 +6,3 @@
"test_quantizelinear",
"test_quantizelinear_axis",
"test_quantizelinear_blocked",
"test_scatter_elements_with_axis",
"test_scatter_elements_with_duplicate_indices",
"test_scatter_elements_with_negative_indices",
"test_scatter_elements_with_reduction_max",
"test_scatter_elements_with_reduction_min",
"test_scatter_elements_without_axis",
"test_scatter_with_axis",
"test_scatter_without_axis",
"test_scatternd",
"test_scatternd_add",
"test_scatternd_max",
"test_scatternd_min",
"test_scatternd_multiply",

@ -1019,6 +1019,10 @@ TEST_P(Test_ONNX_layers, MatMul_init_bcast)
testONNXModels("matmul_init_bcast");
}
TEST_P(Test_ONNX_layers, MatMul_bcast_3dx2d) {
testONNXModels("matmul_bcast");
}
TEST_P(Test_ONNX_layers, MatMulAdd)
{
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2022010000)
@ -3274,6 +3278,40 @@ TEST_P(Test_ONNX_layers, ClipDivSharedConstant) {
testONNXModels("clip_div_shared_constant");
}
// Bug: https://github.com/opencv/opencv/issues/26076
TEST_P(Test_ONNX_layers, DISABLED_TopK) {
auto test = [&](const std::string &basename, double l1 = 0, double lInf = 0) {
std::string onnxmodel = _tf("models/" + basename + ".onnx", true);
Mat input = readTensorFromONNX(_tf("data/input_" + basename + ".pb"));
Mat output_ref_val = readTensorFromONNX(_tf("data/output_" + basename + "_0.pb")),
output_ref_ind = readTensorFromONNX(_tf("data/output_" + basename + "_1.pb"));
checkBackend(&input, &output_ref_val);
checkBackend(&input, &output_ref_ind);
Net net = readNetFromONNX(onnxmodel);
net.setPreferableBackend(backend);
net.setPreferableTarget(target);
net.setInput(input);
std::vector<Mat> outputs;
net.forward(outputs, std::vector<std::string>{"values", "indices"});
Mat output_res_val = outputs.front(),
output_res_ind = outputs.back();
output_ref_ind.convertTo(output_ref_ind, CV_32F); // TODO: revise this conversion in 5.x
normAssert(output_ref_val, output_res_val, (basename + " values").c_str(), l1 ? l1 : default_l1, lInf ? lInf : default_lInf);
normAssert(output_ref_ind, output_res_ind, (basename + " indices").c_str(), l1 ? l1 : default_l1, lInf ? lInf : default_lInf);
expectNoFallbacksFromIE(net);
};
test("top_k");
test("top_k_negative_axis");
test("top_k_smallest");
}
INSTANTIATE_TEST_CASE_P(/**/, Test_ONNX_nets, dnnBackendsAndTargets());
}} // namespace

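For illustration, the layer can also be instantiated directly through the TopKLayer::create factory from the implementation above; a minimal sketch using the parameter names read by TopKLayerImpl (network wiring omitted):

    #include <opencv2/dnn.hpp>

    int main()
    {
        cv::dnn::LayerParams lp;
        lp.set("axis", -1);    // last axis; normalized later in finalize()
        lp.set("largest", 1);  // pick maxima; 0 selects minima instead
        lp.set("sorted", 1);   // sorted == false is rejected by the constructor
        lp.set("k", 3);        // mandatory, enforced by CV_CheckTrue
        cv::Ptr<cv::dnn::TopKLayer> topk = cv::dnn::TopKLayer::create(lp);
        return topk.empty() ? 1 : 0;
    }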
@ -0,0 +1,19 @@
{
"whitelist":
{
"Feature2D": ["detect", "compute", "detectAndCompute", "descriptorSize", "descriptorType", "defaultNorm", "empty", "getDefaultName"],
"BRISK": ["create", "getDefaultName"],
"ORB": ["create", "setMaxFeatures", "setScaleFactor", "setNLevels", "setEdgeThreshold", "setFastThreshold", "setFirstLevel", "setWTA_K", "setScoreType", "setPatchSize", "getFastThreshold", "getDefaultName"],
"MSER": ["create", "detectRegions", "setDelta", "getDelta", "setMinArea", "getMinArea", "setMaxArea", "getMaxArea", "setPass2Only", "getPass2Only", "getDefaultName"],
"FastFeatureDetector": ["create", "setThreshold", "getThreshold", "setNonmaxSuppression", "getNonmaxSuppression", "setType", "getType", "getDefaultName"],
"AgastFeatureDetector": ["create", "setThreshold", "getThreshold", "setNonmaxSuppression", "getNonmaxSuppression", "setType", "getType", "getDefaultName"],
"GFTTDetector": ["create", "setMaxFeatures", "getMaxFeatures", "setQualityLevel", "getQualityLevel", "setMinDistance", "getMinDistance", "setBlockSize", "getBlockSize", "setHarrisDetector", "getHarrisDetector", "setK", "getK", "getDefaultName"],
"SimpleBlobDetector": ["create", "setParams", "getParams", "getDefaultName"],
"SimpleBlobDetector_Params": [],
"KAZE": ["create", "setExtended", "getExtended", "setUpright", "getUpright", "setThreshold", "getThreshold", "setNOctaves", "getNOctaves", "setNOctaveLayers", "getNOctaveLayers", "setDiffusivity", "getDiffusivity", "getDefaultName"],
"AKAZE": ["create", "setDescriptorType", "getDescriptorType", "setDescriptorSize", "getDescriptorSize", "setDescriptorChannels", "getDescriptorChannels", "setThreshold", "getThreshold", "setNOctaves", "getNOctaves", "setNOctaveLayers", "getNOctaveLayers", "setDiffusivity", "getDiffusivity", "getDefaultName"],
"DescriptorMatcher": ["add", "clear", "empty", "isMaskSupported", "train", "match", "knnMatch", "radiusMatch", "clone", "create"],
"BFMatcher": ["isMaskSupported", "create"],
"": ["drawKeypoints", "drawMatches", "drawMatchesKnn"]
}
}

@ -96,7 +96,9 @@ struct RoiPredicate
bool operator()( const KeyPoint& keyPt ) const
{
return !r.contains( keyPt.pt );
// workaround for https://github.com/opencv/opencv/issues/26016
// To keep the previous behaviour, keyPt.pt is cast to Point_<int>.
return !r.contains( Point_<int>(keyPt.pt) );
}
Rect r;

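A small sketch of why the cast matters, assuming the templated Rect_::contains overload that accepts Point2f directly (the 4.x behaviour change tracked by issue 26016). Conversion to Point_<int> goes through saturate_cast, which rounds to the nearest integer:

    #include <opencv2/core.hpp>
    #include <cstdio>

    int main()
    {
        cv::Rect r(0, 0, 500, 500);
        cv::Point2f pt(250.f, 499.5f);
        bool direct = r.contains(pt);                   // float compare: 499.5 < 500 -> true
        bool casted = r.contains(cv::Point_<int>(pt));  // 499.5f rounds to 500 -> false
        std::printf("direct=%d casted=%d\n", (int)direct, (int)casted);
        return 0;
    }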
@ -1232,8 +1232,8 @@ void cv::gimpl::ie::GIEExecutable::run(cv::gimpl::GIslandExecutable::IInput &in
// General algorithm:
// 1. Collect island inputs/outputs.
// 2. Create kernel context. (Every kernel has his own context).
// 3. If the EndOfStream message is recieved, wait until all passed task are done.
// 4. If the Exception message is revieved, propagate it further.
// 3. If the EndOfStream message is received, wait until all passed tasks are done.
// 4. If the Exception message is received, propagate it further.
// 5.
// 5.1 Run the kernel.
// 5.2 The kernel waits for all necessary infer requests and starts asynchronous execution.

@ -622,7 +622,7 @@ static void PostOutputs(::ov::InferRequest &infer_request,
ctx->eptr = std::move(eptr);
for (auto i : ade::util::iota(ctx->uu.params.num_out)) {
// NB: Copy data back only if execution finished sucessfuly
// NB: Copy data back only if execution finished successfully
// and inference only mode is disabled.
// Otherwise just post outputs to maintain streaming executor contract.
if (!ctx->eptr && !ctx->getOptions().inference_only) {

@ -316,7 +316,7 @@ public:
static std::string exception_msg()
{
return "InvalidSource sucessfuly failed!";
return "InvalidSource successfully failed!";
}
bool pull(cv::gapi::wip::Data& d) override {
@ -355,7 +355,7 @@ GAPI_OCV_KERNEL(GThrowExceptionKernel, GThrowExceptionOp)
{
static std::string exception_msg()
{
return "GThrowExceptionKernel sucessfuly failed";
return "GThrowExceptionKernel successfully failed";
}
static void run(const cv::Mat&, cv::Mat&)

@ -236,7 +236,7 @@ enum MorphShapes {
MORPH_CROSS = 1, //!< a cross-shaped structuring element:
//!< \f[E_{ij} = \begin{cases} 1 & \texttt{if } i=\texttt{anchor.y} \texttt{ or } j=\texttt{anchor.x} \\ 0 & \texttt{otherwise} \end{cases}\f]
MORPH_ELLIPSE = 2 //!< an elliptic structuring element, that is, a filled ellipse inscribed
//!< into the rectangle Rect(0, 0, esize.width, 0.esize.height)
//!< into the rectangle Rect(0, 0, esize.width, esize.height)
};
//! @} imgproc_filter

@ -0,0 +1,95 @@
{
"whitelist":
{
"": [
"Canny",
"GaussianBlur",
"Laplacian",
"HoughLines",
"HoughLinesP",
"HoughCircles",
"Scharr",
"Sobel",
"adaptiveThreshold",
"approxPolyDP",
"arcLength",
"bilateralFilter",
"blur",
"boundingRect",
"boxFilter",
"calcBackProject",
"calcHist",
"circle",
"compareHist",
"connectedComponents",
"connectedComponentsWithStats",
"contourArea",
"convexHull",
"convexityDefects",
"cornerHarris",
"cornerMinEigenVal",
"createCLAHE",
"createLineSegmentDetector",
"cvtColor",
"demosaicing",
"dilate",
"distanceTransform",
"distanceTransformWithLabels",
"drawContours",
"ellipse",
"ellipse2Poly",
"equalizeHist",
"erode",
"filter2D",
"findContours",
"fitEllipse",
"fitLine",
"floodFill",
"getAffineTransform",
"getPerspectiveTransform",
"getRotationMatrix2D",
"getStructuringElement",
"goodFeaturesToTrack",
"grabCut",
"integral",
"integral2",
"isContourConvex",
"line",
"matchShapes",
"matchTemplate",
"medianBlur",
"minAreaRect",
"minEnclosingCircle",
"moments",
"morphologyEx",
"pointPolygonTest",
"putText",
"pyrDown",
"pyrUp",
"rectangle",
"remap",
"resize",
"sepFilter2D",
"threshold",
"warpAffine",
"warpPerspective",
"warpPolar",
"watershed",
"fillPoly",
"fillConvexPoly",
"polylines"
],
"CLAHE": ["apply", "collectGarbage", "getClipLimit", "getTilesGridSize", "setClipLimit", "setTilesGridSize"],
"segmentation_IntelligentScissorsMB": [
"IntelligentScissorsMB",
"setWeights",
"setGradientMagnitudeMaxLimit",
"setEdgeFeatureZeroCrossingParameters",
"setEdgeFeatureCannyParameters",
"applyImage",
"applyImageFeatures",
"buildMap",
"getContour"
]
}
}

@ -1,19 +1,25 @@
#!/usr/bin/env python
'''
Test for discrete Fourier transform (dft)
'''
# Python 2/3 compatibility
from __future__ import print_function
import cv2 as cv
import numpy as np
import sys
import cv2 as cv
from tests_common import NewOpenCVTests
class imgproc_test(NewOpenCVTests):
class Imgproc_Tests(NewOpenCVTests):
def test_python_986(self):
cntls = []
img = np.zeros((100,100,3), dtype=np.uint8)
color = (0,0,0)
cnts = np.array(cntls, dtype=np.int32).reshape((1, -1, 2))
with self.assertRaises(cv.error):
    cv.fillPoly(img, cnts, color)
def test_filter2d(self):
img = self.get_sample('samples/data/lena.jpg', 1)
eps = 0.001
@ -22,6 +28,3 @@ class imgproc_test(NewOpenCVTests):
img_blur0 = cv.filter2D(img, cv.CV_32F, kernel*(1./9))
img_blur1 = cv.filter2Dp(img, kernel, ddepth=cv.CV_32F, scale=1./9)
self.assertLess(cv.norm(img_blur0 - img_blur1, cv.NORM_INF), eps)
if __name__ == '__main__':
NewOpenCVTests.bootstrap()

@ -13,7 +13,7 @@ CV_ENUM(InterTypeExtended, INTER_NEAREST, INTER_LINEAR, WARP_RELATIVE_MAP)
CV_ENUM(RemapMode, HALF_SIZE, UPSIDE_DOWN, REFLECTION_X, REFLECTION_BOTH)
typedef TestBaseWithParam< tuple<Size, InterType, BorderMode> > TestWarpAffine;
typedef TestBaseWithParam< tuple<Size, InterType, BorderMode> > TestWarpPerspective;
typedef TestBaseWithParam< tuple<Size, InterType, BorderMode, int> > TestWarpPerspective;
typedef TestBaseWithParam< tuple<Size, InterType, BorderMode, MatType> > TestWarpPerspectiveNear_t;
typedef TestBaseWithParam< tuple<MatType, Size, InterTypeExtended, BorderMode, RemapMode> > TestRemap;
@ -42,11 +42,7 @@ PERF_TEST_P( TestWarpAffine, WarpAffine,
TEST_CYCLE() warpAffine( src, dst, warpMat, sz, interType, borderMode, borderColor );
#ifdef __ANDROID__
SANITY_CHECK(dst, interType==INTER_LINEAR? 5 : 10);
#else
SANITY_CHECK(dst, 1);
#endif
}
PERF_TEST_P(TestWarpAffine, DISABLED_WarpAffine_ovx,
@ -72,29 +68,28 @@ PERF_TEST_P(TestWarpAffine, DISABLED_WarpAffine_ovx,
TEST_CYCLE() warpAffine(src, dst, warpMat, sz, interType, borderMode, borderColor);
#ifdef __ANDROID__
SANITY_CHECK(dst, interType == INTER_LINEAR ? 5 : 10);
#else
SANITY_CHECK(dst, 1);
#endif
}
PERF_TEST_P( TestWarpPerspective, WarpPerspective,
Combine(
Values( szVGA, sz720p, sz1080p ),
InterType::all(),
BorderMode::all()
BorderMode::all(),
Values(1, 3, 4)
)
)
{
Size sz, szSrc(512, 512);
int borderMode, interType;
int borderMode, interType, channels;
sz = get<0>(GetParam());
interType = get<1>(GetParam());
borderMode = get<2>(GetParam());
channels = get<3>(GetParam());
Scalar borderColor = Scalar::all(150);
Mat src(szSrc,CV_8UC4), dst(sz, CV_8UC4);
Mat src(szSrc, CV_8UC(channels)), dst(sz, CV_8UC(channels));
cvtest::fillGradient(src);
if(borderMode == BORDER_CONSTANT) cvtest::smoothBorder(src, borderColor, 1);
Mat rotMat = getRotationMatrix2D(Point2f(src.cols/2.f, src.rows/2.f), 30., 2.2);
@ -110,29 +105,27 @@ PERF_TEST_P( TestWarpPerspective, WarpPerspective,
TEST_CYCLE() warpPerspective( src, dst, warpMat, sz, interType, borderMode, borderColor );
#ifdef __ANDROID__
SANITY_CHECK(dst, interType==INTER_LINEAR? 5 : 10);
#else
SANITY_CHECK(dst, 1);
#endif
}
PERF_TEST_P(TestWarpPerspective, DISABLED_WarpPerspective_ovx,
Combine(
Values(szVGA, sz720p, sz1080p),
InterType::all(),
BorderMode::all()
BorderMode::all(),
Values(1)
)
)
{
Size sz, szSrc(512, 512);
int borderMode, interType;
int borderMode, interType, channels;
sz = get<0>(GetParam());
interType = get<1>(GetParam());
borderMode = get<2>(GetParam());
channels = get<3>(GetParam());
Scalar borderColor = Scalar::all(150);
Mat src(szSrc, CV_8UC1), dst(sz, CV_8UC1);
Mat src(szSrc, CV_8UC(channels)), dst(sz, CV_8UC(channels));
cvtest::fillGradient(src);
if (borderMode == BORDER_CONSTANT) cvtest::smoothBorder(src, borderColor, 1);
Mat rotMat = getRotationMatrix2D(Point2f(src.cols / 2.f, src.rows / 2.f), 30., 2.2);
@ -148,11 +141,7 @@ PERF_TEST_P(TestWarpPerspective, DISABLED_WarpPerspective_ovx,
TEST_CYCLE() warpPerspective(src, dst, warpMat, sz, interType, borderMode, borderColor);
#ifdef __ANDROID__
SANITY_CHECK(dst, interType == INTER_LINEAR ? 5 : 10);
#else
SANITY_CHECK(dst, 1);
#endif
}
PERF_TEST_P( TestWarpPerspectiveNear_t, WarpPerspectiveNear,
@ -194,11 +183,7 @@ PERF_TEST_P( TestWarpPerspectiveNear_t, WarpPerspectiveNear,
warpPerspective( src, dst, warpMat, size, interType, borderMode, borderColor );
}
#ifdef __ANDROID__
SANITY_CHECK(dst, interType==INTER_LINEAR? 5 : 10);
#else
SANITY_CHECK(dst, 1);
#endif
}
PERF_TEST_P( TestRemap, remap,

@ -202,8 +202,8 @@ struct CvtHelper
int stype = _src.type();
scn = CV_MAT_CN(stype), depth = CV_MAT_DEPTH(stype);
CV_Check(scn, VScn::contains(scn), "Invalid number of channels in input image");
CV_Check(dcn, VDcn::contains(dcn), "Invalid number of channels in output image");
CV_CheckChannels(scn, VScn::contains(scn), "Invalid number of channels in input image");
CV_CheckChannels(dcn, VDcn::contains(dcn), "Invalid number of channels in output image");
CV_CheckDepth(depth, VDepth::contains(depth), "Unsupported depth of input image");
if (_src.getObj() == _dst.getObj()) // inplace processing (#6653)
@ -247,8 +247,8 @@ struct OclHelper
int scn = src.channels();
int depth = src.depth();
CV_Check(scn, VScn::contains(scn), "Invalid number of channels in input image");
CV_Check(dcn, VDcn::contains(dcn), "Invalid number of channels in output image");
CV_CheckChannels(scn, VScn::contains(scn), "Invalid number of channels in input image");
CV_CheckChannels(dcn, VDcn::contains(dcn), "Invalid number of channels in output image");
CV_CheckDepth(depth, VDepth::contains(depth), "Unsupported depth of input image");
switch (sizePolicy)

@ -89,8 +89,8 @@ struct CvtHelper
int stype = _src.type();
scn = CV_MAT_CN(stype), depth = CV_MAT_DEPTH(stype);
CV_Check(scn, VScn::contains(scn), "Invalid number of channels in input image");
CV_Check(dcn, VDcn::contains(dcn), "Invalid number of channels in output image");
CV_CheckChannels(scn, VScn::contains(scn), "Invalid number of channels in input image");
CV_CheckChannels(dcn, VDcn::contains(dcn), "Invalid number of channels in output image");
CV_CheckDepth(depth, VDepth::contains(depth), "Unsupported depth of input image");
if (_src.getObj() == _dst.getObj()) // inplace processing (#6653)

@ -177,41 +177,102 @@ public:
vst1_u8(dst + 8, p.val[1]);
}
#else
v_uint16x8 _b2y = v_setall_u16((ushort)(rcoeff*2));
v_uint16x8 _g2y = v_setall_u16((ushort)(gcoeff*2));
v_uint16x8 _r2y = v_setall_u16((ushort)(bcoeff*2));
v_uint16x8 v255 = v_setall_u16(255);
v_int16x8 v_descale = v_setall_s16(static_cast<short>(1 << 14));
v_int16x8 dummy;
v_int16x8 cxrb;
v_int16x8 cxg2;
v_zip(v_setall_s16(static_cast<short>(rcoeff)),
v_setall_s16(static_cast<short>(bcoeff)),
cxrb,
dummy);
v_zip(v_setall_s16(static_cast<short>(gcoeff)),
v_setall_s16(static_cast<short>(2)),
cxg2,
dummy);
const uchar* bayer_end = bayer + width;
for( ; bayer <= bayer_end - 18; bayer += 14, dst += 14 )
for (; bayer < bayer_end - 14; bayer += 14, dst += 14)
{
v_uint16x8 r0 = v_reinterpret_as_u16(v_load(bayer));
v_uint16x8 r1 = v_reinterpret_as_u16(v_load(bayer+bayer_step));
v_uint16x8 r2 = v_reinterpret_as_u16(v_load(bayer+bayer_step*2));
v_uint16x8 b1 = v_add(v_shr<7>(v_shl<8>(r0)), v_shr<7>(v_shl<8>(r2)));
v_uint16x8 b0 = v_add(v_rotate_right<1>(b1), b1);
b1 = v_shl<1>(v_rotate_right<1>(b1));
v_uint16x8 g0 = v_add(v_shr<7>(r0), v_shr<7>(r2));
v_uint16x8 g1 = v_shr<7>(v_shl<8>(r1));
g0 = v_add(g0, v_add(v_rotate_right<1>(g1), g1));
g1 = v_shl<2>(v_rotate_right<1>(g1));
r0 = v_shr<8>(r1);
r1 = v_shl<2>(v_add(v_rotate_right<1>(r0), r0));
r0 = v_shl<3>(r0);
g0 = v_shr<2>(v_add(v_add(v_mul_hi(b0, _b2y), v_mul_hi(g0, _g2y)), v_mul_hi(r0, _r2y)));
g1 = v_shr<2>(v_add(v_add(v_mul_hi(b1, _b2y), v_mul_hi(g1, _g2y)), v_mul_hi(r1, _r2y)));
v_uint8x16 pack_lo, pack_hi;
v_zip(v_pack_u(v_reinterpret_as_s16(g0), v_reinterpret_as_s16(g0)),
v_pack_u(v_reinterpret_as_s16(g1), v_reinterpret_as_s16(g1)),
pack_lo, pack_hi);
v_store(dst, pack_lo);
v_uint16x8 first_line = v_reinterpret_as_u16(v_load(bayer));
v_uint16x8 second_line = v_reinterpret_as_u16(v_load(bayer + bayer_step));
v_uint16x8 third_line = v_reinterpret_as_u16(v_load(bayer + bayer_step * 2));
// bayer[0]
v_uint16x8 first_line0 = v_and(first_line, v255);
// bayer[bayer_step*2]
v_uint16x8 third_line0 = v_and(third_line, v255);
// bayer[0] + bayer[bayer_step*2]
v_uint16x8 first_third_line0 = v_add(first_line0, third_line0);
// bayer[2] + bayer[bayer_step*2+2]
v_uint16x8 first_third_line2 = v_rotate_right<1>(first_third_line0);
// bayer[0] + bayer[2] + bayer[bayer_step*2] + bayer[bayer_step*2+2]
v_int16x8 r0 = v_reinterpret_as_s16(v_add(first_third_line0, first_third_line2));
// (bayer[2] + bayer[bayer_step*2+2]) * 2
v_int16x8 r1 = v_reinterpret_as_s16(v_shl<1>(first_third_line2));
// bayer[bayer_step+1]
v_uint16x8 second_line1 = v_shr<8>(second_line);
// bayer[bayer_step+1] * 4
v_int16x8 b0 = v_reinterpret_as_s16(v_shl<2>(second_line1));
// bayer[bayer_step+3]
v_uint16x8 second_line3 = v_rotate_right<1>(second_line1);
// bayer[bayer_step+1] + bayer[bayer_step+3]
v_uint16x8 second_line13 = v_add(second_line1, second_line3);
// (bayer[bayer_step+1] + bayer[bayer_step+3]) * 2
v_int16x8 b1 = v_reinterpret_as_s16(v_shl(second_line13, 1));
// bayer[1]
v_uint16x8 first_line1 = v_shr<8>(first_line);
// bayer[bayer_step]
v_uint16x8 second_line0 = v_and(second_line, v255);
// bayer[bayer_step+2]
v_uint16x8 second_line2 = v_rotate_right<1>(second_line0);
// bayer[bayer_step] + bayer[bayer_step+2]
v_uint16x8 second_line02 = v_add(second_line0, second_line2);
// bayer[bayer_step*2+1]
v_uint16x8 third_line1 = v_shr<8>(third_line);
// bayer[1] + bayer[bayer_step*2+1]
v_uint16x8 first_third_line1 = v_add(first_line1, third_line1);
// bayer[1] + bayer[bayer_step] + bayer[bayer_step+2] + bayer[bayer_step*2+1]
v_int16x8 g0 = v_reinterpret_as_s16(v_add(first_third_line1, second_line02));
// bayer[bayer_step+2] * 4
v_int16x8 g1 = v_reinterpret_as_s16(v_shl<2>(second_line2));
v_int16x8 rb0;
v_int16x8 rb1;
v_int16x8 rb2;
v_int16x8 rb3;
v_zip(r0, b0, rb0, rb1);
v_zip(r1, b1, rb2, rb3);
v_int16x8 gd0;
v_int16x8 gd1;
v_int16x8 gd2;
v_int16x8 gd3;
v_zip(g0, v_descale, gd0, gd1);
v_zip(g1, v_descale, gd2, gd3);
v_int32x4 gray_even0 = v_shr<16>(v_add(v_dotprod(rb0, cxrb), v_dotprod(gd0, cxg2)));
v_int32x4 gray_even1 = v_shr<16>(v_add(v_dotprod(rb1, cxrb), v_dotprod(gd1, cxg2)));
v_int32x4 gray_odd0 = v_shr<16>(v_add(v_dotprod(rb2, cxrb), v_dotprod(gd2, cxg2)));
v_int32x4 gray_odd1 = v_shr<16>(v_add(v_dotprod(rb3, cxrb), v_dotprod(gd3, cxg2)));
v_int16x8 gray_even = v_pack(gray_even0, gray_even1);
v_int16x8 gray_odd = v_pack(gray_odd0, gray_odd1);
v_int16x8 gray_d0;
v_int16x8 gray_d1;
v_zip(gray_even, gray_odd, gray_d0, gray_d1);
v_uint8x16 gray = v_pack(v_reinterpret_as_u16(gray_d0), v_reinterpret_as_u16(gray_d1));
v_store(dst, gray);
}
#endif
return (int)(bayer - (bayer_end - width));
return static_cast<int>(bayer - (bayer_end - width));
}
int bayer2RGB(const uchar* bayer, int bayer_step, uchar* dst, int width, int blue) const

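The zip/dot-product pairs above implement 16-bit fixed-point blending with a built-in rounding term: v_dotprod(rb, cxrb) contributes r*rcoeff + b*bcoeff, v_dotprod(gd, cxg2) contributes g*gcoeff + (1 << 14) * 2, and the final shift by 16 descales. A scalar sketch of that arithmetic, using hypothetical BT.601-style weights rather than the converter's actual coefficient tables:

    #include <cstdio>

    int main()
    {
        // Hypothetical weights scaled by 2^16 (0.299, 0.587, 0.114);
        // the real code takes its coefficients from the conversion tables.
        const int rcoeff = 19595, gcoeff = 38470, bcoeff = 7471;
        const int r = 200, g = 100, b = 50;
        int gray = (r * rcoeff + g * gcoeff + b * bcoeff + (1 << 15)) >> 16;
        std::printf("gray = %d\n", gray);  // 124
        return 0;
    }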
@ -2044,8 +2044,11 @@ void fillPoly( InputOutputArray _img, const Point** pts, const int* npts, int nc
edges.reserve( total + 1 );
for (i = 0; i < ncontours; i++)
{
std::vector<Point2l> _pts(pts[i], pts[i] + npts[i]);
CollectPolyEdges(img, _pts.data(), npts[i], edges, buf, line_type, shift, offset);
if (npts[i] > 0 && pts[i])
{
std::vector<Point2l> _pts(pts[i], pts[i] + npts[i]);
CollectPolyEdges(img, _pts.data(), npts[i], edges, buf, line_type, shift, offset);
}
}
FillEdgeCollection(img, edges, buf, line_type);
@ -2105,7 +2108,7 @@ void cv::fillPoly(InputOutputArray img, InputArrayOfArrays pts,
for( i = 0; i < ncontours; i++ )
{
Mat p = pts.getMat(manyContours ? i : -1);
CV_Assert(p.checkVector(2, CV_32S) >= 0);
CV_Assert(p.checkVector(2, CV_32S) > 0);
ptsptr[i] = p.ptr<Point>();
npts[i] = p.rows*p.cols*p.channels()/2;
}

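With the tightened checks above, every contour passed to fillPoly must contain at least one point; a minimal sketch of a valid call (an empty contour now fails the CV_Assert instead of reading past the buffer):

    #include <opencv2/imgproc.hpp>
    #include <vector>

    int main()
    {
        cv::Mat img = cv::Mat::zeros(100, 100, CV_8UC1);
        std::vector<std::vector<cv::Point>> polys = {
            {{10, 10}, {90, 10}, {50, 80}}  // one triangle, npts > 0
        };
        cv::fillPoly(img, polys, cv::Scalar(255));
        // polys = {{}} would now trigger CV_Assert(p.checkVector(2, CV_32S) > 0)
        return 0;
    }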
@ -1247,6 +1247,33 @@ inline int hal_ni_pyrdown(const uchar* src_data, size_t src_step, int src_width,
#define cv_hal_pyrdown hal_ni_pyrdown
//! @endcond
/**
@brief Performs Gaussian blur and downsampling for an input tile, with optional margins for submatrix processing
@param src_data Source image data
@param src_step Source image step
@param src_width Source image width
@param src_height Source image height
@param dst_data Destination image data
@param dst_step Destination image step
@param dst_width Destination image width
@param dst_height Destination image height
@param depth Depth of source and destination images
@param cn Number of channels
@param margin_left Left margin for source image
@param margin_top Top margin for source image
@param margin_right Right margin for source image
@param margin_bottom Bottom margin for source image
@param border_type Border type
*/
inline int hal_ni_pyrdown_offset(const uchar* src_data, size_t src_step, int src_width, int src_height,
uchar* dst_data, size_t dst_step, int dst_width, int dst_height,
int depth, int cn, int margin_left, int margin_top, int margin_right, int margin_bottom, int border_type)
{ return CV_HAL_ERROR_NOT_IMPLEMENTED; }
//! @cond IGNORED
#define cv_hal_pyrdown_offset hal_ni_pyrdown_offset
//! @endcond
/**
@brief Canny edge detector
@param src_data Source image data

@ -845,7 +845,7 @@ void medianBlur(const Mat& src0, /*const*/ Mat& dst, int ksize)
CV_INSTRUMENT_REGION();
bool useSortNet = ksize == 3 || (ksize == 5
#if !(CV_SIMD)
#if !((CV_SIMD || CV_SIMD_SCALABLE))
&& ( src0.depth() > CV_8U || src0.channels() == 2 || src0.channels() > 4 )
#endif
);
@ -881,7 +881,7 @@ void medianBlur(const Mat& src0, /*const*/ Mat& dst, int ksize)
double img_size_mp = (double)(src0.total())/(1 << 20);
if( ksize <= 3 + (img_size_mp < 1 ? 12 : img_size_mp < 4 ? 6 : 2)*
(CV_SIMD ? 1 : 3))
((CV_SIMD || CV_SIMD_SCALABLE) ? 1 : 3))
medianBlur_8u_Om( src, dst, ksize );
else
medianBlur_8u_O1( src, dst, ksize );

@ -1265,69 +1265,6 @@ static bool ocl_pyrUp( InputArray _src, OutputArray _dst, const Size& _dsz, int
}
#if defined(HAVE_IPP)
namespace cv
{
static bool ipp_pyrdown( InputArray _src, OutputArray _dst, const Size& _dsz, int borderType )
{
CV_INSTRUMENT_REGION_IPP();
#if IPP_VERSION_X100 >= 810 && !IPP_DISABLE_PYRAMIDS_DOWN
Size dsz = _dsz.empty() ? Size((_src.cols() + 1)/2, (_src.rows() + 1)/2) : _dsz;
bool isolated = (borderType & BORDER_ISOLATED) != 0;
int borderTypeNI = borderType & ~BORDER_ISOLATED;
Mat src = _src.getMat();
_dst.create( dsz, src.type() );
Mat dst = _dst.getMat();
int depth = src.depth();
{
bool isolated = (borderType & BORDER_ISOLATED) != 0;
int borderTypeNI = borderType & ~BORDER_ISOLATED;
if (borderTypeNI == BORDER_DEFAULT && (!src.isSubmatrix() || isolated) && dsz == Size(src.cols*2, src.rows*2))
{
typedef IppStatus (CV_STDCALL * ippiPyrUp)(const void* pSrc, int srcStep, void* pDst, int dstStep, IppiSize srcRoi, Ipp8u* buffer);
int type = src.type();
CV_SUPPRESS_DEPRECATED_START
ippiPyrUp pyrUpFunc = type == CV_8UC1 ? (ippiPyrUp) ippiPyrUp_Gauss5x5_8u_C1R :
type == CV_8UC3 ? (ippiPyrUp) ippiPyrUp_Gauss5x5_8u_C3R :
type == CV_32FC1 ? (ippiPyrUp) ippiPyrUp_Gauss5x5_32f_C1R :
type == CV_32FC3 ? (ippiPyrUp) ippiPyrUp_Gauss5x5_32f_C3R : 0;
CV_SUPPRESS_DEPRECATED_END
if (pyrUpFunc)
{
int bufferSize;
IppiSize srcRoi = { src.cols, src.rows };
IppDataType dataType = depth == CV_8U ? ipp8u : ipp32f;
CV_SUPPRESS_DEPRECATED_START
IppStatus ok = ippiPyrUpGetBufSize_Gauss5x5(srcRoi.width, dataType, src.channels(), &bufferSize);
CV_SUPPRESS_DEPRECATED_END
if (ok >= 0)
{
Ipp8u* buffer = ippsMalloc_8u_L(bufferSize);
ok = pyrUpFunc(src.data, (int) src.step, dst.data, (int) dst.step, srcRoi, buffer);
ippsFree(buffer);
if (ok >= 0)
{
CV_IMPL_ADD(CV_IMPL_IPP);
return true;
}
}
}
}
}
#else
CV_UNUSED(_src); CV_UNUSED(_dst); CV_UNUSED(_dsz); CV_UNUSED(borderType);
#endif
return false;
}
}
#endif
void cv::pyrDown( InputArray _src, OutputArray _dst, const Size& _dsz, int borderType )
{
CV_INSTRUMENT_REGION();
@ -1343,15 +1280,19 @@ void cv::pyrDown( InputArray _src, OutputArray _dst, const Size& _dsz, int borde
Mat dst = _dst.getMat();
int depth = src.depth();
CALL_HAL(pyrDown, cv_hal_pyrdown, src.data, src.step, src.cols, src.rows, dst.data, dst.step, dst.cols, dst.rows, depth, src.channels(), borderType);
#ifdef HAVE_IPP
bool isolated = (borderType & BORDER_ISOLATED) != 0;
int borderTypeNI = borderType & ~BORDER_ISOLATED;
#endif
CV_IPP_RUN(borderTypeNI == BORDER_DEFAULT && (!_src.isSubmatrix() || isolated) && dsz == Size((_src.cols() + 1)/2, (_src.rows() + 1)/2),
ipp_pyrdown( _src, _dst, _dsz, borderType));
if(src.isSubmatrix() && !(borderType & BORDER_ISOLATED))
{
Point ofs;
Size wsz(src.cols, src.rows);
src.locateROI( wsz, ofs );
CALL_HAL(pyrDown, cv_hal_pyrdown_offset, src.data, src.step, src.cols, src.rows,
dst.data, dst.step, dst.cols, dst.rows, depth, src.channels(),
ofs.x, ofs.y, wsz.width - src.cols - ofs.x, wsz.height - src.rows - ofs.y, borderType & (~BORDER_ISOLATED));
}
else
{
CALL_HAL(pyrDown, cv_hal_pyrdown, src.data, src.step, src.cols, src.rows, dst.data, dst.step, dst.cols, dst.rows, depth, src.channels(), borderType);
}
PyrFunc func = 0;
if( depth == CV_8U )

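The margins passed to cv_hal_pyrdown_offset come straight from Mat::locateROI; a small sketch of the same arithmetic in isolation:

    #include <opencv2/core.hpp>
    #include <cstdio>

    int main()
    {
        cv::Mat parent(100, 100, CV_8UC1);
        cv::Mat src = parent(cv::Rect(10, 20, 30, 40));  // a submatrix
        cv::Size wsz;
        cv::Point ofs;
        src.locateROI(wsz, ofs);  // parent size and ROI offset within it
        int margin_left   = ofs.x;                          // 10
        int margin_top    = ofs.y;                          // 20
        int margin_right  = wsz.width  - src.cols - ofs.x;  // 60
        int margin_bottom = wsz.height - src.rows - ofs.y;  // 40
        std::printf("%d %d %d %d\n", margin_left, margin_top, margin_right, margin_bottom);
        return 0;
    }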
@ -1863,6 +1863,26 @@ TEST(Imgproc_ColorBayer, regression)
EXPECT_EQ(0, countNonZero(diff.reshape(1) > 1));
}
TEST(Imgproc_ColorBayer2Gray, regression_25823)
{
const int n = 100;
Mat src(n, n, CV_8UC1);
Mat dst;
for (int i = 0; i < src.rows; ++i)
{
for (int j = 0; j < src.cols; ++j)
{
src.at<uchar>(i, j) = (i + j) % 2;
}
}
cvtColor(src, dst, COLOR_BayerBG2GRAY);
Mat gold(n, n, CV_8UC1, Scalar(1));
EXPECT_EQ(0, cv::norm(dst, gold, NORM_INF));
}
TEST(Imgproc_ColorBayerVNG, regression)
{
cvtest::TS* ts = cvtest::TS::ptr();
@ -3203,4 +3223,20 @@ TEST(ImgProc_RGB2Lab, NaN_21111)
#endif
}
// See https://github.com/opencv/opencv/issues/25971
// If the number of channels does not match the selected cv::ColorConversionCodes,
// e.code must be cv::Error::BadNumChannels.
TEST(ImgProc_cvtColor_InvalidNumOfChannels, regression_25971)
{
try {
cv::Mat src = cv::Mat::zeros(100, 100, CV_8UC1);
cv::Mat dst;
cv::cvtColor(src, dst, COLOR_RGB2GRAY); // single-channel input, but RGB2GRAY expects 3 or 4 channels
FAIL() << "cv::Exception was expected but not thrown.";
}catch(const cv::Exception& e) {
EXPECT_EQ(e.code, cv::Error::BadNumChannels);
}catch(...) {
FAIL() << "An unexpected exception happened.";
}
}
}} // namespace

@ -65,4 +65,31 @@ TEST(Imgproc_CornerSubPix, out_of_image_corners)
ASSERT_TRUE(Rect(0, 0, image.cols, image.rows).contains(corners.front()));
}
// See https://github.com/opencv/opencv/issues/26016
TEST(Imgproc_CornerSubPix, corners_on_the_edge)
{
cv::Mat image(500, 500, CV_8UC1);
cv::Size win(1, 1);
cv::Size zeroZone(-1, -1);
cv::TermCriteria criteria;
std::vector<cv::Point2f> cornersOK1 = { cv::Point2f(250, std::nextafter(499.5f, 499.5f - 1.0f)) };
EXPECT_NO_THROW( cv::cornerSubPix(image, cornersOK1, win, zeroZone, criteria) ) << cornersOK1;
std::vector<cv::Point2f> cornersOK2 = { cv::Point2f(250, 499.5f) };
EXPECT_NO_THROW( cv::cornerSubPix(image, cornersOK2, win, zeroZone, criteria) ) << cornersOK2;
std::vector<cv::Point2f> cornersOK3 = { cv::Point2f(250, std::nextafter(499.5f, 499.5f + 1.0f)) };
EXPECT_NO_THROW( cv::cornerSubPix(image, cornersOK3, win, zeroZone, criteria) ) << cornersOK3;
std::vector<cv::Point2f> cornersOK4 = { cv::Point2f(250, std::nextafter(500.0f, 500.0f - 1.0f)) };
EXPECT_NO_THROW( cv::cornerSubPix(image, cornersOK4, win, zeroZone, criteria) ) << cornersOK4;
std::vector<cv::Point2f> cornersNG1 = { cv::Point2f(250, 500.0f) };
EXPECT_ANY_THROW( cv::cornerSubPix(image, cornersNG1, win, zeroZone, criteria) ) << cornersNG1;
std::vector<cv::Point2f> cornersNG2 = { cv::Point2f(250, std::nextafter(500.0f, 500.0f + 1.0f)) };
EXPECT_ANY_THROW( cv::cornerSubPix(image, cornersNG2, win, zeroZone, criteria) ) << cornersNG2;
}
}} // namespace

@ -15,6 +15,59 @@ file(MAKE_DIRECTORY "${java_src_dir}")
ocv_copyfiles_append_dir(JAVA_SRC_COPY "${OPENCV_JAVA_BINDINGS_DIR}/gen/java" "${java_src_dir}")
set(SOURSE_SETS_JNI_LIBS_SRC_DIRS "'native/libs'")
set(SOURSE_SETS_JAVA_SRC_DIRS "'java/src'")
set(SOURSE_SETS_RES_SRC_DIRS "'java/res'")
set(SOURSE_SETS_MANIFEST_SRC_FILE "'java/AndroidManifest.xml'")
set(BUILD_GRADLE_COMPILE_OPTIONS "
compileOptions {
sourceCompatibility JavaVersion.VERSION_${ANDROID_GRADLE_JAVA_VERSION_INIT}
targetCompatibility JavaVersion.VERSION_${ANDROID_GRADLE_JAVA_VERSION_INIT}
}
")
set(MAVEN_PUBLISH_PLUGIN_DECLARATION "apply plugin: 'maven-publish'")
set(BUILD_GRADLE_ANDROID_PUBLISHING_CONFIG "
buildFeatures {
prefabPublishing true
buildConfig true
}
prefab {
opencv_jni_shared {
headers 'native/jni/include'
}
}
publishing {
singleVariant('release') {
withSourcesJar()
withJavadocJar()
}
}
")
set(BUILD_GRADLE_PUBLISHING_CONFIG "
publishing {
publications {
release(MavenPublication) {
groupId = 'org.opencv'
artifactId = 'opencv'
version = '${OPENCV_VERSION_PLAIN}'
afterEvaluate {
from components.release
}
}
}
repositories {
maven {
name = 'myrepo'
url = \"\${project.buildDir}/repo\"
}
}
}
")
if(ANDROID_EXECUTABLE)
ocv_assert(ANDROID_TOOLS_Pkg_Revision GREATER 13)
@ -108,6 +161,7 @@ if(ANDROID_NATIVE_API_LEVEL GREATER 21)
else()
ocv_update(ANDROID_TARGET_SDK_VERSION "21")
endif()
configure_file("${CMAKE_CURRENT_SOURCE_DIR}/build.gradle.in" "${CMAKE_CURRENT_BINARY_DIR}/build.gradle" @ONLY)
install(FILES "${CMAKE_CURRENT_BINARY_DIR}/build.gradle" DESTINATION ${JAVA_INSTALL_ROOT}/.. COMPONENT java)
@ -117,12 +171,23 @@ else() # gradle build
# Android Gradle-based project
#
configure_file("${CMAKE_CURRENT_SOURCE_DIR}/build.gradle.in" "${ANDROID_TMP_INSTALL_BASE_DIR}/opencv/build.gradle" @ONLY)
#TODO: INSTALL ONLY
ocv_copyfiles_append_dir(JAVA_SRC_COPY "${OPENCV_JAVA_BINDINGS_DIR}/gen/android/java" "${java_src_dir}")
ocv_copyfiles_append_dir(JAVA_SRC_COPY "${OPENCV_JAVA_BINDINGS_DIR}/gen/android-21/java" "${java_src_dir}")
ocv_copyfiles_append_dir(JAVA_SRC_COPY "${OPENCV_JAVA_BINDINGS_DIR}/gen/android-24/java" "${java_src_dir}")
# copy boilerplate
set(SOURSE_SETS_JNI_LIBS_SRC_DIRS "'../../jni'")
set(SOURSE_SETS_JAVA_SRC_DIRS "'src'")
set(SOURSE_SETS_RES_SRC_DIRS "'${OpenCV_SOURCE_DIR}/modules/java/android_sdk/android_gradle_lib/res'")
set(SOURSE_SETS_MANIFEST_SRC_FILE "'AndroidManifest.xml'")
set(BUILD_GRADLE_COMPILE_OPTIONS "")
set(MAVEN_PUBLISH_PLUGIN_DECLARATION "")
set(BUILD_GRADLE_ANDROID_PUBLISHING_CONFIG "")
set(BUILD_GRADLE_PUBLISHING_CONFIG "")
set(__base_dir "${CMAKE_CURRENT_SOURCE_DIR}/android_gradle_lib/")
file(GLOB_RECURSE seed_project_files_rel RELATIVE "${__base_dir}/" "${__base_dir}/*")
list(REMOVE_ITEM seed_project_files_rel "${ANDROID_MANIFEST_FILE}")
@ -134,6 +199,7 @@ foreach(file ${seed_project_files_rel})
install(FILES "${OPENCV_JAVA_DIR}/${file}" DESTINATION "${JAVA_INSTALL_ROOT}/${install_subdir}" COMPONENT java)
endif()
endforeach()
configure_file("${CMAKE_CURRENT_SOURCE_DIR}/build.gradle.in" "${OPENCV_JAVA_DIR}/build.gradle" @ONLY)
# copy libcxx_helper
set(__base_dir "${CMAKE_CURRENT_SOURCE_DIR}/")
@ -165,7 +231,6 @@ file(REMOVE "${OPENCV_DEPHELPER}/${the_module}_android") # force rebuild after
add_custom_target(${the_module}_android ALL DEPENDS "${OPENCV_DEPHELPER}/${the_module}_android" SOURCES "${__base_dir}/${ANDROID_MANIFEST_FILE}")
configure_file("${CMAKE_CURRENT_SOURCE_DIR}/build.gradle.in" "${ANDROID_TMP_INSTALL_BASE_DIR}/opencv/build.gradle" @ONLY)
install(FILES "${ANDROID_TMP_INSTALL_BASE_DIR}/opencv/build.gradle" DESTINATION ${JAVA_INSTALL_ROOT}/.. COMPONENT java)
install(DIRECTORY "${java_src_dir}" DESTINATION "${JAVA_INSTALL_ROOT}" COMPONENT java)

@ -1,58 +0,0 @@
apply plugin: 'com.android.library'
@KOTLIN_PLUGIN_DECLARATION@
def openCVersionName = "@OPENCV_VERSION@"
def openCVersionCode = ((@OPENCV_VERSION_MAJOR@ * 100 + @OPENCV_VERSION_MINOR@) * 100 + @OPENCV_VERSION_PATCH@) * 10 + 0
android {
@OPENCV_ANDROID_NAMESPACE_DECLARATION@
compileSdkVersion @ANDROID_COMPILE_SDK_VERSION@
defaultConfig {
minSdkVersion @ANDROID_MIN_SDK_VERSION@
targetSdkVersion @ANDROID_TARGET_SDK_VERSION@
versionCode openCVersionCode
versionName openCVersionName
externalNativeBuild {
cmake {
arguments "-DANDROID_STL=@ANDROID_STL@"
targets "opencv_jni_shared"
}
}
}
buildTypes {
debug {
packagingOptions {
doNotStrip '**/*.so' // controlled by OpenCV CMake scripts
}
}
release {
packagingOptions {
doNotStrip '**/*.so' // controlled by OpenCV CMake scripts
}
minifyEnabled false
proguardFiles getDefaultProguardFile('proguard-android.txt'), 'proguard-rules.txt'
}
}
sourceSets {
main {
jniLibs.srcDirs = ['../../jni']
java.srcDirs = ['src'] // TODO Use original files instead of copied into build directory
res.srcDirs = ['@OpenCV_SOURCE_DIR@/modules/java/android_sdk/android_gradle_lib/res']
manifest.srcFile 'AndroidManifest.xml'
}
}
externalNativeBuild {
cmake {
path (project.projectDir.toString() + '/libcxx_helper/CMakeLists.txt')
}
}
}
dependencies {
}

@ -89,7 +89,7 @@
//
apply plugin: 'com.android.library'
apply plugin: 'maven-publish'
@MAVEN_PUBLISH_PLUGIN_DECLARATION@
try {
@KOTLIN_PLUGIN_DECLARATION@
println "Configure OpenCV with Kotlin"
@ -120,12 +120,7 @@ android {
}
}
}
compileOptions {
sourceCompatibility JavaVersion.VERSION_@ANDROID_GRADLE_JAVA_VERSION_INIT@
targetCompatibility JavaVersion.VERSION_@ANDROID_GRADLE_JAVA_VERSION_INIT@
}
@BUILD_GRADLE_COMPILE_OPTIONS@
buildTypes {
debug {
packagingOptions {
@ -141,29 +136,12 @@ android {
}
}
buildFeatures {
prefabPublishing true
buildConfig true
}
prefab {
opencv_jni_shared {
headers "native/jni/include"
}
}
sourceSets {
main {
jniLibs.srcDirs = ['native/libs']
java.srcDirs = ['java/src']
res.srcDirs = ['java/res']
manifest.srcFile 'java/AndroidManifest.xml'
}
}
publishing {
singleVariant('release') {
withSourcesJar()
withJavadocJar()
jniLibs.srcDirs = [@SOURSE_SETS_JNI_LIBS_SRC_DIRS@]
java.srcDirs = [@SOURSE_SETS_JAVA_SRC_DIRS@]
res.srcDirs = [@SOURSE_SETS_RES_SRC_DIRS@]
manifest.srcFile @SOURSE_SETS_MANIFEST_SRC_FILE@
}
}
@ -172,27 +150,8 @@ android {
path (project.projectDir.toString() + '/libcxx_helper/CMakeLists.txt')
}
}
@BUILD_GRADLE_ANDROID_PUBLISHING_CONFIG@
}
publishing {
publications {
release(MavenPublication) {
groupId = 'org.opencv'
artifactId = 'opencv'
version = '@OPENCV_VERSION_PLAIN@'
afterEvaluate {
from components.release
}
}
}
repositories {
maven {
name = 'myrepo'
url = "${project.buildDir}/repo"
}
}
}
@BUILD_GRADLE_PUBLISHING_CONFIG@
dependencies {
}

@ -38,8 +38,21 @@ set(scripts_hdr_parser "${JS_SOURCE_DIR}/../python/src2/hdr_parser.py")
if(DEFINED ENV{OPENCV_JS_WHITELIST})
set(OPENCV_JS_WHITELIST_FILE "$ENV{OPENCV_JS_WHITELIST}")
message(STATUS "Use white list from environment ${OPENCV_JS_WHITELIST_FILE}")
else()
set(OPENCV_JS_WHITELIST_FILE "${OpenCV_SOURCE_DIR}/platforms/js/opencv_js.config.py")
# generate whitelist from modules/<module_name>/misc/js/gen_dict.json
set(OPENCV_JS_WHITELIST_FILE "${CMAKE_CURRENT_BINARY_DIR}/whitelist.json")
foreach(m ${OPENCV_JS_MODULES})
set(js_whitelist "${OPENCV_MODULE_${m}_LOCATION}/misc/js/gen_dict.json")
if (EXISTS "${js_whitelist}")
file(READ "${js_whitelist}" whitelist_content)
list(APPEND OPENCV_JS_WHITELIST_CONTENT "\"${m}\": ${whitelist_content}")
endif()
endforeach(m)
string(REPLACE ";" ", \n" OPENCV_JS_WHITELIST_CONTENT_STRING "${OPENCV_JS_WHITELIST_CONTENT}")
set(OPENCV_JS_WHITELIST_CONTENT_STRING "{\n${OPENCV_JS_WHITELIST_CONTENT_STRING}}\n")
ocv_update_file("${OPENCV_JS_WHITELIST_FILE}" "${OPENCV_JS_WHITELIST_CONTENT_STRING}")
message(STATUS "Use autogenerated whitelist ${OPENCV_JS_WHITELIST_FILE}")
endif()
add_custom_command(

@ -76,6 +76,7 @@ if sys.version_info[0] >= 3:
else:
from cStringIO import StringIO
import json
func_table = {}
@ -103,11 +104,32 @@ def makeWhiteList(module_list):
wl[k] = m[k]
return wl
def makeWhiteListJson(module_list):
wl = {}
for n, gen_dict in module_list.items():
m = gen_dict["whitelist"]
for k in m.keys():
if k in wl:
wl[k] += m[k]
else:
wl[k] = m[k]
return wl
def makeNamespacePrefixOverride(module_list):
wl = {}
for n, gen_dict in module_list.items():
if "namespace_prefix_override" in gen_dict:
m = gen_dict["namespace_prefix_override"]
for k in m.keys():
if k in wl:
wl[k] += m[k]
else:
wl[k] = m[k]
return wl
white_list = None
namespace_prefix_override = {
'dnn' : '',
'aruco' : '',
}
namespace_prefix_override = None
# Features to be exported
export_enums = False
@ -834,6 +856,7 @@ class JSWrapperGenerator(object):
if method.cname in ignore_list:
continue
if not method.name in white_list[method.class_name]:
#print('Not in whitelist: "{}"'.format(method.name))
continue
if method.is_constructor:
for variant in method.variants:
@ -938,9 +961,9 @@ if __name__ == "__main__":
if len(sys.argv) < 5:
print("Usage:\n", \
os.path.basename(sys.argv[0]), \
"<full path to hdr_parser.py> <bindings.cpp> <headers.txt> <core_bindings.cpp> <opencv_js.config.py>")
"<full path to hdr_parser.py> <bindings.cpp> <headers.txt> <core_bindings.cpp> <whitelist.json or opencv_js.config.py>")
print("Current args are: ", ", ".join(["'"+a+"'" for a in sys.argv]))
exit(0)
exit(1)
dstdir = "."
hdr_parser_path = os.path.abspath(sys.argv[1])
@ -953,8 +976,23 @@ if __name__ == "__main__":
headers = open(sys.argv[3], 'r').read().split(';')
coreBindings = sys.argv[4]
whiteListFile = sys.argv[5]
exec(open(whiteListFile).read())
assert(white_list)
if whiteListFile.endswith(".json") or whiteListFile.endswith(".JSON"):
with open(whiteListFile) as f:
gen_dict = json.load(f)
f.close()
white_list = makeWhiteListJson(gen_dict)
namespace_prefix_override = makeNamespacePrefixOverride(gen_dict)
elif whiteListFile.endswith(".py") or whiteListFile.endswith(".PY"):
exec(open(whiteListFile).read())
assert(white_list)
namespace_prefix_override = {
'dnn' : '',
'aruco' : '',
}
else:
print("Unexpected format of OpenCV config file", whiteListFile)
exit(1)
generator = JSWrapperGenerator()
generator.gen(bindingsCpp, headers, coreBindings)

@ -0,0 +1,28 @@
{
"whitelist":
{
"": ["groupRectangles", "getPredefinedDictionary", "extendDictionary", "drawDetectedMarkers", "generateImageMarker", "drawDetectedCornersCharuco", "drawDetectedDiamonds"],
"HOGDescriptor": ["load", "HOGDescriptor", "getDefaultPeopleDetector", "getDaimlerPeopleDetector", "setSVMDetector", "detectMultiScale"],
"CascadeClassifier": ["load", "detectMultiScale2", "CascadeClassifier", "detectMultiScale3", "empty", "detectMultiScale"],
"GraphicalCodeDetector": ["decode", "detect", "detectAndDecode", "detectMulti", "decodeMulti", "detectAndDecodeMulti"],
"QRCodeDetector": ["QRCodeDetector", "decode", "detect", "detectAndDecode", "detectMulti", "decodeMulti", "detectAndDecodeMulti", "decodeCurved", "detectAndDecodeCurved", "setEpsX", "setEpsY"],
"aruco_PredefinedDictionaryType": [],
"aruco_Dictionary": ["Dictionary", "getDistanceToId", "generateImageMarker", "getByteListFromBits", "getBitsFromByteList"],
"aruco_Board": ["Board", "matchImagePoints", "generateImage"],
"aruco_GridBoard": ["GridBoard", "generateImage", "getGridSize", "getMarkerLength", "getMarkerSeparation", "matchImagePoints"],
"aruco_CharucoParameters": ["CharucoParameters"],
"aruco_CharucoBoard": ["CharucoBoard", "generateImage", "getChessboardCorners", "getNearestMarkerCorners", "checkCharucoCornersCollinear", "matchImagePoints", "getLegacyPattern", "setLegacyPattern"],
"aruco_DetectorParameters": ["DetectorParameters"],
"aruco_RefineParameters": ["RefineParameters"],
"aruco_ArucoDetector": ["ArucoDetector", "detectMarkers", "refineDetectedMarkers", "setDictionary", "setDetectorParameters", "setRefineParameters"],
"aruco_CharucoDetector": ["CharucoDetector", "setBoard", "setCharucoParameters", "setDetectorParameters", "setRefineParameters", "detectBoard", "detectDiamonds"],
"QRCodeDetectorAruco_Params": ["Params"],
"QRCodeDetectorAruco": ["QRCodeDetectorAruco", "decode", "detect", "detectAndDecode", "detectMulti", "decodeMulti", "detectAndDecodeMulti", "setDetectorParameters", "setArucoParameters"],
"barcode_BarcodeDetector": ["BarcodeDetector", "decode", "detect", "detectAndDecode", "detectMulti", "decodeMulti", "detectAndDecodeMulti", "decodeWithType", "detectAndDecodeWithType"],
"FaceDetectorYN": ["setInputSize", "getInputSize", "setScoreThreshold", "getScoreThreshold", "setNMSThreshold", "getNMSThreshold", "setTopK", "getTopK", "detect", "create"]
},
"namespace_prefix_override":
{
"aruco": ""
}
}

@ -0,0 +1,25 @@
{
"whitelist":
{
"": [
"createAlignMTB", "createCalibrateDebevec", "createCalibrateRobertson",
"createMergeDebevec", "createMergeMertens", "createMergeRobertson",
"createTonemapDrago", "createTonemapMantiuk", "createTonemapReinhard", "inpaint"],
"CalibrateCRF": ["process"],
"AlignMTB" : ["calculateShift", "shiftMat", "computeBitmaps", "getMaxBits", "setMaxBits",
"getExcludeRange", "setExcludeRange", "getCut", "setCut"],
"CalibrateDebevec" : ["getLambda", "setLambda", "getSamples", "setSamples", "getRandom", "setRandom"],
"CalibrateRobertson" : ["getMaxIter", "setMaxIter", "getThreshold", "setThreshold", "getRadiance"],
"MergeExposures" : ["process"],
"MergeDebevec" : ["process"],
"MergeMertens" : ["process", "getContrastWeight", "setContrastWeight", "getSaturationWeight",
"setSaturationWeight", "getExposureWeight", "setExposureWeight"],
"MergeRobertson" : ["process"],
"Tonemap" : ["process" , "getGamma", "setGamma"],
"TonemapDrago" : ["getSaturation", "setSaturation", "getBias", "setBias",
"getSigmaColor", "setSigmaColor", "getSigmaSpace","setSigmaSpace"],
"TonemapMantiuk" : ["getScale", "setScale", "getSaturation", "setSaturation"],
"TonemapReinhard" : ["getIntensity", "setIntensity", "getLightAdaptation", "setLightAdaptation",
"getColorAdaptation", "setColorAdaptation"]
}
}
