Merge branch 4.x

pull/25915/head
Alexander Smorkalov 9 months ago
commit fc9208cff5
  1. 3rdparty/carotene/hal/tegra_hal.hpp (75)
  2. CMakeLists.txt (11)
  3. cmake/OpenCVFindLibsGUI.cmake (3)
  4. doc/opencv.bib (9)
  5. doc/tutorials/dnn/dnn_yolo/dnn_yolo.markdown (51)
  6. doc/tutorials/introduction/config_reference/config_reference.markdown (1)
  7. modules/calib/include/opencv2/calib.hpp (10)
  8. modules/calib/misc/python/test/test_calibration.py (17)
  9. modules/calib/src/chessboard.cpp (12)
  10. modules/calib/test/test_chesscorners.cpp (20)
  11. modules/core/CMakeLists.txt (4)
  12. modules/core/include/opencv2/core/base.hpp (18)
  13. modules/core/include/opencv2/core/cvdef.h (1)
  14. modules/core/include/opencv2/core/hal/intrin.hpp (3)
  15. modules/core/include/opencv2/core/hal/intrin_cpp.hpp (54)
  16. modules/core/include/opencv2/core/hal/intrin_math.hpp (467)
  17. modules/core/include/opencv2/core/mat.hpp (2)
  18. modules/core/include/opencv2/core/opengl.hpp (2)
  19. modules/core/include/opencv2/core/utility.hpp (12)
  20. modules/core/src/matrix.cpp (2)
  21. modules/core/src/system.cpp (14)
  22. modules/core/test/test_intrin_utils.hpp (218)
  23. modules/dnn/include/opencv2/dnn/dnn.hpp (10)
  24. modules/dnn/misc/python/test/test_dnn.py (61)
  25. modules/dnn/perf/perf_layer.cpp (45)
  26. modules/dnn/src/cuda4dnn/init.hpp (37)
  27. modules/dnn/src/layers/concat_layer.cpp (2)
  28. modules/dnn/src/layers/cpu_kernels/softmax.cpp (40)
  29. modules/dnn/src/layers/elementwise_layers.cpp (129)
  30. modules/dnn/src/layers/slice_layer.cpp (4)
  31. modules/dnn/src/net_impl_backend.cpp (14)
  32. modules/dnn/src/opencl/activations.cl (4)
  33. modules/dnn/src/registry.cpp (24)
  34. modules/dnn/test/test_common.hpp (2)
  35. modules/dnn/test/test_onnx_conformance.cpp (67)
  36. modules/dnn/test/test_onnx_conformance_layer_filter__openvino.inl.hpp (30)
  37. modules/dnn/test/test_onnx_conformance_layer_parser_denylist.inl.hpp (4)
  38. modules/dnn/test/test_onnx_importer.cpp (117)
  39. modules/gapi/src/backends/onnx/gonnxbackend.cpp (48)
  40. modules/highgui/CMakeLists.txt (3)
  41. modules/highgui/cmake/detect_gtk.cmake (14)
  42. modules/highgui/src/window_gtk.cpp (110)
  43. modules/imgcodecs/include/opencv2/imgcodecs.hpp (48)
  44. modules/imgcodecs/perf/perf_jpeg.cpp (17)
  45. modules/imgcodecs/perf/perf_png.cpp (17)
  46. modules/imgcodecs/src/grfmt_avif.cpp (7)
  47. modules/imgcodecs/src/grfmt_base.cpp (6)
  48. modules/imgcodecs/src/grfmt_base.hpp (3)
  49. modules/imgcodecs/src/grfmt_bmp.cpp (5)
  50. modules/imgcodecs/src/grfmt_exr.cpp (75)
  51. modules/imgcodecs/src/grfmt_exr.hpp (1)
  52. modules/imgcodecs/src/grfmt_gdal.cpp (4)
  53. modules/imgcodecs/src/grfmt_hdr.cpp (8)
  54. modules/imgcodecs/src/grfmt_jpeg.cpp (18)
  55. modules/imgcodecs/src/grfmt_jpeg2000.cpp (7)
  56. modules/imgcodecs/src/grfmt_jpeg2000_openjpeg.cpp (16)
  57. modules/imgcodecs/src/grfmt_pam.cpp (34)
  58. modules/imgcodecs/src/grfmt_pfm.cpp (2)
  59. modules/imgcodecs/src/grfmt_png.cpp (2)
  60. modules/imgcodecs/src/grfmt_pxm.cpp (4)
  61. modules/imgcodecs/src/grfmt_spng.cpp (16)
  62. modules/imgcodecs/src/grfmt_sunras.cpp (4)
  63. modules/imgcodecs/src/grfmt_tiff.cpp (19)
  64. modules/imgcodecs/src/grfmt_webp.cpp (8)
  65. modules/imgcodecs/src/loadsave.cpp (23)
  66. modules/imgcodecs/src/utils.cpp (19)
  67. modules/imgcodecs/src/utils.hpp (2)
  68. modules/imgcodecs/test/test_avif.cpp (8)
  69. modules/imgcodecs/test/test_exr.impl.hpp (9)
  70. modules/imgcodecs/test/test_grfmt.cpp (5)
  71. modules/imgcodecs/test/test_jpeg.cpp (8)
  72. modules/imgcodecs/test/test_png.cpp (16)
  73. modules/imgcodecs/test/test_precomp.hpp (6)
  74. modules/imgcodecs/test/test_read_write.cpp (11)
  75. modules/imgcodecs/test/test_tiff.cpp (23)
  76. modules/imgcodecs/test/test_webp.cpp (6)
  77. modules/imgproc/include/opencv2/imgproc.hpp (26)
  78. modules/imgproc/src/approx.cpp (239)
  79. modules/imgproc/src/smooth.dispatch.cpp (52)
  80. modules/imgproc/test/test_approxpoly.cpp (75)
  81. modules/imgproc/test/test_canny.cpp (214)
  82. modules/imgproc/test/test_color.cpp (16)
  83. modules/imgproc/test/test_convhull.cpp (1959)
  84. modules/imgproc/test/test_filter.cpp (19)
  85. modules/imgproc/test/test_imgwarp.cpp (13)
  86. modules/imgproc/test/test_pc.cpp (4)
  87. modules/imgproc/test/test_smooth_bitexact.cpp (46)
  88. modules/imgproc/test/test_templmatch.cpp (396)
  89. modules/java/android_sdk/build.gradle.in (5)
  90. modules/photo/include/opencv2/photo.hpp (6)
  91. modules/python/src2/cv2.hpp (5)
  92. modules/python/src2/cv2_convert.cpp (2)
  93. modules/python/src2/cv2_convert.hpp (16)
  94. modules/python/src2/gen2.py (5)
  95. modules/python/src2/hdr_parser.py (4)
  96. modules/python/test/test_misc.py (4)
  97. modules/ts/src/ts.cpp (1)
  98. modules/videoio/cmake/detect_obsensor.cmake (5)
  99. modules/videoio/src/cap_ffmpeg_impl.hpp (4)
  100. samples/dnn/yolo_detector.cpp (32)
  Some files were not shown because too many files have changed in this diff.

@ -1286,7 +1286,6 @@ inline int TEGRA_SEPFILTERFREE(cvhalFilter2D *context)
#undef cv_hal_sepFilterFree
#define cv_hal_sepFilterFree TEGRA_SEPFILTERFREE
struct MorphCtx
{
int operation;
@ -1857,6 +1856,80 @@ TegraCvtColor_Invoker(bgrx2hsvf, bgrx2hsv, src_data + static_cast<size_t>(range.
#define cv_hal_cvtTwoPlaneYUVtoBGREx TEGRA_CVT2PYUVTOBGR_EX
#endif
// The optimized branch was developed for old armv7 processors and leads to perf degradation on armv8
#if defined(DCAROTENE_NEON_ARCH) && (DCAROTENE_NEON_ARCH == 7)
inline CAROTENE_NS::BORDER_MODE borderCV2Carotene(int borderType)
{
switch(borderType)
{
case CV_HAL_BORDER_CONSTANT:
return CAROTENE_NS::BORDER_MODE_CONSTANT;
case CV_HAL_BORDER_REPLICATE:
return CAROTENE_NS::BORDER_MODE_REPLICATE;
case CV_HAL_BORDER_REFLECT:
return CAROTENE_NS::BORDER_MODE_REFLECT;
case CV_HAL_BORDER_WRAP:
return CAROTENE_NS::BORDER_MODE_WRAP;
case CV_HAL_BORDER_REFLECT_101:
return CAROTENE_NS::BORDER_MODE_REFLECT101;
}
return CAROTENE_NS::BORDER_MODE_UNDEFINED;
}
inline int TEGRA_GaussianBlurBinomial(const uchar* src_data, size_t src_step, uchar* dst_data, size_t dst_step,
int width, int height, int depth, int cn, size_t margin_left, size_t margin_top,
size_t margin_right, size_t margin_bottom, size_t ksize, int border_type)
{
CAROTENE_NS::Size2D sz(width, height);
CAROTENE_NS::BORDER_MODE border = borderCV2Carotene(border_type);
CAROTENE_NS::Margin mg(margin_left, margin_right, margin_top, margin_bottom);
if (ksize == 3)
{
if ((depth != CV_8U) || (cn != 1))
return CV_HAL_ERROR_NOT_IMPLEMENTED;
if (CAROTENE_NS::isGaussianBlur3x3MarginSupported(sz, border, mg))
{
CAROTENE_NS::gaussianBlur3x3Margin(sz, src_data, src_step, dst_data, dst_step,
border, 0, mg);
return CV_HAL_ERROR_OK;
}
}
else if (ksize == 5)
{
if (!CAROTENE_NS::isGaussianBlur5x5Supported(sz, cn, border))
return CV_HAL_ERROR_NOT_IMPLEMENTED;
if (depth == CV_8U)
{
CAROTENE_NS::gaussianBlur5x5(sz, cn, (uint8_t*)src_data, src_step,
(uint8_t*)dst_data, dst_step, border, 0, mg);
return CV_HAL_ERROR_OK;
}
else if (depth == CV_16U)
{
CAROTENE_NS::gaussianBlur5x5(sz, cn, (uint16_t*)src_data, src_step,
(uint16_t*)dst_data, dst_step, border, 0, mg);
return CV_HAL_ERROR_OK;
}
else if (depth == CV_16S)
{
CAROTENE_NS::gaussianBlur5x5(sz, cn, (int16_t*)src_data, src_step,
(int16_t*)dst_data, dst_step, border, 0, mg);
return CV_HAL_ERROR_OK;
}
}
return CV_HAL_ERROR_NOT_IMPLEMENTED;
}
#undef cv_hal_gaussianBlurBinomial
#define cv_hal_gaussianBlurBinomial TEGRA_GaussianBlurBinomial
#endif // DCAROTENE_NEON_ARCH=7
#endif // OPENCV_IMGPROC_HAL_INTERFACE_H
#endif
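For context, a hypothetical call-site sketch (editorial addition, not part of the patch): on an armv7 Carotene build, an ordinary `cv::GaussianBlur` call with kernel size 3 or 5 and default sigma is the kind of request the `cv_hal_gaussianBlurBinomial` hook above is meant to intercept; the exact dispatch conditions live in `smooth.dispatch.cpp` and are an assumption here.
@code{.cpp}
// Hypothetical usage: a plain 5x5 Gaussian on CV_8UC1 data; on an armv7
// Carotene build such a call may be routed through TEGRA_GaussianBlurBinomial.
#include <opencv2/imgproc.hpp>

int main()
{
    cv::Mat src(480, 640, CV_8UC1, cv::Scalar(128)), dst;
    cv::GaussianBlur(src, dst, cv::Size(5, 5), /*sigmaX=*/0, /*sigmaY=*/0,
                     cv::BORDER_REPLICATE);
    return 0;
}
@endcode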

@ -262,7 +262,7 @@ OCV_OPTION(WITH_CAP_IOS "Enable iOS video capture" ON
VERIFY HAVE_CAP_IOS)
OCV_OPTION(WITH_CAROTENE "Use NVidia carotene acceleration library for ARM platform" (NOT CV_DISABLE_OPTIMIZATION)
VISIBLE_IF (ARM OR AARCH64) AND NOT IOS AND NOT XROS)
OCV_OPTION(WITH_KLEIDICV "Use KleidiCV library for ARM platforms" OFF
OCV_OPTION(WITH_KLEIDICV "Use KleidiCV library for ARM platforms" (ANDROID AND AARCH64 AND NOT CV_DISABLE_OPTIMIZATION)
VISIBLE_IF (AARCH64 AND (ANDROID OR UNIX AND NOT IOS AND NOT XROS)))
OCV_OPTION(WITH_NDSRVP "Use Andes RVP extension" (NOT CV_DISABLE_OPTIMIZATION)
VISIBLE_IF RISCV)
@ -1240,7 +1240,11 @@ if(CMAKE_GENERATOR MATCHES "Xcode|Visual Studio|Multi-Config")
else()
status(" Configuration:" ${CMAKE_BUILD_TYPE})
endif()
if(DEFINED OPENCV_ALGO_HINT_DEFAULT)
status(" Algorithm Hint:" ${OPENCV_ALGO_HINT_DEFAULT})
else()
status(" Algorithm Hint:" " ALGO_HINT_ACCURATE")
endif()
# ========================= CPU code generation mode =========================
status("")
@ -1446,11 +1450,14 @@ if(WITH_GTK OR HAVE_GTK)
else()
status(" GTK+:" "NO")
endif()
if(HAVE_GTK)
status( " GThread :" HAVE_GTHREAD THEN "YES (ver ${GTHREAD_VERSION})" ELSE NO)
if(NOT HAVE_GTK3)
status( " GtkGlExt:" HAVE_GTKGLEXT THEN "YES (ver ${GTKGLEXT_VERSION})" ELSE NO)
endif()
endif()
endif()
if(WITH_FRAMEBUFFER OR HAVE_FRAMEBUFFER)
status(" Framebuffer UI:" HAVE_FRAMEBUFFER THEN YES ELSE NO)

@ -60,9 +60,10 @@ if(WITH_QT)
endif()
# --- OpenGl ---
ocv_update(OpenGL_GL_PREFERENCE LEGACY)
ocv_clear_vars(HAVE_OPENGL HAVE_QT_OPENGL)
if(WITH_OPENGL)
if(WITH_WIN32UI OR (HAVE_QT AND QT_QTOPENGL_FOUND) OR HAVE_GTKGLEXT)
if(WITH_WIN32UI OR (HAVE_QT AND QT_QTOPENGL_FOUND) OR HAVE_GTK3 OR (HAVE_GTK AND NOT HAVE_GTK3 AND HAVE_GTKGLEXT))
find_package (OpenGL QUIET)
if(OPENGL_FOUND)
set(HAVE_OPENGL TRUE)

@ -1377,3 +1377,12 @@
volume = {60},
journal = {ISPRS Journal of Photogrammetry and Remote Sensing}
}
@article{LowIlie2003,
author = {Kok-Lim Low and Adrian Ilie},
year = {2003},
pages = {3-15},
title = {View Frustum Optimization to Maximize Object's Image Area},
journal = {Journal of Graphics, GPU, and Game Tools (JGT)},
volume = {8},
url = {https://citeseerx.ist.psu.edu/document?repid=rep1&type=pdf&doi=1fbd43f3827fffeb76641a9c5ab5b625eb5a75ba}
}

@ -24,7 +24,9 @@ model, but the methodology applies to other supported models.
@note Currently, OpenCV supports the following YOLO models:
- [YOLOX](https://github.com/Megvii-BaseDetection/YOLOX/blob/main),
- [YoloNas](https://github.com/Deci-AI/super-gradients/tree/master),
- [YOLONas](https://github.com/Deci-AI/super-gradients/tree/master),
- [YOLOv10](https://github.com/THU-MIG/yolov10/tree/main),
- [YOLOv9](https://github.com/WongKinYiu/yolov9),
- [YOLOv8](https://github.com/ultralytics/ultralytics/tree/main),
- [YOLOv7](https://github.com/WongKinYiu/yolov7/tree/main),
- [YOLOv6](https://github.com/meituan/YOLOv6/blob/main),
@ -79,7 +81,7 @@ the ONNX graph, a process that we will detail further in the subsequent sections
Now that we know the parameters of the pre-processing we can go on and export the model from
PyTorch to an ONNX graph. Since in this tutorial we are using YOLOX as our sample model, let's use its
export for demonstration purposes (the process is identical for the rest of the YOLO detectors).
export for demonstration purposes (the process is identical for the rest of the YOLO detectors except the `YOLOv10` model; see details on how to export it later in the post).
To export YOLOX we can just use the [export script](https://github.com/Megvii-BaseDetection/YOLOX/blob/ac58e0a5e68e57454b7b9ac822aced493b553c53/tools/export_onnx.py). In particular, we need the following commands:
@code{.bash}
@ -125,6 +127,20 @@ than YOLOX) in case it is needed. However, usually each YOLO repository has pred
onnx.save(model_simp, args.output_name)
@endcode
#### Exporting YOLOv10 model
In order to run YOLOv10 one needs to cut off the postprocessing with dynamic shapes from torch and then convert the model to ONNX. If someone is looking for guidance on how to cut off the postprocessing, there is this [forked branch](https://github.com/Abdurrahheem/yolov10/tree/ash/opencv-export) of the official YOLOv10. The forked branch cuts off the postprocessing by [returning the output](https://github.com/Abdurrahheem/yolov10/blob/4fdaafd912c8891642bfbe85751ea66ec20f05ad/ultralytics/nn/modules/head.py#L522) of the model before the postprocessing procedure itself. To convert the torch model to ONNX, follow this procedure.
@code{.bash}
git clone git@github.com:Abdurrahheem/yolov10.git
conda create -n yolov10 python=3.9
conda activate yolov10
pip install -r requirements.txt
python export_opencv.py --model=<model-name> --imgsz=<input-img-size>
@endcode
By default `--model="yolov10s"` and `--imgsz=(480,640)`. This will generate the file `yolov10s.onnx`, which can be used for inference in OpenCV.
### Running YOLO ONNX detector with OpenCV Sample
Once we have our ONNX graph of the model, we can simply run it with OpenCV's sample. To do that we need to make sure:
@ -144,24 +160,25 @@ Once we have our ONNX graph of the model, we just simply can run with OpenCV's s
--padvalue=<padding_value> \
--paddingmode=<padding_mode> \
--backend=<computation_backend> \
--target=<target_computation_device>
--target=<target_computation_device> \
--width=<model_input_width> \
--height=<model_input_height> \
@endcode
VIDEO DEMO:
@youtube{NHtRlndE2cg}
- --input: File path to your input image or video. If omitted, it will capture frames from a camera.
- --classes: File path to a text file containing class names for object detection.
- --thr: Confidence threshold for detection (e.g., 0.5).
- --nms: Non-maximum suppression threshold (e.g., 0.4).
- --mean: Mean normalization value (e.g., 0.0 for no mean normalization).
- --scale: Scale factor for input normalization (e.g., 1.0).
- --scale: Scale factor for input normalization (e.g., 1.0, 1/255.0, etc).
- --yolo: YOLO model version (e.g., YOLOv3, YOLOv4, etc.).
- --padvalue: Padding value used in pre-processing (e.g., 114.0).
- --paddingmode: Method for handling image resizing and padding. Options: 0 (resize without extra processing), 1 (crop after resize), 2 (resize with aspect ratio preservation).
- --backend: Selection of computation backend (0 for automatic, 1 for Halide, 2 for OpenVINO, etc.).
- --target: Selection of target computation device (0 for CPU, 1 for OpenCL, etc.).
- --device: Camera device number (0 for default camera). If `--input` is not provided, the camera with index 0 will be used by default.
- --width: Model input width. Not to be confused with the image width. (e.g., 416, 480, 640, 1280, etc).
- --height: Model input height. Not to be confused with the image height. (e.g., 416, 480, 640, 1280, etc).
Here `mean`, `scale`, `padvalue`, `paddingmode` should exactly match those that we discussed
in the pre-processing section in order for the model to match the result in PyTorch. A minimal pre-processing sketch is given below for reference.
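As a purely illustrative aid (not part of the tutorial or of `samples/dnn/yolo_detector.cpp`), the sketch below shows one way the `scale`, `padvalue` and aspect-ratio-preserving `paddingmode` described above can be combined into a letterbox pre-processing step with `cv::dnn::blobFromImage`; the function name `letterboxBlob` and the concrete values are assumptions, and the actual sample may implement this differently.
@code{.cpp}
// Hypothetical letterbox pre-processing sketch matching the flags above
// (--scale=1/255, --padvalue=114, --paddingmode=2).
#include <opencv2/dnn.hpp>
#include <opencv2/imgproc.hpp>
#include <algorithm>

cv::Mat letterboxBlob(const cv::Mat& img, cv::Size netSize,
                      double scale = 1.0 / 255.0, double padValue = 114.0)
{
    // Resize with preserved aspect ratio, pad the rest with padValue
    double r = std::min(netSize.width / (double)img.cols,
                        netSize.height / (double)img.rows);
    cv::Size newSize(cvRound(img.cols * r), cvRound(img.rows * r));
    cv::Mat resized, padded(netSize, img.type(), cv::Scalar::all(padValue));
    cv::resize(img, resized, newSize);
    resized.copyTo(padded(cv::Rect(0, 0, newSize.width, newSize.height)));
    // scalefactor and BGR->RGB swap as discussed in the pre-processing section
    return cv::dnn::blobFromImage(padded, scale, netSize, cv::Scalar(),
                                  /*swapRB=*/true, /*crop=*/false);
}
@endcode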
@ -183,7 +200,8 @@ cd <build directory of OpenCV>
./bin/example_dnn_yolo_detector
@endcode
This will execute the YOLOX detector with your camera. For YOLOv8 (for instance), follow these additional steps:
This will execute the YOLOX detector with your camera.
For YOLOv8 (for instance), follow these additional steps:
@code{.sh}
cd opencv_extra/testdata/dnn
@ -195,6 +213,23 @@ cd <build directory of OpenCV>
./bin/example_dnn_yolo_detector --model=onnx/models/yolov8n.onnx --yolo=yolov8 --mean=0.0 --scale=0.003921568627 --paddingmode=2 --padvalue=144.0 --thr=0.5 --nms=0.4 --rgb=0
@endcode
For YOLOv10, follow these steps:
@code{.sh}
cd opencv_extra/testdata/dnn
python download_models.py yolov10
cd ..
export OPENCV_TEST_DATA_PATH=$(pwd)
cd <build directory of OpenCV>
./bin/example_dnn_yolo_detector --model=onnx/models/yolov10s.onnx --yolo=yolov10 --width=640 --height=480 --scale=0.003921568627 --padvalue=114
@endcode
This will run the `YOLOv10` detector on the first camera found on your system. If you want to run it on an image/video file, you can use the `--input` option to specify the path to the file.
VIDEO DEMO:
@youtube{NHtRlndE2cg}
### Building a Custom Pipeline

@ -217,6 +217,7 @@ Following options can be used to produce special builds with instrumentation or
| `ENABLE_BUILD_HARDENING` | GCC, Clang, MSVC | Enable compiler options which reduce possibility of code exploitation. |
| `ENABLE_LTO` | GCC, Clang, MSVC | Enable Link Time Optimization (LTO). |
| `ENABLE_THIN_LTO` | Clang | Enable thin LTO which incorporates intermediate bitcode to binaries allowing consumers optimize their applications later. |
| `OPENCV_ALGO_HINT_DEFAULT` | Any | Sets the default OpenCV implementation hint value: `ALGO_HINT_ACCURATE` or `ALGO_HINT_APPROX`. Dangerous! The option changes behaviour globally and may affect the accuracy of many algorithms. |
@see [GCC instrumentation](https://gcc.gnu.org/onlinedocs/gcc/Instrumentation-Options.html)
@see [Build hardening](https://en.wikipedia.org/wiki/Hardening_(computing))

@ -372,11 +372,11 @@ R & t \\
where R is the rotation matrix corresponding to the rotation vector om: R = rodrigues(om); call x, y
and z the 3 coordinates of Xc:
\f[x = Xc_1 \\ y = Xc_2 \\ z = Xc_3\f]
\f[\begin{array}{l} x = Xc_1 \\ y = Xc_2 \\ z = Xc_3 \end{array} \f]
The pinhole projection coordinates of P is [a; b] where
\f[a = x / z \ and \ b = y / z \\ r^2 = a^2 + b^2 \\ \theta = atan(r)\f]
\f[\begin{array}{l} a = x / z \ and \ b = y / z \\ r^2 = a^2 + b^2 \\ \theta = atan(r) \end{array} \f]
Fisheye distortion:
@ -384,12 +384,12 @@ R & t \\
The distorted point coordinates are [x'; y'] where
\f[x' = (\theta_d / r) a \\ y' = (\theta_d / r) b \f]
\f[\begin{array}{l} x' = (\theta_d / r) a \\ y' = (\theta_d / r) b \end{array} \f]
Finally, conversion into pixel coordinates: The final pixel coordinates vector [u; v] where:
\f[u = f_x (x' + \alpha y') + c_x \\
v = f_y y' + c_y\f]
\f[\begin{array}{l} u = f_x (x' + \alpha y') + c_x \\
v = f_y y' + c_y \end{array} \f]
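For reference (editorial addition, not part of the original documentation), substituting \f$x' = (\theta_d / r) a\f$ and \f$y' = (\theta_d / r) b\f$ into the pixel-coordinate step gives the whole fisheye projection in a single expression:
\f[\begin{array}{l} u = f_x \left( \frac{\theta_d}{r} a + \alpha \frac{\theta_d}{r} b \right) + c_x \\ v = f_y \frac{\theta_d}{r} b + c_y \end{array} \f]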
Summary:
Generic camera model @cite Kannala2006 with perspective projection and without distortion correction

@ -67,7 +67,22 @@ class calibration_test(NewOpenCVTests):
self.assertLess(cv.norm(camera_matrix - cameraMatrixTest, cv.NORM_L1), normCamEps)
self.assertLess(cv.norm(dist_coefs - distCoeffsTest, cv.NORM_L1), normDistEps)
def test_projectPoints(self):
objectPoints = np.array([[181.24588 , 87.80361 , 11.421074],
[ 87.17948 , 184.75563 , 37.223446],
[ 22.558456, 45.495266, 246.05797 ]], dtype=np.float32)
rvec = np.array([[ 0.9357548 , -0.28316498, 0.21019171],
[ 0.30293274, 0.9505806 , -0.06803132],
[-0.18054008, 0.12733458, 0.9752903 ]], dtype=np.float32)
tvec = np.array([ 69.32692 , 17.602057, 135.77672 ], dtype=np.float32)
cameraMatrix = np.array([[214.0047 , 26.98735 , 253.37799 ],
[189.8172 , 10.038101, 18.862494],
[114.07123 , 200.87277 , 194.56332 ]], dtype=np.float32)
distCoeffs = np.zeros((4, 1), dtype=np.float32)
imagePoints, jacobian = cv.projectPoints(objectPoints, rvec, tvec, cameraMatrix, distCoeffs)
self.assertTrue(imagePoints is not None)
self.assertTrue(jacobian is not None)
if __name__ == '__main__':
NewOpenCVTests.bootstrap()

@ -1625,8 +1625,6 @@ bool Chessboard::Board::normalizeMarkerOrientation()
if(!current_cell->marker || !current_cell->right || !current_cell->right->marker)
continue;
if(current_cell->black)
{
if(current_cell->right->top && current_cell->right->top->marker)
{
rotateLeft();
@ -1640,9 +1638,6 @@ bool Chessboard::Board::normalizeMarkerOrientation()
pcell = current_cell->right;
break;
}
}
else
{
if(current_cell->top && current_cell->top->marker)
{
rotateRight();
@ -1657,13 +1652,12 @@ bool Chessboard::Board::normalizeMarkerOrientation()
}
}
}
}
if(pcell)
{
//check for ambiguity
if(rowCount()-pcell->bottom->getRow() > 2)
{
// std::cout << "FIX board " << pcell->bottom->getRow() << " " << rowCount();
CV_LOG_DEBUG(NULL, "FIX board " << pcell->bottom->getRow() << " " << rowCount());
flipVertical();
rotateRight();
}
@ -2265,7 +2259,7 @@ int Chessboard::Board::detectMarkers(cv::InputArray image)
cell->marker = noise-signal > (noise-reference)*0.5;
if(cell->marker)
count++;
// std::cout << x << "/" << y << " signal " << signal << " noise " << noise << " reference " << reference << " has marker " << int(cell->marker) << std::endl;
CV_LOG_DEBUG(NULL, "Cell: " << x << "/" << y << " signal " << signal << " noise " << noise << " reference " << reference << " has marker " << int(cell->marker));
}
}
return count;
@ -3379,7 +3373,7 @@ cv::Scalar Chessboard::Board::calcEdgeSharpness(cv::InputArray _img,float rise_d
}
if(count == 0)
{
std::cout <<"calcEdgeSharpness: checkerboard too small for calculation." << std::endl;
CV_LOG_DEBUG(NULL, "calcEdgeSharpness: checkerboard too small for calculation.");
return cv::Scalar::all(9999);
}
sharpness = sharpness/float(count);

@ -679,6 +679,26 @@ TEST(Calib3d_AsymmetricCirclesPatternDetector, accuracy) { CV_ChessboardDetector
TEST(Calib3d_AsymmetricCirclesPatternDetectorWithClustering, accuracy) { CV_ChessboardDetectorTest test( ASYMMETRIC_CIRCLES_GRID, CALIB_CB_CLUSTERING ); test.safe_run(); }
#endif
TEST(Calib3d_ChessboardWithMarkers, regression_25806_white)
{
const cv::String dataDir = string(TS::ptr()->get_data_path()) + "cv/cameracalibration/";
const cv::Mat image = cv::imread(dataDir + "checkerboard_marker_white.png");
std::vector<Point2f> corners;
const bool success = cv::findChessboardCornersSB(image, Size(9, 14), corners, CALIB_CB_MARKER);
ASSERT_TRUE(success);
}
TEST(Calib3d_ChessboardWithMarkers, regression_25806_black)
{
const cv::String dataDir = string(TS::ptr()->get_data_path()) + "cv/cameracalibration/";
const cv::Mat image = cv::imread(dataDir + "checkerboard_marker_black.png");
std::vector<Point2f> corners;
const bool success = cv::findChessboardCornersSB(image, Size(9, 14), corners, CALIB_CB_MARKER);
ASSERT_TRUE(success);
}
TEST(Calib3d_CirclesPatternDetectorWithClustering, accuracy)
{
cv::String dataDir = string(TS::ptr()->get_data_path()) + "cv/cameracalibration/circles/";

@ -188,6 +188,10 @@ if(OPENCV_SEMIHOSTING)
ocv_target_compile_definitions(${the_module} PRIVATE "-DOPENCV_SEMIHOSTING")
endif(OPENCV_SEMIHOSTING)
if(DEFINED OPENCV_ALGO_HINT_DEFAULT)
ocv_target_compile_definitions(${the_module} PRIVATE "-DOPENCV_ALGO_HINT_DEFAULT=${OPENCV_ALGO_HINT_DEFAULT}")
endif(DEFINED OPENCV_ALGO_HINT_DEFAULT)
if(HAVE_HPX)
ocv_target_link_libraries(${the_module} LINK_PRIVATE "${HPX_LIBRARIES}")
endif()

@ -297,6 +297,21 @@ It is possible to alternate error processing by using redirectError().
*/
CV_EXPORTS CV_NORETURN void error(int _code, const String& _err, const char* _func, const char* _file, int _line);
/*! @brief Signals an error and terminates the application.
By default the function prints information about the error to stderr, then it terminates the application
with std::terminate. The function is designed for invariant checks in functions and methods with the
noexcept attribute.
@param _code - error code (Error::Code)
@param _err - error description
@param _func - function name. Available only when the compiler supports getting it
@param _file - source file name where the error has occurred
@param _line - line number in the source file where the error has occurred
@see CV_AssertTerminate
*/
CV_EXPORTS CV_NORETURN void terminate(int _code, const String& _err, const char* _func, const char* _file, int _line) CV_NOEXCEPT;
#ifdef CV_STATIC_ANALYSIS
// In practice, some macros are not processed correctly (noreturn is not detected).
@ -338,8 +353,11 @@ for example:
The macros CV_Assert (and CV_DbgAssert(expr)) evaluate the specified expression. If it is 0, the macros
raise an error (see cv::error). The macro CV_Assert checks the condition in both Debug and Release
configurations while CV_DbgAssert is only retained in the Debug configuration.
CV_AssertTerminate is an analog of CV_Assert for invariant checks in functions with the noexcept attribute.
It does not throw an exception, but terminates the application.
*/
#define CV_Assert( expr ) do { if(!!(expr)) ; else cv::error( cv::Error::StsAssert, #expr, CV_Func, __FILE__, __LINE__ ); } while(0)
#define CV_AssertTerminate( expr ) do { if(!!(expr)) ; else cv::terminate( cv::Error::StsAssert, #expr, CV_Func, __FILE__, __LINE__ ); } while(0)
#endif // CV_STATIC_ANALYSIS
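A minimal usage sketch (editorial addition): `CV_AssertTerminate` is intended for invariant checks in `noexcept` code paths, such as destructors, where an exception from `CV_Assert` could not propagate. The class and member names below are illustrative only.
@code{.cpp}
// Hypothetical example: guarding an invariant inside noexcept members.
#include <opencv2/core.hpp>

class BufferPool
{
public:
    void release() noexcept
    {
        CV_AssertTerminate(in_use_ > 0);   // dumps the failure and terminates
        --in_use_;
    }
    ~BufferPool() noexcept
    {
        // A destructor must not throw; on failure cv::terminate() prints the
        // diagnostic and calls std::terminate instead of raising cv::Exception.
        CV_AssertTerminate(in_use_ == 0);
    }
private:
    int in_use_ = 0;
};
@endcode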

@ -481,6 +481,7 @@ Cv64suf;
#define CV_OUT
#define CV_PROP
#define CV_PROP_RW
#define CV_ND // Indicates that input data should be parsed into Mat without channels
#define CV_WRAP
#define CV_WRAP_AS(synonym)
#define CV_WRAP_MAPPABLE(mappable)

@ -64,7 +64,7 @@
namespace {
inline unsigned int trailingZeros32(unsigned int value) {
#if defined(_MSC_VER)
#if (_MSC_VER < 1700) || defined(_M_ARM) || defined(_M_ARM64)
#if (_MSC_VER < 1700) || defined(_M_ARM) || defined(_M_ARM64) || defined(_M_ARM64EC)
unsigned long index = 0;
_BitScanForward(&index, value);
return (unsigned int)index;
@ -1305,6 +1305,7 @@ namespace CV__SIMD_NAMESPACE {
typedef struct v_float64 { int dummy; } v_float64;
#endif
#include "intrin_math.hpp"
#include "simd_utils.impl.hpp"
#ifndef CV_DOXYGEN

@ -263,7 +263,8 @@ Most of these operations return only one value.
### Other math
- Some frequent operations: @ref v_sqrt, @ref v_invsqrt, @ref v_magnitude, @ref v_sqr_magnitude
- Some frequent operations: @ref v_sqrt, @ref v_invsqrt, @ref v_magnitude, @ref v_sqr_magnitude, @ref v_exp,
@ref v_erf
- Absolute values: @ref v_abs, @ref v_absdiff, @ref v_absdiffs
### Conversions
@ -363,6 +364,8 @@ Floating point:
|reverse | x | x |
|extract_n | x | x |
|broadcast_element | x | |
|exp | x | x |
|log | x | x |
@{ */
@ -721,11 +724,56 @@ template<typename _Tp, int n> inline v_reg<_Tp2, n> func(const v_reg<_Tp, n>& a)
Only for floating point types.*/
OPENCV_HAL_IMPL_MATH_FUNC(v_sqrt, std::sqrt, _Tp)
/**
* @brief Exponential \f$ e^x \f$ of elements
*
* Only for floating point types. Core implementation steps:
* 1. Decompose Input: Convert the input to \f$ 2^{x \cdot \log_2e} \f$ and split its exponential into integer and fractional parts:
* \f$ x \cdot \log_2e = n + f \f$, where \f$ n \f$ is the integer part and \f$ f \f$ is the fractional part.
* 2. Compute \f$ 2^n \f$: Calculated by shifting the bits.
* 3. Adjust Fractional Part: Compute \f$ f \cdot \ln2 \f$ to convert the fractional part to base \f$ e \f$.
* \f$ C1 \f$ and \f$ C2 \f$ are used to adjust the fractional part.
* 4. Polynomial Approximation for \f$ e^{f \cdot \ln2} \f$: The closer the fractional part is to 0, the more accurate the result.
* - For float16 and float32, use a Taylor Series with 6 terms.
* - For float64, use Pade Polynomials Approximation with 4 terms.
* 5. Combine Results: Multiply the two parts together to get the final result:
* \f$ e^x = 2^n \cdot e^{f \cdot \ln2} \f$.
*
* @note The precision of the calculation depends on the implementation and the data type of the input vector.
*/
OPENCV_HAL_IMPL_MATH_FUNC(v_exp, std::exp, _Tp)
#define OPENCV_HAL_MATH_HAVE_EXP 1
/**
* @brief Natural logarithm \f$ \log(x) \f$ of elements
*
* Only for floating point types. Core implementation steps:
* 1. Decompose Input: Use binary representation to decompose the input into mantissa part \f$ m \f$ and exponent part \f$ e \f$. Such that \f$ \log(x) = \log(m \cdot 2^e) = \log(m) + e \cdot \ln(2) \f$.
* 2. Adjust Mantissa and Exponent Parts: If the mantissa is less than \f$ \sqrt{0.5} \f$, adjust the exponent and mantissa to ensure the mantissa is in the range \f$ (\sqrt{0.5}, \sqrt{2}) \f$ for better approximation.
* 3. Polynomial Approximation for \f$ \log(m) \f$: The closer the \f$ m \f$ is to 1, the more accurate the result.
* - For float16 and float32, use a Taylor Series with 9 terms.
* - For float64, use Pade Polynomials Approximation with 6 terms.
* 4. Combine Results: Add the two parts together to get the final result.
*
* @note The precision of the calculation depends on the implementation and the data type of the input.
*
* @note Similar to the behavior of std::log(), \f$ \ln(0) = -\infty \f$.
*/
OPENCV_HAL_IMPL_MATH_FUNC(v_log, std::log, _Tp)
#define OPENCV_HAL_MATH_HAVE_LOG 1
/**
* @brief Error function.
*
* @note Only FP32 precision is supported for now.
*/
OPENCV_HAL_IMPL_MATH_FUNC(v_erf, std::erf, _Tp)
//! @cond IGNORED
OPENCV_HAL_IMPL_MATH_FUNC(v_sin, std::sin, _Tp)
#define OPENCV_HAL_MATH_HAVE_SIN 1
OPENCV_HAL_IMPL_MATH_FUNC(v_cos, std::cos, _Tp)
OPENCV_HAL_IMPL_MATH_FUNC(v_exp, std::exp, _Tp)
OPENCV_HAL_IMPL_MATH_FUNC(v_log, std::log, _Tp)
#define OPENCV_HAL_MATH_HAVE_COS 1
//! @endcond
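To illustrate the API documented above (editorial addition, not from the patch), here is a small sketch that combines `v_exp` and `v_log` over a float buffer with the usual universal-intrinsics helpers; the `softplus` function itself is a made-up example.
@code{.cpp}
// Hypothetical usage sketch: softplus(x) = log(1 + exp(x)) over a float array,
// built from the new v_exp/v_log universal intrinsics with a scalar tail.
#include <opencv2/core/hal/intrin.hpp>
#include <cmath>

void softplus(const float* src, float* dst, int n)
{
    int i = 0;
#if (CV_SIMD || CV_SIMD_SCALABLE)
    using namespace cv;
    const int w = VTraits<v_float32>::vlanes();
    for (; i + w <= n; i += w)
    {
        v_float32 x = vx_load(src + i);
        v_float32 y = v_log(v_add(vx_setall_f32(1.f), v_exp(x)));
        v_store(dst + i, y);
    }
#endif
    for (; i < n; ++i)                 // scalar tail (and non-SIMD fallback)
        dst[i] = std::log(1.f + std::exp(src[i]));
}
@endcode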
/** @brief Absolute value of elements

@ -0,0 +1,467 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html
// This header is not standalone. Don't include directly, use "intrin.hpp" instead.
#ifdef OPENCV_HAL_INTRIN_HPP // defined in intrin.hpp
namespace CV__SIMD_NAMESPACE {
/* Universal Intrinsics implementation of sin, cos, exp and log
Inspired by Intel Approximate Math library, and based on the
corresponding algorithms of the cephes math library
*/
/* Copyright (C) 2010,2011 RJVB - extensions */
/* Copyright (C) 2011 Julien Pommier
This software is provided 'as-is', without any express or implied
warranty. In no event will the authors be held liable for any damages
arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it
freely, subject to the following restrictions:
1. The origin of this software must not be misrepresented; you must not
claim that you wrote the original software. If you use this software
in a product, an acknowledgment in the product documentation would be
appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be
misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.
(this is the zlib license)
*/
#ifndef OPENCV_HAL_MATH_HAVE_EXP
//! @name Exponential
//! @{
#if defined(CV_SIMD_FP16) && CV_SIMD_FP16
// Implementation is the same as float32 vector.
inline v_float16 v_exp(const v_float16 &x) {
const v_float16 _vexp_lo_f16 = vx_setall_f16(hfloat(-10.7421875f));
const v_float16 _vexp_hi_f16 = vx_setall_f16(hfloat(11.f));
const v_float16 _vexp_half_fp16 = vx_setall_f16(hfloat(0.5f));
const v_float16 _vexp_one_fp16 = vx_setall_f16(hfloat(1.f));
const v_float16 _vexp_LOG2EF_f16 = vx_setall_f16(hfloat(1.44269504088896341f));
const v_float16 _vexp_C1_f16 = vx_setall_f16(hfloat(-6.93359375E-1f));
const v_float16 _vexp_C2_f16 = vx_setall_f16(hfloat(2.12194440E-4f));
const v_float16 _vexp_p0_f16 = vx_setall_f16(hfloat(1.9875691500E-4f));
const v_float16 _vexp_p1_f16 = vx_setall_f16(hfloat(1.3981999507E-3f));
const v_float16 _vexp_p2_f16 = vx_setall_f16(hfloat(8.3334519073E-3f));
const v_float16 _vexp_p3_f16 = vx_setall_f16(hfloat(4.1665795894E-2f));
const v_float16 _vexp_p4_f16 = vx_setall_f16(hfloat(1.6666665459E-1f));
const v_float16 _vexp_p5_f16 = vx_setall_f16(hfloat(5.0000001201E-1f));
const v_int16 _vexp_bias_s16 = vx_setall_s16(0xf);
v_float16 _vexp_, _vexp_x, _vexp_y, _vexp_xx;
v_int16 _vexp_mm;
// compute exponential of x
_vexp_x = v_max(x, _vexp_lo_f16);
_vexp_x = v_min(_vexp_x, _vexp_hi_f16);
_vexp_ = v_fma(_vexp_x, _vexp_LOG2EF_f16, _vexp_half_fp16);
_vexp_mm = v_floor(_vexp_);
_vexp_ = v_cvt_f16(_vexp_mm);
_vexp_mm = v_add(_vexp_mm, _vexp_bias_s16);
_vexp_mm = v_shl(_vexp_mm, 10);
_vexp_x = v_fma(_vexp_, _vexp_C1_f16, _vexp_x);
_vexp_x = v_fma(_vexp_, _vexp_C2_f16, _vexp_x);
_vexp_xx = v_mul(_vexp_x, _vexp_x);
_vexp_y = v_fma(_vexp_x, _vexp_p0_f16, _vexp_p1_f16);
_vexp_y = v_fma(_vexp_y, _vexp_x, _vexp_p2_f16);
_vexp_y = v_fma(_vexp_y, _vexp_x, _vexp_p3_f16);
_vexp_y = v_fma(_vexp_y, _vexp_x, _vexp_p4_f16);
_vexp_y = v_fma(_vexp_y, _vexp_x, _vexp_p5_f16);
_vexp_y = v_fma(_vexp_y, _vexp_xx, _vexp_x);
_vexp_y = v_add(_vexp_y, _vexp_one_fp16);
_vexp_y = v_mul(_vexp_y, v_reinterpret_as_f16(_vexp_mm));
// exp(NAN) -> NAN
v_float16 mask_not_nan = v_not_nan(x);
return v_select(mask_not_nan, _vexp_y, v_reinterpret_as_f16(vx_setall_s16(0x7e00)));
}
#endif
inline v_float32 v_exp(const v_float32 &x) {
const v_float32 _vexp_lo_f32 = vx_setall_f32(-88.3762626647949f);
const v_float32 _vexp_hi_f32 = vx_setall_f32(89.f);
const v_float32 _vexp_half_fp32 = vx_setall_f32(0.5f);
const v_float32 _vexp_one_fp32 = vx_setall_f32(1.f);
const v_float32 _vexp_LOG2EF_f32 = vx_setall_f32(1.44269504088896341f);
const v_float32 _vexp_C1_f32 = vx_setall_f32(-6.93359375E-1f);
const v_float32 _vexp_C2_f32 = vx_setall_f32(2.12194440E-4f);
const v_float32 _vexp_p0_f32 = vx_setall_f32(1.9875691500E-4f);
const v_float32 _vexp_p1_f32 = vx_setall_f32(1.3981999507E-3f);
const v_float32 _vexp_p2_f32 = vx_setall_f32(8.3334519073E-3f);
const v_float32 _vexp_p3_f32 = vx_setall_f32(4.1665795894E-2f);
const v_float32 _vexp_p4_f32 = vx_setall_f32(1.6666665459E-1f);
const v_float32 _vexp_p5_f32 = vx_setall_f32(5.0000001201E-1f);
const v_int32 _vexp_bias_s32 = vx_setall_s32(0x7f);
v_float32 _vexp_, _vexp_x, _vexp_y, _vexp_xx;
v_int32 _vexp_mm;
// compute exponential of x
_vexp_x = v_max(x, _vexp_lo_f32);
_vexp_x = v_min(_vexp_x, _vexp_hi_f32);
_vexp_ = v_fma(_vexp_x, _vexp_LOG2EF_f32, _vexp_half_fp32);
_vexp_mm = v_floor(_vexp_);
_vexp_ = v_cvt_f32(_vexp_mm);
_vexp_mm = v_add(_vexp_mm, _vexp_bias_s32);
_vexp_mm = v_shl(_vexp_mm, 23);
_vexp_x = v_fma(_vexp_, _vexp_C1_f32, _vexp_x);
_vexp_x = v_fma(_vexp_, _vexp_C2_f32, _vexp_x);
_vexp_xx = v_mul(_vexp_x, _vexp_x);
_vexp_y = v_fma(_vexp_x, _vexp_p0_f32, _vexp_p1_f32);
_vexp_y = v_fma(_vexp_y, _vexp_x, _vexp_p2_f32);
_vexp_y = v_fma(_vexp_y, _vexp_x, _vexp_p3_f32);
_vexp_y = v_fma(_vexp_y, _vexp_x, _vexp_p4_f32);
_vexp_y = v_fma(_vexp_y, _vexp_x, _vexp_p5_f32);
_vexp_y = v_fma(_vexp_y, _vexp_xx, _vexp_x);
_vexp_y = v_add(_vexp_y, _vexp_one_fp32);
_vexp_y = v_mul(_vexp_y, v_reinterpret_as_f32(_vexp_mm));
// exp(NAN) -> NAN
v_float32 mask_not_nan = v_not_nan(x);
return v_select(mask_not_nan, _vexp_y, v_reinterpret_as_f32(vx_setall_s32(0x7fc00000)));
}
#if CV_SIMD_64F || CV_SIMD_SCALABLE_64F
inline v_float64 v_exp(const v_float64 &x) {
const v_float64 _vexp_lo_f64 = vx_setall_f64(-709.43613930310391424428);
const v_float64 _vexp_hi_f64 = vx_setall_f64(710.);
const v_float64 _vexp_half_f64 = vx_setall_f64(0.5);
const v_float64 _vexp_one_f64 = vx_setall_f64(1.0);
const v_float64 _vexp_two_f64 = vx_setall_f64(2.0);
const v_float64 _vexp_LOG2EF_f64 = vx_setall_f64(1.44269504088896340736);
const v_float64 _vexp_C1_f64 = vx_setall_f64(-6.93145751953125E-1);
const v_float64 _vexp_C2_f64 = vx_setall_f64(-1.42860682030941723212E-6);
const v_float64 _vexp_p0_f64 = vx_setall_f64(1.26177193074810590878E-4);
const v_float64 _vexp_p1_f64 = vx_setall_f64(3.02994407707441961300E-2);
const v_float64 _vexp_p2_f64 = vx_setall_f64(9.99999999999999999910E-1);
const v_float64 _vexp_q0_f64 = vx_setall_f64(3.00198505138664455042E-6);
const v_float64 _vexp_q1_f64 = vx_setall_f64(2.52448340349684104192E-3);
const v_float64 _vexp_q2_f64 = vx_setall_f64(2.27265548208155028766E-1);
const v_float64 _vexp_q3_f64 = vx_setall_f64(2.00000000000000000009E0);
const v_int64 _vexp_bias_s64 = vx_setall_s64(0x3ff);
v_float64 _vexp_, _vexp_x, _vexp_y, _vexp_z, _vexp_xx;
v_int64 _vexp_mm;
// compute exponential of x
_vexp_x = v_max(x, _vexp_lo_f64);
_vexp_x = v_min(_vexp_x, _vexp_hi_f64);
_vexp_ = v_fma(_vexp_x, _vexp_LOG2EF_f64, _vexp_half_f64);
_vexp_mm = v_expand_low(v_floor(_vexp_));
_vexp_ = v_cvt_f64(_vexp_mm);
_vexp_mm = v_add(_vexp_mm, _vexp_bias_s64);
_vexp_mm = v_shl(_vexp_mm, 52);
_vexp_x = v_fma(_vexp_, _vexp_C1_f64, _vexp_x);
_vexp_x = v_fma(_vexp_, _vexp_C2_f64, _vexp_x);
_vexp_xx = v_mul(_vexp_x, _vexp_x);
_vexp_y = v_fma(_vexp_xx, _vexp_p0_f64, _vexp_p1_f64);
_vexp_y = v_fma(_vexp_y, _vexp_xx, _vexp_p2_f64);
_vexp_y = v_mul(_vexp_y, _vexp_x);
_vexp_z = v_fma(_vexp_xx, _vexp_q0_f64, _vexp_q1_f64);
_vexp_z = v_fma(_vexp_xx, _vexp_z, _vexp_q2_f64);
_vexp_z = v_fma(_vexp_xx, _vexp_z, _vexp_q3_f64);
_vexp_z = v_div(_vexp_y, v_sub(_vexp_z, _vexp_y));
_vexp_z = v_fma(_vexp_two_f64, _vexp_z, _vexp_one_f64);
_vexp_z = v_mul(_vexp_z, v_reinterpret_as_f64(_vexp_mm));
// exp(NAN) -> NAN
v_float64 mask_not_nan = v_not_nan(x);
return v_select(mask_not_nan, _vexp_z, v_reinterpret_as_f64(vx_setall_s64(0x7FF8000000000000)));
}
#endif
#define OPENCV_HAL_MATH_HAVE_EXP 1
//! @}
#endif
#ifndef OPENCV_HAL_MATH_HAVE_LOG
//! @name Natural Logarithm
//! @{
#if defined(CV_SIMD_FP16) && CV_SIMD_FP16
inline v_float16 v_log(const v_float16 &x) {
const v_float16 _vlog_one_fp16 = vx_setall_f16(hfloat(1.0f));
const v_float16 _vlog_SQRTHF_fp16 = vx_setall_f16(hfloat(0.707106781186547524f));
const v_float16 _vlog_q1_fp16 = vx_setall_f16(hfloat(-2.12194440E-4f));
const v_float16 _vlog_q2_fp16 = vx_setall_f16(hfloat(0.693359375f));
const v_float16 _vlog_p0_fp16 = vx_setall_f16(hfloat(7.0376836292E-2f));
const v_float16 _vlog_p1_fp16 = vx_setall_f16(hfloat(-1.1514610310E-1f));
const v_float16 _vlog_p2_fp16 = vx_setall_f16(hfloat(1.1676998740E-1f));
const v_float16 _vlog_p3_fp16 = vx_setall_f16(hfloat(-1.2420140846E-1f));
const v_float16 _vlog_p4_fp16 = vx_setall_f16(hfloat(1.4249322787E-1f));
const v_float16 _vlog_p5_fp16 = vx_setall_f16(hfloat(-1.6668057665E-1f));
const v_float16 _vlog_p6_fp16 = vx_setall_f16(hfloat(2.0000714765E-1f));
const v_float16 _vlog_p7_fp16 = vx_setall_f16(hfloat(-2.4999993993E-1f));
const v_float16 _vlog_p8_fp16 = vx_setall_f16(hfloat(3.3333331174E-1f));
const v_int16 _vlog_inv_mant_mask_s16 = vx_setall_s16(~0x7c00);
v_float16 _vlog_x, _vlog_e, _vlog_y, _vlog_z, _vlog_tmp;
v_int16 _vlog_ux, _vlog_emm0;
_vlog_ux = v_reinterpret_as_s16(x);
_vlog_emm0 = v_shr(_vlog_ux, 10);
_vlog_ux = v_and(_vlog_ux, _vlog_inv_mant_mask_s16);
_vlog_ux = v_or(_vlog_ux, v_reinterpret_as_s16(vx_setall_f16(hfloat(0.5f))));
_vlog_x = v_reinterpret_as_f16(_vlog_ux);
_vlog_emm0 = v_sub(_vlog_emm0, vx_setall_s16(0xf));
_vlog_e = v_cvt_f16(_vlog_emm0);
_vlog_e = v_add(_vlog_e, _vlog_one_fp16);
v_float16 _vlog_mask = v_lt(_vlog_x, _vlog_SQRTHF_fp16);
_vlog_tmp = v_and(_vlog_x, _vlog_mask);
_vlog_x = v_sub(_vlog_x, _vlog_one_fp16);
_vlog_e = v_sub(_vlog_e, v_and(_vlog_one_fp16, _vlog_mask));
_vlog_x = v_add(_vlog_x, _vlog_tmp);
_vlog_z = v_mul(_vlog_x, _vlog_x);
_vlog_y = v_fma(_vlog_p0_fp16, _vlog_x, _vlog_p1_fp16);
_vlog_y = v_fma(_vlog_y, _vlog_x, _vlog_p2_fp16);
_vlog_y = v_fma(_vlog_y, _vlog_x, _vlog_p3_fp16);
_vlog_y = v_fma(_vlog_y, _vlog_x, _vlog_p4_fp16);
_vlog_y = v_fma(_vlog_y, _vlog_x, _vlog_p5_fp16);
_vlog_y = v_fma(_vlog_y, _vlog_x, _vlog_p6_fp16);
_vlog_y = v_fma(_vlog_y, _vlog_x, _vlog_p7_fp16);
_vlog_y = v_fma(_vlog_y, _vlog_x, _vlog_p8_fp16);
_vlog_y = v_mul(_vlog_y, _vlog_x);
_vlog_y = v_mul(_vlog_y, _vlog_z);
_vlog_y = v_fma(_vlog_e, _vlog_q1_fp16, _vlog_y);
_vlog_y = v_sub(_vlog_y, v_mul(_vlog_z, vx_setall_f16(hfloat(0.5f))));
_vlog_x = v_add(_vlog_x, _vlog_y);
_vlog_x = v_fma(_vlog_e, _vlog_q2_fp16, _vlog_x);
// log(0) -> -INF
v_float16 mask_zero = v_eq(x, vx_setzero_f16());
_vlog_x = v_select(mask_zero, v_reinterpret_as_f16(vx_setall_s16(0xfc00)), _vlog_x);
// log(NEG), log(NAN) -> NAN
v_float16 mask_not_nan = v_ge(x, vx_setzero_f16());
_vlog_x = v_select(mask_not_nan, _vlog_x, v_reinterpret_as_f16(vx_setall_s16(0x7e00)));
// log(INF) -> INF
v_float16 mask_inf = v_eq(x, v_reinterpret_as_f16(vx_setall_s16(0x7c00)));
_vlog_x = v_select(mask_inf, x, _vlog_x);
return _vlog_x;
}
#endif
inline v_float32 v_log(const v_float32 &x) {
const v_float32 _vlog_one_fp32 = vx_setall_f32(1.0f);
const v_float32 _vlog_SQRTHF_fp32 = vx_setall_f32(0.707106781186547524f);
const v_float32 _vlog_q1_fp32 = vx_setall_f32(-2.12194440E-4f);
const v_float32 _vlog_q2_fp32 = vx_setall_f32(0.693359375f);
const v_float32 _vlog_p0_fp32 = vx_setall_f32(7.0376836292E-2f);
const v_float32 _vlog_p1_fp32 = vx_setall_f32(-1.1514610310E-1f);
const v_float32 _vlog_p2_fp32 = vx_setall_f32(1.1676998740E-1f);
const v_float32 _vlog_p3_fp32 = vx_setall_f32(-1.2420140846E-1f);
const v_float32 _vlog_p4_fp32 = vx_setall_f32(1.4249322787E-1f);
const v_float32 _vlog_p5_fp32 = vx_setall_f32(-1.6668057665E-1f);
const v_float32 _vlog_p6_fp32 = vx_setall_f32(2.0000714765E-1f);
const v_float32 _vlog_p7_fp32 = vx_setall_f32(-2.4999993993E-1f);
const v_float32 _vlog_p8_fp32 = vx_setall_f32(3.3333331174E-1f);
const v_int32 _vlog_inv_mant_mask_s32 = vx_setall_s32(~0x7f800000);
v_float32 _vlog_x, _vlog_e, _vlog_y, _vlog_z, _vlog_tmp;
v_int32 _vlog_ux, _vlog_emm0;
_vlog_ux = v_reinterpret_as_s32(x);
_vlog_emm0 = v_shr(_vlog_ux, 23);
_vlog_ux = v_and(_vlog_ux, _vlog_inv_mant_mask_s32);
_vlog_ux = v_or(_vlog_ux, v_reinterpret_as_s32(vx_setall_f32(0.5f)));
_vlog_x = v_reinterpret_as_f32(_vlog_ux);
_vlog_emm0 = v_sub(_vlog_emm0, vx_setall_s32(0x7f));
_vlog_e = v_cvt_f32(_vlog_emm0);
_vlog_e = v_add(_vlog_e, _vlog_one_fp32);
v_float32 _vlog_mask = v_lt(_vlog_x, _vlog_SQRTHF_fp32);
_vlog_tmp = v_and(_vlog_x, _vlog_mask);
_vlog_x = v_sub(_vlog_x, _vlog_one_fp32);
_vlog_e = v_sub(_vlog_e, v_and(_vlog_one_fp32, _vlog_mask));
_vlog_x = v_add(_vlog_x, _vlog_tmp);
_vlog_z = v_mul(_vlog_x, _vlog_x);
_vlog_y = v_fma(_vlog_p0_fp32, _vlog_x, _vlog_p1_fp32);
_vlog_y = v_fma(_vlog_y, _vlog_x, _vlog_p2_fp32);
_vlog_y = v_fma(_vlog_y, _vlog_x, _vlog_p3_fp32);
_vlog_y = v_fma(_vlog_y, _vlog_x, _vlog_p4_fp32);
_vlog_y = v_fma(_vlog_y, _vlog_x, _vlog_p5_fp32);
_vlog_y = v_fma(_vlog_y, _vlog_x, _vlog_p6_fp32);
_vlog_y = v_fma(_vlog_y, _vlog_x, _vlog_p7_fp32);
_vlog_y = v_fma(_vlog_y, _vlog_x, _vlog_p8_fp32);
_vlog_y = v_mul(_vlog_y, _vlog_x);
_vlog_y = v_mul(_vlog_y, _vlog_z);
_vlog_y = v_fma(_vlog_e, _vlog_q1_fp32, _vlog_y);
_vlog_y = v_sub(_vlog_y, v_mul(_vlog_z, vx_setall_f32(0.5)));
_vlog_x = v_add(_vlog_x, _vlog_y);
_vlog_x = v_fma(_vlog_e, _vlog_q2_fp32, _vlog_x);
// log(0) -> -INF
v_float32 mask_zero = v_eq(x, vx_setzero_f32());
_vlog_x = v_select(mask_zero, v_reinterpret_as_f32(vx_setall_s32(0xff800000)), _vlog_x);
// log(NEG), log(NAN) -> NAN
v_float32 mask_not_nan = v_ge(x, vx_setzero_f32());
_vlog_x = v_select(mask_not_nan, _vlog_x, v_reinterpret_as_f32(vx_setall_s32(0x7fc00000)));
// log(INF) -> INF
v_float32 mask_inf = v_eq(x, v_reinterpret_as_f32(vx_setall_s32(0x7f800000)));
_vlog_x = v_select(mask_inf, x, _vlog_x);
return _vlog_x;
}
#if CV_SIMD_64F || CV_SIMD_SCALABLE_64F
inline v_float64 v_log(const v_float64 &x) {
const v_float64 _vlog_one_fp64 = vx_setall_f64(1.0);
const v_float64 _vlog_SQRTHF_fp64 = vx_setall_f64(0.7071067811865475244);
const v_float64 _vlog_p0_fp64 = vx_setall_f64(1.01875663804580931796E-4);
const v_float64 _vlog_p1_fp64 = vx_setall_f64(4.97494994976747001425E-1);
const v_float64 _vlog_p2_fp64 = vx_setall_f64(4.70579119878881725854);
const v_float64 _vlog_p3_fp64 = vx_setall_f64(1.44989225341610930846E1);
const v_float64 _vlog_p4_fp64 = vx_setall_f64(1.79368678507819816313E1);
const v_float64 _vlog_p5_fp64 = vx_setall_f64(7.70838733755885391666);
const v_float64 _vlog_q0_fp64 = vx_setall_f64(1.12873587189167450590E1);
const v_float64 _vlog_q1_fp64 = vx_setall_f64(4.52279145837532221105E1);
const v_float64 _vlog_q2_fp64 = vx_setall_f64(8.29875266912776603211E1);
const v_float64 _vlog_q3_fp64 = vx_setall_f64(7.11544750618563894466E1);
const v_float64 _vlog_q4_fp64 = vx_setall_f64(2.31251620126765340583E1);
const v_float64 _vlog_C0_fp64 = vx_setall_f64(2.121944400546905827679e-4);
const v_float64 _vlog_C1_fp64 = vx_setall_f64(0.693359375);
const v_int64 _vlog_inv_mant_mask_s64 = vx_setall_s64(~0x7ff0000000000000);
v_float64 _vlog_x, _vlog_e, _vlog_y, _vlog_z, _vlog_tmp, _vlog_xx;
v_int64 _vlog_ux, _vlog_emm0;
_vlog_ux = v_reinterpret_as_s64(x);
_vlog_emm0 = v_shr(_vlog_ux, 52);
_vlog_ux = v_and(_vlog_ux, _vlog_inv_mant_mask_s64);
_vlog_ux = v_or(_vlog_ux, v_reinterpret_as_s64(vx_setall_f64(0.5)));
_vlog_x = v_reinterpret_as_f64(_vlog_ux);
_vlog_emm0 = v_sub(_vlog_emm0, vx_setall_s64(0x3ff));
_vlog_e = v_cvt_f64(_vlog_emm0);
_vlog_e = v_add(_vlog_e, _vlog_one_fp64);
v_float64 _vlog_mask = v_lt(_vlog_x, _vlog_SQRTHF_fp64);
_vlog_tmp = v_and(_vlog_x, _vlog_mask);
_vlog_x = v_sub(_vlog_x, _vlog_one_fp64);
_vlog_e = v_sub(_vlog_e, v_and(_vlog_one_fp64, _vlog_mask));
_vlog_x = v_add(_vlog_x, _vlog_tmp);
_vlog_xx = v_mul(_vlog_x, _vlog_x);
_vlog_y = v_fma(_vlog_p0_fp64, _vlog_x, _vlog_p1_fp64);
_vlog_y = v_fma(_vlog_y, _vlog_x, _vlog_p2_fp64);
_vlog_y = v_fma(_vlog_y, _vlog_x, _vlog_p3_fp64);
_vlog_y = v_fma(_vlog_y, _vlog_x, _vlog_p4_fp64);
_vlog_y = v_fma(_vlog_y, _vlog_x, _vlog_p5_fp64);
_vlog_y = v_mul(_vlog_y, _vlog_x);
_vlog_y = v_mul(_vlog_y, _vlog_xx);
_vlog_z = v_add(_vlog_x, _vlog_q0_fp64);
_vlog_z = v_fma(_vlog_z, _vlog_x, _vlog_q1_fp64);
_vlog_z = v_fma(_vlog_z, _vlog_x, _vlog_q2_fp64);
_vlog_z = v_fma(_vlog_z, _vlog_x, _vlog_q3_fp64);
_vlog_z = v_fma(_vlog_z, _vlog_x, _vlog_q4_fp64);
_vlog_z = v_div(_vlog_y, _vlog_z);
_vlog_z = v_sub(_vlog_z, v_mul(_vlog_e, _vlog_C0_fp64));
_vlog_z = v_sub(_vlog_z, v_mul(_vlog_xx, vx_setall_f64(0.5)));
_vlog_z = v_add(_vlog_z, _vlog_x);
_vlog_z = v_fma(_vlog_e, _vlog_C1_fp64, _vlog_z);
// log(0) -> -INF
v_float64 mask_zero = v_eq(x, vx_setzero_f64());
_vlog_z = v_select(mask_zero, v_reinterpret_as_f64(vx_setall_s64(0xfff0000000000000)), _vlog_z);
// log(NEG), log(NAN) -> NAN
v_float64 mask_not_nan = v_ge(x, vx_setzero_f64());
_vlog_z = v_select(mask_not_nan, _vlog_z, v_reinterpret_as_f64(vx_setall_s64(0x7ff8000000000000)));
// log(INF) -> INF
v_float64 mask_inf = v_eq(x, v_reinterpret_as_f64(vx_setall_s64(0x7ff0000000000000)));
_vlog_z = v_select(mask_inf, x, _vlog_z);
return _vlog_z;
}
#endif
#define OPENCV_HAL_MATH_HAVE_LOG 1
//! @}
#endif
/* This implementation is derived from the approximation approach of Error Function (Erf) from PyTorch
https://github.com/pytorch/pytorch/blob/9c50ecc84b9a6e699a7f058891b889aafbf976c7/aten/src/ATen/cpu/vec/vec512/vec512_float.h#L189-L220
*/
#ifndef OPENCV_HAL_MATH_HAVE_ERF
//! @name Error Function
//! @{
inline v_float32 v_erf(const v_float32 &v) {
const v_float32 coef0 = vx_setall_f32(0.3275911f),
coef1 = vx_setall_f32(1.061405429f),
coef2 = vx_setall_f32(-1.453152027f),
coef3 = vx_setall_f32(1.421413741f),
coef4 = vx_setall_f32(-0.284496736f),
coef5 = vx_setall_f32(0.254829592f),
ones = vx_setall_f32(1.0f),
neg_zeros = vx_setall_f32(-0.f);
v_float32 t = v_abs(v);
// sign(v)
v_float32 sign_mask = v_and(neg_zeros, v);
t = v_div(ones, v_fma(coef0, t, ones));
v_float32 r = v_fma(coef1, t, coef2);
r = v_fma(r, t, coef3);
r = v_fma(r, t, coef4);
r = v_fma(r, t, coef5);
// - v * v
v_float32 pow_2 = v_mul(v, v);
v_float32 neg_pow_2 = v_xor(neg_zeros, pow_2);
// - exp(- v * v)
v_float32 exp = v_exp(neg_pow_2);
v_float32 neg_exp = v_xor(neg_zeros, exp);
v_float32 res = v_mul(t, neg_exp);
res = v_fma(r, res, ones);
return v_xor(sign_mask, res);
}
#define OPENCV_HAL_MATH_HAVE_ERF 1
//! @}
#endif // OPENCV_HAL_MATH_HAVE_ERF
}
#endif // OPENCV_HAL_INTRIN_HPP

@ -2136,7 +2136,7 @@ public:
/** @overload */
template<typename _Tp, typename Functor> void forEach(const Functor& operation) const;
Mat(Mat&& m);
Mat(Mat&& m) CV_NOEXCEPT;
Mat& operator = (Mat&& m);
enum { MAGIC_VAL = 0x42FF0000, AUTO_STEP = 0, CONTINUOUS_FLAG = CV_MAT_CONT_FLAG, SUBMATRIX_FLAG = CV_SUBMAT_FLAG };

@ -57,7 +57,7 @@ This section describes OpenGL interoperability.
To enable OpenGL support, configure OpenCV using CMake with WITH_OPENGL=ON . Currently OpenGL is
supported only with WIN32, GTK and Qt backends on Windows and Linux (MacOS and Android are not
supported). For GTK backend gtkglext-1.0 library is required.
supported). For GTK-2.0 backend gtkglext-1.0 library is required.
To use OpenGL functionality you should first create OpenGL context (window or frame buffer). You can
do this with namedWindow function or with other OpenGL toolkit (GLUT, for example).

@ -544,6 +544,18 @@ bool isAligned(const void* p1, const void* p2, const void* p3, const void* p4)
return isAligned<N>(((size_t)p1)|((size_t)p2)|((size_t)p3)|((size_t)p4));
}
/*! @brief Flags that allow modifying the behaviour of some functions. Used as a set of flags.
*/
enum AlgorithmHint {
ALGO_HINT_DEFAULT = 0, //!< Default algorithm behaviour defined during OpenCV build
ALGO_HINT_ACCURATE = 1, //!< Use generic portable implementation
ALGO_HINT_APPROX = 2, //!< Allow alternative approximations to get a faster implementation. Behaviour and result depend on the platform
};
/*! @brief Returns the AlgorithmHint defined during OpenCV compilation. Defines the #ALGO_HINT_DEFAULT behaviour.
*/
CV_EXPORTS_W AlgorithmHint getDefaultAlgorithmHint();
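A small sketch (editorial addition) of querying the build-time hint from user code:
@code{.cpp}
// Hypothetical example: report which implementation hint OpenCV was built with.
#include <opencv2/core/utility.hpp>
#include <iostream>

int main()
{
    cv::AlgorithmHint hint = cv::getDefaultAlgorithmHint();
    std::cout << "Default algorithm hint: "
              << (hint == cv::ALGO_HINT_APPROX ? "APPROX" : "ACCURATE")
              << std::endl;
    return 0;
}
@endcode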
/** @brief Enables or disables the optimized code.
The function can be used to dynamically turn on and off optimized dispatched code (code that uses SSE4.2, AVX/AVX2,

@ -614,7 +614,7 @@ size_t Mat::total(int startDim, int endDim) const
}
Mat::Mat(Mat&& m)
Mat::Mat(Mat&& m) CV_NOEXCEPT
: flags(m.flags), dims(m.dims), rows(m.rows), cols(m.cols), data(m.data),
datastart(m.datastart), dataend(m.dataend), datalimit(m.datalimit), allocator(m.allocator),
u(m.u), size(&rows)

@ -1316,6 +1316,12 @@ redirectError( ErrorCallback errCallback, void* userdata, void** prevUserdata)
return prevCallback;
}
void terminate(int _code, const String& _err, const char* _func, const char* _file, int _line) CV_NOEXCEPT
{
dumpException(cv::Exception(_code, _err, _func, _file, _line));
std::terminate();
}
}
@ -2834,6 +2840,14 @@ bool restoreFPDenormalsState(const FPDenormalsModeState& state)
} // namespace details
AlgorithmHint getDefaultAlgorithmHint()
{
#ifdef OPENCV_ALGO_HINT_DEFAULT
return OPENCV_ALGO_HINT_DEFAULT;
#else
return ALGO_HINT_ACCURATE;
#endif
};
} // namespace cv

@ -1748,6 +1748,7 @@ template<typename R> struct TheTest
TheTest & test_loadstore_fp16_f32()
{
printf("test_loadstore_fp16_f32 ...\n");
AlignedData<v_uint16> data; data.a.clear();
data.a.d[0] = 0x3c00; // 1.0
data.a.d[VTraits<R>::vlanes() - 1] = (unsigned short)0xc000; // -2.0
@ -1873,6 +1874,216 @@ template<typename R> struct TheTest
return *this;
}
void __test_exp(LaneType dataMax, LaneType diff_thr, LaneType enlarge_factor, LaneType flt_min) {
int n = VTraits<R>::vlanes();
// Test overflow and underflow values with step
const LaneType step = (LaneType) 0.01;
for (LaneType i = dataMax + 1; i <= dataMax + 11;) {
Data<R> dataUpperBound, dataLowerBound, resOverflow, resUnderflow;
for (int j = 0; j < n; ++j) {
dataUpperBound[j] = i;
dataLowerBound[j] = -i;
i += step;
}
R upperBound = dataUpperBound, lowerBound = dataLowerBound;
resOverflow = v_exp(upperBound);
resUnderflow = v_exp(lowerBound);
for (int j = 0; j < n; ++j) {
SCOPED_TRACE(cv::format("Overflow/Underflow test value: %f", i));
EXPECT_TRUE(resOverflow[j] > 0 && std::isinf(resOverflow[j]));
EXPECT_GE(resUnderflow[j], 0);
EXPECT_LT(resUnderflow[j], flt_min);
}
}
// Test random values combined with special values
std::vector<LaneType> specialValues = {0, 1, INFINITY, -INFINITY, NAN, dataMax};
const int testRandNum = 10000;
const double specialValueProbability = 0.1; // 10% chance to insert a special value
cv::RNG_MT19937 rng;
for (int i = 0; i < testRandNum; i++) {
Data<R> dataRand, resRand;
for (int j = 0; j < n; ++j) {
if (rng.uniform(0.f, 1.f) <= specialValueProbability) {
// Insert a special value
int specialValueIndex = rng.uniform(0, (int) specialValues.size());
dataRand[j] = specialValues[specialValueIndex];
} else {
// Generate random data in [-dataMax*1.1, dataMax*1.1]
dataRand[j] = (LaneType) rng.uniform(-dataMax * 1.1, dataMax * 1.1);
}
}
// Compare with std::exp
R x = dataRand;
resRand = v_exp(x);
for (int j = 0; j < n; ++j) {
SCOPED_TRACE(cv::format("Random test value: %f", dataRand[j]));
LaneType std_exp = std::exp(dataRand[j]);
if (dataRand[j] == 0) {
// input 0 -> output 1
EXPECT_EQ(resRand[j], 1);
} else if (dataRand[j] == 1) {
// input 1 -> output e
EXPECT_NEAR((LaneType) M_E, resRand[j], 1e-15);
} else if (dataRand[j] > 0 && std::isinf(dataRand[j])) {
// input INF -> output INF
EXPECT_TRUE(resRand[j] > 0 && std::isinf(resRand[j]));
} else if (dataRand[j] < 0 && std::isinf(dataRand[j])) {
// input -INF -> output 0
EXPECT_EQ(resRand[j], 0);
} else if (std::isnan(dataRand[j])) {
// input NaN -> output NaN
EXPECT_TRUE(std::isnan(resRand[j]));
} else if (dataRand[j] == dataMax) {
// input dataMax -> output less than INFINITY
EXPECT_LT(resRand[j], (LaneType) INFINITY);
} else if (std::isinf(resRand[j])) {
// output INF -> input close to edge
EXPECT_GT(dataRand[j], dataMax);
} else {
EXPECT_GE(resRand[j], 0);
EXPECT_LT(std::abs(resRand[j] - std_exp), diff_thr * (std_exp + flt_min * enlarge_factor));
}
}
}
}
TheTest &test_exp_fp16() {
// issue after 4.x merge: float16_t and hfloat conflict: https://github.com/opencv/opencv/issues/25922
#if CV_SIMD_FP16 & 0
float16_t flt16_min;
uint16_t flt16_min_hex = 0x0400;
std::memcpy(&flt16_min, &flt16_min_hex, sizeof(float16_t));
__test_exp((float16_t) 10, (float16_t) 1e-2, (float16_t) 1e2, flt16_min);
#endif
return *this;
}
TheTest &test_exp_fp32() {
__test_exp(88.0f, 1e-6f, 1e6f, FLT_MIN);
return *this;
}
TheTest &test_exp_fp64() {
#if CV_SIMD_64F || CV_SIMD_SCALABLE_64F
__test_exp(709.0, 1e-15, 1e15, DBL_MIN);
#endif
return *this;
}
void __test_log(LaneType expBound, LaneType diff_thr, LaneType flt_min) {
int n = VTraits<R>::vlanes();
// Test special values
std::vector<LaneType> specialValues = {0, 1, (LaneType) M_E, INFINITY, -INFINITY, NAN};
const int testRandNum = 10000;
const double specialValueProbability = 0.1; // 10% chance to insert a special value
cv::RNG_MT19937 rng;
for (int i = 0; i < testRandNum; i++) {
Data<R> dataRand, resRand;
for (int j = 0; j < n; ++j) {
if (rng.uniform(0.f, 1.f) <= specialValueProbability) {
// Insert a special value
int specialValueIndex = rng.uniform(0, (int) specialValues.size());
dataRand[j] = specialValues[specialValueIndex];
} else {
// Generate uniform random data in [-expBound, expBound]
dataRand[j] = (LaneType) std::exp(rng.uniform(-expBound, expBound));
}
}
// Compare with std::log
R x = dataRand;
resRand = v_log(x);
for (int j = 0; j < n; ++j) {
SCOPED_TRACE(cv::format("Random test value: %f", dataRand[j]));
LaneType std_log = std::log(dataRand[j]);
if (dataRand[j] == 0) {
// input 0 -> output -INF
EXPECT_TRUE(std::isinf(resRand[j]) && resRand[j] < 0);
} else if (dataRand[j] < 0 || std::isnan(dataRand[j])) {
// input less than 0 -> output NAN
// input NaN -> output NaN
EXPECT_TRUE(std::isnan(resRand[j]));
} else if (dataRand[j] == 1) {
// input 1 -> output 0
EXPECT_EQ((LaneType) 0, resRand[j]);
} else if (std::isinf(dataRand[j]) && dataRand[j] > 0) {
// input INF -> output INF
EXPECT_TRUE(std::isinf(resRand[j]) && resRand[j] > 0);
} else {
EXPECT_LT(std::abs(resRand[j] - std_log), diff_thr * (std::abs(std_log) + flt_min * 100));
}
}
}
}
TheTest &test_log_fp16() {
// issue after 4.x merge: float16_t and hfloat conflict: https://github.com/opencv/opencv/issues/25922
#if CV_SIMD_FP16 & 0
float16_t flt16_min;
uint16_t flt16_min_hex = 0x0400;
std::memcpy(&flt16_min, &flt16_min_hex, sizeof(float16_t));
__test_log((float16_t) 9, (float16_t) 1e-3, flt16_min);
#endif
return *this;
}
TheTest &test_log_fp32() {
__test_log(25.f, 1e-6f, FLT_MIN);
return *this;
}
TheTest &test_log_fp64() {
#if CV_SIMD_64F || CV_SIMD_SCALABLE_64F
__test_log(200., 1e-15, DBL_MIN);
#endif
return *this;
}
TheTest &test_erf_fp32() {
int n = VTraits<R>::vlanes();
constexpr int num_loops = 10000;
const std::vector<LaneType> singular_inputs{INFINITY, -INFINITY, NAN};
constexpr double insert_singular_input_probability = 0.1;
cv::RNG_MT19937 rng;
for (int i = 0; i < num_loops; i++) {
Data<R> inputs;
for (int j = 0; j < n; j++) {
if (rng.uniform(0.f, 1.f) <= insert_singular_input_probability) {
int singular_input_index = rng.uniform(0, int(singular_inputs.size()));
inputs[j] = singular_inputs[singular_input_index];
} else {
// std::exp(float) overflows at about 88.0f.
// In v_erf, exp is called on input*input. So test range is [-sqrt(88.0f), sqrt(88.0f)]
inputs[j] = (LaneType) rng.uniform(-9.4f, 9.4f);
}
}
Data<R> outputs = v_erf(R(inputs));
for (int j = 0; j < n; j++) {
SCOPED_TRACE(cv::format("Random test value: %f", inputs[j]));
if (std::isinf(inputs[j])) {
if (inputs[j] < 0) {
EXPECT_EQ(-1, outputs[j]);
} else {
EXPECT_EQ(1, outputs[j]);
}
} else if (std::isnan(inputs[j])) {
EXPECT_TRUE(std::isnan(outputs[j]));
} else {
LaneType ref_output = std::erf(inputs[j]);
EXPECT_LT(std::abs(outputs[j] - ref_output), 1e-3f * (std::abs(ref_output) + FLT_MIN * 1e4f));
}
}
}
return *this;
}
};
#define DUMP_ENTRY(type) printf("SIMD%d: %s\n", 8*VTraits<v_uint8>::vlanes(), CV__TRACE_FUNCTION);
@ -2186,6 +2397,9 @@ void test_hal_intrin_float32()
.test_extract_highest()
.test_broadcast_highest()
.test_pack_triplets()
.test_exp_fp32()
.test_log_fp32()
.test_erf_fp32()
#if CV_SIMD_WIDTH == 32
.test_extract<4>().test_extract<5>().test_extract<6>().test_extract<7>()
.test_rotate<4>().test_rotate<5>().test_rotate<6>().test_rotate<7>()
@ -2217,6 +2431,8 @@ void test_hal_intrin_float64()
.test_rotate<0>().test_rotate<1>()
.test_extract_n<0>().test_extract_n<1>()
.test_extract_highest()
.test_exp_fp64()
.test_log_fp64()
//.test_broadcast_element<0>().test_broadcast_element<1>()
#if CV_SIMD_WIDTH == 32
.test_extract<2>().test_extract<3>()
@ -2258,6 +2474,8 @@ void test_hal_intrin_float16()
.test_extract_highest()
.test_broadcast_element<0>().test_broadcast_element<1>()
.test_extract_n<0>().test_extract_n<1>()
.test_exp_fp16()
.test_log_fp16()
#endif
;
#else

@ -608,13 +608,13 @@ CV__DNN_INLINE_NS_BEGIN
* @param outputName name for layer which output is needed to get
* @details If @p outputName is empty, runs forward pass for the whole network.
*/
CV_WRAP void forward(OutputArrayOfArrays outputBlobs, const String& outputName = String());
CV_WRAP void forward(CV_ND OutputArrayOfArrays outputBlobs, const String& outputName = String());
/** @brief Runs forward pass to compute outputs of layers listed in @p outBlobNames.
* @param outputBlobs contains blobs for first outputs of specified layers.
* @param outBlobNames names for layers which outputs are needed to get
*/
CV_WRAP void forward(OutputArrayOfArrays outputBlobs,
CV_WRAP void forward(CV_ND OutputArrayOfArrays outputBlobs,
const std::vector<String>& outBlobNames);
/** @brief Runs forward pass to compute outputs of layers listed in @p outBlobNames.
@ -661,7 +661,7 @@ CV__DNN_INLINE_NS_BEGIN
* as:
* \f[input(n,c,h,w) = scalefactor \times (blob(n,c,h,w) - mean_c)\f]
*/
CV_WRAP void setInput(InputArray blob, const String& name = "",
CV_WRAP void setInput(CV_ND InputArray blob, const String& name = "",
double scalefactor = 1.0, const Scalar& mean = Scalar());
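A minimal usage sketch of the forward() and setInput() overloads documented above (illustrative, not part of this patch; "model.onnx" and the blob shape are placeholders):

#include <opencv2/dnn.hpp>
#include <vector>

int main()
{
    cv::dnn::Net net = cv::dnn::readNet("model.onnx");              // hypothetical model file
    cv::Mat blob(std::vector<int>{1, 3, 224, 224}, CV_32F, cv::Scalar(0));
    net.setInput(blob);                                             // setInput(InputArray blob, ...)

    // forward(const String& outputName = String()): single (last) output
    cv::Mat out = net.forward();

    // forward(OutputArrayOfArrays outputBlobs, const std::vector<String>& outBlobNames)
    std::vector<cv::Mat> outs;
    std::vector<cv::String> names = net.getUnconnectedOutLayersNames();
    net.forward(outs, names);
    return 0;
}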
/** @brief Sets the new value for the learned param of the layer.
@ -672,8 +672,8 @@ CV__DNN_INLINE_NS_BEGIN
* @note If shape of the new blob differs from the previous shape,
* then the following forward pass may fail.
*/
CV_WRAP void setParam(int layer, int numParam, const Mat &blob);
CV_WRAP inline void setParam(const String& layerName, int numParam, const Mat &blob) { return setParam(getLayerId(layerName), numParam, blob); }
CV_WRAP void setParam(int layer, int numParam, CV_ND const Mat &blob);
CV_WRAP inline void setParam(const String& layerName, int numParam, CV_ND const Mat &blob) { return setParam(getLayerId(layerName), numParam, blob); }
/** @brief Returns parameter blob of the layer.
* @param layer name or id of the layer.

@ -456,10 +456,6 @@ class dnn_test(NewOpenCVTests):
"Verify OPENCV_DNN_TEST_DATA_PATH configuration parameter.")
input = np.load(input_file)
# we have to expand the shape of input tensor because Python bindings cut 3D tensors to 2D
# it should be fixed in future. see : https://github.com/opencv/opencv/issues/19091
# please remove `expand_dims` after that
input = np.expand_dims(input, axis=3)
gold_output = np.load(output_file)
for backend, target in self.dnnBackendsAndTargets:
@ -470,10 +466,63 @@ class dnn_test(NewOpenCVTests):
net.setPreferableBackend(backend)
net.setPreferableTarget(target)
# Check whether 3d shape is parsed correctly for setInput
net.setInput(input)
real_output = net.forward()
normAssert(self, real_output, gold_output, "", getDefaultThreshold(target))
# Case 0: test API `forward(const String& outputName = String())`
real_output = net.forward() # Retval is a np.array of shape [2, 5, 3]
normAssert(self, real_output, gold_output, "Case 0", getDefaultThreshold(target))
'''
Pre-allocate output memory with the correct shape.
Normally Python users do not use the API this way,
but we have to test it since the API is designed this way.
'''
# Case 1: a np.array with a string of output name.
# It tests API `forward(OutputArrayOfArrays outputBlobs, const String& outputName = String())`
# when outputBlobs is a np.array and we expect it to be the only output.
real_output = np.empty([2, 5, 3], dtype=np.float32)
real_output = net.forward(real_output, "237") # Retval is a tuple with a np.array of shape [2, 5, 3]
normAssert(self, real_output, gold_output, "Case 1", getDefaultThreshold(target))
# Case 2: a tuple of np.array with a string of output name.
# It tests API `forward(OutputArrayOfArrays outputBlobs, const String& outputName = String())`
# when outputBlobs is a container of several np.array and we expect to save all outputs accordingly.
real_output = tuple(np.empty([2, 5, 3], dtype=np.float32))
real_output = net.forward(real_output, "237") # Retval is a tuple with a np.array of shape [2, 5, 3]
normAssert(self, real_output, gold_output, "Case 2", getDefaultThreshold(target))
# Case 3: a tuple of np.array with a string of output name.
# It tests API `forward(OutputArrayOfArrays outputBlobs, const std::vector<String>& outBlobNames)`
real_output = tuple(np.empty([2, 5, 3], dtype=np.float32))
# Note that it does not support parsing a list, e.g. ["237"]
real_output = net.forward(real_output, ("237")) # Retval is a tuple with a np.array of shape [2, 5, 3]
normAssert(self, real_output, gold_output, "Case 3", getDefaultThreshold(target))
def test_set_param_3d(self):
model_path = self.find_dnn_file('dnn/onnx/models/matmul_3d_init.onnx')
input_file = self.find_dnn_file('dnn/onnx/data/input_matmul_3d_init.npy')
output_file = self.find_dnn_file('dnn/onnx/data/output_matmul_3d_init.npy')
input = np.load(input_file)
output = np.load(output_file)
for backend, target in self.dnnBackendsAndTargets:
printParams(backend, target)
net = cv.dnn.readNet(model_path)
node_name = net.getLayerNames()[0]
w = net.getParam(node_name, 0) # returns the original tensor of three-dimensional shape
net.setParam(node_name, 0, w) # set param once again to see whether tensor is converted with correct shape
net.setPreferableBackend(backend)
net.setPreferableTarget(target)
net.setInput(input)
res_output = net.forward()
normAssert(self, output, res_output, "", getDefaultThreshold(target))
def test_scalefactor_assign(self):
params = cv.dnn.Image2BlobParams()

@ -973,4 +973,49 @@ INSTANTIATE_TEST_CASE_P(/**/, Layer_Softmax, Combine(
/* withCann= */ false) // only test on CPU
));
using Layer_Elementwise = TestBaseWithParam<tuple<std::vector<int>, std::string, tuple<Backend, Target>>>;
PERF_TEST_P_(Layer_Elementwise, elementwise) {
std::vector<int> input_shape = get<0>(GetParam());
std::string op = get<1>(GetParam());
int backend_id = get<0>(get<2>(GetParam()));
int target_id = get<1>(get<2>(GetParam()));
Mat input(input_shape, CV_32F);
randn(input, 0.f, 1.f);
LayerParams lp;
lp.type = op;
lp.name = "TestLayer";
Net net;
net.addLayerToPrev(lp.name, lp.type, lp);
// Warmup
{
net.setInput(input);
net.setPreferableBackend(backend_id);
net.setPreferableTarget(target_id);
Mat out = net.forward();
}
TEST_CYCLE() {
net.forward();
}
SANITY_CHECK_NOTHING();
}
INSTANTIATE_TEST_CASE_P(/**/, Layer_Elementwise, testing::Combine(
testing::Values(std::vector<int>{1, 50, 3072}),
testing::Values(std::string{"Gelu"}),
dnnBackendsAndTargets(/* withInferenceEngine= */ true,
/* withHalide= */ false,
/* withCpuOCV= */ true,
/* withVkCom= */ false,
/* withCUDA= */ true,
/* withNgraph= */ true,
/* withWebnn= */ false,
/* withCann= */ false) // only test on CPU
));
} // namespace

@ -15,7 +15,7 @@
namespace cv { namespace dnn { namespace cuda4dnn {
void checkVersions()
inline void checkVersions()
{
// https://docs.nvidia.com/deeplearning/cudnn/developer-guide/index.html#programming-model
// cuDNN API Compatibility
@ -23,8 +23,19 @@ namespace cv { namespace dnn { namespace cuda4dnn {
// Any patch release x.y.z is forward or backward-compatible with applications built against another cuDNN patch release x.y.w (meaning, of the same major and minor version number, but having w!=z).
// cuDNN minor releases beginning with cuDNN 7 are binary backward-compatible with applications built against the same or earlier patch release (meaning, an application built against cuDNN 7.x is binary compatible with cuDNN library 7.y, where y>=x).
// Applications compiled with a cuDNN version 7.y are not guaranteed to work with 7.x release when y > x.
auto cudnn_bversion = cudnnGetVersion();
auto cudnn_major_bversion = cudnn_bversion / 1000, cudnn_minor_bversion = cudnn_bversion % 1000 / 100;
int cudnn_bversion = cudnnGetVersion();
int cudnn_major_bversion = 0, cudnn_minor_bversion = 0;
// CuDNN changed major version multiplier in 9.0
if (cudnn_bversion >= 9*10000)
{
cudnn_major_bversion = cudnn_bversion / 10000;
cudnn_minor_bversion = cudnn_bversion % 10000 / 100;
}
else
{
cudnn_major_bversion = cudnn_bversion / 1000;
cudnn_minor_bversion = cudnn_bversion % 1000 / 100;
}
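// Illustrative decodings (not from the patch): cuDNN 9.1.0 reports cudnnGetVersion() == 90100,
// which yields major 9 / minor 1; cuDNN 8.9.7 reports 8907, which yields major 8 / minor 9.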
if (cudnn_major_bversion != CUDNN_MAJOR || cudnn_minor_bversion < CUDNN_MINOR)
{
std::ostringstream oss;
@ -33,21 +44,23 @@ namespace cv { namespace dnn { namespace cuda4dnn {
}
}
int getDeviceCount()
inline int getDeviceCount()
{
return cuda::getCudaEnabledDeviceCount();
}
int getDevice()
inline int getDevice()
{
int device_id = -1;
CUDA4DNN_CHECK_CUDA(cudaGetDevice(&device_id));
return device_id;
}
bool isDeviceCompatible()
inline bool isDeviceCompatible(int device_id = -1)
{
int device_id = getDevice();
if (device_id < 0)
device_id = getDevice();
if (device_id < 0)
return false;
@ -65,9 +78,11 @@ namespace cv { namespace dnn { namespace cuda4dnn {
return false;
}
bool doesDeviceSupportFP16()
inline bool doesDeviceSupportFP16(int device_id = -1)
{
int device_id = getDevice();
if (device_id < 0)
device_id = getDevice();
if (device_id < 0)
return false;
@ -76,9 +91,7 @@ namespace cv { namespace dnn { namespace cuda4dnn {
CUDA4DNN_CHECK_CUDA(cudaDeviceGetAttribute(&minor, cudaDevAttrComputeCapabilityMinor, device_id));
int version = major * 10 + minor;
if (version < 53)
return false;
return true;
return (version >= 53);
}
}}} /* namespace cv::dnn::cuda4dnn */

@ -316,6 +316,8 @@ public:
ranges[cAxis].start = 0;
for (size_t i = 0; i < inputs.size(); i++)
{
if (inputs[i].empty())
continue;
ranges[cAxis].end = ranges[cAxis].start + inputs[i].size[cAxis];
for (int j = 0; j < outMat.dims; ++j)
{

@ -71,48 +71,12 @@ void softmax(Mat &dst, const Mat &src, int axis, int axisBias, int axisStep){
// calculate the exp value along the axis
v_float32 vs = vx_setzero_f32();
vmax = vx_setall_f32(maxVal);
// initialize vexp constant
v_float32 _vexp_lo = vx_setall_f32(-88.3762626647949f);
v_float32 _vexp_hi = vx_setall_f32(88.3762626647949f);
v_float32 _vexp_half = vx_setall_f32(0.5f);
v_float32 _vexp_one = vx_setall_f32(1.f);
v_float32 _vexp_LOG2EF = vx_setall_f32(1.44269504088896341f);
v_float32 _vexp_C1 = vx_setall_f32(-0.693359375f);
v_float32 _vexp_C2 = vx_setall_f32(2.12194440e-4f);
v_float32 _vexp_p0 = vx_setall_f32(1.9875691500E-4f);
v_float32 _vexp_p1 = vx_setall_f32(1.3981999507E-3f);
v_float32 _vexp_p2 = vx_setall_f32(8.3334519073E-3f);
v_float32 _vexp_p3 = vx_setall_f32(4.1665795894E-2f);
v_float32 _vexp_p4 = vx_setall_f32(1.6666665459E-1f);
v_float32 _vexp_p5 = vx_setall_f32(5.0000001201E-1f);
// initialize temp vectors for vexp
v_float32 val, _vexp_, _vexp_x, _vexp_y, _vexp_z;
v_int32 _vexp_mm;
v_float32 val;
// calculate and sum all data along axis
for (size_t cnDim = 0; cnDim < axisStep; cnDim += nlanes) {
val = vx_load(axisBuf + cnDim);
val = v_sub(val, vmax);
// compute vexp of val
_vexp_x = v_min(val, _vexp_hi);
_vexp_x = v_max(_vexp_x, _vexp_lo);
_vexp_ = v_fma(_vexp_x, _vexp_LOG2EF, _vexp_half);
_vexp_mm = v_floor(_vexp_);
_vexp_ = v_cvt_f32(_vexp_mm);
_vexp_mm = v_add(_vexp_mm, vx_setall_s32(0x7f));
_vexp_mm = v_shl(_vexp_mm, 23);
_vexp_x = v_fma(_vexp_, _vexp_C1, _vexp_x);
_vexp_x = v_fma(_vexp_, _vexp_C2, _vexp_x);
_vexp_z = v_mul(_vexp_x, _vexp_x);
_vexp_y = v_fma(_vexp_x, _vexp_p0, _vexp_p1);
_vexp_y = v_fma(_vexp_y, _vexp_x, _vexp_p2);
_vexp_y = v_fma(_vexp_y, _vexp_x, _vexp_p3);
_vexp_y = v_fma(_vexp_y, _vexp_x, _vexp_p4);
_vexp_y = v_fma(_vexp_y, _vexp_x, _vexp_p5);
_vexp_y = v_fma(_vexp_y, _vexp_z, _vexp_x);
_vexp_y = v_add(_vexp_y, _vexp_one);
val = v_mul(_vexp_y, v_reinterpret_as_f32(_vexp_mm));
val = v_exp(val);
vs = v_add(vs, val);
v_store(axisBuf + cnDim, val);

@ -684,20 +684,82 @@ private:
static const char* const ocl_kernel_name;
};
struct GeluFunctor : public BaseDefaultFunctor<GeluFunctor>
{
typedef GeluLayer Layer;
namespace {
// Refer to v_erf in modules/core/include/opencv2/core/hal/intrin_math.hpp
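// Note (added for reference, not part of the patch): these are the coefficients of the
// Abramowitz-Stegun 7.1.26 rational approximation of erf, with absolute error on the order of 1e-7.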
constexpr float c_erf_coef0 = 0.3275911f;
constexpr float c_erf_coef1 = 1.061405429f;
constexpr float c_erf_coef2 = -1.453152027f;
constexpr float c_erf_coef3 = 1.421413741f;
constexpr float c_erf_coef4 = -0.284496736f;
constexpr float c_erf_coef5 = 0.254829592f;
explicit GeluFunctor() {}
inline float erf_approx(float v) {
float t = 1.f / fmaf(fabsf(v), c_erf_coef0, 1.f);
float r = fmaf(c_erf_coef1, t, c_erf_coef2);
r = fmaf(r, t, c_erf_coef3);
r = fmaf(r, t, c_erf_coef4);
r = fmaf(r, t, c_erf_coef5);
r = 1.f - r * t * expf(-v * v);
return std::copysignf(r, v);
}
}
bool supportBackend(int backendId, int)
{
return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_CUDA;
struct GeluFunctor : public BaseFunctor {
using Layer = GeluLayer;
int vlanes;
explicit GeluFunctor() {
#if (CV_SIMD || CV_SIMD_SCALABLE)
vlanes = VTraits<v_float32>::vlanes();
#else
vlanes = 1;
#endif
}
inline float calculate(float x) const
bool supportBackend(int backendId, int) {
return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_CUDA || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH;
}
void apply(const float* srcptr, float* dstptr, int stripeStart, int len, size_t planeSize, int cn0, int cn1) const {
CV_UNUSED(stripeStart);
for (int cn = cn0; cn < cn1; cn++, srcptr += planeSize, dstptr += planeSize) {
int i = 0;
#if (CV_SIMD || CV_SIMD_SCALABLE)
// 0.5f * x * (1.0f + erf(x * M_SQRT1_2));
v_float32 half = vx_setall_f32(0.5f),
one = vx_setall_f32(1.0f),
reciprocal_sqrt2 = vx_setall_f32(M_SQRT1_2);
for (; i <= len - vlanes; i += vlanes) {
if (i + vlanes > len) {
if (i == 0 || i == len) {
break;
}
i = len - vlanes;
}
v_float32 x0 = vx_load(srcptr + i);
// t = x * M_SQRT1_2
v_float32 t0 = v_mul(reciprocal_sqrt2, x0);
// t = 1.0f + t
t0 = v_add(one, v_erf(t0));
// x = 0.5 * x
x0 = v_mul(half, x0);
// x = x * t
x0 = v_mul(x0, t0);
vx_store(dstptr + i, x0);
}
#endif
// 0.5f * x * (1.0f + erf(x * M_SQRT1_2));
for( ; i < len; i++ )
{
return 0.5f * x * (1.0f + erf(x * M_SQRT1_2));
float x = srcptr[i];
dstptr[i] = 0.5f * x * (1.0f + erf_approx(x * M_SQRT1_2));
}
}
}
#ifdef HAVE_CUDA
@ -707,12 +769,55 @@ struct GeluFunctor : public BaseDefaultFunctor<GeluFunctor>
}
#endif
#ifdef HAVE_OPENCL
bool initKernel(ocl::Kernel &ker, const UMat &src) const
{
String buildopt = oclGetTMacro(src);
if (!ker.create("GeluForward", ocl::dnn::activations_oclsrc, buildopt))
return false;
return true;
}
bool applyOCL(InputArrayOfArrays inps, OutputArrayOfArrays outs, OutputArrayOfArrays internals)
{
std::vector<UMat> inputs;
std::vector<UMat> outputs;
inps.getUMatVector(inputs);
outs.getUMatVector(outputs);
for (size_t i = 0; i < inputs.size(); i++)
{
UMat& src = inputs[i];
UMat& dst = outputs[i];
CV_Assert(src.isContinuous() && dst.isContinuous() && !src.offset && !dst.offset);
ocl::Kernel kernel;
CV_Assert(initKernel(kernel, src));
kernel.set(0, (int)src.total());
kernel.set(1, ocl::KernelArg::PtrReadOnly(src));
kernel.set(2, ocl::KernelArg::PtrWriteOnly(dst));
size_t gSize = src.total();
CV_Assert(kernel.run(1, &gSize, NULL, false));
}
return true;
}
#endif
#ifdef HAVE_DNN_NGRAPH
std::shared_ptr<ngraph::Node> initNgraphAPI(const ngraph::Output<ngraph::Node>& node)
{
return std::make_shared<ov::op::v0::Gelu>(node);
}
#endif // HAVE_DNN_NGRAPH
int64 getFLOPSPerElement() const { return 100; }
};
template<>
const char* const BaseDefaultFunctor<GeluFunctor>::ocl_kernel_name = "GeluForward";
namespace GeluApproximationConstants
{
static constexpr float sqrt_2_pi = 0.7978845834732056f;

@ -69,11 +69,13 @@ Range normalizeRange(const Range& input_range, int n)
{
Range range = input_range;
if (range.start != n){
range.start = std::min(std::max(range.start, -n), n - 1);
if (range.start < 0)
{
range.start += n;
}
}
range.end = std::min(std::max(range.end, -n), n);
if (range.end < 0)
@ -630,9 +632,11 @@ public:
{
for (size_t i = 0; i < outputs.size(); i++)
{
if (finalSliceRanges[i][0].start != finalSliceRanges[i][0].end){
inpMat(finalSliceRanges[i]).copyTo(outputs[i]);
}
}
}
else
{
int dimsNum = inpMat.dims;

@ -10,6 +10,10 @@
#include "backend.hpp"
#include "factory.hpp"
#ifdef HAVE_CUDA
#include "cuda4dnn/init.hpp"
#endif
namespace cv {
namespace dnn {
CV__DNN_INLINE_NS_BEGIN
@ -240,6 +244,16 @@ void Net::Impl::setPreferableTarget(int targetId)
#endif
}
if (IS_DNN_CUDA_TARGET(targetId))
{
preferableTarget = DNN_TARGET_CPU;
#ifdef HAVE_CUDA
if (cuda4dnn::doesDeviceSupportFP16() && targetId == DNN_TARGET_CUDA_FP16)
preferableTarget = DNN_TARGET_CUDA_FP16;
else
preferableTarget = DNN_TARGET_CUDA;
#endif
}
#if !defined(__arm64__) || !__arm64__
if (targetId == DNN_TARGET_CPU_FP16)
{

@ -48,6 +48,10 @@
#pragma OPENCL EXTENSION cl_khr_fp16 : enable
#endif
#if !defined(M_SQRT1_2)
#define M_SQRT1_2 0.707106781186547524400844362104849039 /* 1/sqrt(2) */
#endif
__kernel void ReLUForward(const int count, __global const T* in, __global T* out
#ifndef RELU_NO_SLOPE
, KERNEL_ARG_DTYPE negative_slope

@ -15,6 +15,10 @@
#include "backend.hpp"
#include "factory.hpp"
#ifdef HAVE_CUDA
#include "cuda4dnn/init.hpp"
#endif
namespace cv {
namespace dnn {
CV__DNN_INLINE_NS_BEGIN
@ -107,9 +111,27 @@ private:
#endif
#ifdef HAVE_CUDA
if (haveCUDA())
cuda4dnn::checkVersions();
bool hasCudaCompatible = false;
bool hasCudaFP16 = false;
for (int i = 0; i < cuda4dnn::getDeviceCount(); i++)
{
if (cuda4dnn::isDeviceCompatible(i))
{
hasCudaCompatible = true;
if (cuda4dnn::doesDeviceSupportFP16(i))
{
hasCudaFP16 = true;
break; // we already have all we need here
}
}
}
if (hasCudaCompatible)
{
backends.push_back(std::make_pair(DNN_BACKEND_CUDA, DNN_TARGET_CUDA));
if (hasCudaFP16)
backends.push_back(std::make_pair(DNN_BACKEND_CUDA, DNN_TARGET_CUDA_FP16));
}
#endif

@ -210,7 +210,7 @@ public:
if ((!l->supportBackend(backend) || l->preferableTarget != target) && !fused)
{
hasFallbacks = true;
std::cout << "FALLBACK: Layer [" << l->type << "]:[" << l->name << "] is expected to has backend implementation" << endl;
std::cout << "FALLBACK: Layer [" << l->type << "]:[" << l->name << "] is expected to have backend implementation" << endl;
}
}
if (hasFallbacks && raiseError)

@ -282,6 +282,14 @@ static const TestCase testConformanceConfig[] = {
{"test_gathernd_example_float32", 2, 1},
{"test_gathernd_example_int32", 2, 1},
{"test_gathernd_example_int32_batch_dim1", 2, 1},
{"test_gelu_default_1", 1, 1},
{"test_gelu_default_1_expanded", 1, 1},
{"test_gelu_default_2", 1, 1},
{"test_gelu_default_2_expanded", 1, 1},
{"test_gelu_tanh_1", 1, 1},
{"test_gelu_tanh_1_expanded", 1, 1},
{"test_gelu_tanh_2", 1, 1},
{"test_gelu_tanh_2_expanded", 1, 1},
{"test_gemm_all_attributes", 3, 1},
{"test_gemm_alpha", 3, 1},
{"test_gemm_beta", 3, 1},
@ -962,6 +970,9 @@ public:
static std::set<std::string> opencl_fp16_deny_list;
static std::set<std::string> opencl_deny_list;
static std::set<std::string> cpu_deny_list;
#ifdef HAVE_HALIDE
static std::set<std::string> halide_deny_list;
#endif
#ifdef HAVE_VULKAN
static std::set<std::string> vulkan_deny_list;
#endif
@ -1005,7 +1016,7 @@ public:
if ((!l->supportBackend(backend) || l->preferableTarget != target) && !fused)
{
hasFallbacks = true;
std::cout << "FALLBACK: Layer [" << l->type << "]:[" << l->name << "] is expected to has backend implementation" << endl;
std::cout << "FALLBACK: Layer [" << l->type << "]:[" << l->name << "] is expected to have backend implementation" << endl;
}
}
return hasFallbacks;
@ -1037,6 +1048,12 @@ public:
#include "test_onnx_conformance_layer_filter_opencv_cpu_denylist.inl.hpp"
};
#ifdef HAVE_HALIDE
halide_deny_list = {
#include "test_onnx_conformance_layer_filter__halide_denylist.inl.hpp"
};
#endif
#ifdef HAVE_VULKAN
vulkan_deny_list = {
#include "test_onnx_conformance_layer_filter__vulkan_denylist.inl.hpp"
@ -1061,6 +1078,9 @@ std::set<std::string> Test_ONNX_conformance::opencv_deny_list;
std::set<std::string> Test_ONNX_conformance::opencl_fp16_deny_list;
std::set<std::string> Test_ONNX_conformance::opencl_deny_list;
std::set<std::string> Test_ONNX_conformance::cpu_deny_list;
#ifdef HAVE_HALIDE
std::set<std::string> Test_ONNX_conformance::halide_deny_list;
#endif
#ifdef HAVE_VULKAN
std::set<std::string> Test_ONNX_conformance::vulkan_deny_list;
#endif
@ -1107,7 +1127,29 @@ TEST_P(Test_ONNX_conformance, Layer_Test)
{
applyTestTag(CV_TEST_TAG_DNN_SKIP_CPU, CV_TEST_TAG_DNN_SKIP_OPENCV_BACKEND, CV_TEST_TAG_DNN_SKIP_ONNX_CONFORMANCE);
}
if (name == "test_gelu_tanh_1") {
default_l1 = 0.00011; // Expected: (normL1) <= (l1), actual: 0.000101805 vs 1e-05
default_lInf = 0.00016; // Expected: (normInf) <= (lInf), actual: 0.000152707 vs 0.0001
}
if (name == "test_gelu_tanh_2") {
if (target == DNN_TARGET_OPENCL_FP16) {
default_l1 = 0.00016; // Expected: (normL1) <= (l1), actual: 0.000157223 vs 9e-05
default_lInf = 0.0016; // Expected: (normInf) <= (lInf), actual: 0.00153041 vs 0.0005
} else {
default_l1 = 9e-5; // Expected: (normL1) <= (l1), actual: 8.80073e-05 vs 1e-05
default_lInf = 0.0005; // Expected: (normInf) <= (lInf), actual: 0.000455521 vs 0.0001
}
}
}
#ifdef HAVE_HALIDE
else if (backend == DNN_BACKEND_HALIDE)
{
if (halide_deny_list.find(name) != halide_deny_list.end())
{
applyTestTag(CV_TEST_TAG_DNN_SKIP_HALIDE, CV_TEST_TAG_DNN_SKIP_ONNX_CONFORMANCE);
}
}
#endif
#ifdef HAVE_INF_ENGINE
else if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
{
@ -1121,6 +1163,15 @@ TEST_P(Test_ONNX_conformance, Layer_Test)
{
applyTestTag(CV_TEST_TAG_DNN_SKIP_VULKAN, CV_TEST_TAG_DNN_SKIP_ONNX_CONFORMANCE);
}
if (name == "test_gelu_tanh_1") {
default_l1 = 0.00011; // Expected: (normL1) <= (l1), actual: 0.000101805 vs 1e-05
default_lInf = 0.00016; // Expected: (normInf) <= (lInf), actual: 0.000152707 vs 0.0001
}
if (name == "test_gelu_tanh_2") {
default_l1 = 9e-5; // Expected: (normL1) <= (l1), actual: 8.80073e-05 vs 1e-05
default_lInf = 0.0005; // Expected: (normInf) <= (lInf), actual: 0.000455521 vs 0.0001
}
}
#endif
#ifdef HAVE_CUDA
@ -1134,6 +1185,20 @@ TEST_P(Test_ONNX_conformance, Layer_Test)
{
applyTestTag(CV_TEST_TAG_DNN_SKIP_CUDA_FP16, CV_TEST_TAG_DNN_SKIP_ONNX_CONFORMANCE);
}
if (name == "test_gelu_tanh_1") {
default_l1 = 0.00011; // Expected: (normL1) <= (l1), actual: 0.000101815 vs 1e-05
default_lInf = 0.00016; // Expected: (normInf) <= (lInf), actual: 0.000152737 vs 0.0001
}
if (name == "test_gelu_tanh_2") {
if (target == DNN_TARGET_CUDA_FP16) {
default_l1 = 0.00023; // Expected: (normL1) <= (l1), actual: 0.000220591 vs 9e-05
default_lInf = 0.0023; // Expected: (normInf) <= (lInf), actual: 0.00220466 vs 0.0005
} else {
default_l1 = 9e-5; // Expected: (normL1) <= (l1), actual: 8.80127e-05 vs 1e-05
default_lInf = 0.0005; // Expected: (normInf) <= (lInf), actual: 0.000455445 vs 0.0001
}
}
}
#endif
else

@ -624,6 +624,36 @@ CASE(test_gathernd_example_int32)
// no filter
CASE(test_gathernd_example_int32_batch_dim1)
// no filter
CASE(test_gelu_default_1)
// no filter
CASE(test_gelu_default_1_expanded)
// no filter
CASE(test_gelu_default_2)
// no filter
CASE(test_gelu_default_2_expanded)
// no filter
CASE(test_gelu_tanh_1)
if (target == DNN_TARGET_CPU) {
default_l1 = 0.00011; // Expected: (normL1) <= (l1), actual: 0.000101805 vs 1e-05
default_lInf = 0.00016; // Expected: (normInf) <= (lInf), actual: 0.000152707 vs 0.0001
}
if (target == DNN_TARGET_OPENCL) {
default_l1 = 0.00011; // Expected: (normL1) <= (l1), actual: 0.000101815 vs 1e-05
default_lInf = 0.00016; // Expected: (normInf) <= (lInf), actual: 0.000152737 vs 0.0001
}
CASE(test_gelu_tanh_1_expanded)
// no filter
CASE(test_gelu_tanh_2)
if (target == DNN_TARGET_CPU) {
default_l1 = 9e-5; // Expected: (normL1) <= (l1), actual: 8.80057e-05 vs 1e-05
default_lInf = 0.00046; // Expected: (normInf) <= (lInf), actual: 0.000455521 vs 0.0001
}
if (target == DNN_TARGET_OPENCL) {
default_l1 = 9e-5; // Expected: (normL1) <= (l1), actual: 8.80144e-05 vs 1e-05
default_lInf = 0.00046; // Expected: (normInf) <= (lInf), actual: 0.000455445 vs 0.0001
}
CASE(test_gelu_tanh_2_expanded)
// no filter
CASE(test_gemm_all_attributes)
// no filter
CASE(test_gemm_alpha)

@ -102,6 +102,10 @@
"test_gathernd_example_float32", // Issues::Layer::Can't create layer
"test_gathernd_example_int32", // ---- same as above ---
"test_gathernd_example_int32_batch_dim1", // ---- same as above ---
"test_gelu_default_1_expanded", // parser: no corresponding layer for CastLike
"test_gelu_default_2_expanded", // parser: no corresponding layer for CastLike
"test_gelu_tanh_1_expanded", // parser: no corresponding layer for CastLike
"test_gelu_tanh_2_expanded", // parser: no corresponding layer for CastLike
"test_gemm_all_attributes", // Issue::Wrong output
"test_gemm_alpha", // Issue::Wrong output
"test_gemm_beta", // Issue::Wrong output

@ -19,7 +19,8 @@ void yoloPostProcessing(
std::vector<Rect2d>& keep_boxes,
float conf_threshold,
float iou_threshold,
const std::string& test_name);
const std::string& model_name,
const int nc=80);
template<typename TString>
static std::string _tf(TString filename, bool required = true)
@ -2735,7 +2736,8 @@ void yoloPostProcessing(
std::vector<Rect2d>& keep_boxes,
float conf_threshold,
float iou_threshold,
const std::string& test_name
const std::string& model_name,
const int nc
){
// Retrieve
@ -2743,11 +2745,13 @@ void yoloPostProcessing(
std::vector<float> confidences;
std::vector<Rect2d> boxes;
if (test_name == "yolov8"){
if (model_name == "yolov8" || model_name == "yolov10" ||
model_name == "yolov9")
{
cv::transposeND(outs[0], {0, 2, 1}, outs[0]);
}
if (test_name == "yolonas"){
if (model_name == "yolonas"){
// outs contains 2 elements of shape [1, 8400, 80] and [1, 8400, 4]. Concat them to get [1, 8400, 84]
Mat concat_out;
// squeeze the first dimension
@ -2761,22 +2765,27 @@ void yoloPostProcessing(
outs[0] = outs[0].reshape(0, std::vector<int>{1, 8400, 84});
}
// assert if last dim is 85 or 84
CV_CheckEQ(outs[0].dims, 3, "Invalid output shape. The shape should be [1, #anchors, 85 or 84]");
CV_CheckEQ((outs[0].size[2] == nc + 5 || outs[0].size[2] == 80 + 4), true, "Invalid output shape: ");
for (auto preds : outs){
preds = preds.reshape(1, preds.size[1]); // [1, 8400, 85] -> [8400, 85]
for (int i = 0; i < preds.rows; ++i)
{
// filter out non object
float obj_conf = (test_name == "yolov8" || test_name == "yolonas") ? 1.0f : preds.at<float>(i, 4) ;
float obj_conf = (model_name == "yolov8" || model_name == "yolonas" ||
model_name == "yolov9" || model_name == "yolov10") ? 1.0f : preds.at<float>(i, 4) ;
if (obj_conf < conf_threshold)
continue;
Mat scores = preds.row(i).colRange((test_name == "yolov8" || test_name == "yolonas") ? 4 : 5, preds.cols);
Mat scores = preds.row(i).colRange((model_name == "yolov8" || model_name == "yolonas" || model_name == "yolov9" || model_name == "yolov10") ? 4 : 5, preds.cols);
double conf;
Point maxLoc;
minMaxLoc(scores, 0, &conf, 0, &maxLoc);
conf = (test_name == "yolov8" || test_name == "yolonas") ? conf : conf * obj_conf;
conf = (model_name == "yolov8" || model_name == "yolonas" || model_name == "yolov9" || model_name == "yolov10") ? conf : conf * obj_conf;
if (conf < conf_threshold)
continue;
@ -2787,9 +2796,8 @@ void yoloPostProcessing(
double w = det[2];
double h = det[3];
// std::cout << "cx: " << cx << " cy: " << cy << " w: " << w << " h: " << h << " conf: " << conf << " idx: " << maxLoc.x << std::endl;
// [x1, y1, x2, y2]
if (test_name == "yolonas"){
if (model_name == "yolonas" || model_name == "yolov10"){
boxes.push_back(Rect2d(cx, cy, w, h));
} else {
boxes.push_back(Rect2d(cx - 0.5 * w, cy - 0.5 * h,
@ -2812,7 +2820,75 @@ void yoloPostProcessing(
}
}
TEST_P(Test_ONNX_nets, YOLOv10)
{
std::string weightPath = _tf("models/yolov10s.onnx", false);
Size targetSize{640, 480};
float conf_threshold = 0.50;
float iou_threshold = 0.50;
std::vector<int> refClassIds{1, 16, 7};
std::vector<float> refScores{0.9510f, 0.9454f, 0.8404f};
std::vector<Rect2d> refBoxes{
Rect2d(105.5014, 112.8838, 472.9274, 350.0603),
Rect2d(109.8231, 185.7994, 258.5916, 452.9302),
Rect2d(388.5018, 62.1034, 576.6399, 143.3986)
};
Image2BlobParams imgParams(
Scalar::all(1 / 255.0),
targetSize,
Scalar::all(0),
true,
CV_32F,
DNN_LAYOUT_NCHW,
DNN_PMODE_LETTERBOX,
Scalar::all(114)
);
testYOLO(
weightPath, refClassIds, refScores, refBoxes,
imgParams, conf_threshold, iou_threshold,
1.0e-4, 1.0e-4, "yolov10");
}
TEST_P(Test_ONNX_nets, YOLOv9)
{
std::string weightPath = _tf("models/yolov9t.onnx", false);
Size targetSize{640, 480};
float conf_threshold = 0.50;
float iou_threshold = 0.50;
std::vector<int> refClassIds{1, 16, 2}; // wrong class mapping for yolov9
std::vector<float> refScores{0.959274f, 0.901125f, 0.559396f};
std::vector<Rect2d> refBoxes{
Rect2d(106.255, 107.927, 472.497, 350.309),
Rect2d(108.633, 185.256, 259.287, 450.672),
Rect2d(390.701, 62.1454, 576.928, 141.795)
};
Image2BlobParams imgParams(
Scalar::all(1 / 255.0),
targetSize,
Scalar::all(0),
true,
CV_32F,
DNN_LAYOUT_NCHW,
DNN_PMODE_LETTERBOX,
Scalar::all(114)
);
testYOLO(
weightPath, refClassIds, refScores, refBoxes,
imgParams, conf_threshold, iou_threshold,
1.0e-4, 1.0e-4, "yolov9");
}
TEST_P(Test_ONNX_nets, YOLOX)
{
applyTestTag(CV_TEST_TAG_DEBUG_VERYLONG);
@ -3099,6 +3175,20 @@ TEST_P(Test_ONNX_layers, Attention) {
TEST_P(Test_ONNX_layers, AttentionSingleHead) {
testONNXModels("attention_single_head");
}
TEST_P(Test_ONNX_layers, PyTorchAttentionSingleHead) {
// 5.x specific bug: https://github.com/opencv/opencv/issues/25921
if (target == DNN_TARGET_OPENCL)
applyTestTag(CV_TEST_TAG_DNN_SKIP_OPENCL);
if (target == DNN_TARGET_OPENCL_FP16)
applyTestTag(CV_TEST_TAG_DNN_SKIP_OPENCL_FP16);
testONNXModels("pytorch_attention_single_head");
}
TEST_P(Test_ONNX_layers, PyTorchUnflatten){
testONNXModels("unflatten");
}
TEST_P(Test_ONNX_nets, ViT_B_32) {
applyTestTag(CV_TEST_TAG_LONG, CV_TEST_TAG_DEBUG_LONG);
@ -3131,6 +3221,15 @@ TEST_P(Test_ONNX_nets, ViT_B_32) {
l1 = 0.008;
lInf = 0.04;
}
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) {
if (target == DNN_TARGET_CPU) {
l1 = 4.4e-5; // Expected: (normL1) <= (l1), actual: 4.31208e-05 vs 1e-05
lInf = 0.0002; // Expected: (normInf) <= (lInf), actual: 0.000194907 vs 0.0001
} else if (target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16) {
l1 = 0.0092; // Expected: (normL1) <= (l1), actual: 0.00918349 vs 4.4e-05
lInf = 0.056; // Expected: (normInf) <= (lInf), actual: 0.0556431 vs 0.0002
}
}
normAssert(ref, out, "ViTB_32", l1, lInf);
}

@ -123,7 +123,7 @@ class ONNXCompiled {
std::vector<cv::Mat> out_data;
void Run(const std::vector<cv::Mat>& ins,
const std::vector<cv::Mat>& outs);
std::vector<cv::Mat>& outs);
std::vector<std::string> in_names_without_const;
public:
@ -322,22 +322,20 @@ inline std::vector<int64_t> toORT(const cv::MatSize &sz) {
inline void preprocess(const cv::Mat& src,
const cv::gimpl::onnx::TensorInfo& ti,
cv::Mat& dst) {
GAPI_Assert(src.depth() == CV_32F || src.depth() == CV_8U);
// CNN input type
const auto type = toCV(ti.type);
if (src.depth() == CV_32F) {
if (src.depth() != CV_8U) {
// Just pass the tensor as-is.
// No layout or dimension transformations done here!
// TODO: This needs to be aligned across all NN backends.
GAPI_Assert(type == CV_32F && "Only 32F model input is supported for 32F input data");
const auto tensor_dims = toORT(src.size);
if (tensor_dims.size() == ti.dims.size()) {
for (size_t i = 0; i < ti.dims.size(); ++i) {
GAPI_Assert((ti.dims[i] == -1 || ti.dims[i] == tensor_dims[i]) &&
"32F tensor dimensions should match with all non-dynamic NN input dimensions");
"Non-U8 tensor dimensions should match with all non-dynamic NN input dimensions");
}
} else {
GAPI_Error("32F tensor size should match with NN input");
GAPI_Error("Non-U8 tensor size should match with NN input");
}
dst = src;
@ -471,6 +469,25 @@ inline Ort::Value createTensor(const Ort::MemoryInfo& memory_info,
return createTensor<float>(memory_info, tensor_params, data);
case ONNX_TENSOR_ELEMENT_DATA_TYPE_INT32:
return createTensor<int32_t>(memory_info, tensor_params, data);
case ONNX_TENSOR_ELEMENT_DATA_TYPE_INT64:{
// cv::Mat does not support int64 data directly.
// Following steps are applied to create an ONNX tensor from cv::Mat data:
// - First create a new ONNX tensor 'i64_tensor' with data type int64_t using the default allocator
// - Next retrieve a pointer to the mutable data buffer of 'i64_tensor'
// - Convert the data from int32 (see toCV function) to int64 and deep copy it into 'i64_tensor'
auto ort_dims = toORT(data.size);
Ort::AllocatorWithDefaultOptions allocator;
Ort::Value i64_tensor = Ort::Value::CreateTensor<int64_t>(allocator,
ort_dims.data(),
ort_dims.size());
int64_t* tensor_data = i64_tensor.GetTensorMutableData<int64_t>();
cv::gimpl::convertInt32ToInt64(data.ptr<int>(),
tensor_data,
data.total());
return i64_tensor;
}
default:
GAPI_Error("ONNX. Unsupported data type");
}
@ -747,9 +764,11 @@ ONNXCompiled::ONNXCompiled(const gapi::onnx::detail::ParamDesc &pp)
in_tensor_info.end(),
[](const cv::gimpl::onnx::TensorInfo &p) {
return p.type == ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT
|| p.type == ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT8;
|| p.type == ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT8
|| p.type == ONNX_TENSOR_ELEMENT_DATA_TYPE_INT32
|| p.type == ONNX_TENSOR_ELEMENT_DATA_TYPE_INT64;
})
&& "Only FP32 and U8 inputs for NN are supported");
&& "Only FP32, INT32, INT64 and U8 inputs for NN are supported");
// Put mean and std in appropriate tensor params
if (!params.mean.empty() || !params.stdev.empty()) {
@ -864,7 +883,7 @@ cv::Mat ONNXCompiled::allocOutput(int i) const {
}
void ONNXCompiled::Run(const std::vector<cv::Mat>& ins,
const std::vector<cv::Mat>& outs) {
std::vector<cv::Mat>& outs) {
std::vector<Ort::Value> in_tensors, out_tensors;
// Layer names order for run
@ -909,6 +928,17 @@ void ONNXCompiled::Run(const std::vector<cv::Mat>& ins,
out_run_names.data(),
&out_tensors.front(),
params.output_names.size());
if (out_tensor_info[0].type == ONNX_TENSOR_ELEMENT_DATA_TYPE_INT64) {
// cv::Mat does not support int64 output data.
// Conversion from int64 to int32 is carried out in the copyFromONNX function
// The output is written to out_mat
for (auto &&iter : ade::util::zip(ade::util::toRange(out_tensors),
ade::util::toRange(outs))) {
auto &out_tensor = std::get<0>(iter);
auto &out_mat = std::get<1>(iter);
copyFromONNX(out_tensor, out_mat);
}
}
} else {
// Hard path - run session & user-defined post-processing
// NOTE: use another list of output names here

@ -218,6 +218,9 @@ if(TARGET ocv.3rdparty.gtk3 OR TARGET ocv.3rdparty.gtk2)
)
if(__gtk_dependency STREQUAL "ocv.3rdparty.gtk3")
set(OPENCV_HIGHGUI_BUILTIN_BACKEND "GTK3")
if(OPENGL_LIBRARIES)
list(APPEND HIGHGUI_LIBRARIES "${OPENGL_LIBRARIES}")
endif()
elseif(__gtk_dependency STREQUAL "ocv.3rdparty.gtk2")
set(OPENCV_HIGHGUI_BUILTIN_BACKEND "GTK2")
else()

@ -26,17 +26,27 @@ if(WITH_GTK)
else()
ocv_add_external_target(gthread "${GTHREAD_INCLUDE_DIRS}" "${GTHREAD_LIBRARIES}" "HAVE_GTHREAD")
endif()
if((WITH_OPENGL OR HAVE_OPENGL) AND HAVE_GTK2)
if((WITH_OPENGL OR HAVE_OPENGL) AND (HAVE_GTK2 OR HAVE_GTK3))
if(HAVE_GTK2)
ocv_check_modules(GTKGLEXT gtkglext-1.0)
if(HAVE_GTKGLEXT)
# HACK for https://github.com/opencv/opencv/issues/20850
# pkg-config reports some include directories that do not exist. Just filter them out.
set(GTKGLEXT_INCLUDE_DIRS_EXISTS "")
foreach(p ${GTKGLEXT_INCLUDE_DIRS})
if (EXISTS "${p}")
list(APPEND GTKGLEXT_INCLUDE_DIRS_EXISTS "${p}")
endif()
endforeach()
ocv_add_external_target(gtkglext "${GTKGLEXT_INCLUDE_DIRS_EXISTS}" "${GTKGLEXT_LIBRARIES}" "HAVE_GTKGLEXT")
endif()
endif()
endif()
elseif(HAVE_GTK)
ocv_add_external_target(gtk "${GTK_INCLUDE_DIRS}" "${GTK_LIBRARIES}" "${GTK_DEFINES};HAVE_GTK")
endif()
if(WITH_OPENGL AND HAVE_GTKGLEXT)
if(WITH_OPENGL)
find_package(OpenGL QUIET)
if(OPENGL_FOUND)
set(HAVE_OPENGL TRUE)

@ -46,10 +46,7 @@
#include <gtk/gtk.h>
#if (GTK_MAJOR_VERSION == 3) && defined(HAVE_OPENGL)
#undef HAVE_OPENGL // no support with GTK3
#endif
#if defined(HAVE_OPENGL) && !defined(HAVE_GTKGLEXT)
#if (GTK_MAJOR_VERSION == 2) && defined(HAVE_OPENGL) && !defined(HAVE_GTKGLEXT)
#undef HAVE_OPENGL // gtkglext is required
#endif
@ -68,10 +65,14 @@
#endif
#ifdef HAVE_OPENGL
#ifdef GTK_VERSION3
#include <gtk/gtkglarea.h>
#else
#include <gtk/gtkgl.h>
#include <GL/gl.h>
#include <GL/glu.h>
#endif
#include <GL/gl.h>
#endif
#include <opencv2/core/utils/logger.hpp>
#include "opencv2/core/utils/trace.hpp"
@ -575,7 +576,7 @@ struct CvWindow : CvUIBase
last_key(0), flags(0), status(0),
on_mouse(NULL), on_mouse_param(NULL)
#ifdef HAVE_OPENGL
,useGl(false), glDrawCallback(NULL), glDrawData(NULL)
,useGl(false), glDrawCallback(NULL), glDrawData(NULL), glArea(NULL)
#endif
{
CV_LOG_INFO(NULL, "OpenCV/UI: creating GTK window: " << window_name);
@ -602,6 +603,7 @@ struct CvWindow : CvUIBase
CvOpenGlDrawCallback glDrawCallback;
void* glDrawData;
GtkWidget* glArea;
#endif
};
@ -645,7 +647,7 @@ static int gtk_InitSystem( int argc, char** argv )
setlocale(LC_NUMERIC,"C");
#ifdef HAVE_OPENGL
#if defined(HAVE_OPENGL) && not defined(GTK_VERSION3) // GTK3+ uses GtkGLArea so no need to check for GtkGLExt
if (!gtk_gl_init_check(&argc, &argv))
{
hasError = true;
@ -900,11 +902,42 @@ double cvGetOpenGlProp_GTK(const char* name)
// OpenGL support
#ifdef HAVE_OPENGL
namespace
{
#ifdef GTK_VERSION3
void glRealizeCallback(GtkGLArea* area, gpointer user_data) {
CV_UNUSED(user_data);
gtk_gl_area_make_current(area);
if (gtk_gl_area_get_error(area) != NULL)
CV_Error(cv::Error::OpenGlApiCallError, "OpenGL context is not initialized");
}
gboolean glRenderCallback(GtkGLArea* area, GdkGLContext* context, gpointer user_data) {
CV_UNUSED(context);
CvWindow* window = (CvWindow*)user_data;
gtk_gl_area_make_current(area);
if (gtk_gl_area_get_error(area) != NULL) {
CV_Error(cv::Error::OpenGlApiCallError, "OpenGL context is not initialized");
return FALSE;
}
if(window->glDrawCallback) {
window->glDrawCallback(window->glDrawData);
}
// gtk_gl_area_queue_render(area);
return TRUE;
}
#endif
void createGlContext(CvWindow* window)
{
#ifdef GTK_VERSION3
g_signal_connect(window->glArea, "realize", G_CALLBACK(glRealizeCallback), window);
g_signal_connect(window->glArea, "render", G_CALLBACK(glRenderCallback), window);
#else
GdkGLConfig* glconfig;
// Try double-buffered visual
@ -916,11 +949,24 @@ namespace
if (!gtk_widget_set_gl_capability(window->widget, glconfig, NULL, TRUE, GDK_GL_RGBA_TYPE))
CV_Error( cv::Error::OpenGlApiCallError, "Can't Create A GL Device Context" );
#endif
window->useGl = true;
}
void drawGl(CvWindow* window)
{
#ifdef GTK_VERSION3
GtkGLArea* gtkGlArea = GTK_GL_AREA(window->glArea);
if (gtk_gl_area_get_error(gtkGlArea) != NULL)
CV_Error(cv::Error::OpenGlApiCallError, "Can't Activate The GL Rendering Context");
if (window->glDrawCallback)
window->glDrawCallback(window->glDrawData);
#else
GdkGLContext* glcontext = gtk_widget_get_gl_context(window->widget);
GdkGLDrawable* gldrawable = gtk_widget_get_gl_drawable(window->widget);
@ -940,6 +986,8 @@ namespace
glFlush();
gdk_gl_drawable_gl_end(gldrawable);
#endif
}
}
@ -1035,12 +1083,27 @@ static std::shared_ptr<CvWindow> namedWindow_(const std::string& name, int flags
window->frame = gtk_window_new( GTK_WINDOW_TOPLEVEL );
window->paned = gtk_vbox_new( FALSE, 0 );
window->widget = cvImageWidgetNew( flags );
#if defined(HAVE_OPENGL) && defined(GTK_VERSION3)
if (flags & cv::WINDOW_OPENGL) {
window->glArea = gtk_gl_area_new();
gtk_container_add(GTK_CONTAINER(window->frame), window->glArea);
gtk_widget_show(window->glArea);
} else {
window->paned = gtk_vbox_new( FALSE, 0 );
gtk_box_pack_end( GTK_BOX(window->paned), window->widget, TRUE, TRUE, 0 );
gtk_widget_show( window->widget );
gtk_container_add( GTK_CONTAINER(window->frame), window->paned );
gtk_widget_show( window->paned );
}
#else
window->paned = gtk_vbox_new( FALSE, 0 );
gtk_box_pack_end( GTK_BOX(window->paned), window->widget, TRUE, TRUE, 0 );
gtk_widget_show( window->widget );
gtk_container_add( GTK_CONTAINER(window->frame), window->paned );
gtk_widget_show( window->paned );
#endif
#ifndef HAVE_OPENGL
if (flags & cv::WINDOW_OPENGL)
@ -1116,9 +1179,6 @@ static std::shared_ptr<CvWindow> namedWindow_(const std::string& name, int flags
void setOpenGLContextImpl(const char* name)
{
GdkGLContext* glcontext;
GdkGLDrawable* gldrawable;
CV_Assert(name && "NULL name string");
CV_LOCK_MUTEX();
@ -1130,11 +1190,24 @@ void setOpenGLContextImpl(const char* name)
if (!window->useGl)
CV_Error( cv::Error::OpenGlNotSupported, "Window doesn't support OpenGL" );
#ifdef GTK_VERSION3
if(gtk_gl_area_get_error(GTK_GL_AREA(window->glArea)) != NULL)
CV_Error( cv::Error::OpenGlApiCallError, "Can't Activate The GL Rendering Context");
#else
GdkGLContext* glcontext;
GdkGLDrawable* gldrawable;
glcontext = gtk_widget_get_gl_context(window->widget);
gldrawable = gtk_widget_get_gl_drawable(window->widget);
if (!gdk_gl_drawable_make_current(gldrawable, glcontext))
CV_Error( cv::Error::OpenGlApiCallError, "Can't Activate The GL Rendering Context" );
#endif
}
void updateWindowImpl(const char* name)
@ -1148,9 +1221,22 @@ void updateWindowImpl(const char* name)
return;
// window does not refresh without this
#ifdef GTK_VERSION3
if ( GTK_IS_GL_AREA(window->glArea) ){
gtk_gl_area_queue_render(GTK_GL_AREA(window->glArea));
} else {
gtk_widget_queue_draw( GTK_WIDGET(window->widget));
}
#else
gtk_widget_queue_draw( GTK_WIDGET(window->widget) );
#endif
}
void setOpenGLDrawCallbackImpl(const char* name, CvOpenGlDrawCallback callback, void* userdata)
{
CV_Assert(name && "NULL name string");

@ -68,7 +68,8 @@ namespace cv
enum ImreadModes {
IMREAD_UNCHANGED = -1, //!< If set, return the loaded image as is (with alpha channel, otherwise it gets cropped). Ignore EXIF orientation.
IMREAD_GRAYSCALE = 0, //!< If set, always convert image to the single channel grayscale image (codec internal conversion).
IMREAD_COLOR = 1, //!< If set, always convert image to the 3 channel BGR color image.
IMREAD_COLOR_BGR = 1, //!< If set, always convert image to the 3 channel BGR color image.
IMREAD_COLOR = 1, //!< Same as IMREAD_COLOR_BGR.
IMREAD_ANYDEPTH = 2, //!< If set, return 16-bit/32-bit image when the input has the corresponding depth, otherwise convert it to 8-bit.
IMREAD_ANYCOLOR = 4, //!< If set, the image is read in any possible color format.
IMREAD_LOAD_GDAL = 8, //!< If set, use the gdal driver for loading the image.
@ -78,7 +79,8 @@ enum ImreadModes {
IMREAD_REDUCED_COLOR_4 = 33, //!< If set, always convert image to the 3 channel BGR color image and the image size reduced 1/4.
IMREAD_REDUCED_GRAYSCALE_8 = 64, //!< If set, always convert image to the single channel grayscale image and the image size reduced 1/8.
IMREAD_REDUCED_COLOR_8 = 65, //!< If set, always convert image to the 3 channel BGR color image and the image size reduced 1/8.
IMREAD_IGNORE_ORIENTATION = 128 //!< If set, do not rotate the image according to EXIF's orientation flag.
IMREAD_IGNORE_ORIENTATION = 128, //!< If set, do not rotate the image according to EXIF's orientation flag.
IMREAD_COLOR_RGB = 256, //!< If set, always convert image to the 3 channel RGB color image.
};
//! Imwrite flags
@ -268,7 +270,7 @@ Currently, the following file formats are supported:
@param filename Name of file to be loaded.
@param flags Flag that can take values of cv::ImreadModes
*/
CV_EXPORTS_W Mat imread( const String& filename, int flags = IMREAD_COLOR );
CV_EXPORTS_W Mat imread( const String& filename, int flags = IMREAD_COLOR_BGR );
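A minimal sketch of the new flag (illustrative, not part of this patch; "image.png" is a placeholder): IMREAD_COLOR_RGB decodes straight to RGB channel order, while IMREAD_COLOR_BGR, the default, keeps the classic OpenCV order.

#include <opencv2/imgcodecs.hpp>

int main()
{
    cv::Mat rgb = cv::imread("image.png", cv::IMREAD_COLOR_RGB);   // 3 channels, R first
    cv::Mat bgr = cv::imread("image.png");                         // default IMREAD_COLOR_BGR
    return rgb.empty() || bgr.empty();
}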
/** @brief Loads an image from a file.
@ -279,7 +281,7 @@ This is an overloaded member function, provided for convenience. It differs from
@note
The image passing through the img parameter can be pre-allocated. The memory is reused if the shape and the type match with the load image.
*/
CV_EXPORTS_W void imread( const String& filename, OutputArray dst, int flags = IMREAD_COLOR );
CV_EXPORTS_W void imread( const String& filename, OutputArray dst, int flags = IMREAD_COLOR_BGR );
/** @brief Loads a multi-page image from a file.
@ -291,7 +293,7 @@ The function imreadmulti loads a multi-page image from the specified file into a
*/
CV_EXPORTS_W bool imreadmulti(const String& filename, CV_OUT std::vector<Mat>& mats, int flags = IMREAD_ANYCOLOR);
/** @brief Loads a of images of a multi-page image from a file.
/** @brief Loads images of a multi-page image from a file.
The function imreadmulti loads a specified range from a multi-page image from the specified file into a vector of Mat objects.
@param filename Name of file to be loaded.
@ -303,7 +305,7 @@ The function imreadmulti loads a specified range from a multi-page image from th
*/
CV_EXPORTS_W bool imreadmulti(const String& filename, CV_OUT std::vector<Mat>& mats, int start, int count, int flags = IMREAD_ANYCOLOR);
/** @brief Returns the number of images inside the give file
/** @brief Returns the number of images inside the given file
The function imcount will return the number of pages in a multi-page image, or 1 for single-page images
@param filename Name of file to be loaded.
@ -410,27 +412,47 @@ CV_EXPORTS_W bool imencode( const String& ext, InputArray img,
CV_OUT std::vector<uchar>& buf,
const std::vector<int>& params = std::vector<int>());
/** @brief Returns true if the specified image can be decoded by OpenCV
/** @brief Checks if the specified image file can be decoded by OpenCV.
@param filename File name of the image
The function haveImageReader checks if OpenCV is capable of reading the specified file.
This can be useful for verifying support for a given image format before attempting to load an image.
@param filename The name of the file to be checked.
@return true if an image reader for the specified file is available and the file can be opened, false otherwise.
@note The function checks the availability of image codecs that are either built into OpenCV or dynamically loaded.
It does not check for the actual existence of the file but rather the ability to read the specified file type.
If the file cannot be opened or the format is unsupported, the function will return false.
@sa cv::haveImageWriter, cv::imread, cv::imdecode
*/
CV_EXPORTS_W bool haveImageReader( const String& filename );
/** @brief Returns true if an image with the specified filename can be encoded by OpenCV
/** @brief Checks if the specified image file or specified file extension can be encoded by OpenCV.
The function haveImageWriter checks if OpenCV is capable of writing images with the specified file extension.
This can be useful for verifying support for a given image format before attempting to save an image.
@param filename The name of the file or the file extension (e.g., ".jpg", ".png").
It is recommended to provide the file extension rather than the full file name.
@return true if an image writer for the specified extension is available, false otherwise.
@note The function checks the availability of image codecs that are either built into OpenCV or dynamically loaded.
It does not check for the actual existence of the file but rather the ability to write files of the given type.
@param filename File name of the image
@sa cv::haveImageReader, cv::imwrite, cv::imencode
*/
CV_EXPORTS_W bool haveImageWriter( const String& filename );
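A small sketch of probing codec support before attempting I/O, as described above (illustrative, not part of this patch; the file name and extension are placeholders):

#include <opencv2/imgcodecs.hpp>
#include <iostream>

int main()
{
    if (cv::haveImageReader("scene.exr"))
        std::cout << "This EXR file can be decoded\n";
    if (cv::haveImageWriter(".webp"))
        std::cout << "WebP encoding is available\n";
    return 0;
}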
/** @brief To read Multi Page images on demand
/** @brief To read multi-page images on demand
The ImageCollection class provides iterator API to read multi page images on demand. Create iterator
The ImageCollection class provides an iterator API to read multi-page images on demand. Create an iterator
over the collection of images and iterate over the collection. Decode the necessary page with operator*.
The performance of page decoding is O(1) if the collection is traversed sequentially. If the user wants to access a random page,
then the time complexity is O(n) because the collection has to be reinitialized every time in order to go to the correct page.
However, the intermediate pages are not decoded during the process, so typically it's quite fast.
This is required because multipage codecs does not support going backwards.
This is required because multi-page codecs do not support going backwards.
After a page is decoded, it is stored inside the collection cache. Hence, getting a Mat object for an already decoded page is O(1).
If you need to free memory, you can use the .releaseCache() method to release a cached index.
The space complexity is O(n) if all pages are decoded into memory. The user is able to decode and release images on demand.
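A short sketch of the on-demand decoding described above (illustrative, assuming the cv::ImageCollection constructor, iterators and releaseCache() mentioned in this documentation; "multipage.tiff" is a placeholder):

#include <opencv2/imgcodecs.hpp>

int main()
{
    cv::ImageCollection pages("multipage.tiff", cv::IMREAD_ANYCOLOR);
    for (auto it = pages.begin(); it != pages.end(); ++it)
    {
        cv::Mat page = *it;      // decoded on demand, cached afterwards
        (void)page;              // ... process the page ...
    }
    pages.releaseCache(0);       // drop the cached Mat for page 0 if memory is a concern
    return 0;
}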

@ -27,6 +27,23 @@ PERF_TEST(JPEG, Decode)
SANITY_CHECK_NOTHING();
}
PERF_TEST(JPEG, Decode_rgb)
{
String filename = getDataPath("stitching/boat1.jpg");
FILE *f = fopen(filename.c_str(), "rb");
fseek(f, 0, SEEK_END);
long len = ftell(f);
fseek(f, 0, SEEK_SET);
vector<uchar> file_buf((size_t)len);
EXPECT_EQ(len, (long)fread(&file_buf[0], 1, (size_t)len, f));
fclose(f); f = NULL;
TEST_CYCLE() imdecode(file_buf, IMREAD_COLOR_RGB);
SANITY_CHECK_NOTHING();
}
PERF_TEST(JPEG, Encode)
{
String filename = getDataPath("stitching/boat1.jpg");

@ -30,6 +30,23 @@ PERF_TEST(PNG, decode)
SANITY_CHECK_NOTHING();
}
PERF_TEST(PNG, decode_rgb)
{
String filename = getDataPath("perf/2560x1600.png");
FILE *f = fopen(filename.c_str(), "rb");
fseek(f, 0, SEEK_END);
long len = ftell(f);
fseek(f, 0, SEEK_SET);
vector<uchar> file_buf((size_t)len);
EXPECT_EQ(len, (long)fread(&file_buf[0], 1, (size_t)len, f));
fclose(f); f = NULL;
TEST_CYCLE() imdecode(file_buf, IMREAD_COLOR_RGB);
SANITY_CHECK_NOTHING();
}
PERF_TEST(PNG, encode)
{
String filename = getDataPath("perf/2560x1600.png");

@ -33,7 +33,7 @@ struct AvifImageDeleter {
using AvifImageUniquePtr = std::unique_ptr<avifImage, AvifImageDeleter>;
avifResult CopyToMat(const avifImage *image, int channels, Mat *mat) {
avifResult CopyToMat(const avifImage *image, int channels, bool useRGB, Mat *mat) {
CV_Assert((int)image->height == mat->rows);
CV_Assert((int)image->width == mat->cols);
if (channels == 1) {
@ -53,6 +53,9 @@ avifResult CopyToMat(const avifImage *image, int channels, Mat *mat) {
avifRGBImage rgba;
avifRGBImageSetDefaults(&rgba, image);
if (channels == 3) {
if (useRGB)
rgba.format = AVIF_RGB_FORMAT_RGB;
else
rgba.format = AVIF_RGB_FORMAT_BGR;
} else {
CV_Assert(channels == 4);
@ -227,7 +230,7 @@ bool AvifDecoder::readData(Mat &img) {
is_first_image_ = false;
}
if (CopyToMat(decoder_->image, channels_, &read_img) != AVIF_RESULT_OK) {
if (CopyToMat(decoder_->image, channels_, m_use_rgb, &read_img) != AVIF_RESULT_OK) {
CV_Error(Error::StsInternal, "Cannot convert from AVIF to Mat");
return false;
}

@ -53,6 +53,7 @@ BaseImageDecoder::BaseImageDecoder()
m_type = -1;
m_buf_supported = false;
m_scale_denom = 1;
m_use_rgb = false;
}
@ -94,6 +95,11 @@ int BaseImageDecoder::setScale( const int& scale_denom )
return temp;
}
void BaseImageDecoder::setRGB(bool useRGB)
{
m_use_rgb = useRGB;
}
ImageDecoder BaseImageDecoder::newDecoder() const
{
return ImageDecoder();

@ -73,6 +73,8 @@ public:
virtual bool readHeader() = 0;
virtual bool readData( Mat& img ) = 0;
virtual void setRGB(bool useRGB);
/// Called after readData to advance to the next page, if any.
virtual bool nextPage() { return false; }
@ -89,6 +91,7 @@ protected:
String m_signature;
Mat m_buf;
bool m_buf_supported;
bool m_use_rgb; // flag to decode the image in RGB order instead of BGR.
ExifReader m_exif;
};

@ -544,6 +544,11 @@ decode_rle8_bad: ;
throw;
}
if (m_use_rgb && color && img.channels() == 3)
{
cv::cvtColor(img, img, cv::COLOR_BGR2RGB);
}
return result;
}

@ -372,6 +372,17 @@ bool ExrDecoder::readData( Mat& img )
m_file->readPixels( m_datawindow.min.y, m_datawindow.max.y );
if( m_iscolor )
{
if (m_use_rgb)
{
if( m_red && (m_red->xSampling != 1 || m_red->ySampling != 1) )
UpSample( data, channelstoread, step / xstep, m_red->xSampling, m_red->ySampling );
if( m_green && (m_green->xSampling != 1 || m_green->ySampling != 1) )
UpSample( data + xstep, channelstoread, step / xstep, m_green->xSampling, m_green->ySampling );
if( m_blue && (m_blue->xSampling != 1 || m_blue->ySampling != 1) )
UpSample( data + 2 * xstep, channelstoread, step / xstep, m_blue->xSampling, m_blue->ySampling );
}
else
{
if( m_blue && (m_blue->xSampling != 1 || m_blue->ySampling != 1) )
UpSample( data, channelstoread, step / xstep, m_blue->xSampling, m_blue->ySampling );
@ -380,12 +391,18 @@ bool ExrDecoder::readData( Mat& img )
if( m_red && (m_red->xSampling != 1 || m_red->ySampling != 1) )
UpSample( data + 2 * xstep, channelstoread, step / xstep, m_red->xSampling, m_red->ySampling );
}
}
else if( m_green && (m_green->xSampling != 1 || m_green->ySampling != 1) )
UpSample( data, channelstoread, step / xstep, m_green->xSampling, m_green->ySampling );
if( chromatorgb )
{
if (m_use_rgb)
ChromaToRGB( (float *)data, m_height, channelstoread, step / xstep );
else
ChromaToBGR( (float *)data, m_height, channelstoread, step / xstep );
}
}
else
{
uchar *out = data;
@ -406,7 +423,12 @@ bool ExrDecoder::readData( Mat& img )
else
{
if( chromatorgb )
{
if (m_use_rgb)
ChromaToRGB( (float *)buffer, 1, defaultchannels, step );
else
ChromaToBGR( (float *)buffer, 1, defaultchannels, step );
}
if( m_type == FLOAT )
{
@ -429,6 +451,17 @@ bool ExrDecoder::readData( Mat& img )
out += step;
}
if( color )
{
if (m_use_rgb)
{
if( m_red && (m_red->xSampling != 1 || m_red->ySampling != 1) )
UpSampleY( data, defaultchannels, step / xstep, m_red->ySampling );
if( m_green && (m_green->xSampling != 1 || m_green->ySampling != 1) )
UpSampleY( data + xstep, defaultchannels, step / xstep, m_green->ySampling );
if( m_blue && (m_blue->xSampling != 1 || m_blue->ySampling != 1) )
UpSampleY( data + 2 * xstep, defaultchannels, step / xstep, m_blue->ySampling );
}
else
{
if( m_blue && (m_blue->xSampling != 1 || m_blue->ySampling != 1) )
UpSampleY( data, defaultchannels, step / xstep, m_blue->ySampling );
@ -437,6 +470,7 @@ bool ExrDecoder::readData( Mat& img )
if( m_red && (m_red->xSampling != 1 || m_red->ySampling != 1) )
UpSampleY( data + 2 * xstep, defaultchannels, step / xstep, m_red->ySampling );
}
}
else if( m_green && (m_green->xSampling != 1 || m_green->ySampling != 1) )
UpSampleY( data, 1, step / xstep, m_green->ySampling );
}
@ -558,6 +592,47 @@ void ExrDecoder::ChromaToBGR( float *data, int numlines, int xstep, int ystep )
}
}
void ExrDecoder::ChromaToRGB(float *data, int numlines, int xstep, int ystep)
{
for( int y = 0; y < numlines; y++ )
{
for( int x = 0; x < m_width; x++ )
{
double b, Y, r;
if( m_type == FLOAT )
{
b = data[y * ystep + x * xstep];
Y = data[y * ystep + x * xstep + 1];
r = data[y * ystep + x * xstep + 2];
}
else
{
b = ((unsigned *)data)[y * ystep + x * xstep];
Y = ((unsigned *)data)[y * ystep + x * xstep + 1];
r = ((unsigned *)data)[y * ystep + x * xstep + 2];
}
r = (r + 1) * Y;
b = (b + 1) * Y;
Y = (Y - b * m_chroma.blue[1] - r * m_chroma.red[1]) / m_chroma.green[1];
if( m_type == FLOAT )
{
data[y * ystep + x * xstep] = (float)r;
data[y * ystep + x * xstep + 1] = (float)Y;
data[y * ystep + x * xstep + 2] = (float)b;
}
else
{
int t = cvRound(r);
((unsigned *)data)[y * ystep + x * xstep + 0] = (unsigned)MAX(t, 0);
t = cvRound(Y);
((unsigned *)data)[y * ystep + x * xstep + 1] = (unsigned)MAX(t, 0);
t = cvRound(b);
((unsigned *)data)[y * ystep + x * xstep + 2] = (unsigned)MAX(t, 0);
}
}
}
}
/**
// convert one row to gray

@ -83,6 +83,7 @@ protected:
void UpSampleX( float *data, int xstep, int xsample );
void UpSampleY( uchar *data, int xstep, int ystep, int ysample );
void ChromaToBGR( float *data, int numlines, int xstep, int ystep );
void ChromaToRGB( float *data, int numlines, int xstep, int ystep );
void RGBToGray( float *in, float *out );
InputFile *m_file;

@ -397,13 +397,13 @@ bool GdalDecoder::readData( Mat& img ){
case GCI_PaletteIndex:
case GCI_GrayIndex:
case GCI_BlueBand:
color = 0;
color = m_use_rgb ? 2 : 0;
break;
case GCI_GreenBand:
color = 1;
break;
case GCI_RedBand:
color = 2;
color = m_use_rgb ? 0 : 2;
break;
case GCI_AlphaBand:
color = 3;

@ -106,7 +106,13 @@ bool HdrDecoder::readData(Mat& _img)
switch (_img.channels())
{
case 1: cvtColor(img, _img, COLOR_BGR2GRAY); break;
case 3: img.copyTo(_img); break;
case 3:
// TODO: try to modify RGBE_ReadPixels_RLE to load RGB data directly.
if (m_use_rgb)
cv::cvtColor(img, _img, cv::COLOR_BGR2RGB);
else
img.copyTo(_img);
break;
default: CV_Error(Error::StsError, "Wrong expected image channels, allowed: 1 and 3");
}
return true;

@ -437,13 +437,13 @@ bool JpegDecoder::readData( Mat& img )
if( cinfo->num_components != 4 )
{
#ifdef JCS_EXTENSIONS
cinfo->out_color_space = JCS_EXT_BGR;
cinfo->out_color_space = m_use_rgb ? JCS_EXT_RGB : JCS_EXT_BGR;
cinfo->out_color_components = 3;
doDirectRead = true; // BGR -> BGR
#else
cinfo->out_color_space = JCS_RGB;
cinfo->out_color_components = 3;
doDirectRead = false; // RGB -> BGR
doDirectRead = m_use_rgb; // direct read when RGB output is requested, otherwise convert RGB -> BGR
#endif
}
else
@ -499,7 +499,7 @@ bool JpegDecoder::readData( Mat& img )
for( int iy = 0 ; iy < m_height; iy ++ )
{
uchar* data = img.ptr<uchar>(iy);
jpeg_read_scanlines( cinfo, &data, 1 );
if (jpeg_read_scanlines( cinfo, &data, 1 ) != 1) return false;
}
}
else
@ -510,15 +510,25 @@ bool JpegDecoder::readData( Mat& img )
for( int iy = 0 ; iy < m_height; iy ++ )
{
uchar* data = img.ptr<uchar>(iy);
jpeg_read_scanlines( cinfo, buffer, 1 );
if (jpeg_read_scanlines( cinfo, buffer, 1 ) != 1) return false;
if( color )
{
if (m_use_rgb)
{
if( cinfo->out_color_components == 3 )
icvCvt_BGR2RGB_8u_C3R( buffer[0], 0, data, 0, Size(m_width,1) );
else
icvCvt_CMYK2RGB_8u_C4C3R( buffer[0], 0, data, 0, Size(m_width,1) );
}
else
{
if( cinfo->out_color_components == 3 )
icvCvt_RGB2BGR_8u_C3R( buffer[0], 0, data, 0, Size(m_width,1) );
else
icvCvt_CMYK2BGR_8u_C4C3R( buffer[0], 0, data, 0, Size(m_width,1) );
}
}
else
{
if( cinfo->out_color_components == 1 )

@ -286,11 +286,12 @@ bool Jpeg2KDecoder::readData( Mat& img )
{
int ncmpts;
int cmptlut[3];
int swap_rb = m_use_rgb ? 0 : 2;
if( color )
{
cmptlut[0] = jas_image_getcmptbytype( image, JAS_IMAGE_CT_RGB_B );
cmptlut[1] = jas_image_getcmptbytype( image, JAS_IMAGE_CT_RGB_G );
cmptlut[2] = jas_image_getcmptbytype( image, JAS_IMAGE_CT_RGB_R );
cmptlut[0] = jas_image_getcmptbytype( image, swap_rb );
cmptlut[1] = jas_image_getcmptbytype( image, 1 );
cmptlut[2] = jas_image_getcmptbytype( image, swap_rb^2 );
if( cmptlut[0] < 0 || cmptlut[1] < 0 || cmptlut[2] < 0 )
result = false;
ncmpts = 3;
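A small sketch of the swap_rb indexing above (assuming the JasPer RGB component types map to R = 0, G = 1, B = 2, as the plain integer arguments imply): XOR with 2 exchanges the red and blue indices and leaves green untouched.
int swap_rb  = m_use_rgb ? 0 : 2;
int order[3] = { swap_rb, 1, swap_rb ^ 2 };  // {2,1,0} = B,G,R (legacy), {0,1,2} = R,G,B (IMREAD_COLOR_RGB)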

@ -350,7 +350,7 @@ opj_cparameters setupEncoderParameters(const std::vector<int>& params)
return parameters;
}
bool decodeSRGBData(const opj_image_t& inImg, cv::Mat& outImg, uint8_t shift)
bool decodeSRGBData(const opj_image_t& inImg, cv::Mat& outImg, uint8_t shift, bool use_rgb)
{
using ImageComponents = std::vector<const OPJ_INT32*>;
@ -377,8 +377,9 @@ bool decodeSRGBData(const opj_image_t& inImg, cv::Mat& outImg, uint8_t shift)
if (inChannels >= 3)
{
int swap_rb = use_rgb ? 0 : 2;
// Assume RGB (+ alpha) for 3 channels -> BGR
ImageComponents incomps { inImg.comps[2].data, inImg.comps[1].data, inImg.comps[0].data };
ImageComponents incomps { inImg.comps[swap_rb].data, inImg.comps[1].data, inImg.comps[swap_rb^2].data };
// Assume RGBA for 4 channels -> BGRA
if (outChannels > 3)
{
@ -393,7 +394,7 @@ bool decodeSRGBData(const opj_image_t& inImg, cv::Mat& outImg, uint8_t shift)
return false;
}
bool decodeGrayscaleData(const opj_image_t& inImg, cv::Mat& outImg, uint8_t shift)
bool decodeGrayscaleData(const opj_image_t& inImg, cv::Mat& outImg, uint8_t shift, bool)
{
using ImageComponents = std::vector<const OPJ_INT32*>;
@ -411,7 +412,7 @@ bool decodeGrayscaleData(const opj_image_t& inImg, cv::Mat& outImg, uint8_t shif
return false;
}
bool decodeSYCCData(const opj_image_t& inImg, cv::Mat& outImg, uint8_t shift)
bool decodeSYCCData(const opj_image_t& inImg, cv::Mat& outImg, uint8_t shift, bool use_rgb)
{
using ImageComponents = std::vector<const OPJ_INT32*>;
@ -426,6 +427,9 @@ bool decodeSYCCData(const opj_image_t& inImg, cv::Mat& outImg, uint8_t shift)
if (outChannels == 3 && inChannels >= 3) {
copyToMat(ImageComponents { inImg.comps[0].data, inImg.comps[1].data, inImg.comps[2].data },
outImg, shift);
if (use_rgb)
cvtColor(outImg, outImg, COLOR_YUV2RGB);
else
cvtColor(outImg, outImg, COLOR_YUV2BGR);
return true;
}
@ -585,7 +589,7 @@ bool Jpeg2KOpjDecoderBase::readHeader()
bool Jpeg2KOpjDecoderBase::readData( Mat& img )
{
using DecodeFunc = bool(*)(const opj_image_t&, cv::Mat&, uint8_t shift);
using DecodeFunc = bool(*)(const opj_image_t&, cv::Mat&, uint8_t shift, bool use_rgb);
if (!opj_decode(codec_.get(), stream_.get(), image_.get()))
{
@ -647,7 +651,7 @@ bool Jpeg2KOpjDecoderBase::readData( Mat& img )
CV_Assert(comp.data && "OpenJPEG2000: missing component data (unsupported / broken input)");
}
return decode(*image_, img, shift);
return decode(*image_, img, shift, m_use_rgb);
}
} // namespace detail

@ -90,7 +90,7 @@ const static struct pam_header_field fields[] = {
#define PAM_FIELDS_NO (sizeof (fields) / sizeof ((fields)[0]))
typedef bool (*cvtFunc) (void *src, void *target, int width, int target_channels,
int target_depth);
int target_depth, bool use_rgb);
struct channel_layout {
uint rchan, gchan, bchan, graychan;
@ -108,7 +108,7 @@ struct pam_format {
};
static bool rgb_convert (void *src, void *target, int width, int target_channels,
int target_depth);
int target_depth, bool use_rgb);
const static struct pam_format formats[] = {
{IMWRITE_PAM_FORMAT_NULL, "", NULL, {0, 0, 0, 0} },
@ -125,17 +125,23 @@ const static struct pam_format formats[] = {
*/
static bool
rgb_convert (void *src, void *target, int width, int target_channels, int target_depth)
rgb_convert (void *src, void *target, int width, int target_channels, int target_depth, bool use_rgb)
{
bool ret = false;
if (target_channels == 3) {
switch (target_depth) {
case CV_8U:
if (use_rgb)
memcpy(target, src, sizeof(uchar) * width);
else
icvCvt_RGB2BGR_8u_C3R( (uchar*) src, 0, (uchar*) target, 0,
Size(width,1) );
ret = true;
break;
case CV_16U:
if (use_rgb)
memcpy(target, src, sizeof(ushort) * width);
else
icvCvt_RGB2BGR_16u_C3R( (ushort *)src, 0, (ushort *)target, 0,
Size(width,1) );
ret = true;
@ -169,7 +175,7 @@ rgb_convert (void *src, void *target, int width, int target_channels, int target
static void
basic_conversion (void *src, const struct channel_layout *layout, int src_sampe_size,
int src_width, void *target, int target_channels, int target_depth)
int src_width, void *target, int target_channels, int target_depth, bool use_rgb)
{
switch (target_depth) {
case CV_8U:
@ -182,6 +188,13 @@ basic_conversion (void *src, const struct channel_layout *layout, int src_sampe_
d[0] = d[1] = d[2] = s[layout->graychan];
break;
case 3:
if (use_rgb)
for( ; s < end; d += 3, s += src_sampe_size ) {
d[0] = s[layout->rchan];
d[1] = s[layout->gchan];
d[2] = s[layout->bchan];
}
else
for( ; s < end; d += 3, s += src_sampe_size ) {
d[0] = s[layout->bchan];
d[1] = s[layout->gchan];
@ -203,6 +216,13 @@ basic_conversion (void *src, const struct channel_layout *layout, int src_sampe_
d[0] = d[1] = d[2] = s[layout->graychan];
break;
case 3:
if (use_rgb)
for( ; s < end; d += 3, s += src_sampe_size ) {
d[0] = s[layout->rchan];
d[1] = s[layout->gchan];
d[2] = s[layout->bchan];
}
else
for( ; s < end; d += 3, s += src_sampe_size ) {
d[0] = s[layout->bchan];
d[1] = s[layout->gchan];
@ -610,18 +630,18 @@ bool PAMDecoder::readData(Mat& img)
bool funcout = false;
if (fmt->cvt_func)
funcout = fmt->cvt_func (src, data, m_width, target_channels,
img.depth());
img.depth(), m_use_rgb);
/* fall back to default if there is no conversion function or it
* can't handle the specified characteristics
*/
if (!funcout)
basic_conversion (src, &fmt->layout, m_channels,
m_width, data, target_channels, img.depth());
m_width, data, target_channels, img.depth(), m_use_rgb);
/* default to selecting the first available channels */
} else {
basic_conversion (src, &layout, m_channels,
m_width, data, target_channels, img.depth());
m_width, data, target_channels, img.depth(), m_use_rgb);
}
}
}

@ -142,7 +142,7 @@ bool PFMDecoder::readData(Mat& mat)
}
}
if (buffer.channels() == 3) {
if (buffer.channels() == 3 && !m_use_rgb) {
cv::cvtColor(buffer, buffer, cv::COLOR_BGR2RGB);
}

@ -261,7 +261,7 @@ bool PngDecoder::readData( Mat& img )
png_set_gray_1_2_4_to_8( png_ptr );
#endif
if( (m_color_type & PNG_COLOR_MASK_COLOR) && color )
if( (m_color_type & PNG_COLOR_MASK_COLOR) && color && !m_use_rgb)
png_set_bgr( png_ptr ); // convert RGB to BGR
else if( color )
png_set_gray_to_rgb( png_ptr ); // Gray->RGB

@ -340,7 +340,9 @@ bool PxMDecoder::readData( Mat& img )
{
if( color )
{
if( img.depth() == CV_8U )
if (m_use_rgb)
memcpy(data, src, m_width * CV_ELEM_SIZE(img.type()));
else if( img.depth() == CV_8U )
icvCvt_RGB2BGR_8u_C3R( src, 0, data, 0, Size(m_width,1) );
else
icvCvt_RGB2BGR_16u_C3R( (ushort *)src, 0, (ushort *)data, 0, Size(m_width,1) );

@ -381,14 +381,14 @@ bool SPngDecoder::readData(Mat &img)
break;
ret = spng_decode_row(png_ptr, buffer[row_info.row_num], image_width);
if (ihdr.interlace_method == 0)
if (ihdr.interlace_method == 0 && !m_use_rgb)
{
icvCvt_RGBA2BGRA_16u_C4R(reinterpret_cast<const ushort *>(buffer[row_info.row_num]), 0,
reinterpret_cast<ushort *>(buffer[row_info.row_num]), 0,
Size(m_width, 1));
}
} while (ret == SPNG_OK);
if (ihdr.interlace_method)
if (ihdr.interlace_method && !m_use_rgb)
{
icvCvt_RGBA2BGRA_16u_C4R(reinterpret_cast<const ushort *>(img.data), step * 2, reinterpret_cast<ushort *>(img.data), step * 2, Size(m_width, m_height));
}
@ -402,12 +402,12 @@ bool SPngDecoder::readData(Mat &img)
break;
ret = spng_decode_row(png_ptr, buffer[row_info.row_num], image_width);
if (ihdr.interlace_method == 0)
if (ihdr.interlace_method == 0 && !m_use_rgb)
{
icvCvt_RGBA2BGRA_8u_C4R(buffer[row_info.row_num], 0, buffer[row_info.row_num], 0, Size(m_width, 1));
}
} while (ret == SPNG_OK);
if (ihdr.interlace_method)
if (ihdr.interlace_method && !m_use_rgb)
{
icvCvt_RGBA2BGRA_8u_C4R(img.data, step, img.data, step, Size(m_width, m_height));
}
@ -421,13 +421,13 @@ bool SPngDecoder::readData(Mat &img)
break;
ret = spng_decode_row(png_ptr, buffer[row_info.row_num], image_width);
if (ihdr.interlace_method == 0)
if (ihdr.interlace_method == 0 && !m_use_rgb)
{
icvCvt_RGB2BGR_16u_C3R(reinterpret_cast<const ushort *>(buffer[row_info.row_num]), 0,
reinterpret_cast<ushort *>(buffer[row_info.row_num]), 0, Size(m_width, 1));
}
} while (ret == SPNG_OK);
if (ihdr.interlace_method)
if (ihdr.interlace_method && !m_use_rgb)
{
icvCvt_RGB2BGR_16u_C3R(reinterpret_cast<const ushort *>(img.data), step,
reinterpret_cast<ushort *>(img.data), step, Size(m_width, m_height));
@ -442,12 +442,12 @@ bool SPngDecoder::readData(Mat &img)
break;
ret = spng_decode_row(png_ptr, buffer[row_info.row_num], image_width);
if (ihdr.interlace_method == 0)
if (ihdr.interlace_method == 0 && !m_use_rgb)
{
icvCvt_RGB2BGR_8u_C3R(buffer[row_info.row_num], 0, buffer[row_info.row_num], 0, Size(m_width, 1));
}
} while (ret == SPNG_OK);
if (ihdr.interlace_method)
if (ihdr.interlace_method && !m_use_rgb)
{
icvCvt_RGB2BGR_8u_C3R(img.data, step, img.data, step, Size(m_width, m_height));
}

@ -342,7 +342,7 @@ bad_decoding_end:
if( color )
{
if( m_type == RAS_FORMAT_RGB )
if( m_type == RAS_FORMAT_RGB || m_use_rgb)
icvCvt_RGB2BGR_8u_C3R(src, 0, data, 0, Size(m_width,1) );
else
memcpy(data, src, std::min(step, (size_t)src_pitch));
@ -365,7 +365,7 @@ bad_decoding_end:
if( color )
icvCvt_BGRA2BGR_8u_C4C3R( src + 4, 0, data, 0, Size(m_width,1),
m_type == RAS_FORMAT_RGB ? 2 : 0 );
(m_type == RAS_FORMAT_RGB || m_use_rgb) ? 2 : 0 );
else
icvCvt_BGRA2Gray_8u_C4C1R( src + 4, 0, data, 0, Size(m_width,1),
m_type == RAS_FORMAT_RGB ? 2 : 0 );

@ -865,6 +865,11 @@ bool TiffDecoder::readData( Mat& img )
break;
case MAKE_FLAG( 3, 3 ): // RGB to BGR
if (m_use_rgb)
memcpy( (void*) img_line_buffer,
(void*) bstart,
tile_width * sizeof(uchar) );
else
icvCvt_BGR2RGB_8u_C3R( bstart, 0,
img_line_buffer, 0,
Size(tile_width, 1) );
@ -879,7 +884,7 @@ bool TiffDecoder::readData( Mat& img )
case MAKE_FLAG( 4, 3 ): // RGBA to BGR
icvCvt_BGRA2BGR_8u_C4C3R( bstart, 0,
img_line_buffer, 0,
Size(tile_width, 1), 2 );
Size(tile_width, 1), m_use_rgb ? 0 : 2);
break;
case MAKE_FLAG( 4, 4 ): // RGBA to BGRA
@ -909,7 +914,7 @@ bool TiffDecoder::readData( Mat& img )
CV_CheckEQ(wanted_channels, 3, "TIFF-8bpp: BGR/BGRA images are supported only");
icvCvt_BGRA2BGR_8u_C4C3R(bstart + i*tile_width0*4, 0,
img.ptr(img_y + tile_height - i - 1, x), 0,
Size(tile_width, 1), 2);
Size(tile_width, 1), m_use_rgb ? 0 : 2);
}
}
else
@ -972,6 +977,9 @@ bool TiffDecoder::readData( Mat& img )
else if (ncn == 3)
{
CV_CheckEQ(wanted_channels, 3, "");
if (m_use_rgb)
memcpy(buffer16, img.ptr<ushort>(img_y + i, x), tile_width * sizeof(ushort));
else
icvCvt_RGB2BGR_16u_C3R(buffer16, 0,
img.ptr<ushort>(img_y + i, x), 0,
Size(tile_width, 1));
@ -989,7 +997,7 @@ bool TiffDecoder::readData( Mat& img )
CV_CheckEQ(wanted_channels, 3, "TIFF-16bpp: BGR/BGRA images are supported only");
icvCvt_BGRA2BGR_16u_C4C3R(buffer16, 0,
img.ptr<ushort>(img_y + i, x), 0,
Size(tile_width, 1), 2);
Size(tile_width, 1), m_use_rgb ? 0 : 2);
}
}
else
@ -1032,7 +1040,7 @@ bool TiffDecoder::readData( Mat& img )
Mat m_tile(Size(tile_width0, tile_height0), CV_MAKETYPE((dst_bpp == 32) ? (depth == CV_32S ? CV_32S : CV_32F) : CV_64F, ncn), src_buffer);
Rect roi_tile(0, 0, tile_width, tile_height);
Rect roi_img(x, img_y, tile_width, tile_height);
if (!m_hdr && ncn == 3)
if (!m_hdr && ncn == 3 && !m_use_rgb)
extend_cvtColor(m_tile(roi_tile), img(roi_img), COLOR_RGB2BGR);
else if (!m_hdr && ncn == 4)
extend_cvtColor(m_tile(roi_tile), img(roi_img), COLOR_RGBA2BGRA);
@ -1060,6 +1068,9 @@ bool TiffDecoder::readData( Mat& img )
if (m_hdr && depth >= CV_32F)
{
CV_Assert(photometric == PHOTOMETRIC_LOGLUV);
if (m_use_rgb)
cvtColor(img, img, COLOR_XYZ2RGB);
else
cvtColor(img, img, COLOR_XYZ2BGR);
}
return true;

@ -184,12 +184,20 @@ bool WebPDecoder::readData(Mat &img)
if (channels == 3)
{
CV_CheckTypeEQ(read_img.type(), CV_8UC3, "");
if (m_use_rgb)
res_ptr = WebPDecodeRGBInto(data.ptr(), data.total(), out_data,
(int)out_data_size, (int)read_img.step);
else
res_ptr = WebPDecodeBGRInto(data.ptr(), data.total(), out_data,
(int)out_data_size, (int)read_img.step);
}
else if (channels == 4)
{
CV_CheckTypeEQ(read_img.type(), CV_8UC4, "");
if (m_use_rgb)
res_ptr = WebPDecodeRGBAInto(data.ptr(), data.total(), out_data,
(int)out_data_size, (int)read_img.step);
else
res_ptr = WebPDecodeBGRAInto(data.ptr(), data.total(), out_data,
(int)out_data_size, (int)read_img.step);
}

@ -88,7 +88,7 @@ static inline int calcType(int type, int flags)
if( (flags & IMREAD_ANYDEPTH) == 0 )
type = CV_MAKETYPE(CV_8U, CV_MAT_CN(type));
if( (flags & IMREAD_COLOR) != 0 ||
if( (flags & IMREAD_COLOR) != 0 || (flags & IMREAD_COLOR_RGB) != 0 ||
((flags & IMREAD_ANYCOLOR) != 0 && CV_MAT_CN(type) > 1) )
type = CV_MAKETYPE(CV_MAT_DEPTH(type), 3);
else
@ -432,6 +432,12 @@ imread_( const String& filename, int flags, OutputArray mat )
scale_denom = 8;
}
// Try to decode the image as RGB instead of BGR.
if (flags & IMREAD_COLOR_RGB && flags != IMREAD_UNCHANGED)
{
decoder->setRGB(true);
}
/// set the scale_denom in the driver
decoder->setScale( scale_denom );
@ -542,6 +548,9 @@ imreadmulti_(const String& filename, int flags, std::vector<Mat>& mats, int star
count = std::numeric_limits<int>::max();
}
if (flags & IMREAD_COLOR_RGB && flags != IMREAD_UNCHANGED)
decoder->setRGB(true);
/// set the filename in the driver
decoder->setSource(filename);
@ -811,6 +820,12 @@ imdecode_( const Mat& buf, int flags, Mat& mat )
scale_denom = 8;
}
// Try to decode the image as RGB instead of BGR.
if (flags & IMREAD_COLOR_RGB && flags != IMREAD_UNCHANGED)
{
decoder->setRGB(true);
}
/// set the scale_denom in the driver
decoder->setScale( scale_denom );
@ -947,6 +962,12 @@ imdecodemulti_(const Mat& buf, int flags, std::vector<Mat>& mats, int start, int
if (!decoder)
return 0;
// Try to decode the image as RGB instead of BGR.
if (flags & IMREAD_COLOR_RGB && flags != IMREAD_UNCHANGED)
{
decoder->setRGB(true);
}
if (count < 0) {
count = std::numeric_limits<int>::max();
}
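A minimal usage sketch of the new flag handled above (illustrative only; the file name is a placeholder). IMREAD_COLOR_RGB makes the decoder deliver RGB channel order directly, so no extra cvtColor(..., COLOR_BGR2RGB) pass is needed before feeding the image to RGB-based pipelines:
#include <opencv2/imgcodecs.hpp>
#include <opencv2/imgproc.hpp>

int main()
{
    cv::Mat bgr = cv::imread("input.png", cv::IMREAD_COLOR);      // legacy BGR order
    cv::Mat rgb = cv::imread("input.png", cv::IMREAD_COLOR_RGB);  // decoded directly as RGB
    cv::Mat check;
    cv::cvtColor(rgb, check, cv::COLOR_RGB2BGR);
    // the two results are expected to match bit-exactly for the supported codecs
    return cv::norm(bgr, check, cv::NORM_INF) == 0 ? 0 : 1;
}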

@ -352,6 +352,25 @@ void icvCvt_CMYK2BGR_8u_C4C3R( const uchar* cmyk, int cmyk_step,
}
}
void icvCvt_CMYK2RGB_8u_C4C3R( const uchar* cmyk, int cmyk_step,
uchar* rgb, int rgb_step, Size size )
{
int i;
for( ; size.height--; )
{
for( i = 0; i < size.width; i++, rgb += 3, cmyk += 4 )
{
int c = cmyk[0], m = cmyk[1], y = cmyk[2], k = cmyk[3];
c = k - ((255 - c)*k>>8);
m = k - ((255 - m)*k>>8);
y = k - ((255 - y)*k>>8);
rgb[0] = (uchar)c; rgb[1] = (uchar)m; rgb[2] = (uchar)y;
}
rgb += rgb_step - size.width*3;
cmyk += cmyk_step - size.width*4;
}
}
void icvCvt_CMYK2Gray_8u_C4C1R( const uchar* cmyk, int cmyk_step,
uchar* gray, int gray_step, Size size )
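The new icvCvt_CMYK2RGB_8u_C4C3R mirrors the existing CMYK2BGR helper and assumes the same inverted (Adobe/JPEG-style) CMYK storage; c = k - ((255 - c)*k >> 8) is an integer approximation of c*k/255. For example, c = 200, k = 128 gives 128 - (55*128 >> 8) = 128 - 27 = 101, close to 200*128/255 ≈ 100. The only difference from the BGR variant is that the converted channels are written in R, G, B order instead of swapped.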

@ -115,6 +115,8 @@ void icvCvt_BGR5652BGR_8u_C2C3R( const uchar* bgr565, int bgr565_step,
uchar* bgr, int bgr_step, Size size );
void icvCvt_CMYK2BGR_8u_C4C3R( const uchar* cmyk, int cmyk_step,
uchar* bgr, int bgr_step, Size size );
void icvCvt_CMYK2RGB_8u_C4C3R( const uchar* cmyk, int cmyk_step,
uchar* rgb, int rgb_step, Size size );
void icvCvt_CMYK2Gray_8u_C4C1R( const uchar* ycck, int ycck_step,
uchar* gray, int gray_step, Size size );

@ -150,7 +150,7 @@ INSTANTIATE_TEST_CASE_P(
::testing::ValuesIn({1, 3, 4}),
::testing::ValuesIn({0, 50, 100}),
::testing::ValuesIn({IMREAD_UNCHANGED, IMREAD_GRAYSCALE,
IMREAD_COLOR})));
IMREAD_COLOR, IMREAD_COLOR_RGB})));
class Imgcodecs_Avif_Image_EncodeDecodeSuite
: public Imgcodecs_Avif_Image_RoundTripSuite {};
@ -183,7 +183,7 @@ INSTANTIATE_TEST_CASE_P(
::testing::ValuesIn({1, 3, 4}),
::testing::ValuesIn({0, 50, 100}),
::testing::ValuesIn({IMREAD_UNCHANGED, IMREAD_GRAYSCALE,
IMREAD_COLOR})));
IMREAD_COLOR, IMREAD_COLOR_RGB})));
////////////////////////////////////////////////////////////////////////////////
@ -311,7 +311,7 @@ INSTANTIATE_TEST_CASE_P(
::testing::Combine(::testing::ValuesIn({8, 10, 12}),
::testing::ValuesIn({1, 3}), ::testing::ValuesIn({50}),
::testing::ValuesIn({IMREAD_UNCHANGED, IMREAD_GRAYSCALE,
IMREAD_COLOR})));
IMREAD_COLOR, IMREAD_COLOR_RGB})));
class Imgcodecs_Avif_Animation_WriteDecodeSuite
: public Imgcodecs_Avif_Animation_RoundTripSuite {};
@ -347,7 +347,7 @@ INSTANTIATE_TEST_CASE_P(
::testing::Combine(::testing::ValuesIn({8, 10, 12}),
::testing::ValuesIn({1, 3}), ::testing::ValuesIn({50}),
::testing::ValuesIn({IMREAD_UNCHANGED, IMREAD_GRAYSCALE,
IMREAD_COLOR})));
IMREAD_COLOR, IMREAD_COLOR_RGB})));
} // namespace
} // namespace opencv_test

@ -192,6 +192,15 @@ TEST(Imgcodecs_EXR, read_YC_changeDepth)
ASSERT_FALSE(img.empty());
ASSERT_EQ(CV_8UC3, img.type());
const Mat img_rgb = cv::imread(filenameInput, IMREAD_COLOR_RGB);
ASSERT_FALSE(img_rgb.empty());
ASSERT_EQ(CV_8UC3, img_rgb.type());
cvtColor(img_rgb, img_rgb, COLOR_RGB2BGR);
EXPECT_TRUE(cvtest::norm(img, img_rgb, NORM_INF) == 0);
// Cannot test writing, EXR encoder doesn't support 8U depth
}

@ -108,6 +108,7 @@ const int basic_modes[] =
IMREAD_UNCHANGED,
IMREAD_GRAYSCALE,
IMREAD_COLOR,
IMREAD_COLOR_RGB,
IMREAD_ANYDEPTH,
IMREAD_ANYCOLOR
};
@ -356,6 +357,10 @@ TEST(Imgcodecs_Bmp, rgba_scale)
ASSERT_FALSE(img.empty());
ASSERT_EQ(CV_8UC3, img.type());
img = cv::imread(filenameInput, IMREAD_COLOR_RGB);
ASSERT_FALSE(img.empty());
ASSERT_EQ(CV_8UC3, img.type());
data = img.ptr();
ASSERT_EQ(data[0], 255);
ASSERT_EQ(data[1], 255);

@ -217,6 +217,7 @@ TEST_P(Imgcodecs_Jpeg_decode_cmyk, regression25274)
INSTANTIATE_TEST_CASE_P( /* nothing */,
Imgcodecs_Jpeg_decode_cmyk,
testing::Values(cv::IMREAD_COLOR,
cv::IMREAD_COLOR_RGB,
cv::IMREAD_GRAYSCALE,
cv::IMREAD_ANYCOLOR));
@ -327,6 +328,13 @@ TEST_P(Imgcodecs_Jpeg_encode_withLumaChromaQuality, basic)
cv::Mat src = imread(fname, cv::IMREAD_COLOR);
ASSERT_FALSE(src.empty());
// Add imread RGB test
cv::Mat src_rgb = imread(fname, cv::IMREAD_COLOR_RGB);
ASSERT_FALSE(src_rgb.empty());
cvtColor(src_rgb, src_rgb, COLOR_RGB2BGR);
EXPECT_TRUE(cvtest::norm(src, src_rgb, NORM_INF) == 0);
std::vector<uint8_t> jpegNormal;
ASSERT_NO_THROW(cv::imencode(".jpg", src, jpegNormal));

@ -83,6 +83,14 @@ TEST(Imgcodecs_Png, read_color_palette_with_alpha)
EXPECT_EQ(img.at<Vec3b>(0, 0), Vec3b(0, 0, 255));
EXPECT_EQ(img.at<Vec3b>(0, 1), Vec3b(0, 0, 255));
img = imread(root + "readwrite/color_palette_alpha.png", IMREAD_COLOR_RGB);
ASSERT_FALSE(img.empty());
ASSERT_TRUE(img.channels() == 3);
// pixel is red in RGB
EXPECT_EQ(img.at<Vec3b>(0, 0), Vec3b(255, 0, 0));
EXPECT_EQ(img.at<Vec3b>(0, 1), Vec3b(255, 0, 0));
// Fourth Test : Read PNG without alpha, imread flag 1
img = imread(root + "readwrite/color_palette_no_alpha.png", IMREAD_COLOR);
ASSERT_FALSE(img.empty());
@ -91,6 +99,14 @@ TEST(Imgcodecs_Png, read_color_palette_with_alpha)
// pixel is red in BGR
EXPECT_EQ(img.at<Vec3b>(0, 0), Vec3b(0, 0, 255));
EXPECT_EQ(img.at<Vec3b>(0, 1), Vec3b(0, 0, 255));
img = imread(root + "readwrite/color_palette_no_alpha.png", IMREAD_COLOR_RGB);
ASSERT_FALSE(img.empty());
ASSERT_TRUE(img.channels() == 3);
// pixel is red in RGB
EXPECT_EQ(img.at<Vec3b>(0, 0), Vec3b(255, 0, 0));
EXPECT_EQ(img.at<Vec3b>(0, 1), Vec3b(255, 0, 0));
}
/**

@ -50,6 +50,11 @@ void PrintTo(const ImreadModes& val, std::ostream* os)
v &= ~IMREAD_IGNORE_ORIENTATION;
*os << "IMREAD_IGNORE_ORIENTATION" << (v == 0 ? "" : " | ");
}
if ((v & IMREAD_COLOR_RGB) != 0)
{
v &= ~IMREAD_COLOR_RGB;
*os << "IMREAD_COLOR_RGB" << (v == 0 ? "" : " | ");
}
switch (v)
{
case IMREAD_UNCHANGED: return;
@ -65,6 +70,7 @@ void PrintTo(const ImreadModes& val, std::ostream* os)
case IMREAD_REDUCED_GRAYSCALE_8: // fallthru
case IMREAD_REDUCED_COLOR_8: *os << "REDUCED_8"; return;
case IMREAD_IGNORE_ORIENTATION: return;
case IMREAD_COLOR_RGB: return;
} // don't use "default:" to emit compiler warnings
*os << "IMREAD_UNKNOWN(" << (int)v << ")";
}

@ -196,9 +196,19 @@ void test_image_io(const Mat& image, const std::string& fname, const std::string
Mat buf_loaded = imdecode(Mat(buf), imreadFlag);
EXPECT_FALSE(buf_loaded.empty());
if (imreadFlag & IMREAD_COLOR_RGB && imreadFlag != -1)
{
cvtColor(buf_loaded, buf_loaded, COLOR_RGB2BGR);
}
Mat loaded = imread(fname, imreadFlag);
EXPECT_FALSE(loaded.empty());
if (imreadFlag & IMREAD_COLOR_RGB && imreadFlag != -1)
{
cvtColor(loaded, loaded, COLOR_RGB2BGR);
}
EXPECT_EQ(0, cv::norm(loaded, buf_loaded, NORM_INF)) << "imread() and imdecode() calls must provide the same result (bit-exact)";
double psnr = cvtest::PSNR(loaded, image);
@ -238,6 +248,7 @@ TEST_P(Imgcodecs_Image, read_write_BGR)
Mat image = generateTestImageBGR();
EXPECT_NO_THROW(test_image_io(image, fname, ext, IMREAD_COLOR, psnrThreshold));
EXPECT_NO_THROW(test_image_io(image, fname, ext, IMREAD_COLOR_RGB, psnrThreshold));
EXPECT_EQ(0, remove(fname.c_str()));
}

@ -53,7 +53,7 @@ enum ImreadMixModes
{
IMREAD_MIX_UNCHANGED = IMREAD_UNCHANGED ,
IMREAD_MIX_GRAYSCALE = IMREAD_GRAYSCALE ,
IMREAD_MIX_COLOR = IMREAD_COLOR ,
IMREAD_MIX_COLOR = IMREAD_COLOR | IMREAD_COLOR_RGB ,
IMREAD_MIX_GRAYSCALE_ANYDEPTH = IMREAD_GRAYSCALE | IMREAD_ANYDEPTH ,
IMREAD_MIX_GRAYSCALE_ANYCOLOR = IMREAD_GRAYSCALE | IMREAD_ANYCOLOR,
IMREAD_MIX_GRAYSCALE_ANYDEPTH_ANYCOLOR = IMREAD_GRAYSCALE | IMREAD_ANYDEPTH | IMREAD_ANYCOLOR,
@ -125,7 +125,7 @@ TEST_P(Imgcodecs_Tiff_decode_Huge, regression)
case IMREAD_GRAYSCALE | IMREAD_ANYCOLOR | IMREAD_ANYDEPTH:
ncn = (ncn == 1)?1:3;
break;
case IMREAD_COLOR:
case IMREAD_COLOR | IMREAD_COLOR_RGB:
ncn = 3;
depth = 1;
break;
@ -818,6 +818,24 @@ TEST(Imgcodecs_Tiff, read_palette_color_image)
ASSERT_EQ(CV_8UC3, img.type());
}
TEST(Imgcodecs_Tiff, read_palette_color_image_rgb_and_bgr)
{
const string root = cvtest::TS::ptr()->get_data_path();
const string filenameInput = root + "readwrite/test_palette_color_image.tif";
Mat img_rgb, img_bgr;
ASSERT_NO_THROW(img_rgb = cv::imread(filenameInput, IMREAD_COLOR_RGB));
ASSERT_NO_THROW(img_bgr = cv::imread(filenameInput, IMREAD_COLOR_BGR));
ASSERT_FALSE(img_rgb.empty());
ASSERT_EQ(CV_8UC3, img_rgb.type());
ASSERT_FALSE(img_bgr.empty());
ASSERT_EQ(CV_8UC3, img_bgr.type());
EXPECT_EQ(img_rgb.at<Vec3b>(32, 24), Vec3b(255, 0, 0));
EXPECT_EQ(img_bgr.at<Vec3b>(32, 24), Vec3b(0, 0, 255));
}
TEST(Imgcodecs_Tiff, read_4_bit_palette_color_image)
{
const string root = cvtest::TS::ptr()->get_data_path();
@ -1066,6 +1084,7 @@ const int all_modes[] =
IMREAD_UNCHANGED,
IMREAD_GRAYSCALE,
IMREAD_COLOR,
IMREAD_COLOR_RGB,
IMREAD_ANYDEPTH,
IMREAD_ANYCOLOR
};

@ -51,6 +51,12 @@ TEST(Imgcodecs_WebP, encode_decode_lossless_webp)
ASSERT_FALSE(decode.empty());
EXPECT_TRUE(cvtest::norm(decode, img_webp, NORM_INF) == 0);
cv::Mat decode_rgb = cv::imdecode(buf, IMREAD_COLOR_RGB);
ASSERT_FALSE(decode_rgb.empty());
cvtColor(decode_rgb, decode_rgb, COLOR_RGB2BGR);
EXPECT_TRUE(cvtest::norm(decode_rgb, img_webp, NORM_INF) == 0);
ASSERT_FALSE(img_webp.empty());
EXPECT_TRUE(cvtest::norm(img, img_webp, NORM_INF) == 0);

@ -1541,12 +1541,14 @@ respectively (see #getGaussianKernel for details); to fully control the result r
possible future modifications of all this semantics, it is recommended to specify all of ksize,
sigmaX, and sigmaY.
@param borderType pixel extrapolation method, see #BorderTypes. #BORDER_WRAP is not supported.
@param hint Implementation modification flags. See #AlgorithmHint
@sa sepFilter2D, filter2D, blur, boxFilter, bilateralFilter, medianBlur
*/
CV_EXPORTS_W void GaussianBlur( InputArray src, OutputArray dst, Size ksize,
double sigmaX, double sigmaY = 0,
int borderType = BORDER_DEFAULT );
int borderType = BORDER_DEFAULT,
AlgorithmHint hint = cv::ALGO_HINT_DEFAULT );
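A minimal sketch of the new hint parameter (illustrative only; the wrapper name is made up). ALGO_HINT_ACCURATE keeps the bit-exact fixed-point path, while ALGO_HINT_APPROX allows the faster HAL/IPP approximations wired in by the GaussianBlur implementation changes below:
#include <opencv2/imgproc.hpp>

void blur_both(const cv::Mat& src, cv::Mat& exact, cv::Mat& approx)
{
    // bit-exact reference result
    cv::GaussianBlur(src, exact, cv::Size(5, 5), 1.25, 1.25, cv::BORDER_DEFAULT, cv::ALGO_HINT_ACCURATE);
    // may dispatch to an approximate HAL/IPP implementation
    cv::GaussianBlur(src, approx, cv::Size(5, 5), 1.25, 1.25, cv::BORDER_DEFAULT, cv::ALGO_HINT_APPROX);
}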
/** @brief Applies the bilateral filter to an image.
@ -4084,6 +4086,28 @@ CV_EXPORTS_W void approxPolyDP( InputArray curve,
OutputArray approxCurve,
double epsilon, bool closed );
/** @brief Approximates a polygon with a convex hull with a specified accuracy and number of sides.
The cv::approxPolyN function approximates a polygon with a convex hull
so that the difference between the area of the original contour and the area of the new polygon is minimal.
It uses a greedy algorithm for contracting two vertices into one in such a way that the additional area is minimal.
Straight lines formed by each edge of the convex contour are drawn and the areas of the resulting triangles are considered.
Each vertex will lie either on the original contour or outside it.
The algorithm is based on the paper @cite LowIlie2003 .
@param curve Input vector of 2D points stored in std::vector or Mat; points must be float or integer.
@param approxCurve Result of the approximation. The type is a vector of 2D points (Point2f or Point) in std::vector or Mat.
@param nsides The number of sides of the resulting polygon.
@param epsilon_percentage Maximum additional area as a percentage of contourArea(_curve).
If it equals -1, the limit is not used. Otherwise the algorithm stops once the additional area exceeds contourArea(_curve) * percentage
and returns as many vertices as the polygon had at the moment the limit was exceeded.
@param ensure_convex If true, the algorithm builds a convex hull of the input contour. Otherwise the input vector must already be convex.
*/
CV_EXPORTS_W void approxPolyN(InputArray curve, OutputArray approxCurve,
int nsides, float epsilon_percentage = -1.0,
bool ensure_convex = true);
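A minimal usage sketch of approxPolyN as declared above (illustrative only; the helper name is made up): fit a 4-sided polygon around a contour, letting the function build the convex hull itself.
#include <opencv2/imgproc.hpp>
#include <vector>

std::vector<cv::Point> quad_from_contour(const std::vector<cv::Point>& contour)
{
    std::vector<cv::Point> corners;
    // 4 sides; -1 disables the extra-area limit; true builds the convex hull of the input
    cv::approxPolyN(contour, corners, 4, -1.0f, true);
    return corners;
}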
/** @brief Calculates a contour perimeter or a curve length.
The function computes a curve length or a closed contour perimeter.

@ -39,6 +39,7 @@
//
//M*/
#include "precomp.hpp"
#include <queue>
/****************************************************************************************\
* Polygonal Approximation *
@ -284,3 +285,241 @@ void cv::approxPolyDP( InputArray _curve, OutputArray _approxCurve,
Mat(nout, 1, CV_MAKETYPE(depth, 2), buf).copyTo(_approxCurve);
}
enum class PointStatus : int8_t
{
REMOVED = -1,
RECALCULATE = 0,
CALCULATED = 1
};
struct neighbours
{
PointStatus pointStatus;
cv::Point2f point;
int next;
int prev;
explicit neighbours(int next_ = -1, int prev_ = -1, const cv::Point2f& point_ = { -1, -1 })
{
next = next_;
prev = prev_;
point = point_;
pointStatus = PointStatus::CALCULATED;
}
};
struct changes
{
float area;
int vertex;
cv::Point2f intersection;
explicit changes(float area_, int vertex_, const cv::Point2f& intersection_)
{
area = area_;
vertex = vertex_;
intersection = intersection_;
}
bool operator < (const changes& elem) const
{
return (area < elem.area) || ((area == elem.area) && (vertex < elem.vertex));
}
bool operator > (const changes& elem) const
{
return (area > elem.area) || ((area == elem.area) && (vertex > elem.vertex));
}
};
/*
returns intersection point and extra area
*/
static void recalculation(std::vector<neighbours>& hull, int vertex_id, float& area_, float& x, float& y)
{
cv::Point2f vertex = hull[vertex_id].point,
next_vertex = hull[hull[vertex_id].next].point,
extra_vertex_1 = hull[hull[vertex_id].prev].point,
extra_vertex_2 = hull[hull[hull[vertex_id].next].next].point;
cv::Point2f curr_edge = next_vertex - vertex,
prev_edge = vertex - extra_vertex_1,
next_edge = extra_vertex_2 - next_vertex;
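// Contract the edge (vertex, next_vertex): extend the previous edge (prev -> vertex) and the
// next edge (next_vertex -> next-next) until they intersect; the intersection becomes the merged
// vertex, and the triangle (vertex, next_vertex, intersection) is the extra area this step adds.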
float cross = prev_edge.x * next_edge.y - prev_edge.y * next_edge.x;
if (abs(cross) < 1e-8)
{
area_ = FLT_MAX;
x = -1;
y = -1;
return;
}
float t = (curr_edge.x * next_edge.y - curr_edge.y * next_edge.x) / cross;
cv::Point2f intersection = vertex + cv::Point2f(prev_edge.x * t, prev_edge.y * t);
float area = 0.5f * abs((next_vertex.x - vertex.x) * (intersection.y - vertex.y)
- (intersection.x - vertex.x) * (next_vertex.y - vertex.y));
area_ = area;
x = intersection.x;
y = intersection.y;
}
static void update(std::vector<neighbours>& hull, int vertex_id)
{
neighbours& v1 = hull[vertex_id], & removed = hull[v1.next], & v2 = hull[removed.next];
removed.pointStatus = PointStatus::REMOVED;
v1.pointStatus = PointStatus::RECALCULATE;
v2.pointStatus = PointStatus::RECALCULATE;
hull[v1.prev].pointStatus = PointStatus::RECALCULATE;
v1.next = removed.next;
v2.prev = removed.prev;
}
/*
A greedy algorithm based on contraction of vertices for approximating a convex contour by a bounding polygon
*/
void cv::approxPolyN(InputArray _curve, OutputArray _approxCurve,
int nsides, float epsilon_percentage, bool ensure_convex)
{
CV_INSTRUMENT_REGION();
CV_Assert(epsilon_percentage > 0 || epsilon_percentage == -1);
CV_Assert(nsides > 2);
if (_approxCurve.fixedType())
{
CV_Assert(_approxCurve.type() == CV_32FC2 || _approxCurve.type() == CV_32SC2);
}
Mat curve;
int depth = _curve.depth();
CV_Assert(depth == CV_32F || depth == CV_32S);
if (ensure_convex)
{
cv::convexHull(_curve, curve);
}
else
{
CV_Assert(isContourConvex(_curve));
curve = _curve.getMat();
}
CV_Assert((curve.cols == 1 && curve.rows >= nsides)
|| (curve.rows == 1 && curve.cols >= nsides));
if (curve.rows == 1)
{
curve = curve.reshape(0, curve.cols);
}
std::vector<neighbours> hull(curve.rows);
int size = curve.rows;
std::priority_queue<changes, std::vector<changes>, std::greater<changes>> areas;
float extra_area = 0, max_extra_area = epsilon_percentage * static_cast<float>(contourArea(_curve));
if (curve.depth() == CV_32S)
{
for (int i = 0; i < size; ++i)
{
Point t = curve.at<cv::Point>(i, 0);
hull[i] = neighbours(i + 1, i - 1, Point2f(static_cast<float>(t.x), static_cast<float>(t.y)));
}
}
else
{
for (int i = 0; i < size; ++i)
{
Point2f t = curve.at<cv::Point2f>(i, 0);
hull[i] = neighbours(i + 1, i - 1, t);
}
}
hull[0].prev = size - 1;
hull[size - 1].next = 0;
if (size > nsides)
{
for (int vertex_id = 0; vertex_id < size; ++vertex_id)
{
float area, new_x, new_y;
recalculation(hull, vertex_id, area, new_x, new_y);
areas.push(changes(area, vertex_id, Point2f(new_x, new_y)));
}
}
while (size > nsides)
{
changes base = areas.top();
int vertex_id = base.vertex;
if (hull[vertex_id].pointStatus == PointStatus::REMOVED)
{
areas.pop();
}
else if (hull[vertex_id].pointStatus == PointStatus::RECALCULATE)
{
float area, new_x, new_y;
areas.pop();
recalculation(hull, vertex_id, area, new_x, new_y);
areas.push(changes(area, vertex_id, Point2f(new_x, new_y)));
hull[vertex_id].pointStatus = PointStatus::CALCULATED;
}
else
{
if (epsilon_percentage != -1)
{
extra_area += base.area;
if (extra_area > max_extra_area)
{
break;
}
}
size--;
hull[vertex_id].point = base.intersection;
update(hull, vertex_id);
}
}
if (_approxCurve.fixedType())
{
depth = _approxCurve.depth();
}
_approxCurve.create(1, size, CV_MAKETYPE(depth, 2));
Mat buf = _approxCurve.getMat();
int last_free = 0;
if (depth == CV_32S)
{
for (int i = 0; i < curve.rows; ++i)
{
if (hull[i].pointStatus != PointStatus::REMOVED)
{
Point t = Point(static_cast<int>(round(hull[i].point.x)),
static_cast<int>(round(hull[i].point.y)));
buf.at<Point>(0, last_free) = t;
last_free++;
}
}
}
else
{
for (int i = 0; i < curve.rows; ++i)
{
if (hull[i].pointStatus != PointStatus::REMOVED)
{
buf.at<Point2f>(0, last_free) = hull[i].point;
last_free++;
}
}
}
}
/* End of file. */

@ -384,7 +384,7 @@ static bool ocl_GaussianBlur_8UC1(InputArray _src, OutputArray _dst, Size ksize,
#endif
#if defined ENABLE_IPP_GAUSSIAN_BLUR // see CMake's OPENCV_IPP_GAUSSIAN_BLUR option
#ifdef ENABLE_IPP_GAUSSIAN_BLUR // see CMake's OPENCV_IPP_GAUSSIAN_BLUR option
#define IPP_DISABLE_GAUSSIAN_BLUR_LARGE_KERNELS_1TH 1
#define IPP_DISABLE_GAUSSIAN_BLUR_16SC4_1TH 1
@ -442,14 +442,14 @@ private:
#endif
static bool ipp_GaussianBlur(InputArray _src, OutputArray _dst, Size ksize,
static bool ipp_GaussianBlur(cv::Mat& src, cv::Mat& dst, Size ksize,
double sigma1, double sigma2, int borderType )
{
#ifdef HAVE_IPP_IW
CV_INSTRUMENT_REGION_IPP();
#if IPP_VERSION_X100 < 201800 && ((defined _MSC_VER && defined _M_IX86) || (defined __GNUC__ && defined __i386__))
CV_UNUSED(_src); CV_UNUSED(_dst); CV_UNUSED(ksize); CV_UNUSED(sigma1); CV_UNUSED(sigma2); CV_UNUSED(borderType);
CV_UNUSED(src); CV_UNUSED(dst); CV_UNUSED(ksize); CV_UNUSED(sigma1); CV_UNUSED(sigma2); CV_UNUSED(borderType);
return false; // bug on ia32
#else
if(sigma1 != sigma2)
@ -464,8 +464,6 @@ static bool ipp_GaussianBlur(InputArray _src, OutputArray _dst, Size ksize,
// Acquire data and begin processing
try
{
Mat src = _src.getMat();
Mat dst = _dst.getMat();
::ipp::IwiImage iwSrc = ippiGetImage(src);
::ipp::IwiImage iwDst = ippiGetImage(dst);
::ipp::IwiBorderSize borderSize = ::ipp::iwiSizeToBorderSize(ippiGetSize(ksize));
@ -505,7 +503,7 @@ static bool ipp_GaussianBlur(InputArray _src, OutputArray _dst, Size ksize,
return true;
#endif
#else
CV_UNUSED(_src); CV_UNUSED(_dst); CV_UNUSED(ksize); CV_UNUSED(sigma1); CV_UNUSED(sigma2); CV_UNUSED(borderType);
CV_UNUSED(src); CV_UNUSED(dst); CV_UNUSED(ksize); CV_UNUSED(sigma1); CV_UNUSED(sigma2); CV_UNUSED(borderType);
return false;
#endif
}
@ -526,10 +524,13 @@ static bool validateGaussianBlurKernel(std::vector<T>& kernel)
void GaussianBlur(InputArray _src, OutputArray _dst, Size ksize,
double sigma1, double sigma2,
int borderType)
int borderType, AlgorithmHint hint)
{
CV_INSTRUMENT_REGION();
if (hint == cv::ALGO_HINT_DEFAULT)
hint = cv::getDefaultAlgorithmHint();
CV_Assert(!_src.empty());
int type = _src.type();
@ -609,7 +610,25 @@ void GaussianBlur(InputArray _src, OutputArray _dst, Size ksize,
src2.locateROI( wsz, ofs );
CALL_HAL(gaussianBlurBinomial, cv_hal_gaussianBlurBinomial, src2.ptr(), src2.step, dst.ptr(), dst.step, src2.cols, src2.rows, sdepth, cn,
ofs.x, ofs.y, wsz.width - src2.cols - ofs.x, wsz.height - src2.rows - ofs.y, ksize.width, borderType&~BORDER_ISOLATED);
ofs.x, ofs.y, wsz.width - src2.cols - ofs.x, wsz.height - src2.rows - ofs.y, ksize.width,
borderType & ~BORDER_ISOLATED);
}
if (hint == ALGO_HINT_APPROX)
{
Point ofs;
Size wsz(src.cols, src.rows);
if(!(borderType & BORDER_ISOLATED))
src.locateROI( wsz, ofs );
CALL_HAL(gaussianBlur, cv_hal_gaussianBlur, src.ptr(), src.step, dst.ptr(), dst.step, src.cols, src.rows, sdepth, cn,
ofs.x, ofs.y, wsz.width - src.cols - ofs.x, wsz.height - src.rows - ofs.y, ksize.width, ksize.height,
sigma1, sigma2, borderType & ~BORDER_ISOLATED);
#ifdef ENABLE_IPP_GAUSSIAN_BLUR
// IPP is not bit-exact to OpenCV implementation
CV_IPP_RUN_FAST(ipp_GaussianBlur(src, dst, ksize, sigma1, sigma2, borderType));
#endif
}
CV_CPU_DISPATCH(GaussianBlurFixedPoint, (src, dst, (const uint16_t*)&fkx[0], (int)fkx.size(), (const uint16_t*)&fky[0], (int)fky.size(), borderType),
@ -663,6 +682,23 @@ void GaussianBlur(InputArray _src, OutputArray _dst, Size ksize,
ofs.x, ofs.y, wsz.width - src2.cols - ofs.x, wsz.height - src2.rows - ofs.y, ksize.width, borderType&~BORDER_ISOLATED);
}
if (hint == ALGO_HINT_APPROX)
{
Point ofs;
Size wsz(src.cols, src.rows);
if(!(borderType & BORDER_ISOLATED))
src.locateROI( wsz, ofs );
CALL_HAL(gaussianBlur, cv_hal_gaussianBlur, src.ptr(), src.step, dst.ptr(), dst.step, src.cols, src.rows, sdepth, cn,
ofs.x, ofs.y, wsz.width - src.cols - ofs.x, wsz.height - src.rows - ofs.y, ksize.width, ksize.height,
sigma1, sigma2, borderType & ~BORDER_ISOLATED);
#ifdef ENABLE_IPP_GAUSSIAN_BLUR
// IPP is not bit-exact to OpenCV implementation
CV_IPP_RUN_FAST(ipp_GaussianBlur(src, dst, ksize, sigma1, sigma2, borderType));
#endif
}
CV_CPU_DISPATCH(GaussianBlurFixedPoint, (src, dst, (const uint32_t*)&fkx[0], (int)fkx.size(), (const uint32_t*)&fky[0], (int)fky.size(), borderType),
CV_CPU_DISPATCH_MODES_ALL);

@ -76,4 +76,79 @@ TEST(Imgproc_ApproxPoly, bad_epsilon)
ASSERT_ANY_THROW(approxPolyDP(inputPoints, outputPoints, eps, false));
}
struct ApproxPolyN: public testing::Test
{
void SetUp()
{
vector<vector<Point>> inputPoints = {
{ {87, 103}, {100, 112}, {96, 138}, {80, 169}, {60, 183}, {38, 176}, {41, 145}, {56, 118}, {76, 104} },
{ {196, 102}, {205, 118}, {174, 196}, {152, 207}, {102, 194}, {100, 175}, {131, 109} },
{ {372, 101}, {377, 119}, {337, 238}, {324, 248}, {240, 229}, {199, 214}, {232, 123}, {245, 103} },
{ {463, 86}, {563, 112}, {574, 135}, {596, 221}, {518, 298}, {412, 266}, {385, 164}, {462, 86} }
};
Mat image(600, 600, CV_8UC1, Scalar(0));
for (vector<Point>& polygon : inputPoints) {
polylines(image, { polygon }, true, Scalar(255), 1);
}
findContours(image, contours, RETR_LIST, CHAIN_APPROX_NONE);
}
vector<vector<Point>> contours;
};
TEST_F(ApproxPolyN, accuracyInt)
{
vector<vector<Point>> rightCorners = {
{ {72, 187}, {37, 176}, {42, 127}, {133, 64} },
{ {168, 212}, {92, 192}, {131, 109}, {213, 100} },
{ {72, 187}, {37, 176}, {42, 127}, {133, 64} },
{ {384, 100}, {333, 251}, {197, 220}, {239, 103} },
{ {168, 212}, {92, 192}, {131, 109}, {213, 100} },
{ {333, 251}, {197, 220}, {239, 103}, {384, 100} },
{ {542, 6}, {596, 221}, {518, 299}, {312, 236} },
{ {596, 221}, {518, 299}, {312, 236}, {542, 6} }
};
EXPECT_EQ(rightCorners.size(), contours.size());
for (size_t i = 0; i < contours.size(); ++i) {
std::vector<Point> corners;
approxPolyN(contours[i], corners, 4, -1, true);
ASSERT_EQ(rightCorners[i], corners );
}
}
TEST_F(ApproxPolyN, accuracyFloat)
{
vector<vector<Point2f>> rightCorners = {
{ {72.f, 187.f}, {37.f, 176.f}, {42.f, 127.f}, {133.f, 64.f} },
{ {168.f, 212.f}, {92.f, 192.f}, {131.f, 109.f}, {213.f, 100.f} },
{ {72.f, 187.f}, {37.f, 176.f}, {42.f, 127.f}, {133.f, 64.f} },
{ {384.f, 100.f}, {333.f, 251.f}, {197.f, 220.f}, {239.f, 103.f} },
{ {168.f, 212.f}, {92.f, 192.f}, {131.f, 109.f}, {213.f, 100.f} },
{ {333.f, 251.f}, {197.f, 220.f}, {239.f, 103.f}, {384.f, 100.f} },
{ {542.f, 6.f}, {596.f, 221.f}, {518.f, 299.f}, {312.f, 236.f} },
{ {596.f, 221.f}, {518.f, 299.f}, {312.f, 236.f}, {542.f, 6.f} }
};
EXPECT_EQ(rightCorners.size(), contours.size());
for (size_t i = 0; i < contours.size(); ++i) {
std::vector<Point2f> corners;
approxPolyN(contours[i], corners, 4, -1, true);
EXPECT_LT(cvtest::norm(rightCorners[i], corners, NORM_INF), .5f);
}
}
TEST_F(ApproxPolyN, bad_args)
{
Mat contour(10, 1, CV_32FC2);
vector<vector<Point>> bad_contours;
vector<Point> corners;
ASSERT_ANY_THROW(approxPolyN(contour, corners, 0));
ASSERT_ANY_THROW(approxPolyN(contour, corners, 3, 0));
ASSERT_ANY_THROW(approxPolyN(bad_contours, corners, 4));
}
}} // namespace

@ -40,134 +40,10 @@
//M*/
#include "test_precomp.hpp"
#include "opencv2/core/core_c.h"
namespace opencv_test { namespace {
class CV_CannyTest : public cvtest::ArrayTest
{
public:
CV_CannyTest(bool custom_deriv = false);
protected:
void get_test_array_types_and_sizes( int test_case_idx, vector<vector<Size> >& sizes, vector<vector<int> >& types );
double get_success_error_level( int test_case_idx, int i, int j );
int prepare_test_case( int test_case_idx );
void run_func();
void prepare_to_validation( int );
int validate_test_results( int /*test_case_idx*/ );
int aperture_size;
bool use_true_gradient;
double threshold1, threshold2;
bool test_cpp;
bool test_custom_deriv;
Mat img;
};
CV_CannyTest::CV_CannyTest(bool custom_deriv)
{
test_array[INPUT].push_back(NULL);
test_array[OUTPUT].push_back(NULL);
test_array[REF_OUTPUT].push_back(NULL);
element_wise_relative_error = true;
aperture_size = 0;
use_true_gradient = false;
threshold1 = threshold2 = 0;
test_custom_deriv = custom_deriv;
const char imgPath[] = "shared/fruits.png";
img = cv::imread(cvtest::TS::ptr()->get_data_path() + imgPath, IMREAD_GRAYSCALE);
}
void CV_CannyTest::get_test_array_types_and_sizes( int test_case_idx,
vector<vector<Size> >& sizes,
vector<vector<int> >& types )
{
RNG& rng = ts->get_rng();
double thresh_range;
cvtest::ArrayTest::get_test_array_types_and_sizes( test_case_idx, sizes, types );
types[INPUT][0] = types[OUTPUT][0] = types[REF_OUTPUT][0] = CV_8U;
aperture_size = cvtest::randInt(rng) % 2 ? 5 : 3;
thresh_range = aperture_size == 3 ? 300 : 1000;
threshold1 = cvtest::randReal(rng)*thresh_range;
threshold2 = cvtest::randReal(rng)*thresh_range*0.3;
if( cvtest::randInt(rng) % 2 )
CV_SWAP( threshold1, threshold2, thresh_range );
use_true_gradient = cvtest::randInt(rng) % 2 != 0;
test_cpp = (cvtest::randInt(rng) & 256) == 0;
ts->printf(cvtest::TS::LOG, "Canny(size = %d x %d, aperture_size = %d, threshold1 = %g, threshold2 = %g, L2 = %s) test_cpp = %s (test case #%d)\n",
sizes[0][0].width, sizes[0][0].height, aperture_size, threshold1, threshold2, use_true_gradient ? "TRUE" : "FALSE", test_cpp ? "TRUE" : "FALSE", test_case_idx);
}
int CV_CannyTest::prepare_test_case( int test_case_idx )
{
int code = cvtest::ArrayTest::prepare_test_case( test_case_idx );
if( code > 0 )
{
RNG& rng = ts->get_rng();
Mat& src = test_mat[INPUT][0];
//GaussianBlur(src, src, Size(11, 11), 5, 5);
if(src.cols > img.cols || src.rows > img.rows)
resize(img, src, src.size(), 0, 0, INTER_LINEAR_EXACT);
else
img(
Rect(
cvtest::randInt(rng) % (img.cols-src.cols),
cvtest::randInt(rng) % (img.rows-src.rows),
src.cols,
src.rows
)
).copyTo(src);
GaussianBlur(src, src, Size(5, 5), 0);
}
return code;
}
double CV_CannyTest::get_success_error_level( int /*test_case_idx*/, int /*i*/, int /*j*/ )
{
return 0;
}
void CV_CannyTest::run_func()
{
if (test_custom_deriv)
{
cv::Mat _out = cv::cvarrToMat(test_array[OUTPUT][0]);
cv::Mat src = cv::cvarrToMat(test_array[INPUT][0]);
cv::Mat dx, dy;
int m = aperture_size;
Point anchor(m/2, m/2);
Mat dxkernel = cvtest::calcSobelKernel2D( 1, 0, m, 0 );
Mat dykernel = cvtest::calcSobelKernel2D( 0, 1, m, 0 );
cvtest::filter2D(src, dx, CV_16S, dxkernel, anchor, 0, BORDER_REPLICATE);
cvtest::filter2D(src, dy, CV_16S, dykernel, anchor, 0, BORDER_REPLICATE);
cv::Canny(dx, dy, _out, threshold1, threshold2, use_true_gradient);
}
else
{
cv::Mat _out = cv::cvarrToMat(test_array[OUTPUT][0]);
cv::Canny(cv::cvarrToMat(test_array[INPUT][0]), _out, threshold1, threshold2,
aperture_size, use_true_gradient);
}
}
static void
cannyFollow( int x, int y, float lowThreshold, const Mat& mag, Mat& dst )
static void Canny_reference_follow( int x, int y, float lowThreshold, const Mat& mag, Mat& dst )
{
static const int ofs[][2] = {{1,0},{1,-1},{0,-1},{-1,-1},{-1,0},{-1,1},{0,1},{1,1}};
int i;
@ -182,16 +58,15 @@ cannyFollow( int x, int y, float lowThreshold, const Mat& mag, Mat& dst )
(unsigned)y1 < (unsigned)mag.rows &&
mag.at<float>(y1, x1) > lowThreshold &&
!dst.at<uchar>(y1, x1) )
cannyFollow( x1, y1, lowThreshold, mag, dst );
Canny_reference_follow( x1, y1, lowThreshold, mag, dst );
}
}
static void
test_Canny( const Mat& src, Mat& dst,
static void Canny_reference( const Mat& src, Mat& dst,
double threshold1, double threshold2,
int aperture_size, bool use_true_gradient )
{
dst.create(src.size(), src.type());
int m = aperture_size;
Point anchor(m/2, m/2);
const double tan_pi_8 = tan(CV_PI/8.);
@ -274,47 +149,80 @@ test_Canny( const Mat& src, Mat& dst,
{
for( x = 0; x < width; x++ )
if( mag.at<float>(y, x) > highThreshold && !dst.at<uchar>(y, x) )
cannyFollow( x, y, lowThreshold, mag, dst );
Canny_reference_follow( x, y, lowThreshold, mag, dst );
}
}
//==============================================================================
void CV_CannyTest::prepare_to_validation( int )
{
Mat src = test_mat[INPUT][0], dst = test_mat[REF_OUTPUT][0];
test_Canny( src, dst, threshold1, threshold2, aperture_size, use_true_gradient );
}
// aperture, true gradient
typedef testing::TestWithParam<testing::tuple<int, bool>> Canny_Modes;
TEST_P(Canny_Modes, accuracy)
{
const int aperture = get<0>(GetParam());
const bool trueGradient = get<1>(GetParam());
const double range = aperture == 3 ? 300. : 1000.;
RNG & rng = TS::ptr()->get_rng();
int CV_CannyTest::validate_test_results( int test_case_idx )
for (int ITER = 0; ITER < 20; ++ITER)
{
int code = cvtest::TS::OK, nz0;
prepare_to_validation(test_case_idx);
SCOPED_TRACE(cv::format("iteration %d", ITER));
double err = cvtest::norm(test_mat[OUTPUT][0], test_mat[REF_OUTPUT][0], NORM_L1);
if( err == 0 )
return code;
const std::string fname = cvtest::findDataFile("shared/fruits.png");
const Mat original = cv::imread(fname, IMREAD_GRAYSCALE);
if( err != cvRound(err) || cvRound(err)%255 != 0 )
const double thresh1 = rng.uniform(0., range);
const double thresh2 = rng.uniform(0., range * 0.3);
const Size sz(rng.uniform(127, 800), rng.uniform(127, 600));
const Size osz = original.size();
// preparation
Mat img;
if (sz.width >= osz.width || sz.height >= osz.height)
{
ts->printf( cvtest::TS::LOG, "Some of the pixels, produced by Canny, are not 0's or 255's; the difference is %g\n", err );
ts->set_failed_test_info( cvtest::TS::FAIL_INVALID_OUTPUT );
return code;
// larger image -> scale
resize(original, img, sz, 0, 0, INTER_LINEAR_EXACT);
}
else
{
// smaller image -> crop
Point origin(rng.uniform(0, osz.width - sz.width), rng.uniform(0, osz.height - sz.height));
Rect roi(origin, sz);
original(roi).copyTo(img);
}
GaussianBlur(img, img, Size(5, 5), 0);
nz0 = cvRound(cvtest::norm(test_mat[REF_OUTPUT][0], NORM_L1)/255);
err = (err/255/MAX(nz0,100))*100;
if( err > 1 )
// regular function
Mat result;
{
ts->printf( cvtest::TS::LOG, "Too high percentage of non-matching edge pixels = %g%%\n", err);
ts->set_failed_test_info( cvtest::TS::FAIL_BAD_ACCURACY );
cv::Canny(img, result, thresh1, thresh2, aperture, trueGradient);
}
return code;
// custom derivatives
Mat customResult;
{
Mat dxkernel = cvtest::calcSobelKernel2D(1, 0, aperture, 0);
Mat dykernel = cvtest::calcSobelKernel2D(0, 1, aperture, 0);
Point anchor(aperture / 2, aperture / 2);
cv::Mat dx, dy;
cvtest::filter2D(img, dx, CV_16S, dxkernel, anchor, 0, BORDER_REPLICATE);
cvtest::filter2D(img, dy, CV_16S, dykernel, anchor, 0, BORDER_REPLICATE);
cv::Canny(dx, dy, customResult, thresh1, thresh2, trueGradient);
}
Mat reference;
Canny_reference(img, reference, thresh1, thresh2, aperture, trueGradient);
EXPECT_MAT_NEAR(result, reference, 0);
EXPECT_MAT_NEAR(customResult, reference, 0);
}
}
TEST(Imgproc_Canny, accuracy) { CV_CannyTest test; test.safe_run(); }
TEST(Imgproc_Canny, accuracy_deriv) { CV_CannyTest test(true); test.safe_run(); }
INSTANTIATE_TEST_CASE_P(/**/, Canny_Modes,
testing::Combine(
testing::Values(3, 5),
testing::Values(true, false)));
}} // namespace
/* End of file. */

@ -159,17 +159,17 @@ int CV_ColorCvtBaseTest::prepare_test_case( int test_case_idx )
void CV_ColorCvtBaseTest::run_func()
{
CvArr* out0 = test_array[OUTPUT][0];
cv::Mat _out0 = cv::cvarrToMat(out0), _out1 = cv::cvarrToMat(test_array[OUTPUT][1]);
cv::Mat out0 = test_mat[OUTPUT][0];
cv::Mat _out0 = out0, _out1 = test_mat[OUTPUT][1];
cv::cvtColor( cv::cvarrToMat(inplace ? out0 : test_array[INPUT][0]), _out0, fwd_code, _out0.channels());
cv::cvtColor( inplace ? out0 : test_mat[INPUT][0], _out0, fwd_code, _out0.channels());
if( inplace )
{
cvCopy( out0, test_array[OUTPUT][1] );
out0 = test_array[OUTPUT][1];
out0.copyTo(test_mat[OUTPUT][1]);
out0 = test_mat[OUTPUT][1];
}
cv::cvtColor(cv::cvarrToMat(out0), _out1, inv_code, _out1.channels());
cv::cvtColor(out0, _out1, inv_code, _out1.channels());
}
@ -1722,8 +1722,8 @@ double CV_ColorBayerTest::get_success_error_level( int /*test_case_idx*/, int /*
void CV_ColorBayerTest::run_func()
{
cv::Mat _out = cv::cvarrToMat(test_array[OUTPUT][0]);
cv::cvtColor(cv::cvarrToMat(test_array[INPUT][0]), _out, fwd_code, _out.channels());
cv::Mat _out = test_mat[OUTPUT][0];
cv::cvtColor(test_mat[INPUT][0], _out, fwd_code, _out.channels());
}

File diff suppressed because it is too large

@ -40,7 +40,6 @@
//M*/
#include "test_precomp.hpp"
#include "opencv2/core/core_c.h"
namespace opencv_test { namespace {
@ -200,7 +199,7 @@ void CV_DerivBaseTest::get_test_array_types_and_sizes( int test_case_idx,
int sameDepth = cvtest::randInt(rng) % 2;
types[OUTPUT][0] = types[REF_OUTPUT][0] = sameDepth ? depth : CV_MAKETYPE(depth==CV_8U?CV_16S:CV_32F,1);
_aperture_size = (cvtest::randInt(rng)%5)*2 - 1;
sizes[INPUT][1] = aperture_size = cvSize(_aperture_size, _aperture_size);
sizes[INPUT][1] = aperture_size = Size(_aperture_size, _aperture_size);
}
@ -249,21 +248,21 @@ void CV_SobelTest::get_test_array_types_and_sizes( int test_case_idx,
}
if( _aperture_size < 0 )
aperture_size = cvSize(3, 3);
aperture_size = Size(3, 3);
else if( _aperture_size == 1 )
{
if( dx == 0 )
aperture_size = cvSize(1, 3);
aperture_size = Size(1, 3);
else if( dy == 0 )
aperture_size = cvSize(3, 1);
aperture_size = Size(3, 1);
else
{
_aperture_size = 3;
aperture_size = cvSize(3, 3);
aperture_size = Size(3, 3);
}
}
else
aperture_size = cvSize(_aperture_size, _aperture_size);
aperture_size = Size(_aperture_size, _aperture_size);
sizes[INPUT][1] = aperture_size;
anchor.x = aperture_size.width / 2;
@ -377,10 +376,10 @@ void CV_LaplaceTest::get_test_array_types_and_sizes( int test_case_idx,
{
if( _aperture_size < 0 )
_aperture_size = 1;
aperture_size = cvSize(3, 3);
aperture_size = Size(3, 3);
}
else
aperture_size = cvSize(_aperture_size, _aperture_size);
aperture_size = Size(_aperture_size, _aperture_size);
sizes[INPUT][1] = aperture_size;
anchor.x = aperture_size.width / 2;
@ -522,7 +521,7 @@ CV_PreCornerDetectTest::CV_PreCornerDetectTest() : CV_FeatureSelBaseTest( 1 )
void CV_PreCornerDetectTest::run_func()
{
preCornerDetect( cvarrToMat( test_array[INPUT][0] ), cvarrToMat( test_array[OUTPUT][0] ), aperture_size, BORDER_REPLICATE );
cv::preCornerDetect( test_mat[INPUT][0], test_mat[OUTPUT][0], aperture_size, BORDER_REPLICATE );
}

@ -39,8 +39,9 @@
//
//M*/
#include "opencv2/ts/ocl_test.hpp"
#include "opencv2/ts/ts_gtest.h"
#include "test_precomp.hpp"
#include "opencv2/core/core_c.h"
namespace opencv_test { namespace {
@ -360,8 +361,8 @@ void CV_RemapTest::fill_array( int test_case_idx, int i, int j, Mat& arr )
void CV_RemapTest::run_func()
{
remap( cvarrToMat( test_array[INPUT][0] ), cvarrToMat( test_array[INPUT_OUTPUT][0] ),
cvarrToMat( test_array[INPUT][1] ), cvarrToMat( test_array[INPUT][2] ), interpolation );
cv::remap(test_mat[INPUT][0], test_mat[INPUT_OUTPUT][0],
test_mat[INPUT][1], test_mat[INPUT][2], interpolation );
}
@ -465,7 +466,7 @@ protected:
double get_success_error_level( int test_case_idx, int i, int j );
void fill_array( int test_case_idx, int i, int j, Mat& arr );
CvPoint2D32f center;
Point2f center;
bool test_cpp;
};
@ -517,8 +518,8 @@ void CV_GetRectSubPixTest::fill_array( int test_case_idx, int i, int j, Mat& arr
void CV_GetRectSubPixTest::run_func()
{
cv::Mat _out = cv::cvarrToMat(test_array[INPUT_OUTPUT][0]);
cv::getRectSubPix( cv::cvarrToMat(test_array[INPUT][0]), _out.size(), center, _out, _out.type());
cv::Mat _out = test_mat[INPUT_OUTPUT][0];
cv::getRectSubPix(test_mat[INPUT][0], _out.size(), center, _out, _out.type());
}

@ -43,6 +43,8 @@
#include "test_precomp.hpp"
#include "opencv2/core/core_c.h"
#define CV_DXT_MUL_CONJ 8
namespace opencv_test { namespace {
/// phase correlation
@ -182,7 +184,7 @@ void CV_DivSpectrumsTest::get_test_array_types_and_sizes( int test_case_idx, vec
// Get the flag of the input.
const int rand_int_flags = cvtest::randInt(rng);
flags = rand_int_flags & (CV_DXT_MUL_CONJ | CV_DXT_ROWS);
flags = rand_int_flags & (CV_DXT_MUL_CONJ | DFT_ROWS);
// Get input type.
const int rand_int_type = cvtest::randInt(rng);

@ -260,4 +260,50 @@ TEST(GaussianBlur_Bitexact, overflow_20792)
EXPECT_GT(count, nintyPercent);
}
CV_ENUM(GaussInputType, CV_8U, CV_16S);
CV_ENUM(GaussBorder, BORDER_CONSTANT, BORDER_REPLICATE, BORDER_REFLECT_101);
struct GaussianBlurVsBitexact: public testing::TestWithParam<tuple<GaussInputType, int, double, GaussBorder>>
{
virtual void SetUp()
{
orig = imread(findDataFile("shared/lena.png"));
EXPECT_FALSE(orig.empty()) << "Cannot find test image shared/lena.png";
}
Mat orig;
};
// NOTE: The test was designed for IPP (-DOPENCV_IPP_GAUSSIAN_BLUR=ON)
// Should be extended after new HAL integration
TEST_P(GaussianBlurVsBitexact, approx)
{
auto testParams = GetParam();
int dtype = get<0>(testParams);
int ksize = get<1>(testParams);
double sigma = get<2>(testParams);
int border = get<3>(testParams);
Mat src;
orig.convertTo(src, dtype);
cv::Mat gt;
GaussianBlur(src, gt, Size(ksize, ksize), sigma, sigma, border, ALGO_HINT_ACCURATE);
cv::Mat dst;
GaussianBlur(src, dst, Size(ksize, ksize), sigma, sigma, border, ALGO_HINT_APPROX);
EXPECT_LE(cvtest::norm(dst, gt, NORM_INF), 1);
EXPECT_LE(cvtest::norm(dst, gt, NORM_L1 | NORM_RELATIVE), 0.06); // Less than 6% of pixels differ
}
INSTANTIATE_TEST_CASE_P(/*nothing*/, GaussianBlurVsBitexact,
testing::Combine(
GaussInputType::all(),
testing::Values(3, 5, 7),
testing::Values(0.75, 1.25),
GaussBorder::all()
)
);
}} // namespace

@ -40,131 +40,134 @@
//M*/
#include "test_precomp.hpp"
#include "opencv2/core/core_c.h"
namespace opencv_test { namespace {
class CV_TemplMatchTest : public cvtest::ArrayTest
{
public:
CV_TemplMatchTest();
protected:
int read_params( const cv::FileStorage& fs );
void get_test_array_types_and_sizes( int test_case_idx, vector<vector<Size> >& sizes, vector<vector<int> >& types );
void get_minmax_bounds( int i, int j, int type, Scalar& low, Scalar& high );
double get_success_error_level( int test_case_idx, int i, int j );
void run_func();
void prepare_to_validation( int );
int max_template_size;
int method;
bool test_cpp;
};
CV_TemplMatchTest::CV_TemplMatchTest()
{
test_array[INPUT].push_back(NULL);
test_array[INPUT].push_back(NULL);
test_array[OUTPUT].push_back(NULL);
test_array[REF_OUTPUT].push_back(NULL);
element_wise_relative_error = false;
max_template_size = 100;
method = 0;
test_cpp = false;
}
int CV_TemplMatchTest::read_params( const cv::FileStorage& fs )
{
int code = cvtest::ArrayTest::read_params( fs );
if( code < 0 )
return code;
read( find_param( fs, "max_template_size" ), max_template_size, max_template_size );
max_template_size = cvtest::clipInt( max_template_size, 1, 100 );
return code;
}
void CV_TemplMatchTest::get_minmax_bounds( int i, int j, int type, Scalar& low, Scalar& high )
{
cvtest::ArrayTest::get_minmax_bounds( i, j, type, low, high );
int depth = CV_MAT_DEPTH(type);
if( depth == CV_32F )
{
low = Scalar::all(-10.);
high = Scalar::all(10.);
}
}
void CV_TemplMatchTest::get_test_array_types_and_sizes( int test_case_idx,
vector<vector<Size> >& sizes, vector<vector<int> >& types )
{
RNG& rng = ts->get_rng();
int depth = cvtest::randInt(rng) % 2, cn = cvtest::randInt(rng) & 1 ? 3 : 1;
cvtest::ArrayTest::get_test_array_types_and_sizes( test_case_idx, sizes, types );
depth = depth == 0 ? CV_8U : CV_32F;
types[INPUT][0] = types[INPUT][1] = CV_MAKETYPE(depth,cn);
types[OUTPUT][0] = types[REF_OUTPUT][0] = CV_32FC1;
sizes[INPUT][1].width = cvtest::randInt(rng)%MIN(sizes[INPUT][1].width,max_template_size) + 1;
sizes[INPUT][1].height = cvtest::randInt(rng)%MIN(sizes[INPUT][1].height,max_template_size) + 1;
sizes[OUTPUT][0].width = sizes[INPUT][0].width - sizes[INPUT][1].width + 1;
sizes[OUTPUT][0].height = sizes[INPUT][0].height - sizes[INPUT][1].height + 1;
sizes[REF_OUTPUT][0] = sizes[OUTPUT][0];
method = cvtest::randInt(rng)%6;
test_cpp = (cvtest::randInt(rng) & 256) == 0;
}
double CV_TemplMatchTest::get_success_error_level( int /*test_case_idx*/, int /*i*/, int /*j*/ )
{
if( test_mat[INPUT][1].depth() == CV_8U ||
(method >= cv::TM_CCOEFF && test_mat[INPUT][1].cols*test_mat[INPUT][1].rows <= 2) )
return 1e-2;
else
return 1e-3;
TEST(Imgproc_MatchTemplate, bug_9597) {
const uint8_t img[] = {
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 246, 246, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 246, 246, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 246, 246, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 246, 246, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 246,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 246, 246, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 246, 246, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 246, 246, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 246, 246, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 247, 247, 247, 247, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 247, 247, 245, 245, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245};
const uint8_t tmpl[] = {
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245 };
cv::Mat cvimg(cv::Size(61, 82), CV_8UC1, (void*)img, cv::Mat::AUTO_STEP);
cv::Mat cvtmpl(cv::Size(17, 17), CV_8UC1, (void*)tmpl, cv::Mat::AUTO_STEP);
cv::Mat result;
cv::matchTemplate(cvimg, cvtmpl, result, cv::TM_SQDIFF);
double minValue;
cv::minMaxLoc(result, &minValue, NULL, NULL, NULL);
ASSERT_GE(minValue, 0);
}
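// The test above guards against TM_SQDIFF producing slightly negative scores on nearly
// uniform data due to floating-point round-off. A minimal sketch of the same lookup
// pattern, assuming the caller supplies the scene and patch images; locateTemplateSketch
// is a hypothetical helper and is not part of the test suite:
static cv::Point locateTemplateSketch(const cv::Mat& scene, const cv::Mat& patch)
{
    cv::Mat score;
    cv::matchTemplate(scene, patch, score, cv::TM_SQDIFF);
    // For TM_SQDIFF the best match is the global minimum of the score map,
    // and it is expected to be non-negative (the property checked by bug_9597).
    double minVal = 0.0;
    cv::Point minLoc;
    cv::minMaxLoc(score, &minVal, nullptr, &minLoc, nullptr);
    CV_Assert(minVal >= 0.);
    return minLoc;
}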
//==============================================================================
void CV_TemplMatchTest::run_func()
{
cv::Mat _out = cv::cvarrToMat(test_array[OUTPUT][0]);
cv::matchTemplate(cv::cvarrToMat(test_array[INPUT][0]), cv::cvarrToMat(test_array[INPUT][1]), _out, method);
}
static void matchTemplate_reference(Mat & img, Mat & templ, Mat & result, const int method)
{
CV_Assert(cv::TM_SQDIFF <= method && method <= cv::TM_CCOEFF_NORMED);
const Size res_sz(img.cols - templ.cols + 1, img.rows - templ.rows + 1);
result.create(res_sz, CV_32FC1);
static void cvTsMatchTemplate( const CvMat* img, const CvMat* templ, CvMat* result, int method )
{
int i, j, k, l;
int depth = CV_MAT_DEPTH(img->type), cn = CV_MAT_CN(img->type);
int width_n = templ->cols*cn, height = templ->rows;
int a_step = img->step / CV_ELEM_SIZE(img->type & CV_MAT_DEPTH_MASK);
int b_step = templ->step / CV_ELEM_SIZE(templ->type & CV_MAT_DEPTH_MASK);
CvScalar b_mean = CV_STRUCT_INITIALIZER, b_sdv = CV_STRUCT_INITIALIZER;
double b_denom = 1., b_sum2 = 0;
int area = templ->rows*templ->cols;
const int depth = img.depth();
const int cn = img.channels();
const int area = templ.size().area();
const int width_n = templ.cols * cn;
const int height = templ.rows;
int a_step = (int)(img.step / img.elemSize1());
int b_step = (int)(templ.step / templ.elemSize1());
cvAvgSdv(templ, &b_mean, &b_sdv);
Scalar b_mean = Scalar::all(0);
Scalar b_sdv = Scalar::all(0);
cv::meanStdDev(templ, b_mean, b_sdv);
for( i = 0; i < cn; i++ )
double b_sum2 = 0.;
for (int i = 0; i < cn; i++ )
b_sum2 += (b_sdv.val[i] * b_sdv.val[i] + b_mean.val[i] * b_mean.val[i]) * area;
if (b_sdv.val[0] * b_sdv.val[0] + b_sdv.val[1] * b_sdv.val[1] +
b_sdv.val[2] * b_sdv.val[2] + b_sdv.val[3] * b_sdv.val[3] < DBL_EPSILON &&
method == cv::TM_CCOEFF_NORMED)
{
cvSet( result, cvScalarAll(1.) );
result = Scalar::all(1.);
return;
}
if( method & 1 )
double b_denom = 1.;
if (method & 1) // _NORMED
{
b_denom = 0;
if (method != cv::TM_CCOEFF_NORMED)
@@ -173,7 +176,7 @@ static void cvTsMatchTemplate( const CvMat* img, const CvMat* templ, CvMat* resu
}
else
{
for( i = 0; i < cn; i++ )
for (int i = 0; i < cn; i++)
b_denom += b_sdv.val[i] * b_sdv.val[i] * area;
}
b_denom = sqrt(b_denom);
@@ -181,11 +184,9 @@ static void cvTsMatchTemplate( const CvMat* img, const CvMat* templ, CvMat* resu
b_denom = 1.;
}
CV_Assert( cv::TM_SQDIFF <= method && method <= cv::TM_CCOEFF_NORMED );
for( i = 0; i < result->rows; i++ )
for (int i = 0; i < result.rows; i++)
{
for( j = 0; j < result->cols; j++ )
for (int j = 0; j < result.cols; j++)
{
Scalar a_sum(0), a_sum2(0);
Scalar ccorr(0);
@@ -193,13 +194,13 @@ static void cvTsMatchTemplate( const CvMat* img, const CvMat* templ, CvMat* resu
if (depth == CV_8U)
{
const uchar* a = img->data.ptr + i*img->step + j*cn;
const uchar* b = templ->data.ptr;
const uchar* a = img.ptr<uchar>(i, j); // replaces the old C-API arithmetic: data.ptr + i*img->step + j*cn
const uchar* b = templ.ptr<uchar>();
if( cn == 1 || method < cv::TM_CCOEFF )
{
for( k = 0; k < height; k++, a += a_step, b += b_step )
for( l = 0; l < width_n; l++ )
for (int k = 0; k < height; k++, a += a_step, b += b_step)
for (int l = 0; l < width_n; l++)
{
ccorr.val[0] += a[l]*b[l];
a_sum.val[0] += a[l];
@@ -208,8 +209,8 @@ static void cvTsMatchTemplate( const CvMat* img, const CvMat* templ, CvMat* resu
}
else
{
for( k = 0; k < height; k++, a += a_step, b += b_step )
for( l = 0; l < width_n; l += 3 )
for (int k = 0; k < height; k++, a += a_step, b += b_step)
for (int l = 0; l < width_n; l += 3)
{
ccorr.val[0] += a[l]*b[l];
ccorr.val[1] += a[l+1]*b[l+1];
@@ -223,15 +224,15 @@ static void cvTsMatchTemplate( const CvMat* img, const CvMat* templ, CvMat* resu
}
}
}
else
else // CV_32F
{
const float* a = (const float*)(img->data.ptr + i*img->step) + j*cn;
const float* b = (const float*)templ->data.ptr;
const float* a = img.ptr<float>(i, j); // replaces the old C-API arithmetic: (const float*)(img->data.ptr + i*img->step) + j*cn
const float* b = templ.ptr<float>();
if( cn == 1 || method < cv::TM_CCOEFF )
{
for( k = 0; k < height; k++, a += a_step, b += b_step )
for( l = 0; l < width_n; l++ )
for (int k = 0; k < height; k++, a += a_step, b += b_step)
for (int l = 0; l < width_n; l++)
{
ccorr.val[0] += a[l]*b[l];
a_sum.val[0] += a[l];
@@ -240,8 +241,8 @@ static void cvTsMatchTemplate( const CvMat* img, const CvMat* templ, CvMat* resu
}
else
{
for( k = 0; k < height; k++, a += a_step, b += b_step )
for( l = 0; l < width_n; l += 3 )
for (int k = 0; k < height; k++, a += a_step, b += b_step)
for (int l = 0; l < width_n; l += 3)
{
ccorr.val[0] += a[l]*b[l];
ccorr.val[1] += a[l+1]*b[l+1];
@@ -295,130 +296,49 @@ static void cvTsMatchTemplate( const CvMat* img, const CvMat* templ, CvMat* resu
else
value = method != cv::TM_SQDIFF_NORMED ? 0 : 1;
}
((float*)(result->data.ptr + result->step*i))[j] = (float)value;
result.at<float>(i, j) = (float)value;
}
}
}
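// For reference, the quantities assembled above follow the standard matchTemplate
// definitions (T = template, I = image patch under the current offset):
//   TM_SQDIFF:  R(x,y) = sum_{x',y'} ( T(x',y') - I(x+x',y+y') )^2
//   TM_CCORR:   R(x,y) = sum_{x',y'}   T(x',y') * I(x+x',y+y')
//   TM_CCOEFF:  R(x,y) = sum_{x',y'} ( T(x',y') - mean(T) ) * ( I(x+x',y+y') - mean(I) )
// The *_NORMED variants divide by sqrt( sum T'^2 * sum I'^2 ), where T' and I' are
// mean-subtracted for TM_CCOEFF_NORMED and taken as-is otherwise; b_denom above holds
// the template part of that factor.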
//==============================================================================
void CV_TemplMatchTest::prepare_to_validation( int /*test_case_idx*/ )
{
CvMat _input = cvMat(test_mat[INPUT][0]), _templ = cvMat(test_mat[INPUT][1]);
CvMat _output = cvMat(test_mat[REF_OUTPUT][0]);
cvTsMatchTemplate( &_input, &_templ, &_output, method );
CV_ENUM(MatchModes, TM_SQDIFF, TM_SQDIFF_NORMED, TM_CCORR, TM_CCORR_NORMED, TM_CCOEFF, TM_CCOEFF_NORMED);
//if( ts->get_current_test_info()->test_case_idx == 0 )
/*{
CvFileStorage* fs = cvOpenFileStorage( "_match_template.yml", 0, CV_STORAGE_WRITE );
cvWrite( fs, "image", &test_mat[INPUT][0] );
cvWrite( fs, "template", &test_mat[INPUT][1] );
cvWrite( fs, "ref", &test_mat[REF_OUTPUT][0] );
cvWrite( fs, "opencv", &test_mat[OUTPUT][0] );
cvWriteInt( fs, "method", method );
cvReleaseFileStorage( &fs );
}*/
typedef testing::TestWithParam<testing::tuple<perf::MatDepth, int, MatchModes>> matchTemplate_Modes;
if( method >= cv::TM_CCOEFF )
TEST_P(matchTemplate_Modes, accuracy)
{
// avoid numerical stability problems in singular cases (when the results are near to 0)
const double delta = 10.;
test_mat[REF_OUTPUT][0] += Scalar::all(delta);
test_mat[OUTPUT][0] += Scalar::all(delta);
}
}
const int data_type = CV_MAKE_TYPE(get<0>(GetParam()), get<1>(GetParam()));
const int method = get<2>(GetParam());
RNG & rng = TS::ptr()->get_rng();
TEST(Imgproc_MatchTemplate, accuracy) { CV_TemplMatchTest test; test.safe_run(); }
for (int ITER = 0; ITER < 20; ++ITER)
{
SCOPED_TRACE(cv::format("iteration %d", ITER));
}
const Size imgSize(rng.uniform(128, 320), rng.uniform(128, 240));
const Size templSize(rng.uniform(1, 30), rng.uniform(1, 30));
Mat img(imgSize, data_type, Scalar::all(0));
Mat templ(templSize, data_type, Scalar::all(0));
cvtest::randUni(rng, img, Scalar::all(0), Scalar::all(255));
cvtest::randUni(rng, templ, Scalar::all(0), Scalar::all(255));
TEST(Imgproc_MatchTemplate, bug_9597) {
const uint8_t img[] = {
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 246, 246, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 246, 246, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 246, 246, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 246, 246, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 246,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 246, 246, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 246, 246, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 246, 246, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 246, 246, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 247, 247, 247, 247, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 247, 247, 245, 245, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245};
const uint8_t tmpl[] = {
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245 };
cv::Mat cvimg(cv::Size(61, 82), CV_8UC1, (void*)img, cv::Mat::AUTO_STEP);
cv::Mat cvtmpl(cv::Size(17, 17), CV_8UC1, (void*)tmpl, cv::Mat::AUTO_STEP);
cv::Mat result;
cv::matchTemplate(cvimg, cvtmpl, result, cv::TM_SQDIFF);
double minValue;
cv::minMaxLoc(result, &minValue, NULL, NULL, NULL);
ASSERT_GE(minValue, 0);
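The assertion above is justified by the definition of TM_SQDIFF: the response at each location is a sum of squared differences between the template T and the image patch I,

    R(x,y) = \sum_{x',y'} \left( T(x',y') - I(x+x', y+y') \right)^2 \ge 0,

so a negative minimum would indicate an accumulation or overflow bug in the implementation rather than a property of the input data.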
Mat result;
cv::matchTemplate(img, templ, result, method);
Mat reference;
matchTemplate_reference(img, templ, reference, method);
EXPECT_MAT_NEAR_RELATIVE(result, reference, 1e-3);
}
} // namespace
}
INSTANTIATE_TEST_CASE_P(/**/,
matchTemplate_Modes,
testing::Combine(
testing::Values(CV_8U, CV_32F),
testing::Values(1, 3),
testing::Values(TM_SQDIFF, TM_SQDIFF_NORMED, TM_CCORR, TM_CCORR_NORMED, TM_CCOEFF, TM_CCOEFF_NORMED)));
}} // namespace

@@ -90,7 +90,12 @@
apply plugin: 'com.android.library'
apply plugin: 'maven-publish'
try {
@KOTLIN_PLUGIN_DECLARATION@
println "Configure OpenCV with Kotlin"
} catch (Exception e) {
println "Configure OpenCV without Kotlin"
}
def openCVersionName = "@OPENCV_VERSION@"
def openCVersionCode = ((@OPENCV_VERSION_MAJOR@ * 100 + @OPENCV_VERSION_MINOR@) * 100 + @OPENCV_VERSION_PATCH@) * 10 + 0

@@ -209,7 +209,7 @@ size.
@param imgToDenoiseIndex Target image to denoise index in srcImgs sequence
@param temporalWindowSize Number of surrounding images to use for target image denoising. Should
be odd. Images from imgToDenoiseIndex - temporalWindowSize / 2 to
imgToDenoiseIndex - temporalWindowSize / 2 from srcImgs will be used to denoise
imgToDenoiseIndex + temporalWindowSize / 2 from srcImgs will be used to denoise
srcImgs[imgToDenoiseIndex] image.
@param dst Output image with the same size and type as srcImgs images.
@param templateWindowSize Size in pixels of the template patch that is used to compute weights.
@@ -236,7 +236,7 @@ have the same type and size.
@param imgToDenoiseIndex Target image to denoise index in srcImgs sequence
@param temporalWindowSize Number of surrounding images to use for target image denoising. Should
be odd. Images from imgToDenoiseIndex - temporalWindowSize / 2 to
imgToDenoiseIndex - temporalWindowSize / 2 from srcImgs will be used to denoise
imgToDenoiseIndex + temporalWindowSize / 2 from srcImgs will be used to denoise
srcImgs[imgToDenoiseIndex] image.
@param dst Output image with the same size and type as srcImgs images.
@param templateWindowSize Size in pixels of the template patch that is used to compute weights.
@@ -263,7 +263,7 @@ size.
@param imgToDenoiseIndex Target image to denoise index in srcImgs sequence
@param temporalWindowSize Number of surrounding images to use for target image denoising. Should
be odd. Images from imgToDenoiseIndex - temporalWindowSize / 2 to
imgToDenoiseIndex - temporalWindowSize / 2 from srcImgs will be used to denoise
imgToDenoiseIndex + temporalWindowSize / 2 from srcImgs will be used to denoise
srcImgs[imgToDenoiseIndex] image.
@param dst Output image with the same size and type as srcImgs images.
@param templateWindowSize Size in pixels of the template patch that is used to compute weights.
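For context, a minimal sketch (not part of this patch) of a fastNlMeansDenoisingMulti call that matches the corrected range description; the buffer contents are placeholders:

#include <opencv2/photo.hpp>
#include <vector>

// Sketch only: denoise the middle frame of a 5-frame grayscale buffer.
// With imgToDenoiseIndex = 2 and temporalWindowSize = 3, frames 1..3 are used,
// i.e. from imgToDenoiseIndex - temporalWindowSize/2 to imgToDenoiseIndex + temporalWindowSize/2.
void denoiseMiddleFrame(const std::vector<cv::Mat>& srcImgs, cv::Mat& dst)
{
    CV_Assert(srcImgs.size() == 5);
    cv::fastNlMeansDenoisingMulti(srcImgs, dst,
                                  /*imgToDenoiseIndex=*/2,
                                  /*temporalWindowSize=*/3,
                                  /*h=*/3.0f,
                                  /*templateWindowSize=*/7,
                                  /*searchWindowSize=*/21);
}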

@@ -46,19 +46,22 @@ private:
static const uint32_t arg_outputarg_flag = 0x1;
static const uint32_t arg_arithm_op_src_flag = 0x2;
static const uint32_t arg_pathlike_flag = 0x4;
static const uint32_t arg_nd_mat_flag = 0x8;
public:
const char* name;
bool outputarg;
bool arithm_op_src;
bool pathlike;
bool nd_mat;
// more fields may be added if necessary
ArgInfo(const char* name_, uint32_t arg_) :
name(name_),
outputarg((arg_ & arg_outputarg_flag) != 0),
arithm_op_src((arg_ & arg_arithm_op_src_flag) != 0),
pathlike((arg_ & arg_pathlike_flag) != 0) {}
pathlike((arg_ & arg_pathlike_flag) != 0),
nd_mat((arg_ & arg_nd_mat_flag) != 0) {}
private:
ArgInfo(const ArgInfo&) = delete;
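For reference, the flag bits decoded by this constructor combine as follows; this is a hypothetical standalone check, not code from the patch:

// Hypothetical illustration of the ArgInfo flag encoding:
// bit 0 -> outputarg, bit 1 -> arithm_op_src, bit 2 -> pathlike, bit 3 -> nd_mat.
// The generator emits e.g. ArgInfo("pts", 9) for an output argument that must
// be kept as a single n-dimensional Mat.
ArgInfo info("pts", 0x01 | 0x08);
CV_Assert(info.outputarg && info.nd_mat && !info.pathlike && !info.arithm_op_src);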

@@ -173,7 +173,7 @@ bool pyopencv_to(PyObject* o, Mat& m, const ArgInfo& info)
CV_LOG_DEBUG(NULL, "Incoming ndarray '" << info.name << "': ndims=" << ndims << " _sizes=" << pycv_dumpArray(_sizes, ndims) << " _strides=" << pycv_dumpArray(_strides, ndims));
bool ismultichannel = ndims == 3 && _sizes[2] <= CV_CN_MAX;
bool ismultichannel = ndims == 3 && _sizes[2] <= CV_CN_MAX && !info.nd_mat;
if (pyopencv_Mat_TypePtr && PyObject_TypeCheck(o, pyopencv_Mat_TypePtr))
{
bool wrapChannels = false;

@@ -340,6 +340,21 @@ static bool pyopencv_to_generic_vec(PyObject* obj, std::vector<Tp>& value, const
{
return true;
}
if (info.nd_mat && PyArray_Check(obj))
{
/*
If obj is marked as nd mat and of array type, it is parsed to a single
mat in the target vector to avoid being split into multiple mats
*/
value.resize(1);
if (!pyopencv_to(obj, value.front(), info))
{
failmsg("Can't parse '%s'. Array item has a wrong type", info.name);
return false;
}
}
else // parse as sequence
{
if (!PySequence_Check(obj))
{
failmsg("Can't parse '%s'. Input argument doesn't provide sequence protocol", info.name);
@@ -356,6 +371,7 @@ static bool pyopencv_to_generic_vec(PyObject* obj, std::vector<Tp>& value, const
return false;
}
}
}
return true;
}

@@ -488,6 +488,10 @@ class ArgInfo(object):
return self.name + '_'
return self.name
@property
def nd_mat(self):
return '/ND' in self._modifiers
@property
def inputarg(self):
return '/O' not in self._modifiers
@@ -528,6 +532,7 @@ class ArgInfo(object):
arg = 0x01 if self.outputarg else 0x0
arg += 0x02 if self.arithm_op_src_arg else 0x0
arg += 0x04 if self.pathlike else 0x0
arg += 0x08 if self.nd_mat else 0x0
return "ArgInfo(\"%s\", %d)" % (self.name, arg)

@@ -84,6 +84,10 @@ class CppHeaderParser(object):
modlist = []
# pass 0: extracts the modifiers
if "CV_ND" in arg_str:
modlist.append("/ND")
arg_str = arg_str.replace("CV_ND", "")
if "CV_OUT" in arg_str:
modlist.append("/O")
arg_str = arg_str.replace("CV_OUT", "")

@@ -972,6 +972,10 @@ class SamplesFindFile(NewOpenCVTests):
except cv.error as _e:
pass
class AlgorithmImplHit(NewOpenCVTests):
def test_callable(self):
res = cv.getDefaultAlgorithmHint()
self.assertTrue(res is not None)
if __name__ == '__main__':
NewOpenCVTests.bootstrap()

@@ -1126,6 +1126,7 @@ void SystemInfoCollector::OnTestProgramStart(const testing::UnitTest&)
recordPropertyVerbose("cv_vcs_version", "OpenCV VCS version", getSnippetFromConfig("Version control:", "\n"));
recordPropertyVerbose("cv_build_type", "Build type", getSnippetFromConfig("Configuration:", "\n"), CV_TEST_BUILD_CONFIG);
recordPropertyVerbose("cv_compiler", "Compiler", getSnippetFromConfig("C++ Compiler:", "\n"));
recordPropertyVerbose("implementation_hint", "Algorithm hint", getSnippetFromConfig("Algorithm Hint:", "\n"));
const char* parallelFramework = cv::currentParallelFramework();
if (parallelFramework)
{

@@ -1,10 +1,5 @@
# --- obsensor ---
if(NOT HAVE_OBSENSOR)
if(APPLE)
# force to use orbbec sdk on mac
set(OBSENSOR_USE_ORBBEC_SDK ON)
endif()
if(OBSENSOR_USE_ORBBEC_SDK)
include(${CMAKE_SOURCE_DIR}/3rdparty/orbbecsdk/orbbecsdk.cmake)
download_orbbec_sdk(ORBBEC_SDK_ROOT_DIR)

@@ -1485,10 +1485,6 @@ bool CvCapture_FFMPEG::grabFrame()
if( !ic || !video_st || (!rawMode && !context) ) return false;
if( ic->streams[video_stream]->nb_frames > 0 &&
frame_number > ic->streams[video_stream]->nb_frames )
return false;
picture_pts = AV_NOPTS_VALUE_;
#if USE_AV_INTERRUPT_CALLBACK

@@ -27,7 +27,8 @@ void yoloPostProcessing(
std::vector<Rect2d>& keep_boxes,
float conf_threshold,
float iou_threshold,
const std::string& test_name
const std::string& model_name,
const int nc
);
std::vector<std::string> classes;
@@ -40,6 +41,7 @@ std::string keys =
"{ yolo | yolox | yolo model version. }"
"{ input i | | Path to input image or video file. Skip this argument to capture frames from a camera. }"
"{ classes | | Optional path to a text file with names of classes to label detected objects. }"
"{ nc | 80 | Number of classes. Default is 80 (coming from COCO dataset). }"
"{ thr | .5 | Confidence threshold. }"
"{ nms | .4 | Non-maximum suppression threshold. }"
"{ mean | 0.0 | Normalization constant. }"
@@ -107,19 +109,21 @@ void yoloPostProcessing(
std::vector<Rect2d>& keep_boxes,
float conf_threshold,
float iou_threshold,
const std::string& test_name)
const std::string& model_name,
const int nc=80)
{
// Retrieve
std::vector<int> classIds;
std::vector<float> confidences;
std::vector<Rect2d> boxes;
if (test_name == "yolov8")
if (model_name == "yolov8" || model_name == "yolov10" ||
model_name == "yolov9")
{
cv::transposeND(outs[0], {0, 2, 1}, outs[0]);
}
if (test_name == "yolonas")
if (model_name == "yolonas")
{
// outs contains 2 elements of shape [1, 8400, 80] and [1, 8400, 4]. Concat them to get [1, 8400, 84]
Mat concat_out;
@@ -131,25 +135,30 @@ void yoloPostProcessing(
// remove the second element
outs.pop_back();
// unsqueeze the first dimension
outs[0] = outs[0].reshape(0, std::vector<int>{1, 8400, 84});
outs[0] = outs[0].reshape(0, std::vector<int>{1, 8400, nc + 4});
}
// assert if last dim is 85 or 84
CV_CheckEQ(outs[0].dims, 3, "Invalid output shape. The shape should be [1, #anchors, 85 or 84]");
CV_CheckEQ((outs[0].size[2] == nc + 5 || outs[0].size[2] == 80 + 4), true, "Invalid output shape: ");
for (auto preds : outs)
{
preds = preds.reshape(1, preds.size[1]); // [1, 8400, 85] -> [8400, 85]
for (int i = 0; i < preds.rows; ++i)
{
// filter out non object
float obj_conf = (test_name == "yolov8" || test_name == "yolonas") ? 1.0f : preds.at<float>(i, 4) ;
float obj_conf = (model_name == "yolov8" || model_name == "yolonas" ||
model_name == "yolov9" || model_name == "yolov10") ? 1.0f : preds.at<float>(i, 4) ;
if (obj_conf < conf_threshold)
continue;
Mat scores = preds.row(i).colRange((test_name == "yolov8" || test_name == "yolonas") ? 4 : 5, preds.cols);
Mat scores = preds.row(i).colRange((model_name == "yolov8" || model_name == "yolonas" || model_name == "yolov9" || model_name == "yolov10") ? 4 : 5, preds.cols);
double conf;
Point maxLoc;
minMaxLoc(scores, 0, &conf, 0, &maxLoc);
conf = (test_name == "yolov8" || test_name == "yolonas") ? conf : conf * obj_conf;
conf = (model_name == "yolov8" || model_name == "yolonas" || model_name == "yolov9" || model_name == "yolov10") ? conf : conf * obj_conf;
if (conf < conf_threshold)
continue;
@@ -161,7 +170,7 @@ void yoloPostProcessing(
double h = det[3];
// [x1, y1, x2, y2]
if (test_name == "yolonas"){
if (model_name == "yolonas" || model_name == "yolov10"){
boxes.push_back(Rect2d(cx, cy, w, h));
} else {
boxes.push_back(Rect2d(cx - 0.5 * w, cy - 0.5 * h,
@@ -203,6 +212,7 @@ int main(int argc, char** argv)
// if model is default, use findFile to get the full path; otherwise use the given path
std::string weightPath = findFile(parser.get<String>("model"));
std::string yolo_model = parser.get<String>("yolo");
int nc = parser.get<int>("nc");
float confThreshold = parser.get<float>("thr");
float nmsThreshold = parser.get<float>("nms");
@@ -219,6 +229,7 @@ int main(int argc, char** argv)
// check if yolo model is valid
if (yolo_model != "yolov5" && yolo_model != "yolov6"
&& yolo_model != "yolov7" && yolo_model != "yolov8"
&& yolo_model != "yolov10" && yolo_model !="yolov9"
&& yolo_model != "yolox" && yolo_model != "yolonas")
CV_Error(Error::StsError, "Invalid yolo model: " + yolo_model);
@@ -331,7 +342,8 @@ int main(int argc, char** argv)
yoloPostProcessing(
outs, keep_classIds, keep_confidences, keep_boxes,
confThreshold, nmsThreshold,
yolo_model);
yolo_model,
nc);
//![postprocess]
// convert Rect2d to Rect
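A minimal sketch of that conversion, assuming keep_boxes already holds pixel-space coordinates; this is an illustration, not necessarily the sample's actual code:

// Sketch only: round the double-precision detections to integer Rects for drawing.
std::vector<cv::Rect> boxes;
boxes.reserve(keep_boxes.size());
for (const cv::Rect2d& b : keep_boxes)
    boxes.push_back(cv::Rect(cvRound(b.x), cvRound(b.y),
                             cvRound(b.width), cvRound(b.height)));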
