Merge remote-tracking branch 'upstream/3.4' into merge-3.4

pull/22642/head
Alexander Alekhin 2 years ago
commit 762481411d
41 changed files (lines changed in parentheses; BIN = binary file):
  1. 3rdparty/carotene/CMakeLists.txt (4)
  2. 3rdparty/carotene/src/add_weighted.cpp (36)
  3. cmake/OpenCVCompilerOptimizations.cmake (4)
  4. doc/py_tutorials/py_calib3d/py_calibration/py_calibration.markdown (2)
  5. doc/tutorials/features2d/homography/homography.markdown (10)
  6. doc/tutorials/features2d/homography/images/homography_camera_poses_interpolation.jpg (BIN)
  7. doc/tutorials/imgproc/generalized_hough_ballard_guil/generalized_hough_ballard_guil.markdown (97)
  8. doc/tutorials/imgproc/generalized_hough_ballard_guil/images/generalized_hough_image.jpg (BIN)
  9. doc/tutorials/imgproc/generalized_hough_ballard_guil/images/generalized_hough_less_perfect_result_img.jpg (BIN)
  10. doc/tutorials/imgproc/generalized_hough_ballard_guil/images/generalized_hough_mini_image.jpg (BIN)
  11. doc/tutorials/imgproc/generalized_hough_ballard_guil/images/generalized_hough_mini_template.jpg (BIN)
  12. doc/tutorials/imgproc/generalized_hough_ballard_guil/images/generalized_hough_result_img.jpg (BIN)
  13. doc/tutorials/imgproc/generalized_hough_ballard_guil/images/generalized_hough_template.jpg (BIN)
  14. doc/tutorials/imgproc/imgtrans/hough_circle/hough_circle.markdown (2)
  15. doc/tutorials/imgproc/imgtrans/remap/remap.markdown (2)
  16. doc/tutorials/imgproc/table_of_content_imgproc.markdown (1)
  17. modules/calib3d/include/opencv2/calib3d.hpp (48)
  18. modules/core/include/opencv2/core/cvdef.h (2)
  19. modules/core/include/opencv2/core/hal/intrin.hpp (10)
  20. modules/core/src/hal_internal.cpp (6)
  21. modules/dnn/include/opencv2/dnn/shape_utils.hpp (39)
  22. modules/dnn/src/layers/normalize_bbox_layer.cpp (4)
  23. modules/imgcodecs/src/grfmt_tiff.cpp (249)
  24. modules/imgcodecs/src/grfmt_webp.cpp (2)
  25. modules/imgcodecs/test/test_tiff.cpp (428)
  26. modules/imgproc/include/opencv2/imgproc.hpp (24)
  27. modules/imgproc/src/hough.cpp (22)
  28. modules/imgproc/src/pyramids.cpp (139)
  29. modules/imgproc/src/sumpixels.avx512_skx.hpp (10)
  30. modules/imgproc/test/test_houghlines.cpp (11)
  31. modules/imgproc/test/test_pyramid.cpp (35)
  32. modules/objdetect/perf/perf_qrcode_pipeline.cpp (15)
  33. modules/objdetect/test/test_qrcode.cpp (6)
  34. modules/videoio/include/opencv2/videoio.hpp (8)
  35. platforms/android/android.toolchain.cmake (4)
  36. samples/cpp/tutorial_code/ImgTrans/generalizedHoughTransform.cpp (108)
  37. samples/cpp/tutorial_code/video/meanshift/camshift.cpp (2)
  38. samples/cpp/tutorial_code/video/meanshift/meanshift.cpp (2)
  39. samples/java/tutorial_code/video/meanshift/CamshiftDemo.java (2)
  40. samples/java/tutorial_code/video/meanshift/MeanshiftDemo.java (2)
  41. samples/winrt/ImageManipulations/MainPage.xaml.cpp (2)

@@ -27,6 +27,10 @@ if(CMAKE_COMPILER_IS_GNUCC)
endif()
endif()
if(APPLE AND CV_CLANG AND WITH_NEON)
ocv_warnings_disable(CMAKE_CXX_FLAGS -Wno-unused-function)
endif()
add_library(carotene_objs OBJECT EXCLUDE_FROM_ALL
${carotene_headers}
${carotene_sources}

@@ -109,9 +109,9 @@ template <> struct wAdd<s32>
vgamma = vdupq_n_f32(_gamma + 0.5);
}
void operator() (const typename VecTraits<s32>::vec128 & v_src0,
const typename VecTraits<s32>::vec128 & v_src1,
typename VecTraits<s32>::vec128 & v_dst) const
void operator() (const VecTraits<s32>::vec128 & v_src0,
const VecTraits<s32>::vec128 & v_src1,
VecTraits<s32>::vec128 & v_dst) const
{
float32x4_t vs1 = vcvtq_f32_s32(v_src0);
float32x4_t vs2 = vcvtq_f32_s32(v_src1);
@@ -121,9 +121,9 @@ template <> struct wAdd<s32>
v_dst = vcvtq_s32_f32(vs1);
}
void operator() (const typename VecTraits<s32>::vec64 & v_src0,
const typename VecTraits<s32>::vec64 & v_src1,
typename VecTraits<s32>::vec64 & v_dst) const
void operator() (const VecTraits<s32>::vec64 & v_src0,
const VecTraits<s32>::vec64 & v_src1,
VecTraits<s32>::vec64 & v_dst) const
{
float32x2_t vs1 = vcvt_f32_s32(v_src0);
float32x2_t vs2 = vcvt_f32_s32(v_src1);
@@ -153,9 +153,9 @@ template <> struct wAdd<u32>
vgamma = vdupq_n_f32(_gamma + 0.5);
}
void operator() (const typename VecTraits<u32>::vec128 & v_src0,
const typename VecTraits<u32>::vec128 & v_src1,
typename VecTraits<u32>::vec128 & v_dst) const
void operator() (const VecTraits<u32>::vec128 & v_src0,
const VecTraits<u32>::vec128 & v_src1,
VecTraits<u32>::vec128 & v_dst) const
{
float32x4_t vs1 = vcvtq_f32_u32(v_src0);
float32x4_t vs2 = vcvtq_f32_u32(v_src1);
@@ -165,9 +165,9 @@ template <> struct wAdd<u32>
v_dst = vcvtq_u32_f32(vs1);
}
void operator() (const typename VecTraits<u32>::vec64 & v_src0,
const typename VecTraits<u32>::vec64 & v_src1,
typename VecTraits<u32>::vec64 & v_dst) const
void operator() (const VecTraits<u32>::vec64 & v_src0,
const VecTraits<u32>::vec64 & v_src1,
VecTraits<u32>::vec64 & v_dst) const
{
float32x2_t vs1 = vcvt_f32_u32(v_src0);
float32x2_t vs2 = vcvt_f32_u32(v_src1);
@@ -197,17 +197,17 @@ template <> struct wAdd<f32>
vgamma = vdupq_n_f32(_gamma + 0.5);
}
void operator() (const typename VecTraits<f32>::vec128 & v_src0,
const typename VecTraits<f32>::vec128 & v_src1,
typename VecTraits<f32>::vec128 & v_dst) const
void operator() (const VecTraits<f32>::vec128 & v_src0,
const VecTraits<f32>::vec128 & v_src1,
VecTraits<f32>::vec128 & v_dst) const
{
float32x4_t vs1 = vmlaq_f32(vgamma, v_src0, valpha);
v_dst = vmlaq_f32(vs1, v_src1, vbeta);
}
void operator() (const typename VecTraits<f32>::vec64 & v_src0,
const typename VecTraits<f32>::vec64 & v_src1,
typename VecTraits<f32>::vec64 & v_dst) const
void operator() (const VecTraits<f32>::vec64 & v_src0,
const VecTraits<f32>::vec64 & v_src1,
VecTraits<f32>::vec64 & v_dst) const
{
float32x2_t vs1 = vmla_f32(vget_low(vgamma), v_src0, vget_low(valpha));
v_dst = vmla_f32(vs1, v_src1, vget_low(vbeta));

@@ -698,7 +698,7 @@ macro(ocv_compiler_optimization_process_sources SOURCES_VAR_NAME LIBS_VAR_NAME T
if(fname_LOWER MATCHES "\\.${OPT_LOWER}\\.cpp$")
#message("${fname} BASELINE-${OPT}")
set(__opt_found 1)
list(APPEND __result "${fname}")
list(APPEND __result_${OPT} "${fname}")
break()
endif()
endforeach()
@@ -732,7 +732,7 @@ macro(ocv_compiler_optimization_process_sources SOURCES_VAR_NAME LIBS_VAR_NAME T
endif()
endforeach()
foreach(OPT ${CPU_DISPATCH_FINAL})
foreach(OPT ${CPU_BASELINE_FINAL} ${CPU_DISPATCH_FINAL})
if(__result_${OPT})
#message("${OPT}: ${__result_${OPT}}")
if(CMAKE_GENERATOR MATCHES "^Visual"

@@ -127,7 +127,7 @@ for fname in images:
objpoints.append(objp)
corners2 = cv.cornerSubPix(gray,corners, (11,11), (-1,-1), criteria)
imgpoints.append(corners)
imgpoints.append(corners2)
# Draw and display the corners
cv.drawChessboardCorners(img, (7,6), corners2, ret)

@@ -418,10 +418,18 @@ homography from camera displacement:
The homography matrices are similar. If we compare the image 1 warped using both homography matrices:
![Left: image warped using the homography estimated. Right: using the homography computed from the camera displacement](images/homography_camera_displacement_compare.jpg)
![Left: image warped using the estimated homography. Right: using the homography computed from the camera displacement.](images/homography_camera_displacement_compare.jpg)
Visually, it is hard to tell the difference between the image warped with the homography computed from the camera displacement and the one warped with the homography estimated by the @ref cv::findHomography function.
#### Exercise
This demo shows you how to compute the homography transformation from two camera poses. Try to perform the same operations, but compute N intermediate homographies this time. Instead of computing one homography to warp the source image directly to the desired camera viewpoint, perform N warping operations to see the intermediate transformations.
You should get something similar to the following:
![The first three images show the source image warped at three different interpolated camera viewpoints. The 4th image shows the "error image" between the warped source image at the final camera viewpoint and the desired image.](images/homography_camera_poses_interpolation.jpg)
### Demo 4: Decompose the homography matrix {#tutorial_homography_Demo4}
OpenCV 3 contains the function @ref cv::decomposeHomographyMat, which decomposes the homography matrix into a set of rotations, translations and plane normals.
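A minimal sketch of the call (illustrative; `H` is a homography and `K` the camera intrinsic matrix, both assumed to be computed beforehand):
@code{.cpp}
std::vector<cv::Mat> rotations, translations, normals;
int solutions = cv::decomposeHomographyMat(H, K, rotations, translations, normals);
// Up to four candidate (R, t, n) decompositions are returned; additional
// constraints (e.g. visibility of the reference points) are needed to pick one.
@endcode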


@@ -0,0 +1,97 @@
Object detection with Generalized Ballard and Guil Hough Transform {#tutorial_generalized_hough_ballard_guil}
==================================================================
@tableofcontents
@prev_tutorial{tutorial_hough_circle}
@next_tutorial{tutorial_remap}
| | |
| -: | :- |
| Original author | Markus Heck |
| Compatibility | OpenCV >= 3.4 |
Goal
----
In this tutorial you will learn how to:
- Use @ref cv::GeneralizedHoughBallard and @ref cv::GeneralizedHoughGuil to detect an object
Example
-------
### What does this program do?
1. Load the image and template
![image](images/generalized_hough_mini_image.jpg)
![template](images/generalized_hough_mini_template.jpg)
2. Instantiate @ref cv::GeneralizedHoughBallard with the help of `createGeneralizedHoughBallard()`
3. Instantiate @ref cv::GeneralizedHoughGuil with the help of `createGeneralizedHoughGuil()`
4. Set the required parameters for both GeneralizedHough variants
5. Detect and show found results
@note
- Neither variant can be instantiated directly; the create methods must be used (see the sketch below).
- Guil Hough is very slow. Calculating the results for the "mini" files used in this tutorial
takes only a few seconds. With the image and template at a higher resolution, as shown below,
my notebook needs about 5 minutes to calculate a result.
![image](images/generalized_hough_image.jpg)
![template](images/generalized_hough_template.jpg)
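A minimal creation sketch (illustrative, not the tutorial sample itself; `templ` is an assumed 8-bit grayscale template image):
@code{.cpp}
cv::Ptr<cv::GeneralizedHoughBallard> ballard = cv::createGeneralizedHoughBallard();
cv::Ptr<cv::GeneralizedHoughGuil> guil = cv::createGeneralizedHoughGuil();
// Register the template both detectors should search for.
ballard->setTemplate(templ);
guil->setTemplate(templ);
@endcode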
### Code
The complete code for this tutorial is shown below.
@include samples/cpp/tutorial_code/ImgTrans/generalizedHoughTransform.cpp
Explanation
-----------
### Load image, template and setup variables
@snippet samples/cpp/tutorial_code/ImgTrans/generalizedHoughTransform.cpp generalized-hough-transform-load-and-setup
The position vectors will contain the matches found by the detectors.
Every entry is a position vector containing four floating point values:
- *[0]*: x coordinate of the center point
- *[1]*: y coordinate of the center point
- *[2]*: scale of the detected object relative to the template
- *[3]*: rotation of the detected object in degrees relative to the template
An example could look as follows: `[200, 100, 0.9, 120]`
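A sketch of how such an entry can be unpacked (illustrative names; `detect()` fills the vector, as shown in the following steps):
@code{.cpp}
std::vector<cv::Vec4f> positions;   // filled by GeneralizedHough*::detect()
for (const cv::Vec4f& match : positions)
{
    float cx    = match[0];         // x coordinate of the center point
    float cy    = match[1];         // y coordinate of the center point
    float scale = match[2];         // scale relative to the template
    float angle = match[3];         // rotation in degrees relative to the template
    // ... draw or filter the match here ...
}
@endcode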
### Setup parameters
@snippet samples/cpp/tutorial_code/ImgTrans/generalizedHoughTransform.cpp generalized-hough-transform-setup-parameters
Finding the optimal values often comes down to trial and error and depends on many factors, such as the image resolution.
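A minimal sketch of typical setter calls (values are illustrative only; the tutorial's actual values live in the snippet above):
@code{.cpp}
ballard->setMinDist(100);        // minimum distance between detected centers
ballard->setLevels(360);         // R-table levels
ballard->setVotesThreshold(30);  // accumulator threshold

guil->setMinDist(100);
guil->setMinAngle(0);            // rotation search range, in degrees
guil->setMaxAngle(360);
guil->setAngleStep(1);
guil->setMinScale(0.5);          // scale search range
guil->setMaxScale(2.0);
guil->setScaleStep(0.05);
@endcode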
### Run detection
@snippet samples/cpp/tutorial_code/ImgTrans/generalizedHoughTransform.cpp generalized-hough-transform-run
As mentioned above, this step will take some time, especially with larger images and when using Guil.
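A sketch of the two calls (illustrative; `grayImage` is the assumed 8-bit grayscale input):
@code{.cpp}
std::vector<cv::Vec4f> positionsBallard, positionsGuil;
ballard->detect(grayImage, positionsBallard);
guil->detect(grayImage, positionsGuil);
@endcode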
### Draw results and show image
@snippet samples/cpp/tutorial_code/ImgTrans/generalizedHoughTransform.cpp generalized-hough-transform-draw-results
Result
------
![result image](images/generalized_hough_result_img.jpg)
The blue rectangle shows the result of @ref cv::GeneralizedHoughBallard and the green rectangles show the results
of @ref cv::GeneralizedHoughGuil.
Getting perfect results like in this example is unlikely if the parameters are not perfectly adapted to the sample.
An example with less perfect parameters is shown below.
For the Ballard variant, only the center of the result is marked as a black dot on this image. The rectangle would be
the same as in the previous image.
![less perfect result](images/generalized_hough_less_perfect_result_img.jpg)


@@ -4,7 +4,7 @@ Hough Circle Transform {#tutorial_hough_circle}
@tableofcontents
@prev_tutorial{tutorial_hough_lines}
@next_tutorial{tutorial_remap}
@next_tutorial{tutorial_generalized_hough_ballard_guil}
| | |
| -: | :- |

@@ -3,7 +3,7 @@ Remapping {#tutorial_remap}
@tableofcontents
@prev_tutorial{tutorial_hough_circle}
@prev_tutorial{tutorial_generalized_hough_ballard_guil}
@next_tutorial{tutorial_warp_affine}
| | |

@@ -23,6 +23,7 @@ Transformations
- @subpage tutorial_canny_detector
- @subpage tutorial_hough_lines
- @subpage tutorial_hough_circle
- @subpage tutorial_generalized_hough_ballard_guil
- @subpage tutorial_remap
- @subpage tutorial_warp_affine

@@ -685,7 +685,7 @@ a vector\<Point2f\> .
- @ref RHO - PROSAC-based robust method
@param ransacReprojThreshold Maximum allowed reprojection error to treat a point pair as an inlier
(used in the RANSAC and RHO methods only). That is, if
\f[\| \texttt{dstPoints} _i - \texttt{convertPointsHomogeneous} ( \texttt{H} * \texttt{srcPoints} _i) \|_2 > \texttt{ransacReprojThreshold}\f]
\f[\| \texttt{dstPoints} _i - \texttt{convertPointsHomogeneous} ( \texttt{H} \cdot \texttt{srcPoints} _i) \|_2 > \texttt{ransacReprojThreshold}\f]
then the point \f$i\f$ is considered as an outlier. If srcPoints and dstPoints are measured in pixels,
it usually makes sense to set this parameter somewhere in the range of 1 to 10.
@param mask Optional output mask set by a robust method ( RANSAC or LMeDS ). Note that the input
@@ -788,7 +788,7 @@ be used in OpenGL. Note, there is always more than one sequence of rotations abo
principal axes that results in the same orientation of an object, e.g. see @cite Slabaugh . Returned
three rotation matrices and corresponding three Euler angles are only one of the possible solutions.
The function is based on RQDecomp3x3 .
The function is based on #RQDecomp3x3 .
*/
CV_EXPORTS_W void decomposeProjectionMatrix( InputArray projMatrix, OutputArray cameraMatrix,
OutputArray rotMatrix, OutputArray transVect,
@@ -834,10 +834,10 @@ The functions compute:
\f[\begin{array}{l} \texttt{rvec3} = \mathrm{rodrigues} ^{-1} \left ( \mathrm{rodrigues} ( \texttt{rvec2} ) \cdot \mathrm{rodrigues} ( \texttt{rvec1} ) \right ) \\ \texttt{tvec3} = \mathrm{rodrigues} ( \texttt{rvec2} ) \cdot \texttt{tvec1} + \texttt{tvec2} \end{array} ,\f]
where \f$\mathrm{rodrigues}\f$ denotes a rotation vector to a rotation matrix transformation, and
\f$\mathrm{rodrigues}^{-1}\f$ denotes the inverse transformation. See Rodrigues for details.
\f$\mathrm{rodrigues}^{-1}\f$ denotes the inverse transformation. See #Rodrigues for details.
Also, the functions can compute the derivatives of the output vectors with regards to the input
vectors (see matMulDeriv ). The functions are used inside #stereoCalibrate but can also be used in
vectors (see #matMulDeriv ). The functions are used inside #stereoCalibrate but can also be used in
your own code where Levenberg-Marquardt or another gradient-based solver is used to optimize a
function that contains a matrix multiplication.
*/
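A minimal illustrative sketch of the composition above (the input vectors are assumed to be filled 3x1 Rodrigues rotation and translation vectors):
@code{.cpp}
cv::Mat rvec1, tvec1, rvec2, tvec2;  // (rvec1, tvec1) applied first, then (rvec2, tvec2)
cv::Mat rvec3, tvec3;                // composed transform
cv::composeRT(rvec1, tvec1, rvec2, tvec2, rvec3, tvec3);
@endcode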
@@ -1206,7 +1206,7 @@ coordinate space. In the old interface all the per-view vectors are concatenated
old interface all the per-view vectors are concatenated.
@param imageSize Image size in pixels used to initialize the principal point.
@param aspectRatio If it is zero or negative, both \f$f_x\f$ and \f$f_y\f$ are estimated independently.
Otherwise, \f$f_x = f_y * \texttt{aspectRatio}\f$ .
Otherwise, \f$f_x = f_y \cdot \texttt{aspectRatio}\f$ .
The function estimates and returns an initial camera intrinsic matrix for the camera calibration process.
Currently, the function only supports planar calibration patterns, which are patterns where each
@@ -1225,7 +1225,7 @@ CV_EXPORTS_W Mat initCameraMatrix2D( InputArrayOfArrays objectPoints,
@param flags Various operation flags that can be zero or a combination of the following values:
- @ref CALIB_CB_ADAPTIVE_THRESH Use adaptive thresholding to convert the image to black
and white, rather than a fixed threshold level (computed from the average image brightness).
- @ref CALIB_CB_NORMALIZE_IMAGE Normalize the image gamma with equalizeHist before
- @ref CALIB_CB_NORMALIZE_IMAGE Normalize the image gamma with #equalizeHist before
applying fixed or adaptive thresholding.
- @ref CALIB_CB_FILTER_QUADS Use additional criteria (like contour area, perimeter,
square-like shape) to filter out false quads extracted at the contour retrieval stage.
@@ -1239,7 +1239,7 @@ are found and they are placed in a certain order (row by row, left to right in e
Otherwise, if the function fails to find all the corners or reorder them, it returns 0. For example,
a regular chessboard has 8 x 8 squares and 7 x 7 internal corners, that is, points where the black
squares touch each other. The detected coordinates are approximate, and to determine their positions
more accurately, the function calls cornerSubPix. You also may use the function cornerSubPix with
more accurately, the function calls #cornerSubPix. You also may use the function #cornerSubPix with
different parameters if returned coordinates are not accurate enough.
Sample usage of detecting and drawing chessboard corners:
@@ -1942,11 +1942,18 @@ coordinates. The function distinguishes the following two cases:
\end{bmatrix}\f]
\f[\texttt{P2} = \begin{bmatrix}
f & 0 & cx_2 & T_x*f \\
f & 0 & cx_2 & T_x \cdot f \\
0 & f & cy & 0 \\
0 & 0 & 1 & 0
\end{bmatrix} ,\f]
\f[\texttt{Q} = \begin{bmatrix}
1 & 0 & 0 & -cx_1 \\
0 & 1 & 0 & -cy \\
0 & 0 & 0 & f \\
0 & 0 & -\frac{1}{T_x} & \frac{cx_1 - cx_2}{T_x}
\end{bmatrix} \f]
where \f$T_x\f$ is a horizontal shift between the cameras and \f$cx_1=cx_2\f$ if
@ref CALIB_ZERO_DISPARITY is set.
@@ -1962,10 +1969,17 @@ coordinates. The function distinguishes the following two cases:
\f[\texttt{P2} = \begin{bmatrix}
f & 0 & cx & 0 \\
0 & f & cy_2 & T_y*f \\
0 & f & cy_2 & T_y \cdot f \\
0 & 0 & 1 & 0
\end{bmatrix},\f]
\f[\texttt{Q} = \begin{bmatrix}
1 & 0 & 0 & -cx \\
0 & 1 & 0 & -cy_1 \\
0 & 0 & 0 & f \\
0 & 0 & -\frac{1}{T_y} & \frac{cy_1 - cy_2}{T_y}
\end{bmatrix} \f]
where \f$T_y\f$ is a vertical shift between the cameras and \f$cy_1=cy_2\f$ if
@ref CALIB_ZERO_DISPARITY is set.
@@ -2001,8 +2015,8 @@ CV_EXPORTS_W void stereoRectify( InputArray cameraMatrix1, InputArray distCoeffs
@param H2 Output rectification homography matrix for the second image.
@param threshold Optional threshold used to filter out the outliers. If the parameter is greater
than zero, all the point pairs that do not comply with the epipolar geometry (that is, the points
for which \f$|\texttt{points2[i]}^T*\texttt{F}*\texttt{points1[i]}|>\texttt{threshold}\f$ ) are
rejected prior to computing the homographies. Otherwise, all the points are considered inliers.
for which \f$|\texttt{points2[i]}^T \cdot \texttt{F} \cdot \texttt{points1[i]}|>\texttt{threshold}\f$ )
are rejected prior to computing the homographies. Otherwise, all the points are considered inliers.
The function computes the rectification transformations without knowing intrinsic parameters of the
cameras and their relative position in the space, which explains the suffix "uncalibrated". Another
@@ -2425,7 +2439,7 @@ the found fundamental matrix. Normally just one matrix is found. But in case of
algorithm, the function may return up to 3 solutions ( \f$9 \times 3\f$ matrix that stores all 3
matrices sequentially).
The calculated fundamental matrix may be passed further to computeCorrespondEpilines that finds the
The calculated fundamental matrix may be passed further to #computeCorrespondEpilines that finds the
epipolar lines corresponding to the specified points. It can also be passed to
#stereoRectifyUncalibrated to compute the rectification transformation. :
@code
@@ -2495,7 +2509,7 @@ This function estimates essential matrix based on the five-point algorithm solve
where \f$E\f$ is an essential matrix, \f$p_1\f$ and \f$p_2\f$ are corresponding points in the first and the
second images, respectively. The result of this function may be passed further to
#decomposeEssentialMat or #recoverPose to recover the relative pose between cameras.
#decomposeEssentialMat or #recoverPose to recover the relative pose between cameras.
*/
CV_EXPORTS_W
Mat findEssentialMat(
@@ -2888,12 +2902,12 @@ CV_EXPORTS_W void triangulatePoints( InputArray projMatr1, InputArray projMatr2,
@param newPoints1 The optimized points1.
@param newPoints2 The optimized points2.
The function implements the Optimal Triangulation Method (see Multiple View Geometry for details).
The function implements the Optimal Triangulation Method (see Multiple View Geometry @cite HartleyZ00 for details).
For each given point correspondence points1[i] \<-\> points2[i], and a fundamental matrix F, it
computes the corrected correspondences newPoints1[i] \<-\> newPoints2[i] that minimize the geometric
error \f$d(points1[i], newPoints1[i])^2 + d(points2[i],newPoints2[i])^2\f$ (where \f$d(a,b)\f$ is the
geometric distance between points \f$a\f$ and \f$b\f$ ) subject to the epipolar constraint
\f$newPoints2^T * F * newPoints1 = 0\f$ .
\f$newPoints2^T \cdot F \cdot newPoints1 = 0\f$ .
*/
CV_EXPORTS_W void correctMatches( InputArray F, InputArray points1, InputArray points2,
OutputArray newPoints1, OutputArray newPoints2 );
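A minimal illustrative sketch of the call (`F` is an assumed fundamental matrix; the point arrays are 1xN 2-channel Mats, as documented):
@code{.cpp}
const int N = 8;
cv::Mat pts1(1, N, CV_64FC2), pts2(1, N, CV_64FC2);  // matched points, filled elsewhere
cv::Mat newPts1, newPts2;
cv::correctMatches(F, pts1, pts2, newPts1, newPts2); // newPts2^T * F * newPts1 = 0 holds exactly
@endcode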
@@ -3566,7 +3580,7 @@ where cameraMatrix can be chosen arbitrarily.
of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed.
@param R Optional rectification transformation in the object space (3x3 matrix). R1 or R2 ,
computed by #stereoRectify can be passed here. If the matrix is empty, the identity transformation
is assumed. In cvInitUndistortMap R assumed to be an identity matrix.
is assumed. In #initUndistortRectifyMap, R is assumed to be an identity matrix.
@param newCameraMatrix New camera matrix \f$A'=\vecthreethree{f_x'}{0}{c_x'}{0}{f_y'}{c_y'}{0}{0}{1}\f$.
@param size Undistorted image size.
@param m1type Type of the first output map that can be CV_32FC1, CV_32FC2 or CV_16SC2, see #convertMaps
@@ -3959,7 +3973,7 @@ optimization. It is the \f$max(width,height)/\pi\f$ or the provided \f$f_x\f$, \
camera.
@param P2 Output 3x4 projection matrix in the new (rectified) coordinate systems for the second
camera.
@param Q Output \f$4 \times 4\f$ disparity-to-depth mapping matrix (see reprojectImageTo3D ).
@param Q Output \f$4 \times 4\f$ disparity-to-depth mapping matrix (see #reprojectImageTo3D ).
@param flags Operation flags that may be zero or @ref fisheye::CALIB_ZERO_DISPARITY . If the flag is set,
the function makes the principal points of each camera have the same pixel coordinates in the
rectified views. And if the flag is not set, the function may still shift the images in the

@@ -810,7 +810,7 @@ __CV_ENUM_FLAGS_BITWISE_XOR_EQ (EnumType, EnumType)
# define CV_CONSTEXPR
#endif
// Integer types portatibility
// Integer types portability
#ifdef OPENCV_STDINT_HEADER
#include OPENCV_STDINT_HEADER
#elif defined(__cplusplus)

@@ -50,6 +50,12 @@
#include <stdlib.h>
#include "opencv2/core/cvdef.h"
#if defined(__GNUC__) && __GNUC__ == 12
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wuninitialized"
#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
#endif
#define OPENCV_HAL_ADD(a, b) ((a) + (b))
#define OPENCV_HAL_AND(a, b) ((a) & (b))
#define OPENCV_HAL_NOP(a) (a)
@@ -925,4 +931,8 @@ CV_CPU_OPTIMIZATION_HAL_NAMESPACE_END
//! @endcond
#if defined(__GNUC__) && __GNUC__ == 12
#pragma GCC diagnostic pop
#endif
#endif

@@ -244,6 +244,11 @@ lapack_SVD(fptype* a, size_t a_step, fptype *w, fptype* u, size_t u_step, fptype
lwork = (int)round(work1); //optimal buffer size
fptype* buffer = new fptype[lwork + 1];
// Make sure MSAN sees the memory as having been written.
// MSAN does not think it has been written because a different language is called.
// Note: we do this here because if dgesdd is C++, MSAN errors can be reported within it.
CV_ANNOTATE_MEMORY_IS_INITIALIZED(buffer, sizeof(fptype) * (lwork + 1));
if(typeid(fptype) == typeid(float))
OCV_LAPACK_FUNC(sgesdd)(mode, &m, &n, (float*)a, &lda, (float*)w, (float*)u, &ldu, (float*)vt, &ldv, (float*)buffer, &lwork, iworkBuf, info);
else if(typeid(fptype) == typeid(double))
@@ -252,7 +257,6 @@ lapack_SVD(fptype* a, size_t a_step, fptype *w, fptype* u, size_t u_step, fptype
// Make sure MSAN sees the memory as having been written.
// MSAN does not think it has been written because a different language was called.
CV_ANNOTATE_MEMORY_IS_INITIALIZED(a, a_step * n);
CV_ANNOTATE_MEMORY_IS_INITIALIZED(buffer, sizeof(fptype) * (lwork + 1));
if (u)
CV_ANNOTATE_MEMORY_IS_INITIALIZED(u, u_step * m);
if (vt)

@@ -160,22 +160,49 @@ static inline MatShape shape(int a0, int a1=-1, int a2=-1, int a3=-1)
static inline int total(const MatShape& shape, int start = -1, int end = -1)
{
if (start == -1) start = 0;
if (end == -1) end = (int)shape.size();
if (shape.empty())
return 0;
int dims = (int)shape.size();
if (start == -1) start = 0;
if (end == -1) end = dims;
CV_CheckLE(0, start, "");
CV_CheckLE(start, end, "");
CV_CheckLE(end, dims, "");
int elems = 1;
CV_Assert(start <= (int)shape.size() && end <= (int)shape.size() &&
start <= end);
for(int i = start; i < end; i++)
for (int i = start; i < end; i++)
{
elems *= shape[i];
}
return elems;
}
// TODO: rename to countDimsElements()
static inline int total(const Mat& mat, int start = -1, int end = -1)
{
if (mat.empty())
return 0;
int dims = mat.dims;
if (start == -1) start = 0;
if (end == -1) end = dims;
CV_CheckLE(0, start, "");
CV_CheckLE(start, end, "");
CV_CheckLE(end, dims, "");
int elems = 1;
for (int i = start; i < end; i++)
{
elems *= mat.size[i];
}
return elems;
}
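// Illustration (not part of the header): for a 4-D blob of size 2x3x4x5,
// total(m, 0, 2) == 6 and total(m, 2, 4) == 20, matching the MatShape
// overload above; an empty Mat now yields 0 instead of failing an assertion.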
static inline MatShape concat(const MatShape& a, const MatShape& b)
{
MatShape c = a;

@@ -215,8 +215,8 @@ public:
const float* inpData = inp0.ptr<float>();
float* outData = outputs[0].ptr<float>();
size_t num = total(shape(inp0.size), 0, startAxis);
size_t numPlanes = total(shape(inp0.size), startAxis, endAxis + 1);
size_t num = total(inp0, 0, startAxis);
size_t numPlanes = total(inp0, startAxis, endAxis + 1);
CV_Assert(num * numPlanes != 0);
size_t planeSize = inp0.total() / (num * numPlanes);
for (size_t n = 0; n < num; ++n)

@@ -238,7 +238,6 @@ public:
bool TiffDecoder::readHeader()
{
bool result = false;
TIFF* tif = static_cast<TIFF*>(m_tif.get());
if (!tif)
{
@@ -437,18 +436,15 @@ static void fixOrientationFull(Mat &img, int orientation)
* For 8 bit some corrections are done by TIFFReadRGBAStrip/Tile already.
* Not so for 16/32/64 bit.
*/
static void fixOrientation(Mat &img, uint16 orientation, int dst_bpp)
static void fixOrientation(Mat &img, uint16 orientation, bool isOrientationFull)
{
switch(dst_bpp) {
case 8:
fixOrientationPartial(img, orientation);
break;
case 16:
case 32:
case 64:
fixOrientationFull(img, orientation);
break;
if( isOrientationFull )
{
fixOrientationFull(img, orientation);
}
else
{
fixOrientationPartial(img, orientation);
}
}
@@ -632,17 +628,7 @@ bool TiffDecoder::readData( Mat& img )
(img_orientation == ORIENTATION_BOTRIGHT || img_orientation == ORIENTATION_RIGHTBOT ||
img_orientation == ORIENTATION_BOTLEFT || img_orientation == ORIENTATION_LEFTBOT);
int wanted_channels = normalizeChannelsNumber(img.channels());
if (dst_bpp == 8)
{
char errmsg[1024];
if (!TIFFRGBAImageOK(tif, errmsg))
{
CV_LOG_WARNING(NULL, "OpenCV TIFF: TIFFRGBAImageOK: " << errmsg);
close();
return false;
}
}
bool doReadScanline = false;
uint32 tile_width0 = m_width, tile_height0 = 0;
@@ -672,21 +658,123 @@ bool TiffDecoder::readData( Mat& img )
const uint64_t MAX_TILE_SIZE = (CV_BIG_UINT(1) << 30);
CV_CheckLE((int)ncn, 4, "");
CV_CheckLE((int)bpp, 64, "");
CV_Assert(((uint64_t)tile_width0 * tile_height0 * ncn * std::max(1, (int)(bpp / bitsPerByte)) < MAX_TILE_SIZE) && "TIFF tile size is too large: >= 1Gb");
if (dst_bpp == 8)
{
// we will use TIFFReadRGBA* functions, so allocate temporary buffer for 32bit RGBA
bpp = 8;
ncn = 4;
const int _ncn = 4; // Read RGBA
const int _bpp = 8; // Read 8bit
// if buffer_size(as 32bit RGBA) >= MAX_TILE_SIZE*95%,
// we will use TIFFReadScanline function.
if (
(uint64_t)tile_width0 * tile_height0 * _ncn * std::max(1, (int)(_bpp / bitsPerByte))
>=
( (uint64_t) MAX_TILE_SIZE * 95 / 100)
)
{
uint16_t planerConfig = (uint16)-1;
CV_TIFF_CHECK_CALL(TIFFGetField(tif, TIFFTAG_PLANARCONFIG, &planerConfig));
doReadScanline = (!is_tiled) // no tile
&&
( ( ncn == 1 ) || ( ncn == 3 ) || ( ncn == 4 ) )
&&
( ( bpp == 8 ) || ( bpp == 16 ) )
&&
(tile_height0 == (uint32_t) m_height) // single strip
&&
(
(photometric == PHOTOMETRIC_MINISWHITE)
||
(photometric == PHOTOMETRIC_MINISBLACK)
||
(photometric == PHOTOMETRIC_RGB)
)
&&
(planerConfig != PLANARCONFIG_SEPARATE);
// Currently only EXTRASAMPLE_ASSOCALPHA is supported.
if ( doReadScanline && ( ncn == 4 ) )
{
uint16_t extra_samples_num;
uint16_t *extra_samples = NULL;
CV_TIFF_CHECK_CALL(TIFFGetField(tif, TIFFTAG_EXTRASAMPLES, &extra_samples_num, &extra_samples ));
doReadScanline = ( extra_samples_num == 1 ) && ( extra_samples[0] == EXTRASAMPLE_ASSOCALPHA );
}
}
if ( !doReadScanline )
{
// we will use TIFFReadRGBA* functions, so allocate temporary buffer for 32bit RGBA
bpp = 8;
ncn = 4;
char errmsg[1024];
if (!TIFFRGBAImageOK(tif, errmsg))
{
CV_LOG_WARNING(NULL, "OpenCV TIFF: TIFFRGBAImageOK: " << errmsg);
close();
return false;
}
}
}
else if (dst_bpp == 16)
{
// if buffer_size >= MAX_TILE_SIZE*95%,
// we will use TIFFReadScanline function.
if (
(uint64_t)tile_width0 * tile_height0 * ncn * std::max(1, (int)(bpp / bitsPerByte))
>=
MAX_TILE_SIZE * 95 / 100
)
{
uint16_t planerConfig = (uint16)-1;
CV_TIFF_CHECK_CALL(TIFFGetField(tif, TIFFTAG_PLANARCONFIG, &planerConfig));
doReadScanline = (!is_tiled) // no tile
&&
( ( ncn == 1 ) || ( ncn == 3 ) || ( ncn == 4 ) )
&&
( ( bpp == 8 ) || ( bpp == 16 ) )
&&
(tile_height0 == (uint32_t) m_height) // single strip
&&
(
(photometric == PHOTOMETRIC_MINISWHITE)
||
(photometric == PHOTOMETRIC_MINISBLACK)
||
(photometric == PHOTOMETRIC_RGB)
)
&&
(planerConfig != PLANARCONFIG_SEPARATE);
// Currently only EXTRASAMPLE_ASSOCALPHA is supported.
if ( doReadScanline && ( ncn == 4 ) )
{
uint16_t extra_samples_num;
uint16_t *extra_samples = NULL;
CV_TIFF_CHECK_CALL(TIFFGetField(tif, TIFFTAG_EXTRASAMPLES, &extra_samples_num, &extra_samples ));
doReadScanline = ( extra_samples_num == 1 ) && ( extra_samples[0] == EXTRASAMPLE_ASSOCALPHA );
}
}
}
else if (dst_bpp == 32 || dst_bpp == 64)
{
CV_Assert(ncn == img.channels());
CV_TIFF_CHECK_CALL(TIFFSetField(tif, TIFFTAG_SAMPLEFORMAT, SAMPLEFORMAT_IEEEFP));
}
if ( doReadScanline )
{
// Read each scanline.
tile_height0 = 1;
}
const size_t src_buffer_bytes_per_row = divUp(static_cast<size_t>(ncn * tile_width0 * bpp), static_cast<size_t>(bitsPerByte));
const size_t src_buffer_size = tile_height0 * src_buffer_bytes_per_row;
CV_CheckLT(src_buffer_size, MAX_TILE_SIZE, "buffer_size is too large: >= 1Gb");
const size_t src_buffer_unpacked_bytes_per_row = divUp(static_cast<size_t>(ncn * tile_width0 * dst_bpp), static_cast<size_t>(bitsPerByte));
const size_t src_buffer_unpacked_size = tile_height0 * src_buffer_unpacked_bytes_per_row;
const bool needsUnpacking = (bpp < dst_bpp);
@@ -694,8 +782,20 @@ bool TiffDecoder::readData( Mat& img )
uchar* src_buffer = _src_buffer.data();
AutoBuffer<uchar> _src_buffer_unpacked(needsUnpacking ? src_buffer_unpacked_size : 0);
uchar* src_buffer_unpacked = needsUnpacking ? _src_buffer_unpacked.data() : nullptr;
if ( doReadScanline )
{
CV_CheckGE(src_buffer_size,
static_cast<size_t>(TIFFScanlineSize(tif)),
"src_buffer_size is smaller than TIFFScanlineSize().");
}
int tileidx = 0;
#define MAKE_FLAG(a,b) ( (a << 8) | b )
const int convert_flag = MAKE_FLAG( ncn, wanted_channels );
const bool isNeedConvert16to8 = ( doReadScanline ) && ( bpp == 16 ) && ( dst_bpp == 8);
for (int y = 0; y < m_height; y += (int)tile_height0)
{
int tile_height = std::min((int)tile_height0, m_height - y);
@@ -711,7 +811,29 @@ bool TiffDecoder::readData( Mat& img )
case 8:
{
uchar* bstart = src_buffer;
if (!is_tiled)
if (doReadScanline)
{
CV_TIFF_CHECK_CALL((int)TIFFReadScanline(tif, (uint32*)src_buffer, y) >= 0);
if ( isNeedConvert16to8 )
{
// Convert buffer image from 16bit to 8bit.
int ix;
for ( ix = 0 ; ix < tile_width * ncn - 4; ix += 4 )
{
src_buffer[ ix ] = src_buffer[ ix * 2 + 1 ];
src_buffer[ ix + 1 ] = src_buffer[ ix * 2 + 3 ];
src_buffer[ ix + 2 ] = src_buffer[ ix * 2 + 5 ];
src_buffer[ ix + 3 ] = src_buffer[ ix * 2 + 7 ];
}
for ( ; ix < tile_width * ncn ; ix ++ )
{
src_buffer[ ix ] = src_buffer[ ix * 2 + 1];
}
}
}
else if (!is_tiled)
{
CV_TIFF_CHECK_CALL(TIFFReadRGBAStrip(tif, y, (uint32*)src_buffer));
}
@@ -722,9 +844,65 @@ bool TiffDecoder::readData( Mat& img )
bstart += (tile_height0 - tile_height) * tile_width0 * 4;
}
uchar* img_line_buffer = (uchar*) img.ptr(y, 0);
for (int i = 0; i < tile_height; i++)
{
if (color)
if (doReadScanline)
{
switch ( convert_flag )
{
case MAKE_FLAG( 1, 1 ): // GRAY to GRAY
memcpy( (void*) img_line_buffer,
(void*) bstart,
tile_width * sizeof(uchar) );
break;
case MAKE_FLAG( 1, 3 ): // GRAY to BGR
icvCvt_Gray2BGR_8u_C1C3R( bstart, 0,
img_line_buffer, 0,
Size(tile_width, 1) );
break;
case MAKE_FLAG( 3, 1): // RGB to GRAY
icvCvt_BGR2Gray_8u_C3C1R( bstart, 0,
img_line_buffer, 0,
Size(tile_width, 1) );
break;
case MAKE_FLAG( 3, 3 ): // RGB to BGR
icvCvt_BGR2RGB_8u_C3R( bstart, 0,
img_line_buffer, 0,
Size(tile_width, 1) );
break;
case MAKE_FLAG( 4, 1 ): // RGBA to GRAY
icvCvt_BGRA2Gray_8u_C4C1R( bstart, 0,
img_line_buffer, 0,
Size(tile_width, 1) );
break;
case MAKE_FLAG( 4, 3 ): // RGBA to BGR
icvCvt_BGRA2BGR_8u_C4C3R( bstart, 0,
img_line_buffer, 0,
Size(tile_width, 1), 2 );
break;
case MAKE_FLAG( 4, 4 ): // RGBA to BGRA
icvCvt_BGRA2RGBA_8u_C4R(bstart, 0,
img_line_buffer, 0,
Size(tile_width, 1) );
break;
default:
CV_LOG_ONCE_ERROR(NULL, "OpenCV TIFF(line " << __LINE__ << "): Unsupported conversion:"
<< " bpp = " << bpp << " ncn = " << (int)ncn
<< " wanted_channels = " << wanted_channels );
break;
}
#undef MAKE_FLAG
}
else if (color)
{
if (wanted_channels == 4)
{
@@ -753,7 +931,11 @@ bool TiffDecoder::readData( Mat& img )
case 16:
{
if (!is_tiled)
if (doReadScanline)
{
CV_TIFF_CHECK_CALL((int)TIFFReadScanline(tif, (uint32*)src_buffer, y) >= 0);
}
else if (!is_tiled)
{
CV_TIFF_CHECK_CALL((int)TIFFReadEncodedStrip(tif, tileidx, (uint32*)src_buffer, src_buffer_size) >= 0);
}
@@ -874,7 +1056,11 @@ bool TiffDecoder::readData( Mat& img )
}
if (bpp < dst_bpp)
img *= (1<<(dst_bpp-bpp));
fixOrientation(img, img_orientation, dst_bpp);
// If TIFFReadRGBA* function is used -> fixOrientationPartial().
// Otherwise -> fixOrientationFull().
fixOrientation(img, img_orientation,
( ( dst_bpp != 8 ) && ( !doReadScanline ) ) );
}
if (m_hdr && depth >= CV_32F)
@@ -899,6 +1085,7 @@ TiffEncoder::~TiffEncoder()
ImageEncoder TiffEncoder::newEncoder() const
{
cv_tiffSetErrorHandler();
return makePtr<TiffEncoder>();
}

@@ -126,6 +126,8 @@ bool WebPDecoder::readHeader()
WebPBitstreamFeatures features;
if (VP8_STATUS_OK == WebPGetFeatures(header, sizeof(header), &features))
{
CV_CheckEQ(features.has_animation, false, "WebP backend does not support animated webp images");
m_width = features.width;
m_height = features.height;

@@ -2,6 +2,8 @@
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html
#include "test_precomp.hpp"
#include "opencv2/core/utils/logger.hpp"
#include "opencv2/core/utils/configuration.private.hpp"
namespace opencv_test { namespace {
@@ -46,6 +48,432 @@ TEST(Imgcodecs_Tiff, decode_tile16384x16384)
EXPECT_EQ(0, remove(file4.c_str()));
}
//==================================================================================================
// See https://github.com/opencv/opencv/issues/22388
/**
* Dummy enum to show combination of IMREAD_*.
*/
enum ImreadMixModes
{
IMREAD_MIX_UNCHANGED = IMREAD_UNCHANGED ,
IMREAD_MIX_GRAYSCALE = IMREAD_GRAYSCALE ,
IMREAD_MIX_COLOR = IMREAD_COLOR ,
IMREAD_MIX_GRAYSCALE_ANYDEPTH = IMREAD_GRAYSCALE | IMREAD_ANYDEPTH ,
IMREAD_MIX_GRAYSCALE_ANYCOLOR = IMREAD_GRAYSCALE | IMREAD_ANYCOLOR,
IMREAD_MIX_GRAYSCALE_ANYDEPTH_ANYCOLOR = IMREAD_GRAYSCALE | IMREAD_ANYDEPTH | IMREAD_ANYCOLOR,
IMREAD_MIX_COLOR_ANYDEPTH = IMREAD_COLOR | IMREAD_ANYDEPTH ,
IMREAD_MIX_COLOR_ANYCOLOR = IMREAD_COLOR | IMREAD_ANYCOLOR,
IMREAD_MIX_COLOR_ANYDEPTH_ANYCOLOR = IMREAD_COLOR | IMREAD_ANYDEPTH | IMREAD_ANYCOLOR
};
typedef tuple< uint64_t, tuple<string, int>, ImreadMixModes > Bufsize_and_Type;
typedef testing::TestWithParam<Bufsize_and_Type> Imgcodecs_Tiff_decode_Huge;
static inline
void PrintTo(const ImreadMixModes& val, std::ostream* os)
{
PrintTo( static_cast<ImreadModes>(val), os );
}
TEST_P(Imgcodecs_Tiff_decode_Huge, regression)
{
// Get test parameters
const uint64_t buffer_size = get<0>(GetParam());
const string mat_type_string = get<0>(get<1>(GetParam()));
const int mat_type = get<1>(get<1>(GetParam()));
const int imread_mode = get<2>(GetParam());
// Detect data file
const string req_filename = cv::format("readwrite/huge-tiff/%s_%zu.tif", mat_type_string.c_str(), (size_t)buffer_size);
const string filename = findDataFile( req_filename );
// Preparation process for test
{
// Convert from mat_type and buffer_size to tiff file information.
const uint64_t width = 32768;
int ncn = CV_MAT_CN(mat_type);
int depth = ( CV_MAT_DEPTH(mat_type) == CV_16U) ? 2 : 1; // 16bit or 8 bit
const uint64_t height = (uint64_t) buffer_size / width / ncn / depth;
const uint64_t base_scanline_size = (uint64_t) width * ncn * depth;
const uint64_t base_strip_size = (uint64_t) base_scanline_size * height;
// To avoid exception about pixel size, check it.
static const size_t CV_IO_MAX_IMAGE_PIXELS = utils::getConfigurationParameterSizeT("OPENCV_IO_MAX_IMAGE_PIXELS", 1 << 30);
uint64_t pixels = (uint64_t) width * height;
if ( pixels > CV_IO_MAX_IMAGE_PIXELS )
{
throw SkipTestException( cv::format("Test is skipped( pixels(%zu) > CV_IO_MAX_IMAGE_PIXELS(%zu) )",
(size_t)pixels, CV_IO_MAX_IMAGE_PIXELS) );
}
// If buffer_size >= 1GB * 95%, TIFFReadScanline() is used.
const uint64_t BUFFER_SIZE_LIMIT_FOR_READS_CANLINE = (uint64_t) 1024*1024*1024*95/100;
const bool doReadScanline = ( base_strip_size >= BUFFER_SIZE_LIMIT_FOR_READS_CANLINE );
// Update ncn and depth for destination Mat.
switch ( imread_mode )
{
case IMREAD_UNCHANGED:
break;
case IMREAD_GRAYSCALE:
ncn = 1;
depth = 1;
break;
case IMREAD_GRAYSCALE | IMREAD_ANYDEPTH:
ncn = 1;
break;
case IMREAD_GRAYSCALE | IMREAD_ANYCOLOR:
ncn = (ncn == 1)?1:3;
depth = 1;
break;
case IMREAD_GRAYSCALE | IMREAD_ANYCOLOR | IMREAD_ANYDEPTH:
ncn = (ncn == 1)?1:3;
break;
case IMREAD_COLOR:
ncn = 3;
depth = 1;
break;
case IMREAD_COLOR | IMREAD_ANYDEPTH:
ncn = 3;
break;
case IMREAD_COLOR | IMREAD_ANYCOLOR:
ncn = 3;
depth = 1;
break;
case IMREAD_COLOR | IMREAD_ANYDEPTH | IMREAD_ANYCOLOR:
ncn = 3;
break;
default:
break;
}
// Memory usage for Destination Mat
const uint64_t memory_usage_cvmat = (uint64_t) width * ncn * depth * height;
// Memory usage for Work memory in libtiff.
uint64_t memory_usage_tiff = 0;
if ( ( depth == 1 ) && ( !doReadScanline ) )
{
// TIFFReadRGBA*() request to allocate RGBA(32bit) buffer.
memory_usage_tiff = (uint64_t)
width *
4 * // ncn = RGBA
1 * // dst_bpp = 8 bpp
height;
}
else
{
// TIFFReadEncodedStrip() or TIFFReadScanline() request to allocate strip memory.
memory_usage_tiff = base_strip_size;
}
// Memory usage for Work memory in imgcodec/grfmt_tiff.cpp
const uint64_t memory_usage_work =
( doReadScanline ) ? base_scanline_size // for TIFFReadScanline()
: base_strip_size; // for TIFFReadRGBA*() or TIFFReadEncodedStrip()
// Total memory usage.
const uint64_t memory_usage_total =
memory_usage_cvmat + // Destination Mat
memory_usage_tiff + // Work memory in libtiff
memory_usage_work; // Work memory in imgcodecs
// Output memory usage log.
CV_LOG_DEBUG(NULL, cv::format("OpenCV TIFF-test: memory usage info : mat(%zu), libtiff(%zu), work(%zu) -> total(%zu)",
(size_t)memory_usage_cvmat, (size_t)memory_usage_tiff, (size_t)memory_usage_work, (size_t)memory_usage_total) );
// Add test tags.
if ( memory_usage_total >= (uint64_t) 6144 * 1024 * 1024 )
{
applyTestTag( CV_TEST_TAG_MEMORY_14GB, CV_TEST_TAG_VERYLONG );
}
else if ( memory_usage_total >= (uint64_t) 2048 * 1024 * 1024 )
{
applyTestTag( CV_TEST_TAG_MEMORY_6GB, CV_TEST_TAG_VERYLONG );
}
else if ( memory_usage_total >= (uint64_t) 1024 * 1024 * 1024 )
{
applyTestTag( CV_TEST_TAG_MEMORY_2GB, CV_TEST_TAG_LONG );
}
else if ( memory_usage_total >= (uint64_t) 512 * 1024 * 1024 )
{
applyTestTag( CV_TEST_TAG_MEMORY_1GB );
}
else if ( memory_usage_total >= (uint64_t) 200 * 1024 * 1024 )
{
applyTestTag( CV_TEST_TAG_MEMORY_512MB );
}
else
{
// do nothing.
}
}
// TEST Main
cv::Mat img;
ASSERT_NO_THROW( img = cv::imread(filename, imread_mode) );
ASSERT_FALSE(img.empty());
/**
* Test marker pixels at each corners.
*
* 0xAn,0x00 ... 0x00, 0xBn
* 0x00,0x00 ... 0x00, 0x00
* : : : :
* 0x00,0x00 ... 0x00, 0x00
* 0xCn,0x00 .., 0x00, 0xDn
*
*/
#define MAKE_FLAG(from_type, to_type) (((uint64_t)from_type << 32 ) | to_type )
switch ( MAKE_FLAG(mat_type, img.type() ) )
{
// GRAY TO GRAY
case MAKE_FLAG(CV_8UC1, CV_8UC1):
case MAKE_FLAG(CV_16UC1, CV_8UC1):
EXPECT_EQ( 0xA0, img.at<uchar>(0, 0) );
EXPECT_EQ( 0xB0, img.at<uchar>(0, img.cols-1) );
EXPECT_EQ( 0xC0, img.at<uchar>(img.rows-1, 0) );
EXPECT_EQ( 0xD0, img.at<uchar>(img.rows-1, img.cols-1) );
break;
// RGB/RGBA TO BGR
case MAKE_FLAG(CV_8UC3, CV_8UC3):
case MAKE_FLAG(CV_8UC4, CV_8UC3):
case MAKE_FLAG(CV_16UC3, CV_8UC3):
case MAKE_FLAG(CV_16UC4, CV_8UC3):
EXPECT_EQ( 0xA2, img.at<Vec3b>(0, 0) [0] );
EXPECT_EQ( 0xA1, img.at<Vec3b>(0, 0) [1] );
EXPECT_EQ( 0xA0, img.at<Vec3b>(0, 0) [2] );
EXPECT_EQ( 0xB2, img.at<Vec3b>(0, img.cols-1)[0] );
EXPECT_EQ( 0xB1, img.at<Vec3b>(0, img.cols-1)[1] );
EXPECT_EQ( 0xB0, img.at<Vec3b>(0, img.cols-1)[2] );
EXPECT_EQ( 0xC2, img.at<Vec3b>(img.rows-1, 0) [0] );
EXPECT_EQ( 0xC1, img.at<Vec3b>(img.rows-1, 0) [1] );
EXPECT_EQ( 0xC0, img.at<Vec3b>(img.rows-1, 0) [2] );
EXPECT_EQ( 0xD2, img.at<Vec3b>(img.rows-1, img.cols-1)[0] );
EXPECT_EQ( 0xD1, img.at<Vec3b>(img.rows-1, img.cols-1)[1] );
EXPECT_EQ( 0xD0, img.at<Vec3b>(img.rows-1, img.cols-1)[2] );
break;
// RGBA TO BGRA
case MAKE_FLAG(CV_8UC4, CV_8UC4):
case MAKE_FLAG(CV_16UC4, CV_8UC4):
EXPECT_EQ( 0xA2, img.at<Vec4b>(0, 0) [0] );
EXPECT_EQ( 0xA1, img.at<Vec4b>(0, 0) [1] );
EXPECT_EQ( 0xA0, img.at<Vec4b>(0, 0) [2] );
EXPECT_EQ( 0xA3, img.at<Vec4b>(0, 0) [3] );
EXPECT_EQ( 0xB2, img.at<Vec4b>(0, img.cols-1)[0] );
EXPECT_EQ( 0xB1, img.at<Vec4b>(0, img.cols-1)[1] );
EXPECT_EQ( 0xB0, img.at<Vec4b>(0, img.cols-1)[2] );
EXPECT_EQ( 0xB3, img.at<Vec4b>(0, img.cols-1)[3] );
EXPECT_EQ( 0xC2, img.at<Vec4b>(img.rows-1, 0) [0] );
EXPECT_EQ( 0xC1, img.at<Vec4b>(img.rows-1, 0) [1] );
EXPECT_EQ( 0xC0, img.at<Vec4b>(img.rows-1, 0) [2] );
EXPECT_EQ( 0xC3, img.at<Vec4b>(img.rows-1, 0) [3] );
EXPECT_EQ( 0xD2, img.at<Vec4b>(img.rows-1, img.cols-1)[0] );
EXPECT_EQ( 0xD1, img.at<Vec4b>(img.rows-1, img.cols-1)[1] );
EXPECT_EQ( 0xD0, img.at<Vec4b>(img.rows-1, img.cols-1)[2] );
EXPECT_EQ( 0xD3, img.at<Vec4b>(img.rows-1, img.cols-1)[3] );
break;
// RGB/RGBA to GRAY
case MAKE_FLAG(CV_8UC3, CV_8UC1):
case MAKE_FLAG(CV_8UC4, CV_8UC1):
case MAKE_FLAG(CV_16UC3, CV_8UC1):
case MAKE_FLAG(CV_16UC4, CV_8UC1):
EXPECT_LE( 0xA0, img.at<uchar>(0, 0) );
EXPECT_GE( 0xA2, img.at<uchar>(0, 0) );
EXPECT_LE( 0xB0, img.at<uchar>(0, img.cols-1) );
EXPECT_GE( 0xB2, img.at<uchar>(0, img.cols-1) );
EXPECT_LE( 0xC0, img.at<uchar>(img.rows-1, 0) );
EXPECT_GE( 0xC2, img.at<uchar>(img.rows-1, 0) );
EXPECT_LE( 0xD0, img.at<uchar>(img.rows-1, img.cols-1) );
EXPECT_GE( 0xD2, img.at<uchar>(img.rows-1, img.cols-1) );
break;
// GRAY to BGR
case MAKE_FLAG(CV_8UC1, CV_8UC3):
case MAKE_FLAG(CV_16UC1, CV_8UC3):
EXPECT_EQ( 0xA0, img.at<Vec3b>(0, 0) [0] );
EXPECT_EQ( 0xB0, img.at<Vec3b>(0, img.cols-1)[0] );
EXPECT_EQ( 0xC0, img.at<Vec3b>(img.rows-1, 0) [0] );
EXPECT_EQ( 0xD0, img.at<Vec3b>(img.rows-1, img.cols-1)[0] );
// R==G==B
EXPECT_EQ( img.at<Vec3b>(0, 0) [0], img.at<Vec3b>(0, 0) [1] );
EXPECT_EQ( img.at<Vec3b>(0, 0) [0], img.at<Vec3b>(0, 0) [2] );
EXPECT_EQ( img.at<Vec3b>(0, img.cols-1) [0], img.at<Vec3b>(0, img.cols-1)[1] );
EXPECT_EQ( img.at<Vec3b>(0, img.cols-1) [0], img.at<Vec3b>(0, img.cols-1)[2] );
EXPECT_EQ( img.at<Vec3b>(img.rows-1, 0) [0], img.at<Vec3b>(img.rows-1, 0) [1] );
EXPECT_EQ( img.at<Vec3b>(img.rows-1, 0) [0], img.at<Vec3b>(img.rows-1, 0) [2] );
EXPECT_EQ( img.at<Vec3b>(img.rows-1, img.cols-1) [0], img.at<Vec3b>(img.rows-1, img.cols-1)[1] );
EXPECT_EQ( img.at<Vec3b>(img.rows-1, img.cols-1) [0], img.at<Vec3b>(img.rows-1, img.cols-1)[2] );
break;
// GRAY TO GRAY
case MAKE_FLAG(CV_16UC1, CV_16UC1):
EXPECT_EQ( 0xA090, img.at<ushort>(0, 0) );
EXPECT_EQ( 0xB080, img.at<ushort>(0, img.cols-1) );
EXPECT_EQ( 0xC070, img.at<ushort>(img.rows-1, 0) );
EXPECT_EQ( 0xD060, img.at<ushort>(img.rows-1, img.cols-1) );
break;
// RGB/RGBA TO BGR
case MAKE_FLAG(CV_16UC3, CV_16UC3):
case MAKE_FLAG(CV_16UC4, CV_16UC3):
EXPECT_EQ( 0xA292, img.at<Vec3w>(0, 0) [0] );
EXPECT_EQ( 0xA191, img.at<Vec3w>(0, 0) [1] );
EXPECT_EQ( 0xA090, img.at<Vec3w>(0, 0) [2] );
EXPECT_EQ( 0xB282, img.at<Vec3w>(0, img.cols-1)[0] );
EXPECT_EQ( 0xB181, img.at<Vec3w>(0, img.cols-1)[1] );
EXPECT_EQ( 0xB080, img.at<Vec3w>(0, img.cols-1)[2] );
EXPECT_EQ( 0xC272, img.at<Vec3w>(img.rows-1, 0) [0] );
EXPECT_EQ( 0xC171, img.at<Vec3w>(img.rows-1, 0) [1] );
EXPECT_EQ( 0xC070, img.at<Vec3w>(img.rows-1, 0) [2] );
EXPECT_EQ( 0xD262, img.at<Vec3w>(img.rows-1, img.cols-1)[0] );
EXPECT_EQ( 0xD161, img.at<Vec3w>(img.rows-1, img.cols-1)[1] );
EXPECT_EQ( 0xD060, img.at<Vec3w>(img.rows-1, img.cols-1)[2] );
break;
// RGBA TO RGBA
case MAKE_FLAG(CV_16UC4, CV_16UC4):
EXPECT_EQ( 0xA292, img.at<Vec4w>(0, 0) [0] );
EXPECT_EQ( 0xA191, img.at<Vec4w>(0, 0) [1] );
EXPECT_EQ( 0xA090, img.at<Vec4w>(0, 0) [2] );
EXPECT_EQ( 0xA393, img.at<Vec4w>(0, 0) [3] );
EXPECT_EQ( 0xB282, img.at<Vec4w>(0, img.cols-1)[0] );
EXPECT_EQ( 0xB181, img.at<Vec4w>(0, img.cols-1)[1] );
EXPECT_EQ( 0xB080, img.at<Vec4w>(0, img.cols-1)[2] );
EXPECT_EQ( 0xB383, img.at<Vec4w>(0, img.cols-1)[3] );
EXPECT_EQ( 0xC272, img.at<Vec4w>(img.rows-1, 0) [0] );
EXPECT_EQ( 0xC171, img.at<Vec4w>(img.rows-1, 0) [1] );
EXPECT_EQ( 0xC070, img.at<Vec4w>(img.rows-1, 0) [2] );
EXPECT_EQ( 0xC373, img.at<Vec4w>(img.rows-1, 0) [3] );
EXPECT_EQ( 0xD262, img.at<Vec4w>(img.rows-1,img.cols-1) [0] );
EXPECT_EQ( 0xD161, img.at<Vec4w>(img.rows-1,img.cols-1) [1] );
EXPECT_EQ( 0xD060, img.at<Vec4w>(img.rows-1,img.cols-1) [2] );
EXPECT_EQ( 0xD363, img.at<Vec4w>(img.rows-1,img.cols-1) [3] );
break;
// RGB/RGBA to GRAY
case MAKE_FLAG(CV_16UC3, CV_16UC1):
case MAKE_FLAG(CV_16UC4, CV_16UC1):
EXPECT_LE( 0xA090, img.at<ushort>(0, 0) );
EXPECT_GE( 0xA292, img.at<ushort>(0, 0) );
EXPECT_LE( 0xB080, img.at<ushort>(0, img.cols-1) );
EXPECT_GE( 0xB282, img.at<ushort>(0, img.cols-1) );
EXPECT_LE( 0xC070, img.at<ushort>(img.rows-1, 0) );
EXPECT_GE( 0xC272, img.at<ushort>(img.rows-1, 0) );
EXPECT_LE( 0xD060, img.at<ushort>(img.rows-1, img.cols-1) );
EXPECT_GE( 0xD262, img.at<ushort>(img.rows-1, img.cols-1) );
break;
// GRAY to RGB
case MAKE_FLAG(CV_16UC1, CV_16UC3):
EXPECT_EQ( 0xA090, img.at<Vec3w>(0, 0) [0] );
EXPECT_EQ( 0xB080, img.at<Vec3w>(0, img.cols-1)[0] );
EXPECT_EQ( 0xC070, img.at<Vec3w>(img.rows-1, 0) [0] );
EXPECT_EQ( 0xD060, img.at<Vec3w>(img.rows-1, img.cols-1)[0] );
// R==G==B
EXPECT_EQ( img.at<Vec3w>(0, 0) [0], img.at<Vec3w>(0, 0) [1] );
EXPECT_EQ( img.at<Vec3w>(0, 0) [0], img.at<Vec3w>(0, 0) [2] );
EXPECT_EQ( img.at<Vec3w>(0, img.cols-1) [0], img.at<Vec3w>(0, img.cols-1)[1] );
EXPECT_EQ( img.at<Vec3w>(0, img.cols-1) [0], img.at<Vec3w>(0, img.cols-1)[2] );
EXPECT_EQ( img.at<Vec3w>(img.rows-1, 0) [0], img.at<Vec3w>(img.rows-1, 0) [1] );
EXPECT_EQ( img.at<Vec3w>(img.rows-1, 0) [0], img.at<Vec3w>(img.rows-1, 0) [2] );
EXPECT_EQ( img.at<Vec3w>(img.rows-1, img.cols-1) [0], img.at<Vec3w>(img.rows-1, img.cols-1)[1] );
EXPECT_EQ( img.at<Vec3w>(img.rows-1, img.cols-1) [0], img.at<Vec3w>(img.rows-1, img.cols-1)[2] );
break;
// Not supported.
// (1) 8bit to 16bit
case MAKE_FLAG(CV_8UC1, CV_16UC1):
case MAKE_FLAG(CV_8UC1, CV_16UC3):
case MAKE_FLAG(CV_8UC1, CV_16UC4):
case MAKE_FLAG(CV_8UC3, CV_16UC1):
case MAKE_FLAG(CV_8UC3, CV_16UC3):
case MAKE_FLAG(CV_8UC3, CV_16UC4):
case MAKE_FLAG(CV_8UC4, CV_16UC1):
case MAKE_FLAG(CV_8UC4, CV_16UC3):
case MAKE_FLAG(CV_8UC4, CV_16UC4):
// (2) GRAY/RGB TO RGBA
case MAKE_FLAG(CV_8UC1, CV_8UC4):
case MAKE_FLAG(CV_8UC3, CV_8UC4):
case MAKE_FLAG(CV_16UC1, CV_8UC4):
case MAKE_FLAG(CV_16UC3, CV_8UC4):
case MAKE_FLAG(CV_16UC1, CV_16UC4):
case MAKE_FLAG(CV_16UC3, CV_16UC4):
default:
FAIL() << cv::format("Unknown test pattern: from = %d ( %d, %d) to = %d ( %d, %d )",
mat_type, (int)CV_MAT_CN(mat_type ), ( CV_MAT_DEPTH(mat_type )==CV_16U)?16:8,
img.type(), (int)CV_MAT_CN(img.type() ), ( CV_MAT_DEPTH(img.type() )==CV_16U)?16:8);
break;
}
#undef MAKE_FLAG
}
// Basic Test
const Bufsize_and_Type Imgcodecs_Tiff_decode_Huge_list_basic[] =
{
make_tuple<uint64_t, tuple<string,int>,ImreadMixModes>( 1073479680ull, make_tuple<string,int>("CV_8UC1", CV_8UC1), IMREAD_MIX_COLOR ),
make_tuple<uint64_t, tuple<string,int>,ImreadMixModes>( 2147483648ull, make_tuple<string,int>("CV_16UC4", CV_16UC4), IMREAD_MIX_COLOR ),
};
INSTANTIATE_TEST_CASE_P(Imgcodecs_Tiff, Imgcodecs_Tiff_decode_Huge,
testing::ValuesIn( Imgcodecs_Tiff_decode_Huge_list_basic )
);
// Full Test
/**
* Test lists for combination of IMREAD_*.
*/
const ImreadMixModes all_modes_Huge_Full[] =
{
IMREAD_MIX_UNCHANGED,
IMREAD_MIX_GRAYSCALE,
IMREAD_MIX_GRAYSCALE_ANYDEPTH,
IMREAD_MIX_GRAYSCALE_ANYCOLOR,
IMREAD_MIX_GRAYSCALE_ANYDEPTH_ANYCOLOR,
IMREAD_MIX_COLOR,
IMREAD_MIX_COLOR_ANYDEPTH,
IMREAD_MIX_COLOR_ANYCOLOR,
IMREAD_MIX_COLOR_ANYDEPTH_ANYCOLOR,
};
const uint64_t huge_buffer_sizes_decode_Full[] =
{
1048576ull, // 1 * 1024 * 1024
1073479680ull, // 1024 * 1024 * 1024 - 32768 * 4 * 2
1073741824ull, // 1024 * 1024 * 1024
2147483648ull, // 2048 * 1024 * 1024
};
const tuple<string, int> mat_types_Full[] =
{
make_tuple<string, int>("CV_8UC1", CV_8UC1), // 8bit GRAY
make_tuple<string, int>("CV_8UC3", CV_8UC3), // 24bit RGB
make_tuple<string, int>("CV_8UC4", CV_8UC4), // 32bit RGBA
make_tuple<string, int>("CV_16UC1", CV_16UC1), // 16bit GRAY
make_tuple<string, int>("CV_16UC3", CV_16UC3), // 48bit RGB
make_tuple<string, int>("CV_16UC4", CV_16UC4), // 64bit RGBA
};
INSTANTIATE_TEST_CASE_P(DISABLED_Imgcodecs_Tiff_Full, Imgcodecs_Tiff_decode_Huge,
testing::Combine(
testing::ValuesIn(huge_buffer_sizes_decode_Full),
testing::ValuesIn(mat_types_Full),
testing::ValuesIn(all_modes_Huge_Full)
)
);
//==================================================================================================
TEST(Imgcodecs_Tiff, write_read_16bit_big_little_endian)
{
// see issue #2601 "16-bit Grayscale TIFF Load Failures Due to Buffer Underflow and Endianness"

@@ -2115,23 +2115,24 @@ transform.
@param image 8-bit, single-channel binary source image. The image may be modified by the function.
@param lines Output vector of lines. Each line is represented by a 2 or 3 element vector
\f$(\rho, \theta)\f$ or \f$(\rho, \theta, \textrm{votes})\f$ . \f$\rho\f$ is the distance from the coordinate origin \f$(0,0)\f$ (top-left corner of
the image). \f$\theta\f$ is the line rotation angle in radians (
\f$0 \sim \textrm{vertical line}, \pi/2 \sim \textrm{horizontal line}\f$ ).
\f$(\rho, \theta)\f$ or \f$(\rho, \theta, \textrm{votes})\f$, where \f$\rho\f$ is the distance from
the coordinate origin \f$(0,0)\f$ (top-left corner of the image), \f$\theta\f$ is the line rotation
angle in radians ( \f$0 \sim \textrm{vertical line}, \pi/2 \sim \textrm{horizontal line}\f$ ), and
\f$\textrm{votes}\f$ is the value of the accumulator.
@param rho Distance resolution of the accumulator in pixels.
@param theta Angle resolution of the accumulator in radians.
@param threshold Accumulator threshold parameter. Only those lines are returned that get enough
@param threshold %Accumulator threshold parameter. Only those lines are returned that get enough
votes ( \f$>\texttt{threshold}\f$ ).
@param srn For the multi-scale Hough transform, it is a divisor for the distance resolution rho .
@param srn For the multi-scale Hough transform, it is a divisor for the distance resolution rho.
The coarse accumulator distance resolution is rho and the accurate accumulator resolution is
rho/srn . If both srn=0 and stn=0 , the classical Hough transform is used. Otherwise, both these
rho/srn. If both srn=0 and stn=0, the classical Hough transform is used. Otherwise, both these
parameters should be positive.
@param stn For the multi-scale Hough transform, it is a divisor for the distance resolution theta.
@param min_theta For standard and multi-scale Hough transform, minimum angle to check for lines.
Must fall between 0 and max_theta.
@param max_theta For standard and multi-scale Hough transform, maximum angle to check for lines.
Must fall between min_theta and CV_PI.
@param max_theta For standard and multi-scale Hough transform, an upper bound for the angle.
Must fall between min_theta and CV_PI. The actual maximum angle in the accumulator may be slightly
less than max_theta, depending on the parameters min_theta and theta.
*/
CV_EXPORTS_W void HoughLines( InputArray image, OutputArray lines,
double rho, double theta, int threshold,
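A minimal usage sketch for the parameters above (illustrative values; `edges` is an assumed 8-bit binary edge image, e.g. Canny output):
@code{.cpp}
std::vector<cv::Vec2f> lines;
cv::HoughLines(edges, lines, 1 /*rho, px*/, CV_PI / 180 /*theta, rad*/,
               150 /*threshold*/, 0 /*srn*/, 0 /*stn*/,
               0 /*min_theta*/, CV_PI /*max_theta*/);
@endcode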
@@ -2159,7 +2160,7 @@ And this is the output of the above program in case of the probabilistic Hough t
line segment.
@param rho Distance resolution of the accumulator in pixels.
@param theta Angle resolution of the accumulator in radians.
@param threshold Accumulator threshold parameter. Only those lines are returned that get enough
@param threshold %Accumulator threshold parameter. Only those lines are returned that get enough
votes ( \f$>\texttt{threshold}\f$ ).
@param minLineLength Minimum line length. Line segments shorter than that are rejected.
@param maxLineGap Maximum allowed gap between points on the same line to link them.
@@ -2178,13 +2179,14 @@ The function finds lines in a set of points using a modification of the Hough tr
@param lines Output vector of found lines. Each vector is encoded as a vector<Vec3d> \f$(votes, rho, theta)\f$.
The larger the value of 'votes', the higher the reliability of the Hough line.
@param lines_max Max count of Hough lines.
@param threshold Accumulator threshold parameter. Only those lines are returned that get enough
@param threshold %Accumulator threshold parameter. Only those lines are returned that get enough
votes ( \f$>\texttt{threshold}\f$ ).
@param min_rho Minimum value for \f$\rho\f$ for the accumulator (Note: \f$\rho\f$ can be negative. The absolute value \f$|\rho|\f$ is the distance of a line to the origin.).
@param max_rho Maximum value for \f$\rho\f$ for the accumulator.
@param rho_step Distance resolution of the accumulator.
@param min_theta Minimum angle value of the accumulator in radians.
@param max_theta Maximum angle value of the accumulator in radians.
@param max_theta Upper bound for the angle value of the accumulator in radians. The actual maximum
angle may be slightly less than max_theta, depending on the parameters min_theta and theta_step.
@param theta_step Angle resolution of the accumulator in radians.
*/
CV_EXPORTS_W void HoughLinesPointSet( InputArray point, OutputArray lines, int lines_max, int threshold,
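A minimal usage sketch (illustrative values and a hypothetical point set):
@code{.cpp}
std::vector<cv::Point2f> points = { {0.f, 0.f}, {10.f, 10.f}, {20.f, 20.f} };
std::vector<cv::Vec3d> lines;                        // each entry: (votes, rho, theta)
cv::HoughLinesPointSet(points, lines, 10 /*lines_max*/, 2 /*threshold*/,
                       0.0, 100.0, 1.0,              // min_rho, max_rho, rho_step
                       0.0, CV_PI / 2, CV_PI / 180); // min_theta, max_theta, theta_step
@endcode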

@@ -68,6 +68,18 @@ struct hough_cmp_gt
const int* aux;
};
static inline int
computeNumangle( double min_theta, double max_theta, double theta_step )
{
int numangle = cvFloor((max_theta - min_theta) / theta_step) + 1;
// If the distance between the first angle and the last angle is
// approximately equal to pi, then the last angle will be removed
// in order to prevent a line from being detected twice.
if ( numangle > 1 && fabs(CV_PI - (numangle-1)*theta_step) < theta_step/2 )
--numangle;
return numangle;
}
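// Worked example (illustration): with min_theta = 0, max_theta = CV_PI and
// theta_step = CV_PI/180, cvFloor(CV_PI / (CV_PI/180)) + 1 = 181 bins; the
// first (0) and last (CV_PI) angles describe the same lines, and since
// |CV_PI - 180*theta_step| = 0 < theta_step/2, numangle is trimmed to 180.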
static void
createTrigTable( int numangle, double min_theta, double theta_step,
float irho, float *tabSin, float *tabCos )
@@ -130,7 +142,7 @@ HoughLinesStandard( InputArray src, OutputArray lines, int type,
CV_CheckGE(max_theta, min_theta, "max_theta must be greater than min_theta");
int numangle = cvRound((max_theta - min_theta) / theta);
int numangle = computeNumangle(min_theta, max_theta, theta);
int numrho = cvRound(((max_rho - min_rho) + 1) / rho);
#if defined HAVE_IPP && IPP_VERSION_X100 >= 810 && !IPP_DISABLE_HOUGH
@@ -475,7 +487,7 @@ HoughLinesProbabilistic( Mat& image,
int width = image.cols;
int height = image.rows;
int numangle = cvRound(CV_PI / theta);
int numangle = computeNumangle(0.0, CV_PI, theta);
int numrho = cvRound(((width + height) * 2 + 1) / rho);
#if defined HAVE_IPP && IPP_VERSION_X100 >= 810 && !IPP_DISABLE_HOUGH
@@ -792,7 +804,7 @@ static bool ocl_HoughLines(InputArray _src, OutputArray _lines, double rho, doub
}
UMat src = _src.getUMat();
int numangle = cvRound((max_theta - min_theta) / theta);
int numangle = computeNumangle(min_theta, max_theta, theta);
int numrho = cvRound(((src.cols + src.rows) * 2 + 1) / rho);
UMat pointsList;
@ -846,7 +858,7 @@ static bool ocl_HoughLinesP(InputArray _src, OutputArray _lines, double rho, dou
}
UMat src = _src.getUMat();
int numangle = cvRound(CV_PI / theta);
int numangle = computeNumangle(0.0, CV_PI, theta);
int numrho = cvRound(((src.cols + src.rows) * 2 + 1) / rho);
UMat pointsList;
@ -956,7 +968,7 @@ void HoughLinesPointSet( InputArray _point, OutputArray _lines, int lines_max, i
int i;
float irho = 1 / (float)rho_step;
float irho_min = ((float)min_rho * irho);
int numangle = cvRound((max_theta - min_theta) / theta_step);
int numangle = computeNumangle(min_theta, max_theta, theta_step);
int numrho = cvRound((max_rho - min_rho + 1) / rho_step);
Mat _accum = Mat::zeros( (numangle+2), (numrho+2), CV_32SC1 );

@ -82,6 +82,8 @@ template<typename T1, typename T2> int PyrDownVecV(T1**, T2*, int) { return 0; }
template<typename T1, typename T2> int PyrUpVecV(T1**, T2**, int) { return 0; }
template<typename T1, typename T2> int PyrUpVecVOneRow(T1**, T2*, int) { return 0; }
#if CV_SIMD
template<> int PyrDownVecH<uchar, int, 1>(const uchar* src, int* row, int width)
@ -717,6 +719,120 @@ template <> int PyrUpVecV<float, float>(float** src, float** dst, int width)
return x;
}
template <> int PyrUpVecVOneRow<int, uchar>(int** src, uchar* dst, int width)
{
int x = 0;
const int *row0 = src[0], *row1 = src[1], *row2 = src[2];
for( ; x <= width - v_uint8::nlanes; x += v_uint8::nlanes)
{
v_int16 v_r00 = v_pack(vx_load(row0 + x), vx_load(row0 + x + v_int32::nlanes)),
v_r01 = v_pack(vx_load(row0 + x + 2 * v_int32::nlanes), vx_load(row0 + x + 3 * v_int32::nlanes)),
v_r10 = v_pack(vx_load(row1 + x), vx_load(row1 + x + v_int32::nlanes)),
v_r11 = v_pack(vx_load(row1 + x + 2 * v_int32::nlanes), vx_load(row1 + x + 3 * v_int32::nlanes)),
v_r20 = v_pack(vx_load(row2 + x), vx_load(row2 + x + v_int32::nlanes)),
v_r21 = v_pack(vx_load(row2 + x + 2 * v_int32::nlanes), vx_load(row2 + x + 3 * v_int32::nlanes));
v_int16 v_2r10 = v_r10 + v_r10, v_2r11 = (v_r11 + v_r11);
v_store(dst + x, v_rshr_pack_u<6>(v_r00 + v_r20 + (v_2r10 + v_2r10 + v_2r10), v_r01 + v_r21 + (v_2r11 + v_2r11 + v_2r11)));
}
if(x <= width - v_uint16::nlanes)
{
v_int16 v_r00 = v_pack(vx_load(row0 + x), vx_load(row0 + x + v_int32::nlanes)),
v_r10 = v_pack(vx_load(row1 + x), vx_load(row1 + x + v_int32::nlanes)),
v_r20 = v_pack(vx_load(row2 + x), vx_load(row2 + x + v_int32::nlanes));
v_int16 v_2r10 = v_r10 + v_r10;
v_rshr_pack_u_store<6>(dst + x, v_r00 + v_r20 + (v_2r10 + v_2r10 + v_2r10));
x += v_uint16::nlanes;
}
typedef int CV_DECL_ALIGNED(1) unaligned_int;
for (; x <= width - v_int32x4::nlanes; x += v_int32x4::nlanes)
{
v_int32 v_r00 = vx_load(row0 + x),
v_r10 = vx_load(row1 + x),
v_r20 = vx_load(row2 + x);
v_int32 v_2r10 = v_r10 + v_r10;
v_int16 d = v_pack(v_r00 + v_r20 + (v_2r10 + v_2r10 + v_2r10), (v_r10 + v_r20) << 2);
*(unaligned_int*)(dst + x) = v_reinterpret_as_s32(v_rshr_pack_u<6>(d, vx_setzero_s16())).get0();
}
vx_cleanup();
return x;
}
template <> int PyrUpVecVOneRow<int, short>(int** src, short* dst, int width)
{
int x = 0;
const int *row0 = src[0], *row1 = src[1], *row2 = src[2];
for( ; x <= width - v_int16::nlanes; x += v_int16::nlanes)
{
v_int32 v_r00 = vx_load(row0 + x),
v_r01 = vx_load(row0 + x + v_int32::nlanes),
v_r10 = vx_load(row1 + x),
v_r11 = vx_load(row1 + x + v_int32::nlanes),
v_r20 = vx_load(row2 + x),
v_r21 = vx_load(row2 + x + v_int32::nlanes);
v_store(dst + x, v_rshr_pack<6>(v_r00 + v_r20 + ((v_r10 << 1) + (v_r10 << 2)), v_r01 + v_r21 + ((v_r11 << 1) + (v_r11 << 2))));
}
if(x <= width - v_int32::nlanes)
{
v_int32 v_r00 = vx_load(row0 + x),
v_r10 = vx_load(row1 + x),
v_r20 = vx_load(row2 + x);
v_rshr_pack_store<6>(dst + x, v_r00 + v_r20 + ((v_r10 << 1) + (v_r10 << 2)));
x += v_int32::nlanes;
}
vx_cleanup();
return x;
}
template <> int PyrUpVecVOneRow<int, ushort>(int** src, ushort* dst, int width)
{
int x = 0;
const int *row0 = src[0], *row1 = src[1], *row2 = src[2];
for( ; x <= width - v_uint16::nlanes; x += v_uint16::nlanes)
{
v_int32 v_r00 = vx_load(row0 + x),
v_r01 = vx_load(row0 + x + v_int32::nlanes),
v_r10 = vx_load(row1 + x),
v_r11 = vx_load(row1 + x + v_int32::nlanes),
v_r20 = vx_load(row2 + x),
v_r21 = vx_load(row2 + x + v_int32::nlanes);
v_store(dst + x, v_rshr_pack_u<6>(v_r00 + v_r20 + ((v_r10 << 1) + (v_r10 << 2)), v_r01 + v_r21 + ((v_r11 << 1) + (v_r11 << 2))));
}
if(x <= width - v_int32::nlanes)
{
v_int32 v_r00 = vx_load(row0 + x),
v_r10 = vx_load(row1 + x),
v_r20 = vx_load(row2 + x);
v_rshr_pack_u_store<6>(dst + x, v_r00 + v_r20 + ((v_r10 << 1) + (v_r10 << 2)));
x += v_int32::nlanes;
}
vx_cleanup();
return x;
}
template <> int PyrUpVecVOneRow<float, float>(float** src, float* dst, int width)
{
int x = 0;
const float *row0 = src[0], *row1 = src[1], *row2 = src[2];
v_float32 v_6 = vx_setall_f32(6.0f), v_scale = vx_setall_f32(1.f/64.f);
for( ; x <= width - v_float32::nlanes; x += v_float32::nlanes)
{
v_float32 v_r0 = vx_load(row0 + x),
v_r1 = vx_load(row1 + x),
v_r2 = vx_load(row2 + x);
v_store(dst + x, v_scale * (v_muladd(v_6, v_r1, v_r0) + v_r2));
}
vx_cleanup();
return x;
}
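// Scalar reference for what the PyrUpVecVOneRow specializations above
// compute (an illustrative sketch, not OpenCV API): one vertical pass of
// the 1-6-1 kernel followed by a rounding shift by 6 (division by 64)
// and a saturating cast, matching v_rshr_pack_u<6>.
static void pyrUpOneRowRef(const int* row0, const int* row1, const int* row2,
                           ushort* dst, int width)
{
    for (int x = 0; x < width; x++)
        dst[x] = saturate_cast<ushort>((row0[x] + 6*row1[x] + row2[x] + 32) >> 6);
}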
#endif
template<class CastOp>
@ -983,13 +1099,26 @@ pyrUp_( const Mat& _src, Mat& _dst, int)
row0 = rows[0]; row1 = rows[1]; row2 = rows[2];
dsts[0] = dst0; dsts[1] = dst1;
x = PyrUpVecV<WT, T>(rows, dsts, dsize.width);
for( ; x < dsize.width; x++ )
if (dst0 != dst1)
{
T t1 = castOp((row1[x] + row2[x])*4);
T t0 = castOp(row0[x] + row1[x]*6 + row2[x]);
dst1[x] = t1; dst0[x] = t0;
x = PyrUpVecV<WT, T>(rows, dsts, dsize.width);
for( ; x < dsize.width; x++ )
{
T t1 = castOp((row1[x] + row2[x])*4);
T t0 = castOp(row0[x] + row1[x]*6 + row2[x]);
dst1[x] = t1; dst0[x] = t0;
}
}
else
{
x = PyrUpVecVOneRow<WT, T>(rows, dst0, dsize.width);
for( ; x < dsize.width; x++ )
{
T t0 = castOp(row0[x] + row1[x]*6 + row2[x]);
dst0[x] = t0;
}
}
}
if (dsize.height > ssize.height*2)
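For context, a minimal call that exercises the new dst0 == dst1 path (an odd
destination size makes the final vertical step produce a single row; the sizes
are illustrative and satisfy pyrUp's constraint |dstsize - 2*srcsize| <= dstsize % 2):

    cv::Mat src(13, 13, CV_16UC3, cv::Scalar::all(1));
    cv::Mat dst;
    cv::pyrUp(src, dst, cv::Size(src.cols * 2 - 1, src.rows * 2 - 1));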

@ -6,6 +6,12 @@
#include "opencv2/core/hal/intrin.hpp"
#if defined(__GNUC__) && __GNUC__ == 12
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wuninitialized"
#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
#endif
namespace cv { namespace hal {
CV_CPU_OPTIMIZATION_NAMESPACE_BEGIN
@ -465,3 +471,7 @@ void calculate_integral_avx512(const uchar *src, size_t _srcstep,
CV_CPU_OPTIMIZATION_NAMESPACE_END
}} // end namespace cv::hal
#if defined(__GNUC__) && __GNUC__ == 12
#pragma GCC diagnostic pop
#endif

@ -329,6 +329,17 @@ TEST(HoughLinesPointSet, regression_21029)
EXPECT_TRUE(lines.empty());
}
TEST(HoughLines, regression_21983)
{
Mat img(200, 200, CV_8UC1, Scalar(0));
line(img, Point(0, 100), Point(100, 100), Scalar(255));
std::vector<Vec2f> lines;
HoughLines(img, lines, 1, CV_PI/180, 90, 0, 0, 0.001, 1.58);
ASSERT_EQ(lines.size(), 1U);
EXPECT_EQ(lines[0][0], 100);
EXPECT_NEAR(lines[0][1], 1.57179642, 1e-4);
}
INSTANTIATE_TEST_CASE_P( ImgProc, StandartHoughLinesTest, testing::Combine(testing::Values( "shared/pic5.png", "../stitching/a1.png" ),
testing::Values( 1, 10 ),
testing::Values( 0.05, 0.1 ),

@ -8,12 +8,41 @@ namespace opencv_test { namespace {
TEST(Imgproc_PyrUp, pyrUp_regression_22184)
{
Mat src(100, 100, CV_16UC3, Scalar::all(255));
Mat dst(100 * 2 + 1, 100 * 2 + 1, CV_16UC3, Scalar::all(0));
Mat src(100, 100, CV_16UC3, Scalar(255, 255, 255));
Mat dst(100 * 2 + 1, 100 * 2 + 1, CV_16UC3, Scalar(0,0,0));
pyrUp(src, dst, Size(dst.cols, dst.rows));
double min_val = 0;
minMaxLoc(dst, &min_val);
ASSERT_GT(cvRound(min_val), 0);
}
}} // namespace
TEST(Imgproc_PyrUp, pyrUp_regression_22194)
{
Mat src(13, 13, CV_16UC3, Scalar(0, 0, 0));
{
int swidth = src.cols;
int sheight = src.rows;
int cn = src.channels();
int count = 0;
for (int y = 0; y < sheight; y++)
{
ushort *src_c = src.ptr<ushort>(y);
for (int x = 0; x < swidth * cn; x++)
{
src_c[x] = (count++) % 10;
}
}
}
Mat dst(src.cols * 2 - 1, src.rows * 2 - 1, CV_16UC3, Scalar(0,0,0));
pyrUp(src, dst, Size(dst.cols, dst.rows));
{
ushort *dst_c = dst.ptr<ushort>(dst.rows - 1);
ASSERT_EQ(dst_c[0], 6);
ASSERT_EQ(dst_c[1], 6);
ASSERT_EQ(dst_c[2], 1);
}
}
}
}

@ -55,6 +55,10 @@ PERF_TEST_P_(Perf_Objdetect_QRCode, decode)
typedef ::perf::TestBaseWithParam< std::string > Perf_Objdetect_QRCode_Multi;
static inline bool compareCorners(const Point2f& corner1, const Point2f& corner2) {
return corner1.x == corner2.x ? corner1.y < corner2.y : corner1.x < corner2.x;
}
PERF_TEST_P_(Perf_Objdetect_QRCode_Multi, detectMulti)
{
const std::string name_current_image = GetParam();
@ -66,11 +70,14 @@ PERF_TEST_P_(Perf_Objdetect_QRCode_Multi, detectMulti)
std::vector<Point2f> corners;
QRCodeDetector qrcode;
TEST_CYCLE() ASSERT_TRUE(qrcode.detectMulti(src, corners));
sort(corners.begin(), corners.end(), [](const Point2f& corner1, const Point2f& corner2)
{return corner1.x == corner2.x ? corner1.y < corner2.y : corner1.x < corner2.x;});
sort(corners.begin(), corners.end(), compareCorners);
SANITY_CHECK(corners);
}
static inline bool compareQR(const pair<string, Mat>& v1, const pair<string, Mat>& v2) {
return v1.first < v2.first;
}
#ifdef HAVE_QUIRC
PERF_TEST_P_(Perf_Objdetect_QRCode_Multi, decodeMulti)
{
@ -97,8 +104,8 @@ PERF_TEST_P_(Perf_Objdetect_QRCode_Multi, decodeMulti)
for (size_t i = 0ull; i < decoded_info.size(); i++) {
result.push_back(make_pair(decoded_info[i], straight_barcode[i]));
}
sort(result.begin(), result.end(), [](const pair<string, Mat>& v1, const pair<string, Mat>& v2)
{return v1.first < v2.first; });
sort(result.begin(), result.end(), compareQR);
vector<vector<uint8_t> > decoded_info_sort;
vector<Mat> straight_barcode_sort;
for (size_t i = 0ull; i < result.size(); i++) {

@ -3,6 +3,7 @@
// of this distribution and at http://opencv.org/license.html.
#include "test_precomp.hpp"
#include "opencv2/imgproc.hpp"
namespace opencv_test { namespace {
@ -16,8 +17,9 @@ std::string qrcode_images_name[] = {
// version_5_right.jpg DISABLED after tile fix, PR #22025
};
// TODO: fix corner alignment in big QRs to enable close_5.png
std::string qrcode_images_close[] = {
"close_1.png", "close_2.png", "close_3.png", "close_4.png", "close_5.png"
"close_1.png", "close_2.png", "close_3.png", "close_4.png"//, "close_5.png"
};
std::string qrcode_images_monitor[] = {
"monitor_1.png", "monitor_2.png", "monitor_3.png", "monitor_4.png", "monitor_5.png"
@ -313,7 +315,7 @@ TEST_P(Objdetect_QRCode_Close, regression)
const int width = cvRound(src.size().width * coeff_expansion);
const int height = cvRound(src.size().height * coeff_expansion);
Size new_size(width, height);
resize(src, barcode, new_size, 0, 0, INTER_LINEAR);
resize(src, barcode, new_size, 0, 0, INTER_LINEAR_EXACT);
std::vector<Point> corners;
std::string decoded_info;
QRCodeDetector qrcode;

@ -1004,9 +1004,11 @@ public:
/** @overload
@param filename Name of the output video file.
@param fourcc 4-character code of codec used to compress the frames. For example,
VideoWriter::fourcc('P','I','M','1') is a MPEG-1 codec, VideoWriter::fourcc('M','J','P','G') is a
motion-jpeg codec etc. List of codes can be obtained at [Video Codecs by
FOURCC](http://www.fourcc.org/codecs.php) page. FFMPEG backend with MP4 container natively uses
VideoWriter::fourcc('P','I','M','1') is an MPEG-1 codec, VideoWriter::fourcc('M','J','P','G')
is a motion-jpeg codec, etc. A list of codes can be obtained from the
[MSDN](https://docs.microsoft.com/en-us/windows/win32/medfound/video-fourccs) page,
or from this [archived page](https://web.archive.org/web/20220316062600/http://www.fourcc.org/codecs.php)
of the fourcc site for a more complete list. The FFMPEG backend with an MP4 container natively uses
other values as fourcc code: see [ObjectType](http://mp4ra.org/#/codecs),
so you may receive a warning message from OpenCV about fourcc code conversion.
@param fps Framerate of the created video stream.
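A short sketch of the overload described above (the file name, codec choice and
frame source are illustrative):

    cv::VideoWriter writer("output.avi",
                           cv::VideoWriter::fourcc('M','J','P','G'),
                           /*fps=*/30.0, cv::Size(640, 480));
    if (writer.isOpened())
        writer.write(frame); // frame: a CV_8UC3 image of the declared size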

@ -399,7 +399,7 @@ if( NOT ANDROID_NDK )
__INIT_VARIABLE( ANDROID_STANDALONE_TOOLCHAIN PATH ENV_ANDROID_STANDALONE_TOOLCHAIN )
if( NOT ANDROID_STANDALONE_TOOLCHAIN )
#try to find Android NDK in one of the the default locations
#try to find Android NDK in one of the default locations
set( __ndkSearchPaths )
foreach( __ndkSearchPath ${ANDROID_NDK_SEARCH_PATHS} )
foreach( suffix ${ANDROID_SUPPORTED_NDK_VERSIONS} )
@ -413,7 +413,7 @@ if( NOT ANDROID_NDK )
message( STATUS "Using default path for Android NDK: ${ANDROID_NDK}" )
message( STATUS " If you prefer to use a different location, please define a cmake or environment variable: ANDROID_NDK" )
else()
#try to find Android standalone toolchain in one of the the default locations
#try to find Android standalone toolchain in one of the default locations
__INIT_VARIABLE( ANDROID_STANDALONE_TOOLCHAIN PATH ANDROID_STANDALONE_TOOLCHAIN_SEARCH_PATH )
if( ANDROID_STANDALONE_TOOLCHAIN )

@ -0,0 +1,108 @@
/**
@file generalizedHoughTransform.cpp
@author Markus Heck
@brief Detects an object, given by a template, in an image using GeneralizedHoughBallard and GeneralizedHoughGuil.
*/
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
using namespace cv;
using namespace std;
int main() {
//! [generalized-hough-transform-load-and-setup]
// load source image and grayscale template
Mat image = imread("images/generalized_hough_mini_image.jpg");
Mat templ = imread("images/generalized_hough_mini_template.jpg", IMREAD_GRAYSCALE);
// create grayscale image (imread loads BGR, so convert from BGR)
Mat grayImage;
cvtColor(image, grayImage, COLOR_BGR2GRAY);
// create variables for the location, scale and rotation of detected templates
vector<Vec4f> positionBallard, positionGuil;
// template width and height
int w = templ.cols;
int h = templ.rows;
//! [generalized-hough-transform-load-and-setup]
//! [generalized-hough-transform-setup-parameters]
// create ballard and set options
Ptr<GeneralizedHoughBallard> ballard = createGeneralizedHoughBallard();
ballard->setMinDist(10);
ballard->setLevels(360);
ballard->setDp(2);
ballard->setMaxBufferSize(1000);
ballard->setVotesThreshold(40);
ballard->setCannyLowThresh(30);
ballard->setCannyHighThresh(110);
ballard->setTemplate(templ);
// create guil and set options
Ptr<GeneralizedHoughGuil> guil = createGeneralizedHoughGuil();
guil->setMinDist(10);
guil->setLevels(360);
guil->setDp(3);
guil->setMaxBufferSize(1000);
guil->setMinAngle(0);
guil->setMaxAngle(360);
guil->setAngleStep(1);
guil->setAngleThresh(1500);
guil->setMinScale(0.5);
guil->setMaxScale(2.0);
guil->setScaleStep(0.05);
guil->setScaleThresh(50);
guil->setPosThresh(10);
guil->setCannyLowThresh(30);
guil->setCannyHighThresh(110);
guil->setTemplate(templ);
//! [generalized-hough-transform-setup-parameters]
//! [generalized-hough-transform-run]
// execute ballard detection
ballard->detect(grayImage, positionBallard);
// execute guil detection
guil->detect(grayImage, positionGuil);
//! [generalized-hough-transform-run]
//! [generalized-hough-transform-draw-results]
// draw ballard
for (vector<Vec4f>::iterator iter = positionBallard.begin(); iter != positionBallard.end(); ++iter) {
RotatedRect rRect = RotatedRect(Point2f((*iter)[0], (*iter)[1]),
Size2f(w * (*iter)[2], h * (*iter)[2]),
(*iter)[3]);
Point2f vertices[4];
rRect.points(vertices);
for (int i = 0; i < 4; i++)
line(image, vertices[i], vertices[(i + 1) % 4], Scalar(255, 0, 0), 6);
}
// draw guil
for (vector<Vec4f>::iterator iter = positionGuil.begin(); iter != positionGuil.end(); ++iter) {
RotatedRect rRect = RotatedRect(Point2f((*iter)[0], (*iter)[1]),
Size2f(w * (*iter)[2], h * (*iter)[2]),
(*iter)[3]);
Point2f vertices[4];
rRect.points(vertices);
for (int i = 0; i < 4; i++)
line(image, vertices[i], vertices[(i + 1) % 4], Scalar(0, 255, 0), 2);
}
imshow("result_img", image);
waitKey();
//! [generalized-hough-transform-draw-results]
return EXIT_SUCCESS;
}

@ -58,7 +58,7 @@ int main(int argc, char **argv)
calcHist(&hsv_roi, 1, channels, mask, roi_hist, 1, histSize, range);
normalize(roi_hist, roi_hist, 0, 255, NORM_MINMAX);
// Setup the termination criteria, either 10 iteration or move by atleast 1 pt
// Setup the termination criteria, either 10 iteration or move by at least 1 pt
TermCriteria term_crit(TermCriteria::EPS | TermCriteria::COUNT, 10, 1);
while(true){

@ -58,7 +58,7 @@ int main(int argc, char **argv)
calcHist(&hsv_roi, 1, channels, mask, roi_hist, 1, histSize, range);
normalize(roi_hist, roi_hist, 0, 255, NORM_MINMAX);
// Setup the termination criteria, either 10 iteration or move by atleast 1 pt
// Setup the termination criteria, either 10 iteration or move by at least 1 pt
TermCriteria term_crit(TermCriteria::EPS | TermCriteria::COUNT, 10, 1);
while(true){

@ -35,7 +35,7 @@ class Camshift {
Imgproc.calcHist(Arrays.asList(hsv_roi), channels, mask, roi_hist, histSize, range);
Core.normalize(roi_hist, roi_hist, 0, 255, Core.NORM_MINMAX);
// Setup the termination criteria, either 10 iteration or move by atleast 1 pt
// Setup the termination criteria, either 10 iteration or move by at least 1 pt
TermCriteria term_crit = new TermCriteria(TermCriteria.EPS | TermCriteria.COUNT, 10, 1);
while (true) {

@ -34,7 +34,7 @@ class Meanshift {
Imgproc.calcHist(Arrays.asList(hsv_roi), channels, mask, roi_hist, histSize, range);
Core.normalize(roi_hist, roi_hist, 0, 255, Core.NORM_MINMAX);
// Setup the termination criteria, either 10 iteration or move by atleast 1 pt
// Setup the termination criteria, either 10 iteration or move by at least 1 pt
TermCriteria term_crit = new TermCriteria(TermCriteria.EPS | TermCriteria.COUNT, 10, 1);
while (true) {

@ -91,7 +91,7 @@ void MainPage::InvalidateSize()
// We have different widths to use depending on the view state
if (ApplicationView::Value != ApplicationViewState::Snapped)
{
// Make us as big as the the left over space, factoring in the ListBox width, the ListBox margins.
// Make us as big as the leftover space, factoring in the ListBox width, the ListBox margins,
// and the LayoutRoot's margins
InputSection->Width = ((availableWidth) -
(layoutRootMarginLeft + layoutRootMarginRight + listBoxMarginLeft + listBoxMarginRight));
