Merge pull request #23109 from seanm:misc-warnings

* Fixed clang -Wnewline-eof warnings
* Fixed all trivial clang -Wextra-semi and -Wc++98-compat-extra-semi warnings (see the sketch below)
* Removed trailing semi from various macros
* Fixed various -Wunused-macros warnings
* Fixed some trivial -Wdocumentation warnings
* Fixed some -Wdocumentation-deprecated-sync warnings
* Fixed incorrect indentation
* Suppressed some clang warnings in 3rd party code
* Fixed QRCodeEncoder::Params documentation.
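
For context, here is a minimal sketch of the two patterns most of these fixes follow, using a hypothetical `Example` struct that is not taken from this diff: a stray semicolon after a member function body or macro invocation triggers clang's -Wextra-semi / -Wc++98-compat-extra-semi, `//<` member comments are not recognized by Doxygen and become `///<`, and @param names that do not match the actual parameters trigger -Wdocumentation.

    // Hypothetical example, not taken from this PR.
    // Before: warns under clang with -Wextra-semi and -Wdocumentation
    /**
     * @brief Scales a value.
     * @param value The input value.   (parameter is actually named `v`)
     */
    struct Example
    {
        int scale(int v) const { return 2 * v; };  // stray ';' after the body
        int factor; //< scale factor               // '//<' is an ordinary comment; Doxygen needs '///<'
    };

    // After: semicolon dropped, '///<' used, @param matches the real name
    /**
     * @brief Scales a value.
     * @param v The input value.
     */
    struct Example
    {
        int scale(int v) const { return 2 * v; }
        int factor; ///< scale factor
    };

The Python-binding macro invocations in the diff (e.g. CV_PY_TO_CLASS(AsyncArray)) follow the same pattern: the extra trailing semicolon was redundant and is simply dropped.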

---------

Co-authored-by: Alexander Smorkalov <alexander.smorkalov@xperience.ai>
Branch: pull/24370/head
Author: Sean McBride, committed via GitHub
Commit: 5fb3869775 (parent 24fd39538e)

Files changed (number of changed lines in parentheses):

  1. 3rdparty/openjpeg/CMakeLists.txt (1)
  2. 3rdparty/protobuf/CMakeLists.txt (2)
  3. modules/calib3d/perf/perf_stereosgbm.cpp (2)
  4. modules/calib3d/src/chessboard.hpp (14)
  5. modules/calib3d/src/fundam.cpp (4)
  6. modules/calib3d/src/ippe.hpp (10)
  7. modules/calib3d/src/p3p.cpp (4)
  8. modules/calib3d/src/precomp.hpp (2)
  9. modules/calib3d/src/rho.cpp (2)
  10. modules/calib3d/src/rho.h (2)
  11. modules/core/include/opencv2/core/dualquaternion.inl.hpp (6)
  12. modules/core/include/opencv2/core/hal/intrin_forward.hpp (2)
  13. modules/core/include/opencv2/core/matx.hpp (2)
  14. modules/core/include/opencv2/core/ocl.hpp (2)
  15. modules/core/include/opencv2/core/types.hpp (6)
  16. modules/core/include/opencv2/core/utils/filesystem.private.hpp (8)
  17. modules/core/include/opencv2/core/utils/trace.hpp (24)
  18. modules/core/misc/python/pyopencv_async.hpp (4)
  19. modules/core/misc/python/pyopencv_cuda.hpp (20)
  20. modules/core/misc/python/pyopencv_umat.hpp (4)
  21. modules/core/perf/perf_allocation.cpp (2)
  22. modules/core/src/alloc.cpp (2)
  23. modules/core/src/arithm.dispatch.cpp (2)
  24. modules/core/src/arithm_ipp.hpp (2)
  25. modules/core/src/dxt.cpp (2)
  26. modules/core/src/hal_replacement.hpp (142)
  27. modules/core/src/minmax.cpp (6)
  28. modules/core/src/parallel.cpp (2)
  29. modules/core/src/persistence_base64_encoding.cpp (2)
  30. modules/core/src/persistence_base64_encoding.hpp (2)
  31. modules/core/src/softfloat.cpp (34)
  32. modules/core/src/split.simd.hpp (2)
  33. modules/core/test/test_arithm.cpp (14)
  34. modules/dnn/include/opencv2/dnn/all_layers.hpp (6)
  35. modules/dnn/src/graph_simplifier.hpp (4)
  36. modules/dnn/src/layers/convolution_layer.cpp (2)
  37. modules/dnn/src/layers/elementwise_layers.cpp (5)
  38. modules/dnn/src/legacy_backend.cpp (2)
  39. modules/dnn/src/model.cpp (8)
  40. modules/dnn/src/onnx/onnx_importer.cpp (10)
  41. modules/dnn/src/tensorflow/tf_importer.cpp (2)
  42. modules/dnn/test/test_main.cpp (2)
  43. modules/dnn/test/test_onnx_conformance.cpp (2)
  44. modules/features2d/src/hal_replacement.hpp (15)
  45. modules/features2d/src/kaze/nldiffusion_functions.cpp (30)
  46. modules/features2d/src/kaze/utils.h (2)
  47. modules/flann/include/opencv2/flann/composite_index.h (1)
  48. modules/flann/include/opencv2/flann/dynamic_bitset.h (3)
  49. modules/flann/include/opencv2/flann/logger.h (1)
  50. modules/flann/include/opencv2/flann/lsh_table.h (3)
  51. modules/flann/include/opencv2/flann/random.h (1)
  52. modules/flann/include/opencv2/flann/result_set.h (3)
  53. modules/flann/src/flann.cpp (2)
  54. modules/gapi/include/opencv2/gapi/garg.hpp (2)
  55. modules/gapi/include/opencv2/gapi/garray.hpp (6)
  56. modules/gapi/include/opencv2/gapi/gcomputation.hpp (1)
  57. modules/gapi/include/opencv2/gapi/gkernel.hpp (6)
  58. modules/gapi/include/opencv2/gapi/gmat.hpp (1)
  59. modules/gapi/include/opencv2/gapi/gopaque.hpp (6)
  60. modules/gapi/include/opencv2/gapi/gstreaming.hpp (2)
  61. modules/gapi/include/opencv2/gapi/gtransform.hpp (2)
  62. modules/gapi/include/opencv2/gapi/gtype_traits.hpp (4)
  63. modules/gapi/include/opencv2/gapi/gtyped.hpp (2)
  64. modules/gapi/include/opencv2/gapi/infer/ie.hpp (8)
  65. modules/gapi/include/opencv2/gapi/infer/onnx.hpp (2)
  66. modules/gapi/include/opencv2/gapi/media.hpp (1)
  67. modules/gapi/include/opencv2/gapi/own/convert.hpp (2)
  68. modules/gapi/include/opencv2/gapi/own/scalar.hpp (2)
  69. modules/gapi/include/opencv2/gapi/s11n.hpp (2)
  70. modules/gapi/misc/python/pyopencv_gapi.hpp (2)
  71. modules/gapi/misc/python/python_bridge.hpp (4)
  72. modules/gapi/perf/common/gapi_render_perf_tests_inl.hpp (2)
  73. modules/gapi/src/api/render_ocv.cpp (10)
  74. modules/gapi/src/backends/common/serialization.cpp (3)
  75. modules/gapi/src/backends/common/serialization.hpp (2)
  76. modules/gapi/src/backends/streaming/gstreamingbackend.cpp (8)
  77. modules/gapi/src/compiler/gcompiled_priv.hpp (2)
  78. modules/gapi/src/compiler/gstreaming_priv.hpp (2)
  79. modules/gapi/src/compiler/passes/pattern_matching.cpp (6)
  80. modules/gapi/test/common/gapi_core_tests_inl.hpp (4)
  81. modules/gapi/test/common/gapi_render_tests.cpp (2)
  82. modules/gapi/test/common/gapi_render_tests.hpp (4)
  83. modules/gapi/test/cpu/gapi_ocv_stateful_kernel_tests.cpp (4)
  84. modules/gapi/test/gapi_async_test.cpp (2)
  85. modules/gapi/test/gapi_fluid_test.cpp (6)
  86. modules/gapi/test/gapi_kernel_tests.cpp (4)
  87. modules/gapi/test/internal/gapi_int_executor_tests.cpp (10)
  88. modules/gapi/test/internal/gapi_int_island_tests.cpp (2)
  89. modules/highgui/include/opencv2/highgui.hpp (44)
  90. modules/highgui/src/registry.impl.hpp (2)
  91. modules/highgui/src/roiSelector.cpp (2)
  92. modules/imgcodecs/include/opencv2/imgcodecs.hpp (4)
  93. modules/imgcodecs/src/exif.cpp (2)
  94. modules/imgcodecs/src/grfmt_tiff.cpp (5)
  95. modules/imgproc/include/opencv2/imgproc.hpp (8)
  96. modules/imgproc/perf/perf_integral.cpp (2)
  97. modules/imgproc/src/accum.dispatch.cpp (2)
  98. modules/imgproc/src/contours.cpp (3)
  99. modules/imgproc/src/drawing.cpp (3)
  100. modules/imgproc/src/filter.dispatch.cpp (2)

  Some files were not shown because too many files have changed in this diff.

@ -16,6 +16,7 @@ ocv_warnings_disable(CMAKE_C_FLAGS
-Wunused-but-set-variable # clang15
-Wmissing-prototypes # clang, function opj_t1_ht_decode_cblk
-Wmissing-declarations # gcc, function opj_t1_ht_decode_cblk
-Wdocumentation # clang
)
#-----------------------------------------------------------------------------

@ -27,6 +27,8 @@ else()
-Wimplicit-fallthrough
-Warray-bounds # GCC 9+
-Wstringop-overflow -Wstringop-overread # GCC 11-12
-Wextra-semi # clang
-Wcomma # clang
)
endif()
if(CV_ICC)

@ -43,7 +43,7 @@ using namespace testing;
static void MakeArtificialExample(Mat& dst_left_view, Mat& dst_view);
CV_ENUM(SGBMModes, StereoSGBM::MODE_SGBM, StereoSGBM::MODE_SGBM_3WAY, StereoSGBM::MODE_HH4);
CV_ENUM(SGBMModes, StereoSGBM::MODE_SGBM, StereoSGBM::MODE_SGBM_3WAY, StereoSGBM::MODE_HH4)
typedef tuple<Size, int, SGBMModes> SGBMParams;
typedef TestBaseWithParam<SGBMParams> TestStereoCorrespSGBM;

@ -203,12 +203,12 @@ class Chessboard: public cv::Feature2D
* d12/d34 = d13/d24
*
* point order on the line:
* pt1 --> pt2 --> pt3 --> pt4
* p0 --> p1 --> p2 --> p3
*
* \param[in] pt1 First point coordinate
* \param[in] pt2 Second point coordinate
* \param[in] pt3 Third point coordinate
* \param[out] pt4 Forth point coordinate
* \param[in] p0 First point coordinate
* \param[in] p1 Second point coordinate
* \param[in] p2 Third point coordinate
* \param[out] p3 Forth point coordinate
*
*/
static bool estimatePoint(const cv::Point2f &p0,const cv::Point2f &p1,const cv::Point2f &p2,cv::Point2f &p3);
@ -309,7 +309,7 @@ class Chessboard: public cv::Feature2D
* \brief Draws the corners into the given image
*
* \param[in] m The image
* \param[out] m The resulting image
* \param[out] out The resulting image
* \param[in] H optional homography to calculate search area
*
*/
@ -668,7 +668,7 @@ class Chessboard: public cv::Feature2D
* \brief Calculates the average edge sharpness for the chessboard
*
* \param[in] image The image where the chessboard was detected
* \param[in] rise_distante Rise distance 0.8 means 10% ... 90%
* \param[in] rise_distance Rise distance 0.8 means 10% ... 90%
* \param[in] vertical by default only edge response for horiontal lines are calculated
*
* \returns Scalar(sharpness, average min_val, average max_val)

@ -113,7 +113,7 @@ public:
* 2 columns 1 channel
* @param _m2 destination points containing (x,y), depth is CV_32F with 1 column 2 channels or
* 2 columns 1 channel
* @param _model, CV_64FC1, 3x3, normalized, i.e., the last element is 1
* @param _model CV_64FC1, 3x3, normalized, i.e., the last element is 1
*/
int runKernel( InputArray _m1, InputArray _m2, OutputArray _model ) const CV_OVERRIDE
{
@ -188,7 +188,7 @@ public:
* @param _m1 depth CV_32F, 1-channel with 2 columns or 2-channel with 1 column
* @param _m2 depth CV_32F, 1-channel with 2 columns or 2-channel with 1 column
* @param _model CV_64FC1, 3x3
* @param _err, output, CV_32FC1, square of the L2 norm
* @param _err output, CV_32FC1, square of the L2 norm
*/
void computeError( InputArray _m1, InputArray _m2, InputArray _model, OutputArray _err ) const CV_OVERRIDE
{

@ -111,7 +111,7 @@ private:
/**
* @brief Computes the translation solution for a given rotation solution
* @param objectPoints Array of corresponding object points, 1xN/Nx1 3-channel where N is the number of points
* @param normalizedImagePoints Array of corresponding image points (undistorted), 1xN/Nx1 2-channel where N is the number of points
* @param normalizedImgPoints Array of corresponding image points (undistorted), 1xN/Nx1 2-channel where N is the number of points
* @param R Rotation solution (3x1 rotation vector)
* @param t Translation solution (3x1 rotation vector)
*/
@ -220,10 +220,10 @@ private:
/**
* @brief Computes the average depth of an object given its pose in camera coordinates
* @param objectPoints: Object points defined in 3D object space
* @param rvec: Rotation component of pose
* @param tvec: Translation component of pose
* @return: average depth of the object
* @param objectPoints Object points defined in 3D object space
* @param rvec Rotation component of pose
* @param tvec Translation component of pose
* @return average depth of the object
*/
double meanSceneDepth(InputArray objectPoints, InputArray rvec, InputArray tvec);

@ -214,8 +214,8 @@ int p3p::solve(double R[4][3][3], double t[4][3],
/// Only the solution to the main branch.
/// Reference : X.S. Gao, X.-R. Hou, J. Tang, H.-F. Chang; "Complete Solution Classification for the Perspective-Three-Point Problem"
/// IEEE Trans. on PAMI, vol. 25, No. 8, August 2003
/// \param lengths3D Lengths of line segments up to four solutions.
/// \param dist3D Distance between 3D points in pairs |BC|, |AC|, |AB|.
/// \param lengths Lengths of line segments up to four solutions.
/// \param distances Distance between 3D points in pairs |BC|, |AC|, |AB|.
/// \param cosines Cosine of the angles /_BPC, /_APC, /_APB.
/// \returns Number of solutions.
/// WARNING: NOT ALL THE DEGENERATE CASES ARE IMPLEMENTED

@ -69,7 +69,7 @@ namespace cv
* @param ep outlier ratio
* @param modelPoints number of model points required for estimation
* @param maxIters maximum number of iterations
* @return
* @return The number of iterations according to the formula
* \f[
* \frac{\ln(1-p)}{\ln\left(1-(1-ep)^\mathrm{modelPoints}\right)}
* \f]

@ -490,7 +490,7 @@ void rhoSeed(Ptr<RHO_HEST> p, uint64_t seed){
* Estimates the homography using the given context, matches and parameters to
* PROSAC.
*
* @param [in/out] p The context to use for homography estimation. Must
* @param [in,out] p The context to use for homography estimation. Must
* be already initialized. Cannot be NULL.
* @param [in] src The pointer to the source points of the matches.
* Must be aligned to 4 bytes. Cannot be NULL.

@ -215,7 +215,7 @@ void rhoSeed(Ptr<RHO_HEST> p, uint64_t seed);
* homography with at least the minimum required support, and 0 if it was not.
*
*
* @param [in/out] p The context to use for homography estimation. Must
* @param [in,out] p The context to use for homography estimation. Must
* be already initialized. Cannot be NULL.
* @param [in] src The pointer to the source points of the matches.
* Must be aligned to 4 bytes. Cannot be NULL.

@ -36,15 +36,15 @@
namespace cv {
template <typename T>
DualQuat<T>::DualQuat():w(0), x(0), y(0), z(0), w_(0), x_(0), y_(0), z_(0){};
DualQuat<T>::DualQuat():w(0), x(0), y(0), z(0), w_(0), x_(0), y_(0), z_(0){}
template <typename T>
DualQuat<T>::DualQuat(const T vw, const T vx, const T vy, const T vz, const T _w, const T _x, const T _y, const T _z):
w(vw), x(vx), y(vy), z(vz), w_(_w), x_(_x), y_(_y), z_(_z){};
w(vw), x(vx), y(vy), z(vz), w_(_w), x_(_x), y_(_y), z_(_z){}
template <typename T>
DualQuat<T>::DualQuat(const Vec<T, 8> &q):w(q[0]), x(q[1]), y(q[2]), z(q[3]),
w_(q[4]), x_(q[5]), y_(q[6]), z_(q[7]){};
w_(q[4]), x_(q[5]), y_(q[6]), z_(q[7]){}
template <typename T>
DualQuat<T> DualQuat<T>::createFromQuat(const Quat<T> &realPart, const Quat<T> &dualPart)

@ -188,4 +188,4 @@ CV_CPU_OPTIMIZATION_HAL_NAMESPACE_END
//! @endcond
} // cv::
} // cv::

@ -215,7 +215,7 @@ public:
template<int l> Matx(const Matx<_Tp, m, l>& a, const Matx<_Tp, l, n>& b, Matx_MatMulOp);
Matx(const Matx<_Tp, n, m>& a, Matx_TOp);
_Tp val[m*n]; //< matrix elements
_Tp val[m*n]; ///< matrix elements
};
typedef Matx<float, 1, 2> Matx12f;

@ -779,7 +779,7 @@ public:
void start();
void stop();
uint64 durationNS() const; //< duration in nanoseconds
uint64 durationNS() const; ///< duration in nanoseconds
protected:
struct Impl;

@ -89,7 +89,7 @@ public:
//! conjugation
Complex conj() const;
_Tp re, im; //< the real and the imaginary parts
_Tp re, im; ///< the real and the imaginary parts
};
typedef Complex<float> Complexf;
@ -2031,8 +2031,8 @@ double jaccardDistance(const Rect_<_Tp>& a, const Rect_<_Tp>& b) {
/** @brief Finds out if there is any intersection between two rectangles
*
* mainly useful for language bindings
* @param rect1 First rectangle
* @param rect2 Second rectangle
* @param a First rectangle
* @param b Second rectangle
* @return the area of the intersection
*/
CV_EXPORTS_W inline double rectangleIntersectionArea(const Rect2d& a, const Rect2d& b) { return (a & b).area(); }

@ -47,11 +47,11 @@ public:
explicit FileLock(const char* fname);
~FileLock();
void lock(); //< acquire exclusive (writer) lock
void unlock(); //< release exclusive (writer) lock
void lock(); ///< acquire exclusive (writer) lock
void unlock(); ///< release exclusive (writer) lock
void lock_shared(); //< acquire shareable (reader) lock
void unlock_shared(); //< release shareable (reader) lock
void lock_shared(); ///< acquire shareable (reader) lock
void unlock_shared(); ///< release shareable (reader) lock
struct Impl;
protected:

@ -70,11 +70,11 @@ public:
struct LocationExtraData;
struct LocationStaticStorage
{
LocationExtraData** ppExtra; //< implementation specific data
const char* name; //< region name (function name or other custom name)
const char* filename; //< source code filename
int line; //< source code line
int flags; //< flags (implementation code path: Plain, IPP, OpenCL)
LocationExtraData** ppExtra; ///< implementation specific data
const char* name; ///< region name (function name or other custom name)
const char* filename; ///< source code filename
int line; ///< source code line
int flags; ///< flags (implementation code path: Plain, IPP, OpenCL)
};
Region(const LocationStaticStorage& location);
@ -100,18 +100,18 @@ private:
//! Specify region flags
enum RegionLocationFlag {
REGION_FLAG_FUNCTION = (1 << 0), //< region is function (=1) / nested named region (=0)
REGION_FLAG_APP_CODE = (1 << 1), //< region is Application code (=1) / OpenCV library code (=0)
REGION_FLAG_SKIP_NESTED = (1 << 2), //< avoid processing of nested regions
REGION_FLAG_FUNCTION = (1 << 0), ///< region is function (=1) / nested named region (=0)
REGION_FLAG_APP_CODE = (1 << 1), ///< region is Application code (=1) / OpenCV library code (=0)
REGION_FLAG_SKIP_NESTED = (1 << 2), ///< avoid processing of nested regions
REGION_FLAG_IMPL_IPP = (1 << 16), //< region is part of IPP code path
REGION_FLAG_IMPL_OPENCL = (2 << 16), //< region is part of OpenCL code path
REGION_FLAG_IMPL_OPENVX = (3 << 16), //< region is part of OpenVX code path
REGION_FLAG_IMPL_IPP = (1 << 16), ///< region is part of IPP code path
REGION_FLAG_IMPL_OPENCL = (2 << 16), ///< region is part of OpenCL code path
REGION_FLAG_IMPL_OPENVX = (3 << 16), ///< region is part of OpenVX code path
REGION_FLAG_IMPL_MASK = (15 << 16),
REGION_FLAG_REGION_FORCE = (1 << 30),
REGION_FLAG_REGION_NEXT = (1 << 31), //< close previous region (see #CV_TRACE_REGION_NEXT macro)
REGION_FLAG_REGION_NEXT = (1 << 31), ///< close previous region (see #CV_TRACE_REGION_NEXT macro)
ENUM_REGION_FLAG_FORCE_INT = INT_MAX
};

@ -2,7 +2,7 @@
#include "opencv2/core/async.hpp"
CV_PY_TO_CLASS(AsyncArray);
CV_PY_FROM_CLASS(AsyncArray);
CV_PY_TO_CLASS(AsyncArray)
CV_PY_FROM_CLASS(AsyncArray)
#endif

@ -20,18 +20,18 @@ template<> struct pyopencvVecConverter<cuda::GpuMat>
}
};
CV_PY_TO_CLASS(cuda::GpuMat);
CV_PY_TO_CLASS(cuda::Stream);
CV_PY_TO_CLASS(cuda::Event);
CV_PY_TO_CLASS(cuda::HostMem);
CV_PY_TO_CLASS(cuda::GpuMat)
CV_PY_TO_CLASS(cuda::Stream)
CV_PY_TO_CLASS(cuda::Event)
CV_PY_TO_CLASS(cuda::HostMem)
CV_PY_TO_CLASS_PTR(cuda::GpuMat);
CV_PY_TO_CLASS_PTR(cuda::GpuMat::Allocator);
CV_PY_TO_CLASS_PTR(cuda::GpuMat)
CV_PY_TO_CLASS_PTR(cuda::GpuMat::Allocator)
CV_PY_FROM_CLASS(cuda::GpuMat);
CV_PY_FROM_CLASS(cuda::Stream);
CV_PY_FROM_CLASS(cuda::HostMem);
CV_PY_FROM_CLASS(cuda::GpuMat)
CV_PY_FROM_CLASS(cuda::Stream)
CV_PY_FROM_CLASS(cuda::HostMem)
CV_PY_FROM_CLASS_PTR(cuda::GpuMat::Allocator);
CV_PY_FROM_CLASS_PTR(cuda::GpuMat::Allocator)
#endif

@ -4,8 +4,8 @@
typedef std::vector<Range> vector_Range;
CV_PY_TO_CLASS(UMat);
CV_PY_FROM_CLASS(UMat);
CV_PY_TO_CLASS(UMat)
CV_PY_FROM_CLASS(UMat)
static bool cv_mappable_to(const Ptr<Mat>& src, Ptr<UMat>& dst)
{

@ -45,4 +45,4 @@ PERF_TEST_P(MatDepth_tb, DISABLED_Allocation_Aligned,
SANITY_CHECK_NOTHING();
}
};
}

@ -53,7 +53,6 @@
#undef CV__ALLOCATOR_STATS_LOG
//#define OPENCV_ALLOC_ENABLE_STATISTICS
#define OPENCV_ALLOC_STATISTICS_LIMIT 4096 // don't track buffers less than N bytes
#ifdef HAVE_POSIX_MEMALIGN
@ -63,6 +62,7 @@
#endif
#ifdef OPENCV_ALLOC_ENABLE_STATISTICS
#define OPENCV_ALLOC_STATISTICS_LIMIT 4096 // don't track buffers less than N bytes
#include <map>
#endif

@ -8,4 +8,4 @@
#include "arithm.simd_declarations.hpp"
#define ARITHM_DISPATCHING_ONLY
#include "arithm.simd.hpp"
#include "arithm.simd.hpp"

@ -414,4 +414,4 @@ inline int arithm_ipp_mul32f(const float *src1, size_t step1, const float *src2,
#if !ARITHM_USE_IPP
#define ARITHM_CALL_IPP(...)
#endif
#endif

@ -64,8 +64,6 @@ namespace cv
Discrete Fourier Transform
\****************************************************************************************/
#define CV_MAX_LOCAL_DFT_SIZE (1 << 15)
static unsigned char bitrevTab[] =
{
0x00,0x80,0x40,0xc0,0x20,0xa0,0x60,0xe0,0x10,0x90,0x50,0xd0,0x30,0xb0,0x70,0xf0,

@ -69,10 +69,14 @@
/**
Add: _dst[i] = src1[i] + src2[i]_ @n
Sub: _dst[i] = src1[i] - src2[i]_
@param src1_data,src1_step first source image data and step
@param src2_data,src2_step second source image data and step
@param dst_data,dst_step destination image data and step
@param width,height dimensions of the images
@param src1_data first source image data
@param src1_step first source image step
@param src2_data second source image data
@param src2_step second source image step
@param dst_data destination image data
@param dst_step destination image step
@param width width of the images
@param height height of the images
*/
//! @addtogroup core_hal_interface_addsub Element-wise add and subtract
//! @{
@ -96,10 +100,14 @@ inline int hal_ni_sub64f(const double *src1_data, size_t src1_step, const double
/**
Minimum: _dst[i] = min(src1[i], src2[i])_ @n
Maximum: _dst[i] = max(src1[i], src2[i])_
@param src1_data,src1_step first source image data and step
@param src2_data,src2_step second source image data and step
@param dst_data,dst_step destination image data and step
@param width,height dimensions of the images
@param src1_data first source image data
@param src1_step first source image step
@param src2_data second source image data
@param src2_step second source image step
@param dst_data destination image data
@param dst_step destination image step
@param width width of the images
@param height height of the images
*/
//! @addtogroup core_hal_interface_minmax Element-wise minimum or maximum
//! @{
@ -122,11 +130,14 @@ inline int hal_ni_min64f(const double *src1_data, size_t src1_step, const double
/**
Absolute difference: _dst[i] = | src1[i] - src2[i] |_
@param src1_data,src1_step first source image data and step
@param src2_data,src2_step second source image data and step
@param dst_data,dst_step destination image data and step
@param width,height dimensions of the images
@param scale additional multiplier
@param src1_data first source image data
@param src1_step first source image step
@param src2_data second source image data
@param src2_step second source image step
@param dst_data destination image data
@param dst_step destination image step
@param width width of the images
@param height height of the images
*/
//! @addtogroup core_hal_interface_absdiff Element-wise absolute difference
//! @{
@ -144,10 +155,14 @@ Bitwise AND: _dst[i] = src1[i] & src2[i]_ @n
Bitwise OR: _dst[i] = src1[i] | src2[i]_ @n
Bitwise XOR: _dst[i] = src1[i] ^ src2[i]_ @n
Bitwise NOT: _dst[i] = !src[i]_
@param src1_data,src1_step first source image data and step
@param src2_data,src2_step second source image data and step
@param dst_data,dst_step destination image data and step
@param width,height dimensions of the images
@param src1_data first source image data
@param src1_step first source image step
@param src2_data second source image data
@param src2_step second source image step
@param dst_data destination image data
@param dst_step destination image step
@param width width of the images
@param height height of the images
*/
//! @addtogroup core_hal_interface_logical Bitwise logical operations
//! @{
@ -201,10 +216,14 @@ inline int hal_ni_not8u(const uchar *src_data, size_t src_step, uchar *dst_data,
/**
Compare: _dst[i] = src1[i] op src2[i]_
@param src1_data,src1_step first source image data and step
@param src2_data,src2_step second source image data and step
@param dst_data,dst_step destination image data and step
@param width,height dimensions of the images
@param src1_data first source image data
@param src1_step first source image step
@param src2_data second source image data
@param src2_step second source image step
@param dst_data destination image data
@param dst_step destination image step
@param width width of the images
@param height height of the images
@param operation one of (CV_HAL_CMP_EQ, CV_HAL_CMP_GT, ...)
*/
//! @addtogroup core_hal_interface_compare Element-wise compare
@ -230,10 +249,14 @@ inline int hal_ni_cmp64f(const double *src1_data, size_t src1_step, const double
/**
Multiply: _dst[i] = scale * src1[i] * src2[i]_
@param src1_data,src1_step first source image data and step
@param src2_data,src2_step second source image data and step
@param dst_data,dst_step destination image data and step
@param width,height dimensions of the images
@param src1_data first source image data
@param src1_step first source image step
@param src2_data second source image data
@param src2_step second source image step
@param dst_data destination image data
@param dst_step destination image step
@param width width of the images
@param height height of the images
@param scale additional multiplier
*/
//! @addtogroup core_hal_interface_multiply Element-wise multiply
@ -249,10 +272,14 @@ inline int hal_ni_mul64f(const double *src1_data, size_t src1_step, const double
/**
Divide: _dst[i] = scale * src1[i] / src2[i]_
@param src1_data,src1_step first source image data and step
@param src2_data,src2_step second source image data and step
@param dst_data,dst_step destination image data and step
@param width,height dimensions of the images
@param src1_data first source image data and step
@param src1_step first source image data and step
@param src2_data second source image data and step
@param src2_step second source image data and step
@param dst_data destination image data and step
@param dst_step destination image data and step
@param width dimensions of the images
@param height dimensions of the images
@param scale additional multiplier
*/
//! @addtogroup core_hal_interface_divide Element-wise divide
@ -268,9 +295,12 @@ inline int hal_ni_div64f(const double *src1_data, size_t src1_step, const double
/**
Computes reciprocial: _dst[i] = scale / src[i]_
@param src_data,src_step source image data and step
@param dst_data,dst_step destination image data and step
@param width,height dimensions of the images
@param src_data source image data
@param src_step source image step
@param dst_data destination image data
@param dst_step destination image step
@param width width of the images
@param height height of the images
@param scale additional multiplier
*/
//! @addtogroup core_hal_interface_reciprocial Element-wise reciprocial
@ -310,10 +340,14 @@ inline int hal_ni_recip64f(const double *src_data, size_t src_step, double *dst_
/**
Computes weighted sum of two arrays using formula: _dst[i] = a * src1[i] + b * src2[i] + c_
@param src1_data,src1_step first source image data and step
@param src2_data,src2_step second source image data and step
@param dst_data,dst_step destination image data and step
@param width,height dimensions of the images
@param src1_data first source image data
@param src1_step first source image step
@param src2_data second source image data
@param src2_step second source image step
@param dst_data destination image data
@param dst_step destination image step
@param width width of the images
@param height height of the images
@param scalars numbers _a_, _b_, and _c_
*/
//! @addtogroup core_hal_interface_addWeighted Element-wise weighted sum
@ -381,7 +415,8 @@ inline int hal_ni_merge64s(const int64 **src_data, int64 *dst_data, int len, int
/**
@param y,x source Y and X arrays
@param y source Y arrays
@param x source X arrays
@param dst destination array
@param len length of arrays
@param angleInDegrees if set to true return angles in degrees, otherwise in radians
@ -399,7 +434,8 @@ inline int hal_ni_fastAtan64f(const double* y, const double* x, double* dst, int
/**
@param x,y source X and Y arrays
@param x source X array
@param y source Y array
@param dst destination array
@param len length of arrays
*/
@ -530,7 +566,8 @@ inline int hal_ni_dftFree1D(cvhalDFT *context) { return CV_HAL_ERROR_NOT_IMPLEME
/**
@param context double pointer to context storing all necessary data
@param width,height image dimensions
@param width image width
@param height image height
@param depth image type (CV_32F or CV_64F)
@param src_channels number of channels in input image
@param dst_channels number of channels in output image
@ -540,8 +577,10 @@ inline int hal_ni_dftFree1D(cvhalDFT *context) { return CV_HAL_ERROR_NOT_IMPLEME
inline int hal_ni_dftInit2D(cvhalDFT **context, int width, int height, int depth, int src_channels, int dst_channels, int flags, int nonzero_rows) { return CV_HAL_ERROR_NOT_IMPLEMENTED; }
/**
@param context pointer to context storing all necessary data
@param src_data,src_step source image data and step
@param dst_data,dst_step destination image data and step
@param src_data source image data
@param src_step source image step
@param dst_data destination image data
@param dst_step destination image step
*/
inline int hal_ni_dft2D(cvhalDFT *context, const uchar *src_data, size_t src_step, uchar *dst_data, size_t dst_step) { return CV_HAL_ERROR_NOT_IMPLEMENTED; }
/**
@ -557,15 +596,18 @@ inline int hal_ni_dftFree2D(cvhalDFT *context) { return CV_HAL_ERROR_NOT_IMPLEME
/**
@param context double pointer to context storing all necessary data
@param width,height image dimensions
@param width image width
@param height image height
@param depth image type (CV_32F or CV_64F)
@param flags algorithm options (combination of CV_HAL_DFT_INVERSE, ...)
*/
inline int hal_ni_dctInit2D(cvhalDFT **context, int width, int height, int depth, int flags) { return CV_HAL_ERROR_NOT_IMPLEMENTED; }
/**
@param context pointer to context storing all necessary data
@param src_data,src_step source image data and step
@param dst_data,dst_step destination image data and step
@param src_data source image data
@param src_step source image step
@param dst_data destination image data
@param dst_step destination image step
*/
inline int hal_ni_dct2D(cvhalDFT *context, const uchar *src_data, size_t src_step, uchar *dst_data, size_t dst_step) { return CV_HAL_ERROR_NOT_IMPLEMENTED; }
/**
@ -717,11 +759,15 @@ inline int hal_ni_gemm64fc(const double* src1, size_t src1_step, const double* s
/**
@brief Finds the global minimum and maximum in an array.
@param src_data,src_step Source image
@param width,height Source image dimensions
@param src_data Source image
@param src_step Source image
@param width Source image dimensions
@param height Source image dimensions
@param depth Depth of source image
@param minVal,maxVal Pointer to the returned global minimum and maximum in an array.
@param minIdx,maxIdx Pointer to the returned minimum and maximum location.
@param minVal Pointer to the returned global minimum and maximum in an array.
@param maxVal Pointer to the returned global minimum and maximum in an array.
@param minIdx Pointer to the returned minimum and maximum location.
@param maxIdx Pointer to the returned minimum and maximum location.
@param mask Specified array region.
*/
inline int hal_ni_minMaxIdx(const uchar* src_data, size_t src_step, int width, int height, int depth, double* minVal, double* maxVal,

@ -1545,9 +1545,9 @@ void cv::minMaxIdx(InputArray _src, double* minVal,
if (!src.empty() && mask.empty())
{
if( minidx == 0 )
minidx = 1;
if( maxidx == 0 )
maxidx = 1;
minidx = 1;
if( maxidx == 0 )
maxidx = 1;
}
if( minidx == 0 )

@ -791,7 +791,7 @@ int getThreadNum()
return 0;
#endif
#elif defined HAVE_HPX
return (int)(hpx::get_num_worker_threads());
return (int)(hpx::get_num_worker_threads());
#elif defined HAVE_OPENMP
return omp_get_thread_num();
#elif defined HAVE_GCD

@ -367,4 +367,4 @@ size_t base64::RawDataToBinaryConvertor::make_to_binary_funcs(const std::string
return offset_packed;
}
}
}

@ -124,4 +124,4 @@ private:
}
}
#endif
#endif

@ -306,9 +306,6 @@ softdouble cos(const softdouble& a) { return f64_cos(a); }
| The values to return on conversions to 32-bit integer formats that raise an
| invalid exception.
*----------------------------------------------------------------------------*/
#define ui32_fromPosOverflow 0xFFFFFFFF
#define ui32_fromNegOverflow 0
#define ui32_fromNaN 0xFFFFFFFF
#define i32_fromPosOverflow 0x7FFFFFFF
#define i32_fromNegOverflow (-0x7FFFFFFF - 1)
#define i32_fromNaN 0x7FFFFFFF
@ -317,9 +314,6 @@ softdouble cos(const softdouble& a) { return f64_cos(a); }
| The values to return on conversions to 64-bit integer formats that raise an
| invalid exception.
*----------------------------------------------------------------------------*/
#define ui64_fromPosOverflow UINT64_C( 0xFFFFFFFFFFFFFFFF )
#define ui64_fromNegOverflow 0
#define ui64_fromNaN UINT64_C( 0xFFFFFFFFFFFFFFFF )
#define i64_fromPosOverflow UINT64_C( 0x7FFFFFFFFFFFFFFF )
//fixed unsigned unary minus: -x == ~x + 1
//#define i64_fromNegOverflow (-UINT64_C( 0x7FFFFFFFFFFFFFFF ) - 1)
@ -422,34 +416,6 @@ struct uint64_extra { uint64_t v, extra; };
struct uint128_extra { struct uint128 v; uint64_t extra; };
#endif
/*----------------------------------------------------------------------------
| These macros are used to isolate the differences in word order between big-
| endian and little-endian platforms.
*----------------------------------------------------------------------------*/
#ifndef WORDS_BIGENDIAN
#define wordIncr 1
#define indexWord( total, n ) (n)
#define indexWordHi( total ) ((total) - 1)
#define indexWordLo( total ) 0
#define indexMultiword( total, m, n ) (n)
#define indexMultiwordHi( total, n ) ((total) - (n))
#define indexMultiwordLo( total, n ) 0
#define indexMultiwordHiBut( total, n ) (n)
#define indexMultiwordLoBut( total, n ) 0
#define INIT_UINTM4( v3, v2, v1, v0 ) { v0, v1, v2, v3 }
#else
#define wordIncr -1
#define indexWord( total, n ) ((total) - 1 - (n))
#define indexWordHi( total ) 0
#define indexWordLo( total ) ((total) - 1)
#define indexMultiword( total, m, n ) ((total) - 1 - (m))
#define indexMultiwordHi( total, n ) 0
#define indexMultiwordLo( total, n ) ((total) - (n))
#define indexMultiwordHiBut( total, n ) 0
#define indexMultiwordLoBut( total, n ) (n)
#define INIT_UINTM4( v3, v2, v1, v0 ) { v3, v2, v1, v0 }
#endif
enum {
softfloat_mulAdd_subC = 1,
softfloat_mulAdd_subProd = 2

@ -220,4 +220,4 @@ void split64s(const int64* src, int64** dst, int len, int cn )
#endif
CV_CPU_OPTIMIZATION_NAMESPACE_END
}} // namespace
}} // namespace

@ -648,7 +648,7 @@ static void inRangeS(const Mat& src, const Scalar& lb, const Scalar& rb, Mat& ds
}
} // namespace
CVTEST_GUARD_SYMBOL(inRange);
CVTEST_GUARD_SYMBOL(inRange)
struct InRangeSOp : public BaseElemWiseOp
{
@ -1178,7 +1178,7 @@ struct MeanOp : public BaseElemWiseOp
MeanOp() : BaseElemWiseOp(1, FIX_ALPHA+FIX_BETA+FIX_GAMMA+SUPPORT_MASK+SCALAR_OUTPUT, 1, 1, Scalar::all(0))
{
context = 3;
};
}
void op(const vector<Mat>& src, Mat& dst, const Mat& mask)
{
dst.create(1, 1, CV_64FC4);
@ -1201,7 +1201,7 @@ struct SumOp : public BaseElemWiseOp
SumOp() : BaseElemWiseOp(1, FIX_ALPHA+FIX_BETA+FIX_GAMMA+SCALAR_OUTPUT, 1, 1, Scalar::all(0))
{
context = 3;
};
}
void op(const vector<Mat>& src, Mat& dst, const Mat&)
{
dst.create(1, 1, CV_64FC4);
@ -1261,7 +1261,7 @@ struct MeanStdDevOp : public BaseElemWiseOp
{
cn = 0;
context = 7;
};
}
void op(const vector<Mat>& src, Mat& dst, const Mat& mask)
{
dst.create(1, 2, CV_64FC4);
@ -1302,7 +1302,7 @@ struct NormOp : public BaseElemWiseOp
{
context = 1;
normType = 0;
};
}
int getRandomType(RNG& rng)
{
int type = cvtest::randomType(rng, _OutputArray::DEPTH_MASK_ALL_BUT_8S, 1, 4);
@ -1348,7 +1348,7 @@ struct MinMaxLocOp : public BaseElemWiseOp
MinMaxLocOp() : BaseElemWiseOp(1, FIX_ALPHA+FIX_BETA+FIX_GAMMA+SUPPORT_MASK+SCALAR_OUTPUT, 1, 1, Scalar::all(0))
{
context = ARITHM_MAX_NDIMS*2 + 2;
};
}
int getRandomType(RNG& rng)
{
return cvtest::randomType(rng, _OutputArray::DEPTH_MASK_ALL_BUT_8S, 1, 1);
@ -1395,7 +1395,7 @@ struct reduceArgMinMaxOp : public BaseElemWiseOp
isLast(false), isMax(false), axis(0)
{
context = ARITHM_MAX_NDIMS*2 + 2;
};
}
int getRandomType(RNG& rng) override
{
return cvtest::randomType(rng, _OutputArray::DEPTH_MASK_ALL_BUT_8S, 1, 1);

@ -588,11 +588,11 @@ CV__DNN_INLINE_NS_BEGIN
{
public:
virtual void forwardSlice(const float* src, float* dst, int len,
size_t outPlaneSize, int cn0, int cn1) const {};
size_t outPlaneSize, int cn0, int cn1) const {}
virtual void forwardSlice(const int* src, const int* lut, int* dst, int len,
size_t outPlaneSize, int cn0, int cn1) const {};
size_t outPlaneSize, int cn0, int cn1) const {}
virtual void forwardSlice(const int8_t* src, const int8_t* lut, int8_t* dst, int len,
size_t outPlaneSize, int cn0, int cn1) const {};
size_t outPlaneSize, int cn0, int cn1) const {}
};
class CV_EXPORTS ReLULayer : public ActivationLayer

@ -17,7 +17,7 @@ namespace cv { namespace dnn {
class ImportNodeWrapper
{
public:
virtual ~ImportNodeWrapper() {};
virtual ~ImportNodeWrapper() {}
virtual int getNumInputs() const = 0;
@ -33,7 +33,7 @@ public:
class ImportGraphWrapper
{
public:
virtual ~ImportGraphWrapper() {};
virtual ~ImportGraphWrapper() {}
virtual Ptr<ImportNodeWrapper> getNode(int idx) const = 0;

@ -242,8 +242,6 @@ public:
};
#define IS_POWER_LAYER(layer) \
(!layer.empty() && !layer->type.compare("Power"))
//TODO: simultaneously convolution and bias addition for cache optimization
class ConvolutionLayerImpl CV_FINAL : public BaseConvolutionLayerImpl
{

@ -2768,11 +2768,6 @@ template<>
const char* const ReciprocalFunctor::BaseDefaultFunctor<ReciprocalFunctor>::ocl_kernel_name = "ReciprocalForward";
#define ACTIVATION_CREATOR_FOR(_Layer, _Functor, ...) \
Ptr<_Layer> _Layer::create() { \
return return Ptr<_Layer>( new ElementWiseLayer<_Functor>(_Functor()) ); }
Ptr<ReLULayer> ReLULayer::create(const LayerParams& params)
{
float negativeSlope = params.get<float>("negative_slope", 0.f);

@ -24,7 +24,7 @@ BackendNode::BackendNode(int backendId)
: backendId(backendId)
{}
BackendNode::~BackendNode() {};
BackendNode::~BackendNode() {}
BackendWrapper::BackendWrapper(int backendId, int targetId)
: backendId(backendId)

@ -306,9 +306,9 @@ void ClassificationModel::classify(InputArray frame, int& classId, float& conf)
}
KeypointsModel::KeypointsModel(const String& model, const String& config)
: Model(model, config) {};
: Model(model, config) {}
KeypointsModel::KeypointsModel(const Net& network) : Model(network) {};
KeypointsModel::KeypointsModel(const Net& network) : Model(network) {}
std::vector<Point2f> KeypointsModel::estimate(InputArray frame, float thresh)
{
@ -364,9 +364,9 @@ std::vector<Point2f> KeypointsModel::estimate(InputArray frame, float thresh)
}
SegmentationModel::SegmentationModel(const String& model, const String& config)
: Model(model, config) {};
: Model(model, config) {}
SegmentationModel::SegmentationModel(const Net& network) : Model(network) {};
SegmentationModel::SegmentationModel(const Net& network) : Model(network) {}
void SegmentationModel::segment(InputArray frame, OutputArray mask)
{

@ -1498,7 +1498,7 @@ void ONNXImporter::lstm_extractConsts(LayerParams& layerParams, const opencv_onn
blob = Mat(blobShape, CV_32FC1, 0.);
}
layerParams.blobs.push_back(blob);
};
}
void ONNXImporter::lstm_add_reshape(const std::string& input_name, const std::string& output_name, int* layerShape, size_t n)
{
@ -1513,7 +1513,7 @@ void ONNXImporter::lstm_add_reshape(const std::string& input_name, const std::st
reshape_proto.add_input(input_name);
reshape_proto.add_output(output_name);
addLayer(reshapeLp, reshape_proto);
};
}
std::string ONNXImporter::lstm_add_slice(int index, const std::string& input_name, int* begin, int* end, size_t n)
{
@ -1532,7 +1532,7 @@ std::string ONNXImporter::lstm_add_slice(int index, const std::string& input_nam
addLayer(sliceLP, slice_proto);
return slice_proto.output(0);
};
}
std::string ONNXImporter::lstm_fix_dims(LayerParams& layerParams, const opencv_onnx::NodeProto& lstm_proto,
int batch_size, int num_directions, int hidden_size, bool need_y, const std::string& y_name,
@ -1560,7 +1560,7 @@ std::string ONNXImporter::lstm_fix_dims(LayerParams& layerParams, const opencv_o
addLayer(permuteLP, permute_proto);
return permute_proto.output(0);
};
}
void ONNXImporter::lstm_add_transform(int num_directions, int batch_size, int hidden_size,
int index, const std::string& input_name, const std::string& output_name)
@ -1602,7 +1602,7 @@ void ONNXImporter::lstm_add_transform(int num_directions, int batch_size, int hi
int layerShape[] = {2, batch_size, hidden_size};
lstm_add_reshape(concat_proto.output(0), output_name, layerShape, sizeof(layerShape) / sizeof(layerShape[0]));
}
};
}
void ONNXImporter::parseLSTM(LayerParams& layerParams, const opencv_onnx::NodeProto& node_proto_)
{

@ -3227,7 +3227,7 @@ void TFLayerHandler::fillRegistry(const tensorflow::GraphDef& net)
}
}
printMissing();
};
}
bool TFLayerHandler::handleMissing(const tensorflow::NodeDef& layer)
{

@ -4,4 +4,4 @@
#include <hpx/hpx_main.hpp>
#endif
CV_TEST_MAIN("", initDNNTests());
CV_TEST_MAIN("", initDNNTests())

@ -1257,4 +1257,4 @@ INSTANTIATE_TEST_CASE_P(/**/, Test_ONNX_conformance,
printOnnxConfParams
);
};
}

@ -64,9 +64,12 @@
//! @{
/**
@brief Detects corners using the FAST algorithm, returns mask.
@param src_data,src_step Source image
@param dst_data,dst_step Destination mask
@param width,height Source image dimensions
@param src_data Source image data
@param src_step Source image step
@param dst_data Destination mask data
@param dst_step Destination mask step
@param width Source image width
@param height Source image height
@param type FAST type
*/
inline int hal_ni_FAST_dense(const uchar* src_data, size_t src_step, uchar* dst_data, size_t dst_step, int width, int height, cv::FastFeatureDetector::DetectorType type) { return CV_HAL_ERROR_NOT_IMPLEMENTED; }
@ -89,8 +92,10 @@ inline int hal_ni_FAST_NMS(const uchar* src_data, size_t src_step, uchar* dst_da
/**
@brief Detects corners using the FAST algorithm.
@param src_data,src_step Source image
@param width,height Source image dimensions
@param src_data Source image data
@param src_step Source image step
@param width Source image width
@param height Source image height
@param keypoints_data Pointer to keypoints
@param keypoints_count Count of keypoints
@param threshold Threshold for keypoint

@ -86,9 +86,9 @@ void image_derivatives_scharr(const cv::Mat& src, cv::Mat& dst, int xorder, int
/**
* @brief This function computes the Perona and Malik conductivity coefficient g1
* g1 = exp(-|dL|^2/k^2)
* @param Lx First order image derivative in X-direction (horizontal)
* @param Ly First order image derivative in Y-direction (vertical)
* @param dst Output image
* @param _Lx First order image derivative in X-direction (horizontal)
* @param _Ly First order image derivative in Y-direction (vertical)
* @param _dst Output image
* @param k Contrast factor parameter
*/
void pm_g1(InputArray _Lx, InputArray _Ly, OutputArray _dst, float k) {
@ -117,9 +117,9 @@ void pm_g1(InputArray _Lx, InputArray _Ly, OutputArray _dst, float k) {
/**
* @brief This function computes the Perona and Malik conductivity coefficient g2
* g2 = 1 / (1 + dL^2 / k^2)
* @param Lx First order image derivative in X-direction (horizontal)
* @param Ly First order image derivative in Y-direction (vertical)
* @param dst Output image
* @param _Lx First order image derivative in X-direction (horizontal)
* @param _Ly First order image derivative in Y-direction (vertical)
* @param _dst Output image
* @param k Contrast factor parameter
*/
void pm_g2(InputArray _Lx, InputArray _Ly, OutputArray _dst, float k) {
@ -146,9 +146,9 @@ void pm_g2(InputArray _Lx, InputArray _Ly, OutputArray _dst, float k) {
/* ************************************************************************* */
/**
* @brief This function computes Weickert conductivity coefficient gw
* @param Lx First order image derivative in X-direction (horizontal)
* @param Ly First order image derivative in Y-direction (vertical)
* @param dst Output image
* @param _Lx First order image derivative in X-direction (horizontal)
* @param _Ly First order image derivative in Y-direction (vertical)
* @param _dst Output image
* @param k Contrast factor parameter
* @note For more information check the following paper: J. Weickert
* Applications of nonlinear diffusion in image processing and computer vision,
@ -183,9 +183,9 @@ void weickert_diffusivity(InputArray _Lx, InputArray _Ly, OutputArray _dst, floa
/**
* @brief This function computes Charbonnier conductivity coefficient gc
* gc = 1 / sqrt(1 + dL^2 / k^2)
* @param Lx First order image derivative in X-direction (horizontal)
* @param Ly First order image derivative in Y-direction (vertical)
* @param dst Output image
* @param _Lx First order image derivative in X-direction (horizontal)
* @param _Ly First order image derivative in Y-direction (vertical)
* @param _dst Output image
* @param k Contrast factor parameter
* @note For more information check the following paper: J. Weickert
* Applications of nonlinear diffusion in image processing and computer vision,
@ -323,7 +323,7 @@ void compute_scharr_derivatives(const cv::Mat& src, cv::Mat& dst, int xorder, in
* @param _ky Vertical kernel values
* @param dx Derivative order in X-direction (horizontal)
* @param dy Derivative order in Y-direction (vertical)
* @param scale_ Scale factor or derivative size
* @param scale Scale factor or derivative size
*/
void compute_derivative_kernels(cv::OutputArray _kx, cv::OutputArray _ky, int dx, int dy, int scale) {
CV_INSTRUMENT_REGION();
@ -415,7 +415,7 @@ private:
/* ************************************************************************* */
/**
* @brief This function performs a scalar non-linear diffusion step
* @param Ld2 Output image in the evolution
* @param Ld Output image in the evolution
* @param c Conductivity image
* @param Lstep Previous image in the evolution
* @param stepsize The step size in time units
@ -490,7 +490,7 @@ void nld_step_scalar(cv::Mat& Ld, const cv::Mat& c, cv::Mat& Lstep, float stepsi
/* ************************************************************************* */
/**
* @brief This function downsamples the input image using OpenCV resize
* @param img Input image to be downsampled
* @param src Input image to be downsampled
* @param dst Output image with half of the resolution of the input image
*/
void halfsample_image(const cv::Mat& src, cv::Mat& dst) {

@ -6,7 +6,7 @@
* @brief This function computes the value of a 2D Gaussian function
* @param x X Position
* @param y Y Position
* @param sig Standard Deviation
* @param sigma Standard Deviation
*/
inline float gaussian(float x, float y, float sigma) {
return expf(-(x*x + y*y) / (2.0f*sigma*sigma));

@ -80,7 +80,6 @@ public:
* @param inputData dataset containing the points to index
* @param params Index parameters
* @param d Distance functor
* @return
*/
CompositeIndex(const Matrix<ElementType>& inputData, const IndexParams& params = CompositeIndexParams(),
Distance d = Distance()) : index_params_(params)

@ -97,7 +97,6 @@ public:
}
/** @brief set one bit to 0
* @param index
*/
void reset(size_t index)
{
@ -108,7 +107,6 @@ public:
* This function is useful when resetting a given set of bits so that the
* whole bitset ends up being 0: if that's the case, we don't care about setting
* other bits to 0
* @param index
*/
void reset_block(size_t index)
{
@ -116,7 +114,6 @@ public:
}
/** resize the bitset so that it contains at least sz bits
* @param sz
*/
void resize(size_t sz)
{

@ -101,7 +101,6 @@ public:
* Print log message
* @param level Log level
* @param fmt Message format
* @return
*/
static int log(int level, const char* fmt, ...)
{

@ -214,8 +214,6 @@ public:
}
/** Get a bucket given the key
* @param key
* @return
*/
inline const Bucket* getBucketFromKey(BucketKey key) const
{
@ -253,7 +251,6 @@ public:
}
/** Get statistics about the table
* @return
*/
LshStats getStats() const;

@ -106,7 +106,6 @@ public:
/**
* Constructor.
* @param n Size of the interval from which to generate
* @return
*/
UniqueRandom(int n)
{

@ -360,7 +360,6 @@ public:
}
/** The number of neighbors in the set
* @return
*/
size_t size() const
{
@ -369,7 +368,6 @@ public:
/** The distance of the furthest neighbor
* If we don't have enough neighbors, it returns the max possible value
* @return
*/
inline DistanceType worstDist() const CV_OVERRIDE
{
@ -490,7 +488,6 @@ public:
/** The distance of the furthest neighbor
* If we don't have enough neighbors, it returns the max possible value
* @return
*/
inline DistanceType worstDist() const CV_OVERRIDE
{

@ -35,7 +35,7 @@ namespace cvflann
* \deprecated Provided for backward compatibility
*/
flann_distance_t flann_distance_type_ = FLANN_DIST_L2;
flann_distance_t flann_distance_type() { return flann_distance_type_; }
CV_DEPRECATED flann_distance_t flann_distance_type() { return flann_distance_type_; }
/**
* Set distance type to used

@ -241,6 +241,7 @@ namespace gapi
*
* @brief G-API functions and classes for serialization and deserialization.
*/
/** @brief Wraps deserialized output GRunArgs to GRunArgsP which can be used by GCompiled.
*
* Since it's impossible to get modifiable output arguments from deserialization
@ -254,6 +255,7 @@ namespace gapi
* @see deserialize
*/
GAPI_EXPORTS cv::GRunArgsP bind(cv::GRunArgs &out_args);
/** @brief Wraps output GRunArgsP available during graph execution to GRunArgs which can be serialized.
*
* GRunArgsP is pointer-to-value, so to be serialized they need to be binded to real values

@ -102,17 +102,17 @@ namespace detail
GAPI_Assert(m_hint != nullptr);
using U = typename std::decay<T>::type;
return dynamic_cast<TypeHint<U>*>(m_hint.get()) != nullptr;
};
}
template <typename T>
void GArrayU::specifyType(){
m_hint.reset(new TypeHint<typename std::decay<T>::type>);
};
}
template <typename T>
void GArrayU::storeKind(){
setKind(cv::detail::GOpaqueTraits<T>::kind);
};
}
// This class represents a typed STL vector reference.
// Depending on origins, this reference may be either "just a" reference to

@ -50,6 +50,7 @@ namespace s11n {
*
* @brief G-API classes for constructed and compiled graphs.
*/
/**
* @brief GComputation class represents a captured computation
* graph. GComputation objects form boundaries for expression code

@ -430,7 +430,7 @@ namespace gapi {
virtual ~GFunctor() = default;
protected:
GFunctor(const char* id) : m_id(id) { };
GFunctor(const char* id) : m_id(id) { }
private:
const char* m_id;
};
@ -692,7 +692,7 @@ namespace gapi {
int unused[] = { 0, (pkg.include<KK>(), 0)... };
cv::util::suppress_unused_warning(unused);
return pkg;
};
}
template<typename... FF>
GKernelPackage kernels(FF&... functors)
@ -701,7 +701,7 @@ namespace gapi {
int unused[] = { 0, (pkg.include(functors), 0)... };
cv::util::suppress_unused_warning(unused);
return pkg;
};
}
/** @} */

@ -48,6 +48,7 @@ struct GOrigin;
* `cv::GOpaque<T>` | T
* cv::GFrame | cv::MediaFrame
*/
/**
* @brief GMat class represents image or tensor data in the
* graph.

@ -98,18 +98,18 @@ namespace detail
GAPI_Assert(m_hint != nullptr);
using U = util::decay_t<T>;
return dynamic_cast<TypeHint<U>*>(m_hint.get()) != nullptr;
};
}
template <typename T>
void GOpaqueU::specifyType(){
m_hint.reset(new TypeHint<util::decay_t<T>>);
};
}
template <typename T>
void GOpaqueU::storeKind(){
// FIXME: Add assert here on cv::Mat and cv::Scalar?
setKind(cv::detail::GOpaqueTraits<T>::kind);
};
}
// This class represents a typed object reference.
// Depending on origins, this reference may be either "just a" reference to

@ -409,7 +409,7 @@ namespace streaming {
struct GAPI_EXPORTS_W_SIMPLE queue_capacity
{
GAPI_WRAP
explicit queue_capacity(size_t cap = 1) : capacity(cap) { };
explicit queue_capacity(size_t cap = 1) : capacity(cap) { }
GAPI_PROP_RW
size_t capacity;
};

@ -91,7 +91,7 @@ public:
{ \
struct G_DESCR_HELPER_CLASS(Class) \
{ \
static constexpr const char *descr() { return Descr; }; \
static constexpr const char *descr() { return Descr; } \
}; \
}

@ -231,10 +231,10 @@ template<typename T> struct GObtainCtor {
static HostCtor get() { return HostCtor{}; }
};
template<typename T> struct GObtainCtor<GArray<T> > {
static HostCtor get() { return HostCtor{ConstructVec{&GArray<T>::VCtor}}; };
static HostCtor get() { return HostCtor{ConstructVec{&GArray<T>::VCtor}}; }
};
template<typename T> struct GObtainCtor<GOpaque<T> > {
static HostCtor get() { return HostCtor{ConstructOpaque{&GOpaque<T>::Ctor}}; };
static HostCtor get() { return HostCtor{ConstructOpaque{&GOpaque<T>::Ctor}}; }
};
} // namespace detail
} // namespace cv

@ -40,7 +40,7 @@ namespace detail
//workaround for MSVC 19.0 bug
template <typename T>
auto make_default()->decltype(T{}) {return {};}
}; // detail
} // detail
/**
* @brief This class is a typed wrapper over a regular GComputation.

@ -173,7 +173,7 @@ public:
, {}
, {}
, {} } {
};
}
/** @overload
Use this constructor to work with pre-compiled network.
@ -202,7 +202,7 @@ public:
, {}
, {}
, {} } {
};
}
/** @brief Specifies sequence of network input layers names for inference.
@ -547,7 +547,7 @@ public:
detail::ParamDesc::Kind::Load, true, {}, {}, {}, 1u,
{}, {}, {}, {}, InferMode::Async, {}, {}, {}, {} },
m_tag(tag) {
};
}
/** @overload
@ -565,7 +565,7 @@ public:
detail::ParamDesc::Kind::Import, true, {}, {}, {}, 1u,
{}, {}, {}, {}, InferMode::Async, {}, {}, {}, {} },
m_tag(tag) {
};
}
/** @see ie::Params::pluginConfig. */
Params& pluginConfig(const IEConfig& cfg) {

@ -293,7 +293,7 @@ public:
desc.num_out = std::tuple_size<typename Net::OutArgs>::value;
desc.is_generic = false;
desc.disable_mem_pattern = false;
};
}
/** @brief Specifies sequence of network input layers names for inference.

@ -33,6 +33,7 @@ namespace cv {
* @brief Extra G-API data structures used to pass input/output data
* to the graph for processing.
*/
/**
* @brief cv::MediaFrame class represents an image/media frame
* obtained from an external source.

@ -31,7 +31,7 @@ namespace cv
return (m.dims == 2)
? cv::gapi::own::Mat{m.rows, m.cols, m.type(), m.data, m.step}
: cv::gapi::own::Mat{to_own<int>(m.size), m.type(), m.data};
};
}
namespace gapi
{

@ -21,7 +21,7 @@ class GAPI_EXPORTS Scalar
{
public:
Scalar() = default;
explicit Scalar(double v0) { val[0] = v0; };
explicit Scalar(double v0) { val[0] = v0; }
Scalar(double v0, double v1, double v2 = 0, double v3 = 0)
: val{v0, v1, v2, v3}
{

@ -337,7 +337,7 @@ namespace detail {
template<typename V>
IOStream& put_v(IOStream&, const V&, std::size_t) {
GAPI_Error("variant>>: requested index is invalid");
};
}
template<typename V, typename X, typename... Xs>
IOStream& put_v(IOStream& os, const V& v, std::size_t x) {

@ -321,7 +321,7 @@ PyObject* pyopencv_from(const cv::detail::OpaqueRef& o)
PyErr_SetString(PyExc_TypeError, "Unsupported GOpaque type");
return NULL;
};
}
template <>
PyObject* pyopencv_from(const cv::detail::VectorRef& v)

@ -137,7 +137,7 @@ public:
using Storage = cv::detail::MakeVariantType<cv::GOpaque, GOPAQUE_TYPE_LIST_G(ID_, ID)>;
template<typename T>
GOpaqueT(cv::GOpaque<T> arg) : m_type(cv::detail::ArgTypeTraits<T>::type), m_arg(arg) { };
GOpaqueT(cv::GOpaque<T> arg) : m_type(cv::detail::ArgTypeTraits<T>::type), m_arg(arg) { }
GAPI_WRAP GOpaqueT(gapi::ArgType type) : m_type(type)
{
@ -175,7 +175,7 @@ public:
using Storage = cv::detail::MakeVariantType<cv::GArray, GARRAY_TYPE_LIST_G(ID_, ID)>;
template<typename T>
GArrayT(cv::GArray<T> arg) : m_type(cv::detail::ArgTypeTraits<T>::type), m_arg(arg) { };
GArrayT(cv::GArray<T> arg) : m_type(cv::detail::ArgTypeTraits<T>::type), m_arg(arg) { }
GAPI_WRAP GArrayT(gapi::ArgType type) : m_type(type)
{

@ -16,7 +16,7 @@ void create_rand_mats(const cv::Size &size, MatType type, cv::Mat &ref_mat, cv::
ref_mat.create(size, type);
cv::randu(ref_mat, cv::Scalar::all(0), cv::Scalar::all(255));
ref_mat.copyTo(gapi_mat);
};
}
} // namespace

@ -67,7 +67,7 @@ inline void mosaic(cv::Mat& mat, const cv::Rect &rect, int cellSz)
cell_roi = cv::mean(cell_roi);
}
}
};
}
inline void blendImage(const cv::Mat& img,
const cv::Mat& alpha,
@ -120,7 +120,7 @@ inline void poly(cv::Mat& mat,
{
std::vector<std::vector<cv::Point>> points{pp.points};
cv::fillPoly(mat, points, pp.color, pp.lt, pp.shift);
};
}
struct BGR2YUVConverter
{
@ -133,13 +133,13 @@ struct BGR2YUVConverter
return {y, u, v};
}
void cvtImg(const cv::Mat& in, cv::Mat& out) { cv::cvtColor(in, out, cv::COLOR_BGR2YUV); };
void cvtImg(const cv::Mat& in, cv::Mat& out) { cv::cvtColor(in, out, cv::COLOR_BGR2YUV); }
};
struct EmptyConverter
{
cv::Scalar cvtColor(const cv::Scalar& bgr) const { return bgr; };
void cvtImg(const cv::Mat& in, cv::Mat& out) const { out = in; };
cv::Scalar cvtColor(const cv::Scalar& bgr) const { return bgr; }
void cvtImg(const cv::Mat& in, cv::Mat& out) const { out = in; }
};
// FIXME util::visitor ?

@ -8,9 +8,8 @@
#include <map> // map
#include <ade/util/zip_range.hpp> // indexed
#define NOMINMAX
#ifdef _WIN32
#define NOMINMAX
#include <winsock.h> // htonl, ntohl
#else
#include <netinet/in.h> // htonl, ntohl

@ -195,7 +195,7 @@ class GAPI_EXPORTS ByteMemoryInStream final: public IIStream {
size_t m_idx = 0u;
void check(std::size_t n) { (void) n; GAPI_DbgAssert(m_idx+n-1 < m_storage.size()); }
uint32_t getU32() { uint32_t v{}; *this >> v; return v; };
uint32_t getU32() { uint32_t v{}; *this >> v; return v; }
//virtual IIStream& operator>> (uint32_t &) final;

@ -159,7 +159,7 @@ struct Copy: public cv::detail::KernelTag
return cv::gapi::streaming::IActor::Ptr(new Actor(args));
}
static cv::gapi::streaming::GStreamingKernel kernel() { return {&create}; };
static cv::gapi::streaming::GStreamingKernel kernel() { return {&create}; }
};
void Copy::Actor::run(cv::gimpl::GIslandExecutable::IInput &in,
@ -249,7 +249,7 @@ struct GOCVBGR: public cv::detail::KernelTag
{
return cv::gapi::streaming::IActor::Ptr(new Actor(args));
}
static cv::gapi::streaming::GStreamingKernel kernel() { return {&create}; };
static cv::gapi::streaming::GStreamingKernel kernel() { return {&create}; }
};
void GOCVBGR::Actor::extractRMat(const cv::MediaFrame& frame, cv::RMat& rmat)
@ -323,7 +323,7 @@ struct GOCVY: public cv::detail::KernelTag
{
return cv::gapi::streaming::IActor::Ptr(new Actor(args));
}
static cv::gapi::streaming::GStreamingKernel kernel() { return {&create}; };
static cv::gapi::streaming::GStreamingKernel kernel() { return {&create}; }
};
void GOCVY::Actor::extractRMat(const cv::MediaFrame& frame, cv::RMat& rmat)
@ -389,7 +389,7 @@ struct GOCVUV: public cv::detail::KernelTag
{
return cv::gapi::streaming::IActor::Ptr(new Actor(args));
}
static cv::gapi::streaming::GStreamingKernel kernel() { return {&create}; };
static cv::gapi::streaming::GStreamingKernel kernel() { return {&create}; }
};
void GOCVUV::Actor::extractRMat(const cv::MediaFrame& frame, cv::RMat& rmat)

@ -27,7 +27,7 @@ namespace cv {
namespace gimpl
{
struct GRuntimeArgs;
};
}
// FIXME: GAPI_EXPORTS is here only due to tests and Windows linker issues
class GAPI_EXPORTS GCompiled::Priv

@ -16,7 +16,7 @@ namespace cv {
namespace gimpl
{
struct GRuntimeArgs;
};
}
// FIXME: GAPI_EXPORTS is here only due to tests and Windows linker issues
// FIXME: It seems it clearly duplicates the GStreamingCompiled and

@ -73,7 +73,7 @@ bool compareDataNodes(const ade::NodeHandle& first, const std::vector<std::size_
// check that first and second nodes have the same type of DATA::Storage.
return true;
};
}
// Returns true if two OP nodes semantically and structurally identical:
// - both nodes have the same kernel name
@ -130,7 +130,7 @@ bool compareOpNodes(const VisitedMatchings& matchedVisitedNodes,
}
return true;
};
}
// Retrieves and return sample from the cartesian product of candidates sets
VisitedMatchings sampleFromProduct(std::size_t sampleIdx, // index of the sample in the product
@ -168,7 +168,7 @@ std::size_t labelOf (const ade::NodeHandle& node, // reader node
else {
return graph.metadata(edge).get<cv::gimpl::Output>().port;
}
};
}
inline bool IS_STARTPOINT(const ade::NodeHandle& nh){
return nh->inEdges().empty();

@ -1699,7 +1699,7 @@ namespace {
return cv::MediaFrame::View(std::move(pp), std::move(ss));
}
};
};
}
namespace {
class TestMediaGray final : public cv::MediaFrame::IAdapter {
@ -1718,7 +1718,7 @@ namespace {
return cv::MediaFrame::View(std::move(pp), std::move(ss));
}
};
};
}
TEST_P(SizeMFTest, ParseTest)
{

@ -92,6 +92,6 @@ void blendImageRef(cv::Mat& mat, const cv::Point& org, const cv::Mat& img, const
roi32f += img32f;
roi32f.convertTo(roi, CV_8U, 255.0);
};
}
} // namespace opencv_test

@ -115,7 +115,7 @@ struct Fixture : public RenderNV12TestBase API { \
__WRAP_VAARGS(DEFINE_SPECIFIC_PARAMS_##Number(__VA_ARGS__)) \
Fixture() { \
Init(sz_); \
}; \
} \
};
#define GAPI_RENDER_TEST_FIXTURE_BGR(Fixture, API, Number, ...) \
@ -123,7 +123,7 @@ struct Fixture : public RenderBGRTestBase API { \
__WRAP_VAARGS(DEFINE_SPECIFIC_PARAMS_##Number(__VA_ARGS__)) \
Fixture() { \
Init(sz_); \
}; \
} \
};
#define GET_VA_ARGS(...) __VA_ARGS__
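The fixture macros above drop the ';' baked in after the constructor body. A hedged, made-up illustration of why a trailing semicolon inside a macro body trips -Wextra-semi at every expansion site:

```cpp
// Made-up macros illustrating the trailing-semicolon problem fixed above.
// Compile with: clang++ -Wextra-semi -c macro_semi.cpp
#define DECLARE_FIXTURE_BAD(Name)          \
    struct Name {                          \
        Name() { /* init */ };             \
    }
// The ';' after the constructor body is carried into every expansion, so clang
// reports "extra ';' after member function definition" wherever the macro is used.

#define DECLARE_FIXTURE_GOOD(Name)         \
    struct Name {                          \
        Name() { /* init */ }              \
    }

DECLARE_FIXTURE_BAD(BadFixture);    // warning surfaces at this expansion
DECLARE_FIXTURE_GOOD(GoodFixture);  // clean

int main() { BadFixture b; GoodFixture g; (void)b; (void)g; return 0; }
```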

@ -165,7 +165,7 @@ namespace
out = true;
}
};
};
}
TEST(StatefulKernel, StateInitOnceInRegularMode)
{
@ -190,7 +190,7 @@ TEST(StatefulKernel, StateInitOnceInRegularMode)
EXPECT_TRUE(params.pSetupsCount != nullptr);
EXPECT_EQ(1, *params.pSetupsCount);
}
};
}
struct StateInitOnce : public ::testing::TestWithParam<bool>{};
TEST_P(StateInitOnce, StreamingCompiledWithMeta)

@ -207,7 +207,7 @@ struct CallBack: crtp_cast<crtp_final_t> {
mtx.unlock();
cv.notify_one();
};
};
}
template<typename... Args >
void start_async(Args&&... args){

@ -28,12 +28,12 @@ namespace
void WriteFunction(uint8_t* row, int nr, int w) {
for (int i = 0; i < w; i++)
row[i] = static_cast<uint8_t>(nr+i);
};
}
void ReadFunction1x1(const uint8_t* row, int w) {
for (int i = 0; i < w; i++)
std::cout << std::setw(4) << static_cast<int>(row[i]) << " ";
std::cout << "\n";
};
}
void ReadFunction3x3(const uint8_t* rows[3], int w) {
for (int i = 0; i < 3; i++) {
for (int j = -1; j < w+1; j++) {
@ -42,7 +42,7 @@ namespace
std::cout << "\n";
}
std::cout << "\n";
};
}
}
TEST(FluidBuffer, InputTest)

@ -215,7 +215,7 @@ TEST(KernelPackage, RemoveBackend)
EXPECT_FALSE(pkg.includes<J::Foo>());
EXPECT_FALSE(pkg.includes<J::Bar>());
EXPECT_TRUE(pkg.includes<S::Baz>());
};
}
TEST(KernelPackage, RemoveAPI)
{
@ -228,7 +228,7 @@ TEST(KernelPackage, RemoveAPI)
pkg.remove<I::Foo>();
EXPECT_TRUE(pkg.includes<J::Bar>());
EXPECT_FALSE(pkg.includes<J::Foo>());
};
}
TEST(KernelPackage, CreateHetero)
{

@ -55,7 +55,7 @@ public:
GMockExecutable(bool can_reshape = true)
: m_priv(new Priv{can_reshape, 0, 0})
{
};
}
void setReshape(bool can_reshape) { m_priv->m_can_reshape = can_reshape; }
@ -92,7 +92,7 @@ class GMockBackendImpl final: public cv::gapi::GBackend::Priv
}
public:
GMockBackendImpl(const GMockExecutable& exec) : m_exec(exec) { };
GMockBackendImpl(const GMockExecutable& exec) : m_exec(exec) { }
int getCompileCounter() const { return m_compile_counter; }
};
@ -124,8 +124,8 @@ GMockFunctor mock_kernel(const cv::gapi::GBackend& backend, Callable c)
};
}
void dummyFooImpl(const cv::Mat&, cv::Mat&) { };
void dummyBarImpl(const cv::Mat&, const cv::Mat&, cv::Mat&) { };
void dummyFooImpl(const cv::Mat&, cv::Mat&) { }
void dummyBarImpl(const cv::Mat&, const cv::Mat&, cv::Mat&) { }
struct GExecutorReshapeTest: public ::testing::Test
{
@ -155,7 +155,7 @@ struct GExecutorReshapeTest: public ::testing::Test
std::shared_ptr<GMockBackendImpl> backend_impl2;
cv::gapi::GBackend backend2;
cv::GKernelPackage pkg;
cv::Mat in_mat1, in_mat2, out_mat;;
cv::Mat in_mat1, in_mat2, out_mat;
};
} // anonymous namespace

@ -627,7 +627,7 @@ namespace
void assignIsland(const std::string &s)
{
cv::gapi::island(s, cv::GIn(tmp[0]), cv::GOut(tmp[2]));
};
}
};
TEST_P(CheckName, Test)
{

@ -300,9 +300,7 @@ You can call cv::destroyWindow or cv::destroyAllWindows to close the window and
memory usage. For a simple program, you do not really have to call these functions because all the
resources and windows of the application are closed automatically by the operating system upon exit.
@note
Qt backend supports additional flags:
@note Qt backend supports additional flags:
- **WINDOW_NORMAL or WINDOW_AUTOSIZE:** WINDOW_NORMAL enables you to resize the
window, whereas WINDOW_AUTOSIZE adjusts automatically the window size to fit the
displayed image (see imshow ), and you cannot change the window size manually.
@ -335,9 +333,7 @@ CV_EXPORTS_W int startWindowThread();
/** @brief Similar to #waitKey, but returns full key code.
@note
Key code is implementation specific and depends on used backend: QT/GTK/Win32/etc
@note Key code is implementation specific and depends on used backend: QT/GTK/Win32/etc
*/
CV_EXPORTS_W int waitKeyEx(int delay = 0);
@ -404,11 +400,7 @@ For example, **waitKey(0)** will display the window infinitely until any keypres
for image display). **waitKey(25)** will display a frame and wait approximately 25 ms for a key
press (suitable for displaying a video frame-by-frame). To remove the window, use cv::destroyWindow.
@note
[__Windows Backend Only__] Pressing Ctrl+C will copy the image to the clipboard.
[__Windows Backend Only__] Pressing Ctrl+S will show a dialog to save the image.
@note [__Windows Backend Only__] Pressing Ctrl+C will copy the image to the clipboard. Pressing Ctrl+S will show a dialog to save the image.
@param winname Name of the window.
@param mat Image to be shown.
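For reference, a minimal usage sketch of the imshow/waitKey pair the consolidated notes above describe; the input file name is a placeholder.

```cpp
// Minimal sketch of the documented imshow/waitKey workflow.
#include <opencv2/core.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/imgcodecs.hpp>

int main() {
    cv::Mat img = cv::imread("input.png");  // placeholder path
    if (img.empty())
        return 1;
    cv::imshow("preview", img);   // creates the window if it does not yet exist
    cv::waitKey(0);               // 0 = block until any key is pressed
    cv::destroyWindow("preview");
    return 0;
}
```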
@ -417,10 +409,8 @@ CV_EXPORTS_W void imshow(const String& winname, InputArray mat);
/** @brief Resizes the window to the specified size
@note
- The specified window size is for the image area. Toolbars are not counted.
- Only windows created without cv::WINDOW_AUTOSIZE flag can be resized.
@note The specified window size is for the image area. Toolbars are not counted.
Only windows created without cv::WINDOW_AUTOSIZE flag can be resized.
@param winname Window name.
@param width The new window width.
@ -502,9 +492,7 @@ For cv::EVENT_MOUSEWHEEL positive and negative values mean forward and backward
respectively. For cv::EVENT_MOUSEHWHEEL, where available, positive and negative values mean right and
left scrolling, respectively.
@note
Mouse-wheel events are currently supported only on Windows and Cocoa
@note Mouse-wheel events are currently supported only on Windows and Cocoa.
@param flags The mouse callback flags parameter.
*/
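A small usage sketch of the mouse-wheel note above; as documented, wheel events only arrive on the Windows and Cocoa backends, so elsewhere the callback simply never sees EVENT_MOUSEWHEEL.

```cpp
// Sketch: reading the wheel delta from the mouse callback flags.
#include <opencv2/core.hpp>
#include <opencv2/highgui.hpp>
#include <iostream>

static void onMouse(int event, int, int, int flags, void*) {
    if (event == cv::EVENT_MOUSEWHEEL)
        std::cout << "wheel delta: " << cv::getMouseWheelDelta(flags) << std::endl;
}

int main() {
    cv::Mat img(200, 200, CV_8UC3, cv::Scalar::all(64));
    cv::namedWindow("wheel");
    cv::setMouseCallback("wheel", onMouse);
    cv::imshow("wheel", img);
    cv::waitKey(0);
    return 0;
}
```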
@ -559,9 +547,7 @@ and range, assigns a variable value to be a position synchronized with the track
the callback function onChange to be called on the trackbar position change. The created trackbar is
displayed in the specified window winname.
@note
[__Qt Backend Only__] winname can be empty if the trackbar should be attached to the
@note [__Qt Backend Only__] winname can be empty if the trackbar should be attached to the
control panel.
Clicking the label of each trackbar enables editing the trackbar values manually.
@ -587,9 +573,7 @@ CV_EXPORTS int createTrackbar(const String& trackbarname, const String& winname,
The function returns the current position of the specified trackbar.
@note
[__Qt Backend Only__] winname can be empty if the trackbar is attached to the control
@note [__Qt Backend Only__] winname can be empty if the trackbar is attached to the control
panel.
@param trackbarname Name of the trackbar.
@ -601,9 +585,7 @@ CV_EXPORTS_W int getTrackbarPos(const String& trackbarname, const String& winnam
The function sets the position of the specified trackbar in the specified window.
@note
[__Qt Backend Only__] winname can be empty if the trackbar is attached to the control
@note [__Qt Backend Only__] winname can be empty if the trackbar is attached to the control
panel.
@param trackbarname Name of the trackbar.
@ -616,9 +598,7 @@ CV_EXPORTS_W void setTrackbarPos(const String& trackbarname, const String& winna
The function sets the maximum position of the specified trackbar in the specified window.
@note
[__Qt Backend Only__] winname can be empty if the trackbar is attached to the control
@note [__Qt Backend Only__] winname can be empty if the trackbar is attached to the control
panel.
@param trackbarname Name of the trackbar.
@ -631,9 +611,7 @@ CV_EXPORTS_W void setTrackbarMax(const String& trackbarname, const String& winna
The function sets the minimum position of the specified trackbar in the specified window.
@note
[__Qt Backend Only__] winname can be empty if the trackbar is attached to the control
@note [__Qt Backend Only__] winname can be empty if the trackbar is attached to the control
panel.
@param trackbarname Name of the trackbar.
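A minimal sketch tying together the trackbar functions documented in the hunks above (createTrackbar, setTrackbarPos, getTrackbarPos); the window and trackbar names are made up, and a regular named window is used rather than the Qt-only empty name.

```cpp
// Sketch: create a trackbar and poll its position to drive a preview image.
#include <opencv2/core.hpp>
#include <opencv2/highgui.hpp>

int main() {
    cv::namedWindow("controls");
    cv::createTrackbar("threshold", "controls", nullptr, 255);
    cv::setTrackbarPos("threshold", "controls", 128);

    cv::Mat canvas(100, 400, CV_8UC1);
    for (;;) {
        canvas.setTo(cv::getTrackbarPos("threshold", "controls"));  // grey level = position
        cv::imshow("controls", canvas);
        if (cv::waitKey(30) >= 0) break;   // exit on any key
    }
    return 0;
}
```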

@ -61,7 +61,7 @@ std::vector<BackendInfo>& getBuiltinBackendsInfo()
#endif
};
return g_backends;
};
}
static
bool sortByPriority(const BackendInfo &lhs, const BackendInfo &rhs)

@ -118,7 +118,7 @@ class ROISelector
bool drawFromCenter;
// initializer list
handlerT() : isDrawing(false), drawFromCenter(true){};
handlerT() : isDrawing(false), drawFromCenter(true){}
} selectorParams;
private:

@ -319,8 +319,8 @@ See cv::imread for the list of supported formats and flags description.
CV_EXPORTS_W Mat imdecode( InputArray buf, int flags );
/** @overload
@param buf
@param flags
@param buf Input array or vector of bytes.
@param flags The same flags as in cv::imread, see cv::ImreadModes.
@param dst The optional output placeholder for the decoded matrix. It can save the image
reallocations when the function is called repeatedly for images of the same size.
*/
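A short usage sketch of the imdecode overload documented above: the optional dst placeholder lets repeated decodes of same-sized images reuse one allocation. The encoded buffer here is synthetic.

```cpp
// Sketch: decode an in-memory buffer into a reusable destination matrix.
#include <opencv2/core.hpp>
#include <opencv2/imgcodecs.hpp>
#include <vector>

int main() {
    cv::Mat src(64, 64, CV_8UC3, cv::Scalar(0, 128, 255));
    std::vector<uchar> buf;
    cv::imencode(".png", src, buf);               // produce an encoded byte buffer

    cv::Mat dst;                                  // reused across repeated calls
    cv::imdecode(buf, cv::IMREAD_COLOR, &dst);    // overload with output placeholder
    return dst.empty() ? 1 : 0;
}
```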

@ -133,7 +133,7 @@ bool ExifReader::parseExif(unsigned char* data, const size_t size)
* @brief Filling m_exif member with exif directory elements
* This is internal function and is not exposed to client
*
* @return The function doesn't return any value. In case of unsuccessful parsing
* The function doesn't return any value. In case of unsuccessful parsing
* the m_exif member is not filled up
*/
void ExifReader::parseExif()
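The doc fix above removes a "@return" attached to a void function, which is one of the things clang's -Wdocumentation checks. A hedged, made-up illustration (declarations only, compiled with -c):

```cpp
// Made-up declarations illustrating the -Wdocumentation issue addressed above.
// Compile with: clang++ -Wdocumentation -c doc_warning.cpp

/**
 * @brief Parses the internal buffer.
 * @return Nothing.
 */
void parseBad();   // clang warns: '\return' command used in a comment attached
                   // to a function returning void

/**
 * @brief Parses the internal buffer.
 * Nothing is returned; on failure the internal state is left unchanged.
 */
void parseGood();  // no warning

int main() { return 0; }
```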

@ -72,11 +72,6 @@ static void extend_cvtColor( InputArray _src, OutputArray _dst, int code );
CV_Error(Error::StsError, "OpenCV TIFF: failed " #call); \
}
#define CV_TIFF_CHECK_CALL_INFO(call) \
if (0 == (call)) { \
CV_LOG_INFO(NULL, "OpenCV TIFF(line " << __LINE__ << "): failed optional call: " #call ", ignoring"); \
}
#define CV_TIFF_CHECK_CALL_DEBUG(call) \
if (0 == (call)) { \
CV_LOG_DEBUG(NULL, "OpenCV TIFF(line " << __LINE__ << "): failed optional call: " #call ", ignoring"); \

@ -3699,10 +3699,10 @@ stored in two planes.
This function only supports YUV420 to RGB conversion as of now.
@param src1: 8-bit image (#CV_8U) of the Y plane.
@param src2: image containing interleaved U/V plane.
@param dst: output image.
@param code: Specifies the type of conversion. It can take any of the following values:
@param src1 8-bit image (#CV_8U) of the Y plane.
@param src2 image containing interleaved U/V plane.
@param dst output image.
@param code Specifies the type of conversion. It can take any of the following values:
- #COLOR_YUV2BGR_NV12
- #COLOR_YUV2RGB_NV12
- #COLOR_YUV2BGRA_NV12
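A usage sketch for the two-plane conversion documented above (NV12 to BGR). The planes here are synthetic and exist only to show the expected shapes: a full-resolution Y plane and a half-resolution interleaved U/V plane.

```cpp
// Sketch: convert a two-plane NV12 frame to BGR with cvtColorTwoPlane.
#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>

int main() {
    const int w = 64, h = 48;
    cv::Mat y(h, w, CV_8UC1, cv::Scalar(128));                 // Y plane, full resolution
    cv::Mat uv(h / 2, w / 2, CV_8UC2, cv::Scalar(128, 128));   // interleaved U/V, half size
    cv::Mat bgr;
    cv::cvtColorTwoPlane(y, uv, bgr, cv::COLOR_YUV2BGR_NV12);
    return bgr.empty() ? 1 : 0;
}
```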

@ -13,7 +13,7 @@ enum PerfSqMatDepth{
DEPTH_32F_64F,
DEPTH_64F_64F};
CV_ENUM(IntegralOutputDepths, DEPTH_32S_32S, DEPTH_32S_32F, DEPTH_32S_64F, DEPTH_32F_32F, DEPTH_32F_64F, DEPTH_64F_64F);
CV_ENUM(IntegralOutputDepths, DEPTH_32S_32S, DEPTH_32S_32F, DEPTH_32S_64F, DEPTH_32F_32F, DEPTH_32F_64F, DEPTH_64F_64F)
static int extraOutputDepths[6][2] = {{CV_32S, CV_32S}, {CV_32S, CV_32F}, {CV_32S, CV_64F}, {CV_32F, CV_32F}, {CV_32F, CV_64F}, {CV_64F, CV_64F}};

@ -17,4 +17,4 @@ DEF_ACC_FLT_FUNCS(32f, float, float)
DEF_ACC_FLT_FUNCS(32f64f, float, double)
DEF_ACC_FLT_FUNCS(64f, double, double)
} //cv::hal
} //cv::hal

@ -170,9 +170,6 @@ typedef struct _CvContourScanner
}
_CvContourScanner;
#define _CV_FIND_CONTOURS_FLAGS_EXTERNAL_ONLY 1
#define _CV_FIND_CONTOURS_FLAGS_HIERARCHIC 2
/*
Initializes scanner structure.
Prepare image for scanning ( clear borders and convert all pixels to 0-1.

@ -2584,9 +2584,6 @@ void cv::drawContours( InputOutputArray _image, InputArrayOfArrays _contours,
static const int CodeDeltas[8][2] =
{ {1, 0}, {1, -1}, {0, -1}, {-1, -1}, {-1, 0}, {-1, 1}, {0, 1}, {1, 1} };
#define CV_ADJUST_EDGE_COUNT( count, seq ) \
((count) -= ((count) == (seq)->total && !CV_IS_SEQ_CLOSED(seq)))
CV_IMPL void
cvDrawContours( void* _img, CvSeq* contour,
CvScalar _externalColor, CvScalar _holeColor,

@ -163,8 +163,6 @@ void FilterEngine::init( const Ptr<BaseFilter>& _filter2D,
wholeSize = Size(-1,-1);
}
#define VEC_ALIGN CV_MALLOC_ALIGN
int FilterEngine::start(const Size& _wholeSize, const Size& sz, const Point& ofs)
{
CV_INSTRUMENT_REGION();
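Several hunks above simply delete macros (CV_TIFF_CHECK_CALL_INFO, the _CV_FIND_CONTOURS_FLAGS_* pair, CV_ADJUST_EDGE_COUNT, VEC_ALIGN) that are no longer referenced. A tiny, made-up reproduction of the underlying -Wunused-macros diagnostic:

```cpp
// Made-up example: a macro defined in a source file but never expanded is
// reported by clang -Wunused-macros, so dead helpers are removed outright.
// Compile with: clang++ -Wunused-macros -c unused_macro.cpp
#define LEFTOVER_HELPER(x) ((x) * 2)   // warning: macro is not used
#define LIVE_HELPER(x)     ((x) + 1)

int bump(int v) { return LIVE_HELPER(v); }

int main() { return bump(0) == 1 ? 0 : 1; }
```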

Some files were not shown because too many files have changed in this diff.