diff --git a/3rdparty/carotene/hal/tegra_hal.hpp b/3rdparty/carotene/hal/tegra_hal.hpp index 7fcca975fe..c2ae0c0d87 100644 --- a/3rdparty/carotene/hal/tegra_hal.hpp +++ b/3rdparty/carotene/hal/tegra_hal.hpp @@ -1844,14 +1844,18 @@ TegraCvtColor_Invoker(bgrx2hsvf, bgrx2hsv, src_data + static_cast<size_t>(range. #define cv_hal_cvtBGRtoGray TEGRA_CVTBGRTOGRAY #undef cv_hal_cvtGraytoBGR #define cv_hal_cvtGraytoBGR TEGRA_CVTGRAYTOBGR +#if 0 // bit-exact tests fail #undef cv_hal_cvtBGRtoYUV #define cv_hal_cvtBGRtoYUV TEGRA_CVTBGRTOYUV +#endif #undef cv_hal_cvtBGRtoHSV #define cv_hal_cvtBGRtoHSV TEGRA_CVTBGRTOHSV +#if 0 // bit-exact tests fail #undef cv_hal_cvtTwoPlaneYUVtoBGR #define cv_hal_cvtTwoPlaneYUVtoBGR TEGRA_CVT2PYUVTOBGR #undef cv_hal_cvtTwoPlaneYUVtoBGREx #define cv_hal_cvtTwoPlaneYUVtoBGREx TEGRA_CVT2PYUVTOBGR_EX +#endif #endif // OPENCV_IMGPROC_HAL_INTERFACE_H diff --git a/cmake/OpenCVDetectPython.cmake b/cmake/OpenCVDetectPython.cmake index 4ff02a77d3..6e7bb18c1b 100644 --- a/cmake/OpenCVDetectPython.cmake +++ b/cmake/OpenCVDetectPython.cmake @@ -177,7 +177,7 @@ if(NOT ${found}) if(NOT ANDROID AND NOT IOS) if(CMAKE_HOST_UNIX) - execute_process(COMMAND ${_executable} -c "from distutils.sysconfig import *; print(get_python_lib())" + execute_process(COMMAND ${_executable} -c "from sysconfig import *; print(get_path('purelib'))" RESULT_VARIABLE _cvpy_process OUTPUT_VARIABLE _std_packages_path OUTPUT_STRIP_TRAILING_WHITESPACE) diff --git a/cmake/OpenCVFindLibsGUI.cmake b/cmake/OpenCVFindLibsGUI.cmake index 7224bddf90..79758fa813 100644 --- a/cmake/OpenCVFindLibsGUI.cmake +++ b/cmake/OpenCVFindLibsGUI.cmake @@ -66,7 +66,6 @@ if(WITH_OPENGL) find_package (OpenGL QUIET) if(OPENGL_FOUND) set(HAVE_OPENGL TRUE) - list(APPEND OPENCV_LINKER_LIBS ${OPENGL_LIBRARIES}) if(QT_QTOPENGL_FOUND) set(HAVE_QT_OPENGL TRUE) else() diff --git a/doc/js_tutorials/js_video/js_lucas_kanade/js_lucas_kanade.markdown b/doc/js_tutorials/js_video/js_lucas_kanade/js_lucas_kanade.markdown index a86bf11223..f4e4f231b0 100644 --- a/doc/js_tutorials/js_video/js_lucas_kanade/js_lucas_kanade.markdown +++ b/doc/js_tutorials/js_video/js_lucas_kanade/js_lucas_kanade.markdown @@ -133,9 +133,9 @@ Dense Optical Flow in OpenCV.js Lucas-Kanade method computes optical flow for a sparse feature set (in our example, corners detected using Shi-Tomasi algorithm). OpenCV.js provides another algorithm to find the dense optical flow. It -computes the optical flow for all the points in the frame. It is based on Gunner Farneback's +computes the optical flow for all the points in the frame. It is based on Gunnar Farneback's algorithm which is explained in "Two-Frame Motion Estimation Based on Polynomial Expansion" by -Gunner Farneback in 2003. +Gunnar Farneback in 2003.
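(Reviewer note, not part of the patch: for anyone cross-checking the tutorial hunks above, here is a minimal C++ sketch of the dense Farneback call the text describes. The API is cv::calcOpticalFlowFarneback from the video module; the frame names and parameter values are illustrative only.)

```cpp
#include <opencv2/core.hpp>
#include <opencv2/video.hpp>   // cv::calcOpticalFlowFarneback

// prevGray / nextGray: two consecutive grayscale (CV_8UC1) frames -- hypothetical inputs.
cv::Mat computeDenseFlow(const cv::Mat& prevGray, const cv::Mat& nextGray)
{
    cv::Mat flow;  // output: CV_32FC2, one (u, v) displacement per pixel
    cv::calcOpticalFlowFarneback(prevGray, nextGray, flow,
                                 0.5,   // pyrScale
                                 3,     // levels
                                 15,    // winsize
                                 3,     // iterations
                                 5,     // polyN
                                 1.2,   // polySigma
                                 0);    // flags
    return flow;
}
```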
We use the function: **cv.calcOpticalFlowFarneback (prev, next, flow, pyrScale, levels, winsize, iterations, polyN, polySigma, flags)** diff --git a/doc/py_tutorials/py_feature2d/py_feature_homography/py_feature_homography.markdown b/doc/py_tutorials/py_feature2d/py_feature_homography/py_feature_homography.markdown index 8602cc9398..f8836b095b 100644 --- a/doc/py_tutorials/py_feature2d/py_feature_homography/py_feature_homography.markdown +++ b/doc/py_tutorials/py_feature2d/py_feature_homography/py_feature_homography.markdown @@ -78,7 +78,7 @@ if len(good)>MIN_MATCH_COUNT: M, mask = cv.findHomography(src_pts, dst_pts, cv.RANSAC,5.0) matchesMask = mask.ravel().tolist() - h,w,d = img1.shape + h,w = img1.shape pts = np.float32([ [0,0],[0,h-1],[w-1,h-1],[w-1,0] ]).reshape(-1,1,2) dst = cv.perspectiveTransform(pts,M) diff --git a/doc/tutorials/others/optical_flow.markdown b/doc/tutorials/others/optical_flow.markdown index 07456d7ea9..c8a23743f7 100644 --- a/doc/tutorials/others/optical_flow.markdown +++ b/doc/tutorials/others/optical_flow.markdown @@ -139,9 +139,9 @@ Dense Optical Flow in OpenCV Lucas-Kanade method computes optical flow for a sparse feature set (in our example, corners detected using Shi-Tomasi algorithm). OpenCV provides another algorithm to find the dense optical flow. It -computes the optical flow for all the points in the frame. It is based on Gunner Farneback's +computes the optical flow for all the points in the frame. It is based on Gunnar Farneback's algorithm which is explained in "Two-Frame Motion Estimation Based on Polynomial Expansion" by -Gunner Farneback in 2003. +Gunnar Farneback in 2003. Below sample shows how to find the dense optical flow using above algorithm. We get a 2-channel array with optical flow vectors, \f$(u,v)\f$. We find their magnitude and direction. We color code the diff --git a/modules/core/CMakeLists.txt b/modules/core/CMakeLists.txt index 13d0af4db8..51a0abbfe8 100644 --- a/modules/core/CMakeLists.txt +++ b/modules/core/CMakeLists.txt @@ -144,6 +144,7 @@ ocv_create_module(${extra_libs}) ocv_target_link_libraries(${the_module} PRIVATE "${ZLIB_LIBRARIES}" "${OPENCL_LIBRARIES}" "${VA_LIBRARIES}" + "${OPENGL_LIBRARIES}" "${LAPACK_LIBRARIES}" "${CPUFEATURES_LIBRARIES}" "${HALIDE_LIBRARIES}" "${ITT_LIBRARIES}" "${OPENCV_HAL_LINKER_LIBS}" diff --git a/modules/core/include/opencv2/core/types.hpp b/modules/core/include/opencv2/core/types.hpp index 7dfadb2522..2867520361 100644 --- a/modules/core/include/opencv2/core/types.hpp +++ b/modules/core/include/opencv2/core/types.hpp @@ -1895,13 +1895,33 @@ Rect_<_Tp>& operator -= ( Rect_<_Tp>& a, const Size_<_Tp>& b ) template<typename _Tp> static inline Rect_<_Tp>& operator &= ( Rect_<_Tp>& a, const Rect_<_Tp>& b ) { - _Tp x1 = std::max(a.x, b.x); - _Tp y1 = std::max(a.y, b.y); - a.width = std::min(a.x + a.width, b.x + b.width) - x1; - a.height = std::min(a.y + a.height, b.y + b.height) - y1; - a.x = x1; - a.y = y1; - if( a.width <= 0 || a.height <= 0 ) + if (a.empty() || b.empty()) { + a = Rect(); + return a; + } + const Rect_<_Tp>& Rx_min = (a.x < b.x) ? a : b; + const Rect_<_Tp>& Rx_max = (a.x < b.x) ? b : a; + const Rect_<_Tp>& Ry_min = (a.y < b.y) ? a : b; + const Rect_<_Tp>& Ry_max = (a.y < b.y) ? b : a; + // Looking at the formula below, we will compute Rx_min.width - (Rx_max.x - Rx_min.x) + // but we want to avoid overflows. Rx_min.width >= 0 and (Rx_max.x - Rx_min.x) >= 0 + // by definition so the difference does not overflow. The only thing that can overflow + // is (Rx_max.x - Rx_min.x).
And it can only overflow if Rx_min.x < 0. + // Let us first deal with the following case. + if ((Rx_min.x < 0 && Rx_min.x + Rx_min.width < Rx_max.x) || + (Ry_min.y < 0 && Ry_min.y + Ry_min.height < Ry_max.y)) { + a = Rect(); + return a; + } + // We now know that either Rx_min.x >= 0, or + // Rx_min.x < 0 && Rx_min.x + Rx_min.width >= Rx_max.x and therefore + // Rx_min.width >= (Rx_max.x - Rx_min.x) which means (Rx_max.x - Rx_min.x) + // is no larger than a valid int and therefore does not overflow. + a.width = std::min(Rx_min.width - (Rx_max.x - Rx_min.x), Rx_max.width); + a.height = std::min(Ry_min.height - (Ry_max.y - Ry_min.y), Ry_max.height); + a.x = Rx_max.x; + a.y = Ry_max.y; + if (a.empty()) a = Rect(); return a; } diff --git a/modules/core/test/test_misc.cpp b/modules/core/test/test_misc.cpp index 55615b0d5f..d9df475fa6 100644 --- a/modules/core/test/test_misc.cpp +++ b/modules/core/test/test_misc.cpp @@ -821,4 +821,36 @@ TEST(Core_Types, trivially_copyable_extra) } #endif +template <typename T> class Rect_Test : public testing::Test {}; + +TYPED_TEST_CASE_P(Rect_Test); + +// Reimplement C++11 std::numeric_limits<>::lowest. +template <typename T> T cv_numeric_limits_lowest(); +template<> int cv_numeric_limits_lowest<int>() { return INT_MIN; } +template<> float cv_numeric_limits_lowest<float>() { return -FLT_MAX; } +template<> double cv_numeric_limits_lowest<double>() { return -DBL_MAX; } + +TYPED_TEST_P(Rect_Test, Overflows) { + typedef Rect_<TypeParam> R; + TypeParam num_max = std::numeric_limits<TypeParam>::max(); + TypeParam num_lowest = cv_numeric_limits_lowest<TypeParam>(); + EXPECT_EQ(R(0, 0, 10, 10), R(0, 0, 10, 10) & R(0, 0, 10, 10)); + EXPECT_EQ(R(5, 6, 4, 3), R(0, 0, 10, 10) & R(5, 6, 4, 3)); + EXPECT_EQ(R(5, 6, 3, 2), R(0, 0, 8, 8) & R(5, 6, 4, 3)); + // Test with overflowing dimensions. + EXPECT_EQ(R(5, 0, 5, 10), R(0, 0, 10, 10) & R(5, 0, num_max, num_max)); + // Test with overflowing dimensions for floats/doubles. + EXPECT_EQ(R(num_max, 0, num_max / 4, 10), R(num_max, 0, num_max / 2, 10) & R(num_max, 0, num_max / 4, 10)); + // Test with overflowing coordinates.
+ EXPECT_EQ(R(), R(20, 0, 10, 10) & R(num_lowest, 0, 10, 10)); + EXPECT_EQ(R(), R(20, 0, 10, 10) & R(0, num_lowest, 10, 10)); + EXPECT_EQ(R(), R(num_lowest, 0, 10, 10) & R(0, num_lowest, 10, 10)); +} +REGISTER_TYPED_TEST_CASE_P(Rect_Test, Overflows); + +typedef ::testing::Types<int, float, double> RectTypes; +INSTANTIATE_TYPED_TEST_CASE_P(Negative_Test, Rect_Test, RectTypes); + + }} // namespace diff --git a/modules/imgproc/src/opencl/resize.cl b/modules/imgproc/src/opencl/resize.cl index 67603e4f17..a28c59296e 100644 --- a/modules/imgproc/src/opencl/resize.cl +++ b/modules/imgproc/src/opencl/resize.cl @@ -51,8 +51,6 @@ #endif #endif -#define INTER_RESIZE_COEF_SCALE (1 << INTER_RESIZE_COEF_BITS) -#define CAST_BITS (INTER_RESIZE_COEF_BITS << 1) #define INC(x,l) min(x+1,l-1) #define noconvert @@ -188,7 +186,9 @@ __kernel void resizeLN(__global const uchar * srcptr, int src_step, int src_offs int y_ = INC(y, src_rows); int x_ = INC(x, src_cols); -#if depth <= 4 +#if depth <= 1 // 8U/8S only, 16U+ cause integer overflows +#define INTER_RESIZE_COEF_SCALE (1 << INTER_RESIZE_COEF_BITS) +#define CAST_BITS (INTER_RESIZE_COEF_BITS << 1) u = u * INTER_RESIZE_COEF_SCALE; v = v * INTER_RESIZE_COEF_SCALE; @@ -214,7 +214,7 @@ __kernel void resizeLN(__global const uchar * srcptr, int src_step, int src_offs WT data2 = convertToWT(loadpix(srcptr + mad24(y_, src_step, mad24(x, TSIZE, src_offset)))); WT data3 = convertToWT(loadpix(srcptr + mad24(y_, src_step, mad24(x_, TSIZE, src_offset)))); - T uval = u1 * v1 * data0 + u * v1 * data1 + u1 * v *data2 + u * v *data3; + T uval = convertToDT((u1 * v1) * data0 + (u * v1) * data1 + (u1 * v) * data2 + (u * v) * data3); #endif storepix(uval, dstptr + mad24(dy, dst_step, mad24(dx, TSIZE, dst_offset))); } diff --git a/modules/imgproc/src/resize.cpp b/modules/imgproc/src/resize.cpp index 4f1a4576ce..90a05085e3 100644 --- a/modules/imgproc/src/resize.cpp +++ b/modules/imgproc/src/resize.cpp @@ -3376,7 +3376,8 @@ static bool ocl_resize( InputArray _src, OutputArray _dst, Size dsize, } else { - int wdepth = std::max(depth, CV_32S), wtype = CV_MAKETYPE(wdepth, cn); + int wdepth = depth <= CV_8S ?
CV_32S : std::max(depth, CV_32F); + int wtype = CV_MAKETYPE(wdepth, cn); k.create("resizeLN", ocl::imgproc::resize_oclsrc, format("-D INTER_LINEAR -D depth=%d -D T=%s -D T1=%s " "-D WT=%s -D convertToWT=%s -D convertToDT=%s -D cn=%d " diff --git a/modules/imgproc/test/ocl/test_warp.cpp b/modules/imgproc/test/ocl/test_warp.cpp index 15e024a140..b43c9b6732 100644 --- a/modules/imgproc/test/ocl/test_warp.cpp +++ b/modules/imgproc/test/ocl/test_warp.cpp @@ -327,6 +327,20 @@ OCL_TEST_P(Resize, Mat) } } +OCL_TEST(Resize, overflow_21198) +{ + Mat src(Size(600, 600), CV_16UC3, Scalar::all(32768)); + UMat src_u; + src.copyTo(src_u); + + Mat dst; + cv::resize(src, dst, Size(1024, 1024), 0, 0, INTER_LINEAR); + UMat dst_u; + cv::resize(src_u, dst_u, Size(1024, 1024), 0, 0, INTER_LINEAR); + EXPECT_LE(cv::norm(dst_u, dst, NORM_INF), 1.0f); +} + + ///////////////////////////////////////////////////////////////////////////////////////////////// // remap diff --git a/modules/imgproc/test/test_color.cpp b/modules/imgproc/test/test_color.cpp index 431aa76a28..3d4a588ca2 100644 --- a/modules/imgproc/test/test_color.cpp +++ b/modules/imgproc/test/test_color.cpp @@ -2812,6 +2812,83 @@ TEST(Imgproc_ColorLuv_Full, bitExactness) } } + +static +void runCvtColorBitExactCheck(ColorConversionCodes code, int inputType, uint32_t hash, Size sz = Size(263, 255), int rngSeed = 0) +{ + RNG rng(rngSeed); + + Mat src(sz, inputType, Scalar::all(0)); + Mat dst; + rng.fill(src, RNG::UNIFORM, 0, 255, true); + + cv::cvtColor(src, dst, code); + + uint32_t dst_hash = adler32(dst); + + EXPECT_EQ(hash, dst_hash) << cv::format("0x%08llx", (long long int)dst_hash); + + if (cvtest::debugLevel > 0) + { + const ::testing::TestInfo* const test_info = ::testing::UnitTest::GetInstance()->current_test_info(); + CV_Assert(test_info); + std::string name = (std::string(test_info->test_case_name()) + "--" + test_info->name() + ".xml"); + cv::FileStorage fs(name, cv::FileStorage::WRITE); + fs << "dst" << dst; + } +} + +TEST(Imgproc_cvtColor_BE, COLOR_BGR2YUV) { runCvtColorBitExactCheck(COLOR_BGR2YUV, CV_8UC3, 0xc2cbcfda); } +TEST(Imgproc_cvtColor_BE, COLOR_RGB2YUV) { runCvtColorBitExactCheck(COLOR_RGB2YUV, CV_8UC3, 0x4e98e757); } +TEST(Imgproc_cvtColor_BE, COLOR_YUV2BGR) { runCvtColorBitExactCheck(COLOR_YUV2BGR, CV_8UC3, 0xb2c62a3f); } +TEST(Imgproc_cvtColor_BE, COLOR_YUV2RGB) { runCvtColorBitExactCheck(COLOR_YUV2RGB, CV_8UC3, 0x6d242a3f); } + +// packed input +TEST(Imgproc_cvtColor_BE, COLOR_YUV2RGB_NV12) { runCvtColorBitExactCheck(COLOR_YUV2RGB_NV12, CV_8UC1, 0x46a1bb76, Size(262, 510)); } +TEST(Imgproc_cvtColor_BE, COLOR_YUV2BGR_NV12) { runCvtColorBitExactCheck(COLOR_YUV2BGR_NV12, CV_8UC1, 0x3843bb76, Size(262, 510)); } +TEST(Imgproc_cvtColor_BE, COLOR_YUV2RGB_NV21) { runCvtColorBitExactCheck(COLOR_YUV2RGB_NV21, CV_8UC1, 0xf3fdf2ea, Size(262, 510)); } +TEST(Imgproc_cvtColor_BE, COLOR_YUV2BGR_NV21) { runCvtColorBitExactCheck(COLOR_YUV2BGR_NV21, CV_8UC1, 0x6e84f2ea, Size(262, 510)); } +TEST(Imgproc_cvtColor_BE, COLOR_YUV2RGBA_NV12) { runCvtColorBitExactCheck(COLOR_YUV2RGBA_NV12, CV_8UC1, 0xb6a16bd3, Size(262, 510)); } +TEST(Imgproc_cvtColor_BE, COLOR_YUV2BGRA_NV12) { runCvtColorBitExactCheck(COLOR_YUV2BGRA_NV12, CV_8UC1, 0xa8436bd3, Size(262, 510)); } +TEST(Imgproc_cvtColor_BE, COLOR_YUV2RGBA_NV21) { runCvtColorBitExactCheck(COLOR_YUV2RGBA_NV21, CV_8UC1, 0x1c7fa347, Size(262, 510)); } +TEST(Imgproc_cvtColor_BE, COLOR_YUV2BGRA_NV21) { runCvtColorBitExactCheck(COLOR_YUV2BGRA_NV21, CV_8UC1, 0x96f7a347, Size(262, 510)); } +TEST(Imgproc_cvtColor_BE, 
COLOR_YUV2RGB_YV12) { runCvtColorBitExactCheck(COLOR_YUV2RGB_YV12, CV_8UC1, 0xc5da1651, Size(262, 510)); } +TEST(Imgproc_cvtColor_BE, COLOR_YUV2BGR_YV12) { runCvtColorBitExactCheck(COLOR_YUV2BGR_YV12, CV_8UC1, 0x12161651, Size(262, 510)); } +TEST(Imgproc_cvtColor_BE, COLOR_YUV2RGB_IYUV) { runCvtColorBitExactCheck(COLOR_YUV2RGB_IYUV, CV_8UC1, 0xb4e62ea5, Size(262, 510)); } +TEST(Imgproc_cvtColor_BE, COLOR_YUV2BGR_IYUV) { runCvtColorBitExactCheck(COLOR_YUV2BGR_IYUV, CV_8UC1, 0xfa632ea5, Size(262, 510)); } +TEST(Imgproc_cvtColor_BE, COLOR_YUV2RGBA_YV12) { runCvtColorBitExactCheck(COLOR_YUV2RGBA_YV12, CV_8UC1, 0x0db4c69f, Size(262, 510)); } +TEST(Imgproc_cvtColor_BE, COLOR_YUV2BGRA_YV12) { runCvtColorBitExactCheck(COLOR_YUV2BGRA_YV12, CV_8UC1, 0x59e1c69f, Size(262, 510)); } +TEST(Imgproc_cvtColor_BE, COLOR_YUV2RGBA_IYUV) { runCvtColorBitExactCheck(COLOR_YUV2RGBA_IYUV, CV_8UC1, 0xfe09def3, Size(262, 510)); } +TEST(Imgproc_cvtColor_BE, COLOR_YUV2BGRA_IYUV) { runCvtColorBitExactCheck(COLOR_YUV2BGRA_IYUV, CV_8UC1, 0x4395def3, Size(262, 510)); } +TEST(Imgproc_cvtColor_BE, COLOR_YUV2GRAY_420) { runCvtColorBitExactCheck(COLOR_YUV2GRAY_420, CV_8UC1, 0xf672b440, Size(262, 510)); } + +TEST(Imgproc_cvtColor_BE, COLOR_YUV2RGB_UYVY) { runCvtColorBitExactCheck(COLOR_YUV2RGB_UYVY, CV_8UC2, 0x69bea2c1, Size(262, 510)); } +TEST(Imgproc_cvtColor_BE, COLOR_YUV2BGR_UYVY) { runCvtColorBitExactCheck(COLOR_YUV2BGR_UYVY, CV_8UC2, 0xdc51a2c1, Size(262, 510)); } +TEST(Imgproc_cvtColor_BE, COLOR_YUV2RGBA_UYVY) { runCvtColorBitExactCheck(COLOR_YUV2RGBA_UYVY, CV_8UC2, 0x851eab45, Size(262, 510)); } +TEST(Imgproc_cvtColor_BE, COLOR_YUV2BGRA_UYVY) { runCvtColorBitExactCheck(COLOR_YUV2BGRA_UYVY, CV_8UC2, 0xf7b1ab45, Size(262, 510)); } + +TEST(Imgproc_cvtColor_BE, COLOR_YUV2RGB_YUY2) { runCvtColorBitExactCheck(COLOR_YUV2RGB_YUY2, CV_8UC2, 0x607e8889, Size(262, 510)); } +TEST(Imgproc_cvtColor_BE, COLOR_YUV2BGR_YUY2) { runCvtColorBitExactCheck(COLOR_YUV2BGR_YUY2, CV_8UC2, 0xfb148889, Size(262, 510)); } +TEST(Imgproc_cvtColor_BE, COLOR_YUV2RGB_YVYU) { runCvtColorBitExactCheck(COLOR_YUV2RGB_YVYU, CV_8UC2, 0x239b13d4, Size(262, 510)); } +TEST(Imgproc_cvtColor_BE, COLOR_YUV2BGR_YVYU) { runCvtColorBitExactCheck(COLOR_YUV2BGR_YVYU, CV_8UC2, 0x402b13d4, Size(262, 510)); } +TEST(Imgproc_cvtColor_BE, COLOR_YUV2RGBA_YUY2) { runCvtColorBitExactCheck(COLOR_YUV2RGBA_YUY2, CV_8UC2, 0xf6af910d, Size(262, 510)); } +TEST(Imgproc_cvtColor_BE, COLOR_YUV2BGRA_YUY2) { runCvtColorBitExactCheck(COLOR_YUV2BGRA_YUY2, CV_8UC2, 0x9154910d, Size(262, 510)); } +TEST(Imgproc_cvtColor_BE, COLOR_YUV2RGBA_YVYU) { runCvtColorBitExactCheck(COLOR_YUV2RGBA_YVYU, CV_8UC2, 0x14481c58, Size(262, 510)); } +TEST(Imgproc_cvtColor_BE, COLOR_YUV2BGRA_YVYU) { runCvtColorBitExactCheck(COLOR_YUV2BGRA_YVYU, CV_8UC2, 0x30d81c58, Size(262, 510)); } +TEST(Imgproc_cvtColor_BE, COLOR_YUV2GRAY_UYVY) { runCvtColorBitExactCheck(COLOR_YUV2GRAY_UYVY, CV_8UC2, 0x228e669c, Size(262, 510)); } +TEST(Imgproc_cvtColor_BE, COLOR_YUV2GRAY_YUY2) { runCvtColorBitExactCheck(COLOR_YUV2GRAY_YUY2, CV_8UC2, 0x125c62fd, Size(262, 510)); } + +TEST(Imgproc_cvtColor_BE, COLOR_RGB2YUV_I420) { runCvtColorBitExactCheck(COLOR_RGB2YUV_I420, CV_8UC3, 0x44bb076a, Size(262, 254)); } +TEST(Imgproc_cvtColor_BE, COLOR_BGR2YUV_I420) { runCvtColorBitExactCheck(COLOR_BGR2YUV_I420, CV_8UC3, 0xf908ff52, Size(262, 254)); } +TEST(Imgproc_cvtColor_BE, COLOR_RGBA2YUV_I420) { runCvtColorBitExactCheck(COLOR_RGBA2YUV_I420, CV_8UC3, 0x44bb076a, Size(262, 254)); } +TEST(Imgproc_cvtColor_BE, COLOR_BGRA2YUV_I420) { 
runCvtColorBitExactCheck(COLOR_BGRA2YUV_I420, CV_8UC3, 0xf908ff52, Size(262, 254)); } + +TEST(Imgproc_cvtColor_BE, COLOR_RGB2YUV_YV12) { runCvtColorBitExactCheck(COLOR_RGB2YUV_YV12, CV_8UC3, 0x1b0d076a, Size(262, 254)); } +TEST(Imgproc_cvtColor_BE, COLOR_BGR2YUV_YV12) { runCvtColorBitExactCheck(COLOR_BGR2YUV_YV12, CV_8UC3, 0xda8aff52, Size(262, 254)); } +TEST(Imgproc_cvtColor_BE, COLOR_RGBA2YUV_YV12) { runCvtColorBitExactCheck(COLOR_RGBA2YUV_YV12, CV_8UC3, 0x1b0d076a, Size(262, 254)); } +TEST(Imgproc_cvtColor_BE, COLOR_BGRA2YUV_YV12) { runCvtColorBitExactCheck(COLOR_BGRA2YUV_YV12, CV_8UC3, 0xda8aff52, Size(262, 254)); } + + static void test_Bayer2RGB_EdgeAware_8u(const Mat& src, Mat& dst, int code) { if (dst.empty()) diff --git a/modules/python/common.cmake b/modules/python/common.cmake index 264714f187..c5df8bca8f 100644 --- a/modules/python/common.cmake +++ b/modules/python/common.cmake @@ -73,7 +73,7 @@ else() if("${${PYTHON}_VERSION_MAJOR}" STREQUAL "2") set(__python_ext_suffix_var "SO") endif() - execute_process(COMMAND ${${PYTHON}_EXECUTABLE} -c "import distutils.sysconfig; print(distutils.sysconfig.get_config_var('${__python_ext_suffix_var}'))" + execute_process(COMMAND ${${PYTHON}_EXECUTABLE} -c "import sysconfig; print(sysconfig.get_config_var('${__python_ext_suffix_var}'))" RESULT_VARIABLE PYTHON_CVPY_PROCESS OUTPUT_VARIABLE CVPY_SUFFIX OUTPUT_STRIP_TRAILING_WHITESPACE) diff --git a/samples/cpp/CMakeLists.txt b/samples/cpp/CMakeLists.txt index 050397ee91..c9c4440f22 100644 --- a/samples/cpp/CMakeLists.txt +++ b/samples/cpp/CMakeLists.txt @@ -56,6 +56,7 @@ foreach(sample_filename ${cpp_samples}) endif() if(HAVE_OPENGL AND sample_filename MATCHES "detect_mser") target_compile_definitions(${tgt} PRIVATE HAVE_OPENGL) + ocv_target_link_libraries(${tgt} PRIVATE "${OPENGL_LIBRARIES}") endif() if(sample_filename MATCHES "simd_") # disabled intentionally - demonstration purposes only diff --git a/samples/opengl/CMakeLists.txt b/samples/opengl/CMakeLists.txt index 1e5d68dfe5..158151c300 100644 --- a/samples/opengl/CMakeLists.txt +++ b/samples/opengl/CMakeLists.txt @@ -23,7 +23,7 @@ if(BUILD_EXAMPLES AND OCV_DEPENDENCIES_FOUND) endif() foreach(sample_filename ${all_samples}) ocv_define_sample(tgt ${sample_filename} opengl) - ocv_target_link_libraries(${tgt} PRIVATE ${OPENCV_LINKER_LIBS} ${OPENCV_OPENGL_SAMPLES_REQUIRED_DEPS}) + ocv_target_link_libraries(${tgt} PRIVATE "${OPENGL_LIBRARIES}" "${OPENCV_OPENGL_SAMPLES_REQUIRED_DEPS}") if(sample_filename STREQUAL "opengl_interop.cpp") ocv_target_link_libraries(${tgt} PRIVATE ${X11_LIBRARIES}) ocv_target_include_directories(${tgt} ${X11_INCLUDE_DIR}) diff --git a/samples/python/tutorial_code/video/optical_flow/optical_flow.py b/samples/python/tutorial_code/video/optical_flow/optical_flow.py index 93bb2c421e..0e298e773a 100644 --- a/samples/python/tutorial_code/video/optical_flow/optical_flow.py +++ b/samples/python/tutorial_code/video/optical_flow/optical_flow.py @@ -17,12 +17,12 @@ feature_params = dict( maxCorners = 100, blockSize = 7 ) # Parameters for lucas kanade optical flow -lk_params = dict( winSize = (15,15), +lk_params = dict( winSize = (15, 15), maxLevel = 2, criteria = (cv.TERM_CRITERIA_EPS | cv.TERM_CRITERIA_COUNT, 10, 0.03)) # Create some random colors -color = np.random.randint(0,255,(100,3)) +color = np.random.randint(0, 255, (100, 3)) # Take first frame and find corners in it ret, old_frame = cap.read() @@ -33,7 +33,11 @@ p0 = cv.goodFeaturesToTrack(old_gray, mask = None, **feature_params) mask = np.zeros_like(old_frame) 
while(1): - ret,frame = cap.read() + ret, frame = cap.read() + if not ret: + print('No frames grabbed!') + break + frame_gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY) # calculate optical flow @@ -45,18 +49,20 @@ while(1): good_old = p0[st==1] # draw the tracks - for i,(new,old) in enumerate(zip(good_new, good_old)): - a,b = new.ravel() - c,d = old.ravel() - mask = cv.line(mask, (int(a),int(b)),(int(c),int(d)), color[i].tolist(), 2) - frame = cv.circle(frame,(int(a),int(b)),5,color[i].tolist(),-1) - img = cv.add(frame,mask) - - cv.imshow('frame',img) + for i, (new, old) in enumerate(zip(good_new, good_old)): + a, b = new.ravel() + c, d = old.ravel() + mask = cv.line(mask, (int(a), int(b)), (int(c), int(d)), color[i].tolist(), 2) + frame = cv.circle(frame, (int(a), int(b)), 5, color[i].tolist(), -1) + img = cv.add(frame, mask) + + cv.imshow('frame', img) k = cv.waitKey(30) & 0xff if k == 27: break # Now update the previous frame and previous points old_gray = frame_gray.copy() - p0 = good_new.reshape(-1,1,2) + p0 = good_new.reshape(-1, 1, 2) + +cv.destroyAllWindows() diff --git a/samples/python/tutorial_code/video/optical_flow/optical_flow_dense.py b/samples/python/tutorial_code/video/optical_flow/optical_flow_dense.py index b937b24ea7..8980c151c5 100644 --- a/samples/python/tutorial_code/video/optical_flow/optical_flow_dense.py +++ b/samples/python/tutorial_code/video/optical_flow/optical_flow_dense.py @@ -2,22 +2,28 @@ import numpy as np import cv2 as cv cap = cv.VideoCapture(cv.samples.findFile("vtest.avi")) ret, frame1 = cap.read() -prvs = cv.cvtColor(frame1,cv.COLOR_BGR2GRAY) +prvs = cv.cvtColor(frame1, cv.COLOR_BGR2GRAY) hsv = np.zeros_like(frame1) -hsv[...,1] = 255 +hsv[..., 1] = 255 while(1): ret, frame2 = cap.read() - next = cv.cvtColor(frame2,cv.COLOR_BGR2GRAY) - flow = cv.calcOpticalFlowFarneback(prvs,next, None, 0.5, 3, 15, 3, 5, 1.2, 0) - mag, ang = cv.cartToPolar(flow[...,0], flow[...,1]) - hsv[...,0] = ang*180/np.pi/2 - hsv[...,2] = cv.normalize(mag,None,0,255,cv.NORM_MINMAX) - bgr = cv.cvtColor(hsv,cv.COLOR_HSV2BGR) - cv.imshow('frame2',bgr) + if not ret: + print('No frames grabbed!') + break + + next = cv.cvtColor(frame2, cv.COLOR_BGR2GRAY) + flow = cv.calcOpticalFlowFarneback(prvs, next, None, 0.5, 3, 15, 3, 5, 1.2, 0) + mag, ang = cv.cartToPolar(flow[..., 0], flow[..., 1]) + hsv[..., 0] = ang*180/np.pi/2 + hsv[..., 2] = cv.normalize(mag, None, 0, 255, cv.NORM_MINMAX) + bgr = cv.cvtColor(hsv, cv.COLOR_HSV2BGR) + cv.imshow('frame2', bgr) k = cv.waitKey(30) & 0xff if k == 27: break elif k == ord('s'): - cv.imwrite('opticalfb.png',frame2) - cv.imwrite('opticalhsv.png',bgr) + cv.imwrite('opticalfb.png', frame2) + cv.imwrite('opticalhsv.png', bgr) prvs = next + +cv.destroyAllWindows()
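(Reviewer note, not part of the patch: the overflow-safe cv::Rect intersection introduced in modules/core/include/opencv2/core/types.hpp above can be exercised with a small standalone C++ sketch; the values deliberately mirror the EXPECT_EQ cases of the new Rect_Test typed test.)

```cpp
#include <climits>
#include <iostream>
#include <opencv2/core.hpp>

int main()
{
    // Ordinary overlap is unchanged by the patch.
    cv::Rect ab = cv::Rect(0, 0, 10, 10) & cv::Rect(5, 6, 4, 3);
    std::cout << ab.x << "," << ab.y << "," << ab.width << "," << ab.height << "\n"; // 5,6,4,3

    // A huge width used to overflow the x + width sum inside operator&=;
    // the patched operator clamps it and returns the expected 5 x 10 overlap at (5, 0).
    cv::Rect c = cv::Rect(0, 0, 10, 10) & cv::Rect(5, 0, INT_MAX, INT_MAX);
    std::cout << c.x << "," << c.y << "," << c.width << "," << c.height << "\n";     // 5,0,5,10

    // A rectangle starting near INT_MIN cannot reach x >= 20, so the
    // intersection now comes back as a well-defined empty rectangle.
    cv::Rect d = cv::Rect(20, 0, 10, 10) & cv::Rect(INT_MIN, 0, 10, 10);
    std::cout << std::boolalpha << d.empty() << "\n";                                // true
    return 0;
}
```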