diff --git a/CMakeLists.txt b/CMakeLists.txt index 75fcf9659b..494b851f35 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -131,7 +131,7 @@ OCV_OPTION(WITH_NVCUVID "Include NVidia Video Decoding library support" OCV_OPTION(WITH_EIGEN "Include Eigen2/Eigen3 support" ON) OCV_OPTION(WITH_VFW "Include Video for Windows support" ON IF WIN32 ) OCV_OPTION(WITH_FFMPEG "Include FFMPEG support" ON IF (NOT ANDROID AND NOT IOS)) -OCV_OPTION(WITH_GSTREAMER "Include Gstreamer support" ON IF (UNIX AND NOT APPLE AND NOT ANDROID) ) +OCV_OPTION(WITH_GSTREAMER "Include Gstreamer support" ON IF (UNIX AND NOT ANDROID) ) OCV_OPTION(WITH_GSTREAMER_0_10 "Enable Gstreamer 0.10 support (instead of 1.x)" OFF ) OCV_OPTION(WITH_GTK "Include GTK support" ON IF (UNIX AND NOT APPLE AND NOT ANDROID) ) OCV_OPTION(WITH_GTK_2_X "Use GTK version 2" OFF IF (UNIX AND NOT APPLE AND NOT ANDROID) ) @@ -162,6 +162,7 @@ OCV_OPTION(WITH_XIMEA "Include XIMEA cameras support" OFF OCV_OPTION(WITH_XINE "Include Xine support (GPL)" OFF IF (UNIX AND NOT APPLE AND NOT ANDROID) ) OCV_OPTION(WITH_CLP "Include Clp support (EPL)" OFF) OCV_OPTION(WITH_OPENCL "Include OpenCL Runtime support" ON IF (NOT IOS) ) +OCV_OPTION(WITH_OPENCL_SVM "Include OpenCL Shared Virtual Memory support" OFF ) # experimental OCV_OPTION(WITH_OPENCLAMDFFT "Include AMD OpenCL FFT library support" ON IF (NOT ANDROID AND NOT IOS) ) OCV_OPTION(WITH_OPENCLAMDBLAS "Include AMD OpenCL BLAS library support" ON IF (NOT ANDROID AND NOT IOS) ) OCV_OPTION(WITH_DIRECTX "Include DirectX support" ON IF WIN32 ) @@ -216,11 +217,14 @@ OCV_OPTION(ENABLE_POWERPC "Enable PowerPC for GCC" OCV_OPTION(ENABLE_FAST_MATH "Enable -ffast-math (not recommended for GCC 4.6.x)" OFF IF (CMAKE_COMPILER_IS_GNUCXX AND (X86 OR X86_64)) ) OCV_OPTION(ENABLE_SSE "Enable SSE instructions" ON IF ((MSVC OR CMAKE_COMPILER_IS_GNUCXX) AND (X86 OR X86_64)) ) OCV_OPTION(ENABLE_SSE2 "Enable SSE2 instructions" ON IF ((MSVC OR CMAKE_COMPILER_IS_GNUCXX) AND (X86 OR X86_64)) ) -OCV_OPTION(ENABLE_SSE3 "Enable SSE3 instructions" ON IF ((CV_ICC OR CMAKE_COMPILER_IS_GNUCXX) AND (X86 OR X86_64)) ) -OCV_OPTION(ENABLE_SSSE3 "Enable SSSE3 instructions" OFF IF (CMAKE_COMPILER_IS_GNUCXX AND (X86 OR X86_64)) ) -OCV_OPTION(ENABLE_SSE41 "Enable SSE4.1 instructions" OFF IF ((CV_ICC OR CMAKE_COMPILER_IS_GNUCXX) AND (X86 OR X86_64)) ) -OCV_OPTION(ENABLE_SSE42 "Enable SSE4.2 instructions" OFF IF (CMAKE_COMPILER_IS_GNUCXX AND (X86 OR X86_64)) ) +OCV_OPTION(ENABLE_SSE3 "Enable SSE3 instructions" ON IF ((MSVC OR CMAKE_COMPILER_IS_GNUCXX OR CV_ICC) AND (X86 OR X86_64)) ) +OCV_OPTION(ENABLE_SSSE3 "Enable SSSE3 instructions" OFF IF ((MSVC OR CMAKE_COMPILER_IS_GNUCXX) AND (X86 OR X86_64)) ) +OCV_OPTION(ENABLE_SSE41 "Enable SSE4.1 instructions" OFF IF ((MSVC OR CMAKE_COMPILER_IS_GNUCXX OR CV_ICC) AND (X86 OR X86_64)) ) +OCV_OPTION(ENABLE_SSE42 "Enable SSE4.2 instructions" OFF IF ((MSVC OR CMAKE_COMPILER_IS_GNUCXX) AND (X86 OR X86_64)) ) +OCV_OPTION(ENABLE_POPCNT "Enable POPCNT instructions" OFF IF ((MSVC OR CMAKE_COMPILER_IS_GNUCXX) AND (X86 OR X86_64)) ) OCV_OPTION(ENABLE_AVX "Enable AVX instructions" OFF IF ((MSVC OR CMAKE_COMPILER_IS_GNUCXX) AND (X86 OR X86_64)) ) +OCV_OPTION(ENABLE_AVX2 "Enable AVX2 instructions" OFF IF ((MSVC OR CMAKE_COMPILER_IS_GNUCXX) AND (X86 OR X86_64)) ) +OCV_OPTION(ENABLE_FMA3 "Enable FMA3 instructions" OFF IF ((MSVC OR CMAKE_COMPILER_IS_GNUCXX) AND (X86 OR X86_64)) ) OCV_OPTION(ENABLE_NEON "Enable NEON instructions" OFF IF CMAKE_COMPILER_IS_GNUCXX AND (ARM OR IOS) ) OCV_OPTION(ENABLE_VFPV3 "Enable VFPv3-D32 
instructions" OFF IF CMAKE_COMPILER_IS_GNUCXX AND (ARM OR IOS) ) OCV_OPTION(ENABLE_NOISY_WARNINGS "Show all warnings even if they are too noisy" OFF ) diff --git a/cmake/FindCUDA.cmake b/cmake/FindCUDA.cmake index ccfc4b93b3..ceaed5e3a3 100644 --- a/cmake/FindCUDA.cmake +++ b/cmake/FindCUDA.cmake @@ -529,7 +529,7 @@ endmacro() # Check to see if the CUDA_TOOLKIT_ROOT_DIR and CUDA_SDK_ROOT_DIR have changed, # if they have then clear the cache variables, so that will be detected again. -if(NOT "${CUDA_TOOLKIT_ROOT_DIR}" STREQUAL "${CUDA_TOOLKIT_ROOT_DIR_INTERNAL}") +if(DEFINED CUDA_TOOLKIT_ROOT_DIR_INTERNAL AND (NOT "${CUDA_TOOLKIT_ROOT_DIR}" STREQUAL "${CUDA_TOOLKIT_ROOT_DIR_INTERNAL}")) unset(CUDA_TARGET_TRIPLET CACHE) unset(CUDA_TOOLKIT_TARGET_DIR CACHE) unset(CUDA_NVCC_EXECUTABLE CACHE) @@ -537,8 +537,8 @@ if(NOT "${CUDA_TOOLKIT_ROOT_DIR}" STREQUAL "${CUDA_TOOLKIT_ROOT_DIR_INTERNAL}") cuda_unset_include_and_libraries() endif() -if(NOT "${CUDA_TARGET_TRIPLET}" STREQUAL "${CUDA_TARGET_TRIPLET_INTERNAL}" OR - NOT "${CUDA_TOOLKIT_TARGET_DIR}" STREQUAL "${CUDA_TOOLKIT_TARGET_DIR_INTERNAL}") +if(DEFINED CUDA_TARGET_TRIPLET_INTERNAL AND (NOT "${CUDA_TARGET_TRIPLET}" STREQUAL "${CUDA_TARGET_TRIPLET_INTERNAL}") OR + (DEFINED CUDA_TOOLKIT_TARGET_DIR AND DEFINED CUDA_TOOLKIT_TARGET_DIR_INTERNAL AND NOT "${CUDA_TOOLKIT_TARGET_DIR}" STREQUAL "${CUDA_TOOLKIT_TARGET_DIR_INTERNAL}")) cuda_unset_include_and_libraries() endif() diff --git a/cmake/OpenCVCRTLinkage.cmake b/cmake/OpenCVCRTLinkage.cmake index 5265e3e8a6..7b7fcad7ec 100644 --- a/cmake/OpenCVCRTLinkage.cmake +++ b/cmake/OpenCVCRTLinkage.cmake @@ -77,7 +77,7 @@ else() endforeach(flag_var) endif() -if(NOT ${CMAKE_MAJOR_VERSION}.${CMAKE_MINOR_VERSION} LESS 2.8 AND NOT ${CMAKE_MINOR_VERSION}.${CMAKE_PATCH_VERSION} LESS 8.6) +if(CMAKE_VERSION VERSION_GREATER "2.8.6") include(ProcessorCount) ProcessorCount(N) if(NOT N EQUAL 0) diff --git a/cmake/OpenCVCompilerOptions.cmake b/cmake/OpenCVCompilerOptions.cmake index 2f9068c60d..13559b5c8a 100644 --- a/cmake/OpenCVCompilerOptions.cmake +++ b/cmake/OpenCVCompilerOptions.cmake @@ -128,10 +128,10 @@ if(CMAKE_COMPILER_IS_GNUCXX) if(ENABLE_SSE2) add_extra_compiler_option(-msse2) endif() - if (ENABLE_NEON) + if(ENABLE_NEON) add_extra_compiler_option("-mfpu=neon") endif() - if (ENABLE_VFPV3 AND NOT ENABLE_NEON) + if(ENABLE_VFPV3 AND NOT ENABLE_NEON) add_extra_compiler_option("-mfpu=vfpv3") endif() @@ -140,6 +140,13 @@ if(CMAKE_COMPILER_IS_GNUCXX) if(ENABLE_AVX) add_extra_compiler_option(-mavx) endif() + if(ENABLE_AVX2) + add_extra_compiler_option(-mavx2) + + if(ENABLE_FMA3) + add_extra_compiler_option(-mfma) + endif() + endif() # GCC depresses SSEx instructions when -mavx is used. Instead, it generates new AVX instructions or AVX equivalence for all SSEx instructions when needed. 
if(NOT OPENCV_EXTRA_CXX_FLAGS MATCHES "-mavx") @@ -158,6 +165,10 @@ if(CMAKE_COMPILER_IS_GNUCXX) if(ENABLE_SSE42) add_extra_compiler_option(-msse4.2) endif() + + if(ENABLE_POPCNT) + add_extra_compiler_option(-mpopcnt) + endif() endif() endif(NOT MINGW) @@ -214,7 +225,10 @@ if(MSVC) set(OPENCV_EXTRA_FLAGS_RELEASE "${OPENCV_EXTRA_FLAGS_RELEASE} /Zi") endif() - if(ENABLE_AVX AND NOT MSVC_VERSION LESS 1600) + if(ENABLE_AVX2 AND NOT MSVC_VERSION LESS 1800) + set(OPENCV_EXTRA_FLAGS "${OPENCV_EXTRA_FLAGS} /arch:AVX2") + endif() + if(ENABLE_AVX AND NOT MSVC_VERSION LESS 1600 AND NOT OPENCV_EXTRA_FLAGS MATCHES "/arch:") set(OPENCV_EXTRA_FLAGS "${OPENCV_EXTRA_FLAGS} /arch:AVX") endif() @@ -236,7 +250,7 @@ if(MSVC) endif() endif() - if(ENABLE_SSE OR ENABLE_SSE2 OR ENABLE_SSE3 OR ENABLE_SSE4_1 OR ENABLE_AVX) + if(ENABLE_SSE OR ENABLE_SSE2 OR ENABLE_SSE3 OR ENABLE_SSE4_1 OR ENABLE_AVX OR ENABLE_AVX2) set(OPENCV_EXTRA_FLAGS "${OPENCV_EXTRA_FLAGS} /Oi") endif() @@ -308,6 +322,7 @@ if(MSVC) endforeach() if(NOT ENABLE_NOISY_WARNINGS) - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /wd4251") #class 'std::XXX' needs to have dll-interface to be used by clients of YYY + ocv_warnings_disable(CMAKE_CXX_FLAGS /wd4251) # class 'std::XXX' needs to have dll-interface to be used by clients of YYY + ocv_warnings_disable(CMAKE_CXX_FLAGS /wd4324) # 'struct_name' : structure was padded due to __declspec(align()) endif() endif() diff --git a/cmake/OpenCVDetectOpenCL.cmake b/cmake/OpenCVDetectOpenCL.cmake index f732546e51..ce76ad1732 100644 --- a/cmake/OpenCVDetectOpenCL.cmake +++ b/cmake/OpenCVDetectOpenCL.cmake @@ -26,6 +26,10 @@ if(OPENCL_FOUND) set(HAVE_OPENCL 1) + if(WITH_OPENCL_SVM) + set(HAVE_OPENCL_SVM 1) + endif() + if(HAVE_OPENCL_STATIC) set(OPENCL_LIBRARIES "${OPENCL_LIBRARY}") else() diff --git a/cmake/OpenCVFindLibsVideo.cmake b/cmake/OpenCVFindLibsVideo.cmake index 6d6ca62e86..be394857c0 100644 --- a/cmake/OpenCVFindLibsVideo.cmake +++ b/cmake/OpenCVFindLibsVideo.cmake @@ -32,7 +32,7 @@ if(WITH_GSTREAMER AND NOT WITH_GSTREAMER_0_10) endif(WITH_GSTREAMER AND NOT WITH_GSTREAMER_0_10) # if gstreamer 1.x was not found, or we specified we wanted 0.10, try to find it -if(WITH_GSTREAMER_0_10 OR NOT HAVE_GSTREAMER) +if(WITH_GSTREAMER AND NOT HAVE_GSTREAMER OR WITH_GSTREAMER_0_10) CHECK_MODULE(gstreamer-base-0.10 HAVE_GSTREAMER_BASE) CHECK_MODULE(gstreamer-video-0.10 HAVE_GSTREAMER_VIDEO) CHECK_MODULE(gstreamer-app-0.10 HAVE_GSTREAMER_APP) @@ -47,7 +47,7 @@ if(WITH_GSTREAMER_0_10 OR NOT HAVE_GSTREAMER) set(GSTREAMER_RIFF_VERSION ${ALIASOF_gstreamer-riff-0.10_VERSION}) set(GSTREAMER_PBUTILS_VERSION ${ALIASOF_gstreamer-pbutils-0.10_VERSION}) endif() -endif(WITH_GSTREAMER_0_10 OR NOT HAVE_GSTREAMER) +endif(WITH_GSTREAMER AND NOT HAVE_GSTREAMER OR WITH_GSTREAMER_0_10) # --- unicap --- ocv_clear_vars(HAVE_UNICAP) diff --git a/cmake/OpenCVModule.cmake b/cmake/OpenCVModule.cmake index 708578d8b5..fa02919633 100644 --- a/cmake/OpenCVModule.cmake +++ b/cmake/OpenCVModule.cmake @@ -109,7 +109,7 @@ endmacro() # Usage: # ocv_add_module( [INTERNAL|BINDINGS] [REQUIRED] [] [OPTIONAL ]) # Example: -# ocv_add_module(yaom INTERNAL opencv_core opencv_highgui opencv_flann OPTIONAL opencv_cuda) +# ocv_add_module(yaom INTERNAL opencv_core opencv_highgui opencv_flann OPTIONAL opencv_cudev) macro(ocv_add_module _name) ocv_debug_message("ocv_add_module(" ${_name} ${ARGN} ")") string(TOLOWER "${_name}" name) diff --git a/cmake/templates/cvconfig.h.in b/cmake/templates/cvconfig.h.in index 3eea4fafe4..f8c1c40357 100644 --- 
a/cmake/templates/cvconfig.h.in +++ b/cmake/templates/cvconfig.h.in @@ -122,6 +122,7 @@ /* OpenCL Support */ #cmakedefine HAVE_OPENCL #cmakedefine HAVE_OPENCL_STATIC +#cmakedefine HAVE_OPENCL_SVM /* OpenEXR codec */ #cmakedefine HAVE_OPENEXR diff --git a/data/haarcascades/haarcascade_fullbody.xml b/data/haarcascades/haarcascade_fullbody.xml index 1f4e3a70d7..831f4708e1 100644 --- a/data/haarcascades/haarcascade_fullbody.xml +++ b/data/haarcascades/haarcascade_fullbody.xml @@ -139,8 +139,8 @@ Thanks to Martin Spengler, ETH Zurich, for providing the demo movie. BOOST HAAR - 14 - 28 + 28 + 14 107 diff --git a/data/haarcascades/haarcascade_lowerbody.xml b/data/haarcascades/haarcascade_lowerbody.xml index 59949b0d82..db6d8d89ff 100644 --- a/data/haarcascades/haarcascade_lowerbody.xml +++ b/data/haarcascades/haarcascade_lowerbody.xml @@ -139,8 +139,8 @@ Thanks to Martin Spengler, ETH Zurich, for providing the demo movie. BOOST HAAR - 19 - 23 + 23 + 19 89 diff --git a/data/haarcascades/haarcascade_smile.xml b/data/haarcascades/haarcascade_smile.xml index b7df2217b7..569db9910e 100644 --- a/data/haarcascades/haarcascade_smile.xml +++ b/data/haarcascades/haarcascade_smile.xml @@ -47,8 +47,8 @@ BOOST HAAR - 36 - 18 + 18 + 36 53 diff --git a/data/haarcascades/haarcascade_upperbody.xml b/data/haarcascades/haarcascade_upperbody.xml index 778687fbce..6070a7471e 100644 --- a/data/haarcascades/haarcascade_upperbody.xml +++ b/data/haarcascades/haarcascade_upperbody.xml @@ -139,8 +139,8 @@ Thanks to Martin Spengler, ETH Zurich, for providing the demo movie. BOOST HAAR - 22 - 18 + 18 + 22 152 diff --git a/doc/opencv.bib b/doc/opencv.bib index 067a1aa005..be53dbaa56 100644 --- a/doc/opencv.bib +++ b/doc/opencv.bib @@ -106,11 +106,11 @@ year = {1998}, publisher = {Citeseer} } -@ARTICLE{Breiman84, - author = {Olshen, LBJFR and Stone, Charles J}, - title = {Classification and regression trees}, - year = {1984}, - journal = {Wadsworth International Group} +@book{Breiman84, + title={Classification and regression trees}, + author={Breiman, Leo and Friedman, Jerome and Stone, Charles J and Olshen, Richard A}, + year={1984}, + publisher={CRC press} } @INCOLLECTION{Brox2004, author = {Brox, Thomas and Bruhn, Andres and Papenberg, Nils and Weickert, Joachim}, diff --git a/doc/tutorials/imgproc/imgtrans/distance_transformation/distance_transform.markdown b/doc/tutorials/imgproc/imgtrans/distance_transformation/distance_transform.markdown new file mode 100644 index 0000000000..1674c404b8 --- /dev/null +++ b/doc/tutorials/imgproc/imgtrans/distance_transformation/distance_transform.markdown @@ -0,0 +1,57 @@ +Image Segmentation with Distance Transform and Watershed Algorithm {#tutorial_distance_transform} +============= + +Goal +---- + +In this tutorial you will learn how to: + +- Use the OpenCV function @ref cv::filter2D in order to perform some Laplacian filtering for image sharpening +- Use the OpenCV function @ref cv::distanceTransform in order to obtain the derived representation of a binary image, where the value of each pixel is replaced by its distance to the nearest background pixel +- Use the OpenCV function @ref cv::watershed in order to isolate objects in the image from the background + +Theory +------ + +Code +---- + +This tutorial's code is shown in the lines below. You can also download it from + [here](https://github.com/Itseez/opencv/tree/master/samples/cpp/tutorial_code/ImgTrans/imageSegmentation.cpp).
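Before the full listing, the sketch below condenses the pipeline from the Goal section: suppress the white background, sharpen with a Laplacian kernel via @ref cv::filter2D, binarize, run @ref cv::distanceTransform and extract its peaks, seed markers from @ref cv::findContours, and finish with @ref cv::watershed. It is only an illustration, not the sample itself; the input file name "input.png" and the threshold values are placeholders.

@code{.cpp}
#include <opencv2/opencv.hpp>
#include <vector>

int main()
{
    // Placeholder input file name; the full sample linked above uses its own image.
    cv::Mat src = cv::imread("input.png");
    if (src.empty())
        return -1;

    // Turn the (assumed) pure-white background black so the objects stand out
    // for the Distance Transform step.
    cv::Mat white;
    cv::inRange(src, cv::Scalar(255, 255, 255), cv::Scalar(255, 255, 255), white);
    src.setTo(cv::Scalar(0, 0, 0), white);

    // Sharpen with a strong Laplacian kernel (an approximation of the second derivative).
    cv::Mat kernel = (cv::Mat_<float>(3, 3) << 1, 1, 1, 1, -8, 1, 1, 1, 1);
    cv::Mat lap, sharp;
    cv::filter2D(src, lap, CV_32F, kernel);
    src.convertTo(sharp, CV_32F);
    sharp -= lap;
    sharp.convertTo(sharp, CV_8UC3);

    // Grayscale, binary image, Distance Transform, normalization and peak extraction.
    cv::Mat gray, bw, dist;
    cv::cvtColor(sharp, gray, cv::COLOR_BGR2GRAY);
    cv::threshold(gray, bw, 40, 255, cv::THRESH_BINARY | cv::THRESH_OTSU);
    cv::distanceTransform(bw, dist, cv::DIST_L2, 3);
    cv::normalize(dist, dist, 0, 1.0, cv::NORM_MINMAX);
    cv::threshold(dist, dist, 0.4, 1.0, cv::THRESH_BINARY); // 0.4 is an arbitrary cut-off
    cv::dilate(dist, dist, cv::Mat::ones(3, 3, CV_8U));
    dist.convertTo(dist, CV_8U);

    // One seed/marker per blob, then watershed.
    std::vector<std::vector<cv::Point> > contours;
    cv::findContours(dist, contours, cv::RETR_EXTERNAL, cv::CHAIN_APPROX_SIMPLE);
    cv::Mat markers = cv::Mat::zeros(dist.size(), CV_32SC1);
    for (size_t i = 0; i < contours.size(); i++)
        cv::drawContours(markers, contours, (int)i, cv::Scalar((int)i + 1), -1);
    cv::watershed(sharp, markers);

    // Crude visualization of the resulting labels.
    cv::Mat vis;
    markers.convertTo(vis, CV_8U, 255.0 / (contours.size() + 1));
    cv::imshow("watershed labels", vis);
    cv::waitKey(0);
    return 0;
}
@endcode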
+@includelineno samples/cpp/tutorial_code/ImgTrans/imageSegmentation.cpp + +Explanation / Result +-------------------- + +-# Load the source image and check if it is loaded without any problem, then show it: + @snippet samples/cpp/tutorial_code/ImgTrans/imageSegmentation.cpp load_image + ![](images/source.jpeg) + +-# Then if we have an image with a white background, it is good to transform it to black. This will help us to discriminate the foreground objects more easily when we apply the Distance Transform: + @snippet samples/cpp/tutorial_code/ImgTrans/imageSegmentation.cpp black_bg + ![](images/black_bg.jpeg) + +-# Afterwards we will sharpen our image in order to accentuate the edges of the foreground objects. We will apply a Laplacian filter with a quite strong kernel (an approximation of the second derivative): + @snippet samples/cpp/tutorial_code/ImgTrans/imageSegmentation.cpp sharp + ![](images/laplace.jpeg) + ![](images/sharp.jpeg) + +-# Now we transform our new sharpened source image to a grayscale and a binary one, respectively: + @snippet samples/cpp/tutorial_code/ImgTrans/imageSegmentation.cpp bin + ![](images/bin.jpeg) + +-# We are now ready to apply the Distance Transform on the binary image. Moreover, we normalize the output image in order to be able to visualize and threshold the result: + @snippet samples/cpp/tutorial_code/ImgTrans/imageSegmentation.cpp dist + ![](images/dist_transf.jpeg) + +-# We threshold the *dist* image and then perform a morphology operation (i.e. dilation) in order to extract the peaks from the above image: + @snippet samples/cpp/tutorial_code/ImgTrans/imageSegmentation.cpp peaks + ![](images/peaks.jpeg) + +-# Then from each blob we create a seed/marker for the watershed algorithm with the help of the @ref cv::findContours function: + @snippet samples/cpp/tutorial_code/ImgTrans/imageSegmentation.cpp seeds + ![](images/markers.jpeg) + +-# Finally, we can apply the watershed algorithm, and visualize the result: + @snippet samples/cpp/tutorial_code/ImgTrans/imageSegmentation.cpp watershed + ![](images/final.jpeg) \ No newline at end of file diff --git a/doc/tutorials/imgproc/imgtrans/distance_transformation/images/bin.jpeg b/doc/tutorials/imgproc/imgtrans/distance_transformation/images/bin.jpeg new file mode 100644 index 0000000000..c8515ec1f9 Binary files /dev/null and b/doc/tutorials/imgproc/imgtrans/distance_transformation/images/bin.jpeg differ diff --git a/doc/tutorials/imgproc/imgtrans/distance_transformation/images/black_bg.jpeg b/doc/tutorials/imgproc/imgtrans/distance_transformation/images/black_bg.jpeg new file mode 100644 index 0000000000..fdef57510e Binary files /dev/null and b/doc/tutorials/imgproc/imgtrans/distance_transformation/images/black_bg.jpeg differ diff --git a/doc/tutorials/imgproc/imgtrans/distance_transformation/images/dist_transf.jpeg b/doc/tutorials/imgproc/imgtrans/distance_transformation/images/dist_transf.jpeg new file mode 100644 index 0000000000..8eaecc1393 Binary files /dev/null and b/doc/tutorials/imgproc/imgtrans/distance_transformation/images/dist_transf.jpeg differ diff --git a/doc/tutorials/imgproc/imgtrans/distance_transformation/images/final.jpeg b/doc/tutorials/imgproc/imgtrans/distance_transformation/images/final.jpeg new file mode 100644 index 0000000000..9a91e6bef8 Binary files /dev/null and b/doc/tutorials/imgproc/imgtrans/distance_transformation/images/final.jpeg differ diff --git a/doc/tutorials/imgproc/imgtrans/distance_transformation/images/laplace.jpeg
b/doc/tutorials/imgproc/imgtrans/distance_transformation/images/laplace.jpeg new file mode 100644 index 0000000000..1ce3be3e57 Binary files /dev/null and b/doc/tutorials/imgproc/imgtrans/distance_transformation/images/laplace.jpeg differ diff --git a/doc/tutorials/imgproc/imgtrans/distance_transformation/images/markers.jpeg b/doc/tutorials/imgproc/imgtrans/distance_transformation/images/markers.jpeg new file mode 100644 index 0000000000..6ae81a40d2 Binary files /dev/null and b/doc/tutorials/imgproc/imgtrans/distance_transformation/images/markers.jpeg differ diff --git a/doc/tutorials/imgproc/imgtrans/distance_transformation/images/peaks.jpeg b/doc/tutorials/imgproc/imgtrans/distance_transformation/images/peaks.jpeg new file mode 100644 index 0000000000..4b47e9b4fd Binary files /dev/null and b/doc/tutorials/imgproc/imgtrans/distance_transformation/images/peaks.jpeg differ diff --git a/doc/tutorials/imgproc/imgtrans/distance_transformation/images/sharp.jpeg b/doc/tutorials/imgproc/imgtrans/distance_transformation/images/sharp.jpeg new file mode 100644 index 0000000000..c2187a44c9 Binary files /dev/null and b/doc/tutorials/imgproc/imgtrans/distance_transformation/images/sharp.jpeg differ diff --git a/doc/tutorials/imgproc/imgtrans/distance_transformation/images/source.jpeg b/doc/tutorials/imgproc/imgtrans/distance_transformation/images/source.jpeg new file mode 100644 index 0000000000..e78681b10b Binary files /dev/null and b/doc/tutorials/imgproc/imgtrans/distance_transformation/images/source.jpeg differ diff --git a/doc/tutorials/imgproc/table_of_content_imgproc.markdown b/doc/tutorials/imgproc/table_of_content_imgproc.markdown index 17380359ca..615da70039 100644 --- a/doc/tutorials/imgproc/table_of_content_imgproc.markdown +++ b/doc/tutorials/imgproc/table_of_content_imgproc.markdown @@ -202,3 +202,11 @@ In this section you will learn about the image processing (manipulation) functio *Author:* Ana Huamán Where we learn how to calculate distances from the image to contours + +- @subpage tutorial_distance_transform + + *Compatibility:* \> OpenCV 2.0 + + *Author:* Theodore Tsesmelis + + Where we learn to segment objects using Laplacian filtering, the Distance Transformation and the Watershed algorithm. \ No newline at end of file diff --git a/modules/calib3d/include/opencv2/calib3d.hpp b/modules/calib3d/include/opencv2/calib3d.hpp index ac90e2fbac..3b8e3c7c14 100644 --- a/modules/calib3d/include/opencv2/calib3d.hpp +++ b/modules/calib3d/include/opencv2/calib3d.hpp @@ -1652,7 +1652,7 @@ namespace fisheye InputArray K, InputArray D, double alpha = 0, OutputArray jacobian = noArray()); /** @overload */ - CV_EXPORTS void projectPoints(InputArray objectPoints, OutputArray imagePoints, InputArray rvec, InputArray tvec, + CV_EXPORTS_W void projectPoints(InputArray objectPoints, OutputArray imagePoints, InputArray rvec, InputArray tvec, InputArray K, InputArray D, double alpha = 0, OutputArray jacobian = noArray()); /** @brief Distorts 2D points using fisheye model. @@ -1664,7 +1664,7 @@ namespace fisheye @param alpha The skew coefficient. @param distorted Output array of image points, 1xN/Nx1 2-channel, or vector\ . 
*/ - CV_EXPORTS void distortPoints(InputArray undistorted, OutputArray distorted, InputArray K, InputArray D, double alpha = 0); + CV_EXPORTS_W void distortPoints(InputArray undistorted, OutputArray distorted, InputArray K, InputArray D, double alpha = 0); /** @brief Undistorts 2D points using fisheye model @@ -1677,7 +1677,7 @@ namespace fisheye @param P New camera matrix (3x3) or new projection matrix (3x4) @param undistorted Output array of image points, 1xN/Nx1 2-channel, or vector\ . */ - CV_EXPORTS void undistortPoints(InputArray distorted, OutputArray undistorted, + CV_EXPORTS_W void undistortPoints(InputArray distorted, OutputArray undistorted, InputArray K, InputArray D, InputArray R = noArray(), InputArray P = noArray()); /** @brief Computes undistortion and rectification maps for image transform by cv::remap(). If D is empty zero @@ -1694,7 +1694,7 @@ namespace fisheye @param map1 The first output map. @param map2 The second output map. */ - CV_EXPORTS void initUndistortRectifyMap(InputArray K, InputArray D, InputArray R, InputArray P, + CV_EXPORTS_W void initUndistortRectifyMap(InputArray K, InputArray D, InputArray R, InputArray P, const cv::Size& size, int m1type, OutputArray map1, OutputArray map2); /** @brief Transforms an image to compensate for fisheye lens distortion. @@ -1725,7 +1725,7 @@ namespace fisheye ![image](pics/fisheye_undistorted.jpg) */ - CV_EXPORTS void undistortImage(InputArray distorted, OutputArray undistorted, + CV_EXPORTS_W void undistortImage(InputArray distorted, OutputArray undistorted, InputArray K, InputArray D, InputArray Knew = cv::noArray(), const Size& new_size = Size()); /** @brief Estimates new camera matrix for undistortion or rectification. @@ -1741,7 +1741,7 @@ namespace fisheye @param new_size @param fov_scale Divisor for new focal length. */ - CV_EXPORTS void estimateNewCameraMatrixForUndistortRectify(InputArray K, InputArray D, const Size &image_size, InputArray R, + CV_EXPORTS_W void estimateNewCameraMatrixForUndistortRectify(InputArray K, InputArray D, const Size &image_size, InputArray R, OutputArray P, double balance = 0.0, const Size& new_size = Size(), double fov_scale = 1.0); /** @brief Performs camera calibaration @@ -1775,7 +1775,7 @@ namespace fisheye zero. @param criteria Termination criteria for the iterative optimization algorithm. */ - CV_EXPORTS double calibrate(InputArrayOfArrays objectPoints, InputArrayOfArrays imagePoints, const Size& image_size, + CV_EXPORTS_W double calibrate(InputArrayOfArrays objectPoints, InputArrayOfArrays imagePoints, const Size& image_size, InputOutputArray K, InputOutputArray D, OutputArrayOfArrays rvecs, OutputArrayOfArrays tvecs, int flags = 0, TermCriteria criteria = TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, 100, DBL_EPSILON)); @@ -1809,7 +1809,7 @@ namespace fisheye length. Balance is in range of [0, 1]. @param fov_scale Divisor for new focal length. */ - CV_EXPORTS void stereoRectify(InputArray K1, InputArray D1, InputArray K2, InputArray D2, const Size &imageSize, InputArray R, InputArray tvec, + CV_EXPORTS_W void stereoRectify(InputArray K1, InputArray D1, InputArray K2, InputArray D2, const Size &imageSize, InputArray R, InputArray tvec, OutputArray R1, OutputArray R2, OutputArray P1, OutputArray P2, OutputArray Q, int flags, const Size &newImageSize = Size(), double balance = 0.0, double fov_scale = 1.0); @@ -1845,9 +1845,9 @@ namespace fisheye zero. @param criteria Termination criteria for the iterative optimization algorithm. 
*/ - CV_EXPORTS double stereoCalibrate(InputArrayOfArrays objectPoints, InputArrayOfArrays imagePoints1, InputArrayOfArrays imagePoints2, + CV_EXPORTS_W double stereoCalibrate(InputArrayOfArrays objectPoints, InputArrayOfArrays imagePoints1, InputArrayOfArrays imagePoints2, InputOutputArray K1, InputOutputArray D1, InputOutputArray K2, InputOutputArray D2, Size imageSize, - OutputArray R, OutputArray T, int flags = CALIB_FIX_INTRINSIC, + OutputArray R, OutputArray T, int flags = fisheye::CALIB_FIX_INTRINSIC, TermCriteria criteria = TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, 100, DBL_EPSILON)); //! @} calib3d_fisheye diff --git a/modules/calib3d/src/fisheye.cpp b/modules/calib3d/src/fisheye.cpp index ce4144bbb0..b0e92a3adc 100644 --- a/modules/calib3d/src/fisheye.cpp +++ b/modules/calib3d/src/fisheye.cpp @@ -794,8 +794,20 @@ double cv::fisheye::calibrate(InputArrayOfArrays objectPoints, InputArrayOfArray if (K.needed()) cv::Mat(_K).convertTo(K, K.empty() ? CV_64FC1 : K.type()); if (D.needed()) cv::Mat(finalParam.k).convertTo(D, D.empty() ? CV_64FC1 : D.type()); - if (rvecs.needed()) cv::Mat(omc).convertTo(rvecs, rvecs.empty() ? CV_64FC3 : rvecs.type()); - if (tvecs.needed()) cv::Mat(Tc).convertTo(tvecs, tvecs.empty() ? CV_64FC3 : tvecs.type()); + if (rvecs.kind()==_InputArray::STD_VECTOR_MAT) + { + int i; + for( i = 0; i < (int)objectPoints.total(); i++ ) + { + rvecs.getMat(i)=omc[i]; + tvecs.getMat(i)=Tc[i]; + } + } + else + { + if (rvecs.needed()) cv::Mat(omc).convertTo(rvecs, rvecs.empty() ? CV_64FC3 : rvecs.type()); + if (tvecs.needed()) cv::Mat(Tc).convertTo(tvecs, tvecs.empty() ? CV_64FC3 : tvecs.type()); + } return rms; } diff --git a/modules/cuda/doc/introduction.markdown b/modules/core/doc/cuda.markdown similarity index 100% rename from modules/cuda/doc/introduction.markdown rename to modules/core/doc/cuda.markdown diff --git a/modules/core/include/opencv2/core/base.hpp b/modules/core/include/opencv2/core/base.hpp index e43fbbc951..f2acaa3fb4 100644 --- a/modules/core/include/opencv2/core/base.hpp +++ b/modules/core/include/opencv2/core/base.hpp @@ -13,6 +13,7 @@ // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Copyright (C) 2013, OpenCV Foundation, all rights reserved. +// Copyright (C) 2014, Itseez Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, @@ -813,4 +814,6 @@ inline float32x2_t cv_vsqrt_f32(float32x2_t val) } // cv +#include "sse_utils.hpp" + #endif //__OPENCV_CORE_BASE_HPP__ diff --git a/modules/core/include/opencv2/core/cvdef.h b/modules/core/include/opencv2/core/cvdef.h index 06894d7a5d..3fdaa6954d 100644 --- a/modules/core/include/opencv2/core/cvdef.h +++ b/modules/core/include/opencv2/core/cvdef.h @@ -13,6 +13,7 @@ // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Copyright (C) 2013, OpenCV Foundation, all rights reserved. +// Copyright (C) 2015, Itseez Inc., all rights reserved. // Third party copyrights are property of their respective owners. 
// // Redistribution and use in source and binary forms, with or without modification, @@ -104,17 +105,32 @@ #endif /* CPU features and intrinsics support */ -#define CV_CPU_NONE 0 -#define CV_CPU_MMX 1 -#define CV_CPU_SSE 2 -#define CV_CPU_SSE2 3 -#define CV_CPU_SSE3 4 -#define CV_CPU_SSSE3 5 -#define CV_CPU_SSE4_1 6 -#define CV_CPU_SSE4_2 7 -#define CV_CPU_POPCNT 8 -#define CV_CPU_AVX 10 -#define CV_CPU_NEON 11 +#define CV_CPU_NONE 0 +#define CV_CPU_MMX 1 +#define CV_CPU_SSE 2 +#define CV_CPU_SSE2 3 +#define CV_CPU_SSE3 4 +#define CV_CPU_SSSE3 5 +#define CV_CPU_SSE4_1 6 +#define CV_CPU_SSE4_2 7 +#define CV_CPU_POPCNT 8 + +#define CV_CPU_AVX 10 +#define CV_CPU_AVX2 11 +#define CV_CPU_FMA3 12 + +#define CV_CPU_AVX_512F 13 +#define CV_CPU_AVX_512BW 14 +#define CV_CPU_AVX_512CD 15 +#define CV_CPU_AVX_512DQ 16 +#define CV_CPU_AVX_512ER 17 +#define CV_CPU_AVX_512IFMA512 18 +#define CV_CPU_AVX_512PF 19 +#define CV_CPU_AVX_512VBMI 20 +#define CV_CPU_AVX_512VL 21 + +#define CV_CPU_NEON 100 + // when adding to this list remember to update the enum in core/utility.cpp #define CV_HARDWARE_MAX_FEATURE 255 @@ -123,6 +139,7 @@ #if defined __SSE2__ || defined _M_X64 || (defined _M_IX86_FP && _M_IX86_FP >= 2) # include +# define CV_MMX 1 # define CV_SSE 1 # define CV_SSE2 1 # if defined __SSE3__ || (defined _MSC_VER && _MSC_VER >= 1500) @@ -141,7 +158,15 @@ # include # define CV_SSE4_2 1 # endif -# if defined __AVX__ || (defined _MSC_FULL_VER && _MSC_FULL_VER >= 160040219) +# if defined __POPCNT__ || (defined _MSC_VER && _MSC_VER >= 1500) +# ifdef _MSC_VER +# include +# else +# include +# endif +# define CV_POPCNT 1 +# endif +# if defined __AVX__ || (defined _MSC_VER && _MSC_VER >= 1600 && 0) // MS Visual Studio 2010 (2012?) has no macro pre-defined to identify the use of /arch:AVX // See: http://connect.microsoft.com/VisualStudio/feedback/details/605858/arch-avx-should-define-a-predefined-macro-in-x64-and-set-a-unique-value-for-m-ix86-fp-in-win32 # include @@ -152,6 +177,13 @@ # define __xgetbv() 0 # endif # endif +# if defined __AVX2__ || (defined _MSC_VER && _MSC_VER >= 1800 && 0) +# include +# define CV_AVX2 1 +# if defined __FMA__ +# define CV_FMA3 1 +# endif +# endif #endif #if (defined WIN32 || defined _WIN32) && defined(_M_ARM) @@ -166,6 +198,12 @@ #endif // __CUDACC__ +#ifndef CV_POPCNT +#define CV_POPCNT 0 +#endif +#ifndef CV_MMX +# define CV_MMX 0 +#endif #ifndef CV_SSE # define CV_SSE 0 #endif @@ -187,6 +225,40 @@ #ifndef CV_AVX # define CV_AVX 0 #endif +#ifndef CV_AVX2 +# define CV_AVX2 0 +#endif +#ifndef CV_FMA3 +# define CV_FMA3 0 +#endif +#ifndef CV_AVX_512F +# define CV_AVX_512F 0 +#endif +#ifndef CV_AVX_512BW +# define CV_AVX_512BW 0 +#endif +#ifndef CV_AVX_512CD +# define CV_AVX_512CD 0 +#endif +#ifndef CV_AVX_512DQ +# define CV_AVX_512DQ 0 +#endif +#ifndef CV_AVX_512ER +# define CV_AVX_512ER 0 +#endif +#ifndef CV_AVX_512IFMA512 +# define CV_AVX_512IFMA512 0 +#endif +#ifndef CV_AVX_512PF +# define CV_AVX_512PF 0 +#endif +#ifndef CV_AVX_512VBMI +# define CV_AVX_512VBMI 0 +#endif +#ifndef CV_AVX_512VL +# define CV_AVX_512VL 0 +#endif + #ifndef CV_NEON # define CV_NEON 0 #endif diff --git a/modules/core/include/opencv2/core/mat.hpp b/modules/core/include/opencv2/core/mat.hpp index 2b41486246..8b0d94f6e1 100644 --- a/modules/core/include/opencv2/core/mat.hpp +++ b/modules/core/include/opencv2/core/mat.hpp @@ -376,9 +376,10 @@ enum UMatUsageFlags { USAGE_DEFAULT = 0, - // default allocation policy is platform and usage specific + // buffer allocation policy is platform and usage specific 
USAGE_ALLOCATE_HOST_MEMORY = 1 << 0, USAGE_ALLOCATE_DEVICE_MEMORY = 1 << 1, + USAGE_ALLOCATE_SHARED_MEMORY = 1 << 2, // It is not equal to: USAGE_ALLOCATE_HOST_MEMORY | USAGE_ALLOCATE_DEVICE_MEMORY __UMAT_USAGE_FLAGS_32BIT = 0x7fffffff // Binary compatibility hint }; @@ -414,7 +415,7 @@ public: const size_t dstofs[], const size_t dststep[], bool sync) const; // default implementation returns DummyBufferPoolController - virtual BufferPoolController* getBufferPoolController() const; + virtual BufferPoolController* getBufferPoolController(const char* id = NULL) const; }; @@ -480,7 +481,7 @@ struct CV_EXPORTS UMatData int refcount; uchar* data; uchar* origdata; - size_t size, capacity; + size_t size; int flags; void* handle; diff --git a/modules/core/include/opencv2/core/ocl.hpp b/modules/core/include/opencv2/core/ocl.hpp index 3b023fb09e..f87e15ee6a 100644 --- a/modules/core/include/opencv2/core/ocl.hpp +++ b/modules/core/include/opencv2/core/ocl.hpp @@ -56,6 +56,8 @@ CV_EXPORTS_W bool haveAmdFft(); CV_EXPORTS_W void setUseOpenCL(bool flag); CV_EXPORTS_W void finish(); +CV_EXPORTS bool haveSVM(); + class CV_EXPORTS Context; class CV_EXPORTS Device; class CV_EXPORTS Kernel; @@ -248,7 +250,10 @@ public: void* ptr() const; friend void initializeContextFromHandle(Context& ctx, void* platform, void* context, void* device); -protected: + + bool useSVM() const; + void setUseSVM(bool enabled); + struct Impl; Impl* p; }; @@ -666,8 +671,17 @@ protected: CV_EXPORTS MatAllocator* getOpenCLAllocator(); -CV_EXPORTS_W bool isPerformanceCheckBypassed(); -#define OCL_PERFORMANCE_CHECK(condition) (cv::ocl::isPerformanceCheckBypassed() || (condition)) + +#ifdef __OPENCV_BUILD +namespace internal { + +CV_EXPORTS bool isPerformanceCheckBypassed(); +#define OCL_PERFORMANCE_CHECK(condition) (cv::ocl::internal::isPerformanceCheckBypassed() || (condition)) + +CV_EXPORTS bool isCLBuffer(UMat& u); + +} // namespace internal +#endif //! 
@} diff --git a/modules/core/include/opencv2/core/opencl/opencl_svm.hpp b/modules/core/include/opencv2/core/opencl/opencl_svm.hpp new file mode 100644 index 0000000000..e9f7ba0232 --- /dev/null +++ b/modules/core/include/opencv2/core/opencl/opencl_svm.hpp @@ -0,0 +1,81 @@ +/* See LICENSE file in the root OpenCV directory */ + +#ifndef __OPENCV_CORE_OPENCL_SVM_HPP__ +#define __OPENCV_CORE_OPENCL_SVM_HPP__ + +// +// Internal usage only (binary compatibility is not guaranteed) +// +#ifndef __OPENCV_BUILD +#error Internal header file +#endif + +#if defined(HAVE_OPENCL) && defined(HAVE_OPENCL_SVM) +#include "runtime/opencl_core.hpp" +#include "runtime/opencl_svm_20.hpp" +#include "runtime/opencl_svm_hsa_extension.hpp" + +namespace cv { namespace ocl { namespace svm { + +struct SVMCapabilities +{ + enum Value + { + SVM_COARSE_GRAIN_BUFFER = (1 << 0), + SVM_FINE_GRAIN_BUFFER = (1 << 1), + SVM_FINE_GRAIN_SYSTEM = (1 << 2), + SVM_ATOMICS = (1 << 3), + }; + int value_; + + SVMCapabilities(int capabilities = 0) : value_(capabilities) { } + operator int() const { return value_; } + + inline bool isNoSVMSupport() const { return value_ == 0; } + inline bool isSupportCoarseGrainBuffer() const { return (value_ & SVM_COARSE_GRAIN_BUFFER) != 0; } + inline bool isSupportFineGrainBuffer() const { return (value_ & SVM_FINE_GRAIN_BUFFER) != 0; } + inline bool isSupportFineGrainSystem() const { return (value_ & SVM_FINE_GRAIN_SYSTEM) != 0; } + inline bool isSupportAtomics() const { return (value_ & SVM_ATOMICS) != 0; } +}; + +CV_EXPORTS const SVMCapabilities getSVMCapabilitites(const ocl::Context& context); + +struct SVMFunctions +{ + clSVMAllocAMD_fn fn_clSVMAlloc; + clSVMFreeAMD_fn fn_clSVMFree; + clSetKernelArgSVMPointerAMD_fn fn_clSetKernelArgSVMPointer; + //clSetKernelExecInfoAMD_fn fn_clSetKernelExecInfo; + //clEnqueueSVMFreeAMD_fn fn_clEnqueueSVMFree; + clEnqueueSVMMemcpyAMD_fn fn_clEnqueueSVMMemcpy; + clEnqueueSVMMemFillAMD_fn fn_clEnqueueSVMMemFill; + clEnqueueSVMMapAMD_fn fn_clEnqueueSVMMap; + clEnqueueSVMUnmapAMD_fn fn_clEnqueueSVMUnmap; + + inline SVMFunctions() + : fn_clSVMAlloc(NULL), fn_clSVMFree(NULL), + fn_clSetKernelArgSVMPointer(NULL), /*fn_clSetKernelExecInfo(NULL),*/ + /*fn_clEnqueueSVMFree(NULL),*/ fn_clEnqueueSVMMemcpy(NULL), fn_clEnqueueSVMMemFill(NULL), + fn_clEnqueueSVMMap(NULL), fn_clEnqueueSVMUnmap(NULL) + { + // nothing + } + + inline bool isValid() const + { + return fn_clSVMAlloc != NULL && fn_clSVMFree && fn_clSetKernelArgSVMPointer && + /*fn_clSetKernelExecInfo && fn_clEnqueueSVMFree &&*/ fn_clEnqueueSVMMemcpy && + fn_clEnqueueSVMMemFill && fn_clEnqueueSVMMap && fn_clEnqueueSVMUnmap; + } +}; + +// We should guarantee that SVMFunctions lifetime is not less than context's lifetime +CV_EXPORTS const SVMFunctions* getSVMFunctions(const ocl::Context& context); + +CV_EXPORTS bool useSVM(UMatUsageFlags usageFlags); + +}}} //namespace cv::ocl::svm +#endif + +#endif // __OPENCV_CORE_OPENCL_SVM_HPP__ +/* End of file. 
*/ diff --git a/modules/core/include/opencv2/core/opencl/runtime/opencl_core.hpp b/modules/core/include/opencv2/core/opencl/runtime/opencl_core.hpp index b19563cbc2..bd30f813d7 100644 --- a/modules/core/include/opencv2/core/opencl/runtime/opencl_core.hpp +++ b/modules/core/include/opencv2/core/opencl/runtime/opencl_core.hpp @@ -62,6 +62,18 @@ #endif #endif +#ifdef HAVE_OPENCL_SVM +#define clSVMAlloc clSVMAlloc_ +#define clSVMFree clSVMFree_ +#define clSetKernelArgSVMPointer clSetKernelArgSVMPointer_ +#define clSetKernelExecInfo clSetKernelExecInfo_ +#define clEnqueueSVMFree clEnqueueSVMFree_ +#define clEnqueueSVMMemcpy clEnqueueSVMMemcpy_ +#define clEnqueueSVMMemFill clEnqueueSVMMemFill_ +#define clEnqueueSVMMap clEnqueueSVMMap_ +#define clEnqueueSVMUnmap clEnqueueSVMUnmap_ +#endif + #include "autogenerated/opencl_core.hpp" #endif // HAVE_OPENCL_STATIC diff --git a/modules/core/include/opencv2/core/opencl/runtime/opencl_svm_20.hpp b/modules/core/include/opencv2/core/opencl/runtime/opencl_svm_20.hpp new file mode 100644 index 0000000000..7f0ff91d12 --- /dev/null +++ b/modules/core/include/opencv2/core/opencl/runtime/opencl_svm_20.hpp @@ -0,0 +1,52 @@ +/* See LICENSE file in the root OpenCV directory */ + +#ifndef __OPENCV_CORE_OCL_RUNTIME_OPENCL_SVM_2_0_HPP__ +#define __OPENCV_CORE_OCL_RUNTIME_OPENCL_SVM_2_0_HPP__ + +#if defined(HAVE_OPENCL_SVM) +#include "opencl_core.hpp" + +#include "opencl_svm_definitions.hpp" + +#ifndef HAVE_OPENCL_STATIC + +#undef clSVMAlloc +#define clSVMAlloc clSVMAlloc_pfn +#undef clSVMFree +#define clSVMFree clSVMFree_pfn +#undef clSetKernelArgSVMPointer +#define clSetKernelArgSVMPointer clSetKernelArgSVMPointer_pfn +#undef clSetKernelExecInfo +//#define clSetKernelExecInfo clSetKernelExecInfo_pfn +#undef clEnqueueSVMFree +//#define clEnqueueSVMFree clEnqueueSVMFree_pfn +#undef clEnqueueSVMMemcpy +#define clEnqueueSVMMemcpy clEnqueueSVMMemcpy_pfn +#undef clEnqueueSVMMemFill +#define clEnqueueSVMMemFill clEnqueueSVMMemFill_pfn +#undef clEnqueueSVMMap +#define clEnqueueSVMMap clEnqueueSVMMap_pfn +#undef clEnqueueSVMUnmap +#define clEnqueueSVMUnmap clEnqueueSVMUnmap_pfn + +extern CL_RUNTIME_EXPORT void* (CL_API_CALL *clSVMAlloc)(cl_context context, cl_svm_mem_flags flags, size_t size, unsigned int alignment); +extern CL_RUNTIME_EXPORT void (CL_API_CALL *clSVMFree)(cl_context context, void* svm_pointer); +extern CL_RUNTIME_EXPORT cl_int (CL_API_CALL *clSetKernelArgSVMPointer)(cl_kernel kernel, cl_uint arg_index, const void* arg_value); +//extern CL_RUNTIME_EXPORT void* (CL_API_CALL *clSetKernelExecInfo)(cl_kernel kernel, cl_kernel_exec_info param_name, size_t param_value_size, const void* param_value); +//extern CL_RUNTIME_EXPORT cl_int (CL_API_CALL *clEnqueueSVMFree)(cl_command_queue command_queue, cl_uint num_svm_pointers, void* svm_pointers[], +// void (CL_CALLBACK *pfn_free_func)(cl_command_queue queue, cl_uint num_svm_pointers, void* svm_pointers[], void* user_data), void* user_data, +// cl_uint num_events_in_wait_list, const cl_event* event_wait_list, cl_event* event); +extern CL_RUNTIME_EXPORT cl_int (CL_API_CALL *clEnqueueSVMMemcpy)(cl_command_queue command_queue, cl_bool blocking_copy, void* dst_ptr, const void* src_ptr, size_t size, + cl_uint num_events_in_wait_list, const cl_event* event_wait_list, cl_event* event); +extern CL_RUNTIME_EXPORT cl_int (CL_API_CALL *clEnqueueSVMMemFill)(cl_command_queue command_queue, void* svm_ptr, const void* pattern, size_t pattern_size, size_t size, + cl_uint num_events_in_wait_list, const cl_event* event_wait_list, 
cl_event* event); +extern CL_RUNTIME_EXPORT cl_int (CL_API_CALL *clEnqueueSVMMap)(cl_command_queue command_queue, cl_bool blocking_map, cl_map_flags map_flags, void* svm_ptr, size_t size, + cl_uint num_events_in_wait_list, const cl_event* event_wait_list, cl_event* event); +extern CL_RUNTIME_EXPORT cl_int (CL_API_CALL *clEnqueueSVMUnmap)(cl_command_queue command_queue, void* svm_ptr, + cl_uint num_events_in_wait_list, const cl_event* event_wait_list, cl_event* event); + +#endif // HAVE_OPENCL_STATIC + +#endif // HAVE_OPENCL_SVM + +#endif // __OPENCV_CORE_OCL_RUNTIME_OPENCL_SVM_2_0_HPP__ diff --git a/modules/core/include/opencv2/core/opencl/runtime/opencl_svm_definitions.hpp b/modules/core/include/opencv2/core/opencl/runtime/opencl_svm_definitions.hpp new file mode 100644 index 0000000000..a4fd5fc810 --- /dev/null +++ b/modules/core/include/opencv2/core/opencl/runtime/opencl_svm_definitions.hpp @@ -0,0 +1,42 @@ +/* See LICENSE file in the root OpenCV directory */ + +#ifndef __OPENCV_CORE_OCL_RUNTIME_OPENCL_SVM_DEFINITIONS_HPP__ +#define __OPENCV_CORE_OCL_RUNTIME_OPENCL_SVM_DEFINITIONS_HPP__ + +#if defined(HAVE_OPENCL_SVM) +#if defined(CL_VERSION_2_0) + +// OpenCL 2.0 contains SVM definitions + +#else + +typedef cl_bitfield cl_device_svm_capabilities; +typedef cl_bitfield cl_svm_mem_flags; +typedef cl_uint cl_kernel_exec_info; + +// +// TODO Add real values after OpenCL 2.0 release +// + +#ifndef CL_DEVICE_SVM_CAPABILITIES +#define CL_DEVICE_SVM_CAPABILITIES 0x1053 + +#define CL_DEVICE_SVM_COARSE_GRAIN_BUFFER (1 << 0) +#define CL_DEVICE_SVM_FINE_GRAIN_BUFFER (1 << 1) +#define CL_DEVICE_SVM_FINE_GRAIN_SYSTEM (1 << 2) +#define CL_DEVICE_SVM_ATOMICS (1 << 3) +#endif + +#ifndef CL_MEM_SVM_FINE_GRAIN_BUFFER +#define CL_MEM_SVM_FINE_GRAIN_BUFFER (1 << 10) +#endif + +#ifndef CL_MEM_SVM_ATOMICS +#define CL_MEM_SVM_ATOMICS (1 << 11) +#endif + + +#endif // CL_VERSION_2_0 +#endif // HAVE_OPENCL_SVM + +#endif // __OPENCV_CORE_OCL_RUNTIME_OPENCL_SVM_DEFINITIONS_HPP__ diff --git a/modules/core/include/opencv2/core/opencl/runtime/opencl_svm_hsa_extension.hpp b/modules/core/include/opencv2/core/opencl/runtime/opencl_svm_hsa_extension.hpp new file mode 100644 index 0000000000..9e50408f06 --- /dev/null +++ b/modules/core/include/opencv2/core/opencl/runtime/opencl_svm_hsa_extension.hpp @@ -0,0 +1,166 @@ +/* See LICENSE file in the root OpenCV directory */ + +#ifndef __OPENCV_CORE_OCL_RUNTIME_OPENCL_SVM_HSA_EXTENSION_HPP__ +#define __OPENCV_CORE_OCL_RUNTIME_OPENCL_SVM_HSA_EXTENSION_HPP__ + +#if defined(HAVE_OPENCL_SVM) +#include "opencl_core.hpp" + +#ifndef CL_DEVICE_SVM_CAPABILITIES_AMD +// +// Part of the file is an extract from the cl_ext.h file from AMD APP SDK package. +// Below is the original copyright. +// +/******************************************************************************* + * Copyright (c) 2008-2013 The Khronos Group Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and/or associated documentation files (the + * "Materials"), to deal in the Materials without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Materials, and to + * permit persons to whom the Materials are furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Materials. 
+ * + * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY + * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS. + ******************************************************************************/ + +/******************************************* + * Shared Virtual Memory (SVM) extension + *******************************************/ +typedef cl_bitfield cl_device_svm_capabilities_amd; +typedef cl_bitfield cl_svm_mem_flags_amd; +typedef cl_uint cl_kernel_exec_info_amd; + +/* cl_device_info */ +#define CL_DEVICE_SVM_CAPABILITIES_AMD 0x1053 +#define CL_DEVICE_PREFERRED_PLATFORM_ATOMIC_ALIGNMENT_AMD 0x1054 + +/* cl_device_svm_capabilities_amd */ +#define CL_DEVICE_SVM_COARSE_GRAIN_BUFFER_AMD (1 << 0) +#define CL_DEVICE_SVM_FINE_GRAIN_BUFFER_AMD (1 << 1) +#define CL_DEVICE_SVM_FINE_GRAIN_SYSTEM_AMD (1 << 2) +#define CL_DEVICE_SVM_ATOMICS_AMD (1 << 3) + +/* cl_svm_mem_flags_amd */ +#define CL_MEM_SVM_FINE_GRAIN_BUFFER_AMD (1 << 10) +#define CL_MEM_SVM_ATOMICS_AMD (1 << 11) + +/* cl_mem_info */ +#define CL_MEM_USES_SVM_POINTER_AMD 0x1109 + +/* cl_kernel_exec_info_amd */ +#define CL_KERNEL_EXEC_INFO_SVM_PTRS_AMD 0x11B6 +#define CL_KERNEL_EXEC_INFO_SVM_FINE_GRAIN_SYSTEM_AMD 0x11B7 + +/* cl_command_type */ +#define CL_COMMAND_SVM_FREE_AMD 0x1209 +#define CL_COMMAND_SVM_MEMCPY_AMD 0x120A +#define CL_COMMAND_SVM_MEMFILL_AMD 0x120B +#define CL_COMMAND_SVM_MAP_AMD 0x120C +#define CL_COMMAND_SVM_UNMAP_AMD 0x120D + +typedef CL_API_ENTRY void* +(CL_API_CALL * clSVMAllocAMD_fn)( + cl_context /* context */, + cl_svm_mem_flags_amd /* flags */, + size_t /* size */, + unsigned int /* alignment */ +) CL_EXT_SUFFIX__VERSION_1_2; + +typedef CL_API_ENTRY void +(CL_API_CALL * clSVMFreeAMD_fn)( + cl_context /* context */, + void* /* svm_pointer */ +) CL_EXT_SUFFIX__VERSION_1_2; + +typedef CL_API_ENTRY cl_int +(CL_API_CALL * clEnqueueSVMFreeAMD_fn)( + cl_command_queue /* command_queue */, + cl_uint /* num_svm_pointers */, + void** /* svm_pointers */, + void (CL_CALLBACK *)( /*pfn_free_func*/ + cl_command_queue /* queue */, + cl_uint /* num_svm_pointers */, + void** /* svm_pointers */, + void* /* user_data */), + void* /* user_data */, + cl_uint /* num_events_in_wait_list */, + const cl_event* /* event_wait_list */, + cl_event* /* event */ +) CL_EXT_SUFFIX__VERSION_1_2; + +typedef CL_API_ENTRY cl_int +(CL_API_CALL * clEnqueueSVMMemcpyAMD_fn)( + cl_command_queue /* command_queue */, + cl_bool /* blocking_copy */, + void* /* dst_ptr */, + const void* /* src_ptr */, + size_t /* size */, + cl_uint /* num_events_in_wait_list */, + const cl_event* /* event_wait_list */, + cl_event* /* event */ +) CL_EXT_SUFFIX__VERSION_1_2; + +typedef CL_API_ENTRY cl_int +(CL_API_CALL * clEnqueueSVMMemFillAMD_fn)( + cl_command_queue /* command_queue */, + void* /* svm_ptr */, + const void* /* pattern */, + size_t /* pattern_size */, + size_t /* size */, + cl_uint /* num_events_in_wait_list */, + const cl_event* /* event_wait_list */, + cl_event* /* event */ +) CL_EXT_SUFFIX__VERSION_1_2; + +typedef CL_API_ENTRY cl_int +(CL_API_CALL * clEnqueueSVMMapAMD_fn)( + cl_command_queue /* command_queue */, + cl_bool /* blocking_map */, + cl_map_flags /* map_flags */, + void* /* 
svm_ptr */, + size_t /* size */, + cl_uint /* num_events_in_wait_list */, + const cl_event* /* event_wait_list */, + cl_event* /* event */ +) CL_EXT_SUFFIX__VERSION_1_2; + +typedef CL_API_ENTRY cl_int +(CL_API_CALL * clEnqueueSVMUnmapAMD_fn)( + cl_command_queue /* command_queue */, + void* /* svm_ptr */, + cl_uint /* num_events_in_wait_list */, + const cl_event* /* event_wait_list */, + cl_event* /* event */ +) CL_EXT_SUFFIX__VERSION_1_2; + +typedef CL_API_ENTRY cl_int +(CL_API_CALL * clSetKernelArgSVMPointerAMD_fn)( + cl_kernel /* kernel */, + cl_uint /* arg_index */, + const void * /* arg_value */ +) CL_EXT_SUFFIX__VERSION_1_2; + +typedef CL_API_ENTRY cl_int +(CL_API_CALL * clSetKernelExecInfoAMD_fn)( + cl_kernel /* kernel */, + cl_kernel_exec_info_amd /* param_name */, + size_t /* param_value_size */, + const void * /* param_value */ +) CL_EXT_SUFFIX__VERSION_1_2; + +#endif + +#endif // HAVE_OPENCL_SVM + +#endif // __OPENCV_CORE_OCL_RUNTIME_OPENCL_SVM_HSA_EXTENSION_HPP__ diff --git a/modules/core/include/opencv2/core/sse_utils.hpp b/modules/core/include/opencv2/core/sse_utils.hpp new file mode 100644 index 0000000000..e0283eb3f3 --- /dev/null +++ b/modules/core/include/opencv2/core/sse_utils.hpp @@ -0,0 +1,645 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2015, Itseez Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+// +//M*/ + +#ifndef __OPENCV_CORE_SSE_UTILS_HPP__ +#define __OPENCV_CORE_SSE_UTILS_HPP__ + +#ifndef __cplusplus +# error sse_utils.hpp header must be compiled as C++ +#endif + +#if CV_SSE2 + +inline void _mm_deinterleave_epi8(__m128i & v_r0, __m128i & v_r1, __m128i & v_g0, __m128i & v_g1) +{ + __m128i layer1_chunk0 = _mm_unpacklo_epi8(v_r0, v_g0); + __m128i layer1_chunk1 = _mm_unpackhi_epi8(v_r0, v_g0); + __m128i layer1_chunk2 = _mm_unpacklo_epi8(v_r1, v_g1); + __m128i layer1_chunk3 = _mm_unpackhi_epi8(v_r1, v_g1); + + __m128i layer2_chunk0 = _mm_unpacklo_epi8(layer1_chunk0, layer1_chunk2); + __m128i layer2_chunk1 = _mm_unpackhi_epi8(layer1_chunk0, layer1_chunk2); + __m128i layer2_chunk2 = _mm_unpacklo_epi8(layer1_chunk1, layer1_chunk3); + __m128i layer2_chunk3 = _mm_unpackhi_epi8(layer1_chunk1, layer1_chunk3); + + __m128i layer3_chunk0 = _mm_unpacklo_epi8(layer2_chunk0, layer2_chunk2); + __m128i layer3_chunk1 = _mm_unpackhi_epi8(layer2_chunk0, layer2_chunk2); + __m128i layer3_chunk2 = _mm_unpacklo_epi8(layer2_chunk1, layer2_chunk3); + __m128i layer3_chunk3 = _mm_unpackhi_epi8(layer2_chunk1, layer2_chunk3); + + __m128i layer4_chunk0 = _mm_unpacklo_epi8(layer3_chunk0, layer3_chunk2); + __m128i layer4_chunk1 = _mm_unpackhi_epi8(layer3_chunk0, layer3_chunk2); + __m128i layer4_chunk2 = _mm_unpacklo_epi8(layer3_chunk1, layer3_chunk3); + __m128i layer4_chunk3 = _mm_unpackhi_epi8(layer3_chunk1, layer3_chunk3); + + v_r0 = _mm_unpacklo_epi8(layer4_chunk0, layer4_chunk2); + v_r1 = _mm_unpackhi_epi8(layer4_chunk0, layer4_chunk2); + v_g0 = _mm_unpacklo_epi8(layer4_chunk1, layer4_chunk3); + v_g1 = _mm_unpackhi_epi8(layer4_chunk1, layer4_chunk3); +} + +inline void _mm_deinterleave_epi8(__m128i & v_r0, __m128i & v_r1, __m128i & v_g0, + __m128i & v_g1, __m128i & v_b0, __m128i & v_b1) +{ + __m128i layer1_chunk0 = _mm_unpacklo_epi8(v_r0, v_g1); + __m128i layer1_chunk1 = _mm_unpackhi_epi8(v_r0, v_g1); + __m128i layer1_chunk2 = _mm_unpacklo_epi8(v_r1, v_b0); + __m128i layer1_chunk3 = _mm_unpackhi_epi8(v_r1, v_b0); + __m128i layer1_chunk4 = _mm_unpacklo_epi8(v_g0, v_b1); + __m128i layer1_chunk5 = _mm_unpackhi_epi8(v_g0, v_b1); + + __m128i layer2_chunk0 = _mm_unpacklo_epi8(layer1_chunk0, layer1_chunk3); + __m128i layer2_chunk1 = _mm_unpackhi_epi8(layer1_chunk0, layer1_chunk3); + __m128i layer2_chunk2 = _mm_unpacklo_epi8(layer1_chunk1, layer1_chunk4); + __m128i layer2_chunk3 = _mm_unpackhi_epi8(layer1_chunk1, layer1_chunk4); + __m128i layer2_chunk4 = _mm_unpacklo_epi8(layer1_chunk2, layer1_chunk5); + __m128i layer2_chunk5 = _mm_unpackhi_epi8(layer1_chunk2, layer1_chunk5); + + __m128i layer3_chunk0 = _mm_unpacklo_epi8(layer2_chunk0, layer2_chunk3); + __m128i layer3_chunk1 = _mm_unpackhi_epi8(layer2_chunk0, layer2_chunk3); + __m128i layer3_chunk2 = _mm_unpacklo_epi8(layer2_chunk1, layer2_chunk4); + __m128i layer3_chunk3 = _mm_unpackhi_epi8(layer2_chunk1, layer2_chunk4); + __m128i layer3_chunk4 = _mm_unpacklo_epi8(layer2_chunk2, layer2_chunk5); + __m128i layer3_chunk5 = _mm_unpackhi_epi8(layer2_chunk2, layer2_chunk5); + + __m128i layer4_chunk0 = _mm_unpacklo_epi8(layer3_chunk0, layer3_chunk3); + __m128i layer4_chunk1 = _mm_unpackhi_epi8(layer3_chunk0, layer3_chunk3); + __m128i layer4_chunk2 = _mm_unpacklo_epi8(layer3_chunk1, layer3_chunk4); + __m128i layer4_chunk3 = _mm_unpackhi_epi8(layer3_chunk1, layer3_chunk4); + __m128i layer4_chunk4 = _mm_unpacklo_epi8(layer3_chunk2, layer3_chunk5); + __m128i layer4_chunk5 = _mm_unpackhi_epi8(layer3_chunk2, layer3_chunk5); + + v_r0 = _mm_unpacklo_epi8(layer4_chunk0, 
layer4_chunk3); + v_r1 = _mm_unpackhi_epi8(layer4_chunk0, layer4_chunk3); + v_g0 = _mm_unpacklo_epi8(layer4_chunk1, layer4_chunk4); + v_g1 = _mm_unpackhi_epi8(layer4_chunk1, layer4_chunk4); + v_b0 = _mm_unpacklo_epi8(layer4_chunk2, layer4_chunk5); + v_b1 = _mm_unpackhi_epi8(layer4_chunk2, layer4_chunk5); +} + +inline void _mm_deinterleave_epi8(__m128i & v_r0, __m128i & v_r1, __m128i & v_g0, __m128i & v_g1, + __m128i & v_b0, __m128i & v_b1, __m128i & v_a0, __m128i & v_a1) +{ + __m128i layer1_chunk0 = _mm_unpacklo_epi8(v_r0, v_b0); + __m128i layer1_chunk1 = _mm_unpackhi_epi8(v_r0, v_b0); + __m128i layer1_chunk2 = _mm_unpacklo_epi8(v_r1, v_b1); + __m128i layer1_chunk3 = _mm_unpackhi_epi8(v_r1, v_b1); + __m128i layer1_chunk4 = _mm_unpacklo_epi8(v_g0, v_a0); + __m128i layer1_chunk5 = _mm_unpackhi_epi8(v_g0, v_a0); + __m128i layer1_chunk6 = _mm_unpacklo_epi8(v_g1, v_a1); + __m128i layer1_chunk7 = _mm_unpackhi_epi8(v_g1, v_a1); + + __m128i layer2_chunk0 = _mm_unpacklo_epi8(layer1_chunk0, layer1_chunk4); + __m128i layer2_chunk1 = _mm_unpackhi_epi8(layer1_chunk0, layer1_chunk4); + __m128i layer2_chunk2 = _mm_unpacklo_epi8(layer1_chunk1, layer1_chunk5); + __m128i layer2_chunk3 = _mm_unpackhi_epi8(layer1_chunk1, layer1_chunk5); + __m128i layer2_chunk4 = _mm_unpacklo_epi8(layer1_chunk2, layer1_chunk6); + __m128i layer2_chunk5 = _mm_unpackhi_epi8(layer1_chunk2, layer1_chunk6); + __m128i layer2_chunk6 = _mm_unpacklo_epi8(layer1_chunk3, layer1_chunk7); + __m128i layer2_chunk7 = _mm_unpackhi_epi8(layer1_chunk3, layer1_chunk7); + + __m128i layer3_chunk0 = _mm_unpacklo_epi8(layer2_chunk0, layer2_chunk4); + __m128i layer3_chunk1 = _mm_unpackhi_epi8(layer2_chunk0, layer2_chunk4); + __m128i layer3_chunk2 = _mm_unpacklo_epi8(layer2_chunk1, layer2_chunk5); + __m128i layer3_chunk3 = _mm_unpackhi_epi8(layer2_chunk1, layer2_chunk5); + __m128i layer3_chunk4 = _mm_unpacklo_epi8(layer2_chunk2, layer2_chunk6); + __m128i layer3_chunk5 = _mm_unpackhi_epi8(layer2_chunk2, layer2_chunk6); + __m128i layer3_chunk6 = _mm_unpacklo_epi8(layer2_chunk3, layer2_chunk7); + __m128i layer3_chunk7 = _mm_unpackhi_epi8(layer2_chunk3, layer2_chunk7); + + __m128i layer4_chunk0 = _mm_unpacklo_epi8(layer3_chunk0, layer3_chunk4); + __m128i layer4_chunk1 = _mm_unpackhi_epi8(layer3_chunk0, layer3_chunk4); + __m128i layer4_chunk2 = _mm_unpacklo_epi8(layer3_chunk1, layer3_chunk5); + __m128i layer4_chunk3 = _mm_unpackhi_epi8(layer3_chunk1, layer3_chunk5); + __m128i layer4_chunk4 = _mm_unpacklo_epi8(layer3_chunk2, layer3_chunk6); + __m128i layer4_chunk5 = _mm_unpackhi_epi8(layer3_chunk2, layer3_chunk6); + __m128i layer4_chunk6 = _mm_unpacklo_epi8(layer3_chunk3, layer3_chunk7); + __m128i layer4_chunk7 = _mm_unpackhi_epi8(layer3_chunk3, layer3_chunk7); + + v_r0 = _mm_unpacklo_epi8(layer4_chunk0, layer4_chunk4); + v_r1 = _mm_unpackhi_epi8(layer4_chunk0, layer4_chunk4); + v_g0 = _mm_unpacklo_epi8(layer4_chunk1, layer4_chunk5); + v_g1 = _mm_unpackhi_epi8(layer4_chunk1, layer4_chunk5); + v_b0 = _mm_unpacklo_epi8(layer4_chunk2, layer4_chunk6); + v_b1 = _mm_unpackhi_epi8(layer4_chunk2, layer4_chunk6); + v_a0 = _mm_unpacklo_epi8(layer4_chunk3, layer4_chunk7); + v_a1 = _mm_unpackhi_epi8(layer4_chunk3, layer4_chunk7); +} + +inline void _mm_interleave_epi8(__m128i & v_r0, __m128i & v_r1, __m128i & v_g0, __m128i & v_g1) +{ + __m128i v_mask = _mm_set1_epi16(0x00ff); + + __m128i layer4_chunk0 = _mm_packus_epi16(_mm_and_si128(v_r0, v_mask), _mm_and_si128(v_r1, v_mask)); + __m128i layer4_chunk2 = _mm_packus_epi16(_mm_srli_epi16(v_r0, 8), _mm_srli_epi16(v_r1, 8)); + 
__m128i layer4_chunk1 = _mm_packus_epi16(_mm_and_si128(v_g0, v_mask), _mm_and_si128(v_g1, v_mask)); + __m128i layer4_chunk3 = _mm_packus_epi16(_mm_srli_epi16(v_g0, 8), _mm_srli_epi16(v_g1, 8)); + + __m128i layer3_chunk0 = _mm_packus_epi16(_mm_and_si128(layer4_chunk0, v_mask), _mm_and_si128(layer4_chunk1, v_mask)); + __m128i layer3_chunk2 = _mm_packus_epi16(_mm_srli_epi16(layer4_chunk0, 8), _mm_srli_epi16(layer4_chunk1, 8)); + __m128i layer3_chunk1 = _mm_packus_epi16(_mm_and_si128(layer4_chunk2, v_mask), _mm_and_si128(layer4_chunk3, v_mask)); + __m128i layer3_chunk3 = _mm_packus_epi16(_mm_srli_epi16(layer4_chunk2, 8), _mm_srli_epi16(layer4_chunk3, 8)); + + __m128i layer2_chunk0 = _mm_packus_epi16(_mm_and_si128(layer3_chunk0, v_mask), _mm_and_si128(layer3_chunk1, v_mask)); + __m128i layer2_chunk2 = _mm_packus_epi16(_mm_srli_epi16(layer3_chunk0, 8), _mm_srli_epi16(layer3_chunk1, 8)); + __m128i layer2_chunk1 = _mm_packus_epi16(_mm_and_si128(layer3_chunk2, v_mask), _mm_and_si128(layer3_chunk3, v_mask)); + __m128i layer2_chunk3 = _mm_packus_epi16(_mm_srli_epi16(layer3_chunk2, 8), _mm_srli_epi16(layer3_chunk3, 8)); + + __m128i layer1_chunk0 = _mm_packus_epi16(_mm_and_si128(layer2_chunk0, v_mask), _mm_and_si128(layer2_chunk1, v_mask)); + __m128i layer1_chunk2 = _mm_packus_epi16(_mm_srli_epi16(layer2_chunk0, 8), _mm_srli_epi16(layer2_chunk1, 8)); + __m128i layer1_chunk1 = _mm_packus_epi16(_mm_and_si128(layer2_chunk2, v_mask), _mm_and_si128(layer2_chunk3, v_mask)); + __m128i layer1_chunk3 = _mm_packus_epi16(_mm_srli_epi16(layer2_chunk2, 8), _mm_srli_epi16(layer2_chunk3, 8)); + + v_r0 = _mm_packus_epi16(_mm_and_si128(layer1_chunk0, v_mask), _mm_and_si128(layer1_chunk1, v_mask)); + v_g0 = _mm_packus_epi16(_mm_srli_epi16(layer1_chunk0, 8), _mm_srli_epi16(layer1_chunk1, 8)); + v_r1 = _mm_packus_epi16(_mm_and_si128(layer1_chunk2, v_mask), _mm_and_si128(layer1_chunk3, v_mask)); + v_g1 = _mm_packus_epi16(_mm_srli_epi16(layer1_chunk2, 8), _mm_srli_epi16(layer1_chunk3, 8)); +} + +inline void _mm_interleave_epi8(__m128i & v_r0, __m128i & v_r1, __m128i & v_g0, + __m128i & v_g1, __m128i & v_b0, __m128i & v_b1) +{ + __m128i v_mask = _mm_set1_epi16(0x00ff); + + __m128i layer4_chunk0 = _mm_packus_epi16(_mm_and_si128(v_r0, v_mask), _mm_and_si128(v_r1, v_mask)); + __m128i layer4_chunk3 = _mm_packus_epi16(_mm_srli_epi16(v_r0, 8), _mm_srli_epi16(v_r1, 8)); + __m128i layer4_chunk1 = _mm_packus_epi16(_mm_and_si128(v_g0, v_mask), _mm_and_si128(v_g1, v_mask)); + __m128i layer4_chunk4 = _mm_packus_epi16(_mm_srli_epi16(v_g0, 8), _mm_srli_epi16(v_g1, 8)); + __m128i layer4_chunk2 = _mm_packus_epi16(_mm_and_si128(v_b0, v_mask), _mm_and_si128(v_b1, v_mask)); + __m128i layer4_chunk5 = _mm_packus_epi16(_mm_srli_epi16(v_b0, 8), _mm_srli_epi16(v_b1, 8)); + + __m128i layer3_chunk0 = _mm_packus_epi16(_mm_and_si128(layer4_chunk0, v_mask), _mm_and_si128(layer4_chunk1, v_mask)); + __m128i layer3_chunk3 = _mm_packus_epi16(_mm_srli_epi16(layer4_chunk0, 8), _mm_srli_epi16(layer4_chunk1, 8)); + __m128i layer3_chunk1 = _mm_packus_epi16(_mm_and_si128(layer4_chunk2, v_mask), _mm_and_si128(layer4_chunk3, v_mask)); + __m128i layer3_chunk4 = _mm_packus_epi16(_mm_srli_epi16(layer4_chunk2, 8), _mm_srli_epi16(layer4_chunk3, 8)); + __m128i layer3_chunk2 = _mm_packus_epi16(_mm_and_si128(layer4_chunk4, v_mask), _mm_and_si128(layer4_chunk5, v_mask)); + __m128i layer3_chunk5 = _mm_packus_epi16(_mm_srli_epi16(layer4_chunk4, 8), _mm_srli_epi16(layer4_chunk5, 8)); + + __m128i layer2_chunk0 = _mm_packus_epi16(_mm_and_si128(layer3_chunk0, v_mask), 
_mm_and_si128(layer3_chunk1, v_mask)); + __m128i layer2_chunk3 = _mm_packus_epi16(_mm_srli_epi16(layer3_chunk0, 8), _mm_srli_epi16(layer3_chunk1, 8)); + __m128i layer2_chunk1 = _mm_packus_epi16(_mm_and_si128(layer3_chunk2, v_mask), _mm_and_si128(layer3_chunk3, v_mask)); + __m128i layer2_chunk4 = _mm_packus_epi16(_mm_srli_epi16(layer3_chunk2, 8), _mm_srli_epi16(layer3_chunk3, 8)); + __m128i layer2_chunk2 = _mm_packus_epi16(_mm_and_si128(layer3_chunk4, v_mask), _mm_and_si128(layer3_chunk5, v_mask)); + __m128i layer2_chunk5 = _mm_packus_epi16(_mm_srli_epi16(layer3_chunk4, 8), _mm_srli_epi16(layer3_chunk5, 8)); + + __m128i layer1_chunk0 = _mm_packus_epi16(_mm_and_si128(layer2_chunk0, v_mask), _mm_and_si128(layer2_chunk1, v_mask)); + __m128i layer1_chunk3 = _mm_packus_epi16(_mm_srli_epi16(layer2_chunk0, 8), _mm_srli_epi16(layer2_chunk1, 8)); + __m128i layer1_chunk1 = _mm_packus_epi16(_mm_and_si128(layer2_chunk2, v_mask), _mm_and_si128(layer2_chunk3, v_mask)); + __m128i layer1_chunk4 = _mm_packus_epi16(_mm_srli_epi16(layer2_chunk2, 8), _mm_srli_epi16(layer2_chunk3, 8)); + __m128i layer1_chunk2 = _mm_packus_epi16(_mm_and_si128(layer2_chunk4, v_mask), _mm_and_si128(layer2_chunk5, v_mask)); + __m128i layer1_chunk5 = _mm_packus_epi16(_mm_srli_epi16(layer2_chunk4, 8), _mm_srli_epi16(layer2_chunk5, 8)); + + v_r0 = _mm_packus_epi16(_mm_and_si128(layer1_chunk0, v_mask), _mm_and_si128(layer1_chunk1, v_mask)); + v_g1 = _mm_packus_epi16(_mm_srli_epi16(layer1_chunk0, 8), _mm_srli_epi16(layer1_chunk1, 8)); + v_r1 = _mm_packus_epi16(_mm_and_si128(layer1_chunk2, v_mask), _mm_and_si128(layer1_chunk3, v_mask)); + v_b0 = _mm_packus_epi16(_mm_srli_epi16(layer1_chunk2, 8), _mm_srli_epi16(layer1_chunk3, 8)); + v_g0 = _mm_packus_epi16(_mm_and_si128(layer1_chunk4, v_mask), _mm_and_si128(layer1_chunk5, v_mask)); + v_b1 = _mm_packus_epi16(_mm_srli_epi16(layer1_chunk4, 8), _mm_srli_epi16(layer1_chunk5, 8)); +} + +inline void _mm_interleave_epi8(__m128i & v_r0, __m128i & v_r1, __m128i & v_g0, __m128i & v_g1, + __m128i & v_b0, __m128i & v_b1, __m128i & v_a0, __m128i & v_a1) +{ + __m128i v_mask = _mm_set1_epi16(0x00ff); + + __m128i layer4_chunk0 = _mm_packus_epi16(_mm_and_si128(v_r0, v_mask), _mm_and_si128(v_r1, v_mask)); + __m128i layer4_chunk4 = _mm_packus_epi16(_mm_srli_epi16(v_r0, 8), _mm_srli_epi16(v_r1, 8)); + __m128i layer4_chunk1 = _mm_packus_epi16(_mm_and_si128(v_g0, v_mask), _mm_and_si128(v_g1, v_mask)); + __m128i layer4_chunk5 = _mm_packus_epi16(_mm_srli_epi16(v_g0, 8), _mm_srli_epi16(v_g1, 8)); + __m128i layer4_chunk2 = _mm_packus_epi16(_mm_and_si128(v_b0, v_mask), _mm_and_si128(v_b1, v_mask)); + __m128i layer4_chunk6 = _mm_packus_epi16(_mm_srli_epi16(v_b0, 8), _mm_srli_epi16(v_b1, 8)); + __m128i layer4_chunk3 = _mm_packus_epi16(_mm_and_si128(v_a0, v_mask), _mm_and_si128(v_a1, v_mask)); + __m128i layer4_chunk7 = _mm_packus_epi16(_mm_srli_epi16(v_a0, 8), _mm_srli_epi16(v_a1, 8)); + + __m128i layer3_chunk0 = _mm_packus_epi16(_mm_and_si128(layer4_chunk0, v_mask), _mm_and_si128(layer4_chunk1, v_mask)); + __m128i layer3_chunk4 = _mm_packus_epi16(_mm_srli_epi16(layer4_chunk0, 8), _mm_srli_epi16(layer4_chunk1, 8)); + __m128i layer3_chunk1 = _mm_packus_epi16(_mm_and_si128(layer4_chunk2, v_mask), _mm_and_si128(layer4_chunk3, v_mask)); + __m128i layer3_chunk5 = _mm_packus_epi16(_mm_srli_epi16(layer4_chunk2, 8), _mm_srli_epi16(layer4_chunk3, 8)); + __m128i layer3_chunk2 = _mm_packus_epi16(_mm_and_si128(layer4_chunk4, v_mask), _mm_and_si128(layer4_chunk5, v_mask)); + __m128i layer3_chunk6 = 
_mm_packus_epi16(_mm_srli_epi16(layer4_chunk4, 8), _mm_srli_epi16(layer4_chunk5, 8)); + __m128i layer3_chunk3 = _mm_packus_epi16(_mm_and_si128(layer4_chunk6, v_mask), _mm_and_si128(layer4_chunk7, v_mask)); + __m128i layer3_chunk7 = _mm_packus_epi16(_mm_srli_epi16(layer4_chunk6, 8), _mm_srli_epi16(layer4_chunk7, 8)); + + __m128i layer2_chunk0 = _mm_packus_epi16(_mm_and_si128(layer3_chunk0, v_mask), _mm_and_si128(layer3_chunk1, v_mask)); + __m128i layer2_chunk4 = _mm_packus_epi16(_mm_srli_epi16(layer3_chunk0, 8), _mm_srli_epi16(layer3_chunk1, 8)); + __m128i layer2_chunk1 = _mm_packus_epi16(_mm_and_si128(layer3_chunk2, v_mask), _mm_and_si128(layer3_chunk3, v_mask)); + __m128i layer2_chunk5 = _mm_packus_epi16(_mm_srli_epi16(layer3_chunk2, 8), _mm_srli_epi16(layer3_chunk3, 8)); + __m128i layer2_chunk2 = _mm_packus_epi16(_mm_and_si128(layer3_chunk4, v_mask), _mm_and_si128(layer3_chunk5, v_mask)); + __m128i layer2_chunk6 = _mm_packus_epi16(_mm_srli_epi16(layer3_chunk4, 8), _mm_srli_epi16(layer3_chunk5, 8)); + __m128i layer2_chunk3 = _mm_packus_epi16(_mm_and_si128(layer3_chunk6, v_mask), _mm_and_si128(layer3_chunk7, v_mask)); + __m128i layer2_chunk7 = _mm_packus_epi16(_mm_srli_epi16(layer3_chunk6, 8), _mm_srli_epi16(layer3_chunk7, 8)); + + __m128i layer1_chunk0 = _mm_packus_epi16(_mm_and_si128(layer2_chunk0, v_mask), _mm_and_si128(layer2_chunk1, v_mask)); + __m128i layer1_chunk4 = _mm_packus_epi16(_mm_srli_epi16(layer2_chunk0, 8), _mm_srli_epi16(layer2_chunk1, 8)); + __m128i layer1_chunk1 = _mm_packus_epi16(_mm_and_si128(layer2_chunk2, v_mask), _mm_and_si128(layer2_chunk3, v_mask)); + __m128i layer1_chunk5 = _mm_packus_epi16(_mm_srli_epi16(layer2_chunk2, 8), _mm_srli_epi16(layer2_chunk3, 8)); + __m128i layer1_chunk2 = _mm_packus_epi16(_mm_and_si128(layer2_chunk4, v_mask), _mm_and_si128(layer2_chunk5, v_mask)); + __m128i layer1_chunk6 = _mm_packus_epi16(_mm_srli_epi16(layer2_chunk4, 8), _mm_srli_epi16(layer2_chunk5, 8)); + __m128i layer1_chunk3 = _mm_packus_epi16(_mm_and_si128(layer2_chunk6, v_mask), _mm_and_si128(layer2_chunk7, v_mask)); + __m128i layer1_chunk7 = _mm_packus_epi16(_mm_srli_epi16(layer2_chunk6, 8), _mm_srli_epi16(layer2_chunk7, 8)); + + v_r0 = _mm_packus_epi16(_mm_and_si128(layer1_chunk0, v_mask), _mm_and_si128(layer1_chunk1, v_mask)); + v_b0 = _mm_packus_epi16(_mm_srli_epi16(layer1_chunk0, 8), _mm_srli_epi16(layer1_chunk1, 8)); + v_r1 = _mm_packus_epi16(_mm_and_si128(layer1_chunk2, v_mask), _mm_and_si128(layer1_chunk3, v_mask)); + v_b1 = _mm_packus_epi16(_mm_srli_epi16(layer1_chunk2, 8), _mm_srli_epi16(layer1_chunk3, 8)); + v_g0 = _mm_packus_epi16(_mm_and_si128(layer1_chunk4, v_mask), _mm_and_si128(layer1_chunk5, v_mask)); + v_a0 = _mm_packus_epi16(_mm_srli_epi16(layer1_chunk4, 8), _mm_srli_epi16(layer1_chunk5, 8)); + v_g1 = _mm_packus_epi16(_mm_and_si128(layer1_chunk6, v_mask), _mm_and_si128(layer1_chunk7, v_mask)); + v_a1 = _mm_packus_epi16(_mm_srli_epi16(layer1_chunk6, 8), _mm_srli_epi16(layer1_chunk7, 8)); +} + +inline void _mm_deinterleave_epi16(__m128i & v_r0, __m128i & v_r1, __m128i & v_g0, __m128i & v_g1) +{ + __m128i layer1_chunk0 = _mm_unpacklo_epi16(v_r0, v_g0); + __m128i layer1_chunk1 = _mm_unpackhi_epi16(v_r0, v_g0); + __m128i layer1_chunk2 = _mm_unpacklo_epi16(v_r1, v_g1); + __m128i layer1_chunk3 = _mm_unpackhi_epi16(v_r1, v_g1); + + __m128i layer2_chunk0 = _mm_unpacklo_epi16(layer1_chunk0, layer1_chunk2); + __m128i layer2_chunk1 = _mm_unpackhi_epi16(layer1_chunk0, layer1_chunk2); + __m128i layer2_chunk2 = _mm_unpacklo_epi16(layer1_chunk1, layer1_chunk3); + __m128i 
layer2_chunk3 = _mm_unpackhi_epi16(layer1_chunk1, layer1_chunk3); + + __m128i layer3_chunk0 = _mm_unpacklo_epi16(layer2_chunk0, layer2_chunk2); + __m128i layer3_chunk1 = _mm_unpackhi_epi16(layer2_chunk0, layer2_chunk2); + __m128i layer3_chunk2 = _mm_unpacklo_epi16(layer2_chunk1, layer2_chunk3); + __m128i layer3_chunk3 = _mm_unpackhi_epi16(layer2_chunk1, layer2_chunk3); + + v_r0 = _mm_unpacklo_epi16(layer3_chunk0, layer3_chunk2); + v_r1 = _mm_unpackhi_epi16(layer3_chunk0, layer3_chunk2); + v_g0 = _mm_unpacklo_epi16(layer3_chunk1, layer3_chunk3); + v_g1 = _mm_unpackhi_epi16(layer3_chunk1, layer3_chunk3); +} + +inline void _mm_deinterleave_epi16(__m128i & v_r0, __m128i & v_r1, __m128i & v_g0, + __m128i & v_g1, __m128i & v_b0, __m128i & v_b1) +{ + __m128i layer1_chunk0 = _mm_unpacklo_epi16(v_r0, v_g1); + __m128i layer1_chunk1 = _mm_unpackhi_epi16(v_r0, v_g1); + __m128i layer1_chunk2 = _mm_unpacklo_epi16(v_r1, v_b0); + __m128i layer1_chunk3 = _mm_unpackhi_epi16(v_r1, v_b0); + __m128i layer1_chunk4 = _mm_unpacklo_epi16(v_g0, v_b1); + __m128i layer1_chunk5 = _mm_unpackhi_epi16(v_g0, v_b1); + + __m128i layer2_chunk0 = _mm_unpacklo_epi16(layer1_chunk0, layer1_chunk3); + __m128i layer2_chunk1 = _mm_unpackhi_epi16(layer1_chunk0, layer1_chunk3); + __m128i layer2_chunk2 = _mm_unpacklo_epi16(layer1_chunk1, layer1_chunk4); + __m128i layer2_chunk3 = _mm_unpackhi_epi16(layer1_chunk1, layer1_chunk4); + __m128i layer2_chunk4 = _mm_unpacklo_epi16(layer1_chunk2, layer1_chunk5); + __m128i layer2_chunk5 = _mm_unpackhi_epi16(layer1_chunk2, layer1_chunk5); + + __m128i layer3_chunk0 = _mm_unpacklo_epi16(layer2_chunk0, layer2_chunk3); + __m128i layer3_chunk1 = _mm_unpackhi_epi16(layer2_chunk0, layer2_chunk3); + __m128i layer3_chunk2 = _mm_unpacklo_epi16(layer2_chunk1, layer2_chunk4); + __m128i layer3_chunk3 = _mm_unpackhi_epi16(layer2_chunk1, layer2_chunk4); + __m128i layer3_chunk4 = _mm_unpacklo_epi16(layer2_chunk2, layer2_chunk5); + __m128i layer3_chunk5 = _mm_unpackhi_epi16(layer2_chunk2, layer2_chunk5); + + v_r0 = _mm_unpacklo_epi16(layer3_chunk0, layer3_chunk3); + v_r1 = _mm_unpackhi_epi16(layer3_chunk0, layer3_chunk3); + v_g0 = _mm_unpacklo_epi16(layer3_chunk1, layer3_chunk4); + v_g1 = _mm_unpackhi_epi16(layer3_chunk1, layer3_chunk4); + v_b0 = _mm_unpacklo_epi16(layer3_chunk2, layer3_chunk5); + v_b1 = _mm_unpackhi_epi16(layer3_chunk2, layer3_chunk5); +} + +inline void _mm_deinterleave_epi16(__m128i & v_r0, __m128i & v_r1, __m128i & v_g0, __m128i & v_g1, + __m128i & v_b0, __m128i & v_b1, __m128i & v_a0, __m128i & v_a1) +{ + __m128i layer1_chunk0 = _mm_unpacklo_epi16(v_r0, v_b0); + __m128i layer1_chunk1 = _mm_unpackhi_epi16(v_r0, v_b0); + __m128i layer1_chunk2 = _mm_unpacklo_epi16(v_r1, v_b1); + __m128i layer1_chunk3 = _mm_unpackhi_epi16(v_r1, v_b1); + __m128i layer1_chunk4 = _mm_unpacklo_epi16(v_g0, v_a0); + __m128i layer1_chunk5 = _mm_unpackhi_epi16(v_g0, v_a0); + __m128i layer1_chunk6 = _mm_unpacklo_epi16(v_g1, v_a1); + __m128i layer1_chunk7 = _mm_unpackhi_epi16(v_g1, v_a1); + + __m128i layer2_chunk0 = _mm_unpacklo_epi16(layer1_chunk0, layer1_chunk4); + __m128i layer2_chunk1 = _mm_unpackhi_epi16(layer1_chunk0, layer1_chunk4); + __m128i layer2_chunk2 = _mm_unpacklo_epi16(layer1_chunk1, layer1_chunk5); + __m128i layer2_chunk3 = _mm_unpackhi_epi16(layer1_chunk1, layer1_chunk5); + __m128i layer2_chunk4 = _mm_unpacklo_epi16(layer1_chunk2, layer1_chunk6); + __m128i layer2_chunk5 = _mm_unpackhi_epi16(layer1_chunk2, layer1_chunk6); + __m128i layer2_chunk6 = _mm_unpacklo_epi16(layer1_chunk3, layer1_chunk7); + __m128i 
layer2_chunk7 = _mm_unpackhi_epi16(layer1_chunk3, layer1_chunk7); + + __m128i layer3_chunk0 = _mm_unpacklo_epi16(layer2_chunk0, layer2_chunk4); + __m128i layer3_chunk1 = _mm_unpackhi_epi16(layer2_chunk0, layer2_chunk4); + __m128i layer3_chunk2 = _mm_unpacklo_epi16(layer2_chunk1, layer2_chunk5); + __m128i layer3_chunk3 = _mm_unpackhi_epi16(layer2_chunk1, layer2_chunk5); + __m128i layer3_chunk4 = _mm_unpacklo_epi16(layer2_chunk2, layer2_chunk6); + __m128i layer3_chunk5 = _mm_unpackhi_epi16(layer2_chunk2, layer2_chunk6); + __m128i layer3_chunk6 = _mm_unpacklo_epi16(layer2_chunk3, layer2_chunk7); + __m128i layer3_chunk7 = _mm_unpackhi_epi16(layer2_chunk3, layer2_chunk7); + + v_r0 = _mm_unpacklo_epi16(layer3_chunk0, layer3_chunk4); + v_r1 = _mm_unpackhi_epi16(layer3_chunk0, layer3_chunk4); + v_g0 = _mm_unpacklo_epi16(layer3_chunk1, layer3_chunk5); + v_g1 = _mm_unpackhi_epi16(layer3_chunk1, layer3_chunk5); + v_b0 = _mm_unpacklo_epi16(layer3_chunk2, layer3_chunk6); + v_b1 = _mm_unpackhi_epi16(layer3_chunk2, layer3_chunk6); + v_a0 = _mm_unpacklo_epi16(layer3_chunk3, layer3_chunk7); + v_a1 = _mm_unpackhi_epi16(layer3_chunk3, layer3_chunk7); +} + +#if CV_SSE4_1 + +inline void _mm_interleave_epi16(__m128i & v_r0, __m128i & v_r1, __m128i & v_g0, __m128i & v_g1) +{ + __m128i v_mask = _mm_set1_epi32(0x0000ffff); + + __m128i layer3_chunk0 = _mm_packus_epi32(_mm_and_si128(v_r0, v_mask), _mm_and_si128(v_r1, v_mask)); + __m128i layer3_chunk2 = _mm_packus_epi32(_mm_srli_epi32(v_r0, 16), _mm_srli_epi32(v_r1, 16)); + __m128i layer3_chunk1 = _mm_packus_epi32(_mm_and_si128(v_g0, v_mask), _mm_and_si128(v_g1, v_mask)); + __m128i layer3_chunk3 = _mm_packus_epi32(_mm_srli_epi32(v_g0, 16), _mm_srli_epi32(v_g1, 16)); + + __m128i layer2_chunk0 = _mm_packus_epi32(_mm_and_si128(layer3_chunk0, v_mask), _mm_and_si128(layer3_chunk1, v_mask)); + __m128i layer2_chunk2 = _mm_packus_epi32(_mm_srli_epi32(layer3_chunk0, 16), _mm_srli_epi32(layer3_chunk1, 16)); + __m128i layer2_chunk1 = _mm_packus_epi32(_mm_and_si128(layer3_chunk2, v_mask), _mm_and_si128(layer3_chunk3, v_mask)); + __m128i layer2_chunk3 = _mm_packus_epi32(_mm_srli_epi32(layer3_chunk2, 16), _mm_srli_epi32(layer3_chunk3, 16)); + + __m128i layer1_chunk0 = _mm_packus_epi32(_mm_and_si128(layer2_chunk0, v_mask), _mm_and_si128(layer2_chunk1, v_mask)); + __m128i layer1_chunk2 = _mm_packus_epi32(_mm_srli_epi32(layer2_chunk0, 16), _mm_srli_epi32(layer2_chunk1, 16)); + __m128i layer1_chunk1 = _mm_packus_epi32(_mm_and_si128(layer2_chunk2, v_mask), _mm_and_si128(layer2_chunk3, v_mask)); + __m128i layer1_chunk3 = _mm_packus_epi32(_mm_srli_epi32(layer2_chunk2, 16), _mm_srli_epi32(layer2_chunk3, 16)); + + v_r0 = _mm_packus_epi32(_mm_and_si128(layer1_chunk0, v_mask), _mm_and_si128(layer1_chunk1, v_mask)); + v_g0 = _mm_packus_epi32(_mm_srli_epi32(layer1_chunk0, 16), _mm_srli_epi32(layer1_chunk1, 16)); + v_r1 = _mm_packus_epi32(_mm_and_si128(layer1_chunk2, v_mask), _mm_and_si128(layer1_chunk3, v_mask)); + v_g1 = _mm_packus_epi32(_mm_srli_epi32(layer1_chunk2, 16), _mm_srli_epi32(layer1_chunk3, 16)); +} + +inline void _mm_interleave_epi16(__m128i & v_r0, __m128i & v_r1, __m128i & v_g0, + __m128i & v_g1, __m128i & v_b0, __m128i & v_b1) +{ + __m128i v_mask = _mm_set1_epi32(0x0000ffff); + + __m128i layer3_chunk0 = _mm_packus_epi32(_mm_and_si128(v_r0, v_mask), _mm_and_si128(v_r1, v_mask)); + __m128i layer3_chunk3 = _mm_packus_epi32(_mm_srli_epi32(v_r0, 16), _mm_srli_epi32(v_r1, 16)); + __m128i layer3_chunk1 = _mm_packus_epi32(_mm_and_si128(v_g0, v_mask), _mm_and_si128(v_g1, v_mask)); + 
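    // Illustrative note (not part of the original patch): the _mm_interleave_epi16
    // variants sit inside the CV_SSE4_1 guard because _mm_packus_epi32 (unsigned
    // saturating 32->16 pack) is an SSE4.1 instruction, while the unpack-based
    // _mm_deinterleave_epi16 functions above require only SSE2.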
__m128i layer3_chunk4 = _mm_packus_epi32(_mm_srli_epi32(v_g0, 16), _mm_srli_epi32(v_g1, 16)); + __m128i layer3_chunk2 = _mm_packus_epi32(_mm_and_si128(v_b0, v_mask), _mm_and_si128(v_b1, v_mask)); + __m128i layer3_chunk5 = _mm_packus_epi32(_mm_srli_epi32(v_b0, 16), _mm_srli_epi32(v_b1, 16)); + + __m128i layer2_chunk0 = _mm_packus_epi32(_mm_and_si128(layer3_chunk0, v_mask), _mm_and_si128(layer3_chunk1, v_mask)); + __m128i layer2_chunk3 = _mm_packus_epi32(_mm_srli_epi32(layer3_chunk0, 16), _mm_srli_epi32(layer3_chunk1, 16)); + __m128i layer2_chunk1 = _mm_packus_epi32(_mm_and_si128(layer3_chunk2, v_mask), _mm_and_si128(layer3_chunk3, v_mask)); + __m128i layer2_chunk4 = _mm_packus_epi32(_mm_srli_epi32(layer3_chunk2, 16), _mm_srli_epi32(layer3_chunk3, 16)); + __m128i layer2_chunk2 = _mm_packus_epi32(_mm_and_si128(layer3_chunk4, v_mask), _mm_and_si128(layer3_chunk5, v_mask)); + __m128i layer2_chunk5 = _mm_packus_epi32(_mm_srli_epi32(layer3_chunk4, 16), _mm_srli_epi32(layer3_chunk5, 16)); + + __m128i layer1_chunk0 = _mm_packus_epi32(_mm_and_si128(layer2_chunk0, v_mask), _mm_and_si128(layer2_chunk1, v_mask)); + __m128i layer1_chunk3 = _mm_packus_epi32(_mm_srli_epi32(layer2_chunk0, 16), _mm_srli_epi32(layer2_chunk1, 16)); + __m128i layer1_chunk1 = _mm_packus_epi32(_mm_and_si128(layer2_chunk2, v_mask), _mm_and_si128(layer2_chunk3, v_mask)); + __m128i layer1_chunk4 = _mm_packus_epi32(_mm_srli_epi32(layer2_chunk2, 16), _mm_srli_epi32(layer2_chunk3, 16)); + __m128i layer1_chunk2 = _mm_packus_epi32(_mm_and_si128(layer2_chunk4, v_mask), _mm_and_si128(layer2_chunk5, v_mask)); + __m128i layer1_chunk5 = _mm_packus_epi32(_mm_srli_epi32(layer2_chunk4, 16), _mm_srli_epi32(layer2_chunk5, 16)); + + v_r0 = _mm_packus_epi32(_mm_and_si128(layer1_chunk0, v_mask), _mm_and_si128(layer1_chunk1, v_mask)); + v_g1 = _mm_packus_epi32(_mm_srli_epi32(layer1_chunk0, 16), _mm_srli_epi32(layer1_chunk1, 16)); + v_r1 = _mm_packus_epi32(_mm_and_si128(layer1_chunk2, v_mask), _mm_and_si128(layer1_chunk3, v_mask)); + v_b0 = _mm_packus_epi32(_mm_srli_epi32(layer1_chunk2, 16), _mm_srli_epi32(layer1_chunk3, 16)); + v_g0 = _mm_packus_epi32(_mm_and_si128(layer1_chunk4, v_mask), _mm_and_si128(layer1_chunk5, v_mask)); + v_b1 = _mm_packus_epi32(_mm_srli_epi32(layer1_chunk4, 16), _mm_srli_epi32(layer1_chunk5, 16)); +} + +inline void _mm_interleave_epi16(__m128i & v_r0, __m128i & v_r1, __m128i & v_g0, __m128i & v_g1, + __m128i & v_b0, __m128i & v_b1, __m128i & v_a0, __m128i & v_a1) +{ + __m128i v_mask = _mm_set1_epi32(0x0000ffff); + + __m128i layer3_chunk0 = _mm_packus_epi32(_mm_and_si128(v_r0, v_mask), _mm_and_si128(v_r1, v_mask)); + __m128i layer3_chunk4 = _mm_packus_epi32(_mm_srli_epi32(v_r0, 16), _mm_srli_epi32(v_r1, 16)); + __m128i layer3_chunk1 = _mm_packus_epi32(_mm_and_si128(v_g0, v_mask), _mm_and_si128(v_g1, v_mask)); + __m128i layer3_chunk5 = _mm_packus_epi32(_mm_srli_epi32(v_g0, 16), _mm_srli_epi32(v_g1, 16)); + __m128i layer3_chunk2 = _mm_packus_epi32(_mm_and_si128(v_b0, v_mask), _mm_and_si128(v_b1, v_mask)); + __m128i layer3_chunk6 = _mm_packus_epi32(_mm_srli_epi32(v_b0, 16), _mm_srli_epi32(v_b1, 16)); + __m128i layer3_chunk3 = _mm_packus_epi32(_mm_and_si128(v_a0, v_mask), _mm_and_si128(v_a1, v_mask)); + __m128i layer3_chunk7 = _mm_packus_epi32(_mm_srli_epi32(v_a0, 16), _mm_srli_epi32(v_a1, 16)); + + __m128i layer2_chunk0 = _mm_packus_epi32(_mm_and_si128(layer3_chunk0, v_mask), _mm_and_si128(layer3_chunk1, v_mask)); + __m128i layer2_chunk4 = _mm_packus_epi32(_mm_srli_epi32(layer3_chunk0, 16), _mm_srli_epi32(layer3_chunk1, 16)); + 
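    // Illustrative note (not part of the original patch): masking with 0x0000ffff keeps
    // the low word of each 32-bit lane and _mm_srli_epi32 by 16 keeps the high word, so
    // every value fed to _mm_packus_epi32 already fits in 16 bits and the saturating
    // pack never clips.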
__m128i layer2_chunk1 = _mm_packus_epi32(_mm_and_si128(layer3_chunk2, v_mask), _mm_and_si128(layer3_chunk3, v_mask)); + __m128i layer2_chunk5 = _mm_packus_epi32(_mm_srli_epi32(layer3_chunk2, 16), _mm_srli_epi32(layer3_chunk3, 16)); + __m128i layer2_chunk2 = _mm_packus_epi32(_mm_and_si128(layer3_chunk4, v_mask), _mm_and_si128(layer3_chunk5, v_mask)); + __m128i layer2_chunk6 = _mm_packus_epi32(_mm_srli_epi32(layer3_chunk4, 16), _mm_srli_epi32(layer3_chunk5, 16)); + __m128i layer2_chunk3 = _mm_packus_epi32(_mm_and_si128(layer3_chunk6, v_mask), _mm_and_si128(layer3_chunk7, v_mask)); + __m128i layer2_chunk7 = _mm_packus_epi32(_mm_srli_epi32(layer3_chunk6, 16), _mm_srli_epi32(layer3_chunk7, 16)); + + __m128i layer1_chunk0 = _mm_packus_epi32(_mm_and_si128(layer2_chunk0, v_mask), _mm_and_si128(layer2_chunk1, v_mask)); + __m128i layer1_chunk4 = _mm_packus_epi32(_mm_srli_epi32(layer2_chunk0, 16), _mm_srli_epi32(layer2_chunk1, 16)); + __m128i layer1_chunk1 = _mm_packus_epi32(_mm_and_si128(layer2_chunk2, v_mask), _mm_and_si128(layer2_chunk3, v_mask)); + __m128i layer1_chunk5 = _mm_packus_epi32(_mm_srli_epi32(layer2_chunk2, 16), _mm_srli_epi32(layer2_chunk3, 16)); + __m128i layer1_chunk2 = _mm_packus_epi32(_mm_and_si128(layer2_chunk4, v_mask), _mm_and_si128(layer2_chunk5, v_mask)); + __m128i layer1_chunk6 = _mm_packus_epi32(_mm_srli_epi32(layer2_chunk4, 16), _mm_srli_epi32(layer2_chunk5, 16)); + __m128i layer1_chunk3 = _mm_packus_epi32(_mm_and_si128(layer2_chunk6, v_mask), _mm_and_si128(layer2_chunk7, v_mask)); + __m128i layer1_chunk7 = _mm_packus_epi32(_mm_srli_epi32(layer2_chunk6, 16), _mm_srli_epi32(layer2_chunk7, 16)); + + v_r0 = _mm_packus_epi32(_mm_and_si128(layer1_chunk0, v_mask), _mm_and_si128(layer1_chunk1, v_mask)); + v_b0 = _mm_packus_epi32(_mm_srli_epi32(layer1_chunk0, 16), _mm_srli_epi32(layer1_chunk1, 16)); + v_r1 = _mm_packus_epi32(_mm_and_si128(layer1_chunk2, v_mask), _mm_and_si128(layer1_chunk3, v_mask)); + v_b1 = _mm_packus_epi32(_mm_srli_epi32(layer1_chunk2, 16), _mm_srli_epi32(layer1_chunk3, 16)); + v_g0 = _mm_packus_epi32(_mm_and_si128(layer1_chunk4, v_mask), _mm_and_si128(layer1_chunk5, v_mask)); + v_a0 = _mm_packus_epi32(_mm_srli_epi32(layer1_chunk4, 16), _mm_srli_epi32(layer1_chunk5, 16)); + v_g1 = _mm_packus_epi32(_mm_and_si128(layer1_chunk6, v_mask), _mm_and_si128(layer1_chunk7, v_mask)); + v_a1 = _mm_packus_epi32(_mm_srli_epi32(layer1_chunk6, 16), _mm_srli_epi32(layer1_chunk7, 16)); +} + +#endif // CV_SSE4_1 + +inline void _mm_deinterleave_ps(__m128 & v_r0, __m128 & v_r1, __m128 & v_g0, __m128 & v_g1) +{ + __m128 layer1_chunk0 = _mm_unpacklo_ps(v_r0, v_g0); + __m128 layer1_chunk1 = _mm_unpackhi_ps(v_r0, v_g0); + __m128 layer1_chunk2 = _mm_unpacklo_ps(v_r1, v_g1); + __m128 layer1_chunk3 = _mm_unpackhi_ps(v_r1, v_g1); + + __m128 layer2_chunk0 = _mm_unpacklo_ps(layer1_chunk0, layer1_chunk2); + __m128 layer2_chunk1 = _mm_unpackhi_ps(layer1_chunk0, layer1_chunk2); + __m128 layer2_chunk2 = _mm_unpacklo_ps(layer1_chunk1, layer1_chunk3); + __m128 layer2_chunk3 = _mm_unpackhi_ps(layer1_chunk1, layer1_chunk3); + + v_r0 = _mm_unpacklo_ps(layer2_chunk0, layer2_chunk2); + v_r1 = _mm_unpackhi_ps(layer2_chunk0, layer2_chunk2); + v_g0 = _mm_unpacklo_ps(layer2_chunk1, layer2_chunk3); + v_g1 = _mm_unpackhi_ps(layer2_chunk1, layer2_chunk3); +} + +inline void _mm_deinterleave_ps(__m128 & v_r0, __m128 & v_r1, __m128 & v_g0, + __m128 & v_g1, __m128 & v_b0, __m128 & v_b1) +{ + __m128 layer1_chunk0 = _mm_unpacklo_ps(v_r0, v_g1); + __m128 layer1_chunk1 = _mm_unpackhi_ps(v_r0, v_g1); + __m128 
layer1_chunk2 = _mm_unpacklo_ps(v_r1, v_b0); + __m128 layer1_chunk3 = _mm_unpackhi_ps(v_r1, v_b0); + __m128 layer1_chunk4 = _mm_unpacklo_ps(v_g0, v_b1); + __m128 layer1_chunk5 = _mm_unpackhi_ps(v_g0, v_b1); + + __m128 layer2_chunk0 = _mm_unpacklo_ps(layer1_chunk0, layer1_chunk3); + __m128 layer2_chunk1 = _mm_unpackhi_ps(layer1_chunk0, layer1_chunk3); + __m128 layer2_chunk2 = _mm_unpacklo_ps(layer1_chunk1, layer1_chunk4); + __m128 layer2_chunk3 = _mm_unpackhi_ps(layer1_chunk1, layer1_chunk4); + __m128 layer2_chunk4 = _mm_unpacklo_ps(layer1_chunk2, layer1_chunk5); + __m128 layer2_chunk5 = _mm_unpackhi_ps(layer1_chunk2, layer1_chunk5); + + v_r0 = _mm_unpacklo_ps(layer2_chunk0, layer2_chunk3); + v_r1 = _mm_unpackhi_ps(layer2_chunk0, layer2_chunk3); + v_g0 = _mm_unpacklo_ps(layer2_chunk1, layer2_chunk4); + v_g1 = _mm_unpackhi_ps(layer2_chunk1, layer2_chunk4); + v_b0 = _mm_unpacklo_ps(layer2_chunk2, layer2_chunk5); + v_b1 = _mm_unpackhi_ps(layer2_chunk2, layer2_chunk5); +} + +inline void _mm_deinterleave_ps(__m128 & v_r0, __m128 & v_r1, __m128 & v_g0, __m128 & v_g1, + __m128 & v_b0, __m128 & v_b1, __m128 & v_a0, __m128 & v_a1) +{ + __m128 layer1_chunk0 = _mm_unpacklo_ps(v_r0, v_b0); + __m128 layer1_chunk1 = _mm_unpackhi_ps(v_r0, v_b0); + __m128 layer1_chunk2 = _mm_unpacklo_ps(v_r1, v_b1); + __m128 layer1_chunk3 = _mm_unpackhi_ps(v_r1, v_b1); + __m128 layer1_chunk4 = _mm_unpacklo_ps(v_g0, v_a0); + __m128 layer1_chunk5 = _mm_unpackhi_ps(v_g0, v_a0); + __m128 layer1_chunk6 = _mm_unpacklo_ps(v_g1, v_a1); + __m128 layer1_chunk7 = _mm_unpackhi_ps(v_g1, v_a1); + + __m128 layer2_chunk0 = _mm_unpacklo_ps(layer1_chunk0, layer1_chunk4); + __m128 layer2_chunk1 = _mm_unpackhi_ps(layer1_chunk0, layer1_chunk4); + __m128 layer2_chunk2 = _mm_unpacklo_ps(layer1_chunk1, layer1_chunk5); + __m128 layer2_chunk3 = _mm_unpackhi_ps(layer1_chunk1, layer1_chunk5); + __m128 layer2_chunk4 = _mm_unpacklo_ps(layer1_chunk2, layer1_chunk6); + __m128 layer2_chunk5 = _mm_unpackhi_ps(layer1_chunk2, layer1_chunk6); + __m128 layer2_chunk6 = _mm_unpacklo_ps(layer1_chunk3, layer1_chunk7); + __m128 layer2_chunk7 = _mm_unpackhi_ps(layer1_chunk3, layer1_chunk7); + + v_r0 = _mm_unpacklo_ps(layer2_chunk0, layer2_chunk4); + v_r1 = _mm_unpackhi_ps(layer2_chunk0, layer2_chunk4); + v_g0 = _mm_unpacklo_ps(layer2_chunk1, layer2_chunk5); + v_g1 = _mm_unpackhi_ps(layer2_chunk1, layer2_chunk5); + v_b0 = _mm_unpacklo_ps(layer2_chunk2, layer2_chunk6); + v_b1 = _mm_unpackhi_ps(layer2_chunk2, layer2_chunk6); + v_a0 = _mm_unpacklo_ps(layer2_chunk3, layer2_chunk7); + v_a1 = _mm_unpackhi_ps(layer2_chunk3, layer2_chunk7); +} + +inline void _mm_interleave_ps(__m128 & v_r0, __m128 & v_r1, __m128 & v_g0, __m128 & v_g1) +{ + const int mask_lo = _MM_SHUFFLE(2, 0, 2, 0), mask_hi = _MM_SHUFFLE(3, 1, 3, 1); + + __m128 layer2_chunk0 = _mm_shuffle_ps(v_r0, v_r1, mask_lo); + __m128 layer2_chunk2 = _mm_shuffle_ps(v_r0, v_r1, mask_hi); + __m128 layer2_chunk1 = _mm_shuffle_ps(v_g0, v_g1, mask_lo); + __m128 layer2_chunk3 = _mm_shuffle_ps(v_g0, v_g1, mask_hi); + + __m128 layer1_chunk0 = _mm_shuffle_ps(layer2_chunk0, layer2_chunk1, mask_lo); + __m128 layer1_chunk2 = _mm_shuffle_ps(layer2_chunk0, layer2_chunk1, mask_hi); + __m128 layer1_chunk1 = _mm_shuffle_ps(layer2_chunk2, layer2_chunk3, mask_lo); + __m128 layer1_chunk3 = _mm_shuffle_ps(layer2_chunk2, layer2_chunk3, mask_hi); + + v_r0 = _mm_shuffle_ps(layer1_chunk0, layer1_chunk1, mask_lo); + v_g0 = _mm_shuffle_ps(layer1_chunk0, layer1_chunk1, mask_hi); + v_r1 = _mm_shuffle_ps(layer1_chunk2, layer1_chunk3, mask_lo); + 
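    // Illustrative note (not part of the original patch): for floats a single
    // _mm_shuffle_ps with _MM_SHUFFLE(2, 0, 2, 0) gathers the even-indexed lanes of two
    // registers and _MM_SHUFFLE(3, 1, 3, 1) the odd-indexed ones, which is why the float
    // interleave needs far fewer shuffle layers than the byte and word versions.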
v_g1 = _mm_shuffle_ps(layer1_chunk2, layer1_chunk3, mask_hi); +} + +inline void _mm_interleave_ps(__m128 & v_r0, __m128 & v_r1, __m128 & v_g0, + __m128 & v_g1, __m128 & v_b0, __m128 & v_b1) +{ + const int mask_lo = _MM_SHUFFLE(2, 0, 2, 0), mask_hi = _MM_SHUFFLE(3, 1, 3, 1); + + __m128 layer2_chunk0 = _mm_shuffle_ps(v_r0, v_r1, mask_lo); + __m128 layer2_chunk3 = _mm_shuffle_ps(v_r0, v_r1, mask_hi); + __m128 layer2_chunk1 = _mm_shuffle_ps(v_g0, v_g1, mask_lo); + __m128 layer2_chunk4 = _mm_shuffle_ps(v_g0, v_g1, mask_hi); + __m128 layer2_chunk2 = _mm_shuffle_ps(v_b0, v_b1, mask_lo); + __m128 layer2_chunk5 = _mm_shuffle_ps(v_b0, v_b1, mask_hi); + + __m128 layer1_chunk0 = _mm_shuffle_ps(layer2_chunk0, layer2_chunk1, mask_lo); + __m128 layer1_chunk3 = _mm_shuffle_ps(layer2_chunk0, layer2_chunk1, mask_hi); + __m128 layer1_chunk1 = _mm_shuffle_ps(layer2_chunk2, layer2_chunk3, mask_lo); + __m128 layer1_chunk4 = _mm_shuffle_ps(layer2_chunk2, layer2_chunk3, mask_hi); + __m128 layer1_chunk2 = _mm_shuffle_ps(layer2_chunk4, layer2_chunk5, mask_lo); + __m128 layer1_chunk5 = _mm_shuffle_ps(layer2_chunk4, layer2_chunk5, mask_hi); + + v_r0 = _mm_shuffle_ps(layer1_chunk0, layer1_chunk1, mask_lo); + v_g1 = _mm_shuffle_ps(layer1_chunk0, layer1_chunk1, mask_hi); + v_r1 = _mm_shuffle_ps(layer1_chunk2, layer1_chunk3, mask_lo); + v_b0 = _mm_shuffle_ps(layer1_chunk2, layer1_chunk3, mask_hi); + v_g0 = _mm_shuffle_ps(layer1_chunk4, layer1_chunk5, mask_lo); + v_b1 = _mm_shuffle_ps(layer1_chunk4, layer1_chunk5, mask_hi); +} + +inline void _mm_interleave_ps(__m128 & v_r0, __m128 & v_r1, __m128 & v_g0, __m128 & v_g1, + __m128 & v_b0, __m128 & v_b1, __m128 & v_a0, __m128 & v_a1) +{ + const int mask_lo = _MM_SHUFFLE(2, 0, 2, 0), mask_hi = _MM_SHUFFLE(3, 1, 3, 1); + + __m128 layer2_chunk0 = _mm_shuffle_ps(v_r0, v_r1, mask_lo); + __m128 layer2_chunk4 = _mm_shuffle_ps(v_r0, v_r1, mask_hi); + __m128 layer2_chunk1 = _mm_shuffle_ps(v_g0, v_g1, mask_lo); + __m128 layer2_chunk5 = _mm_shuffle_ps(v_g0, v_g1, mask_hi); + __m128 layer2_chunk2 = _mm_shuffle_ps(v_b0, v_b1, mask_lo); + __m128 layer2_chunk6 = _mm_shuffle_ps(v_b0, v_b1, mask_hi); + __m128 layer2_chunk3 = _mm_shuffle_ps(v_a0, v_a1, mask_lo); + __m128 layer2_chunk7 = _mm_shuffle_ps(v_a0, v_a1, mask_hi); + + __m128 layer1_chunk0 = _mm_shuffle_ps(layer2_chunk0, layer2_chunk1, mask_lo); + __m128 layer1_chunk4 = _mm_shuffle_ps(layer2_chunk0, layer2_chunk1, mask_hi); + __m128 layer1_chunk1 = _mm_shuffle_ps(layer2_chunk2, layer2_chunk3, mask_lo); + __m128 layer1_chunk5 = _mm_shuffle_ps(layer2_chunk2, layer2_chunk3, mask_hi); + __m128 layer1_chunk2 = _mm_shuffle_ps(layer2_chunk4, layer2_chunk5, mask_lo); + __m128 layer1_chunk6 = _mm_shuffle_ps(layer2_chunk4, layer2_chunk5, mask_hi); + __m128 layer1_chunk3 = _mm_shuffle_ps(layer2_chunk6, layer2_chunk7, mask_lo); + __m128 layer1_chunk7 = _mm_shuffle_ps(layer2_chunk6, layer2_chunk7, mask_hi); + + v_r0 = _mm_shuffle_ps(layer1_chunk0, layer1_chunk1, mask_lo); + v_b0 = _mm_shuffle_ps(layer1_chunk0, layer1_chunk1, mask_hi); + v_r1 = _mm_shuffle_ps(layer1_chunk2, layer1_chunk3, mask_lo); + v_b1 = _mm_shuffle_ps(layer1_chunk2, layer1_chunk3, mask_hi); + v_g0 = _mm_shuffle_ps(layer1_chunk4, layer1_chunk5, mask_lo); + v_a0 = _mm_shuffle_ps(layer1_chunk4, layer1_chunk5, mask_hi); + v_g1 = _mm_shuffle_ps(layer1_chunk6, layer1_chunk7, mask_lo); + v_a1 = _mm_shuffle_ps(layer1_chunk6, layer1_chunk7, mask_hi); +} + +#endif // CV_SSE2 + +#endif //__OPENCV_CORE_SSE_UTILS_HPP__ diff --git a/modules/core/include/opencv2/core/utility.hpp 
b/modules/core/include/opencv2/core/utility.hpp index 88989ef5cb..f89560a809 100644 --- a/modules/core/include/opencv2/core/utility.hpp +++ b/modules/core/include/opencv2/core/utility.hpp @@ -13,6 +13,7 @@ // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Copyright (C) 2013, OpenCV Foundation, all rights reserved. +// Copyright (C) 2015, Itseez Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, @@ -281,16 +282,30 @@ CV_EXPORTS_W int64 getCPUTickCount(); remember to keep this list identical to the one in cvdef.h */ enum CpuFeatures { - CPU_MMX = 1, - CPU_SSE = 2, - CPU_SSE2 = 3, - CPU_SSE3 = 4, - CPU_SSSE3 = 5, - CPU_SSE4_1 = 6, - CPU_SSE4_2 = 7, - CPU_POPCNT = 8, - CPU_AVX = 10, - CPU_NEON = 11 + CPU_MMX = 1, + CPU_SSE = 2, + CPU_SSE2 = 3, + CPU_SSE3 = 4, + CPU_SSSE3 = 5, + CPU_SSE4_1 = 6, + CPU_SSE4_2 = 7, + CPU_POPCNT = 8, + + CPU_AVX = 10, + CPU_AVX2 = 11, + CPU_FMA3 = 12, + + CPU_AVX_512F = 13, + CPU_AVX_512BW = 14, + CPU_AVX_512CD = 15, + CPU_AVX_512DQ = 16, + CPU_AVX_512ER = 17, + CPU_AVX_512IFMA512 = 18, + CPU_AVX_512PF = 19, + CPU_AVX_512VBMI = 20, + CPU_AVX_512VL = 21, + + CPU_NEON = 100 }; /** @brief Returns true if the specified feature is supported by the host hardware. diff --git a/modules/core/perf/perf_arithm.cpp b/modules/core/perf/perf_arithm.cpp index 3598c8639f..c6c2a1b29f 100644 --- a/modules/core/perf/perf_arithm.cpp +++ b/modules/core/perf/perf_arithm.cpp @@ -242,3 +242,31 @@ PERF_TEST_P(Size_MatType, multiplyScale, TYPICAL_MATS_CORE_ARITHM) SANITY_CHECK(c, 1e-8); } + +PERF_TEST_P(Size_MatType, divide, TYPICAL_MATS_CORE_ARITHM) +{ + Size sz = get<0>(GetParam()); + int type = get<1>(GetParam()); + cv::Mat a(sz, type), b(sz, type), c(sz, type); + double scale = 0.5; + + declare.in(a, b, WARMUP_RNG).out(c); + + TEST_CYCLE() divide(a, b, c, scale); + + SANITY_CHECK_NOTHING(); +} + +PERF_TEST_P(Size_MatType, reciprocal, TYPICAL_MATS_CORE_ARITHM) +{ + Size sz = get<0>(GetParam()); + int type = get<1>(GetParam()); + cv::Mat b(sz, type), c(sz, type); + double scale = 0.5; + + declare.in(b, WARMUP_RNG).out(c); + + TEST_CYCLE() divide(scale, b, c); + + SANITY_CHECK_NOTHING(); +} diff --git a/modules/core/src/arithm.cpp b/modules/core/src/arithm.cpp index f881c785b3..c4de2c4bed 100644 --- a/modules/core/src/arithm.cpp +++ b/modules/core/src/arithm.cpp @@ -12,6 +12,7 @@ // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved. +// Copyright (C) 2014-2015, Itseez Inc., all rights reserved. // Third party copyrights are property of their respective owners. 
// // Redistribution and use in source and binary forms, with or without modification, @@ -63,6 +64,10 @@ FUNCTOR_TEMPLATE(VLoadStore128); #if CV_SSE2 FUNCTOR_TEMPLATE(VLoadStore64); FUNCTOR_TEMPLATE(VLoadStore128Aligned); +#if CV_AVX2 +FUNCTOR_TEMPLATE(VLoadStore256); +FUNCTOR_TEMPLATE(VLoadStore256Aligned); +#endif #endif #endif @@ -75,17 +80,28 @@ void vBinOp(const T* src1, size_t step1, const T* src2, size_t step2, T* dst, si #endif Op op; - for( ; sz.height--; src1 += step1/sizeof(src1[0]), - src2 += step2/sizeof(src2[0]), - dst += step/sizeof(dst[0]) ) + for( ; sz.height--; src1 = (const T *)((const uchar *)src1 + step1), + src2 = (const T *)((const uchar *)src2 + step2), + dst = (T *)((uchar *)dst + step) ) { int x = 0; #if CV_NEON || CV_SSE2 +#if CV_AVX2 + if( USE_AVX2 ) + { + for( ; x <= sz.width - 32/(int)sizeof(T); x += 32/sizeof(T) ) + { + typename VLoadStore256::reg_type r0 = VLoadStore256::load(src1 + x); + r0 = vop(r0, VLoadStore256::load(src2 + x)); + VLoadStore256::store(dst + x, r0); + } + } +#else #if CV_SSE2 if( USE_SSE2 ) { -#endif +#endif // CV_SSE2 for( ; x <= sz.width - 32/(int)sizeof(T); x += 32/sizeof(T) ) { typename VLoadStore128::reg_type r0 = VLoadStore128::load(src1 + x ); @@ -97,9 +113,13 @@ void vBinOp(const T* src1, size_t step1, const T* src2, size_t step2, T* dst, si } #if CV_SSE2 } -#endif -#endif -#if CV_SSE2 +#endif // CV_SSE2 +#endif // CV_AVX2 +#endif // CV_NEON || CV_SSE2 + +#if CV_AVX2 + // nothing +#elif CV_SSE2 if( USE_SSE2 ) { for( ; x <= sz.width - 8/(int)sizeof(T); x += 8/sizeof(T) ) @@ -110,6 +130,7 @@ void vBinOp(const T* src1, size_t step1, const T* src2, size_t step2, T* dst, si } } #endif + #if CV_ENABLE_UNROLLED for( ; x <= sz.width - 4; x += 4 ) { @@ -136,13 +157,26 @@ void vBinOp32(const T* src1, size_t step1, const T* src2, size_t step2, #endif Op op; - for( ; sz.height--; src1 += step1/sizeof(src1[0]), - src2 += step2/sizeof(src2[0]), - dst += step/sizeof(dst[0]) ) + for( ; sz.height--; src1 = (const T *)((const uchar *)src1 + step1), + src2 = (const T *)((const uchar *)src2 + step2), + dst = (T *)((uchar *)dst + step) ) { int x = 0; -#if CV_SSE2 +#if CV_AVX2 + if( USE_AVX2 ) + { + if( (((size_t)src1|(size_t)src2|(size_t)dst)&31) == 0 ) + { + for( ; x <= sz.width - 8; x += 8 ) + { + typename VLoadStore256Aligned::reg_type r0 = VLoadStore256Aligned::load(src1 + x); + r0 = op32(r0, VLoadStore256Aligned::load(src2 + x)); + VLoadStore256Aligned::store(dst + x, r0); + } + } + } +#elif CV_SSE2 if( USE_SSE2 ) { if( (((size_t)src1|(size_t)src2|(size_t)dst)&15) == 0 ) @@ -158,12 +192,24 @@ void vBinOp32(const T* src1, size_t step1, const T* src2, size_t step2, } } } -#endif +#endif // CV_AVX2 + #if CV_NEON || CV_SSE2 +#if CV_AVX2 + if( USE_AVX2 ) + { + for( ; x <= sz.width - 8; x += 8 ) + { + typename VLoadStore256::reg_type r0 = VLoadStore256::load(src1 + x); + r0 = op32(r0, VLoadStore256::load(src2 + x)); + VLoadStore256::store(dst + x, r0); + } + } +#else #if CV_SSE2 if( USE_SSE2 ) { -#endif +#endif // CV_SSE2 for( ; x <= sz.width - 8; x += 8 ) { typename VLoadStore128::reg_type r0 = VLoadStore128::load(src1 + x ); @@ -175,8 +221,10 @@ void vBinOp32(const T* src1, size_t step1, const T* src2, size_t step2, } #if CV_SSE2 } -#endif -#endif +#endif // CV_SSE2 +#endif // CV_AVX2 +#endif // CV_NEON || CV_SSE2 + #if CV_ENABLE_UNROLLED for( ; x <= sz.width - 4; x += 4 ) { @@ -204,13 +252,26 @@ void vBinOp64(const T* src1, size_t step1, const T* src2, size_t step2, #endif Op op; - for( ; sz.height--; src1 += step1/sizeof(src1[0]), - src2 += 
step2/sizeof(src2[0]), - dst += step/sizeof(dst[0]) ) + for( ; sz.height--; src1 = (const T *)((const uchar *)src1 + step1), + src2 = (const T *)((const uchar *)src2 + step2), + dst = (T *)((uchar *)dst + step) ) { int x = 0; -#if CV_SSE2 +#if CV_AVX2 + if( USE_AVX2 ) + { + if( (((size_t)src1|(size_t)src2|(size_t)dst)&31) == 0 ) + { + for( ; x <= sz.width - 4; x += 4 ) + { + typename VLoadStore256Aligned::reg_type r0 = VLoadStore256Aligned::load(src1 + x); + r0 = op64(r0, VLoadStore256Aligned::load(src2 + x)); + VLoadStore256Aligned::store(dst + x, r0); + } + } + } +#elif CV_SSE2 if( USE_SSE2 ) { if( (((size_t)src1|(size_t)src2|(size_t)dst)&15) == 0 ) @@ -243,7 +304,141 @@ void vBinOp64(const T* src1, size_t step1, const T* src2, size_t step2, } } -#if CV_SSE2 +#if CV_AVX2 + +#define FUNCTOR_LOADSTORE_CAST(name, template_arg, register_type, load_body, store_body) \ + template <> \ + struct name{ \ + typedef register_type reg_type; \ + static reg_type load(const template_arg * p) { return load_body ((const reg_type *)p); } \ + static void store(template_arg * p, reg_type v) { store_body ((reg_type *)p, v); } \ + } + +#define FUNCTOR_LOADSTORE(name, template_arg, register_type, load_body, store_body) \ + template <> \ + struct name{ \ + typedef register_type reg_type; \ + static reg_type load(const template_arg * p) { return load_body (p); } \ + static void store(template_arg * p, reg_type v) { store_body (p, v); } \ + } + +#define FUNCTOR_CLOSURE_2arg(name, template_arg, body) \ + template<> \ + struct name \ + { \ + VLoadStore256::reg_type operator()( \ + const VLoadStore256::reg_type & a, \ + const VLoadStore256::reg_type & b) const \ + { \ + body; \ + } \ + } + +#define FUNCTOR_CLOSURE_1arg(name, template_arg, body) \ + template<> \ + struct name \ + { \ + VLoadStore256::reg_type operator()( \ + const VLoadStore256::reg_type & a, \ + const VLoadStore256::reg_type & ) const \ + { \ + body; \ + } \ + } + +FUNCTOR_LOADSTORE_CAST(VLoadStore256, uchar, __m256i, _mm256_loadu_si256, _mm256_storeu_si256); +FUNCTOR_LOADSTORE_CAST(VLoadStore256, schar, __m256i, _mm256_loadu_si256, _mm256_storeu_si256); +FUNCTOR_LOADSTORE_CAST(VLoadStore256, ushort, __m256i, _mm256_loadu_si256, _mm256_storeu_si256); +FUNCTOR_LOADSTORE_CAST(VLoadStore256, short, __m256i, _mm256_loadu_si256, _mm256_storeu_si256); +FUNCTOR_LOADSTORE_CAST(VLoadStore256, int, __m256i, _mm256_loadu_si256, _mm256_storeu_si256); +FUNCTOR_LOADSTORE( VLoadStore256, float, __m256 , _mm256_loadu_ps , _mm256_storeu_ps ); +FUNCTOR_LOADSTORE( VLoadStore256, double, __m256d, _mm256_loadu_pd , _mm256_storeu_pd ); + +FUNCTOR_LOADSTORE_CAST(VLoadStore256Aligned, int, __m256i, _mm256_load_si256, _mm256_store_si256); +FUNCTOR_LOADSTORE( VLoadStore256Aligned, float, __m256 , _mm256_load_ps , _mm256_store_ps ); +FUNCTOR_LOADSTORE( VLoadStore256Aligned, double, __m256d, _mm256_load_pd , _mm256_store_pd ); + +FUNCTOR_TEMPLATE(VAdd); +FUNCTOR_CLOSURE_2arg(VAdd, uchar, return _mm256_adds_epu8 (a, b)); +FUNCTOR_CLOSURE_2arg(VAdd, schar, return _mm256_adds_epi8 (a, b)); +FUNCTOR_CLOSURE_2arg(VAdd, ushort, return _mm256_adds_epu16(a, b)); +FUNCTOR_CLOSURE_2arg(VAdd, short, return _mm256_adds_epi16(a, b)); +FUNCTOR_CLOSURE_2arg(VAdd, int, return _mm256_add_epi32 (a, b)); +FUNCTOR_CLOSURE_2arg(VAdd, float, return _mm256_add_ps (a, b)); +FUNCTOR_CLOSURE_2arg(VAdd, double, return _mm256_add_pd (a, b)); + +FUNCTOR_TEMPLATE(VSub); +FUNCTOR_CLOSURE_2arg(VSub, uchar, return _mm256_subs_epu8 (a, b)); +FUNCTOR_CLOSURE_2arg(VSub, schar, return _mm256_subs_epi8 (a, b)); 
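// Illustrative note (not part of the original patch): each FUNCTOR_CLOSURE_2arg line
// roughly expands to a specialization along these lines,
//   template<> struct VSub<schar>
//   {
//       VLoadStore256<schar>::reg_type operator()(const VLoadStore256<schar>::reg_type & a,
//                                                 const VLoadStore256<schar>::reg_type & b) const
//       { return _mm256_subs_epi8(a, b); }
//   };
// so that vBinOp() can pick the matching 256-bit intrinsic for each element type.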
+FUNCTOR_CLOSURE_2arg(VSub, ushort, return _mm256_subs_epu16(a, b)); +FUNCTOR_CLOSURE_2arg(VSub, short, return _mm256_subs_epi16(a, b)); +FUNCTOR_CLOSURE_2arg(VSub, int, return _mm256_sub_epi32 (a, b)); +FUNCTOR_CLOSURE_2arg(VSub, float, return _mm256_sub_ps (a, b)); +FUNCTOR_CLOSURE_2arg(VSub, double, return _mm256_sub_pd (a, b)); + +FUNCTOR_TEMPLATE(VMin); +FUNCTOR_CLOSURE_2arg(VMin, uchar, return _mm256_min_epu8 (a, b)); +FUNCTOR_CLOSURE_2arg(VMin, schar, return _mm256_min_epi8 (a, b)); +FUNCTOR_CLOSURE_2arg(VMin, ushort, return _mm256_min_epi16(a, b)); +FUNCTOR_CLOSURE_2arg(VMin, short, return _mm256_min_epi16(a, b)); +FUNCTOR_CLOSURE_2arg(VMin, int, return _mm256_min_epi32(a, b)); +FUNCTOR_CLOSURE_2arg(VMin, float, return _mm256_min_ps (a, b)); +FUNCTOR_CLOSURE_2arg(VMin, double, return _mm256_min_pd (a, b)); + +FUNCTOR_TEMPLATE(VMax); +FUNCTOR_CLOSURE_2arg(VMax, uchar, return _mm256_max_epu8 (a, b)); +FUNCTOR_CLOSURE_2arg(VMax, schar, return _mm256_max_epi8 (a, b)); +FUNCTOR_CLOSURE_2arg(VMax, ushort, return _mm256_max_epu16(a, b)); +FUNCTOR_CLOSURE_2arg(VMax, short, return _mm256_max_epi16(a, b)); +FUNCTOR_CLOSURE_2arg(VMax, int, return _mm256_max_epi32(a, b)); +FUNCTOR_CLOSURE_2arg(VMax, float, return _mm256_max_ps (a, b)); +FUNCTOR_CLOSURE_2arg(VMax, double, return _mm256_max_pd (a, b)); + + +static unsigned int CV_DECL_ALIGNED(32) v32f_absmask[] = { 0x7fffffff, 0x7fffffff, 0x7fffffff, 0x7fffffff, + 0x7fffffff, 0x7fffffff, 0x7fffffff, 0x7fffffff }; +static unsigned int CV_DECL_ALIGNED(32) v64f_absmask[] = { 0xffffffff, 0x7fffffff, 0xffffffff, 0x7fffffff, + 0xffffffff, 0x7fffffff, 0xffffffff, 0x7fffffff }; + +FUNCTOR_TEMPLATE(VAbsDiff); +FUNCTOR_CLOSURE_2arg(VAbsDiff, uchar, + return _mm256_add_epi8(_mm256_subs_epu8(a, b), _mm256_subs_epu8(b, a)); + ); +FUNCTOR_CLOSURE_2arg(VAbsDiff, schar, + __m256i d = _mm256_subs_epi8(a, b); + __m256i m = _mm256_cmpgt_epi8(b, a); + return _mm256_subs_epi8(_mm256_xor_si256(d, m), m); + ); +FUNCTOR_CLOSURE_2arg(VAbsDiff, ushort, + return _mm256_add_epi16(_mm256_subs_epu16(a, b), _mm256_subs_epu16(b, a)); + ); +FUNCTOR_CLOSURE_2arg(VAbsDiff, short, + __m256i M = _mm256_max_epi16(a, b); + __m256i m = _mm256_min_epi16(a, b); + return _mm256_subs_epi16(M, m); + ); +FUNCTOR_CLOSURE_2arg(VAbsDiff, int, + __m256i d = _mm256_sub_epi32(a, b); + __m256i m = _mm256_cmpgt_epi32(b, a); + return _mm256_sub_epi32(_mm256_xor_si256(d, m), m); + ); +FUNCTOR_CLOSURE_2arg(VAbsDiff, float, + return _mm256_and_ps(_mm256_sub_ps(a, b), *(const __m256*)v32f_absmask); + ); +FUNCTOR_CLOSURE_2arg(VAbsDiff, double, + return _mm256_and_pd(_mm256_sub_pd(a, b), *(const __m256d*)v64f_absmask); + ); + +FUNCTOR_TEMPLATE(VAnd); +FUNCTOR_CLOSURE_2arg(VAnd, uchar, return _mm256_and_si256(a, b)); +FUNCTOR_TEMPLATE(VOr); +FUNCTOR_CLOSURE_2arg(VOr , uchar, return _mm256_or_si256 (a, b)); +FUNCTOR_TEMPLATE(VXor); +FUNCTOR_CLOSURE_2arg(VXor, uchar, return _mm256_xor_si256(a, b)); +FUNCTOR_TEMPLATE(VNot); +FUNCTOR_CLOSURE_1arg(VNot, uchar, return _mm256_xor_si256(_mm256_set1_epi32(-1), a)); + +#elif CV_SSE2 #define FUNCTOR_LOADSTORE_CAST(name, template_arg, register_type, load_body, store_body)\ template <> \ @@ -2574,6 +2769,263 @@ mul_( const T* src1, size_t step1, const T* src2, size_t step2, } } +template +struct Div_SIMD +{ + int operator() (const T *, const T *, T *, int, double) const + { + return 0; + } +}; + +#if CV_SSE2 + +#if CV_SSE4_1 + +template <> +struct Div_SIMD +{ + bool haveSIMD; + Div_SIMD() { haveSIMD = checkHardwareSupport(CV_CPU_SSE4_1); } + + int operator() (const 
uchar * src1, const uchar * src2, uchar * dst, int width, double scale) const + { + int x = 0; + + if (!haveSIMD) + return x; + + __m128d v_scale = _mm_set1_pd(scale); + __m128i v_zero = _mm_setzero_si128(); + + for ( ; x <= width - 8; x += 8) + { + __m128i v_src1 = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i *)(src1 + x)), v_zero); + __m128i _v_src2 = _mm_loadl_epi64((const __m128i *)(src2 + x)); + __m128i v_src2 = _mm_unpacklo_epi8(_v_src2, v_zero); + + __m128i v_src1i = _mm_unpacklo_epi16(v_src1, v_zero); + __m128i v_src2i = _mm_unpacklo_epi16(v_src2, v_zero); + __m128d v_src1d = _mm_cvtepi32_pd(v_src1i); + __m128d v_src2d = _mm_cvtepi32_pd(v_src2i); + __m128i v_dst0 = _mm_cvtpd_epi32(_mm_div_pd(_mm_mul_pd(v_src1d, v_scale), v_src2d)); + v_src1d = _mm_cvtepi32_pd(_mm_srli_si128(v_src1i, 8)); + v_src2d = _mm_cvtepi32_pd(_mm_srli_si128(v_src2i, 8)); + __m128i v_dst1 = _mm_cvtpd_epi32(_mm_div_pd(_mm_mul_pd(v_src1d, v_scale), v_src2d)); + __m128i v_dst_0 = _mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(v_dst0), _mm_castsi128_ps(v_dst1))); + + v_src1i = _mm_unpackhi_epi16(v_src1, v_zero); + v_src2i = _mm_unpackhi_epi16(v_src2, v_zero); + v_src1d = _mm_cvtepi32_pd(v_src1i); + v_src2d = _mm_cvtepi32_pd(v_src2i); + v_dst0 = _mm_cvtpd_epi32(_mm_div_pd(_mm_mul_pd(v_src1d, v_scale), v_src2d)); + v_src1d = _mm_cvtepi32_pd(_mm_srli_si128(v_src1i, 8)); + v_src2d = _mm_cvtepi32_pd(_mm_srli_si128(v_src2i, 8)); + v_dst1 = _mm_cvtpd_epi32(_mm_div_pd(_mm_mul_pd(v_src1d, v_scale), v_src2d)); + __m128i v_dst_1 = _mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(v_dst0), _mm_castsi128_ps(v_dst1))); + + __m128i v_mask = _mm_cmpeq_epi8(_v_src2, v_zero); + _mm_storel_epi64((__m128i *)(dst + x), _mm_andnot_si128(v_mask, _mm_packus_epi16(_mm_packs_epi32(v_dst_0, v_dst_1), v_zero))); + } + + return x; + } +}; + +#endif // CV_SSE4_1 + +template <> +struct Div_SIMD +{ + bool haveSIMD; + Div_SIMD() { haveSIMD = checkHardwareSupport(CV_CPU_SSE2); } + + int operator() (const schar * src1, const schar * src2, schar * dst, int width, double scale) const + { + int x = 0; + + if (!haveSIMD) + return x; + + __m128d v_scale = _mm_set1_pd(scale); + __m128i v_zero = _mm_setzero_si128(); + + for ( ; x <= width - 8; x += 8) + { + __m128i v_src1 = _mm_srai_epi16(_mm_unpacklo_epi8(v_zero, _mm_loadl_epi64((const __m128i *)(src1 + x))), 8); + __m128i _v_src2 = _mm_loadl_epi64((const __m128i *)(src2 + x)); + __m128i v_src2 = _mm_srai_epi16(_mm_unpacklo_epi8(v_zero, _v_src2), 8); + + __m128i v_src1i = _mm_srai_epi32(_mm_unpacklo_epi16(v_zero, v_src1), 16); + __m128i v_src2i = _mm_srai_epi32(_mm_unpacklo_epi16(v_zero, v_src2), 16); + __m128d v_src1d = _mm_cvtepi32_pd(v_src1i); + __m128d v_src2d = _mm_cvtepi32_pd(v_src2i); + __m128i v_dst0 = _mm_cvtpd_epi32(_mm_div_pd(_mm_mul_pd(v_src1d, v_scale), v_src2d)); + v_src1d = _mm_cvtepi32_pd(_mm_srli_si128(v_src1i, 8)); + v_src2d = _mm_cvtepi32_pd(_mm_srli_si128(v_src2i, 8)); + __m128i v_dst1 = _mm_cvtpd_epi32(_mm_div_pd(_mm_mul_pd(v_src1d, v_scale), v_src2d)); + __m128i v_dst_0 = _mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(v_dst0), _mm_castsi128_ps(v_dst1))); + + v_src1i = _mm_srai_epi32(_mm_unpackhi_epi16(v_zero, v_src1), 16); + v_src2i = _mm_srai_epi32(_mm_unpackhi_epi16(v_zero, v_src2), 16); + v_src1d = _mm_cvtepi32_pd(v_src1i); + v_src2d = _mm_cvtepi32_pd(v_src2i); + v_dst0 = _mm_cvtpd_epi32(_mm_div_pd(_mm_mul_pd(v_src1d, v_scale), v_src2d)); + v_src1d = _mm_cvtepi32_pd(_mm_srli_si128(v_src1i, 8)); + v_src2d = _mm_cvtepi32_pd(_mm_srli_si128(v_src2i, 8)); + v_dst1 = 
_mm_cvtpd_epi32(_mm_div_pd(_mm_mul_pd(v_src1d, v_scale), v_src2d)); + __m128i v_dst_1 = _mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(v_dst0), _mm_castsi128_ps(v_dst1))); + + __m128i v_mask = _mm_cmpeq_epi8(_v_src2, v_zero); + _mm_storel_epi64((__m128i *)(dst + x), _mm_andnot_si128(v_mask, _mm_packs_epi16(_mm_packs_epi32(v_dst_0, v_dst_1), v_zero))); + } + + return x; + } +}; + +#if CV_SSE4_1 + +template <> +struct Div_SIMD +{ + bool haveSIMD; + Div_SIMD() { haveSIMD = checkHardwareSupport(CV_CPU_SSE4_1); } + + int operator() (const ushort * src1, const ushort * src2, ushort * dst, int width, double scale) const + { + int x = 0; + + if (!haveSIMD) + return x; + + __m128d v_scale = _mm_set1_pd(scale); + __m128i v_zero = _mm_setzero_si128(); + + for ( ; x <= width - 8; x += 8) + { + __m128i v_src1 = _mm_loadu_si128((const __m128i *)(src1 + x)); + __m128i v_src2 = _mm_loadu_si128((const __m128i *)(src2 + x)); + + __m128i v_src1i = _mm_unpacklo_epi16(v_src1, v_zero); + __m128i v_src2i = _mm_unpacklo_epi16(v_src2, v_zero); + __m128d v_src1d = _mm_cvtepi32_pd(v_src1i); + __m128d v_src2d = _mm_cvtepi32_pd(v_src2i); + __m128i v_dst0 = _mm_cvtpd_epi32(_mm_div_pd(_mm_mul_pd(v_src1d, v_scale), v_src2d)); + v_src1d = _mm_cvtepi32_pd(_mm_srli_si128(v_src1i, 8)); + v_src2d = _mm_cvtepi32_pd(_mm_srli_si128(v_src2i, 8)); + __m128i v_dst1 = _mm_cvtpd_epi32(_mm_div_pd(_mm_mul_pd(v_src1d, v_scale), v_src2d)); + __m128i v_dst_0 = _mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(v_dst0), _mm_castsi128_ps(v_dst1))); + + v_src1i = _mm_unpackhi_epi16(v_src1, v_zero); + v_src2i = _mm_unpackhi_epi16(v_src2, v_zero); + v_src1d = _mm_cvtepi32_pd(v_src1i); + v_src2d = _mm_cvtepi32_pd(v_src2i); + v_dst0 = _mm_cvtpd_epi32(_mm_div_pd(_mm_mul_pd(v_src1d, v_scale), v_src2d)); + v_src1d = _mm_cvtepi32_pd(_mm_srli_si128(v_src1i, 8)); + v_src2d = _mm_cvtepi32_pd(_mm_srli_si128(v_src2i, 8)); + v_dst1 = _mm_cvtpd_epi32(_mm_div_pd(_mm_mul_pd(v_src1d, v_scale), v_src2d)); + __m128i v_dst_1 = _mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(v_dst0), _mm_castsi128_ps(v_dst1))); + + __m128i v_mask = _mm_cmpeq_epi16(v_src2, v_zero); + _mm_storeu_si128((__m128i *)(dst + x), _mm_andnot_si128(v_mask, _mm_packus_epi32(v_dst_0, v_dst_1))); + } + + return x; + } +}; + +#endif // CV_SSE4_1 + +template <> +struct Div_SIMD +{ + bool haveSIMD; + Div_SIMD() { haveSIMD = checkHardwareSupport(CV_CPU_SSE2); } + + int operator() (const short * src1, const short * src2, short * dst, int width, double scale) const + { + int x = 0; + + if (!haveSIMD) + return x; + + __m128d v_scale = _mm_set1_pd(scale); + __m128i v_zero = _mm_setzero_si128(); + + for ( ; x <= width - 8; x += 8) + { + __m128i v_src1 = _mm_loadu_si128((const __m128i *)(src1 + x)); + __m128i v_src2 = _mm_loadu_si128((const __m128i *)(src2 + x)); + + __m128i v_src1i = _mm_srai_epi32(_mm_unpacklo_epi16(v_zero, v_src1), 16); + __m128i v_src2i = _mm_srai_epi32(_mm_unpacklo_epi16(v_zero, v_src2), 16); + __m128d v_src1d = _mm_cvtepi32_pd(v_src1i); + __m128d v_src2d = _mm_cvtepi32_pd(v_src2i); + __m128i v_dst0 = _mm_cvtpd_epi32(_mm_div_pd(_mm_mul_pd(v_src1d, v_scale), v_src2d)); + v_src1d = _mm_cvtepi32_pd(_mm_srli_si128(v_src1i, 8)); + v_src2d = _mm_cvtepi32_pd(_mm_srli_si128(v_src2i, 8)); + __m128i v_dst1 = _mm_cvtpd_epi32(_mm_div_pd(_mm_mul_pd(v_src1d, v_scale), v_src2d)); + __m128i v_dst_0 = _mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(v_dst0), _mm_castsi128_ps(v_dst1))); + + v_src1i = _mm_srai_epi32(_mm_unpackhi_epi16(v_zero, v_src1), 16); + v_src2i = 
_mm_srai_epi32(_mm_unpackhi_epi16(v_zero, v_src2), 16); + v_src1d = _mm_cvtepi32_pd(v_src1i); + v_src2d = _mm_cvtepi32_pd(v_src2i); + v_dst0 = _mm_cvtpd_epi32(_mm_div_pd(_mm_mul_pd(v_src1d, v_scale), v_src2d)); + v_src1d = _mm_cvtepi32_pd(_mm_srli_si128(v_src1i, 8)); + v_src2d = _mm_cvtepi32_pd(_mm_srli_si128(v_src2i, 8)); + v_dst1 = _mm_cvtpd_epi32(_mm_div_pd(_mm_mul_pd(v_src1d, v_scale), v_src2d)); + __m128i v_dst_1 = _mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(v_dst0), _mm_castsi128_ps(v_dst1))); + + __m128i v_mask = _mm_cmpeq_epi16(v_src2, v_zero); + _mm_storeu_si128((__m128i *)(dst + x), _mm_andnot_si128(v_mask, _mm_packs_epi32(v_dst_0, v_dst_1))); + } + + return x; + } +}; + +template <> +struct Div_SIMD +{ + bool haveSIMD; + Div_SIMD() { haveSIMD = checkHardwareSupport(CV_CPU_SSE2); } + + int operator() (const int * src1, const int * src2, int * dst, int width, double scale) const + { + int x = 0; + + if (!haveSIMD) + return x; + + __m128d v_scale = _mm_set1_pd(scale); + __m128i v_zero = _mm_setzero_si128(); + + for ( ; x <= width - 4; x += 4) + { + __m128i v_src1 = _mm_loadu_si128((const __m128i *)(src1 + x)); + __m128i v_src2 = _mm_loadu_si128((const __m128i *)(src2 + x)); + + __m128d v_src1d = _mm_cvtepi32_pd(v_src1); + __m128d v_src2d = _mm_cvtepi32_pd(v_src2); + __m128i v_dst0 = _mm_cvtpd_epi32(_mm_div_pd(_mm_mul_pd(v_src1d, v_scale), v_src2d)); + + v_src1d = _mm_cvtepi32_pd(_mm_srli_si128(v_src1, 8)); + v_src2d = _mm_cvtepi32_pd(_mm_srli_si128(v_src2, 8)); + __m128i v_dst1 = _mm_cvtpd_epi32(_mm_div_pd(_mm_mul_pd(v_src1d, v_scale), v_src2d)); + + __m128i v_dst = _mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(v_dst0), _mm_castsi128_ps(v_dst1))); + __m128i v_mask = _mm_cmpeq_epi32(v_src2, v_zero); + _mm_storeu_si128((__m128i *)(dst + x), _mm_andnot_si128(v_mask, v_dst)); + } + + return x; + } +}; + +#endif + template static void div_( const T* src1, size_t step1, const T* src2, size_t step2, T* dst, size_t step, Size size, double scale ) @@ -2582,9 +3034,11 @@ div_( const T* src1, size_t step1, const T* src2, size_t step2, step2 /= sizeof(src2[0]); step /= sizeof(dst[0]); + Div_SIMD vop; + for( ; size.height--; src1 += step1, src2 += step2, dst += step ) { - int i = 0; + int i = vop(src1, src2, dst, size.width, scale); #if CV_ENABLE_UNROLLED for( ; i <= size.width - 4; i += 4 ) { @@ -2621,6 +3075,232 @@ div_( const T* src1, size_t step1, const T* src2, size_t step2, } } +template +struct Recip_SIMD +{ + int operator() (const T *, T *, int, double) const + { + return 0; + } +}; + +#if CV_SSE2 + +#if CV_SSE4_1 + +template <> +struct Recip_SIMD +{ + bool haveSIMD; + Recip_SIMD() { haveSIMD = checkHardwareSupport(CV_CPU_SSE4_1); } + + int operator() (const uchar * src2, uchar * dst, int width, double scale) const + { + int x = 0; + + if (!haveSIMD) + return x; + + __m128d v_scale = _mm_set1_pd(scale); + __m128i v_zero = _mm_setzero_si128(); + + for ( ; x <= width - 8; x += 8) + { + __m128i _v_src2 = _mm_loadl_epi64((const __m128i *)(src2 + x)); + __m128i v_src2 = _mm_unpacklo_epi8(_v_src2, v_zero); + + __m128i v_src2i = _mm_unpacklo_epi16(v_src2, v_zero); + __m128d v_src2d = _mm_cvtepi32_pd(v_src2i); + __m128i v_dst0 = _mm_cvtpd_epi32(_mm_div_pd(v_scale, v_src2d)); + v_src2d = _mm_cvtepi32_pd(_mm_srli_si128(v_src2i, 8)); + __m128i v_dst1 = _mm_cvtpd_epi32(_mm_div_pd(v_scale, v_src2d)); + __m128i v_dst_0 = _mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(v_dst0), _mm_castsi128_ps(v_dst1))); + + v_src2i = _mm_unpackhi_epi16(v_src2, v_zero); + v_src2d = _mm_cvtepi32_pd(v_src2i); + 
v_dst0 = _mm_cvtpd_epi32(_mm_div_pd(v_scale, v_src2d)); + v_src2d = _mm_cvtepi32_pd(_mm_srli_si128(v_src2i, 8)); + v_dst1 = _mm_cvtpd_epi32(_mm_div_pd(v_scale, v_src2d)); + __m128i v_dst_1 = _mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(v_dst0), _mm_castsi128_ps(v_dst1))); + + __m128i v_mask = _mm_cmpeq_epi8(_v_src2, v_zero); + _mm_storel_epi64((__m128i *)(dst + x), _mm_andnot_si128(v_mask, _mm_packus_epi16(_mm_packs_epi32(v_dst_0, v_dst_1), v_zero))); + } + + return x; + } +}; + +#endif // CV_SSE4_1 + +template <> +struct Recip_SIMD +{ + bool haveSIMD; + Recip_SIMD() { haveSIMD = checkHardwareSupport(CV_CPU_SSE2); } + + int operator() (const schar * src2, schar * dst, int width, double scale) const + { + int x = 0; + + if (!haveSIMD) + return x; + + __m128d v_scale = _mm_set1_pd(scale); + __m128i v_zero = _mm_setzero_si128(); + + for ( ; x <= width - 8; x += 8) + { + __m128i _v_src2 = _mm_loadl_epi64((const __m128i *)(src2 + x)); + __m128i v_src2 = _mm_srai_epi16(_mm_unpacklo_epi8(v_zero, _v_src2), 8); + + __m128i v_src2i = _mm_srai_epi32(_mm_unpacklo_epi16(v_zero, v_src2), 16); + __m128d v_src2d = _mm_cvtepi32_pd(v_src2i); + __m128i v_dst0 = _mm_cvtpd_epi32(_mm_div_pd(v_scale, v_src2d)); + v_src2d = _mm_cvtepi32_pd(_mm_srli_si128(v_src2i, 8)); + __m128i v_dst1 = _mm_cvtpd_epi32(_mm_div_pd(v_scale, v_src2d)); + __m128i v_dst_0 = _mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(v_dst0), _mm_castsi128_ps(v_dst1))); + + v_src2i = _mm_srai_epi32(_mm_unpackhi_epi16(v_zero, v_src2), 16); + v_src2d = _mm_cvtepi32_pd(v_src2i); + v_dst0 = _mm_cvtpd_epi32(_mm_div_pd(v_scale, v_src2d)); + v_src2d = _mm_cvtepi32_pd(_mm_srli_si128(v_src2i, 8)); + v_dst1 = _mm_cvtpd_epi32(_mm_div_pd(v_scale, v_src2d)); + __m128i v_dst_1 = _mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(v_dst0), _mm_castsi128_ps(v_dst1))); + + __m128i v_mask = _mm_cmpeq_epi8(_v_src2, v_zero); + _mm_storel_epi64((__m128i *)(dst + x), _mm_andnot_si128(v_mask, _mm_packs_epi16(_mm_packs_epi32(v_dst_0, v_dst_1), v_zero))); + } + + return x; + } +}; + +#if CV_SSE4_1 + +template <> +struct Recip_SIMD +{ + bool haveSIMD; + Recip_SIMD() { haveSIMD = checkHardwareSupport(CV_CPU_SSE4_1); } + + int operator() (const ushort * src2, ushort * dst, int width, double scale) const + { + int x = 0; + + if (!haveSIMD) + return x; + + __m128d v_scale = _mm_set1_pd(scale); + __m128i v_zero = _mm_setzero_si128(); + + for ( ; x <= width - 8; x += 8) + { + __m128i v_src2 = _mm_loadu_si128((const __m128i *)(src2 + x)); + + __m128i v_src2i = _mm_unpacklo_epi16(v_src2, v_zero); + __m128d v_src2d = _mm_cvtepi32_pd(v_src2i); + __m128i v_dst0 = _mm_cvtpd_epi32(_mm_div_pd(v_scale, v_src2d)); + v_src2d = _mm_cvtepi32_pd(_mm_srli_si128(v_src2i, 8)); + __m128i v_dst1 = _mm_cvtpd_epi32(_mm_div_pd(v_scale, v_src2d)); + __m128i v_dst_0 = _mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(v_dst0), _mm_castsi128_ps(v_dst1))); + + v_src2i = _mm_unpackhi_epi16(v_src2, v_zero); + v_src2d = _mm_cvtepi32_pd(v_src2i); + v_dst0 = _mm_cvtpd_epi32(_mm_div_pd(v_scale, v_src2d)); + v_src2d = _mm_cvtepi32_pd(_mm_srli_si128(v_src2i, 8)); + v_dst1 = _mm_cvtpd_epi32(_mm_div_pd(v_scale, v_src2d)); + __m128i v_dst_1 = _mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(v_dst0), _mm_castsi128_ps(v_dst1))); + + __m128i v_mask = _mm_cmpeq_epi16(v_src2, v_zero); + _mm_storeu_si128((__m128i *)(dst + x), _mm_andnot_si128(v_mask, _mm_packus_epi32(v_dst_0, v_dst_1))); + } + + return x; + } +}; + +#endif // CV_SSE4_1 + +template <> +struct Recip_SIMD +{ + bool haveSIMD; + Recip_SIMD() { haveSIMD = 
checkHardwareSupport(CV_CPU_SSE2); } + + int operator() (const short * src2, short * dst, int width, double scale) const + { + int x = 0; + + if (!haveSIMD) + return x; + + __m128d v_scale = _mm_set1_pd(scale); + __m128i v_zero = _mm_setzero_si128(); + + for ( ; x <= width - 8; x += 8) + { + __m128i v_src2 = _mm_loadu_si128((const __m128i *)(src2 + x)); + + __m128i v_src2i = _mm_srai_epi32(_mm_unpacklo_epi16(v_zero, v_src2), 16); + __m128d v_src2d = _mm_cvtepi32_pd(v_src2i); + __m128i v_dst0 = _mm_cvtpd_epi32(_mm_div_pd(v_scale, v_src2d)); + v_src2d = _mm_cvtepi32_pd(_mm_srli_si128(v_src2i, 8)); + __m128i v_dst1 = _mm_cvtpd_epi32(_mm_div_pd(v_scale, v_src2d)); + __m128i v_dst_0 = _mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(v_dst0), _mm_castsi128_ps(v_dst1))); + + v_src2i = _mm_srai_epi32(_mm_unpackhi_epi16(v_zero, v_src2), 16); + v_src2d = _mm_cvtepi32_pd(v_src2i); + v_dst0 = _mm_cvtpd_epi32(_mm_div_pd(v_scale, v_src2d)); + v_src2d = _mm_cvtepi32_pd(_mm_srli_si128(v_src2i, 8)); + v_dst1 = _mm_cvtpd_epi32(_mm_div_pd(v_scale, v_src2d)); + __m128i v_dst_1 = _mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(v_dst0), _mm_castsi128_ps(v_dst1))); + + __m128i v_mask = _mm_cmpeq_epi16(v_src2, v_zero); + _mm_storeu_si128((__m128i *)(dst + x), _mm_andnot_si128(v_mask, _mm_packs_epi32(v_dst_0, v_dst_1))); + } + + return x; + } +}; + +template <> +struct Recip_SIMD +{ + bool haveSIMD; + Recip_SIMD() { haveSIMD = checkHardwareSupport(CV_CPU_SSE2); } + + int operator() (const int * src2, int * dst, int width, double scale) const + { + int x = 0; + + if (!haveSIMD) + return x; + + __m128d v_scale = _mm_set1_pd(scale); + __m128i v_zero = _mm_setzero_si128(); + + for ( ; x <= width - 4; x += 4) + { + __m128i v_src2 = _mm_loadu_si128((const __m128i *)(src2 + x)); + + __m128d v_src2d = _mm_cvtepi32_pd(v_src2); + __m128i v_dst0 = _mm_cvtpd_epi32(_mm_div_pd(v_scale, v_src2d)); + + v_src2d = _mm_cvtepi32_pd(_mm_srli_si128(v_src2, 8)); + __m128i v_dst1 = _mm_cvtpd_epi32(_mm_div_pd(v_scale, v_src2d)); + + __m128i v_dst = _mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(v_dst0), _mm_castsi128_ps(v_dst1))); + __m128i v_mask = _mm_cmpeq_epi32(v_src2, v_zero); + _mm_storeu_si128((__m128i *)(dst + x), _mm_andnot_si128(v_mask, v_dst)); + } + + return x; + } +}; + +#endif + template static void recip_( const T*, size_t, const T* src2, size_t step2, T* dst, size_t step, Size size, double scale ) @@ -2628,9 +3308,11 @@ recip_( const T*, size_t, const T* src2, size_t step2, step2 /= sizeof(src2[0]); step /= sizeof(dst[0]); + Recip_SIMD vop; + for( ; size.height--; src2 += step2, dst += step ) { - int i = 0; + int i = vop(src2, dst, size.width, scale); #if CV_ENABLE_UNROLLED for( ; i <= size.width - 4; i += 4 ) { @@ -3564,6 +4246,130 @@ struct Cmp_SIMD uint8x8_t v_mask; }; +#elif CV_SSE2 + +template <> +struct Cmp_SIMD +{ + explicit Cmp_SIMD(int code_) : + code(code_) + { + CV_Assert(code == CMP_GT || code == CMP_LE || + code == CMP_EQ || code == CMP_NE); + + haveSSE = checkHardwareSupport(CV_CPU_SSE2); + + v_mask = _mm_set1_epi8(-1); + } + + int operator () (const schar * src1, const schar * src2, uchar * dst, int width) const + { + int x = 0; + + if (!haveSSE) + return x; + + if (code == CMP_GT) + for ( ; x <= width - 16; x += 16) + _mm_storeu_si128((__m128i *)(dst + x), _mm_cmpgt_epi8(_mm_loadu_si128((const __m128i *)(src1 + x)), + _mm_loadu_si128((const __m128i *)(src2 + x)))); + else if (code == CMP_LE) + for ( ; x <= width - 16; x += 16) + { + __m128i v_gt = _mm_cmpgt_epi8(_mm_loadu_si128((const __m128i *)(src1 + x)), 
+ _mm_loadu_si128((const __m128i *)(src2 + x))); + _mm_storeu_si128((__m128i *)(dst + x), _mm_xor_si128(v_mask, v_gt)); + } + else if (code == CMP_EQ) + for ( ; x <= width - 16; x += 16) + _mm_storeu_si128((__m128i *)(dst + x), _mm_cmpeq_epi8(_mm_loadu_si128((const __m128i *)(src1 + x)), + _mm_loadu_si128((const __m128i *)(src2 + x)))); + else if (code == CMP_NE) + for ( ; x <= width - 16; x += 16) + { + __m128i v_eq = _mm_cmpeq_epi8(_mm_loadu_si128((const __m128i *)(src1 + x)), + _mm_loadu_si128((const __m128i *)(src2 + x))); + _mm_storeu_si128((__m128i *)(dst + x), _mm_xor_si128(v_mask, v_eq)); + } + + return x; + } + + int code; + __m128i v_mask; + bool haveSSE; +}; + +template <> +struct Cmp_SIMD +{ + explicit Cmp_SIMD(int code_) : + code(code_) + { + CV_Assert(code == CMP_GT || code == CMP_LE || + code == CMP_EQ || code == CMP_NE); + + haveSSE = checkHardwareSupport(CV_CPU_SSE2); + + v_mask = _mm_set1_epi32(0xffffffff); + } + + int operator () (const int * src1, const int * src2, uchar * dst, int width) const + { + int x = 0; + + if (!haveSSE) + return x; + + if (code == CMP_GT) + for ( ; x <= width - 8; x += 8) + { + __m128i v_dst0 = _mm_cmpgt_epi32(_mm_loadu_si128((const __m128i *)(src1 + x)), + _mm_loadu_si128((const __m128i *)(src2 + x))); + __m128i v_dst1 = _mm_cmpgt_epi32(_mm_loadu_si128((const __m128i *)(src1 + x + 4)), + _mm_loadu_si128((const __m128i *)(src2 + x + 4))); + + _mm_storel_epi64((__m128i *)(dst + x), _mm_packs_epi16(_mm_packs_epi32(v_dst0, v_dst1), v_mask)); + } + else if (code == CMP_LE) + for ( ; x <= width - 8; x += 8) + { + __m128i v_dst0 = _mm_cmpgt_epi32(_mm_loadu_si128((const __m128i *)(src1 + x)), + _mm_loadu_si128((const __m128i *)(src2 + x))); + __m128i v_dst1 = _mm_cmpgt_epi32(_mm_loadu_si128((const __m128i *)(src1 + x + 4)), + _mm_loadu_si128((const __m128i *)(src2 + x + 4))); + + _mm_storel_epi64((__m128i *)(dst + x), _mm_xor_si128(_mm_packs_epi16(_mm_packs_epi32(v_dst0, v_dst1), v_mask), v_mask)); + } + else if (code == CMP_EQ) + for ( ; x <= width - 8; x += 8) + { + __m128i v_dst0 = _mm_cmpeq_epi32(_mm_loadu_si128((const __m128i *)(src1 + x)), + _mm_loadu_si128((const __m128i *)(src2 + x))); + __m128i v_dst1 = _mm_cmpeq_epi32(_mm_loadu_si128((const __m128i *)(src1 + x + 4)), + _mm_loadu_si128((const __m128i *)(src2 + x + 4))); + + _mm_storel_epi64((__m128i *)(dst + x), _mm_packs_epi16(_mm_packs_epi32(v_dst0, v_dst1), v_mask)); + } + else if (code == CMP_NE) + for ( ; x <= width - 8; x += 8) + { + __m128i v_dst0 = _mm_cmpeq_epi32(_mm_loadu_si128((const __m128i *)(src1 + x)), + _mm_loadu_si128((const __m128i *)(src2 + x))); + __m128i v_dst1 = _mm_cmpeq_epi32(_mm_loadu_si128((const __m128i *)(src1 + x + 4)), + _mm_loadu_si128((const __m128i *)(src2 + x + 4))); + + _mm_storel_epi64((__m128i *)(dst + x), _mm_xor_si128(v_mask, _mm_packs_epi16(_mm_packs_epi32(v_dst0, v_dst1), v_mask))); + } + + return x; + } + + int code; + __m128i v_mask; + bool haveSSE; +}; + #endif template static void @@ -3676,7 +4482,8 @@ static void cmp8u(const uchar* src1, size_t step1, const uchar* src2, size_t ste { int x =0; #if CV_SSE2 - if( USE_SSE2 ){ + if( USE_SSE2 ) + { __m128i m128 = code == CMP_GT ? _mm_setzero_si128() : _mm_set1_epi8 (-1); __m128i c128 = _mm_set1_epi8 (-128); for( ; x <= size.width - 16; x += 16 ) @@ -3692,7 +4499,7 @@ static void cmp8u(const uchar* src1, size_t step1, const uchar* src2, size_t ste } } - #elif CV_NEON + #elif CV_NEON uint8x16_t mask = code == CMP_GT ? 
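SSE2 only provides greater-than and equality comparisons for signed integer lanes, so the Cmp_SIMD specializations above derive CMP_LE and CMP_NE by XOR-ing the GT/EQ result with an all-ones mask. A minimal standalone sketch of that inversion trick (plain SSE2; this is not OpenCV's actual Cmp_SIMD class, and the names are illustrative only):

#include <emmintrin.h>
#include <cstdio>

// "less-or-equal" for signed 8-bit lanes: SSE2 has no cmple, so compute
// NOT(a > b) by XOR-ing the cmpgt mask with all-ones (0xFF per lane).
static inline __m128i cmple_epi8(__m128i a, __m128i b)
{
    __m128i ones = _mm_set1_epi8(-1);
    return _mm_xor_si128(_mm_cmpgt_epi8(a, b), ones);
}

int main()
{
    signed char a[16], b[16];
    unsigned char dst[16];
    for (int i = 0; i < 16; ++i) { a[i] = (signed char)(i - 8); b[i] = 0; }

    __m128i va = _mm_loadu_si128((const __m128i*)a);
    __m128i vb = _mm_loadu_si128((const __m128i*)b);
    _mm_storeu_si128((__m128i*)dst, cmple_epi8(va, vb));

    for (int i = 0; i < 16; ++i)            // expect 255 where a[i] <= 0, else 0
        printf("%d <= %d -> %d\n", a[i], b[i], dst[i]);
    return 0;
}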
vdupq_n_u8(0) : vdupq_n_u8(255); for( ; x <= size.width - 16; x += 16 ) @@ -3714,7 +4521,8 @@ static void cmp8u(const uchar* src1, size_t step1, const uchar* src2, size_t ste { int x = 0; #if CV_SSE2 - if( USE_SSE2 ){ + if( USE_SSE2 ) + { __m128i m128 = code == CMP_EQ ? _mm_setzero_si128() : _mm_set1_epi8 (-1); for( ; x <= size.width - 16; x += 16 ) { @@ -3724,7 +4532,7 @@ static void cmp8u(const uchar* src1, size_t step1, const uchar* src2, size_t ste _mm_storeu_si128((__m128i*)(dst + x), r00); } } - #elif CV_NEON + #elif CV_NEON uint8x16_t mask = code == CMP_EQ ? vdupq_n_u8(0) : vdupq_n_u8(255); for( ; x <= size.width - 16; x += 16 ) @@ -3804,7 +4612,8 @@ static void cmp16s(const short* src1, size_t step1, const short* src2, size_t st { int x =0; #if CV_SSE2 - if( USE_SSE2){// + if( USE_SSE2) + { __m128i m128 = code == CMP_GT ? _mm_setzero_si128() : _mm_set1_epi16 (-1); for( ; x <= size.width - 16; x += 16 ) { @@ -3828,7 +4637,7 @@ static void cmp16s(const short* src1, size_t step1, const short* src2, size_t st x += 8; } } - #elif CV_NEON + #elif CV_NEON uint8x16_t mask = code == CMP_GT ? vdupq_n_u8(0) : vdupq_n_u8(255); for( ; x <= size.width - 16; x += 16 ) @@ -3843,8 +4652,7 @@ static void cmp16s(const short* src1, size_t step1, const short* src2, size_t st vst1q_u8(dst+x, veorq_u8(vcombine_u8(t1, t2), mask)); } - - #endif + #endif for( ; x < size.width; x++ ){ dst[x] = (uchar)(-(src1[x] > src2[x]) ^ m); @@ -3858,7 +4666,8 @@ static void cmp16s(const short* src1, size_t step1, const short* src2, size_t st { int x = 0; #if CV_SSE2 - if( USE_SSE2 ){ + if( USE_SSE2 ) + { __m128i m128 = code == CMP_EQ ? _mm_setzero_si128() : _mm_set1_epi16 (-1); for( ; x <= size.width - 16; x += 16 ) { @@ -3882,7 +4691,7 @@ static void cmp16s(const short* src1, size_t step1, const short* src2, size_t st x += 8; } } - #elif CV_NEON + #elif CV_NEON uint8x16_t mask = code == CMP_EQ ? vdupq_n_u8(0) : vdupq_n_u8(255); for( ; x <= size.width - 16; x += 16 ) @@ -3897,8 +4706,8 @@ static void cmp16s(const short* src1, size_t step1, const short* src2, size_t st vst1q_u8(dst+x, veorq_u8(vcombine_u8(t1, t2), mask)); } - #endif - for( ; x < size.width; x++ ) + #endif + for( ; x < size.width; x++ ) dst[x] = (uchar)(-(src1[x] == src2[x]) ^ m); } } diff --git a/modules/core/src/convert.cpp b/modules/core/src/convert.cpp index 829b984c9f..090acf5508 100644 --- a/modules/core/src/convert.cpp +++ b/modules/core/src/convert.cpp @@ -12,6 +12,7 @@ // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved. +// Copyright (C) 2014-2015, Itseez Inc., all rights reserved. // Third party copyrights are property of their respective owners. 
// // Redistribution and use in source and binary forms, with or without modification, @@ -62,8 +63,11 @@ template struct VSplit4; #define SPLIT2_KERNEL_TEMPLATE(name, data_type, reg_type, load_func, store_func) \ template<> \ - struct name{ \ - void operator()(const data_type* src, data_type* dst0, data_type* dst1){ \ + struct name \ + { \ + void operator()(const data_type* src, data_type* dst0, \ + data_type* dst1) const \ + { \ reg_type r = load_func(src); \ store_func(dst0, r.val[0]); \ store_func(dst1, r.val[1]); \ @@ -72,9 +76,11 @@ template struct VSplit4; #define SPLIT3_KERNEL_TEMPLATE(name, data_type, reg_type, load_func, store_func) \ template<> \ - struct name{ \ + struct name \ + { \ void operator()(const data_type* src, data_type* dst0, data_type* dst1, \ - data_type* dst2){ \ + data_type* dst2) const \ + { \ reg_type r = load_func(src); \ store_func(dst0, r.val[0]); \ store_func(dst1, r.val[1]); \ @@ -84,9 +90,11 @@ template struct VSplit4; #define SPLIT4_KERNEL_TEMPLATE(name, data_type, reg_type, load_func, store_func) \ template<> \ - struct name{ \ + struct name \ + { \ void operator()(const data_type* src, data_type* dst0, data_type* dst1, \ - data_type* dst2, data_type* dst3){ \ + data_type* dst2, data_type* dst3) const \ + { \ reg_type r = load_func(src); \ store_func(dst0, r.val[0]); \ store_func(dst1, r.val[1]); \ @@ -96,28 +104,174 @@ template struct VSplit4; } SPLIT2_KERNEL_TEMPLATE(VSplit2, uchar , uint8x16x2_t, vld2q_u8 , vst1q_u8 ); -SPLIT2_KERNEL_TEMPLATE(VSplit2, schar , int8x16x2_t, vld2q_s8 , vst1q_s8 ); SPLIT2_KERNEL_TEMPLATE(VSplit2, ushort, uint16x8x2_t, vld2q_u16, vst1q_u16); -SPLIT2_KERNEL_TEMPLATE(VSplit2, short , int16x8x2_t, vld2q_s16, vst1q_s16); SPLIT2_KERNEL_TEMPLATE(VSplit2, int , int32x4x2_t, vld2q_s32, vst1q_s32); -SPLIT2_KERNEL_TEMPLATE(VSplit2, float , float32x4x2_t, vld2q_f32, vst1q_f32); SPLIT2_KERNEL_TEMPLATE(VSplit2, int64 , int64x1x2_t, vld2_s64 , vst1_s64 ); SPLIT3_KERNEL_TEMPLATE(VSplit3, uchar , uint8x16x3_t, vld3q_u8 , vst1q_u8 ); -SPLIT3_KERNEL_TEMPLATE(VSplit3, schar , int8x16x3_t, vld3q_s8 , vst1q_s8 ); SPLIT3_KERNEL_TEMPLATE(VSplit3, ushort, uint16x8x3_t, vld3q_u16, vst1q_u16); -SPLIT3_KERNEL_TEMPLATE(VSplit3, short , int16x8x3_t, vld3q_s16, vst1q_s16); SPLIT3_KERNEL_TEMPLATE(VSplit3, int , int32x4x3_t, vld3q_s32, vst1q_s32); -SPLIT3_KERNEL_TEMPLATE(VSplit3, float , float32x4x3_t, vld3q_f32, vst1q_f32); SPLIT3_KERNEL_TEMPLATE(VSplit3, int64 , int64x1x3_t, vld3_s64 , vst1_s64 ); SPLIT4_KERNEL_TEMPLATE(VSplit4, uchar , uint8x16x4_t, vld4q_u8 , vst1q_u8 ); -SPLIT4_KERNEL_TEMPLATE(VSplit4, schar , int8x16x4_t, vld4q_s8 , vst1q_s8 ); SPLIT4_KERNEL_TEMPLATE(VSplit4, ushort, uint16x8x4_t, vld4q_u16, vst1q_u16); -SPLIT4_KERNEL_TEMPLATE(VSplit4, short , int16x8x4_t, vld4q_s16, vst1q_s16); SPLIT4_KERNEL_TEMPLATE(VSplit4, int , int32x4x4_t, vld4q_s32, vst1q_s32); -SPLIT4_KERNEL_TEMPLATE(VSplit4, float , float32x4x4_t, vld4q_f32, vst1q_f32); SPLIT4_KERNEL_TEMPLATE(VSplit4, int64 , int64x1x4_t, vld4_s64 , vst1_s64 ); + +#elif CV_SSE2 + +template +struct VSplit2 +{ + VSplit2() : support(false) { } + void operator()(const T *, T *, T *) const { } + + bool support; +}; + +template +struct VSplit3 +{ + VSplit3() : support(false) { } + void operator()(const T *, T *, T *, T *) const { } + + bool support; +}; + +template +struct VSplit4 +{ + VSplit4() : support(false) { } + void operator()(const T *, T *, T *, T *, T *) const { } + + bool support; +}; + +#define SPLIT2_KERNEL_TEMPLATE(data_type, reg_type, cast_type, _mm_deinterleave, flavor) \ 
+template <> \ +struct VSplit2 \ +{ \ + enum \ + { \ + ELEMS_IN_VEC = 16 / sizeof(data_type) \ + }; \ + \ + VSplit2() \ + { \ + support = checkHardwareSupport(CV_CPU_SSE2); \ + } \ + \ + void operator()(const data_type * src, \ + data_type * dst0, data_type * dst1) const \ + { \ + reg_type v_src0 = _mm_loadu_##flavor((cast_type const *)(src)); \ + reg_type v_src1 = _mm_loadu_##flavor((cast_type const *)(src + ELEMS_IN_VEC)); \ + reg_type v_src2 = _mm_loadu_##flavor((cast_type const *)(src + ELEMS_IN_VEC * 2)); \ + reg_type v_src3 = _mm_loadu_##flavor((cast_type const *)(src + ELEMS_IN_VEC * 3)); \ + \ + _mm_deinterleave(v_src0, v_src1, v_src2, v_src3); \ + \ + _mm_storeu_##flavor((cast_type *)(dst0), v_src0); \ + _mm_storeu_##flavor((cast_type *)(dst0 + ELEMS_IN_VEC), v_src1); \ + _mm_storeu_##flavor((cast_type *)(dst1), v_src2); \ + _mm_storeu_##flavor((cast_type *)(dst1 + ELEMS_IN_VEC), v_src3); \ + } \ + \ + bool support; \ +} + +#define SPLIT3_KERNEL_TEMPLATE(data_type, reg_type, cast_type, _mm_deinterleave, flavor) \ +template <> \ +struct VSplit3 \ +{ \ + enum \ + { \ + ELEMS_IN_VEC = 16 / sizeof(data_type) \ + }; \ + \ + VSplit3() \ + { \ + support = checkHardwareSupport(CV_CPU_SSE2); \ + } \ + \ + void operator()(const data_type * src, \ + data_type * dst0, data_type * dst1, data_type * dst2) const \ + { \ + reg_type v_src0 = _mm_loadu_##flavor((cast_type const *)(src)); \ + reg_type v_src1 = _mm_loadu_##flavor((cast_type const *)(src + ELEMS_IN_VEC)); \ + reg_type v_src2 = _mm_loadu_##flavor((cast_type const *)(src + ELEMS_IN_VEC * 2)); \ + reg_type v_src3 = _mm_loadu_##flavor((cast_type const *)(src + ELEMS_IN_VEC * 3)); \ + reg_type v_src4 = _mm_loadu_##flavor((cast_type const *)(src + ELEMS_IN_VEC * 4)); \ + reg_type v_src5 = _mm_loadu_##flavor((cast_type const *)(src + ELEMS_IN_VEC * 5)); \ + \ + _mm_deinterleave(v_src0, v_src1, v_src2, \ + v_src3, v_src4, v_src5); \ + \ + _mm_storeu_##flavor((cast_type *)(dst0), v_src0); \ + _mm_storeu_##flavor((cast_type *)(dst0 + ELEMS_IN_VEC), v_src1); \ + _mm_storeu_##flavor((cast_type *)(dst1), v_src2); \ + _mm_storeu_##flavor((cast_type *)(dst1 + ELEMS_IN_VEC), v_src3); \ + _mm_storeu_##flavor((cast_type *)(dst2), v_src4); \ + _mm_storeu_##flavor((cast_type *)(dst2 + ELEMS_IN_VEC), v_src5); \ + } \ + \ + bool support; \ +} + +#define SPLIT4_KERNEL_TEMPLATE(data_type, reg_type, cast_type, _mm_deinterleave, flavor) \ +template <> \ +struct VSplit4 \ +{ \ + enum \ + { \ + ELEMS_IN_VEC = 16 / sizeof(data_type) \ + }; \ + \ + VSplit4() \ + { \ + support = checkHardwareSupport(CV_CPU_SSE2); \ + } \ + \ + void operator()(const data_type * src, data_type * dst0, data_type * dst1, \ + data_type * dst2, data_type * dst3) const \ + { \ + reg_type v_src0 = _mm_loadu_##flavor((cast_type const *)(src)); \ + reg_type v_src1 = _mm_loadu_##flavor((cast_type const *)(src + ELEMS_IN_VEC)); \ + reg_type v_src2 = _mm_loadu_##flavor((cast_type const *)(src + ELEMS_IN_VEC * 2)); \ + reg_type v_src3 = _mm_loadu_##flavor((cast_type const *)(src + ELEMS_IN_VEC * 3)); \ + reg_type v_src4 = _mm_loadu_##flavor((cast_type const *)(src + ELEMS_IN_VEC * 4)); \ + reg_type v_src5 = _mm_loadu_##flavor((cast_type const *)(src + ELEMS_IN_VEC * 5)); \ + reg_type v_src6 = _mm_loadu_##flavor((cast_type const *)(src + ELEMS_IN_VEC * 6)); \ + reg_type v_src7 = _mm_loadu_##flavor((cast_type const *)(src + ELEMS_IN_VEC * 7)); \ + \ + _mm_deinterleave(v_src0, v_src1, v_src2, v_src3, \ + v_src4, v_src5, v_src6, v_src7); \ + \ + _mm_storeu_##flavor((cast_type *)(dst0), v_src0); \ + 
_mm_storeu_##flavor((cast_type *)(dst0 + ELEMS_IN_VEC), v_src1); \ + _mm_storeu_##flavor((cast_type *)(dst1), v_src2); \ + _mm_storeu_##flavor((cast_type *)(dst1 + ELEMS_IN_VEC), v_src3); \ + _mm_storeu_##flavor((cast_type *)(dst2), v_src4); \ + _mm_storeu_##flavor((cast_type *)(dst2 + ELEMS_IN_VEC), v_src5); \ + _mm_storeu_##flavor((cast_type *)(dst3), v_src6); \ + _mm_storeu_##flavor((cast_type *)(dst3 + ELEMS_IN_VEC), v_src7); \ + } \ + \ + bool support; \ +} + +SPLIT2_KERNEL_TEMPLATE( uchar, __m128i, __m128i, _mm_deinterleave_epi8, si128); +SPLIT2_KERNEL_TEMPLATE(ushort, __m128i, __m128i, _mm_deinterleave_epi16, si128); +SPLIT2_KERNEL_TEMPLATE( int, __m128, float, _mm_deinterleave_ps, ps); + +SPLIT3_KERNEL_TEMPLATE( uchar, __m128i, __m128i, _mm_deinterleave_epi8, si128); +SPLIT3_KERNEL_TEMPLATE(ushort, __m128i, __m128i, _mm_deinterleave_epi16, si128); +SPLIT3_KERNEL_TEMPLATE( int, __m128, float, _mm_deinterleave_ps, ps); + +SPLIT4_KERNEL_TEMPLATE( uchar, __m128i, __m128i, _mm_deinterleave_epi8, si128); +SPLIT4_KERNEL_TEMPLATE(ushort, __m128i, __m128i, _mm_deinterleave_epi16, si128); +SPLIT4_KERNEL_TEMPLATE( int, __m128, float, _mm_deinterleave_ps, ps); + #endif template static void @@ -154,6 +308,19 @@ split_( const T* src, T** dst, int len, int cn ) for( ; i < len - inc_i; i += inc_i, j += inc_j) vsplit(src + j, dst0 + i, dst1 + i); } +#elif CV_SSE2 + if (cn == 2) + { + int inc_i = 32/sizeof(T); + int inc_j = 2 * inc_i; + + VSplit2 vsplit; + if (vsplit.support) + { + for( ; i <= len - inc_i; i += inc_i, j += inc_j) + vsplit(src + j, dst0 + i, dst1 + i); + } + } #endif for( ; i < len; i++, j += cn ) { @@ -176,6 +343,20 @@ split_( const T* src, T** dst, int len, int cn ) for( ; i <= len - inc_i; i += inc_i, j += inc_j) vsplit(src + j, dst0 + i, dst1 + i, dst2 + i); } +#elif CV_SSE2 + if (cn == 3) + { + int inc_i = 32/sizeof(T); + int inc_j = 3 * inc_i; + + VSplit3 vsplit; + + if (vsplit.support) + { + for( ; i <= len - inc_i; i += inc_i, j += inc_j) + vsplit(src + j, dst0 + i, dst1 + i, dst2 + i); + } + } #endif for( ; i < len; i++, j += cn ) { @@ -199,6 +380,19 @@ split_( const T* src, T** dst, int len, int cn ) for( ; i <= len - inc_i; i += inc_i, j += inc_j) vsplit(src + j, dst0 + i, dst1 + i, dst2 + i, dst3 + i); } +#elif CV_SSE2 + if (cn == 4) + { + int inc_i = 32/sizeof(T); + int inc_j = 4 * inc_i; + + VSplit4 vsplit; + if (vsplit.support) + { + for( ; i <= len - inc_i; i += inc_i, j += inc_j) + vsplit(src + j, dst0 + i, dst1 + i, dst2 + i, dst3 + i); + } + } #endif for( ; i < len; i++, j += cn ) { @@ -265,28 +459,177 @@ template struct VMerge4; } MERGE2_KERNEL_TEMPLATE(VMerge2, uchar , uint8x16x2_t, vld1q_u8 , vst2q_u8 ); -MERGE2_KERNEL_TEMPLATE(VMerge2, schar , int8x16x2_t, vld1q_s8 , vst2q_s8 ); MERGE2_KERNEL_TEMPLATE(VMerge2, ushort, uint16x8x2_t, vld1q_u16, vst2q_u16); -MERGE2_KERNEL_TEMPLATE(VMerge2, short , int16x8x2_t, vld1q_s16, vst2q_s16); MERGE2_KERNEL_TEMPLATE(VMerge2, int , int32x4x2_t, vld1q_s32, vst2q_s32); -MERGE2_KERNEL_TEMPLATE(VMerge2, float , float32x4x2_t, vld1q_f32, vst2q_f32); MERGE2_KERNEL_TEMPLATE(VMerge2, int64 , int64x1x2_t, vld1_s64 , vst2_s64 ); MERGE3_KERNEL_TEMPLATE(VMerge3, uchar , uint8x16x3_t, vld1q_u8 , vst3q_u8 ); -MERGE3_KERNEL_TEMPLATE(VMerge3, schar , int8x16x3_t, vld1q_s8 , vst3q_s8 ); MERGE3_KERNEL_TEMPLATE(VMerge3, ushort, uint16x8x3_t, vld1q_u16, vst3q_u16); -MERGE3_KERNEL_TEMPLATE(VMerge3, short , int16x8x3_t, vld1q_s16, vst3q_s16); MERGE3_KERNEL_TEMPLATE(VMerge3, int , int32x4x3_t, vld1q_s32, vst3q_s32); -MERGE3_KERNEL_TEMPLATE(VMerge3, 
float , float32x4x3_t, vld1q_f32, vst3q_f32); MERGE3_KERNEL_TEMPLATE(VMerge3, int64 , int64x1x3_t, vld1_s64 , vst3_s64 ); MERGE4_KERNEL_TEMPLATE(VMerge4, uchar , uint8x16x4_t, vld1q_u8 , vst4q_u8 ); -MERGE4_KERNEL_TEMPLATE(VMerge4, schar , int8x16x4_t, vld1q_s8 , vst4q_s8 ); MERGE4_KERNEL_TEMPLATE(VMerge4, ushort, uint16x8x4_t, vld1q_u16, vst4q_u16); -MERGE4_KERNEL_TEMPLATE(VMerge4, short , int16x8x4_t, vld1q_s16, vst4q_s16); MERGE4_KERNEL_TEMPLATE(VMerge4, int , int32x4x4_t, vld1q_s32, vst4q_s32); -MERGE4_KERNEL_TEMPLATE(VMerge4, float , float32x4x4_t, vld1q_f32, vst4q_f32); MERGE4_KERNEL_TEMPLATE(VMerge4, int64 , int64x1x4_t, vld1_s64 , vst4_s64 ); + +#elif CV_SSE2 + +template +struct VMerge2 +{ + VMerge2() : support(false) { } + void operator()(const T *, const T *, T *) const { } + + bool support; +}; + +template +struct VMerge3 +{ + VMerge3() : support(false) { } + void operator()(const T *, const T *, const T *, T *) const { } + + bool support; +}; + +template +struct VMerge4 +{ + VMerge4() : support(false) { } + void operator()(const T *, const T *, const T *, const T *, T *) const { } + + bool support; +}; + +#define MERGE2_KERNEL_TEMPLATE(data_type, reg_type, cast_type, _mm_interleave, flavor, se) \ +template <> \ +struct VMerge2 \ +{ \ + enum \ + { \ + ELEMS_IN_VEC = 16 / sizeof(data_type) \ + }; \ + \ + VMerge2() \ + { \ + support = checkHardwareSupport(se); \ + } \ + \ + void operator()(const data_type * src0, const data_type * src1, \ + data_type * dst) const \ + { \ + reg_type v_src0 = _mm_loadu_##flavor((const cast_type *)(src0)); \ + reg_type v_src1 = _mm_loadu_##flavor((const cast_type *)(src0 + ELEMS_IN_VEC)); \ + reg_type v_src2 = _mm_loadu_##flavor((const cast_type *)(src1)); \ + reg_type v_src3 = _mm_loadu_##flavor((const cast_type *)(src1 + ELEMS_IN_VEC)); \ + \ + _mm_interleave(v_src0, v_src1, v_src2, v_src3); \ + \ + _mm_storeu_##flavor((cast_type *)(dst), v_src0); \ + _mm_storeu_##flavor((cast_type *)(dst + ELEMS_IN_VEC), v_src1); \ + _mm_storeu_##flavor((cast_type *)(dst + ELEMS_IN_VEC * 2), v_src2); \ + _mm_storeu_##flavor((cast_type *)(dst + ELEMS_IN_VEC * 3), v_src3); \ + } \ + \ + bool support; \ +} + +#define MERGE3_KERNEL_TEMPLATE(data_type, reg_type, cast_type, _mm_interleave, flavor, se) \ +template <> \ +struct VMerge3 \ +{ \ + enum \ + { \ + ELEMS_IN_VEC = 16 / sizeof(data_type) \ + }; \ + \ + VMerge3() \ + { \ + support = checkHardwareSupport(se); \ + } \ + \ + void operator()(const data_type * src0, const data_type * src1, const data_type * src2,\ + data_type * dst) const \ + { \ + reg_type v_src0 = _mm_loadu_##flavor((const cast_type *)(src0)); \ + reg_type v_src1 = _mm_loadu_##flavor((const cast_type *)(src0 + ELEMS_IN_VEC)); \ + reg_type v_src2 = _mm_loadu_##flavor((const cast_type *)(src1)); \ + reg_type v_src3 = _mm_loadu_##flavor((const cast_type *)(src1 + ELEMS_IN_VEC)); \ + reg_type v_src4 = _mm_loadu_##flavor((const cast_type *)(src2)); \ + reg_type v_src5 = _mm_loadu_##flavor((const cast_type *)(src2 + ELEMS_IN_VEC)); \ + \ + _mm_interleave(v_src0, v_src1, v_src2, \ + v_src3, v_src4, v_src5); \ + \ + _mm_storeu_##flavor((cast_type *)(dst), v_src0); \ + _mm_storeu_##flavor((cast_type *)(dst + ELEMS_IN_VEC), v_src1); \ + _mm_storeu_##flavor((cast_type *)(dst + ELEMS_IN_VEC * 2), v_src2); \ + _mm_storeu_##flavor((cast_type *)(dst + ELEMS_IN_VEC * 3), v_src3); \ + _mm_storeu_##flavor((cast_type *)(dst + ELEMS_IN_VEC * 4), v_src4); \ + _mm_storeu_##flavor((cast_type *)(dst + ELEMS_IN_VEC * 5), v_src5); \ + } \ + \ + bool support; \ +} + +#define 
MERGE4_KERNEL_TEMPLATE(data_type, reg_type, cast_type, _mm_interleave, flavor, se) \ +template <> \ +struct VMerge4 \ +{ \ + enum \ + { \ + ELEMS_IN_VEC = 16 / sizeof(data_type) \ + }; \ + \ + VMerge4() \ + { \ + support = checkHardwareSupport(se); \ + } \ + \ + void operator()(const data_type * src0, const data_type * src1, \ + const data_type * src2, const data_type * src3, \ + data_type * dst) const \ + { \ + reg_type v_src0 = _mm_loadu_##flavor((const cast_type *)(src0)); \ + reg_type v_src1 = _mm_loadu_##flavor((const cast_type *)(src0 + ELEMS_IN_VEC)); \ + reg_type v_src2 = _mm_loadu_##flavor((const cast_type *)(src1)); \ + reg_type v_src3 = _mm_loadu_##flavor((const cast_type *)(src1 + ELEMS_IN_VEC)); \ + reg_type v_src4 = _mm_loadu_##flavor((const cast_type *)(src2)); \ + reg_type v_src5 = _mm_loadu_##flavor((const cast_type *)(src2 + ELEMS_IN_VEC)); \ + reg_type v_src6 = _mm_loadu_##flavor((const cast_type *)(src3)); \ + reg_type v_src7 = _mm_loadu_##flavor((const cast_type *)(src3 + ELEMS_IN_VEC)); \ + \ + _mm_interleave(v_src0, v_src1, v_src2, v_src3, \ + v_src4, v_src5, v_src6, v_src7); \ + \ + _mm_storeu_##flavor((cast_type *)(dst), v_src0); \ + _mm_storeu_##flavor((cast_type *)(dst + ELEMS_IN_VEC), v_src1); \ + _mm_storeu_##flavor((cast_type *)(dst + ELEMS_IN_VEC * 2), v_src2); \ + _mm_storeu_##flavor((cast_type *)(dst + ELEMS_IN_VEC * 3), v_src3); \ + _mm_storeu_##flavor((cast_type *)(dst + ELEMS_IN_VEC * 4), v_src4); \ + _mm_storeu_##flavor((cast_type *)(dst + ELEMS_IN_VEC * 5), v_src5); \ + _mm_storeu_##flavor((cast_type *)(dst + ELEMS_IN_VEC * 6), v_src6); \ + _mm_storeu_##flavor((cast_type *)(dst + ELEMS_IN_VEC * 7), v_src7); \ + } \ + \ + bool support; \ +} + +MERGE2_KERNEL_TEMPLATE( uchar, __m128i, __m128i, _mm_interleave_epi8, si128, CV_CPU_SSE2); +MERGE3_KERNEL_TEMPLATE( uchar, __m128i, __m128i, _mm_interleave_epi8, si128, CV_CPU_SSE2); +MERGE4_KERNEL_TEMPLATE( uchar, __m128i, __m128i, _mm_interleave_epi8, si128, CV_CPU_SSE2); + +#if CV_SSE4_1 +MERGE2_KERNEL_TEMPLATE(ushort, __m128i, __m128i, _mm_interleave_epi16, si128, CV_CPU_SSE4_1); +MERGE3_KERNEL_TEMPLATE(ushort, __m128i, __m128i, _mm_interleave_epi16, si128, CV_CPU_SSE4_1); +MERGE4_KERNEL_TEMPLATE(ushort, __m128i, __m128i, _mm_interleave_epi16, si128, CV_CPU_SSE4_1); +#endif + +MERGE2_KERNEL_TEMPLATE( int, __m128, float, _mm_interleave_ps, ps, CV_CPU_SSE2); +MERGE3_KERNEL_TEMPLATE( int, __m128, float, _mm_interleave_ps, ps, CV_CPU_SSE2); +MERGE4_KERNEL_TEMPLATE( int, __m128, float, _mm_interleave_ps, ps, CV_CPU_SSE2); + #endif template static void @@ -314,6 +657,17 @@ merge_( const T** src, T* dst, int len, int cn ) for( ; i < len - inc_i; i += inc_i, j += inc_j) vmerge(src0 + i, src1 + i, dst + j); } +#elif CV_SSE2 + if(cn == 2) + { + int inc_i = 32/sizeof(T); + int inc_j = 2 * inc_i; + + VMerge2 vmerge; + if (vmerge.support) + for( ; i < len - inc_i; i += inc_i, j += inc_j) + vmerge(src0 + i, src1 + i, dst + j); + } #endif for( ; i < len; i++, j += cn ) { @@ -335,6 +689,17 @@ merge_( const T** src, T* dst, int len, int cn ) for( ; i < len - inc_i; i += inc_i, j += inc_j) vmerge(src0 + i, src1 + i, src2 + i, dst + j); } +#elif CV_SSE2 + if(cn == 3) + { + int inc_i = 32/sizeof(T); + int inc_j = 3 * inc_i; + + VMerge3 vmerge; + if (vmerge.support) + for( ; i < len - inc_i; i += inc_i, j += inc_j) + vmerge(src0 + i, src1 + i, src2 + i, dst + j); + } #endif for( ; i < len; i++, j += cn ) { @@ -357,6 +722,17 @@ merge_( const T** src, T* dst, int len, int cn ) for( ; i < len - inc_i; i += inc_i, j += inc_j) 
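The VSplit/VMerge kernels above load two 16-byte registers per plane and hand them to the _mm_deinterleave_*/_mm_interleave_* helpers, which are OpenCV-internal utilities rather than raw SSE intrinsics. As a rough illustration of what channel deinterleaving means at the register level, here is a minimal 2-channel float example built only from standard SSE shuffles (an assumption-laden sketch, not the implementation of those helpers):

#include <xmmintrin.h>
#include <cstdio>

// v0 = x0 y0 x1 y1, v1 = x2 y2 x3 y3  ->  v0 = x0..x3, v1 = y0..y3
static inline void deinterleave2_ps(__m128& v0, __m128& v1)
{
    __m128 a = _mm_shuffle_ps(v0, v1, _MM_SHUFFLE(2, 0, 2, 0)); // even lanes: channel 0
    __m128 b = _mm_shuffle_ps(v0, v1, _MM_SHUFFLE(3, 1, 3, 1)); // odd lanes:  channel 1
    v0 = a; v1 = b;
}

int main()
{
    float src[8] = { 10, 20, 11, 21, 12, 22, 13, 23 };          // x,y interleaved
    float c0[4], c1[4];

    __m128 v0 = _mm_loadu_ps(src);
    __m128 v1 = _mm_loadu_ps(src + 4);
    deinterleave2_ps(v0, v1);
    _mm_storeu_ps(c0, v0);
    _mm_storeu_ps(c1, v1);

    for (int i = 0; i < 4; ++i)             // expect c0 = 10..13, c1 = 20..23
        printf("c0=%g c1=%g\n", c0[i], c1[i]);
    return 0;
}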
vmerge(src0 + i, src1 + i, src2 + i, src3 + i, dst + j); } +#elif CV_SSE2 + if(cn == 4) + { + int inc_i = 32/sizeof(T); + int inc_j = 4 * inc_i; + + VMerge4 vmerge; + if (vmerge.support) + for( ; i < len - inc_i; i += inc_i, j += inc_j) + vmerge(src0 + i, src1 + i, src2 + i, src3 + i, dst + j); + } #endif for( ; i < len; i++, j += cn ) { @@ -1123,6 +1499,48 @@ struct cvtScaleAbs_SIMD } }; +template <> +struct cvtScaleAbs_SIMD +{ + int operator () (const schar * src, uchar * dst, int width, + float scale, float shift) const + { + int x = 0; + + if (USE_SSE2) + { + __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift), + v_zero_f = _mm_setzero_ps(); + __m128i v_zero_i = _mm_setzero_si128(); + + for ( ; x <= width - 16; x += 16) + { + __m128i v_src = _mm_loadu_si128((const __m128i *)(src + x)); + __m128i v_src_12 = _mm_srai_epi16(_mm_unpacklo_epi8(v_zero_i, v_src), 8), + v_src_34 = _mm_srai_epi16(_mm_unpackhi_epi8(v_zero_i, v_src), 8); + __m128 v_dst1 = _mm_add_ps(_mm_mul_ps(_mm_cvtepi32_ps( + _mm_srai_epi32(_mm_unpacklo_epi16(v_zero_i, v_src_12), 16)), v_scale), v_shift); + v_dst1 = _mm_max_ps(_mm_sub_ps(v_zero_f, v_dst1), v_dst1); + __m128 v_dst2 = _mm_add_ps(_mm_mul_ps(_mm_cvtepi32_ps( + _mm_srai_epi32(_mm_unpackhi_epi16(v_zero_i, v_src_12), 16)), v_scale), v_shift); + v_dst2 = _mm_max_ps(_mm_sub_ps(v_zero_f, v_dst2), v_dst2); + __m128 v_dst3 = _mm_add_ps(_mm_mul_ps(_mm_cvtepi32_ps( + _mm_srai_epi32(_mm_unpacklo_epi16(v_zero_i, v_src_34), 16)), v_scale), v_shift); + v_dst3 = _mm_max_ps(_mm_sub_ps(v_zero_f, v_dst3), v_dst3); + __m128 v_dst4 = _mm_add_ps(_mm_mul_ps(_mm_cvtepi32_ps( + _mm_srai_epi32(_mm_unpackhi_epi16(v_zero_i, v_src_34), 16)), v_scale), v_shift); + v_dst4 = _mm_max_ps(_mm_sub_ps(v_zero_f, v_dst4), v_dst4); + + __m128i v_dst_i = _mm_packus_epi16(_mm_packs_epi32(_mm_cvtps_epi32(v_dst1), _mm_cvtps_epi32(v_dst2)), + _mm_packs_epi32(_mm_cvtps_epi32(v_dst3), _mm_cvtps_epi32(v_dst4))); + _mm_storeu_si128((__m128i *)(dst + x), v_dst_i); + } + } + + return x; + } +}; + template <> struct cvtScaleAbs_SIMD { @@ -1242,6 +1660,44 @@ struct cvtScaleAbs_SIMD } }; +template <> +struct cvtScaleAbs_SIMD +{ + int operator () (const double * src, uchar * dst, int width, + float scale, float shift) const + { + int x = 0; + + if (USE_SSE2) + { + __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift), + v_zero_f = _mm_setzero_ps(); + __m128i v_zero_i = _mm_setzero_si128(); + + for ( ; x <= width - 8; x += 8) + { + __m128 v_src1 = _mm_movelh_ps(_mm_cvtpd_ps(_mm_loadu_pd(src + x)), + _mm_cvtpd_ps(_mm_loadu_pd(src + x + 2))); + __m128 v_src2 = _mm_movelh_ps(_mm_cvtpd_ps(_mm_loadu_pd(src + x + 4)), + _mm_cvtpd_ps(_mm_loadu_pd(src + x + 6))); + + __m128 v_dst1 = _mm_add_ps(_mm_mul_ps(v_src1, v_scale), v_shift); + v_dst1 = _mm_max_ps(_mm_sub_ps(v_zero_f, v_dst1), v_dst1); + + __m128 v_dst2 = _mm_add_ps(_mm_mul_ps(v_src2, v_scale), v_shift); + v_dst2 = _mm_max_ps(_mm_sub_ps(v_zero_f, v_dst2), v_dst2); + + __m128i v_dst_i = _mm_packs_epi32(_mm_cvtps_epi32(v_dst1), + _mm_cvtps_epi32(v_dst2)); + + _mm_storel_epi64((__m128i *)(dst + x), _mm_packus_epi16(v_dst_i, v_zero_i)); + } + } + + return x; + } +}; + #elif CV_NEON template <> @@ -1489,7 +1945,7 @@ struct cvtScale_SIMD } }; -#if CV_NEON +#if CV_SSE2 // from uchar @@ -1499,17 +1955,25 @@ struct cvtScale_SIMD int operator () (const uchar * src, uchar * dst, int width, float scale, float shift) const { int x = 0; - float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale); + + if (!USE_SSE2) + return x; + + __m128i v_zero 
= _mm_setzero_si128(); + __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift); for ( ; x <= width - 8; x += 8) { - uint16x8_t v_src = vmovl_u8(vld1_u8(src + x)); - float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_low_u16(v_src))), v_scale), v_shift); - float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_src))), v_scale), v_shift); + __m128i v_src = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i const *)(src + x)), v_zero); + __m128 v_src_f = _mm_cvtepi32_ps(_mm_unpacklo_epi16(v_src, v_zero)); + __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift); - uint16x8_t v_dst = vcombine_u16(vqmovn_u32(cv_vrndq_u32_f32(v_dst1)), - vqmovn_u32(cv_vrndq_u32_f32(v_dst2))); - vst1_u8(dst + x, vqmovn_u16(v_dst)); + v_src_f = _mm_cvtepi32_ps(_mm_unpackhi_epi16(v_src, v_zero)); + __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift); + + __m128i v_dst = _mm_packs_epi32(_mm_cvtps_epi32(v_dst_0), + _mm_cvtps_epi32(v_dst_1)); + _mm_storel_epi64((__m128i *)(dst + x), _mm_packus_epi16(v_dst, v_zero)); } return x; @@ -1522,63 +1986,98 @@ struct cvtScale_SIMD int operator () (const uchar * src, schar * dst, int width, float scale, float shift) const { int x = 0; - float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale); + + if (!USE_SSE2) + return x; + + __m128i v_zero = _mm_setzero_si128(); + __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift); for ( ; x <= width - 8; x += 8) { - uint16x8_t v_src = vmovl_u8(vld1_u8(src + x)); - float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_low_u16(v_src))), v_scale), v_shift); - float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_src))), v_scale), v_shift); + __m128i v_src = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i const *)(src + x)), v_zero); + __m128 v_src_f = _mm_cvtepi32_ps(_mm_unpacklo_epi16(v_src, v_zero)); + __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift); - int16x8_t v_dst = vcombine_s16(vqmovn_s32(cv_vrndq_s32_f32(v_dst1)), - vqmovn_s32(cv_vrndq_s32_f32(v_dst2))); - vst1_s8(dst + x, vqmovn_s16(v_dst)); + v_src_f = _mm_cvtepi32_ps(_mm_unpackhi_epi16(v_src, v_zero)); + __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift); + + __m128i v_dst = _mm_packs_epi32(_mm_cvtps_epi32(v_dst_0), + _mm_cvtps_epi32(v_dst_1)); + _mm_storel_epi64((__m128i *)(dst + x), _mm_packs_epi16(v_dst, v_zero)); } return x; } }; +#if CV_SSE4_1 + template <> struct cvtScale_SIMD { + cvtScale_SIMD() + { + haveSSE = checkHardwareSupport(CV_CPU_SSE4_1); + } + int operator () (const uchar * src, ushort * dst, int width, float scale, float shift) const { int x = 0; - float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale); + + if (!haveSSE) + return x; + + __m128i v_zero = _mm_setzero_si128(); + __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift); for ( ; x <= width - 8; x += 8) { - uint16x8_t v_src = vmovl_u8(vld1_u8(src + x)); - float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_low_u16(v_src))), v_scale), v_shift); - float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_src))), v_scale), v_shift); + __m128i v_src = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i const *)(src + x)), v_zero); + __m128 v_src_f = _mm_cvtepi32_ps(_mm_unpacklo_epi16(v_src, v_zero)); + __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift); - uint16x8_t v_dst = vcombine_u16(vqmovn_u32(cv_vrndq_u32_f32(v_dst1)), - 
vqmovn_u32(cv_vrndq_u32_f32(v_dst2))); - vst1q_u16(dst + x, v_dst); + v_src_f = _mm_cvtepi32_ps(_mm_unpackhi_epi16(v_src, v_zero)); + __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift); + + __m128i v_dst = _mm_packus_epi32(_mm_cvtps_epi32(v_dst_0), + _mm_cvtps_epi32(v_dst_1)); + _mm_storeu_si128((__m128i *)(dst + x), v_dst); } return x; } + + bool haveSSE; }; +#endif + template <> struct cvtScale_SIMD { int operator () (const uchar * src, short * dst, int width, float scale, float shift) const { int x = 0; - float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale); + + if (!USE_SSE2) + return x; + + __m128i v_zero = _mm_setzero_si128(); + __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift); for ( ; x <= width - 8; x += 8) { - uint16x8_t v_src = vmovl_u8(vld1_u8(src + x)); - float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_low_u16(v_src))), v_scale), v_shift); - float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_src))), v_scale), v_shift); + __m128i v_src = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i const *)(src + x)), v_zero); + __m128 v_src_f = _mm_cvtepi32_ps(_mm_unpacklo_epi16(v_src, v_zero)); + __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift); - int16x8_t v_dst = vcombine_s16(vqmovn_s32(cv_vrndq_s32_f32(v_dst1)), - vqmovn_s32(cv_vrndq_s32_f32(v_dst2))); - vst1q_s16(dst + x, v_dst); + v_src_f = _mm_cvtepi32_ps(_mm_unpackhi_epi16(v_src, v_zero)); + __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift); + + __m128i v_dst = _mm_packs_epi32(_mm_cvtps_epi32(v_dst_0), + _mm_cvtps_epi32(v_dst_1)); + _mm_storeu_si128((__m128i *)(dst + x), v_dst); } return x; @@ -1591,16 +2090,24 @@ struct cvtScale_SIMD int operator () (const uchar * src, int * dst, int width, float scale, float shift) const { int x = 0; - float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale); + + if (!USE_SSE2) + return x; + + __m128i v_zero = _mm_setzero_si128(); + __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift); for ( ; x <= width - 8; x += 8) { - uint16x8_t v_src = vmovl_u8(vld1_u8(src + x)); - float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_low_u16(v_src))), v_scale), v_shift); - float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_src))), v_scale), v_shift); + __m128i v_src = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i const *)(src + x)), v_zero); + __m128 v_src_f = _mm_cvtepi32_ps(_mm_unpacklo_epi16(v_src, v_zero)); + __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift); - vst1q_s32(dst + x, cv_vrndq_s32_f32(v_dst1)); - vst1q_s32(dst + x + 4, cv_vrndq_s32_f32(v_dst2)); + v_src_f = _mm_cvtepi32_ps(_mm_unpackhi_epi16(v_src, v_zero)); + __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift); + + _mm_storeu_si128((__m128i *)(dst + x), _mm_cvtps_epi32(v_dst_0)); + _mm_storeu_si128((__m128i *)(dst + x + 4), _mm_cvtps_epi32(v_dst_1)); } return x; @@ -1613,13 +2120,58 @@ struct cvtScale_SIMD int operator () (const uchar * src, float * dst, int width, float scale, float shift) const { int x = 0; - float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale); + + if (!USE_SSE2) + return x; + + __m128i v_zero = _mm_setzero_si128(); + __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift); for ( ; x <= width - 8; x += 8) { - uint16x8_t v_src = vmovl_u8(vld1_u8(src + x)); - vst1q_f32(dst + x, vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_low_u16(v_src))), v_scale), v_shift)); - 
vst1q_f32(dst + x + 4, vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_src))), v_scale), v_shift)); + __m128i v_src = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i const *)(src + x)), v_zero); + __m128 v_src_f = _mm_cvtepi32_ps(_mm_unpacklo_epi16(v_src, v_zero)); + __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift); + + v_src_f = _mm_cvtepi32_ps(_mm_unpackhi_epi16(v_src, v_zero)); + __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift); + + _mm_storeu_ps(dst + x, v_dst_0); + _mm_storeu_ps(dst + x + 4, v_dst_1); + } + + return x; + } +}; + +template <> +struct cvtScale_SIMD +{ + int operator () (const uchar * src, double * dst, int width, double scale, double shift) const + { + int x = 0; + + if (!USE_SSE2) + return x; + + __m128i v_zero = _mm_setzero_si128(); + __m128d v_scale = _mm_set1_pd(scale), v_shift = _mm_set1_pd(shift); + + for ( ; x <= width - 8; x += 8) + { + __m128i v_src = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i const *)(src + x)), v_zero); + + __m128i v_src_s32 = _mm_unpacklo_epi16(v_src, v_zero); + __m128d v_dst_0 = _mm_add_pd(_mm_mul_pd(_mm_cvtepi32_pd(v_src_s32), v_scale), v_shift); + __m128d v_dst_1 = _mm_add_pd(_mm_mul_pd(_mm_cvtepi32_pd(_mm_srli_si128(v_src_s32, 8)), v_scale), v_shift); + _mm_storeu_pd(dst + x, v_dst_0); + _mm_storeu_pd(dst + x + 2, v_dst_1); + + v_src_s32 = _mm_unpackhi_epi16(v_src, v_zero); + v_dst_0 = _mm_add_pd(_mm_mul_pd(_mm_cvtepi32_pd(v_src_s32), v_scale), v_shift); + v_dst_1 = _mm_add_pd(_mm_mul_pd(_mm_cvtepi32_pd(_mm_srli_si128(v_src_s32, 8)), v_scale), v_shift); + _mm_storeu_pd(dst + x + 4, v_dst_0); + _mm_storeu_pd(dst + x + 6, v_dst_1); } return x; @@ -1634,17 +2186,25 @@ struct cvtScale_SIMD int operator () (const schar * src, uchar * dst, int width, float scale, float shift) const { int x = 0; - float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale); + + if (!USE_SSE2) + return x; + + __m128i v_zero = _mm_setzero_si128(); + __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift); for ( ; x <= width - 8; x += 8) { - int16x8_t v_src = vmovl_s8(vld1_s8(src + x)); - float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_low_s16(v_src))), v_scale), v_shift); - float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_high_s16(v_src))), v_scale), v_shift); + __m128i v_src = _mm_srai_epi16(_mm_unpacklo_epi8(v_zero, _mm_loadl_epi64((__m128i const *)(src + x))), 8); + __m128 v_src_f = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(v_zero, v_src), 16)); + __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift); - uint16x8_t v_dst = vcombine_u16(vqmovn_u32(cv_vrndq_u32_f32(v_dst1)), - vqmovn_u32(cv_vrndq_u32_f32(v_dst2))); - vst1_u8(dst + x, vqmovn_u16(v_dst)); + v_src_f = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(v_zero, v_src), 16)); + __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift); + + __m128i v_dst = _mm_packs_epi32(_mm_cvtps_epi32(v_dst_0), + _mm_cvtps_epi32(v_dst_1)); + _mm_storel_epi64((__m128i *)(dst + x), _mm_packus_epi16(v_dst, v_zero)); } return x; @@ -1657,30 +2217,1501 @@ struct cvtScale_SIMD int operator () (const schar * src, schar * dst, int width, float scale, float shift) const { int x = 0; - float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale); + + if (!USE_SSE2) + return x; + + __m128i v_zero = _mm_setzero_si128(); + __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift); for ( ; x <= width - 8; x += 8) { - int16x8_t v_src = vmovl_s8(vld1_s8(src + x)); - float32x4_t 
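All of the cvtScale_SIMD specializations follow the same shape: widen the source lanes to 32-bit integers (zero-extension for unsigned types, unpack-into-the-high-half followed by an arithmetic right shift for signed ones), convert to float, compute x*scale + shift, then round and pack back down with saturation. A self-contained sketch of that pipeline for a uchar -> uchar conversion (assuming the default round-to-nearest-even MXCSR mode, which is what _mm_cvtps_epi32 uses; this is an illustration, not the patched OpenCV kernel):

#include <emmintrin.h>
#include <cstdio>

int main()
{
    unsigned char src[8] = { 0, 1, 2, 50, 100, 150, 200, 255 };
    unsigned char dst[8];
    const float scale = 2.0f, shift = 10.0f;

    __m128i v_zero  = _mm_setzero_si128();
    __m128  v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift);

    // widen 8 x u8 -> 8 x u16 -> two groups of 4 x s32 -> two groups of 4 x f32
    __m128i v_src  = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i*)src), v_zero);
    __m128  v_lo   = _mm_cvtepi32_ps(_mm_unpacklo_epi16(v_src, v_zero));
    __m128  v_hi   = _mm_cvtepi32_ps(_mm_unpackhi_epi16(v_src, v_zero));

    // y = x*scale + shift, then round and pack back to u8 with saturation
    __m128i v_lo_i = _mm_cvtps_epi32(_mm_add_ps(_mm_mul_ps(v_lo, v_scale), v_shift));
    __m128i v_hi_i = _mm_cvtps_epi32(_mm_add_ps(_mm_mul_ps(v_hi, v_scale), v_shift));
    __m128i v_dst  = _mm_packus_epi16(_mm_packs_epi32(v_lo_i, v_hi_i), v_zero);
    _mm_storel_epi64((__m128i*)dst, v_dst);

    for (int i = 0; i < 8; ++i)             // results above 255 saturate to 255
        printf("%3u -> %3u\n", src[i], dst[i]);
    return 0;
}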
v_dst1 = vaddq_f32(vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_low_s16(v_src))), v_scale), v_shift); - float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_high_s16(v_src))), v_scale), v_shift); + __m128i v_src = _mm_srai_epi16(_mm_unpacklo_epi8(v_zero, _mm_loadl_epi64((__m128i const *)(src + x))), 8); + __m128 v_src_f = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(v_zero, v_src), 16)); + __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift); - int16x8_t v_dst = vcombine_s16(vqmovn_s32(cv_vrndq_s32_f32(v_dst1)), - vqmovn_s32(cv_vrndq_s32_f32(v_dst2))); - vst1_s8(dst + x, vqmovn_s16(v_dst)); + v_src_f = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(v_zero, v_src), 16)); + __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift); + + __m128i v_dst = _mm_packs_epi32(_mm_cvtps_epi32(v_dst_0), + _mm_cvtps_epi32(v_dst_1)); + _mm_storel_epi64((__m128i *)(dst + x), _mm_packs_epi16(v_dst, v_zero)); } return x; } }; +#if CV_SSE4_1 + template <> struct cvtScale_SIMD { + cvtScale_SIMD() + { + haveSSE = checkHardwareSupport(CV_CPU_SSE4_1); + } + int operator () (const schar * src, ushort * dst, int width, float scale, float shift) const { int x = 0; - float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale); + + if (!haveSSE) + return x; + + __m128i v_zero = _mm_setzero_si128(); + __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift); + + for ( ; x <= width - 8; x += 8) + { + __m128i v_src = _mm_srai_epi16(_mm_unpacklo_epi8(v_zero, _mm_loadl_epi64((__m128i const *)(src + x))), 8); + __m128 v_src_f = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(v_zero, v_src), 16)); + __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift); + + v_src_f = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(v_zero, v_src), 16)); + __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift); + + __m128i v_dst = _mm_packus_epi32(_mm_cvtps_epi32(v_dst_0), + _mm_cvtps_epi32(v_dst_1)); + _mm_storeu_si128((__m128i *)(dst + x), v_dst); + } + + return x; + } + + bool haveSSE; +}; + +#endif + +template <> +struct cvtScale_SIMD +{ + int operator () (const schar * src, short * dst, int width, float scale, float shift) const + { + int x = 0; + + if (!USE_SSE2) + return x; + + __m128i v_zero = _mm_setzero_si128(); + __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift); + + for ( ; x <= width - 8; x += 8) + { + __m128i v_src = _mm_srai_epi16(_mm_unpacklo_epi8(v_zero, _mm_loadl_epi64((__m128i const *)(src + x))), 8); + __m128 v_src_f = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(v_zero, v_src), 16)); + __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift); + + v_src_f = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(v_zero, v_src), 16)); + __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift); + + __m128i v_dst = _mm_packs_epi32(_mm_cvtps_epi32(v_dst_0), + _mm_cvtps_epi32(v_dst_1)); + _mm_storeu_si128((__m128i *)(dst + x), v_dst); + } + + return x; + } +}; + +template <> +struct cvtScale_SIMD +{ + int operator () (const schar * src, int * dst, int width, float scale, float shift) const + { + int x = 0; + + if (!USE_SSE2) + return x; + + __m128i v_zero = _mm_setzero_si128(); + __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift); + + for ( ; x <= width - 8; x += 8) + { + __m128i v_src = _mm_srai_epi16(_mm_unpacklo_epi8(v_zero, _mm_loadl_epi64((__m128i const *)(src + x))), 8); + __m128 v_src_f = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(v_zero, v_src), 16)); + __m128 v_dst_0 = 
_mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift); + + v_src_f = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(v_zero, v_src), 16)); + __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift); + + _mm_storeu_si128((__m128i *)(dst + x), _mm_cvtps_epi32(v_dst_0)); + _mm_storeu_si128((__m128i *)(dst + x + 4), _mm_cvtps_epi32(v_dst_1)); + } + + return x; + } +}; + +template <> +struct cvtScale_SIMD +{ + int operator () (const schar * src, float * dst, int width, float scale, float shift) const + { + int x = 0; + + if (!USE_SSE2) + return x; + + __m128i v_zero = _mm_setzero_si128(); + __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift); + + for ( ; x <= width - 8; x += 8) + { + __m128i v_src = _mm_srai_epi16(_mm_unpacklo_epi8(v_zero, _mm_loadl_epi64((__m128i const *)(src + x))), 8); + __m128 v_src_f = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(v_zero, v_src), 16)); + __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift); + + v_src_f = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(v_zero, v_src), 16)); + __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift); + + _mm_storeu_ps(dst + x, v_dst_0); + _mm_storeu_ps(dst + x + 4, v_dst_1); + } + + return x; + } +}; + +template <> +struct cvtScale_SIMD +{ + int operator () (const schar * src, double * dst, int width, double scale, double shift) const + { + int x = 0; + + if (!USE_SSE2) + return x; + + __m128i v_zero = _mm_setzero_si128(); + __m128d v_scale = _mm_set1_pd(scale), v_shift = _mm_set1_pd(shift); + + for ( ; x <= width - 8; x += 8) + { + __m128i v_src = _mm_unpacklo_epi8(v_zero, _mm_loadl_epi64((__m128i const *)(src + x))); + v_src = _mm_srai_epi16(v_src, 8); + + __m128i v_src_s32 = _mm_srai_epi32(_mm_unpacklo_epi16(v_zero, v_src), 16); + __m128d v_dst_0 = _mm_add_pd(_mm_mul_pd(_mm_cvtepi32_pd(v_src_s32), v_scale), v_shift); + __m128d v_dst_1 = _mm_add_pd(_mm_mul_pd(_mm_cvtepi32_pd(_mm_srli_si128(v_src_s32, 8)), v_scale), v_shift); + _mm_storeu_pd(dst + x, v_dst_0); + _mm_storeu_pd(dst + x + 2, v_dst_1); + + v_src_s32 = _mm_srai_epi32(_mm_unpackhi_epi16(v_zero, v_src), 16); + v_dst_0 = _mm_add_pd(_mm_mul_pd(_mm_cvtepi32_pd(v_src_s32), v_scale), v_shift); + v_dst_1 = _mm_add_pd(_mm_mul_pd(_mm_cvtepi32_pd(_mm_srli_si128(v_src_s32, 8)), v_scale), v_shift); + _mm_storeu_pd(dst + x + 4, v_dst_0); + _mm_storeu_pd(dst + x + 6, v_dst_1); + } + + return x; + } +}; + +// from ushort + +template <> +struct cvtScale_SIMD +{ + int operator () (const ushort * src, uchar * dst, int width, float scale, float shift) const + { + int x = 0; + + if (!USE_SSE2) + return x; + + __m128i v_zero = _mm_setzero_si128(); + __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift); + + for ( ; x <= width - 8; x += 8) + { + __m128i v_src = _mm_loadu_si128((__m128i const *)(src + x)); + __m128 v_src_f = _mm_cvtepi32_ps(_mm_unpacklo_epi16(v_src, v_zero)); + __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift); + + v_src_f = _mm_cvtepi32_ps(_mm_unpackhi_epi16(v_src, v_zero)); + __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift); + + __m128i v_dst = _mm_packs_epi32(_mm_cvtps_epi32(v_dst_0), + _mm_cvtps_epi32(v_dst_1)); + _mm_storel_epi64((__m128i *)(dst + x), _mm_packus_epi16(v_dst, v_zero)); + } + + return x; + } +}; + +template <> +struct cvtScale_SIMD +{ + int operator () (const ushort * src, schar * dst, int width, float scale, float shift) const + { + int x = 0; + + if (!USE_SSE2) + return x; + + __m128i v_zero = _mm_setzero_si128(); + __m128 v_scale = 
_mm_set1_ps(scale), v_shift = _mm_set1_ps(shift); + + for ( ; x <= width - 8; x += 8) + { + __m128i v_src = _mm_loadu_si128((__m128i const *)(src + x)); + __m128 v_src_f = _mm_cvtepi32_ps(_mm_unpacklo_epi16(v_src, v_zero)); + __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift); + + v_src_f = _mm_cvtepi32_ps(_mm_unpackhi_epi16(v_src, v_zero)); + __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift); + + __m128i v_dst = _mm_packs_epi32(_mm_cvtps_epi32(v_dst_0), + _mm_cvtps_epi32(v_dst_1)); + _mm_storel_epi64((__m128i *)(dst + x), _mm_packs_epi16(v_dst, v_zero)); + } + + return x; + } +}; + +#if CV_SSE4_1 + +template <> +struct cvtScale_SIMD +{ + cvtScale_SIMD() + { + haveSSE = checkHardwareSupport(CV_CPU_SSE4_1); + } + + int operator () (const ushort * src, ushort * dst, int width, float scale, float shift) const + { + int x = 0; + + if (!haveSSE) + return x; + + __m128i v_zero = _mm_setzero_si128(); + __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift); + + for ( ; x <= width - 8; x += 8) + { + __m128i v_src = _mm_loadu_si128((__m128i const *)(src + x)); + __m128 v_src_f = _mm_cvtepi32_ps(_mm_unpacklo_epi16(v_src, v_zero)); + __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift); + + v_src_f = _mm_cvtepi32_ps(_mm_unpackhi_epi16(v_src, v_zero)); + __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift); + + __m128i v_dst = _mm_packus_epi32(_mm_cvtps_epi32(v_dst_0), + _mm_cvtps_epi32(v_dst_1)); + _mm_storeu_si128((__m128i *)(dst + x), v_dst); + } + + return x; + } + + bool haveSSE; +}; + +#endif + +template <> +struct cvtScale_SIMD +{ + int operator () (const ushort * src, short * dst, int width, float scale, float shift) const + { + int x = 0; + + if (!USE_SSE2) + return x; + + __m128i v_zero = _mm_setzero_si128(); + __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift); + + for ( ; x <= width - 8; x += 8) + { + __m128i v_src = _mm_loadu_si128((__m128i const *)(src + x)); + __m128 v_src_f = _mm_cvtepi32_ps(_mm_unpacklo_epi16(v_src, v_zero)); + __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift); + + v_src_f = _mm_cvtepi32_ps(_mm_unpackhi_epi16(v_src, v_zero)); + __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift); + + __m128i v_dst = _mm_packs_epi32(_mm_cvtps_epi32(v_dst_0), + _mm_cvtps_epi32(v_dst_1)); + _mm_storeu_si128((__m128i *)(dst + x), v_dst); + } + + return x; + } +}; + +template <> +struct cvtScale_SIMD +{ + int operator () (const ushort * src, int * dst, int width, float scale, float shift) const + { + int x = 0; + + if (!USE_SSE2) + return x; + + __m128i v_zero = _mm_setzero_si128(); + __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift); + + for ( ; x <= width - 8; x += 8) + { + __m128i v_src = _mm_loadu_si128((__m128i const *)(src + x)); + __m128 v_src_f = _mm_cvtepi32_ps(_mm_unpacklo_epi16(v_src, v_zero)); + __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift); + + v_src_f = _mm_cvtepi32_ps(_mm_unpackhi_epi16(v_src, v_zero)); + __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift); + + _mm_storeu_si128((__m128i *)(dst + x), _mm_cvtps_epi32(v_dst_0)); + _mm_storeu_si128((__m128i *)(dst + x + 4), _mm_cvtps_epi32(v_dst_1)); + } + + return x; + } +}; + +template <> +struct cvtScale_SIMD +{ + int operator () (const ushort * src, float * dst, int width, float scale, float shift) const + { + int x = 0; + + if (!USE_SSE2) + return x; + + __m128i v_zero = _mm_setzero_si128(); + __m128 v_scale = _mm_set1_ps(scale), v_shift = 
_mm_set1_ps(shift); + + for ( ; x <= width - 8; x += 8) + { + __m128i v_src = _mm_loadu_si128((__m128i const *)(src + x)); + __m128 v_src_f = _mm_cvtepi32_ps(_mm_unpacklo_epi16(v_src, v_zero)); + __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift); + + v_src_f = _mm_cvtepi32_ps(_mm_unpackhi_epi16(v_src, v_zero)); + __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift); + + _mm_storeu_ps(dst + x, v_dst_0); + _mm_storeu_ps(dst + x + 4, v_dst_1); + } + + return x; + } +}; + +template <> +struct cvtScale_SIMD +{ + int operator () (const ushort * src, double * dst, int width, double scale, double shift) const + { + int x = 0; + + if (!USE_SSE2) + return x; + + __m128i v_zero = _mm_setzero_si128(); + __m128d v_scale = _mm_set1_pd(scale), v_shift = _mm_set1_pd(shift); + + for ( ; x <= width - 8; x += 8) + { + __m128i v_src = _mm_loadu_si128((__m128i const *)(src + x)); + + __m128i v_src_s32 = _mm_unpacklo_epi16(v_src, v_zero); + __m128d v_dst_0 = _mm_add_pd(_mm_mul_pd(_mm_cvtepi32_pd(v_src_s32), v_scale), v_shift); + __m128d v_dst_1 = _mm_add_pd(_mm_mul_pd(_mm_cvtepi32_pd(_mm_srli_si128(v_src_s32, 8)), v_scale), v_shift); + _mm_storeu_pd(dst + x, v_dst_0); + _mm_storeu_pd(dst + x + 2, v_dst_1); + + v_src_s32 = _mm_unpackhi_epi16(v_src, v_zero); + v_dst_0 = _mm_add_pd(_mm_mul_pd(_mm_cvtepi32_pd(v_src_s32), v_scale), v_shift); + v_dst_1 = _mm_add_pd(_mm_mul_pd(_mm_cvtepi32_pd(_mm_srli_si128(v_src_s32, 8)), v_scale), v_shift); + _mm_storeu_pd(dst + x + 4, v_dst_0); + _mm_storeu_pd(dst + x + 6, v_dst_1); + } + + return x; + } +}; + +// from short + +template <> +struct cvtScale_SIMD +{ + int operator () (const short * src, uchar * dst, int width, float scale, float shift) const + { + int x = 0; + + if (!USE_SSE2) + return x; + + __m128i v_zero = _mm_setzero_si128(); + __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift); + + for ( ; x <= width - 8; x += 8) + { + __m128i v_src = _mm_loadu_si128((__m128i const *)(src + x)); + __m128 v_src_f = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(v_zero, v_src), 16)); + __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift); + + v_src_f = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(v_zero, v_src), 16)); + __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift); + + __m128i v_dst = _mm_packs_epi32(_mm_cvtps_epi32(v_dst_0), + _mm_cvtps_epi32(v_dst_1)); + _mm_storel_epi64((__m128i *)(dst + x), _mm_packus_epi16(v_dst, v_zero)); + } + + return x; + } +}; + +template <> +struct cvtScale_SIMD +{ + int operator () (const short * src, schar * dst, int width, float scale, float shift) const + { + int x = 0; + + if (!USE_SSE2) + return x; + + __m128i v_zero = _mm_setzero_si128(); + __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift); + + for ( ; x <= width - 8; x += 8) + { + __m128i v_src = _mm_loadu_si128((__m128i const *)(src + x)); + __m128 v_src_f = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(v_zero, v_src), 16)); + __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift); + + v_src_f = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(v_zero, v_src), 16)); + __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift); + + __m128i v_dst = _mm_packs_epi32(_mm_cvtps_epi32(v_dst_0), + _mm_cvtps_epi32(v_dst_1)); + _mm_storel_epi64((__m128i *)(dst + x), _mm_packs_epi16(v_dst, v_zero)); + } + + return x; + } +}; + +#if CV_SSE4_1 + +template <> +struct cvtScale_SIMD +{ + cvtScale_SIMD() + { + haveSSE = checkHardwareSupport(CV_CPU_SSE4_1); + } + + int operator () 
(const short * src, ushort * dst, int width, float scale, float shift) const + { + int x = 0; + + if (!haveSSE) + return x; + + __m128i v_zero = _mm_setzero_si128(); + __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift); + + for ( ; x <= width - 8; x += 8) + { + __m128i v_src = _mm_loadu_si128((__m128i const *)(src + x)); + __m128 v_src_f = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(v_zero, v_src), 16)); + __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift); + + v_src_f = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(v_zero, v_src), 16)); + __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift); + + __m128i v_dst = _mm_packus_epi32(_mm_cvtps_epi32(v_dst_0), + _mm_cvtps_epi32(v_dst_1)); + _mm_storeu_si128((__m128i *)(dst + x), v_dst); + } + + return x; + } + + bool haveSSE; +}; + +#endif + +template <> +struct cvtScale_SIMD +{ + int operator () (const short * src, short * dst, int width, float scale, float shift) const + { + int x = 0; + + if (!USE_SSE2) + return x; + + __m128i v_zero = _mm_setzero_si128(); + __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift); + + for ( ; x <= width - 8; x += 8) + { + __m128i v_src = _mm_loadu_si128((__m128i const *)(src + x)); + __m128 v_src_f = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(v_zero, v_src), 16)); + __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift); + + v_src_f = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(v_zero, v_src), 16)); + __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift); + + __m128i v_dst = _mm_packs_epi32(_mm_cvtps_epi32(v_dst_0), + _mm_cvtps_epi32(v_dst_1)); + _mm_storeu_si128((__m128i *)(dst + x), v_dst); + } + + return x; + } +}; + +template <> +struct cvtScale_SIMD +{ + int operator () (const short * src, int * dst, int width, float scale, float shift) const + { + int x = 0; + + if (!USE_SSE2) + return x; + + __m128i v_zero = _mm_setzero_si128(); + __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift); + + for ( ; x <= width - 8; x += 8) + { + __m128i v_src = _mm_loadu_si128((__m128i const *)(src + x)); + __m128 v_src_f = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(v_zero, v_src), 16)); + __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift); + + v_src_f = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(v_zero, v_src), 16)); + __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift); + + _mm_storeu_si128((__m128i *)(dst + x), _mm_cvtps_epi32(v_dst_0)); + _mm_storeu_si128((__m128i *)(dst + x + 4), _mm_cvtps_epi32(v_dst_1)); + } + + return x; + } +}; + +template <> +struct cvtScale_SIMD +{ + int operator () (const short * src, float * dst, int width, float scale, float shift) const + { + int x = 0; + + if (!USE_SSE2) + return x; + + __m128i v_zero = _mm_setzero_si128(); + __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift); + + for ( ; x <= width - 8; x += 8) + { + __m128i v_src = _mm_loadu_si128((__m128i const *)(src + x)); + __m128 v_src_f = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(v_zero, v_src), 16)); + __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift); + + v_src_f = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(v_zero, v_src), 16)); + __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift); + + _mm_storeu_ps(dst + x, v_dst_0); + _mm_storeu_ps(dst + x + 4, v_dst_1); + } + + return x; + } +}; + +template <> +struct cvtScale_SIMD +{ + int operator () (const short * src, double * dst, int width, double scale, double shift) 
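The ushort-destination specializations are guarded by CV_SSE4_1 because packing signed 32-bit values to unsigned 16-bit with saturation needs _mm_packus_epi32, which only exists from SSE4.1 onward; on older CPUs the guarded blocks are skipped and the scalar loop runs instead. For reference, a hedged sketch of the classic SSE2 bias workaround (illustration only, not something this patch adds):

#include <emmintrin.h>
#include <cstdio>

static inline __m128i packus_epi32_sse2(__m128i a, __m128i b)
{
    const __m128i bias32 = _mm_set1_epi32(32768);
    const __m128i bias16 = _mm_set1_epi16((short)-32768);   // +32768 modulo 2^16
    // shift [0,65535] down to [-32768,32767], pack with signed saturation, shift back
    __m128i r = _mm_packs_epi32(_mm_sub_epi32(a, bias32), _mm_sub_epi32(b, bias32));
    return _mm_add_epi16(r, bias16);
}

int main()
{
    int src[8] = { -5, 0, 1, 1000, 32768, 65535, 70000, 123456 };
    unsigned short dst[8];

    __m128i lo = _mm_loadu_si128((const __m128i*)src);
    __m128i hi = _mm_loadu_si128((const __m128i*)(src + 4));
    _mm_storeu_si128((__m128i*)dst, packus_epi32_sse2(lo, hi));

    for (int i = 0; i < 8; ++i)             // expect 0,0,1,1000,32768,65535,65535,65535
        printf("%d -> %u\n", src[i], (unsigned)dst[i]);
    return 0;
}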
const + { + int x = 0; + + if (!USE_SSE2) + return x; + + __m128i v_zero = _mm_setzero_si128(); + __m128d v_scale = _mm_set1_pd(scale), v_shift = _mm_set1_pd(shift); + + for ( ; x <= width - 8; x += 8) + { + __m128i v_src = _mm_loadu_si128((__m128i const *)(src + x)); + + __m128i v_src_s32 = _mm_srai_epi32(_mm_unpacklo_epi16(v_zero, v_src), 16); + __m128d v_dst_0 = _mm_add_pd(_mm_mul_pd(_mm_cvtepi32_pd(v_src_s32), v_scale), v_shift); + __m128d v_dst_1 = _mm_add_pd(_mm_mul_pd(_mm_cvtepi32_pd(_mm_srli_si128(v_src_s32, 8)), v_scale), v_shift); + _mm_storeu_pd(dst + x, v_dst_0); + _mm_storeu_pd(dst + x + 2, v_dst_1); + + v_src_s32 = _mm_srai_epi32(_mm_unpackhi_epi16(v_zero, v_src), 16); + v_dst_0 = _mm_add_pd(_mm_mul_pd(_mm_cvtepi32_pd(v_src_s32), v_scale), v_shift); + v_dst_1 = _mm_add_pd(_mm_mul_pd(_mm_cvtepi32_pd(_mm_srli_si128(v_src_s32, 8)), v_scale), v_shift); + _mm_storeu_pd(dst + x + 4, v_dst_0); + _mm_storeu_pd(dst + x + 6, v_dst_1); + } + + return x; + } +}; + +// from int + +template <> +struct cvtScale_SIMD +{ + int operator () (const int * src, uchar * dst, int width, float scale, float shift) const + { + int x = 0; + + if (!USE_SSE2) + return x; + + __m128i v_zero = _mm_setzero_si128(); + __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift); + + for ( ; x <= width - 8; x += 8) + { + __m128i v_src = _mm_loadu_si128((__m128i const *)(src + x)); + __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(_mm_cvtepi32_ps(v_src), v_scale), v_shift); + + v_src = _mm_loadu_si128((__m128i const *)(src + x + 4)); + __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(_mm_cvtepi32_ps(v_src), v_scale), v_shift); + + __m128i v_dst = _mm_packs_epi32(_mm_cvtps_epi32(v_dst_0), + _mm_cvtps_epi32(v_dst_1)); + _mm_storel_epi64((__m128i *)(dst + x), _mm_packus_epi16(v_dst, v_zero)); + } + + return x; + } +}; + +template <> +struct cvtScale_SIMD +{ + int operator () (const int * src, schar * dst, int width, float scale, float shift) const + { + int x = 0; + + if (!USE_SSE2) + return x; + + __m128i v_zero = _mm_setzero_si128(); + __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift); + + for ( ; x <= width - 8; x += 8) + { + __m128i v_src = _mm_loadu_si128((__m128i const *)(src + x)); + __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(_mm_cvtepi32_ps(v_src), v_scale), v_shift); + + v_src = _mm_loadu_si128((__m128i const *)(src + x + 4)); + __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(_mm_cvtepi32_ps(v_src), v_scale), v_shift); + + __m128i v_dst = _mm_packs_epi32(_mm_cvtps_epi32(v_dst_0), + _mm_cvtps_epi32(v_dst_1)); + _mm_storel_epi64((__m128i *)(dst + x), _mm_packs_epi16(v_dst, v_zero)); + } + + return x; + } +}; + +#if CV_SSE4_1 + +template <> +struct cvtScale_SIMD +{ + cvtScale_SIMD() + { + haveSSE = checkHardwareSupport(CV_CPU_SSE4_1); + } + + int operator () (const int * src, ushort * dst, int width, float scale, float shift) const + { + int x = 0; + + if (!haveSSE) + return x; + + __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift); + + for ( ; x <= width - 8; x += 8) + { + __m128i v_src = _mm_loadu_si128((__m128i const *)(src + x)); + __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(_mm_cvtepi32_ps(v_src), v_scale), v_shift); + + v_src = _mm_loadu_si128((__m128i const *)(src + x + 4)); + __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(_mm_cvtepi32_ps(v_src), v_scale), v_shift); + + __m128i v_dst = _mm_packus_epi32(_mm_cvtps_epi32(v_dst_0), + _mm_cvtps_epi32(v_dst_1)); + _mm_storeu_si128((__m128i *)(dst + x), v_dst); + } + + return x; + } + + bool haveSSE; +}; + +#endif + +template <> +struct cvtScale_SIMD +{ + int 
operator () (const int * src, short * dst, int width, float scale, float shift) const + { + int x = 0; + + if (!USE_SSE2) + return x; + + __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift); + + for ( ; x <= width - 8; x += 8) + { + __m128i v_src = _mm_loadu_si128((__m128i const *)(src + x)); + __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(_mm_cvtepi32_ps(v_src), v_scale), v_shift); + + v_src = _mm_loadu_si128((__m128i const *)(src + x + 4)); + __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(_mm_cvtepi32_ps(v_src), v_scale), v_shift); + + __m128i v_dst = _mm_packs_epi32(_mm_cvtps_epi32(v_dst_0), + _mm_cvtps_epi32(v_dst_1)); + _mm_storeu_si128((__m128i *)(dst + x), v_dst); + } + + return x; + } +}; + +template <> +struct cvtScale_SIMD +{ + int operator () (const int * src, int * dst, int width, double scale, double shift) const + { + int x = 0; + + if (!USE_SSE2) + return x; + + __m128d v_scale = _mm_set1_pd(scale), v_shift = _mm_set1_pd(shift); + + for ( ; x <= width - 4; x += 4) + { + __m128i v_src = _mm_loadu_si128((__m128i const *)(src + x)); + __m128d v_dst_0 = _mm_add_pd(_mm_mul_pd(_mm_cvtepi32_pd(v_src), v_scale), v_shift); + + v_src = _mm_srli_si128(v_src, 8); + __m128d v_dst_1 = _mm_add_pd(_mm_mul_pd(_mm_cvtepi32_pd(v_src), v_scale), v_shift); + + __m128 v_dst = _mm_movelh_ps(_mm_castsi128_ps(_mm_cvtpd_epi32(v_dst_0)), + _mm_castsi128_ps(_mm_cvtpd_epi32(v_dst_1))); + + _mm_storeu_si128((__m128i *)(dst + x), _mm_castps_si128(v_dst)); + } + + return x; + } +}; + +template <> +struct cvtScale_SIMD +{ + int operator () (const int * src, float * dst, int width, double scale, double shift) const + { + int x = 0; + + if (!USE_SSE2) + return x; + + __m128d v_scale = _mm_set1_pd(scale), v_shift = _mm_set1_pd(shift); + + for ( ; x <= width - 4; x += 4) + { + __m128i v_src = _mm_loadu_si128((__m128i const *)(src + x)); + __m128d v_dst_0 = _mm_add_pd(_mm_mul_pd(_mm_cvtepi32_pd(v_src), v_scale), v_shift); + + v_src = _mm_srli_si128(v_src, 8); + __m128d v_dst_1 = _mm_add_pd(_mm_mul_pd(_mm_cvtepi32_pd(v_src), v_scale), v_shift); + + _mm_storeu_ps(dst + x, _mm_movelh_ps(_mm_cvtpd_ps(v_dst_0), + _mm_cvtpd_ps(v_dst_1))); + } + + return x; + } +}; + +template <> +struct cvtScale_SIMD +{ + int operator () (const int * src, double * dst, int width, double scale, double shift) const + { + int x = 0; + + if (!USE_SSE2) + return x; + + __m128d v_scale = _mm_set1_pd(scale), v_shift = _mm_set1_pd(shift); + + for ( ; x <= width - 4; x += 4) + { + __m128i v_src = _mm_loadu_si128((__m128i const *)(src + x)); + __m128d v_dst_0 = _mm_add_pd(_mm_mul_pd(_mm_cvtepi32_pd(v_src), v_scale), v_shift); + + v_src = _mm_srli_si128(v_src, 8); + __m128d v_dst_1 = _mm_add_pd(_mm_mul_pd(_mm_cvtepi32_pd(v_src), v_scale), v_shift); + + _mm_storeu_pd(dst + x, v_dst_0); + _mm_storeu_pd(dst + x + 2, v_dst_1); + } + + return x; + } +}; + +// from float + +template <> +struct cvtScale_SIMD +{ + int operator () (const float * src, uchar * dst, int width, float scale, float shift) const + { + int x = 0; + + if (!USE_SSE2) + return x; + + __m128i v_zero = _mm_setzero_si128(); + __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift); + + for ( ; x <= width - 8; x += 8) + { + __m128 v_src = _mm_loadu_ps(src + x); + __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(v_src, v_scale), v_shift); + + v_src = _mm_loadu_ps(src + x + 4); + __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src, v_scale), v_shift); + + __m128i v_dst = _mm_packs_epi32(_mm_cvtps_epi32(v_dst_0), + _mm_cvtps_epi32(v_dst_1)); + _mm_storel_epi64((__m128i *)(dst + x), 
_mm_packus_epi16(v_dst, v_zero)); + } + + return x; + } +}; + +template <> +struct cvtScale_SIMD +{ + int operator () (const float * src, schar * dst, int width, float scale, float shift) const + { + int x = 0; + + if (!USE_SSE2) + return x; + + __m128i v_zero = _mm_setzero_si128(); + __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift); + + for ( ; x <= width - 8; x += 8) + { + __m128 v_src = _mm_loadu_ps(src + x); + __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(v_src, v_scale), v_shift); + + v_src = _mm_loadu_ps(src + x + 4); + __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src, v_scale), v_shift); + + __m128i v_dst = _mm_packs_epi32(_mm_cvtps_epi32(v_dst_0), + _mm_cvtps_epi32(v_dst_1)); + _mm_storel_epi64((__m128i *)(dst + x), _mm_packs_epi16(v_dst, v_zero)); + } + + return x; + } +}; + +#if CV_SSE4_1 + +template <> +struct cvtScale_SIMD +{ + cvtScale_SIMD() + { + haveSSE = checkHardwareSupport(CV_CPU_SSE4_1); + } + + int operator () (const float * src, ushort * dst, int width, float scale, float shift) const + { + int x = 0; + + if (!haveSSE) + return x; + + __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift); + + for ( ; x <= width - 8; x += 8) + { + __m128 v_src = _mm_loadu_ps(src + x); + __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(v_src, v_scale), v_shift); + + v_src = _mm_loadu_ps(src + x + 4); + __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src, v_scale), v_shift); + + __m128i v_dst = _mm_packus_epi32(_mm_cvtps_epi32(v_dst_0), + _mm_cvtps_epi32(v_dst_1)); + _mm_storeu_si128((__m128i *)(dst + x), v_dst); + } + + return x; + } + + bool haveSSE; +}; + +#endif + +template <> +struct cvtScale_SIMD +{ + int operator () (const float * src, short * dst, int width, float scale, float shift) const + { + int x = 0; + + if (!USE_SSE2) + return x; + + __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift); + + for ( ; x <= width - 8; x += 8) + { + __m128 v_src = _mm_loadu_ps(src + x); + __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(v_src, v_scale), v_shift); + + v_src = _mm_loadu_ps(src + x + 4); + __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src, v_scale), v_shift); + + __m128i v_dst = _mm_packs_epi32(_mm_cvtps_epi32(v_dst_0), + _mm_cvtps_epi32(v_dst_1)); + _mm_storeu_si128((__m128i *)(dst + x), v_dst); + } + + return x; + } +}; + +template <> +struct cvtScale_SIMD +{ + int operator () (const float * src, int * dst, int width, float scale, float shift) const + { + int x = 0; + + if (!USE_SSE2) + return x; + + __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift); + + for ( ; x <= width - 8; x += 8) + { + __m128 v_src = _mm_loadu_ps(src + x); + __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(v_src, v_scale), v_shift); + + v_src = _mm_loadu_ps(src + x + 4); + __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src, v_scale), v_shift); + + _mm_storeu_si128((__m128i *)(dst + x), _mm_cvtps_epi32(v_dst_0)); + _mm_storeu_si128((__m128i *)(dst + x + 4), _mm_cvtps_epi32(v_dst_1)); + } + + return x; + } +}; + +template <> +struct cvtScale_SIMD +{ + int operator () (const float * src, float * dst, int width, float scale, float shift) const + { + int x = 0; + + if (!USE_SSE2) + return x; + + __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift); + + for ( ; x <= width - 4; x += 4) + { + __m128 v_src = _mm_loadu_ps(src + x); + __m128 v_dst = _mm_add_ps(_mm_mul_ps(v_src, v_scale), v_shift); + _mm_storeu_ps(dst + x, v_dst); + } + + return x; + } +}; + +template <> +struct cvtScale_SIMD +{ + int operator () (const float * src, double * dst, int width, double scale, double shift) const + { + int x = 
0; + + if (!USE_SSE2) + return x; + + __m128d v_scale = _mm_set1_pd(scale), v_shift = _mm_set1_pd(shift); + + for ( ; x <= width - 4; x += 4) + { + __m128 v_src = _mm_loadu_ps(src + x); + __m128d v_dst_0 = _mm_add_pd(_mm_mul_pd(_mm_cvtps_pd(v_src), v_scale), v_shift); + v_src = _mm_castsi128_ps(_mm_srli_si128(_mm_castps_si128(v_src), 8)); + __m128d v_dst_1 = _mm_add_pd(_mm_mul_pd(_mm_cvtps_pd(v_src), v_scale), v_shift); + + _mm_storeu_pd(dst + x, v_dst_0); + _mm_storeu_pd(dst + x + 2, v_dst_1); + } + + return x; + } +}; + +// from double + +template <> +struct cvtScale_SIMD +{ + int operator () (const double * src, uchar * dst, int width, float scale, float shift) const + { + int x = 0; + + if (!USE_SSE2) + return x; + + __m128i v_zero = _mm_setzero_si128(); + __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift); + + for ( ; x <= width - 8; x += 8) + { + __m128 v_src = _mm_movelh_ps(_mm_cvtpd_ps(_mm_loadu_pd(src + x)), + _mm_cvtpd_ps(_mm_loadu_pd(src + x + 2))); + __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(v_src, v_scale), v_shift); + + v_src = _mm_movelh_ps(_mm_cvtpd_ps(_mm_loadu_pd(src + x + 4)), + _mm_cvtpd_ps(_mm_loadu_pd(src + x + 6))); + __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src, v_scale), v_shift); + + __m128i v_dst = _mm_packs_epi32(_mm_cvtps_epi32(v_dst_0), + _mm_cvtps_epi32(v_dst_1)); + _mm_storel_epi64((__m128i *)(dst + x), _mm_packus_epi16(v_dst, v_zero)); + } + + return x; + } +}; + +template <> +struct cvtScale_SIMD +{ + int operator () (const double * src, schar * dst, int width, float scale, float shift) const + { + int x = 0; + + if (!USE_SSE2) + return x; + + __m128i v_zero = _mm_setzero_si128(); + __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift); + + for ( ; x <= width - 8; x += 8) + { + __m128 v_src = _mm_movelh_ps(_mm_cvtpd_ps(_mm_loadu_pd(src + x)), + _mm_cvtpd_ps(_mm_loadu_pd(src + x + 2))); + __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(v_src, v_scale), v_shift); + + v_src = _mm_movelh_ps(_mm_cvtpd_ps(_mm_loadu_pd(src + x + 4)), + _mm_cvtpd_ps(_mm_loadu_pd(src + x + 6))); + __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src, v_scale), v_shift); + + __m128i v_dst = _mm_packs_epi32(_mm_cvtps_epi32(v_dst_0), + _mm_cvtps_epi32(v_dst_1)); + _mm_storel_epi64((__m128i *)(dst + x), _mm_packs_epi16(v_dst, v_zero)); + } + + return x; + } +}; + +#if CV_SSE4_1 + +template <> +struct cvtScale_SIMD +{ + cvtScale_SIMD() + { + haveSSE = checkHardwareSupport(CV_CPU_SSE4_1); + } + + int operator () (const double * src, ushort * dst, int width, float scale, float shift) const + { + int x = 0; + + if (!haveSSE) + return x; + + __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift); + + for ( ; x <= width - 8; x += 8) + { + __m128 v_src = _mm_movelh_ps(_mm_cvtpd_ps(_mm_loadu_pd(src + x)), + _mm_cvtpd_ps(_mm_loadu_pd(src + x + 2))); + __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(v_src, v_scale), v_shift); + + v_src = _mm_movelh_ps(_mm_cvtpd_ps(_mm_loadu_pd(src + x + 4)), + _mm_cvtpd_ps(_mm_loadu_pd(src + x + 6))); + __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src, v_scale), v_shift); + + __m128i v_dst = _mm_packus_epi32(_mm_cvtps_epi32(v_dst_0), + _mm_cvtps_epi32(v_dst_1)); + _mm_storeu_si128((__m128i *)(dst + x), v_dst); + } + + return x; + } + + bool haveSSE; +}; + +#endif + +template <> +struct cvtScale_SIMD +{ + int operator () (const double * src, short * dst, int width, float scale, float shift) const + { + int x = 0; + + if (!USE_SSE2) + return x; + + __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift); + + for ( ; x <= width - 8; x += 8) + { + 
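// A standalone sketch (illustrative only, not OpenCV code) of the SSE2 idiom used by
// the "from double" specializations above, and again by the phase()/cartToPolar changes
// further below: _mm_cvtpd_ps fills only the low two float lanes, so two conversions are
// merged with _mm_movelh_ps, and the reverse expansion uses _mm_cvtps_pd plus an 8-byte
// register shift to reach the upper float pair. Variable names here are made up.
#include <emmintrin.h>   // SSE2
#include <cstdio>

int main()
{
    double src[4] = { 1.5, -2.25, 3.75, 1000.0 };
    float  f[4];
    double back[4];

    // pack: 4 doubles -> 4 floats in one register
    __m128 lo  = _mm_cvtpd_ps(_mm_loadu_pd(src));
    __m128 hi  = _mm_cvtpd_ps(_mm_loadu_pd(src + 2));
    __m128 v_f = _mm_movelh_ps(lo, hi);
    _mm_storeu_ps(f, v_f);

    // unpack: 4 floats -> 4 doubles; shifting by 8 bytes exposes lanes 2..3
    _mm_storeu_pd(back,     _mm_cvtps_pd(v_f));
    _mm_storeu_pd(back + 2, _mm_cvtps_pd(_mm_castsi128_ps(
                                _mm_srli_si128(_mm_castps_si128(v_f), 8))));

    for (int i = 0; i < 4; i++)
        printf("%g -> %g -> %g\n", src[i], f[i], back[i]);
    return 0;
}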
__m128 v_src = _mm_movelh_ps(_mm_cvtpd_ps(_mm_loadu_pd(src + x)), + _mm_cvtpd_ps(_mm_loadu_pd(src + x + 2))); + __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(v_src, v_scale), v_shift); + + v_src = _mm_movelh_ps(_mm_cvtpd_ps(_mm_loadu_pd(src + x + 4)), + _mm_cvtpd_ps(_mm_loadu_pd(src + x + 6))); + __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src, v_scale), v_shift); + + __m128i v_dst = _mm_packs_epi32(_mm_cvtps_epi32(v_dst_0), + _mm_cvtps_epi32(v_dst_1)); + _mm_storeu_si128((__m128i *)(dst + x), v_dst); + } + + return x; + } +}; + +template <> +struct cvtScale_SIMD +{ + int operator () (const double * src, int * dst, int width, double scale, double shift) const + { + int x = 0; + + if (!USE_SSE2) + return x; + + __m128d v_scale = _mm_set1_pd(scale), v_shift = _mm_set1_pd(shift); + + for ( ; x <= width - 4; x += 4) + { + __m128d v_src = _mm_loadu_pd(src + x); + __m128d v_dst0 = _mm_add_pd(_mm_mul_pd(v_src, v_scale), v_shift); + + v_src = _mm_loadu_pd(src + x + 2); + __m128d v_dst1 = _mm_add_pd(_mm_mul_pd(v_src, v_scale), v_shift); + + __m128 v_dst = _mm_movelh_ps(_mm_castsi128_ps(_mm_cvtpd_epi32(v_dst0)), + _mm_castsi128_ps(_mm_cvtpd_epi32(v_dst1))); + + _mm_storeu_si128((__m128i *)(dst + x), _mm_castps_si128(v_dst)); + } + + return x; + } +}; + +template <> +struct cvtScale_SIMD +{ + int operator () (const double * src, float * dst, int width, double scale, double shift) const + { + int x = 0; + + if (!USE_SSE2) + return x; + + __m128d v_scale = _mm_set1_pd(scale), v_shift = _mm_set1_pd(shift); + + for ( ; x <= width - 4; x += 4) + { + __m128d v_src = _mm_loadu_pd(src + x); + __m128d v_dst0 = _mm_add_pd(_mm_mul_pd(v_src, v_scale), v_shift); + + v_src = _mm_loadu_pd(src + x + 2); + __m128d v_dst1 = _mm_add_pd(_mm_mul_pd(v_src, v_scale), v_shift); + + __m128 v_dst = _mm_movelh_ps(_mm_cvtpd_ps(v_dst0), + _mm_cvtpd_ps(v_dst1)); + + _mm_storeu_ps(dst + x, v_dst); + } + + return x; + } +}; + +template <> +struct cvtScale_SIMD +{ + int operator () (const double * src, double * dst, int width, double scale, double shift) const + { + int x = 0; + + if (!USE_SSE2) + return x; + + __m128d v_scale = _mm_set1_pd(scale), v_shift = _mm_set1_pd(shift); + + for ( ; x <= width - 2; x += 2) + { + __m128d v_src = _mm_loadu_pd(src + x); + __m128d v_dst = _mm_add_pd(_mm_mul_pd(v_src, v_scale), v_shift); + _mm_storeu_pd(dst + x, v_dst); + } + + return x; + } +}; + +#elif CV_NEON + +// from uchar + +template <> +struct cvtScale_SIMD +{ + int operator () (const uchar * src, uchar * dst, int width, float scale, float shift) const + { + int x = 0; + float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale); + + for ( ; x <= width - 8; x += 8) + { + uint16x8_t v_src = vmovl_u8(vld1_u8(src + x)); + float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_low_u16(v_src))), v_scale), v_shift); + float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_src))), v_scale), v_shift); + + uint16x8_t v_dst = vcombine_u16(vqmovn_u32(cv_vrndq_u32_f32(v_dst1)), + vqmovn_u32(cv_vrndq_u32_f32(v_dst2))); + vst1_u8(dst + x, vqmovn_u16(v_dst)); + } + + return x; + } +}; + +template <> +struct cvtScale_SIMD +{ + int operator () (const uchar * src, schar * dst, int width, float scale, float shift) const + { + int x = 0; + float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale); + + for ( ; x <= width - 8; x += 8) + { + uint16x8_t v_src = vmovl_u8(vld1_u8(src + x)); + float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_low_u16(v_src))), v_scale), v_shift); + 
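// A standalone sketch (illustrative only, not OpenCV code) of the NEON widening pattern
// the CV_NEON specializations use: vmovl_u8 widens 8 bytes to 16-bit lanes, vmovl_u16
// widens each half to 32-bit, vcvtq_f32_u32 converts to float, and the result is packed
// back with the saturating narrows vqmovn_u32/vqmovn_u16. cv_vrndq_u32_f32 in the patch
// rounds to nearest; adding 0.5 before the truncating conversion is assumed equivalent
// here. Builds on an ARM target with NEON enabled (e.g. -mfpu=neon).
#include <arm_neon.h>
#include <cstdint>
#include <cstdio>

int main()
{
    uint8_t src[8] = { 0, 1, 2, 3, 250, 251, 252, 253 };
    uint8_t dst[8];
    const float scale = 1.1f, shift = 0.5f;

    float32x4_t v_scale = vdupq_n_f32(scale), v_shift = vdupq_n_f32(shift);
    float32x4_t v_half  = vdupq_n_f32(0.5f);

    uint16x8_t  v16 = vmovl_u8(vld1_u8(src));                       // 8 x u8  -> 8 x u16
    float32x4_t lo  = vcvtq_f32_u32(vmovl_u16(vget_low_u16(v16)));  // 4 x u16 -> 4 x f32
    float32x4_t hi  = vcvtq_f32_u32(vmovl_u16(vget_high_u16(v16)));

    lo = vaddq_f32(vmulq_f32(lo, v_scale), v_shift);
    hi = vaddq_f32(vmulq_f32(hi, v_scale), v_shift);

    uint16x8_t packed = vcombine_u16(vqmovn_u32(vcvtq_u32_f32(vaddq_f32(lo, v_half))),
                                     vqmovn_u32(vcvtq_u32_f32(vaddq_f32(hi, v_half))));
    vst1_u8(dst, vqmovn_u16(packed));                               // saturating narrow to u8

    for (int i = 0; i < 8; i++)
        printf("%u -> %u\n", (unsigned)src[i], (unsigned)dst[i]);
    return 0;
}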
float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_src))), v_scale), v_shift); + + int16x8_t v_dst = vcombine_s16(vqmovn_s32(cv_vrndq_s32_f32(v_dst1)), + vqmovn_s32(cv_vrndq_s32_f32(v_dst2))); + vst1_s8(dst + x, vqmovn_s16(v_dst)); + } + + return x; + } +}; + +template <> +struct cvtScale_SIMD +{ + int operator () (const uchar * src, ushort * dst, int width, float scale, float shift) const + { + int x = 0; + float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale); + + for ( ; x <= width - 8; x += 8) + { + uint16x8_t v_src = vmovl_u8(vld1_u8(src + x)); + float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_low_u16(v_src))), v_scale), v_shift); + float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_src))), v_scale), v_shift); + + uint16x8_t v_dst = vcombine_u16(vqmovn_u32(cv_vrndq_u32_f32(v_dst1)), + vqmovn_u32(cv_vrndq_u32_f32(v_dst2))); + vst1q_u16(dst + x, v_dst); + } + + return x; + } +}; + +template <> +struct cvtScale_SIMD +{ + int operator () (const uchar * src, short * dst, int width, float scale, float shift) const + { + int x = 0; + float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale); + + for ( ; x <= width - 8; x += 8) + { + uint16x8_t v_src = vmovl_u8(vld1_u8(src + x)); + float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_low_u16(v_src))), v_scale), v_shift); + float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_src))), v_scale), v_shift); + + int16x8_t v_dst = vcombine_s16(vqmovn_s32(cv_vrndq_s32_f32(v_dst1)), + vqmovn_s32(cv_vrndq_s32_f32(v_dst2))); + vst1q_s16(dst + x, v_dst); + } + + return x; + } +}; + +template <> +struct cvtScale_SIMD +{ + int operator () (const uchar * src, int * dst, int width, float scale, float shift) const + { + int x = 0; + float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale); + + for ( ; x <= width - 8; x += 8) + { + uint16x8_t v_src = vmovl_u8(vld1_u8(src + x)); + float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_low_u16(v_src))), v_scale), v_shift); + float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_src))), v_scale), v_shift); + + vst1q_s32(dst + x, cv_vrndq_s32_f32(v_dst1)); + vst1q_s32(dst + x + 4, cv_vrndq_s32_f32(v_dst2)); + } + + return x; + } +}; + +template <> +struct cvtScale_SIMD +{ + int operator () (const uchar * src, float * dst, int width, float scale, float shift) const + { + int x = 0; + float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale); + + for ( ; x <= width - 8; x += 8) + { + uint16x8_t v_src = vmovl_u8(vld1_u8(src + x)); + vst1q_f32(dst + x, vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_low_u16(v_src))), v_scale), v_shift)); + vst1q_f32(dst + x + 4, vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_src))), v_scale), v_shift)); + } + + return x; + } +}; + +// from schar + +template <> +struct cvtScale_SIMD +{ + int operator () (const schar * src, uchar * dst, int width, float scale, float shift) const + { + int x = 0; + float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale); + + for ( ; x <= width - 8; x += 8) + { + int16x8_t v_src = vmovl_s8(vld1_s8(src + x)); + float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_low_s16(v_src))), v_scale), v_shift); + float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_high_s16(v_src))), v_scale), v_shift); + + uint16x8_t v_dst = vcombine_u16(vqmovn_u32(cv_vrndq_u32_f32(v_dst1)), + 
vqmovn_u32(cv_vrndq_u32_f32(v_dst2))); + vst1_u8(dst + x, vqmovn_u16(v_dst)); + } + + return x; + } +}; + +template <> +struct cvtScale_SIMD +{ + int operator () (const schar * src, schar * dst, int width, float scale, float shift) const + { + int x = 0; + float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale); + + for ( ; x <= width - 8; x += 8) + { + int16x8_t v_src = vmovl_s8(vld1_s8(src + x)); + float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_low_s16(v_src))), v_scale), v_shift); + float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_high_s16(v_src))), v_scale), v_shift); + + int16x8_t v_dst = vcombine_s16(vqmovn_s32(cv_vrndq_s32_f32(v_dst1)), + vqmovn_s32(cv_vrndq_s32_f32(v_dst2))); + vst1_s8(dst + x, vqmovn_s16(v_dst)); + } + + return x; + } +}; + +template <> +struct cvtScale_SIMD +{ + int operator () (const schar * src, ushort * dst, int width, float scale, float shift) const + { + int x = 0; + float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale); for ( ; x <= width - 8; x += 8) { @@ -2294,26 +4325,44 @@ cvtScale_( const short* src, size_t sstep, { int x = 0; - #if CV_SSE2 - if(USE_SSE2)//~5X + #if CV_AVX2 + if (USE_AVX2) + { + __m256 scale256 = _mm256_set1_ps(scale); + __m256 shift256 = _mm256_set1_ps(shift); + const int shuffle = 0xD8; + + for ( ; x <= size.width - 16; x += 16) { - __m128 scale128 = _mm_set1_ps (scale); - __m128 shift128 = _mm_set1_ps (shift); - for(; x <= size.width - 8; x += 8 ) - { - __m128i r0 = _mm_loadl_epi64((const __m128i*)(src + x)); - __m128i r1 = _mm_loadl_epi64((const __m128i*)(src + x + 4)); - __m128 rf0 =_mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(r0, r0), 16)); - __m128 rf1 =_mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(r1, r1), 16)); - rf0 = _mm_add_ps(_mm_mul_ps(rf0, scale128), shift128); - rf1 = _mm_add_ps(_mm_mul_ps(rf1, scale128), shift128); - r0 = _mm_cvtps_epi32(rf0); - r1 = _mm_cvtps_epi32(rf1); + __m256i v_src = _mm256_loadu_si256((const __m256i *)(src + x)); + v_src = _mm256_permute4x64_epi64(v_src, shuffle); + __m256i v_src_lo = _mm256_srai_epi32(_mm256_unpacklo_epi16(v_src, v_src), 16); + __m256i v_src_hi = _mm256_srai_epi32(_mm256_unpackhi_epi16(v_src, v_src), 16); + __m256 v_dst0 = _mm256_add_ps(_mm256_mul_ps(_mm256_cvtepi32_ps(v_src_lo), scale256), shift256); + __m256 v_dst1 = _mm256_add_ps(_mm256_mul_ps(_mm256_cvtepi32_ps(v_src_hi), scale256), shift256); + _mm256_storeu_si256((__m256i *)(dst + x), _mm256_cvtps_epi32(v_dst0)); + _mm256_storeu_si256((__m256i *)(dst + x + 8), _mm256_cvtps_epi32(v_dst1)); + } + } + #endif + #if CV_SSE2 + if (USE_SSE2)//~5X + { + __m128 scale128 = _mm_set1_ps (scale); + __m128 shift128 = _mm_set1_ps (shift); + for(; x <= size.width - 8; x += 8 ) + { + __m128i r0 = _mm_loadu_si128((const __m128i*)(src + x)); - _mm_storeu_si128((__m128i*)(dst + x), r0); - _mm_storeu_si128((__m128i*)(dst + x + 4), r1); - } + __m128 rf0 =_mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(r0, r0), 16)); + __m128 rf1 =_mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(r0, r0), 16)); + rf0 = _mm_add_ps(_mm_mul_ps(rf0, scale128), shift128); + rf1 = _mm_add_ps(_mm_mul_ps(rf1, scale128), shift128); + + _mm_storeu_si128((__m128i*)(dst + x), _mm_cvtps_epi32(rf0)); + _mm_storeu_si128((__m128i*)(dst + x + 4), _mm_cvtps_epi32(rf1)); } + } #elif CV_NEON float32x4_t v_shift = vdupq_n_f32(shift); for(; x <= size.width - 8; x += 8 ) @@ -2330,24 +4379,6 @@ cvtScale_( const short* src, size_t sstep, } #endif - //We will wait Haswell - /* - #if CV_AVX - 
if(USE_AVX)//2X - bad variant - { - ////TODO:AVX implementation (optimization?) required - __m256 scale256 = _mm256_set1_ps (scale); - __m256 shift256 = _mm256_set1_ps (shift); - for(; x <= size.width - 8; x += 8 ) - { - __m256i buf = _mm256_set_epi32((int)(*(src+x+7)),(int)(*(src+x+6)),(int)(*(src+x+5)),(int)(*(src+x+4)),(int)(*(src+x+3)),(int)(*(src+x+2)),(int)(*(src+x+1)),(int)(*(src+x))); - __m256 r0 = _mm256_add_ps( _mm256_mul_ps(_mm256_cvtepi32_ps (buf), scale256), shift256); - __m256i res = _mm256_cvtps_epi32(r0); - _mm256_storeu_si256 ((__m256i*)(dst+x), res); - } - } - #endif*/ - for(; x < size.width; x++ ) dst[x] = saturate_cast(src[x]*scale + shift); } @@ -2362,7 +4393,180 @@ struct Cvt_SIMD } }; -#if CV_NEON +#if CV_SSE2 + +// from double + +template <> +struct Cvt_SIMD +{ + int operator() (const double * src, uchar * dst, int width) const + { + int x = 0; + + if (!USE_SSE2) + return x; + + for ( ; x <= width - 8; x += 8) + { + __m128 v_src0 = _mm_cvtpd_ps(_mm_loadu_pd(src + x)); + __m128 v_src1 = _mm_cvtpd_ps(_mm_loadu_pd(src + x + 2)); + __m128 v_src2 = _mm_cvtpd_ps(_mm_loadu_pd(src + x + 4)); + __m128 v_src3 = _mm_cvtpd_ps(_mm_loadu_pd(src + x + 6)); + + v_src0 = _mm_movelh_ps(v_src0, v_src1); + v_src1 = _mm_movelh_ps(v_src2, v_src3); + + __m128i v_dst = _mm_packs_epi32(_mm_cvtps_epi32(v_src0), + _mm_cvtps_epi32(v_src1)); + _mm_storel_epi64((__m128i *)(dst + x), _mm_packus_epi16(v_dst, v_dst)); + } + + return x; + } +}; + +template <> +struct Cvt_SIMD +{ + int operator() (const double * src, schar * dst, int width) const + { + int x = 0; + + if (!USE_SSE2) + return x; + + for ( ; x <= width - 8; x += 8) + { + __m128 v_src0 = _mm_cvtpd_ps(_mm_loadu_pd(src + x)); + __m128 v_src1 = _mm_cvtpd_ps(_mm_loadu_pd(src + x + 2)); + __m128 v_src2 = _mm_cvtpd_ps(_mm_loadu_pd(src + x + 4)); + __m128 v_src3 = _mm_cvtpd_ps(_mm_loadu_pd(src + x + 6)); + + v_src0 = _mm_movelh_ps(v_src0, v_src1); + v_src1 = _mm_movelh_ps(v_src2, v_src3); + + __m128i v_dst = _mm_packs_epi32(_mm_cvtps_epi32(v_src0), + _mm_cvtps_epi32(v_src1)); + _mm_storel_epi64((__m128i *)(dst + x), _mm_packs_epi16(v_dst, v_dst)); + } + + return x; + } +}; + +#if CV_SSE4_1 + +template <> +struct Cvt_SIMD +{ + bool haveSIMD; + Cvt_SIMD() { haveSIMD = checkHardwareSupport(CV_CPU_SSE4_1); } + + int operator() (const double * src, ushort * dst, int width) const + { + int x = 0; + + if (!haveSIMD) + return x; + + for ( ; x <= width - 8; x += 8) + { + __m128 v_src0 = _mm_cvtpd_ps(_mm_loadu_pd(src + x)); + __m128 v_src1 = _mm_cvtpd_ps(_mm_loadu_pd(src + x + 2)); + __m128 v_src2 = _mm_cvtpd_ps(_mm_loadu_pd(src + x + 4)); + __m128 v_src3 = _mm_cvtpd_ps(_mm_loadu_pd(src + x + 6)); + + v_src0 = _mm_movelh_ps(v_src0, v_src1); + v_src1 = _mm_movelh_ps(v_src2, v_src3); + + __m128i v_dst = _mm_packus_epi32(_mm_cvtps_epi32(v_src0), + _mm_cvtps_epi32(v_src1)); + _mm_storeu_si128((__m128i *)(dst + x), v_dst); + } + + return x; + } +}; + +#endif // CV_SSE4_1 + +template <> +struct Cvt_SIMD +{ + int operator() (const double * src, short * dst, int width) const + { + int x = 0; + + if (!USE_SSE2) + return x; + + for ( ; x <= width - 8; x += 8) + { + __m128 v_src0 = _mm_cvtpd_ps(_mm_loadu_pd(src + x)); + __m128 v_src1 = _mm_cvtpd_ps(_mm_loadu_pd(src + x + 2)); + __m128 v_src2 = _mm_cvtpd_ps(_mm_loadu_pd(src + x + 4)); + __m128 v_src3 = _mm_cvtpd_ps(_mm_loadu_pd(src + x + 6)); + + v_src0 = _mm_movelh_ps(v_src0, v_src1); + v_src1 = _mm_movelh_ps(v_src2, v_src3); + + __m128i v_dst = _mm_packs_epi32(_mm_cvtps_epi32(v_src0), + _mm_cvtps_epi32(v_src1)); + 
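// A standalone sketch (illustrative only, not OpenCV code) of the shuffle constant used
// in the AVX2 branch added to cvtScale_ for short -> int a little earlier. The 256-bit
// unpack instructions work independently inside each 128-bit lane, so the input is first
// re-ordered with _mm256_permute4x64_epi64 and 0xD8 (binary 11 01 10 00, i.e. 64-bit
// quarters in the order 0,2,1,3); unpacklo/unpackhi then emit the sign-extended values
// in their original memory order. Requires an AVX2 build (e.g. -mavx2) and an AVX2 CPU.
#include <immintrin.h>
#include <cstdio>

int main()
{
    short src[16];
    int   dst[16];
    for (int i = 0; i < 16; i++) src[i] = (short)(i - 8);

    __m256i v = _mm256_loadu_si256((const __m256i*)src);
    v = _mm256_permute4x64_epi64(v, 0xD8);
    __m256i lo = _mm256_srai_epi32(_mm256_unpacklo_epi16(v, v), 16); // elements 0..7
    __m256i hi = _mm256_srai_epi32(_mm256_unpackhi_epi16(v, v), 16); // elements 8..15
    _mm256_storeu_si256((__m256i*)dst,       lo);
    _mm256_storeu_si256((__m256i*)(dst + 8), hi);

    for (int i = 0; i < 16; i++)
        printf("%d ", dst[i]);   // prints -8 .. 7 in order
    printf("\n");
    return 0;
}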
_mm_storeu_si128((__m128i *)(dst + x), v_dst); + } + + return x; + } +}; + +template <> +struct Cvt_SIMD +{ + int operator() (const double * src, int * dst, int width) const + { + int x = 0; + + if (!USE_SSE2) + return x; + + for ( ; x <= width - 4; x += 4) + { + __m128 v_src0 = _mm_cvtpd_ps(_mm_loadu_pd(src + x)); + __m128 v_src1 = _mm_cvtpd_ps(_mm_loadu_pd(src + x + 2)); + v_src0 = _mm_movelh_ps(v_src0, v_src1); + + _mm_storeu_si128((__m128i *)(dst + x), _mm_cvtps_epi32(v_src0)); + } + + return x; + } +}; + +template <> +struct Cvt_SIMD +{ + int operator() (const double * src, float * dst, int width) const + { + int x = 0; + + if (!USE_SSE2) + return x; + + for ( ; x <= width - 4; x += 4) + { + __m128 v_src0 = _mm_cvtpd_ps(_mm_loadu_pd(src + x)); + __m128 v_src1 = _mm_cvtpd_ps(_mm_loadu_pd(src + x + 2)); + + _mm_storeu_ps(dst + x, _mm_movelh_ps(v_src0, v_src1)); + } + + return x; + } +}; + + +#elif CV_NEON // from uchar @@ -2931,8 +5135,9 @@ cvt_( const float* src, size_t sstep, { int x = 0; #if CV_SSE2 - if(USE_SSE2){ - for( ; x <= size.width - 8; x += 8 ) + if(USE_SSE2) + { + for( ; x <= size.width - 8; x += 8 ) { __m128 src128 = _mm_loadu_ps (src + x); __m128i src_int128 = _mm_cvtps_epi32 (src128); diff --git a/modules/core/src/copy.cpp b/modules/core/src/copy.cpp index 301ea80a1f..fe8ffd7718 100644 --- a/modules/core/src/copy.cpp +++ b/modules/core/src/copy.cpp @@ -11,6 +11,7 @@ // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved. +// Copyright (C) 2014, Itseez Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, diff --git a/modules/core/src/mathfuncs.cpp b/modules/core/src/mathfuncs.cpp index 1c045f3faa..13ada1d1d6 100644 --- a/modules/core/src/mathfuncs.cpp +++ b/modules/core/src/mathfuncs.cpp @@ -12,6 +12,7 @@ // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved. +// Copyright (C) 2014-2015, Itseez Inc., all rights reserved. // Third party copyrights are property of their respective owners. 
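// A minimal sketch (illustrative only, not OpenCV code) of the dispatch pattern behind
// the Cvt_SIMD / cvtScale_SIMD functors above: the primary template, keyed on the source
// and destination element types, does nothing and returns 0; each specialization returns
// how many leading elements it handled with intrinsics; the caller always finishes with a
// scalar tail loop, so a missing or disabled specialization degrades gracefully.
#include <cstddef>

template <typename ST, typename DT> struct CvtSketch
{
    int operator()(const ST*, DT*, int) const { return 0; }   // generic: no SIMD
};

template <> struct CvtSketch<double, float>                    // hypothetical fast path
{
    int operator()(const double* src, float* dst, int width) const
    {
        int x = 0;
        for ( ; x <= width - 4; x += 4)        // a real body would use intrinsics here
            for (int i = 0; i < 4; i++)
                dst[x + i] = (float)src[x + i];
        return x;                              // caller resumes at this index
    }
};

template <typename ST, typename DT>
static void convertSketch(const ST* src, DT* dst, int width)
{
    CvtSketch<ST, DT> vop;
    int x = vop(src, dst, width);              // vectorized head (possibly 0 elements)
    for ( ; x < width; x++)                    // scalar tail
        dst[x] = (DT)src[x];
}

int main()
{
    double in[10] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 };
    float  out[10];
    convertSketch(in, out, 10);
    return out[9] == 9.f ? 0 : 1;
}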
// // Redistribution and use in source and binary forms, with or without modification, @@ -593,14 +594,46 @@ void phase( InputArray src1, InputArray src2, OutputArray dst, bool angleInDegre { const double *x = (const double*)ptrs[0], *y = (const double*)ptrs[1]; double *angle = (double*)ptrs[2]; - for( k = 0; k < len; k++ ) + k = 0; + +#if CV_SSE2 + if (USE_SSE2) + { + for ( ; k <= len - 4; k += 4) + { + __m128 v_dst0 = _mm_movelh_ps(_mm_cvtpd_ps(_mm_loadu_pd(x + k)), + _mm_cvtpd_ps(_mm_loadu_pd(x + k + 2))); + __m128 v_dst1 = _mm_movelh_ps(_mm_cvtpd_ps(_mm_loadu_pd(y + k)), + _mm_cvtpd_ps(_mm_loadu_pd(y + k + 2))); + + _mm_storeu_ps(buf[0] + k, v_dst0); + _mm_storeu_ps(buf[1] + k, v_dst1); + } + } +#endif + + for( ; k < len; k++ ) { buf[0][k] = (float)x[k]; buf[1][k] = (float)y[k]; } FastAtan2_32f( buf[1], buf[0], buf[0], len, angleInDegrees ); - for( k = 0; k < len; k++ ) + k = 0; + +#if CV_SSE2 + if (USE_SSE2) + { + for ( ; k <= len - 4; k += 4) + { + __m128 v_src = _mm_loadu_ps(buf[0] + k); + _mm_storeu_pd(angle + k, _mm_cvtps_pd(v_src)); + _mm_storeu_pd(angle + k + 2, _mm_cvtps_pd(_mm_castsi128_ps(_mm_srli_si128(_mm_castps_si128(v_src), 8)))); + } + } +#endif + + for( ; k < len; k++ ) angle[k] = buf[0][k]; } ptrs[0] += len*esz1; @@ -698,14 +731,46 @@ void cartToPolar( InputArray src1, InputArray src2, double *angle = (double*)ptrs[3]; Magnitude_64f(x, y, (double*)ptrs[2], len); - for( k = 0; k < len; k++ ) + k = 0; + +#if CV_SSE2 + if (USE_SSE2) + { + for ( ; k <= len - 4; k += 4) + { + __m128 v_dst0 = _mm_movelh_ps(_mm_cvtpd_ps(_mm_loadu_pd(x + k)), + _mm_cvtpd_ps(_mm_loadu_pd(x + k + 2))); + __m128 v_dst1 = _mm_movelh_ps(_mm_cvtpd_ps(_mm_loadu_pd(y + k)), + _mm_cvtpd_ps(_mm_loadu_pd(y + k + 2))); + + _mm_storeu_ps(buf[0] + k, v_dst0); + _mm_storeu_ps(buf[1] + k, v_dst1); + } + } +#endif + + for( ; k < len; k++ ) { buf[0][k] = (float)x[k]; buf[1][k] = (float)y[k]; } FastAtan2_32f( buf[1], buf[0], buf[0], len, angleInDegrees ); - for( k = 0; k < len; k++ ) + k = 0; + +#if CV_SSE2 + if (USE_SSE2) + { + for ( ; k <= len - 4; k += 4) + { + __m128 v_src = _mm_loadu_ps(buf[0] + k); + _mm_storeu_pd(angle + k, _mm_cvtps_pd(v_src)); + _mm_storeu_pd(angle + k + 2, _mm_cvtps_pd(_mm_castsi128_ps(_mm_srli_si128(_mm_castps_si128(v_src), 8)))); + } + } +#endif + + for( ; k < len; k++ ) angle[k] = buf[0][k]; } ptrs[0] += len*esz1; @@ -771,14 +836,77 @@ static void SinCos_32f( const float *angle, float *sinval, float* cosval, /*static const double cos_a2 = 1;*/ double k1; - int i; + int i = 0; if( !angle_in_degrees ) k1 = N/(2*CV_PI); else k1 = N/360.; - for( i = 0; i < len; i++ ) +#if CV_AVX2 + if (USE_AVX2) + { + __m128d v_k1 = _mm_set1_pd(k1); + __m128d v_1 = _mm_set1_pd(1); + __m128i v_N1 = _mm_set1_epi32(N - 1); + __m128i v_N4 = _mm_set1_epi32(N >> 2); + __m128d v_sin_a0 = _mm_set1_pd(sin_a0); + __m128d v_sin_a2 = _mm_set1_pd(sin_a2); + __m128d v_cos_a0 = _mm_set1_pd(cos_a0); + + for ( ; i <= len - 4; i += 4) + { + __m128 v_angle = _mm_loadu_ps(angle + i); + + // 0-1 + __m128d v_t = _mm_mul_pd(_mm_cvtps_pd(v_angle), v_k1); + __m128i v_it = _mm_cvtpd_epi32(v_t); + v_t = _mm_sub_pd(v_t, _mm_cvtepi32_pd(v_it)); + + __m128i v_sin_idx = _mm_and_si128(v_it, v_N1); + __m128i v_cos_idx = _mm_and_si128(_mm_sub_epi32(v_N4, v_sin_idx), v_N1); + + __m128d v_t2 = _mm_mul_pd(v_t, v_t); + __m128d v_sin_b = _mm_mul_pd(_mm_add_pd(_mm_mul_pd(v_sin_a0, v_t2), v_sin_a2), v_t); + __m128d v_cos_b = _mm_add_pd(_mm_mul_pd(v_cos_a0, v_t2), v_1); + + __m128d v_sin_a = _mm_i32gather_pd(sin_table, v_sin_idx, 8); + __m128d 
v_cos_a = _mm_i32gather_pd(sin_table, v_cos_idx, 8); + + __m128d v_sin_val_0 = _mm_add_pd(_mm_mul_pd(v_sin_a, v_cos_b), + _mm_mul_pd(v_cos_a, v_sin_b)); + __m128d v_cos_val_0 = _mm_sub_pd(_mm_mul_pd(v_cos_a, v_cos_b), + _mm_mul_pd(v_sin_a, v_sin_b)); + + // 2-3 + v_t = _mm_mul_pd(_mm_cvtps_pd(_mm_castsi128_ps(_mm_srli_si128(_mm_castps_si128(v_angle), 8))), v_k1); + v_it = _mm_cvtpd_epi32(v_t); + v_t = _mm_sub_pd(v_t, _mm_cvtepi32_pd(v_it)); + + v_sin_idx = _mm_and_si128(v_it, v_N1); + v_cos_idx = _mm_and_si128(_mm_sub_epi32(v_N4, v_sin_idx), v_N1); + + v_t2 = _mm_mul_pd(v_t, v_t); + v_sin_b = _mm_mul_pd(_mm_add_pd(_mm_mul_pd(v_sin_a0, v_t2), v_sin_a2), v_t); + v_cos_b = _mm_add_pd(_mm_mul_pd(v_cos_a0, v_t2), v_1); + + v_sin_a = _mm_i32gather_pd(sin_table, v_sin_idx, 8); + v_cos_a = _mm_i32gather_pd(sin_table, v_cos_idx, 8); + + __m128d v_sin_val_1 = _mm_add_pd(_mm_mul_pd(v_sin_a, v_cos_b), + _mm_mul_pd(v_cos_a, v_sin_b)); + __m128d v_cos_val_1 = _mm_sub_pd(_mm_mul_pd(v_cos_a, v_cos_b), + _mm_mul_pd(v_sin_a, v_sin_b)); + + _mm_storeu_ps(sinval + i, _mm_movelh_ps(_mm_cvtpd_ps(v_sin_val_0), + _mm_cvtpd_ps(v_sin_val_1))); + _mm_storeu_ps(cosval + i, _mm_movelh_ps(_mm_cvtpd_ps(v_cos_val_0), + _mm_cvtpd_ps(v_cos_val_1))); + } + } +#endif + + for( ; i < len; i++ ) { double t = angle[i]*k1; int it = cvRound(t); @@ -914,6 +1042,16 @@ void polarToCart( InputArray src1, InputArray src2, vst1q_f32(x + k, vmulq_f32(vld1q_f32(x + k), v_m)); vst1q_f32(y + k, vmulq_f32(vld1q_f32(y + k), v_m)); } + #elif CV_SSE2 + if (USE_SSE2) + { + for( ; k <= len - 4; k += 4 ) + { + __m128 v_m = _mm_loadu_ps(mag + k); + _mm_storeu_ps(x + k, _mm_mul_ps(_mm_loadu_ps(x + k), v_m)); + _mm_storeu_ps(y + k, _mm_mul_ps(_mm_loadu_ps(y + k), v_m)); + } + } #endif for( ; k < len; k++ ) @@ -939,10 +1077,10 @@ void polarToCart( InputArray src1, InputArray src2, x[k] = buf[0][k]*m; y[k] = buf[1][k]*m; } else - for( k = 0; k < len; k++ ) - { - x[k] = buf[0][k]; y[k] = buf[1][k]; - } + { + std::memcpy(x, buf[0], sizeof(float) * len); + std::memcpy(y, buf[1], sizeof(float) * len); + } } if( ptrs[0] ) diff --git a/modules/core/src/matmul.cpp b/modules/core/src/matmul.cpp index b2f36b3292..feffc8d32f 100644 --- a/modules/core/src/matmul.cpp +++ b/modules/core/src/matmul.cpp @@ -12,6 +12,7 @@ // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved. +// Copyright (C) 2014-2015, Itseez Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, @@ -720,6 +721,16 @@ static bool ocl_gemm_amdblas( InputArray matA, InputArray matB, double alpha, return false; UMat A = matA.getUMat(), B = matB.getUMat(), D = matD.getUMat(); + if (!ocl::internal::isCLBuffer(A) || !ocl::internal::isCLBuffer(B) || !ocl::internal::isCLBuffer(D)) + { + return false; + } + if (haveC) + { + UMat C = matC.getUMat(); + if (!ocl::internal::isCLBuffer(C)) + return false; + } if (haveC) ctrans ? 
transpose(matC, D) : matC.copyTo(D); else diff --git a/modules/core/src/matrix.cpp b/modules/core/src/matrix.cpp index 38ff7ed53a..e1e9caa837 100644 --- a/modules/core/src/matrix.cpp +++ b/modules/core/src/matrix.cpp @@ -159,8 +159,9 @@ void MatAllocator::copy(UMatData* usrc, UMatData* udst, int dims, const size_t s memcpy(ptrs[1], ptrs[0], planesz); } -BufferPoolController* MatAllocator::getBufferPoolController() const +BufferPoolController* MatAllocator::getBufferPoolController(const char* id) const { + (void)id; static DummyBufferPoolController dummy; return &dummy; } @@ -2644,6 +2645,10 @@ void _OutputArray::assign(const UMat& u) const { u.copyTo(*(Mat*)obj); // TODO check u.getMat() } + else if (k == MATX) + { + u.copyTo(getMat()); // TODO check u.getMat() + } else { CV_Error(Error::StsNotImplemented, ""); @@ -2662,6 +2667,10 @@ void _OutputArray::assign(const Mat& m) const { *(Mat*)obj = m; } + else if (k == MATX) + { + m.copyTo(getMat()); + } else { CV_Error(Error::StsNotImplemented, ""); diff --git a/modules/core/src/ocl.cpp b/modules/core/src/ocl.cpp index c18d8ba61a..efe3b936d5 100644 --- a/modules/core/src/ocl.cpp +++ b/modules/core/src/ocl.cpp @@ -48,6 +48,8 @@ #define CV_OPENCL_ALWAYS_SHOW_BUILD_LOG 0 #define CV_OPENCL_SHOW_RUN_ERRORS 0 +#define CV_OPENCL_SHOW_SVM_ERROR_LOG 1 +#define CV_OPENCL_SHOW_SVM_LOG 0 #include "opencv2/core/bufferpool.hpp" #ifndef LOG_BUFFER_POOL @@ -111,6 +113,20 @@ static size_t getConfigurationParameterForSize(const char* name, size_t defaultV CV_ErrorNoReturn(cv::Error::StsBadArg, cv::format("Invalid value for %s parameter: %s", name, value.c_str())); } +#if CV_OPENCL_SHOW_SVM_LOG +// TODO add timestamp logging +#define CV_OPENCL_SVM_TRACE_P printf("line %d (ocl.cpp): ", __LINE__); printf +#else +#define CV_OPENCL_SVM_TRACE_P(...) +#endif + +#if CV_OPENCL_SHOW_SVM_ERROR_LOG +// TODO add timestamp logging +#define CV_OPENCL_SVM_TRACE_ERROR_P printf("Error on line %d (ocl.cpp): ", __LINE__); printf +#else +#define CV_OPENCL_SVM_TRACE_ERROR_P(...) 
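// A standalone sketch (illustrative only, not OpenCV code) of the trace-macro trick used
// by CV_OPENCL_SVM_TRACE_P / CV_OPENCL_SVM_TRACE_ERROR_P just above: when enabled, the
// macro expands to a printf of a fixed prefix followed by a bare `printf`, to which the
// call-site argument list attaches; when disabled, the variadic form swallows the
// arguments. Because the enabled form is two statements, it is not safe as the sole body
// of an unbraced if/else.
#include <cstdio>

#define SHOW_TRACE 1

#if SHOW_TRACE
#define TRACE_P printf("line %d: ", __LINE__); printf
#else
#define TRACE_P(...)
#endif

int main()
{
    int bytes = 100;
    TRACE_P("allocated %d bytes\n", bytes);   // prints "line NN: allocated 100 bytes"
    return 0;
}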
+#endif + #include "opencv2/core/opencl/runtime/opencl_clamdblas.hpp" #include "opencv2/core/opencl/runtime/opencl_clamdfft.hpp" @@ -920,6 +936,7 @@ OCL_FUNC(cl_int, clGetSupportedImageFormats, cl_uint * num_image_formats), (context, flags, image_type, num_entries, image_formats, num_image_formats)) + /* OCL_FUNC(cl_int, clGetMemObjectInfo, (cl_mem memobj, @@ -1342,6 +1359,12 @@ static bool isRaiseError() #define CV_OclDbgAssert(expr) do { if (isRaiseError()) { CV_Assert(expr); } else { (void)(expr); } } while ((void)0, 0) #endif +#ifdef HAVE_OPENCL_SVM +#include "opencv2/core/opencl/runtime/opencl_svm_20.hpp" +#include "opencv2/core/opencl/runtime/opencl_svm_hsa_extension.hpp" +#include "opencv2/core/opencl/opencl_svm.hpp" +#endif + namespace cv { namespace ocl { struct UMat2D @@ -1627,6 +1650,15 @@ bool haveAmdFft() #endif +bool haveSVM() +{ +#ifdef HAVE_OPENCL_SVM + return true; +#else + return false; +#endif +} + void finish() { Queue::getDefault().finish(); @@ -2357,12 +2389,86 @@ not_found: } #endif +#ifdef HAVE_OPENCL_SVM +namespace svm { + +enum AllocatorFlags { // don't use first 16 bits + OPENCL_SVM_COARSE_GRAIN_BUFFER = 1 << 16, // clSVMAlloc + SVM map/unmap + OPENCL_SVM_FINE_GRAIN_BUFFER = 2 << 16, // clSVMAlloc + OPENCL_SVM_FINE_GRAIN_SYSTEM = 3 << 16, // direct access + OPENCL_SVM_BUFFER_MASK = 3 << 16, + OPENCL_SVM_BUFFER_MAP = 4 << 16 +}; + +static bool checkForceSVMUmatUsage() +{ + static bool initialized = false; + static bool force = false; + if (!initialized) + { + force = getBoolParameter("OPENCV_OPENCL_SVM_FORCE_UMAT_USAGE", false); + initialized = true; + } + return force; +} +static bool checkDisableSVMUMatUsage() +{ + static bool initialized = false; + static bool force = false; + if (!initialized) + { + force = getBoolParameter("OPENCV_OPENCL_SVM_DISABLE_UMAT_USAGE", false); + initialized = true; + } + return force; +} +static bool checkDisableSVM() +{ + static bool initialized = false; + static bool force = false; + if (!initialized) + { + force = getBoolParameter("OPENCV_OPENCL_SVM_DISABLE", false); + initialized = true; + } + return force; +} +// see SVMCapabilities +static unsigned int getSVMCapabilitiesMask() +{ + static bool initialized = false; + static unsigned int mask = 0; + if (!initialized) + { + const char* envValue = getenv("OPENCV_OPENCL_SVM_CAPABILITIES_MASK"); + if (envValue == NULL) + { + return ~0U; // all bits 1 + } + mask = atoi(envValue); + initialized = true; + } + return mask; +} +} // namespace +#endif + struct Context::Impl { - Impl() + static Context::Impl* get(Context& context) { return context.p; } + + void __init() { refcount = 1; handle = 0; +#ifdef HAVE_OPENCL_SVM + svmInitialized = false; +#endif + } + + Impl() + { + __init(); } void setDefault() @@ -2401,8 +2507,7 @@ struct Context::Impl Impl(int dtype0) { - refcount = 1; - handle = 0; + __init(); cl_int retval = 0; cl_platform_id pl = (cl_platform_id)Platform::getDefault().ptr(); @@ -2419,7 +2524,7 @@ struct Context::Impl AutoBuffer dlistbuf(nd0*2+1); cl_device_id* dlist = (cl_device_id*)(void**)dlistbuf; cl_device_id* dlist_new = dlist + nd0; - CV_OclDbgAssert(clGetDeviceIDs( pl, dtype, nd0, dlist, &nd0 ) == CL_SUCCESS); + CV_OclDbgAssert(clGetDeviceIDs( pl, dtype, nd0, dlist, &nd0 ) == CL_SUCCESS); String name0; for(i = 0; i < nd0; i++) @@ -2496,6 +2601,144 @@ struct Context::Impl }; typedef std::map phash_t; phash_t phash; + +#ifdef HAVE_OPENCL_SVM + bool svmInitialized; + bool svmAvailable; + bool svmEnabled; + svm::SVMCapabilities svmCapabilities; + svm::SVMFunctions 
svmFunctions; + + void svmInit() + { + CV_Assert(handle != NULL); + const Device& device = devices[0]; + cl_device_svm_capabilities deviceCaps = 0; + CV_Assert(((void)0, CL_DEVICE_SVM_CAPABILITIES == CL_DEVICE_SVM_CAPABILITIES_AMD)); // Check assumption + cl_int status = clGetDeviceInfo((cl_device_id)device.ptr(), CL_DEVICE_SVM_CAPABILITIES, sizeof(deviceCaps), &deviceCaps, NULL); + if (status != CL_SUCCESS) + { + CV_OPENCL_SVM_TRACE_ERROR_P("CL_DEVICE_SVM_CAPABILITIES via clGetDeviceInfo failed: %d\n", status); + goto noSVM; + } + CV_OPENCL_SVM_TRACE_P("CL_DEVICE_SVM_CAPABILITIES returned: 0x%x\n", (int)deviceCaps); + CV_Assert(((void)0, CL_DEVICE_SVM_COARSE_GRAIN_BUFFER == CL_DEVICE_SVM_COARSE_GRAIN_BUFFER_AMD)); // Check assumption + svmCapabilities.value_ = + ((deviceCaps & CL_DEVICE_SVM_COARSE_GRAIN_BUFFER) ? svm::SVMCapabilities::SVM_COARSE_GRAIN_BUFFER : 0) | + ((deviceCaps & CL_DEVICE_SVM_FINE_GRAIN_BUFFER) ? svm::SVMCapabilities::SVM_FINE_GRAIN_BUFFER : 0) | + ((deviceCaps & CL_DEVICE_SVM_FINE_GRAIN_SYSTEM) ? svm::SVMCapabilities::SVM_FINE_GRAIN_SYSTEM : 0) | + ((deviceCaps & CL_DEVICE_SVM_ATOMICS) ? svm::SVMCapabilities::SVM_ATOMICS : 0); + svmCapabilities.value_ &= svm::getSVMCapabilitiesMask(); + if (svmCapabilities.value_ == 0) + { + CV_OPENCL_SVM_TRACE_ERROR_P("svmCapabilities is empty\n"); + goto noSVM; + } + try + { + // Try OpenCL 2.0 + CV_OPENCL_SVM_TRACE_P("Try SVM from OpenCL 2.0 ...\n"); + void* ptr = clSVMAlloc(handle, CL_MEM_READ_WRITE, 100, 0); + if (!ptr) + { + CV_OPENCL_SVM_TRACE_ERROR_P("clSVMAlloc returned NULL...\n"); + CV_ErrorNoReturn(Error::StsBadArg, "clSVMAlloc returned NULL"); + } + try + { + bool error = false; + cl_command_queue q = (cl_command_queue)Queue::getDefault().ptr(); + if (CL_SUCCESS != clEnqueueSVMMap(q, CL_TRUE, CL_MAP_WRITE, ptr, 100, 0, NULL, NULL)) + { + CV_OPENCL_SVM_TRACE_ERROR_P("clEnqueueSVMMap failed...\n"); + CV_ErrorNoReturn(Error::StsBadArg, "clEnqueueSVMMap FAILED"); + } + clFinish(q); + try + { + ((int*)ptr)[0] = 100; + } + catch (...) + { + CV_OPENCL_SVM_TRACE_ERROR_P("SVM buffer access test FAILED\n"); + error = true; + } + if (CL_SUCCESS != clEnqueueSVMUnmap(q, ptr, 0, NULL, NULL)) + { + CV_OPENCL_SVM_TRACE_ERROR_P("clEnqueueSVMUnmap failed...\n"); + CV_ErrorNoReturn(Error::StsBadArg, "clEnqueueSVMUnmap FAILED"); + } + clFinish(q); + if (error) + { + CV_ErrorNoReturn(Error::StsBadArg, "OpenCL SVM buffer access test was FAILED"); + } + } + catch (...) + { + CV_OPENCL_SVM_TRACE_ERROR_P("OpenCL SVM buffer access test was FAILED\n"); + clSVMFree(handle, ptr); + throw; + } + clSVMFree(handle, ptr); + svmFunctions.fn_clSVMAlloc = clSVMAlloc; + svmFunctions.fn_clSVMFree = clSVMFree; + svmFunctions.fn_clSetKernelArgSVMPointer = clSetKernelArgSVMPointer; + //svmFunctions.fn_clSetKernelExecInfo = clSetKernelExecInfo; + //svmFunctions.fn_clEnqueueSVMFree = clEnqueueSVMFree; + svmFunctions.fn_clEnqueueSVMMemcpy = clEnqueueSVMMemcpy; + svmFunctions.fn_clEnqueueSVMMemFill = clEnqueueSVMMemFill; + svmFunctions.fn_clEnqueueSVMMap = clEnqueueSVMMap; + svmFunctions.fn_clEnqueueSVMUnmap = clEnqueueSVMUnmap; + } + catch (...) 
+ { + CV_OPENCL_SVM_TRACE_P("clSVMAlloc failed, trying HSA extension...\n"); + try + { + // Try HSA extension + String extensions = device.extensions(); + if (extensions.find("cl_amd_svm") == String::npos) + { + CV_OPENCL_SVM_TRACE_P("Device extension doesn't have cl_amd_svm: %s\n", extensions.c_str()); + goto noSVM; + } + cl_platform_id p = NULL; + status = clGetDeviceInfo((cl_device_id)device.ptr(), CL_DEVICE_PLATFORM, sizeof(cl_platform_id), &p, NULL); + CV_Assert(status == CL_SUCCESS); + svmFunctions.fn_clSVMAlloc = (clSVMAllocAMD_fn)clGetExtensionFunctionAddressForPlatform(p, "clSVMAllocAMD"); + svmFunctions.fn_clSVMFree = (clSVMFreeAMD_fn)clGetExtensionFunctionAddressForPlatform(p, "clSVMFreeAMD"); + svmFunctions.fn_clSetKernelArgSVMPointer = (clSetKernelArgSVMPointerAMD_fn)clGetExtensionFunctionAddressForPlatform(p, "clSetKernelArgSVMPointerAMD"); + //svmFunctions.fn_clSetKernelExecInfo = (clSetKernelExecInfoAMD_fn)clGetExtensionFunctionAddressForPlatform(p, "clSetKernelExecInfoAMD"); + //svmFunctions.fn_clEnqueueSVMFree = (clEnqueueSVMFreeAMD_fn)clGetExtensionFunctionAddressForPlatform(p, "clEnqueueSVMFreeAMD"); + svmFunctions.fn_clEnqueueSVMMemcpy = (clEnqueueSVMMemcpyAMD_fn)clGetExtensionFunctionAddressForPlatform(p, "clEnqueueSVMMemcpyAMD"); + svmFunctions.fn_clEnqueueSVMMemFill = (clEnqueueSVMMemFillAMD_fn)clGetExtensionFunctionAddressForPlatform(p, "clEnqueueSVMMemFillAMD"); + svmFunctions.fn_clEnqueueSVMMap = (clEnqueueSVMMapAMD_fn)clGetExtensionFunctionAddressForPlatform(p, "clEnqueueSVMMapAMD"); + svmFunctions.fn_clEnqueueSVMUnmap = (clEnqueueSVMUnmapAMD_fn)clGetExtensionFunctionAddressForPlatform(p, "clEnqueueSVMUnmapAMD"); + CV_Assert(svmFunctions.isValid()); + } + catch (...) + { + CV_OPENCL_SVM_TRACE_P("Something is totally wrong\n"); + goto noSVM; + } + } + + svmAvailable = true; + svmEnabled = !svm::checkDisableSVM(); + svmInitialized = true; + CV_OPENCL_SVM_TRACE_P("OpenCV OpenCL SVM support initialized\n"); + return; + noSVM: + CV_OPENCL_SVM_TRACE_P("OpenCL SVM is not detected\n"); + svmAvailable = false; + svmEnabled = false; + svmCapabilities.value_ = 0; + svmInitialized = true; + svmFunctions.fn_clSVMAlloc = NULL; + return; + } +#endif }; @@ -2610,6 +2853,71 @@ Program Context::getProg(const ProgramSource& prog, return p ? 
p->getProg(prog, buildopts, errmsg) : Program(); } + + +#ifdef HAVE_OPENCL_SVM +bool Context::useSVM() const +{ + Context::Impl* i = p; + CV_Assert(i); + if (!i->svmInitialized) + i->svmInit(); + return i->svmEnabled; +} +void Context::setUseSVM(bool enabled) +{ + Context::Impl* i = p; + CV_Assert(i); + if (!i->svmInitialized) + i->svmInit(); + if (enabled && !i->svmAvailable) + { + CV_ErrorNoReturn(Error::StsError, "OpenCL Shared Virtual Memory (SVM) is not supported by OpenCL device"); + } + i->svmEnabled = enabled; +} +#else +bool Context::useSVM() const { return false; } +void Context::setUseSVM(bool enabled) { CV_Assert(!enabled); } +#endif + +#ifdef HAVE_OPENCL_SVM +namespace svm { + +const SVMCapabilities getSVMCapabilitites(const ocl::Context& context) +{ + Context::Impl* i = context.p; + CV_Assert(i); + if (!i->svmInitialized) + i->svmInit(); + return i->svmCapabilities; +} + +CV_EXPORTS const SVMFunctions* getSVMFunctions(const ocl::Context& context) +{ + Context::Impl* i = context.p; + CV_Assert(i); + CV_Assert(i->svmInitialized); // getSVMCapabilitites() must be called first + CV_Assert(i->svmFunctions.fn_clSVMAlloc != NULL); + return &i->svmFunctions; +} + +CV_EXPORTS bool useSVM(UMatUsageFlags usageFlags) +{ + if (checkForceSVMUmatUsage()) + return true; + if (checkDisableSVMUMatUsage()) + return false; + if ((usageFlags & USAGE_ALLOCATE_SHARED_MEMORY) != 0) + return true; + return false; // don't use SVM by default +} + +} // namespace cv::ocl::svm +#endif // HAVE_OPENCL_SVM + + + void initializeContextFromHandle(Context& ctx, void* platform, void* _context, void* _device) { cl_context context = (cl_context)_context; @@ -2979,12 +3287,33 @@ int Kernel::set(int i, const KernelArg& arg) return -1; } +#ifdef HAVE_OPENCL_SVM + if ((arg.m->u->allocatorFlags_ & svm::OPENCL_SVM_BUFFER_MASK) != 0) + { + const Context& ctx = Context::getDefault(); + const svm::SVMFunctions* svmFns = svm::getSVMFunctions(ctx); + uchar*& svmDataPtr = (uchar*&)arg.m->u->handle; + CV_OPENCL_SVM_TRACE_P("clSetKernelArgSVMPointer: %p\n", svmDataPtr); +#if 1 // TODO + cl_int status = svmFns->fn_clSetKernelArgSVMPointer(p->handle, (cl_uint)i, svmDataPtr); +#else + cl_int status = svmFns->fn_clSetKernelArgSVMPointer(p->handle, (cl_uint)i, &svmDataPtr); +#endif + CV_Assert(status == CL_SUCCESS); + } + else +#endif + { + CV_OclDbgAssert(clSetKernelArg(p->handle, (cl_uint)i, sizeof(h), &h) == CL_SUCCESS); + } + if (ptronly) - CV_OclDbgAssert(clSetKernelArg(p->handle, (cl_uint)i++, sizeof(h), &h) == CL_SUCCESS); + { + i++; + } else if( arg.m->dims <= 2 ) { UMat2D u2d(*arg.m); - CV_OclDbgAssert(clSetKernelArg(p->handle, (cl_uint)i, sizeof(h), &h) == CL_SUCCESS); CV_OclDbgAssert(clSetKernelArg(p->handle, (cl_uint)(i+1), sizeof(u2d.step), &u2d.step) == CL_SUCCESS); CV_OclDbgAssert(clSetKernelArg(p->handle, (cl_uint)(i+2), sizeof(u2d.offset), &u2d.offset) == CL_SUCCESS); i += 3; @@ -3000,7 +3329,6 @@ int Kernel::set(int i, const KernelArg& arg) else { UMat3D u3d(*arg.m); - CV_OclDbgAssert(clSetKernelArg(p->handle, (cl_uint)i, sizeof(h), &h) == CL_SUCCESS); CV_OclDbgAssert(clSetKernelArg(p->handle, (cl_uint)(i+1), sizeof(u3d.slicestep), &u3d.slicestep) == CL_SUCCESS); CV_OclDbgAssert(clSetKernelArg(p->handle, (cl_uint)(i+2), sizeof(u3d.step), &u3d.step) == CL_SUCCESS); CV_OclDbgAssert(clSetKernelArg(p->handle, (cl_uint)(i+3), sizeof(u3d.offset), &u3d.offset) == CL_SUCCESS); @@ -3433,39 +3761,55 @@ ProgramSource::hash_t ProgramSource::hash() const //////////////////////////////////////////// OpenCLAllocator 
////////////////////////////////////////////////// +template class OpenCLBufferPool { protected: ~OpenCLBufferPool() { } public: - virtual cl_mem allocate(size_t size, CV_OUT size_t& capacity) = 0; - virtual void release(cl_mem handle, size_t capacity) = 0; + virtual T allocate(size_t size) = 0; + virtual void release(T buffer) = 0; }; -class OpenCLBufferPoolImpl : public BufferPoolController, public OpenCLBufferPool +template +class OpenCLBufferPoolBaseImpl : public BufferPoolController, public OpenCLBufferPool { -public: - struct BufferEntry - { - cl_mem clBuffer_; - size_t capacity_; - }; +private: + inline Derived& derived() { return *static_cast(this); } protected: Mutex mutex_; size_t currentReservedSize; size_t maxReservedSize; - std::list reservedEntries_; // LRU order + std::list allocatedEntries_; // Allocated and used entries + std::list reservedEntries_; // LRU order. Allocated, but not used entries + + // synchronized + bool _findAndRemoveEntryFromAllocatedList(CV_OUT BufferEntry& entry, T buffer) + { + typename std::list::iterator i = allocatedEntries_.begin(); + for (; i != allocatedEntries_.end(); ++i) + { + BufferEntry& e = *i; + if (e.clBuffer_ == buffer) + { + entry = e; + allocatedEntries_.erase(i); + return true; + } + } + return false; + } // synchronized bool _findAndRemoveEntryFromReservedList(CV_OUT BufferEntry& entry, const size_t size) { if (reservedEntries_.empty()) return false; - std::list::iterator i = reservedEntries_.begin(); - std::list::iterator result_pos = reservedEntries_.end(); - BufferEntry result = {NULL, 0}; + typename std::list::iterator i = reservedEntries_.begin(); + typename std::list::iterator result_pos = reservedEntries_.end(); + BufferEntry result; size_t minDiff = (size_t)(-1); for (; i != reservedEntries_.end(); ++i) { @@ -3489,6 +3833,7 @@ protected: reservedEntries_.erase(result_pos); entry = result; currentReservedSize -= entry.capacity_; + allocatedEntries_.push_back(entry); return true; } return false; @@ -3503,7 +3848,7 @@ protected: const BufferEntry& entry = reservedEntries_.back(); CV_DbgAssert(currentReservedSize >= entry.capacity_); currentReservedSize -= entry.capacity_; - _releaseBufferEntry(entry); + derived()._releaseBufferEntry(entry); reservedEntries_.pop_back(); } } @@ -3523,72 +3868,45 @@ protected: return 1024*1024; } - void _allocateBufferEntry(BufferEntry& entry, size_t size) - { - CV_DbgAssert(entry.clBuffer_ == NULL); - entry.capacity_ = alignSize(size, (int)_allocationGranularity(size)); - Context& ctx = Context::getDefault(); - cl_int retval = CL_SUCCESS; - entry.clBuffer_ = clCreateBuffer((cl_context)ctx.ptr(), CL_MEM_READ_WRITE, entry.capacity_, 0, &retval); - CV_Assert(retval == CL_SUCCESS); - CV_Assert(entry.clBuffer_ != NULL); - if(retval == CL_SUCCESS) - { - CV_IMPL_ADD(CV_IMPL_OCL); - } - LOG_BUFFER_POOL("OpenCL allocate %lld (0x%llx) bytes: %p\n", - (long long)entry.capacity_, (long long)entry.capacity_, entry.clBuffer_); - } - - void _releaseBufferEntry(const BufferEntry& entry) - { - CV_Assert(entry.capacity_ != 0); - CV_Assert(entry.clBuffer_ != NULL); - LOG_BUFFER_POOL("OpenCL release buffer: %p, %lld (0x%llx) bytes\n", - entry.clBuffer_, (long long)entry.capacity_, (long long)entry.capacity_); - clReleaseMemObject(entry.clBuffer_); - } public: - OpenCLBufferPoolImpl() - : currentReservedSize(0), maxReservedSize(0) + OpenCLBufferPoolBaseImpl() + : currentReservedSize(0), + maxReservedSize(0) { - int poolSize = ocl::Device::getDefault().isIntel() ? 
1 << 27 : 0; - maxReservedSize = getConfigurationParameterForSize("OPENCV_OPENCL_BUFFERPOOL_LIMIT", poolSize); + // nothing } - virtual ~OpenCLBufferPoolImpl() + virtual ~OpenCLBufferPoolBaseImpl() { freeAllReservedBuffers(); CV_Assert(reservedEntries_.empty()); } public: - virtual cl_mem allocate(size_t size, CV_OUT size_t& capacity) + virtual T allocate(size_t size) { - BufferEntry entry = {NULL, 0}; - if (maxReservedSize > 0) + AutoLock locker(mutex_); + BufferEntry entry; + if (maxReservedSize > 0 && _findAndRemoveEntryFromReservedList(entry, size)) { - AutoLock locker(mutex_); - if (_findAndRemoveEntryFromReservedList(entry, size)) - { - CV_DbgAssert(size <= entry.capacity_); - LOG_BUFFER_POOL("Reuse reserved buffer: %p\n", entry.clBuffer_); - capacity = entry.capacity_; - return entry.clBuffer_; - } + CV_DbgAssert(size <= entry.capacity_); + LOG_BUFFER_POOL("Reuse reserved buffer: %p\n", entry.clBuffer_); + } + else + { + derived()._allocateBufferEntry(entry, size); } - _allocateBufferEntry(entry, size); - capacity = entry.capacity_; return entry.clBuffer_; } - virtual void release(cl_mem handle, size_t capacity) + virtual void release(T buffer) { - BufferEntry entry = {handle, capacity}; + AutoLock locker(mutex_); + BufferEntry entry; + CV_Assert(_findAndRemoveEntryFromAllocatedList(entry, buffer)); if (maxReservedSize == 0 || entry.capacity_ > maxReservedSize / 8) { - _releaseBufferEntry(entry); + derived()._releaseBufferEntry(entry); } else { - AutoLock locker(mutex_); reservedEntries_.push_front(entry); currentReservedSize += entry.capacity_; _checkSizeOfReservedEntries(); @@ -3604,7 +3922,7 @@ public: maxReservedSize = size; if (maxReservedSize < oldMaxReservedSize) { - std::list::iterator i = reservedEntries_.begin(); + typename std::list::iterator i = reservedEntries_.begin(); for (; i != reservedEntries_.end();) { const BufferEntry& entry = *i; @@ -3612,7 +3930,7 @@ public: { CV_DbgAssert(currentReservedSize >= entry.capacity_); currentReservedSize -= entry.capacity_; - _releaseBufferEntry(entry); + derived()._releaseBufferEntry(entry); i = reservedEntries_.erase(i); continue; } @@ -3624,16 +3942,123 @@ public: virtual void freeAllReservedBuffers() { AutoLock locker(mutex_); - std::list::const_iterator i = reservedEntries_.begin(); + typename std::list::const_iterator i = reservedEntries_.begin(); for (; i != reservedEntries_.end(); ++i) { const BufferEntry& entry = *i; - _releaseBufferEntry(entry); + derived()._releaseBufferEntry(entry); } reservedEntries_.clear(); } }; +struct CLBufferEntry +{ + cl_mem clBuffer_; + size_t capacity_; + CLBufferEntry() : clBuffer_((cl_mem)NULL), capacity_(0) { } +}; + +class OpenCLBufferPoolImpl : public OpenCLBufferPoolBaseImpl +{ +public: + typedef struct CLBufferEntry BufferEntry; +protected: + int createFlags_; +public: + OpenCLBufferPoolImpl(int createFlags = 0) + : createFlags_(createFlags) + { + } + + void _allocateBufferEntry(BufferEntry& entry, size_t size) + { + CV_DbgAssert(entry.clBuffer_ == NULL); + entry.capacity_ = alignSize(size, (int)_allocationGranularity(size)); + Context& ctx = Context::getDefault(); + cl_int retval = CL_SUCCESS; + entry.clBuffer_ = clCreateBuffer((cl_context)ctx.ptr(), CL_MEM_READ_WRITE|createFlags_, entry.capacity_, 0, &retval); + CV_Assert(retval == CL_SUCCESS); + CV_Assert(entry.clBuffer_ != NULL); + if(retval == CL_SUCCESS) + { + CV_IMPL_ADD(CV_IMPL_OCL); + } + LOG_BUFFER_POOL("OpenCL allocate %lld (0x%llx) bytes: %p\n", + (long long)entry.capacity_, (long long)entry.capacity_, entry.clBuffer_); + 
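// A minimal CRTP sketch (illustrative only, not OpenCV code) of the refactoring above:
// OpenCLBufferPoolBaseImpl keeps the reserved-list bookkeeping and defers the actual
// allocation/release to derived()._allocateBufferEntry() / _releaseBufferEntry(), so the
// ordinary cl_mem pool and the SVM pointer pool share one implementation. Here malloc and
// free stand in for clCreateBuffer/clSVMAlloc and their release calls.
#include <cstddef>
#include <cstdlib>
#include <list>

template <typename Derived, typename Entry>
class PoolBase
{
    Derived& derived() { return *static_cast<Derived*>(this); }
protected:
    std::list<Entry> reserved_;                        // stands in for the LRU reserved list
public:
    typename Entry::handle_t get(std::size_t size)
    {
        if (!reserved_.empty())                        // the real pool matches by capacity
        {
            Entry e = reserved_.front();
            reserved_.pop_front();
            return e.handle;
        }
        Entry e;
        derived().allocateEntry(e, size);              // resource-specific part
        return e.handle;
    }
    void put(typename Entry::handle_t h, std::size_t capacity)
    {
        Entry e; e.handle = h; e.capacity = capacity;
        reserved_.push_front(e);                       // the real pool may free instead, by size policy
    }
    void freeAllReserved()
    {
        for (typename std::list<Entry>::iterator i = reserved_.begin(); i != reserved_.end(); ++i)
            derived().releaseEntry(*i);
        reserved_.clear();
    }
};

struct MallocEntry { typedef void* handle_t; void* handle; std::size_t capacity; };

class MallocPool : public PoolBase<MallocPool, MallocEntry>
{
public:
    void allocateEntry(MallocEntry& e, std::size_t size) { e.capacity = size; e.handle = std::malloc(size); }
    void releaseEntry(const MallocEntry& e)              { std::free(e.handle); }
};

int main()
{
    MallocPool pool;
    void* p = pool.get(1024);
    pool.put(p, 1024);            // returned block becomes reusable
    void* q = pool.get(512);      // reuses the reserved 1024-byte block in this sketch
    pool.put(q, 1024);
    pool.freeAllReserved();
    return 0;
}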
allocatedEntries_.push_back(entry); + } + + void _releaseBufferEntry(const BufferEntry& entry) + { + CV_Assert(entry.capacity_ != 0); + CV_Assert(entry.clBuffer_ != NULL); + LOG_BUFFER_POOL("OpenCL release buffer: %p, %lld (0x%llx) bytes\n", + entry.clBuffer_, (long long)entry.capacity_, (long long)entry.capacity_); + clReleaseMemObject(entry.clBuffer_); + } +}; + +#ifdef HAVE_OPENCL_SVM +struct CLSVMBufferEntry +{ + void* clBuffer_; + size_t capacity_; + CLSVMBufferEntry() : clBuffer_(NULL), capacity_(0) { } +}; +class OpenCLSVMBufferPoolImpl : public OpenCLBufferPoolBaseImpl +{ +public: + typedef struct CLSVMBufferEntry BufferEntry; +public: + OpenCLSVMBufferPoolImpl() + { + } + + void _allocateBufferEntry(BufferEntry& entry, size_t size) + { + CV_DbgAssert(entry.clBuffer_ == NULL); + entry.capacity_ = alignSize(size, (int)_allocationGranularity(size)); + + Context& ctx = Context::getDefault(); + const svm::SVMCapabilities svmCaps = svm::getSVMCapabilitites(ctx); + bool isFineGrainBuffer = svmCaps.isSupportFineGrainBuffer(); + cl_svm_mem_flags memFlags = CL_MEM_READ_WRITE | + (isFineGrainBuffer ? CL_MEM_SVM_FINE_GRAIN_BUFFER : 0); + + const svm::SVMFunctions* svmFns = svm::getSVMFunctions(ctx); + CV_DbgAssert(svmFns->isValid()); + + CV_OPENCL_SVM_TRACE_P("clSVMAlloc: %d\n", (int)entry.capacity_); + void *buf = svmFns->fn_clSVMAlloc((cl_context)ctx.ptr(), memFlags, entry.capacity_, 0); + CV_Assert(buf); + + entry.clBuffer_ = buf; + { + CV_IMPL_ADD(CV_IMPL_OCL); + } + LOG_BUFFER_POOL("OpenCL SVM allocate %lld (0x%llx) bytes: %p\n", + (long long)entry.capacity_, (long long)entry.capacity_, entry.clBuffer_); + allocatedEntries_.push_back(entry); + } + + void _releaseBufferEntry(const BufferEntry& entry) + { + CV_Assert(entry.capacity_ != 0); + CV_Assert(entry.clBuffer_ != NULL); + LOG_BUFFER_POOL("OpenCL release SVM buffer: %p, %lld (0x%llx) bytes\n", + entry.clBuffer_, (long long)entry.capacity_, (long long)entry.capacity_); + Context& ctx = Context::getDefault(); + const svm::SVMFunctions* svmFns = svm::getSVMFunctions(ctx); + CV_DbgAssert(svmFns->isValid()); + CV_OPENCL_SVM_TRACE_P("clSVMFree: %p\n", entry.clBuffer_); + svmFns->fn_clSVMFree((cl_context)ctx.ptr(), entry.clBuffer_); + } +}; +#endif + + + #if defined _MSC_VER #pragma warning(disable:4127) // conditional expression is constant #endif @@ -3697,12 +4122,37 @@ private: class OpenCLAllocator : public MatAllocator { mutable OpenCLBufferPoolImpl bufferPool; + mutable OpenCLBufferPoolImpl bufferPoolHostPtr; +#ifdef HAVE_OPENCL_SVM + mutable OpenCLSVMBufferPoolImpl bufferPoolSVM; +#endif + enum AllocatorFlags { - ALLOCATOR_FLAGS_BUFFER_POOL_USED = 1 << 0 + ALLOCATOR_FLAGS_BUFFER_POOL_USED = 1 << 0, + ALLOCATOR_FLAGS_BUFFER_POOL_HOST_PTR_USED = 1 << 1 +#ifdef HAVE_OPENCL_SVM + ,ALLOCATOR_FLAGS_BUFFER_POOL_SVM_USED = 1 << 2 +#endif }; public: - OpenCLAllocator() { matStdAllocator = Mat::getStdAllocator(); } + OpenCLAllocator() + : bufferPool(0), + bufferPoolHostPtr(CL_MEM_ALLOC_HOST_PTR) + { + size_t defaultPoolSize, poolSize; + defaultPoolSize = ocl::Device::getDefault().isIntel() ? 
1 << 27 : 0; + poolSize = getConfigurationParameterForSize("OPENCV_OPENCL_BUFFERPOOL_LIMIT", defaultPoolSize); + bufferPool.setMaxReservedSize(poolSize); + poolSize = getConfigurationParameterForSize("OPENCV_OPENCL_HOST_PTR_BUFFERPOOL_LIMIT", defaultPoolSize); + bufferPoolHostPtr.setMaxReservedSize(poolSize); +#ifdef HAVE_OPENCL_SVM + poolSize = getConfigurationParameterForSize("OPENCV_OPENCL_SVM_BUFFERPOOL_LIMIT", defaultPoolSize); + bufferPoolSVM.setMaxReservedSize(poolSize); +#endif + + matStdAllocator = Mat::getStdAllocator(); + } UMatData* defaultAllocate(int dims, const int* sizes, int type, void* data, size_t* step, int flags, UMatUsageFlags usageFlags) const @@ -3739,33 +4189,47 @@ public: } Context& ctx = Context::getDefault(); + int createFlags = 0, flags0 = 0; getBestFlags(ctx, flags, usageFlags, createFlags, flags0); - size_t capacity = 0; void* handle = NULL; int allocatorFlags = 0; + +#ifdef HAVE_OPENCL_SVM + const svm::SVMCapabilities svmCaps = svm::getSVMCapabilitites(ctx); + if (ctx.useSVM() && svm::useSVM(usageFlags) && !svmCaps.isNoSVMSupport()) + { + allocatorFlags = ALLOCATOR_FLAGS_BUFFER_POOL_SVM_USED; + handle = bufferPoolSVM.allocate(total); + + // this property is constant, so single buffer pool can be used here + bool isFineGrainBuffer = svmCaps.isSupportFineGrainBuffer(); + allocatorFlags |= isFineGrainBuffer ? svm::OPENCL_SVM_FINE_GRAIN_BUFFER : svm::OPENCL_SVM_COARSE_GRAIN_BUFFER; + } + else +#endif if (createFlags == 0) { - handle = bufferPool.allocate(total, capacity); - if (!handle) - return defaultAllocate(dims, sizes, type, data, step, flags, usageFlags); allocatorFlags = ALLOCATOR_FLAGS_BUFFER_POOL_USED; + handle = bufferPool.allocate(total); + } + else if (createFlags == CL_MEM_ALLOC_HOST_PTR) + { + allocatorFlags = ALLOCATOR_FLAGS_BUFFER_POOL_HOST_PTR_USED; + handle = bufferPoolHostPtr.allocate(total); } else { - capacity = total; - cl_int retval = 0; - handle = clCreateBuffer((cl_context)ctx.ptr(), - CL_MEM_READ_WRITE|createFlags, total, 0, &retval); - if( !handle || retval != CL_SUCCESS ) - return defaultAllocate(dims, sizes, type, data, step, flags, usageFlags); - CV_IMPL_ADD(CV_IMPL_OCL) + CV_Assert(handle != NULL); // Unsupported, throw } + + if (!handle) + return defaultAllocate(dims, sizes, type, data, step, flags, usageFlags); + UMatData* u = new UMatData(this); u->data = 0; u->size = total; - u->capacity = capacity; u->handle = handle; u->flags = flags0; u->allocatorFlags_ = allocatorFlags; @@ -3788,22 +4252,81 @@ public: getBestFlags(ctx, accessFlags, usageFlags, createFlags, flags0); cl_context ctx_handle = (cl_context)ctx.ptr(); - cl_int retval = 0; - int tempUMatFlags = UMatData::TEMP_UMAT; - u->handle = clCreateBuffer(ctx_handle, CL_MEM_USE_HOST_PTR|CL_MEM_READ_WRITE, - u->size, u->origdata, &retval); - if((!u->handle || retval != CL_SUCCESS) && !(accessFlags & ACCESS_FAST)) + int allocatorFlags = 0; + int tempUMatFlags = 0; + void* handle = NULL; + cl_int retval = CL_SUCCESS; + +#ifdef HAVE_OPENCL_SVM + svm::SVMCapabilities svmCaps = svm::getSVMCapabilitites(ctx); + bool useSVM = ctx.useSVM() && svm::useSVM(usageFlags); + if (useSVM && svmCaps.isSupportFineGrainSystem()) { - u->handle = clCreateBuffer(ctx_handle, CL_MEM_COPY_HOST_PTR|CL_MEM_READ_WRITE|createFlags, - u->size, u->origdata, &retval); - tempUMatFlags = UMatData::TEMP_COPIED_UMAT; + allocatorFlags = svm::OPENCL_SVM_FINE_GRAIN_SYSTEM; + tempUMatFlags = UMatData::TEMP_UMAT; + handle = u->origdata; + CV_OPENCL_SVM_TRACE_P("Use fine grain system: %d (%p)\n", (int)u->size, handle); 
+ } + else if (useSVM && (svmCaps.isSupportFineGrainBuffer() || svmCaps.isSupportCoarseGrainBuffer())) + { + if (!(accessFlags & ACCESS_FAST)) // memcpy used + { + bool isFineGrainBuffer = svmCaps.isSupportFineGrainBuffer(); + cl_svm_mem_flags memFlags = createFlags | + (isFineGrainBuffer ? CL_MEM_SVM_FINE_GRAIN_BUFFER : 0); + + const svm::SVMFunctions* svmFns = svm::getSVMFunctions(ctx); + CV_DbgAssert(svmFns->isValid()); + + CV_OPENCL_SVM_TRACE_P("clSVMAlloc + copy: %d\n", (int)u->size); + handle = svmFns->fn_clSVMAlloc((cl_context)ctx.ptr(), memFlags, u->size, 0); + CV_Assert(handle); + + cl_command_queue q = NULL; + if (!isFineGrainBuffer) + { + q = (cl_command_queue)Queue::getDefault().ptr(); + CV_OPENCL_SVM_TRACE_P("clEnqueueSVMMap: %p (%d)\n", handle, (int)u->size); + cl_int status = svmFns->fn_clEnqueueSVMMap(q, CL_TRUE, CL_MAP_WRITE, + handle, u->size, + 0, NULL, NULL); + CV_Assert(status == CL_SUCCESS); + + } + memcpy(handle, u->origdata, u->size); + if (!isFineGrainBuffer) + { + CV_OPENCL_SVM_TRACE_P("clEnqueueSVMUnmap: %p\n", handle); + cl_int status = svmFns->fn_clEnqueueSVMUnmap(q, handle, 0, NULL, NULL); + CV_Assert(status == CL_SUCCESS); + } + + tempUMatFlags = UMatData::TEMP_UMAT | UMatData::TEMP_COPIED_UMAT; + allocatorFlags |= isFineGrainBuffer ? svm::OPENCL_SVM_FINE_GRAIN_BUFFER + : svm::OPENCL_SVM_COARSE_GRAIN_BUFFER; + } + } + else +#endif + { + tempUMatFlags = UMatData::TEMP_UMAT; + handle = clCreateBuffer(ctx_handle, CL_MEM_USE_HOST_PTR|createFlags, + u->size, u->origdata, &retval); + if((!handle || retval < 0) && !(accessFlags & ACCESS_FAST)) + { + handle = clCreateBuffer(ctx_handle, CL_MEM_COPY_HOST_PTR|CL_MEM_READ_WRITE|createFlags, + u->size, u->origdata, &retval); + tempUMatFlags |= UMatData::TEMP_COPIED_UMAT; + } } - if(!u->handle || retval != CL_SUCCESS) + if(!handle || retval != CL_SUCCESS) return false; + u->handle = handle; u->prevAllocator = u->currAllocator; u->currAllocator = this; u->flags |= tempUMatFlags; + u->allocatorFlags_ = allocatorFlags; } if(accessFlags & ACCESS_WRITE) u->markHostCopyObsolete(true); @@ -3848,34 +4371,93 @@ public: CV_Assert(u->urefcount >= 0); CV_Assert(u->refcount >= 0); - // TODO: !!! 
when we add Shared Virtual Memory Support, - // this function (as well as the others) should be corrected CV_Assert(u->handle != 0 && u->urefcount == 0); if(u->tempUMat()) { // UMatDataAutoLock lock(u); + if( u->hostCopyObsolete() && u->refcount > 0 ) { - cl_command_queue q = (cl_command_queue)Queue::getDefault().ptr(); - if( u->tempCopiedUMat() ) +#ifdef HAVE_OPENCL_SVM + if ((u->allocatorFlags_ & svm::OPENCL_SVM_BUFFER_MASK) != 0) { - AlignedDataPtr alignedPtr(u->origdata, u->size, CV_OPENCL_DATA_PTR_ALIGNMENT); - CV_OclDbgAssert(clEnqueueReadBuffer(q, (cl_mem)u->handle, CL_TRUE, 0, - u->size, alignedPtr.getAlignedPtr(), 0, 0, 0) == CL_SUCCESS); + Context& ctx = Context::getDefault(); + const svm::SVMFunctions* svmFns = svm::getSVMFunctions(ctx); + CV_DbgAssert(svmFns->isValid()); + + if( u->tempCopiedUMat() ) + { + CV_DbgAssert((u->allocatorFlags_ & svm::OPENCL_SVM_BUFFER_MASK) == svm::OPENCL_SVM_FINE_GRAIN_BUFFER || + (u->allocatorFlags_ & svm::OPENCL_SVM_BUFFER_MASK) == svm::OPENCL_SVM_COARSE_GRAIN_BUFFER); + bool isFineGrainBuffer = (u->allocatorFlags_ & svm::OPENCL_SVM_BUFFER_MASK) == svm::OPENCL_SVM_FINE_GRAIN_BUFFER; + cl_command_queue q = NULL; + if (!isFineGrainBuffer) + { + CV_DbgAssert(((u->allocatorFlags_ & svm::OPENCL_SVM_BUFFER_MAP) == 0)); + q = (cl_command_queue)Queue::getDefault().ptr(); + CV_OPENCL_SVM_TRACE_P("clEnqueueSVMMap: %p (%d)\n", u->handle, (int)u->size); + cl_int status = svmFns->fn_clEnqueueSVMMap(q, CL_FALSE, CL_MAP_READ, + u->handle, u->size, + 0, NULL, NULL); + CV_Assert(status == CL_SUCCESS); + } + clFinish(q); + memcpy(u->origdata, u->handle, u->size); + if (!isFineGrainBuffer) + { + CV_OPENCL_SVM_TRACE_P("clEnqueueSVMUnmap: %p\n", u->handle); + cl_int status = svmFns->fn_clEnqueueSVMUnmap(q, u->handle, 0, NULL, NULL); + CV_Assert(status == CL_SUCCESS); + } + } + else + { + CV_DbgAssert((u->allocatorFlags_ & svm::OPENCL_SVM_BUFFER_MASK) == svm::OPENCL_SVM_FINE_GRAIN_SYSTEM); + // nothing + } } else +#endif + { + cl_command_queue q = (cl_command_queue)Queue::getDefault().ptr(); + if( u->tempCopiedUMat() ) + { + AlignedDataPtr alignedPtr(u->origdata, u->size, CV_OPENCL_DATA_PTR_ALIGNMENT); + CV_OclDbgAssert(clEnqueueReadBuffer(q, (cl_mem)u->handle, CL_TRUE, 0, + u->size, alignedPtr.getAlignedPtr(), 0, 0, 0) == CL_SUCCESS); + } + else + { + // TODO Is it really needed for clCreateBuffer with CL_MEM_USE_HOST_PTR? 
+ cl_int retval = 0; + void* data = clEnqueueMapBuffer(q, (cl_mem)u->handle, CL_TRUE, + (CL_MAP_READ | CL_MAP_WRITE), + 0, u->size, 0, 0, 0, &retval); + CV_OclDbgAssert(retval == CL_SUCCESS); + CV_OclDbgAssert(clEnqueueUnmapMemObject(q, (cl_mem)u->handle, data, 0, 0, 0) == CL_SUCCESS); + CV_OclDbgAssert(clFinish(q) == CL_SUCCESS); + } + } + u->markHostCopyObsolete(false); + } +#ifdef HAVE_OPENCL_SVM + if ((u->allocatorFlags_ & svm::OPENCL_SVM_BUFFER_MASK) != 0) + { + if( u->tempCopiedUMat() ) { - cl_int retval = 0; - void* data = clEnqueueMapBuffer(q, (cl_mem)u->handle, CL_TRUE, - (CL_MAP_READ | CL_MAP_WRITE), - 0, u->size, 0, 0, 0, &retval); - CV_OclDbgAssert(retval == CL_SUCCESS); - CV_OclDbgAssert(clEnqueueUnmapMemObject(q, (cl_mem)u->handle, data, 0, 0, 0) == CL_SUCCESS); - CV_OclDbgAssert(clFinish(q) == CL_SUCCESS); + Context& ctx = Context::getDefault(); + const svm::SVMFunctions* svmFns = svm::getSVMFunctions(ctx); + CV_DbgAssert(svmFns->isValid()); + + CV_OPENCL_SVM_TRACE_P("clSVMFree: %p\n", u->handle); + svmFns->fn_clSVMFree((cl_context)ctx.ptr(), u->handle); } } - u->markHostCopyObsolete(false); - clReleaseMemObject((cl_mem)u->handle); + else +#endif + { + clReleaseMemObject((cl_mem)u->handle); + } u->handle = 0; u->currAllocator = u->prevAllocator; if(u->data && u->copyOnMap() && !(u->flags & UMatData::USER_ALLOCATED)) @@ -3894,14 +4476,42 @@ public: } if (u->allocatorFlags_ & ALLOCATOR_FLAGS_BUFFER_POOL_USED) { - bufferPool.release((cl_mem)u->handle, u->capacity); + bufferPool.release((cl_mem)u->handle); + } + else if (u->allocatorFlags_ & ALLOCATOR_FLAGS_BUFFER_POOL_HOST_PTR_USED) + { + bufferPoolHostPtr.release((cl_mem)u->handle); + } +#ifdef HAVE_OPENCL_SVM + else if (u->allocatorFlags_ & ALLOCATOR_FLAGS_BUFFER_POOL_SVM_USED) + { + if ((u->allocatorFlags_ & svm::OPENCL_SVM_BUFFER_MASK) == svm::OPENCL_SVM_FINE_GRAIN_SYSTEM) + { + //nothing + } + else if ((u->allocatorFlags_ & svm::OPENCL_SVM_BUFFER_MASK) == svm::OPENCL_SVM_FINE_GRAIN_BUFFER || + (u->allocatorFlags_ & svm::OPENCL_SVM_BUFFER_MASK) == svm::OPENCL_SVM_COARSE_GRAIN_BUFFER) + { + Context& ctx = Context::getDefault(); + const svm::SVMFunctions* svmFns = svm::getSVMFunctions(ctx); + CV_DbgAssert(svmFns->isValid()); + cl_command_queue q = (cl_command_queue)Queue::getDefault().ptr(); + + if ((u->allocatorFlags_ & svm::OPENCL_SVM_BUFFER_MAP) != 0) + { + CV_OPENCL_SVM_TRACE_P("clEnqueueSVMUnmap: %p\n", u->handle); + cl_int status = svmFns->fn_clEnqueueSVMUnmap(q, u->handle, 0, NULL, NULL); + CV_Assert(status == CL_SUCCESS); + } + } + bufferPoolSVM.release((void*)u->handle); } +#endif else { clReleaseMemObject((cl_mem)u->handle); } u->handle = 0; - u->capacity = 0; delete u; } } @@ -3925,13 +4535,41 @@ public: { if( !u->copyOnMap() ) { + // TODO + // because there can be other map requests for the same UMat with different access flags, + // we use the universal (read-write) access mode. 
+#ifdef HAVE_OPENCL_SVM + if ((u->allocatorFlags_ & svm::OPENCL_SVM_BUFFER_MASK) != 0) + { + if ((u->allocatorFlags_ & svm::OPENCL_SVM_BUFFER_MASK) == svm::OPENCL_SVM_COARSE_GRAIN_BUFFER) + { + Context& ctx = Context::getDefault(); + const svm::SVMFunctions* svmFns = svm::getSVMFunctions(ctx); + CV_DbgAssert(svmFns->isValid()); + + if ((u->allocatorFlags_ & svm::OPENCL_SVM_BUFFER_MAP) == 0) + { + CV_OPENCL_SVM_TRACE_P("clEnqueueSVMMap: %p (%d)\n", u->handle, (int)u->size); + cl_int status = svmFns->fn_clEnqueueSVMMap(q, CL_FALSE, CL_MAP_READ | CL_MAP_WRITE, + u->handle, u->size, + 0, NULL, NULL); + CV_Assert(status == CL_SUCCESS); + u->allocatorFlags_ |= svm::OPENCL_SVM_BUFFER_MAP; + } + } + clFinish(q); + u->data = (uchar*)u->handle; + u->markHostCopyObsolete(false); + u->markDeviceMemMapped(true); + return; + } +#endif if (u->data) // FIXIT Workaround for UMat synchronization issue { //CV_Assert(u->hostCopyObsolete() == false); return; } - // because there can be other map requests for the same UMat with different access flags, - // we use the universal (read-write) access mode. + cl_int retval = 0; u->data = (uchar*)clEnqueueMapBuffer(q, (cl_mem)u->handle, CL_TRUE, (CL_MAP_READ | CL_MAP_WRITE), @@ -3943,6 +4581,7 @@ public: return; } + // TODO Is it really a good idea and was it tested well? // if map failed, switch to copy-on-map mode for the particular buffer u->flags |= UMatData::COPY_ON_MAP; } @@ -3957,6 +4596,9 @@ public: if( (accessFlags & ACCESS_READ) != 0 && u->hostCopyObsolete() ) { AlignedDataPtr alignedPtr(u->data, u->size, CV_OPENCL_DATA_PTR_ALIGNMENT); +#ifdef HAVE_OPENCL_SVM + CV_DbgAssert((u->allocatorFlags_ & svm::OPENCL_SVM_BUFFER_MASK) == 0); +#endif CV_Assert( clEnqueueReadBuffer(q, (cl_mem)u->handle, CL_TRUE, 0, u->size, alignedPtr.getAlignedPtr(), 0, 0, 0) == CL_SUCCESS ); u->markHostCopyObsolete(false); @@ -3983,6 +4625,31 @@ public: { CV_Assert(u->data != NULL); u->markDeviceMemMapped(false); +#ifdef HAVE_OPENCL_SVM + if ((u->allocatorFlags_ & svm::OPENCL_SVM_BUFFER_MASK) != 0) + { + if ((u->allocatorFlags_ & svm::OPENCL_SVM_BUFFER_MASK) == svm::OPENCL_SVM_COARSE_GRAIN_BUFFER) + { + Context& ctx = Context::getDefault(); + const svm::SVMFunctions* svmFns = svm::getSVMFunctions(ctx); + CV_DbgAssert(svmFns->isValid()); + + CV_DbgAssert((u->allocatorFlags_ & svm::OPENCL_SVM_BUFFER_MAP) != 0); + { + CV_OPENCL_SVM_TRACE_P("clEnqueueSVMUnmap: %p\n", u->handle); + cl_int status = svmFns->fn_clEnqueueSVMUnmap(q, u->handle, + 0, NULL, NULL); + CV_Assert(status == CL_SUCCESS); + clFinish(q); + u->allocatorFlags_ &= ~svm::OPENCL_SVM_BUFFER_MAP; + } + } + u->data = 0; + u->markDeviceCopyObsolete(false); + u->markHostCopyObsolete(false); + return; + } +#endif CV_Assert( (retval = clEnqueueUnmapMemObject(q, (cl_mem)u->handle, u->data, 0, 0, 0)) == CL_SUCCESS ); if (Device::getDefault().isAMD()) @@ -3995,6 +4662,9 @@ public: else if( u->copyOnMap() && u->deviceCopyObsolete() ) { AlignedDataPtr alignedPtr(u->data, u->size, CV_OPENCL_DATA_PTR_ALIGNMENT); +#ifdef HAVE_OPENCL_SVM + CV_DbgAssert((u->allocatorFlags_ & svm::OPENCL_SVM_BUFFER_MASK) == 0); +#endif CV_Assert( (retval = clEnqueueWriteBuffer(q, (cl_mem)u->handle, CL_TRUE, 0, u->size, alignedPtr.getAlignedPtr(), 0, 0, 0)) == CL_SUCCESS ); } @@ -4102,17 +4772,78 @@ public: srcrawofs, new_srcofs, new_srcstep, dstrawofs, new_dstofs, new_dststep); - AlignedDataPtr alignedPtr((uchar*)dstptr, sz[0] * dststep[0], CV_OPENCL_DATA_PTR_ALIGNMENT); - if( iscontinuous ) +#ifdef HAVE_OPENCL_SVM + if ((u->allocatorFlags_ & 
svm::OPENCL_SVM_BUFFER_MASK) != 0) { - CV_Assert( clEnqueueReadBuffer(q, (cl_mem)u->handle, CL_TRUE, - srcrawofs, total, alignedPtr.getAlignedPtr(), 0, 0, 0) == CL_SUCCESS ); + CV_DbgAssert(u->data == NULL || u->data == u->handle); + Context& ctx = Context::getDefault(); + const svm::SVMFunctions* svmFns = svm::getSVMFunctions(ctx); + CV_DbgAssert(svmFns->isValid()); + + CV_DbgAssert((u->allocatorFlags_ & svm::OPENCL_SVM_BUFFER_MAP) == 0); + if ((u->allocatorFlags_ & svm::OPENCL_SVM_BUFFER_MASK) == svm::OPENCL_SVM_COARSE_GRAIN_BUFFER) + { + CV_OPENCL_SVM_TRACE_P("clEnqueueSVMMap: %p (%d)\n", u->handle, (int)u->size); + cl_int status = svmFns->fn_clEnqueueSVMMap(q, CL_FALSE, CL_MAP_READ, + u->handle, u->size, + 0, NULL, NULL); + CV_Assert(status == CL_SUCCESS); + } + clFinish(q); + if( iscontinuous ) + { + memcpy(dstptr, (uchar*)u->handle + srcrawofs, total); + } + else + { + // This code is from MatAllocator::download() + int isz[CV_MAX_DIM]; + uchar* srcptr = (uchar*)u->handle; + for( int i = 0; i < dims; i++ ) + { + CV_Assert( sz[i] <= (size_t)INT_MAX ); + if( sz[i] == 0 ) + return; + if( srcofs ) + srcptr += srcofs[i]*(i <= dims-2 ? srcstep[i] : 1); + isz[i] = (int)sz[i]; + } + + Mat src(dims, isz, CV_8U, srcptr, srcstep); + Mat dst(dims, isz, CV_8U, dstptr, dststep); + + const Mat* arrays[] = { &src, &dst }; + uchar* ptrs[2]; + NAryMatIterator it(arrays, ptrs, 2); + size_t j, planesz = it.size; + + for( j = 0; j < it.nplanes; j++, ++it ) + memcpy(ptrs[1], ptrs[0], planesz); + } + if ((u->allocatorFlags_ & svm::OPENCL_SVM_BUFFER_MASK) == svm::OPENCL_SVM_COARSE_GRAIN_BUFFER) + { + CV_OPENCL_SVM_TRACE_P("clEnqueueSVMUnmap: %p\n", u->handle); + cl_int status = svmFns->fn_clEnqueueSVMUnmap(q, u->handle, + 0, NULL, NULL); + CV_Assert(status == CL_SUCCESS); + clFinish(q); + } } else +#endif { - CV_Assert( clEnqueueReadBufferRect(q, (cl_mem)u->handle, CL_TRUE, - new_srcofs, new_dstofs, new_sz, new_srcstep[0], new_srcstep[1], - new_dststep[0], new_dststep[1], alignedPtr.getAlignedPtr(), 0, 0, 0) == CL_SUCCESS ); + AlignedDataPtr alignedPtr((uchar*)dstptr, sz[0] * dststep[0], CV_OPENCL_DATA_PTR_ALIGNMENT); + if( iscontinuous ) + { + CV_Assert( clEnqueueReadBuffer(q, (cl_mem)u->handle, CL_TRUE, + srcrawofs, total, alignedPtr.getAlignedPtr(), 0, 0, 0) >= 0 ); + } + else + { + CV_Assert( clEnqueueReadBufferRect(q, (cl_mem)u->handle, CL_TRUE, + new_srcofs, new_dstofs, new_sz, new_srcstep[0], new_srcstep[1], + new_dststep[0], new_dststep[1], alignedPtr.getAlignedPtr(), 0, 0, 0) >= 0 ); + } } } @@ -4153,20 +4884,91 @@ public: CV_Assert( u->handle != 0 ); cl_command_queue q = (cl_command_queue)Queue::getDefault().ptr(); - AlignedDataPtr alignedPtr((uchar*)srcptr, sz[0] * srcstep[0], CV_OPENCL_DATA_PTR_ALIGNMENT); - if( iscontinuous ) +#ifdef HAVE_OPENCL_SVM + if ((u->allocatorFlags_ & svm::OPENCL_SVM_BUFFER_MASK) != 0) { - CV_Assert( clEnqueueWriteBuffer(q, (cl_mem)u->handle, - CL_TRUE, dstrawofs, total, srcptr, 0, 0, 0) == CL_SUCCESS ); + CV_DbgAssert(u->data == NULL || u->data == u->handle); + Context& ctx = Context::getDefault(); + const svm::SVMFunctions* svmFns = svm::getSVMFunctions(ctx); + CV_DbgAssert(svmFns->isValid()); + + CV_DbgAssert((u->allocatorFlags_ & svm::OPENCL_SVM_BUFFER_MAP) == 0); + if ((u->allocatorFlags_ & svm::OPENCL_SVM_BUFFER_MASK) == svm::OPENCL_SVM_COARSE_GRAIN_BUFFER) + { + CV_OPENCL_SVM_TRACE_P("clEnqueueSVMMap: %p (%d)\n", u->handle, (int)u->size); + cl_int status = svmFns->fn_clEnqueueSVMMap(q, CL_FALSE, CL_MAP_WRITE, + u->handle, u->size, + 0, NULL, NULL); + 
CV_Assert(status == CL_SUCCESS); + } + clFinish(q); + if( iscontinuous ) + { + memcpy((uchar*)u->handle + dstrawofs, srcptr, total); + } + else + { + // This code is from MatAllocator::upload() + int isz[CV_MAX_DIM]; + uchar* dstptr = (uchar*)u->handle; + for( int i = 0; i < dims; i++ ) + { + CV_Assert( sz[i] <= (size_t)INT_MAX ); + if( sz[i] == 0 ) + return; + if( dstofs ) + dstptr += dstofs[i]*(i <= dims-2 ? dststep[i] : 1); + isz[i] = (int)sz[i]; + } + + Mat src(dims, isz, CV_8U, (void*)srcptr, srcstep); + Mat dst(dims, isz, CV_8U, dstptr, dststep); + + const Mat* arrays[] = { &src, &dst }; + uchar* ptrs[2]; + NAryMatIterator it(arrays, ptrs, 2); + size_t j, planesz = it.size; + + for( j = 0; j < it.nplanes; j++, ++it ) + memcpy(ptrs[1], ptrs[0], planesz); + } + if ((u->allocatorFlags_ & svm::OPENCL_SVM_BUFFER_MASK) == svm::OPENCL_SVM_COARSE_GRAIN_BUFFER) + { + CV_OPENCL_SVM_TRACE_P("clEnqueueSVMUnmap: %p\n", u->handle); + cl_int status = svmFns->fn_clEnqueueSVMUnmap(q, u->handle, + 0, NULL, NULL); + CV_Assert(status == CL_SUCCESS); + clFinish(q); + } } else +#endif { - CV_Assert( clEnqueueWriteBufferRect(q, (cl_mem)u->handle, CL_TRUE, - new_dstofs, new_srcofs, new_sz, new_dststep[0], new_dststep[1], - new_srcstep[0], new_srcstep[1], srcptr, 0, 0, 0) == CL_SUCCESS ); + AlignedDataPtr alignedPtr((uchar*)srcptr, sz[0] * srcstep[0], CV_OPENCL_DATA_PTR_ALIGNMENT); + if( iscontinuous ) + { + CV_Assert( clEnqueueWriteBuffer(q, (cl_mem)u->handle, + CL_TRUE, dstrawofs, total, alignedPtr.getAlignedPtr(), 0, 0, 0) >= 0 ); + } + else + { + CV_Assert( clEnqueueWriteBufferRect(q, (cl_mem)u->handle, CL_TRUE, + new_dstofs, new_srcofs, new_sz, new_dststep[0], new_dststep[1], + new_srcstep[0], new_srcstep[1], alignedPtr.getAlignedPtr(), 0, 0, 0) >= 0 ); + } } - u->markHostCopyObsolete(true); +#ifdef HAVE_OPENCL_SVM + if ((u->allocatorFlags_ & svm::OPENCL_SVM_BUFFER_MASK) == svm::OPENCL_SVM_FINE_GRAIN_BUFFER || + (u->allocatorFlags_ & svm::OPENCL_SVM_BUFFER_MASK) == svm::OPENCL_SVM_FINE_GRAIN_SYSTEM) + { + // nothing + } + else +#endif + { + u->markHostCopyObsolete(true); + } u->markDeviceCopyObsolete(false); } @@ -4198,7 +5000,17 @@ public: { download(src, dst->data + dstrawofs, dims, sz, srcofs, srcstep, dststep); dst->markHostCopyObsolete(false); - dst->markDeviceCopyObsolete(true); +#ifdef HAVE_OPENCL_SVM + if ((dst->allocatorFlags_ & svm::OPENCL_SVM_BUFFER_MASK) == svm::OPENCL_SVM_FINE_GRAIN_BUFFER || + (dst->allocatorFlags_ & svm::OPENCL_SVM_BUFFER_MASK) == svm::OPENCL_SVM_FINE_GRAIN_SYSTEM) + { + // nothing + } + else +#endif + { + dst->markDeviceCopyObsolete(true); + } return; } @@ -4206,26 +5018,110 @@ public: CV_Assert(dst->refcount == 0); cl_command_queue q = (cl_command_queue)Queue::getDefault().ptr(); - cl_int retval; - if( iscontinuous ) + cl_int retval = CL_SUCCESS; +#ifdef HAVE_OPENCL_SVM + if ((src->allocatorFlags_ & svm::OPENCL_SVM_BUFFER_MASK) != 0 || + (dst->allocatorFlags_ & svm::OPENCL_SVM_BUFFER_MASK) != 0) { - CV_Assert( (retval = clEnqueueCopyBuffer(q, (cl_mem)src->handle, (cl_mem)dst->handle, - srcrawofs, dstrawofs, total, 0, 0, 0)) == CL_SUCCESS ); + if ((src->allocatorFlags_ & svm::OPENCL_SVM_BUFFER_MASK) != 0 && + (dst->allocatorFlags_ & svm::OPENCL_SVM_BUFFER_MASK) != 0) + { + Context& ctx = Context::getDefault(); + const svm::SVMFunctions* svmFns = svm::getSVMFunctions(ctx); + CV_DbgAssert(svmFns->isValid()); + + if( iscontinuous ) + { + CV_OPENCL_SVM_TRACE_P("clEnqueueSVMMemcpy: %p <-- %p (%d)\n", + (uchar*)dst->handle + dstrawofs, (uchar*)src->handle + srcrawofs, 
(int)total); + cl_int status = svmFns->fn_clEnqueueSVMMemcpy(q, CL_TRUE, + (uchar*)dst->handle + dstrawofs, (uchar*)src->handle + srcrawofs, + total, 0, NULL, NULL); + CV_Assert(status == CL_SUCCESS); + } + else + { + clFinish(q); + // This code is from MatAllocator::download()/upload() + int isz[CV_MAX_DIM]; + uchar* srcptr = (uchar*)src->handle; + for( int i = 0; i < dims; i++ ) + { + CV_Assert( sz[i] <= (size_t)INT_MAX ); + if( sz[i] == 0 ) + return; + if( srcofs ) + srcptr += srcofs[i]*(i <= dims-2 ? srcstep[i] : 1); + isz[i] = (int)sz[i]; + } + Mat m_src(dims, isz, CV_8U, srcptr, srcstep); + + uchar* dstptr = (uchar*)dst->handle; + for( int i = 0; i < dims; i++ ) + { + if( dstofs ) + dstptr += dstofs[i]*(i <= dims-2 ? dststep[i] : 1); + } + Mat m_dst(dims, isz, CV_8U, dstptr, dststep); + + const Mat* arrays[] = { &m_src, &m_dst }; + uchar* ptrs[2]; + NAryMatIterator it(arrays, ptrs, 2); + size_t j, planesz = it.size; + + for( j = 0; j < it.nplanes; j++, ++it ) + memcpy(ptrs[1], ptrs[0], planesz); + } + } + else + { + if ((src->allocatorFlags_ & svm::OPENCL_SVM_BUFFER_MASK) != 0) + { + map(src, ACCESS_READ); + upload(dst, src->data + srcrawofs, dims, sz, dstofs, dststep, srcstep); + unmap(src); + } + else + { + map(dst, ACCESS_WRITE); + download(src, dst->data + dstrawofs, dims, sz, srcofs, srcstep, dststep); + unmap(dst); + } + } } else +#endif { - CV_Assert( (retval = clEnqueueCopyBufferRect(q, (cl_mem)src->handle, (cl_mem)dst->handle, - new_srcofs, new_dstofs, new_sz, - new_srcstep[0], new_srcstep[1], - new_dststep[0], new_dststep[1], - 0, 0, 0)) == CL_SUCCESS ); + if( iscontinuous ) + { + CV_Assert( (retval = clEnqueueCopyBuffer(q, (cl_mem)src->handle, (cl_mem)dst->handle, + srcrawofs, dstrawofs, total, 0, 0, 0)) == CL_SUCCESS ); + } + else + { + CV_Assert( (retval = clEnqueueCopyBufferRect(q, (cl_mem)src->handle, (cl_mem)dst->handle, + new_srcofs, new_dstofs, new_sz, + new_srcstep[0], new_srcstep[1], + new_dststep[0], new_dststep[1], + 0, 0, 0)) == CL_SUCCESS ); + } } - if(retval == CL_SUCCESS) + if (retval == CL_SUCCESS) { CV_IMPL_ADD(CV_IMPL_OCL) } - dst->markHostCopyObsolete(true); +#ifdef HAVE_OPENCL_SVM + if ((dst->allocatorFlags_ & svm::OPENCL_SVM_BUFFER_MASK) == svm::OPENCL_SVM_FINE_GRAIN_BUFFER || + (dst->allocatorFlags_ & svm::OPENCL_SVM_BUFFER_MASK) == svm::OPENCL_SVM_FINE_GRAIN_SYSTEM) + { + // nothing + } + else +#endif + { + dst->markHostCopyObsolete(true); + } dst->markDeviceCopyObsolete(false); if( _sync ) @@ -4234,7 +5130,23 @@ public: } } - BufferPoolController* getBufferPoolController() const { return &bufferPool; } + BufferPoolController* getBufferPoolController(const char* id) const { +#ifdef HAVE_OPENCL_SVM + if ((svm::checkForceSVMUmatUsage() && (id == NULL || strcmp(id, "OCL") == 0)) || (id != NULL && strcmp(id, "SVM") == 0)) + { + return &bufferPoolSVM; + } +#endif + if (id != NULL && strcmp(id, "HOST_ALLOC") == 0) + { + return &bufferPoolHostPtr; + } + if (id != NULL && strcmp(id, "OCL") != 0) + { + CV_ErrorNoReturn(cv::Error::StsBadArg, "getBufferPoolController(): unknown BufferPool ID\n"); + } + return &bufferPool; + } MatAllocator* matStdAllocator; }; @@ -4818,7 +5730,7 @@ void* Image2D::ptr() const return p ? 
p->handle : 0; } -bool isPerformanceCheckBypassed() +bool internal::isPerformanceCheckBypassed() { static bool initialized = false; static bool value = false; @@ -4830,4 +5742,22 @@ bool isPerformanceCheckBypassed() return value; } +bool internal::isCLBuffer(UMat& u) +{ + void* h = u.handle(ACCESS_RW); + if (!h) + return true; + CV_DbgAssert(u.u->currAllocator == getOpenCLAllocator()); +#if 1 + if ((u.u->allocatorFlags_ & 0xffff0000) != 0) // OpenCL SVM flags are stored here + return false; +#else + cl_mem_object_type type = 0; + cl_int ret = clGetMemObjectInfo((cl_mem)h, CL_MEM_TYPE, sizeof(type), &type, NULL); + if (ret != CL_SUCCESS || type != CL_MEM_OBJECT_BUFFER) + return false; +#endif + return true; +} + }} diff --git a/modules/core/src/opencl/runtime/opencl_core.cpp b/modules/core/src/opencl/runtime/opencl_core.cpp index 93f6aae5de..43f6b13b6e 100644 --- a/modules/core/src/opencl/runtime/opencl_core.cpp +++ b/modules/core/src/opencl/runtime/opencl_core.cpp @@ -70,7 +70,8 @@ static void* AppleCLGetProcAddress(const char* name) handle = dlopen(oclpath, RTLD_LAZY | RTLD_GLOBAL); if (handle == NULL) { - fprintf(stderr, ERROR_MSG_CANT_LOAD); + if (envPath) + fprintf(stderr, ERROR_MSG_CANT_LOAD); } else if (dlsym(handle, OPENCL_FUNC_TO_CHECK_1_1) == NULL) { @@ -108,7 +109,8 @@ static void* WinGetProcAddress(const char* name) handle = LoadLibraryA(path); if (!handle) { - fprintf(stderr, ERROR_MSG_CANT_LOAD); + if (envPath) + fprintf(stderr, ERROR_MSG_CANT_LOAD); } else if (GetProcAddress(handle, OPENCL_FUNC_TO_CHECK_1_1) == NULL) { @@ -145,7 +147,8 @@ static void* GetProcAddress(const char* name) handle = dlopen(path, RTLD_LAZY | RTLD_GLOBAL); if (handle == NULL) { - fprintf(stderr, ERROR_MSG_CANT_LOAD); + if (envPath) + fprintf(stderr, ERROR_MSG_CANT_LOAD); } else if (dlsym(handle, OPENCL_FUNC_TO_CHECK_1_1) == NULL) { @@ -182,6 +185,65 @@ static void* opencl_check_fn(int ID); #define CUSTOM_FUNCTION_ID 1000 +#ifdef HAVE_OPENCL_SVM +#include "opencv2/core/opencl/runtime/opencl_svm_20.hpp" +#define SVM_FUNCTION_ID_START CUSTOM_FUNCTION_ID +#define SVM_FUNCTION_ID_END CUSTOM_FUNCTION_ID + 100 + +enum OPENCL_FN_SVM_ID +{ + OPENCL_FN_clSVMAlloc = SVM_FUNCTION_ID_START, + OPENCL_FN_clSVMFree, + OPENCL_FN_clSetKernelArgSVMPointer, + OPENCL_FN_clSetKernelExecInfo, + OPENCL_FN_clEnqueueSVMFree, + OPENCL_FN_clEnqueueSVMMemcpy, + OPENCL_FN_clEnqueueSVMMemFill, + OPENCL_FN_clEnqueueSVMMap, + OPENCL_FN_clEnqueueSVMUnmap, +}; + +void* (CL_API_CALL *clSVMAlloc)(cl_context context, cl_svm_mem_flags flags, size_t size, unsigned int alignment) = + opencl_fn4::switch_fn; +static const struct DynamicFnEntry _clSVMAlloc_definition = { "clSVMAlloc", (void**)&clSVMAlloc}; +void (CL_API_CALL *clSVMFree)(cl_context context, void* svm_pointer) = + opencl_fn2::switch_fn; +static const struct DynamicFnEntry _clSVMFree_definition = { "clSVMFree", (void**)&clSVMFree}; +cl_int (CL_API_CALL *clSetKernelArgSVMPointer)(cl_kernel kernel, cl_uint arg_index, const void* arg_value) = + opencl_fn3::switch_fn; +static const struct DynamicFnEntry _clSetKernelArgSVMPointer_definition = { "clSetKernelArgSVMPointer", (void**)&clSetKernelArgSVMPointer}; +//void* (CL_API_CALL *clSetKernelExecInfo)(cl_kernel kernel, cl_kernel_exec_info param_name, size_t param_value_size, const void* param_value) = +// opencl_fn4::switch_fn; +//static const struct DynamicFnEntry _clSetKernelExecInfo_definition = { "clSetKernelExecInfo", (void**)&clSetKernelExecInfo}; +//cl_int (CL_API_CALL *clEnqueueSVMFree)(...) 
= +// opencl_fn8::switch_fn; +//static const struct DynamicFnEntry _clEnqueueSVMFree_definition = { "clEnqueueSVMFree", (void**)&clEnqueueSVMFree}; +cl_int (CL_API_CALL *clEnqueueSVMMemcpy)(cl_command_queue command_queue, cl_bool blocking_copy, void* dst_ptr, const void* src_ptr, size_t size, cl_uint num_events_in_wait_list, const cl_event* event_wait_list, cl_event* event) = + opencl_fn8::switch_fn; +static const struct DynamicFnEntry _clEnqueueSVMMemcpy_definition = { "clEnqueueSVMMemcpy", (void**)&clEnqueueSVMMemcpy}; +cl_int (CL_API_CALL *clEnqueueSVMMemFill)(cl_command_queue command_queue, void* svm_ptr, const void* pattern, size_t pattern_size, size_t size, cl_uint num_events_in_wait_list, const cl_event* event_wait_list, cl_event* event) = + opencl_fn8::switch_fn; +static const struct DynamicFnEntry _clEnqueueSVMMemFill_definition = { "clEnqueueSVMMemFill", (void**)&clEnqueueSVMMemFill}; +cl_int (CL_API_CALL *clEnqueueSVMMap)(cl_command_queue command_queue, cl_bool blocking_map, cl_map_flags map_flags, void* svm_ptr, size_t size, cl_uint num_events_in_wait_list, const cl_event* event_wait_list, cl_event* event) = + opencl_fn8::switch_fn; +static const struct DynamicFnEntry _clEnqueueSVMMap_definition = { "clEnqueueSVMMap", (void**)&clEnqueueSVMMap}; +cl_int (CL_API_CALL *clEnqueueSVMUnmap)(cl_command_queue command_queue, void* svm_ptr, cl_uint num_events_in_wait_list, const cl_event* event_wait_list, cl_event* event) = + opencl_fn5::switch_fn; +static const struct DynamicFnEntry _clEnqueueSVMUnmap_definition = { "clEnqueueSVMUnmap", (void**)&clEnqueueSVMUnmap}; + +static const struct DynamicFnEntry* opencl_svm_fn_list[] = { + &_clSVMAlloc_definition, + &_clSVMFree_definition, + &_clSetKernelArgSVMPointer_definition, + NULL/*&_clSetKernelExecInfo_definition*/, + NULL/*&_clEnqueueSVMFree_definition*/, + &_clEnqueueSVMMemcpy_definition, + &_clEnqueueSVMMemFill_definition, + &_clEnqueueSVMMap_definition, + &_clEnqueueSVMUnmap_definition, +}; +#endif // HAVE_OPENCL_SVM + // // END OF CUSTOM FUNCTIONS HERE // @@ -194,6 +256,14 @@ static void* opencl_check_fn(int ID) assert(ID >= 0 && ID < (int)(sizeof(opencl_fn_list)/sizeof(opencl_fn_list[0]))); e = opencl_fn_list[ID]; } +#ifdef HAVE_OPENCL_SVM + else if (ID >= SVM_FUNCTION_ID_START && ID < SVM_FUNCTION_ID_END) + { + ID = ID - SVM_FUNCTION_ID_START; + assert(ID >= 0 && ID < (int)(sizeof(opencl_svm_fn_list)/sizeof(opencl_svm_fn_list[0]))); + e = opencl_svm_fn_list[ID]; + } +#endif else { CV_ErrorNoReturn(cv::Error::StsBadArg, "Invalid function ID"); diff --git a/modules/core/src/precomp.hpp b/modules/core/src/precomp.hpp index ef154400e2..0f85cc5568 100644 --- a/modules/core/src/precomp.hpp +++ b/modules/core/src/precomp.hpp @@ -192,6 +192,7 @@ struct NoVec extern volatile bool USE_SSE2; extern volatile bool USE_SSE4_2; extern volatile bool USE_AVX; +extern volatile bool USE_AVX2; enum { BLOCK_SIZE = 1024 }; diff --git a/modules/core/src/stat.cpp b/modules/core/src/stat.cpp index ca56a7c966..87c423dc3b 100644 --- a/modules/core/src/stat.cpp +++ b/modules/core/src/stat.cpp @@ -12,6 +12,7 @@ // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved. +// Copyright (C) 2014-2015, Itseez Inc., all rights reserved. // Third party copyrights are property of their respective owners. 
// // Redistribution and use in source and binary forms, with or without modification, @@ -72,7 +73,114 @@ struct Sum_SIMD } }; -#if CV_NEON +#if CV_SSE2 + +template <> +struct Sum_SIMD +{ + int operator () (const schar * src0, const uchar * mask, int * dst, int len, int cn) const + { + if (mask || (cn != 1 && cn != 2 && cn != 4) || !USE_SSE2) + return 0; + + int x = 0; + __m128i v_zero = _mm_setzero_si128(), v_sum = v_zero; + + for ( ; x <= len - 16; x += 16) + { + __m128i v_src = _mm_loadu_si128((const __m128i *)(src0 + x)); + __m128i v_half = _mm_srai_epi16(_mm_unpacklo_epi8(v_zero, v_src), 8); + + v_sum = _mm_add_epi32(v_sum, _mm_srai_epi32(_mm_unpacklo_epi16(v_zero, v_half), 16)); + v_sum = _mm_add_epi32(v_sum, _mm_srai_epi32(_mm_unpackhi_epi16(v_zero, v_half), 16)); + + v_half = _mm_srai_epi16(_mm_unpackhi_epi8(v_zero, v_src), 8); + v_sum = _mm_add_epi32(v_sum, _mm_srai_epi32(_mm_unpacklo_epi16(v_zero, v_half), 16)); + v_sum = _mm_add_epi32(v_sum, _mm_srai_epi32(_mm_unpackhi_epi16(v_zero, v_half), 16)); + } + + for ( ; x <= len - 8; x += 8) + { + __m128i v_src = _mm_srai_epi16(_mm_unpacklo_epi8(v_zero, _mm_loadl_epi64((__m128i const *)(src0 + x))), 8); + + v_sum = _mm_add_epi32(v_sum, _mm_srai_epi32(_mm_unpacklo_epi16(v_zero, v_src), 16)); + v_sum = _mm_add_epi32(v_sum, _mm_srai_epi32(_mm_unpackhi_epi16(v_zero, v_src), 16)); + } + + int CV_DECL_ALIGNED(16) ar[4]; + _mm_store_si128((__m128i*)ar, v_sum); + + for (int i = 0; i < 4; i += cn) + for (int j = 0; j < cn; ++j) + dst[j] += ar[j + i]; + + return x / cn; + } +}; + +template <> +struct Sum_SIMD +{ + int operator () (const int * src0, const uchar * mask, double * dst, int len, int cn) const + { + if (mask || (cn != 1 && cn != 2 && cn != 4) || !USE_SSE2) + return 0; + + int x = 0; + __m128d v_zero = _mm_setzero_pd(), v_sum0 = v_zero, v_sum1 = v_zero; + + for ( ; x <= len - 4; x += 4) + { + __m128i v_src = _mm_loadu_si128((__m128i const *)(src0 + x)); + v_sum0 = _mm_add_pd(v_sum0, _mm_cvtepi32_pd(v_src)); + v_sum1 = _mm_add_pd(v_sum1, _mm_cvtepi32_pd(_mm_srli_si128(v_src, 8))); + } + + double CV_DECL_ALIGNED(16) ar[4]; + _mm_store_pd(ar, v_sum0); + _mm_store_pd(ar + 2, v_sum1); + + for (int i = 0; i < 4; i += cn) + for (int j = 0; j < cn; ++j) + dst[j] += ar[j + i]; + + return x / cn; + } +}; + +template <> +struct Sum_SIMD +{ + int operator () (const float * src0, const uchar * mask, double * dst, int len, int cn) const + { + if (mask || (cn != 1 && cn != 2 && cn != 4) || !USE_SSE2) + return 0; + + int x = 0; + __m128d v_zero = _mm_setzero_pd(), v_sum0 = v_zero, v_sum1 = v_zero; + + for ( ; x <= len - 4; x += 4) + { + __m128 v_src = _mm_loadu_ps(src0 + x); + v_sum0 = _mm_add_pd(v_sum0, _mm_cvtps_pd(v_src)); + v_src = _mm_castsi128_ps(_mm_srli_si128(_mm_castps_si128(v_src), 8)); + v_sum1 = _mm_add_pd(v_sum1, _mm_cvtps_pd(v_src)); + } + + double CV_DECL_ALIGNED(16) ar[4]; + _mm_store_pd(ar, v_sum0); + _mm_store_pd(ar + 2, v_sum1); + + for (int i = 0; i < 4; i += cn) + for (int j = 0; j < cn; ++j) + dst[j] += ar[j + i]; + + return x / cn; + } +}; + + +#elif CV_NEON template <> struct Sum_SIMD @@ -396,6 +504,38 @@ static int countNonZero_(const T* src, int len ) return nz; } +#if CV_SSE2 + +static const uchar * initPopcountTable() +{ + static uchar tab[256]; + static volatile bool initialized = false; + if( !initialized ) + { + // we compute inverse popcount table, + // since we pass (img[x] == 0) mask as index in the table. 
+ unsigned int j = 0u; +#if CV_POPCNT + if (checkHardwareSupport(CV_CPU_POPCNT)) + for( ; j < 256u; j++ ) + tab[j] = (uchar)(8 - _mm_popcnt_u32(j)); +#else + for( ; j < 256u; j++ ) + { + int val = 0; + for( int mask = 1; mask < 256; mask += mask ) + val += (j & mask) == 0; + tab[j] = (uchar)val; + } +#endif + initialized = true; + } + + return tab; +} + +#endif + static int countNonZero8u( const uchar* src, int len ) { int i=0, nz = 0; @@ -403,21 +543,7 @@ static int countNonZero8u( const uchar* src, int len ) if(USE_SSE2)//5x-6x { __m128i pattern = _mm_setzero_si128 (); - static uchar tab[256]; - static volatile bool initialized = false; - if( !initialized ) - { - // we compute inverse popcount table, - // since we pass (img[x] == 0) mask as index in the table. - for( int j = 0; j < 256; j++ ) - { - int val = 0; - for( int mask = 1; mask < 256; mask += mask ) - val += (j & mask) == 0; - tab[j] = (uchar)val; - } - initialized = true; - } + static const uchar * tab = initPopcountTable(); for (; i<=len-16; i+=16) { @@ -467,7 +593,22 @@ static int countNonZero8u( const uchar* src, int len ) static int countNonZero16u( const ushort* src, int len ) { int i = 0, nz = 0; -#if CV_NEON +#if CV_SSE2 + if (USE_SSE2) + { + __m128i v_zero = _mm_setzero_si128 (); + static const uchar * tab = initPopcountTable(); + + for ( ; i <= len - 8; i += 8) + { + __m128i v_src = _mm_loadu_si128((const __m128i*)(src + i)); + int val = _mm_movemask_epi8(_mm_packs_epi16(_mm_cmpeq_epi16(v_src, v_zero), v_zero)); + nz += tab[val]; + } + + src += i; + } +#elif CV_NEON int len0 = len & -8, blockSize1 = (1 << 15), blockSize0 = blockSize1 << 6; uint32x4_t v_nz = vdupq_n_u32(0u); uint16x8_t v_zero = vdupq_n_u16(0), v_1 = vdupq_n_u16(1); @@ -503,7 +644,27 @@ static int countNonZero16u( const ushort* src, int len ) static int countNonZero32s( const int* src, int len ) { int i = 0, nz = 0; -#if CV_NEON +#if CV_SSE2 + if (USE_SSE2) + { + __m128i v_zero = _mm_setzero_si128 (); + static const uchar * tab = initPopcountTable(); + + for ( ; i <= len - 8; i += 8) + { + __m128i v_src = _mm_loadu_si128((const __m128i*)(src + i)); + __m128i v_dst0 = _mm_cmpeq_epi32(v_src, v_zero); + + v_src = _mm_loadu_si128((const __m128i*)(src + i + 4)); + __m128i v_dst1 = _mm_cmpeq_epi32(v_src, v_zero); + + int val = _mm_movemask_epi8(_mm_packs_epi16(_mm_packs_epi32(v_dst0, v_dst1), v_zero)); + nz += tab[val]; + } + + src += i; + } +#elif CV_NEON int len0 = len & -8, blockSize1 = (1 << 15), blockSize0 = blockSize1 << 6; uint32x4_t v_nz = vdupq_n_u32(0u); int32x4_t v_zero = vdupq_n_s32(0.0f); @@ -541,7 +702,25 @@ static int countNonZero32s( const int* src, int len ) static int countNonZero32f( const float* src, int len ) { int i = 0, nz = 0; -#if CV_NEON +#if CV_SSE2 + if (USE_SSE2) + { + __m128i v_zero_i = _mm_setzero_si128(); + __m128 v_zero_f = _mm_setzero_ps(); + static const uchar * tab = initPopcountTable(); + + for ( ; i <= len - 8; i += 8) + { + __m128i v_dst0 = _mm_castps_si128(_mm_cmpeq_ps(_mm_loadu_ps(src + i), v_zero_f)); + __m128i v_dst1 = _mm_castps_si128(_mm_cmpeq_ps(_mm_loadu_ps(src + i + 4), v_zero_f)); + + int val = _mm_movemask_epi8(_mm_packs_epi16(_mm_packs_epi32(v_dst0, v_dst1), v_zero_i)); + nz += tab[val]; + } + + src += i; + } +#elif CV_NEON int len0 = len & -8, blockSize1 = (1 << 15), blockSize0 = blockSize1 << 6; uint32x4_t v_nz = vdupq_n_u32(0u); float32x4_t v_zero = vdupq_n_f32(0.0f); @@ -577,7 +756,34 @@ static int countNonZero32f( const float* src, int len ) } static int countNonZero64f( const double* src, int len ) -{ 
return countNonZero_(src, len); } +{ + int i = 0, nz = 0; +#if CV_SSE2 + if (USE_SSE2) + { + __m128i v_zero_i = _mm_setzero_si128(); + __m128d v_zero_d = _mm_setzero_pd(); + static const uchar * tab = initPopcountTable(); + + for ( ; i <= len - 8; i += 8) + { + __m128i v_dst0 = _mm_castpd_si128(_mm_cmpeq_pd(_mm_loadu_pd(src + i), v_zero_d)); + __m128i v_dst1 = _mm_castpd_si128(_mm_cmpeq_pd(_mm_loadu_pd(src + i + 2), v_zero_d)); + __m128i v_dst2 = _mm_castpd_si128(_mm_cmpeq_pd(_mm_loadu_pd(src + i + 4), v_zero_d)); + __m128i v_dst3 = _mm_castpd_si128(_mm_cmpeq_pd(_mm_loadu_pd(src + i + 6), v_zero_d)); + + v_dst0 = _mm_packs_epi32(v_dst0, v_dst1); + v_dst1 = _mm_packs_epi32(v_dst2, v_dst3); + + int val = _mm_movemask_epi8(_mm_packs_epi16(_mm_packs_epi32(v_dst0, v_dst1), v_zero_i)); + nz += tab[val]; + } + + src += i; + } +#endif + return nz + countNonZero_(src, len - i); +} typedef int (*CountNonZeroFunc)(const uchar*, int); @@ -594,6 +800,137 @@ static CountNonZeroFunc getCountNonZeroTab(int depth) return countNonZeroTab[depth]; } +template +struct SumSqr_SIMD +{ + int operator () (const T *, const uchar *, ST *, SQT *, int, int) const + { + return 0; + } +}; + +#if CV_SSE2 + +template <> +struct SumSqr_SIMD +{ + int operator () (const uchar * src0, const uchar * mask, int * sum, int * sqsum, int len, int cn) const + { + if (mask || (cn != 1 && cn != 2) || !USE_SSE2) + return 0; + + int x = 0; + __m128i v_zero = _mm_setzero_si128(), v_sum = v_zero, v_sqsum = v_zero; + + for ( ; x <= len - 16; x += 16) + { + __m128i v_src = _mm_loadu_si128((const __m128i *)(src0 + x)); + __m128i v_half = _mm_unpacklo_epi8(v_src, v_zero); + + __m128i v_mullo = _mm_mullo_epi16(v_half, v_half); + __m128i v_mulhi = _mm_mulhi_epi16(v_half, v_half); + v_sum = _mm_add_epi32(v_sum, _mm_unpacklo_epi16(v_half, v_zero)); + v_sum = _mm_add_epi32(v_sum, _mm_unpackhi_epi16(v_half, v_zero)); + v_sqsum = _mm_add_epi32(v_sqsum, _mm_unpacklo_epi16(v_mullo, v_mulhi)); + v_sqsum = _mm_add_epi32(v_sqsum, _mm_unpackhi_epi16(v_mullo, v_mulhi)); + + v_half = _mm_unpackhi_epi8(v_src, v_zero); + v_mullo = _mm_mullo_epi16(v_half, v_half); + v_mulhi = _mm_mulhi_epi16(v_half, v_half); + v_sum = _mm_add_epi32(v_sum, _mm_unpacklo_epi16(v_half, v_zero)); + v_sum = _mm_add_epi32(v_sum, _mm_unpackhi_epi16(v_half, v_zero)); + v_sqsum = _mm_add_epi32(v_sqsum, _mm_unpacklo_epi16(v_mullo, v_mulhi)); + v_sqsum = _mm_add_epi32(v_sqsum, _mm_unpackhi_epi16(v_mullo, v_mulhi)); + } + + for ( ; x <= len - 8; x += 8) + { + __m128i v_src = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i const *)(src0 + x)), v_zero); + + __m128i v_mullo = _mm_mullo_epi16(v_src, v_src); + __m128i v_mulhi = _mm_mulhi_epi16(v_src, v_src); + v_sum = _mm_add_epi32(v_sum, _mm_unpacklo_epi16(v_src, v_zero)); + v_sum = _mm_add_epi32(v_sum, _mm_unpackhi_epi16(v_src, v_zero)); + v_sqsum = _mm_add_epi32(v_sqsum, _mm_unpacklo_epi16(v_mullo, v_mulhi)); + v_sqsum = _mm_add_epi32(v_sqsum, _mm_unpackhi_epi16(v_mullo, v_mulhi)); + } + + int CV_DECL_ALIGNED(16) ar[8]; + _mm_store_si128((__m128i*)ar, v_sum); + _mm_store_si128((__m128i*)(ar + 4), v_sqsum); + + for (int i = 0; i < 4; i += cn) + for (int j = 0; j < cn; ++j) + { + sum[j] += ar[j + i]; + sqsum[j] += ar[4 + j + i]; + } + + return x / cn; + } +}; + +template <> +struct SumSqr_SIMD +{ + int operator () (const schar * src0, const uchar * mask, int * sum, int * sqsum, int len, int cn) const + { + if (mask || (cn != 1 && cn != 2) || !USE_SSE2) + return 0; + + int x = 0; + __m128i v_zero = _mm_setzero_si128(), v_sum = v_zero, v_sqsum = 
v_zero; + + for ( ; x <= len - 16; x += 16) + { + __m128i v_src = _mm_loadu_si128((const __m128i *)(src0 + x)); + __m128i v_half = _mm_srai_epi16(_mm_unpacklo_epi8(v_zero, v_src), 8); + + __m128i v_mullo = _mm_mullo_epi16(v_half, v_half); + __m128i v_mulhi = _mm_mulhi_epi16(v_half, v_half); + v_sum = _mm_add_epi32(v_sum, _mm_srai_epi32(_mm_unpacklo_epi16(v_zero, v_half), 16)); + v_sum = _mm_add_epi32(v_sum, _mm_srai_epi32(_mm_unpackhi_epi16(v_zero, v_half), 16)); + v_sqsum = _mm_add_epi32(v_sqsum, _mm_unpacklo_epi16(v_mullo, v_mulhi)); + v_sqsum = _mm_add_epi32(v_sqsum, _mm_unpackhi_epi16(v_mullo, v_mulhi)); + + v_half = _mm_srai_epi16(_mm_unpackhi_epi8(v_zero, v_src), 8); + v_mullo = _mm_mullo_epi16(v_half, v_half); + v_mulhi = _mm_mulhi_epi16(v_half, v_half); + v_sum = _mm_add_epi32(v_sum, _mm_srai_epi32(_mm_unpacklo_epi16(v_zero, v_half), 16)); + v_sum = _mm_add_epi32(v_sum, _mm_srai_epi32(_mm_unpackhi_epi16(v_zero, v_half), 16)); + v_sqsum = _mm_add_epi32(v_sqsum, _mm_unpacklo_epi16(v_mullo, v_mulhi)); + v_sqsum = _mm_add_epi32(v_sqsum, _mm_unpackhi_epi16(v_mullo, v_mulhi)); + } + + for ( ; x <= len - 8; x += 8) + { + __m128i v_src = _mm_srai_epi16(_mm_unpacklo_epi8(v_zero, _mm_loadl_epi64((__m128i const *)(src0 + x))), 8); + + __m128i v_mullo = _mm_mullo_epi16(v_src, v_src); + __m128i v_mulhi = _mm_mulhi_epi16(v_src, v_src); + v_sum = _mm_add_epi32(v_sum, _mm_srai_epi32(_mm_unpacklo_epi16(v_zero, v_src), 16)); + v_sum = _mm_add_epi32(v_sum, _mm_srai_epi32(_mm_unpackhi_epi16(v_zero, v_src), 16)); + v_sqsum = _mm_add_epi32(v_sqsum, _mm_unpacklo_epi16(v_mullo, v_mulhi)); + v_sqsum = _mm_add_epi32(v_sqsum, _mm_unpackhi_epi16(v_mullo, v_mulhi)); + } + + int CV_DECL_ALIGNED(16) ar[8]; + _mm_store_si128((__m128i*)ar, v_sum); + _mm_store_si128((__m128i*)(ar + 4), v_sqsum); + + for (int i = 0; i < 4; i += cn) + for (int j = 0; j < cn; ++j) + { + sum[j] += ar[j + i]; + sqsum[j] += ar[4 + j + i]; + } + + return x / cn; + } +}; + +#endif + template static int sumsqr_(const T* src0, const uchar* mask, ST* sum, SQT* sqsum, int len, int cn ) { @@ -601,14 +938,15 @@ static int sumsqr_(const T* src0, const uchar* mask, ST* sum, SQT* sqsum, int le if( !mask ) { - int i; - int k = cn % 4; + SumSqr_SIMD vop; + int i = vop(src0, mask, sum, sqsum, len, cn), k = cn % 4; + src += i * cn; if( k == 1 ) { ST s0 = sum[0]; SQT sq0 = sqsum[0]; - for( i = 0; i < len; i++, src += cn ) + for( ; i < len; i++, src += cn ) { T v = src[0]; s0 += v; sq0 += (SQT)v*v; @@ -620,7 +958,7 @@ static int sumsqr_(const T* src0, const uchar* mask, ST* sum, SQT* sqsum, int le { ST s0 = sum[0], s1 = sum[1]; SQT sq0 = sqsum[0], sq1 = sqsum[1]; - for( i = 0; i < len; i++, src += cn ) + for( ; i < len; i++, src += cn ) { T v0 = src[0], v1 = src[1]; s0 += v0; sq0 += (SQT)v0*v0; @@ -633,7 +971,7 @@ static int sumsqr_(const T* src0, const uchar* mask, ST* sum, SQT* sqsum, int le { ST s0 = sum[0], s1 = sum[1], s2 = sum[2]; SQT sq0 = sqsum[0], sq1 = sqsum[1], sq2 = sqsum[2]; - for( i = 0; i < len; i++, src += cn ) + for( ; i < len; i++, src += cn ) { T v0 = src[0], v1 = src[1], v2 = src[2]; s0 += v0; sq0 += (SQT)v0*v0; @@ -649,7 +987,7 @@ static int sumsqr_(const T* src0, const uchar* mask, ST* sum, SQT* sqsum, int le src = src0 + k; ST s0 = sum[k], s1 = sum[k+1], s2 = sum[k+2], s3 = sum[k+3]; SQT sq0 = sqsum[k], sq1 = sqsum[k+1], sq2 = sqsum[k+2], sq3 = sqsum[k+3]; - for( i = 0; i < len; i++, src += cn ) + for( ; i < len; i++, src += cn ) { T v0, v1; v0 = src[0], v1 = src[1]; @@ -924,7 +1262,6 @@ cv::Scalar cv::sum( InputArray _src ) } } 
#endif - SumFunc func = getSumFunc(depth); CV_Assert( cn <= 4 && func != 0 ); diff --git a/modules/core/src/system.cpp b/modules/core/src/system.cpp index daf13a2dda..2590f215a9 100644 --- a/modules/core/src/system.cpp +++ b/modules/core/src/system.cpp @@ -12,6 +12,7 @@ // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Copyright (C) 2015, Itseez Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, @@ -89,6 +90,22 @@ pop ebx } } + static void __cpuidex(int* cpuid_data, int, int) + { + __asm + { + push edi + mov edi, cpuid_data + mov eax, 7 + mov ecx, 0 + cpuid + mov [edi], eax + mov [edi + 4], ebx + mov [edi + 8], ecx + mov [edi + 12], edx + pop edi + } + } #endif #endif @@ -208,7 +225,7 @@ struct HWFeatures enum { MAX_FEATURE = CV_HARDWARE_MAX_FEATURE }; HWFeatures(void) - { + { memset( have, 0, sizeof(have) ); x86_family = 0; } @@ -252,10 +269,53 @@ struct HWFeatures f.have[CV_CPU_SSE2] = (cpuid_data[3] & (1<<26)) != 0; f.have[CV_CPU_SSE3] = (cpuid_data[2] & (1<<0)) != 0; f.have[CV_CPU_SSSE3] = (cpuid_data[2] & (1<<9)) != 0; + f.have[CV_CPU_FMA3] = (cpuid_data[2] & (1<<12)) != 0; f.have[CV_CPU_SSE4_1] = (cpuid_data[2] & (1<<19)) != 0; f.have[CV_CPU_SSE4_2] = (cpuid_data[2] & (1<<20)) != 0; f.have[CV_CPU_POPCNT] = (cpuid_data[2] & (1<<23)) != 0; f.have[CV_CPU_AVX] = (((cpuid_data[2] & (1<<28)) != 0)&&((cpuid_data[2] & (1<<27)) != 0));//OS uses XSAVE_XRSTORE and CPU support AVX + + // make the second call to the cpuid command in order to get + // information about extended features like AVX2 + #if defined _MSC_VER && (defined _M_IX86 || defined _M_X64) + __cpuidex(cpuid_data, 7, 0); + #elif defined __GNUC__ && (defined __i386__ || defined __x86_64__) + #ifdef __x86_64__ + asm __volatile__ + ( + "movl $7, %%eax\n\t" + "movl $0, %%ecx\n\t" + "cpuid\n\t" + :[eax]"=a"(cpuid_data[0]),[ebx]"=b"(cpuid_data[1]),[ecx]"=c"(cpuid_data[2]),[edx]"=d"(cpuid_data[3]) + : + : "cc" + ); + #else + asm volatile + ( + "pushl %%ebx\n\t" + "movl $7,%%eax\n\t" + "movl $0,%%ecx\n\t" + "cpuid\n\t" + "movl %%ebx, %0\n\t" + "popl %%ebx\n\t" + : "=r"(cpuid_data[1]), "=c"(cpuid_data[2]) + : + : "cc" + ); + #endif + #endif + f.have[CV_CPU_AVX2] = (cpuid_data[1] & (1<<5)) != 0; + + f.have[CV_CPU_AVX_512F] = (cpuid_data[1] & (1<<16)) != 0; + f.have[CV_CPU_AVX_512DQ] = (cpuid_data[1] & (1<<17)) != 0; + f.have[CV_CPU_AVX_512IFMA512] = (cpuid_data[1] & (1<<21)) != 0; + f.have[CV_CPU_AVX_512PF] = (cpuid_data[1] & (1<<26)) != 0; + f.have[CV_CPU_AVX_512ER] = (cpuid_data[1] & (1<<27)) != 0; + f.have[CV_CPU_AVX_512CD] = (cpuid_data[1] & (1<<28)) != 0; + f.have[CV_CPU_AVX_512BW] = (cpuid_data[1] & (1<<30)) != 0; + f.have[CV_CPU_AVX_512VL] = (cpuid_data[1] & (1<<31)) != 0; + f.have[CV_CPU_AVX_512VBMI] = (cpuid_data[2] & (1<<1)) != 0; } #if defined ANDROID || defined __linux__ @@ -318,6 +378,7 @@ IPPInitializer ippInitializer; volatile bool USE_SSE2 = featuresEnabled.have[CV_CPU_SSE2]; volatile bool USE_SSE4_2 = featuresEnabled.have[CV_CPU_SSE4_2]; volatile bool USE_AVX = featuresEnabled.have[CV_CPU_AVX]; +volatile bool USE_AVX2 = featuresEnabled.have[CV_CPU_AVX2]; void setUseOptimized( bool flag ) { diff --git a/modules/core/src/umatrix.cpp b/modules/core/src/umatrix.cpp index 443111f48d..1b42f1ee1e 100644 --- a/modules/core/src/umatrix.cpp +++ b/modules/core/src/umatrix.cpp @@ -10,8 +10,7 @@ // License Agreement // 
For Open Source Computer Vision Library // -// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. -// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Copyright (C) 2014, Itseez Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, @@ -56,7 +55,7 @@ UMatData::UMatData(const MatAllocator* allocator) prevAllocator = currAllocator = allocator; urefcount = refcount = 0; data = origdata = 0; - size = 0; capacity = 0; + size = 0; flags = 0; handle = 0; userdata = 0; @@ -68,7 +67,7 @@ UMatData::~UMatData() prevAllocator = currAllocator = 0; urefcount = refcount = 0; data = origdata = 0; - size = 0; capacity = 0; + size = 0; flags = 0; handle = 0; userdata = 0; @@ -222,7 +221,7 @@ UMat Mat::getUMat(int accessFlags, UMatUsageFlags usageFlags) const temp_u = a->allocate(dims, size.p, type(), data, step.p, accessFlags, usageFlags); temp_u->refcount = 1; } - UMat::getStdAllocator()->allocate(temp_u, accessFlags, usageFlags); + UMat::getStdAllocator()->allocate(temp_u, accessFlags, usageFlags); // TODO result is not checked hdr.flags = flags; setSize(hdr, dims, size.p, step.p); finalizeHdr(hdr); @@ -576,7 +575,7 @@ Mat UMat::getMat(int accessFlags) const { if(!u) return Mat(); - u->currAllocator->map(u, accessFlags | ACCESS_READ); + u->currAllocator->map(u, accessFlags | ACCESS_READ); // TODO Support ACCESS_WRITE without unnecessary data transfers CV_Assert(u->data != 0); Mat hdr(dims, size.p, type(), u->data + offset, step.p); hdr.flags = flags; diff --git a/modules/core/test/ocl/test_arithm.cpp b/modules/core/test/ocl/test_arithm.cpp index d0d3847bed..0541819f89 100644 --- a/modules/core/test/ocl/test_arithm.cpp +++ b/modules/core/test/ocl/test_arithm.cpp @@ -1577,7 +1577,7 @@ PARAM_TEST_CASE(ConvertScaleAbs, MatDepth, Channels, bool) Size roiSize = randomSize(1, MAX_VALUE); Border srcBorder = randomBorder(0, use_roi ? MAX_VALUE : 0); - randomSubMat(src, src_roi, roiSize, srcBorder, stype, 2, 11); // FIXIT: Test with minV, maxV + randomSubMat(src, src_roi, roiSize, srcBorder, stype, -11, 11); // FIXIT: Test with minV, maxV Border dstBorder = randomBorder(0, use_roi ? 
MAX_VALUE : 0); randomSubMat(dst, dst_roi, roiSize, dstBorder, dtype, 5, 16); diff --git a/modules/core/test/test_misc.cpp b/modules/core/test/test_misc.cpp index d37f0ee4f6..cd4ec7c5a0 100644 --- a/modules/core/test/test_misc.cpp +++ b/modules/core/test/test_misc.cpp @@ -26,3 +26,106 @@ TEST(Core_SaturateCast, NegativeNotClipped) ASSERT_EQ(0xffffffff, val); } + +template +static double maxAbsDiff(const T &t, const U &u) +{ + Mat_ d; + absdiff(t, u, d); + double ret; + minMaxLoc(d, NULL, &ret); + return ret; +} + +TEST(Core_OutputArrayAssign, _Matxd_Matd) +{ + Mat expected = (Mat_(2,3) << 1, 2, 3, .1, .2, .3); + Matx23d actualx; + + { + OutputArray oa(actualx); + oa.assign(expected); + } + + Mat actual = (Mat) actualx; + + EXPECT_LE(maxAbsDiff(expected, actual), 0.0); +} + +TEST(Core_OutputArrayAssign, _Matxd_Matf) +{ + Mat expected = (Mat_(2,3) << 1, 2, 3, .1, .2, .3); + Matx23d actualx; + + { + OutputArray oa(actualx); + oa.assign(expected); + } + + Mat actual = (Mat) actualx; + + EXPECT_LE(maxAbsDiff(expected, actual), FLT_EPSILON); +} + +TEST(Core_OutputArrayAssign, _Matxf_Matd) +{ + Mat expected = (Mat_(2,3) << 1, 2, 3, .1, .2, .3); + Matx23f actualx; + + { + OutputArray oa(actualx); + oa.assign(expected); + } + + Mat actual = (Mat) actualx; + + EXPECT_LE(maxAbsDiff(expected, actual), FLT_EPSILON); +} + +TEST(Core_OutputArrayAssign, _Matxd_UMatd) +{ + Mat expected = (Mat_(2,3) << 1, 2, 3, .1, .2, .3); + UMat uexpected = expected.getUMat(ACCESS_READ); + Matx23d actualx; + + { + OutputArray oa(actualx); + oa.assign(uexpected); + } + + Mat actual = (Mat) actualx; + + EXPECT_LE(maxAbsDiff(expected, actual), 0.0); +} + +TEST(Core_OutputArrayAssign, _Matxd_UMatf) +{ + Mat expected = (Mat_(2,3) << 1, 2, 3, .1, .2, .3); + UMat uexpected = expected.getUMat(ACCESS_READ); + Matx23d actualx; + + { + OutputArray oa(actualx); + oa.assign(uexpected); + } + + Mat actual = (Mat) actualx; + + EXPECT_LE(maxAbsDiff(expected, actual), FLT_EPSILON); +} + +TEST(Core_OutputArrayAssign, _Matxf_UMatd) +{ + Mat expected = (Mat_(2,3) << 1, 2, 3, .1, .2, .3); + UMat uexpected = expected.getUMat(ACCESS_READ); + Matx23f actualx; + + { + OutputArray oa(actualx); + oa.assign(uexpected); + } + + Mat actual = (Mat) actualx; + + EXPECT_LE(maxAbsDiff(expected, actual), FLT_EPSILON); +} diff --git a/modules/cuda/CMakeLists.txt b/modules/cuda/CMakeLists.txt deleted file mode 100644 index d668ea8b01..0000000000 --- a/modules/cuda/CMakeLists.txt +++ /dev/null @@ -1,9 +0,0 @@ -if(IOS OR (NOT HAVE_CUDA AND NOT BUILD_CUDA_STUBS)) - ocv_module_disable(cuda) -endif() - -set(the_description "CUDA-accelerated Computer Vision") - -ocv_warnings_disable(CMAKE_CXX_FLAGS /wd4127 /wd4100 /wd4324 /wd4512 /wd4515 -Wundef -Wmissing-declarations -Wshadow -Wunused-parameter) - -ocv_define_module(cuda opencv_calib3d opencv_cudaarithm opencv_cudawarping OPTIONAL opencv_cudalegacy) diff --git a/modules/cuda/include/opencv2/cuda.hpp b/modules/cuda/include/opencv2/cuda.hpp deleted file mode 100644 index c6004296bd..0000000000 --- a/modules/cuda/include/opencv2/cuda.hpp +++ /dev/null @@ -1,135 +0,0 @@ -/*M/////////////////////////////////////////////////////////////////////////////////////// -// -// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. -// -// By downloading, copying, installing or using the software you agree to this license. -// If you do not agree to this license, do not download, install, -// copy or use the software. 
-// -// -// License Agreement -// For Open Source Computer Vision Library -// -// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. -// Copyright (C) 2009, Willow Garage Inc., all rights reserved. -// Third party copyrights are property of their respective owners. -// -// Redistribution and use in source and binary forms, with or without modification, -// are permitted provided that the following conditions are met: -// -// * Redistribution's of source code must retain the above copyright notice, -// this list of conditions and the following disclaimer. -// -// * Redistribution's in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// * The name of the copyright holders may not be used to endorse or promote products -// derived from this software without specific prior written permission. -// -// This software is provided by the copyright holders and contributors "as is" and -// any express or implied warranties, including, but not limited to, the implied -// warranties of merchantability and fitness for a particular purpose are disclaimed. -// In no event shall the Intel Corporation or contributors be liable for any direct, -// indirect, incidental, special, exemplary, or consequential damages -// (including, but not limited to, procurement of substitute goods or services; -// loss of use, data, or profits; or business interruption) however caused -// and on any theory of liability, whether in contract, strict liability, -// or tort (including negligence or otherwise) arising in any way out of -// the use of this software, even if advised of the possibility of such damage. -// -//M*/ - -#ifndef __OPENCV_CUDA_HPP__ -#define __OPENCV_CUDA_HPP__ - -#ifndef __cplusplus -# error cuda.hpp header must be compiled as C++ -#endif - -#include "opencv2/core/cuda.hpp" - -/** - @addtogroup cuda - @{ - @defgroup cuda_calib3d Camera Calibration and 3D Reconstruction - @} - */ - -namespace cv { namespace cuda { - -//////////////////////////// Labeling //////////////////////////// - -//! @addtogroup cuda -//! @{ - -//!performs labeling via graph cuts of a 2D regular 4-connected graph. -CV_EXPORTS void graphcut(GpuMat& terminals, GpuMat& leftTransp, GpuMat& rightTransp, GpuMat& top, GpuMat& bottom, GpuMat& labels, - GpuMat& buf, Stream& stream = Stream::Null()); - -//!performs labeling via graph cuts of a 2D regular 8-connected graph. -CV_EXPORTS void graphcut(GpuMat& terminals, GpuMat& leftTransp, GpuMat& rightTransp, GpuMat& top, GpuMat& topLeft, GpuMat& topRight, - GpuMat& bottom, GpuMat& bottomLeft, GpuMat& bottomRight, - GpuMat& labels, - GpuMat& buf, Stream& stream = Stream::Null()); - -//! compute mask for Generalized Flood fill componetns labeling. -CV_EXPORTS void connectivityMask(const GpuMat& image, GpuMat& mask, const cv::Scalar& lo, const cv::Scalar& hi, Stream& stream = Stream::Null()); - -//! performs connected componnents labeling. -CV_EXPORTS void labelComponents(const GpuMat& mask, GpuMat& components, int flags = 0, Stream& stream = Stream::Null()); - -//! @} - -//////////////////////////// Calib3d //////////////////////////// - -//! @addtogroup cuda_calib3d -//! 
@{ - -CV_EXPORTS void transformPoints(const GpuMat& src, const Mat& rvec, const Mat& tvec, - GpuMat& dst, Stream& stream = Stream::Null()); - -CV_EXPORTS void projectPoints(const GpuMat& src, const Mat& rvec, const Mat& tvec, - const Mat& camera_mat, const Mat& dist_coef, GpuMat& dst, - Stream& stream = Stream::Null()); - -/** @brief Finds the object pose from 3D-2D point correspondences. - -@param object Single-row matrix of object points. -@param image Single-row matrix of image points. -@param camera_mat 3x3 matrix of intrinsic camera parameters. -@param dist_coef Distortion coefficients. See undistortPoints for details. -@param rvec Output 3D rotation vector. -@param tvec Output 3D translation vector. -@param use_extrinsic_guess Flag to indicate that the function must use rvec and tvec as an -initial transformation guess. It is not supported for now. -@param num_iters Maximum number of RANSAC iterations. -@param max_dist Euclidean distance threshold to detect whether point is inlier or not. -@param min_inlier_count Flag to indicate that the function must stop if greater or equal number -of inliers is achieved. It is not supported for now. -@param inliers Output vector of inlier indices. - */ -CV_EXPORTS void solvePnPRansac(const Mat& object, const Mat& image, const Mat& camera_mat, - const Mat& dist_coef, Mat& rvec, Mat& tvec, bool use_extrinsic_guess=false, - int num_iters=100, float max_dist=8.0, int min_inlier_count=100, - std::vector* inliers=NULL); - -//! @} - -//////////////////////////// VStab //////////////////////////// - -//! @addtogroup cuda -//! @{ - -//! removes points (CV_32FC2, single row matrix) with zero mask value -CV_EXPORTS void compactPoints(GpuMat &points0, GpuMat &points1, const GpuMat &mask); - -CV_EXPORTS void calcWobbleSuppressionMaps( - int left, int idx, int right, Size size, const Mat &ml, const Mat &mr, - GpuMat &mapx, GpuMat &mapy); - -//! @} - -}} // namespace cv { namespace cuda { - -#endif /* __OPENCV_CUDA_HPP__ */ diff --git a/modules/cuda/src/global_motion.cpp b/modules/cuda/src/global_motion.cpp deleted file mode 100644 index 4f847c9244..0000000000 --- a/modules/cuda/src/global_motion.cpp +++ /dev/null @@ -1,96 +0,0 @@ -/*M/////////////////////////////////////////////////////////////////////////////////////// -// -// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. -// -// By downloading, copying, installing or using the software you agree to this license. -// If you do not agree to this license, do not download, install, -// copy or use the software. -// -// -// License Agreement -// For Open Source Computer Vision Library -// -// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. -// Copyright (C) 2009, Willow Garage Inc., all rights reserved. -// Third party copyrights are property of their respective owners. -// -// Redistribution and use in source and binary forms, with or without modification, -// are permitted provided that the following conditions are met: -// -// * Redistribution's of source code must retain the above copyright notice, -// this list of conditions and the following disclaimer. -// -// * Redistribution's in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// * The name of the copyright holders may not be used to endorse or promote products -// derived from this software without specific prior written permission. 
-// -// This software is provided by the copyright holders and contributors "as is" and -// any express or implied warranties, including, but not limited to, the implied -// warranties of merchantability and fitness for a particular purpose are disclaimed. -// In no event shall the Intel Corporation or contributors be liable for any direct, -// indirect, incidental, special, exemplary, or consequential damages -// (including, but not limited to, procurement of substitute goods or services; -// loss of use, data, or profits; or business interruption) however caused -// and on any theory of liability, whether in contract, strict liability, -// or tort (including negligence or otherwise) arising in any way out of -// the use of this software, even if advised of the possibility of such damage. -// -//M*/ - -#include "precomp.hpp" - -using namespace cv; -using namespace cv::cuda; - -#if !defined HAVE_CUDA || defined(CUDA_DISABLER) - -void cv::cuda::compactPoints(GpuMat&, GpuMat&, const GpuMat&) { throw_no_cuda(); } -void cv::cuda::calcWobbleSuppressionMaps( - int, int, int, Size, const Mat&, const Mat&, GpuMat&, GpuMat&) { throw_no_cuda(); } - -#else - -namespace cv { namespace cuda { namespace device { namespace globmotion { - - int compactPoints(int N, float *points0, float *points1, const uchar *mask); - - void calcWobbleSuppressionMaps( - int left, int idx, int right, int width, int height, - const float *ml, const float *mr, PtrStepSzf mapx, PtrStepSzf mapy); - -}}}} - -void cv::cuda::compactPoints(GpuMat &points0, GpuMat &points1, const GpuMat &mask) -{ - CV_Assert(points0.rows == 1 && points1.rows == 1 && mask.rows == 1); - CV_Assert(points0.type() == CV_32FC2 && points1.type() == CV_32FC2 && mask.type() == CV_8U); - CV_Assert(points0.cols == mask.cols && points1.cols == mask.cols); - - int npoints = points0.cols; - int remaining = cv::cuda::device::globmotion::compactPoints( - npoints, (float*)points0.data, (float*)points1.data, mask.data); - - points0 = points0.colRange(0, remaining); - points1 = points1.colRange(0, remaining); -} - - -void cv::cuda::calcWobbleSuppressionMaps( - int left, int idx, int right, Size size, const Mat &ml, const Mat &mr, - GpuMat &mapx, GpuMat &mapy) -{ - CV_Assert(ml.size() == Size(3, 3) && ml.type() == CV_32F && ml.isContinuous()); - CV_Assert(mr.size() == Size(3, 3) && mr.type() == CV_32F && mr.isContinuous()); - - mapx.create(size, CV_32F); - mapy.create(size, CV_32F); - - cv::cuda::device::globmotion::calcWobbleSuppressionMaps( - left, idx, right, size.width, size.height, - ml.ptr(), mr.ptr(), mapx, mapy); -} - -#endif diff --git a/modules/cuda/src/precomp.hpp b/modules/cuda/src/precomp.hpp deleted file mode 100644 index 7feeadddc1..0000000000 --- a/modules/cuda/src/precomp.hpp +++ /dev/null @@ -1,60 +0,0 @@ -/*M/////////////////////////////////////////////////////////////////////////////////////// -// -// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. -// -// By downloading, copying, installing or using the software you agree to this license. -// If you do not agree to this license, do not download, install, -// copy or use the software. -// -// -// License Agreement -// For Open Source Computer Vision Library -// -// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. -// Copyright (C) 2009, Willow Garage Inc., all rights reserved. -// Third party copyrights are property of their respective owners. 
-// -// Redistribution and use in source and binary forms, with or without modification, -// are permitted provided that the following conditions are met: -// -// * Redistribution's of source code must retain the above copyright notice, -// this list of conditions and the following disclaimer. -// -// * Redistribution's in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// * The name of the copyright holders may not be used to endorse or promote products -// derived from this software without specific prior written permission. -// -// This software is provided by the copyright holders and contributors "as is" and -// any express or implied warranties, including, but not limited to, the implied -// warranties of merchantability and fitness for a particular purpose are disclaimed. -// In no event shall the Intel Corporation or contributors be liable for any direct, -// indirect, incidental, special, exemplary, or consequential damages -// (including, but not limited to, procurement of substitute goods or services; -// loss of use, data, or profits; or business interruption) however caused -// and on any theory of liability, whether in contract, strict liability, -// or tort (including negligence or otherwise) arising in any way out of -// the use of this software, even if advised of the possibility of such damage. -// -//M*/ - -#ifndef __OPENCV_PRECOMP_H__ -#define __OPENCV_PRECOMP_H__ - -#include "opencv2/cuda.hpp" -#include "opencv2/cudaarithm.hpp" -#include "opencv2/cudawarping.hpp" -#include "opencv2/calib3d.hpp" - -#include "opencv2/core/private.cuda.hpp" -#include "opencv2/core/utility.hpp" - -#include "opencv2/opencv_modules.hpp" - -#ifdef HAVE_OPENCV_CUDALEGACY -# include "opencv2/cudalegacy/private.hpp" -#endif - -#endif /* __OPENCV_PRECOMP_H__ */ diff --git a/modules/cuda/test/test_global_motion.cpp b/modules/cuda/test/test_global_motion.cpp deleted file mode 100644 index 633fe647c3..0000000000 --- a/modules/cuda/test/test_global_motion.cpp +++ /dev/null @@ -1,90 +0,0 @@ -/*M/////////////////////////////////////////////////////////////////////////////////////// -// -// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. -// -// By downloading, copying, installing or using the software you agree to this license. -// If you do not agree to this license, do not download, install, -// copy or use the software. -// -// -// License Agreement -// For Open Source Computer Vision Library -// -// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. -// Copyright (C) 2009, Willow Garage Inc., all rights reserved. -// Third party copyrights are property of their respective owners. -// -// Redistribution and use in source and binary forms, with or without modification, -// are permitted provided that the following conditions are met: -// -// * Redistribution's of source code must retain the above copyright notice, -// this list of conditions and the following disclaimer. -// -// * Redistribution's in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// * The name of the copyright holders may not be used to endorse or promote products -// derived from this software without specific prior written permission. 
-// -// This software is provided by the copyright holders and contributors "as is" and -// any express or implied warranties, including, but not limited to, the implied -// warranties of merchantability and fitness for a particular purpose are disclaimed. -// In no event shall the Intel Corporation or contributors be liable for any direct, -// indirect, incidental, special, exemplary, or consequential damages -// (including, but not limited to, procurement of substitute goods or services; -// loss of use, data, or profits; or business interruption) however caused -// and on any theory of liability, whether in contract, strict liability, -// or tort (including negligence or otherwise) arising in any way out of -// the use of this software, even if advised of the possibility of such damage. -// -//M*/ - -#include "test_precomp.hpp" - -#ifdef HAVE_CUDA - -using namespace std; -using namespace cv; - -struct CompactPoints : testing::TestWithParam -{ - virtual void SetUp() { cuda::setDevice(GetParam().deviceID()); } -}; - -CUDA_TEST_P(CompactPoints, CanCompactizeSmallInput) -{ - Mat src0(1, 3, CV_32FC2); - src0.at(0,0) = Point2f(0,0); - src0.at(0,1) = Point2f(0,1); - src0.at(0,2) = Point2f(0,2); - - Mat src1(1, 3, CV_32FC2); - src1.at(0,0) = Point2f(1,0); - src1.at(0,1) = Point2f(1,1); - src1.at(0,2) = Point2f(1,2); - - Mat mask(1, 3, CV_8U); - mask.at(0,0) = 1; - mask.at(0,1) = 0; - mask.at(0,2) = 1; - - cuda::GpuMat dsrc0(src0), dsrc1(src1), dmask(mask); - cuda::compactPoints(dsrc0, dsrc1, dmask); - - dsrc0.download(src0); - dsrc1.download(src1); - - ASSERT_EQ(2, src0.cols); - ASSERT_EQ(2, src1.cols); - - ASSERT_TRUE(src0.at(0,0) == Point2f(0,0)); - ASSERT_TRUE(src0.at(0,1) == Point2f(0,2)); - - ASSERT_TRUE(src1.at(0,0) == Point2f(1,0)); - ASSERT_TRUE(src1.at(0,1) == Point2f(1,2)); -} - -INSTANTIATE_TEST_CASE_P(CUDA_GlobalMotion, CompactPoints, ALL_DEVICES); - -#endif // HAVE_CUDA diff --git a/modules/cuda/test/test_main.cpp b/modules/cuda/test/test_main.cpp deleted file mode 100644 index 04f4fcf6e6..0000000000 --- a/modules/cuda/test/test_main.cpp +++ /dev/null @@ -1,45 +0,0 @@ -/*M/////////////////////////////////////////////////////////////////////////////////////// -// -// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. -// -// By downloading, copying, installing or using the software you agree to this license. -// If you do not agree to this license, do not download, install, -// copy or use the software. -// -// -// License Agreement -// For Open Source Computer Vision Library -// -// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. -// Copyright (C) 2009, Willow Garage Inc., all rights reserved. -// Third party copyrights are property of their respective owners. -// -// Redistribution and use in source and binary forms, with or without modification, -// are permitted provided that the following conditions are met: -// -// * Redistribution's of source code must retain the above copyright notice, -// this list of conditions and the following disclaimer. -// -// * Redistribution's in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// * The name of the copyright holders may not be used to endorse or promote products -// derived from this software without specific prior written permission. 
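(Aside, not part of the diff: the CompactPoints test removed above documents the behaviour of cuda::compactPoints, which drops the columns of two single-row CV_32FC2 point matrices wherever the mask is zero and shrinks both to the remaining columns. A rough CPU-side equivalent, as a hedged sketch with a hypothetical helper name:)

#include <opencv2/core.hpp>

// CPU-side sketch of the compaction the deleted test verifies on the GPU.
static void compactPointsCpu(cv::Mat& points0, cv::Mat& points1, const cv::Mat& mask)
{
    CV_Assert(points0.type() == CV_32FC2 && points1.type() == CV_32FC2 && mask.type() == CV_8U);
    CV_Assert(points0.rows == 1 && points1.rows == 1 && mask.rows == 1);
    CV_Assert(points0.cols == mask.cols && points1.cols == mask.cols);

    int kept = 0;
    for (int i = 0; i < mask.cols; ++i)
    {
        if (mask.at<uchar>(0, i))
        {
            // keep this correspondence, packing it to the front
            points0.at<cv::Point2f>(0, kept) = points0.at<cv::Point2f>(0, i);
            points1.at<cv::Point2f>(0, kept) = points1.at<cv::Point2f>(0, i);
            ++kept;
        }
    }
    points0 = points0.colRange(0, kept);
    points1 = points1.colRange(0, kept);
}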
-// -// This software is provided by the copyright holders and contributors "as is" and -// any express or implied warranties, including, but not limited to, the implied -// warranties of merchantability and fitness for a particular purpose are disclaimed. -// In no event shall the Intel Corporation or contributors be liable for any direct, -// indirect, incidental, special, exemplary, or consequential damages -// (including, but not limited to, procurement of substitute goods or services; -// loss of use, data, or profits; or business interruption) however caused -// and on any theory of liability, whether in contract, strict liability, -// or tort (including negligence or otherwise) arising in any way out of -// the use of this software, even if advised of the possibility of such damage. -// -//M*/ - -#include "test_precomp.hpp" - -CV_CUDA_TEST_MAIN("gpu") diff --git a/modules/cuda/test/test_precomp.hpp b/modules/cuda/test/test_precomp.hpp deleted file mode 100644 index e3b33017a7..0000000000 --- a/modules/cuda/test/test_precomp.hpp +++ /dev/null @@ -1,66 +0,0 @@ -/*M/////////////////////////////////////////////////////////////////////////////////////// -// -// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. -// -// By downloading, copying, installing or using the software you agree to this license. -// If you do not agree to this license, do not download, install, -// copy or use the software. -// -// -// License Agreement -// For Open Source Computer Vision Library -// -// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. -// Copyright (C) 2009, Willow Garage Inc., all rights reserved. -// Third party copyrights are property of their respective owners. -// -// Redistribution and use in source and binary forms, with or without modification, -// are permitted provided that the following conditions are met: -// -// * Redistribution's of source code must retain the above copyright notice, -// this list of conditions and the following disclaimer. -// -// * Redistribution's in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// * The name of the copyright holders may not be used to endorse or promote products -// derived from this software without specific prior written permission. -// -// This software is provided by the copyright holders and contributors "as is" and -// any express or implied warranties, including, but not limited to, the implied -// warranties of merchantability and fitness for a particular purpose are disclaimed. -// In no event shall the Intel Corporation or contributors be liable for any direct, -// indirect, incidental, special, exemplary, or consequential damages -// (including, but not limited to, procurement of substitute goods or services; -// loss of use, data, or profits; or business interruption) however caused -// and on any theory of liability, whether in contract, strict liability, -// or tort (including negligence or otherwise) arising in any way out of -// the use of this software, even if advised of the possibility of such damage. 
-// -//M*/ - -#ifdef __GNUC__ -# pragma GCC diagnostic ignored "-Wmissing-declarations" -# if defined __clang__ || defined __APPLE__ -# pragma GCC diagnostic ignored "-Wmissing-prototypes" -# pragma GCC diagnostic ignored "-Wextra" -# endif -#endif - -#ifndef __OPENCV_TEST_PRECOMP_HPP__ -#define __OPENCV_TEST_PRECOMP_HPP__ - -#include - -#include "opencv2/ts.hpp" -#include "opencv2/ts/cuda_test.hpp" - -#include "opencv2/cuda.hpp" -#include "opencv2/core.hpp" -#include "opencv2/core/opengl.hpp" -#include "opencv2/calib3d.hpp" - -#include "cvconfig.h" - -#endif diff --git a/modules/cudabgsegm/CMakeLists.txt b/modules/cudabgsegm/CMakeLists.txt index 4c3d3f1dbe..c60fdd0769 100644 --- a/modules/cudabgsegm/CMakeLists.txt +++ b/modules/cudabgsegm/CMakeLists.txt @@ -6,4 +6,4 @@ set(the_description "CUDA-accelerated Background Segmentation") ocv_warnings_disable(CMAKE_CXX_FLAGS /wd4127 /wd4324 /wd4512 -Wundef -Wmissing-declarations -Wshadow) -ocv_define_module(cudabgsegm opencv_video OPTIONAL opencv_imgproc opencv_cudaarithm opencv_cudafilters opencv_cudaimgproc) +ocv_define_module(cudabgsegm opencv_video) diff --git a/modules/cudabgsegm/include/opencv2/cudabgsegm.hpp b/modules/cudabgsegm/include/opencv2/cudabgsegm.hpp index 4b5e305d6a..32ea7c17e0 100644 --- a/modules/cudabgsegm/include/opencv2/cudabgsegm.hpp +++ b/modules/cudabgsegm/include/opencv2/cudabgsegm.hpp @@ -147,115 +147,6 @@ CV_EXPORTS Ptr createBackgroundSubtractorMOG2(int history = 500, double varThreshold = 16, bool detectShadows = true); -//////////////////////////////////////////////////// -// GMG - -/** @brief Background/Foreground Segmentation Algorithm. - -The class discriminates between foreground and background pixels by building and maintaining a model -of the background. Any pixel which does not fit this model is then deemed to be foreground. The -class implements algorithm described in @cite Gold2012 . - */ -class CV_EXPORTS BackgroundSubtractorGMG : public cv::BackgroundSubtractor -{ -public: - using cv::BackgroundSubtractor::apply; - virtual void apply(InputArray image, OutputArray fgmask, double learningRate, Stream& stream) = 0; - - virtual int getMaxFeatures() const = 0; - virtual void setMaxFeatures(int maxFeatures) = 0; - - virtual double getDefaultLearningRate() const = 0; - virtual void setDefaultLearningRate(double lr) = 0; - - virtual int getNumFrames() const = 0; - virtual void setNumFrames(int nframes) = 0; - - virtual int getQuantizationLevels() const = 0; - virtual void setQuantizationLevels(int nlevels) = 0; - - virtual double getBackgroundPrior() const = 0; - virtual void setBackgroundPrior(double bgprior) = 0; - - virtual int getSmoothingRadius() const = 0; - virtual void setSmoothingRadius(int radius) = 0; - - virtual double getDecisionThreshold() const = 0; - virtual void setDecisionThreshold(double thresh) = 0; - - virtual bool getUpdateBackgroundModel() const = 0; - virtual void setUpdateBackgroundModel(bool update) = 0; - - virtual double getMinVal() const = 0; - virtual void setMinVal(double val) = 0; - - virtual double getMaxVal() const = 0; - virtual void setMaxVal(double val) = 0; -}; - -/** @brief Creates GMG Background Subtractor - -@param initializationFrames Number of frames of video to use to initialize histograms. -@param decisionThreshold Value above which pixel is determined to be FG. 
- */ -CV_EXPORTS Ptr - createBackgroundSubtractorGMG(int initializationFrames = 120, double decisionThreshold = 0.8); - -//////////////////////////////////////////////////// -// FGD - -/** @brief The class discriminates between foreground and background pixels by building and maintaining a model -of the background. - -Any pixel which does not fit this model is then deemed to be foreground. The class implements -algorithm described in @cite FGD2003 . -@sa BackgroundSubtractor - */ -class CV_EXPORTS BackgroundSubtractorFGD : public cv::BackgroundSubtractor -{ -public: - /** @brief Returns the output foreground regions calculated by findContours. - - @param foreground_regions Output array (CPU memory). - */ - virtual void getForegroundRegions(OutputArrayOfArrays foreground_regions) = 0; -}; - -struct CV_EXPORTS FGDParams -{ - int Lc; //!< Quantized levels per 'color' component. Power of two, typically 32, 64 or 128. - int N1c; //!< Number of color vectors used to model normal background color variation at a given pixel. - int N2c; //!< Number of color vectors retained at given pixel. Must be > N1c, typically ~ 5/3 of N1c. - //!< Used to allow the first N1c vectors to adapt over time to changing background. - - int Lcc; //!< Quantized levels per 'color co-occurrence' component. Power of two, typically 16, 32 or 64. - int N1cc; //!< Number of color co-occurrence vectors used to model normal background color variation at a given pixel. - int N2cc; //!< Number of color co-occurrence vectors retained at given pixel. Must be > N1cc, typically ~ 5/3 of N1cc. - //!< Used to allow the first N1cc vectors to adapt over time to changing background. - - bool is_obj_without_holes; //!< If TRUE we ignore holes within foreground blobs. Defaults to TRUE. - int perform_morphing; //!< Number of erode-dilate-erode foreground-blob cleanup iterations. - //!< These erase one-pixel junk blobs and merge almost-touching blobs. Default value is 1. - - float alpha1; //!< How quickly we forget old background pixel values seen. Typically set to 0.1. - float alpha2; //!< "Controls speed of feature learning". Depends on T. Typical value circa 0.005. - float alpha3; //!< Alternate to alpha2, used (e.g.) for quicker initial convergence. Typical value 0.1. - - float delta; //!< Affects color and color co-occurrence quantization, typically set to 2. - float T; //!< A percentage value which determines when new features can be recognized as new background. (Typically 0.9). - float minArea; //!< Discard foreground blobs whose bounding box is smaller than this threshold. - - //! default Params - FGDParams(); -}; - -/** @brief Creates FGD Background Subtractor - -@param params Algorithm's parameters. See @cite FGD2003 for explanation. - */ -CV_EXPORTS Ptr - createBackgroundSubtractorFGD(const FGDParams& params = FGDParams()); - //! 
@} }} // namespace cv { namespace cuda { diff --git a/modules/cudabgsegm/perf/perf_bgsegm.cpp b/modules/cudabgsegm/perf/perf_bgsegm.cpp index c2491f4333..48bda4a33b 100644 --- a/modules/cudabgsegm/perf/perf_bgsegm.cpp +++ b/modules/cudabgsegm/perf/perf_bgsegm.cpp @@ -42,10 +42,6 @@ #include "perf_precomp.hpp" -#ifdef HAVE_OPENCV_CUDAIMGPROC -# include "opencv2/cudaimgproc.hpp" -#endif - using namespace std; using namespace testing; using namespace perf; @@ -63,83 +59,6 @@ using namespace perf; # define BUILD_WITH_VIDEO_INPUT_SUPPORT 0 #endif -////////////////////////////////////////////////////// -// FGDStatModel - -#if BUILD_WITH_VIDEO_INPUT_SUPPORT - -DEF_PARAM_TEST_1(Video, string); - -PERF_TEST_P(Video, FGDStatModel, - Values(string("gpu/video/768x576.avi"))) -{ - const int numIters = 10; - - declare.time(60); - - const string inputFile = perf::TestBase::getDataPath(GetParam()); - - cv::VideoCapture cap(inputFile); - ASSERT_TRUE(cap.isOpened()); - - cv::Mat frame; - cap >> frame; - ASSERT_FALSE(frame.empty()); - - if (PERF_RUN_CUDA()) - { - cv::cuda::GpuMat d_frame(frame), foreground; - - cv::Ptr d_fgd = cv::cuda::createBackgroundSubtractorFGD(); - d_fgd->apply(d_frame, foreground); - - int i = 0; - - // collect performance data - for (; i < numIters; ++i) - { - cap >> frame; - ASSERT_FALSE(frame.empty()); - - d_frame.upload(frame); - - startTimer(); - if(!next()) - break; - - d_fgd->apply(d_frame, foreground); - - stopTimer(); - } - - // process last frame in sequence to get data for sanity test - for (; i < numIters; ++i) - { - cap >> frame; - ASSERT_FALSE(frame.empty()); - - d_frame.upload(frame); - - d_fgd->apply(d_frame, foreground); - } - - CUDA_SANITY_CHECK(foreground, 1e-2, ERROR_RELATIVE); - -#ifdef HAVE_OPENCV_CUDAIMGPROC - cv::cuda::GpuMat background3, background; - d_fgd->getBackgroundImage(background3); - cv::cuda::cvtColor(background3, background, cv::COLOR_BGR2BGRA); - CUDA_SANITY_CHECK(background, 1e-2, ERROR_RELATIVE); -#endif - } - else - { - FAIL_NO_CPU(); - } -} - -#endif - ////////////////////////////////////////////////////// // MOG @@ -484,118 +403,3 @@ PERF_TEST_P(Video_Cn, MOG2GetBackgroundImage, } #endif - -////////////////////////////////////////////////////// -// GMG - -#if BUILD_WITH_VIDEO_INPUT_SUPPORT - -DEF_PARAM_TEST(Video_Cn_MaxFeatures, string, MatCn, int); - -PERF_TEST_P(Video_Cn_MaxFeatures, GMG, - Combine(Values(string("gpu/video/768x576.avi")), - CUDA_CHANNELS_1_3_4, - Values(20, 40, 60))) -{ - const int numIters = 150; - - const std::string inputFile = perf::TestBase::getDataPath(GET_PARAM(0)); - const int cn = GET_PARAM(1); - const int maxFeatures = GET_PARAM(2); - - cv::VideoCapture cap(inputFile); - ASSERT_TRUE(cap.isOpened()); - - cv::Mat frame; - cap >> frame; - ASSERT_FALSE(frame.empty()); - - if (cn != 3) - { - cv::Mat temp; - if (cn == 1) - cv::cvtColor(frame, temp, cv::COLOR_BGR2GRAY); - else - cv::cvtColor(frame, temp, cv::COLOR_BGR2BGRA); - cv::swap(temp, frame); - } - - if (PERF_RUN_CUDA()) - { - cv::cuda::GpuMat d_frame(frame); - cv::cuda::GpuMat foreground; - - cv::Ptr d_gmg = cv::cuda::createBackgroundSubtractorGMG(); - d_gmg->setMaxFeatures(maxFeatures); - - d_gmg->apply(d_frame, foreground); - - int i = 0; - - // collect performance data - for (; i < numIters; ++i) - { - cap >> frame; - if (frame.empty()) - { - cap.release(); - cap.open(inputFile); - cap >> frame; - } - - if (cn != 3) - { - cv::Mat temp; - if (cn == 1) - cv::cvtColor(frame, temp, cv::COLOR_BGR2GRAY); - else - cv::cvtColor(frame, temp, cv::COLOR_BGR2BGRA); - 
cv::swap(temp, frame); - } - - d_frame.upload(frame); - - startTimer(); - if(!next()) - break; - - d_gmg->apply(d_frame, foreground); - - stopTimer(); - } - - // process last frame in sequence to get data for sanity test - for (; i < numIters; ++i) - { - cap >> frame; - if (frame.empty()) - { - cap.release(); - cap.open(inputFile); - cap >> frame; - } - - if (cn != 3) - { - cv::Mat temp; - if (cn == 1) - cv::cvtColor(frame, temp, cv::COLOR_BGR2GRAY); - else - cv::cvtColor(frame, temp, cv::COLOR_BGR2BGRA); - cv::swap(temp, frame); - } - - d_frame.upload(frame); - - d_gmg->apply(d_frame, foreground); - } - - CUDA_SANITY_CHECK(foreground); - } - else - { - FAIL_NO_CPU(); - } -} - -#endif diff --git a/modules/cudabgsegm/src/precomp.hpp b/modules/cudabgsegm/src/precomp.hpp index 4b1ae8d96f..e8d627e675 100644 --- a/modules/cudabgsegm/src/precomp.hpp +++ b/modules/cudabgsegm/src/precomp.hpp @@ -51,16 +51,4 @@ #include "opencv2/opencv_modules.hpp" -#ifdef HAVE_OPENCV_CUDAARITHM -# include "opencv2/cudaarithm.hpp" -#endif - -#ifdef HAVE_OPENCV_CUDAFILTERS -# include "opencv2/cudafilters.hpp" -#endif - -#ifdef HAVE_OPENCV_CUDAIMGPROC -# include "opencv2/cudaimgproc.hpp" -#endif - #endif /* __OPENCV_PRECOMP_H__ */ diff --git a/modules/cudalegacy/CMakeLists.txt b/modules/cudalegacy/CMakeLists.txt index 8947cd6fdc..7fe342e11c 100644 --- a/modules/cudalegacy/CMakeLists.txt +++ b/modules/cudalegacy/CMakeLists.txt @@ -6,4 +6,5 @@ set(the_description "CUDA-accelerated Computer Vision (legacy)") ocv_warnings_disable(CMAKE_CXX_FLAGS /wd4127 /wd4130 /wd4324 /wd4512 /wd4310 -Wundef -Wmissing-declarations -Wuninitialized -Wshadow) -ocv_define_module(cudalegacy opencv_core OPTIONAL opencv_objdetect) +ocv_define_module(cudalegacy opencv_core opencv_video + OPTIONAL opencv_objdetect opencv_imgproc opencv_calib3d opencv_cudaarithm opencv_cudafilters opencv_cudaimgproc) diff --git a/modules/cudalegacy/include/opencv2/cudalegacy.hpp b/modules/cudalegacy/include/opencv2/cudalegacy.hpp index 5e57733857..c27a1161f5 100644 --- a/modules/cudalegacy/include/opencv2/cudalegacy.hpp +++ b/modules/cudalegacy/include/opencv2/cudalegacy.hpp @@ -49,6 +49,7 @@ #include "opencv2/cudalegacy/NCVPyramid.hpp" #include "opencv2/cudalegacy/NCVHaarObjectDetection.hpp" #include "opencv2/cudalegacy/NCVBroxOpticalFlow.hpp" +#include "opencv2/video/background_segm.hpp" /** @addtogroup cuda @@ -59,6 +60,13 @@ namespace cv { namespace cuda { +//! @addtogroup cudalegacy +//! @{ + +// +// ImagePyramid +// + class CV_EXPORTS ImagePyramid : public Algorithm { public: @@ -67,6 +75,216 @@ public: CV_EXPORTS Ptr createImagePyramid(InputArray img, int nLayers = -1, Stream& stream = Stream::Null()); +// +// GMG +// + +/** @brief Background/Foreground Segmentation Algorithm. + +The class discriminates between foreground and background pixels by building and maintaining a model +of the background. Any pixel which does not fit this model is then deemed to be foreground. The +class implements algorithm described in @cite Gold2012 . 
+ */ +class CV_EXPORTS BackgroundSubtractorGMG : public cv::BackgroundSubtractor +{ +public: + using cv::BackgroundSubtractor::apply; + virtual void apply(InputArray image, OutputArray fgmask, double learningRate, Stream& stream) = 0; + + virtual int getMaxFeatures() const = 0; + virtual void setMaxFeatures(int maxFeatures) = 0; + + virtual double getDefaultLearningRate() const = 0; + virtual void setDefaultLearningRate(double lr) = 0; + + virtual int getNumFrames() const = 0; + virtual void setNumFrames(int nframes) = 0; + + virtual int getQuantizationLevels() const = 0; + virtual void setQuantizationLevels(int nlevels) = 0; + + virtual double getBackgroundPrior() const = 0; + virtual void setBackgroundPrior(double bgprior) = 0; + + virtual int getSmoothingRadius() const = 0; + virtual void setSmoothingRadius(int radius) = 0; + + virtual double getDecisionThreshold() const = 0; + virtual void setDecisionThreshold(double thresh) = 0; + + virtual bool getUpdateBackgroundModel() const = 0; + virtual void setUpdateBackgroundModel(bool update) = 0; + + virtual double getMinVal() const = 0; + virtual void setMinVal(double val) = 0; + + virtual double getMaxVal() const = 0; + virtual void setMaxVal(double val) = 0; +}; + +/** @brief Creates GMG Background Subtractor + +@param initializationFrames Number of frames of video to use to initialize histograms. +@param decisionThreshold Value above which pixel is determined to be FG. + */ +CV_EXPORTS Ptr + createBackgroundSubtractorGMG(int initializationFrames = 120, double decisionThreshold = 0.8); + +// +// FGD +// + +/** @brief The class discriminates between foreground and background pixels by building and maintaining a model +of the background. + +Any pixel which does not fit this model is then deemed to be foreground. The class implements +algorithm described in @cite FGD2003 . +@sa BackgroundSubtractor + */ +class CV_EXPORTS BackgroundSubtractorFGD : public cv::BackgroundSubtractor +{ +public: + /** @brief Returns the output foreground regions calculated by findContours. + + @param foreground_regions Output array (CPU memory). + */ + virtual void getForegroundRegions(OutputArrayOfArrays foreground_regions) = 0; +}; + +struct CV_EXPORTS FGDParams +{ + int Lc; //!< Quantized levels per 'color' component. Power of two, typically 32, 64 or 128. + int N1c; //!< Number of color vectors used to model normal background color variation at a given pixel. + int N2c; //!< Number of color vectors retained at given pixel. Must be > N1c, typically ~ 5/3 of N1c. + //!< Used to allow the first N1c vectors to adapt over time to changing background. + + int Lcc; //!< Quantized levels per 'color co-occurrence' component. Power of two, typically 16, 32 or 64. + int N1cc; //!< Number of color co-occurrence vectors used to model normal background color variation at a given pixel. + int N2cc; //!< Number of color co-occurrence vectors retained at given pixel. Must be > N1cc, typically ~ 5/3 of N1cc. + //!< Used to allow the first N1cc vectors to adapt over time to changing background. + + bool is_obj_without_holes; //!< If TRUE we ignore holes within foreground blobs. Defaults to TRUE. + int perform_morphing; //!< Number of erode-dilate-erode foreground-blob cleanup iterations. + //!< These erase one-pixel junk blobs and merge almost-touching blobs. Default value is 1. + + float alpha1; //!< How quickly we forget old background pixel values seen. Typically set to 0.1. + float alpha2; //!< "Controls speed of feature learning". Depends on T. Typical value circa 0.005. 
+ float alpha3; //!< Alternate to alpha2, used (e.g.) for quicker initial convergence. Typical value 0.1. + + float delta; //!< Affects color and color co-occurrence quantization, typically set to 2. + float T; //!< A percentage value which determines when new features can be recognized as new background. (Typically 0.9). + float minArea; //!< Discard foreground blobs whose bounding box is smaller than this threshold. + + //! default Params + FGDParams(); +}; + +/** @brief Creates FGD Background Subtractor + +@param params Algorithm's parameters. See @cite FGD2003 for explanation. + */ +CV_EXPORTS Ptr + createBackgroundSubtractorFGD(const FGDParams& params = FGDParams()); + +// +// Optical flow +// + +//! Calculates optical flow for 2 images using block matching algorithm */ +CV_EXPORTS void calcOpticalFlowBM(const GpuMat& prev, const GpuMat& curr, + Size block_size, Size shift_size, Size max_range, bool use_previous, + GpuMat& velx, GpuMat& vely, GpuMat& buf, + Stream& stream = Stream::Null()); + +class CV_EXPORTS FastOpticalFlowBM +{ +public: + void operator ()(const GpuMat& I0, const GpuMat& I1, GpuMat& flowx, GpuMat& flowy, int search_window = 21, int block_window = 7, Stream& s = Stream::Null()); + +private: + GpuMat buffer; + GpuMat extended_I0; + GpuMat extended_I1; +}; + +/** @brief Interpolates frames (images) using provided optical flow (displacement field). + +@param frame0 First frame (32-bit floating point images, single channel). +@param frame1 Second frame. Must have the same type and size as frame0 . +@param fu Forward horizontal displacement. +@param fv Forward vertical displacement. +@param bu Backward horizontal displacement. +@param bv Backward vertical displacement. +@param pos New frame position. +@param newFrame Output image. +@param buf Temporary buffer, will have width x 6\*height size, CV_32FC1 type and contain 6 +GpuMat: occlusion masks for first frame, occlusion masks for second, interpolated forward +horizontal flow, interpolated forward vertical flow, interpolated backward horizontal flow, +interpolated backward vertical flow. +@param stream Stream for the asynchronous version. + */ +CV_EXPORTS void interpolateFrames(const GpuMat& frame0, const GpuMat& frame1, + const GpuMat& fu, const GpuMat& fv, + const GpuMat& bu, const GpuMat& bv, + float pos, GpuMat& newFrame, GpuMat& buf, + Stream& stream = Stream::Null()); + +CV_EXPORTS void createOpticalFlowNeedleMap(const GpuMat& u, const GpuMat& v, GpuMat& vertex, GpuMat& colors); + +// +// Labeling +// + +//!performs labeling via graph cuts of a 2D regular 4-connected graph. +CV_EXPORTS void graphcut(GpuMat& terminals, GpuMat& leftTransp, GpuMat& rightTransp, GpuMat& top, GpuMat& bottom, GpuMat& labels, + GpuMat& buf, Stream& stream = Stream::Null()); + +//!performs labeling via graph cuts of a 2D regular 8-connected graph. +CV_EXPORTS void graphcut(GpuMat& terminals, GpuMat& leftTransp, GpuMat& rightTransp, GpuMat& top, GpuMat& topLeft, GpuMat& topRight, + GpuMat& bottom, GpuMat& bottomLeft, GpuMat& bottomRight, + GpuMat& labels, + GpuMat& buf, Stream& stream = Stream::Null()); + +//! compute mask for Generalized Flood fill componetns labeling. +CV_EXPORTS void connectivityMask(const GpuMat& image, GpuMat& mask, const cv::Scalar& lo, const cv::Scalar& hi, Stream& stream = Stream::Null()); + +//! performs connected componnents labeling. 
+CV_EXPORTS void labelComponents(const GpuMat& mask, GpuMat& components, int flags = 0, Stream& stream = Stream::Null()); + +// +// Calib3d +// + +CV_EXPORTS void transformPoints(const GpuMat& src, const Mat& rvec, const Mat& tvec, + GpuMat& dst, Stream& stream = Stream::Null()); + +CV_EXPORTS void projectPoints(const GpuMat& src, const Mat& rvec, const Mat& tvec, + const Mat& camera_mat, const Mat& dist_coef, GpuMat& dst, + Stream& stream = Stream::Null()); + +/** @brief Finds the object pose from 3D-2D point correspondences. + +@param object Single-row matrix of object points. +@param image Single-row matrix of image points. +@param camera_mat 3x3 matrix of intrinsic camera parameters. +@param dist_coef Distortion coefficients. See undistortPoints for details. +@param rvec Output 3D rotation vector. +@param tvec Output 3D translation vector. +@param use_extrinsic_guess Flag to indicate that the function must use rvec and tvec as an +initial transformation guess. It is not supported for now. +@param num_iters Maximum number of RANSAC iterations. +@param max_dist Euclidean distance threshold to detect whether point is inlier or not. +@param min_inlier_count Flag to indicate that the function must stop if greater or equal number +of inliers is achieved. It is not supported for now. +@param inliers Output vector of inlier indices. + */ +CV_EXPORTS void solvePnPRansac(const Mat& object, const Mat& image, const Mat& camera_mat, + const Mat& dist_coef, Mat& rvec, Mat& tvec, bool use_extrinsic_guess=false, + int num_iters=100, float max_dist=8.0, int min_inlier_count=100, + std::vector* inliers=NULL); + +//! @} + }} #endif /* __OPENCV_CUDALEGACY_HPP__ */ diff --git a/modules/cudalegacy/perf/perf_bgsegm.cpp b/modules/cudalegacy/perf/perf_bgsegm.cpp new file mode 100644 index 0000000000..4367910497 --- /dev/null +++ b/modules/cudalegacy/perf/perf_bgsegm.cpp @@ -0,0 +1,249 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. 
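(Aside, not part of the diff: the cuda::solvePnPRansac declaration documented above keeps its single-row point layout after the move into cudalegacy. A hedged usage sketch follows; the input names are hypothetical, the points are assumed to be already undistorted so dist_coef is left empty, and the parameter values are only illustrative.)

#include <vector>
#include <opencv2/core.hpp>
#include <opencv2/cudalegacy.hpp>

// Sketch: estimate a pose from single-row object/image point matrices.
void estimatePoseSketch(const cv::Mat& objectPoints, // 1 x N, CV_32FC3
                        const cv::Mat& imagePoints,  // 1 x N, CV_32FC2
                        const cv::Mat& cameraMat)    // 3 x 3, CV_32F
{
    cv::Mat rvec, tvec;
    std::vector<int> inliers;

    cv::cuda::solvePnPRansac(objectPoints, imagePoints, cameraMat,
                             cv::Mat(),   // dist_coef: none in this sketch
                             rvec, tvec,
                             false,       // use_extrinsic_guess (not supported yet)
                             200,         // num_iters: RANSAC iterations
                             8.0f,        // max_dist: inlier threshold in pixels
                             100,         // min_inlier_count
                             &inliers);   // indices of inlier correspondences

    // rvec / tvec now hold the rotation and translation vectors.
}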
+// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#include "perf_precomp.hpp" + +#ifdef HAVE_OPENCV_CUDAIMGPROC +# include "opencv2/cudaimgproc.hpp" +#endif + +using namespace std; +using namespace testing; +using namespace perf; + +#if defined(HAVE_XINE) || \ + defined(HAVE_GSTREAMER) || \ + defined(HAVE_QUICKTIME) || \ + defined(HAVE_QTKIT) || \ + defined(HAVE_AVFOUNDATION) || \ + defined(HAVE_FFMPEG) || \ + defined(WIN32) /* assume that we have ffmpeg */ + +# define BUILD_WITH_VIDEO_INPUT_SUPPORT 1 +#else +# define BUILD_WITH_VIDEO_INPUT_SUPPORT 0 +#endif + +////////////////////////////////////////////////////// +// FGDStatModel + +#if BUILD_WITH_VIDEO_INPUT_SUPPORT + +DEF_PARAM_TEST_1(Video, string); + +PERF_TEST_P(Video, FGDStatModel, + Values(string("gpu/video/768x576.avi"))) +{ + const int numIters = 10; + + declare.time(60); + + const string inputFile = perf::TestBase::getDataPath(GetParam()); + + cv::VideoCapture cap(inputFile); + ASSERT_TRUE(cap.isOpened()); + + cv::Mat frame; + cap >> frame; + ASSERT_FALSE(frame.empty()); + + if (PERF_RUN_CUDA()) + { + cv::cuda::GpuMat d_frame(frame), foreground; + + cv::Ptr d_fgd = cv::cuda::createBackgroundSubtractorFGD(); + d_fgd->apply(d_frame, foreground); + + int i = 0; + + // collect performance data + for (; i < numIters; ++i) + { + cap >> frame; + ASSERT_FALSE(frame.empty()); + + d_frame.upload(frame); + + startTimer(); + if(!next()) + break; + + d_fgd->apply(d_frame, foreground); + + stopTimer(); + } + + // process last frame in sequence to get data for sanity test + for (; i < numIters; ++i) + { + cap >> frame; + ASSERT_FALSE(frame.empty()); + + d_frame.upload(frame); + + d_fgd->apply(d_frame, foreground); + } + } + else + { + FAIL_NO_CPU(); + } + + SANITY_CHECK_NOTHING(); +} + +#endif + +////////////////////////////////////////////////////// +// GMG + +#if BUILD_WITH_VIDEO_INPUT_SUPPORT + +DEF_PARAM_TEST(Video_Cn_MaxFeatures, string, MatCn, int); + +PERF_TEST_P(Video_Cn_MaxFeatures, GMG, + Combine(Values(string("gpu/video/768x576.avi")), + CUDA_CHANNELS_1_3_4, + Values(20, 40, 60))) +{ + const int numIters = 150; + + const std::string inputFile = perf::TestBase::getDataPath(GET_PARAM(0)); + const int cn = GET_PARAM(1); + const int maxFeatures = GET_PARAM(2); + + cv::VideoCapture cap(inputFile); + ASSERT_TRUE(cap.isOpened()); + + cv::Mat frame; + cap >> frame; + ASSERT_FALSE(frame.empty()); + + if (cn != 3) + { + cv::Mat temp; + if (cn == 1) + cv::cvtColor(frame, temp, cv::COLOR_BGR2GRAY); + else + cv::cvtColor(frame, temp, cv::COLOR_BGR2BGRA); + cv::swap(temp, frame); + } + + if (PERF_RUN_CUDA()) + { + cv::cuda::GpuMat d_frame(frame); + cv::cuda::GpuMat foreground; + + cv::Ptr d_gmg = cv::cuda::createBackgroundSubtractorGMG(); + d_gmg->setMaxFeatures(maxFeatures); + + d_gmg->apply(d_frame, foreground); + + int i = 0; + + // collect performance data + for (; i < numIters; ++i) + { + cap >> frame; + if (frame.empty()) + { + cap.release(); + cap.open(inputFile); + cap >> frame; + } + + if (cn != 3) + { + cv::Mat temp; + if (cn == 1) + 
cv::cvtColor(frame, temp, cv::COLOR_BGR2GRAY); + else + cv::cvtColor(frame, temp, cv::COLOR_BGR2BGRA); + cv::swap(temp, frame); + } + + d_frame.upload(frame); + + startTimer(); + if(!next()) + break; + + d_gmg->apply(d_frame, foreground); + + stopTimer(); + } + + // process last frame in sequence to get data for sanity test + for (; i < numIters; ++i) + { + cap >> frame; + if (frame.empty()) + { + cap.release(); + cap.open(inputFile); + cap >> frame; + } + + if (cn != 3) + { + cv::Mat temp; + if (cn == 1) + cv::cvtColor(frame, temp, cv::COLOR_BGR2GRAY); + else + cv::cvtColor(frame, temp, cv::COLOR_BGR2BGRA); + cv::swap(temp, frame); + } + + d_frame.upload(frame); + + d_gmg->apply(d_frame, foreground); + } + } + else + { + FAIL_NO_CPU(); + } + + SANITY_CHECK_NOTHING(); +} + +#endif diff --git a/modules/cuda/perf/perf_calib3d.cpp b/modules/cudalegacy/perf/perf_calib3d.cpp similarity index 98% rename from modules/cuda/perf/perf_calib3d.cpp rename to modules/cudalegacy/perf/perf_calib3d.cpp index 4148b5191a..0e708ce4f0 100644 --- a/modules/cuda/perf/perf_calib3d.cpp +++ b/modules/cudalegacy/perf/perf_calib3d.cpp @@ -42,6 +42,10 @@ #include "perf_precomp.hpp" +#ifdef HAVE_OPENCV_CALIB3D + +#include "opencv2/calib3d.hpp" + using namespace std; using namespace testing; using namespace perf; @@ -133,3 +137,5 @@ PERF_TEST_P(Count, Calib3D_SolvePnPRansac, CPU_SANITY_CHECK(tvec, 1e-6); } } + +#endif diff --git a/modules/cuda/perf/perf_labeling.cpp b/modules/cudalegacy/perf/perf_labeling.cpp similarity index 100% rename from modules/cuda/perf/perf_labeling.cpp rename to modules/cudalegacy/perf/perf_labeling.cpp diff --git a/modules/cuda/perf/perf_main.cpp b/modules/cudalegacy/perf/perf_main.cpp similarity index 98% rename from modules/cuda/perf/perf_main.cpp rename to modules/cudalegacy/perf/perf_main.cpp index f01a2768d5..0830707460 100644 --- a/modules/cuda/perf/perf_main.cpp +++ b/modules/cudalegacy/perf/perf_main.cpp @@ -44,4 +44,4 @@ using namespace perf; -CV_PERF_TEST_CUDA_MAIN(cuda) +CV_PERF_TEST_CUDA_MAIN(cudalegacy) diff --git a/modules/cuda/perf/perf_precomp.hpp b/modules/cudalegacy/perf/perf_precomp.hpp similarity index 96% rename from modules/cuda/perf/perf_precomp.hpp rename to modules/cudalegacy/perf/perf_precomp.hpp index f810968cb7..847765018d 100644 --- a/modules/cuda/perf/perf_precomp.hpp +++ b/modules/cudalegacy/perf/perf_precomp.hpp @@ -54,8 +54,10 @@ #include "opencv2/ts.hpp" #include "opencv2/ts/cuda_perf.hpp" -#include "opencv2/cuda.hpp" -#include "opencv2/calib3d.hpp" +#include "opencv2/cudalegacy.hpp" +#include "opencv2/video.hpp" + +#include "opencv2/opencv_modules.hpp" #ifdef GTEST_CREATE_SHARED_LIBRARY #error no modules except ts should have GTEST_CREATE_SHARED_LIBRARY defined diff --git a/modules/cudaoptflow/src/bm.cpp b/modules/cudalegacy/src/bm.cpp similarity index 100% rename from modules/cudaoptflow/src/bm.cpp rename to modules/cudalegacy/src/bm.cpp diff --git a/modules/cudaoptflow/src/bm_fast.cpp b/modules/cudalegacy/src/bm_fast.cpp similarity index 100% rename from modules/cudaoptflow/src/bm_fast.cpp rename to modules/cudalegacy/src/bm_fast.cpp diff --git a/modules/cuda/src/calib3d.cpp b/modules/cudalegacy/src/calib3d.cpp similarity index 99% rename from modules/cuda/src/calib3d.cpp rename to modules/cudalegacy/src/calib3d.cpp index eaadb9343d..7d8c816fa0 100644 --- a/modules/cuda/src/calib3d.cpp +++ b/modules/cudalegacy/src/calib3d.cpp @@ -45,7 +45,7 @@ using namespace cv; using namespace cv::cuda; -#if !defined HAVE_CUDA || defined(CUDA_DISABLER) +#if !defined 
HAVE_CUDA || !defined HAVE_OPENCV_CALIB3D || defined(CUDA_DISABLER) void cv::cuda::transformPoints(const GpuMat&, const Mat&, const Mat&, GpuMat&, Stream&) { throw_no_cuda(); } diff --git a/modules/cudaoptflow/src/cuda/bm.cu b/modules/cudalegacy/src/cuda/bm.cu similarity index 100% rename from modules/cudaoptflow/src/cuda/bm.cu rename to modules/cudalegacy/src/cuda/bm.cu diff --git a/modules/cudaoptflow/src/cuda/bm_fast.cu b/modules/cudalegacy/src/cuda/bm_fast.cu similarity index 100% rename from modules/cudaoptflow/src/cuda/bm_fast.cu rename to modules/cudalegacy/src/cuda/bm_fast.cu diff --git a/modules/cuda/src/cuda/calib3d.cu b/modules/cudalegacy/src/cuda/calib3d.cu similarity index 100% rename from modules/cuda/src/cuda/calib3d.cu rename to modules/cudalegacy/src/cuda/calib3d.cu diff --git a/modules/cuda/src/cuda/ccomponetns.cu b/modules/cudalegacy/src/cuda/ccomponetns.cu similarity index 100% rename from modules/cuda/src/cuda/ccomponetns.cu rename to modules/cudalegacy/src/cuda/ccomponetns.cu diff --git a/modules/cudabgsegm/src/cuda/fgd.cu b/modules/cudalegacy/src/cuda/fgd.cu similarity index 100% rename from modules/cudabgsegm/src/cuda/fgd.cu rename to modules/cudalegacy/src/cuda/fgd.cu diff --git a/modules/cudabgsegm/src/cuda/fgd.hpp b/modules/cudalegacy/src/cuda/fgd.hpp similarity index 100% rename from modules/cudabgsegm/src/cuda/fgd.hpp rename to modules/cudalegacy/src/cuda/fgd.hpp diff --git a/modules/cudabgsegm/src/cuda/gmg.cu b/modules/cudalegacy/src/cuda/gmg.cu similarity index 100% rename from modules/cudabgsegm/src/cuda/gmg.cu rename to modules/cudalegacy/src/cuda/gmg.cu diff --git a/modules/cudaoptflow/src/cuda/needle_map.cu b/modules/cudalegacy/src/cuda/needle_map.cu similarity index 100% rename from modules/cudaoptflow/src/cuda/needle_map.cu rename to modules/cudalegacy/src/cuda/needle_map.cu diff --git a/modules/cudabgsegm/src/fgd.cpp b/modules/cudalegacy/src/fgd.cpp similarity index 100% rename from modules/cudabgsegm/src/fgd.cpp rename to modules/cudalegacy/src/fgd.cpp diff --git a/modules/cudabgsegm/src/gmg.cpp b/modules/cudalegacy/src/gmg.cpp similarity index 100% rename from modules/cudabgsegm/src/gmg.cpp rename to modules/cudalegacy/src/gmg.cpp diff --git a/modules/cuda/src/graphcuts.cpp b/modules/cudalegacy/src/graphcuts.cpp similarity index 100% rename from modules/cuda/src/graphcuts.cpp rename to modules/cudalegacy/src/graphcuts.cpp diff --git a/modules/cudaoptflow/src/interpolate_frames.cpp b/modules/cudalegacy/src/interpolate_frames.cpp similarity index 100% rename from modules/cudaoptflow/src/interpolate_frames.cpp rename to modules/cudalegacy/src/interpolate_frames.cpp diff --git a/modules/cudaoptflow/src/needle_map.cpp b/modules/cudalegacy/src/needle_map.cpp similarity index 100% rename from modules/cudaoptflow/src/needle_map.cpp rename to modules/cudalegacy/src/needle_map.cpp diff --git a/modules/cudalegacy/src/precomp.hpp b/modules/cudalegacy/src/precomp.hpp index b432057efe..9eda7e01df 100644 --- a/modules/cudalegacy/src/precomp.hpp +++ b/modules/cudalegacy/src/precomp.hpp @@ -56,6 +56,22 @@ # include "opencv2/objdetect.hpp" #endif +#ifdef HAVE_OPENCV_CALIB3D +# include "opencv2/calib3d.hpp" +#endif + +#ifdef HAVE_OPENCV_CUDAARITHM +# include "opencv2/cudaarithm.hpp" +#endif + +#ifdef HAVE_OPENCV_CUDAFILTERS +# include "opencv2/cudafilters.hpp" +#endif + +#ifdef HAVE_OPENCV_CUDAIMGPROC +# include "opencv2/cudaimgproc.hpp" +#endif + #include "opencv2/core/private.cuda.hpp" #include "opencv2/cudalegacy/private.hpp" diff --git 
a/modules/cuda/test/test_calib3d.cpp b/modules/cudalegacy/test/test_calib3d.cpp similarity index 98% rename from modules/cuda/test/test_calib3d.cpp rename to modules/cudalegacy/test/test_calib3d.cpp index 7208b10ba5..7a73f817ec 100644 --- a/modules/cuda/test/test_calib3d.cpp +++ b/modules/cudalegacy/test/test_calib3d.cpp @@ -42,7 +42,9 @@ #include "test_precomp.hpp" -#ifdef HAVE_CUDA +#if defined HAVE_CUDA && defined HAVE_OPENCV_CALIB3D + +#include "opencv2/calib3d.hpp" using namespace cvtest; diff --git a/modules/cuda/test/test_labeling.cpp b/modules/cudalegacy/test/test_labeling.cpp similarity index 100% rename from modules/cuda/test/test_labeling.cpp rename to modules/cudalegacy/test/test_labeling.cpp diff --git a/modules/cudalegacy/test/test_precomp.hpp b/modules/cudalegacy/test/test_precomp.hpp index 5169ef2bbd..727b40d833 100644 --- a/modules/cudalegacy/test/test_precomp.hpp +++ b/modules/cudalegacy/test/test_precomp.hpp @@ -74,6 +74,8 @@ #include "opencv2/core/private.cuda.hpp" +#include "opencv2/opencv_modules.hpp" + #include "cvconfig.h" #include "NCVTest.hpp" diff --git a/modules/cudaoptflow/include/opencv2/cudaoptflow.hpp b/modules/cudaoptflow/include/opencv2/cudaoptflow.hpp index f65b1447b2..6ea75594d2 100644 --- a/modules/cudaoptflow/include/opencv2/cudaoptflow.hpp +++ b/modules/cudaoptflow/include/opencv2/cudaoptflow.hpp @@ -61,49 +61,94 @@ namespace cv { namespace cuda { //! @addtogroup cudaoptflow //! @{ -/** @brief Class computing the optical flow for two images using Brox et al Optical Flow algorithm -(@cite Brox2004). : +// +// Interface +// + +/** @brief Base interface for dense optical flow algorithms. */ -class CV_EXPORTS BroxOpticalFlow +class CV_EXPORTS DenseOpticalFlow : public Algorithm { public: - BroxOpticalFlow(float alpha_, float gamma_, float scale_factor_, int inner_iterations_, int outer_iterations_, int solver_iterations_) : - alpha(alpha_), gamma(gamma_), scale_factor(scale_factor_), - inner_iterations(inner_iterations_), outer_iterations(outer_iterations_), solver_iterations(solver_iterations_) - { - } + /** @brief Calculates a dense optical flow. - //! Compute optical flow - //! frame0 - source frame (supports only CV_32FC1 type) - //! frame1 - frame to track (with the same size and type as frame0) - //! u - flow horizontal component (along x axis) - //! v - flow vertical component (along y axis) - void operator ()(const GpuMat& frame0, const GpuMat& frame1, GpuMat& u, GpuMat& v, Stream& stream = Stream::Null()); + @param I0 first input image. + @param I1 second input image of the same size and the same type as I0. + @param flow computed flow image that has the same size as I0 and type CV_32FC2. + @param stream Stream for the asynchronous version. + */ + virtual void calc(InputArray I0, InputArray I1, InputOutputArray flow, Stream& stream = Stream::Null()) = 0; +}; - //! flow smoothness - float alpha; +/** @brief Base interface for sparse optical flow algorithms. + */ +class CV_EXPORTS SparseOpticalFlow : public Algorithm +{ +public: + /** @brief Calculates a sparse optical flow. + + @param prevImg First input image. + @param nextImg Second input image of the same size and the same type as prevImg. + @param prevPts Vector of 2D points for which the flow needs to be found. + @param nextPts Output vector of 2D points containing the calculated new positions of input features in the second image. + @param status Output status vector. Each element of the vector is set to 1 if the + flow for the corresponding features has been found. 
Otherwise, it is set to 0. + @param err Optional output vector that contains error response for each point (inverse confidence). + @param stream Stream for the asynchronous version. + */ + virtual void calc(InputArray prevImg, InputArray nextImg, + InputArray prevPts, InputOutputArray nextPts, + OutputArray status, + OutputArray err = cv::noArray(), + Stream& stream = Stream::Null()) = 0; +}; + +// +// BroxOpticalFlow +// + +/** @brief Class computing the optical flow for two images using Brox et al Optical Flow algorithm (@cite Brox2004). + */ +class CV_EXPORTS BroxOpticalFlow : public DenseOpticalFlow +{ +public: + virtual double getFlowSmoothness() const = 0; + virtual void setFlowSmoothness(double alpha) = 0; - //! gradient constancy importance - float gamma; + virtual double getGradientConstancyImportance() const = 0; + virtual void setGradientConstancyImportance(double gamma) = 0; - //! pyramid scale factor - float scale_factor; + virtual double getPyramidScaleFactor() const = 0; + virtual void setPyramidScaleFactor(double scale_factor) = 0; //! number of lagged non-linearity iterations (inner loop) - int inner_iterations; + virtual int getInnerIterations() const = 0; + virtual void setInnerIterations(int inner_iterations) = 0; //! number of warping iterations (number of pyramid levels) - int outer_iterations; + virtual int getOuterIterations() const = 0; + virtual void setOuterIterations(int outer_iterations) = 0; //! number of linear system solver iterations - int solver_iterations; - - GpuMat buf; + virtual int getSolverIterations() const = 0; + virtual void setSolverIterations(int solver_iterations) = 0; + + static Ptr create( + double alpha = 0.197, + double gamma = 50.0, + double scale_factor = 0.8, + int inner_iterations = 5, + int outer_iterations = 150, + int solver_iterations = 10); }; -/** @brief Class used for calculating an optical flow. +// +// PyrLKOpticalFlow +// + +/** @brief Class used for calculating a sparse optical flow. -The class can calculate an optical flow for a sparse feature set or dense optical flow using the +The class can calculate an optical flow for a sparse feature set using the iterative Lucas-Kanade method with pyramids. @sa calcOpticalFlowPyrLK @@ -112,158 +157,116 @@ iterative Lucas-Kanade method with pyramids. - An example of the Lucas Kanade optical flow algorithm can be found at opencv_source_code/samples/gpu/pyrlk_optical_flow.cpp */ -class CV_EXPORTS PyrLKOpticalFlow +class CV_EXPORTS SparsePyrLKOpticalFlow : public SparseOpticalFlow { public: - PyrLKOpticalFlow(); - - /** @brief Calculate an optical flow for a sparse feature set. - - @param prevImg First 8-bit input image (supports both grayscale and color images). - @param nextImg Second input image of the same size and the same type as prevImg . - @param prevPts Vector of 2D points for which the flow needs to be found. It must be one row matrix - with CV_32FC2 type. - @param nextPts Output vector of 2D points (with single-precision floating-point coordinates) - containing the calculated new positions of input features in the second image. When useInitialFlow - is true, the vector must have the same size as in the input. - @param status Output status vector (CV_8UC1 type). Each element of the vector is set to 1 if the - flow for the corresponding features has been found. Otherwise, it is set to 0. - @param err Output vector (CV_32FC1 type) that contains the difference between patches around the - original and moved points or min eigen value if getMinEigenVals is checked. 
It can be NULL, if not - needed. - - @sa calcOpticalFlowPyrLK - */ - void sparse(const GpuMat& prevImg, const GpuMat& nextImg, const GpuMat& prevPts, GpuMat& nextPts, - GpuMat& status, GpuMat* err = 0); - - /** @brief Calculate dense optical flow. - - @param prevImg First 8-bit grayscale input image. - @param nextImg Second input image of the same size and the same type as prevImg . - @param u Horizontal component of the optical flow of the same size as input images, 32-bit - floating-point, single-channel - @param v Vertical component of the optical flow of the same size as input images, 32-bit - floating-point, single-channel - @param err Output vector (CV_32FC1 type) that contains the difference between patches around the - original and moved points or min eigen value if getMinEigenVals is checked. It can be NULL, if not - needed. - */ - void dense(const GpuMat& prevImg, const GpuMat& nextImg, GpuMat& u, GpuMat& v, GpuMat* err = 0); + virtual Size getWinSize() const = 0; + virtual void setWinSize(Size winSize) = 0; - /** @brief Releases inner buffers memory. - */ - void releaseMemory(); + virtual int getMaxLevel() const = 0; + virtual void setMaxLevel(int maxLevel) = 0; - Size winSize; - int maxLevel; - int iters; - bool useInitialFlow; + virtual int getNumIters() const = 0; + virtual void setNumIters(int iters) = 0; -private: - std::vector prevPyr_; - std::vector nextPyr_; + virtual bool getUseInitialFlow() const = 0; + virtual void setUseInitialFlow(bool useInitialFlow) = 0; - GpuMat buf_; - - GpuMat uPyr_[2]; - GpuMat vPyr_[2]; + static Ptr create( + Size winSize = Size(21, 21), + int maxLevel = 3, + int iters = 30, + bool useInitialFlow = false); }; -/** @brief Class computing a dense optical flow using the Gunnar Farneback’s algorithm. : +/** @brief Class used for calculating a dense optical flow. + +The class can calculate an optical flow for a dense optical flow using the +iterative Lucas-Kanade method with pyramids. */ -class CV_EXPORTS FarnebackOpticalFlow +class CV_EXPORTS DensePyrLKOpticalFlow : public DenseOpticalFlow { public: - FarnebackOpticalFlow() - { - numLevels = 5; - pyrScale = 0.5; - fastPyramids = false; - winSize = 13; - numIters = 10; - polyN = 5; - polySigma = 1.1; - flags = 0; - } - - int numLevels; - double pyrScale; - bool fastPyramids; - int winSize; - int numIters; - int polyN; - double polySigma; - int flags; - - /** @brief Computes a dense optical flow using the Gunnar Farneback’s algorithm. - - @param frame0 First 8-bit gray-scale input image - @param frame1 Second 8-bit gray-scale input image - @param flowx Flow horizontal component - @param flowy Flow vertical component - @param s Stream - - @sa calcOpticalFlowFarneback - */ - void operator ()(const GpuMat &frame0, const GpuMat &frame1, GpuMat &flowx, GpuMat &flowy, Stream &s = Stream::Null()); + virtual Size getWinSize() const = 0; + virtual void setWinSize(Size winSize) = 0; - /** @brief Releases unused auxiliary memory buffers. 
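As a rough usage sketch for the sparse interface above (illustrative only, not part of the patch; d_prevImg, d_nextImg and d_prevPts are placeholder GpuMats), note that prevPts is still expected to be a single-row CV_32FC2 matrix, just as with the old sparse() call:

    cv::Ptr<cv::cuda::SparsePyrLKOpticalFlow> pyrLK =
        cv::cuda::SparsePyrLKOpticalFlow::create(cv::Size(21, 21), 3 /*maxLevel*/, 30 /*iters*/);

    cv::cuda::GpuMat d_nextPts, d_status;
    pyrLK->calc(d_prevImg, d_nextImg, d_prevPts, d_nextPts, d_status);   // status(i) == 1 where tracking succeeded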
- */ - void releaseMemory() - { - frames_[0].release(); - frames_[1].release(); - pyrLevel_[0].release(); - pyrLevel_[1].release(); - M_.release(); - bufM_.release(); - R_[0].release(); - R_[1].release(); - blurredFrame_[0].release(); - blurredFrame_[1].release(); - pyramid0_.clear(); - pyramid1_.clear(); - } - -private: - void prepareGaussian( - int n, double sigma, float *g, float *xg, float *xxg, - double &ig11, double &ig03, double &ig33, double &ig55); - - void setPolynomialExpansionConsts(int n, double sigma); - - void updateFlow_boxFilter( - const GpuMat& R0, const GpuMat& R1, GpuMat& flowx, GpuMat &flowy, - GpuMat& M, GpuMat &bufM, int blockSize, bool updateMatrices, Stream streams[]); - - void updateFlow_gaussianBlur( - const GpuMat& R0, const GpuMat& R1, GpuMat& flowx, GpuMat& flowy, - GpuMat& M, GpuMat &bufM, int blockSize, bool updateMatrices, Stream streams[]); - - GpuMat frames_[2]; - GpuMat pyrLevel_[2], M_, bufM_, R_[2], blurredFrame_[2]; - std::vector pyramid0_, pyramid1_; + virtual int getMaxLevel() const = 0; + virtual void setMaxLevel(int maxLevel) = 0; + + virtual int getNumIters() const = 0; + virtual void setNumIters(int iters) = 0; + + virtual bool getUseInitialFlow() const = 0; + virtual void setUseInitialFlow(bool useInitialFlow) = 0; + + static Ptr create( + Size winSize = Size(13, 13), + int maxLevel = 3, + int iters = 30, + bool useInitialFlow = false); }; -// Implementation of the Zach, Pock and Bischof Dual TV-L1 Optical Flow method // -// see reference: -// [1] C. Zach, T. Pock and H. Bischof, "A Duality Based Approach for Realtime TV-L1 Optical Flow". -// [2] Javier Sanchez, Enric Meinhardt-Llopis and Gabriele Facciolo. "TV-L1 Optical Flow Estimation". -class CV_EXPORTS OpticalFlowDual_TVL1_CUDA +// FarnebackOpticalFlow +// + +/** @brief Class computing a dense optical flow using the Gunnar Farneback’s algorithm. + */ +class CV_EXPORTS FarnebackOpticalFlow : public DenseOpticalFlow { public: - OpticalFlowDual_TVL1_CUDA(); + virtual int getNumLevels() const = 0; + virtual void setNumLevels(int numLevels) = 0; + + virtual double getPyrScale() const = 0; + virtual void setPyrScale(double pyrScale) = 0; + + virtual bool getFastPyramids() const = 0; + virtual void setFastPyramids(bool fastPyramids) = 0; + + virtual int getWinSize() const = 0; + virtual void setWinSize(int winSize) = 0; + + virtual int getNumIters() const = 0; + virtual void setNumIters(int numIters) = 0; - void operator ()(const GpuMat& I0, const GpuMat& I1, GpuMat& flowx, GpuMat& flowy); + virtual int getPolyN() const = 0; + virtual void setPolyN(int polyN) = 0; - void collectGarbage(); + virtual double getPolySigma() const = 0; + virtual void setPolySigma(double polySigma) = 0; + virtual int getFlags() const = 0; + virtual void setFlags(int flags) = 0; + + static Ptr create( + int numLevels = 5, + double pyrScale = 0.5, + bool fastPyramids = false, + int winSize = 13, + int numIters = 10, + int polyN = 5, + double polySigma = 1.1, + int flags = 0); +}; + +// +// OpticalFlowDual_TVL1 +// + +/** @brief Implementation of the Zach, Pock and Bischof Dual TV-L1 Optical Flow method. + * + * @sa C. Zach, T. Pock and H. Bischof, "A Duality Based Approach for Realtime TV-L1 Optical Flow". + * @sa Javier Sanchez, Enric Meinhardt-Llopis and Gabriele Facciolo. "TV-L1 Optical Flow Estimation". + */ +class CV_EXPORTS OpticalFlowDual_TVL1 : public DenseOpticalFlow +{ +public: /** * Time step of the numerical scheme. 
*/ - double tau; + virtual double getTau() const = 0; + virtual void setTau(double tau) = 0; /** * Weight parameter for the data term, attachment parameter. @@ -271,7 +274,8 @@ public: * The smaller this parameter is, the smoother the solutions we obtain. * It depends on the range of motions of the images, so its value should be adapted to each image sequence. */ - double lambda; + virtual double getLambda() const = 0; + virtual void setLambda(double lambda) = 0; /** * Weight parameter for (u - v)^2, tightness parameter. @@ -279,20 +283,23 @@ public: * In theory, it should have a small value in order to maintain both parts in correspondence. * The method is stable for a large range of values of this parameter. */ + virtual double getGamma() const = 0; + virtual void setGamma(double gamma) = 0; - double gamma; /** - * parameter used for motion estimation. It adds a variable allowing for illumination variations - * Set this parameter to 1. if you have varying illumination. - * See: Chambolle et al, A First-Order Primal-Dual Algorithm for Convex Problems with Applications to Imaging - * Journal of Mathematical imaging and vision, may 2011 Vol 40 issue 1, pp 120-145 - */ - double theta; + * parameter used for motion estimation. It adds a variable allowing for illumination variations + * Set this parameter to 1. if you have varying illumination. + * See: Chambolle et al, A First-Order Primal-Dual Algorithm for Convex Problems with Applications to Imaging + * Journal of Mathematical imaging and vision, may 2011 Vol 40 issue 1, pp 120-145 + */ + virtual double getTheta() const = 0; + virtual void setTheta(double theta) = 0; /** * Number of scales used to create the pyramid of images. */ - int nscales; + virtual int getNumScales() const = 0; + virtual void setNumScales(int nscales) = 0; /** * Number of warpings per scale. @@ -300,94 +307,41 @@ public: * This is a parameter that assures the stability of the method. * It also affects the running time, so it is a compromise between speed and accuracy. */ - int warps; + virtual int getNumWarps() const = 0; + virtual void setNumWarps(int warps) = 0; /** * Stopping criterion threshold used in the numerical scheme, which is a trade-off between precision and running time. * A small value will yield more accurate solutions at the expense of a slower convergence. */ - double epsilon; + virtual double getEpsilon() const = 0; + virtual void setEpsilon(double epsilon) = 0; /** * Stopping criterion iterations number used in the numerical scheme. */ - int iterations; - - double scaleStep; - - bool useInitialFlow; - -private: - void procOneScale(const GpuMat& I0, const GpuMat& I1, GpuMat& u1, GpuMat& u2, GpuMat& u3); - - std::vector I0s; - std::vector I1s; - std::vector u1s; - std::vector u2s; - std::vector u3s; - - GpuMat I1x_buf; - GpuMat I1y_buf; - - GpuMat I1w_buf; - GpuMat I1wx_buf; - GpuMat I1wy_buf; - - GpuMat grad_buf; - GpuMat rho_c_buf; - - GpuMat p11_buf; - GpuMat p12_buf; - GpuMat p21_buf; - GpuMat p22_buf; - GpuMat p31_buf; - GpuMat p32_buf; - - GpuMat diff_buf; - GpuMat norm_buf; -}; - -//! 
Calculates optical flow for 2 images using block matching algorithm */ -CV_EXPORTS void calcOpticalFlowBM(const GpuMat& prev, const GpuMat& curr, - Size block_size, Size shift_size, Size max_range, bool use_previous, - GpuMat& velx, GpuMat& vely, GpuMat& buf, - Stream& stream = Stream::Null()); - -class CV_EXPORTS FastOpticalFlowBM -{ -public: - void operator ()(const GpuMat& I0, const GpuMat& I1, GpuMat& flowx, GpuMat& flowy, int search_window = 21, int block_window = 7, Stream& s = Stream::Null()); - -private: - GpuMat buffer; - GpuMat extended_I0; - GpuMat extended_I1; + virtual int getNumIterations() const = 0; + virtual void setNumIterations(int iterations) = 0; + + virtual double getScaleStep() const = 0; + virtual void setScaleStep(double scaleStep) = 0; + + virtual bool getUseInitialFlow() const = 0; + virtual void setUseInitialFlow(bool useInitialFlow) = 0; + + static Ptr create( + double tau = 0.25, + double lambda = 0.15, + double theta = 0.3, + int nscales = 5, + int warps = 5, + double epsilon = 0.01, + int iterations = 300, + double scaleStep = 0.8, + double gamma = 0.0, + bool useInitialFlow = false); }; -/** @brief Interpolates frames (images) using provided optical flow (displacement field). - -@param frame0 First frame (32-bit floating point images, single channel). -@param frame1 Second frame. Must have the same type and size as frame0 . -@param fu Forward horizontal displacement. -@param fv Forward vertical displacement. -@param bu Backward horizontal displacement. -@param bv Backward vertical displacement. -@param pos New frame position. -@param newFrame Output image. -@param buf Temporary buffer, will have width x 6\*height size, CV_32FC1 type and contain 6 -GpuMat: occlusion masks for first frame, occlusion masks for second, interpolated forward -horizontal flow, interpolated forward vertical flow, interpolated backward horizontal flow, -interpolated backward vertical flow. -@param stream Stream for the asynchronous version. - */ -CV_EXPORTS void interpolateFrames(const GpuMat& frame0, const GpuMat& frame1, - const GpuMat& fu, const GpuMat& fv, - const GpuMat& bu, const GpuMat& bv, - float pos, GpuMat& newFrame, GpuMat& buf, - Stream& stream = Stream::Null()); - -CV_EXPORTS void createOpticalFlowNeedleMap(const GpuMat& u, const GpuMat& v, GpuMat& vertex, GpuMat& colors); - //! 
@} }} // namespace cv { namespace cuda { diff --git a/modules/cudaoptflow/perf/perf_optflow.cpp b/modules/cudaoptflow/perf/perf_optflow.cpp index d22eb7e60d..32040f282c 100644 --- a/modules/cudaoptflow/perf/perf_optflow.cpp +++ b/modules/cudaoptflow/perf/perf_optflow.cpp @@ -46,91 +46,10 @@ using namespace std; using namespace testing; using namespace perf; -////////////////////////////////////////////////////// -// InterpolateFrames - typedef pair pair_string; DEF_PARAM_TEST_1(ImagePair, pair_string); -PERF_TEST_P(ImagePair, InterpolateFrames, - Values(make_pair("gpu/opticalflow/frame0.png", "gpu/opticalflow/frame1.png"))) -{ - cv::Mat frame0 = readImage(GetParam().first, cv::IMREAD_GRAYSCALE); - ASSERT_FALSE(frame0.empty()); - - cv::Mat frame1 = readImage(GetParam().second, cv::IMREAD_GRAYSCALE); - ASSERT_FALSE(frame1.empty()); - - frame0.convertTo(frame0, CV_32FC1, 1.0 / 255.0); - frame1.convertTo(frame1, CV_32FC1, 1.0 / 255.0); - - if (PERF_RUN_CUDA()) - { - const cv::cuda::GpuMat d_frame0(frame0); - const cv::cuda::GpuMat d_frame1(frame1); - cv::cuda::GpuMat d_fu, d_fv; - cv::cuda::GpuMat d_bu, d_bv; - - cv::cuda::BroxOpticalFlow d_flow(0.197f /*alpha*/, 50.0f /*gamma*/, 0.8f /*scale_factor*/, - 10 /*inner_iterations*/, 77 /*outer_iterations*/, 10 /*solver_iterations*/); - - d_flow(d_frame0, d_frame1, d_fu, d_fv); - d_flow(d_frame1, d_frame0, d_bu, d_bv); - - cv::cuda::GpuMat newFrame; - cv::cuda::GpuMat d_buf; - - TEST_CYCLE() cv::cuda::interpolateFrames(d_frame0, d_frame1, d_fu, d_fv, d_bu, d_bv, 0.5f, newFrame, d_buf); - - CUDA_SANITY_CHECK(newFrame, 1e-4); - } - else - { - FAIL_NO_CPU(); - } -} - -////////////////////////////////////////////////////// -// CreateOpticalFlowNeedleMap - -PERF_TEST_P(ImagePair, CreateOpticalFlowNeedleMap, - Values(make_pair("gpu/opticalflow/frame0.png", "gpu/opticalflow/frame1.png"))) -{ - cv::Mat frame0 = readImage(GetParam().first, cv::IMREAD_GRAYSCALE); - ASSERT_FALSE(frame0.empty()); - - cv::Mat frame1 = readImage(GetParam().second, cv::IMREAD_GRAYSCALE); - ASSERT_FALSE(frame1.empty()); - - frame0.convertTo(frame0, CV_32FC1, 1.0 / 255.0); - frame1.convertTo(frame1, CV_32FC1, 1.0 / 255.0); - - if (PERF_RUN_CUDA()) - { - const cv::cuda::GpuMat d_frame0(frame0); - const cv::cuda::GpuMat d_frame1(frame1); - cv::cuda::GpuMat u; - cv::cuda::GpuMat v; - - cv::cuda::BroxOpticalFlow d_flow(0.197f /*alpha*/, 50.0f /*gamma*/, 0.8f /*scale_factor*/, - 10 /*inner_iterations*/, 77 /*outer_iterations*/, 10 /*solver_iterations*/); - - d_flow(d_frame0, d_frame1, u, v); - - cv::cuda::GpuMat vertex, colors; - - TEST_CYCLE() cv::cuda::createOpticalFlowNeedleMap(u, v, vertex, colors); - - CUDA_SANITY_CHECK(vertex, 1e-6); - CUDA_SANITY_CHECK(colors); - } - else - { - FAIL_NO_CPU(); - } -} - ////////////////////////////////////////////////////// // BroxOpticalFlow @@ -152,13 +71,19 @@ PERF_TEST_P(ImagePair, BroxOpticalFlow, { const cv::cuda::GpuMat d_frame0(frame0); const cv::cuda::GpuMat d_frame1(frame1); - cv::cuda::GpuMat u; - cv::cuda::GpuMat v; + cv::cuda::GpuMat flow; - cv::cuda::BroxOpticalFlow d_flow(0.197f /*alpha*/, 50.0f /*gamma*/, 0.8f /*scale_factor*/, - 10 /*inner_iterations*/, 77 /*outer_iterations*/, 10 /*solver_iterations*/); + cv::Ptr d_alg = + cv::cuda::BroxOpticalFlow::create(0.197 /*alpha*/, 50.0 /*gamma*/, 0.8 /*scale_factor*/, + 10 /*inner_iterations*/, 77 /*outer_iterations*/, 10 /*solver_iterations*/); - TEST_CYCLE() d_flow(d_frame0, d_frame1, u, v); + TEST_CYCLE() d_alg->calc(d_frame0, d_frame1, flow); + + cv::cuda::GpuMat flows[2]; + 
cv::cuda::split(flow, flows); + + cv::cuda::GpuMat u = flows[0]; + cv::cuda::GpuMat v = flows[1]; CUDA_SANITY_CHECK(u, 1e-1); CUDA_SANITY_CHECK(v, 1e-1); @@ -210,17 +135,17 @@ PERF_TEST_P(ImagePair_Gray_NPts_WinSz_Levels_Iters, PyrLKOpticalFlowSparse, { const cv::cuda::GpuMat d_pts(pts.reshape(2, 1)); - cv::cuda::PyrLKOpticalFlow d_pyrLK; - d_pyrLK.winSize = cv::Size(winSize, winSize); - d_pyrLK.maxLevel = levels - 1; - d_pyrLK.iters = iters; + cv::Ptr d_pyrLK = + cv::cuda::SparsePyrLKOpticalFlow::create(cv::Size(winSize, winSize), + levels - 1, + iters); const cv::cuda::GpuMat d_frame0(frame0); const cv::cuda::GpuMat d_frame1(frame1); cv::cuda::GpuMat nextPts; cv::cuda::GpuMat status; - TEST_CYCLE() d_pyrLK.sparse(d_frame0, d_frame1, d_pts, nextPts, status); + TEST_CYCLE() d_pyrLK->calc(d_frame0, d_frame1, d_pts, nextPts, status); CUDA_SANITY_CHECK(nextPts); CUDA_SANITY_CHECK(status); @@ -270,15 +195,20 @@ PERF_TEST_P(ImagePair_WinSz_Levels_Iters, PyrLKOpticalFlowDense, { const cv::cuda::GpuMat d_frame0(frame0); const cv::cuda::GpuMat d_frame1(frame1); - cv::cuda::GpuMat u; - cv::cuda::GpuMat v; + cv::cuda::GpuMat flow; + + cv::Ptr d_pyrLK = + cv::cuda::DensePyrLKOpticalFlow::create(cv::Size(winSize, winSize), + levels - 1, + iters); + + TEST_CYCLE() d_pyrLK->calc(d_frame0, d_frame1, flow); - cv::cuda::PyrLKOpticalFlow d_pyrLK; - d_pyrLK.winSize = cv::Size(winSize, winSize); - d_pyrLK.maxLevel = levels - 1; - d_pyrLK.iters = iters; + cv::cuda::GpuMat flows[2]; + cv::cuda::split(flow, flows); - TEST_CYCLE() d_pyrLK.dense(d_frame0, d_frame1, u, v); + cv::cuda::GpuMat u = flows[0]; + cv::cuda::GpuMat v = flows[1]; CUDA_SANITY_CHECK(u); CUDA_SANITY_CHECK(v); @@ -315,19 +245,19 @@ PERF_TEST_P(ImagePair, FarnebackOpticalFlow, { const cv::cuda::GpuMat d_frame0(frame0); const cv::cuda::GpuMat d_frame1(frame1); - cv::cuda::GpuMat u; - cv::cuda::GpuMat v; + cv::cuda::GpuMat flow; - cv::cuda::FarnebackOpticalFlow d_farneback; - d_farneback.numLevels = numLevels; - d_farneback.pyrScale = pyrScale; - d_farneback.winSize = winSize; - d_farneback.numIters = numIters; - d_farneback.polyN = polyN; - d_farneback.polySigma = polySigma; - d_farneback.flags = flags; + cv::Ptr d_farneback = + cv::cuda::FarnebackOpticalFlow::create(numLevels, pyrScale, false, winSize, + numIters, polyN, polySigma, flags); - TEST_CYCLE() d_farneback(d_frame0, d_frame1, u, v); + TEST_CYCLE() d_farneback->calc(d_frame0, d_frame1, flow); + + cv::cuda::GpuMat flows[2]; + cv::cuda::split(flow, flows); + + cv::cuda::GpuMat u = flows[0]; + cv::cuda::GpuMat v = flows[1]; CUDA_SANITY_CHECK(u, 1e-4); CUDA_SANITY_CHECK(v, 1e-4); @@ -360,12 +290,18 @@ PERF_TEST_P(ImagePair, OpticalFlowDual_TVL1, { const cv::cuda::GpuMat d_frame0(frame0); const cv::cuda::GpuMat d_frame1(frame1); - cv::cuda::GpuMat u; - cv::cuda::GpuMat v; + cv::cuda::GpuMat flow; + + cv::Ptr d_alg = + cv::cuda::OpticalFlowDual_TVL1::create(); + + TEST_CYCLE() d_alg->calc(d_frame0, d_frame1, flow); - cv::cuda::OpticalFlowDual_TVL1_CUDA d_alg; + cv::cuda::GpuMat flows[2]; + cv::cuda::split(flow, flows); - TEST_CYCLE() d_alg(d_frame0, d_frame1, u, v); + cv::cuda::GpuMat u = flows[0]; + cv::cuda::GpuMat v = flows[1]; CUDA_SANITY_CHECK(u, 1e-1); CUDA_SANITY_CHECK(v, 1e-1); @@ -383,72 +319,3 @@ PERF_TEST_P(ImagePair, OpticalFlowDual_TVL1, CPU_SANITY_CHECK(flow); } } - -////////////////////////////////////////////////////// -// OpticalFlowBM - -PERF_TEST_P(ImagePair, OpticalFlowBM, - Values(make_pair("gpu/opticalflow/frame0.png", "gpu/opticalflow/frame1.png"))) -{ - 
declare.time(400); - - const cv::Mat frame0 = readImage(GetParam().first, cv::IMREAD_GRAYSCALE); - ASSERT_FALSE(frame0.empty()); - - const cv::Mat frame1 = readImage(GetParam().second, cv::IMREAD_GRAYSCALE); - ASSERT_FALSE(frame1.empty()); - - const cv::Size block_size(16, 16); - const cv::Size shift_size(1, 1); - const cv::Size max_range(16, 16); - - if (PERF_RUN_CUDA()) - { - const cv::cuda::GpuMat d_frame0(frame0); - const cv::cuda::GpuMat d_frame1(frame1); - cv::cuda::GpuMat u, v, buf; - - TEST_CYCLE() cv::cuda::calcOpticalFlowBM(d_frame0, d_frame1, block_size, shift_size, max_range, false, u, v, buf); - - CUDA_SANITY_CHECK(u); - CUDA_SANITY_CHECK(v); - } - else - { - FAIL_NO_CPU(); - } -} - -PERF_TEST_P(ImagePair, DISABLED_FastOpticalFlowBM, - Values(make_pair("gpu/opticalflow/frame0.png", "gpu/opticalflow/frame1.png"))) -{ - declare.time(400); - - const cv::Mat frame0 = readImage(GetParam().first, cv::IMREAD_GRAYSCALE); - ASSERT_FALSE(frame0.empty()); - - const cv::Mat frame1 = readImage(GetParam().second, cv::IMREAD_GRAYSCALE); - ASSERT_FALSE(frame1.empty()); - - const cv::Size block_size(16, 16); - const cv::Size shift_size(1, 1); - const cv::Size max_range(16, 16); - - if (PERF_RUN_CUDA()) - { - const cv::cuda::GpuMat d_frame0(frame0); - const cv::cuda::GpuMat d_frame1(frame1); - cv::cuda::GpuMat u, v; - - cv::cuda::FastOpticalFlowBM fastBM; - - TEST_CYCLE() fastBM(d_frame0, d_frame1, u, v, max_range.width, block_size.width); - - CUDA_SANITY_CHECK(u, 2); - CUDA_SANITY_CHECK(v, 2); - } - else - { - FAIL_NO_CPU(); - } -} diff --git a/modules/cudaoptflow/perf/perf_precomp.hpp b/modules/cudaoptflow/perf/perf_precomp.hpp index 1dc00ae4b7..d7761a587a 100644 --- a/modules/cudaoptflow/perf/perf_precomp.hpp +++ b/modules/cudaoptflow/perf/perf_precomp.hpp @@ -55,6 +55,7 @@ #include "opencv2/ts/cuda_perf.hpp" #include "opencv2/cudaoptflow.hpp" +#include "opencv2/cudaarithm.hpp" #include "opencv2/video.hpp" #ifdef GTEST_CREATE_SHARED_LIBRARY diff --git a/modules/cudaoptflow/src/brox.cpp b/modules/cudaoptflow/src/brox.cpp index 39eae9a8ab..11c541906b 100644 --- a/modules/cudaoptflow/src/brox.cpp +++ b/modules/cudaoptflow/src/brox.cpp @@ -47,84 +47,148 @@ using namespace cv::cuda; #if !defined (HAVE_CUDA) || !defined (HAVE_OPENCV_CUDALEGACY) || defined (CUDA_DISABLER) -void cv::cuda::BroxOpticalFlow::operator ()(const GpuMat&, const GpuMat&, GpuMat&, GpuMat&, Stream&) { throw_no_cuda(); } +Ptr cv::cuda::BroxOpticalFlow::create(double, double, double, int, int, int) { throw_no_cuda(); return Ptr(); } #else -namespace -{ - size_t getBufSize(const NCVBroxOpticalFlowDescriptor& desc, const NCVMatrix& frame0, const NCVMatrix& frame1, - NCVMatrix& u, NCVMatrix& v, const cudaDeviceProp& devProp) +namespace { + + class BroxOpticalFlowImpl : public BroxOpticalFlow { - NCVMemStackAllocator gpuCounter(static_cast(devProp.textureAlignment)); + public: + BroxOpticalFlowImpl(double alpha, double gamma, double scale_factor, + int inner_iterations, int outer_iterations, int solver_iterations) : + alpha_(alpha), gamma_(gamma), scale_factor_(scale_factor), + inner_iterations_(inner_iterations), outer_iterations_(outer_iterations), + solver_iterations_(solver_iterations) + { + } + + virtual void calc(InputArray I0, InputArray I1, InputOutputArray flow, Stream& stream); + + virtual double getFlowSmoothness() const { return alpha_; } + virtual void setFlowSmoothness(double alpha) { alpha_ = static_cast(alpha); } + + virtual double getGradientConstancyImportance() const { return gamma_; } + virtual void 
setGradientConstancyImportance(double gamma) { gamma_ = static_cast(gamma); } + + virtual double getPyramidScaleFactor() const { return scale_factor_; } + virtual void setPyramidScaleFactor(double scale_factor) { scale_factor_ = static_cast(scale_factor); } + + //! number of lagged non-linearity iterations (inner loop) + virtual int getInnerIterations() const { return inner_iterations_; } + virtual void setInnerIterations(int inner_iterations) { inner_iterations_ = inner_iterations; } + + //! number of warping iterations (number of pyramid levels) + virtual int getOuterIterations() const { return outer_iterations_; } + virtual void setOuterIterations(int outer_iterations) { outer_iterations_ = outer_iterations; } + + //! number of linear system solver iterations + virtual int getSolverIterations() const { return solver_iterations_; } + virtual void setSolverIterations(int solver_iterations) { solver_iterations_ = solver_iterations; } + + private: + //! flow smoothness + float alpha_; + + //! gradient constancy importance + float gamma_; + + //! pyramid scale factor + float scale_factor_; + + //! number of lagged non-linearity iterations (inner loop) + int inner_iterations_; + + //! number of warping iterations (number of pyramid levels) + int outer_iterations_; + + //! number of linear system solver iterations + int solver_iterations_; + }; + + static size_t getBufSize(const NCVBroxOpticalFlowDescriptor& desc, + const NCVMatrix& frame0, const NCVMatrix& frame1, + NCVMatrix& u, NCVMatrix& v, + size_t textureAlignment) + { + NCVMemStackAllocator gpuCounter(static_cast(textureAlignment)); ncvSafeCall( NCVBroxOpticalFlow(desc, gpuCounter, frame0, frame1, u, v, 0) ); return gpuCounter.maxSize(); } -} -namespace -{ - static void outputHandler(const String &msg) { CV_Error(cv::Error::GpuApiCallError, msg.c_str()); } -} + static void outputHandler(const String &msg) + { + CV_Error(cv::Error::GpuApiCallError, msg.c_str()); + } -void cv::cuda::BroxOpticalFlow::operator ()(const GpuMat& frame0, const GpuMat& frame1, GpuMat& u, GpuMat& v, Stream& s) -{ - ncvSetDebugOutputHandler(outputHandler); + void BroxOpticalFlowImpl::calc(InputArray _I0, InputArray _I1, InputOutputArray _flow, Stream& stream) + { + const GpuMat frame0 = _I0.getGpuMat(); + const GpuMat frame1 = _I1.getGpuMat(); - CV_Assert(frame0.type() == CV_32FC1); - CV_Assert(frame1.size() == frame0.size() && frame1.type() == frame0.type()); + CV_Assert( frame0.type() == CV_32FC1 ); + CV_Assert( frame1.size() == frame0.size() && frame1.type() == frame0.type() ); - u.create(frame0.size(), CV_32FC1); - v.create(frame0.size(), CV_32FC1); + ncvSetDebugOutputHandler(outputHandler); - cudaDeviceProp devProp; - cudaSafeCall( cudaGetDeviceProperties(&devProp, getDevice()) ); + BufferPool pool(stream); + GpuMat u = pool.getBuffer(frame0.size(), CV_32FC1); + GpuMat v = pool.getBuffer(frame0.size(), CV_32FC1); - NCVBroxOpticalFlowDescriptor desc; + NCVBroxOpticalFlowDescriptor desc; + desc.alpha = alpha_; + desc.gamma = gamma_; + desc.scale_factor = scale_factor_; + desc.number_of_inner_iterations = inner_iterations_; + desc.number_of_outer_iterations = outer_iterations_; + desc.number_of_solver_iterations = solver_iterations_; - desc.alpha = alpha; - desc.gamma = gamma; - desc.scale_factor = scale_factor; - desc.number_of_inner_iterations = inner_iterations; - desc.number_of_outer_iterations = outer_iterations; - desc.number_of_solver_iterations = solver_iterations; + NCVMemSegment frame0MemSeg; + frame0MemSeg.begin.memtype = NCVMemoryTypeDevice; + 
frame0MemSeg.begin.ptr = const_cast(frame0.data); + frame0MemSeg.size = frame0.step * frame0.rows; - NCVMemSegment frame0MemSeg; - frame0MemSeg.begin.memtype = NCVMemoryTypeDevice; - frame0MemSeg.begin.ptr = const_cast(frame0.data); - frame0MemSeg.size = frame0.step * frame0.rows; + NCVMemSegment frame1MemSeg; + frame1MemSeg.begin.memtype = NCVMemoryTypeDevice; + frame1MemSeg.begin.ptr = const_cast(frame1.data); + frame1MemSeg.size = frame1.step * frame1.rows; - NCVMemSegment frame1MemSeg; - frame1MemSeg.begin.memtype = NCVMemoryTypeDevice; - frame1MemSeg.begin.ptr = const_cast(frame1.data); - frame1MemSeg.size = frame1.step * frame1.rows; + NCVMemSegment uMemSeg; + uMemSeg.begin.memtype = NCVMemoryTypeDevice; + uMemSeg.begin.ptr = u.ptr(); + uMemSeg.size = u.step * u.rows; - NCVMemSegment uMemSeg; - uMemSeg.begin.memtype = NCVMemoryTypeDevice; - uMemSeg.begin.ptr = u.ptr(); - uMemSeg.size = u.step * u.rows; + NCVMemSegment vMemSeg; + vMemSeg.begin.memtype = NCVMemoryTypeDevice; + vMemSeg.begin.ptr = v.ptr(); + vMemSeg.size = v.step * v.rows; - NCVMemSegment vMemSeg; - vMemSeg.begin.memtype = NCVMemoryTypeDevice; - vMemSeg.begin.ptr = v.ptr(); - vMemSeg.size = v.step * v.rows; + DeviceInfo devInfo; + size_t textureAlignment = devInfo.textureAlignment(); - NCVMatrixReuse frame0Mat(frame0MemSeg, static_cast(devProp.textureAlignment), frame0.cols, frame0.rows, static_cast(frame0.step)); - NCVMatrixReuse frame1Mat(frame1MemSeg, static_cast(devProp.textureAlignment), frame1.cols, frame1.rows, static_cast(frame1.step)); - NCVMatrixReuse uMat(uMemSeg, static_cast(devProp.textureAlignment), u.cols, u.rows, static_cast(u.step)); - NCVMatrixReuse vMat(vMemSeg, static_cast(devProp.textureAlignment), v.cols, v.rows, static_cast(v.step)); + NCVMatrixReuse frame0Mat(frame0MemSeg, static_cast(textureAlignment), frame0.cols, frame0.rows, static_cast(frame0.step)); + NCVMatrixReuse frame1Mat(frame1MemSeg, static_cast(textureAlignment), frame1.cols, frame1.rows, static_cast(frame1.step)); + NCVMatrixReuse uMat(uMemSeg, static_cast(textureAlignment), u.cols, u.rows, static_cast(u.step)); + NCVMatrixReuse vMat(vMemSeg, static_cast(textureAlignment), v.cols, v.rows, static_cast(v.step)); - cudaStream_t stream = StreamAccessor::getStream(s); + size_t bufSize = getBufSize(desc, frame0Mat, frame1Mat, uMat, vMat, textureAlignment); + GpuMat buf = pool.getBuffer(1, static_cast(bufSize), CV_8UC1); - size_t bufSize = getBufSize(desc, frame0Mat, frame1Mat, uMat, vMat, devProp); + NCVMemStackAllocator gpuAllocator(NCVMemoryTypeDevice, bufSize, static_cast(textureAlignment), buf.ptr()); - ensureSizeIsEnough(1, static_cast(bufSize), CV_8UC1, buf); + ncvSafeCall( NCVBroxOpticalFlow(desc, gpuAllocator, frame0Mat, frame1Mat, uMat, vMat, StreamAccessor::getStream(stream)) ); - NCVMemStackAllocator gpuAllocator(NCVMemoryTypeDevice, bufSize, static_cast(devProp.textureAlignment), buf.ptr()); + GpuMat flows[] = {u, v}; + cuda::merge(flows, 2, _flow, stream); + } +} - ncvSafeCall( NCVBroxOpticalFlow(desc, gpuAllocator, frame0Mat, frame1Mat, uMat, vMat, stream) ); +Ptr cv::cuda::BroxOpticalFlow::create(double alpha, double gamma, double scale_factor, int inner_iterations, int outer_iterations, int solver_iterations) +{ + return makePtr(alpha, gamma, scale_factor, inner_iterations, outer_iterations, solver_iterations); } #endif /* HAVE_CUDA */ diff --git a/modules/cudaoptflow/src/cuda/pyrlk.cu b/modules/cudaoptflow/src/cuda/pyrlk.cu index d4606f2281..7693551fca 100644 --- a/modules/cudaoptflow/src/cuda/pyrlk.cu +++ 
b/modules/cudaoptflow/src/cuda/pyrlk.cu @@ -472,16 +472,16 @@ namespace pyrlk } } - void loadConstants(int2 winSize, int iters) + void loadConstants(int2 winSize, int iters, cudaStream_t stream) { - cudaSafeCall( cudaMemcpyToSymbol(c_winSize_x, &winSize.x, sizeof(int)) ); - cudaSafeCall( cudaMemcpyToSymbol(c_winSize_y, &winSize.y, sizeof(int)) ); + cudaSafeCall( cudaMemcpyToSymbolAsync(c_winSize_x, &winSize.x, sizeof(int), 0, cudaMemcpyHostToDevice, stream) ); + cudaSafeCall( cudaMemcpyToSymbolAsync(c_winSize_y, &winSize.y, sizeof(int), 0, cudaMemcpyHostToDevice, stream) ); int2 halfWin = make_int2((winSize.x - 1) / 2, (winSize.y - 1) / 2); - cudaSafeCall( cudaMemcpyToSymbol(c_halfWin_x, &halfWin.x, sizeof(int)) ); - cudaSafeCall( cudaMemcpyToSymbol(c_halfWin_y, &halfWin.y, sizeof(int)) ); + cudaSafeCall( cudaMemcpyToSymbolAsync(c_halfWin_x, &halfWin.x, sizeof(int), 0, cudaMemcpyHostToDevice, stream) ); + cudaSafeCall( cudaMemcpyToSymbolAsync(c_halfWin_y, &halfWin.y, sizeof(int), 0, cudaMemcpyHostToDevice, stream) ); - cudaSafeCall( cudaMemcpyToSymbol(c_iters, &iters, sizeof(int)) ); + cudaSafeCall( cudaMemcpyToSymbolAsync(c_iters, &iters, sizeof(int), 0, cudaMemcpyHostToDevice, stream) ); } void sparse1(PtrStepSzf I, PtrStepSzf J, const float2* prevPts, float2* nextPts, uchar* status, float* err, int ptcount, diff --git a/modules/cudaoptflow/src/cuda/tvl1flow.cu b/modules/cudaoptflow/src/cuda/tvl1flow.cu index 2b66c972bc..66f0d664a0 100644 --- a/modules/cudaoptflow/src/cuda/tvl1flow.cu +++ b/modules/cudaoptflow/src/cuda/tvl1flow.cu @@ -66,15 +66,16 @@ namespace tvl1flow dy(y, x) = 0.5f * (src(::min(y + 1, src.rows - 1), x) - src(::max(y - 1, 0), x)); } - void centeredGradient(PtrStepSzf src, PtrStepSzf dx, PtrStepSzf dy) + void centeredGradient(PtrStepSzf src, PtrStepSzf dx, PtrStepSzf dy, cudaStream_t stream) { const dim3 block(32, 8); const dim3 grid(divUp(src.cols, block.x), divUp(src.rows, block.y)); - centeredGradientKernel<<>>(src, dx, dy); + centeredGradientKernel<<>>(src, dx, dy); cudaSafeCall( cudaGetLastError() ); - cudaSafeCall( cudaDeviceSynchronize() ); + if (!stream) + cudaSafeCall( cudaDeviceSynchronize() ); } } @@ -164,7 +165,10 @@ namespace tvl1flow rho(y, x) = I1wVal - I1wxVal * u1Val - I1wyVal * u2Val - I0Val; } - void warpBackward(PtrStepSzf I0, PtrStepSzf I1, PtrStepSzf I1x, PtrStepSzf I1y, PtrStepSzf u1, PtrStepSzf u2, PtrStepSzf I1w, PtrStepSzf I1wx, PtrStepSzf I1wy, PtrStepSzf grad, PtrStepSzf rho) + void warpBackward(PtrStepSzf I0, PtrStepSzf I1, PtrStepSzf I1x, PtrStepSzf I1y, + PtrStepSzf u1, PtrStepSzf u2, PtrStepSzf I1w, PtrStepSzf I1wx, + PtrStepSzf I1wy, PtrStepSzf grad, PtrStepSzf rho, + cudaStream_t stream) { const dim3 block(32, 8); const dim3 grid(divUp(I0.cols, block.x), divUp(I0.rows, block.y)); @@ -173,10 +177,11 @@ namespace tvl1flow bindTexture(&tex_I1x, I1x); bindTexture(&tex_I1y, I1y); - warpBackwardKernel<<>>(I0, u1, u2, I1w, I1wx, I1wy, grad, rho); + warpBackwardKernel<<>>(I0, u1, u2, I1w, I1wx, I1wy, grad, rho); cudaSafeCall( cudaGetLastError() ); - cudaSafeCall( cudaDeviceSynchronize() ); + if (!stream) + cudaSafeCall( cudaDeviceSynchronize() ); } } @@ -292,15 +297,17 @@ namespace tvl1flow PtrStepSzf grad, PtrStepSzf rho_c, PtrStepSzf p11, PtrStepSzf p12, PtrStepSzf p21, PtrStepSzf p22, PtrStepSzf p31, PtrStepSzf p32, PtrStepSzf u1, PtrStepSzf u2, PtrStepSzf u3, PtrStepSzf error, - float l_t, float theta, float gamma, bool calcError) + float l_t, float theta, float gamma, bool calcError, + cudaStream_t stream) { const dim3 block(32, 8); const 
dim3 grid(divUp(I1wx.cols, block.x), divUp(I1wx.rows, block.y)); - estimateUKernel<<>>(I1wx, I1wy, grad, rho_c, p11, p12, p21, p22, p31, p32, u1, u2, u3, error, l_t, theta, gamma, calcError); + estimateUKernel<<>>(I1wx, I1wy, grad, rho_c, p11, p12, p21, p22, p31, p32, u1, u2, u3, error, l_t, theta, gamma, calcError); cudaSafeCall( cudaGetLastError() ); - cudaSafeCall( cudaDeviceSynchronize() ); + if (!stream) + cudaSafeCall( cudaDeviceSynchronize() ); } } @@ -346,15 +353,19 @@ namespace tvl1flow } } - void estimateDualVariables(PtrStepSzf u1, PtrStepSzf u2, PtrStepSzf u3, PtrStepSzf p11, PtrStepSzf p12, PtrStepSzf p21, PtrStepSzf p22, PtrStepSzf p31, PtrStepSzf p32, float taut, float gamma) + void estimateDualVariables(PtrStepSzf u1, PtrStepSzf u2, PtrStepSzf u3, + PtrStepSzf p11, PtrStepSzf p12, PtrStepSzf p21, PtrStepSzf p22, PtrStepSzf p31, PtrStepSzf p32, + float taut, float gamma, + cudaStream_t stream) { const dim3 block(32, 8); const dim3 grid(divUp(u1.cols, block.x), divUp(u1.rows, block.y)); - estimateDualVariablesKernel<<>>(u1, u2, u3, p11, p12, p21, p22, p31, p32, taut, gamma); + estimateDualVariablesKernel<<>>(u1, u2, u3, p11, p12, p21, p22, p31, p32, taut, gamma); cudaSafeCall( cudaGetLastError() ); - cudaSafeCall( cudaDeviceSynchronize() ); + if (!stream) + cudaSafeCall( cudaDeviceSynchronize() ); } } diff --git a/modules/cudaoptflow/src/farneback.cpp b/modules/cudaoptflow/src/farneback.cpp index 6b74432632..b7fefeb191 100644 --- a/modules/cudaoptflow/src/farneback.cpp +++ b/modules/cudaoptflow/src/farneback.cpp @@ -42,23 +42,21 @@ #include "precomp.hpp" -#define MIN_SIZE 32 - -#define S(x) StreamAccessor::getStream(x) - -// CUDA resize() is fast, but it differs from the CPU analog. Disabling this flag -// leads to an inefficient code. It's for debug purposes only. -#define ENABLE_CUDA_RESIZE 1 - using namespace cv; using namespace cv::cuda; #if !defined HAVE_CUDA || defined(CUDA_DISABLER) -void cv::cuda::FarnebackOpticalFlow::operator ()(const GpuMat&, const GpuMat&, GpuMat&, GpuMat&, Stream&) { throw_no_cuda(); } +Ptr cv::cuda::FarnebackOpticalFlow::create(int, double, bool, int, int, int, double, int) { throw_no_cuda(); return Ptr(); } #else +#define MIN_SIZE 32 + +// CUDA resize() is fast, but it differs from the CPU analog. Disabling this flag +// leads to an inefficient code. It's for debug purposes only. 
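For completeness, a hedged usage sketch of the Farneback factory declared in the header (not part of the patch; the argument values are simply the documented defaults, and d_frame0/d_frame1 are placeholder single-channel GpuMats, e.g. 8-bit grayscale):

    cv::Ptr<cv::cuda::FarnebackOpticalFlow> farn =
        cv::cuda::FarnebackOpticalFlow::create(5 /*numLevels*/, 0.5 /*pyrScale*/, false /*fastPyramids*/,
                                               13 /*winSize*/, 10 /*numIters*/, 5 /*polyN*/, 1.1 /*polySigma*/, 0 /*flags*/);

    cv::cuda::GpuMat flow;                  // packed CV_32FC2 result
    farn->calc(d_frame0, d_frame1, flow);   // an optional cv::cuda::Stream can be passed as the fourth argument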
+#define ENABLE_CUDA_RESIZE 1 + namespace cv { namespace cuda { namespace device { namespace optflow_farneback { void setPolynomialExpansionConsts( @@ -76,8 +74,6 @@ namespace cv { namespace cuda { namespace device { namespace optflow_farneback void updateFlowGpu( const PtrStepSzf M, PtrStepSzf flowx, PtrStepSzf flowy, cudaStream_t stream); - /*void boxFilterGpu(const PtrStepSzf src, int ksizeHalf, PtrStepSzf dst, cudaStream_t stream);*/ - void boxFilter5Gpu(const PtrStepSzf src, int ksizeHalf, PtrStepSzf dst, cudaStream_t stream); void boxFilter5Gpu_CC11(const PtrStepSzf src, int ksizeHalf, PtrStepSzf dst, cudaStream_t stream); @@ -93,296 +89,381 @@ namespace cv { namespace cuda { namespace device { namespace optflow_farneback void gaussianBlur5Gpu_CC11( const PtrStepSzf src, int ksizeHalf, PtrStepSzf dst, int borderType, cudaStream_t stream); -}}}} // namespace cv { namespace cuda { namespace cudev { namespace optflow_farneback +}}}} namespace { - GpuMat allocMatFromBuf(int rows, int cols, int type, GpuMat& mat) + class FarnebackOpticalFlowImpl : public FarnebackOpticalFlow { - if (!mat.empty() && mat.type() == type && mat.rows >= rows && mat.cols >= cols) - return mat(Rect(0, 0, cols, rows)); + public: + FarnebackOpticalFlowImpl(int numLevels, double pyrScale, bool fastPyramids, int winSize, + int numIters, int polyN, double polySigma, int flags) : + numLevels_(numLevels), pyrScale_(pyrScale), fastPyramids_(fastPyramids), winSize_(winSize), + numIters_(numIters), polyN_(polyN), polySigma_(polySigma), flags_(flags) + { + } - return mat = GpuMat(rows, cols, type); - } -} + virtual int getNumLevels() const { return numLevels_; } + virtual void setNumLevels(int numLevels) { numLevels_ = numLevels; } -void cv::cuda::FarnebackOpticalFlow::prepareGaussian( - int n, double sigma, float *g, float *xg, float *xxg, - double &ig11, double &ig03, double &ig33, double &ig55) -{ - double s = 0.; - for (int x = -n; x <= n; x++) - { - g[x] = (float)std::exp(-x*x/(2*sigma*sigma)); - s += g[x]; - } + virtual double getPyrScale() const { return pyrScale_; } + virtual void setPyrScale(double pyrScale) { pyrScale_ = pyrScale; } - s = 1./s; - for (int x = -n; x <= n; x++) - { - g[x] = (float)(g[x]*s); - xg[x] = (float)(x*g[x]); - xxg[x] = (float)(x*x*g[x]); - } + virtual bool getFastPyramids() const { return fastPyramids_; } + virtual void setFastPyramids(bool fastPyramids) { fastPyramids_ = fastPyramids; } - Mat_ G(6, 6); - G.setTo(0); + virtual int getWinSize() const { return winSize_; } + virtual void setWinSize(int winSize) { winSize_ = winSize; } - for (int y = -n; y <= n; y++) - { - for (int x = -n; x <= n; x++) - { - G(0,0) += g[y]*g[x]; - G(1,1) += g[y]*g[x]*x*x; - G(3,3) += g[y]*g[x]*x*x*x*x; - G(5,5) += g[y]*g[x]*x*x*y*y; - } - } + virtual int getNumIters() const { return numIters_; } + virtual void setNumIters(int numIters) { numIters_ = numIters; } - //G[0][0] = 1.; - G(2,2) = G(0,3) = G(0,4) = G(3,0) = G(4,0) = G(1,1); - G(4,4) = G(3,3); - G(3,4) = G(4,3) = G(5,5); - - // invG: - // [ x e e ] - // [ y ] - // [ y ] - // [ e z ] - // [ e z ] - // [ u ] - Mat_ invG = G.inv(DECOMP_CHOLESKY); - - ig11 = invG(1,1); - ig03 = invG(0,3); - ig33 = invG(3,3); - ig55 = invG(5,5); -} + virtual int getPolyN() const { return polyN_; } + virtual void setPolyN(int polyN) { polyN_ = polyN; } + virtual double getPolySigma() const { return polySigma_; } + virtual void setPolySigma(double polySigma) { polySigma_ = polySigma; } -void cv::cuda::FarnebackOpticalFlow::setPolynomialExpansionConsts(int n, double sigma) -{ 
- std::vector buf(n*6 + 3); - float* g = &buf[0] + n; - float* xg = g + n*2 + 1; - float* xxg = xg + n*2 + 1; + virtual int getFlags() const { return flags_; } + virtual void setFlags(int flags) { flags_ = flags; } - if (sigma < FLT_EPSILON) - sigma = n*0.3; + virtual void calc(InputArray I0, InputArray I1, InputOutputArray flow, Stream& stream); - double ig11, ig03, ig33, ig55; - prepareGaussian(n, sigma, g, xg, xxg, ig11, ig03, ig33, ig55); + private: + int numLevels_; + double pyrScale_; + bool fastPyramids_; + int winSize_; + int numIters_; + int polyN_; + double polySigma_; + int flags_; - device::optflow_farneback::setPolynomialExpansionConsts(n, g, xg, xxg, static_cast(ig11), static_cast(ig03), static_cast(ig33), static_cast(ig55)); -} + private: + void prepareGaussian( + int n, double sigma, float *g, float *xg, float *xxg, + double &ig11, double &ig03, double &ig33, double &ig55); + void setPolynomialExpansionConsts(int n, double sigma); -void cv::cuda::FarnebackOpticalFlow::updateFlow_boxFilter( - const GpuMat& R0, const GpuMat& R1, GpuMat& flowx, GpuMat &flowy, - GpuMat& M, GpuMat &bufM, int blockSize, bool updateMatrices, Stream streams[]) -{ - if (deviceSupports(FEATURE_SET_COMPUTE_12)) - device::optflow_farneback::boxFilter5Gpu(M, blockSize/2, bufM, S(streams[0])); - else - device::optflow_farneback::boxFilter5Gpu_CC11(M, blockSize/2, bufM, S(streams[0])); - swap(M, bufM); - - for (int i = 1; i < 5; ++i) - streams[i].waitForCompletion(); - device::optflow_farneback::updateFlowGpu(M, flowx, flowy, S(streams[0])); - - if (updateMatrices) - device::optflow_farneback::updateMatricesGpu(flowx, flowy, R0, R1, M, S(streams[0])); -} + void updateFlow_boxFilter( + const GpuMat& R0, const GpuMat& R1, GpuMat& flowx, GpuMat &flowy, + GpuMat& M, GpuMat &bufM, int blockSize, bool updateMatrices, Stream streams[]); + void updateFlow_gaussianBlur( + const GpuMat& R0, const GpuMat& R1, GpuMat& flowx, GpuMat& flowy, + GpuMat& M, GpuMat &bufM, int blockSize, bool updateMatrices, Stream streams[]); -void cv::cuda::FarnebackOpticalFlow::updateFlow_gaussianBlur( - const GpuMat& R0, const GpuMat& R1, GpuMat& flowx, GpuMat& flowy, - GpuMat& M, GpuMat &bufM, int blockSize, bool updateMatrices, Stream streams[]) -{ - if (deviceSupports(FEATURE_SET_COMPUTE_12)) - device::optflow_farneback::gaussianBlur5Gpu( - M, blockSize/2, bufM, BORDER_REPLICATE, S(streams[0])); - else - device::optflow_farneback::gaussianBlur5Gpu_CC11( - M, blockSize/2, bufM, BORDER_REPLICATE, S(streams[0])); - swap(M, bufM); - - device::optflow_farneback::updateFlowGpu(M, flowx, flowy, S(streams[0])); - - if (updateMatrices) - device::optflow_farneback::updateMatricesGpu(flowx, flowy, R0, R1, M, S(streams[0])); -} + void calcImpl(const GpuMat &frame0, const GpuMat &frame1, GpuMat &flowx, GpuMat &flowy, Stream &stream); + GpuMat frames_[2]; + GpuMat pyrLevel_[2], M_, bufM_, R_[2], blurredFrame_[2]; + std::vector pyramid0_, pyramid1_; + }; -void cv::cuda::FarnebackOpticalFlow::operator ()( - const GpuMat &frame0, const GpuMat &frame1, GpuMat &flowx, GpuMat &flowy, Stream &s) -{ - CV_Assert(frame0.channels() == 1 && frame1.channels() == 1); - CV_Assert(frame0.size() == frame1.size()); - CV_Assert(polyN == 5 || polyN == 7); - CV_Assert(!fastPyramids || std::abs(pyrScale - 0.5) < 1e-6); - - Stream streams[5]; - if (S(s)) - streams[0] = s; - - Size size = frame0.size(); - GpuMat prevFlowX, prevFlowY, curFlowX, curFlowY; - - flowx.create(size, CV_32F); - flowy.create(size, CV_32F); - GpuMat flowx0 = flowx; - GpuMat flowy0 = flowy; - - // 
Crop unnecessary levels - double scale = 1; - int numLevelsCropped = 0; - for (; numLevelsCropped < numLevels; numLevelsCropped++) + void FarnebackOpticalFlowImpl::calc(InputArray _frame0, InputArray _frame1, InputOutputArray _flow, Stream& stream) { - scale *= pyrScale; - if (size.width*scale < MIN_SIZE || size.height*scale < MIN_SIZE) - break; + const GpuMat frame0 = _frame0.getGpuMat(); + const GpuMat frame1 = _frame1.getGpuMat(); + + BufferPool pool(stream); + GpuMat flowx = pool.getBuffer(frame0.size(), CV_32FC1); + GpuMat flowy = pool.getBuffer(frame0.size(), CV_32FC1); + + calcImpl(frame0, frame1, flowx, flowy, stream); + + GpuMat flows[] = {flowx, flowy}; + cuda::merge(flows, 2, _flow, stream); } - frame0.convertTo(frames_[0], CV_32F, streams[0]); - frame1.convertTo(frames_[1], CV_32F, streams[1]); + GpuMat allocMatFromBuf(int rows, int cols, int type, GpuMat& mat) + { + if (!mat.empty() && mat.type() == type && mat.rows >= rows && mat.cols >= cols) + return mat(Rect(0, 0, cols, rows)); + + return mat = GpuMat(rows, cols, type); + } - if (fastPyramids) + void FarnebackOpticalFlowImpl::prepareGaussian( + int n, double sigma, float *g, float *xg, float *xxg, + double &ig11, double &ig03, double &ig33, double &ig55) { - // Build Gaussian pyramids using pyrDown() - pyramid0_.resize(numLevelsCropped + 1); - pyramid1_.resize(numLevelsCropped + 1); - pyramid0_[0] = frames_[0]; - pyramid1_[0] = frames_[1]; - for (int i = 1; i <= numLevelsCropped; ++i) + double s = 0.; + for (int x = -n; x <= n; x++) { - cuda::pyrDown(pyramid0_[i - 1], pyramid0_[i], streams[0]); - cuda::pyrDown(pyramid1_[i - 1], pyramid1_[i], streams[1]); + g[x] = (float)std::exp(-x*x/(2*sigma*sigma)); + s += g[x]; + } + + s = 1./s; + for (int x = -n; x <= n; x++) + { + g[x] = (float)(g[x]*s); + xg[x] = (float)(x*g[x]); + xxg[x] = (float)(x*x*g[x]); + } + + Mat_ G(6, 6); + G.setTo(0); + + for (int y = -n; y <= n; y++) + { + for (int x = -n; x <= n; x++) + { + G(0,0) += g[y]*g[x]; + G(1,1) += g[y]*g[x]*x*x; + G(3,3) += g[y]*g[x]*x*x*x*x; + G(5,5) += g[y]*g[x]*x*x*y*y; + } } + + //G[0][0] = 1.; + G(2,2) = G(0,3) = G(0,4) = G(3,0) = G(4,0) = G(1,1); + G(4,4) = G(3,3); + G(3,4) = G(4,3) = G(5,5); + + // invG: + // [ x e e ] + // [ y ] + // [ y ] + // [ e z ] + // [ e z ] + // [ u ] + Mat_ invG = G.inv(DECOMP_CHOLESKY); + + ig11 = invG(1,1); + ig03 = invG(0,3); + ig33 = invG(3,3); + ig55 = invG(5,5); } - setPolynomialExpansionConsts(polyN, polySigma); - device::optflow_farneback::setUpdateMatricesConsts(); + void FarnebackOpticalFlowImpl::setPolynomialExpansionConsts(int n, double sigma) + { + std::vector buf(n*6 + 3); + float* g = &buf[0] + n; + float* xg = g + n*2 + 1; + float* xxg = xg + n*2 + 1; + + if (sigma < FLT_EPSILON) + sigma = n*0.3; + + double ig11, ig03, ig33, ig55; + prepareGaussian(n, sigma, g, xg, xxg, ig11, ig03, ig33, ig55); - for (int k = numLevelsCropped; k >= 0; k--) + device::optflow_farneback::setPolynomialExpansionConsts(n, g, xg, xxg, static_cast(ig11), static_cast(ig03), static_cast(ig33), static_cast(ig55)); + } + + void FarnebackOpticalFlowImpl::updateFlow_boxFilter( + const GpuMat& R0, const GpuMat& R1, GpuMat& flowx, GpuMat &flowy, + GpuMat& M, GpuMat &bufM, int blockSize, bool updateMatrices, Stream streams[]) { - streams[0].waitForCompletion(); + if (deviceSupports(FEATURE_SET_COMPUTE_12)) + device::optflow_farneback::boxFilter5Gpu(M, blockSize/2, bufM, StreamAccessor::getStream(streams[0])); + else + device::optflow_farneback::boxFilter5Gpu_CC11(M, blockSize/2, bufM, 
StreamAccessor::getStream(streams[0])); + swap(M, bufM); - scale = 1; - for (int i = 0; i < k; i++) - scale *= pyrScale; + for (int i = 1; i < 5; ++i) + streams[i].waitForCompletion(); + device::optflow_farneback::updateFlowGpu(M, flowx, flowy, StreamAccessor::getStream(streams[0])); - double sigma = (1./scale - 1) * 0.5; - int smoothSize = cvRound(sigma*5) | 1; - smoothSize = std::max(smoothSize, 3); + if (updateMatrices) + device::optflow_farneback::updateMatricesGpu(flowx, flowy, R0, R1, M, StreamAccessor::getStream(streams[0])); + } - int width = cvRound(size.width*scale); - int height = cvRound(size.height*scale); + void FarnebackOpticalFlowImpl::updateFlow_gaussianBlur( + const GpuMat& R0, const GpuMat& R1, GpuMat& flowx, GpuMat& flowy, + GpuMat& M, GpuMat &bufM, int blockSize, bool updateMatrices, Stream streams[]) + { + if (deviceSupports(FEATURE_SET_COMPUTE_12)) + device::optflow_farneback::gaussianBlur5Gpu( + M, blockSize/2, bufM, BORDER_REPLICATE, StreamAccessor::getStream(streams[0])); + else + device::optflow_farneback::gaussianBlur5Gpu_CC11( + M, blockSize/2, bufM, BORDER_REPLICATE, StreamAccessor::getStream(streams[0])); + swap(M, bufM); - if (fastPyramids) - { - width = pyramid0_[k].cols; - height = pyramid0_[k].rows; - } + device::optflow_farneback::updateFlowGpu(M, flowx, flowy, StreamAccessor::getStream(streams[0])); - if (k > 0) + if (updateMatrices) + device::optflow_farneback::updateMatricesGpu(flowx, flowy, R0, R1, M, StreamAccessor::getStream(streams[0])); + } + + void FarnebackOpticalFlowImpl::calcImpl(const GpuMat &frame0, const GpuMat &frame1, GpuMat &flowx, GpuMat &flowy, Stream &stream) + { + CV_Assert(frame0.channels() == 1 && frame1.channels() == 1); + CV_Assert(frame0.size() == frame1.size()); + CV_Assert(polyN_ == 5 || polyN_ == 7); + CV_Assert(!fastPyramids_ || std::abs(pyrScale_ - 0.5) < 1e-6); + + Stream streams[5]; + if (stream) + streams[0] = stream; + + Size size = frame0.size(); + GpuMat prevFlowX, prevFlowY, curFlowX, curFlowY; + + flowx.create(size, CV_32F); + flowy.create(size, CV_32F); + GpuMat flowx0 = flowx; + GpuMat flowy0 = flowy; + + // Crop unnecessary levels + double scale = 1; + int numLevelsCropped = 0; + for (; numLevelsCropped < numLevels_; numLevelsCropped++) { - curFlowX.create(height, width, CV_32F); - curFlowY.create(height, width, CV_32F); + scale *= pyrScale_; + if (size.width*scale < MIN_SIZE || size.height*scale < MIN_SIZE) + break; } - else + + frame0.convertTo(frames_[0], CV_32F, streams[0]); + frame1.convertTo(frames_[1], CV_32F, streams[1]); + + if (fastPyramids_) { - curFlowX = flowx0; - curFlowY = flowy0; + // Build Gaussian pyramids using pyrDown() + pyramid0_.resize(numLevelsCropped + 1); + pyramid1_.resize(numLevelsCropped + 1); + pyramid0_[0] = frames_[0]; + pyramid1_[0] = frames_[1]; + for (int i = 1; i <= numLevelsCropped; ++i) + { + cuda::pyrDown(pyramid0_[i - 1], pyramid0_[i], streams[0]); + cuda::pyrDown(pyramid1_[i - 1], pyramid1_[i], streams[1]); + } } - if (!prevFlowX.data) + setPolynomialExpansionConsts(polyN_, polySigma_); + device::optflow_farneback::setUpdateMatricesConsts(); + + for (int k = numLevelsCropped; k >= 0; k--) { - if (flags & OPTFLOW_USE_INITIAL_FLOW) + streams[0].waitForCompletion(); + + scale = 1; + for (int i = 0; i < k; i++) + scale *= pyrScale_; + + double sigma = (1./scale - 1) * 0.5; + int smoothSize = cvRound(sigma*5) | 1; + smoothSize = std::max(smoothSize, 3); + + int width = cvRound(size.width*scale); + int height = cvRound(size.height*scale); + + if (fastPyramids_) { - 
cuda::resize(flowx0, curFlowX, Size(width, height), 0, 0, INTER_LINEAR, streams[0]); - cuda::resize(flowy0, curFlowY, Size(width, height), 0, 0, INTER_LINEAR, streams[1]); - curFlowX.convertTo(curFlowX, curFlowX.depth(), scale, streams[0]); - curFlowY.convertTo(curFlowY, curFlowY.depth(), scale, streams[1]); + width = pyramid0_[k].cols; + height = pyramid0_[k].rows; + } + + if (k > 0) + { + curFlowX.create(height, width, CV_32F); + curFlowY.create(height, width, CV_32F); } else { - curFlowX.setTo(0, streams[0]); - curFlowY.setTo(0, streams[1]); + curFlowX = flowx0; + curFlowY = flowy0; } - } - else - { - cuda::resize(prevFlowX, curFlowX, Size(width, height), 0, 0, INTER_LINEAR, streams[0]); - cuda::resize(prevFlowY, curFlowY, Size(width, height), 0, 0, INTER_LINEAR, streams[1]); - curFlowX.convertTo(curFlowX, curFlowX.depth(), 1./pyrScale, streams[0]); - curFlowY.convertTo(curFlowY, curFlowY.depth(), 1./pyrScale, streams[1]); - } - GpuMat M = allocMatFromBuf(5*height, width, CV_32F, M_); - GpuMat bufM = allocMatFromBuf(5*height, width, CV_32F, bufM_); - GpuMat R[2] = - { - allocMatFromBuf(5*height, width, CV_32F, R_[0]), - allocMatFromBuf(5*height, width, CV_32F, R_[1]) - }; + if (!prevFlowX.data) + { + if (flags_ & OPTFLOW_USE_INITIAL_FLOW) + { + cuda::resize(flowx0, curFlowX, Size(width, height), 0, 0, INTER_LINEAR, streams[0]); + cuda::resize(flowy0, curFlowY, Size(width, height), 0, 0, INTER_LINEAR, streams[1]); + curFlowX.convertTo(curFlowX, curFlowX.depth(), scale, streams[0]); + curFlowY.convertTo(curFlowY, curFlowY.depth(), scale, streams[1]); + } + else + { + curFlowX.setTo(0, streams[0]); + curFlowY.setTo(0, streams[1]); + } + } + else + { + cuda::resize(prevFlowX, curFlowX, Size(width, height), 0, 0, INTER_LINEAR, streams[0]); + cuda::resize(prevFlowY, curFlowY, Size(width, height), 0, 0, INTER_LINEAR, streams[1]); + curFlowX.convertTo(curFlowX, curFlowX.depth(), 1./pyrScale_, streams[0]); + curFlowY.convertTo(curFlowY, curFlowY.depth(), 1./pyrScale_, streams[1]); + } - if (fastPyramids) - { - device::optflow_farneback::polynomialExpansionGpu(pyramid0_[k], polyN, R[0], S(streams[0])); - device::optflow_farneback::polynomialExpansionGpu(pyramid1_[k], polyN, R[1], S(streams[1])); - } - else - { - GpuMat blurredFrame[2] = + GpuMat M = allocMatFromBuf(5*height, width, CV_32F, M_); + GpuMat bufM = allocMatFromBuf(5*height, width, CV_32F, bufM_); + GpuMat R[2] = { - allocMatFromBuf(size.height, size.width, CV_32F, blurredFrame_[0]), - allocMatFromBuf(size.height, size.width, CV_32F, blurredFrame_[1]) + allocMatFromBuf(5*height, width, CV_32F, R_[0]), + allocMatFromBuf(5*height, width, CV_32F, R_[1]) }; - GpuMat pyrLevel[2] = + + if (fastPyramids_) { - allocMatFromBuf(height, width, CV_32F, pyrLevel_[0]), - allocMatFromBuf(height, width, CV_32F, pyrLevel_[1]) - }; + device::optflow_farneback::polynomialExpansionGpu(pyramid0_[k], polyN_, R[0], StreamAccessor::getStream(streams[0])); + device::optflow_farneback::polynomialExpansionGpu(pyramid1_[k], polyN_, R[1], StreamAccessor::getStream(streams[1])); + } + else + { + GpuMat blurredFrame[2] = + { + allocMatFromBuf(size.height, size.width, CV_32F, blurredFrame_[0]), + allocMatFromBuf(size.height, size.width, CV_32F, blurredFrame_[1]) + }; + GpuMat pyrLevel[2] = + { + allocMatFromBuf(height, width, CV_32F, pyrLevel_[0]), + allocMatFromBuf(height, width, CV_32F, pyrLevel_[1]) + }; + + Mat g = getGaussianKernel(smoothSize, sigma, CV_32F); + device::optflow_farneback::setGaussianBlurKernel(g.ptr(smoothSize/2), smoothSize/2); + + for (int i = 
0; i < 2; i++) + { + device::optflow_farneback::gaussianBlurGpu( + frames_[i], smoothSize/2, blurredFrame[i], BORDER_REFLECT101, StreamAccessor::getStream(streams[i])); + cuda::resize(blurredFrame[i], pyrLevel[i], Size(width, height), 0.0, 0.0, INTER_LINEAR, streams[i]); + device::optflow_farneback::polynomialExpansionGpu(pyrLevel[i], polyN_, R[i], StreamAccessor::getStream(streams[i])); + } + } - Mat g = getGaussianKernel(smoothSize, sigma, CV_32F); - device::optflow_farneback::setGaussianBlurKernel(g.ptr(smoothSize/2), smoothSize/2); + streams[1].waitForCompletion(); + device::optflow_farneback::updateMatricesGpu(curFlowX, curFlowY, R[0], R[1], M, StreamAccessor::getStream(streams[0])); - for (int i = 0; i < 2; i++) + if (flags_ & OPTFLOW_FARNEBACK_GAUSSIAN) { - device::optflow_farneback::gaussianBlurGpu( - frames_[i], smoothSize/2, blurredFrame[i], BORDER_REFLECT101, S(streams[i])); - cuda::resize(blurredFrame[i], pyrLevel[i], Size(width, height), 0.0, 0.0, INTER_LINEAR, streams[i]); - device::optflow_farneback::polynomialExpansionGpu(pyrLevel[i], polyN, R[i], S(streams[i])); + Mat g = getGaussianKernel(winSize_, winSize_/2*0.3f, CV_32F); + device::optflow_farneback::setGaussianBlurKernel(g.ptr(winSize_/2), winSize_/2); + } + for (int i = 0; i < numIters_; i++) + { + if (flags_ & OPTFLOW_FARNEBACK_GAUSSIAN) + updateFlow_gaussianBlur(R[0], R[1], curFlowX, curFlowY, M, bufM, winSize_, i < numIters_-1, streams); + else + updateFlow_boxFilter(R[0], R[1], curFlowX, curFlowY, M, bufM, winSize_, i < numIters_-1, streams); } - } - - streams[1].waitForCompletion(); - device::optflow_farneback::updateMatricesGpu(curFlowX, curFlowY, R[0], R[1], M, S(streams[0])); - if (flags & OPTFLOW_FARNEBACK_GAUSSIAN) - { - Mat g = getGaussianKernel(winSize, winSize/2*0.3f, CV_32F); - device::optflow_farneback::setGaussianBlurKernel(g.ptr(winSize/2), winSize/2); - } - for (int i = 0; i < numIters; i++) - { - if (flags & OPTFLOW_FARNEBACK_GAUSSIAN) - updateFlow_gaussianBlur(R[0], R[1], curFlowX, curFlowY, M, bufM, winSize, i < numIters-1, streams); - else - updateFlow_boxFilter(R[0], R[1], curFlowX, curFlowY, M, bufM, winSize, i < numIters-1, streams); + prevFlowX = curFlowX; + prevFlowY = curFlowY; } - prevFlowX = curFlowX; - prevFlowY = curFlowY; - } + flowx = curFlowX; + flowy = curFlowY; - flowx = curFlowX; - flowy = curFlowY; + if (!stream) + streams[0].waitForCompletion(); + } +} - if (!S(s)) - streams[0].waitForCompletion(); +Ptr cv::cuda::FarnebackOpticalFlow::create(int numLevels, double pyrScale, bool fastPyramids, int winSize, + int numIters, int polyN, double polySigma, int flags) +{ + return makePtr(numLevels, pyrScale, fastPyramids, winSize, + numIters, polyN, polySigma, flags); } #endif diff --git a/modules/cudaoptflow/src/pyrlk.cpp b/modules/cudaoptflow/src/pyrlk.cpp index 52ee91f2fe..f4182743c0 100644 --- a/modules/cudaoptflow/src/pyrlk.cpp +++ b/modules/cudaoptflow/src/pyrlk.cpp @@ -47,37 +47,54 @@ using namespace cv::cuda; #if !defined (HAVE_CUDA) || defined (CUDA_DISABLER) -cv::cuda::PyrLKOpticalFlow::PyrLKOpticalFlow() { throw_no_cuda(); } -void cv::cuda::PyrLKOpticalFlow::sparse(const GpuMat&, const GpuMat&, const GpuMat&, GpuMat&, GpuMat&, GpuMat*) { throw_no_cuda(); } -void cv::cuda::PyrLKOpticalFlow::dense(const GpuMat&, const GpuMat&, GpuMat&, GpuMat&, GpuMat*) { throw_no_cuda(); } -void cv::cuda::PyrLKOpticalFlow::releaseMemory() {} +Ptr cv::cuda::SparsePyrLKOpticalFlow::create(Size, int, int, bool) { throw_no_cuda(); return Ptr(); } + +Ptr cv::cuda::DensePyrLKOpticalFlow::create(Size, 
int, int, bool) { throw_no_cuda(); return Ptr(); } #else /* !defined (HAVE_CUDA) */ namespace pyrlk { - void loadConstants(int2 winSize, int iters); + void loadConstants(int2 winSize, int iters, cudaStream_t stream); void sparse1(PtrStepSzf I, PtrStepSzf J, const float2* prevPts, float2* nextPts, uchar* status, float* err, int ptcount, - int level, dim3 block, dim3 patch, cudaStream_t stream = 0); + int level, dim3 block, dim3 patch, cudaStream_t stream); void sparse4(PtrStepSz I, PtrStepSz J, const float2* prevPts, float2* nextPts, uchar* status, float* err, int ptcount, - int level, dim3 block, dim3 patch, cudaStream_t stream = 0); + int level, dim3 block, dim3 patch, cudaStream_t stream); void dense(PtrStepSzb I, PtrStepSzf J, PtrStepSzf u, PtrStepSzf v, PtrStepSzf prevU, PtrStepSzf prevV, - PtrStepSzf err, int2 winSize, cudaStream_t stream = 0); -} - -cv::cuda::PyrLKOpticalFlow::PyrLKOpticalFlow() -{ - winSize = Size(21, 21); - maxLevel = 3; - iters = 30; - useInitialFlow = false; + PtrStepSzf err, int2 winSize, cudaStream_t stream); } namespace { - void calcPatchSize(cv::Size winSize, dim3& block, dim3& patch) + class PyrLKOpticalFlowBase + { + public: + PyrLKOpticalFlowBase(Size winSize, int maxLevel, int iters, bool useInitialFlow); + + void sparse(const GpuMat& prevImg, const GpuMat& nextImg, const GpuMat& prevPts, GpuMat& nextPts, + GpuMat& status, GpuMat* err, Stream& stream); + + void dense(const GpuMat& prevImg, const GpuMat& nextImg, GpuMat& u, GpuMat& v, Stream& stream); + + protected: + Size winSize_; + int maxLevel_; + int iters_; + bool useInitialFlow_; + + private: + std::vector prevPyr_; + std::vector nextPyr_; + }; + + PyrLKOpticalFlowBase::PyrLKOpticalFlowBase(Size winSize, int maxLevel, int iters, bool useInitialFlow) : + winSize_(winSize), maxLevel_(maxLevel), iters_(iters), useInitialFlow_(useInitialFlow) + { + } + + void calcPatchSize(Size winSize, dim3& block, dim3& patch) { if (winSize.width > 32 && winSize.width > 2 * winSize.height) { @@ -95,156 +112,239 @@ namespace block.z = patch.z = 1; } -} -void cv::cuda::PyrLKOpticalFlow::sparse(const GpuMat& prevImg, const GpuMat& nextImg, const GpuMat& prevPts, GpuMat& nextPts, GpuMat& status, GpuMat* err) -{ - if (prevPts.empty()) + void PyrLKOpticalFlowBase::sparse(const GpuMat& prevImg, const GpuMat& nextImg, const GpuMat& prevPts, GpuMat& nextPts, GpuMat& status, GpuMat* err, Stream& stream) { - nextPts.release(); - status.release(); - if (err) err->release(); - return; - } + if (prevPts.empty()) + { + nextPts.release(); + status.release(); + if (err) err->release(); + return; + } - dim3 block, patch; - calcPatchSize(winSize, block, patch); + dim3 block, patch; + calcPatchSize(winSize_, block, patch); - CV_Assert(prevImg.channels() == 1 || prevImg.channels() == 3 || prevImg.channels() == 4); - CV_Assert(prevImg.size() == nextImg.size() && prevImg.type() == nextImg.type()); - CV_Assert(maxLevel >= 0); - CV_Assert(winSize.width > 2 && winSize.height > 2); - CV_Assert(patch.x > 0 && patch.x < 6 && patch.y > 0 && patch.y < 6); - CV_Assert(prevPts.rows == 1 && prevPts.type() == CV_32FC2); + CV_Assert( prevImg.channels() == 1 || prevImg.channels() == 3 || prevImg.channels() == 4 ); + CV_Assert( prevImg.size() == nextImg.size() && prevImg.type() == nextImg.type() ); + CV_Assert( maxLevel_ >= 0 ); + CV_Assert( winSize_.width > 2 && winSize_.height > 2 ); + CV_Assert( patch.x > 0 && patch.x < 6 && patch.y > 0 && patch.y < 6 ); + CV_Assert( prevPts.rows == 1 && prevPts.type() == CV_32FC2 ); - if (useInitialFlow) - 
CV_Assert(nextPts.size() == prevPts.size() && nextPts.type() == CV_32FC2); - else - ensureSizeIsEnough(1, prevPts.cols, prevPts.type(), nextPts); + if (useInitialFlow_) + CV_Assert( nextPts.size() == prevPts.size() && nextPts.type() == prevPts.type() ); + else + ensureSizeIsEnough(1, prevPts.cols, prevPts.type(), nextPts); - GpuMat temp1 = (useInitialFlow ? nextPts : prevPts).reshape(1); - GpuMat temp2 = nextPts.reshape(1); - cuda::multiply(temp1, Scalar::all(1.0 / (1 << maxLevel) / 2.0), temp2); + GpuMat temp1 = (useInitialFlow_ ? nextPts : prevPts).reshape(1); + GpuMat temp2 = nextPts.reshape(1); + cuda::multiply(temp1, Scalar::all(1.0 / (1 << maxLevel_) / 2.0), temp2, 1, -1, stream); - ensureSizeIsEnough(1, prevPts.cols, CV_8UC1, status); - status.setTo(Scalar::all(1)); + ensureSizeIsEnough(1, prevPts.cols, CV_8UC1, status); + status.setTo(Scalar::all(1), stream); - if (err) - ensureSizeIsEnough(1, prevPts.cols, CV_32FC1, *err); + if (err) + ensureSizeIsEnough(1, prevPts.cols, CV_32FC1, *err); - // build the image pyramids. + // build the image pyramids. - prevPyr_.resize(maxLevel + 1); - nextPyr_.resize(maxLevel + 1); + BufferPool pool(stream); - int cn = prevImg.channels(); + prevPyr_.resize(maxLevel_ + 1); + nextPyr_.resize(maxLevel_ + 1); - if (cn == 1 || cn == 4) - { - prevImg.convertTo(prevPyr_[0], CV_32F); - nextImg.convertTo(nextPyr_[0], CV_32F); - } - else - { - cuda::cvtColor(prevImg, buf_, COLOR_BGR2BGRA); - buf_.convertTo(prevPyr_[0], CV_32F); + int cn = prevImg.channels(); + + if (cn == 1 || cn == 4) + { + prevImg.convertTo(prevPyr_[0], CV_32F, stream); + nextImg.convertTo(nextPyr_[0], CV_32F, stream); + } + else + { + GpuMat buf = pool.getBuffer(prevImg.size(), CV_MAKE_TYPE(prevImg.depth(), 4)); - cuda::cvtColor(nextImg, buf_, COLOR_BGR2BGRA); - buf_.convertTo(nextPyr_[0], CV_32F); + cuda::cvtColor(prevImg, buf, COLOR_BGR2BGRA, 0, stream); + buf.convertTo(prevPyr_[0], CV_32F, stream); + + cuda::cvtColor(nextImg, buf, COLOR_BGR2BGRA, 0, stream); + buf.convertTo(nextPyr_[0], CV_32F, stream); + } + + for (int level = 1; level <= maxLevel_; ++level) + { + cuda::pyrDown(prevPyr_[level - 1], prevPyr_[level], stream); + cuda::pyrDown(nextPyr_[level - 1], nextPyr_[level], stream); + } + + pyrlk::loadConstants(make_int2(winSize_.width, winSize_.height), iters_, StreamAccessor::getStream(stream)); + + for (int level = maxLevel_; level >= 0; level--) + { + if (cn == 1) + { + pyrlk::sparse1(prevPyr_[level], nextPyr_[level], + prevPts.ptr(), nextPts.ptr(), + status.ptr(), + level == 0 && err ? err->ptr() : 0, prevPts.cols, + level, block, patch, + StreamAccessor::getStream(stream)); + } + else + { + pyrlk::sparse4(prevPyr_[level], nextPyr_[level], + prevPts.ptr(), nextPts.ptr(), + status.ptr(), + level == 0 && err ? err->ptr() : 0, prevPts.cols, + level, block, patch, + StreamAccessor::getStream(stream)); + } + } } - for (int level = 1; level <= maxLevel; ++level) + void PyrLKOpticalFlowBase::dense(const GpuMat& prevImg, const GpuMat& nextImg, GpuMat& u, GpuMat& v, Stream& stream) { - cuda::pyrDown(prevPyr_[level - 1], prevPyr_[level]); - cuda::pyrDown(nextPyr_[level - 1], nextPyr_[level]); - } + CV_Assert( prevImg.type() == CV_8UC1 ); + CV_Assert( prevImg.size() == nextImg.size() && prevImg.type() == nextImg.type() ); + CV_Assert( maxLevel_ >= 0 ); + CV_Assert( winSize_.width > 2 && winSize_.height > 2 ); - pyrlk::loadConstants(make_int2(winSize.width, winSize.height), iters); + // build the image pyramids. 
- for (int level = maxLevel; level >= 0; level--) - { - if (cn == 1) + prevPyr_.resize(maxLevel_ + 1); + nextPyr_.resize(maxLevel_ + 1); + + prevPyr_[0] = prevImg; + nextImg.convertTo(nextPyr_[0], CV_32F, stream); + + for (int level = 1; level <= maxLevel_; ++level) { - pyrlk::sparse1(prevPyr_[level], nextPyr_[level], - prevPts.ptr(), nextPts.ptr(), status.ptr(), level == 0 && err ? err->ptr() : 0, prevPts.cols, - level, block, patch); + cuda::pyrDown(prevPyr_[level - 1], prevPyr_[level], stream); + cuda::pyrDown(nextPyr_[level - 1], nextPyr_[level], stream); } - else + + BufferPool pool(stream); + + GpuMat uPyr[] = { + pool.getBuffer(prevImg.size(), CV_32FC1), + pool.getBuffer(prevImg.size(), CV_32FC1), + }; + GpuMat vPyr[] = { + pool.getBuffer(prevImg.size(), CV_32FC1), + pool.getBuffer(prevImg.size(), CV_32FC1), + }; + + uPyr[0].setTo(Scalar::all(0), stream); + vPyr[0].setTo(Scalar::all(0), stream); + uPyr[1].setTo(Scalar::all(0), stream); + vPyr[1].setTo(Scalar::all(0), stream); + + int2 winSize2i = make_int2(winSize_.width, winSize_.height); + pyrlk::loadConstants(winSize2i, iters_, StreamAccessor::getStream(stream)); + + int idx = 0; + + for (int level = maxLevel_; level >= 0; level--) { - pyrlk::sparse4(prevPyr_[level], nextPyr_[level], - prevPts.ptr(), nextPts.ptr(), status.ptr(), level == 0 && err ? err->ptr() : 0, prevPts.cols, - level, block, patch); + int idx2 = (idx + 1) & 1; + + pyrlk::dense(prevPyr_[level], nextPyr_[level], + uPyr[idx], vPyr[idx], uPyr[idx2], vPyr[idx2], + PtrStepSzf(), winSize2i, + StreamAccessor::getStream(stream)); + + if (level > 0) + idx = idx2; } + + uPyr[idx].copyTo(u, stream); + vPyr[idx].copyTo(v, stream); } -} -void cv::cuda::PyrLKOpticalFlow::dense(const GpuMat& prevImg, const GpuMat& nextImg, GpuMat& u, GpuMat& v, GpuMat* err) -{ - CV_Assert(prevImg.type() == CV_8UC1); - CV_Assert(prevImg.size() == nextImg.size() && prevImg.type() == nextImg.type()); - CV_Assert(maxLevel >= 0); - CV_Assert(winSize.width > 2 && winSize.height > 2); + class SparsePyrLKOpticalFlowImpl : public SparsePyrLKOpticalFlow, private PyrLKOpticalFlowBase + { + public: + SparsePyrLKOpticalFlowImpl(Size winSize, int maxLevel, int iters, bool useInitialFlow) : + PyrLKOpticalFlowBase(winSize, maxLevel, iters, useInitialFlow) + { + } - if (err) - err->create(prevImg.size(), CV_32FC1); + virtual Size getWinSize() const { return winSize_; } + virtual void setWinSize(Size winSize) { winSize_ = winSize; } - // build the image pyramids. + virtual int getMaxLevel() const { return maxLevel_; } + virtual void setMaxLevel(int maxLevel) { maxLevel_ = maxLevel; } - prevPyr_.resize(maxLevel + 1); - nextPyr_.resize(maxLevel + 1); + virtual int getNumIters() const { return iters_; } + virtual void setNumIters(int iters) { iters_ = iters; } - prevPyr_[0] = prevImg; - nextImg.convertTo(nextPyr_[0], CV_32F); + virtual bool getUseInitialFlow() const { return useInitialFlow_; } + virtual void setUseInitialFlow(bool useInitialFlow) { useInitialFlow_ = useInitialFlow; } - for (int level = 1; level <= maxLevel; ++level) + virtual void calc(InputArray _prevImg, InputArray _nextImg, + InputArray _prevPts, InputOutputArray _nextPts, + OutputArray _status, + OutputArray _err, + Stream& stream) + { + const GpuMat prevImg = _prevImg.getGpuMat(); + const GpuMat nextImg = _nextImg.getGpuMat(); + const GpuMat prevPts = _prevPts.getGpuMat(); + GpuMat& nextPts = _nextPts.getGpuMatRef(); + GpuMat& status = _status.getGpuMatRef(); + GpuMat* err = _err.needed() ? 
&(_err.getGpuMatRef()) : NULL; + + sparse(prevImg, nextImg, prevPts, nextPts, status, err, stream); + } + }; + + class DensePyrLKOpticalFlowImpl : public DensePyrLKOpticalFlow, private PyrLKOpticalFlowBase { - cuda::pyrDown(prevPyr_[level - 1], prevPyr_[level]); - cuda::pyrDown(nextPyr_[level - 1], nextPyr_[level]); - } + public: + DensePyrLKOpticalFlowImpl(Size winSize, int maxLevel, int iters, bool useInitialFlow) : + PyrLKOpticalFlowBase(winSize, maxLevel, iters, useInitialFlow) + { + } - ensureSizeIsEnough(prevImg.size(), CV_32FC1, uPyr_[0]); - ensureSizeIsEnough(prevImg.size(), CV_32FC1, vPyr_[0]); - ensureSizeIsEnough(prevImg.size(), CV_32FC1, uPyr_[1]); - ensureSizeIsEnough(prevImg.size(), CV_32FC1, vPyr_[1]); - uPyr_[0].setTo(Scalar::all(0)); - vPyr_[0].setTo(Scalar::all(0)); - uPyr_[1].setTo(Scalar::all(0)); - vPyr_[1].setTo(Scalar::all(0)); + virtual Size getWinSize() const { return winSize_; } + virtual void setWinSize(Size winSize) { winSize_ = winSize; } - int2 winSize2i = make_int2(winSize.width, winSize.height); - pyrlk::loadConstants(winSize2i, iters); + virtual int getMaxLevel() const { return maxLevel_; } + virtual void setMaxLevel(int maxLevel) { maxLevel_ = maxLevel; } - PtrStepSzf derr = err ? *err : PtrStepSzf(); + virtual int getNumIters() const { return iters_; } + virtual void setNumIters(int iters) { iters_ = iters; } - int idx = 0; + virtual bool getUseInitialFlow() const { return useInitialFlow_; } + virtual void setUseInitialFlow(bool useInitialFlow) { useInitialFlow_ = useInitialFlow; } - for (int level = maxLevel; level >= 0; level--) - { - int idx2 = (idx + 1) & 1; + virtual void calc(InputArray _prevImg, InputArray _nextImg, InputOutputArray _flow, Stream& stream) + { + const GpuMat prevImg = _prevImg.getGpuMat(); + const GpuMat nextImg = _nextImg.getGpuMat(); - pyrlk::dense(prevPyr_[level], nextPyr_[level], uPyr_[idx], vPyr_[idx], uPyr_[idx2], vPyr_[idx2], - level == 0 ? 
derr : PtrStepSzf(), winSize2i); + BufferPool pool(stream); + GpuMat u = pool.getBuffer(prevImg.size(), CV_32FC1); + GpuMat v = pool.getBuffer(prevImg.size(), CV_32FC1); - if (level > 0) - idx = idx2; - } + dense(prevImg, nextImg, u, v, stream); - uPyr_[idx].copyTo(u); - vPyr_[idx].copyTo(v); + GpuMat flows[] = {u, v}; + cuda::merge(flows, 2, _flow, stream); + } + }; } -void cv::cuda::PyrLKOpticalFlow::releaseMemory() +Ptr cv::cuda::SparsePyrLKOpticalFlow::create(Size winSize, int maxLevel, int iters, bool useInitialFlow) { - prevPyr_.clear(); - nextPyr_.clear(); - - buf_.release(); - - uPyr_[0].release(); - vPyr_[0].release(); + return makePtr(winSize, maxLevel, iters, useInitialFlow); +} - uPyr_[1].release(); - vPyr_[1].release(); +Ptr cv::cuda::DensePyrLKOpticalFlow::create(Size winSize, int maxLevel, int iters, bool useInitialFlow) +{ + return makePtr(winSize, maxLevel, iters, useInitialFlow); } #endif /* !defined (HAVE_CUDA) */ diff --git a/modules/cudaoptflow/src/tvl1flow.cpp b/modules/cudaoptflow/src/tvl1flow.cpp index d0efffa57f..e2ef07b0d1 100644 --- a/modules/cudaoptflow/src/tvl1flow.cpp +++ b/modules/cudaoptflow/src/tvl1flow.cpp @@ -44,257 +44,338 @@ #if !defined HAVE_CUDA || defined(CUDA_DISABLER) -cv::cuda::OpticalFlowDual_TVL1_CUDA::OpticalFlowDual_TVL1_CUDA() { throw_no_cuda(); } -void cv::cuda::OpticalFlowDual_TVL1_CUDA::operator ()(const GpuMat&, const GpuMat&, GpuMat&, GpuMat&) { throw_no_cuda(); } -void cv::cuda::OpticalFlowDual_TVL1_CUDA::collectGarbage() {} -void cv::cuda::OpticalFlowDual_TVL1_CUDA::procOneScale(const GpuMat&, const GpuMat&, GpuMat&, GpuMat&, GpuMat&) { throw_no_cuda(); } +Ptr cv::cuda::OpticalFlowDual_TVL1::create(double, double, double, int, int, double, int, double, double, bool) { throw_no_cuda(); return Ptr(); } #else using namespace cv; using namespace cv::cuda; -cv::cuda::OpticalFlowDual_TVL1_CUDA::OpticalFlowDual_TVL1_CUDA() +namespace tvl1flow { - tau = 0.25; - lambda = 0.15; - theta = 0.3; - nscales = 5; - warps = 5; - epsilon = 0.01; - iterations = 300; - scaleStep = 0.8; - gamma = 0.0; - useInitialFlow = false; + void centeredGradient(PtrStepSzf src, PtrStepSzf dx, PtrStepSzf dy, cudaStream_t stream); + void warpBackward(PtrStepSzf I0, PtrStepSzf I1, PtrStepSzf I1x, PtrStepSzf I1y, + PtrStepSzf u1, PtrStepSzf u2, + PtrStepSzf I1w, PtrStepSzf I1wx, PtrStepSzf I1wy, + PtrStepSzf grad, PtrStepSzf rho, + cudaStream_t stream); + void estimateU(PtrStepSzf I1wx, PtrStepSzf I1wy, + PtrStepSzf grad, PtrStepSzf rho_c, + PtrStepSzf p11, PtrStepSzf p12, PtrStepSzf p21, PtrStepSzf p22, PtrStepSzf p31, PtrStepSzf p32, + PtrStepSzf u1, PtrStepSzf u2, PtrStepSzf u3, PtrStepSzf error, + float l_t, float theta, float gamma, bool calcError, + cudaStream_t stream); + void estimateDualVariables(PtrStepSzf u1, PtrStepSzf u2, PtrStepSzf u3, + PtrStepSzf p11, PtrStepSzf p12, PtrStepSzf p21, PtrStepSzf p22, PtrStepSzf p31, PtrStepSzf p32, + float taut, float gamma, + cudaStream_t stream); } -void cv::cuda::OpticalFlowDual_TVL1_CUDA::operator ()(const GpuMat& I0, const GpuMat& I1, GpuMat& flowx, GpuMat& flowy) +namespace { - CV_Assert( I0.type() == CV_8UC1 || I0.type() == CV_32FC1 ); - CV_Assert( I0.size() == I1.size() ); - CV_Assert( I0.type() == I1.type() ); - CV_Assert( !useInitialFlow || (flowx.size() == I0.size() && flowx.type() == CV_32FC1 && flowy.size() == flowx.size() && flowy.type() == flowx.type()) ); - CV_Assert( nscales > 0 ); - - // allocate memory for the pyramid structure - I0s.resize(nscales); - I1s.resize(nscales); - u1s.resize(nscales); - 
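
// ----------------------------------------------------------------------------
// Illustrative sketch (not part of the patch): how a caller is expected to use
// the refactored cudaoptflow interfaces introduced by the pyrlk.cpp and
// tvl1flow.cpp changes around this point. The create()/setter/calc() calls are
// taken from this changeset; the surrounding harness (frame Mats, the point
// list, the parameter values, which are just the documented defaults) is an
// assumption for illustration only.
#include <opencv2/core.hpp>
#include <opencv2/cudaarithm.hpp>
#include <opencv2/cudaoptflow.hpp>

static void denseFlowExample(const cv::Mat& frame0, const cv::Mat& frame1)
{
    // Dense flow now comes back as a single CV_32FC2 GpuMat instead of two
    // CV_32FC1 maps, so callers split it when they need separate u/v planes.
    cv::Ptr<cv::cuda::OpticalFlowDual_TVL1> tvl1 =
        cv::cuda::OpticalFlowDual_TVL1::create();
    tvl1->setNumIterations(10);   // setters replace the old public fields

    cv::cuda::GpuMat d_flow;
    tvl1->calc(cv::cuda::GpuMat(frame0), cv::cuda::GpuMat(frame1), d_flow);

    cv::cuda::GpuMat planes[2];
    cv::cuda::split(d_flow, planes);          // planes[0] = u, planes[1] = v
}

static void sparseFlowExample(const cv::Mat& frame0, const cv::Mat& frame1,
                              const cv::cuda::GpuMat& d_prevPts)
{
    // Sparse LK: d_prevPts is a 1xN CV_32FC2 row of points to track.
    cv::Ptr<cv::cuda::SparsePyrLKOpticalFlow> lk =
        cv::cuda::SparsePyrLKOpticalFlow::create(cv::Size(21, 21), 3, 30, false);

    cv::cuda::GpuMat d_nextPts, d_status;
    lk->calc(cv::cuda::GpuMat(frame0), cv::cuda::GpuMat(frame1),
             d_prevPts, d_nextPts, d_status);
}
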
u2s.resize(nscales); - u3s.resize(nscales); - - I0.convertTo(I0s[0], CV_32F, I0.depth() == CV_8U ? 1.0 : 255.0); - I1.convertTo(I1s[0], CV_32F, I1.depth() == CV_8U ? 1.0 : 255.0); - - if (!useInitialFlow) + class OpticalFlowDual_TVL1_Impl : public OpticalFlowDual_TVL1 { - flowx.create(I0.size(), CV_32FC1); - flowy.create(I0.size(), CV_32FC1); - } + public: + OpticalFlowDual_TVL1_Impl(double tau, double lambda, double theta, int nscales, int warps, double epsilon, + int iterations, double scaleStep, double gamma, bool useInitialFlow) : + tau_(tau), lambda_(lambda), gamma_(gamma), theta_(theta), nscales_(nscales), warps_(warps), + epsilon_(epsilon), iterations_(iterations), scaleStep_(scaleStep), useInitialFlow_(useInitialFlow) + { + } + + virtual double getTau() const { return tau_; } + virtual void setTau(double tau) { tau_ = tau; } + + virtual double getLambda() const { return lambda_; } + virtual void setLambda(double lambda) { lambda_ = lambda; } + + virtual double getGamma() const { return gamma_; } + virtual void setGamma(double gamma) { gamma_ = gamma; } + + virtual double getTheta() const { return theta_; } + virtual void setTheta(double theta) { theta_ = theta; } + + virtual int getNumScales() const { return nscales_; } + virtual void setNumScales(int nscales) { nscales_ = nscales; } + + virtual int getNumWarps() const { return warps_; } + virtual void setNumWarps(int warps) { warps_ = warps; } + + virtual double getEpsilon() const { return epsilon_; } + virtual void setEpsilon(double epsilon) { epsilon_ = epsilon; } + + virtual int getNumIterations() const { return iterations_; } + virtual void setNumIterations(int iterations) { iterations_ = iterations; } + + virtual double getScaleStep() const { return scaleStep_; } + virtual void setScaleStep(double scaleStep) { scaleStep_ = scaleStep; } + + virtual bool getUseInitialFlow() const { return useInitialFlow_; } + virtual void setUseInitialFlow(bool useInitialFlow) { useInitialFlow_ = useInitialFlow; } + + virtual void calc(InputArray I0, InputArray I1, InputOutputArray flow, Stream& stream); + + private: + double tau_; + double lambda_; + double gamma_; + double theta_; + int nscales_; + int warps_; + double epsilon_; + int iterations_; + double scaleStep_; + bool useInitialFlow_; + + private: + void calcImpl(const GpuMat& I0, const GpuMat& I1, GpuMat& flowx, GpuMat& flowy, Stream& stream); + void procOneScale(const GpuMat& I0, const GpuMat& I1, GpuMat& u1, GpuMat& u2, GpuMat& u3, Stream& stream); - u1s[0] = flowx; - u2s[0] = flowy; - if (gamma) - u3s[0].create(I0.size(), CV_32FC1); + std::vector I0s; + std::vector I1s; + std::vector u1s; + std::vector u2s; + std::vector u3s; - I1x_buf.create(I0.size(), CV_32FC1); - I1y_buf.create(I0.size(), CV_32FC1); + GpuMat I1x_buf; + GpuMat I1y_buf; - I1w_buf.create(I0.size(), CV_32FC1); - I1wx_buf.create(I0.size(), CV_32FC1); - I1wy_buf.create(I0.size(), CV_32FC1); + GpuMat I1w_buf; + GpuMat I1wx_buf; + GpuMat I1wy_buf; - grad_buf.create(I0.size(), CV_32FC1); - rho_c_buf.create(I0.size(), CV_32FC1); + GpuMat grad_buf; + GpuMat rho_c_buf; - p11_buf.create(I0.size(), CV_32FC1); - p12_buf.create(I0.size(), CV_32FC1); - p21_buf.create(I0.size(), CV_32FC1); - p22_buf.create(I0.size(), CV_32FC1); - if (gamma) + GpuMat p11_buf; + GpuMat p12_buf; + GpuMat p21_buf; + GpuMat p22_buf; + GpuMat p31_buf; + GpuMat p32_buf; + + GpuMat diff_buf; + GpuMat norm_buf; + }; + + void OpticalFlowDual_TVL1_Impl::calc(InputArray _frame0, InputArray _frame1, InputOutputArray _flow, Stream& stream) { - 
p31_buf.create(I0.size(), CV_32FC1); - p32_buf.create(I0.size(), CV_32FC1); + const GpuMat frame0 = _frame0.getGpuMat(); + const GpuMat frame1 = _frame1.getGpuMat(); + + BufferPool pool(stream); + GpuMat flowx = pool.getBuffer(frame0.size(), CV_32FC1); + GpuMat flowy = pool.getBuffer(frame0.size(), CV_32FC1); + + calcImpl(frame0, frame1, flowx, flowy, stream); + + GpuMat flows[] = {flowx, flowy}; + cuda::merge(flows, 2, _flow, stream); } - diff_buf.create(I0.size(), CV_32FC1); - // create the scales - for (int s = 1; s < nscales; ++s) + void OpticalFlowDual_TVL1_Impl::calcImpl(const GpuMat& I0, const GpuMat& I1, GpuMat& flowx, GpuMat& flowy, Stream& stream) { - cuda::resize(I0s[s-1], I0s[s], Size(), scaleStep, scaleStep); - cuda::resize(I1s[s-1], I1s[s], Size(), scaleStep, scaleStep); - - if (I0s[s].cols < 16 || I0s[s].rows < 16) + CV_Assert( I0.type() == CV_8UC1 || I0.type() == CV_32FC1 ); + CV_Assert( I0.size() == I1.size() ); + CV_Assert( I0.type() == I1.type() ); + CV_Assert( !useInitialFlow_ || (flowx.size() == I0.size() && flowx.type() == CV_32FC1 && flowy.size() == flowx.size() && flowy.type() == flowx.type()) ); + CV_Assert( nscales_ > 0 ); + + // allocate memory for the pyramid structure + I0s.resize(nscales_); + I1s.resize(nscales_); + u1s.resize(nscales_); + u2s.resize(nscales_); + u3s.resize(nscales_); + + I0.convertTo(I0s[0], CV_32F, I0.depth() == CV_8U ? 1.0 : 255.0, stream); + I1.convertTo(I1s[0], CV_32F, I1.depth() == CV_8U ? 1.0 : 255.0, stream); + + if (!useInitialFlow_) { - nscales = s; - break; + flowx.create(I0.size(), CV_32FC1); + flowy.create(I0.size(), CV_32FC1); } - if (useInitialFlow) + u1s[0] = flowx; + u2s[0] = flowy; + if (gamma_) { - cuda::resize(u1s[s-1], u1s[s], Size(), scaleStep, scaleStep); - cuda::resize(u2s[s-1], u2s[s], Size(), scaleStep, scaleStep); + u3s[0].create(I0.size(), CV_32FC1); + } + + I1x_buf.create(I0.size(), CV_32FC1); + I1y_buf.create(I0.size(), CV_32FC1); + + I1w_buf.create(I0.size(), CV_32FC1); + I1wx_buf.create(I0.size(), CV_32FC1); + I1wy_buf.create(I0.size(), CV_32FC1); - cuda::multiply(u1s[s], Scalar::all(scaleStep), u1s[s]); - cuda::multiply(u2s[s], Scalar::all(scaleStep), u2s[s]); + grad_buf.create(I0.size(), CV_32FC1); + rho_c_buf.create(I0.size(), CV_32FC1); + + p11_buf.create(I0.size(), CV_32FC1); + p12_buf.create(I0.size(), CV_32FC1); + p21_buf.create(I0.size(), CV_32FC1); + p22_buf.create(I0.size(), CV_32FC1); + if (gamma_) + { + p31_buf.create(I0.size(), CV_32FC1); + p32_buf.create(I0.size(), CV_32FC1); } - else + diff_buf.create(I0.size(), CV_32FC1); + + // create the scales + for (int s = 1; s < nscales_; ++s) { - u1s[s].create(I0s[s].size(), CV_32FC1); - u2s[s].create(I0s[s].size(), CV_32FC1); + cuda::resize(I0s[s-1], I0s[s], Size(), scaleStep_, scaleStep_, INTER_LINEAR, stream); + cuda::resize(I1s[s-1], I1s[s], Size(), scaleStep_, scaleStep_, INTER_LINEAR, stream); + + if (I0s[s].cols < 16 || I0s[s].rows < 16) + { + nscales_ = s; + break; + } + + if (useInitialFlow_) + { + cuda::resize(u1s[s-1], u1s[s], Size(), scaleStep_, scaleStep_, INTER_LINEAR, stream); + cuda::resize(u2s[s-1], u2s[s], Size(), scaleStep_, scaleStep_, INTER_LINEAR, stream); + + cuda::multiply(u1s[s], Scalar::all(scaleStep_), u1s[s], 1, -1, stream); + cuda::multiply(u2s[s], Scalar::all(scaleStep_), u2s[s], 1, -1, stream); + } + else + { + u1s[s].create(I0s[s].size(), CV_32FC1); + u2s[s].create(I0s[s].size(), CV_32FC1); + } + if (gamma_) + { + u3s[s].create(I0s[s].size(), CV_32FC1); + } } - if (gamma) - u3s[s].create(I0s[s].size(), CV_32FC1); - } - if 
(!useInitialFlow) - { - u1s[nscales-1].setTo(Scalar::all(0)); - u2s[nscales-1].setTo(Scalar::all(0)); - } - if (gamma) - u3s[nscales - 1].setTo(Scalar::all(0)); + if (!useInitialFlow_) + { + u1s[nscales_-1].setTo(Scalar::all(0), stream); + u2s[nscales_-1].setTo(Scalar::all(0), stream); + } + if (gamma_) + { + u3s[nscales_ - 1].setTo(Scalar::all(0), stream); + } - // pyramidal structure for computing the optical flow - for (int s = nscales - 1; s >= 0; --s) - { - // compute the optical flow at the current scale - procOneScale(I0s[s], I1s[s], u1s[s], u2s[s], u3s[s]); + // pyramidal structure for computing the optical flow + for (int s = nscales_ - 1; s >= 0; --s) + { + // compute the optical flow at the current scale + procOneScale(I0s[s], I1s[s], u1s[s], u2s[s], u3s[s], stream); - // if this was the last scale, finish now - if (s == 0) - break; + // if this was the last scale, finish now + if (s == 0) + break; - // otherwise, upsample the optical flow + // otherwise, upsample the optical flow - // zoom the optical flow for the next finer scale - cuda::resize(u1s[s], u1s[s - 1], I0s[s - 1].size()); - cuda::resize(u2s[s], u2s[s - 1], I0s[s - 1].size()); - if (gamma) - cuda::resize(u3s[s], u3s[s - 1], I0s[s - 1].size()); + // zoom the optical flow for the next finer scale + cuda::resize(u1s[s], u1s[s - 1], I0s[s - 1].size(), 0, 0, INTER_LINEAR, stream); + cuda::resize(u2s[s], u2s[s - 1], I0s[s - 1].size(), 0, 0, INTER_LINEAR, stream); + if (gamma_) + { + cuda::resize(u3s[s], u3s[s - 1], I0s[s - 1].size(), 0, 0, INTER_LINEAR, stream); + } - // scale the optical flow with the appropriate zoom factor - cuda::multiply(u1s[s - 1], Scalar::all(1/scaleStep), u1s[s - 1]); - cuda::multiply(u2s[s - 1], Scalar::all(1/scaleStep), u2s[s - 1]); + // scale the optical flow with the appropriate zoom factor + cuda::multiply(u1s[s - 1], Scalar::all(1/scaleStep_), u1s[s - 1], 1, -1, stream); + cuda::multiply(u2s[s - 1], Scalar::all(1/scaleStep_), u2s[s - 1], 1, -1, stream); + } } -} - -namespace tvl1flow -{ - void centeredGradient(PtrStepSzf src, PtrStepSzf dx, PtrStepSzf dy); - void warpBackward(PtrStepSzf I0, PtrStepSzf I1, PtrStepSzf I1x, PtrStepSzf I1y, PtrStepSzf u1, PtrStepSzf u2, PtrStepSzf I1w, PtrStepSzf I1wx, PtrStepSzf I1wy, PtrStepSzf grad, PtrStepSzf rho); - void estimateU(PtrStepSzf I1wx, PtrStepSzf I1wy, - PtrStepSzf grad, PtrStepSzf rho_c, - PtrStepSzf p11, PtrStepSzf p12, PtrStepSzf p21, PtrStepSzf p22, PtrStepSzf p31, PtrStepSzf p32, - PtrStepSzf u1, PtrStepSzf u2, PtrStepSzf u3, PtrStepSzf error, - float l_t, float theta, float gamma, bool calcError); - void estimateDualVariables(PtrStepSzf u1, PtrStepSzf u2, PtrStepSzf u3, PtrStepSzf p11, PtrStepSzf p12, PtrStepSzf p21, PtrStepSzf p22, PtrStepSzf p31, PtrStepSzf p32, float taut, const float gamma); -} -void cv::cuda::OpticalFlowDual_TVL1_CUDA::procOneScale(const GpuMat& I0, const GpuMat& I1, GpuMat& u1, GpuMat& u2, GpuMat& u3) -{ - using namespace tvl1flow; + void OpticalFlowDual_TVL1_Impl::procOneScale(const GpuMat& I0, const GpuMat& I1, GpuMat& u1, GpuMat& u2, GpuMat& u3, Stream& _stream) + { + using namespace tvl1flow; - const double scaledEpsilon = epsilon * epsilon * I0.size().area(); + cudaStream_t stream = StreamAccessor::getStream(_stream); - CV_DbgAssert( I1.size() == I0.size() ); - CV_DbgAssert( I1.type() == I0.type() ); - CV_DbgAssert( u1.size() == I0.size() ); - CV_DbgAssert( u2.size() == u1.size() ); + const double scaledEpsilon = epsilon_ * epsilon_ * I0.size().area(); - GpuMat I1x = I1x_buf(Rect(0, 0, I0.cols, I0.rows)); - 
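
// ----------------------------------------------------------------------------
// Illustrative sketch (not part of the patch): the Stream plumbing these hunks
// thread through every call. Host-side wrappers take a cv::cuda::Stream,
// per-element ops forward it directly, raw kernels receive the underlying
// cudaStream_t via StreamAccessor::getStream(), and temporaries come from a
// per-stream BufferPool as in the new calc() methods. mykernels::run is a
// hypothetical placeholder for real device code.
#include <opencv2/core/cuda.hpp>
#include <opencv2/core/cuda_stream_accessor.hpp>
#include <opencv2/cudaarithm.hpp>

namespace mykernels
{
    // Placeholder for a real kernel launcher: a real implementation would
    // enqueue device work on the given cudaStream_t.
    inline void run(const cv::cuda::GpuMat&, cudaStream_t) { }
}

static void streamedStep(const cv::cuda::GpuMat& src, cv::cuda::GpuMat& dst,
                         cv::cuda::Stream& stream)
{
    // Temporaries come from the per-stream BufferPool instead of long-lived
    // member buffers (pooling itself is enabled with
    // cv::cuda::setBufferPoolUsage(true) before the first Stream is created).
    cv::cuda::BufferPool pool(stream);
    cv::cuda::GpuMat tmp = pool.getBuffer(src.size(), CV_32FC1);

    src.convertTo(tmp, CV_32F, stream);                             // async on 'stream'
    mykernels::run(tmp, cv::cuda::StreamAccessor::getStream(stream));
    cv::cuda::multiply(tmp, cv::Scalar::all(2.0), dst, 1, -1, stream);

    // Nothing above blocks the host; synchronize only when the result is needed.
    stream.waitForCompletion();
}
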
GpuMat I1y = I1y_buf(Rect(0, 0, I0.cols, I0.rows)); - centeredGradient(I1, I1x, I1y); + CV_DbgAssert( I1.size() == I0.size() ); + CV_DbgAssert( I1.type() == I0.type() ); + CV_DbgAssert( u1.size() == I0.size() ); + CV_DbgAssert( u2.size() == u1.size() ); - GpuMat I1w = I1w_buf(Rect(0, 0, I0.cols, I0.rows)); - GpuMat I1wx = I1wx_buf(Rect(0, 0, I0.cols, I0.rows)); - GpuMat I1wy = I1wy_buf(Rect(0, 0, I0.cols, I0.rows)); + GpuMat I1x = I1x_buf(Rect(0, 0, I0.cols, I0.rows)); + GpuMat I1y = I1y_buf(Rect(0, 0, I0.cols, I0.rows)); + centeredGradient(I1, I1x, I1y, stream); - GpuMat grad = grad_buf(Rect(0, 0, I0.cols, I0.rows)); - GpuMat rho_c = rho_c_buf(Rect(0, 0, I0.cols, I0.rows)); + GpuMat I1w = I1w_buf(Rect(0, 0, I0.cols, I0.rows)); + GpuMat I1wx = I1wx_buf(Rect(0, 0, I0.cols, I0.rows)); + GpuMat I1wy = I1wy_buf(Rect(0, 0, I0.cols, I0.rows)); - GpuMat p11 = p11_buf(Rect(0, 0, I0.cols, I0.rows)); - GpuMat p12 = p12_buf(Rect(0, 0, I0.cols, I0.rows)); - GpuMat p21 = p21_buf(Rect(0, 0, I0.cols, I0.rows)); - GpuMat p22 = p22_buf(Rect(0, 0, I0.cols, I0.rows)); - GpuMat p31, p32; - if (gamma) - { - p31 = p31_buf(Rect(0, 0, I0.cols, I0.rows)); - p32 = p32_buf(Rect(0, 0, I0.cols, I0.rows)); - } - p11.setTo(Scalar::all(0)); - p12.setTo(Scalar::all(0)); - p21.setTo(Scalar::all(0)); - p22.setTo(Scalar::all(0)); - if (gamma) - { - p31.setTo(Scalar::all(0)); - p32.setTo(Scalar::all(0)); - } + GpuMat grad = grad_buf(Rect(0, 0, I0.cols, I0.rows)); + GpuMat rho_c = rho_c_buf(Rect(0, 0, I0.cols, I0.rows)); - GpuMat diff = diff_buf(Rect(0, 0, I0.cols, I0.rows)); + GpuMat p11 = p11_buf(Rect(0, 0, I0.cols, I0.rows)); + GpuMat p12 = p12_buf(Rect(0, 0, I0.cols, I0.rows)); + GpuMat p21 = p21_buf(Rect(0, 0, I0.cols, I0.rows)); + GpuMat p22 = p22_buf(Rect(0, 0, I0.cols, I0.rows)); + GpuMat p31, p32; + if (gamma_) + { + p31 = p31_buf(Rect(0, 0, I0.cols, I0.rows)); + p32 = p32_buf(Rect(0, 0, I0.cols, I0.rows)); + } + p11.setTo(Scalar::all(0), _stream); + p12.setTo(Scalar::all(0), _stream); + p21.setTo(Scalar::all(0), _stream); + p22.setTo(Scalar::all(0), _stream); + if (gamma_) + { + p31.setTo(Scalar::all(0), _stream); + p32.setTo(Scalar::all(0), _stream); + } - const float l_t = static_cast(lambda * theta); - const float taut = static_cast(tau / theta); + GpuMat diff = diff_buf(Rect(0, 0, I0.cols, I0.rows)); - for (int warpings = 0; warpings < warps; ++warpings) - { - warpBackward(I0, I1, I1x, I1y, u1, u2, I1w, I1wx, I1wy, grad, rho_c); + const float l_t = static_cast(lambda_ * theta_); + const float taut = static_cast(tau_ / theta_); - double error = std::numeric_limits::max(); - double prevError = 0.0; - for (int n = 0; error > scaledEpsilon && n < iterations; ++n) + for (int warpings = 0; warpings < warps_; ++warpings) { - // some tweaks to make sum operation less frequently - bool calcError = (epsilon > 0) && (n & 0x1) && (prevError < scaledEpsilon); - cv::Mat m1(u3); - estimateU(I1wx, I1wy, grad, rho_c, p11, p12, p21, p22, p31, p32, u1, u2, u3, diff, l_t, static_cast(theta), gamma, calcError); - if (calcError) - { - error = cuda::sum(diff, norm_buf)[0]; - prevError = error; - } - else + warpBackward(I0, I1, I1x, I1y, u1, u2, I1w, I1wx, I1wy, grad, rho_c, stream); + + double error = std::numeric_limits::max(); + double prevError = 0.0; + for (int n = 0; error > scaledEpsilon && n < iterations_; ++n) { - error = std::numeric_limits::max(); - prevError -= scaledEpsilon; + // some tweaks to make sum operation less frequently + bool calcError = (epsilon_ > 0) && (n & 0x1) && (prevError < scaledEpsilon); + estimateU(I1wx, 
I1wy, grad, rho_c, p11, p12, p21, p22, p31, p32, u1, u2, u3, diff, l_t, static_cast(theta_), gamma_, calcError, stream); + if (calcError) + { + _stream.waitForCompletion(); + error = cuda::sum(diff, norm_buf)[0]; + prevError = error; + } + else + { + error = std::numeric_limits::max(); + prevError -= scaledEpsilon; + } + + estimateDualVariables(u1, u2, u3, p11, p12, p21, p22, p31, p32, taut, gamma_, stream); } - - estimateDualVariables(u1, u2, u3, p11, p12, p21, p22, p31, p32, taut, gamma); } } } -void cv::cuda::OpticalFlowDual_TVL1_CUDA::collectGarbage() +Ptr cv::cuda::OpticalFlowDual_TVL1::create( + double tau, double lambda, double theta, int nscales, int warps, + double epsilon, int iterations, double scaleStep, double gamma, bool useInitialFlow) { - I0s.clear(); - I1s.clear(); - u1s.clear(); - u2s.clear(); - u3s.clear(); - - I1x_buf.release(); - I1y_buf.release(); - - I1w_buf.release(); - I1wx_buf.release(); - I1wy_buf.release(); - - grad_buf.release(); - rho_c_buf.release(); - - p11_buf.release(); - p12_buf.release(); - p21_buf.release(); - p22_buf.release(); - if (gamma) - { - p31_buf.release(); - p32_buf.release(); - } - diff_buf.release(); - norm_buf.release(); + return makePtr(tau, lambda, theta, nscales, warps, + epsilon, iterations, scaleStep, gamma, useInitialFlow); } #endif // !defined HAVE_CUDA || defined(CUDA_DISABLER) diff --git a/modules/cudaoptflow/test/test_optflow.cpp b/modules/cudaoptflow/test/test_optflow.cpp index 2b976563b0..c5b2ad8478 100644 --- a/modules/cudaoptflow/test/test_optflow.cpp +++ b/modules/cudaoptflow/test/test_optflow.cpp @@ -71,12 +71,18 @@ CUDA_TEST_P(BroxOpticalFlow, Regression) cv::Mat frame1 = readImageType("opticalflow/frame1.png", CV_32FC1); ASSERT_FALSE(frame1.empty()); - cv::cuda::BroxOpticalFlow brox(0.197f /*alpha*/, 50.0f /*gamma*/, 0.8f /*scale_factor*/, - 10 /*inner_iterations*/, 77 /*outer_iterations*/, 10 /*solver_iterations*/); + cv::Ptr brox = + cv::cuda::BroxOpticalFlow::create(0.197 /*alpha*/, 50.0 /*gamma*/, 0.8 /*scale_factor*/, + 10 /*inner_iterations*/, 77 /*outer_iterations*/, 10 /*solver_iterations*/); - cv::cuda::GpuMat u; - cv::cuda::GpuMat v; - brox(loadMat(frame0), loadMat(frame1), u, v); + cv::cuda::GpuMat flow; + brox->calc(loadMat(frame0), loadMat(frame1), flow); + + cv::cuda::GpuMat flows[2]; + cv::cuda::split(flow, flows); + + cv::cuda::GpuMat u = flows[0]; + cv::cuda::GpuMat v = flows[1]; std::string fname(cvtest::TS::ptr()->get_data_path()); if (devInfo.majorVersion() >= 2) @@ -133,12 +139,18 @@ CUDA_TEST_P(BroxOpticalFlow, OpticalFlowNan) cv::resize(frame0, r_frame0, cv::Size(1380,1000)); cv::resize(frame1, r_frame1, cv::Size(1380,1000)); - cv::cuda::BroxOpticalFlow brox(0.197f /*alpha*/, 50.0f /*gamma*/, 0.8f /*scale_factor*/, - 5 /*inner_iterations*/, 150 /*outer_iterations*/, 10 /*solver_iterations*/); + cv::Ptr brox = + cv::cuda::BroxOpticalFlow::create(0.197 /*alpha*/, 50.0 /*gamma*/, 0.8 /*scale_factor*/, + 10 /*inner_iterations*/, 77 /*outer_iterations*/, 10 /*solver_iterations*/); + + cv::cuda::GpuMat flow; + brox->calc(loadMat(frame0), loadMat(frame1), flow); + + cv::cuda::GpuMat flows[2]; + cv::cuda::split(flow, flows); - cv::cuda::GpuMat u; - cv::cuda::GpuMat v; - brox(loadMat(r_frame0), loadMat(r_frame1), u, v); + cv::cuda::GpuMat u = flows[0]; + cv::cuda::GpuMat v = flows[1]; cv::Mat h_u, h_v; u.download(h_u); @@ -193,11 +205,12 @@ CUDA_TEST_P(PyrLKOpticalFlow, Sparse) cv::Mat pts_mat(1, (int) pts.size(), CV_32FC2, (void*) &pts[0]); d_pts.upload(pts_mat); - cv::cuda::PyrLKOpticalFlow pyrLK; + cv::Ptr 
pyrLK = + cv::cuda::SparsePyrLKOpticalFlow::create(); cv::cuda::GpuMat d_nextPts; cv::cuda::GpuMat d_status; - pyrLK.sparse(loadMat(frame0), loadMat(frame1), d_pts, d_nextPts, d_status); + pyrLK->calc(loadMat(frame0), loadMat(frame1), d_pts, d_nextPts, d_status); std::vector nextPts(d_nextPts.cols); cv::Mat nextPts_mat(1, d_nextPts.cols, CV_32FC2, (void*) &nextPts[0]); @@ -285,34 +298,30 @@ CUDA_TEST_P(FarnebackOpticalFlow, Accuracy) double polySigma = polyN <= 5 ? 1.1 : 1.5; - cv::cuda::FarnebackOpticalFlow farn; - farn.pyrScale = pyrScale; - farn.polyN = polyN; - farn.polySigma = polySigma; - farn.flags = flags; + cv::Ptr farn = + cv::cuda::FarnebackOpticalFlow::create(); + farn->setPyrScale(pyrScale); + farn->setPolyN(polyN); + farn->setPolySigma(polySigma); + farn->setFlags(flags); - cv::cuda::GpuMat d_flowx, d_flowy; - farn(loadMat(frame0), loadMat(frame1), d_flowx, d_flowy); + cv::cuda::GpuMat d_flow; + farn->calc(loadMat(frame0), loadMat(frame1), d_flow); cv::Mat flow; if (useInitFlow) { - cv::Mat flowxy[] = {cv::Mat(d_flowx), cv::Mat(d_flowy)}; - cv::merge(flowxy, 2, flow); + d_flow.download(flow); - farn.flags |= cv::OPTFLOW_USE_INITIAL_FLOW; - farn(loadMat(frame0), loadMat(frame1), d_flowx, d_flowy); + farn->setFlags(farn->getFlags() | cv::OPTFLOW_USE_INITIAL_FLOW); + farn->calc(loadMat(frame0), loadMat(frame1), d_flow); } cv::calcOpticalFlowFarneback( - frame0, frame1, flow, farn.pyrScale, farn.numLevels, farn.winSize, - farn.numIters, farn.polyN, farn.polySigma, farn.flags); + frame0, frame1, flow, farn->getPyrScale(), farn->getNumLevels(), farn->getWinSize(), + farn->getNumIters(), farn->getPolyN(), farn->getPolySigma(), farn->getFlags()); - std::vector flowxy; - cv::split(flow, flowxy); - - EXPECT_MAT_SIMILAR(flowxy[0], d_flowx, 0.1); - EXPECT_MAT_SIMILAR(flowxy[1], d_flowy, 0.1); + EXPECT_MAT_SIMILAR(flow, d_flow, 0.1); } INSTANTIATE_TEST_CASE_P(CUDA_OptFlow, FarnebackOpticalFlow, testing::Combine( @@ -325,15 +334,20 @@ INSTANTIATE_TEST_CASE_P(CUDA_OptFlow, FarnebackOpticalFlow, testing::Combine( ////////////////////////////////////////////////////// // OpticalFlowDual_TVL1 -PARAM_TEST_CASE(OpticalFlowDual_TVL1, cv::cuda::DeviceInfo, UseRoi) +namespace +{ + IMPLEMENT_PARAM_CLASS(Gamma, double) +} + +PARAM_TEST_CASE(OpticalFlowDual_TVL1, cv::cuda::DeviceInfo, Gamma) { cv::cuda::DeviceInfo devInfo; - bool useRoi; + double gamma; virtual void SetUp() { devInfo = GET_PARAM(0); - useRoi = GET_PARAM(1); + gamma = GET_PARAM(1); cv::cuda::setDevice(devInfo.deviceID()); } @@ -347,156 +361,28 @@ CUDA_TEST_P(OpticalFlowDual_TVL1, Accuracy) cv::Mat frame1 = readImage("opticalflow/rubberwhale2.png", cv::IMREAD_GRAYSCALE); ASSERT_FALSE(frame1.empty()); - cv::cuda::OpticalFlowDual_TVL1_CUDA d_alg; - cv::cuda::GpuMat d_flowx = createMat(frame0.size(), CV_32FC1, useRoi); - cv::cuda::GpuMat d_flowy = createMat(frame0.size(), CV_32FC1, useRoi); - d_alg(loadMat(frame0, useRoi), loadMat(frame1, useRoi), d_flowx, d_flowy); + cv::Ptr d_alg = + cv::cuda::OpticalFlowDual_TVL1::create(); + d_alg->setNumIterations(10); + d_alg->setGamma(gamma); + + cv::cuda::GpuMat d_flow; + d_alg->calc(loadMat(frame0), loadMat(frame1), d_flow); cv::Ptr alg = cv::createOptFlow_DualTVL1(); alg->set("medianFiltering", 1); alg->set("innerIterations", 1); - alg->set("outerIterations", d_alg.iterations); + alg->set("outerIterations", d_alg->getNumIterations()); + alg->set("gamma", gamma); + cv::Mat flow; alg->calc(frame0, frame1, flow); - cv::Mat gold[2]; - cv::split(flow, gold); - cv::Mat mx(d_flowx); - cv::Mat 
my(d_flowx); - - EXPECT_MAT_SIMILAR(gold[0], d_flowx, 4e-3); - EXPECT_MAT_SIMILAR(gold[1], d_flowy, 4e-3); - d_alg.gamma = 1; - alg->set("gamma", 1); - d_alg(loadMat(frame0, useRoi), loadMat(frame1, useRoi), d_flowx, d_flowy); - alg->calc(frame0, frame1, flow); - cv::split(flow, gold); - mx = cv::Mat(d_flowx); - my = cv::Mat(d_flowx); - EXPECT_MAT_SIMILAR(gold[0], d_flowx, 4e-3); - EXPECT_MAT_SIMILAR(gold[1], d_flowy, 4e-3); + EXPECT_MAT_SIMILAR(flow, d_flow, 4e-3); } INSTANTIATE_TEST_CASE_P(CUDA_OptFlow, OpticalFlowDual_TVL1, testing::Combine( ALL_DEVICES, - WHOLE_SUBMAT)); - -////////////////////////////////////////////////////// -// FastOpticalFlowBM - -namespace -{ - void FastOpticalFlowBM_gold(const cv::Mat_& I0, const cv::Mat_& I1, cv::Mat_& velx, cv::Mat_& vely, int search_window, int block_window) - { - velx.create(I0.size()); - vely.create(I0.size()); - - int search_radius = search_window / 2; - int block_radius = block_window / 2; - - for (int y = 0; y < I0.rows; ++y) - { - for (int x = 0; x < I0.cols; ++x) - { - int bestDist = std::numeric_limits::max(); - int bestDx = 0; - int bestDy = 0; - - for (int dy = -search_radius; dy <= search_radius; ++dy) - { - for (int dx = -search_radius; dx <= search_radius; ++dx) - { - int dist = 0; - - for (int by = -block_radius; by <= block_radius; ++by) - { - for (int bx = -block_radius; bx <= block_radius; ++bx) - { - int I0_val = I0(cv::borderInterpolate(y + by, I0.rows, cv::BORDER_DEFAULT), cv::borderInterpolate(x + bx, I0.cols, cv::BORDER_DEFAULT)); - int I1_val = I1(cv::borderInterpolate(y + dy + by, I0.rows, cv::BORDER_DEFAULT), cv::borderInterpolate(x + dx + bx, I0.cols, cv::BORDER_DEFAULT)); - - dist += std::abs(I0_val - I1_val); - } - } - - if (dist < bestDist) - { - bestDist = dist; - bestDx = dx; - bestDy = dy; - } - } - } - - velx(y, x) = (float) bestDx; - vely(y, x) = (float) bestDy; - } - } - } - - double calc_rmse(const cv::Mat_& flow1, const cv::Mat_& flow2) - { - double sum = 0.0; - - for (int y = 0; y < flow1.rows; ++y) - { - for (int x = 0; x < flow1.cols; ++x) - { - double diff = flow1(y, x) - flow2(y, x); - sum += diff * diff; - } - } - - return std::sqrt(sum / flow1.size().area()); - } -} - -struct FastOpticalFlowBM : testing::TestWithParam -{ -}; - -CUDA_TEST_P(FastOpticalFlowBM, Accuracy) -{ - const double MAX_RMSE = 0.6; - - int search_window = 15; - int block_window = 5; - - cv::cuda::DeviceInfo devInfo = GetParam(); - cv::cuda::setDevice(devInfo.deviceID()); - - cv::Mat frame0 = readImage("opticalflow/rubberwhale1.png", cv::IMREAD_GRAYSCALE); - ASSERT_FALSE(frame0.empty()); - - cv::Mat frame1 = readImage("opticalflow/rubberwhale2.png", cv::IMREAD_GRAYSCALE); - ASSERT_FALSE(frame1.empty()); - - cv::Size smallSize(320, 240); - cv::Mat frame0_small; - cv::Mat frame1_small; - - cv::resize(frame0, frame0_small, smallSize); - cv::resize(frame1, frame1_small, smallSize); - - cv::cuda::GpuMat d_flowx; - cv::cuda::GpuMat d_flowy; - cv::cuda::FastOpticalFlowBM fastBM; - - fastBM(loadMat(frame0_small), loadMat(frame1_small), d_flowx, d_flowy, search_window, block_window); - - cv::Mat_ flowx; - cv::Mat_ flowy; - FastOpticalFlowBM_gold(frame0_small, frame1_small, flowx, flowy, search_window, block_window); - - double err; - - err = calc_rmse(flowx, cv::Mat(d_flowx)); - EXPECT_LE(err, MAX_RMSE); - - err = calc_rmse(flowy, cv::Mat(d_flowy)); - EXPECT_LE(err, MAX_RMSE); -} - -INSTANTIATE_TEST_CASE_P(CUDA_OptFlow, FastOpticalFlowBM, ALL_DEVICES); + testing::Values(Gamma(0.0), Gamma(1.0)))); #endif // HAVE_CUDA diff --git 
a/modules/cudaoptflow/test/test_precomp.hpp b/modules/cudaoptflow/test/test_precomp.hpp index 54812022a6..2dc36abf5c 100644 --- a/modules/cudaoptflow/test/test_precomp.hpp +++ b/modules/cudaoptflow/test/test_precomp.hpp @@ -57,6 +57,7 @@ #include "opencv2/ts/cuda_test.hpp" #include "opencv2/cudaoptflow.hpp" +#include "opencv2/cudaarithm.hpp" #include "opencv2/video.hpp" #include "cvconfig.h" diff --git a/modules/imgcodecs/src/grfmt_gdal.cpp b/modules/imgcodecs/src/grfmt_gdal.cpp index 0311630950..55dd7192f6 100644 --- a/modules/imgcodecs/src/grfmt_gdal.cpp +++ b/modules/imgcodecs/src/grfmt_gdal.cpp @@ -140,35 +140,6 @@ int gdal2opencv( const GDALDataType& gdalType, const int& channels ){ return -1; } - -std::string GetOpenCVTypeName( const int& type ){ - - switch(type){ - case CV_8UC1: - return "CV_8UC1"; - case CV_8UC3: - return "CV_8UC3"; - case CV_8UC4: - return "CV_8UC4"; - case CV_16UC1: - return "CV_16UC1"; - case CV_16UC3: - return "CV_16UC3"; - case CV_16UC4: - return "CV_16UC4"; - case CV_16SC1: - return "CV_16SC1"; - case CV_16SC3: - return "CV_16SC3"; - case CV_16SC4: - return "CV_16SC4"; - default: - return "Unknown"; - } - return "Unknown"; -} - - /** * GDAL Decoder Constructor */ diff --git a/modules/imgcodecs/src/loadsave.cpp b/modules/imgcodecs/src/loadsave.cpp index c06b42aded..8526a4a3f0 100644 --- a/modules/imgcodecs/src/loadsave.cpp +++ b/modules/imgcodecs/src/loadsave.cpp @@ -247,7 +247,7 @@ imread_( const String& filename, int flags, int hdrtype, Mat* mat=0 ) ImageDecoder decoder; #ifdef HAVE_GDAL - if( (flags & IMREAD_LOAD_GDAL) == IMREAD_LOAD_GDAL ){ + if(flags != IMREAD_UNCHANGED && (flags & IMREAD_LOAD_GDAL) == IMREAD_LOAD_GDAL ){ decoder = GdalDecoder().newDecoder(); }else{ #endif @@ -275,7 +275,7 @@ imread_( const String& filename, int flags, int hdrtype, Mat* mat=0 ) // grab the decoded type int type = decoder->type(); - if( flags != -1 ) + if( flags != IMREAD_UNCHANGED ) { if( (flags & CV_LOAD_IMAGE_ANYDEPTH) == 0 ) type = CV_MAKETYPE(CV_8U, CV_MAT_CN(type)); @@ -336,7 +336,7 @@ imreadmulti_(const String& filename, int flags, std::vector& mats) ImageDecoder decoder; #ifdef HAVE_GDAL - if ((flags & IMREAD_LOAD_GDAL) == IMREAD_LOAD_GDAL){ + if (flags != IMREAD_UNCHANGED && (flags & IMREAD_LOAD_GDAL) == IMREAD_LOAD_GDAL){ decoder = GdalDecoder().newDecoder(); } else{ @@ -362,7 +362,7 @@ imreadmulti_(const String& filename, int flags, std::vector& mats) { // grab the decoded type int type = decoder->type(); - if (flags != -1) + if (flags != IMREAD_UNCHANGED) { if ((flags & CV_LOAD_IMAGE_ANYDEPTH) == 0) type = CV_MAKETYPE(CV_8U, CV_MAT_CN(type)); @@ -508,7 +508,7 @@ imdecode_( const Mat& buf, int flags, int hdrtype, Mat* mat=0 ) size.height = decoder->height(); int type = decoder->type(); - if( flags != -1 ) + if( flags != IMREAD_UNCHANGED ) { if( (flags & CV_LOAD_IMAGE_ANYDEPTH) == 0 ) type = CV_MAKETYPE(CV_8U, CV_MAT_CN(type)); diff --git a/modules/imgcodecs/test/test_grfmt.cpp b/modules/imgcodecs/test/test_grfmt.cpp index d1610ae7fc..423d030a0c 100644 --- a/modules/imgcodecs/test/test_grfmt.cpp +++ b/modules/imgcodecs/test/test_grfmt.cpp @@ -104,7 +104,8 @@ TEST(Imgcodecs_imread, regression) ASSERT_TRUE(imread_compare(folder + string(filenames[i]), IMREAD_COLOR)); ASSERT_TRUE(imread_compare(folder + string(filenames[i]), IMREAD_ANYDEPTH)); ASSERT_TRUE(imread_compare(folder + string(filenames[i]), IMREAD_ANYCOLOR)); - ASSERT_TRUE(imread_compare(folder + string(filenames[i]), IMREAD_LOAD_GDAL)); + if (i != 2) // GDAL does not support hdr + 
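
// ----------------------------------------------------------------------------
// Illustrative sketch (not part of the patch): why the loadsave.cpp hunks above
// add an explicit "flags != IMREAD_UNCHANGED" guard. IMREAD_UNCHANGED is -1,
// i.e. all bits set, so a plain bit test against IMREAD_LOAD_GDAL matches it
// and imread(..., IMREAD_UNCHANGED) would have been routed to the GDAL decoder.
#include <opencv2/imgcodecs.hpp>
#include <cstdio>

int main()
{
    const int flags = cv::IMREAD_UNCHANGED;   // == -1

    const bool oldTest = (flags & cv::IMREAD_LOAD_GDAL) == cv::IMREAD_LOAD_GDAL;
    const bool newTest = flags != cv::IMREAD_UNCHANGED && oldTest;

    std::printf("old GDAL dispatch: %d, guarded dispatch: %d\n", oldTest, newTest);
    // prints: old GDAL dispatch: 1, guarded dispatch: 0
    return 0;
}
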
ASSERT_TRUE(imread_compare(folder + string(filenames[i]), IMREAD_LOAD_GDAL)); } } @@ -684,7 +685,7 @@ public: compare(IMREAD_COLOR); compare(IMREAD_ANYDEPTH); compare(IMREAD_ANYCOLOR); - compare(IMREAD_LOAD_GDAL); + // compare(IMREAD_LOAD_GDAL); // GDAL does not support multi-page TIFFs } }; diff --git a/modules/imgproc/include/opencv2/imgproc.hpp b/modules/imgproc/include/opencv2/imgproc.hpp index fad801b947..a434500b87 100644 --- a/modules/imgproc/include/opencv2/imgproc.hpp +++ b/modules/imgproc/include/opencv2/imgproc.hpp @@ -985,8 +985,8 @@ public: @param _image A grayscale (CV_8UC1) input image. If only a roi needs to be selected, use: `lsd_ptr-\>detect(image(roi), lines, ...); lines += Scalar(roi.x, roi.y, roi.x, roi.y);` - @param _lines A vector of Vec4i elements specifying the beginning and ending point of a line. Where - Vec4i is (x1, y1, x2, y2), point 1 is the start, point 2 - end. Returned lines are strictly + @param _lines A vector of Vec4i or Vec4f elements specifying the beginning and ending point of a line. Where + Vec4i/Vec4f is (x1, y1, x2, y2), point 1 is the start, point 2 - end. Returned lines are strictly oriented depending on the gradient. @param width Vector of widths of the regions, where the lines are found. E.g. Width of line. @param prec Vector of precisions with which the lines are found. diff --git a/modules/imgproc/src/accum.cpp b/modules/imgproc/src/accum.cpp index d2e8b39aa3..23dc4576ba 100644 --- a/modules/imgproc/src/accum.cpp +++ b/modules/imgproc/src/accum.cpp @@ -12,6 +12,7 @@ // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Copyright (C) 2014, Itseez Inc., all rights reserved. // Third party copyrights are property of their respective owners. / // Redistribution and use in source and binary forms, with or without modification, diff --git a/modules/imgproc/src/canny.cpp b/modules/imgproc/src/canny.cpp index 1311d5abb9..233218b3e2 100644 --- a/modules/imgproc/src/canny.cpp +++ b/modules/imgproc/src/canny.cpp @@ -11,6 +11,7 @@ // For Open Source Computer Vision Library // // Copyright (C) 2000, Intel Corporation, all rights reserved. +// Copyright (C) 2014, Itseez Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, diff --git a/modules/imgproc/src/clahe.cpp b/modules/imgproc/src/clahe.cpp index 18a91d9544..06fc73153f 100644 --- a/modules/imgproc/src/clahe.cpp +++ b/modules/imgproc/src/clahe.cpp @@ -11,6 +11,7 @@ // For Open Source Computer Vision Library // // Copyright (C) 2013, NVIDIA Corporation, all rights reserved. +// Copyright (C) 2014, Itseez Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, diff --git a/modules/imgproc/src/color.cpp b/modules/imgproc/src/color.cpp index f0a8fd8584..b900cf1845 100644 --- a/modules/imgproc/src/color.cpp +++ b/modules/imgproc/src/color.cpp @@ -12,6 +12,7 @@ // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009-2010, Willow Garage Inc., all rights reserved. +// Copyright (C) 2014-2015, Itseez Inc., all rights reserved. // Third party copyrights are property of their respective owners. 
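
// ----------------------------------------------------------------------------
// Illustrative sketch (not part of the patch): the scalar bit layout that the
// SSE2 paths added to Gray2RGB5x5 / RGB5x52Gray below vectorize. The masks and
// shifts mirror the intrinsics in those hunks (v_n3 = ~3, v_n7 = ~7, the
// 3/5/8/10-bit shifts, and the B2Y/G2Y/R2Y weighted sum); the helper names and
// the tiny driver are made up for this sketch.
#include <cstdint>
#include <cstdio>

// Replicate a grey value into packed BGR565: 5 bits blue, 6 bits green, 5 bits red.
static inline uint16_t gray2bgr565(uint8_t g)
{
    return (uint16_t)((g >> 3) | ((g & ~3) << 3) | ((g & ~7) << 8));
}

// Replicate a grey value into packed BGR555 (x-5-5-5).
static inline uint16_t gray2bgr555(uint8_t g)
{
    uint16_t t = (uint16_t)(g >> 3);
    return (uint16_t)(t | (t << 5) | (t << 10));
}

// Inverse direction for 565, using imgproc's fixed-point grey weights
// (B2Y/G2Y/R2Y scaled by 2^yuv_shift with yuv_shift = 14).
static inline uint8_t bgr5652gray(uint16_t v)
{
    const int B2Y = 1868, G2Y = 9617, R2Y = 4899, yuv_shift = 14;
    int b = (v << 3) & 0xf8, g = (v >> 3) & 0xfc, r = (v >> 8) & 0xf8;
    return (uint8_t)((b * B2Y + g * G2Y + r * R2Y + (1 << (yuv_shift - 1))) >> yuv_shift);
}

int main()
{
    // Round-trip a few grey levels; the low bits are lost to the 5/6-bit fields.
    for (int g = 0; g < 256; g += 85)
        std::printf("g=%3d  565=0x%04x  555=0x%04x  back=%3d\n",
                    g, gray2bgr565((uint8_t)g), gray2bgr555((uint8_t)g),
                    bgr5652gray(gray2bgr565((uint8_t)g)));
    return 0;
}
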
// // Redistribution and use in source and binary forms, with or without modification, @@ -965,6 +966,11 @@ struct Gray2RGB5x5 #if CV_NEON v_n7 = vdup_n_u8(~7); v_n3 = vdup_n_u8(~3); + #elif CV_SSE2 + haveSIMD = checkHardwareSupport(CV_CPU_SSE2); + v_n7 = _mm_set1_epi16(~7); + v_n3 = _mm_set1_epi16(~3); + v_zero = _mm_setzero_si128(); #endif } @@ -982,6 +988,26 @@ struct Gray2RGB5x5 v_dst = vorrq_u16(v_dst, vshlq_n_u16(vmovl_u8(vand_u8(v_src, v_n7)), 8)); vst1q_u16((ushort *)dst + i, v_dst); } + #elif CV_SSE2 + if (haveSIMD) + { + for ( ; i <= n - 16; i += 16 ) + { + __m128i v_src = _mm_loadu_si128((__m128i const *)(src + i)); + + __m128i v_src_p = _mm_unpacklo_epi8(v_src, v_zero); + __m128i v_dst = _mm_or_si128(_mm_srli_epi16(v_src_p, 3), + _mm_or_si128(_mm_slli_epi16(_mm_and_si128(v_src_p, v_n3), 3), + _mm_slli_epi16(_mm_and_si128(v_src_p, v_n7), 8))); + _mm_storeu_si128((__m128i *)((ushort *)dst + i), v_dst); + + v_src_p = _mm_unpackhi_epi8(v_src, v_zero); + v_dst = _mm_or_si128(_mm_srli_epi16(v_src_p, 3), + _mm_or_si128(_mm_slli_epi16(_mm_and_si128(v_src_p, v_n3), 3), + _mm_slli_epi16(_mm_and_si128(v_src_p, v_n7), 8))); + _mm_storeu_si128((__m128i *)((ushort *)dst + i + 8), v_dst); + } + } #endif for ( ; i < n; i++ ) { @@ -998,6 +1024,26 @@ struct Gray2RGB5x5 uint16x8_t v_dst = vorrq_u16(vorrq_u16(v_src, vshlq_n_u16(v_src, 5)), vshlq_n_u16(v_src, 10)); vst1q_u16((ushort *)dst + i, v_dst); } + #elif CV_SSE2 + if (haveSIMD) + { + for ( ; i <= n - 16; i += 8 ) + { + __m128i v_src = _mm_loadu_si128((__m128i const *)(src + i)); + + __m128i v_src_p = _mm_srli_epi16(_mm_unpacklo_epi8(v_src, v_zero), 3); + __m128i v_dst = _mm_or_si128(v_src_p, + _mm_or_si128(_mm_slli_epi32(v_src_p, 5), + _mm_slli_epi16(v_src_p, 10))); + _mm_storeu_si128((__m128i *)((ushort *)dst + i), v_dst); + + v_src_p = _mm_srli_epi16(_mm_unpackhi_epi8(v_src, v_zero), 3); + v_dst = _mm_or_si128(v_src_p, + _mm_or_si128(_mm_slli_epi16(v_src_p, 5), + _mm_slli_epi16(v_src_p, 10))); + _mm_storeu_si128((__m128i *)((ushort *)dst + i + 8), v_dst); + } + } #endif for( ; i < n; i++ ) { @@ -1010,6 +1056,9 @@ struct Gray2RGB5x5 #if CV_NEON uint8x8_t v_n7, v_n3; + #elif CV_SSE2 + __m128i v_n7, v_n3, v_zero; + bool haveSIMD; #endif }; @@ -1042,6 +1091,14 @@ struct RGB5x52Gray v_delta = vdupq_n_u32(1 << (yuv_shift - 1)); v_f8 = vdupq_n_u16(0xf8); v_fc = vdupq_n_u16(0xfc); + #elif CV_SSE2 + haveSIMD = checkHardwareSupport(CV_CPU_SSE2); + v_b2y = _mm_set1_epi16(B2Y); + v_g2y = _mm_set1_epi16(G2Y); + v_r2y = _mm_set1_epi16(R2Y); + v_delta = _mm_set1_epi32(1 << (yuv_shift - 1)); + v_f8 = _mm_set1_epi16(0xf8); + v_fc = _mm_set1_epi16(0xfc); #endif } @@ -1067,6 +1124,42 @@ struct RGB5x52Gray vst1_u8(dst + i, vmovn_u16(vcombine_u16(vmovn_u32(v_dst0), vmovn_u32(v_dst1)))); } + #elif CV_SSE2 + if (haveSIMD) + { + __m128i v_zero = _mm_setzero_si128(); + + for ( ; i <= n - 8; i += 8) + { + __m128i v_src = _mm_loadu_si128((__m128i const *)((ushort *)src + i)); + __m128i v_t0 = _mm_and_si128(_mm_slli_epi16(v_src, 3), v_f8), + v_t1 = _mm_and_si128(_mm_srli_epi16(v_src, 3), v_fc), + v_t2 = _mm_and_si128(_mm_srli_epi16(v_src, 8), v_f8); + + __m128i v_mullo_b = _mm_mullo_epi16(v_t0, v_b2y); + __m128i v_mullo_g = _mm_mullo_epi16(v_t1, v_g2y); + __m128i v_mullo_r = _mm_mullo_epi16(v_t2, v_r2y); + __m128i v_mulhi_b = _mm_mulhi_epi16(v_t0, v_b2y); + __m128i v_mulhi_g = _mm_mulhi_epi16(v_t1, v_g2y); + __m128i v_mulhi_r = _mm_mulhi_epi16(v_t2, v_r2y); + + __m128i v_dst0 = _mm_add_epi32(_mm_unpacklo_epi16(v_mullo_b, v_mulhi_b), + _mm_unpacklo_epi16(v_mullo_g, 
v_mulhi_g)); + v_dst0 = _mm_add_epi32(_mm_add_epi32(v_dst0, v_delta), + _mm_unpacklo_epi16(v_mullo_r, v_mulhi_r)); + + __m128i v_dst1 = _mm_add_epi32(_mm_unpackhi_epi16(v_mullo_b, v_mulhi_b), + _mm_unpackhi_epi16(v_mullo_g, v_mulhi_g)); + v_dst1 = _mm_add_epi32(_mm_add_epi32(v_dst1, v_delta), + _mm_unpackhi_epi16(v_mullo_r, v_mulhi_r)); + + v_dst0 = _mm_srli_epi32(v_dst0, yuv_shift); + v_dst1 = _mm_srli_epi32(v_dst1, yuv_shift); + + __m128i v_dst = _mm_packs_epi32(v_dst0, v_dst1); + _mm_storel_epi64((__m128i *)(dst + i), _mm_packus_epi16(v_dst, v_zero)); + } + } #endif for ( ; i < n; i++) { @@ -1095,6 +1188,42 @@ struct RGB5x52Gray vst1_u8(dst + i, vmovn_u16(vcombine_u16(vmovn_u32(v_dst0), vmovn_u32(v_dst1)))); } + #elif CV_SSE2 + if (haveSIMD) + { + __m128i v_zero = _mm_setzero_si128(); + + for ( ; i <= n - 8; i += 8) + { + __m128i v_src = _mm_loadu_si128((__m128i const *)((ushort *)src + i)); + __m128i v_t0 = _mm_and_si128(_mm_slli_epi16(v_src, 3), v_f8), + v_t1 = _mm_and_si128(_mm_srli_epi16(v_src, 2), v_f8), + v_t2 = _mm_and_si128(_mm_srli_epi16(v_src, 7), v_f8); + + __m128i v_mullo_b = _mm_mullo_epi16(v_t0, v_b2y); + __m128i v_mullo_g = _mm_mullo_epi16(v_t1, v_g2y); + __m128i v_mullo_r = _mm_mullo_epi16(v_t2, v_r2y); + __m128i v_mulhi_b = _mm_mulhi_epi16(v_t0, v_b2y); + __m128i v_mulhi_g = _mm_mulhi_epi16(v_t1, v_g2y); + __m128i v_mulhi_r = _mm_mulhi_epi16(v_t2, v_r2y); + + __m128i v_dst0 = _mm_add_epi32(_mm_unpacklo_epi16(v_mullo_b, v_mulhi_b), + _mm_unpacklo_epi16(v_mullo_g, v_mulhi_g)); + v_dst0 = _mm_add_epi32(_mm_add_epi32(v_dst0, v_delta), + _mm_unpacklo_epi16(v_mullo_r, v_mulhi_r)); + + __m128i v_dst1 = _mm_add_epi32(_mm_unpackhi_epi16(v_mullo_b, v_mulhi_b), + _mm_unpackhi_epi16(v_mullo_g, v_mulhi_g)); + v_dst1 = _mm_add_epi32(_mm_add_epi32(v_dst1, v_delta), + _mm_unpackhi_epi16(v_mullo_r, v_mulhi_r)); + + v_dst0 = _mm_srli_epi32(v_dst0, yuv_shift); + v_dst1 = _mm_srli_epi32(v_dst1, yuv_shift); + + __m128i v_dst = _mm_packs_epi32(v_dst0, v_dst1); + _mm_storel_epi64((__m128i *)(dst + i), _mm_packus_epi16(v_dst, v_zero)); + } + } #endif for ( ; i < n; i++) { @@ -1111,6 +1240,11 @@ struct RGB5x52Gray uint16x4_t v_b2y, v_g2y, v_r2y; uint32x4_t v_delta; uint16x8_t v_f8, v_fc; + #elif CV_SSE2 + bool haveSIMD; + __m128i v_b2y, v_g2y, v_r2y; + __m128i v_delta; + __m128i v_f8, v_fc; #endif }; @@ -1327,6 +1461,219 @@ struct RGB2Gray float32x4_t v_cb, v_cg, v_cr; }; +#elif CV_SSE2 + +#if CV_SSE4_1 + +template <> +struct RGB2Gray +{ + typedef ushort channel_type; + + RGB2Gray(int _srccn, int blueIdx, const int* _coeffs) : + srccn(_srccn) + { + static const int coeffs0[] = { R2Y, G2Y, B2Y }; + memcpy(coeffs, _coeffs ? 
_coeffs : coeffs0, 3*sizeof(coeffs[0])); + if( blueIdx == 0 ) + std::swap(coeffs[0], coeffs[2]); + + v_cb = _mm_set1_epi16((short)coeffs[0]); + v_cg = _mm_set1_epi16((short)coeffs[1]); + v_cr = _mm_set1_epi16((short)coeffs[2]); + v_delta = _mm_set1_epi32(1 << (yuv_shift - 1)); + + haveSIMD = checkHardwareSupport(CV_CPU_SSE4_1); + } + + // 16s x 8 + void process(__m128i v_b, __m128i v_g, __m128i v_r, + __m128i & v_gray) const + { + __m128i v_mullo_r = _mm_mullo_epi16(v_r, v_cr); + __m128i v_mullo_g = _mm_mullo_epi16(v_g, v_cg); + __m128i v_mullo_b = _mm_mullo_epi16(v_b, v_cb); + __m128i v_mulhi_r = _mm_mulhi_epu16(v_r, v_cr); + __m128i v_mulhi_g = _mm_mulhi_epu16(v_g, v_cg); + __m128i v_mulhi_b = _mm_mulhi_epu16(v_b, v_cb); + + __m128i v_gray0 = _mm_add_epi32(_mm_unpacklo_epi16(v_mullo_r, v_mulhi_r), + _mm_unpacklo_epi16(v_mullo_g, v_mulhi_g)); + v_gray0 = _mm_add_epi32(_mm_unpacklo_epi16(v_mullo_b, v_mulhi_b), v_gray0); + v_gray0 = _mm_srli_epi32(_mm_add_epi32(v_gray0, v_delta), yuv_shift); + + __m128i v_gray1 = _mm_add_epi32(_mm_unpackhi_epi16(v_mullo_r, v_mulhi_r), + _mm_unpackhi_epi16(v_mullo_g, v_mulhi_g)); + v_gray1 = _mm_add_epi32(_mm_unpackhi_epi16(v_mullo_b, v_mulhi_b), v_gray1); + v_gray1 = _mm_srli_epi32(_mm_add_epi32(v_gray1, v_delta), yuv_shift); + + v_gray = _mm_packus_epi32(v_gray0, v_gray1); + } + + void operator()(const ushort* src, ushort* dst, int n) const + { + int scn = srccn, cb = coeffs[0], cg = coeffs[1], cr = coeffs[2], i = 0; + + if (scn == 3 && haveSIMD) + { + for ( ; i <= n - 16; i += 16, src += scn * 16) + { + __m128i v_r0 = _mm_loadu_si128((__m128i const *)(src)); + __m128i v_r1 = _mm_loadu_si128((__m128i const *)(src + 8)); + __m128i v_g0 = _mm_loadu_si128((__m128i const *)(src + 16)); + __m128i v_g1 = _mm_loadu_si128((__m128i const *)(src + 24)); + __m128i v_b0 = _mm_loadu_si128((__m128i const *)(src + 32)); + __m128i v_b1 = _mm_loadu_si128((__m128i const *)(src + 40)); + + _mm_deinterleave_epi16(v_r0, v_r1, v_g0, v_g1, v_b0, v_b1); + + __m128i v_gray0; + process(v_r0, v_g0, v_b0, + v_gray0); + + __m128i v_gray1; + process(v_r1, v_g1, v_b1, + v_gray1); + + _mm_storeu_si128((__m128i *)(dst + i), v_gray0); + _mm_storeu_si128((__m128i *)(dst + i + 8), v_gray1); + } + } + else if (scn == 4 && haveSIMD) + { + for ( ; i <= n - 16; i += 16, src += scn * 16) + { + __m128i v_r0 = _mm_loadu_si128((__m128i const *)(src)); + __m128i v_r1 = _mm_loadu_si128((__m128i const *)(src + 8)); + __m128i v_g0 = _mm_loadu_si128((__m128i const *)(src + 16)); + __m128i v_g1 = _mm_loadu_si128((__m128i const *)(src + 24)); + __m128i v_b0 = _mm_loadu_si128((__m128i const *)(src + 32)); + __m128i v_b1 = _mm_loadu_si128((__m128i const *)(src + 40)); + __m128i v_a0 = _mm_loadu_si128((__m128i const *)(src + 48)); + __m128i v_a1 = _mm_loadu_si128((__m128i const *)(src + 56)); + + _mm_deinterleave_epi16(v_r0, v_r1, v_g0, v_g1, v_b0, v_b1, v_a0, v_a1); + + __m128i v_gray0; + process(v_r0, v_g0, v_b0, + v_gray0); + + __m128i v_gray1; + process(v_r1, v_g1, v_b1, + v_gray1); + + _mm_storeu_si128((__m128i *)(dst + i), v_gray0); + _mm_storeu_si128((__m128i *)(dst + i + 8), v_gray1); + } + } + + for( ; i < n; i++, src += scn) + dst[i] = (ushort)CV_DESCALE((unsigned)(src[0]*cb + src[1]*cg + src[2]*cr), yuv_shift); + } + + int srccn, coeffs[3]; + __m128i v_cb, v_cg, v_cr; + __m128i v_delta; + bool haveSIMD; +}; + +#endif // CV_SSE4_1 + +template <> +struct RGB2Gray +{ + typedef float channel_type; + + RGB2Gray(int _srccn, int blueIdx, const float* _coeffs) : srccn(_srccn) + { + static const float 
coeffs0[] = { 0.299f, 0.587f, 0.114f }; + memcpy( coeffs, _coeffs ? _coeffs : coeffs0, 3*sizeof(coeffs[0]) ); + if(blueIdx == 0) + std::swap(coeffs[0], coeffs[2]); + + v_cb = _mm_set1_ps(coeffs[0]); + v_cg = _mm_set1_ps(coeffs[1]); + v_cr = _mm_set1_ps(coeffs[2]); + + haveSIMD = checkHardwareSupport(CV_CPU_SSE2); + } + + void process(__m128 v_b, __m128 v_g, __m128 v_r, + __m128 & v_gray) const + { + v_gray = _mm_mul_ps(v_r, v_cr); + v_gray = _mm_add_ps(v_gray, _mm_mul_ps(v_g, v_cg)); + v_gray = _mm_add_ps(v_gray, _mm_mul_ps(v_b, v_cb)); + } + + void operator()(const float * src, float * dst, int n) const + { + int scn = srccn, i = 0; + float cb = coeffs[0], cg = coeffs[1], cr = coeffs[2]; + + if (scn == 3 && haveSIMD) + { + for ( ; i <= n - 8; i += 8, src += scn * 8) + { + __m128 v_r0 = _mm_loadu_ps(src); + __m128 v_r1 = _mm_loadu_ps(src + 4); + __m128 v_g0 = _mm_loadu_ps(src + 8); + __m128 v_g1 = _mm_loadu_ps(src + 12); + __m128 v_b0 = _mm_loadu_ps(src + 16); + __m128 v_b1 = _mm_loadu_ps(src + 20); + + _mm_deinterleave_ps(v_r0, v_r1, v_g0, v_g1, v_b0, v_b1); + + __m128 v_gray0; + process(v_r0, v_g0, v_b0, + v_gray0); + + __m128 v_gray1; + process(v_r1, v_g1, v_b1, + v_gray1); + + _mm_storeu_ps(dst + i, v_gray0); + _mm_storeu_ps(dst + i + 4, v_gray1); + } + } + else if (scn == 4 && haveSIMD) + { + for ( ; i <= n - 8; i += 8, src += scn * 8) + { + __m128 v_r0 = _mm_loadu_ps(src); + __m128 v_r1 = _mm_loadu_ps(src + 4); + __m128 v_g0 = _mm_loadu_ps(src + 8); + __m128 v_g1 = _mm_loadu_ps(src + 12); + __m128 v_b0 = _mm_loadu_ps(src + 16); + __m128 v_b1 = _mm_loadu_ps(src + 20); + __m128 v_a0 = _mm_loadu_ps(src + 24); + __m128 v_a1 = _mm_loadu_ps(src + 28); + + _mm_deinterleave_ps(v_r0, v_r1, v_g0, v_g1, v_b0, v_b1, v_a0, v_a1); + + __m128 v_gray0; + process(v_r0, v_g0, v_b0, + v_gray0); + + __m128 v_gray1; + process(v_r1, v_g1, v_b1, + v_gray1); + + _mm_storeu_ps(dst + i, v_gray0); + _mm_storeu_ps(dst + i + 4, v_gray1); + } + } + + for ( ; i < n; i++, src += scn) + dst[i] = src[0]*cb + src[1]*cg + src[2]*cr; + } + + int srccn; + float coeffs[3]; + __m128 v_cb, v_cg, v_cr; + bool haveSIMD; +}; + #else template<> struct RGB2Gray @@ -1449,6 +1796,103 @@ struct RGB2YCrCb_f float32x4_t v_c0, v_c1, v_c2, v_c3, v_c4, v_delta; }; +#elif CV_SSE2 + +template <> +struct RGB2YCrCb_f +{ + typedef float channel_type; + + RGB2YCrCb_f(int _srccn, int _blueIdx, const float* _coeffs) : + srccn(_srccn), blueIdx(_blueIdx) + { + static const float coeffs0[] = {0.299f, 0.587f, 0.114f, 0.713f, 0.564f}; + memcpy(coeffs, _coeffs ? _coeffs : coeffs0, 5*sizeof(coeffs[0])); + if (blueIdx==0) + std::swap(coeffs[0], coeffs[2]); + + v_c0 = _mm_set1_ps(coeffs[0]); + v_c1 = _mm_set1_ps(coeffs[1]); + v_c2 = _mm_set1_ps(coeffs[2]); + v_c3 = _mm_set1_ps(coeffs[3]); + v_c4 = _mm_set1_ps(coeffs[4]); + v_delta = _mm_set1_ps(ColorChannel::half()); + + haveSIMD = checkHardwareSupport(CV_CPU_SSE2); + } + + void process(__m128 v_r, __m128 v_g, __m128 v_b, + __m128 & v_y, __m128 & v_cr, __m128 & v_cb) const + { + v_y = _mm_mul_ps(v_r, v_c0); + v_y = _mm_add_ps(v_y, _mm_mul_ps(v_g, v_c1)); + v_y = _mm_add_ps(v_y, _mm_mul_ps(v_b, v_c2)); + + v_cr = _mm_add_ps(_mm_mul_ps(_mm_sub_ps(blueIdx == 0 ? v_b : v_r, v_y), v_c3), v_delta); + v_cb = _mm_add_ps(_mm_mul_ps(_mm_sub_ps(blueIdx == 2 ? 
v_b : v_r, v_y), v_c4), v_delta); + } + + void operator()(const float * src, float * dst, int n) const + { + int scn = srccn, bidx = blueIdx, i = 0; + const float delta = ColorChannel::half(); + float C0 = coeffs[0], C1 = coeffs[1], C2 = coeffs[2], C3 = coeffs[3], C4 = coeffs[4]; + n *= 3; + + if (haveSIMD) + { + for ( ; i <= n - 24; i += 24, src += 8 * scn) + { + __m128 v_r0 = _mm_loadu_ps(src); + __m128 v_r1 = _mm_loadu_ps(src + 4); + __m128 v_g0 = _mm_loadu_ps(src + 8); + __m128 v_g1 = _mm_loadu_ps(src + 12); + __m128 v_b0 = _mm_loadu_ps(src + 16); + __m128 v_b1 = _mm_loadu_ps(src + 20); + + if (scn == 4) + { + __m128 v_a0 = _mm_loadu_ps(src + 24); + __m128 v_a1 = _mm_loadu_ps(src + 28); + _mm_deinterleave_ps(v_r0, v_r1, v_g0, v_g1, + v_b0, v_b1, v_a0, v_a1); + } + else + _mm_deinterleave_ps(v_r0, v_r1, v_g0, v_g1, v_b0, v_b1); + + __m128 v_y0, v_cr0, v_cb0; + process(v_r0, v_g0, v_b0, + v_y0, v_cr0, v_cb0); + + __m128 v_y1, v_cr1, v_cb1; + process(v_r1, v_g1, v_b1, + v_y1, v_cr1, v_cb1); + + _mm_interleave_ps(v_y0, v_y1, v_cr0, v_cr1, v_cb0, v_cb1); + + _mm_storeu_ps(dst + i, v_y0); + _mm_storeu_ps(dst + i + 4, v_y1); + _mm_storeu_ps(dst + i + 8, v_cr0); + _mm_storeu_ps(dst + i + 12, v_cr1); + _mm_storeu_ps(dst + i + 16, v_cb0); + _mm_storeu_ps(dst + i + 20, v_cb1); + } + } + + for ( ; i < n; i += 3, src += scn) + { + float Y = src[0]*C0 + src[1]*C1 + src[2]*C2; + float Cr = (src[bidx^2] - Y)*C3 + delta; + float Cb = (src[bidx] - Y)*C4 + delta; + dst[i] = Y; dst[i+1] = Cr; dst[i+2] = Cb; + } + } + int srccn, blueIdx; + float coeffs[5]; + __m128 v_c0, v_c1, v_c2, v_c3, v_c4, v_delta; + bool haveSIMD; +}; + #endif template struct RGB2YCrCb_i @@ -1699,7 +2143,288 @@ struct RGB2YCrCb_i int32x4_t v_c0, v_c1, v_c2, v_c3, v_c4, v_delta, v_delta2; }; -#endif +#elif CV_SSE4_1 + +template <> +struct RGB2YCrCb_i +{ + typedef uchar channel_type; + + RGB2YCrCb_i(int _srccn, int _blueIdx, const int* _coeffs) + : srccn(_srccn), blueIdx(_blueIdx) + { + static const int coeffs0[] = {R2Y, G2Y, B2Y, 11682, 9241}; + memcpy(coeffs, _coeffs ? _coeffs : coeffs0, 5*sizeof(coeffs[0])); + if (blueIdx==0) + std::swap(coeffs[0], coeffs[2]); + + v_c0 = _mm_set1_epi32(coeffs[0]); + v_c1 = _mm_set1_epi32(coeffs[1]); + v_c2 = _mm_set1_epi32(coeffs[2]); + v_c3 = _mm_set1_epi32(coeffs[3]); + v_c4 = _mm_set1_epi32(coeffs[4]); + v_delta2 = _mm_set1_epi32(1 << (yuv_shift - 1)); + v_delta = _mm_set1_epi32(ColorChannel::half()*(1 << yuv_shift)); + v_delta = _mm_add_epi32(v_delta, v_delta2); + v_zero = _mm_setzero_si128(); + + haveSIMD = checkHardwareSupport(CV_CPU_SSE4_1); + } + + // 16u x 8 + void process(__m128i v_r, __m128i v_g, __m128i v_b, + __m128i & v_y, __m128i & v_cr, __m128i & v_cb) const + { + __m128i v_r_p = _mm_unpacklo_epi16(v_r, v_zero); + __m128i v_g_p = _mm_unpacklo_epi16(v_g, v_zero); + __m128i v_b_p = _mm_unpacklo_epi16(v_b, v_zero); + + __m128i v_y0 = _mm_add_epi32(_mm_mullo_epi32(v_r_p, v_c0), + _mm_add_epi32(_mm_mullo_epi32(v_g_p, v_c1), + _mm_mullo_epi32(v_b_p, v_c2))); + v_y0 = _mm_srli_epi32(_mm_add_epi32(v_delta2, v_y0), yuv_shift); + + __m128i v_cr0 = _mm_mullo_epi32(_mm_sub_epi32(blueIdx == 2 ? v_r_p : v_b_p, v_y0), v_c3); + __m128i v_cb0 = _mm_mullo_epi32(_mm_sub_epi32(blueIdx == 0 ? 
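// Scalar form of what RGB2YCrCb_f<float>::process() above evaluates per lane, matching
// the tail loop further down ('delta' is 0.5f for float data, and the coefficients are
// already swapped for BGR input in the constructor). Helper name is illustrative only:
static inline void rgb2ycrcb_f32(const float* pix, const float C[5], int bidx,
                                 float delta, float& Y, float& Cr, float& Cb)
{
    Y  = pix[0] * C[0] + pix[1] * C[1] + pix[2] * C[2];
    Cr = (pix[bidx ^ 2] - Y) * C[3] + delta;   // the red-side channel
    Cb = (pix[bidx]     - Y) * C[4] + delta;   // the blue channel
}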
v_r_p : v_b_p, v_y0), v_c4); + v_cr0 = _mm_srai_epi32(_mm_add_epi32(v_delta, v_cr0), yuv_shift); + v_cb0 = _mm_srai_epi32(_mm_add_epi32(v_delta, v_cb0), yuv_shift); + + v_r_p = _mm_unpackhi_epi16(v_r, v_zero); + v_g_p = _mm_unpackhi_epi16(v_g, v_zero); + v_b_p = _mm_unpackhi_epi16(v_b, v_zero); + + __m128i v_y1 = _mm_add_epi32(_mm_mullo_epi32(v_r_p, v_c0), + _mm_add_epi32(_mm_mullo_epi32(v_g_p, v_c1), + _mm_mullo_epi32(v_b_p, v_c2))); + v_y1 = _mm_srli_epi32(_mm_add_epi32(v_delta2, v_y1), yuv_shift); + + __m128i v_cr1 = _mm_mullo_epi32(_mm_sub_epi32(blueIdx == 2 ? v_r_p : v_b_p, v_y1), v_c3); + __m128i v_cb1 = _mm_mullo_epi32(_mm_sub_epi32(blueIdx == 0 ? v_r_p : v_b_p, v_y1), v_c4); + v_cr1 = _mm_srai_epi32(_mm_add_epi32(v_delta, v_cr1), yuv_shift); + v_cb1 = _mm_srai_epi32(_mm_add_epi32(v_delta, v_cb1), yuv_shift); + + v_y = _mm_packs_epi32(v_y0, v_y1); + v_cr = _mm_packs_epi32(v_cr0, v_cr1); + v_cb = _mm_packs_epi32(v_cb0, v_cb1); + } + + void operator()(const uchar * src, uchar * dst, int n) const + { + int scn = srccn, bidx = blueIdx, i = 0; + int C0 = coeffs[0], C1 = coeffs[1], C2 = coeffs[2], C3 = coeffs[3], C4 = coeffs[4]; + int delta = ColorChannel::half()*(1 << yuv_shift); + n *= 3; + + if (haveSIMD) + { + for ( ; i <= n - 96; i += 96, src += scn * 32) + { + __m128i v_r0 = _mm_loadu_si128((__m128i const *)(src)); + __m128i v_r1 = _mm_loadu_si128((__m128i const *)(src + 16)); + __m128i v_g0 = _mm_loadu_si128((__m128i const *)(src + 32)); + __m128i v_g1 = _mm_loadu_si128((__m128i const *)(src + 48)); + __m128i v_b0 = _mm_loadu_si128((__m128i const *)(src + 64)); + __m128i v_b1 = _mm_loadu_si128((__m128i const *)(src + 80)); + + if (scn == 4) + { + __m128i v_a0 = _mm_loadu_si128((__m128i const *)(src + 96)); + __m128i v_a1 = _mm_loadu_si128((__m128i const *)(src + 112)); + _mm_deinterleave_epi8(v_r0, v_r1, v_g0, v_g1, + v_b0, v_b1, v_a0, v_a1); + } + else + _mm_deinterleave_epi8(v_r0, v_r1, v_g0, v_g1, v_b0, v_b1); + + __m128i v_y0 = v_zero, v_cr0 = v_zero, v_cb0 = v_zero; + process(_mm_unpacklo_epi8(v_r0, v_zero), + _mm_unpacklo_epi8(v_g0, v_zero), + _mm_unpacklo_epi8(v_b0, v_zero), + v_y0, v_cr0, v_cb0); + + __m128i v_y1 = v_zero, v_cr1 = v_zero, v_cb1 = v_zero; + process(_mm_unpackhi_epi8(v_r0, v_zero), + _mm_unpackhi_epi8(v_g0, v_zero), + _mm_unpackhi_epi8(v_b0, v_zero), + v_y1, v_cr1, v_cb1); + + __m128i v_y_0 = _mm_packus_epi16(v_y0, v_y1); + __m128i v_cr_0 = _mm_packus_epi16(v_cr0, v_cr1); + __m128i v_cb_0 = _mm_packus_epi16(v_cb0, v_cb1); + + process(_mm_unpacklo_epi8(v_r1, v_zero), + _mm_unpacklo_epi8(v_g1, v_zero), + _mm_unpacklo_epi8(v_b1, v_zero), + v_y0, v_cr0, v_cb0); + + process(_mm_unpackhi_epi8(v_r1, v_zero), + _mm_unpackhi_epi8(v_g1, v_zero), + _mm_unpackhi_epi8(v_b1, v_zero), + v_y1, v_cr1, v_cb1); + + __m128i v_y_1 = _mm_packus_epi16(v_y0, v_y1); + __m128i v_cr_1 = _mm_packus_epi16(v_cr0, v_cr1); + __m128i v_cb_1 = _mm_packus_epi16(v_cb0, v_cb1); + + _mm_interleave_epi8(v_y_0, v_y_1, v_cr_0, v_cr_1, v_cb_0, v_cb_1); + + _mm_storeu_si128((__m128i *)(dst + i), v_y_0); + _mm_storeu_si128((__m128i *)(dst + i + 16), v_y_1); + _mm_storeu_si128((__m128i *)(dst + i + 32), v_cr_0); + _mm_storeu_si128((__m128i *)(dst + i + 48), v_cr_1); + _mm_storeu_si128((__m128i *)(dst + i + 64), v_cb_0); + _mm_storeu_si128((__m128i *)(dst + i + 80), v_cb_1); + } + } + + for ( ; i < n; i += 3, src += scn) + { + int Y = CV_DESCALE(src[0]*C0 + src[1]*C1 + src[2]*C2, yuv_shift); + int Cr = CV_DESCALE((src[bidx^2] - Y)*C3 + delta, yuv_shift); + int Cb = CV_DESCALE((src[bidx] - Y)*C4 + delta, 
yuv_shift); + dst[i] = saturate_cast(Y); + dst[i+1] = saturate_cast(Cr); + dst[i+2] = saturate_cast(Cb); + } + } + + int srccn, blueIdx, coeffs[5]; + __m128i v_c0, v_c1, v_c2; + __m128i v_c3, v_c4, v_delta, v_delta2; + __m128i v_zero; + bool haveSIMD; +}; + +template <> +struct RGB2YCrCb_i +{ + typedef ushort channel_type; + + RGB2YCrCb_i(int _srccn, int _blueIdx, const int* _coeffs) + : srccn(_srccn), blueIdx(_blueIdx) + { + static const int coeffs0[] = {R2Y, G2Y, B2Y, 11682, 9241}; + memcpy(coeffs, _coeffs ? _coeffs : coeffs0, 5*sizeof(coeffs[0])); + if (blueIdx==0) + std::swap(coeffs[0], coeffs[2]); + + v_c0 = _mm_set1_epi32(coeffs[0]); + v_c1 = _mm_set1_epi32(coeffs[1]); + v_c2 = _mm_set1_epi32(coeffs[2]); + v_c3 = _mm_set1_epi32(coeffs[3]); + v_c4 = _mm_set1_epi32(coeffs[4]); + v_delta2 = _mm_set1_epi32(1 << (yuv_shift - 1)); + v_delta = _mm_set1_epi32(ColorChannel::half()*(1 << yuv_shift)); + v_delta = _mm_add_epi32(v_delta, v_delta2); + v_zero = _mm_setzero_si128(); + + haveSIMD = checkHardwareSupport(CV_CPU_SSE4_1); + } + + // 16u x 8 + void process(__m128i v_r, __m128i v_g, __m128i v_b, + __m128i & v_y, __m128i & v_cr, __m128i & v_cb) const + { + __m128i v_r_p = _mm_unpacklo_epi16(v_r, v_zero); + __m128i v_g_p = _mm_unpacklo_epi16(v_g, v_zero); + __m128i v_b_p = _mm_unpacklo_epi16(v_b, v_zero); + + __m128i v_y0 = _mm_add_epi32(_mm_mullo_epi32(v_r_p, v_c0), + _mm_add_epi32(_mm_mullo_epi32(v_g_p, v_c1), + _mm_mullo_epi32(v_b_p, v_c2))); + v_y0 = _mm_srli_epi32(_mm_add_epi32(v_delta2, v_y0), yuv_shift); + + __m128i v_cr0 = _mm_mullo_epi32(_mm_sub_epi32(blueIdx == 2 ? v_r_p : v_b_p, v_y0), v_c3); + __m128i v_cb0 = _mm_mullo_epi32(_mm_sub_epi32(blueIdx == 0 ? v_r_p : v_b_p, v_y0), v_c4); + v_cr0 = _mm_srai_epi32(_mm_add_epi32(v_delta, v_cr0), yuv_shift); + v_cb0 = _mm_srai_epi32(_mm_add_epi32(v_delta, v_cb0), yuv_shift); + + v_r_p = _mm_unpackhi_epi16(v_r, v_zero); + v_g_p = _mm_unpackhi_epi16(v_g, v_zero); + v_b_p = _mm_unpackhi_epi16(v_b, v_zero); + + __m128i v_y1 = _mm_add_epi32(_mm_mullo_epi32(v_r_p, v_c0), + _mm_add_epi32(_mm_mullo_epi32(v_g_p, v_c1), + _mm_mullo_epi32(v_b_p, v_c2))); + v_y1 = _mm_srli_epi32(_mm_add_epi32(v_delta2, v_y1), yuv_shift); + + __m128i v_cr1 = _mm_mullo_epi32(_mm_sub_epi32(blueIdx == 2 ? v_r_p : v_b_p, v_y1), v_c3); + __m128i v_cb1 = _mm_mullo_epi32(_mm_sub_epi32(blueIdx == 0 ? 
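// Why the integer RGB2YCrCb constructors above build
//   v_delta = half * (1 << yuv_shift) + (1 << (yuv_shift - 1)):
// the chroma offset and the CV_DESCALE rounding term collapse into a single addend, so
// the vector code needs only one _mm_add_epi32 before the shift. Scalar equivalence
// sketch (OpenCV's yuv_shift is 14; the helper itself is illustrative):
static inline int descale_with_offset(int x, int offset, int shift)
{
    // identical to CV_DESCALE(x + offset * (1 << shift), shift)
    return (x + offset * (1 << shift) + (1 << (shift - 1))) >> shift;
}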
v_r_p : v_b_p, v_y1), v_c4); + v_cr1 = _mm_srai_epi32(_mm_add_epi32(v_delta, v_cr1), yuv_shift); + v_cb1 = _mm_srai_epi32(_mm_add_epi32(v_delta, v_cb1), yuv_shift); + + v_y = _mm_packus_epi32(v_y0, v_y1); + v_cr = _mm_packus_epi32(v_cr0, v_cr1); + v_cb = _mm_packus_epi32(v_cb0, v_cb1); + } + + void operator()(const ushort * src, ushort * dst, int n) const + { + int scn = srccn, bidx = blueIdx, i = 0; + int C0 = coeffs[0], C1 = coeffs[1], C2 = coeffs[2], C3 = coeffs[3], C4 = coeffs[4]; + int delta = ColorChannel::half()*(1 << yuv_shift); + n *= 3; + + if (haveSIMD) + { + for ( ; i <= n - 48; i += 48, src += scn * 16) + { + __m128i v_r0 = _mm_loadu_si128((__m128i const *)(src)); + __m128i v_r1 = _mm_loadu_si128((__m128i const *)(src + 8)); + __m128i v_g0 = _mm_loadu_si128((__m128i const *)(src + 16)); + __m128i v_g1 = _mm_loadu_si128((__m128i const *)(src + 24)); + __m128i v_b0 = _mm_loadu_si128((__m128i const *)(src + 32)); + __m128i v_b1 = _mm_loadu_si128((__m128i const *)(src + 40)); + + if (scn == 4) + { + __m128i v_a0 = _mm_loadu_si128((__m128i const *)(src + 48)); + __m128i v_a1 = _mm_loadu_si128((__m128i const *)(src + 56)); + + _mm_deinterleave_epi16(v_r0, v_r1, v_g0, v_g1, + v_b0, v_b1, v_a0, v_a1); + } + else + _mm_deinterleave_epi16(v_r0, v_r1, v_g0, v_g1, v_b0, v_b1); + + __m128i v_y0 = v_zero, v_cr0 = v_zero, v_cb0 = v_zero; + process(v_r0, v_g0, v_b0, + v_y0, v_cr0, v_cb0); + + __m128i v_y1 = v_zero, v_cr1 = v_zero, v_cb1 = v_zero; + process(v_r1, v_g1, v_b1, + v_y1, v_cr1, v_cb1); + + _mm_interleave_epi16(v_y0, v_y1, v_cr0, v_cr1, v_cb0, v_cb1); + + _mm_storeu_si128((__m128i *)(dst + i), v_y0); + _mm_storeu_si128((__m128i *)(dst + i + 8), v_y1); + _mm_storeu_si128((__m128i *)(dst + i + 16), v_cr0); + _mm_storeu_si128((__m128i *)(dst + i + 24), v_cr1); + _mm_storeu_si128((__m128i *)(dst + i + 32), v_cb0); + _mm_storeu_si128((__m128i *)(dst + i + 40), v_cb1); + } + } + + for ( ; i < n; i += 3, src += scn) + { + int Y = CV_DESCALE(src[0]*C0 + src[1]*C1 + src[2]*C2, yuv_shift); + int Cr = CV_DESCALE((src[bidx^2] - Y)*C3 + delta, yuv_shift); + int Cb = CV_DESCALE((src[bidx] - Y)*C4 + delta, yuv_shift); + dst[i] = saturate_cast(Y); + dst[i+1] = saturate_cast(Cr); + dst[i+2] = saturate_cast(Cb); + } + } + + int srccn, blueIdx, coeffs[5]; + __m128i v_c0, v_c1, v_c2; + __m128i v_c3, v_c4, v_delta, v_delta2; + __m128i v_zero; + bool haveSIMD; +}; + +#endif // CV_SSE4_1 template struct YCrCb2RGB_f { @@ -1809,6 +2534,118 @@ struct YCrCb2RGB_f float32x4_t v_c0, v_c1, v_c2, v_c3, v_alpha, v_delta; }; +#elif CV_SSE2 + +template <> +struct YCrCb2RGB_f +{ + typedef float channel_type; + + YCrCb2RGB_f(int _dstcn, int _blueIdx, const float* _coeffs) + : dstcn(_dstcn), blueIdx(_blueIdx) + { + static const float coeffs0[] = {1.403f, -0.714f, -0.344f, 1.773f}; + memcpy(coeffs, _coeffs ? 
_coeffs : coeffs0, 4*sizeof(coeffs[0])); + + v_c0 = _mm_set1_ps(coeffs[0]); + v_c1 = _mm_set1_ps(coeffs[1]); + v_c2 = _mm_set1_ps(coeffs[2]); + v_c3 = _mm_set1_ps(coeffs[3]); + v_delta = _mm_set1_ps(ColorChannel::half()); + v_alpha = _mm_set1_ps(ColorChannel::max()); + + haveSIMD = checkHardwareSupport(CV_CPU_SSE2); + } + + void process(__m128 v_y, __m128 v_cr, __m128 v_cb, + __m128 & v_r, __m128 & v_g, __m128 & v_b) const + { + v_cb = _mm_sub_ps(v_cb, v_delta); + v_cr = _mm_sub_ps(v_cr, v_delta); + + v_b = _mm_mul_ps(v_cb, v_c3); + v_g = _mm_add_ps(_mm_mul_ps(v_cb, v_c2), _mm_mul_ps(v_cr, v_c1)); + v_r = _mm_mul_ps(v_cr, v_c0); + + v_b = _mm_add_ps(v_b, v_y); + v_g = _mm_add_ps(v_g, v_y); + v_r = _mm_add_ps(v_r, v_y); + + if (blueIdx == 0) + std::swap(v_b, v_r); + } + + void operator()(const float* src, float* dst, int n) const + { + int dcn = dstcn, bidx = blueIdx, i = 0; + const float delta = ColorChannel::half(), alpha = ColorChannel::max(); + float C0 = coeffs[0], C1 = coeffs[1], C2 = coeffs[2], C3 = coeffs[3]; + n *= 3; + + if (haveSIMD) + { + for ( ; i <= n - 24; i += 24, dst += 8 * dcn) + { + __m128 v_y0 = _mm_loadu_ps(src + i); + __m128 v_y1 = _mm_loadu_ps(src + i + 4); + __m128 v_cr0 = _mm_loadu_ps(src + i + 8); + __m128 v_cr1 = _mm_loadu_ps(src + i + 12); + __m128 v_cb0 = _mm_loadu_ps(src + i + 16); + __m128 v_cb1 = _mm_loadu_ps(src + i + 20); + + _mm_deinterleave_ps(v_y0, v_y1, v_cr0, v_cr1, v_cb0, v_cb1); + + __m128 v_r0, v_g0, v_b0; + process(v_y0, v_cr0, v_cb0, + v_r0, v_g0, v_b0); + + __m128 v_r1, v_g1, v_b1; + process(v_y1, v_cr1, v_cb1, + v_r1, v_g1, v_b1); + + __m128 v_a0 = v_alpha, v_a1 = v_alpha; + + if (dcn == 3) + _mm_interleave_ps(v_r0, v_r1, v_g0, v_g1, v_b0, v_b1); + else + _mm_interleave_ps(v_r0, v_r1, v_g0, v_g1, + v_b0, v_b1, v_a0, v_a1); + + _mm_storeu_ps(dst, v_r0); + _mm_storeu_ps(dst + 4, v_r1); + _mm_storeu_ps(dst + 8, v_g0); + _mm_storeu_ps(dst + 12, v_g1); + _mm_storeu_ps(dst + 16, v_b0); + _mm_storeu_ps(dst + 20, v_b1); + + if (dcn == 4) + { + _mm_storeu_ps(dst + 24, v_a0); + _mm_storeu_ps(dst + 28, v_a1); + } + } + } + + for ( ; i < n; i += 3, dst += dcn) + { + float Y = src[i], Cr = src[i+1], Cb = src[i+2]; + + float b = Y + (Cb - delta)*C3; + float g = Y + (Cb - delta)*C2 + (Cr - delta)*C1; + float r = Y + (Cr - delta)*C0; + + dst[bidx] = b; dst[1] = g; dst[bidx^2] = r; + if( dcn == 4 ) + dst[3] = alpha; + } + } + int dstcn, blueIdx; + float coeffs[4]; + + __m128 v_c0, v_c1, v_c2, v_c3, v_alpha, v_delta; + bool haveSIMD; +}; + #endif template struct YCrCb2RGB_i @@ -2096,7 +2933,185 @@ struct YCrCb2RGB_i uint16x4_t v_alpha2; }; -#endif +#elif CV_SSE2 + +template <> +struct YCrCb2RGB_i +{ + typedef uchar channel_type; + + YCrCb2RGB_i(int _dstcn, int _blueIdx, const int* _coeffs) + : dstcn(_dstcn), blueIdx(_blueIdx) + { + static const int coeffs0[] = {22987, -11698, -5636, 29049}; + memcpy(coeffs, _coeffs ? 
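// Scalar equivalent of YCrCb2RGB_f<float>::process() above: remove the chroma offset,
// then apply the 4-coefficient inverse transform, exactly as the scalar tail does.
// The helper is illustrative; 'delta' is 0.5f for float data:
static inline void ycrcb2rgb_f32(float Y, float Cr, float Cb, const float C[4],
                                 float delta, float& b, float& g, float& r)
{
    b = Y + (Cb - delta) * C[3];
    g = Y + (Cb - delta) * C[2] + (Cr - delta) * C[1];
    r = Y + (Cr - delta) * C[0];
}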
_coeffs : coeffs0, 4*sizeof(coeffs[0])); + + v_c0 = _mm_set1_epi16((short)coeffs[0]); + v_c1 = _mm_set1_epi16((short)coeffs[1]); + v_c2 = _mm_set1_epi16((short)coeffs[2]); + v_c3 = _mm_set1_epi16((short)coeffs[3]); + v_delta = _mm_set1_epi16(ColorChannel::half()); + v_delta2 = _mm_set1_epi32(1 << (yuv_shift - 1)); + v_zero = _mm_setzero_si128(); + + uchar alpha = ColorChannel::max(); + v_alpha = _mm_set1_epi8(*(char *)&alpha); + + useSSE = coeffs[0] <= std::numeric_limits::max(); + haveSIMD = checkHardwareSupport(CV_CPU_SSE2); + } + + // 16s x 8 + void process(__m128i v_y, __m128i v_cr, __m128i v_cb, + __m128i & v_r, __m128i & v_g, __m128i & v_b) const + { + v_cr = _mm_sub_epi16(v_cr, v_delta); + v_cb = _mm_sub_epi16(v_cb, v_delta); + + __m128i v_y_p = _mm_unpacklo_epi16(v_y, v_zero); + + __m128i v_mullo_3 = _mm_mullo_epi16(v_cb, v_c3); + __m128i v_mullo_2 = _mm_mullo_epi16(v_cb, v_c2); + __m128i v_mullo_1 = _mm_mullo_epi16(v_cr, v_c1); + __m128i v_mullo_0 = _mm_mullo_epi16(v_cr, v_c0); + + __m128i v_mulhi_3 = _mm_mulhi_epi16(v_cb, v_c3); + __m128i v_mulhi_2 = _mm_mulhi_epi16(v_cb, v_c2); + __m128i v_mulhi_1 = _mm_mulhi_epi16(v_cr, v_c1); + __m128i v_mulhi_0 = _mm_mulhi_epi16(v_cr, v_c0); + + __m128i v_b0 = _mm_srai_epi32(_mm_add_epi32(_mm_unpacklo_epi16(v_mullo_3, v_mulhi_3), v_delta2), yuv_shift); + __m128i v_g0 = _mm_srai_epi32(_mm_add_epi32(_mm_add_epi32(_mm_unpacklo_epi16(v_mullo_2, v_mulhi_2), + _mm_unpacklo_epi16(v_mullo_1, v_mulhi_1)), v_delta2), + yuv_shift); + __m128i v_r0 = _mm_srai_epi32(_mm_add_epi32(_mm_unpacklo_epi16(v_mullo_0, v_mulhi_0), v_delta2), yuv_shift); + + v_r0 = _mm_add_epi32(v_r0, v_y_p); + v_g0 = _mm_add_epi32(v_g0, v_y_p); + v_b0 = _mm_add_epi32(v_b0, v_y_p); + + v_y_p = _mm_unpackhi_epi16(v_y, v_zero); + + __m128i v_b1 = _mm_srai_epi32(_mm_add_epi32(_mm_unpackhi_epi16(v_mullo_3, v_mulhi_3), v_delta2), yuv_shift); + __m128i v_g1 = _mm_srai_epi32(_mm_add_epi32(_mm_add_epi32(_mm_unpackhi_epi16(v_mullo_2, v_mulhi_2), + _mm_unpackhi_epi16(v_mullo_1, v_mulhi_1)), v_delta2), + yuv_shift); + __m128i v_r1 = _mm_srai_epi32(_mm_add_epi32(_mm_unpackhi_epi16(v_mullo_0, v_mulhi_0), v_delta2), yuv_shift); + + v_r1 = _mm_add_epi32(v_r1, v_y_p); + v_g1 = _mm_add_epi32(v_g1, v_y_p); + v_b1 = _mm_add_epi32(v_b1, v_y_p); + + v_r = _mm_packs_epi32(v_r0, v_r1); + v_g = _mm_packs_epi32(v_g0, v_g1); + v_b = _mm_packs_epi32(v_b0, v_b1); + } + + void operator()(const uchar* src, uchar* dst, int n) const + { + int dcn = dstcn, bidx = blueIdx, i = 0; + const uchar delta = ColorChannel::half(), alpha = ColorChannel::max(); + int C0 = coeffs[0], C1 = coeffs[1], C2 = coeffs[2], C3 = coeffs[3]; + n *= 3; + + if (haveSIMD && useSSE) + { + for ( ; i <= n - 96; i += 96, dst += dcn * 32) + { + __m128i v_y0 = _mm_loadu_si128((__m128i const *)(src + i)); + __m128i v_y1 = _mm_loadu_si128((__m128i const *)(src + i + 16)); + __m128i v_cr0 = _mm_loadu_si128((__m128i const *)(src + i + 32)); + __m128i v_cr1 = _mm_loadu_si128((__m128i const *)(src + i + 48)); + __m128i v_cb0 = _mm_loadu_si128((__m128i const *)(src + i + 64)); + __m128i v_cb1 = _mm_loadu_si128((__m128i const *)(src + i + 80)); + + _mm_deinterleave_epi8(v_y0, v_y1, v_cr0, v_cr1, v_cb0, v_cb1); + + __m128i v_r_0 = v_zero, v_g_0 = v_zero, v_b_0 = v_zero; + process(_mm_unpacklo_epi8(v_y0, v_zero), + _mm_unpacklo_epi8(v_cr0, v_zero), + _mm_unpacklo_epi8(v_cb0, v_zero), + v_r_0, v_g_0, v_b_0); + + __m128i v_r_1 = v_zero, v_g_1 = v_zero, v_b_1 = v_zero; + process(_mm_unpackhi_epi8(v_y0, v_zero), + _mm_unpackhi_epi8(v_cr0, v_zero), + 
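// The 'useSSE' guard in the constructor above exists because this path multiplies with
// _mm_mullo_epi16 / _mm_mulhi_epi16, which is only exact while the (possibly
// user-supplied) coefficients fit in a signed 16-bit value. The low and high halves
// recombine into the exact 32-bit product, as this scalar model shows (illustrative):
static inline int mul16_reconstruct(short a, short b)
{
    int lo = (unsigned short)(a * b);     // what the _mm_mullo_epi16 lane keeps
    int hi = ((int)a * (int)b) >> 16;     // what the _mm_mulhi_epi16 lane keeps
    return hi * 65536 + lo;               // equals (int)a * (int)b
}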
_mm_unpackhi_epi8(v_cb0, v_zero), + v_r_1, v_g_1, v_b_1); + + __m128i v_r0 = _mm_packus_epi16(v_r_0, v_r_1); + __m128i v_g0 = _mm_packus_epi16(v_g_0, v_g_1); + __m128i v_b0 = _mm_packus_epi16(v_b_0, v_b_1); + + process(_mm_unpacklo_epi8(v_y1, v_zero), + _mm_unpacklo_epi8(v_cr1, v_zero), + _mm_unpacklo_epi8(v_cb1, v_zero), + v_r_0, v_g_0, v_b_0); + + process(_mm_unpackhi_epi8(v_y1, v_zero), + _mm_unpackhi_epi8(v_cr1, v_zero), + _mm_unpackhi_epi8(v_cb1, v_zero), + v_r_1, v_g_1, v_b_1); + + __m128i v_r1 = _mm_packus_epi16(v_r_0, v_r_1); + __m128i v_g1 = _mm_packus_epi16(v_g_0, v_g_1); + __m128i v_b1 = _mm_packus_epi16(v_b_0, v_b_1); + + if (bidx == 0) + { + std::swap(v_r0, v_b0); + std::swap(v_r1, v_b1); + } + + __m128i v_a0 = v_alpha, v_a1 = v_alpha; + + if (dcn == 3) + _mm_interleave_epi8(v_r0, v_r1, v_g0, v_g1, v_b0, v_b1); + else + _mm_interleave_epi8(v_r0, v_r1, v_g0, v_g1, + v_b0, v_b1, v_a0, v_a1); + + _mm_storeu_si128((__m128i *)(dst), v_r0); + _mm_storeu_si128((__m128i *)(dst + 16), v_r1); + _mm_storeu_si128((__m128i *)(dst + 32), v_g0); + _mm_storeu_si128((__m128i *)(dst + 48), v_g1); + _mm_storeu_si128((__m128i *)(dst + 64), v_b0); + _mm_storeu_si128((__m128i *)(dst + 80), v_b1); + + if (dcn == 4) + { + _mm_storeu_si128((__m128i *)(dst + 96), v_a0); + _mm_storeu_si128((__m128i *)(dst + 112), v_a1); + } + } + } + + for ( ; i < n; i += 3, dst += dcn) + { + uchar Y = src[i]; + uchar Cr = src[i+1]; + uchar Cb = src[i+2]; + + int b = Y + CV_DESCALE((Cb - delta)*C3, yuv_shift); + int g = Y + CV_DESCALE((Cb - delta)*C2 + (Cr - delta)*C1, yuv_shift); + int r = Y + CV_DESCALE((Cr - delta)*C0, yuv_shift); + + dst[bidx] = saturate_cast(b); + dst[1] = saturate_cast(g); + dst[bidx^2] = saturate_cast(r); + if( dcn == 4 ) + dst[3] = alpha; + } + } + int dstcn, blueIdx; + int coeffs[4]; + bool useSSE, haveSIMD; + + __m128i v_c0, v_c1, v_c2, v_c3, v_delta2; + __m128i v_delta, v_alpha, v_zero; +}; + +#endif // CV_SSE2 ////////////////////////////////////// RGB <-> XYZ /////////////////////////////////////// @@ -2135,20 +3150,91 @@ template struct RGB2XYZ_f C3 = coeffs[3], C4 = coeffs[4], C5 = coeffs[5], C6 = coeffs[6], C7 = coeffs[7], C8 = coeffs[8]; - n *= 3; - for(int i = 0; i < n; i += 3, src += scn) + n *= 3; + for(int i = 0; i < n; i += 3, src += scn) + { + _Tp X = saturate_cast<_Tp>(src[0]*C0 + src[1]*C1 + src[2]*C2); + _Tp Y = saturate_cast<_Tp>(src[0]*C3 + src[1]*C4 + src[2]*C5); + _Tp Z = saturate_cast<_Tp>(src[0]*C6 + src[1]*C7 + src[2]*C8); + dst[i] = X; dst[i+1] = Y; dst[i+2] = Z; + } + } + int srccn; + float coeffs[9]; +}; + +#if CV_NEON + +template <> +struct RGB2XYZ_f +{ + typedef float channel_type; + + RGB2XYZ_f(int _srccn, int blueIdx, const float* _coeffs) : srccn(_srccn) + { + memcpy(coeffs, _coeffs ? 
_coeffs : sRGB2XYZ_D65, 9*sizeof(coeffs[0])); + if(blueIdx == 0) + { + std::swap(coeffs[0], coeffs[2]); + std::swap(coeffs[3], coeffs[5]); + std::swap(coeffs[6], coeffs[8]); + } + + v_c0 = vdupq_n_f32(coeffs[0]); + v_c1 = vdupq_n_f32(coeffs[1]); + v_c2 = vdupq_n_f32(coeffs[2]); + v_c3 = vdupq_n_f32(coeffs[3]); + v_c4 = vdupq_n_f32(coeffs[4]); + v_c5 = vdupq_n_f32(coeffs[5]); + v_c6 = vdupq_n_f32(coeffs[6]); + v_c7 = vdupq_n_f32(coeffs[7]); + v_c8 = vdupq_n_f32(coeffs[8]); + } + + void operator()(const float* src, float* dst, int n) const + { + int scn = srccn, i = 0; + float C0 = coeffs[0], C1 = coeffs[1], C2 = coeffs[2], + C3 = coeffs[3], C4 = coeffs[4], C5 = coeffs[5], + C6 = coeffs[6], C7 = coeffs[7], C8 = coeffs[8]; + + n *= 3; + + if (scn == 3) + for ( ; i <= n - 12; i += 12, src += 12) + { + float32x4x3_t v_src = vld3q_f32(src), v_dst; + v_dst.val[0] = vmlaq_f32(vmlaq_f32(vmulq_f32(v_src.val[0], v_c0), v_src.val[1], v_c1), v_src.val[2], v_c2); + v_dst.val[1] = vmlaq_f32(vmlaq_f32(vmulq_f32(v_src.val[0], v_c3), v_src.val[1], v_c4), v_src.val[2], v_c5); + v_dst.val[2] = vmlaq_f32(vmlaq_f32(vmulq_f32(v_src.val[0], v_c6), v_src.val[1], v_c7), v_src.val[2], v_c8); + vst3q_f32(dst + i, v_dst); + } + else + for ( ; i <= n - 12; i += 12, src += 16) + { + float32x4x4_t v_src = vld4q_f32(src); + float32x4x3_t v_dst; + v_dst.val[0] = vmlaq_f32(vmlaq_f32(vmulq_f32(v_src.val[0], v_c0), v_src.val[1], v_c1), v_src.val[2], v_c2); + v_dst.val[1] = vmlaq_f32(vmlaq_f32(vmulq_f32(v_src.val[0], v_c3), v_src.val[1], v_c4), v_src.val[2], v_c5); + v_dst.val[2] = vmlaq_f32(vmlaq_f32(vmulq_f32(v_src.val[0], v_c6), v_src.val[1], v_c7), v_src.val[2], v_c8); + vst3q_f32(dst + i, v_dst); + } + + for ( ; i < n; i += 3, src += scn) { - _Tp X = saturate_cast<_Tp>(src[0]*C0 + src[1]*C1 + src[2]*C2); - _Tp Y = saturate_cast<_Tp>(src[0]*C3 + src[1]*C4 + src[2]*C5); - _Tp Z = saturate_cast<_Tp>(src[0]*C6 + src[1]*C7 + src[2]*C8); + float X = saturate_cast(src[0]*C0 + src[1]*C1 + src[2]*C2); + float Y = saturate_cast(src[0]*C3 + src[1]*C4 + src[2]*C5); + float Z = saturate_cast(src[0]*C6 + src[1]*C7 + src[2]*C8); dst[i] = X; dst[i+1] = Y; dst[i+2] = Z; } } + int srccn; float coeffs[9]; + float32x4_t v_c0, v_c1, v_c2, v_c3, v_c4, v_c5, v_c6, v_c7, v_c8; }; -#if CV_NEON +#elif CV_SSE2 template <> struct RGB2XYZ_f @@ -2165,15 +3251,33 @@ struct RGB2XYZ_f std::swap(coeffs[6], coeffs[8]); } - v_c0 = vdupq_n_f32(coeffs[0]); - v_c1 = vdupq_n_f32(coeffs[1]); - v_c2 = vdupq_n_f32(coeffs[2]); - v_c3 = vdupq_n_f32(coeffs[3]); - v_c4 = vdupq_n_f32(coeffs[4]); - v_c5 = vdupq_n_f32(coeffs[5]); - v_c6 = vdupq_n_f32(coeffs[6]); - v_c7 = vdupq_n_f32(coeffs[7]); - v_c8 = vdupq_n_f32(coeffs[8]); + v_c0 = _mm_set1_ps(coeffs[0]); + v_c1 = _mm_set1_ps(coeffs[1]); + v_c2 = _mm_set1_ps(coeffs[2]); + v_c3 = _mm_set1_ps(coeffs[3]); + v_c4 = _mm_set1_ps(coeffs[4]); + v_c5 = _mm_set1_ps(coeffs[5]); + v_c6 = _mm_set1_ps(coeffs[6]); + v_c7 = _mm_set1_ps(coeffs[7]); + v_c8 = _mm_set1_ps(coeffs[8]); + + haveSIMD = checkHardwareSupport(CV_CPU_SSE2); + } + + void process(__m128 v_r, __m128 v_g, __m128 v_b, + __m128 & v_x, __m128 & v_y, __m128 & v_z) const + { + v_x = _mm_mul_ps(v_r, v_c0); + v_x = _mm_add_ps(v_x, _mm_mul_ps(v_g, v_c1)); + v_x = _mm_add_ps(v_x, _mm_mul_ps(v_b, v_c2)); + + v_y = _mm_mul_ps(v_r, v_c3); + v_y = _mm_add_ps(v_y, _mm_mul_ps(v_g, v_c4)); + v_y = _mm_add_ps(v_y, _mm_mul_ps(v_b, v_c5)); + + v_z = _mm_mul_ps(v_r, v_c6); + v_z = _mm_add_ps(v_z, _mm_mul_ps(v_g, v_c7)); + v_z = _mm_add_ps(v_z, _mm_mul_ps(v_b, v_c8)); } void 
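// RGB -> XYZ is a plain 3x3 matrix product; the NEON and SSE2 specializations above
// only vectorize this per-pixel expression. sRGB2XYZ_D65 is the default matrix, and
// the constructor swaps its columns when the input is BGR (blueIdx == 0). Scalar form
// (helper name is illustrative):
static inline void rgb2xyz_f32(const float* pix, const float M[9],
                               float& X, float& Y, float& Z)
{
    X = pix[0] * M[0] + pix[1] * M[1] + pix[2] * M[2];
    Y = pix[0] * M[3] + pix[1] * M[4] + pix[2] * M[5];
    Z = pix[0] * M[6] + pix[1] * M[7] + pix[2] * M[8];
}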
operator()(const float* src, float* dst, int n) const @@ -2185,25 +3289,46 @@ struct RGB2XYZ_f n *= 3; - if (scn == 3) - for ( ; i <= n - 12; i += 12, src += 12) - { - float32x4x3_t v_src = vld3q_f32(src), v_dst; - v_dst.val[0] = vmlaq_f32(vmlaq_f32(vmulq_f32(v_src.val[0], v_c0), v_src.val[1], v_c1), v_src.val[2], v_c2); - v_dst.val[1] = vmlaq_f32(vmlaq_f32(vmulq_f32(v_src.val[0], v_c3), v_src.val[1], v_c4), v_src.val[2], v_c5); - v_dst.val[2] = vmlaq_f32(vmlaq_f32(vmulq_f32(v_src.val[0], v_c6), v_src.val[1], v_c7), v_src.val[2], v_c8); - vst3q_f32(dst + i, v_dst); - } - else - for ( ; i <= n - 12; i += 12, src += 16) + if (haveSIMD) + { + for ( ; i <= n - 24; i += 24, src += 8 * scn) { - float32x4x4_t v_src = vld4q_f32(src); - float32x4x3_t v_dst; - v_dst.val[0] = vmlaq_f32(vmlaq_f32(vmulq_f32(v_src.val[0], v_c0), v_src.val[1], v_c1), v_src.val[2], v_c2); - v_dst.val[1] = vmlaq_f32(vmlaq_f32(vmulq_f32(v_src.val[0], v_c3), v_src.val[1], v_c4), v_src.val[2], v_c5); - v_dst.val[2] = vmlaq_f32(vmlaq_f32(vmulq_f32(v_src.val[0], v_c6), v_src.val[1], v_c7), v_src.val[2], v_c8); - vst3q_f32(dst + i, v_dst); + __m128 v_r0 = _mm_loadu_ps(src); + __m128 v_r1 = _mm_loadu_ps(src + 4); + __m128 v_g0 = _mm_loadu_ps(src + 8); + __m128 v_g1 = _mm_loadu_ps(src + 12); + __m128 v_b0 = _mm_loadu_ps(src + 16); + __m128 v_b1 = _mm_loadu_ps(src + 20); + + if (scn == 4) + { + __m128 v_a0 = _mm_loadu_ps(src + 24); + __m128 v_a1 = _mm_loadu_ps(src + 28); + + _mm_deinterleave_ps(v_r0, v_r1, v_g0, v_g1, + v_b0, v_b1, v_a0, v_a1); + } + else + _mm_deinterleave_ps(v_r0, v_r1, v_g0, v_g1, v_b0, v_b1); + + __m128 v_x0, v_y0, v_z0; + process(v_r0, v_g0, v_b0, + v_x0, v_y0, v_z0); + + __m128 v_x1, v_y1, v_z1; + process(v_r1, v_g1, v_b1, + v_x1, v_y1, v_z1); + + _mm_interleave_ps(v_x0, v_x1, v_y0, v_y1, v_z0, v_z1); + + _mm_storeu_ps(dst + i, v_x0); + _mm_storeu_ps(dst + i + 4, v_x1); + _mm_storeu_ps(dst + i + 8, v_y0); + _mm_storeu_ps(dst + i + 12, v_y1); + _mm_storeu_ps(dst + i + 16, v_z0); + _mm_storeu_ps(dst + i + 20, v_z1); } + } for ( ; i < n; i += 3, src += scn) { @@ -2216,9 +3341,11 @@ struct RGB2XYZ_f int srccn; float coeffs[9]; - float32x4_t v_c0, v_c1, v_c2, v_c3, v_c4, v_c5, v_c6, v_c7, v_c8; + __m128 v_c0, v_c1, v_c2, v_c3, v_c4, v_c5, v_c6, v_c7, v_c8; + bool haveSIMD; }; + #endif template struct RGB2XYZ_i @@ -2249,6 +3376,7 @@ template struct RGB2XYZ_i C3 = coeffs[3], C4 = coeffs[4], C5 = coeffs[5], C6 = coeffs[6], C7 = coeffs[7], C8 = coeffs[8]; n *= 3; + for(int i = 0; i < n; i += 3, src += scn) { int X = CV_DESCALE(src[0]*C0 + src[1]*C1 + src[2]*C2, xyz_shift); @@ -2542,6 +3670,130 @@ template struct XYZ2RGB_f float coeffs[9]; }; +#if CV_SSE2 + +template <> +struct XYZ2RGB_f +{ + typedef float channel_type; + + XYZ2RGB_f(int _dstcn, int _blueIdx, const float* _coeffs) + : dstcn(_dstcn), blueIdx(_blueIdx) + { + memcpy(coeffs, _coeffs ? 
_coeffs : XYZ2sRGB_D65, 9*sizeof(coeffs[0])); + if(blueIdx == 0) + { + std::swap(coeffs[0], coeffs[6]); + std::swap(coeffs[1], coeffs[7]); + std::swap(coeffs[2], coeffs[8]); + } + + v_c0 = _mm_set1_ps(coeffs[0]); + v_c1 = _mm_set1_ps(coeffs[1]); + v_c2 = _mm_set1_ps(coeffs[2]); + v_c3 = _mm_set1_ps(coeffs[3]); + v_c4 = _mm_set1_ps(coeffs[4]); + v_c5 = _mm_set1_ps(coeffs[5]); + v_c6 = _mm_set1_ps(coeffs[6]); + v_c7 = _mm_set1_ps(coeffs[7]); + v_c8 = _mm_set1_ps(coeffs[8]); + + v_alpha = _mm_set1_ps(ColorChannel::max()); + + haveSIMD = checkHardwareSupport(CV_CPU_SSE2); + } + + void process(__m128 v_x, __m128 v_y, __m128 v_z, + __m128 & v_r, __m128 & v_g, __m128 & v_b) const + { + v_b = _mm_mul_ps(v_x, v_c0); + v_b = _mm_add_ps(v_b, _mm_mul_ps(v_y, v_c1)); + v_b = _mm_add_ps(v_b, _mm_mul_ps(v_z, v_c2)); + + v_g = _mm_mul_ps(v_x, v_c3); + v_g = _mm_add_ps(v_g, _mm_mul_ps(v_y, v_c4)); + v_g = _mm_add_ps(v_g, _mm_mul_ps(v_z, v_c5)); + + v_r = _mm_mul_ps(v_x, v_c6); + v_r = _mm_add_ps(v_r, _mm_mul_ps(v_y, v_c7)); + v_r = _mm_add_ps(v_r, _mm_mul_ps(v_z, v_c8)); + } + + void operator()(const float* src, float* dst, int n) const + { + int dcn = dstcn; + float alpha = ColorChannel::max(); + float C0 = coeffs[0], C1 = coeffs[1], C2 = coeffs[2], + C3 = coeffs[3], C4 = coeffs[4], C5 = coeffs[5], + C6 = coeffs[6], C7 = coeffs[7], C8 = coeffs[8]; + n *= 3; + int i = 0; + + if (haveSIMD) + { + for ( ; i <= n - 24; i += 24, dst += 8 * dcn) + { + __m128 v_x0 = _mm_loadu_ps(src + i); + __m128 v_x1 = _mm_loadu_ps(src + i + 4); + __m128 v_y0 = _mm_loadu_ps(src + i + 8); + __m128 v_y1 = _mm_loadu_ps(src + i + 12); + __m128 v_z0 = _mm_loadu_ps(src + i + 16); + __m128 v_z1 = _mm_loadu_ps(src + i + 20); + + _mm_deinterleave_ps(v_x0, v_x1, v_y0, v_y1, v_z0, v_z1); + + __m128 v_r0, v_g0, v_b0; + process(v_x0, v_y0, v_z0, + v_r0, v_g0, v_b0); + + __m128 v_r1, v_g1, v_b1; + process(v_x1, v_y1, v_z1, + v_r1, v_g1, v_b1); + + __m128 v_a0 = v_alpha, v_a1 = v_alpha; + + if (dcn == 4) + _mm_interleave_ps(v_b0, v_b1, v_g0, v_g1, + v_r0, v_r1, v_a0, v_a1); + else + _mm_interleave_ps(v_b0, v_b1, v_g0, v_g1, v_r0, v_r1); + + _mm_storeu_ps(dst, v_b0); + _mm_storeu_ps(dst + 4, v_b1); + _mm_storeu_ps(dst + 8, v_g0); + _mm_storeu_ps(dst + 12, v_g1); + _mm_storeu_ps(dst + 16, v_r0); + _mm_storeu_ps(dst + 20, v_r1); + + if (dcn == 4) + { + _mm_storeu_ps(dst + 24, v_a0); + _mm_storeu_ps(dst + 28, v_a1); + } + } + + } + + for( ; i < n; i += 3, dst += dcn) + { + float B = src[i]*C0 + src[i+1]*C1 + src[i+2]*C2; + float G = src[i]*C3 + src[i+1]*C4 + src[i+2]*C5; + float R = src[i]*C6 + src[i+1]*C7 + src[i+2]*C8; + dst[0] = B; dst[1] = G; dst[2] = R; + if( dcn == 4 ) + dst[3] = alpha; + } + } + int dstcn, blueIdx; + float coeffs[9]; + + __m128 v_c0, v_c1, v_c2, v_c3, v_c4, v_c5, v_c6, v_c7, v_c8; + __m128 v_alpha; + bool haveSIMD; +}; + +#endif // CV_SSE2 + template struct XYZ2RGB_i { @@ -3056,14 +4308,49 @@ struct HSV2RGB_b v_scale_inv = vdupq_n_f32(1.f/255.f); v_scale = vdupq_n_f32(255.f); v_alpha = vdup_n_u8(ColorChannel::max()); + #elif CV_SSE2 + v_scale_inv = _mm_set1_ps(1.f/255.f); + v_scale = _mm_set1_ps(255.0f); + v_zero = _mm_setzero_si128(); + haveSIMD = checkHardwareSupport(CV_CPU_SSE2); #endif } + #if CV_SSE2 + // 16s x 8 + void process(__m128i v_r, __m128i v_g, __m128i v_b, + float * buf) const + { + __m128 v_r0 = _mm_cvtepi32_ps(_mm_unpacklo_epi16(v_r, v_zero)); + __m128 v_g0 = _mm_cvtepi32_ps(_mm_unpacklo_epi16(v_g, v_zero)); + __m128 v_b0 = _mm_cvtepi32_ps(_mm_unpacklo_epi16(v_b, v_zero)); + + __m128 v_r1 = 
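// XYZ -> RGB applies the inverse matrix (XYZ2sRGB_D65 by default). The constructor
// above swaps matrix rows 0 and 2 when blueIdx == 0, so the first computed channel is
// always the one stored first in the requested output order. Scalar form of process()
// (helper name is illustrative):
static inline void xyz2rgb_f32(const float* xyz, const float M[9], float* out3)
{
    out3[0] = xyz[0] * M[0] + xyz[1] * M[1] + xyz[2] * M[2];
    out3[1] = xyz[0] * M[3] + xyz[1] * M[4] + xyz[2] * M[5];
    out3[2] = xyz[0] * M[6] + xyz[1] * M[7] + xyz[2] * M[8];
}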
_mm_cvtepi32_ps(_mm_unpackhi_epi16(v_r, v_zero)); + __m128 v_g1 = _mm_cvtepi32_ps(_mm_unpackhi_epi16(v_g, v_zero)); + __m128 v_b1 = _mm_cvtepi32_ps(_mm_unpackhi_epi16(v_b, v_zero)); + + v_g0 = _mm_mul_ps(v_g0, v_scale_inv); + v_b0 = _mm_mul_ps(v_b0, v_scale_inv); + + v_g1 = _mm_mul_ps(v_g1, v_scale_inv); + v_b1 = _mm_mul_ps(v_b1, v_scale_inv); + + _mm_interleave_ps(v_r0, v_r1, v_g0, v_g1, v_b0, v_b1); + + _mm_store_ps(buf, v_r0); + _mm_store_ps(buf + 4, v_r1); + _mm_store_ps(buf + 8, v_g0); + _mm_store_ps(buf + 12, v_g1); + _mm_store_ps(buf + 16, v_b0); + _mm_store_ps(buf + 20, v_b1); + } + #endif + void operator()(const uchar* src, uchar* dst, int n) const { int i, j, dcn = dstcn; uchar alpha = ColorChannel::max(); - float buf[3*BLOCK_SIZE]; + float CV_DECL_ALIGNED(16) buf[3*BLOCK_SIZE]; for( i = 0; i < n; i += BLOCK_SIZE, src += BLOCK_SIZE*3 ) { @@ -3089,6 +4376,41 @@ struct HSV2RGB_b v_dst.val[2] = vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_t2))), v_scale_inv); vst3q_f32(buf + j + 12, v_dst); } + #elif CV_SSE2 + if (haveSIMD) + { + for ( ; j <= (dn - 32) * 3; j += 96) + { + __m128i v_r0 = _mm_loadu_si128((__m128i const *)(src + j)); + __m128i v_r1 = _mm_loadu_si128((__m128i const *)(src + j + 16)); + __m128i v_g0 = _mm_loadu_si128((__m128i const *)(src + j + 32)); + __m128i v_g1 = _mm_loadu_si128((__m128i const *)(src + j + 48)); + __m128i v_b0 = _mm_loadu_si128((__m128i const *)(src + j + 64)); + __m128i v_b1 = _mm_loadu_si128((__m128i const *)(src + j + 80)); + + _mm_deinterleave_epi8(v_r0, v_r1, v_g0, v_g1, v_b0, v_b1); + + process(_mm_unpacklo_epi8(v_r0, v_zero), + _mm_unpacklo_epi8(v_g0, v_zero), + _mm_unpacklo_epi8(v_b0, v_zero), + buf + j); + + process(_mm_unpackhi_epi8(v_r0, v_zero), + _mm_unpackhi_epi8(v_g0, v_zero), + _mm_unpackhi_epi8(v_b0, v_zero), + buf + j + 24); + + process(_mm_unpacklo_epi8(v_r1, v_zero), + _mm_unpacklo_epi8(v_g1, v_zero), + _mm_unpacklo_epi8(v_b1, v_zero), + buf + j + 48); + + process(_mm_unpackhi_epi8(v_r1, v_zero), + _mm_unpackhi_epi8(v_g1, v_zero), + _mm_unpackhi_epi8(v_b1, v_zero), + buf + j + 72); + } + } #endif for( ; j < dn*3; j += 3 ) @@ -3129,6 +4451,28 @@ struct HSV2RGB_b vst3_u8(dst, v_dst); } } + #elif CV_SSE2 + if (dcn == 3 && haveSIMD) + { + for ( ; j <= (dn * 3 - 16); j += 16, dst += 16) + { + __m128 v_src0 = _mm_mul_ps(_mm_load_ps(buf + j), v_scale); + __m128 v_src1 = _mm_mul_ps(_mm_load_ps(buf + j + 4), v_scale); + __m128 v_src2 = _mm_mul_ps(_mm_load_ps(buf + j + 8), v_scale); + __m128 v_src3 = _mm_mul_ps(_mm_load_ps(buf + j + 12), v_scale); + + __m128i v_dst0 = _mm_packs_epi32(_mm_cvtps_epi32(v_src0), + _mm_cvtps_epi32(v_src1)); + __m128i v_dst1 = _mm_packs_epi32(_mm_cvtps_epi32(v_src2), + _mm_cvtps_epi32(v_src3)); + + _mm_storeu_si128((__m128i *)dst, _mm_packus_epi16(v_dst0, v_dst1)); + } + + int jr = j % 3; + if (jr) + dst -= jr, j -= jr; + } #endif for( ; j < dn*3; j += 3, dst += dcn ) @@ -3147,6 +4491,10 @@ struct HSV2RGB_b #if CV_NEON float32x4_t v_scale, v_scale_inv; uint8x8_t v_alpha; + #elif CV_SSE2 + __m128 v_scale_inv, v_scale; + __m128i v_zero; + bool haveSIMD; #endif }; @@ -3218,13 +4566,42 @@ struct RGB2HLS_b v_scale_inv = vdupq_n_f32(1.f/255.f); v_scale = vdupq_n_f32(255.f); v_alpha = vdup_n_u8(ColorChannel::max()); + #elif CV_SSE2 + v_scale_inv = _mm_set1_ps(1.f/255.f); + v_scale = _mm_set1_ps(255.f); + v_zero = _mm_setzero_si128(); + haveSIMD = checkHardwareSupport(CV_CPU_SSE2); #endif } + #if CV_SSE2 + void process(const float * buf, + __m128i & v_h, __m128i & v_l, __m128i & v_s) const + { + __m128 v_h0f = 
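// The CV_DECL_ALIGNED(16) added to the stack buffers above is not cosmetic: the SSE2
// helpers read and write 'buf' with the aligned _mm_load_ps / _mm_store_ps, which
// fault on a misaligned address, while user memory keeps the unaligned *_loadu /
// *_storeu forms. A minimal model of the same staging pattern (names are illustrative):
#include <emmintrin.h>
void staging_buffer_sketch(const unsigned char* src, int n)   // n <= 256, multiple of 4
{
    alignas(16) float buf[256];                 // plays the role of CV_DECL_ALIGNED(16)
    for (int i = 0; i < n; ++i)
        buf[i] = src[i] * (1.0f / 255.0f);      // widen to float, as the loops above do
    for (int i = 0; i < n; i += 4)
    {
        __m128 v = _mm_load_ps(buf + i);        // aligned load, safe because of alignas
        v = _mm_mul_ps(v, _mm_set1_ps(255.0f));
        _mm_store_ps(buf + i, v);               // aligned store back into the buffer
    }
}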
_mm_load_ps(buf); + __m128 v_h1f = _mm_load_ps(buf + 4); + __m128 v_l0f = _mm_load_ps(buf + 8); + __m128 v_l1f = _mm_load_ps(buf + 12); + __m128 v_s0f = _mm_load_ps(buf + 16); + __m128 v_s1f = _mm_load_ps(buf + 20); + + _mm_deinterleave_ps(v_h0f, v_h1f, v_l0f, v_l1f, v_s0f, v_s1f); + + v_l0f = _mm_mul_ps(v_l0f, v_scale); + v_l1f = _mm_mul_ps(v_l1f, v_scale); + v_s0f = _mm_mul_ps(v_s0f, v_scale); + v_s1f = _mm_mul_ps(v_s1f, v_scale); + + v_h = _mm_packs_epi32(_mm_cvtps_epi32(v_h0f), _mm_cvtps_epi32(v_h1f)); + v_l = _mm_packs_epi32(_mm_cvtps_epi32(v_l0f), _mm_cvtps_epi32(v_l1f)); + v_s = _mm_packs_epi32(_mm_cvtps_epi32(v_s0f), _mm_cvtps_epi32(v_s1f)); + } + #endif + void operator()(const uchar* src, uchar* dst, int n) const { int i, j, scn = srccn; - float buf[3*BLOCK_SIZE]; + float CV_DECL_ALIGNED(16) buf[3*BLOCK_SIZE]; for( i = 0; i < n; i += BLOCK_SIZE, dst += BLOCK_SIZE*3 ) { @@ -3262,6 +4639,26 @@ struct RGB2HLS_b v_dst.val[2] = vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_t2))), v_scale_inv); vst3q_f32(buf + j + 12, v_dst); } + #elif CV_SSE2 + if (scn == 3 && haveSIMD) + { + for ( ; j <= (dn * 3 - 16); j += 16, src += 16) + { + __m128i v_src = _mm_loadu_si128((__m128i const *)src); + + __m128i v_src_p = _mm_unpacklo_epi8(v_src, v_zero); + _mm_store_ps(buf + j, _mm_mul_ps(_mm_cvtepi32_ps(_mm_unpacklo_epi16(v_src_p, v_zero)), v_scale_inv)); + _mm_store_ps(buf + j + 4, _mm_mul_ps(_mm_cvtepi32_ps(_mm_unpackhi_epi16(v_src_p, v_zero)), v_scale_inv)); + + v_src_p = _mm_unpackhi_epi8(v_src, v_zero); + _mm_store_ps(buf + j + 8, _mm_mul_ps(_mm_cvtepi32_ps(_mm_unpacklo_epi16(v_src_p, v_zero)), v_scale_inv)); + _mm_store_ps(buf + j + 12, _mm_mul_ps(_mm_cvtepi32_ps(_mm_unpackhi_epi16(v_src_p, v_zero)), v_scale_inv)); + } + + int jr = j % 3; + if (jr) + src -= jr, j -= jr; + } #endif for( ; j < dn*3; j += 3, src += scn ) { @@ -3286,6 +4683,43 @@ struct RGB2HLS_b vqmovn_u32(cv_vrndq_u32_f32(vmulq_f32(v_src1.val[2], v_scale))))); vst3_u8(dst + j, v_dst); } + #elif CV_SSE2 + if (haveSIMD) + { + for ( ; j <= (dn - 32) * 3; j += 96) + { + __m128i v_h_0, v_l_0, v_s_0; + process(buf + j, + v_h_0, v_l_0, v_s_0); + + __m128i v_h_1, v_l_1, v_s_1; + process(buf + j + 24, + v_h_1, v_l_1, v_s_1); + + __m128i v_h0 = _mm_packus_epi16(v_h_0, v_h_1); + __m128i v_l0 = _mm_packus_epi16(v_l_0, v_l_1); + __m128i v_s0 = _mm_packus_epi16(v_s_0, v_s_1); + + process(buf + j + 48, + v_h_0, v_l_0, v_s_0); + + process(buf + j + 72, + v_h_1, v_l_1, v_s_1); + + __m128i v_h1 = _mm_packus_epi16(v_h_0, v_h_1); + __m128i v_l1 = _mm_packus_epi16(v_l_0, v_l_1); + __m128i v_s1 = _mm_packus_epi16(v_s_0, v_s_1); + + _mm_interleave_epi8(v_h0, v_h1, v_l0, v_l1, v_s0, v_s1); + + _mm_storeu_si128((__m128i *)(dst + j), v_h0); + _mm_storeu_si128((__m128i *)(dst + j + 16), v_h1); + _mm_storeu_si128((__m128i *)(dst + j + 32), v_l0); + _mm_storeu_si128((__m128i *)(dst + j + 48), v_l1); + _mm_storeu_si128((__m128i *)(dst + j + 64), v_s0); + _mm_storeu_si128((__m128i *)(dst + j + 80), v_s1); + } + } #endif for( ; j < dn*3; j += 3 ) { @@ -3301,6 +4735,10 @@ struct RGB2HLS_b #if CV_NEON float32x4_t v_scale, v_scale_inv; uint8x8_t v_alpha; + #elif CV_SSE2 + __m128 v_scale, v_scale_inv; + __m128i v_zero; + bool haveSIMD; #endif }; @@ -3380,14 +4818,49 @@ struct HLS2RGB_b v_scale_inv = vdupq_n_f32(1.f/255.f); v_scale = vdupq_n_f32(255.f); v_alpha = vdup_n_u8(ColorChannel::max()); + #elif CV_SSE2 + v_scale_inv = _mm_set1_ps(1.f/255.f); + v_scale = _mm_set1_ps(255.f); + v_zero = _mm_setzero_si128(); + haveSIMD = checkHardwareSupport(CV_CPU_SSE2); 
#endif } + #if CV_SSE2 + // 16s x 8 + void process(__m128i v_r, __m128i v_g, __m128i v_b, + float * buf) const + { + __m128 v_r0 = _mm_cvtepi32_ps(_mm_unpacklo_epi16(v_r, v_zero)); + __m128 v_g0 = _mm_cvtepi32_ps(_mm_unpacklo_epi16(v_g, v_zero)); + __m128 v_b0 = _mm_cvtepi32_ps(_mm_unpacklo_epi16(v_b, v_zero)); + + __m128 v_r1 = _mm_cvtepi32_ps(_mm_unpackhi_epi16(v_r, v_zero)); + __m128 v_g1 = _mm_cvtepi32_ps(_mm_unpackhi_epi16(v_g, v_zero)); + __m128 v_b1 = _mm_cvtepi32_ps(_mm_unpackhi_epi16(v_b, v_zero)); + + v_g0 = _mm_mul_ps(v_g0, v_scale_inv); + v_b0 = _mm_mul_ps(v_b0, v_scale_inv); + + v_g1 = _mm_mul_ps(v_g1, v_scale_inv); + v_b1 = _mm_mul_ps(v_b1, v_scale_inv); + + _mm_interleave_ps(v_r0, v_r1, v_g0, v_g1, v_b0, v_b1); + + _mm_store_ps(buf, v_r0); + _mm_store_ps(buf + 4, v_r1); + _mm_store_ps(buf + 8, v_g0); + _mm_store_ps(buf + 12, v_g1); + _mm_store_ps(buf + 16, v_b0); + _mm_store_ps(buf + 20, v_b1); + } + #endif + void operator()(const uchar* src, uchar* dst, int n) const { int i, j, dcn = dstcn; uchar alpha = ColorChannel::max(); - float buf[3*BLOCK_SIZE]; + float CV_DECL_ALIGNED(16) buf[3*BLOCK_SIZE]; for( i = 0; i < n; i += BLOCK_SIZE, src += BLOCK_SIZE*3 ) { @@ -3413,6 +4886,41 @@ struct HLS2RGB_b v_dst.val[2] = vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_t2))), v_scale_inv); vst3q_f32(buf + j + 12, v_dst); } + #elif CV_SSE2 + if (haveSIMD) + { + for ( ; j <= (dn - 32) * 3; j += 96) + { + __m128i v_r0 = _mm_loadu_si128((__m128i const *)(src + j)); + __m128i v_r1 = _mm_loadu_si128((__m128i const *)(src + j + 16)); + __m128i v_g0 = _mm_loadu_si128((__m128i const *)(src + j + 32)); + __m128i v_g1 = _mm_loadu_si128((__m128i const *)(src + j + 48)); + __m128i v_b0 = _mm_loadu_si128((__m128i const *)(src + j + 64)); + __m128i v_b1 = _mm_loadu_si128((__m128i const *)(src + j + 80)); + + _mm_deinterleave_epi8(v_r0, v_r1, v_g0, v_g1, v_b0, v_b1); + + process(_mm_unpacklo_epi8(v_r0, v_zero), + _mm_unpacklo_epi8(v_g0, v_zero), + _mm_unpacklo_epi8(v_b0, v_zero), + buf + j); + + process(_mm_unpackhi_epi8(v_r0, v_zero), + _mm_unpackhi_epi8(v_g0, v_zero), + _mm_unpackhi_epi8(v_b0, v_zero), + buf + j + 24); + + process(_mm_unpacklo_epi8(v_r1, v_zero), + _mm_unpacklo_epi8(v_g1, v_zero), + _mm_unpacklo_epi8(v_b1, v_zero), + buf + j + 48); + + process(_mm_unpackhi_epi8(v_r1, v_zero), + _mm_unpackhi_epi8(v_g1, v_zero), + _mm_unpackhi_epi8(v_b1, v_zero), + buf + j + 72); + } + } #endif for( ; j < dn*3; j += 3 ) { @@ -3452,7 +4960,30 @@ struct HLS2RGB_b vst3_u8(dst, v_dst); } } + #elif CV_SSE2 + if (dcn == 3 && haveSIMD) + { + for ( ; j <= (dn * 3 - 16); j += 16, dst += 16) + { + __m128 v_src0 = _mm_mul_ps(_mm_load_ps(buf + j), v_scale); + __m128 v_src1 = _mm_mul_ps(_mm_load_ps(buf + j + 4), v_scale); + __m128 v_src2 = _mm_mul_ps(_mm_load_ps(buf + j + 8), v_scale); + __m128 v_src3 = _mm_mul_ps(_mm_load_ps(buf + j + 12), v_scale); + + __m128i v_dst0 = _mm_packs_epi32(_mm_cvtps_epi32(v_src0), + _mm_cvtps_epi32(v_src1)); + __m128i v_dst1 = _mm_packs_epi32(_mm_cvtps_epi32(v_src2), + _mm_cvtps_epi32(v_src3)); + + _mm_storeu_si128((__m128i *)dst, _mm_packus_epi16(v_dst0, v_dst1)); + } + + int jr = j % 3; + if (jr) + dst -= jr, j -= jr; + } #endif + for( ; j < dn*3; j += 3, dst += dcn ) { dst[0] = saturate_cast(buf[j]*255.f); @@ -3469,6 +5000,10 @@ struct HLS2RGB_b #if CV_NEON float32x4_t v_scale, v_scale_inv; uint8x8_t v_alpha; + #elif CV_SSE2 + __m128 v_scale, v_scale_inv; + __m128i v_zero; + bool haveSIMD; #endif }; @@ -3784,14 +5319,52 @@ struct Lab2RGB_b v_scale = vdupq_n_f32(255.f); v_alpha = 
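// Why the 'jr = j % 3' rewind that follows the 16-at-a-time store loops above: 16 is
// not a multiple of 3, so the vector loop can stop in the middle of an interleaved
// pixel. Backing j (and the pointer) up to the previous multiple of 3 lets the scalar
// cleanup loop re-emit that partial pixel whole. Schematic only, hypothetical names:
void tail_realign_sketch(const float* buf, unsigned char* dst, int dn)
{
    int j = 0;
    for ( ; j <= dn * 3 - 16; j += 16, dst += 16)
    {
        // vector store of 16 interleaved channel values taken from buf + j
    }
    int jr = j % 3;
    if (jr)                      // j may now point inside a pixel
        dst -= jr, j -= jr;
    for ( ; j < dn * 3; j += 3, dst += 3)
    {
        // scalar per-pixel store from buf + j
    }
}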
vdup_n_u8(ColorChannel::max()); v_128 = vdupq_n_f32(128.0f); + #elif CV_SSE2 + v_scale_inv = _mm_set1_ps(100.f/255.f); + v_scale = _mm_set1_ps(255.f); + v_128 = _mm_set1_ps(128.0f); + v_zero = _mm_setzero_si128(); + haveSIMD = checkHardwareSupport(CV_CPU_SSE2); #endif } + #if CV_SSE2 + // 16s x 8 + void process(__m128i v_r, __m128i v_g, __m128i v_b, + float * buf) const + { + __m128 v_r0 = _mm_cvtepi32_ps(_mm_unpacklo_epi16(v_r, v_zero)); + __m128 v_g0 = _mm_cvtepi32_ps(_mm_unpacklo_epi16(v_g, v_zero)); + __m128 v_b0 = _mm_cvtepi32_ps(_mm_unpacklo_epi16(v_b, v_zero)); + + __m128 v_r1 = _mm_cvtepi32_ps(_mm_unpackhi_epi16(v_r, v_zero)); + __m128 v_g1 = _mm_cvtepi32_ps(_mm_unpackhi_epi16(v_g, v_zero)); + __m128 v_b1 = _mm_cvtepi32_ps(_mm_unpackhi_epi16(v_b, v_zero)); + + v_r0 = _mm_mul_ps(v_r0, v_scale_inv); + v_r1 = _mm_mul_ps(v_r1, v_scale_inv); + + v_g0 = _mm_sub_ps(v_g0, v_128); + v_g1 = _mm_sub_ps(v_g1, v_128); + v_b0 = _mm_sub_ps(v_b0, v_128); + v_b1 = _mm_sub_ps(v_b1, v_128); + + _mm_interleave_ps(v_r0, v_r1, v_g0, v_g1, v_b0, v_b1); + + _mm_store_ps(buf, v_r0); + _mm_store_ps(buf + 4, v_r1); + _mm_store_ps(buf + 8, v_g0); + _mm_store_ps(buf + 12, v_g1); + _mm_store_ps(buf + 16, v_b0); + _mm_store_ps(buf + 20, v_b1); + } + #endif + void operator()(const uchar* src, uchar* dst, int n) const { int i, j, dcn = dstcn; uchar alpha = ColorChannel::max(); - float buf[3*BLOCK_SIZE]; + float CV_DECL_ALIGNED(16) buf[3*BLOCK_SIZE]; for( i = 0; i < n; i += BLOCK_SIZE, src += BLOCK_SIZE*3 ) { @@ -3817,6 +5390,41 @@ struct Lab2RGB_b v_dst.val[2] = vsubq_f32(vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_t2))), v_128); vst3q_f32(buf + j + 12, v_dst); } + #elif CV_SSE2 + if (haveSIMD) + { + for ( ; j <= (dn - 32) * 3; j += 96) + { + __m128i v_r0 = _mm_loadu_si128((__m128i const *)(src + j)); + __m128i v_r1 = _mm_loadu_si128((__m128i const *)(src + j + 16)); + __m128i v_g0 = _mm_loadu_si128((__m128i const *)(src + j + 32)); + __m128i v_g1 = _mm_loadu_si128((__m128i const *)(src + j + 48)); + __m128i v_b0 = _mm_loadu_si128((__m128i const *)(src + j + 64)); + __m128i v_b1 = _mm_loadu_si128((__m128i const *)(src + j + 80)); + + _mm_deinterleave_epi8(v_r0, v_r1, v_g0, v_g1, v_b0, v_b1); + + process(_mm_unpacklo_epi8(v_r0, v_zero), + _mm_unpacklo_epi8(v_g0, v_zero), + _mm_unpacklo_epi8(v_b0, v_zero), + buf + j); + + process(_mm_unpackhi_epi8(v_r0, v_zero), + _mm_unpackhi_epi8(v_g0, v_zero), + _mm_unpackhi_epi8(v_b0, v_zero), + buf + j + 24); + + process(_mm_unpacklo_epi8(v_r1, v_zero), + _mm_unpacklo_epi8(v_g1, v_zero), + _mm_unpacklo_epi8(v_b1, v_zero), + buf + j + 48); + + process(_mm_unpackhi_epi8(v_r1, v_zero), + _mm_unpackhi_epi8(v_g1, v_zero), + _mm_unpackhi_epi8(v_b1, v_zero), + buf + j + 72); + } + } #endif for( ; j < dn*3; j += 3 ) @@ -3857,6 +5465,28 @@ struct Lab2RGB_b vst3_u8(dst, v_dst); } } + #elif CV_SSE2 + if (dcn == 3 && haveSIMD) + { + for ( ; j <= (dn * 3 - 16); j += 16, dst += 16) + { + __m128 v_src0 = _mm_mul_ps(_mm_load_ps(buf + j), v_scale); + __m128 v_src1 = _mm_mul_ps(_mm_load_ps(buf + j + 4), v_scale); + __m128 v_src2 = _mm_mul_ps(_mm_load_ps(buf + j + 8), v_scale); + __m128 v_src3 = _mm_mul_ps(_mm_load_ps(buf + j + 12), v_scale); + + __m128i v_dst0 = _mm_packs_epi32(_mm_cvtps_epi32(v_src0), + _mm_cvtps_epi32(v_src1)); + __m128i v_dst1 = _mm_packs_epi32(_mm_cvtps_epi32(v_src2), + _mm_cvtps_epi32(v_src3)); + + _mm_storeu_si128((__m128i *)dst, _mm_packus_epi16(v_dst0, v_dst1)); + } + + int jr = j % 3; + if (jr) + dst -= jr, j -= jr; + } #endif for( ; j < dn*3; j += 3, dst += dcn ) @@ 
-3876,6 +5506,10 @@ struct Lab2RGB_b #if CV_NEON float32x4_t v_scale, v_scale_inv, v_128; uint8x8_t v_alpha; + #elif CV_SSE2 + __m128 v_scale, v_scale_inv, v_128; + __m128i v_zero; + bool haveSIMD; #endif }; @@ -4050,13 +5684,48 @@ struct RGB2Luv_b v_coeff3 = vdupq_n_f32(0.9732824427480916f); v_coeff4 = vdupq_n_f32(136.259541984732824f); v_alpha = vdup_n_u8(ColorChannel::max()); + #elif CV_SSE2 + v_zero = _mm_setzero_si128(); + v_scale_inv = _mm_set1_ps(1.f/255.f); + v_scale = _mm_set1_ps(2.55f); + v_coeff1 = _mm_set1_ps(0.72033898305084743f); + v_coeff2 = _mm_set1_ps(96.525423728813564f); + v_coeff3 = _mm_set1_ps(0.9732824427480916f); + v_coeff4 = _mm_set1_ps(136.259541984732824f); + haveSIMD = checkHardwareSupport(CV_CPU_SSE2); #endif } + #if CV_SSE2 + void process(const float * buf, + __m128i & v_l, __m128i & v_u, __m128i & v_v) const + { + __m128 v_l0f = _mm_load_ps(buf); + __m128 v_l1f = _mm_load_ps(buf + 4); + __m128 v_u0f = _mm_load_ps(buf + 8); + __m128 v_u1f = _mm_load_ps(buf + 12); + __m128 v_v0f = _mm_load_ps(buf + 16); + __m128 v_v1f = _mm_load_ps(buf + 20); + + _mm_deinterleave_ps(v_l0f, v_l1f, v_u0f, v_u1f, v_v0f, v_v1f); + + v_l0f = _mm_mul_ps(v_l0f, v_scale); + v_l1f = _mm_mul_ps(v_l1f, v_scale); + v_u0f = _mm_add_ps(_mm_mul_ps(v_u0f, v_coeff1), v_coeff2); + v_u1f = _mm_add_ps(_mm_mul_ps(v_u1f, v_coeff1), v_coeff2); + v_v0f = _mm_add_ps(_mm_mul_ps(v_v0f, v_coeff3), v_coeff4); + v_v1f = _mm_add_ps(_mm_mul_ps(v_v1f, v_coeff3), v_coeff4); + + v_l = _mm_packs_epi32(_mm_cvtps_epi32(v_l0f), _mm_cvtps_epi32(v_l1f)); + v_u = _mm_packs_epi32(_mm_cvtps_epi32(v_u0f), _mm_cvtps_epi32(v_u1f)); + v_v = _mm_packs_epi32(_mm_cvtps_epi32(v_v0f), _mm_cvtps_epi32(v_v1f)); + } + #endif + void operator()(const uchar* src, uchar* dst, int n) const { int i, j, scn = srccn; - float buf[3*BLOCK_SIZE]; + float CV_DECL_ALIGNED(16) buf[3*BLOCK_SIZE]; for( i = 0; i < n; i += BLOCK_SIZE, dst += BLOCK_SIZE*3 ) { @@ -4094,6 +5763,26 @@ struct RGB2Luv_b v_dst.val[2] = vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_t2))), v_scale_inv); vst3q_f32(buf + j + 12, v_dst); } + #elif CV_SSE2 + if (scn == 3 && haveSIMD) + { + for ( ; j <= (dn * 3 - 16); j += 16, src += 16) + { + __m128i v_src = _mm_loadu_si128((__m128i const *)src); + + __m128i v_src_p = _mm_unpacklo_epi8(v_src, v_zero); + _mm_store_ps(buf + j, _mm_mul_ps(_mm_cvtepi32_ps(_mm_unpacklo_epi16(v_src_p, v_zero)), v_scale_inv)); + _mm_store_ps(buf + j + 4, _mm_mul_ps(_mm_cvtepi32_ps(_mm_unpackhi_epi16(v_src_p, v_zero)), v_scale_inv)); + + v_src_p = _mm_unpackhi_epi8(v_src, v_zero); + _mm_store_ps(buf + j + 8, _mm_mul_ps(_mm_cvtepi32_ps(_mm_unpacklo_epi16(v_src_p, v_zero)), v_scale_inv)); + _mm_store_ps(buf + j + 12, _mm_mul_ps(_mm_cvtepi32_ps(_mm_unpackhi_epi16(v_src_p, v_zero)), v_scale_inv)); + } + + int jr = j % 3; + if (jr) + src -= jr, j -= jr; + } #endif for( ; j < dn*3; j += 3, src += scn ) { @@ -4119,6 +5808,43 @@ struct RGB2Luv_b vst3_u8(dst + j, v_dst); } + #elif CV_SSE2 + if (haveSIMD) + { + for ( ; j <= (dn - 32) * 3; j += 96) + { + __m128i v_l_0, v_u_0, v_v_0; + process(buf + j, + v_l_0, v_u_0, v_v_0); + + __m128i v_l_1, v_u_1, v_v_1; + process(buf + j + 24, + v_l_1, v_u_1, v_v_1); + + __m128i v_l0 = _mm_packus_epi16(v_l_0, v_l_1); + __m128i v_u0 = _mm_packus_epi16(v_u_0, v_u_1); + __m128i v_v0 = _mm_packus_epi16(v_v_0, v_v_1); + + process(buf + j + 48, + v_l_0, v_u_0, v_v_0); + + process(buf + j + 72, + v_l_1, v_u_1, v_v_1); + + __m128i v_l1 = _mm_packus_epi16(v_l_0, v_l_1); + __m128i v_u1 = _mm_packus_epi16(v_u_0, v_u_1); + __m128i v_v1 
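// The constants in the RGB2Luv_b constructor above (2.55, 0.72033898..., 96.52542...,
// 0.97328244..., 136.25954...) appear to be the 8-bit packing of the float Luv ranges
// L in [0,100], u in [-134,220], v in [-140,122]: scale = 255/range, offset =
// -min*scale. The ranges are an interpretation, not stated in the hunk, and the
// rounding below is simplified (the SIMD code rounds via _mm_cvtps_epi32):
static inline void pack_luv_u8_sketch(float L, float u, float v,
                                      unsigned char& L8, unsigned char& u8, unsigned char& v8)
{
    L8 = (unsigned char)(L * 2.55f + 0.5f);                              // 255 / 100
    u8 = (unsigned char)(u * (255.f / 354.f) + 134.f * 255.f / 354.f + 0.5f);
    v8 = (unsigned char)(v * (255.f / 262.f) + 140.f * 255.f / 262.f + 0.5f);
}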
= _mm_packus_epi16(v_v_0, v_v_1); + + _mm_interleave_epi8(v_l0, v_l1, v_u0, v_u1, v_v0, v_v1); + + _mm_storeu_si128((__m128i *)(dst + j), v_l0); + _mm_storeu_si128((__m128i *)(dst + j + 16), v_l1); + _mm_storeu_si128((__m128i *)(dst + j + 32), v_u0); + _mm_storeu_si128((__m128i *)(dst + j + 48), v_u1); + _mm_storeu_si128((__m128i *)(dst + j + 64), v_v0); + _mm_storeu_si128((__m128i *)(dst + j + 80), v_v1); + } + } #endif for( ; j < dn*3; j += 3 ) @@ -4136,6 +5862,10 @@ struct RGB2Luv_b #if CV_NEON float32x4_t v_scale, v_scale_inv, v_coeff1, v_coeff2, v_coeff3, v_coeff4; uint8x8_t v_alpha; + #elif CV_SSE2 + __m128 v_scale, v_scale_inv, v_coeff1, v_coeff2, v_coeff3, v_coeff4; + __m128i v_zero; + bool haveSIMD; #endif }; @@ -4156,14 +5886,55 @@ struct Luv2RGB_b v_140 = vdupq_n_f32(140.f); v_scale = vdupq_n_f32(255.f); v_alpha = vdup_n_u8(ColorChannel::max()); + #elif CV_SSE2 + v_scale_inv = _mm_set1_ps(100.f/255.f); + v_coeff1 = _mm_set1_ps(1.388235294117647f); + v_coeff2 = _mm_set1_ps(1.027450980392157f); + v_134 = _mm_set1_ps(134.f); + v_140 = _mm_set1_ps(140.f); + v_scale = _mm_set1_ps(255.f); + v_zero = _mm_setzero_si128(); + haveSIMD = checkHardwareSupport(CV_CPU_SSE2); #endif } + #if CV_SSE2 + // 16s x 8 + void process(__m128i v_l, __m128i v_u, __m128i v_v, + float * buf) const + { + __m128 v_l0 = _mm_cvtepi32_ps(_mm_unpacklo_epi16(v_l, v_zero)); + __m128 v_u0 = _mm_cvtepi32_ps(_mm_unpacklo_epi16(v_u, v_zero)); + __m128 v_v0 = _mm_cvtepi32_ps(_mm_unpacklo_epi16(v_v, v_zero)); + + __m128 v_l1 = _mm_cvtepi32_ps(_mm_unpackhi_epi16(v_l, v_zero)); + __m128 v_u1 = _mm_cvtepi32_ps(_mm_unpackhi_epi16(v_u, v_zero)); + __m128 v_v1 = _mm_cvtepi32_ps(_mm_unpackhi_epi16(v_v, v_zero)); + + v_l0 = _mm_mul_ps(v_l0, v_scale_inv); + v_l1 = _mm_mul_ps(v_l1, v_scale_inv); + + v_u0 = _mm_sub_ps(_mm_mul_ps(v_u0, v_coeff1), v_134); + v_u1 = _mm_sub_ps(_mm_mul_ps(v_u1, v_coeff1), v_134); + v_v0 = _mm_sub_ps(_mm_mul_ps(v_v0, v_coeff2), v_140); + v_v1 = _mm_sub_ps(_mm_mul_ps(v_v1, v_coeff2), v_140); + + _mm_interleave_ps(v_l0, v_l1, v_u0, v_u1, v_v0, v_v1); + + _mm_store_ps(buf, v_l0); + _mm_store_ps(buf + 4, v_l1); + _mm_store_ps(buf + 8, v_u0); + _mm_store_ps(buf + 12, v_u1); + _mm_store_ps(buf + 16, v_v0); + _mm_store_ps(buf + 20, v_v1); + } + #endif + void operator()(const uchar* src, uchar* dst, int n) const { int i, j, dcn = dstcn; uchar alpha = ColorChannel::max(); - float buf[3*BLOCK_SIZE]; + float CV_DECL_ALIGNED(16) buf[3*BLOCK_SIZE]; for( i = 0; i < n; i += BLOCK_SIZE, src += BLOCK_SIZE*3 ) { @@ -4189,6 +5960,41 @@ struct Luv2RGB_b v_dst.val[2] = vsubq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_t2))), v_coeff2), v_140); vst3q_f32(buf + j + 12, v_dst); } + #elif CV_SSE2 + if (haveSIMD) + { + for ( ; j <= (dn - 32) * 3; j += 96) + { + __m128i v_r0 = _mm_loadu_si128((__m128i const *)(src + j)); + __m128i v_r1 = _mm_loadu_si128((__m128i const *)(src + j + 16)); + __m128i v_g0 = _mm_loadu_si128((__m128i const *)(src + j + 32)); + __m128i v_g1 = _mm_loadu_si128((__m128i const *)(src + j + 48)); + __m128i v_b0 = _mm_loadu_si128((__m128i const *)(src + j + 64)); + __m128i v_b1 = _mm_loadu_si128((__m128i const *)(src + j + 80)); + + _mm_deinterleave_epi8(v_r0, v_r1, v_g0, v_g1, v_b0, v_b1); + + process(_mm_unpacklo_epi8(v_r0, v_zero), + _mm_unpacklo_epi8(v_g0, v_zero), + _mm_unpacklo_epi8(v_b0, v_zero), + buf + j); + + process(_mm_unpackhi_epi8(v_r0, v_zero), + _mm_unpackhi_epi8(v_g0, v_zero), + _mm_unpackhi_epi8(v_b0, v_zero), + buf + j + 24); + + process(_mm_unpacklo_epi8(v_r1, v_zero), + 
_mm_unpacklo_epi8(v_g1, v_zero), + _mm_unpacklo_epi8(v_b1, v_zero), + buf + j + 48); + + process(_mm_unpackhi_epi8(v_r1, v_zero), + _mm_unpackhi_epi8(v_g1, v_zero), + _mm_unpackhi_epi8(v_b1, v_zero), + buf + j + 72); + } + } #endif for( ; j < dn*3; j += 3 ) { @@ -4228,6 +6034,28 @@ struct Luv2RGB_b vst3_u8(dst, v_dst); } } + #elif CV_SSE2 + if (dcn == 3 && haveSIMD) + { + for ( ; j <= (dn * 3 - 16); j += 16, dst += 16) + { + __m128 v_src0 = _mm_mul_ps(_mm_load_ps(buf + j), v_scale); + __m128 v_src1 = _mm_mul_ps(_mm_load_ps(buf + j + 4), v_scale); + __m128 v_src2 = _mm_mul_ps(_mm_load_ps(buf + j + 8), v_scale); + __m128 v_src3 = _mm_mul_ps(_mm_load_ps(buf + j + 12), v_scale); + + __m128i v_dst0 = _mm_packs_epi32(_mm_cvtps_epi32(v_src0), + _mm_cvtps_epi32(v_src1)); + __m128i v_dst1 = _mm_packs_epi32(_mm_cvtps_epi32(v_src2), + _mm_cvtps_epi32(v_src3)); + + _mm_storeu_si128((__m128i *)dst, _mm_packus_epi16(v_dst0, v_dst1)); + } + + int jr = j % 3; + if (jr) + dst -= jr, j -= jr; + } #endif for( ; j < dn*3; j += 3, dst += dcn ) @@ -4247,6 +6075,10 @@ struct Luv2RGB_b #if CV_NEON float32x4_t v_scale, v_scale_inv, v_coeff1, v_coeff2, v_134, v_140; uint8x8_t v_alpha; + #elif CV_SSE2 + __m128 v_scale, v_scale_inv, v_coeff1, v_coeff2, v_134, v_140; + __m128i v_zero; + bool haveSIMD; #endif }; diff --git a/modules/imgproc/src/corner.cpp b/modules/imgproc/src/corner.cpp index 85f2063b28..2e9400409f 100644 --- a/modules/imgproc/src/corner.cpp +++ b/modules/imgproc/src/corner.cpp @@ -12,6 +12,7 @@ // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Copyright (C) 2014-2015, Itseez Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, @@ -271,6 +272,9 @@ cornerEigenValsVecs( const Mat& src, Mat& eigenv, int block_size, if (tegra::cornerEigenValsVecs(src, eigenv, block_size, aperture_size, op_type, k, borderType)) return; #endif +#if CV_SSE2 + bool haveSSE2 = checkHardwareSupport(CV_CPU_SSE2); +#endif int depth = src.depth(); double scale = (double)(1 << ((aperture_size > 0 ? 
aperture_size : 3) - 1)) * block_size; @@ -318,6 +322,33 @@ cornerEigenValsVecs( const Mat& src, Mat& eigenv, int block_size, vst3q_f32(cov_data + j * 3, v_dst); } + #elif CV_SSE2 + if (haveSSE2) + { + for( ; j <= size.width - 8; j += 8 ) + { + __m128 v_dx_0 = _mm_loadu_ps(dxdata + j); + __m128 v_dx_1 = _mm_loadu_ps(dxdata + j + 4); + __m128 v_dy_0 = _mm_loadu_ps(dydata + j); + __m128 v_dy_1 = _mm_loadu_ps(dydata + j + 4); + + __m128 v_dx2_0 = _mm_mul_ps(v_dx_0, v_dx_0); + __m128 v_dxy_0 = _mm_mul_ps(v_dx_0, v_dy_0); + __m128 v_dy2_0 = _mm_mul_ps(v_dy_0, v_dy_0); + __m128 v_dx2_1 = _mm_mul_ps(v_dx_1, v_dx_1); + __m128 v_dxy_1 = _mm_mul_ps(v_dx_1, v_dy_1); + __m128 v_dy2_1 = _mm_mul_ps(v_dy_1, v_dy_1); + + _mm_interleave_ps(v_dx2_0, v_dx2_1, v_dxy_0, v_dxy_1, v_dy2_0, v_dy2_1); + + _mm_storeu_ps(cov_data + j * 3, v_dx2_0); + _mm_storeu_ps(cov_data + j * 3 + 4, v_dx2_1); + _mm_storeu_ps(cov_data + j * 3 + 8, v_dxy_0); + _mm_storeu_ps(cov_data + j * 3 + 12, v_dxy_1); + _mm_storeu_ps(cov_data + j * 3 + 16, v_dy2_0); + _mm_storeu_ps(cov_data + j * 3 + 20, v_dy2_1); + } + } #endif for( ; j < size.width; j++ ) diff --git a/modules/imgproc/src/demosaicing.cpp b/modules/imgproc/src/demosaicing.cpp index 0b7afb8ea6..cec450dc71 100644 --- a/modules/imgproc/src/demosaicing.cpp +++ b/modules/imgproc/src/demosaicing.cpp @@ -12,6 +12,7 @@ // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009-2010, Willow Garage Inc., all rights reserved. +// Copyright (C) 2014, Itseez Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, diff --git a/modules/imgproc/src/histogram.cpp b/modules/imgproc/src/histogram.cpp index 9acdc11415..ec8de4d815 100644 --- a/modules/imgproc/src/histogram.cpp +++ b/modules/imgproc/src/histogram.cpp @@ -2284,15 +2284,20 @@ double cv::compareHist( InputArray _H1, InputArray _H2, int method ) CV_Assert( it.planes[0].isContinuous() && it.planes[1].isContinuous() ); +#if CV_SSE2 + bool haveSIMD = checkHardwareSupport(CV_CPU_SSE2); +#endif + for( size_t i = 0; i < it.nplanes; i++, ++it ) { const float* h1 = it.planes[0].ptr(); const float* h2 = it.planes[1].ptr(); len = it.planes[0].rows*it.planes[0].cols*H1.channels(); + j = 0; if( (method == CV_COMP_CHISQR) || (method == CV_COMP_CHISQR_ALT)) { - for( j = 0; j < len; j++ ) + for( ; j < len; j++ ) { double a = h1[j] - h2[j]; double b = (method == CV_COMP_CHISQR) ? 
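// The corner.cpp hunk above vectorizes one step of cornerEigenValsVecs: form the
// per-pixel covariance triple (dx*dx, dx*dy, dy*dy) and store it interleaved, to be
// box-filtered afterwards. Plain scalar form of the same row (illustrative helper):
static inline void cov_row_scalar(const float* dx, const float* dy, float* cov, int width)
{
    for (int j = 0; j < width; ++j)
    {
        float x = dx[j], y = dy[j];
        cov[j * 3]     = x * x;
        cov[j * 3 + 1] = x * y;
        cov[j * 3 + 2] = y * y;
    }
}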
h1[j] : h1[j] + h2[j]; @@ -2302,7 +2307,51 @@ double cv::compareHist( InputArray _H1, InputArray _H2, int method ) } else if( method == CV_COMP_CORREL ) { - for( j = 0; j < len; j++ ) + #if CV_SSE2 + if (haveSIMD) + { + __m128d v_s1 = _mm_setzero_pd(), v_s2 = v_s1; + __m128d v_s11 = v_s1, v_s22 = v_s1, v_s12 = v_s1; + + for ( ; j <= len - 4; j += 4) + { + __m128 v_a = _mm_loadu_ps(h1 + j); + __m128 v_b = _mm_loadu_ps(h2 + j); + + // 0-1 + __m128d v_ad = _mm_cvtps_pd(v_a); + __m128d v_bd = _mm_cvtps_pd(v_b); + v_s12 = _mm_add_pd(v_s12, _mm_mul_pd(v_ad, v_bd)); + v_s11 = _mm_add_pd(v_s11, _mm_mul_pd(v_ad, v_ad)); + v_s22 = _mm_add_pd(v_s22, _mm_mul_pd(v_bd, v_bd)); + v_s1 = _mm_add_pd(v_s1, v_ad); + v_s2 = _mm_add_pd(v_s2, v_bd); + + // 2-3 + v_ad = _mm_cvtps_pd(_mm_castsi128_ps(_mm_srli_si128(_mm_castps_si128(v_a), 8))); + v_bd = _mm_cvtps_pd(_mm_castsi128_ps(_mm_srli_si128(_mm_castps_si128(v_b), 8))); + v_s12 = _mm_add_pd(v_s12, _mm_mul_pd(v_ad, v_bd)); + v_s11 = _mm_add_pd(v_s11, _mm_mul_pd(v_ad, v_ad)); + v_s22 = _mm_add_pd(v_s22, _mm_mul_pd(v_bd, v_bd)); + v_s1 = _mm_add_pd(v_s1, v_ad); + v_s2 = _mm_add_pd(v_s2, v_bd); + } + + double CV_DECL_ALIGNED(16) ar[10]; + _mm_store_pd(ar, v_s12); + _mm_store_pd(ar + 2, v_s11); + _mm_store_pd(ar + 4, v_s22); + _mm_store_pd(ar + 6, v_s1); + _mm_store_pd(ar + 8, v_s2); + + s12 += ar[0] + ar[1]; + s11 += ar[2] + ar[3]; + s22 += ar[4] + ar[5]; + s1 += ar[6] + ar[7]; + s2 += ar[8] + ar[9]; + } + #endif + for( ; j < len; j++ ) { double a = h1[j]; double b = h2[j]; @@ -2316,7 +2365,6 @@ double cv::compareHist( InputArray _H1, InputArray _H2, int method ) } else if( method == CV_COMP_INTERSECT ) { - j = 0; #if CV_NEON float32x4_t v_result = vdupq_n_f32(0.0f); for( ; j <= len - 4; j += 4 ) @@ -2324,13 +2372,61 @@ double cv::compareHist( InputArray _H1, InputArray _H2, int method ) float CV_DECL_ALIGNED(16) ar[4]; vst1q_f32(ar, v_result); result += ar[0] + ar[1] + ar[2] + ar[3]; + #elif CV_SSE2 + if (haveSIMD) + { + __m128d v_result = _mm_setzero_pd(); + for ( ; j <= len - 4; j += 4) + { + __m128 v_src = _mm_min_ps(_mm_loadu_ps(h1 + j), + _mm_loadu_ps(h2 + j)); + v_result = _mm_add_pd(v_result, _mm_cvtps_pd(v_src)); + v_src = _mm_castsi128_ps(_mm_srli_si128(_mm_castps_si128(v_src), 8)); + v_result = _mm_add_pd(v_result, _mm_cvtps_pd(v_src)); + } + + double CV_DECL_ALIGNED(16) ar[2]; + _mm_store_pd(ar, v_result); + result += ar[0] + ar[1]; + } #endif for( ; j < len; j++ ) result += std::min(h1[j], h2[j]); } else if( method == CV_COMP_BHATTACHARYYA ) { - for( j = 0; j < len; j++ ) + #if CV_SSE2 + if (haveSIMD) + { + __m128d v_s1 = _mm_setzero_pd(), v_s2 = v_s1, v_result = v_s1; + for ( ; j <= len - 4; j += 4) + { + __m128 v_a = _mm_loadu_ps(h1 + j); + __m128 v_b = _mm_loadu_ps(h2 + j); + + __m128d v_ad = _mm_cvtps_pd(v_a); + __m128d v_bd = _mm_cvtps_pd(v_b); + v_s1 = _mm_add_pd(v_s1, v_ad); + v_s2 = _mm_add_pd(v_s2, v_bd); + v_result = _mm_add_pd(v_result, _mm_sqrt_pd(_mm_mul_pd(v_ad, v_bd))); + + v_ad = _mm_cvtps_pd(_mm_castsi128_ps(_mm_srli_si128(_mm_castps_si128(v_a), 8))); + v_bd = _mm_cvtps_pd(_mm_castsi128_ps(_mm_srli_si128(_mm_castps_si128(v_b), 8))); + v_s1 = _mm_add_pd(v_s1, v_ad); + v_s2 = _mm_add_pd(v_s2, v_bd); + v_result = _mm_add_pd(v_result, _mm_sqrt_pd(_mm_mul_pd(v_ad, v_bd))); + } + + double CV_DECL_ALIGNED(16) ar[6]; + _mm_store_pd(ar, v_s1); + _mm_store_pd(ar + 2, v_s2); + _mm_store_pd(ar + 4, v_result); + s1 += ar[0] + ar[1]; + s2 += ar[2] + ar[3]; + result += ar[4] + ar[5]; + } + #endif + for( ; j < len; j++ ) { double a = h1[j]; double 
b = h2[j]; @@ -2341,7 +2437,7 @@ double cv::compareHist( InputArray _H1, InputArray _H2, int method ) } else if( method == CV_COMP_KL_DIV ) { - for( j = 0; j < len; j++ ) + for( ; j < len; j++ ) { double p = h1[j]; double q = h2[j]; diff --git a/modules/imgproc/src/imgwarp.cpp b/modules/imgproc/src/imgwarp.cpp index c4bb3baa9f..fe126fbbd1 100644 --- a/modules/imgproc/src/imgwarp.cpp +++ b/modules/imgproc/src/imgwarp.cpp @@ -12,6 +12,7 @@ // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Copyright (C) 2014-2015, Itseez Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, @@ -1962,9 +1963,9 @@ private: struct ResizeAreaFastVec_SIMD_32f { ResizeAreaFastVec_SIMD_32f(int _scale_x, int _scale_y, int _cn, int _step) : - scale_x(_scale_x), scale_y(_scale_y), cn(_cn), step(_step) + cn(_cn), step(_step) { - fast_mode = scale_x == 2 && scale_y == 2 && (cn == 1 || cn == 3 || cn == 4); + fast_mode = _scale_x == 2 && _scale_y == 2 && (cn == 1 || cn == 4); } int operator() (const float * S, float * D, int w) const @@ -2004,7 +2005,6 @@ struct ResizeAreaFastVec_SIMD_32f } private: - int scale_x, scale_y; int cn; bool fast_mode; int step; @@ -2199,8 +2199,146 @@ private: bool use_simd; }; -typedef ResizeAreaFastNoVec ResizeAreaFastVec_SIMD_16s; -typedef ResizeAreaFastNoVec ResizeAreaFastVec_SIMD_32f; +class ResizeAreaFastVec_SIMD_16s +{ +public: + ResizeAreaFastVec_SIMD_16s(int _cn, int _step) : + cn(_cn), step(_step) + { + use_simd = checkHardwareSupport(CV_CPU_SSE2); + } + + int operator() (const short* S, short* D, int w) const + { + if (!use_simd) + return 0; + + int dx = 0; + const short* S0 = (const short*)S; + const short* S1 = (const short*)((const uchar*)(S) + step); + __m128i masklow = _mm_set1_epi32(0x0000ffff); + __m128i zero = _mm_setzero_si128(); + __m128i delta2 = _mm_set1_epi32(2); + + if (cn == 1) + { + for ( ; dx <= w - 4; dx += 4, S0 += 8, S1 += 8, D += 4) + { + __m128i r0 = _mm_loadu_si128((const __m128i*)S0); + __m128i r1 = _mm_loadu_si128((const __m128i*)S1); + + __m128i s0 = _mm_add_epi32(_mm_srai_epi32(r0, 16), + _mm_srai_epi32(_mm_slli_epi32(_mm_and_si128(r0, masklow), 16), 16)); + __m128i s1 = _mm_add_epi32(_mm_srai_epi32(r1, 16), + _mm_srai_epi32(_mm_slli_epi32(_mm_and_si128(r1, masklow), 16), 16)); + s0 = _mm_add_epi32(_mm_add_epi32(s0, s1), delta2); + s0 = _mm_srai_epi32(s0, 2); + s0 = _mm_packs_epi32(s0, zero); + + _mm_storel_epi64((__m128i*)D, s0); + } + } + else if (cn == 3) + for ( ; dx <= w - 4; dx += 3, S0 += 6, S1 += 6, D += 3) + { + __m128i r0 = _mm_loadu_si128((const __m128i*)S0); + __m128i r1 = _mm_loadu_si128((const __m128i*)S1); + + __m128i r0_16l = _mm_srai_epi32(_mm_unpacklo_epi16(zero, r0), 16); + __m128i r0_16h = _mm_srai_epi32(_mm_unpacklo_epi16(zero, _mm_srli_si128(r0, 6)), 16); + __m128i r1_16l = _mm_srai_epi32(_mm_unpacklo_epi16(zero, r1), 16); + __m128i r1_16h = _mm_srai_epi32(_mm_unpacklo_epi16(zero, _mm_srli_si128(r1, 6)), 16); + + __m128i s0 = _mm_add_epi32(r0_16l, r0_16h); + __m128i s1 = _mm_add_epi32(r1_16l, r1_16h); + s0 = _mm_add_epi32(delta2, _mm_add_epi32(s0, s1)); + s0 = _mm_packs_epi32(_mm_srai_epi32(s0, 2), zero); + _mm_storel_epi64((__m128i*)D, s0); + } + else + { + CV_Assert(cn == 4); + for ( ; dx <= w - 4; dx += 4, S0 += 8, S1 += 8, D += 4) + { + __m128i r0 = _mm_loadu_si128((const __m128i*)S0); + __m128i r1 = 
_mm_loadu_si128((const __m128i*)S1); + + __m128i r0_32l = _mm_srai_epi32(_mm_unpacklo_epi16(zero, r0), 16); + __m128i r0_32h = _mm_srai_epi32(_mm_unpackhi_epi16(zero, r0), 16); + __m128i r1_32l = _mm_srai_epi32(_mm_unpacklo_epi16(zero, r1), 16); + __m128i r1_32h = _mm_srai_epi32(_mm_unpackhi_epi16(zero, r1), 16); + + __m128i s0 = _mm_add_epi32(r0_32l, r0_32h); + __m128i s1 = _mm_add_epi32(r1_32l, r1_32h); + s0 = _mm_add_epi32(s1, _mm_add_epi32(s0, delta2)); + s0 = _mm_packs_epi32(_mm_srai_epi32(s0, 2), zero); + _mm_storel_epi64((__m128i*)D, s0); + } + } + + return dx; + } + +private: + int cn; + int step; + bool use_simd; +}; + +struct ResizeAreaFastVec_SIMD_32f +{ + ResizeAreaFastVec_SIMD_32f(int _scale_x, int _scale_y, int _cn, int _step) : + cn(_cn), step(_step) + { + fast_mode = _scale_x == 2 && _scale_y == 2 && (cn == 1 || cn == 4); + fast_mode = fast_mode && checkHardwareSupport(CV_CPU_SSE2); + } + + int operator() (const float * S, float * D, int w) const + { + if (!fast_mode) + return 0; + + const float * S0 = S, * S1 = (const float *)((const uchar *)(S0) + step); + int dx = 0; + + __m128 v_025 = _mm_set1_ps(0.25f); + + if (cn == 1) + { + const int shuffle_lo = _MM_SHUFFLE(2, 0, 2, 0), shuffle_hi = _MM_SHUFFLE(3, 1, 3, 1); + for ( ; dx <= w - 4; dx += 4, S0 += 8, S1 += 8, D += 4) + { + __m128 v_row00 = _mm_loadu_ps(S0), v_row01 = _mm_loadu_ps(S0 + 4), + v_row10 = _mm_loadu_ps(S1), v_row11 = _mm_loadu_ps(S1 + 4); + + __m128 v_dst0 = _mm_add_ps(_mm_shuffle_ps(v_row00, v_row01, shuffle_lo), + _mm_shuffle_ps(v_row00, v_row01, shuffle_hi)); + __m128 v_dst1 = _mm_add_ps(_mm_shuffle_ps(v_row10, v_row11, shuffle_lo), + _mm_shuffle_ps(v_row10, v_row11, shuffle_hi)); + + _mm_storeu_ps(D, _mm_mul_ps(_mm_add_ps(v_dst0, v_dst1), v_025)); + } + } + else if (cn == 4) + { + for ( ; dx <= w - 4; dx += 4, S0 += 8, S1 += 8, D += 4) + { + __m128 v_dst0 = _mm_add_ps(_mm_loadu_ps(S0), _mm_loadu_ps(S0 + 4)); + __m128 v_dst1 = _mm_add_ps(_mm_loadu_ps(S1), _mm_loadu_ps(S1 + 4)); + + _mm_storeu_ps(D, _mm_mul_ps(_mm_add_ps(v_dst0, v_dst1), v_025)); + } + } + + return dx; + } + +private: + int cn; + bool fast_mode; + int step; +}; #else @@ -4678,6 +4816,13 @@ void cv::convertMaps( InputArray _map1, InputArray _map2, size.height = 1; } +#if CV_SSE2 + bool useSSE2 = checkHardwareSupport(CV_CPU_SSE2); +#endif +#if CV_SSE4_1 + bool useSSE4_1 = checkHardwareSupport(CV_CPU_SSE4_1); +#endif + const float scale = 1.f/INTER_TAB_SIZE; int x, y; for( y = 0; y < size.height; y++ ) @@ -4708,6 +4853,29 @@ void cv::convertMaps( InputArray _map1, InputArray _map2, vst2q_s16(dst1 + (x << 1), v_dst); } + #elif CV_SSE4_1 + if (useSSE4_1) + { + for( ; x <= size.width - 16; x += 16 ) + { + __m128i v_dst0 = _mm_packs_epi32(_mm_cvtps_epi32(_mm_loadu_ps(src1f + x)), + _mm_cvtps_epi32(_mm_loadu_ps(src1f + x + 4))); + __m128i v_dst1 = _mm_packs_epi32(_mm_cvtps_epi32(_mm_loadu_ps(src1f + x + 8)), + _mm_cvtps_epi32(_mm_loadu_ps(src1f + x + 12))); + + __m128i v_dst2 = _mm_packs_epi32(_mm_cvtps_epi32(_mm_loadu_ps(src2f + x)), + _mm_cvtps_epi32(_mm_loadu_ps(src2f + x + 4))); + __m128i v_dst3 = _mm_packs_epi32(_mm_cvtps_epi32(_mm_loadu_ps(src2f + x + 8)), + _mm_cvtps_epi32(_mm_loadu_ps(src2f + x + 12))); + + _mm_interleave_epi16(v_dst0, v_dst1, v_dst2, v_dst3); + + _mm_storeu_si128((__m128i *)(dst1 + x * 2), v_dst0); + _mm_storeu_si128((__m128i *)(dst1 + x * 2 + 8), v_dst1); + _mm_storeu_si128((__m128i *)(dst1 + x * 2 + 16), v_dst2); + _mm_storeu_si128((__m128i *)(dst1 + x * 2 + 24), v_dst3); + } + } #endif for( ; x < size.width; x++ ) { @@ 
-4742,6 +4910,52 @@ void cv::convertMaps( InputArray _map1, InputArray _map2, vandq_s32(v_ix1, v_mask))); vst1q_u16(dst2 + x, vcombine_u16(v_dst0, v_dst1)); } + #elif CV_SSE4_1 + if (useSSE4_1) + { + __m128 v_its = _mm_set1_ps(INTER_TAB_SIZE); + __m128i v_its1 = _mm_set1_epi32(INTER_TAB_SIZE-1); + + for( ; x <= size.width - 16; x += 16 ) + { + __m128i v_ix0 = _mm_cvtps_epi32(_mm_mul_ps(_mm_loadu_ps(src1f + x), v_its)); + __m128i v_ix1 = _mm_cvtps_epi32(_mm_mul_ps(_mm_loadu_ps(src1f + x + 4), v_its)); + __m128i v_iy0 = _mm_cvtps_epi32(_mm_mul_ps(_mm_loadu_ps(src2f + x), v_its)); + __m128i v_iy1 = _mm_cvtps_epi32(_mm_mul_ps(_mm_loadu_ps(src2f + x + 4), v_its)); + + __m128i v_dst10 = _mm_packs_epi32(_mm_srai_epi32(v_ix0, INTER_BITS), + _mm_srai_epi32(v_ix1, INTER_BITS)); + __m128i v_dst12 = _mm_packs_epi32(_mm_srai_epi32(v_iy0, INTER_BITS), + _mm_srai_epi32(v_iy1, INTER_BITS)); + __m128i v_dst20 = _mm_add_epi32(_mm_slli_epi32(_mm_and_si128(v_iy0, v_its1), INTER_BITS), + _mm_and_si128(v_ix0, v_its1)); + __m128i v_dst21 = _mm_add_epi32(_mm_slli_epi32(_mm_and_si128(v_iy1, v_its1), INTER_BITS), + _mm_and_si128(v_ix1, v_its1)); + _mm_storeu_si128((__m128i *)(dst2 + x), _mm_packus_epi32(v_dst20, v_dst21)); + + v_ix0 = _mm_cvtps_epi32(_mm_mul_ps(_mm_loadu_ps(src1f + x + 8), v_its)); + v_ix1 = _mm_cvtps_epi32(_mm_mul_ps(_mm_loadu_ps(src1f + x + 12), v_its)); + v_iy0 = _mm_cvtps_epi32(_mm_mul_ps(_mm_loadu_ps(src2f + x + 8), v_its)); + v_iy1 = _mm_cvtps_epi32(_mm_mul_ps(_mm_loadu_ps(src2f + x + 12), v_its)); + + __m128i v_dst11 = _mm_packs_epi32(_mm_srai_epi32(v_ix0, INTER_BITS), + _mm_srai_epi32(v_ix1, INTER_BITS)); + __m128i v_dst13 = _mm_packs_epi32(_mm_srai_epi32(v_iy0, INTER_BITS), + _mm_srai_epi32(v_iy1, INTER_BITS)); + v_dst20 = _mm_add_epi32(_mm_slli_epi32(_mm_and_si128(v_iy0, v_its1), INTER_BITS), + _mm_and_si128(v_ix0, v_its1)); + v_dst21 = _mm_add_epi32(_mm_slli_epi32(_mm_and_si128(v_iy1, v_its1), INTER_BITS), + _mm_and_si128(v_ix1, v_its1)); + _mm_storeu_si128((__m128i *)(dst2 + x + 8), _mm_packus_epi32(v_dst20, v_dst21)); + + _mm_interleave_epi16(v_dst10, v_dst11, v_dst12, v_dst13); + + _mm_storeu_si128((__m128i *)(dst1 + x * 2), v_dst10); + _mm_storeu_si128((__m128i *)(dst1 + x * 2 + 8), v_dst11); + _mm_storeu_si128((__m128i *)(dst1 + x * 2 + 16), v_dst12); + _mm_storeu_si128((__m128i *)(dst1 + x * 2 + 24), v_dst13); + } + } #endif for( ; x < size.width; x++ ) { @@ -4761,6 +4975,12 @@ void cv::convertMaps( InputArray _map1, InputArray _map2, for( ; x <= (size.width << 1) - 8; x += 8 ) vst1q_s16(dst1 + x, vcombine_s16(vqmovn_s32(cv_vrndq_s32_f32(vld1q_f32(src1f + x))), vqmovn_s32(cv_vrndq_s32_f32(vld1q_f32(src1f + x + 4))))); + #elif CV_SSE2 + for( ; x <= (size.width << 1) - 8; x += 8 ) + { + _mm_storeu_si128((__m128i *)(dst1 + x), _mm_packs_epi32(_mm_cvtps_epi32(_mm_loadu_ps(src1f + x)), + _mm_cvtps_epi32(_mm_loadu_ps(src1f + x + 4)))); + } #endif for( ; x < size.width; x++ ) { @@ -4796,6 +5016,30 @@ void cv::convertMaps( InputArray _map1, InputArray _map2, vandq_s32(v_ix1, v_mask))); vst1q_u16(dst2 + x, vcombine_u16(v_dst0, v_dst1)); } + #elif CV_SSE4_1 + if (useSSE4_1) + { + __m128 v_its = _mm_set1_ps(INTER_TAB_SIZE); + __m128i v_its1 = _mm_set1_epi32(INTER_TAB_SIZE-1); + __m128i v_y_mask = _mm_set1_epi32((INTER_TAB_SIZE-1) << 16); + + for( ; x <= size.width - 4; x += 4 ) + { + __m128i v_src0 = _mm_cvtps_epi32(_mm_mul_ps(_mm_loadu_ps(src1f + x * 2), v_its)); + __m128i v_src1 = _mm_cvtps_epi32(_mm_mul_ps(_mm_loadu_ps(src1f + x * 2 + 4), v_its)); + + __m128i v_dst1 = 
_mm_packs_epi32(_mm_srai_epi32(v_src0, INTER_BITS), + _mm_srai_epi32(v_src1, INTER_BITS)); + _mm_storeu_si128((__m128i *)(dst1 + x * 2), v_dst1); + + // x0 y0 x1 y1 . . . + v_src0 = _mm_packs_epi32(_mm_and_si128(v_src0, v_its1), + _mm_and_si128(v_src1, v_its1)); + __m128i v_dst2 = _mm_or_si128(_mm_srli_epi32(_mm_and_si128(v_src0, v_y_mask), 16 - INTER_BITS), // y0 0 y1 0 . . . + _mm_and_si128(v_src0, v_its1)); // 0 x0 0 x1 . . . + _mm_storel_epi64((__m128i *)(dst2 + x), _mm_packus_epi32(v_dst2, v_dst2)); + } + } #endif for( ; x < size.width; x++ ) { @@ -4841,6 +5085,44 @@ void cv::convertMaps( InputArray _map1, InputArray _map2, vst1q_f32(dst1f + x + 4, v_dst1); vst1q_f32(dst2f + x + 4, v_dst2); } + #elif CV_SSE2 + __m128i v_mask2 = _mm_set1_epi16(INTER_TAB_SIZE2-1); + __m128i v_zero = _mm_setzero_si128(), v_mask = _mm_set1_epi32(INTER_TAB_SIZE-1); + __m128 v_scale = _mm_set1_ps(scale); + + for( ; x <= size.width - 16; x += 16) + { + __m128i v_src10 = _mm_loadu_si128((__m128i const *)(src1 + x * 2)); + __m128i v_src11 = _mm_loadu_si128((__m128i const *)(src1 + x * 2 + 8)); + __m128i v_src20 = _mm_loadu_si128((__m128i const *)(src1 + x * 2 + 16)); + __m128i v_src21 = _mm_loadu_si128((__m128i const *)(src1 + x * 2 + 24)); + + _mm_deinterleave_epi16(v_src10, v_src11, v_src20, v_src21); + + __m128i v_fxy = src2 ? _mm_and_si128(_mm_loadu_si128((__m128i const *)(src2 + x)), v_mask2) : v_zero; + __m128i v_fxy_p = _mm_unpacklo_epi16(v_fxy, v_zero); + _mm_storeu_ps(dst1f + x, _mm_add_ps(_mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(v_zero, v_src10), 16)), + _mm_mul_ps(v_scale, _mm_cvtepi32_ps(_mm_and_si128(v_fxy_p, v_mask))))); + _mm_storeu_ps(dst2f + x, _mm_add_ps(_mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(v_zero, v_src20), 16)), + _mm_mul_ps(v_scale, _mm_cvtepi32_ps(_mm_srli_epi32(v_fxy_p, INTER_BITS))))); + v_fxy_p = _mm_unpackhi_epi16(v_fxy, v_zero); + _mm_storeu_ps(dst1f + x + 4, _mm_add_ps(_mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(v_zero, v_src10), 16)), + _mm_mul_ps(v_scale, _mm_cvtepi32_ps(_mm_and_si128(v_fxy_p, v_mask))))); + _mm_storeu_ps(dst2f + x + 4, _mm_add_ps(_mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(v_zero, v_src20), 16)), + _mm_mul_ps(v_scale, _mm_cvtepi32_ps(_mm_srli_epi32(v_fxy_p, INTER_BITS))))); + + v_fxy = src2 ? 
_mm_and_si128(_mm_loadu_si128((__m128i const *)(src2 + x + 8)), v_mask2) : v_zero; + v_fxy_p = _mm_unpackhi_epi16(v_fxy, v_zero); + _mm_storeu_ps(dst1f + x + 8, _mm_add_ps(_mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(v_zero, v_src11), 16)), + _mm_mul_ps(v_scale, _mm_cvtepi32_ps(_mm_and_si128(v_fxy_p, v_mask))))); + _mm_storeu_ps(dst2f + x + 8, _mm_add_ps(_mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(v_zero, v_src21), 16)), + _mm_mul_ps(v_scale, _mm_cvtepi32_ps(_mm_srli_epi32(v_fxy_p, INTER_BITS))))); + v_fxy_p = _mm_unpackhi_epi16(v_fxy, v_zero); + _mm_storeu_ps(dst1f + x + 12, _mm_add_ps(_mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(v_zero, v_src11), 16)), + _mm_mul_ps(v_scale, _mm_cvtepi32_ps(_mm_and_si128(v_fxy_p, v_mask))))); + _mm_storeu_ps(dst2f + x + 12, _mm_add_ps(_mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(v_zero, v_src21), 16)), + _mm_mul_ps(v_scale, _mm_cvtepi32_ps(_mm_srli_epi32(v_fxy_p, INTER_BITS))))); + } #endif for( ; x < size.width; x++ ) { @@ -4882,6 +5164,27 @@ void cv::convertMaps( InputArray _map1, InputArray _map2, v_scale, vcvtq_f32_s32(vshrq_n_s32(v_fxy2, INTER_BITS))); vst2q_f32(dst1f + (x << 1) + 8, v_dst); } + #elif CV_SSE2 + if (useSSE2) + { + __m128i v_mask2 = _mm_set1_epi16(INTER_TAB_SIZE2-1); + __m128i v_zero = _mm_set1_epi32(0), v_mask = _mm_set1_epi32(INTER_TAB_SIZE-1); + __m128 v_scale = _mm_set1_ps(scale); + + for ( ; x <= size.width - 8; x += 8) + { + __m128i v_src = _mm_loadu_si128((__m128i const *)(src1 + x * 2)); + __m128i v_fxy = src2 ? _mm_and_si128(_mm_loadu_si128((__m128i const *)(src2 + x)), v_mask2) : v_zero; + __m128i v_fxy1 = _mm_and_si128(v_fxy, v_mask); + __m128i v_fxy2 = _mm_srli_epi16(v_fxy, INTER_BITS); + + __m128 v_add = _mm_mul_ps(_mm_cvtepi32_ps(_mm_unpacklo_epi16(v_fxy1, v_fxy2)), v_scale); + _mm_storeu_ps(dst1f + x * 2, _mm_add_ps(_mm_cvtepi32_ps(_mm_unpacklo_epi16(v_src, v_zero)), v_add)); + + v_add = _mm_mul_ps(_mm_cvtepi32_ps(_mm_unpackhi_epi16(v_fxy1, v_fxy2)), v_scale); + _mm_storeu_ps(dst1f + x * 2, _mm_add_ps(_mm_cvtepi32_ps(_mm_unpackhi_epi16(v_src, v_zero)), v_add)); + } + } #endif for( ; x < size.width; x++ ) { @@ -4919,7 +5222,10 @@ public: const int AB_SCALE = 1 << AB_BITS; int round_delta = interpolation == INTER_NEAREST ? 
AB_SCALE/2 : AB_SCALE/INTER_TAB_SIZE/2, x, y, x1, y1; #if CV_SSE2 - bool useSIMD = checkHardwareSupport(CV_CPU_SSE2); + bool useSSE2 = checkHardwareSupport(CV_CPU_SSE2); + #endif + #if CV_SSE4_1 + bool useSSE4_1 = checkHardwareSupport(CV_CPU_SSE4_1); #endif int bh0 = std::min(BLOCK_SZ/2, dst.rows); @@ -4957,6 +5263,31 @@ public: vst2q_s16(xy + (x1 << 1), v_dst); } + #elif CV_SSE4_1 + if (useSSE4_1) + { + __m128i v_X0 = _mm_set1_epi32(X0); + __m128i v_Y0 = _mm_set1_epi32(Y0); + for ( ; x1 <= bw - 16; x1 += 16) + { + __m128i v_x0 = _mm_packs_epi32(_mm_srai_epi32(_mm_add_epi32(v_X0, _mm_loadu_si128((__m128i const *)(adelta + x + x1))), AB_BITS), + _mm_srai_epi32(_mm_add_epi32(v_X0, _mm_loadu_si128((__m128i const *)(adelta + x + x1 + 4))), AB_BITS)); + __m128i v_x1 = _mm_packs_epi32(_mm_srai_epi32(_mm_add_epi32(v_X0, _mm_loadu_si128((__m128i const *)(adelta + x + x1 + 8))), AB_BITS), + _mm_srai_epi32(_mm_add_epi32(v_X0, _mm_loadu_si128((__m128i const *)(adelta + x + x1 + 12))), AB_BITS)); + + __m128i v_y0 = _mm_packs_epi32(_mm_srai_epi32(_mm_add_epi32(v_Y0, _mm_loadu_si128((__m128i const *)(bdelta + x + x1))), AB_BITS), + _mm_srai_epi32(_mm_add_epi32(v_Y0, _mm_loadu_si128((__m128i const *)(bdelta + x + x1 + 4))), AB_BITS)); + __m128i v_y1 = _mm_packs_epi32(_mm_srai_epi32(_mm_add_epi32(v_Y0, _mm_loadu_si128((__m128i const *)(bdelta + x + x1 + 8))), AB_BITS), + _mm_srai_epi32(_mm_add_epi32(v_Y0, _mm_loadu_si128((__m128i const *)(bdelta + x + x1 + 12))), AB_BITS)); + + _mm_interleave_epi16(v_x0, v_x1, v_y0, v_y1); + + _mm_storeu_si128((__m128i *)(xy + x1 * 2), v_x0); + _mm_storeu_si128((__m128i *)(xy + x1 * 2 + 8), v_x1); + _mm_storeu_si128((__m128i *)(xy + x1 * 2 + 16), v_y0); + _mm_storeu_si128((__m128i *)(xy + x1 * 2 + 24), v_y1); + } + } #endif for( ; x1 < bw; x1++ ) { @@ -4971,7 +5302,7 @@ public: short* alpha = A + y1*bw; x1 = 0; #if CV_SSE2 - if( useSIMD ) + if( useSSE2 ) { __m128i fxy_mask = _mm_set1_epi32(INTER_TAB_SIZE - 1); __m128i XX = _mm_set1_epi32(X0), YY = _mm_set1_epi32(Y0); @@ -5364,6 +5695,20 @@ public: int bw0 = std::min(BLOCK_SZ*BLOCK_SZ/bh0, width); bh0 = std::min(BLOCK_SZ*BLOCK_SZ/bw0, height); + #if CV_SSE4_1 + bool haveSSE4_1 = checkHardwareSupport(CV_CPU_SSE4_1); + __m128d v_M0 = _mm_set1_pd(M[0]); + __m128d v_M3 = _mm_set1_pd(M[3]); + __m128d v_M6 = _mm_set1_pd(M[6]); + __m128d v_intmax = _mm_set1_pd((double)INT_MAX); + __m128d v_intmin = _mm_set1_pd((double)INT_MIN); + __m128d v_2 = _mm_set1_pd(2), + v_zero = _mm_setzero_pd(), + v_1 = _mm_set1_pd(1), + v_its = _mm_set1_pd(INTER_TAB_SIZE); + __m128i v_itsi1 = _mm_set1_epi32(INTER_TAB_SIZE - 1); + #endif + for( y = range.start; y < range.end; y += bh0 ) { for( x = 0; x < width; x += bw0 ) @@ -5382,7 +5727,120 @@ public: double W0 = M[6]*x + M[7]*(y + y1) + M[8]; if( interpolation == INTER_NEAREST ) - for( x1 = 0; x1 < bw; x1++ ) + { + x1 = 0; + + #if CV_SSE4_1 + if (haveSSE4_1) + { + __m128d v_X0d = _mm_set1_pd(X0); + __m128d v_Y0d = _mm_set1_pd(Y0); + __m128d v_W0 = _mm_set1_pd(W0); + __m128d v_x1 = _mm_set_pd(1, 0); + + for( ; x1 <= bw - 16; x1 += 16 ) + { + // 0-3 + __m128i v_X0, v_Y0; + { + __m128d v_W = _mm_add_pd(_mm_mul_pd(v_M6, v_x1), v_W0); + v_W = _mm_andnot_pd(_mm_cmpeq_pd(v_W, v_zero), _mm_div_pd(v_1, v_W)); + __m128d v_fX0 = _mm_max_pd(v_intmin, _mm_min_pd(v_intmax, _mm_mul_pd(_mm_add_pd(v_X0d, _mm_mul_pd(v_M0, v_x1)), v_W))); + __m128d v_fY0 = _mm_max_pd(v_intmin, _mm_min_pd(v_intmax, _mm_mul_pd(_mm_add_pd(v_Y0d, _mm_mul_pd(v_M3, v_x1)), v_W))); + v_x1 = _mm_add_pd(v_x1, v_2); + + v_W = 
_mm_add_pd(_mm_mul_pd(v_M6, v_x1), v_W0); + v_W = _mm_andnot_pd(_mm_cmpeq_pd(v_W, v_zero), _mm_div_pd(v_1, v_W)); + __m128d v_fX1 = _mm_max_pd(v_intmin, _mm_min_pd(v_intmax, _mm_mul_pd(_mm_add_pd(v_X0d, _mm_mul_pd(v_M0, v_x1)), v_W))); + __m128d v_fY1 = _mm_max_pd(v_intmin, _mm_min_pd(v_intmax, _mm_mul_pd(_mm_add_pd(v_Y0d, _mm_mul_pd(v_M3, v_x1)), v_W))); + v_x1 = _mm_add_pd(v_x1, v_2); + + v_X0 = _mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(_mm_cvtpd_epi32(v_fX0)), + _mm_castsi128_ps(_mm_cvtpd_epi32(v_fX1)))); + v_Y0 = _mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(_mm_cvtpd_epi32(v_fY0)), + _mm_castsi128_ps(_mm_cvtpd_epi32(v_fY1)))); + } + + // 4-8 + __m128i v_X1, v_Y1; + { + __m128d v_W = _mm_add_pd(_mm_mul_pd(v_M6, v_x1), v_W0); + v_W = _mm_andnot_pd(_mm_cmpeq_pd(v_W, v_zero), _mm_div_pd(v_1, v_W)); + __m128d v_fX0 = _mm_max_pd(v_intmin, _mm_min_pd(v_intmax, _mm_mul_pd(_mm_add_pd(v_X0d, _mm_mul_pd(v_M0, v_x1)), v_W))); + __m128d v_fY0 = _mm_max_pd(v_intmin, _mm_min_pd(v_intmax, _mm_mul_pd(_mm_add_pd(v_Y0d, _mm_mul_pd(v_M3, v_x1)), v_W))); + v_x1 = _mm_add_pd(v_x1, v_2); + + v_W = _mm_add_pd(_mm_mul_pd(v_M6, v_x1), v_W0); + v_W = _mm_andnot_pd(_mm_cmpeq_pd(v_W, v_zero), _mm_div_pd(v_1, v_W)); + __m128d v_fX1 = _mm_max_pd(v_intmin, _mm_min_pd(v_intmax, _mm_mul_pd(_mm_add_pd(v_X0d, _mm_mul_pd(v_M0, v_x1)), v_W))); + __m128d v_fY1 = _mm_max_pd(v_intmin, _mm_min_pd(v_intmax, _mm_mul_pd(_mm_add_pd(v_Y0d, _mm_mul_pd(v_M3, v_x1)), v_W))); + v_x1 = _mm_add_pd(v_x1, v_2); + + v_X1 = _mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(_mm_cvtpd_epi32(v_fX0)), + _mm_castsi128_ps(_mm_cvtpd_epi32(v_fX1)))); + v_Y1 = _mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(_mm_cvtpd_epi32(v_fY0)), + _mm_castsi128_ps(_mm_cvtpd_epi32(v_fY1)))); + } + + // 8-11 + __m128i v_X2, v_Y2; + { + __m128d v_W = _mm_add_pd(_mm_mul_pd(v_M6, v_x1), v_W0); + v_W = _mm_andnot_pd(_mm_cmpeq_pd(v_W, v_zero), _mm_div_pd(v_1, v_W)); + __m128d v_fX0 = _mm_max_pd(v_intmin, _mm_min_pd(v_intmax, _mm_mul_pd(_mm_add_pd(v_X0d, _mm_mul_pd(v_M0, v_x1)), v_W))); + __m128d v_fY0 = _mm_max_pd(v_intmin, _mm_min_pd(v_intmax, _mm_mul_pd(_mm_add_pd(v_Y0d, _mm_mul_pd(v_M3, v_x1)), v_W))); + v_x1 = _mm_add_pd(v_x1, v_2); + + v_W = _mm_add_pd(_mm_mul_pd(v_M6, v_x1), v_W0); + v_W = _mm_andnot_pd(_mm_cmpeq_pd(v_W, v_zero), _mm_div_pd(v_1, v_W)); + __m128d v_fX1 = _mm_max_pd(v_intmin, _mm_min_pd(v_intmax, _mm_mul_pd(_mm_add_pd(v_X0d, _mm_mul_pd(v_M0, v_x1)), v_W))); + __m128d v_fY1 = _mm_max_pd(v_intmin, _mm_min_pd(v_intmax, _mm_mul_pd(_mm_add_pd(v_Y0d, _mm_mul_pd(v_M3, v_x1)), v_W))); + v_x1 = _mm_add_pd(v_x1, v_2); + + v_X2 = _mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(_mm_cvtpd_epi32(v_fX0)), + _mm_castsi128_ps(_mm_cvtpd_epi32(v_fX1)))); + v_Y2 = _mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(_mm_cvtpd_epi32(v_fY0)), + _mm_castsi128_ps(_mm_cvtpd_epi32(v_fY1)))); + } + + // 12-15 + __m128i v_X3, v_Y3; + { + __m128d v_W = _mm_add_pd(_mm_mul_pd(v_M6, v_x1), v_W0); + v_W = _mm_andnot_pd(_mm_cmpeq_pd(v_W, v_zero), _mm_div_pd(v_1, v_W)); + __m128d v_fX0 = _mm_max_pd(v_intmin, _mm_min_pd(v_intmax, _mm_mul_pd(_mm_add_pd(v_X0d, _mm_mul_pd(v_M0, v_x1)), v_W))); + __m128d v_fY0 = _mm_max_pd(v_intmin, _mm_min_pd(v_intmax, _mm_mul_pd(_mm_add_pd(v_Y0d, _mm_mul_pd(v_M3, v_x1)), v_W))); + v_x1 = _mm_add_pd(v_x1, v_2); + + v_W = _mm_add_pd(_mm_mul_pd(v_M6, v_x1), v_W0); + v_W = _mm_andnot_pd(_mm_cmpeq_pd(v_W, v_zero), _mm_div_pd(v_1, v_W)); + __m128d v_fX1 = _mm_max_pd(v_intmin, _mm_min_pd(v_intmax, _mm_mul_pd(_mm_add_pd(v_X0d, _mm_mul_pd(v_M0, v_x1)), v_W))); 
+ __m128d v_fY1 = _mm_max_pd(v_intmin, _mm_min_pd(v_intmax, _mm_mul_pd(_mm_add_pd(v_Y0d, _mm_mul_pd(v_M3, v_x1)), v_W))); + v_x1 = _mm_add_pd(v_x1, v_2); + + v_X3 = _mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(_mm_cvtpd_epi32(v_fX0)), + _mm_castsi128_ps(_mm_cvtpd_epi32(v_fX1)))); + v_Y3 = _mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(_mm_cvtpd_epi32(v_fY0)), + _mm_castsi128_ps(_mm_cvtpd_epi32(v_fY1)))); + } + + // convert to 16s + v_X0 = _mm_packs_epi32(v_X0, v_X1); + v_X1 = _mm_packs_epi32(v_X2, v_X3); + v_Y0 = _mm_packs_epi32(v_Y0, v_Y1); + v_Y1 = _mm_packs_epi32(v_Y2, v_Y3); + + _mm_interleave_epi16(v_X0, v_X1, v_Y0, v_Y1); + + _mm_storeu_si128((__m128i *)(xy + x1 * 2), v_X0); + _mm_storeu_si128((__m128i *)(xy + x1 * 2 + 8), v_X1); + _mm_storeu_si128((__m128i *)(xy + x1 * 2 + 16), v_Y0); + _mm_storeu_si128((__m128i *)(xy + x1 * 2 + 24), v_Y1); + } + } + #endif + + for( ; x1 < bw; x1++ ) { double W = W0 + M[6]*x1; W = W ? 1./W : 0; @@ -5394,10 +5852,136 @@ public: xy[x1*2] = saturate_cast(X); xy[x1*2+1] = saturate_cast(Y); } + } else { short* alpha = A + y1*bw; - for( x1 = 0; x1 < bw; x1++ ) + x1 = 0; + + #if CV_SSE4_1 + if (haveSSE4_1) + { + __m128d v_X0d = _mm_set1_pd(X0); + __m128d v_Y0d = _mm_set1_pd(Y0); + __m128d v_W0 = _mm_set1_pd(W0); + __m128d v_x1 = _mm_set_pd(1, 0); + + for( ; x1 <= bw - 16; x1 += 16 ) + { + // 0-3 + __m128i v_X0, v_Y0; + { + __m128d v_W = _mm_add_pd(_mm_mul_pd(v_M6, v_x1), v_W0); + v_W = _mm_andnot_pd(_mm_cmpeq_pd(v_W, v_zero), _mm_div_pd(v_its, v_W)); + __m128d v_fX0 = _mm_max_pd(v_intmin, _mm_min_pd(v_intmax, _mm_mul_pd(_mm_add_pd(v_X0d, _mm_mul_pd(v_M0, v_x1)), v_W))); + __m128d v_fY0 = _mm_max_pd(v_intmin, _mm_min_pd(v_intmax, _mm_mul_pd(_mm_add_pd(v_Y0d, _mm_mul_pd(v_M3, v_x1)), v_W))); + v_x1 = _mm_add_pd(v_x1, v_2); + + v_W = _mm_add_pd(_mm_mul_pd(v_M6, v_x1), v_W0); + v_W = _mm_andnot_pd(_mm_cmpeq_pd(v_W, v_zero), _mm_div_pd(v_its, v_W)); + __m128d v_fX1 = _mm_max_pd(v_intmin, _mm_min_pd(v_intmax, _mm_mul_pd(_mm_add_pd(v_X0d, _mm_mul_pd(v_M0, v_x1)), v_W))); + __m128d v_fY1 = _mm_max_pd(v_intmin, _mm_min_pd(v_intmax, _mm_mul_pd(_mm_add_pd(v_Y0d, _mm_mul_pd(v_M3, v_x1)), v_W))); + v_x1 = _mm_add_pd(v_x1, v_2); + + v_X0 = _mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(_mm_cvtpd_epi32(v_fX0)), + _mm_castsi128_ps(_mm_cvtpd_epi32(v_fX1)))); + v_Y0 = _mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(_mm_cvtpd_epi32(v_fY0)), + _mm_castsi128_ps(_mm_cvtpd_epi32(v_fY1)))); + } + + // 4-8 + __m128i v_X1, v_Y1; + { + __m128d v_W = _mm_add_pd(_mm_mul_pd(v_M6, v_x1), v_W0); + v_W = _mm_andnot_pd(_mm_cmpeq_pd(v_W, v_zero), _mm_div_pd(v_its, v_W)); + __m128d v_fX0 = _mm_max_pd(v_intmin, _mm_min_pd(v_intmax, _mm_mul_pd(_mm_add_pd(v_X0d, _mm_mul_pd(v_M0, v_x1)), v_W))); + __m128d v_fY0 = _mm_max_pd(v_intmin, _mm_min_pd(v_intmax, _mm_mul_pd(_mm_add_pd(v_Y0d, _mm_mul_pd(v_M3, v_x1)), v_W))); + v_x1 = _mm_add_pd(v_x1, v_2); + + v_W = _mm_add_pd(_mm_mul_pd(v_M6, v_x1), v_W0); + v_W = _mm_andnot_pd(_mm_cmpeq_pd(v_W, v_zero), _mm_div_pd(v_its, v_W)); + __m128d v_fX1 = _mm_max_pd(v_intmin, _mm_min_pd(v_intmax, _mm_mul_pd(_mm_add_pd(v_X0d, _mm_mul_pd(v_M0, v_x1)), v_W))); + __m128d v_fY1 = _mm_max_pd(v_intmin, _mm_min_pd(v_intmax, _mm_mul_pd(_mm_add_pd(v_Y0d, _mm_mul_pd(v_M3, v_x1)), v_W))); + v_x1 = _mm_add_pd(v_x1, v_2); + + v_X1 = _mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(_mm_cvtpd_epi32(v_fX0)), + _mm_castsi128_ps(_mm_cvtpd_epi32(v_fX1)))); + v_Y1 = _mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(_mm_cvtpd_epi32(v_fY0)), + 
_mm_castsi128_ps(_mm_cvtpd_epi32(v_fY1)))); + } + + // 8-11 + __m128i v_X2, v_Y2; + { + __m128d v_W = _mm_add_pd(_mm_mul_pd(v_M6, v_x1), v_W0); + v_W = _mm_andnot_pd(_mm_cmpeq_pd(v_W, v_zero), _mm_div_pd(v_its, v_W)); + __m128d v_fX0 = _mm_max_pd(v_intmin, _mm_min_pd(v_intmax, _mm_mul_pd(_mm_add_pd(v_X0d, _mm_mul_pd(v_M0, v_x1)), v_W))); + __m128d v_fY0 = _mm_max_pd(v_intmin, _mm_min_pd(v_intmax, _mm_mul_pd(_mm_add_pd(v_Y0d, _mm_mul_pd(v_M3, v_x1)), v_W))); + v_x1 = _mm_add_pd(v_x1, v_2); + + v_W = _mm_add_pd(_mm_mul_pd(v_M6, v_x1), v_W0); + v_W = _mm_andnot_pd(_mm_cmpeq_pd(v_W, v_zero), _mm_div_pd(v_its, v_W)); + __m128d v_fX1 = _mm_max_pd(v_intmin, _mm_min_pd(v_intmax, _mm_mul_pd(_mm_add_pd(v_X0d, _mm_mul_pd(v_M0, v_x1)), v_W))); + __m128d v_fY1 = _mm_max_pd(v_intmin, _mm_min_pd(v_intmax, _mm_mul_pd(_mm_add_pd(v_Y0d, _mm_mul_pd(v_M3, v_x1)), v_W))); + v_x1 = _mm_add_pd(v_x1, v_2); + + v_X2 = _mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(_mm_cvtpd_epi32(v_fX0)), + _mm_castsi128_ps(_mm_cvtpd_epi32(v_fX1)))); + v_Y2 = _mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(_mm_cvtpd_epi32(v_fY0)), + _mm_castsi128_ps(_mm_cvtpd_epi32(v_fY1)))); + } + + // 12-15 + __m128i v_X3, v_Y3; + { + __m128d v_W = _mm_add_pd(_mm_mul_pd(v_M6, v_x1), v_W0); + v_W = _mm_andnot_pd(_mm_cmpeq_pd(v_W, v_zero), _mm_div_pd(v_its, v_W)); + __m128d v_fX0 = _mm_max_pd(v_intmin, _mm_min_pd(v_intmax, _mm_mul_pd(_mm_add_pd(v_X0d, _mm_mul_pd(v_M0, v_x1)), v_W))); + __m128d v_fY0 = _mm_max_pd(v_intmin, _mm_min_pd(v_intmax, _mm_mul_pd(_mm_add_pd(v_Y0d, _mm_mul_pd(v_M3, v_x1)), v_W))); + v_x1 = _mm_add_pd(v_x1, v_2); + + v_W = _mm_add_pd(_mm_mul_pd(v_M6, v_x1), v_W0); + v_W = _mm_andnot_pd(_mm_cmpeq_pd(v_W, v_zero), _mm_div_pd(v_its, v_W)); + __m128d v_fX1 = _mm_max_pd(v_intmin, _mm_min_pd(v_intmax, _mm_mul_pd(_mm_add_pd(v_X0d, _mm_mul_pd(v_M0, v_x1)), v_W))); + __m128d v_fY1 = _mm_max_pd(v_intmin, _mm_min_pd(v_intmax, _mm_mul_pd(_mm_add_pd(v_Y0d, _mm_mul_pd(v_M3, v_x1)), v_W))); + v_x1 = _mm_add_pd(v_x1, v_2); + + v_X3 = _mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(_mm_cvtpd_epi32(v_fX0)), + _mm_castsi128_ps(_mm_cvtpd_epi32(v_fX1)))); + v_Y3 = _mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(_mm_cvtpd_epi32(v_fY0)), + _mm_castsi128_ps(_mm_cvtpd_epi32(v_fY1)))); + } + + // store alpha + __m128i v_alpha0 = _mm_add_epi32(_mm_slli_epi32(_mm_and_si128(v_Y0, v_itsi1), INTER_BITS), + _mm_and_si128(v_X0, v_itsi1)); + __m128i v_alpha1 = _mm_add_epi32(_mm_slli_epi32(_mm_and_si128(v_Y1, v_itsi1), INTER_BITS), + _mm_and_si128(v_X1, v_itsi1)); + _mm_storeu_si128((__m128i *)(alpha + x1), _mm_packs_epi32(v_alpha0, v_alpha1)); + + v_alpha0 = _mm_add_epi32(_mm_slli_epi32(_mm_and_si128(v_Y2, v_itsi1), INTER_BITS), + _mm_and_si128(v_X2, v_itsi1)); + v_alpha1 = _mm_add_epi32(_mm_slli_epi32(_mm_and_si128(v_Y3, v_itsi1), INTER_BITS), + _mm_and_si128(v_X3, v_itsi1)); + _mm_storeu_si128((__m128i *)(alpha + x1 + 8), _mm_packs_epi32(v_alpha0, v_alpha1)); + + // convert to 16s + v_X0 = _mm_packs_epi32(_mm_srai_epi32(v_X0, INTER_BITS), _mm_srai_epi32(v_X1, INTER_BITS)); + v_X1 = _mm_packs_epi32(_mm_srai_epi32(v_X2, INTER_BITS), _mm_srai_epi32(v_X3, INTER_BITS)); + v_Y0 = _mm_packs_epi32(_mm_srai_epi32(v_Y0, INTER_BITS), _mm_srai_epi32(v_Y1, INTER_BITS)); + v_Y1 = _mm_packs_epi32(_mm_srai_epi32(v_Y2, INTER_BITS), _mm_srai_epi32(v_Y3, INTER_BITS)); + + _mm_interleave_epi16(v_X0, v_X1, v_Y0, v_Y1); + + _mm_storeu_si128((__m128i *)(xy + x1 * 2), v_X0); + _mm_storeu_si128((__m128i *)(xy + x1 * 2 + 8), v_X1); + _mm_storeu_si128((__m128i *)(xy + x1 * 2 + 16), 
v_Y0); + _mm_storeu_si128((__m128i *)(xy + x1 * 2 + 24), v_Y1); + } + } + #endif + + for( ; x1 < bw; x1++ ) { double W = W0 + M[6]*x1; W = W ? INTER_TAB_SIZE/W : 0; diff --git a/modules/imgproc/src/lsd.cpp b/modules/imgproc/src/lsd.cpp index 65e874e00c..cda073a61e 100644 --- a/modules/imgproc/src/lsd.cpp +++ b/modules/imgproc/src/lsd.cpp @@ -191,8 +191,8 @@ public: * If only a roi needs to be selected, use * lsd_ptr->detect(image(roi), ..., lines); * lines += Scalar(roi.x, roi.y, roi.x, roi.y); - * @param _lines Return: A vector of Vec4i elements specifying the beginning and ending point of a line. - * Where Vec4i is (x1, y1, x2, y2), point 1 is the start, point 2 - end. + * @param _lines Return: A vector of Vec4i or Vec4f elements specifying the beginning and ending point of a line. + * Where Vec4i/Vec4f is (x1, y1, x2, y2), point 1 is the start, point 2 - end. * Returned lines are strictly oriented depending on the gradient. * @param width Return: Vector of widths of the regions, where the lines are found. E.g. Width of line. * @param prec Return: Vector of precisions with which the lines are found. @@ -286,8 +286,8 @@ private: /** * Detect lines in the whole input image. * - * @param lines Return: A vector of Vec4i elements specifying the beginning and ending point of a line. - * Where Vec4i is (x1, y1, x2, y2), point 1 is the start, point 2 - end. + * @param lines Return: A vector of Vec4f elements specifying the beginning and ending point of a line. + * Where Vec4f is (x1, y1, x2, y2), point 1 is the start, point 2 - end. * Returned lines are strictly oriented depending on the gradient. * @param widths Return: Vector of widths of the regions, where the lines are found. E.g. Width of line. * @param precisions Return: Vector of precisions with which the lines are found. 
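Note on the LSD change above: with the switch from Vec4i to Vec4f the detector now reports segment endpoints with sub-pixel precision, so callers should read the output as Vec4f. A minimal usage sketch, not part of the patch, assuming the stock createLineSegmentDetector factory from opencv2/imgproc.hpp:

#include <vector>
#include <opencv2/imgproc.hpp>

static void detectSegments(const cv::Mat& gray)
{
    // Create the detector; LSD_REFINE_STD enables the standard refinement step.
    cv::Ptr<cv::LineSegmentDetector> lsd = cv::createLineSegmentDetector(cv::LSD_REFINE_STD);

    // Each element is (x1, y1, x2, y2); before this patch the natural container was std::vector<cv::Vec4i>.
    std::vector<cv::Vec4f> lines;
    lsd->detect(gray, lines);

    // Draw the detected segments on a BGR copy of the input for inspection.
    cv::Mat vis;
    cv::cvtColor(gray, vis, cv::COLOR_GRAY2BGR);
    lsd->drawSegments(vis, lines);
}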
@@ -297,7 +297,7 @@ private: * * 0 corresponds to 1 mean false alarm * * 1 corresponds to 0.1 mean false alarms */ - void flsd(std::vector& lines, + void flsd(std::vector& lines, std::vector& widths, std::vector& precisions, std::vector& nfas); @@ -418,7 +418,7 @@ void LineSegmentDetectorImpl::detect(InputArray _image, OutputArray _lines, // Convert image to double img.convertTo(image, CV_64FC1); - std::vector lines; + std::vector lines; std::vector w, p, n; w_needed = _width.needed(); p_needed = _prec.needed(); @@ -435,7 +435,7 @@ void LineSegmentDetectorImpl::detect(InputArray _image, OutputArray _lines, if(n_needed) Mat(n).copyTo(_nfa); } -void LineSegmentDetectorImpl::flsd(std::vector& lines, +void LineSegmentDetectorImpl::flsd(std::vector& lines, std::vector& widths, std::vector& precisions, std::vector& nfas) { @@ -518,7 +518,7 @@ void LineSegmentDetectorImpl::flsd(std::vector& lines, } //Store the relevant data - lines.push_back(Vec4i(int(rec.x1), int(rec.y1), int(rec.x2), int(rec.y2))); + lines.push_back(Vec4f(float(rec.x1), float(rec.y1), float(rec.x2), float(rec.y2))); if(w_needed) widths.push_back(rec.width); if(p_needed) precisions.push_back(rec.p); if(n_needed && doRefine >= LSD_REFINE_ADV) nfas.push_back(log_nfa); @@ -1181,9 +1181,9 @@ void LineSegmentDetectorImpl::drawSegments(InputOutputArray _image, InputArray l // Draw segments for(int i = 0; i < N; ++i) { - const Vec4i& v = _lines.at(i); - Point b(v[0], v[1]); - Point e(v[2], v[3]); + const Vec4f& v = _lines.at(i); + Point2f b(v[0], v[1]); + Point2f e(v[2], v[3]); line(_image.getMatRef(), b, e, Scalar(0, 0, 255), 1); } } @@ -1208,14 +1208,14 @@ int LineSegmentDetectorImpl::compareSegments(const Size& size, InputArray lines1 // Draw segments for(int i = 0; i < N1; ++i) { - Point b(_lines1.at(i)[0], _lines1.at(i)[1]); - Point e(_lines1.at(i)[2], _lines1.at(i)[3]); + Point2f b(_lines1.at(i)[0], _lines1.at(i)[1]); + Point2f e(_lines1.at(i)[2], _lines1.at(i)[3]); line(I1, b, e, Scalar::all(255), 1); } for(int i = 0; i < N2; ++i) { - Point b(_lines2.at(i)[0], _lines2.at(i)[1]); - Point e(_lines2.at(i)[2], _lines2.at(i)[3]); + Point2f b(_lines2.at(i)[0], _lines2.at(i)[1]); + Point2f e(_lines2.at(i)[2], _lines2.at(i)[3]); line(I2, b, e, Scalar::all(255), 1); } diff --git a/modules/imgproc/src/pyramids.cpp b/modules/imgproc/src/pyramids.cpp index e510530afd..4271b942ae 100644 --- a/modules/imgproc/src/pyramids.cpp +++ b/modules/imgproc/src/pyramids.cpp @@ -12,6 +12,7 @@ // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Copyright (C) 2014-2015, Itseez Inc., all rights reserved. // Third party copyrights are property of their respective owners. 
// // Redistribution and use in source and binary forms, with or without modification, @@ -183,13 +184,336 @@ struct PyrDownVec_32f } }; +#if CV_SSE4_1 + +struct PyrDownVec_32s16u +{ + PyrDownVec_32s16u() + { + haveSSE = checkHardwareSupport(CV_CPU_SSE4_1); + } + + int operator()(int** src, ushort* dst, int, int width) const + { + int x = 0; + + if (!haveSSE) + return x; + + const int *row0 = src[0], *row1 = src[1], *row2 = src[2], *row3 = src[3], *row4 = src[4]; + __m128i v_delta = _mm_set1_epi32(128); + + for( ; x <= width - 8; x += 8 ) + { + __m128i v_r00 = _mm_loadu_si128((__m128i const *)(row0 + x)), + v_r01 = _mm_loadu_si128((__m128i const *)(row0 + x + 4)); + __m128i v_r10 = _mm_loadu_si128((__m128i const *)(row1 + x)), + v_r11 = _mm_loadu_si128((__m128i const *)(row1 + x + 4)); + __m128i v_r20 = _mm_loadu_si128((__m128i const *)(row2 + x)), + v_r21 = _mm_loadu_si128((__m128i const *)(row2 + x + 4)); + __m128i v_r30 = _mm_loadu_si128((__m128i const *)(row3 + x)), + v_r31 = _mm_loadu_si128((__m128i const *)(row3 + x + 4)); + __m128i v_r40 = _mm_loadu_si128((__m128i const *)(row4 + x)), + v_r41 = _mm_loadu_si128((__m128i const *)(row4 + x + 4)); + + v_r00 = _mm_add_epi32(_mm_add_epi32(v_r00, v_r40), _mm_add_epi32(v_r20, v_r20)); + v_r10 = _mm_add_epi32(_mm_add_epi32(v_r10, v_r20), v_r30); + + v_r10 = _mm_slli_epi32(v_r10, 2); + __m128i v_dst0 = _mm_srli_epi32(_mm_add_epi32(_mm_add_epi32(v_r00, v_r10), v_delta), 8); + + v_r01 = _mm_add_epi32(_mm_add_epi32(v_r01, v_r41), _mm_add_epi32(v_r21, v_r21)); + v_r11 = _mm_add_epi32(_mm_add_epi32(v_r11, v_r21), v_r31); + v_r11 = _mm_slli_epi32(v_r11, 2); + __m128i v_dst1 = _mm_srli_epi32(_mm_add_epi32(_mm_add_epi32(v_r01, v_r11), v_delta), 8); + + _mm_storeu_si128((__m128i *)(dst + x), _mm_packus_epi32(v_dst0, v_dst1)); + } + + return x; + } + + bool haveSSE; +}; + +#else + typedef PyrDownNoVec PyrDownVec_32s16u; -typedef PyrDownNoVec PyrDownVec_32s16s; -typedef PyrUpNoVec PyrUpVec_32s8u; -typedef PyrUpNoVec PyrUpVec_32s16s; +#endif // CV_SSE4_1 + +struct PyrDownVec_32s16s +{ + PyrDownVec_32s16s() + { + haveSSE = checkHardwareSupport(CV_CPU_SSE2); + } + + int operator()(int** src, short* dst, int, int width) const + { + int x = 0; + + if (!haveSSE) + return x; + + const int *row0 = src[0], *row1 = src[1], *row2 = src[2], *row3 = src[3], *row4 = src[4]; + __m128i v_delta = _mm_set1_epi32(128); + + for( ; x <= width - 8; x += 8 ) + { + __m128i v_r00 = _mm_loadu_si128((__m128i const *)(row0 + x)), + v_r01 = _mm_loadu_si128((__m128i const *)(row0 + x + 4)); + __m128i v_r10 = _mm_loadu_si128((__m128i const *)(row1 + x)), + v_r11 = _mm_loadu_si128((__m128i const *)(row1 + x + 4)); + __m128i v_r20 = _mm_loadu_si128((__m128i const *)(row2 + x)), + v_r21 = _mm_loadu_si128((__m128i const *)(row2 + x + 4)); + __m128i v_r30 = _mm_loadu_si128((__m128i const *)(row3 + x)), + v_r31 = _mm_loadu_si128((__m128i const *)(row3 + x + 4)); + __m128i v_r40 = _mm_loadu_si128((__m128i const *)(row4 + x)), + v_r41 = _mm_loadu_si128((__m128i const *)(row4 + x + 4)); + + v_r00 = _mm_add_epi32(_mm_add_epi32(v_r00, v_r40), _mm_add_epi32(v_r20, v_r20)); + v_r10 = _mm_add_epi32(_mm_add_epi32(v_r10, v_r20), v_r30); + + v_r10 = _mm_slli_epi32(v_r10, 2); + __m128i v_dst0 = _mm_srai_epi32(_mm_add_epi32(_mm_add_epi32(v_r00, v_r10), v_delta), 8); + + v_r01 = _mm_add_epi32(_mm_add_epi32(v_r01, v_r41), _mm_add_epi32(v_r21, v_r21)); + v_r11 = _mm_add_epi32(_mm_add_epi32(v_r11, v_r21), v_r31); + v_r11 = _mm_slli_epi32(v_r11, 2); + __m128i v_dst1 = 
_mm_srai_epi32(_mm_add_epi32(_mm_add_epi32(v_r01, v_r11), v_delta), 8); + + _mm_storeu_si128((__m128i *)(dst + x), _mm_packs_epi32(v_dst0, v_dst1)); + } + + return x; + } + + bool haveSSE; +}; + +struct PyrUpVec_32s8u +{ + int operator()(int** src, uchar** dst, int, int width) const + { + int x = 0; + + if (!checkHardwareSupport(CV_CPU_SSE2)) + return x; + + uchar *dst0 = dst[0], *dst1 = dst[1]; + const uint *row0 = (uint *)src[0], *row1 = (uint *)src[1], *row2 = (uint *)src[2]; + __m128i v_delta = _mm_set1_epi16(32), v_zero = _mm_setzero_si128(); + + for( ; x <= width - 16; x += 16 ) + { + __m128i v_r0 = _mm_packs_epi32(_mm_loadu_si128((__m128i const *)(row0 + x)), + _mm_loadu_si128((__m128i const *)(row0 + x + 4))); + __m128i v_r1 = _mm_packs_epi32(_mm_loadu_si128((__m128i const *)(row1 + x)), + _mm_loadu_si128((__m128i const *)(row1 + x + 4))); + __m128i v_r2 = _mm_packs_epi32(_mm_loadu_si128((__m128i const *)(row2 + x)), + _mm_loadu_si128((__m128i const *)(row2 + x + 4))); + + __m128i v_2r1 = _mm_adds_epu16(v_r1, v_r1), v_4r1 = _mm_adds_epu16(v_2r1, v_2r1); + __m128i v_dst00 = _mm_adds_epu16(_mm_adds_epu16(v_r0, v_r2), _mm_adds_epu16(v_2r1, v_4r1)); + __m128i v_dst10 = _mm_slli_epi16(_mm_adds_epu16(v_r1, v_r2), 2); + + v_r0 = _mm_packs_epi32(_mm_loadu_si128((__m128i const *)(row0 + x + 8)), + _mm_loadu_si128((__m128i const *)(row0 + x + 12))); + v_r1 = _mm_packs_epi32(_mm_loadu_si128((__m128i const *)(row1 + x + 8)), + _mm_loadu_si128((__m128i const *)(row1 + x + 12))); + v_r2 = _mm_packs_epi32(_mm_loadu_si128((__m128i const *)(row2 + x + 8)), + _mm_loadu_si128((__m128i const *)(row2 + x + 12))); + + v_2r1 = _mm_adds_epu16(v_r1, v_r1), v_4r1 = _mm_adds_epu16(v_2r1, v_2r1); + __m128i v_dst01 = _mm_adds_epu16(_mm_adds_epu16(v_r0, v_r2), _mm_adds_epu16(v_2r1, v_4r1)); + __m128i v_dst11 = _mm_slli_epi16(_mm_adds_epu16(v_r1, v_r2), 2); + + _mm_storeu_si128((__m128i *)(dst0 + x), _mm_packus_epi16(_mm_srli_epi16(_mm_adds_epu16(v_dst00, v_delta), 6), + _mm_srli_epi16(_mm_adds_epu16(v_dst01, v_delta), 6))); + _mm_storeu_si128((__m128i *)(dst1 + x), _mm_packus_epi16(_mm_srli_epi16(_mm_adds_epu16(v_dst10, v_delta), 6), + _mm_srli_epi16(_mm_adds_epu16(v_dst11, v_delta), 6))); + } + + for( ; x <= width - 8; x += 8 ) + { + __m128i v_r0 = _mm_packs_epi32(_mm_loadu_si128((__m128i const *)(row0 + x)), + _mm_loadu_si128((__m128i const *)(row0 + x + 4))); + __m128i v_r1 = _mm_packs_epi32(_mm_loadu_si128((__m128i const *)(row1 + x)), + _mm_loadu_si128((__m128i const *)(row1 + x + 4))); + __m128i v_r2 = _mm_packs_epi32(_mm_loadu_si128((__m128i const *)(row2 + x)), + _mm_loadu_si128((__m128i const *)(row2 + x + 4))); + + __m128i v_2r1 = _mm_adds_epu16(v_r1, v_r1), v_4r1 = _mm_adds_epu16(v_2r1, v_2r1); + __m128i v_dst0 = _mm_adds_epu16(_mm_adds_epu16(v_r0, v_r2), _mm_adds_epu16(v_2r1, v_4r1)); + __m128i v_dst1 = _mm_slli_epi16(_mm_adds_epu16(v_r1, v_r2), 2); + + _mm_storel_epi64((__m128i *)(dst0 + x), _mm_packus_epi16(_mm_srli_epi16(_mm_adds_epu16(v_dst0, v_delta), 6), v_zero)); + _mm_storel_epi64((__m128i *)(dst1 + x), _mm_packus_epi16(_mm_srli_epi16(_mm_adds_epu16(v_dst1, v_delta), 6), v_zero)); + } + + return x; + } +}; + +struct PyrUpVec_32s16s +{ + int operator()(int** src, short** dst, int, int width) const + { + int x = 0; + + if (!checkHardwareSupport(CV_CPU_SSE2)) + return x; + + short *dst0 = dst[0], *dst1 = dst[1]; + const uint *row0 = (uint *)src[0], *row1 = (uint *)src[1], *row2 = (uint *)src[2]; + __m128i v_delta = _mm_set1_epi32(32), v_zero = _mm_setzero_si128(); + + for( ; x <= width - 8; x 
+= 8 ) + { + __m128i v_r0 = _mm_loadu_si128((__m128i const *)(row0 + x)), + v_r1 = _mm_loadu_si128((__m128i const *)(row1 + x)), + v_r2 = _mm_loadu_si128((__m128i const *)(row2 + x)); + __m128i v_2r1 = _mm_slli_epi32(v_r1, 1), v_4r1 = _mm_slli_epi32(v_r1, 2); + __m128i v_dst00 = _mm_add_epi32(_mm_add_epi32(v_r0, v_r2), _mm_add_epi32(v_2r1, v_4r1)); + __m128i v_dst10 = _mm_slli_epi32(_mm_add_epi32(v_r1, v_r2), 2); + + v_r0 = _mm_loadu_si128((__m128i const *)(row0 + x + 4)); + v_r1 = _mm_loadu_si128((__m128i const *)(row1 + x + 4)); + v_r2 = _mm_loadu_si128((__m128i const *)(row2 + x + 4)); + v_2r1 = _mm_slli_epi32(v_r1, 1); + v_4r1 = _mm_slli_epi32(v_r1, 2); + __m128i v_dst01 = _mm_add_epi32(_mm_add_epi32(v_r0, v_r2), _mm_add_epi32(v_2r1, v_4r1)); + __m128i v_dst11 = _mm_slli_epi32(_mm_add_epi32(v_r1, v_r2), 2); + + _mm_storeu_si128((__m128i *)(dst0 + x), + _mm_packs_epi32(_mm_srai_epi32(_mm_add_epi32(v_dst00, v_delta), 6), + _mm_srai_epi32(_mm_add_epi32(v_dst01, v_delta), 6))); + _mm_storeu_si128((__m128i *)(dst1 + x), + _mm_packs_epi32(_mm_srai_epi32(_mm_add_epi32(v_dst10, v_delta), 6), + _mm_srai_epi32(_mm_add_epi32(v_dst11, v_delta), 6))); + } + + for( ; x <= width - 4; x += 4 ) + { + __m128i v_r0 = _mm_loadu_si128((__m128i const *)(row0 + x)), + v_r1 = _mm_loadu_si128((__m128i const *)(row1 + x)), + v_r2 = _mm_loadu_si128((__m128i const *)(row2 + x)); + __m128i v_2r1 = _mm_slli_epi32(v_r1, 1), v_4r1 = _mm_slli_epi32(v_r1, 2); + + __m128i v_dst0 = _mm_add_epi32(_mm_add_epi32(v_r0, v_r2), _mm_add_epi32(v_2r1, v_4r1)); + __m128i v_dst1 = _mm_slli_epi32(_mm_add_epi32(v_r1, v_r2), 2); + + _mm_storel_epi64((__m128i *)(dst0 + x), + _mm_packs_epi32(_mm_srai_epi32(_mm_add_epi32(v_dst0, v_delta), 6), v_zero)); + _mm_storel_epi64((__m128i *)(dst1 + x), + _mm_packs_epi32(_mm_srai_epi32(_mm_add_epi32(v_dst1, v_delta), 6), v_zero)); + } + + return x; + } +}; + +#if CV_SSE4_1 + +struct PyrUpVec_32s16u +{ + int operator()(int** src, ushort** dst, int, int width) const + { + int x = 0; + + if (!checkHardwareSupport(CV_CPU_SSE4_1)) + return x; + + ushort *dst0 = dst[0], *dst1 = dst[1]; + const uint *row0 = (uint *)src[0], *row1 = (uint *)src[1], *row2 = (uint *)src[2]; + __m128i v_delta = _mm_set1_epi32(32), v_zero = _mm_setzero_si128(); + + for( ; x <= width - 8; x += 8 ) + { + __m128i v_r0 = _mm_loadu_si128((__m128i const *)(row0 + x)), + v_r1 = _mm_loadu_si128((__m128i const *)(row1 + x)), + v_r2 = _mm_loadu_si128((__m128i const *)(row2 + x)); + __m128i v_2r1 = _mm_slli_epi32(v_r1, 1), v_4r1 = _mm_slli_epi32(v_r1, 2); + __m128i v_dst00 = _mm_add_epi32(_mm_add_epi32(v_r0, v_r2), _mm_add_epi32(v_2r1, v_4r1)); + __m128i v_dst10 = _mm_slli_epi32(_mm_add_epi32(v_r1, v_r2), 2); + + v_r0 = _mm_loadu_si128((__m128i const *)(row0 + x + 4)); + v_r1 = _mm_loadu_si128((__m128i const *)(row1 + x + 4)); + v_r2 = _mm_loadu_si128((__m128i const *)(row2 + x + 4)); + v_2r1 = _mm_slli_epi32(v_r1, 1); + v_4r1 = _mm_slli_epi32(v_r1, 2); + __m128i v_dst01 = _mm_add_epi32(_mm_add_epi32(v_r0, v_r2), _mm_add_epi32(v_2r1, v_4r1)); + __m128i v_dst11 = _mm_slli_epi32(_mm_add_epi32(v_r1, v_r2), 2); + + _mm_storeu_si128((__m128i *)(dst0 + x), + _mm_packus_epi32(_mm_srli_epi32(_mm_add_epi32(v_dst00, v_delta), 6), + _mm_srli_epi32(_mm_add_epi32(v_dst01, v_delta), 6))); + _mm_storeu_si128((__m128i *)(dst1 + x), + _mm_packus_epi32(_mm_srli_epi32(_mm_add_epi32(v_dst10, v_delta), 6), + _mm_srli_epi32(_mm_add_epi32(v_dst11, v_delta), 6))); + } + + for( ; x <= width - 4; x += 4 ) + { + __m128i v_r0 = _mm_loadu_si128((__m128i const 
*)(row0 + x)), + v_r1 = _mm_loadu_si128((__m128i const *)(row1 + x)), + v_r2 = _mm_loadu_si128((__m128i const *)(row2 + x)); + __m128i v_2r1 = _mm_slli_epi32(v_r1, 1), v_4r1 = _mm_slli_epi32(v_r1, 2); + + __m128i v_dst0 = _mm_add_epi32(_mm_add_epi32(v_r0, v_r2), _mm_add_epi32(v_2r1, v_4r1)); + __m128i v_dst1 = _mm_slli_epi32(_mm_add_epi32(v_r1, v_r2), 2); + + _mm_storel_epi64((__m128i *)(dst0 + x), + _mm_packus_epi32(_mm_srli_epi32(_mm_add_epi32(v_dst0, v_delta), 6), v_zero)); + _mm_storel_epi64((__m128i *)(dst1 + x), + _mm_packus_epi32(_mm_srli_epi32(_mm_add_epi32(v_dst1, v_delta), 6), v_zero)); + } + + return x; + } +}; + +#else + typedef PyrUpNoVec PyrUpVec_32s16u; -typedef PyrUpNoVec PyrUpVec_32f; + +#endif // CV_SSE4_1 + +struct PyrUpVec_32f +{ + int operator()(float** src, float** dst, int, int width) const + { + int x = 0; + + if (!checkHardwareSupport(CV_CPU_SSE2)) + return x; + + const float *row0 = src[0], *row1 = src[1], *row2 = src[2]; + float *dst0 = dst[0], *dst1 = dst[1]; + __m128 v_6 = _mm_set1_ps(6.0f), v_scale = _mm_set1_ps(1.f/64.0f), + v_scale4 = _mm_mul_ps(v_scale, _mm_set1_ps(4.0f)); + + for( ; x <= width - 8; x += 8 ) + { + __m128 v_r0 = _mm_loadu_ps(row0 + x); + __m128 v_r1 = _mm_loadu_ps(row1 + x); + __m128 v_r2 = _mm_loadu_ps(row2 + x); + + _mm_storeu_ps(dst1 + x, _mm_mul_ps(v_scale4, _mm_add_ps(v_r1, v_r2))); + _mm_storeu_ps(dst0 + x, _mm_mul_ps(v_scale, _mm_add_ps(_mm_add_ps(v_r0, _mm_mul_ps(v_6, v_r1)), v_r2))); + + v_r0 = _mm_loadu_ps(row0 + x + 4); + v_r1 = _mm_loadu_ps(row1 + x + 4); + v_r2 = _mm_loadu_ps(row2 + x + 4); + + _mm_storeu_ps(dst1 + x + 4, _mm_mul_ps(v_scale4, _mm_add_ps(v_r1, v_r2))); + _mm_storeu_ps(dst0 + x + 4, _mm_mul_ps(v_scale, _mm_add_ps(_mm_add_ps(v_r0, _mm_mul_ps(v_6, v_r1)), v_r2))); + } + + return x; + } +}; #elif CV_NEON diff --git a/modules/imgproc/src/smooth.cpp b/modules/imgproc/src/smooth.cpp index 2a69003641..7d8b263bda 100644 --- a/modules/imgproc/src/smooth.cpp +++ b/modules/imgproc/src/smooth.cpp @@ -12,6 +12,7 @@ // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Copyright (C) 2014-2015, Itseez Inc., all rights reserved. // Third party copyrights are property of their respective owners. 
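For readability, a scalar sketch of what the vectorized vertical passes above compute per element; the weights are the 1-4-6-4-1 binomial kernel with fixed-point shifts of 8 (pyrDown) and 6 (pyrUp). This is an illustrative reference only (saturation of the 16-bit results is omitted), not code from the patch:

// Five source rows r0..r4 produce one pyrDown output row; three source rows
// r0..r2 produce two pyrUp output rows (up0 for even rows, up1 for odd rows).
static void pyrRowsScalarReference(const int* r0, const int* r1, const int* r2,
                                   const int* r3, const int* r4,
                                   short* down, short* up0, short* up1, int width)
{
    for (int x = 0; x < width; x++)
    {
        down[x] = (short)((r0[x] + 4*r1[x] + 6*r2[x] + 4*r3[x] + r4[x] + 128) >> 8);
        up0[x]  = (short)((r0[x] + 6*r1[x] + r2[x] + 32) >> 6);
        up1[x]  = (short)((4*(r1[x] + r2[x]) + 32) >> 6);
    }
}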
// // Redistribution and use in source and binary forms, with or without modification, @@ -713,6 +714,156 @@ struct ColumnSum : std::vector sum; }; +template<> +struct ColumnSum : + public BaseColumnFilter +{ + ColumnSum( int _ksize, int _anchor, double _scale ) : + BaseColumnFilter() + { + ksize = _ksize; + anchor = _anchor; + scale = _scale; + sumCount = 0; + } + + virtual void reset() { sumCount = 0; } + + virtual void operator()(const uchar** src, uchar* dst, int dststep, int count, int width) + { + int i; + int* SUM; + bool haveScale = scale != 1; + double _scale = scale; + + #if CV_SSE2 + bool haveSSE2 = checkHardwareSupport(CV_CPU_SSE2); + #endif + + if( width != (int)sum.size() ) + { + sum.resize(width); + sumCount = 0; + } + SUM = &sum[0]; + if( sumCount == 0 ) + { + memset((void*)SUM, 0, width*sizeof(int)); + for( ; sumCount < ksize - 1; sumCount++, src++ ) + { + const int* Sp = (const int*)src[0]; + i = 0; + #if CV_SSE2 + if(haveSSE2) + { + for( ; i <= width-4; i+=4 ) + { + __m128i _sum = _mm_loadu_si128((const __m128i*)(SUM+i)); + __m128i _sp = _mm_loadu_si128((const __m128i*)(Sp+i)); + _mm_storeu_si128((__m128i*)(SUM+i),_mm_add_epi32(_sum, _sp)); + } + } + #elif CV_NEON + for( ; i <= width - 4; i+=4 ) + vst1q_s32(SUM + i, vaddq_s32(vld1q_s32(SUM + i), vld1q_s32(Sp + i))); + #endif + for( ; i < width; i++ ) + SUM[i] += Sp[i]; + } + } + else + { + CV_Assert( sumCount == ksize-1 ); + src += ksize-1; + } + + for( ; count--; src++ ) + { + const int* Sp = (const int*)src[0]; + const int* Sm = (const int*)src[1-ksize]; + int* D = (int*)dst; + if( haveScale ) + { + i = 0; + #if CV_SSE2 + if(haveSSE2) + { + const __m128 scale4 = _mm_set1_ps((float)_scale); + for( ; i <= width-4; i+=4 ) + { + __m128i _sm = _mm_loadu_si128((const __m128i*)(Sm+i)); + + __m128i _s0 = _mm_add_epi32(_mm_loadu_si128((const __m128i*)(SUM+i)), + _mm_loadu_si128((const __m128i*)(Sp+i))); + + __m128i _s0T = _mm_cvtps_epi32(_mm_mul_ps(scale4, _mm_cvtepi32_ps(_s0))); + + _mm_storeu_si128((__m128i*)(D+i), _s0T); + _mm_storeu_si128((__m128i*)(SUM+i),_mm_sub_epi32(_s0,_sm)); + } + } + #elif CV_NEON + float32x4_t v_scale = vdupq_n_f32((float)_scale); + for( ; i <= width-4; i+=4 ) + { + int32x4_t v_s0 = vaddq_s32(vld1q_s32(SUM + i), vld1q_s32(Sp + i)); + + int32x4_t v_s0d = cv_vrndq_s32_f32(vmulq_f32(vcvtq_f32_s32(v_s0), v_scale)); + vst1q_s32(D + i, v_s0d); + + vst1q_s32(SUM + i, vsubq_s32(v_s0, vld1q_s32(Sm + i))); + } + #endif + for( ; i < width; i++ ) + { + int s0 = SUM[i] + Sp[i]; + D[i] = saturate_cast(s0*_scale); + SUM[i] = s0 - Sm[i]; + } + } + else + { + i = 0; + #if CV_SSE2 + if(haveSSE2) + { + for( ; i <= width-4; i+=4 ) + { + __m128i _sm = _mm_loadu_si128((const __m128i*)(Sm+i)); + __m128i _s0 = _mm_add_epi32(_mm_loadu_si128((const __m128i*)(SUM+i)), + _mm_loadu_si128((const __m128i*)(Sp+i))); + + _mm_storeu_si128((__m128i*)(D+i), _s0); + _mm_storeu_si128((__m128i*)(SUM+i), _mm_sub_epi32(_s0,_sm)); + } + } + #elif CV_NEON + for( ; i <= width-4; i+=4 ) + { + int32x4_t v_s0 = vaddq_s32(vld1q_s32(SUM + i), vld1q_s32(Sp + i)); + + vst1q_s32(D + i, v_s0); + vst1q_s32(SUM + i, vsubq_s32(v_s0, vld1q_s32(Sm + i))); + } + #endif + + for( ; i < width; i++ ) + { + int s0 = SUM[i] + Sp[i]; + D[i] = s0; + SUM[i] = s0 - Sm[i]; + } + } + dst += dststep; + } + } + + double scale; + int sumCount; + std::vector sum; +}; + + template<> struct ColumnSum : public BaseColumnFilter diff --git a/modules/imgproc/src/sumpixels.cpp b/modules/imgproc/src/sumpixels.cpp index cdef88f6c1..16c7c7ef26 100755 --- 
a/modules/imgproc/src/sumpixels.cpp
+++ b/modules/imgproc/src/sumpixels.cpp
@@ -12,6 +12,7 @@
 //
 // Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
 // Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Copyright (C) 2014, Itseez Inc., all rights reserved.
 // Third party copyrights are property of their respective owners.
 //
 // Redistribution and use in source and binary forms, with or without modification,
diff --git a/modules/imgproc/test/test_imgwarp.cpp b/modules/imgproc/test/test_imgwarp.cpp
index 34505c4ca4..176c9907f3 100644
--- a/modules/imgproc/test/test_imgwarp.cpp
+++ b/modules/imgproc/test/test_imgwarp.cpp
@@ -1595,7 +1595,10 @@ void resizeArea(const cv::Mat & src, cv::Mat & dst)
 TEST(Resize, Area_half)
 {
     const int size = 1000;
-    int types[] = { CV_8UC1, CV_8UC4, CV_16UC1, CV_16UC4, CV_16SC1, CV_16SC4, CV_32FC1, CV_32FC4 };
+    int types[] = { CV_8UC1, CV_8UC4,
+                    CV_16UC1, CV_16UC4,
+                    CV_16SC1, CV_16SC3, CV_16SC4,
+                    CV_32FC1, CV_32FC4 };
     cv::RNG rng(17);
diff --git a/modules/imgproc/test/test_lsd.cpp b/modules/imgproc/test/test_lsd.cpp
index 50a3535034..2daa4bef14 100644
--- a/modules/imgproc/test/test_lsd.cpp
+++ b/modules/imgproc/test/test_lsd.cpp
@@ -16,7 +16,7 @@ public:
 protected:
     Mat test_image;
-    vector<Vec4i> lines;
+    vector<Vec4f> lines;
     RNG rng;
     int passedtests;
diff --git a/modules/ml/doc/ml_intro.markdown b/modules/ml/doc/ml_intro.markdown
new file mode 100644
index 0000000000..5e3c3d2cf8
--- /dev/null
+++ b/modules/ml/doc/ml_intro.markdown
@@ -0,0 +1,488 @@
+Machine Learning Overview {#ml_intro}
+=========================
+
+[TOC]
+
+Training Data {#ml_intro_data}
+=============
+
+In machine learning algorithms there is a notion of training data. Training data includes several
+components:
+
+- A set of training samples. Each training sample is a vector of values (in Computer Vision it's
+  sometimes referred to as a feature vector). Usually all the vectors have the same number of
+  components (features); the OpenCV ml module assumes that. Each feature can be ordered (i.e. its
+  values are floating-point numbers that can be compared with each other and strictly ordered,
+  i.e. sorted) or categorical (i.e. its value belongs to a fixed set of values that can be
+  integers, strings etc.).
+- An optional set of responses corresponding to the samples. Training data with no responses is used
+  in unsupervised learning algorithms that learn the structure of the supplied data based on distances
+  between different samples. Training data with responses is used in supervised learning
+  algorithms, which learn the function mapping samples to responses. Usually the responses are
+  scalar values, ordered (when we deal with a regression problem) or categorical (when we deal with a
+  classification problem; in this case the responses are often called "labels"). Some algorithms,
+  most notably neural networks, can handle not only scalar, but also multi-dimensional or
+  vector responses.
+- Another optional component is the mask of missing measurements. Most algorithms require all the
+  components in all the training samples to be valid, but some other algorithms, such as decision
+  trees, can handle the cases of missing measurements.
+- In the case of a classification problem the user may want to give different weights to different
+  classes. This is useful, for example, when:
+  - the user wants to shift prediction accuracy towards a lower false-alarm rate or a higher hit-rate.
+  - the user wants to compensate for significantly different amounts of training samples from
+    different classes.
+- In addition to that, each training sample may be given a weight, if user wants the algorithm to + pay special attention to certain training samples and adjust the training model accordingly. +- Also, user may wish not to use the whole training data at once, but rather use parts of it, e.g. + to do parameter optimization via cross-validation procedure. + +As you can see, training data can have rather complex structure; besides, it may be very big and/or +not entirely available, so there is need to make abstraction for this concept. In OpenCV ml there is +cv::ml::TrainData class for that. + +@sa cv::ml::TrainData + +Normal Bayes Classifier {#ml_intro_bayes} +======================= + +This simple classification model assumes that feature vectors from each class are normally +distributed (though, not necessarily independently distributed). So, the whole data distribution +function is assumed to be a Gaussian mixture, one component per class. Using the training data the +algorithm estimates mean vectors and covariance matrices for every class, and then it uses them for +prediction. + +@sa cv::ml::NormalBayesClassifier + +K-Nearest Neighbors {#ml_intro_knn} +=================== + +The algorithm caches all training samples and predicts the response for a new sample by analyzing a +certain number (__K__) of the nearest neighbors of the sample using voting, calculating weighted +sum, and so on. The method is sometimes referred to as "learning by example" because for prediction +it looks for the feature vector with a known response that is closest to the given vector. + +@sa cv::ml::KNearest + +Support Vector Machines {#ml_intro_svm} +======================= + +Originally, support vector machines (SVM) was a technique for building an optimal binary (2-class) +classifier. Later the technique was extended to regression and clustering problems. SVM is a partial +case of kernel-based methods. It maps feature vectors into a higher-dimensional space using a kernel +function and builds an optimal linear discriminating function in this space or an optimal hyper- +plane that fits into the training data. In case of SVM, the kernel is not defined explicitly. +Instead, a distance between any 2 points in the hyper-space needs to be defined. + +The solution is optimal, which means that the margin between the separating hyper-plane and the +nearest feature vectors from both classes (in case of 2-class classifier) is maximal. The feature +vectors that are the closest to the hyper-plane are called _support vectors_, which means that the +position of other vectors does not affect the hyper-plane (the decision function). + +SVM implementation in OpenCV is based on @cite LibSVM + +@sa cv::ml::SVM + +Prediction with SVM {#ml_intro_svm_predict} +------------------- + +StatModel::predict(samples, results, flags) should be used. Pass flags=StatModel::RAW_OUTPUT to get +the raw response from SVM (in the case of regression, 1-class or 2-class classification problem). + +Decision Trees {#ml_intro_trees} +============== + +The ML classes discussed in this section implement Classification and Regression Tree algorithms +described in @cite Breiman84 . + +The class cv::ml::DTrees represents a single decision tree or a collection of decision trees. It's +also a base class for RTrees and Boost. + +A decision tree is a binary tree (tree where each non-leaf node has two child nodes). It can be used +either for classification or for regression. 
For classification, each tree leaf is marked with a +class label; multiple leaves may have the same label. For regression, a constant is also assigned to +each tree leaf, so the approximation function is piecewise constant. + +@sa cv::ml::DTrees + +Predicting with Decision Trees {#ml_intro_trees_predict} +------------------------------ + +To reach a leaf node and to obtain a response for the input feature vector, the prediction procedure +starts with the root node. From each non-leaf node the procedure goes to the left (selects the left +child node as the next observed node) or to the right based on the value of a certain variable whose +index is stored in the observed node. The following variables are possible: + +- __Ordered variables.__ The variable value is compared with a threshold that is also stored in + the node. If the value is less than the threshold, the procedure goes to the left. Otherwise, it + goes to the right. For example, if the weight is less than 1 kilogram, the procedure goes to the + left, else to the right. + +- __Categorical variables.__ A discrete variable value is tested to see whether it belongs to a + certain subset of values (also stored in the node) from a limited set of values the variable + could take. If it does, the procedure goes to the left. Otherwise, it goes to the right. For + example, if the color is green or red, go to the left, else to the right. + +So, in each node, a pair of entities (variable_index , `decision_rule (threshold/subset)` ) is used. +This pair is called a _split_ (split on the variable variable_index ). Once a leaf node is reached, +the value assigned to this node is used as the output of the prediction procedure. + +Sometimes, certain features of the input vector are missed (for example, in the darkness it is +difficult to determine the object color), and the prediction procedure may get stuck in the certain +node (in the mentioned example, if the node is split by color). To avoid such situations, decision +trees use so-called _surrogate splits_. That is, in addition to the best "primary" split, every tree +node may also be split to one or more other variables with nearly the same results. + +Training Decision Trees {#ml_intro_trees_train} +----------------------- + +The tree is built recursively, starting from the root node. All training data (feature vectors and +responses) is used to split the root node. In each node the optimum decision rule (the best +"primary" split) is found based on some criteria. In machine learning, gini "purity" criteria are +used for classification, and sum of squared errors is used for regression. Then, if necessary, the +surrogate splits are found. They resemble the results of the primary split on the training data. All +the data is divided using the primary and the surrogate splits (like it is done in the prediction +procedure) between the left and the right child node. Then, the procedure recursively splits both +left and right nodes. At each node the recursive procedure may stop (that is, stop splitting the +node further) in one of the following cases: + +- Depth of the constructed tree branch has reached the specified maximum value. +- Number of training samples in the node is less than the specified threshold when it is not + statistically representative to split the node further. +- All the samples in the node belong to the same class or, in case of regression, the variation is + too small. +- The best found split does not give any noticeable improvement compared to a random choice. 
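+
+For illustration only, below is a minimal sketch of training a decision tree on data wrapped into
+cv::ml::TrainData and then running prediction. The data loading, train/test split and prediction
+calls are the ones documented in this module; the model creation/training call
+(`StatModel::train<DTrees>` with default `DTrees::Params`) is an assumption about the current state
+of the API and may need to be adjusted:
+@code{.cpp}
+using namespace cv;
+using namespace cv::ml;
+
+// Load a CSV file; by default the last column is treated as the response
+// (default values of the remaining loadFromCSV arguments are assumed here).
+Ptr<TrainData> data = TrainData::loadFromCSV("train.csv", 0);
+
+// Keep 80% of the samples for training, the rest for testing.
+data->setTrainTestSplitRatio(0.8);
+
+// Train a single decision tree with default parameters (assumed creation/training API).
+Ptr<DTrees> dtree = StatModel::train<DTrees>(data, DTrees::Params());
+
+// Predict responses; here simply on the training samples themselves.
+Mat results;
+dtree->predict(data->getTrainSamples(), results);
+@endcode
+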
+ +When the tree is built, it may be pruned using a cross-validation procedure, if necessary. That is, +some branches of the tree that may lead to the model overfitting are cut off. Normally, this +procedure is only applied to standalone decision trees. Usually tree ensembles build trees that are +small enough and use their own protection schemes against overfitting. + +Variable Importance {#ml_intro_trees_var} +------------------- + +Besides the prediction that is an obvious use of decision trees, the tree can be also used for +various data analyses. One of the key properties of the constructed decision tree algorithms is an +ability to compute the importance (relative decisive power) of each variable. For example, in a spam +filter that uses a set of words occurred in the message as a feature vector, the variable importance +rating can be used to determine the most "spam-indicating" words and thus help keep the dictionary +size reasonable. + +Importance of each variable is computed over all the splits on this variable in the tree, primary +and surrogate ones. Thus, to compute variable importance correctly, the surrogate splits must be +enabled in the training parameters, even if there is no missing data. + +Boosting {#ml_intro_boost} +======== + +A common machine learning task is supervised learning. In supervised learning, the goal is to learn +the functional relationship \f$F: y = F(x)\f$ between the input \f$x\f$ and the output \f$y\f$ . +Predicting the qualitative output is called _classification_, while predicting the quantitative +output is called _regression_. + +Boosting is a powerful learning concept that provides a solution to the supervised classification +learning task. It combines the performance of many "weak" classifiers to produce a powerful +committee @cite HTF01 . A weak classifier is only required to be better than chance, and thus can be +very simple and computationally inexpensive. However, many of them smartly combine results to a +strong classifier that often outperforms most "monolithic" strong classifiers such as SVMs and +Neural Networks. + +Decision trees are the most popular weak classifiers used in boosting schemes. Often the simplest +decision trees with only a single split node per tree (called stumps ) are sufficient. + +The boosted model is based on \f$N\f$ training examples \f${(x_i,y_i)}1N\f$ with \f$x_i \in{R^K}\f$ +and \f$y_i \in{-1, +1}\f$ . \f$x_i\f$ is a \f$K\f$ -component vector. Each component encodes a +feature relevant to the learning task at hand. The desired two-class output is encoded as -1 and +1. + +Different variants of boosting are known as Discrete Adaboost, Real AdaBoost, LogitBoost, and Gentle +AdaBoost @cite FHT98 . All of them are very similar in their overall structure. Therefore, this +chapter focuses only on the standard two-class Discrete AdaBoost algorithm, outlined below. +Initially the same weight is assigned to each sample (step 2). Then, a weak classifier +\f$f_{m(x)}\f$ is trained on the weighted training data (step 3a). Its weighted training error and +scaling factor \f$c_m\f$ is computed (step 3b). The weights are increased for training samples that +have been misclassified (step 3c). All weights are then normalized, and the process of finding the +next weak classifier continues for another \f$M\f$ -1 times. The final classifier \f$F(x)\f$ is the +sign of the weighted sum over the individual weak classifiers (step 4). 
+ +__Two-class Discrete AdaBoost Algorithm__ + +- Set \f$N\f$ examples \f${(x_i,y_i)}1N\f$ with \f$x_i \in{R^K}, y_i \in{-1, +1}\f$ . + +- Assign weights as \f$w_i = 1/N, i = 1,...,N\f$ . + +- Repeat for \f$m = 1,2,...,M\f$ : + + - Fit the classifier \f$f_m(x) \in{-1,1}\f$, using weights \f$w_i\f$ on the training data. + + - Compute \f$err_m = E_w [1_{(y \neq f_m(x))}], c_m = log((1 - err_m)/err_m)\f$ . + + - Set \f$w_i \Leftarrow w_i exp[c_m 1_{(y_i \neq f_m(x_i))}], i = 1,2,...,N,\f$ and + renormalize so that \f$\Sigma i w_i = 1\f$ . + +- Classify new samples _x_ using the formula: \f$\textrm{sign} (\Sigma m = 1M c_m f_m(x))\f$ . + +@note Similar to the classical boosting methods, the current implementation supports two-class +classifiers only. For M \> 2 classes, there is the __AdaBoost.MH__ algorithm (described in +@cite FHT98) that reduces the problem to the two-class problem, yet with a much larger training set. + +To reduce computation time for boosted models without substantially losing accuracy, the influence +trimming technique can be employed. As the training algorithm proceeds and the number of trees in +the ensemble is increased, a larger number of the training samples are classified correctly and with +increasing confidence, thereby those samples receive smaller weights on the subsequent iterations. +Examples with a very low relative weight have a small impact on the weak classifier training. Thus, +such examples may be excluded during the weak classifier training without having much effect on the +induced classifier. This process is controlled with the weight_trim_rate parameter. Only examples +with the summary fraction weight_trim_rate of the total weight mass are used in the weak classifier +training. Note that the weights for __all__ training examples are recomputed at each training +iteration. Examples deleted at a particular iteration may be used again for learning some of the +weak classifiers further @cite FHT98 + +@sa cv::ml::Boost + +Prediction with Boost {#ml_intro_boost_predict} +--------------------- +StatModel::predict(samples, results, flags) should be used. Pass flags=StatModel::RAW_OUTPUT to get +the raw sum from Boost classifier. + +Random Trees {#ml_intro_rtrees} +============ + +Random trees have been introduced by Leo Breiman and Adele Cutler: + . The algorithm can deal with both +classification and regression problems. Random trees is a collection (ensemble) of tree predictors +that is called _forest_ further in this section (the term has been also introduced by L. Breiman). +The classification works as follows: the random trees classifier takes the input feature vector, +classifies it with every tree in the forest, and outputs the class label that received the majority +of "votes". In case of a regression, the classifier response is the average of the responses over +all the trees in the forest. + +All the trees are trained with the same parameters but on different training sets. These sets are +generated from the original training set using the bootstrap procedure: for each training set, you +randomly select the same number of vectors as in the original set ( =N ). The vectors are chosen +with replacement. That is, some vectors will occur more than once and some will be absent. At each +node of each trained tree, not all the variables are used to find the best split, but a random +subset of them. With each node a new subset is generated. However, its size is fixed for all the +nodes and all the trees. 
It is a training parameter set to \f$\sqrt{number\_of\_variables}\f$ by +default. None of the built trees are pruned. + +In random trees there is no need for any accuracy estimation procedures, such as cross-validation or +bootstrap, or a separate test set to get an estimate of the training error. The error is estimated +internally during the training. When the training set for the current tree is drawn by sampling with +replacement, some vectors are left out (so-called _oob (out-of-bag) data_ ). The size of oob data is +about N/3 . The classification error is estimated by using this oob-data as follows: + +- Get a prediction for each vector, which is oob relative to the i-th tree, using the very i-th + tree. + +- After all the trees have been trained, for each vector that has ever been oob, find the + class-winner for it (the class that has got the majority of votes in the trees where + the vector was oob) and compare it to the ground-truth response. + +- Compute the classification error estimate as a ratio of the number of misclassified oob vectors + to all the vectors in the original data. In case of regression, the oob-error is computed as the + squared error for oob vectors difference divided by the total number of vectors. + +For the random trees usage example, please, see letter_recog.cpp sample in OpenCV distribution. + +@sa cv::ml::RTrees + +__References:__ + +- _Machine Learning_, Wald I, July 2002. + +- _Looking Inside the Black Box_, Wald II, July 2002. + +- _Software for the Masses_, Wald III, July 2002. + +- And other articles from the web site + + +Expectation Maximization {#ml_intro_em} +======================== + +The Expectation Maximization(EM) algorithm estimates the parameters of the multivariate probability +density function in the form of a Gaussian mixture distribution with a specified number of mixtures. + +Consider the set of the N feature vectors { \f$x_1, x_2,...,x_{N}\f$ } from a d-dimensional Euclidean +space drawn from a Gaussian mixture: + +\f[p(x;a_k,S_k, \pi _k) = \sum _{k=1}^{m} \pi _kp_k(x), \quad \pi _k \geq 0, \quad \sum _{k=1}^{m} \pi _k=1,\f] + +\f[p_k(x)= \varphi (x;a_k,S_k)= \frac{1}{(2\pi)^{d/2}\mid{S_k}\mid^{1/2}} exp \left \{ - \frac{1}{2} (x-a_k)^TS_k^{-1}(x-a_k) \right \} ,\f] + +where \f$m\f$ is the number of mixtures, \f$p_k\f$ is the normal distribution density with the mean +\f$a_k\f$ and covariance matrix \f$S_k\f$, \f$\pi_k\f$ is the weight of the k-th mixture. Given the +number of mixtures \f$M\f$ and the samples \f$x_i\f$, \f$i=1..N\f$ the algorithm finds the maximum- +likelihood estimates (MLE) of all the mixture parameters, that is, \f$a_k\f$, \f$S_k\f$ and +\f$\pi_k\f$ : + +\f[L(x, \theta )=logp(x, \theta )= \sum _{i=1}^{N}log \left ( \sum _{k=1}^{m} \pi _kp_k(x) \right ) \to \max _{ \theta \in \Theta },\f] + +\f[\Theta = \left \{ (a_k,S_k, \pi _k): a_k \in \mathbbm{R} ^d,S_k=S_k^T>0,S_k \in \mathbbm{R} ^{d \times d}, \pi _k \geq 0, \sum _{k=1}^{m} \pi _k=1 \right \} .\f] + +The EM algorithm is an iterative procedure. Each iteration includes two steps. 
At the first step +(Expectation step or E-step), you find a probability \f$p_{i,k}\f$ (denoted \f$\alpha_{i,k}\f$ in +the formula below) of sample i to belong to mixture k using the currently available mixture +parameter estimates: + +\f[\alpha _{ki} = \frac{\pi_k\varphi(x;a_k,S_k)}{\sum\limits_{j=1}^{m}\pi_j\varphi(x;a_j,S_j)} .\f] + +At the second step (Maximization step or M-step), the mixture parameter estimates are refined using +the computed probabilities: + +\f[\pi _k= \frac{1}{N} \sum _{i=1}^{N} \alpha _{ki}, \quad a_k= \frac{\sum\limits_{i=1}^{N}\alpha_{ki}x_i}{\sum\limits_{i=1}^{N}\alpha_{ki}} , \quad S_k= \frac{\sum\limits_{i=1}^{N}\alpha_{ki}(x_i-a_k)(x_i-a_k)^T}{\sum\limits_{i=1}^{N}\alpha_{ki}}\f] + +Alternatively, the algorithm may start with the M-step when the initial values for \f$p_{i,k}\f$ can +be provided. Another alternative when \f$p_{i,k}\f$ are unknown is to use a simpler clustering +algorithm to pre-cluster the input samples and thus obtain initial \f$p_{i,k}\f$ . Often (including +machine learning) the k-means algorithm is used for that purpose. + +One of the main problems of the EM algorithm is a large number of parameters to estimate. The +majority of the parameters reside in covariance matrices, which are \f$d \times d\f$ elements each +where \f$d\f$ is the feature space dimensionality. However, in many practical problems, the +covariance matrices are close to diagonal or even to \f$\mu_k*I\f$ , where \f$I\f$ is an identity +matrix and \f$\mu_k\f$ is a mixture-dependent "scale" parameter. So, a robust computation scheme +could start with harder constraints on the covariance matrices and then use the estimated parameters +as an input for a less constrained optimization problem (often a diagonal covariance matrix is +already a good enough approximation). + +@sa cv::ml::EM + +References: +- Bilmes98 J. A. Bilmes. _A Gentle Tutorial of the EM Algorithm and its Application to Parameter +Estimation for Gaussian Mixture and Hidden Markov Models_. Technical Report TR-97-021, +International Computer Science Institute and Computer Science Division, University of California +at Berkeley, April 1998. + +Neural Networks {#ml_intro_ann} +=============== + +ML implements feed-forward artificial neural networks or, more particularly, multi-layer perceptrons +(MLP), the most commonly used type of neural networks. MLP consists of the input layer, output +layer, and one or more hidden layers. Each layer of MLP includes one or more neurons directionally +linked with the neurons from the previous and the next layer. The example below represents a 3-layer +perceptron with three inputs, two outputs, and the hidden layer including five neurons: + +![image](pics/mlp.png) + +All the neurons in MLP are similar. Each of them has several input links (it takes the output values +from several neurons in the previous layer as input) and several output links (it passes the +response to several neurons in the next layer). The values retrieved from the previous layer are +summed up with certain weights, individual for each neuron, plus the bias term. The sum is +transformed using the activation function \f$f\f$ that may be also different for different neurons. + +![image](pics/neuron_model.png) + +In other words, given the outputs \f$x_j\f$ of the layer \f$n\f$ , the outputs \f$y_i\f$ of the +layer \f$n+1\f$ are computed as: + +\f[u_i = \sum _j (w^{n+1}_{i,j}*x_j) + w^{n+1}_{i,bias}\f] + +\f[y_i = f(u_i)\f] + +Different activation functions may be used. 
ML implements three standard functions: + +- Identity function ( cv::ml::ANN_MLP::IDENTITY ): \f$f(x)=x\f$ + +- Symmetrical sigmoid ( cv::ml::ANN_MLP::SIGMOID_SYM ): \f$f(x)=\beta*(1-e^{-\alpha + x})/(1+e^{-\alpha x})\f$, which is the default choice for MLP. The standard sigmoid with + \f$\beta =1, \alpha =1\f$ is shown below: + + ![image](pics/sigmoid_bipolar.png) + +- Gaussian function ( cv::ml::ANN_MLP::GAUSSIAN ): \f$f(x)=\beta e^{-\alpha x*x}\f$ , which is not + completely supported at the moment. + +In ML, all the neurons have the same activation functions, with the same free parameters ( +\f$\alpha, \beta\f$ ) that are specified by the user and are not altered by the training algorithms. + +So, the whole trained network works as follows: + +1. Take the feature vector as input. The vector size is equal to the size of the input layer. +2. Pass values as input to the first hidden layer. +3. Compute outputs of the hidden layer using the weights and the activation functions. +4. Pass outputs further downstream until you compute the output layer. + +So, to compute the network, you need to know all the weights \f$w^{n+1}_{i,j}\f$ . The weights are +computed by the training algorithm. The algorithm takes a training set, multiple input vectors with +the corresponding output vectors, and iteratively adjusts the weights to enable the network to give +the desired response to the provided input vectors. + +The larger the network size (the number of hidden layers and their sizes) is, the more the potential +network flexibility is. The error on the training set could be made arbitrarily small. But at the +same time the learned network also "learns" the noise present in the training set, so the error on +the test set usually starts increasing after the network size reaches a limit. Besides, the larger +networks are trained much longer than the smaller ones, so it is reasonable to pre-process the data, +using cv::PCA or a similar technique, and train a smaller network on only essential features. + +Another MLP feature is an inability to handle categorical data as is. However, there is a +workaround. If a certain feature in the input or output (in case of n -class classifier for +\f$n>2\f$ ) layer is categorical and can take \f$M>2\f$ different values, it makes sense to +represent it as a binary tuple of M elements, where the i -th element is 1 if and only if the +feature is equal to the i -th value out of M possible. It increases the size of the input/output +layer but speeds up the training algorithm convergence and at the same time enables "fuzzy" values +of such variables, that is, a tuple of probabilities instead of a fixed value. + +ML implements two algorithms for training MLPs. The first algorithm is a classical random +sequential back-propagation algorithm. The second (default) one is a batch RPROP algorithm. + +@sa cv::ml::ANN_MLP + +Logistic Regression {#ml_intro_lr} +=================== + +ML implements logistic regression, which is a probabilistic classification technique. Logistic +Regression is a binary classification algorithm which is closely related to Support Vector Machines +(SVM). Like SVM, Logistic Regression can be extended to work on multi-class classification problems +like digit recognition (i.e. recognizing digits like 0, 1, 2, 3, ... from the given images). This +version of Logistic Regression supports both binary and multi-class classifications (for multi-class +it creates multiple 2-class classifiers).
In order to train the logistic regression classifier, +Batch Gradient Descent and Mini-Batch Gradient Descent algorithms are used (see +). Logistic Regression is a +discriminative classifier (see for more details). +Logistic Regression is implemented as a C++ class in LogisticRegression. + +In Logistic Regression, we try to optimize the training parameter \f$\theta\f$ such that the +hypothesis \f$0 \leq h_\theta(x) \leq 1\f$ is achieved. We have \f$h_\theta(x) = g(\theta^T x)\f$ +and \f$g(z) = \frac{1}{1+e^{-z}}\f$ as the logistic or sigmoid function. The term "Logistic" in +Logistic Regression refers to this function. For given data of a binary classification problem of +classes 0 and 1, one can determine that the given data instance belongs to class 1 if \f$h_\theta(x) +\geq 0.5\f$ or class 0 if \f$h_\theta(x) < 0.5\f$ . + +In Logistic Regression, choosing the right parameters is of utmost importance for reducing the +training error and ensuring high training accuracy. cv::ml::LogisticRegression::Params is the +structure that defines the parameters required to train a Logistic Regression classifier. + +The learning rate is determined by cv::ml::LogisticRegression::Params.alpha. It determines how fast +we approach the solution. It is a positive real number. + +Optimization algorithms like Batch Gradient Descent and Mini-Batch Gradient Descent are supported in +LogisticRegression. The number of iterations these optimization algorithms have to run must also be +specified; it is set via +cv::ml::LogisticRegression::Params.num_iters. The number of iterations can be thought of as the number of +steps taken, while the learning rate specifies whether each step is long or short. These two parameters +define how fast we arrive at a possible solution. + +To compensate for overfitting, regularization is performed; it can be enabled by setting +cv::ml::LogisticRegression::Params.regularized to a positive integer (greater than zero). One can +specify what kind of regularization has to be performed by setting +cv::ml::LogisticRegression::Params.norm to REG_L1 or REG_L2. + +LogisticRegression provides a choice of two training methods: Batch Gradient Descent or Mini-Batch +Gradient Descent. To specify this, set cv::ml::LogisticRegression::Params::train_method to +either BATCH or MINI_BATCH. If the training method is set to MINI_BATCH, the size of the mini batch has +to be set to a positive integer using cv::ml::LogisticRegression::Params::mini_batch_size. + +A sample set of training parameters for the Logistic Regression classifier can be initialized as +follows: +@code{.cpp} +using namespace cv::ml; +LogisticRegression::Params params; +params.alpha = 0.5; +params.num_iters = 10000; +params.norm = LogisticRegression::REG_L2; +params.regularized = 1; +params.train_method = LogisticRegression::MINI_BATCH; +params.mini_batch_size = 10; +@endcode + +@sa cv::ml::LogisticRegression diff --git a/modules/ml/include/opencv2/ml.hpp b/modules/ml/include/opencv2/ml.hpp index 619664ba30..9dca486af4 100644 --- a/modules/ml/include/opencv2/ml.hpp +++ b/modules/ml/include/opencv2/ml.hpp @@ -56,455 +56,16 @@ /** @defgroup ml Machine Learning - @{ -@defgroup ml_stat Statistical Models -@defgroup ml_bayes Normal Bayes Classifier - -This simple classification model assumes that feature vectors from each class are normally -distributed (though, not necessarily independently distributed).
So, the whole data distribution -function is assumed to be a Gaussian mixture, one component per class. Using the training data the -algorithm estimates mean vectors and covariance matrices for every class, and then it uses them for -prediction. - -@defgroup ml_knearest K-Nearest Neighbors - -The algorithm caches all training samples and predicts the response for a new sample by analyzing a -certain number (**K**) of the nearest neighbors of the sample using voting, calculating weighted -sum, and so on. The method is sometimes referred to as "learning by example" because for prediction -it looks for the feature vector with a known response that is closest to the given vector. - -@defgroup ml_svm Support Vector Machines - -Originally, support vector machines (SVM) was a technique for building an optimal binary (2-class) -classifier. Later the technique was extended to regression and clustering problems. SVM is a partial -case of kernel-based methods. It maps feature vectors into a higher-dimensional space using a kernel -function and builds an optimal linear discriminating function in this space or an optimal -hyper-plane that fits into the training data. In case of SVM, the kernel is not defined explicitly. -Instead, a distance between any 2 points in the hyper-space needs to be defined. - -The solution is optimal, which means that the margin between the separating hyper-plane and the -nearest feature vectors from both classes (in case of 2-class classifier) is maximal. The feature -vectors that are the closest to the hyper-plane are called *support vectors*, which means that the -position of other vectors does not affect the hyper-plane (the decision function). - -SVM implementation in OpenCV is based on @cite LibSVM . - -Prediction with SVM -------------------- - -StatModel::predict(samples, results, flags) should be used. Pass flags=StatModel::RAW_OUTPUT to get -the raw response from SVM (in the case of regression, 1-class or 2-class classification problem). - -@defgroup ml_decsiontrees Decision Trees - -The ML classes discussed in this section implement Classification and Regression Tree algorithms -described in @cite Breiman84 . - -The class cv::ml::DTrees represents a single decision tree or a collection of decision trees. It's -also a base class for RTrees and Boost. - -A decision tree is a binary tree (tree where each non-leaf node has two child nodes). It can be used -either for classification or for regression. For classification, each tree leaf is marked with a -class label; multiple leaves may have the same label. For regression, a constant is also assigned to -each tree leaf, so the approximation function is piecewise constant. - -Predicting with Decision Trees ------------------------------- - -To reach a leaf node and to obtain a response for the input feature vector, the prediction procedure -starts with the root node. From each non-leaf node the procedure goes to the left (selects the left -child node as the next observed node) or to the right based on the value of a certain variable whose -index is stored in the observed node. The following variables are possible: - -- **Ordered variables.** The variable value is compared with a threshold that is also stored in - the node. If the value is less than the threshold, the procedure goes to the left. Otherwise, it - goes to the right. For example, if the weight is less than 1 kilogram, the procedure goes to the - left, else to the right. 
- -- **Categorical variables.** A discrete variable value is tested to see whether it belongs to a - certain subset of values (also stored in the node) from a limited set of values the variable - could take. If it does, the procedure goes to the left. Otherwise, it goes to the right. For - example, if the color is green or red, go to the left, else to the right. - -So, in each node, a pair of entities (variable_index , `decision_rule (threshold/subset)` ) is -used. This pair is called a *split* (split on the variable variable_index ). Once a leaf node is -reached, the value assigned to this node is used as the output of the prediction procedure. - -Sometimes, certain features of the input vector are missed (for example, in the darkness it is -difficult to determine the object color), and the prediction procedure may get stuck in the certain -node (in the mentioned example, if the node is split by color). To avoid such situations, decision -trees use so-called *surrogate splits*. That is, in addition to the best "primary" split, every tree -node may also be split to one or more other variables with nearly the same results. - -Training Decision Trees ------------------------ - -The tree is built recursively, starting from the root node. All training data (feature vectors and -responses) is used to split the root node. In each node the optimum decision rule (the best -"primary" split) is found based on some criteria. In machine learning, gini "purity" criteria are -used for classification, and sum of squared errors is used for regression. Then, if necessary, the -surrogate splits are found. They resemble the results of the primary split on the training data. All -the data is divided using the primary and the surrogate splits (like it is done in the prediction -procedure) between the left and the right child node. Then, the procedure recursively splits both -left and right nodes. At each node the recursive procedure may stop (that is, stop splitting the -node further) in one of the following cases: - -- Depth of the constructed tree branch has reached the specified maximum value. -- Number of training samples in the node is less than the specified threshold when it is not - statistically representative to split the node further. -- All the samples in the node belong to the same class or, in case of regression, the variation is - too small. -- The best found split does not give any noticeable improvement compared to a random choice. - -When the tree is built, it may be pruned using a cross-validation procedure, if necessary. That is, -some branches of the tree that may lead to the model overfitting are cut off. Normally, this -procedure is only applied to standalone decision trees. Usually tree ensembles build trees that are -small enough and use their own protection schemes against overfitting. - -Variable Importance -------------------- - -Besides the prediction that is an obvious use of decision trees, the tree can be also used for -various data analyses. One of the key properties of the constructed decision tree algorithms is an -ability to compute the importance (relative decisive power) of each variable. For example, in a spam -filter that uses a set of words occurred in the message as a feature vector, the variable importance -rating can be used to determine the most "spam-indicating" words and thus help keep the dictionary -size reasonable. - -Importance of each variable is computed over all the splits on this variable in the tree, primary -and surrogate ones. 
Thus, to compute variable importance correctly, the surrogate splits must be -enabled in the training parameters, even if there is no missing data. - -@defgroup ml_boost Boosting - -A common machine learning task is supervised learning. In supervised learning, the goal is to learn -the functional relationship \f$F: y = F(x)\f$ between the input \f$x\f$ and the output \f$y\f$ . Predicting the -qualitative output is called *classification*, while predicting the quantitative output is called -*regression*. - -Boosting is a powerful learning concept that provides a solution to the supervised classification -learning task. It combines the performance of many "weak" classifiers to produce a powerful -committee @cite HTF01 . A weak classifier is only required to be better than chance, and thus can be -very simple and computationally inexpensive. However, many of them smartly combine results to a -strong classifier that often outperforms most "monolithic" strong classifiers such as SVMs and -Neural Networks. - -Decision trees are the most popular weak classifiers used in boosting schemes. Often the simplest -decision trees with only a single split node per tree (called stumps ) are sufficient. - -The boosted model is based on \f$N\f$ training examples \f${(x_i,y_i)}1N\f$ with \f$x_i \in{R^K}\f$ and -\f$y_i \in{-1, +1}\f$ . \f$x_i\f$ is a \f$K\f$ -component vector. Each component encodes a feature relevant to -the learning task at hand. The desired two-class output is encoded as -1 and +1. - -Different variants of boosting are known as Discrete Adaboost, Real AdaBoost, LogitBoost, and Gentle -AdaBoost @cite FHT98 . All of them are very similar in their overall structure. Therefore, this chapter -focuses only on the standard two-class Discrete AdaBoost algorithm, outlined below. Initially the -same weight is assigned to each sample (step 2). Then, a weak classifier \f$f_{m(x)}\f$ is trained on -the weighted training data (step 3a). Its weighted training error and scaling factor \f$c_m\f$ is -computed (step 3b). The weights are increased for training samples that have been misclassified -(step 3c). All weights are then normalized, and the process of finding the next weak classifier -continues for another \f$M\f$ -1 times. The final classifier \f$F(x)\f$ is the sign of the weighted sum over -the individual weak classifiers (step 4). - -**Two-class Discrete AdaBoost Algorithm** - -- Set \f$N\f$ examples \f${(x_i,y_i)}1N\f$ with \f$x_i \in{R^K}, y_i \in{-1, +1}\f$ . - -- Assign weights as \f$w_i = 1/N, i = 1,...,N\f$ . - -- Repeat for \f$m = 1,2,...,M\f$ : - - 3.1. Fit the classifier \f$f_m(x) \in{-1,1}\f$, using weights \f$w_i\f$ on the training data. - - 3.2. Compute \f$err_m = E_w [1_{(y \neq f_m(x))}], c_m = log((1 - err_m)/err_m)\f$ . - - 3.3. Set \f$w_i \Leftarrow w_i exp[c_m 1_{(y_i \neq f_m(x_i))}], i = 1,2,...,N,\f$ and renormalize - so that \f$\Sigma i w_i = 1\f$ . - -1. Classify new samples *x* using the formula: \f$\textrm{sign} (\Sigma m = 1M c_m f_m(x))\f$ . - -@note Similar to the classical boosting methods, the current implementation supports two-class -classifiers only. For M \> 2 classes, there is the **AdaBoost.MH** algorithm (described in -@cite FHT98) that reduces the problem to the two-class problem, yet with a much larger training set. -To reduce computation time for boosted models without substantially losing accuracy, the influence -trimming technique can be employed. 
As the training algorithm proceeds and the number of trees in -the ensemble is increased, a larger number of the training samples are classified correctly and with -increasing confidence, thereby those samples receive smaller weights on the subsequent iterations. -Examples with a very low relative weight have a small impact on the weak classifier training. Thus, -such examples may be excluded during the weak classifier training without having much effect on the -induced classifier. This process is controlled with the weight_trim_rate parameter. Only examples -with the summary fraction weight_trim_rate of the total weight mass are used in the weak -classifier training. Note that the weights for **all** training examples are recomputed at each -training iteration. Examples deleted at a particular iteration may be used again for learning some -of the weak classifiers further @cite FHT98 . - -Prediction with Boost ---------------------- -StatModel::predict(samples, results, flags) should be used. Pass flags=StatModel::RAW_OUTPUT to get -the raw sum from Boost classifier. - -@defgroup ml_randomtrees Random Trees - -Random trees have been introduced by Leo Breiman and Adele Cutler: - . The algorithm can deal with both -classification and regression problems. Random trees is a collection (ensemble) of tree predictors -that is called *forest* further in this section (the term has been also introduced by L. Breiman). -The classification works as follows: the random trees classifier takes the input feature vector, -classifies it with every tree in the forest, and outputs the class label that received the majority -of "votes". In case of a regression, the classifier response is the average of the responses over -all the trees in the forest. - -All the trees are trained with the same parameters but on different training sets. These sets are -generated from the original training set using the bootstrap procedure: for each training set, you -randomly select the same number of vectors as in the original set ( =N ). The vectors are chosen -with replacement. That is, some vectors will occur more than once and some will be absent. At each -node of each trained tree, not all the variables are used to find the best split, but a random -subset of them. With each node a new subset is generated. However, its size is fixed for all the -nodes and all the trees. It is a training parameter set to \f$\sqrt{number_of_variables}\f$ by -default. None of the built trees are pruned. - -In random trees there is no need for any accuracy estimation procedures, such as cross-validation or -bootstrap, or a separate test set to get an estimate of the training error. The error is estimated -internally during the training. When the training set for the current tree is drawn by sampling with -replacement, some vectors are left out (so-called *oob (out-of-bag) data* ). The size of oob data is -about N/3 . The classification error is estimated by using this oob-data as follows: -- Get a prediction for each vector, which is oob relative to the i-th tree, using the very i-th - tree. + The Machine Learning Library (MLL) is a set of classes and functions for statistical + classification, regression, and clustering of data. -- After all the trees have been trained, for each vector that has ever been oob, find the - class-*winner* for it (the class that has got the majority of votes in the trees where the - vector was oob) and compare it to the ground-truth response. 
+ Most of the classification and regression algorithms are implemented as C++ classes. As the + algorithms have different sets of features (like an ability to handle missing measurements or + categorical input variables), there is a little common ground between the classes. This common + ground is defined by the class cv::ml::StatModel that all the other ML classes are derived from. -- Compute the classification error estimate as a ratio of the number of misclassified oob vectors - to all the vectors in the original data. In case of regression, the oob-error is computed as the - squared error for oob vectors difference divided by the total number of vectors. - -For the random trees usage example, please, see letter_recog.cpp sample in OpenCV distribution. - -**References:** - -- *Machine Learning*, Wald I, July 2002. - -- *Looking Inside the Black Box*, Wald II, July 2002. - -- *Software for the Masses*, Wald III, July 2002. - -- And other articles from the web site - - -@defgroup ml_em Expectation Maximization - -The Expectation Maximization(EM) algorithm estimates the parameters of the multivariate probability -density function in the form of a Gaussian mixture distribution with a specified number of mixtures. - -Consider the set of the N feature vectors { \f$x_1, x_2,...,x_{N}\f$ } from a d-dimensional Euclidean -space drawn from a Gaussian mixture: - -\f[p(x;a_k,S_k, \pi _k) = \sum _{k=1}^{m} \pi _kp_k(x), \quad \pi _k \geq 0, \quad \sum _{k=1}^{m} \pi _k=1,\f] - -\f[p_k(x)= \varphi (x;a_k,S_k)= \frac{1}{(2\pi)^{d/2}\mid{S_k}\mid^{1/2}} exp \left \{ - \frac{1}{2} (x-a_k)^TS_k^{-1}(x-a_k) \right \} ,\f] - -where \f$m\f$ is the number of mixtures, \f$p_k\f$ is the normal distribution density with the mean \f$a_k\f$ -and covariance matrix \f$S_k\f$, \f$\pi_k\f$ is the weight of the k-th mixture. Given the number of mixtures -\f$M\f$ and the samples \f$x_i\f$, \f$i=1..N\f$ the algorithm finds the maximum-likelihood estimates (MLE) of -all the mixture parameters, that is, \f$a_k\f$, \f$S_k\f$ and \f$\pi_k\f$ : - -\f[L(x, \theta )=logp(x, \theta )= \sum _{i=1}^{N}log \left ( \sum _{k=1}^{m} \pi _kp_k(x) \right ) \to \max _{ \theta \in \Theta },\f] - -\f[\Theta = \left \{ (a_k,S_k, \pi _k): a_k \in \mathbbm{R} ^d,S_k=S_k^T>0,S_k \in \mathbbm{R} ^{d \times d}, \pi _k \geq 0, \sum _{k=1}^{m} \pi _k=1 \right \} .\f] - -The EM algorithm is an iterative procedure. Each iteration includes two steps. At the first step -(Expectation step or E-step), you find a probability \f$p_{i,k}\f$ (denoted \f$\alpha_{i,k}\f$ in the -formula below) of sample i to belong to mixture k using the currently available mixture parameter -estimates: - -\f[\alpha _{ki} = \frac{\pi_k\varphi(x;a_k,S_k)}{\sum\limits_{j=1}^{m}\pi_j\varphi(x;a_j,S_j)} .\f] - -At the second step (Maximization step or M-step), the mixture parameter estimates are refined using -the computed probabilities: - -\f[\pi _k= \frac{1}{N} \sum _{i=1}^{N} \alpha _{ki}, \quad a_k= \frac{\sum\limits_{i=1}^{N}\alpha_{ki}x_i}{\sum\limits_{i=1}^{N}\alpha_{ki}} , \quad S_k= \frac{\sum\limits_{i=1}^{N}\alpha_{ki}(x_i-a_k)(x_i-a_k)^T}{\sum\limits_{i=1}^{N}\alpha_{ki}}\f] - -Alternatively, the algorithm may start with the M-step when the initial values for \f$p_{i,k}\f$ can be -provided. Another alternative when \f$p_{i,k}\f$ are unknown is to use a simpler clustering algorithm to -pre-cluster the input samples and thus obtain initial \f$p_{i,k}\f$ . Often (including machine learning) -the k-means algorithm is used for that purpose. 
- -One of the main problems of the EM algorithm is a large number of parameters to estimate. The -majority of the parameters reside in covariance matrices, which are \f$d \times d\f$ elements each where -\f$d\f$ is the feature space dimensionality. However, in many practical problems, the covariance -matrices are close to diagonal or even to \f$\mu_k*I\f$ , where \f$I\f$ is an identity matrix and \f$\mu_k\f$ is -a mixture-dependent "scale" parameter. So, a robust computation scheme could start with harder -constraints on the covariance matrices and then use the estimated parameters as an input for a less -constrained optimization problem (often a diagonal covariance matrix is already a good enough -approximation). - -References: -- Bilmes98 J. A. Bilmes. *A Gentle Tutorial of the EM Algorithm and its Application to Parameter - Estimation for Gaussian Mixture and Hidden Markov Models*. Technical Report TR-97-021, - International Computer Science Institute and Computer Science Division, University of California - at Berkeley, April 1998. - -@defgroup ml_neural Neural Networks - -ML implements feed-forward artificial neural networks or, more particularly, multi-layer perceptrons -(MLP), the most commonly used type of neural networks. MLP consists of the input layer, output -layer, and one or more hidden layers. Each layer of MLP includes one or more neurons directionally -linked with the neurons from the previous and the next layer. The example below represents a 3-layer -perceptron with three inputs, two outputs, and the hidden layer including five neurons: - -![image](pics/mlp.png) - -All the neurons in MLP are similar. Each of them has several input links (it takes the output values -from several neurons in the previous layer as input) and several output links (it passes the -response to several neurons in the next layer). The values retrieved from the previous layer are -summed up with certain weights, individual for each neuron, plus the bias term. The sum is -transformed using the activation function \f$f\f$ that may be also different for different neurons. - -![image](pics/neuron_model.png) - -In other words, given the outputs \f$x_j\f$ of the layer \f$n\f$ , the outputs \f$y_i\f$ of the layer \f$n+1\f$ are -computed as: - -\f[u_i = \sum _j (w^{n+1}_{i,j}*x_j) + w^{n+1}_{i,bias}\f] - -\f[y_i = f(u_i)\f] - -Different activation functions may be used. ML implements three standard functions: - -- Identity function ( ANN_MLP::IDENTITY ): \f$f(x)=x\f$ - -- Symmetrical sigmoid ( ANN_MLP::SIGMOID_SYM ): \f$f(x)=\beta*(1-e^{-\alpha x})/(1+e^{-\alpha x}\f$ - ), which is the default choice for MLP. The standard sigmoid with \f$\beta =1, \alpha =1\f$ is shown - below: - - ![image](pics/sigmoid_bipolar.png) - -- Gaussian function ( ANN_MLP::GAUSSIAN ): \f$f(x)=\beta e^{-\alpha x*x}\f$ , which is not completely - supported at the moment. - -In ML, all the neurons have the same activation functions, with the same free parameters ( -\f$\alpha, \beta\f$ ) that are specified by user and are not altered by the training algorithms. - -So, the whole trained network works as follows: - -1. Take the feature vector as input. The vector size is equal to the size of the input layer. -2. Pass values as input to the first hidden layer. -3. Compute outputs of the hidden layer using the weights and the activation functions. -4. Pass outputs further downstream until you compute the output layer. - -So, to compute the network, you need to know all the weights \f$w^{n+1)}_{i,j}\f$ . 
The weights are -computed by the training algorithm. The algorithm takes a training set, multiple input vectors with -the corresponding output vectors, and iteratively adjusts the weights to enable the network to give -the desired response to the provided input vectors. - -The larger the network size (the number of hidden layers and their sizes) is, the more the potential -network flexibility is. The error on the training set could be made arbitrarily small. But at the -same time the learned network also "learns" the noise present in the training set, so the error on -the test set usually starts increasing after the network size reaches a limit. Besides, the larger -networks are trained much longer than the smaller ones, so it is reasonable to pre-process the data, -using PCA::operator() or similar technique, and train a smaller network on only essential features. - -Another MLP feature is an inability to handle categorical data as is. However, there is a -workaround. If a certain feature in the input or output (in case of n -class classifier for \f$n>2\f$ ) -layer is categorical and can take \f$M>2\f$ different values, it makes sense to represent it as a binary -tuple of M elements, where the i -th element is 1 if and only if the feature is equal to the i -th -value out of M possible. It increases the size of the input/output layer but speeds up the training -algorithm convergence and at the same time enables "fuzzy" values of such variables, that is, a -tuple of probabilities instead of a fixed value. - -ML implements two algorithms for training MLP's. The first algorithm is a classical random -sequential back-propagation algorithm. The second (default) one is a batch RPROP algorithm. - -@defgroup ml_lr Logistic Regression - -ML implements logistic regression, which is a probabilistic classification technique. Logistic -Regression is a binary classification algorithm which is closely related to Support Vector Machines -(SVM). Like SVM, Logistic Regression can be extended to work on multi-class classification problems -like digit recognition (i.e. recognizing digitis like 0,1 2, 3,... from the given images). This -version of Logistic Regression supports both binary and multi-class classifications (for multi-class -it creates a multiple 2-class classifiers). In order to train the logistic regression classifier, -Batch Gradient Descent and Mini-Batch Gradient Descent algorithms are used (see ). -Logistic Regression is a discriminative classifier (see for more details). -Logistic Regression is implemented as a C++ class in LogisticRegression. - -In Logistic Regression, we try to optimize the training paramater \f$\theta\f$ such that the hypothesis -\f$0 \leq h_\theta(x) \leq 1\f$ is acheived. We have \f$h_\theta(x) = g(h_\theta(x))\f$ and -\f$g(z) = \frac{1}{1+e^{-z}}\f$ as the logistic or sigmoid function. The term "Logistic" in Logistic -Regression refers to this function. For given data of a binary classification problem of classes 0 -and 1, one can determine that the given data instance belongs to class 1 if \f$h_\theta(x) \geq 0.5\f$ -or class 0 if \f$h_\theta(x) < 0.5\f$ . - -In Logistic Regression, choosing the right parameters is of utmost importance for reducing the -training error and ensuring high training accuracy. LogisticRegression::Params is the structure that -defines parameters that are required to train a Logistic Regression classifier. The learning rate is -determined by LogisticRegression::Params.alpha. It determines how faster we approach the solution. 
-It is a positive real number. Optimization algorithms like Batch Gradient Descent and Mini-Batch -Gradient Descent are supported in LogisticRegression. It is important that we mention the number of -iterations these optimization algorithms have to run. The number of iterations are mentioned by -LogisticRegression::Params.num_iters. The number of iterations can be thought as number of steps -taken and learning rate specifies if it is a long step or a short step. These two parameters define -how fast we arrive at a possible solution. In order to compensate for overfitting regularization is -performed, which can be enabled by setting LogisticRegression::Params.regularized to a positive -integer (greater than zero). One can specify what kind of regularization has to be performed by -setting LogisticRegression::Params.norm to LogisticRegression::REG_L1 or -LogisticRegression::REG_L2 values. LogisticRegression provides a choice of 2 training methods with -Batch Gradient Descent or the Mini-Batch Gradient Descent. To specify this, set -LogisticRegression::Params.train_method to either LogisticRegression::BATCH or -LogisticRegression::MINI_BATCH. If LogisticRegression::Params is set to -LogisticRegression::MINI_BATCH, the size of the mini batch has to be to a postive integer using -LogisticRegression::Params.mini_batch_size. - -A sample set of training parameters for the Logistic Regression classifier can be initialized as -follows: -@code - LogisticRegression::Params params; - params.alpha = 0.5; - params.num_iters = 10000; - params.norm = LogisticRegression::REG_L2; - params.regularized = 1; - params.train_method = LogisticRegression::MINI_BATCH; - params.mini_batch_size = 10; -@endcode - -@defgroup ml_data Training Data - -In machine learning algorithms there is notion of training data. Training data includes several -components: - -- A set of training samples. Each training sample is a vector of values (in Computer Vision it's - sometimes referred to as feature vector). Usually all the vectors have the same number of - components (features); OpenCV ml module assumes that. Each feature can be ordered (i.e. its - values are floating-point numbers that can be compared with each other and strictly ordered, - i.e. sorted) or categorical (i.e. its value belongs to a fixed set of values that can be - integers, strings etc.). -- Optional set of responses corresponding to the samples. Training data with no responses is used - in unsupervised learning algorithms that learn structure of the supplied data based on distances - between different samples. Training data with responses is used in supervised learning - algorithms, which learn the function mapping samples to responses. Usually the responses are - scalar values, ordered (when we deal with regression problem) or categorical (when we deal with - classification problem; in this case the responses are often called "labels"). Some algorithms, - most noticeably Neural networks, can handle not only scalar, but also multi-dimensional or - vector responses. -- Another optional component is the mask of missing measurements. Most algorithms require all the - components in all the training samples be valid, but some other algorithms, such as decision - tress, can handle the cases of missing measurements. -- In the case of classification problem user may want to give different weights to different - classes. This is useful, for example, when - - user wants to shift prediction accuracy towards lower false-alarm rate or higher hit-rate. 
- - user wants to compensate for significantly different amounts of training samples from - different classes. -- In addition to that, each training sample may be given a weight, if user wants the algorithm to - pay special attention to certain training samples and adjust the training model accordingly. -- Also, user may wish not to use the whole training data at once, but rather use parts of it, e.g. - to do parameter optimization via cross-validation procedure. - -As you can see, training data can have rather complex structure; besides, it may be very big and/or -not entirely available, so there is need to make abstraction for this concept. In OpenCV ml there is -cv::ml::TrainData class for that. - - @} + See detailed overview here: @ref ml_intro. */ namespace cv @@ -516,83 +77,62 @@ namespace ml //! @addtogroup ml //! @{ -/* Variable type */ -enum +/** @brief Variable types */ +enum VariableTypes { - VAR_NUMERICAL =0, - VAR_ORDERED =0, - VAR_CATEGORICAL =1 + VAR_NUMERICAL =0, //!< same as VAR_ORDERED + VAR_ORDERED =0, //!< ordered variables + VAR_CATEGORICAL =1 //!< categorical variables }; -enum +/** @brief %Error types */ +enum ErrorTypes { TEST_ERROR = 0, TRAIN_ERROR = 1 }; -enum +/** @brief Sample types */ +enum SampleTypes { - ROW_SAMPLE = 0, - COL_SAMPLE = 1 + ROW_SAMPLE = 0, //!< each training sample is a row of samples + COL_SAMPLE = 1 //!< each training sample occupies a column of samples }; -//! @addtogroup ml_svm -//! @{ - /** @brief The structure represents the logarithmic grid range of statmodel parameters. It is used for optimizing statmodel accuracy by varying model parameters, the accuracy estimate being computed by cross-validation. -- member double ParamGrid::minVal -Minimum value of the statmodel parameter. -- member double ParamGrid::maxVal -Maximum value of the statmodel parameter. -- member double ParamGrid::logStep -Logarithmic step for iterating the statmodel parameter. -The grid determines the following iteration sequence of the statmodel parameter values: - -\f[(minVal, minVal*step, minVal*{step}^2, \dots, minVal*{logStep}^n),\f] - -where \f$n\f$ is the maximal index satisfying - -\f[\texttt{minVal} * \texttt{logStep} ^n < \texttt{maxVal}\f] - -The grid is logarithmic, so logStep must always be greater then 1. */ class CV_EXPORTS_W_MAP ParamGrid { public: - /** @brief The constructors. - - The full constructor initializes corresponding members. The default constructor creates a dummy - grid: - @code - ParamGrid::ParamGrid() - { - minVal = maxVal = 0; - logStep = 1; - } - @endcode - */ + /** @brief Default constructor */ ParamGrid(); + /** @brief Constructor with parameters */ ParamGrid(double _minVal, double _maxVal, double _logStep); - CV_PROP_RW double minVal; - CV_PROP_RW double maxVal; + CV_PROP_RW double minVal; //!< Minimum value of the statmodel parameter. Default value is 0. + CV_PROP_RW double maxVal; //!< Maximum value of the statmodel parameter. Default value is 0. + /** @brief Logarithmic step for iterating the statmodel parameter. + + The grid determines the following iteration sequence of the statmodel parameter values: + \f[(minVal, minVal*step, minVal*{step}^2, \dots, minVal*{logStep}^n),\f] + where \f$n\f$ is the maximal index satisfying + \f[\texttt{minVal} * \texttt{logStep} ^n < \texttt{maxVal}\f] + The grid is logarithmic, so logStep must always be greater then 1. Default value is 1. + */ CV_PROP_RW double logStep; }; -//! @} ml_svm - -//! @addtogroup ml_data -//! @{ - /** @brief Class encapsulating training data. 
Please note that the class only specifies the interface of training data, but not implementation. -All the statistical model classes in ml take Ptr\<TrainData\>. In other words, you can create your -own class derived from TrainData and supply smart pointer to the instance of this class into -StatModel::train. +All the statistical model classes in the _ml_ module accept Ptr\<TrainData\> as a parameter. In other +words, you can create your own class derived from TrainData and pass a smart pointer to the instance +of this class into StatModel::train. + +@sa @ref ml_intro_data */ class CV_EXPORTS TrainData { @@ -614,14 +154,14 @@ public: /** @brief Returns matrix of train samples @param layout The requested layout. If it's different from the initial one, the matrix is - transposed. + transposed. See ml::SampleTypes. @param compressSamples if true, the function returns only the training samples (specified by - sampleIdx) + sampleIdx) @param compressVars if true, the function returns the shorter training samples, containing only - the active variables. + the active variables. - In current implementation the function tries to avoid physical data copying and returns the matrix - stored inside TrainData (unless the transposition or compression is needed). + In the current implementation the function tries to avoid physical data copying and returns the + matrix stored inside TrainData (unless the transposition or compression is needed). */ virtual Mat getTrainSamples(int layout=ROW_SAMPLE, bool compressSamples=true, @@ -629,15 +169,15 @@ public: /** @brief Returns the vector of responses - The function returns ordered or the original categorical responses. Usually it's used in regression - algorithms. + The function returns ordered or the original categorical responses. Usually it's used in + regression algorithms. */ virtual Mat getTrainResponses() const = 0; /** @brief Returns the vector of normalized categorical responses - The function returns vector of responses. Each response is integer from 0 to \<number of classes\>-1. The actual label value can be retrieved then from the class label vector, see + The function returns the vector of responses. Each response is an integer from `0` to `<number of classes>-1`. The actual label value can then be retrieved from the class label vector, see TrainData::getClassLabels. */ virtual Mat getTrainNormCatResponses() const = 0; @@ -668,14 +208,18 @@ public: virtual Mat getCatOfs() const = 0; virtual Mat getCatMap() const = 0; + /** @brief Splits the training data into the training and test parts + @sa TrainData::setTrainTestSplitRatio + */ virtual void setTrainTestSplit(int count, bool shuffle=true) = 0; /** @brief Splits the training data into the training and test parts - The function selects a subset of specified relative size and then returns it as the training set. If - the function is not called, all the data is used for training. Please, note that for each of - TrainData::getTrain\* there is corresponding TrainData::getTest\*, so that the test subset can be - retrieved and processed as well. + The function selects a subset of specified relative size and then returns it as the training + set. If the function is not called, all the data is used for training. Please note that for + each of TrainData::getTrain\* there is a corresponding TrainData::getTest\*, so that the test + subset can be retrieved and processed as well.
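Putting the split-related methods above together, the following is a hedged sketch of creating a TrainData object from in-memory arrays and reserving part of it for testing; the sample values are invented, and Ptr\<TrainData\> spells out the template argument that is elided in the declarations quoted here.

@code
    #include <opencv2/ml.hpp>
    using namespace cv;
    using namespace cv::ml;

    void splitExample()
    {
        // Four 2-D samples, one per row (ROW_SAMPLE), with integer (categorical) labels.
        Mat samples = (Mat_<float>(4, 2) << 0.f, 0.f,  0.f, 1.f,  1.f, 0.f,  1.f, 1.f);
        Mat labels  = (Mat_<int>(4, 1) << 0, 1, 1, 0);

        Ptr<TrainData> data = TrainData::create(samples, ROW_SAMPLE, labels);

        // Keep 75% of the shuffled samples for training; the rest becomes the test subset
        // that the corresponding getTest* accessors and calcError(..., test=true, ...) use.
        data->setTrainTestSplitRatio(0.75, /*shuffle=*/true);
        Mat trainSamples   = data->getTrainSamples();   // ROW_SAMPLE layout by default
        Mat trainResponses = data->getTrainResponses();
    }
@endcode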
+ @sa TrainData::setTrainTestSplit */ virtual void setTrainTestSplitRatio(double ratio, bool shuffle=true) = 0; virtual void shuffleTrainTest() = 0; @@ -686,23 +230,28 @@ public: @param filename The input file name @param headerLineCount The number of lines in the beginning to skip; besides the header, the - function also skips empty lines and lines staring with '\#' - @param responseStartIdx Index of the first output variable. If -1, the function considers the last - variable as the response - @param responseEndIdx Index of the last output variable + 1. If -1, then there is single response - variable at responseStartIdx. - @param varTypeSpec The optional text string that specifies the variables' types. It has the format ord[n1-n2,n3,n4-n5,...]cat[n6,n7-n8,...]. That is, variables from n1 to n2 (inclusive range), n3, n4 to n5 ... are considered ordered and n6, n7 to n8 ... are considered as categorical. The range [n1..n2] + [n3] + [n4..n5] + ... + [n6] + [n7..n8] should cover all the variables. If varTypeSpec is not specified, then algorithm uses the following rules: - # all input variables are considered ordered by default. If some column contains has - non-numerical values, e.g. 'apple', 'pear', 'apple', 'apple', 'mango', the corresponding - variable is considered categorical. - # if there are several output variables, they are all considered as ordered. Error is - reported when non-numerical values are used. - # if there is a single output variable, then if its values are non-numerical or are all - integers, then it's considered categorical. Otherwise, it's considered ordered. + function also skips empty lines and lines staring with `#` + @param responseStartIdx Index of the first output variable. If -1, the function considers the + last variable as the response + @param responseEndIdx Index of the last output variable + 1. If -1, then there is single + response variable at responseStartIdx. + @param varTypeSpec The optional text string that specifies the variables' types. It has the + format `ord[n1-n2,n3,n4-n5,...]cat[n6,n7-n8,...]`. That is, variables from `n1 to n2` + (inclusive range), `n3`, `n4 to n5` ... are considered ordered and `n6`, `n7 to n8` ... are + considered as categorical. The range `[n1..n2] + [n3] + [n4..n5] + ... + [n6] + [n7..n8]` + should cover all the variables. If varTypeSpec is not specified, then algorithm uses the + following rules: + - all input variables are considered ordered by default. If some column contains has non- + numerical values, e.g. 'apple', 'pear', 'apple', 'apple', 'mango', the corresponding + variable is considered categorical. + - if there are several output variables, they are all considered as ordered. Error is + reported when non-numerical values are used. + - if there is a single output variable, then if its values are non-numerical or are all + integers, then it's considered categorical. Otherwise, it's considered ordered. @param delimiter The character used to separate values in each line. @param missch The character used to specify missing measurements. It should not be a digit. - Although it's a non-numerical value, it surely does not affect the decision of whether the - variable ordered or categorical. + Although it's a non-numerical value, it surely does not affect the decision of whether the + variable ordered or categorical. 
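As an illustration of the varTypeSpec format just described, here is a hedged sketch of loading such a file; the file name and column layout are hypothetical, the argument order follows the parameter list documented above, and Ptr\<TrainData\> assumes the elided template argument.

@code
    #include <opencv2/ml.hpp>
    using namespace cv;
    using namespace cv::ml;

    // Hypothetical file: one header line, columns 0-3 are ordered measurements,
    // column 4 (the last one) is a categorical class label used as the response.
    Ptr<TrainData> data = TrainData::loadFromCSV("iris.csv",
                                                 1,                // headerLineCount
                                                 -1,               // responseStartIdx: last variable
                                                 -1,               // responseEndIdx: single response
                                                 "ord[0-3]cat[4]", // varTypeSpec
                                                 ',',              // delimiter
                                                 '?');             // missing-measurement marker
@endcode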
*/ static Ptr loadFromCSV(const String& filename, int headerLineCount, @@ -711,76 +260,66 @@ public: const String& varTypeSpec=String(), char delimiter=',', char missch='?'); + /** @brief Creates training data from in-memory arrays. @param samples matrix of samples. It should have CV_32F type. - @param layout it's either ROW_SAMPLE, which means that each training sample is a row of samples, - or COL_SAMPLE, which means that each training sample occupies a column of samples. + @param layout see ml::SampleTypes. @param responses matrix of responses. If the responses are scalar, they should be stored as a - single row or as a single column. The matrix should have type CV_32F or CV_32S (in the former - case the responses are considered as ordered by default; in the latter case - as categorical) + single row or as a single column. The matrix should have type CV_32F or CV_32S (in the + former case the responses are considered as ordered by default; in the latter case - as + categorical) @param varIdx vector specifying which variables to use for training. It can be an integer vector - (CV_32S) containing 0-based variable indices or byte vector (CV_8U) containing a mask of active - variables. - @param sampleIdx vector specifying which samples to use for training. It can be an integer vector - (CV_32S) containing 0-based sample indices or byte vector (CV_8U) containing a mask of training - samples. + (CV_32S) containing 0-based variable indices or byte vector (CV_8U) containing a mask of + active variables. + @param sampleIdx vector specifying which samples to use for training. It can be an integer + vector (CV_32S) containing 0-based sample indices or byte vector (CV_8U) containing a mask + of training samples. @param sampleWeights optional vector with weights for each sample. It should have CV_32F type. - @param varType optional vector of type CV_8U and size \ + - \, containing types of each input and output variable. The - ordered variables are denoted by value VAR_ORDERED, and categorical - by VAR_CATEGORICAL. + @param varType optional vector of type CV_8U and size ` + + `, containing types of each input and output variable. See + ml::VariableTypes. */ static Ptr create(InputArray samples, int layout, InputArray responses, InputArray varIdx=noArray(), InputArray sampleIdx=noArray(), InputArray sampleWeights=noArray(), InputArray varType=noArray()); }; -//! @} ml_data - -//! @addtogroup ml_stat -//! @{ - /** @brief Base class for statistical models in OpenCV ML. */ class CV_EXPORTS_W StatModel : public Algorithm { public: - enum { UPDATE_MODEL = 1, RAW_OUTPUT=1, COMPRESSED_INPUT=2, PREPROCESSED_INPUT=4 }; + /** Predict options */ + enum Flags { + UPDATE_MODEL = 1, + RAW_OUTPUT=1, //!< makes the method return the raw results (the sum), not the class label + COMPRESSED_INPUT=2, + PREPROCESSED_INPUT=4 + }; virtual void clear(); - /** @brief Returns the number of variables in training samples - - The method must be overwritten in the derived classes. - */ + /** @brief Returns the number of variables in training samples */ virtual int getVarCount() const = 0; - /** @brief Returns true if the model is trained - - The method must be overwritten in the derived classes. - */ + /** @brief Returns true if the model is trained */ virtual bool isTrained() const = 0; - /** @brief Returns true if the model is classifier - - The method must be overwritten in the derived classes. 
- */ + /** @brief Returns true if the model is a classifier */ virtual bool isClassifier() const = 0; /** @brief Trains the statistical model @param trainData training data that can be loaded from file using TrainData::loadFromCSV or - created with TrainData::create. + created with TrainData::create. @param flags optional flags, depending on the model. Some of the models can be updated with the - new training samples, not completely overwritten (such as NormalBayesClassifier or ANN_MLP). - - There are 2 instance methods and 2 static (class) template methods. The first two train the already - created model (the very first method must be overwritten in the derived classes). And the latter two - variants are convenience methods that construct empty model and then call its train method. + new training samples, not completely overwritten (such as NormalBayesClassifier or ANN_MLP). */ virtual bool train( const Ptr<TrainData>& trainData, int flags=0 ); - /** @overload + + /** @brief Trains the statistical model + @param samples training samples - @param layout ROW_SAMPLE (training samples are the matrix rows) or COL_SAMPLE (training samples - are the matrix columns) + @param layout See ml::SampleTypes. @param responses vector of responses associated with the training samples. */ virtual bool train( InputArray samples, int layout, InputArray responses ); @@ -789,14 +328,14 @@ public: @param data the training data @param test if true, the error is computed over the test subset of the data, otherwise it's - computed over the training subset of the data. Please note that if you loaded a completely - different dataset to evaluate already trained classifier, you will probably want not to set the - test subset at all with TrainData::setTrainTestSplitRatio and specify test=false, so that the - error is computed for the whole new set. Yes, this sounds a bit confusing. + computed over the training subset of the data. Please note that if you loaded a completely + different dataset to evaluate an already trained classifier, you will probably not want to set + the test subset at all with TrainData::setTrainTestSplitRatio and should specify test=false, so + that the error is computed for the whole new set. Yes, this sounds a bit confusing. @param resp the optional output responses. - The method uses StatModel::predict to compute the error. For regression models the error is computed - as RMS, for classifiers - as a percent of missclassified samples (0%-100%). + The method uses StatModel::predict to compute the error. For regression models the error is + computed as RMS, for classifiers - as a percent of misclassified samples (0%-100%). */ virtual float calcError( const Ptr<TrainData>& data, bool test, OutputArray resp ) const; @@ -804,20 +343,18 @@ public: @param samples The input samples, floating-point matrix @param results The optional output matrix of results. - @param flags The optional flags, model-dependent. Some models, such as Boost, SVM recognize - StatModel::RAW_OUTPUT flag, which makes the method return the raw results (the sum), not the - class label. + @param flags The optional flags, model-dependent. See cv::ml::StatModel::Flags. */ virtual float predict( InputArray samples, OutputArray results=noArray(), int flags=0 ) const = 0; /** @brief Loads model from the file - This is static template method of StatModel. It's usage is following (in the case of SVM): : - - Ptr svm = StatModel::load("my_svm_model.xml"); - - In order to make this method work, the derived class must overwrite - Algorithm::read(const FileNode& fn).
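Before moving on to the load methods, here is a hedged sketch of the train/calcError flow described above, using SVM purely as an example of a concrete StatModel; Ptr\<TrainData\> and Ptr\<SVM\> assume the template arguments elided in the quoted declarations.

@code
    #include <opencv2/ml.hpp>
    using namespace cv;
    using namespace cv::ml;

    void trainAndEvaluate(const Ptr<TrainData>& data)
    {
        data->setTrainTestSplitRatio(0.8);   // hold out 20% of the samples for testing

        Ptr<SVM> model = SVM::create();      // any StatModel-derived class would do here
        model->train(data);

        Mat testPredictions;
        float err = model->calcError(data, /*test=*/true, testPredictions);
        // err is RMS for regression models and the percentage of misclassified samples otherwise.
        (void)err;
    }
@endcode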
+ This is static template method of StatModel. It's usage is following (in the case of SVM): + @code + Ptr svm = StatModel::load("my_svm_model.xml"); + @endcode + In order to make this method work, the derived class must overwrite Algorithm::read(const + FileNode& fn). */ template static Ptr<_Tp> load(const String& filename) { @@ -828,10 +365,13 @@ public: } /** @brief Loads model from a String + @param strModel The string variable containing the model you want to load. This is static template method of StatModel. It's usage is following (in the case of SVM): - Ptr svm = StatModel::loadFromString(myStringModel); + @code + Ptr svm = StatModel::loadFromString(myStringModel); + @endcode */ template static Ptr<_Tp> loadFromString(const String& strModel) { @@ -841,12 +381,30 @@ public: return model->isTrained() ? model : Ptr<_Tp>(); } + + /** @brief Creates new statistical model and trains it + + @param data training data that can be loaded from file using TrainData::loadFromCSV or + created with TrainData::create. + @param p model parameters + @param flags optional flags, depending on the model. Some of the models can be updated with the + new training samples, not completely overwritten (such as NormalBayesClassifier or ANN_MLP). + */ template static Ptr<_Tp> train(const Ptr& data, const typename _Tp::Params& p, int flags=0) { Ptr<_Tp> model = _Tp::create(p); return !model.empty() && model->train(data, flags) ? model : Ptr<_Tp>(); } + /** @brief Creates new statistical model and trains it + + @param samples training samples + @param layout See ml::SampleTypes. + @param responses vector of responses associated with the training samples. + @param p model parameters + @param flags optional flags, depending on the model. Some of the models can be updated with the + new training samples, not completely overwritten (such as NormalBayesClassifier or ANN_MLP). + */ template static Ptr<_Tp> train(InputArray samples, int layout, InputArray responses, const typename _Tp::Params& p, int flags=0) { @@ -863,16 +421,13 @@ public: virtual String getDefaultModelName() const = 0; }; -//! @} ml_stat - /****************************************************************************************\ * Normal Bayes Classifier * \****************************************************************************************/ -//! @addtogroup ml_bayes -//! @{ - /** @brief Bayes classifier for normally distributed data. + +@sa @ref ml_intro_bayes */ class CV_EXPORTS_W NormalBayesClassifier : public StatModel { @@ -884,10 +439,11 @@ public: }; /** @brief Predicts the response for sample(s). - The method estimates the most probable classes for input vectors. Input vectors (one or more) are - stored as rows of the matrix inputs. In case of multiple input vectors, there should be one output - vector outputs. The predicted class for a single input vector is returned by the method. The vector - outputProbs contains the output probabilities corresponding to each element of result. + The method estimates the most probable classes for input vectors. Input vectors (one or more) + are stored as rows of the matrix inputs. In case of multiple input vectors, there should be one + output vector outputs. The predicted class for a single input vector is returned by the method. + The vector outputProbs contains the output probabilities corresponding to each element of + result. 
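A short hedged sketch of how the three outputs of predictProb relate to each other; the training matrices are assumed to exist elsewhere, and Ptr\<NormalBayesClassifier\> assumes the elided template argument.

@code
    #include <opencv2/ml.hpp>
    using namespace cv;
    using namespace cv::ml;

    void bayesExample(const Mat& trainSamples, const Mat& trainLabels, const Mat& queries)
    {
        Ptr<NormalBayesClassifier> nb = NormalBayesClassifier::create();
        nb->train(trainSamples, ROW_SAMPLE, trainLabels);

        Mat outputs, outputProbs;
        // One predicted class per query row in outputs; per-class probabilities in outputProbs.
        nb->predictProb(queries, outputs, outputProbs);
    }
@endcode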
*/ virtual float predictProb( InputArray inputs, OutputArray outputs, OutputArray outputProbs, int flags=0 ) const = 0; @@ -897,33 +453,24 @@ public: /** @brief Creates empty model @param params The model parameters. There is none so far, the structure is used as a placeholder - for possible extensions. + for possible extensions. - Use StatModel::train to train the model, - StatModel::train\(traindata, params) to create and train the model, - StatModel::load\(filename) to load the pre-trained model. + Use StatModel::train to train the model: + @code + StatModel::train(traindata, params); // to create and train the model + StatModel::load(filename); // load the pre-trained model + @endcode */ static Ptr create(const Params& params=Params()); }; -//! @} ml_bayes - /****************************************************************************************\ * K-Nearest Neighbour Classifier * \****************************************************************************************/ -//! @addtogroup ml_knearest -//! @{ +/** @brief The class implements K-Nearest Neighbors model -/** @brief The class implements K-Nearest Neighbors model as described in the beginning of this section. - -@note - - (Python) An example of digit recognition using KNearest can be found at - opencv_source/samples/python2/digits.py - - (Python) An example of grid search digit recognition using KNearest can be found at - opencv_source/samples/python2/digits_adjust.py - - (Python) An example of video digit recognition using KNearest can be found at - opencv_source/samples/python2/digits_video.py +@sa @ref ml_intro_knn */ class CV_EXPORTS_W KNearest : public StatModel { @@ -931,12 +478,13 @@ public: class CV_EXPORTS_W_MAP Params { public: + /** @brief Constructor with parameters */ Params(int defaultK=10, bool isclassifier_=true, int Emax_=INT_MAX, int algorithmType_=BRUTE_FORCE); - CV_PROP_RW int defaultK; - CV_PROP_RW bool isclassifier; - CV_PROP_RW int Emax; // for implementation with KDTree - CV_PROP_RW int algorithmType; + CV_PROP_RW int defaultK; //!< default number of neighbors to use in predict method + CV_PROP_RW bool isclassifier; //!< whether classification or regression model should be trained + CV_PROP_RW int Emax; //!< for implementation with KDTree + CV_PROP_RW int algorithmType; //!< See KNearest::Types }; virtual void setParams(const Params& p) = 0; virtual Params getParams() const = 0; @@ -944,17 +492,17 @@ public: /** @brief Finds the neighbors and predicts responses for input vectors. @param samples Input samples stored by rows. It is a single-precision floating-point matrix of - \ \* k size. + ` * k` size. @param k Number of used nearest neighbors. Should be greater than 1. @param results Vector with results of prediction (regression or classification) for each input - sample. It is a single-precision floating-point vector with \ elements. - @param neighborResponses Optional output values for corresponding neighbors. It is a - single-precision floating-point matrix of \ \* k size. - @param dist Optional output distances from the input vectors to the corresponding neighbors. It is - a single-precision floating-point matrix of \ \* k size. - - For each input vector (a row of the matrix samples), the method finds the k nearest neighbors. In - case of regression, the predicted result is a mean value of the particular vector's neighbor + sample. It is a single-precision floating-point vector with `` elements. + @param neighborResponses Optional output values for corresponding neighbors. 
It is a single- + precision floating-point matrix of ` * k` size. + @param dist Optional output distances from the input vectors to the corresponding neighbors. It + is a single-precision floating-point matrix of ` * k` size. + + For each input vector (a row of the matrix samples), the method finds the k nearest neighbors. + In case of regression, the predicted result is a mean value of the particular vector's neighbor responses. In case of classification, the class is determined by voting. For each input vector, the neighbors are sorted by their distances to the vector. @@ -962,8 +510,8 @@ public: In case of C++ interface you can use output pointers to empty matrices and the function will allocate memory itself. - If only a single input vector is passed, all output matrices are optional and the predicted value is - returned by the method. + If only a single input vector is passed, all output matrices are optional and the predicted + value is returned by the method. The function is parallelized with the TBB library. */ @@ -972,122 +520,77 @@ public: OutputArray neighborResponses=noArray(), OutputArray dist=noArray() ) const = 0; - enum { BRUTE_FORCE=1, KDTREE=2 }; + enum Types { BRUTE_FORCE=1, KDTREE=2 }; /** @brief Creates the empty model - @param params The model parameters: default number of neighbors to use in predict method (in - KNearest::findNearest this number must be passed explicitly) and the flag on whether - classification or regression model should be trained. + @param params The model parameters - The static method creates empty KNearest classifier. It should be then trained using train method - (see StatModel::train). Alternatively, you can load boost model from file using - StatModel::load\(filename). + The static method creates empty %KNearest classifier. It should be then trained using train + method (see StatModel::train). Alternatively, you can load boost model from file using: + `StatModel::load(filename)` */ static Ptr create(const Params& params=Params()); }; -//! @} ml_knearest - /****************************************************************************************\ * Support Vector Machines * \****************************************************************************************/ -//! @addtogroup ml_svm -//! @{ - /** @brief Support Vector Machines. -@note - - (Python) An example of digit recognition using SVM can be found at - opencv_source/samples/python2/digits.py - - (Python) An example of grid search digit recognition using SVM can be found at - opencv_source/samples/python2/digits_adjust.py - - (Python) An example of video digit recognition using SVM can be found at - opencv_source/samples/python2/digits_video.py +@sa @ref ml_intro_svm */ class CV_EXPORTS_W SVM : public StatModel { public: - /** @brief SVM training parameters. + /** @brief %SVM training parameters. - The structure must be initialized and passed to the training method of SVM. + The structure must be initialized and passed to the training method of %SVM. */ class CV_EXPORTS_W_MAP Params { public: + /** @brief Default constructor */ Params(); - /** @brief The constructors - - @param svm_type Type of a SVM formulation. Possible values are: - - **SVM::C_SVC** C-Support Vector Classification. n-class classification (n \f$\geq\f$ 2), allows - imperfect separation of classes with penalty multiplier C for outliers. - - **SVM::NU_SVC** \f$\nu\f$-Support Vector Classification. n-class classification with possible - imperfect separation. 
Parameter \f$\nu\f$ (in the range 0..1, the larger the value, the smoother - the decision boundary) is used instead of C. - - **SVM::ONE_CLASS** Distribution Estimation (One-class SVM). All the training data are from - the same class, SVM builds a boundary that separates the class from the rest of the feature - space. - - **SVM::EPS_SVR** \f$\epsilon\f$-Support Vector Regression. The distance between feature vectors - from the training set and the fitting hyper-plane must be less than p. For outliers the - penalty multiplier C is used. - - **SVM::NU_SVR** \f$\nu\f$-Support Vector Regression. \f$\nu\f$ is used instead of p. - See @cite LibSVM for details. - @param kernel_type Type of a SVM kernel. Possible values are: - - **SVM::LINEAR** Linear kernel. No mapping is done, linear discrimination (or regression) is - done in the original feature space. It is the fastest option. \f$K(x_i, x_j) = x_i^T x_j\f$. - - **SVM::POLY** Polynomial kernel: - \f$K(x_i, x_j) = (\gamma x_i^T x_j + coef0)^{degree}, \gamma > 0\f$. - - **SVM::RBF** Radial basis function (RBF), a good choice in most cases. - \f$K(x_i, x_j) = e^{-\gamma ||x_i - x_j||^2}, \gamma > 0\f$. - - **SVM::SIGMOID** Sigmoid kernel: \f$K(x_i, x_j) = \tanh(\gamma x_i^T x_j + coef0)\f$. - - **SVM::CHI2** Exponential Chi2 kernel, similar to the RBF kernel: - \f$K(x_i, x_j) = e^{-\gamma \chi^2(x_i,x_j)}, \chi^2(x_i,x_j) = (x_i-x_j)^2/(x_i+x_j), \gamma > 0\f$. - - **SVM::INTER** Histogram intersection kernel. A fast kernel. \f$K(x_i, x_j) = min(x_i,x_j)\f$. - @param degree Parameter degree of a kernel function (POLY). - @param gamma Parameter \f$\gamma\f$ of a kernel function (POLY / RBF / SIGMOID / CHI2). - @param coef0 Parameter coef0 of a kernel function (POLY / SIGMOID). - @param Cvalue Parameter C of a SVM optimization problem (C_SVC / EPS_SVR / NU_SVR). - @param nu Parameter \f$\nu\f$ of a SVM optimization problem (NU_SVC / ONE_CLASS / NU_SVR). - @param p Parameter \f$\epsilon\f$ of a SVM optimization problem (EPS_SVR). - @param classWeights Optional weights in the C_SVC problem , assigned to particular classes. They - are multiplied by C so the parameter C of class \#i becomes classWeights(i) \* C. Thus these - weights affect the misclassification penalty for different classes. The larger weight, the larger - penalty on misclassification of data from the corresponding class. - @param termCrit Termination criteria of the iterative SVM training procedure which solves a - partial case of constrained quadratic optimization problem. You can specify tolerance and/or the - maximum number of iterations. - - The default constructor initialize the structure with following values: - @code - SVMParams::SVMParams() : - svmType(SVM::C_SVC), kernelType(SVM::RBF), degree(0), - gamma(1), coef0(0), C(1), nu(0), p(0), classWeights(0) - { - termCrit = TermCriteria( TermCriteria::MAX_ITER+TermCriteria::EPS, 1000, FLT_EPSILON ); - } - @endcode - A comparison of different kernels on the following 2D test case with four classes. Four C_SVC SVMs - have been trained (one against rest) with auto_train. Evaluation on three different kernels (CHI2, - INTER, RBF). The color depicts the class with max score. Bright means max-score \> 0, dark means - max-score \< 0. - - ![image](pics/SVM_Comparison.png) - */ + /** @brief Constructor with parameters */ Params( int svm_type, int kernel_type, double degree, double gamma, double coef0, double Cvalue, double nu, double p, const Mat& classWeights, TermCriteria termCrit ); + /** Type of a %SVM formulation. See SVM::Types. 
Default value is SVM::C_SVC. */ CV_PROP_RW int svmType; + /** Type of a %SVM kernel. See SVM::KernelTypes. Default value is SVM::RBF. */ CV_PROP_RW int kernelType; - CV_PROP_RW double gamma, coef0, degree; - - CV_PROP_RW double C; // for CV_SVM_C_SVC, CV_SVM_EPS_SVR and CV_SVM_NU_SVR - CV_PROP_RW double nu; // for CV_SVM_NU_SVC, CV_SVM_ONE_CLASS, and CV_SVM_NU_SVR - CV_PROP_RW double p; // for CV_SVM_EPS_SVR - CV_PROP_RW Mat classWeights; // for CV_SVM_C_SVC - CV_PROP_RW TermCriteria termCrit; // termination criteria + /** Parameter \f$\gamma\f$ of a kernel function (SVM::POLY / SVM::RBF / SVM::SIGMOID / + SVM::CHI2). Default value is 1. */ + CV_PROP_RW double gamma; + /** Parameter coef0 of a kernel function (SVM::POLY / SVM::SIGMOID). Default value is 0. */ + CV_PROP_RW double coef0; + /** Parameter degree of a kernel function (SVM::POLY). Default value is 0. */ + CV_PROP_RW double degree; + + /** Parameter C of a %SVM optimization problem (SVM::C_SVC / SVM::EPS_SVR / SVM::NU_SVR). + Default value is 0. */ + CV_PROP_RW double C; + /** Parameter \f$\nu\f$ of a %SVM optimization problem (SVM::NU_SVC / SVM::ONE_CLASS / + SVM::NU_SVR). Default value is 0. */ + CV_PROP_RW double nu; + /** Parameter \f$\epsilon\f$ of a %SVM optimization problem (SVM::EPS_SVR). Default value is 0. */ + CV_PROP_RW double p; + + /** Optional weights in the SVM::C_SVC problem , assigned to particular classes. They are + multiplied by C so the parameter C of class \#i becomes classWeights(i) \* C. Thus these + weights affect the misclassification penalty for different classes. The larger weight, the + larger penalty on misclassification of data from the corresponding class. Default value is + empty Mat.*/ + CV_PROP_RW Mat classWeights; + /** Termination criteria of the iterative %SVM training procedure which solves a partial + case of constrained quadratic optimization problem. You can specify tolerance and/or the + maximum number of iterations. Default value is TermCriteria( + TermCriteria::MAX_ITER + TermCriteria::EPS, 1000, FLT_EPSILON );*/ + CV_PROP_RW TermCriteria termCrit; }; class CV_EXPORTS Kernel : public Algorithm @@ -1097,49 +600,99 @@ public: virtual void calc( int vcount, int n, const float* vecs, const float* another, float* results ) = 0; }; - // SVM type - enum { C_SVC=100, NU_SVC=101, ONE_CLASS=102, EPS_SVR=103, NU_SVR=104 }; + //! %SVM type + enum Types { + /** C-Support Vector Classification. n-class classification (n \f$\geq\f$ 2), allows + imperfect separation of classes with penalty multiplier C for outliers. */ + C_SVC=100, + /** \f$\nu\f$-Support Vector Classification. n-class classification with possible + imperfect separation. Parameter \f$\nu\f$ (in the range 0..1, the larger the value, the smoother + the decision boundary) is used instead of C. */ + NU_SVC=101, + /** Distribution Estimation (One-class %SVM). All the training data are from + the same class, %SVM builds a boundary that separates the class from the rest of the feature + space. */ + ONE_CLASS=102, + /** \f$\epsilon\f$-Support Vector Regression. The distance between feature vectors + from the training set and the fitting hyper-plane must be less than p. For outliers the + penalty multiplier C is used. */ + EPS_SVR=103, + /** \f$\nu\f$-Support Vector Regression. \f$\nu\f$ is used instead of p. + See @cite LibSVM for details. 
*/ + NU_SVR=104 + }; + + /** @brief %SVM kernel type - // SVM kernel type - enum { CUSTOM=-1, LINEAR=0, POLY=1, RBF=2, SIGMOID=3, CHI2=4, INTER=5 }; + A comparison of different kernels on the following 2D test case with four classes. Four + SVM::C_SVC SVMs have been trained (one against rest) with auto_train. Evaluation on three + different kernels (SVM::CHI2, SVM::INTER, SVM::RBF). The color depicts the class with max score. + Bright means max-score \> 0, dark means max-score \< 0. + ![image](pics/SVM_Comparison.png) + */ + enum KernelTypes { + CUSTOM=-1, + /** Linear kernel. No mapping is done, linear discrimination (or regression) is + done in the original feature space. It is the fastest option. \f$K(x_i, x_j) = x_i^T x_j\f$. */ + LINEAR=0, + /** Polynomial kernel: + \f$K(x_i, x_j) = (\gamma x_i^T x_j + coef0)^{degree}, \gamma > 0\f$. */ + POLY=1, + /** Radial basis function (RBF), a good choice in most cases. + \f$K(x_i, x_j) = e^{-\gamma ||x_i - x_j||^2}, \gamma > 0\f$. */ + RBF=2, + /** Sigmoid kernel: \f$K(x_i, x_j) = \tanh(\gamma x_i^T x_j + coef0)\f$. */ + SIGMOID=3, + /** Exponential Chi2 kernel, similar to the RBF kernel: + \f$K(x_i, x_j) = e^{-\gamma \chi^2(x_i,x_j)}, \chi^2(x_i,x_j) = (x_i-x_j)^2/(x_i+x_j), \gamma > 0\f$. */ + CHI2=4, + /** Histogram intersection kernel. A fast kernel. \f$K(x_i, x_j) = min(x_i,x_j)\f$. */ + INTER=5 + }; - // SVM params type - enum { C=0, GAMMA=1, P=2, NU=3, COEF=4, DEGREE=5 }; + //! %SVM params type + enum ParamTypes { + C=0, + GAMMA=1, + P=2, + NU=3, + COEF=4, + DEGREE=5 + }; - /** @brief Trains an SVM with optimal parameters. + /** @brief Trains an %SVM with optimal parameters. @param data the training data that can be constructed using TrainData::create or - TrainData::loadFromCSV. + TrainData::loadFromCSV. @param kFold Cross-validation parameter. The training set is divided into kFold subsets. One - subset is used to test the model, the others form the train set. So, the SVM algorithm is executed - kFold times. - @param Cgrid - @param gammaGrid - @param pGrid - @param nuGrid - @param coeffGrid - @param degreeGrid Iteration grid for the corresponding SVM parameter. + subset is used to test the model, the others form the train set. So, the %SVM algorithm is + executed kFold times. + @param Cgrid grid for C + @param gammaGrid grid for gamma + @param pGrid grid for p + @param nuGrid grid for nu + @param coeffGrid grid for coeff + @param degreeGrid grid for degree @param balanced If true and the problem is 2-class classification then the method creates more - balanced cross-validation subsets that is proportions between classes in subsets are close to such - proportion in the whole train dataset. - - The method trains the SVM model automatically by choosing the optimal parameters C, gamma, p, nu, - coef0, degree from SVM::Params. Parameters are considered optimal when the cross-validation estimate - of the test set error is minimal. - - If there is no need to optimize a parameter, the corresponding grid step should be set to any value - less than or equal to 1. For example, to avoid optimization in gamma, set gammaGrid.step = 0, - gammaGrid.minVal, gamma_grid.maxVal as arbitrary numbers. In this case, the value params.gamma is - taken for gamma. - - And, finally, if the optimization in a parameter is required but the corresponding grid is unknown, - you may call the function SVM::getDefaulltGrid. To generate a grid, for example, for gamma, call - SVM::getDefaulltGrid(SVM::GAMMA). 
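To tie the grid machinery to the trainAuto description here, below is a hedged sketch of automatic parameter selection for a C_SVC/RBF model; the values are illustrative only, and Ptr\<TrainData\>/Ptr\<SVM\> assume the elided template arguments.

@code
    #include <opencv2/ml.hpp>
    using namespace cv;
    using namespace cv::ml;

    void autoTrain(const Ptr<TrainData>& data)
    {
        SVM::Params params;
        params.svmType    = SVM::C_SVC;
        params.kernelType = SVM::RBF;

        Ptr<SVM> svm = SVM::create(params);

        // 10-fold cross-validation over the default logarithmic grids; grids of the
        // parameters that C_SVC with an RBF kernel does not use are irrelevant here.
        svm->trainAuto(data, 10,
                       SVM::getDefaultGrid(SVM::C),
                       SVM::getDefaultGrid(SVM::GAMMA));
    }
@endcode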
- - This function works for the classification (params.svmType=SVM::C_SVC or - params.svmType=SVM::NU_SVC) as well as for the regression (params.svmType=SVM::EPS_SVR or - params.svmType=SVM::NU_SVR). If params.svmType=SVM::ONE_CLASS, no optimization is made and the - usual SVM with parameters specified in params is executed. + balanced cross-validation subsets that is proportions between classes in subsets are close + to such proportion in the whole train dataset. + + The method trains the %SVM model automatically by choosing the optimal parameters C, gamma, p, + nu, coef0, degree from SVM::Params. Parameters are considered optimal when the cross-validation + estimate of the test set error is minimal. + + If there is no need to optimize a parameter, the corresponding grid step should be set to any + value less than or equal to 1. For example, to avoid optimization in gamma, set `gammaGrid.step + = 0`, `gammaGrid.minVal`, `gamma_grid.maxVal` as arbitrary numbers. In this case, the value + `params.gamma` is taken for gamma. + + And, finally, if the optimization in a parameter is required but the corresponding grid is + unknown, you may call the function SVM::getDefaultGrid. To generate a grid, for example, for + gamma, call `SVM::getDefaultGrid(SVM::GAMMA)`. + + This function works for the classification (SVM::C_SVC or SVM::NU_SVC) as well as for the + regression (SVM::EPS_SVR or SVM::NU_SVR). If it is SVM::ONE_CLASS, no optimization is made and + the usual %SVM with parameters specified in params is executed. */ virtual bool trainAuto( const Ptr& data, int kFold = 10, ParamGrid Cgrid = SVM::getDefaultGrid(SVM::C), @@ -1152,14 +705,14 @@ public: /** @brief Retrieves all the support vectors - The method returns all the support vector as floating-point matrix, where support vectors are stored - as matrix rows. + The method returns all the support vector as floating-point matrix, where support vectors are + stored as matrix rows. */ CV_WRAP virtual Mat getSupportVectors() const = 0; virtual void setParams(const Params& p, const Ptr& customKernel=Ptr()) = 0; - /** @brief Returns the current SVM parameters. + /** @brief Returns the current %SVM parameters. This function may be used to get the optimal parameters obtained while automatically training SVM::trainAuto. @@ -1170,101 +723,102 @@ public: /** @brief Retrieves the decision function @param i the index of the decision function. If the problem solved is regression, 1-class or - 2-class classification, then there will be just one decision function and the index should always - be 0. Otherwise, in the case of N-class classification, there will be N\*(N-1)/2 decision - functions. + 2-class classification, then there will be just one decision function and the index should + always be 0. Otherwise, in the case of N-class classification, there will be \f$N(N-1)/2\f$ + decision functions. @param alpha the optional output vector for weights, corresponding to different support vectors. - In the case of linear SVM all the alpha's will be 1's. - @param svidx the optional output vector of indices of support vectors within the matrix of support - vectors (which can be retrieved by SVM::getSupportVectors). In the case of linear SVM each - decision function consists of a single "compressed" support vector. + In the case of linear %SVM all the alpha's will be 1's. + @param svidx the optional output vector of indices of support vectors within the matrix of + support vectors (which can be retrieved by SVM::getSupportVectors). 
In the case of linear + %SVM each decision function consists of a single "compressed" support vector. - The method returns rho parameter of the decision function, a scalar subtracted from the weighted sum - of kernel responses. + The method returns rho parameter of the decision function, a scalar subtracted from the weighted + sum of kernel responses. */ virtual double getDecisionFunction(int i, OutputArray alpha, OutputArray svidx) const = 0; - /** @brief Generates a grid for SVM parameters. + /** @brief Generates a grid for %SVM parameters. - @param param_id SVM parameters IDs that must be one of the following: - - **SVM::C** - - **SVM::GAMMA** - - **SVM::P** - - **SVM::NU** - - **SVM::COEF** - - **SVM::DEGREE** - The grid is generated for the parameter with this ID. + @param param_id %SVM parameters IDs that must be one of the SVM::ParamTypes. The grid is + generated for the parameter with this ID. - The function generates a grid for the specified parameter of the SVM algorithm. The grid may be + The function generates a grid for the specified parameter of the %SVM algorithm. The grid may be passed to the function SVM::trainAuto. */ static ParamGrid getDefaultGrid( int param_id ); /** @brief Creates empty model - @param p SVM parameters + @param p %SVM parameters @param customKernel the optional custom kernel to use. It must implement SVM::Kernel interface. - Use StatModel::train to train the model, StatModel::train\(traindata, params) to create and - train the model, StatModel::load\(filename) to load the pre-trained model. Since SVM has - several parameters, you may want to find the best parameters for your problem. It can be done with - SVM::trainAuto. + Use StatModel::train to train the model: + @code + StatModel::train(traindata, params); // to create and train the model + // or + StatModel::load(filename); // to load the pre-trained model. + @endcode + Since %SVM has several parameters, you may want to find the best parameters for your problem. It + can be done with SVM::trainAuto. */ static Ptr create(const Params& p=Params(), const Ptr& customKernel=Ptr()); }; -//! @} ml_svm - /****************************************************************************************\ * Expectation - Maximization * \****************************************************************************************/ -//! @addtogroup ml_em -//! @{ +/** @brief The class implements the Expectation Maximization algorithm. -/** @brief The class implements the EM algorithm as described in the beginning of this section. +@sa @ref ml_intro_em */ class CV_EXPORTS_W EM : public StatModel { public: - // Type of covariation matrices - enum {COV_MAT_SPHERICAL=0, COV_MAT_DIAGONAL=1, COV_MAT_GENERIC=2, COV_MAT_DEFAULT=COV_MAT_DIAGONAL}; + //! Type of covariation matrices + enum Types { + /** A scaled identity matrix \f$\mu_k * I\f$. There is the only + parameter \f$\mu_k\f$ to be estimated for each matrix. The option may be used in special cases, + when the constraint is relevant, or as a first step in the optimization (for example in case + when the data is preprocessed with PCA). The results of such preliminary estimation may be + passed again to the optimization procedure, this time with + covMatType=EM::COV_MAT_DIAGONAL. */ + COV_MAT_SPHERICAL=0, + /** A diagonal matrix with positive diagonal elements. The number of + free parameters is d for each matrix. This is most commonly used option yielding good + estimation results. */ + COV_MAT_DIAGONAL=1, + /** A symmetric positively defined matrix. 
The number of free + parameters in each matrix is about \f$d^2/2\f$. It is not recommended to use this option, unless + there is pretty accurate initial estimation of the parameters and/or a huge number of + training samples. */ + COV_MAT_GENERIC=2, + COV_MAT_DEFAULT=COV_MAT_DIAGONAL + }; - // Default parameters + //! Default parameters enum {DEFAULT_NCLUSTERS=5, DEFAULT_MAX_ITERS=100}; - // The initial step + //! The initial step enum {START_E_STEP=1, START_M_STEP=2, START_AUTO_STEP=0}; - /** @brief The class describes EM training parameters. + /** @brief The class describes %EM training parameters. */ class CV_EXPORTS_W_MAP Params { public: /** @brief The constructor - @param nclusters The number of mixture components in the Gaussian mixture model. Default value of - the parameter is EM::DEFAULT_NCLUSTERS=5. Some of EM implementation could determine the optimal - number of mixtures within a specified value range, but that is not the case in ML yet. - @param covMatType Constraint on covariance matrices which defines type of matrices. Possible - values are: - - **EM::COV_MAT_SPHERICAL** A scaled identity matrix \f$\mu_k * I\f$. There is the only - parameter \f$\mu_k\f$ to be estimated for each matrix. The option may be used in special cases, - when the constraint is relevant, or as a first step in the optimization (for example in case - when the data is preprocessed with PCA). The results of such preliminary estimation may be - passed again to the optimization procedure, this time with - covMatType=EM::COV_MAT_DIAGONAL. - - **EM::COV_MAT_DIAGONAL** A diagonal matrix with positive diagonal elements. The number of - free parameters is d for each matrix. This is most commonly used option yielding good - estimation results. - - **EM::COV_MAT_GENERIC** A symmetric positively defined matrix. The number of free - parameters in each matrix is about \f$d^2/2\f$. It is not recommended to use this option, unless - there is pretty accurate initial estimation of the parameters and/or a huge number of - training samples. - @param termCrit The termination criteria of the EM algorithm. The EM algorithm can be terminated - by the number of iterations termCrit.maxCount (number of M-steps) or when relative change of - likelihood logarithm is less than termCrit.epsilon. Default maximum number of iterations is - EM::DEFAULT_MAX_ITERS=100. + @param nclusters The number of mixture components in the Gaussian mixture model. Default + value of the parameter is EM::DEFAULT_NCLUSTERS=5. Some of %EM implementation could + determine the optimal number of mixtures within a specified value range, but that is not + the case in ML yet. + @param covMatType Constraint on covariance matrices which defines type of matrices. See + EM::Types. + @param termCrit The termination criteria of the %EM algorithm. The %EM algorithm can be + terminated by the number of iterations termCrit.maxCount (number of M-steps) or when + relative change of likelihood logarithm is less than termCrit.epsilon. Default maximum + number of iterations is EM::DEFAULT_MAX_ITERS=100. */ explicit Params(int nclusters=DEFAULT_NCLUSTERS, int covMatType=EM::COV_MAT_DIAGONAL, const TermCriteria& termCrit=TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, @@ -1283,62 +837,61 @@ public: virtual Mat getWeights() const = 0; /** @brief Returns the cluster centers (means of the Gaussian mixture) - Returns matrix with the number of rows equal to the number of mixtures and number of columns equal - to the space dimensionality. 
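The %EM parameters described above are most often used through the static EM::train shown a little further below; here is a hedged sketch of fitting a mixture to unlabelled points and reading back the estimated means (the cluster count and data are invented, and Ptr\<EM\> assumes the elided template argument).

@code
    #include <opencv2/ml.hpp>
    using namespace cv;
    using namespace cv::ml;

    void fitMixture(const Mat& points)       // one sample per row; converted to CV_64F internally
    {
        EM::Params params(3, EM::COV_MAT_DIAGONAL);   // three mixture components

        Mat logLikelihoods, labels;
        Ptr<EM> em = EM::train(points, logLikelihoods, labels, noArray(), params);

        Mat means = em->getMeans();           // nclusters x dims matrix of component centers
        // For a new sample, predict2 returns (log-likelihood, index of most probable component).
        Vec2d res = em->predict2(points.row(0), noArray());
        (void)means; (void)res;
    }
@endcode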
+ Returns matrix with the number of rows equal to the number of mixtures and number of columns + equal to the space dimensionality. */ virtual Mat getMeans() const = 0; /** @brief Returns covariation matrices - Returns vector of covariation matrices. Number of matrices is the number of gaussian mixtures, each - matrix is a square floating-point matrix NxN, where N is the space dimensionality. + Returns vector of covariation matrices. Number of matrices is the number of gaussian mixtures, + each matrix is a square floating-point matrix NxN, where N is the space dimensionality. */ virtual void getCovs(std::vector& covs) const = 0; - /** @brief Returns a likelihood logarithm value and an index of the most probable mixture component for the - given sample. + /** @brief Returns a likelihood logarithm value and an index of the most probable mixture component + for the given sample. - @param sample A sample for classification. It should be a one-channel matrix of \f$1 \times dims\f$ or - \f$dims \times 1\f$ size. - @param probs Optional output matrix that contains posterior probabilities of each component given - the sample. It has \f$1 \times nclusters\f$ size and CV_64FC1 type. + @param sample A sample for classification. It should be a one-channel matrix of + \f$1 \times dims\f$ or \f$dims \times 1\f$ size. + @param probs Optional output matrix that contains posterior probabilities of each component + given the sample. It has \f$1 \times nclusters\f$ size and CV_64FC1 type. - The method returns a two-element double vector. Zero element is a likelihood logarithm value for the - sample. First element is an index of the most probable mixture component for the given sample. + The method returns a two-element double vector. Zero element is a likelihood logarithm value for + the sample. First element is an index of the most probable mixture component for the given + sample. */ CV_WRAP virtual Vec2d predict2(InputArray sample, OutputArray probs) const = 0; virtual bool train( const Ptr& trainData, int flags=0 ) = 0; - /** @brief Static methods that estimate the Gaussian mixture parameters from a samples set + /** @brief Static method that estimate the Gaussian mixture parameters from a samples set + + This variation starts with Expectation step. Initial values of the model parameters will be + estimated by the k-means algorithm. + + Unlike many of the ML models, %EM is an unsupervised learning algorithm and it does not take + responses (class labels or function values) as input. Instead, it computes the *Maximum + Likelihood Estimate* of the Gaussian mixture parameters from an input sample set, stores all the + parameters inside the structure: \f$p_{i,k}\f$ in probs, \f$a_k\f$ in means , \f$S_k\f$ in + covs[k], \f$\pi_k\f$ in weights , and optionally computes the output "class label" for each + sample: \f$\texttt{labels}_i=\texttt{arg max}_k(p_{i,k}), i=1..N\f$ (indices of the most + probable mixture component for each sample). + + The trained model can be used further for prediction, just like any other classifier. The + trained model is similar to the NormalBayesClassifier. @param samples Samples from which the Gaussian mixture model will be estimated. It should be a - one-channel matrix, each row of which is a sample. If the matrix does not have CV_64F type it - will be converted to the inner matrix of such type for the further computing. + one-channel matrix, each row of which is a sample. 
If the matrix does not have CV_64F type + it will be converted to the inner matrix of such type for the further computing. @param logLikelihoods The optional output matrix that contains a likelihood logarithm value for - each sample. It has \f$nsamples \times 1\f$ size and CV_64FC1 type. + each sample. It has \f$nsamples \times 1\f$ size and CV_64FC1 type. @param labels The optional output "class label" for each sample: - \f$\texttt{labels}_i=\texttt{arg max}_k(p_{i,k}), i=1..N\f$ (indices of the most probable mixture - component for each sample). It has \f$nsamples \times 1\f$ size and CV_32SC1 type. + \f$\texttt{labels}_i=\texttt{arg max}_k(p_{i,k}), i=1..N\f$ (indices of the most probable + mixture component for each sample). It has \f$nsamples \times 1\f$ size and CV_32SC1 type. @param probs The optional output matrix that contains posterior probabilities of each Gaussian - mixture component given the each sample. It has \f$nsamples \times nclusters\f$ size and CV_64FC1 - type. + mixture component given the each sample. It has \f$nsamples \times nclusters\f$ size and + CV_64FC1 type. @param params The Gaussian mixture params, see EM::Params description - @return true if the Gaussian mixture model was trained successfully, otherwise it returns - false. - - Starts with Expectation step. Initial values of the model parameters will be estimated by the - k-means algorithm. - - Unlike many of the ML models, EM is an unsupervised learning algorithm and it does not take - responses (class labels or function values) as input. Instead, it computes the *Maximum Likelihood - Estimate* of the Gaussian mixture parameters from an input sample set, stores all the parameters - inside the structure: \f$p_{i,k}\f$ in probs, \f$a_k\f$ in means , \f$S_k\f$ in covs[k], \f$\pi_k\f$ in weights , - and optionally computes the output "class label" for each sample: - \f$\texttt{labels}_i=\texttt{arg max}_k(p_{i,k}), i=1..N\f$ (indices of the most probable mixture - component for each sample). - - The trained model can be used further for prediction, just like any other classifier. The trained - model is similar to the NormalBayesClassifier. */ static Ptr train(InputArray samples, OutputArray logLikelihoods=noArray(), @@ -1346,30 +899,32 @@ public: OutputArray probs=noArray(), const Params& params=Params()); - /** Starts with Expectation step. You need to provide initial means \f$a_k\f$ of mixture - components. Optionally you can pass initial weights \f$\pi_k\f$ and covariance matrices + /** @brief Static method that estimate the Gaussian mixture parameters from a samples set + + This variation starts with Expectation step. You need to provide initial means \f$a_k\f$ of + mixture components. Optionally you can pass initial weights \f$\pi_k\f$ and covariance matrices \f$S_k\f$ of mixture components. @param samples Samples from which the Gaussian mixture model will be estimated. It should be a - one-channel matrix, each row of which is a sample. If the matrix does not have CV_64F type it - will be converted to the inner matrix of such type for the further computing. + one-channel matrix, each row of which is a sample. If the matrix does not have CV_64F type + it will be converted to the inner matrix of such type for the further computing. @param means0 Initial means \f$a_k\f$ of mixture components. It is a one-channel matrix of - \f$nclusters \times dims\f$ size. If the matrix does not have CV_64F type it will be converted to the - inner matrix of such type for the further computing. 
+ \f$nclusters \times dims\f$ size. If the matrix does not have CV_64F type it will be + converted to the inner matrix of such type for the further computing. @param covs0 The vector of initial covariance matrices \f$S_k\f$ of mixture components. Each of - covariance matrices is a one-channel matrix of \f$dims \times dims\f$ size. If the matrices do not - have CV_64F type they will be converted to the inner matrices of such type for the further - computing. + covariance matrices is a one-channel matrix of \f$dims \times dims\f$ size. If the matrices + do not have CV_64F type they will be converted to the inner matrices of such type for the + further computing. @param weights0 Initial weights \f$\pi_k\f$ of mixture components. It should be a one-channel - floating-point matrix with \f$1 \times nclusters\f$ or \f$nclusters \times 1\f$ size. + floating-point matrix with \f$1 \times nclusters\f$ or \f$nclusters \times 1\f$ size. @param logLikelihoods The optional output matrix that contains a likelihood logarithm value for - each sample. It has \f$nsamples \times 1\f$ size and CV_64FC1 type. + each sample. It has \f$nsamples \times 1\f$ size and CV_64FC1 type. @param labels The optional output "class label" for each sample: - \f$\texttt{labels}_i=\texttt{arg max}_k(p_{i,k}), i=1..N\f$ (indices of the most probable mixture - component for each sample). It has \f$nsamples \times 1\f$ size and CV_32SC1 type. + \f$\texttt{labels}_i=\texttt{arg max}_k(p_{i,k}), i=1..N\f$ (indices of the most probable + mixture component for each sample). It has \f$nsamples \times 1\f$ size and CV_32SC1 type. @param probs The optional output matrix that contains posterior probabilities of each Gaussian - mixture component given the each sample. It has \f$nsamples \times nclusters\f$ size and CV_64FC1 - type. + mixture component given the each sample. It has \f$nsamples \times nclusters\f$ size and + CV_64FC1 type. @param params The Gaussian mixture params, see EM::Params description */ static Ptr train_startWithE(InputArray samples, InputArray means0, @@ -1380,21 +935,23 @@ public: OutputArray probs=noArray(), const Params& params=Params()); - /** Starts with Maximization step. You need to provide initial probabilities \f$p_{i,k}\f$ to - use this option. + /** @brief Static method that estimate the Gaussian mixture parameters from a samples set + + This variation starts with Maximization step. You need to provide initial probabilities + \f$p_{i,k}\f$ to use this option. @param samples Samples from which the Gaussian mixture model will be estimated. It should be a - one-channel matrix, each row of which is a sample. If the matrix does not have CV_64F type it - will be converted to the inner matrix of such type for the further computing. + one-channel matrix, each row of which is a sample. If the matrix does not have CV_64F type + it will be converted to the inner matrix of such type for the further computing. @param probs0 @param logLikelihoods The optional output matrix that contains a likelihood logarithm value for - each sample. It has \f$nsamples \times 1\f$ size and CV_64FC1 type. + each sample. It has \f$nsamples \times 1\f$ size and CV_64FC1 type. @param labels The optional output "class label" for each sample: - \f$\texttt{labels}_i=\texttt{arg max}_k(p_{i,k}), i=1..N\f$ (indices of the most probable mixture - component for each sample). It has \f$nsamples \times 1\f$ size and CV_32SC1 type. 
+ \f$\texttt{labels}_i=\texttt{arg max}_k(p_{i,k}), i=1..N\f$ (indices of the most probable + mixture component for each sample). It has \f$nsamples \times 1\f$ size and CV_32SC1 type. @param probs The optional output matrix that contains posterior probabilities of each Gaussian - mixture component given the each sample. It has \f$nsamples \times nclusters\f$ size and CV_64FC1 - type. + mixture component given the each sample. It has \f$nsamples \times nclusters\f$ size and + CV_64FC1 type. @param params The Gaussian mixture params, see EM::Params description */ static Ptr train_startWithM(InputArray samples, InputArray probs0, @@ -1403,9 +960,9 @@ public: OutputArray probs=noArray(), const Params& params=Params()); - /** @brief Creates empty EM model + /** @brief Creates empty %EM model - @param params EM parameters + @param params %EM parameters The model should be trained then using StatModel::train(traindata, flags) method. Alternatively, you can use one of the EM::train\* methods or load it from file using StatModel::load\(filename). @@ -1413,194 +970,154 @@ public: static Ptr create(const Params& params=Params()); }; -//! @} ml_em - /****************************************************************************************\ * Decision Tree * \****************************************************************************************/ -//! @addtogroup ml_decsiontrees -//! @{ +/** @brief The class represents a single decision tree or a collection of decision trees. -/** @brief The class represents a single decision tree or a collection of decision trees. The current public -interface of the class allows user to train only a single decision tree, however the class is -capable of storing multiple decision trees and using them for prediction (by summing responses or -using a voting schemes), and the derived from DTrees classes (such as RTrees and Boost) use this -capability to implement decision tree ensembles. - */ +The current public interface of the class allows user to train only a single decision tree, however +the class is capable of storing multiple decision trees and using them for prediction (by summing +responses or using a voting schemes), and the derived from DTrees classes (such as RTrees and Boost) +use this capability to implement decision tree ensembles. + +@sa @ref ml_intro_trees +*/ class CV_EXPORTS_W DTrees : public StatModel { public: - enum { PREDICT_AUTO=0, PREDICT_SUM=(1<<8), PREDICT_MAX_VOTE=(2<<8), PREDICT_MASK=(3<<8) }; + /** Predict options */ + enum Flags { PREDICT_AUTO=0, PREDICT_SUM=(1<<8), PREDICT_MAX_VOTE=(2<<8), PREDICT_MASK=(3<<8) }; - /** @brief The structure contains all the decision tree training parameters. You can initialize it by default - constructor and then override any parameters directly before training, or the structure may be fully - initialized using the advanced variant of the constructor. + /** @brief The structure contains all the decision tree training parameters. + + You can initialize it by default constructor and then override any parameters directly before + training, or the structure may be fully initialized using the advanced variant of the + constructor. */ class CV_EXPORTS_W_MAP Params { public: + /** @brief Default constructor. */ Params(); - /** @brief The constructors - - @param maxDepth The maximum possible depth of the tree. That is the training algorithms attempts - to split a node while its depth is less than maxDepth. The root node has zero depth. 
The actual - depth may be smaller if the other termination criteria are met (see the outline of the training - procedure in the beginning of the section), and/or if the tree is pruned. - @param minSampleCount If the number of samples in a node is less than this parameter then the node - will not be split. - @param regressionAccuracy Termination criteria for regression trees. If all absolute differences - between an estimated value in a node and values of train samples in this node are less than this - parameter then the node will not be split further. - @param useSurrogates If true then surrogate splits will be built. These splits allow to work with - missing data and compute variable importance correctly. - - @note currently it's not implemented. - - @param maxCategories Cluster possible values of a categorical variable into K\<=maxCategories - clusters to find a suboptimal split. If a discrete variable, on which the training procedure - tries to make a split, takes more than maxCategories values, the precise best subset estimation - may take a very long time because the algorithm is exponential. Instead, many decision trees - engines (including our implementation) try to find sub-optimal split in this case by clustering - all the samples into maxCategories clusters that is some categories are merged together. The - clustering is applied only in n \> 2-class classification problems for categorical variables - with N \> max_categories possible values. In case of regression and 2-class classification the - optimal split can be found efficiently without employing clustering, thus the parameter is not - used in these cases. - - @param CVFolds If CVFolds \> 1 then algorithms prunes the built decision tree using K-fold - cross-validation procedure where K is equal to CVFolds. - - @param use1SERule If true then a pruning will be harsher. This will make a tree more compact and - more resistant to the training data noise but a bit less accurate. - - @param truncatePrunedTree If true then pruned branches are physically removed from the tree. - Otherwise they are retained and it is possible to get results from the original unpruned (or - pruned less aggressively) tree. - - @param priors The array of a priori class probabilities, sorted by the class label value. The - parameter can be used to tune the decision tree preferences toward a certain class. For example, - if you want to detect some rare anomaly occurrence, the training base will likely contain much - more normal cases than anomalies, so a very good classification performance will be achieved - just by considering every case as normal. To avoid this, the priors can be specified, where the - anomaly probability is artificially increased (up to 0.5 or even greater), so the weight of the - misclassified anomalies becomes much bigger, and the tree is adjusted properly. You can also - think about this parameter as weights of prediction categories which determine relative weights - that you give to misclassification. That is, if the weight of the first category is 1 and the - weight of the second category is 10, then each mistake in predicting the second category is - equivalent to making 10 mistakes in predicting the first category. 
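As a concrete illustration of the parameters described above, a minimal sketch of configuring and training a single decision tree; the toy samples and the TrainData::create call with its basic (samples, layout, responses) signature are assumptions made for the example:
@code
using namespace cv;
using namespace cv::ml;
Mat samples = (Mat_<float>(8, 2) << 1,1, 1,2, 2,1, 2,2, 8,8, 8,9, 9,8, 9,9);
Mat responses = (Mat_<int>(8, 1) << 0, 0, 0, 0, 1, 1, 1, 1);
Ptr<TrainData> data = TrainData::create(samples, ROW_SAMPLE, responses);
// maxDepth, minSampleCount, regressionAccuracy, useSurrogates, maxCategories,
// CVFolds, use1SERule, truncatePrunedTree, priors
DTrees::Params params(5, 2, 0.01f, false, 10, 0, false, false, Mat());
Ptr<DTrees> dtree = DTrees::create(params);
dtree->train(data);
Mat testSample = (Mat_<float>(1, 2) << 8.5f, 8.5f);
float label = dtree->predict(testSample);    // class label of the test sample
@endcode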
- - The default constructor initializes all the parameters with the default values tuned for the - standalone classification tree: - @code - DTrees::Params::Params() - { - maxDepth = INT_MAX; - minSampleCount = 10; - regressionAccuracy = 0.01f; - useSurrogates = false; - maxCategories = 10; - CVFolds = 10; - use1SERule = true; - truncatePrunedTree = true; - priors = Mat(); - } - @endcode - */ + /** @brief Constructor with parameters */ Params( int maxDepth, int minSampleCount, double regressionAccuracy, bool useSurrogates, int maxCategories, int CVFolds, bool use1SERule, bool truncatePrunedTree, const Mat& priors ); + /** @brief Cluster possible values of a categorical variable into K\<=maxCategories clusters + to find a suboptimal split. + + If a discrete variable, on which the training procedure tries to make a split, takes more + than maxCategories values, the precise best subset estimation may take a very long time + because the algorithm is exponential. Instead, many decision trees engines (including our + implementation) try to find sub-optimal split in this case by clustering all the samples + into maxCategories clusters that is some categories are merged together. The clustering is + applied only in n \> 2-class classification problems for categorical variables with N \> + max_categories possible values. In case of regression and 2-class classification the optimal + split can be found efficiently without employing clustering, thus the parameter is not used + in these cases. Default value is 10.*/ CV_PROP_RW int maxCategories; + /** @brief The maximum possible depth of the tree. + + That is the training algorithms attempts to split a node while its depth is less than + maxDepth. The root node has zero depth. The actual depth may be smaller if the other + termination criteria are met (see the outline of the training procedure @ref ml_intro_trees + "here"), and/or if the tree is pruned. Default value is INT_MAX.*/ CV_PROP_RW int maxDepth; + /** If the number of samples in a node is less than this parameter then the node will not be + split. Default value is 10.*/ CV_PROP_RW int minSampleCount; + /** If CVFolds \> 1 then algorithms prunes the built decision tree using K-fold + cross-validation procedure where K is equal to CVFolds. Default value is 10.*/ CV_PROP_RW int CVFolds; + /** @brief If true then surrogate splits will be built. + + These splits allow to work with missing data and compute variable importance correctly. + @note currently it's not implemented. Default value is false.*/ CV_PROP_RW bool useSurrogates; + /** If true then a pruning will be harsher. This will make a tree more compact and more + resistant to the training data noise but a bit less accurate. Default value is true.*/ CV_PROP_RW bool use1SERule; + /** If true then pruned branches are physically removed from the tree. Otherwise they are + retained and it is possible to get results from the original unpruned (or pruned less + aggressively) tree. Default value is true.*/ CV_PROP_RW bool truncatePrunedTree; + /** @brief Termination criteria for regression trees. + + If all absolute differences between an estimated value in a node and values of train samples + in this node are less than this parameter then the node will not be split further. Default + value is 0.01f*/ CV_PROP_RW float regressionAccuracy; + /** @brief The array of a priori class probabilities, sorted by the class label value. + + The parameter can be used to tune the decision tree preferences toward a certain class. 
For + example, if you want to detect some rare anomaly occurrence, the training base will likely + contain much more normal cases than anomalies, so a very good classification performance + will be achieved just by considering every case as normal. To avoid this, the priors can be + specified, where the anomaly probability is artificially increased (up to 0.5 or even + greater), so the weight of the misclassified anomalies becomes much bigger, and the tree is + adjusted properly. + + You can also think about this parameter as weights of prediction categories which determine + relative weights that you give to misclassification. That is, if the weight of the first + category is 1 and the weight of the second category is 10, then each mistake in predicting + the second category is equivalent to making 10 mistakes in predicting the first category. + Default value is empty Mat.*/ CV_PROP_RW Mat priors; }; - /** @brief The class represents a decision tree node. It has public members: - - - member double value - Value at the node: a class label in case of classification or estimated function value in case - of regression. - - member int classIdx - Class index normalized to 0..class_count-1 range and assigned to the node. It is used - internally in classification trees and tree ensembles. - - member int parent - Index of the parent node - - member int left - Index of the left child node - - member int right - Index of right child node. - - member int defaultDir - Default direction where to go (-1: left or +1: right). It helps in the case of missing values. - - member int split - Index of the first split + /** @brief The class represents a decision tree node. */ class CV_EXPORTS Node { public: Node(); - double value; - int classIdx; - - int parent; - int left; - int right; - int defaultDir; - - int split; + double value; //!< Value at the node: a class label in case of classification or estimated + //!< function value in case of regression. + int classIdx; //!< Class index normalized to 0..class_count-1 range and assigned to the + //!< node. It is used internally in classification trees and tree ensembles. + int parent; //!< Index of the parent node + int left; //!< Index of the left child node + int right; //!< Index of right child node + int defaultDir; //!< Default direction where to go (-1: left or +1: right). It helps in the + //!< case of missing values. + int split; //!< Index of the first split }; - /** @brief The class represents split in a decision tree. It has public members: - - member int varIdx - Index of variable on which the split is created. - - member bool inversed - If true, then the inverse split rule is used (i.e. left and right branches are exchanged in - the rule expressions below). - - member float quality - The split quality, a positive number. It is used to choose the best split. - - member int next - Index of the next split in the list of splits for the node - - member float c - The threshold value in case of split on an ordered variable. The rule is: : - if var_value < c - then next_node<-left - else next_node<-right - - member int subsetOfs - Offset of the bitset used by the split on a categorical variable. The rule is: : - if bitset[var_value] == 1 - then next_node <- left - else next_node <- right + /** @brief The class represents split in a decision tree. */ class CV_EXPORTS Split { public: Split(); - int varIdx; - bool inversed; - float quality; - int next; - float c; - int subsetOfs; + int varIdx; //!< Index of variable on which the split is created. 
+ bool inversed; //!< If true, then the inverse split rule is used (i.e. left and right + //!< branches are exchanged in the rule expressions below). + float quality; //!< The split quality, a positive number. It is used to choose the best split. + int next; //!< Index of the next split in the list of splits for the node + float c; /**< The threshold value in case of split on an ordered variable. + The rule is: + @code{.none} + if var_value < c + then next_node <- left + else next_node <- right + @endcode */ + int subsetOfs; /**< Offset of the bitset used by the split on a categorical variable. + The rule is: + @code{.none} + if bitset[var_value] == 1 + then next_node <- left + else next_node <- right + @endcode */ }; /** @brief Sets the training parameters - - @param p Training parameters of type DTrees::Params. - - The method sets the training parameters. */ virtual void setDParams(const Params& p); /** @brief Returns the training parameters - - The method returns the training parameters. */ virtual Params getDParams() const; @@ -1609,13 +1126,12 @@ public: virtual const std::vector& getRoots() const = 0; /** @brief Returns all the nodes - all the node indices, mentioned above (left, right, parent, root indices) are indices in the - returned vector + all the node indices are indices in the returned vector */ virtual const std::vector& getNodes() const = 0; /** @brief Returns all the splits - all the split indices, mentioned above (split, next etc.) are indices in the returned vector + all the split indices are indices in the returned vector */ virtual const std::vector& getSplits() const = 0; /** @brief Returns all the bitsets for categorical splits @@ -1627,22 +1143,19 @@ public: /** @brief Creates the empty model The static method creates empty decision tree with the specified parameters. It should be then - trained using train method (see StatModel::train). Alternatively, you can load the model from file - using StatModel::load\(filename). + trained using train method (see StatModel::train). Alternatively, you can load the model from + file using StatModel::load\(filename). */ static Ptr create(const Params& params=Params()); }; -//! @} ml_decsiontrees - /****************************************************************************************\ * Random Trees Classifier * \****************************************************************************************/ -//! @addtogroup ml_randomtrees -//! @{ +/** @brief The class implements the random forest predictor. -/** @brief The class implements the random forest predictor as described in the beginning of this section. +@sa @ref ml_intro_rtrees */ class CV_EXPORTS_W RTrees : public DTrees { @@ -1656,55 +1169,29 @@ public: class CV_EXPORTS_W_MAP Params : public DTrees::Params { public: + /** @brief Default constructor. */ Params(); - /** @brief The constructors - - @param maxDepth the depth of the tree. A low value will likely underfit and conversely a high - value will likely overfit. The optimal value can be obtained using cross validation or other - suitable methods. - @param minSampleCount minimum samples required at a leaf node for it to be split. A reasonable - value is a small percentage of the total data e.g. 1%. - @param regressionAccuracy - @param useSurrogates - @param maxCategories Cluster possible values of a categorical variable into K \<= maxCategories - clusters to find a suboptimal split. 
If a discrete variable, on which the training procedure tries - to make a split, takes more than max_categories values, the precise best subset estimation may - take a very long time because the algorithm is exponential. Instead, many decision trees engines - (including ML) try to find sub-optimal split in this case by clustering all the samples into - maxCategories clusters that is some categories are merged together. The clustering is applied only - in n\>2-class classification problems for categorical variables with N \> max_categories possible - values. In case of regression and 2-class classification the optimal split can be found - efficiently without employing clustering, thus the parameter is not used in these cases. - @param priors - @param calcVarImportance If true then variable importance will be calculated and then it can be - retrieved by RTrees::getVarImportance. - @param nactiveVars The size of the randomly selected subset of features at each tree node and that - are used to find the best split(s). If you set it to 0 then the size will be set to the square - root of the total number of features. - @param termCrit The termination criteria that specifies when the training algorithm stops - either - when the specified number of trees is trained and added to the ensemble or when sufficient - accuracy (measured as OOB error) is achieved. Typically the more trees you have the better the - accuracy. However, the improvement in accuracy generally diminishes and asymptotes pass a certain - number of trees. Also to keep in mind, the number of tree increases the prediction time linearly. - - The default constructor sets all parameters to default values which are different from default - values of `DTrees::Params`: - @code - RTrees::Params::Params() : DTrees::Params( 5, 10, 0, false, 10, 0, false, false, Mat() ), - calcVarImportance(false), nactiveVars(0) - { - termCrit = cvTermCriteria( TermCriteria::MAX_ITERS + TermCriteria::EPS, 50, 0.1 ); - } - @endcode - */ + /** @brief Constructor with parameters. */ Params( int maxDepth, int minSampleCount, double regressionAccuracy, bool useSurrogates, int maxCategories, const Mat& priors, bool calcVarImportance, int nactiveVars, TermCriteria termCrit ); - CV_PROP_RW bool calcVarImportance; // true <=> RF processes variable importance + /** If true then variable importance will be calculated and then it can be retrieved by + RTrees::getVarImportance. Default value is false.*/ + CV_PROP_RW bool calcVarImportance; + /** The size of the randomly selected subset of features at each tree node and that are used + to find the best split(s). If you set it to 0 then the size will be set to the square root + of the total number of features. Default value is 0.*/ CV_PROP_RW int nactiveVars; + /** The termination criteria that specifies when the training algorithm stops - either when + the specified number of trees is trained and added to the ensemble or when sufficient + accuracy (measured as OOB error) is achieved. Typically the more trees you have the better + the accuracy. However, the improvement in accuracy generally diminishes and asymptotes past + a certain number of trees. Also keep in mind that the number of trees increases the prediction + time linearly. Default value is TermCriteria(TermCriteria::MAX_ITER + TermCriteria::EPS, + 50, 0.1)*/ CV_PROP_RW TermCriteria termCrit; }; @@ -1714,109 +1201,82 @@ public: /** @brief Returns the variable importance array.
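A short sketch of how the random forest parameters above might be combined, including retrieving the variable importance described just below; the parameter values and the TrainData object 'data' are illustrative assumptions:
@code
using namespace cv;
using namespace cv::ml;
// maxDepth, minSampleCount, regressionAccuracy, useSurrogates, maxCategories, priors,
// calcVarImportance, nactiveVars, termCrit
RTrees::Params params(10, 5, 0, false, 10, Mat(), true, 0,
                      TermCriteria(TermCriteria::MAX_ITER + TermCriteria::EPS, 50, 0.1));
Ptr<RTrees> rtrees = RTrees::create(params);
rtrees->train(data);                             // 'data' is a Ptr<TrainData> prepared elsewhere
Mat importance = rtrees->getVarImportance();     // non-empty because calcVarImportance=true
@endcode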
The method returns the variable importance vector, computed at the training stage when - RTParams::calcVarImportance is set to true. If this flag was set to false, the empty matrix is + Params::calcVarImportance is set to true. If this flag was set to false, the empty matrix is returned. */ virtual Mat getVarImportance() const = 0; /** @brief Creates the empty model - Use StatModel::train to train the model, StatModel::train to create and - train the model, StatModel::load to load the pre-trained model. + Use StatModel::train to train the model, StatModel::train to create and train the model, + StatModel::load to load the pre-trained model. */ static Ptr create(const Params& params=Params()); }; -//! @} ml_randomtrees - /****************************************************************************************\ * Boosted tree classifier * \****************************************************************************************/ -//! @addtogroup ml_boost -//! @{ - /** @brief Boosted tree classifier derived from DTrees + +@sa @ref ml_intro_boost */ class CV_EXPORTS_W Boost : public DTrees { public: - /** @brief The structure is derived from DTrees::Params but not all of the decision tree parameters are + /** @brief Parameters of Boost trees. + + The structure is derived from DTrees::Params but not all of the decision tree parameters are supported. In particular, cross-validation is not supported. - All parameters are public. You can initialize them by a constructor and then override some of them - directly if you want. + All parameters are public. You can initialize them by a constructor and then override some of + them directly if you want. */ class CV_EXPORTS_W_MAP Params : public DTrees::Params { public: - CV_PROP_RW int boostType; - CV_PROP_RW int weakCount; + CV_PROP_RW int boostType; //!< Type of the boosting algorithm. See Boost::Types. + //!< Default value is Boost::REAL. + CV_PROP_RW int weakCount; //!< The number of weak classifiers. Default value is 100. + /** A threshold between 0 and 1 used to save computational time. Samples with summary weight + \f$\leq 1 - weight_trim_rate\f$ do not participate in the *next* iteration of training. Set + this parameter to 0 to turn off this functionality. Default value is 0.95.*/ CV_PROP_RW double weightTrimRate; + /** @brief Default constructor */ Params(); - /** @brief The constructors. - - @param boostType Type of the boosting algorithm. Possible values are: - - **Boost::DISCRETE** Discrete AdaBoost. - - **Boost::REAL** Real AdaBoost. It is a technique that utilizes confidence-rated predictions - and works well with categorical data. - - **Boost::LOGIT** LogitBoost. It can produce good regression fits. - - **Boost::GENTLE** Gentle AdaBoost. It puts less weight on outlier data points and for that - reason is often good with regression data. - Gentle AdaBoost and Real AdaBoost are often the preferable choices. - @param weakCount The number of weak classifiers. - @param weightTrimRate A threshold between 0 and 1 used to save computational time. Samples - with summary weight \f$\leq 1 - weight_trim_rate\f$ do not participate in the *next* iteration of - training. Set this parameter to 0 to turn off this functionality. - @param maxDepth - @param useSurrogates - @param priors - - See DTrees::Params for description of other parameters. 
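A minimal sketch of assembling the boosting parameters described here; the choice of decision stumps (maxDepth=1), the weight trim rate and the 'data' object are illustrative assumptions:
@code
using namespace cv;
using namespace cv::ml;
// boostType, weakCount, weightTrimRate, maxDepth, useSurrogates, priors
Boost::Params params(Boost::REAL, 100, 0.95, 1, false, Mat());
Ptr<Boost> boost = Boost::create(params);
boost->train(data);                              // 'data' is a Ptr<TrainData> prepared elsewhere
@endcode
Real AdaBoost with shallow stumps is only one reasonable starting point; the other Boost::Types values slot into the same constructor argument.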
- - Default parameters are: - @code - Boost::Params::Params() - { - boostType = Boost::REAL; - weakCount = 100; - weightTrimRate = 0.95; - CVFolds = 0; - maxDepth = 1; - } - @endcode - */ + /** @brief Constructor with parameters */ Params( int boostType, int weakCount, double weightTrimRate, int maxDepth, bool useSurrogates, const Mat& priors ); }; - // Boosting type - enum { DISCRETE=0, REAL=1, LOGIT=2, GENTLE=3 }; + /** @brief Boosting type - /** @brief Returns the boosting parameters + Gentle AdaBoost and Real AdaBoost are often the preferable choices. + */ + enum Types { + DISCRETE=0, //!< Discrete AdaBoost. + REAL=1, //!< Real AdaBoost. It is a technique that utilizes confidence-rated predictions + //!< and works well with categorical data. + LOGIT=2, //!< LogitBoost. It can produce good regression fits. + GENTLE=3 //!< Gentle AdaBoost. It puts less weight on outlier data points and for that + //!(traindata, params) to create and - train the model, StatModel::load\(filename) to load the pre-trained model. + Use StatModel::train to train the model, StatModel::train\(traindata, params) to create + and train the model, StatModel::load\(filename) to load the pre-trained model. */ static Ptr create(const Params& params=Params()); }; -//! @} ml_boost - /****************************************************************************************\ * Gradient Boosted Trees * \****************************************************************************************/ @@ -1852,170 +1312,138 @@ public: /////////////////////////////////// Multi-Layer Perceptrons ////////////////////////////// -//! @addtogroup ml_neural -//! @{ - -/** @brief MLP model. +/** @brief Artificial Neural Networks - Multi-Layer Perceptrons. Unlike many other models in ML that are constructed and trained at once, in the MLP model these steps are separated. First, a network with the specified topology is created using the non-default constructor or the method ANN_MLP::create. All the weights are set to zeros. Then, the network is trained using a set of input and output vectors. The training procedure can be repeated more than once, that is, the weights can be adjusted based on the new training data. + +Additional flags for StatModel::train are available: ANN_MLP::TrainFlags. + +@sa @ref ml_intro_ann */ class CV_EXPORTS_W ANN_MLP : public StatModel { public: /** @brief Parameters of the MLP and of the training algorithm. - - You can initialize the structure by a constructor or the individual parameters can be adjusted - after the structure is created. - The network structure: - - member Mat layerSizes - The number of elements in each layer of network. The very first element specifies the number - of elements in the input layer. The last element - number of elements in the output layer. - - member int activateFunc - The activation function. Currently the only fully supported activation function is - ANN_MLP::SIGMOID_SYM. - - member double fparam1 - The first parameter of activation function, 0 by default. - - member double fparam2 - The second parameter of the activation function, 0 by default. - @note - If you are using the default ANN_MLP::SIGMOID_SYM activation function with the default - parameter values fparam1=0 and fparam2=0 then the function used is y = 1.7159\*tanh(2/3 \* x), - so the output will range from [-1.7159, 1.7159], instead of [0,1]. - - The back-propagation algorithm parameters: - - member double bpDWScale - Strength of the weight gradient term. The recommended value is about 0.1. 
- - member double bpMomentScale - Strength of the momentum term (the difference between weights on the 2 previous iterations). - This parameter provides some inertia to smooth the random fluctuations of the weights. It - can vary from 0 (the feature is disabled) to 1 and beyond. The value 0.1 or so is good - enough - The RPROP algorithm parameters (see @cite RPROP93 for details): - - member double prDW0 - Initial value \f$\Delta_0\f$ of update-values \f$\Delta_{ij}\f$. - - member double rpDWPlus - Increase factor \f$\eta^+\f$. It must be \>1. - - member double rpDWMinus - Decrease factor \f$\eta^-\f$. It must be \<1. - - member double rpDWMin - Update-values lower limit \f$\Delta_{min}\f$. It must be positive. - - member double rpDWMax - Update-values upper limit \f$\Delta_{max}\f$. It must be \>1. - */ + */ struct CV_EXPORTS_W_MAP Params { + /** @brief Default constructor */ Params(); - /** @brief Construct the parameter structure - - @param layerSizes Integer vector specifying the number of neurons in each layer including the - input and output layers. - @param activateFunc Parameter specifying the activation function for each neuron: one of - ANN_MLP::IDENTITY, ANN_MLP::SIGMOID_SYM, and ANN_MLP::GAUSSIAN. - @param fparam1 The first parameter of the activation function, \f$\alpha\f$. See the formulas in the - introduction section. - @param fparam2 The second parameter of the activation function, \f$\beta\f$. See the formulas in the - introduction section. - @param termCrit Termination criteria of the training algorithm. You can specify the maximum number - of iterations (maxCount) and/or how much the error could change between the iterations to make the - algorithm continue (epsilon). - @param trainMethod Training method of the MLP. Possible values are: - - **ANN_MLP_TrainParams::BACKPROP** The back-propagation algorithm. - - **ANN_MLP_TrainParams::RPROP** The RPROP algorithm. - @param param1 Parameter of the training method. It is rp_dw0 for RPROP and bp_dw_scale for - BACKPROP. - @param param2 Parameter of the training method. It is rp_dw_min for RPROP and bp_moment_scale - for BACKPROP. - - By default the RPROP algorithm is used: - @code - ANN_MLP_TrainParams::ANN_MLP_TrainParams() - { - layerSizes = Mat(); - activateFun = SIGMOID_SYM; - fparam1 = fparam2 = 0; - term_crit = TermCriteria( TermCriteria::MAX_ITER + TermCriteria::EPS, 1000, 0.01 ); - train_method = RPROP; - bpDWScale = bpMomentScale = 0.1; - rpDW0 = 0.1; rpDWPlus = 1.2; rpDWMinus = 0.5; - rpDWMin = FLT_EPSILON; rpDWMax = 50.; - } - @endcode + /** @brief Constructor with parameters + @note param1 sets Params::rp_dw0 for RPROP and Paramss::bp_dw_scale for BACKPROP. + @note param2 sets Params::rp_dw_min for RPROP and Params::bp_moment_scale for BACKPROP. */ Params( const Mat& layerSizes, int activateFunc, double fparam1, double fparam2, TermCriteria termCrit, int trainMethod, double param1, double param2=0 ); - enum { BACKPROP=0, RPROP=1 }; + /** Available training methods */ + enum TrainingMethods { + BACKPROP=0, //!< The back-propagation algorithm. + RPROP=1 //!< The RPROP algorithm. See @cite RPROP93 for details. + }; + /** Integer vector specifying the number of neurons in each layer including the input and + output layers. The very first element specifies the number of elements in the input layer. + The last element - number of elements in the output layer. Default value is empty Mat.*/ CV_PROP_RW Mat layerSizes; + /** The activation function for each neuron. 
Currently the default and the only fully + supported activation function is ANN_MLP::SIGMOID_SYM. See ANN_MLP::ActivationFunctions.*/ CV_PROP_RW int activateFunc; + /** The first parameter of the activation function, \f$\alpha\f$. Default value is 0. */ CV_PROP_RW double fparam1; + /** The second parameter of the activation function, \f$\beta\f$. Default value is 0. */ CV_PROP_RW double fparam2; + /** Termination criteria of the training algorithm. You can specify the maximum number of + iterations (maxCount) and/or how much the error could change between the iterations to make + the algorithm continue (epsilon). Default value is TermCriteria(TermCriteria::MAX_ITER + + TermCriteria::EPS, 1000, 0.01).*/ CV_PROP_RW TermCriteria termCrit; + /** Training method. Default value is Params::RPROP. See ANN_MLP::Params::TrainingMethods.*/ CV_PROP_RW int trainMethod; // backpropagation parameters - CV_PROP_RW double bpDWScale, bpMomentScale; + /** BPROP: Strength of the weight gradient term. The recommended value is about 0.1. Default + value is 0.1.*/ + CV_PROP_RW double bpDWScale; + /** BPROP: Strength of the momentum term (the difference between weights on the 2 previous + iterations). This parameter provides some inertia to smooth the random fluctuations of the + weights. It can vary from 0 (the feature is disabled) to 1 and beyond. The value 0.1 or so + is good enough. Default value is 0.1.*/ + CV_PROP_RW double bpMomentScale; // rprop parameters - CV_PROP_RW double rpDW0, rpDWPlus, rpDWMinus, rpDWMin, rpDWMax; + /** RPROP: Initial value \f$\Delta_0\f$ of update-values \f$\Delta_{ij}\f$. Default value is 0.1.*/ + CV_PROP_RW double rpDW0; + /** RPROP: Increase factor \f$\eta^+\f$. It must be \>1. Default value is 1.2.*/ + CV_PROP_RW double rpDWPlus; + /** RPROP: Decrease factor \f$\eta^-\f$. It must be \<1. Default value is 0.5.*/ + CV_PROP_RW double rpDWMinus; + /** RPROP: Update-values lower limit \f$\Delta_{min}\f$. It must be positive. Default value is FLT_EPSILON.*/ + CV_PROP_RW double rpDWMin; + /** RPROP: Update-values upper limit \f$\Delta_{max}\f$. It must be \>1. Default value is 50.*/ + CV_PROP_RW double rpDWMax; }; - // possible activation functions - enum { IDENTITY = 0, SIGMOID_SYM = 1, GAUSSIAN = 2 }; + /** possible activation functions */ + enum ActivationFunctions { + /** Identity function: \f$f(x)=x\f$ */ + IDENTITY = 0, + /** Symmetrical sigmoid: \f$f(x)=\beta*(1-e^{-\alpha x})/(1+e^{-\alpha x})\f$ + @note + If you are using the default sigmoid activation function with the default parameter values + fparam1=0 and fparam2=0 then the function used is y = 1.7159\*tanh(2/3 \* x), so the output + will range from [-1.7159, 1.7159], instead of [0,1].*/ + SIGMOID_SYM = 1, + /** Gaussian function: \f$f(x)=\beta e^{-\alpha x*x}\f$ */ + GAUSSIAN = 2 + }; - // available training flags - enum { UPDATE_WEIGHTS = 1, NO_INPUT_SCALE = 2, NO_OUTPUT_SCALE = 4 }; + /** Train options */ + enum TrainFlags { + /** Update the network weights, rather than compute them from scratch. In the latter case + the weights are initialized using the Nguyen-Widrow algorithm. */ + UPDATE_WEIGHTS = 1, + /** Do not normalize the input vectors. If this flag is not set, the training algorithm + normalizes each input feature independently, shifting its mean value to 0 and making the + standard deviation equal to 1. If the network is assumed to be updated frequently, the new + training data could be much different from the original one. In this case, you should take care + of proper normalization.
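A compact sketch tying the ANN_MLP parameters and training flags together; the network topology, the prepared TrainData objects 'data' and 'moreData', and the follow-up update call are illustrative assumptions:
@code
using namespace cv;
using namespace cv::ml;
Mat layerSizes = (Mat_<int>(1, 3) << 2, 5, 1);   // 2 inputs, 5 hidden neurons, 1 output
ANN_MLP::Params params(layerSizes, ANN_MLP::SIGMOID_SYM, 0, 0,
                       TermCriteria(TermCriteria::MAX_ITER + TermCriteria::EPS, 1000, 0.01),
                       ANN_MLP::Params::RPROP, 0.1);
Ptr<ANN_MLP> nnet = ANN_MLP::create(params);
nnet->train(data);                               // initial training from scratch
nnet->train(moreData, ANN_MLP::UPDATE_WEIGHTS);  // later: refine weights on new samples
@endcode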
*/ + NO_INPUT_SCALE = 2, + /** Do not normalize the output vectors. If the flag is not set, the training algorithm + normalizes each output feature independently, by transforming it to the certain range + depending on the used activation function. */ + NO_OUTPUT_SCALE = 4 + }; virtual Mat getWeights(int layerIdx) const = 0; - /** @brief Sets the new network parameters - - @param p The new parameters - - The existing network, if any, will be destroyed and new empty one will be created. It should be - re-trained after that. - */ + /** @brief Sets the new network parameters */ virtual void setParams(const Params& p) = 0; - /** @brief Retrieves the current network parameters - */ + /** @brief Retrieves the current network parameters */ virtual Params getParams() const = 0; /** @brief Creates empty model - Use StatModel::train to train the model, StatModel::train\(traindata, params) to create - and train the model, StatModel::load\(filename) to load the pre-trained model. Note that - the train method has optional flags, and the following flags are handled by \`ANN_MLP\`: - - - **UPDATE_WEIGHTS** Algorithm updates the network weights, rather than computes them from - scratch. In the latter case the weights are initialized using the Nguyen-Widrow algorithm. - - **NO_INPUT_SCALE** Algorithm does not normalize the input vectors. If this flag is not set, - the training algorithm normalizes each input feature independently, shifting its mean value to - 0 and making the standard deviation equal to 1. If the network is assumed to be updated - frequently, the new training data could be much different from original one. In this case, you - should take care of proper normalization. - - **NO_OUTPUT_SCALE** Algorithm does not normalize the output vectors. If the flag is not set, - the training algorithm normalizes each output feature independently, by transforming it to the - certain range depending on the used activation function. + Use StatModel::train to train the model, StatModel::train\(traindata, params) to + create and train the model, StatModel::load\(filename) to load the pre-trained model. + Note that the train method has optional flags: ANN_MLP::TrainFlags. */ static Ptr create(const Params& params=Params()); }; -//! @} ml_neural - /****************************************************************************************\ * Logistic Regression * \****************************************************************************************/ -//! @addtogroup ml_lr -//! @{ - /** @brief Implements Logistic Regression classifier. + +@sa @ref ml_intro_lr */ class CV_EXPORTS LogisticRegression : public StatModel { @@ -2023,71 +1451,55 @@ public: class CV_EXPORTS Params { public: - /** @brief The constructors - - @param learning_rate Specifies the learning rate. - @param iters Specifies the number of iterations. - @param method Specifies the kind of training method used. It should be set to either - LogisticRegression::BATCH or LogisticRegression::MINI_BATCH. If using - LogisticRegression::MINI_BATCH, set LogisticRegression::Params.mini_batch_size to a positive - integer. - @param normalization Specifies the kind of regularization to be applied. - LogisticRegression::REG_L1 or LogisticRegression::REG_L2 (L1 norm or L2 norm). To use this, set - LogisticRegression::Params.regularized to a integer greater than zero. - @param reg To enable or disable regularization. Set to positive integer (greater than zero) to - enable and to 0 to disable. 
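A brief sketch of putting the logistic regression parameters being discussed here to use; the learning rate, batch size, 'data' and 'testSamples' are illustrative assumptions:
@code
using namespace cv;
using namespace cv::ml;
// learning_rate, iters, method, normalization, reg, batch_size
LogisticRegression::Params params(0.001, 1000, LogisticRegression::MINI_BATCH,
                                  LogisticRegression::REG_L2, 1, 10);
Ptr<LogisticRegression> lr = LogisticRegression::create(params);
lr->train(data);                                 // 'data' holds CV_32F samples (assumed prepared)
Mat results;
lr->predict(testSamples, results);               // predicted labels arrive as CV_32S
@endcode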
- @param batch_size Specifies the number of training samples taken in each step of Mini-Batch - Gradient Descent. Will only be used if using LogisticRegression::MINI_BATCH training algorithm. - It has to take values less than the total number of training samples. - - By initializing this structure, one can set all the parameters required for Logistic Regression - classifier. - */ + /** @brief Constructor */ Params(double learning_rate = 0.001, int iters = 1000, int method = LogisticRegression::BATCH, int normalization = LogisticRegression::REG_L2, int reg = 1, int batch_size = 1); - double alpha; - int num_iters; + double alpha; //!< learning rate. + int num_iters; //!< number of iterations. + /** Kind of regularization to be applied. See LogisticRegression::RegKinds. */ int norm; + /** Enable or disable regularization. Set to positive integer (greater than zero) to enable + and to 0 to disable. */ int regularized; + /** Kind of training method used. See LogisticRegression::Methods. */ int train_method; + /** Specifies the number of training samples taken in each step of Mini-Batch Gradient + Descent. Will only be used if using LogisticRegression::MINI_BATCH training algorithm. It + has to take values less than the total number of training samples. */ int mini_batch_size; + /** Termination criteria of the algorithm */ TermCriteria term_crit; }; - enum { REG_L1 = 0, REG_L2 = 1}; - enum { BATCH = 0, MINI_BATCH = 1}; - - /** @brief This function writes the trained LogisticRegression clasifier to disk. - */ - virtual void write( FileStorage &fs ) const = 0; - /** @brief This function reads the trained LogisticRegression clasifier from disk. - */ - virtual void read( const FileNode &fn ) = 0; + //! Regularization kinds + enum RegKinds { + REG_L1 = 0, //!< %L1 norm + REG_L2 = 1 //!< %L2 norm. Set Params::regularized \> 0 when using this kind + }; - /** @brief Trains the Logistic Regression classifier and returns true if successful. + //! Training methods + enum Methods { + BATCH = 0, + MINI_BATCH = 1 //!< Set Params::mini_batch_size to a positive integer when using this method. + }; - @param trainData Instance of ml::TrainData class holding learning data. - @param flags Not used. - */ - virtual bool train( const Ptr& trainData, int flags=0 ) = 0; /** @brief Predicts responses for input samples and returns a float type. @param samples The input data for the prediction algorithm. Matrix [m x n], where each row - contains variables (features) of one object being classified. Should have data type CV_32F. + contains variables (features) of one object being classified. Should have data type CV_32F. @param results Predicted labels as a column matrix of type CV_32S. @param flags Not used. */ virtual float predict( InputArray samples, OutputArray results=noArray(), int flags=0 ) const = 0; - virtual void clear() = 0; /** @brief This function returns the trained paramters arranged across rows. - For a two class classifcation problem, it returns a row matrix. - It returns learnt paramters of the Logistic Regression as a matrix of type CV_32F. + For a two class classification problem, it returns a row matrix. It returns learnt parameters of + the Logistic Regression as a matrix of type CV_32F. */ virtual Mat get_learnt_thetas() const = 0; @@ -2100,21 +1512,24 @@ public: static Ptr create( const Params& params = Params() ); }; -//!
@} ml_lr - /****************************************************************************************\ * Auxilary functions declarations * \****************************************************************************************/ -/** Generates `sample` from multivariate normal distribution, where `mean` - is an - average row vector, `cov` - symmetric covariation matrix */ +/** @brief Generates _sample_ from multivariate normal distribution + +@param mean an average row vector +@param cov symmetric covariation matrix +@param nsamples returned samples count +@param samples returned samples array +*/ CV_EXPORTS void randMVNormal( InputArray mean, InputArray cov, int nsamples, OutputArray samples); -/** Generates sample from gaussian mixture distribution */ +/** @brief Generates sample from gaussian mixture distribution */ CV_EXPORTS void randGaussMixture( InputArray means, InputArray covs, InputArray weights, int nsamples, OutputArray samples, OutputArray sampClasses ); -/** creates test set */ +/** @brief Creates test set */ CV_EXPORTS void createConcentricSpheresTestSet( int nsamples, int nfeatures, int nclasses, OutputArray samples, OutputArray responses); diff --git a/modules/objdetect/src/cascadedetect.cpp b/modules/objdetect/src/cascadedetect.cpp index 841cfa2725..a7255820ba 100644 --- a/modules/objdetect/src/cascadedetect.cpp +++ b/modules/objdetect/src/cascadedetect.cpp @@ -587,7 +587,7 @@ bool HaarEvaluator::read(const FileNode& node, Size _origWinSize) localSize = lbufSize = Size(0, 0); if (ocl::haveOpenCL()) { - if (ocl::Device::getDefault().isAMD()) + if (ocl::Device::getDefault().isAMD() || ocl::Device::getDefault().isIntel()) { localSize = Size(8, 8); lbufSize = Size(origWinSize.width + localSize.width, diff --git a/modules/objdetect/src/opencl/cascadedetect.cl b/modules/objdetect/src/opencl/cascadedetect.cl index 13cb1aa389..465fa0c65d 100644 --- a/modules/objdetect/src/opencl/cascadedetect.cl +++ b/modules/objdetect/src/opencl/cascadedetect.cl @@ -233,11 +233,12 @@ void runHaarClassifier( for( stageIdx = SPLIT_STAGE; stageIdx < N_STAGES; stageIdx++ ) { + barrier(CLK_LOCAL_MEM_FENCE); int nrects = lcount[0]; - barrier(CLK_LOCAL_MEM_FENCE); if( nrects == 0 ) break; + barrier(CLK_LOCAL_MEM_FENCE); if( lidx == 0 ) lcount[0] = 0; @@ -396,8 +397,8 @@ __kernel void runLBPClassifierStumpSimple( for( tileIdx = groupIdx; tileIdx < totalTiles; tileIdx += ngroups ) { - int iy = ((tileIdx / ntiles.x)*local_size_y + ly)*ystep; - int ix = ((tileIdx % ntiles.x)*local_size_x + lx)*ystep; + int iy = mad24((tileIdx / ntiles.x), local_size_y, ly) * ystep; + int ix = mad24((tileIdx % ntiles.x), local_size_x, lx) * ystep; if( ix < worksize.x && iy < worksize.y ) { diff --git a/modules/photo/test/test_cloning.cpp b/modules/photo/test/test_cloning.cpp index 56d166205c..1f86612a4a 100644 --- a/modules/photo/test/test_cloning.cpp +++ b/modules/photo/test/test_cloning.cpp @@ -64,6 +64,7 @@ TEST(Photo_SeamlessClone_normal, regression) string original_path1 = folder + "source1.png"; string original_path2 = folder + "destination1.png"; string original_path3 = folder + "mask.png"; + string reference_path = folder + "reference.png"; Mat source = imread(original_path1, IMREAD_COLOR); Mat destination = imread(original_path2, IMREAD_COLOR); @@ -79,8 +80,8 @@ TEST(Photo_SeamlessClone_normal, regression) p.y = destination.size().height/2; seamlessClone(source, destination, mask, p, result, 1); - - Mat reference = imread(folder + "reference.png"); + Mat reference = imread(reference_path); + 
ASSERT_FALSE(reference.empty()) << "Could not load reference image " << reference_path; SAVE(result); @@ -94,6 +95,7 @@ TEST(Photo_SeamlessClone_mixed, regression) string original_path1 = folder + "source1.png"; string original_path2 = folder + "destination1.png"; string original_path3 = folder + "mask.png"; + string reference_path = folder + "reference.png"; Mat source = imread(original_path1, IMREAD_COLOR); Mat destination = imread(original_path2, IMREAD_COLOR); @@ -111,7 +113,9 @@ TEST(Photo_SeamlessClone_mixed, regression) SAVE(result); - Mat reference = imread(folder + "reference.png"); + Mat reference = imread(reference_path); + ASSERT_FALSE(reference.empty()) << "Could not load reference image " << reference_path; + double error = cvtest::norm(reference, result, NORM_L1); EXPECT_LE(error, numerical_precision); @@ -123,6 +127,7 @@ TEST(Photo_SeamlessClone_featureExchange, regression) string original_path1 = folder + "source1.png"; string original_path2 = folder + "destination1.png"; string original_path3 = folder + "mask.png"; + string reference_path = folder + "reference.png"; Mat source = imread(original_path1, IMREAD_COLOR); Mat destination = imread(original_path2, IMREAD_COLOR); @@ -140,7 +145,9 @@ TEST(Photo_SeamlessClone_featureExchange, regression) SAVE(result); - Mat reference = imread(folder + "reference.png"); + Mat reference = imread(reference_path); + ASSERT_FALSE(reference.empty()) << "Could not load reference image " << reference_path; + double error = cvtest::norm(reference, result, NORM_L1); EXPECT_LE(error, numerical_precision); @@ -151,6 +158,7 @@ TEST(Photo_SeamlessClone_colorChange, regression) string folder = string(cvtest::TS::ptr()->get_data_path()) + "cloning/color_change/"; string original_path1 = folder + "source1.png"; string original_path2 = folder + "mask.png"; + string reference_path = folder + "reference.png"; Mat source = imread(original_path1, IMREAD_COLOR); Mat mask = imread(original_path2, IMREAD_COLOR); @@ -163,7 +171,9 @@ TEST(Photo_SeamlessClone_colorChange, regression) SAVE(result); - Mat reference = imread(folder + "reference.png"); + Mat reference = imread(reference_path); + ASSERT_FALSE(reference.empty()) << "Could not load reference image " << reference_path; + double error = cvtest::norm(reference, result, NORM_L1); EXPECT_LE(error, numerical_precision); @@ -174,6 +184,7 @@ TEST(Photo_SeamlessClone_illuminationChange, regression) string folder = string(cvtest::TS::ptr()->get_data_path()) + "cloning/Illumination_Change/"; string original_path1 = folder + "source1.png"; string original_path2 = folder + "mask.png"; + string reference_path = folder + "reference.png"; Mat source = imread(original_path1, IMREAD_COLOR); Mat mask = imread(original_path2, IMREAD_COLOR); @@ -186,7 +197,7 @@ TEST(Photo_SeamlessClone_illuminationChange, regression) SAVE(result); - Mat reference = imread(folder + "reference.png"); + Mat reference = imread(reference_path); double error = cvtest::norm(reference, result, NORM_L1); EXPECT_LE(error, numerical_precision); @@ -197,6 +208,7 @@ TEST(Photo_SeamlessClone_textureFlattening, regression) string folder = string(cvtest::TS::ptr()->get_data_path()) + "cloning/Texture_Flattening/"; string original_path1 = folder + "source1.png"; string original_path2 = folder + "mask.png"; + string reference_path = folder + "reference.png"; Mat source = imread(original_path1, IMREAD_COLOR); Mat mask = imread(original_path2, IMREAD_COLOR); @@ -209,7 +221,9 @@ TEST(Photo_SeamlessClone_textureFlattening, regression) SAVE(result); - Mat 
reference = imread(folder + "reference.png"); + Mat reference = imread(reference_path); + ASSERT_FALSE(reference.empty()) << "Could not load reference image " << reference_path; + double error = cvtest::norm(reference, result, NORM_L1); EXPECT_LE(error, numerical_precision); diff --git a/modules/shape/test/test_emdl1.cpp b/modules/shape/test/test_emdl1.cpp deleted file mode 100644 index e52351bcf6..0000000000 --- a/modules/shape/test/test_emdl1.cpp +++ /dev/null @@ -1,263 +0,0 @@ -/*M/////////////////////////////////////////////////////////////////////////////////////// -// -// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. -// -// By downloading, copying, installing or using the software you agree to this license. -// If you do not agree to this license, do not download, install, -// copy or use the software. -// -// -// Intel License Agreement -// For Open Source Computer Vision Library -// -// Copyright (C) 2000, Intel Corporation, all rights reserved. -// Third party copyrights are property of their respective owners. -// -// Redistribution and use in source and binary forms, with or without modification, -// are permitted provided that the following conditions are met: -// -// * Redistribution's of source code must retain the above copyright notice, -// this list of conditions and the following disclaimer. -// -// * Redistribution's in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// * The name of Intel Corporation may not be used to endorse or promote products -// derived from this software without specific prior written permission. -// -// This software is provided by the copyright holders and contributors "as is" and -// any express or implied warranties, including, but not limited to, the implied -// warranties of merchantability and fitness for a particular purpose are disclaimed. -// In no event shall the Intel Corporation or contributors be liable for any direct, -// indirect, incidental, special, exemplary, or consequential damages -// (including, but not limited to, procurement of substitute goods or services; -// loss of use, data, or profits; or business interruption) however caused -// and on any theory of liability, whether in contract, strict liability, -// or tort (including negligence or otherwise) arising in any way out of -// the use of this software, even if advised of the possibility of such damage. 
-// -//M*/ - -#include "test_precomp.hpp" - -using namespace cv; -using namespace std; - -const int angularBins=12; -const int radialBins=4; -const float minRad=0.2f; -const float maxRad=2; -const int NSN=5;//10;//20; //number of shapes per class -const int NP=100; //number of points sympliying the contour -const float CURRENT_MAX_ACCUR=95; //98% and 99% reached in several tests, 95 is fixed as minimum boundary - -class CV_ShapeEMDTest : public cvtest::BaseTest -{ -public: - CV_ShapeEMDTest(); - ~CV_ShapeEMDTest(); -protected: - void run(int); - -private: - void mpegTest(); - void listShapeNames(vector &listHeaders); - vector convertContourType(const Mat &, int n=0 ); - float computeShapeDistance(vector & queryNormal, - vector & queryFlipped1, - vector & queryFlipped2, - vector& testq); - void displayMPEGResults(); -}; - -CV_ShapeEMDTest::CV_ShapeEMDTest() -{ -} -CV_ShapeEMDTest::~CV_ShapeEMDTest() -{ -} - -vector CV_ShapeEMDTest::convertContourType(const Mat& currentQuery, int n) -{ - vector > _contoursQuery; - vector contoursQuery; - findContours(currentQuery, _contoursQuery, RETR_LIST, CHAIN_APPROX_NONE); - for (size_t border=0; border<_contoursQuery.size(); border++) - { - for (size_t p=0; p<_contoursQuery[border].size(); p++) - { - contoursQuery.push_back(Point2f((float)_contoursQuery[border][p].x, - (float)_contoursQuery[border][p].y)); - } - } - - // In case actual number of points is less than n - int dum=0; - for (int add=(int)contoursQuery.size()-1; add cont; - for (int i=0; i &listHeaders) -{ - listHeaders.push_back("apple"); //ok - listHeaders.push_back("children"); // ok - listHeaders.push_back("device7"); // ok - listHeaders.push_back("Heart"); // ok - listHeaders.push_back("teddy"); // ok -} -float CV_ShapeEMDTest::computeShapeDistance(vector & query1, vector & query2, - vector & query3, vector & testq) -{ - //waitKey(0); - Ptr mysc = createShapeContextDistanceExtractor(angularBins, radialBins, minRad, maxRad); - //Ptr cost = createNormHistogramCostExtractor(cv::DIST_L1); - //Ptr cost = createChiHistogramCostExtractor(30,0.15); - //Ptr cost = createEMDHistogramCostExtractor(); - // Ptr cost = createEMDL1HistogramCostExtractor(); - mysc->setIterations(1); //(3) - mysc->setCostExtractor( createEMDL1HistogramCostExtractor() ); - //mysc->setTransformAlgorithm(createAffineTransformer(true)); - mysc->setTransformAlgorithm( createThinPlateSplineShapeTransformer() ); - //mysc->setImageAppearanceWeight(1.6); - //mysc->setImageAppearanceWeight(0.0); - //mysc->setImages(im1,imtest); - return ( std::min( mysc->computeDistance(query1, testq), - std::min(mysc->computeDistance(query2, testq), mysc->computeDistance(query3, testq) ))); -} - -void CV_ShapeEMDTest::mpegTest() -{ - string baseTestFolder="shape/mpeg_test/"; - string path = cvtest::TS::ptr()->get_data_path() + baseTestFolder; - vector namesHeaders; - listShapeNames(namesHeaders); - - // distance matrix // - Mat distanceMat=Mat::zeros(NSN*(int)namesHeaders.size(), NSN*(int)namesHeaders.size(), CV_32F); - - // query contours (normal v flipped, h flipped) and testing contour // - vector contoursQuery1, contoursQuery2, contoursQuery3, contoursTesting; - - // reading query and computing its properties // - int counter=0; - const int loops=NSN*(int)namesHeaders.size()*NSN*(int)namesHeaders.size(); - for (size_t n=0; n origContour; - contoursQuery1=convertContourType(currentQuery, NP); - origContour=contoursQuery1; - contoursQuery2=convertContourType(flippedHQuery, NP); - contoursQuery3=convertContourType(flippedVQuery, NP); - - // 
compare with all the rest of the images: testing // - for (size_t nt=0; nt(NSN*(int)n+i-1, - NSN*(int)nt+it-1)=0; - continue; - } - // read testing image // - stringstream thetestpathandname; - thetestpathandname<(NSN*(int)n+i-1, NSN*(int)nt+it-1)= - computeShapeDistance(contoursQuery1, contoursQuery2, contoursQuery3, contoursTesting); - std::cout<(NSN*(int)n+i-1, NSN*(int)nt+it-1)<get_data_path() + baseTestFolder + "distanceMatrixMPEGTest.yml", FileStorage::WRITE); - fs << "distanceMat" << distanceMat; -} - -const int FIRST_MANY=2*NSN; -void CV_ShapeEMDTest::displayMPEGResults() -{ - string baseTestFolder="shape/mpeg_test/"; - Mat distanceMat; - FileStorage fs(cvtest::TS::ptr()->get_data_path() + baseTestFolder + "distanceMatrixMPEGTest.yml", FileStorage::READ); - vector namesHeaders; - listShapeNames(namesHeaders); - - // Read generated MAT // - fs["distanceMat"]>>distanceMat; - - int corrects=0; - int divi=0; - for (int row=0; row(row,col)>distanceMat.at(row,i)) - { - nsmall++; - } - } - if (nsmall<=FIRST_MANY) - { - corrects++; - } - } - } - float porc = 100*float(corrects)/(NSN*distanceMat.rows); - std::cout<<"%="<= CURRENT_MAX_ACCUR) - ts->set_failed_test_info(cvtest::TS::OK); - else - ts->set_failed_test_info(cvtest::TS::FAIL_BAD_ACCURACY); - -} - -void CV_ShapeEMDTest::run( int /*start_from*/ ) -{ - mpegTest(); - displayMPEGResults(); -} - -TEST(ShapeEMD_SCD, regression) { CV_ShapeEMDTest test; test.safe_run(); } diff --git a/modules/shape/test/test_hausdorff.cpp b/modules/shape/test/test_hausdorff.cpp deleted file mode 100644 index ec33436f06..0000000000 --- a/modules/shape/test/test_hausdorff.cpp +++ /dev/null @@ -1,280 +0,0 @@ -/*M/////////////////////////////////////////////////////////////////////////////////////// -// -// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. -// -// By downloading, copying, installing or using the software you agree to this license. -// If you do not agree to this license, do not download, install, -// copy or use the software. -// -// -// Intel License Agreement -// For Open Source Computer Vision Library -// -// Copyright (C) 2000, Intel Corporation, all rights reserved. -// Third party copyrights are property of their respective owners. -// -// Redistribution and use in source and binary forms, with or without modification, -// are permitted provided that the following conditions are met: -// -// * Redistribution's of source code must retain the above copyright notice, -// this list of conditions and the following disclaimer. -// -// * Redistribution's in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// * The name of Intel Corporation may not be used to endorse or promote products -// derived from this software without specific prior written permission. -// -// This software is provided by the copyright holders and contributors "as is" and -// any express or implied warranties, including, but not limited to, the implied -// warranties of merchantability and fitness for a particular purpose are disclaimed. 
-// In no event shall the Intel Corporation or contributors be liable for any direct, -// indirect, incidental, special, exemplary, or consequential damages -// (including, but not limited to, procurement of substitute goods or services; -// loss of use, data, or profits; or business interruption) however caused -// and on any theory of liability, whether in contract, strict liability, -// or tort (including negligence or otherwise) arising in any way out of -// the use of this software, even if advised of the possibility of such damage. -// -//M*/ - -#include "test_precomp.hpp" -#include - -using namespace cv; -using namespace std; - -const int NSN=5;//10;//20; //number of shapes per class -const float CURRENT_MAX_ACCUR=85; //90% and 91% reached in several tests, 85 is fixed as minimum boundary - -class CV_HaussTest : public cvtest::BaseTest -{ -public: - CV_HaussTest(); - ~CV_HaussTest(); -protected: - void run(int); -private: - float computeShapeDistance(vector &query1, vector &query2, - vector &query3, vector &testq); - vector convertContourType(const Mat& currentQuery, int n=180); - vector normalizeContour(const vector & contour); - void listShapeNames( vector &listHeaders); - void mpegTest(); - void displayMPEGResults(); -}; - -CV_HaussTest::CV_HaussTest() -{ -} -CV_HaussTest::~CV_HaussTest() -{ -} - -vector CV_HaussTest::normalizeContour(const vector &contour) -{ - vector output(contour.size()); - Mat disMat((int)contour.size(),(int)contour.size(),CV_32F); - Point2f meanpt(0,0); - float meanVal=1; - - for (int ii=0, end1 = (int)contour.size(); ii(ii,jj)=0; - else - { - disMat.at(ii,jj)= - float(fabs(double(contour[ii].x*contour[jj].x)))+float(fabs(double(contour[ii].y*contour[jj].y))); - } - } - meanpt.x+=contour[ii].x; - meanpt.y+=contour[ii].y; - } - meanpt.x/=contour.size(); - meanpt.y/=contour.size(); - meanVal=float(cv::mean(disMat)[0]); - for (size_t ii=0; ii &listHeaders) -{ - listHeaders.push_back("apple"); //ok - listHeaders.push_back("children"); // ok - listHeaders.push_back("device7"); // ok - listHeaders.push_back("Heart"); // ok - listHeaders.push_back("teddy"); // ok -} - - -vector CV_HaussTest::convertContourType(const Mat& currentQuery, int n) -{ - vector > _contoursQuery; - vector contoursQuery; - findContours(currentQuery, _contoursQuery, RETR_LIST, CHAIN_APPROX_NONE); - for (size_t border=0; border<_contoursQuery.size(); border++) - { - for (size_t p=0; p<_contoursQuery[border].size(); p++) - { - contoursQuery.push_back(_contoursQuery[border][p]); - } - } - - // In case actual number of points is less than n - for (int add=(int)contoursQuery.size()-1; add cont; - for (int i=0; i& query1, vector & query2, - vector & query3, vector & testq) -{ - Ptr haus = createHausdorffDistanceExtractor(); - return std::min(haus->computeDistance(query1,testq), std::min(haus->computeDistance(query2,testq), - haus->computeDistance(query3,testq))); -} - -void CV_HaussTest::mpegTest() -{ - string baseTestFolder="shape/mpeg_test/"; - string path = cvtest::TS::ptr()->get_data_path() + baseTestFolder; - vector namesHeaders; - listShapeNames(namesHeaders); - - // distance matrix // - Mat distanceMat=Mat::zeros(NSN*(int)namesHeaders.size(), NSN*(int)namesHeaders.size(), CV_32F); - - // query contours (normal v flipped, h flipped) and testing contour // - vector contoursQuery1, contoursQuery2, contoursQuery3, contoursTesting; - - // reading query and computing its properties // - int counter=0; - const int loops=NSN*(int)namesHeaders.size()*NSN*(int)namesHeaders.size(); - for (size_t n=0; 
n origContour; - contoursQuery1=convertContourType(currentQuery); - origContour=contoursQuery1; - contoursQuery2=convertContourType(flippedHQuery); - contoursQuery3=convertContourType(flippedVQuery); - - // compare with all the rest of the images: testing // - for (size_t nt=0; nt(NSN*(int)n+i-1, - NSN*(int)nt+it-1)=0; - continue; - } - // read testing image // - stringstream thetestpathandname; - thetestpathandname<(NSN*(int)n+i-1, NSN*(int)nt+it-1)= - computeShapeDistance(contoursQuery1, contoursQuery2, contoursQuery3, contoursTesting); - std::cout<(NSN*(int)n+i-1, NSN*(int)nt+it-1)<get_data_path() + baseTestFolder + "distanceMatrixMPEGTest.yml", FileStorage::WRITE); - fs << "distanceMat" << distanceMat; -} - -const int FIRST_MANY=2*NSN; -void CV_HaussTest::displayMPEGResults() -{ - string baseTestFolder="shape/mpeg_test/"; - Mat distanceMat; - FileStorage fs(cvtest::TS::ptr()->get_data_path() + baseTestFolder + "distanceMatrixMPEGTest.yml", FileStorage::READ); - vector namesHeaders; - listShapeNames(namesHeaders); - - // Read generated MAT // - fs["distanceMat"]>>distanceMat; - - int corrects=0; - int divi=0; - for (int row=0; row(row,col)>distanceMat.at(row,i)) - { - nsmall++; - } - } - if (nsmall<=FIRST_MANY) - { - corrects++; - } - } - } - float porc = 100*float(corrects)/(NSN*distanceMat.rows); - std::cout<<"%="<= CURRENT_MAX_ACCUR) - ts->set_failed_test_info(cvtest::TS::OK); - else - ts->set_failed_test_info(cvtest::TS::FAIL_BAD_ACCURACY); - -} - - -void CV_HaussTest::run(int /* */) -{ - mpegTest(); - displayMPEGResults(); - ts->set_failed_test_info(cvtest::TS::OK); -} - -TEST(Hauss, regression) { CV_HaussTest test; test.safe_run(); } diff --git a/modules/shape/test/test_precomp.cpp b/modules/shape/test/test_precomp.cpp deleted file mode 100644 index 5956e13e3e..0000000000 --- a/modules/shape/test/test_precomp.cpp +++ /dev/null @@ -1 +0,0 @@ -#include "test_precomp.hpp" diff --git a/modules/shape/test/test_precomp.hpp b/modules/shape/test/test_precomp.hpp index 819d711e85..af10e80088 100644 --- a/modules/shape/test/test_precomp.hpp +++ b/modules/shape/test/test_precomp.hpp @@ -16,6 +16,4 @@ #include "opencv2/imgcodecs.hpp" #include "opencv2/shape.hpp" -#include "opencv2/opencv_modules.hpp" - #endif diff --git a/modules/shape/test/test_shape.cpp b/modules/shape/test/test_shape.cpp index 04e89fe6b9..0601594f08 100644 --- a/modules/shape/test/test_shape.cpp +++ b/modules/shape/test/test_shape.cpp @@ -44,222 +44,258 @@ using namespace cv; using namespace std; -const int angularBins=12; -const int radialBins=4; -const float minRad=0.2f; -const float maxRad=2; -const int NSN=5;//10;//20; //number of shapes per class -const int NP=120; //number of points sympliying the contour -const float CURRENT_MAX_ACCUR=95; //99% and 100% reached in several tests, 95 is fixed as minimum boundary - -class CV_ShapeTest : public cvtest::BaseTest +template +class ShapeBaseTest : public cvtest::BaseTest { public: - CV_ShapeTest(); - ~CV_ShapeTest(); -protected: - void run(int); - -private: - void mpegTest(); - void listShapeNames(vector &listHeaders); - vector convertContourType(const Mat &, int n=0 ); - float computeShapeDistance(vector & queryNormal, - vector & queryFlipped1, - vector & queryFlipped2, - vector& testq); - void displayMPEGResults(); -}; - -CV_ShapeTest::CV_ShapeTest() -{ -} -CV_ShapeTest::~CV_ShapeTest() -{ -} - -vector CV_ShapeTest::convertContourType(const Mat& currentQuery, int n) -{ - vector > _contoursQuery; - vector contoursQuery; - findContours(currentQuery, _contoursQuery, 
RETR_LIST, CHAIN_APPROX_NONE); - for (size_t border=0; border<_contoursQuery.size(); border++) + typedef Point_ PointType; + ShapeBaseTest(int _NSN, int _NP, float _CURRENT_MAX_ACCUR) + : NSN(_NSN), NP(_NP), CURRENT_MAX_ACCUR(_CURRENT_MAX_ACCUR) { - for (size_t p=0; p<_contoursQuery[border].size(); p++) + // generate file list + vector shapeNames; + shapeNames.push_back("apple"); //ok + shapeNames.push_back("children"); // ok + shapeNames.push_back("device7"); // ok + shapeNames.push_back("Heart"); // ok + shapeNames.push_back("teddy"); // ok + for (vector::const_iterator i = shapeNames.begin(); i != shapeNames.end(); ++i) { - contoursQuery.push_back(Point2f((float)_contoursQuery[border][p].x, - (float)_contoursQuery[border][p].y)); + for (int j = 0; j < NSN; ++j) + { + stringstream filename; + filename << cvtest::TS::ptr()->get_data_path() + << "shape/mpeg_test/" << *i << "-" << j + 1 << ".png"; + filenames.push_back(filename.str()); + } } + // distance matrix + const int totalCount = (int)filenames.size(); + distanceMat = Mat::zeros(totalCount, totalCount, CV_32F); } - // In case actual number of points is less than n - for (int add=(int)contoursQuery.size()-1; add cont; - for (int i=0; i convertContourType(const Mat& currentQuery) const { - cont.push_back(contoursQuery[i]); - } - return cont; -} - -void CV_ShapeTest::listShapeNames( vector &listHeaders) -{ - listHeaders.push_back("apple"); //ok - listHeaders.push_back("children"); // ok - listHeaders.push_back("device7"); // ok - listHeaders.push_back("Heart"); // ok - listHeaders.push_back("teddy"); // ok -} - -float CV_ShapeTest::computeShapeDistance(vector & query1, vector & query2, - vector & query3, vector & testq) -{ - //waitKey(0); - Ptr mysc = createShapeContextDistanceExtractor(angularBins, radialBins, minRad, maxRad); - //Ptr cost = createNormHistogramCostExtractor(cv::DIST_L1); - Ptr cost = createChiHistogramCostExtractor(30,0.15f); - //Ptr cost = createEMDHistogramCostExtractor(); - //Ptr cost = createEMDL1HistogramCostExtractor(); - mysc->setIterations(1); - mysc->setCostExtractor( cost ); - //mysc->setTransformAlgorithm(createAffineTransformer(true)); - mysc->setTransformAlgorithm( createThinPlateSplineShapeTransformer() ); - //mysc->setImageAppearanceWeight(1.6); - //mysc->setImageAppearanceWeight(0.0); - //mysc->setImages(im1,imtest); - return ( std::min( mysc->computeDistance(query1, testq), - std::min(mysc->computeDistance(query2, testq), mysc->computeDistance(query3, testq) ))); -} + vector > _contoursQuery; + findContours(currentQuery, _contoursQuery, RETR_LIST, CHAIN_APPROX_NONE); -void CV_ShapeTest::mpegTest() -{ - string baseTestFolder="shape/mpeg_test/"; - string path = cvtest::TS::ptr()->get_data_path() + baseTestFolder; - vector namesHeaders; - listShapeNames(namesHeaders); + vector contoursQuery; + for (size_t border=0; border<_contoursQuery.size(); border++) + { + for (size_t p=0; p<_contoursQuery[border].size(); p++) + { + contoursQuery.push_back(PointType((T)_contoursQuery[border][p].x, + (T)_contoursQuery[border][p].y)); + } + } - // distance matrix // - Mat distanceMat=Mat::zeros(NSN*(int)namesHeaders.size(), NSN*(int)namesHeaders.size(), CV_32F); + // In case actual number of points is less than n + for (int add=(int)contoursQuery.size()-1; add contoursQuery1, contoursQuery2, contoursQuery3, contoursTesting; + // Uniformly sampling + random_shuffle(contoursQuery.begin(), contoursQuery.end()); + int nStart=NP; + vector cont; + for (int i=0; i contoursQuery1, contoursQuery2, contoursQuery3, 
contoursTesting; + // reading query and computing its properties + for (vector::const_iterator a = filenames.begin(); a != filenames.end(); ++a) { - // read current image // - stringstream thepathandname; - thepathandname< origContour; - contoursQuery1=convertContourType(currentQuery, NP); - origContour=contoursQuery1; - contoursQuery2=convertContourType(flippedHQuery, NP); - contoursQuery3=convertContourType(flippedVQuery, NP); - - // compare with all the rest of the images: testing // - for (size_t nt=0; nt::const_iterator b = filenames.begin(); b != filenames.end(); ++b) { - for (int it=1; it<=NSN; it++) + int bIndex = (int)(b - filenames.begin()); + float distance = 0; + // skip self-comparisson + if (a != b) { - // skip self-comparisson // - counter++; - if (nt==n && it==i) - { - distanceMat.at(NSN*(int)n+i-1, - NSN*(int)nt+it-1)=0; - continue; - } - // read testing image // - stringstream thetestpathandname; - thetestpathandname<(NSN*(int)n+i-1, NSN*(int)nt+it-1)= - computeShapeDistance(contoursQuery1, contoursQuery2, contoursQuery3, contoursTesting); - std::cout<(NSN*(int)n+i-1, NSN*(int)nt+it-1)<(aIndex, bIndex) = distance; } } } - // save distance matrix // - FileStorage fs(cvtest::TS::ptr()->get_data_path() + baseTestFolder + "distanceMatrixMPEGTest.yml", FileStorage::WRITE); - fs << "distanceMat" << distanceMat; -} -const int FIRST_MANY=2*NSN; -void CV_ShapeTest::displayMPEGResults() -{ - string baseTestFolder="shape/mpeg_test/"; - Mat distanceMat; - FileStorage fs(cvtest::TS::ptr()->get_data_path() + baseTestFolder + "distanceMatrixMPEGTest.yml", FileStorage::READ); - vector namesHeaders; - listShapeNames(namesHeaders); - - // Read generated MAT // - fs["distanceMat"]>>distanceMat; - - int corrects=0; - int divi=0; - for (int row=0; row(row,col)>distanceMat.at(row,i)) - { - nsmall++; - } + divi+=NSN; } - if (nsmall<=FIRST_MANY) + for (int col=divi-NSN; col(row,col) > distanceMat.at(row,i)) + { + nsmall++; + } + } + if (nsmall<=FIRST_MANY) + { + corrects++; + } } } + float porc = 100*float(corrects)/(NSN*distanceMat.rows); + std::cout << "Test result: " << porc << "%" << std::endl; + if (porc >= CURRENT_MAX_ACCUR) + ts->set_failed_test_info(cvtest::TS::OK); + else + ts->set_failed_test_info(cvtest::TS::FAIL_BAD_ACCURACY); + } + +protected: + int NSN; + int NP; + float CURRENT_MAX_ACCUR; + vector filenames; + Mat distanceMat; + compute cmp; +}; + +//------------------------------------------------------------------------ +// Test Shape_SCD.regression +//------------------------------------------------------------------------ + +class computeShapeDistance_Chi +{ + Ptr mysc; +public: + computeShapeDistance_Chi() + { + const int angularBins=12; + const int radialBins=4; + const float minRad=0.2f; + const float maxRad=2; + mysc = createShapeContextDistanceExtractor(angularBins, radialBins, minRad, maxRad); + mysc->setIterations(1); + mysc->setCostExtractor(createChiHistogramCostExtractor(30,0.15f)); + mysc->setTransformAlgorithm( createThinPlateSplineShapeTransformer() ); + } + float operator()(vector & query1, vector & query2, + vector & query3, vector & testq) + { + return std::min(mysc->computeDistance(query1, testq), + std::min(mysc->computeDistance(query2, testq), + mysc->computeDistance(query3, testq))); } - float porc = 100*float(corrects)/(NSN*distanceMat.rows); - std::cout<<"%="<= CURRENT_MAX_ACCUR) - ts->set_failed_test_info(cvtest::TS::OK); - else - ts->set_failed_test_info(cvtest::TS::FAIL_BAD_ACCURACY); - //done +}; + +TEST(Shape_SCD, regression) +{ + const int 
NSN_val=5;//10;//20; //number of shapes per class + const int NP_val=120; //number of points simplifying the contour + const float CURRENT_MAX_ACCUR_val=95; //99% and 100% reached in several tests, 95 is fixed as minimum boundary + ShapeBaseTest test(NSN_val, NP_val, CURRENT_MAX_ACCUR_val); + test.safe_run(); } -void CV_ShapeTest::run( int /*start_from*/ ) +//------------------------------------------------------------------------ +// Test ShapeEMD_SCD.regression +//------------------------------------------------------------------------ + +class computeShapeDistance_EMD { - mpegTest(); - displayMPEGResults(); - ts->set_failed_test_info(cvtest::TS::OK); + Ptr mysc; +public: + computeShapeDistance_EMD() + { + const int angularBins=12; + const int radialBins=4; + const float minRad=0.2f; + const float maxRad=2; + mysc = createShapeContextDistanceExtractor(angularBins, radialBins, minRad, maxRad); + mysc->setIterations(1); + mysc->setCostExtractor( createEMDL1HistogramCostExtractor() ); + mysc->setTransformAlgorithm( createThinPlateSplineShapeTransformer() ); + } + float operator()(vector & query1, vector & query2, + vector & query3, vector & testq) + { + return std::min(mysc->computeDistance(query1, testq), + std::min(mysc->computeDistance(query2, testq), + mysc->computeDistance(query3, testq))); + } +}; + +TEST(ShapeEMD_SCD, regression) +{ + const int NSN_val=5;//10;//20; //number of shapes per class + const int NP_val=100; //number of points simplifying the contour + const float CURRENT_MAX_ACCUR_val=95; //98% and 99% reached in several tests, 95 is fixed as minimum boundary + ShapeBaseTest test(NSN_val, NP_val, CURRENT_MAX_ACCUR_val); + test.safe_run(); } -TEST(Shape_SCD, regression) { CV_ShapeTest test; test.safe_run(); } +//------------------------------------------------------------------------ +// Test Hauss.regression +//------------------------------------------------------------------------ + +class computeShapeDistance_Haussdorf +{ + Ptr haus; +public: + computeShapeDistance_Haussdorf() + { + haus = createHausdorffDistanceExtractor(); + } + float operator()(vector &query1, vector &query2, + vector &query3, vector &testq) + { + return std::min(haus->computeDistance(query1,testq), + std::min(haus->computeDistance(query2,testq), + haus->computeDistance(query3,testq))); + } +}; + +TEST(Hauss, regression) +{ + const int NSN_val=5;//10;//20; //number of shapes per class + const int NP_val = 180; //number of points simplifying the contour + const float CURRENT_MAX_ACCUR_val=85; //90% and 91% reached in several tests, 85 is fixed as minimum boundary + ShapeBaseTest test(NSN_val, NP_val, CURRENT_MAX_ACCUR_val); + test.safe_run(); +} diff --git a/modules/stitching/CMakeLists.txt b/modules/stitching/CMakeLists.txt index 36d4452c7f..8650f7280e 100644 --- a/modules/stitching/CMakeLists.txt +++ b/modules/stitching/CMakeLists.txt @@ -5,4 +5,4 @@ if(HAVE_CUDA) endif() ocv_define_module(stitching opencv_imgproc opencv_features2d opencv_calib3d opencv_objdetect - OPTIONAL opencv_cuda opencv_cudaarithm opencv_cudafilters opencv_cudafeatures2d opencv_xfeatures2d) + OPTIONAL opencv_cudaarithm opencv_cudafilters opencv_cudafeatures2d opencv_cudalegacy opencv_xfeatures2d) diff --git a/modules/stitching/include/opencv2/stitching/detail/seam_finders.hpp b/modules/stitching/include/opencv2/stitching/detail/seam_finders.hpp index e4f7816bb4..37029215e3 100644 --- a/modules/stitching/include/opencv2/stitching/detail/seam_finders.hpp +++ b/modules/stitching/include/opencv2/stitching/detail/seam_finders.hpp @@ 
-249,7 +249,7 @@ private: }; -#ifdef HAVE_OPENCV_CUDA +#ifdef HAVE_OPENCV_CUDALEGACY class CV_EXPORTS GraphCutSeamFinderGpu : public GraphCutSeamFinderBase, public PairwiseSeamFinder { public: diff --git a/modules/stitching/src/precomp.hpp b/modules/stitching/src/precomp.hpp index c70d9f9a45..70636b6848 100644 --- a/modules/stitching/src/precomp.hpp +++ b/modules/stitching/src/precomp.hpp @@ -83,8 +83,8 @@ # include "opencv2/cudafeatures2d.hpp" #endif -#ifdef HAVE_OPENCV_CUDA -# include "opencv2/cuda.hpp" +#ifdef HAVE_OPENCV_CUDALEGACY +# include "opencv2/cudalegacy.hpp" #endif #ifdef HAVE_OPENCV_XFEATURES2D diff --git a/modules/stitching/src/seam_finders.cpp b/modules/stitching/src/seam_finders.cpp index 4d5c8d1634..8a673ede0c 100644 --- a/modules/stitching/src/seam_finders.cpp +++ b/modules/stitching/src/seam_finders.cpp @@ -1321,7 +1321,7 @@ void GraphCutSeamFinder::find(const std::vector &src, const std::vector &src, const std::vector &corners, std::vector &masks) { diff --git a/modules/stitching/src/stitcher.cpp b/modules/stitching/src/stitcher.cpp index 43efbd3880..c515c192fb 100644 --- a/modules/stitching/src/stitcher.cpp +++ b/modules/stitching/src/stitcher.cpp @@ -56,7 +56,7 @@ Stitcher Stitcher::createDefault(bool try_use_gpu) stitcher.setFeaturesMatcher(makePtr(try_use_gpu)); stitcher.setBundleAdjuster(makePtr()); -#ifdef HAVE_OPENCV_CUDA +#ifdef HAVE_CUDA if (try_use_gpu && cuda::getCudaEnabledDeviceCount() > 0) { #ifdef HAVE_OPENCV_XFEATURES2D @@ -544,7 +544,7 @@ Ptr createStitcher(bool try_use_gpu) stitcher->setFeaturesMatcher(makePtr(try_use_gpu)); stitcher->setBundleAdjuster(makePtr()); - #ifdef HAVE_OPENCV_CUDA + #ifdef HAVE_CUDA if (try_use_gpu && cuda::getCudaEnabledDeviceCount() > 0) { #ifdef HAVE_OPENCV_NONFREE diff --git a/modules/superres/src/optical_flow.cpp b/modules/superres/src/optical_flow.cpp index fcc9bef347..52fc2648e2 100644 --- a/modules/superres/src/optical_flow.cpp +++ b/modules/superres/src/optical_flow.cpp @@ -341,7 +341,7 @@ namespace int iterations_; bool useInitialFlow_; - Ptr alg_; + Ptr alg_; }; CV_INIT_ALGORITHM(DualTVL1, "DenseOpticalFlowExt.DualTVL1", @@ -514,7 +514,7 @@ namespace int outerIterations_; int solverIterations_; - BroxOpticalFlow alg_; + Ptr alg_; }; CV_INIT_ALGORITHM(Brox_CUDA, "DenseOpticalFlowExt.Brox_CUDA", @@ -525,31 +525,40 @@ namespace obj.info()->addParam(obj, "outerIterations", obj.outerIterations_, false, 0, 0, "Number of warping iterations (number of pyramid levels)"); obj.info()->addParam(obj, "solverIterations", obj.solverIterations_, false, 0, 0, "Number of linear system solver iterations")) - Brox_CUDA::Brox_CUDA() : GpuOpticalFlow(CV_32FC1), alg_(0.197f, 50.0f, 0.8f, 10, 77, 10) + Brox_CUDA::Brox_CUDA() : GpuOpticalFlow(CV_32FC1) { - alpha_ = alg_.alpha; - gamma_ = alg_.gamma; - scaleFactor_ = alg_.scale_factor; - innerIterations_ = alg_.inner_iterations; - outerIterations_ = alg_.outer_iterations; - solverIterations_ = alg_.solver_iterations; + alg_ = cuda::BroxOpticalFlow::create(0.197f, 50.0f, 0.8f, 10, 77, 10); + + alpha_ = alg_->getFlowSmoothness(); + gamma_ = alg_->getGradientConstancyImportance(); + scaleFactor_ = alg_->getPyramidScaleFactor(); + innerIterations_ = alg_->getInnerIterations(); + outerIterations_ = alg_->getOuterIterations(); + solverIterations_ = alg_->getSolverIterations(); } void Brox_CUDA::impl(const GpuMat& input0, const GpuMat& input1, GpuMat& dst1, GpuMat& dst2) { - alg_.alpha = static_cast(alpha_); - alg_.gamma = static_cast(gamma_); - alg_.scale_factor = static_cast(scaleFactor_); - 
alg_.inner_iterations = innerIterations_; - alg_.outer_iterations = outerIterations_; - alg_.solver_iterations = solverIterations_; + alg_->setFlowSmoothness(alpha_); + alg_->setGradientConstancyImportance(gamma_); + alg_->setPyramidScaleFactor(scaleFactor_); + alg_->setInnerIterations(innerIterations_); + alg_->setOuterIterations(outerIterations_); + alg_->setSolverIterations(solverIterations_); + + GpuMat flow; + alg_->calc(input0, input1, flow); + + GpuMat flows[2]; + cuda::split(flow, flows); - alg_(input0, input1, dst1, dst2); + dst1 = flows[0]; + dst2 = flows[1]; } void Brox_CUDA::collectGarbage() { - alg_.buf.release(); + alg_ = cuda::BroxOpticalFlow::create(alpha_, gamma_, scaleFactor_, innerIterations_, outerIterations_, solverIterations_); GpuOpticalFlow::collectGarbage(); } } @@ -581,7 +590,7 @@ namespace int maxLevel_; int iterations_; - PyrLKOpticalFlow alg_; + Ptr alg_; }; CV_INIT_ALGORITHM(PyrLK_CUDA, "DenseOpticalFlowExt.PyrLK_CUDA", @@ -591,24 +600,32 @@ namespace PyrLK_CUDA::PyrLK_CUDA() : GpuOpticalFlow(CV_8UC1) { - winSize_ = alg_.winSize.width; - maxLevel_ = alg_.maxLevel; - iterations_ = alg_.iters; + alg_ = cuda::DensePyrLKOpticalFlow::create(); + + winSize_ = alg_->getWinSize().width; + maxLevel_ = alg_->getMaxLevel(); + iterations_ = alg_->getNumIters(); } void PyrLK_CUDA::impl(const GpuMat& input0, const GpuMat& input1, GpuMat& dst1, GpuMat& dst2) { - alg_.winSize.width = winSize_; - alg_.winSize.height = winSize_; - alg_.maxLevel = maxLevel_; - alg_.iters = iterations_; + alg_->setWinSize(Size(winSize_, winSize_)); + alg_->setMaxLevel(maxLevel_); + alg_->setNumIters(iterations_); + + GpuMat flow; + alg_->calc(input0, input1, flow); + + GpuMat flows[2]; + cuda::split(flow, flows); - alg_.dense(input0, input1, dst1, dst2); + dst1 = flows[0]; + dst2 = flows[1]; } void PyrLK_CUDA::collectGarbage() { - alg_.releaseMemory(); + alg_ = cuda::DensePyrLKOpticalFlow::create(); GpuOpticalFlow::collectGarbage(); } } @@ -644,7 +661,7 @@ namespace double polySigma_; int flags_; - FarnebackOpticalFlow alg_; + Ptr alg_; }; CV_INIT_ALGORITHM(Farneback_CUDA, "DenseOpticalFlowExt.Farneback_CUDA", @@ -658,31 +675,40 @@ namespace Farneback_CUDA::Farneback_CUDA() : GpuOpticalFlow(CV_8UC1) { - pyrScale_ = alg_.pyrScale; - numLevels_ = alg_.numLevels; - winSize_ = alg_.winSize; - numIters_ = alg_.numIters; - polyN_ = alg_.polyN; - polySigma_ = alg_.polySigma; - flags_ = alg_.flags; + alg_ = cuda::FarnebackOpticalFlow::create(); + + pyrScale_ = alg_->getPyrScale(); + numLevels_ = alg_->getNumLevels(); + winSize_ = alg_->getWinSize(); + numIters_ = alg_->getNumIters(); + polyN_ = alg_->getPolyN(); + polySigma_ = alg_->getPolySigma(); + flags_ = alg_->getFlags(); } void Farneback_CUDA::impl(const GpuMat& input0, const GpuMat& input1, GpuMat& dst1, GpuMat& dst2) { - alg_.pyrScale = pyrScale_; - alg_.numLevels = numLevels_; - alg_.winSize = winSize_; - alg_.numIters = numIters_; - alg_.polyN = polyN_; - alg_.polySigma = polySigma_; - alg_.flags = flags_; + alg_->setPyrScale(pyrScale_); + alg_->setNumLevels(numLevels_); + alg_->setWinSize(winSize_); + alg_->setNumIters(numIters_); + alg_->setPolyN(polyN_); + alg_->setPolySigma(polySigma_); + alg_->setFlags(flags_); + + GpuMat flow; + alg_->calc(input0, input1, flow); + + GpuMat flows[2]; + cuda::split(flow, flows); - alg_(input0, input1, dst1, dst2); + dst1 = flows[0]; + dst2 = flows[1]; } void Farneback_CUDA::collectGarbage() { - alg_.releaseMemory(); + alg_ = cuda::FarnebackOpticalFlow::create(); GpuOpticalFlow::collectGarbage(); } } @@ 
-719,7 +745,7 @@ namespace int iterations_; bool useInitialFlow_; - OpticalFlowDual_TVL1_CUDA alg_; + Ptr alg_; }; CV_INIT_ALGORITHM(DualTVL1_CUDA, "DenseOpticalFlowExt.DualTVL1_CUDA", @@ -734,33 +760,42 @@ namespace DualTVL1_CUDA::DualTVL1_CUDA() : GpuOpticalFlow(CV_8UC1) { - tau_ = alg_.tau; - lambda_ = alg_.lambda; - theta_ = alg_.theta; - nscales_ = alg_.nscales; - warps_ = alg_.warps; - epsilon_ = alg_.epsilon; - iterations_ = alg_.iterations; - useInitialFlow_ = alg_.useInitialFlow; + alg_ = cuda::OpticalFlowDual_TVL1::create(); + + tau_ = alg_->getTau(); + lambda_ = alg_->getLambda(); + theta_ = alg_->getTheta(); + nscales_ = alg_->getNumScales(); + warps_ = alg_->getNumWarps(); + epsilon_ = alg_->getEpsilon(); + iterations_ = alg_->getNumIterations(); + useInitialFlow_ = alg_->getUseInitialFlow(); } void DualTVL1_CUDA::impl(const GpuMat& input0, const GpuMat& input1, GpuMat& dst1, GpuMat& dst2) { - alg_.tau = tau_; - alg_.lambda = lambda_; - alg_.theta = theta_; - alg_.nscales = nscales_; - alg_.warps = warps_; - alg_.epsilon = epsilon_; - alg_.iterations = iterations_; - alg_.useInitialFlow = useInitialFlow_; + alg_->setTau(tau_); + alg_->setLambda(lambda_); + alg_->setTheta(theta_); + alg_->setNumScales(nscales_); + alg_->setNumWarps(warps_); + alg_->setEpsilon(epsilon_); + alg_->setNumIterations(iterations_); + alg_->setUseInitialFlow(useInitialFlow_); + + GpuMat flow; + alg_->calc(input0, input1, flow); + + GpuMat flows[2]; + cuda::split(flow, flows); - alg_(input0, input1, dst1, dst2); + dst1 = flows[0]; + dst2 = flows[1]; } void DualTVL1_CUDA::collectGarbage() { - alg_.collectGarbage(); + alg_ = cuda::OpticalFlowDual_TVL1::create(); GpuOpticalFlow::collectGarbage(); } } diff --git a/modules/ts/src/ts_func.cpp b/modules/ts/src/ts_func.cpp index 03877c0910..b6a832b6bb 100644 --- a/modules/ts/src/ts_func.cpp +++ b/modules/ts/src/ts_func.cpp @@ -2998,6 +2998,12 @@ void printVersionInfo(bool useStdOut) std::string cpu_features; +#if CV_POPCNT + if (checkHardwareSupport(CV_CPU_POPCNT)) cpu_features += " popcnt"; +#endif +#if CV_MMX + if (checkHardwareSupport(CV_CPU_MMX)) cpu_features += " mmx"; +#endif #if CV_SSE if (checkHardwareSupport(CV_CPU_SSE)) cpu_features += " sse"; #endif @@ -3019,6 +3025,39 @@ void printVersionInfo(bool useStdOut) #if CV_AVX if (checkHardwareSupport(CV_CPU_AVX)) cpu_features += " avx"; #endif +#if CV_AVX2 + if (checkHardwareSupport(CV_CPU_AVX2)) cpu_features += " avx2"; +#endif +#if CV_FMA3 + if (checkHardwareSupport(CV_CPU_FMA3)) cpu_features += " fma3"; +#endif +#if CV_AVX_512F + if (checkHardwareSupport(CV_CPU_AVX_512F)) cpu_features += " avx-512f"; +#endif +#if CV_AVX_512BW + if (checkHardwareSupport(CV_CPU_AVX_512BW)) cpu_features += " avx-512bw"; +#endif +#if CV_AVX_512CD + if (checkHardwareSupport(CV_CPU_AVX_512CD)) cpu_features += " avx-512cd"; +#endif +#if CV_AVX_512DQ + if (checkHardwareSupport(CV_CPU_AVX_512DQ)) cpu_features += " avx-512dq"; +#endif +#if CV_AVX_512ER + if (checkHardwareSupport(CV_CPU_AVX_512ER)) cpu_features += " avx-512er"; +#endif +#if CV_AVX_512IFMA512 + if (checkHardwareSupport(CV_CPU_AVX_512IFMA512)) cpu_features += " avx-512ifma512"; +#endif +#if CV_AVX_512PF + if (checkHardwareSupport(CV_CPU_AVX_512PF)) cpu_features += " avx-512pf"; +#endif +#if CV_AVX_512VBMI + if (checkHardwareSupport(CV_CPU_AVX_512VBMI)) cpu_features += " avx-512vbmi"; +#endif +#if CV_AVX_512VL + if (checkHardwareSupport(CV_CPU_AVX_512VL)) cpu_features += " avx-512vl"; +#endif #if CV_NEON if (checkHardwareSupport(CV_CPU_NEON)) cpu_features += " neon"; #endif
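
Note on the hardware-capability reporting in the ts_func.cpp hunk above: the new entries rely on cv::checkHardwareSupport(), which is also the usual way for application code to choose an optimized path at run time. A minimal sketch of that usage follows; it is illustrative only and not part of this patch, and the printed messages are made up.

    #include <iostream>
    #include "opencv2/core/utility.hpp"   // cv::checkHardwareSupport, CV_CPU_* constants

    int main()
    {
        // Query the same runtime CPU capability that printVersionInfo() reports.
        if (cv::checkHardwareSupport(CV_CPU_AVX2))
            std::cout << "AVX2 is available; an AVX2-optimized kernel could be dispatched here" << std::endl;
        else
            std::cout << "AVX2 is not available; using the generic code path" << std::endl;
        return 0;
    }
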
diff --git a/modules/videoio/src/cap_openni2.cpp b/modules/videoio/src/cap_openni2.cpp index cfe51eba47..ca897ae42d 100644 --- a/modules/videoio/src/cap_openni2.cpp +++ b/modules/videoio/src/cap_openni2.cpp @@ -573,7 +573,7 @@ bool CvCapture_OpenNI2::setDepthGeneratorProperty( int propIdx, double propValue // then the property isn't avaliable if ( color.isValid() ) { - openni::ImageRegistrationMode mode = propValue < 1.0 ? openni::IMAGE_REGISTRATION_OFF : openni::IMAGE_REGISTRATION_DEPTH_TO_COLOR; + openni::ImageRegistrationMode mode = propValue < 1.0 ? openni::IMAGE_REGISTRATION_DEPTH_TO_COLOR : openni::IMAGE_REGISTRATION_OFF; if( !device.getImageRegistrationMode() == mode ) { if (device.isImageRegistrationModeSupported(mode)) diff --git a/modules/videostab/CMakeLists.txt b/modules/videostab/CMakeLists.txt index e252bdbf53..f57a5d2151 100644 --- a/modules/videostab/CMakeLists.txt +++ b/modules/videostab/CMakeLists.txt @@ -1,3 +1,8 @@ set(the_description "Video stabilization") + +if(HAVE_CUDA) + ocv_warnings_disable(CMAKE_CXX_FLAGS -Wundef -Wmissing-declarations -Wshadow -Wunused-parameter) +endif() + ocv_define_module(videostab opencv_imgproc opencv_features2d opencv_video opencv_photo opencv_calib3d - OPTIONAL opencv_cuda opencv_cudawarping opencv_cudaoptflow opencv_videoio) + OPTIONAL opencv_cudawarping opencv_cudaoptflow opencv_videoio) diff --git a/modules/videostab/include/opencv2/videostab/global_motion.hpp b/modules/videostab/include/opencv2/videostab/global_motion.hpp index 547f1b2821..5d51e4234a 100644 --- a/modules/videostab/include/opencv2/videostab/global_motion.hpp +++ b/modules/videostab/include/opencv2/videostab/global_motion.hpp @@ -249,7 +249,7 @@ private: std::vector pointsPrevGood_, pointsGood_; }; -#if defined(HAVE_OPENCV_CUDAIMGPROC) && defined(HAVE_OPENCV_CUDA) && defined(HAVE_OPENCV_CUDAOPTFLOW) +#if defined(HAVE_OPENCV_CUDAIMGPROC) && defined(HAVE_OPENCV_CUDAOPTFLOW) class CV_EXPORTS KeypointBasedMotionEstimatorGpu : public ImageMotionEstimatorBase { @@ -280,7 +280,7 @@ private: std::vector rejectionStatus_; }; -#endif // defined(HAVE_OPENCV_CUDAIMGPROC) && defined(HAVE_OPENCV_CUDA) && defined(HAVE_OPENCV_CUDAOPTFLOW) +#endif // defined(HAVE_OPENCV_CUDAIMGPROC) && defined(HAVE_OPENCV_CUDAOPTFLOW) /** @brief Computes motion between two frames assuming that all the intermediate motions are known. 
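
With the relaxed guard above, the GPU keypoint-based motion estimator in videostab depends only on the cudaimgproc and cudaoptflow modules rather than the removed opencv_cuda module. A hedged usage sketch follows; the helper function and its fallback behaviour are assumptions for illustration, not code taken from this patch.

    #include "opencv2/opencv_modules.hpp"
    #include "opencv2/videostab/global_motion.hpp"

    using namespace cv;
    using namespace cv::videostab;

    Ptr<ImageMotionEstimatorBase> makeGpuMotionEstimator()
    {
    #if defined(HAVE_OPENCV_CUDAIMGPROC) && defined(HAVE_OPENCV_CUDAOPTFLOW)
        // Same RANSAC-based 2D motion model as the CPU path, wrapped by the GPU keypoint estimator.
        Ptr<MotionEstimatorRansacL2> est = makePtr<MotionEstimatorRansacL2>(MM_AFFINE);
        return makePtr<KeypointBasedMotionEstimatorGpu>(est);
    #else
        // CPU fallback when the CUDA modules are not built.
        return makePtr<KeypointBasedMotionEstimator>(makePtr<MotionEstimatorRansacL2>(MM_AFFINE));
    #endif
    }
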
diff --git a/modules/videostab/include/opencv2/videostab/optical_flow.hpp b/modules/videostab/include/opencv2/videostab/optical_flow.hpp index a34a82e3f8..41d1953549 100644 --- a/modules/videostab/include/opencv2/videostab/optical_flow.hpp +++ b/modules/videostab/include/opencv2/videostab/optical_flow.hpp @@ -121,7 +121,7 @@ public: cuda::GpuMat &status); private: - cuda::PyrLKOpticalFlow optFlowEstimator_; + Ptr optFlowEstimator_; cuda::GpuMat frame0_, frame1_, points0_, points1_, status_, errors_; }; @@ -136,7 +136,7 @@ public: OutputArray errors); private: - cuda::PyrLKOpticalFlow optFlowEstimator_; + Ptr optFlowEstimator_; cuda::GpuMat frame0_, frame1_, flowX_, flowY_, errors_; }; diff --git a/modules/videostab/include/opencv2/videostab/wobble_suppression.hpp b/modules/videostab/include/opencv2/videostab/wobble_suppression.hpp index 6701d78101..3f0a9432b9 100644 --- a/modules/videostab/include/opencv2/videostab/wobble_suppression.hpp +++ b/modules/videostab/include/opencv2/videostab/wobble_suppression.hpp @@ -119,7 +119,7 @@ private: Mat_ mapx_, mapy_; }; -#if defined(HAVE_OPENCV_CUDA) && defined(HAVE_OPENCV_CUDAWARPING) +#if defined(HAVE_OPENCV_CUDAWARPING) class CV_EXPORTS MoreAccurateMotionWobbleSuppressorGpu : public MoreAccurateMotionWobbleSuppressorBase { public: diff --git a/modules/cuda/src/cuda/global_motion.cu b/modules/videostab/src/cuda/global_motion.cu similarity index 100% rename from modules/cuda/src/cuda/global_motion.cu rename to modules/videostab/src/cuda/global_motion.cu diff --git a/modules/videostab/src/global_motion.cpp b/modules/videostab/src/global_motion.cpp index 4875bef23d..d840e895d2 100644 --- a/modules/videostab/src/global_motion.cpp +++ b/modules/videostab/src/global_motion.cpp @@ -47,8 +47,33 @@ #include "opencv2/opencv_modules.hpp" #include "clp.hpp" -#ifdef HAVE_OPENCV_CUDA -# include "opencv2/cuda.hpp" +#include "opencv2/core/private.cuda.hpp" + +#if defined(HAVE_OPENCV_CUDAIMGPROC) && defined(HAVE_OPENCV_CUDAOPTFLOW) + #if !defined HAVE_CUDA || defined(CUDA_DISABLER) + namespace cv { namespace cuda { + static void compactPoints(GpuMat&, GpuMat&, const GpuMat&) { throw_no_cuda(); } + }} + #else + namespace cv { namespace cuda { namespace device { namespace globmotion { + int compactPoints(int N, float *points0, float *points1, const uchar *mask); + }}}} + namespace cv { namespace cuda { + static void compactPoints(GpuMat &points0, GpuMat &points1, const GpuMat &mask) + { + CV_Assert(points0.rows == 1 && points1.rows == 1 && mask.rows == 1); + CV_Assert(points0.type() == CV_32FC2 && points1.type() == CV_32FC2 && mask.type() == CV_8U); + CV_Assert(points0.cols == mask.cols && points1.cols == mask.cols); + + int npoints = points0.cols; + int remaining = cv::cuda::device::globmotion::compactPoints( + npoints, (float*)points0.data, (float*)points1.data, mask.data); + + points0 = points0.colRange(0, remaining); + points1 = points1.colRange(0, remaining); + } + }} + #endif #endif namespace cv @@ -736,8 +761,7 @@ Mat KeypointBasedMotionEstimator::estimate(const Mat &frame0, const Mat &frame1, return motionEstimator_->estimate(pointsPrevGood_, pointsGood_, ok); } - -#if defined(HAVE_OPENCV_CUDAIMGPROC) && defined(HAVE_OPENCV_CUDA) && defined(HAVE_OPENCV_CUDAOPTFLOW) +#if defined(HAVE_OPENCV_CUDAIMGPROC) && defined(HAVE_OPENCV_CUDAOPTFLOW) KeypointBasedMotionEstimatorGpu::KeypointBasedMotionEstimatorGpu(Ptr estimator) : ImageMotionEstimatorBase(estimator->motionModel()), motionEstimator_(estimator) @@ -812,7 +836,7 @@ Mat 
KeypointBasedMotionEstimatorGpu::estimate(const cuda::GpuMat &frame0, const return motionEstimator_->estimate(hostPointsPrev_, hostPoints_, ok); } -#endif // defined(HAVE_OPENCV_CUDAIMGPROC) && defined(HAVE_OPENCV_CUDA) && defined(HAVE_OPENCV_CUDAOPTFLOW) +#endif // defined(HAVE_OPENCV_CUDAIMGPROC) && defined(HAVE_OPENCV_CUDAOPTFLOW) Mat getMotion(int from, int to, const std::vector &motions) diff --git a/modules/videostab/src/optical_flow.cpp b/modules/videostab/src/optical_flow.cpp index d8a059c1fd..32c8133a7d 100644 --- a/modules/videostab/src/optical_flow.cpp +++ b/modules/videostab/src/optical_flow.cpp @@ -45,6 +45,10 @@ #include "opencv2/videostab/optical_flow.hpp" #include "opencv2/videostab/ring_buffer.hpp" +#ifdef HAVE_OPENCV_CUDAARITHM + #include "opencv2/cudaarithm.hpp" +#endif + namespace cv { namespace videostab @@ -63,6 +67,7 @@ void SparsePyrLkOptFlowEstimator::run( SparsePyrLkOptFlowEstimatorGpu::SparsePyrLkOptFlowEstimatorGpu() { CV_Assert(cuda::getCudaEnabledDeviceCount() > 0); + optFlowEstimator_ = cuda::SparsePyrLKOpticalFlow::create(); } @@ -91,9 +96,9 @@ void SparsePyrLkOptFlowEstimatorGpu::run( const cuda::GpuMat &frame0, const cuda::GpuMat &frame1, const cuda::GpuMat &points0, cuda::GpuMat &points1, cuda::GpuMat &status, cuda::GpuMat &errors) { - optFlowEstimator_.winSize = winSize_; - optFlowEstimator_.maxLevel = maxLevel_; - optFlowEstimator_.sparse(frame0, frame1, points0, points1, status, &errors); + optFlowEstimator_->setWinSize(winSize_); + optFlowEstimator_->setMaxLevel(maxLevel_); + optFlowEstimator_->calc(frame0, frame1, points0, points1, status, errors); } @@ -101,15 +106,16 @@ void SparsePyrLkOptFlowEstimatorGpu::run( const cuda::GpuMat &frame0, const cuda::GpuMat &frame1, const cuda::GpuMat &points0, cuda::GpuMat &points1, cuda::GpuMat &status) { - optFlowEstimator_.winSize = winSize_; - optFlowEstimator_.maxLevel = maxLevel_; - optFlowEstimator_.sparse(frame0, frame1, points0, points1, status); + optFlowEstimator_->setWinSize(winSize_); + optFlowEstimator_->setMaxLevel(maxLevel_); + optFlowEstimator_->calc(frame0, frame1, points0, points1, status); } DensePyrLkOptFlowEstimatorGpu::DensePyrLkOptFlowEstimatorGpu() { CV_Assert(cuda::getCudaEnabledDeviceCount() > 0); + optFlowEstimator_ = cuda::DensePyrLKOpticalFlow::create(); } @@ -120,16 +126,24 @@ void DensePyrLkOptFlowEstimatorGpu::run( frame0_.upload(frame0.getMat()); frame1_.upload(frame1.getMat()); - optFlowEstimator_.winSize = winSize_; - optFlowEstimator_.maxLevel = maxLevel_; + optFlowEstimator_->setWinSize(winSize_); + optFlowEstimator_->setMaxLevel(maxLevel_); if (errors.needed()) { - optFlowEstimator_.dense(frame0_, frame1_, flowX_, flowY_, &errors_); - errors_.download(errors.getMatRef()); + CV_Error(Error::StsNotImplemented, "DensePyrLkOptFlowEstimatorGpu doesn't support errors calculation"); } else - optFlowEstimator_.dense(frame0_, frame1_, flowX_, flowY_); + { + cuda::GpuMat flow; + optFlowEstimator_->calc(frame0_, frame1_, flow); + + cuda::GpuMat flows[2]; + cuda::split(flow, flows); + + flowX_ = flows[0]; + flowY_ = flows[1]; + } flowX_.download(flowX.getMatRef()); flowY_.download(flowY.getMatRef()); diff --git a/modules/videostab/src/wobble_suppression.cpp b/modules/videostab/src/wobble_suppression.cpp index e2635d5e08..86067fbcc9 100644 --- a/modules/videostab/src/wobble_suppression.cpp +++ b/modules/videostab/src/wobble_suppression.cpp @@ -44,15 +44,42 @@ #include "opencv2/videostab/wobble_suppression.hpp" #include "opencv2/videostab/ring_buffer.hpp" +#include 
"opencv2/core/private.cuda.hpp" + #ifdef HAVE_OPENCV_CUDAWARPING # include "opencv2/cudawarping.hpp" #endif -#ifdef HAVE_OPENCV_CUDA -# include "opencv2/cuda.hpp" +#if defined(HAVE_OPENCV_CUDAWARPING) + #if !defined HAVE_CUDA || defined(CUDA_DISABLER) + namespace cv { namespace cuda { + static void calcWobbleSuppressionMaps(int, int, int, Size, const Mat&, const Mat&, GpuMat&, GpuMat&) { throw_no_cuda(); } + }} + #else + namespace cv { namespace cuda { namespace device { namespace globmotion { + void calcWobbleSuppressionMaps( + int left, int idx, int right, int width, int height, + const float *ml, const float *mr, PtrStepSzf mapx, PtrStepSzf mapy); + }}}} + namespace cv { namespace cuda { + static void calcWobbleSuppressionMaps( + int left, int idx, int right, Size size, const Mat &ml, const Mat &mr, + GpuMat &mapx, GpuMat &mapy) + { + CV_Assert(ml.size() == Size(3, 3) && ml.type() == CV_32F && ml.isContinuous()); + CV_Assert(mr.size() == Size(3, 3) && mr.type() == CV_32F && mr.isContinuous()); + + mapx.create(size, CV_32F); + mapy.create(size, CV_32F); + + cv::cuda::device::globmotion::calcWobbleSuppressionMaps( + left, idx, right, size.width, size.height, + ml.ptr(), mr.ptr(), mapx, mapy); + } + }} + #endif #endif - namespace cv { namespace videostab @@ -121,8 +148,7 @@ void MoreAccurateMotionWobbleSuppressor::suppress(int idx, const Mat &frame, Mat remap(frame, result, mapx_, mapy_, INTER_LINEAR, BORDER_REPLICATE); } - -#if defined(HAVE_OPENCV_CUDA) && defined(HAVE_OPENCV_CUDAWARPING) +#if defined(HAVE_OPENCV_CUDAWARPING) void MoreAccurateMotionWobbleSuppressorGpu::suppress(int idx, const cuda::GpuMat &frame, cuda::GpuMat &result) { CV_Assert(motions_ && stabilizationMotions_); diff --git a/samples/cpp/lsd_lines.cpp b/samples/cpp/lsd_lines.cpp index 69497b3871..34f6b906b4 100644 --- a/samples/cpp/lsd_lines.cpp +++ b/samples/cpp/lsd_lines.cpp @@ -37,7 +37,7 @@ int main(int argc, char** argv) #endif double start = double(getTickCount()); - vector lines_std; + vector lines_std; // Detect the lines ls->detect(image, lines_std); diff --git a/samples/cpp/stitching_detailed.cpp b/samples/cpp/stitching_detailed.cpp index 0d9b4a683c..2d6351ae11 100644 --- a/samples/cpp/stitching_detailed.cpp +++ b/samples/cpp/stitching_detailed.cpp @@ -673,7 +673,7 @@ int main(int argc, char* argv[]) seam_finder = makePtr(); else if (seam_find_type == "gc_color") { -#ifdef HAVE_OPENCV_CUDA +#ifdef HAVE_OPENCV_CUDALEGACY if (try_cuda && cuda::getCudaEnabledDeviceCount() > 0) seam_finder = makePtr(GraphCutSeamFinderBase::COST_COLOR); else @@ -682,7 +682,7 @@ int main(int argc, char* argv[]) } else if (seam_find_type == "gc_colorgrad") { -#ifdef HAVE_OPENCV_CUDA +#ifdef HAVE_OPENCV_CUDALEGACY if (try_cuda && cuda::getCudaEnabledDeviceCount() > 0) seam_finder = makePtr(GraphCutSeamFinderBase::COST_COLOR_GRAD); else diff --git a/samples/cpp/tutorial_code/ImgTrans/imageSegmentation.cpp b/samples/cpp/tutorial_code/ImgTrans/imageSegmentation.cpp new file mode 100644 index 0000000000..424a4901ec --- /dev/null +++ b/samples/cpp/tutorial_code/ImgTrans/imageSegmentation.cpp @@ -0,0 +1,168 @@ +/** + * @function Watershed_and_Distance_Transform.cpp + * @brief Sample code showing how to segment overlapping objects using Laplacian filtering, in addition to Watershed and Distance Transformation + * @author OpenCV Team + */ + +#include +#include + +using namespace std; +using namespace cv; + +int main(int, char** argv) +{ +//! 
[load_image] + // Load the image + Mat src = imread(argv[1]); + + // Check if everything was fine + if (!src.data) + return -1; + + // Show source image + imshow("Source Image", src); +//! [load_image] + +//! [black_bg] + // Change the background from white to black, since that will help later to extract + // better results during the use of Distance Transform + for( int x = 0; x < src.rows; x++ ) { + for( int y = 0; y < src.cols; y++ ) { + if ( src.at(x, y) == Vec3b(255,255,255) ) { + src.at(x, y)[0] = 0; + src.at(x, y)[1] = 0; + src.at(x, y)[2] = 0; + } + } + } + + // Show output image + imshow("Black Background Image", src); +//! [black_bg] + +//! [sharp] + // Create a kernel that we will use for accuting/sharpening our image + Mat kernel = (Mat_(3,3) << + 1, 1, 1, + 1, -8, 1, + 1, 1, 1); // an approximation of second derivative, a quite strong kernel + + // do the laplacian filtering as it is + // well, we need to convert everything in something more deeper then CV_8U + // because the kernel has some negative values, + // and we can expect in general to have a Laplacian image with negative values + // BUT a 8bits unsigned int (the one we are working with) can contain values from 0 to 255 + // so the possible negative number will be truncated + Mat imgLaplacian; + Mat sharp = src; // copy source image to another temporary one + filter2D(sharp, imgLaplacian, CV_32F, kernel); + src.convertTo(sharp, CV_32F); + Mat imgResult = sharp - imgLaplacian; + + // convert back to 8bits gray scale + imgResult.convertTo(imgResult, CV_8UC3); + imgLaplacian.convertTo(imgLaplacian, CV_8UC3); + + // imshow( "Laplace Filtered Image", imgLaplacian ); + imshow( "New Sharped Image", imgResult ); +//! [sharp] + + src = imgResult; // copy back + +//! [bin] + // Create binary image from source image + Mat bw; + cvtColor(src, bw, CV_BGR2GRAY); + threshold(bw, bw, 40, 255, CV_THRESH_BINARY | CV_THRESH_OTSU); + imshow("Binary Image", bw); +//! [bin] + +//! [dist] + // Perform the distance transform algorithm + Mat dist; + distanceTransform(bw, dist, CV_DIST_L2, 3); + + // Normalize the distance image for range = {0.0, 1.0} + // so we can visualize and threshold it + normalize(dist, dist, 0, 1., NORM_MINMAX); + imshow("Distance Transform Image", dist); +//! [dist] + +//! [peaks] + // Threshold to obtain the peaks + // This will be the markers for the foreground objects + threshold(dist, dist, .4, 1., CV_THRESH_BINARY); + + // Dilate a bit the dist image + Mat kernel1 = Mat::ones(3, 3, CV_8UC1); + dilate(dist, dist, kernel1); + imshow("Peaks", dist); +//! [peaks] + +//! [seeds] + // Create the CV_8U version of the distance image + // It is needed for findContours() + Mat dist_8u; + dist.convertTo(dist_8u, CV_8U); + + // Find total markers + vector > contours; + findContours(dist_8u, contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE); + + // Create the marker image for the watershed algorithm + Mat markers = Mat::zeros(dist.size(), CV_32SC1); + + // Draw the foreground markers + for (size_t i = 0; i < contours.size(); i++) + drawContours(markers, contours, static_cast(i), Scalar::all(static_cast(i)+1), -1); + + // Draw the background marker + circle(markers, Point(5,5), 3, CV_RGB(255,255,255), -1); + imshow("Markers", markers*10000); +//! [seeds] + +//! 
[watershed] + // Perform the watershed algorithm + watershed(src, markers); + + Mat mark = Mat::zeros(markers.size(), CV_8UC1); + markers.convertTo(mark, CV_8UC1); + bitwise_not(mark, mark); +// imshow("Markers_v2", mark); // uncomment this if you want to see how the mark + // image looks like at that point + + // Generate random colors + vector colors; + for (size_t i = 0; i < contours.size(); i++) + { + int b = theRNG().uniform(0, 255); + int g = theRNG().uniform(0, 255); + int r = theRNG().uniform(0, 255); + + colors.push_back(Vec3b((uchar)b, (uchar)g, (uchar)r)); + } + + // Create the result image + Mat dst = Mat::zeros(markers.size(), CV_8UC3); + + // Fill labeled objects with random colors + for (int i = 0; i < markers.rows; i++) + { + for (int j = 0; j < markers.cols; j++) + { + int index = markers.at(i,j); + if (index > 0 && index <= static_cast(contours.size())) + dst.at(i,j) = colors[index-1]; + else + dst.at(i,j) = Vec3b(0,0,0); + } + } + + // Visualize the final image + imshow("Final Result", dst); +//! [watershed] + + waitKey(0); + return 0; +} \ No newline at end of file diff --git a/samples/cpp/videostab.cpp b/samples/cpp/videostab.cpp index 703acba41e..2eea9b9026 100644 --- a/samples/cpp/videostab.cpp +++ b/samples/cpp/videostab.cpp @@ -217,7 +217,7 @@ public: outlierRejector = tblor; } -#if defined(HAVE_OPENCV_CUDAIMGPROC) && defined(HAVE_OPENCV_CUDA) && defined(HAVE_OPENCV_CUDAOPTFLOW) +#if defined(HAVE_OPENCV_CUDAIMGPROC) && defined(HAVE_OPENCV_CUDAOPTFLOW) if (gpu) { Ptr kbest = makePtr(est); @@ -258,7 +258,7 @@ public: outlierRejector = tblor; } -#if defined(HAVE_OPENCV_CUDAIMGPROC) && defined(HAVE_OPENCV_CUDA) && defined(HAVE_OPENCV_CUDAOPTFLOW) +#if defined(HAVE_OPENCV_CUDAIMGPROC) && defined(HAVE_OPENCV_CUDAOPTFLOW) if (gpu) { Ptr kbest = makePtr(est); @@ -343,7 +343,6 @@ int main(int argc, const char **argv) return 0; } -#ifdef HAVE_OPENCV_CUDA if (arg("gpu") == "yes") { cout << "initializing GPU..."; cout.flush(); @@ -352,7 +351,6 @@ int main(int argc, const char **argv) deviceTmp.upload(hostTmp); cout << endl; } -#endif StabilizerBase *stabilizer = 0; @@ -421,7 +419,7 @@ int main(int argc, const char **argv) { Ptr ws = makePtr(); if (arg("gpu") == "yes") -#ifdef HAVE_OPENCV_CUDA +#ifdef HAVE_OPENCV_CUDAWARPING ws = makePtr(); #else throw runtime_error("OpenCV is built without CUDA support"); diff --git a/samples/data/cards.png b/samples/data/cards.png new file mode 100644 index 0000000000..1e61d874f6 Binary files /dev/null and b/samples/data/cards.png differ diff --git a/samples/gpu/CMakeLists.txt b/samples/gpu/CMakeLists.txt index 10c91991c9..8741f11701 100644 --- a/samples/gpu/CMakeLists.txt +++ b/samples/gpu/CMakeLists.txt @@ -1,6 +1,6 @@ SET(OPENCV_CUDA_SAMPLES_REQUIRED_DEPS opencv_core opencv_flann opencv_imgproc opencv_imgcodecs opencv_videoio opencv_highgui opencv_ml opencv_video opencv_objdetect opencv_features2d - opencv_calib3d opencv_cuda opencv_superres + opencv_calib3d opencv_superres opencv_cudaarithm opencv_cudafilters opencv_cudawarping opencv_cudaimgproc opencv_cudafeatures2d opencv_cudaoptflow opencv_cudabgsegm opencv_cudastereo opencv_cudalegacy opencv_cudaobjdetect) diff --git a/samples/gpu/bgfg_segm.cpp b/samples/gpu/bgfg_segm.cpp index 89bb8d6a2f..00bb59e248 100644 --- a/samples/gpu/bgfg_segm.cpp +++ b/samples/gpu/bgfg_segm.cpp @@ -4,6 +4,7 @@ #include "opencv2/core.hpp" #include "opencv2/core/utility.hpp" #include "opencv2/cudabgsegm.hpp" +#include "opencv2/cudalegacy.hpp" #include "opencv2/video.hpp" #include "opencv2/highgui.hpp" diff 
--git a/samples/gpu/brox_optical_flow.cpp b/samples/gpu/brox_optical_flow.cpp deleted file mode 100644 index 638aade45a..0000000000 --- a/samples/gpu/brox_optical_flow.cpp +++ /dev/null @@ -1,270 +0,0 @@ -#include -#include -#include -#include - -#include "opencv2/core.hpp" -#include "opencv2/core/utility.hpp" -#include "opencv2/highgui.hpp" -#include "opencv2/imgproc.hpp" -#include "opencv2/cudaoptflow.hpp" -#include "opencv2/cudaarithm.hpp" - -using namespace std; -using namespace cv; -using namespace cv::cuda; - -void getFlowField(const Mat& u, const Mat& v, Mat& flowField); - -int main(int argc, const char* argv[]) -{ - try - { - const char* keys = - "{ h help | | print help message }" - "{ l left | | specify left image }" - "{ r right | | specify right image }" - "{ s scale | 0.8 | set pyramid scale factor }" - "{ a alpha | 0.197 | set alpha }" - "{ g gamma | 50.0 | set gamma }" - "{ i inner | 10 | set number of inner iterations }" - "{ o outer | 77 | set number of outer iterations }" - "{ si solver | 10 | set number of basic solver iterations }" - "{ t time_step | 0.1 | set frame interpolation time step }"; - - CommandLineParser cmd(argc, argv, keys); - - if (cmd.has("help") || !cmd.check()) - { - cmd.printMessage(); - cmd.printErrors(); - return 0; - } - - string frame0Name = cmd.get("left"); - string frame1Name = cmd.get("right"); - float scale = cmd.get("scale"); - float alpha = cmd.get("alpha"); - float gamma = cmd.get("gamma"); - int inner_iterations = cmd.get("inner"); - int outer_iterations = cmd.get("outer"); - int solver_iterations = cmd.get("solver"); - float timeStep = cmd.get("time_step"); - - if (frame0Name.empty() || frame1Name.empty()) - { - cerr << "Missing input file names" << endl; - return -1; - } - - Mat frame0Color = imread(frame0Name); - Mat frame1Color = imread(frame1Name); - - if (frame0Color.empty() || frame1Color.empty()) - { - cout << "Can't load input images" << endl; - return -1; - } - - cv::cuda::printShortCudaDeviceInfo(cv::cuda::getDevice()); - - cout << "OpenCV / NVIDIA Computer Vision" << endl; - cout << "Optical Flow Demo: Frame Interpolation" << endl; - cout << "=========================================" << endl; - - namedWindow("Forward flow"); - namedWindow("Backward flow"); - - namedWindow("Interpolated frame"); - - cout << "Press:" << endl; - cout << "\tESC to quit" << endl; - cout << "\t'a' to move to the previous frame" << endl; - cout << "\t's' to move to the next frame\n" << endl; - - frame0Color.convertTo(frame0Color, CV_32F, 1.0 / 255.0); - frame1Color.convertTo(frame1Color, CV_32F, 1.0 / 255.0); - - Mat frame0Gray, frame1Gray; - - cv::cvtColor(frame0Color, frame0Gray, COLOR_BGR2GRAY); - cv::cvtColor(frame1Color, frame1Gray, COLOR_BGR2GRAY); - - GpuMat d_frame0(frame0Gray); - GpuMat d_frame1(frame1Gray); - - cout << "Estimating optical flow" << endl; - - BroxOpticalFlow d_flow(alpha, gamma, scale, inner_iterations, outer_iterations, solver_iterations); - - cout << "\tForward..." << endl; - - GpuMat d_fu, d_fv; - - d_flow(d_frame0, d_frame1, d_fu, d_fv); - - Mat flowFieldForward; - getFlowField(Mat(d_fu), Mat(d_fv), flowFieldForward); - - cout << "\tBackward..." << endl; - - GpuMat d_bu, d_bv; - - d_flow(d_frame1, d_frame0, d_bu, d_bv); - - Mat flowFieldBackward; - getFlowField(Mat(d_bu), Mat(d_bv), flowFieldBackward); - - cout << "Interpolating..." 
<< endl; - - // first frame color components - GpuMat d_b, d_g, d_r; - - // second frame color components - GpuMat d_bt, d_gt, d_rt; - - // prepare color components on host and copy them to device memory - Mat channels[3]; - cv::split(frame0Color, channels); - - d_b.upload(channels[0]); - d_g.upload(channels[1]); - d_r.upload(channels[2]); - - cv::split(frame1Color, channels); - - d_bt.upload(channels[0]); - d_gt.upload(channels[1]); - d_rt.upload(channels[2]); - - // temporary buffer - GpuMat d_buf; - - // intermediate frame color components (GPU memory) - GpuMat d_rNew, d_gNew, d_bNew; - - GpuMat d_newFrame; - - vector frames; - frames.reserve(static_cast(1.0f / timeStep) + 2); - - frames.push_back(frame0Color); - - // compute interpolated frames - for (float timePos = timeStep; timePos < 1.0f; timePos += timeStep) - { - // interpolate blue channel - interpolateFrames(d_b, d_bt, d_fu, d_fv, d_bu, d_bv, timePos, d_bNew, d_buf); - - // interpolate green channel - interpolateFrames(d_g, d_gt, d_fu, d_fv, d_bu, d_bv, timePos, d_gNew, d_buf); - - // interpolate red channel - interpolateFrames(d_r, d_rt, d_fu, d_fv, d_bu, d_bv, timePos, d_rNew, d_buf); - - GpuMat channels3[] = {d_bNew, d_gNew, d_rNew}; - cuda::merge(channels3, 3, d_newFrame); - - frames.push_back(Mat(d_newFrame)); - - cout << setprecision(4) << timePos * 100.0f << "%\r"; - } - - frames.push_back(frame1Color); - - cout << setw(5) << "100%" << endl; - - cout << "Done" << endl; - - imshow("Forward flow", flowFieldForward); - imshow("Backward flow", flowFieldBackward); - - int currentFrame = 0; - - imshow("Interpolated frame", frames[currentFrame]); - - for(;;) - { - int key = toupper(waitKey(10) & 0xff); - - switch (key) - { - case 27: - return 0; - - case 'A': - if (currentFrame > 0) - --currentFrame; - - imshow("Interpolated frame", frames[currentFrame]); - break; - - case 'S': - if (currentFrame < static_cast(frames.size()) - 1) - ++currentFrame; - - imshow("Interpolated frame", frames[currentFrame]); - break; - } - } - } - catch (const exception& ex) - { - cerr << ex.what() << endl; - return -1; - } - catch (...) - { - cerr << "Unknow error" << endl; - return -1; - } -} - -template inline T clamp (T x, T a, T b) -{ - return ((x) > (a) ? ((x) < (b) ? 
(x) : (b)) : (a));
-}
-
-template <typename T> inline T mapValue(T x, T a, T b, T c, T d)
-{
-    x = clamp(x, a, b);
-    return c + (d - c) * (x - a) / (b - a);
-}
-
-void getFlowField(const Mat& u, const Mat& v, Mat& flowField)
-{
-    float maxDisplacement = 1.0f;
-
-    for (int i = 0; i < u.rows; ++i)
-    {
-        const float* ptr_u = u.ptr<float>(i);
-        const float* ptr_v = v.ptr<float>(i);
-
-        for (int j = 0; j < u.cols; ++j)
-        {
-            float d = max(fabsf(ptr_u[j]), fabsf(ptr_v[j]));
-
-            if (d > maxDisplacement)
-                maxDisplacement = d;
-        }
-    }
-
-    flowField.create(u.size(), CV_8UC4);
-
-    for (int i = 0; i < flowField.rows; ++i)
-    {
-        const float* ptr_u = u.ptr<float>(i);
-        const float* ptr_v = v.ptr<float>(i);
-
-
-        Vec4b* row = flowField.ptr<Vec4b>(i);
-
-        for (int j = 0; j < flowField.cols; ++j)
-        {
-            row[j][0] = 0;
-            row[j][1] = static_cast<unsigned char>(mapValue(-ptr_v[j], -maxDisplacement, maxDisplacement, 0.0f, 255.0f));
-            row[j][2] = static_cast<unsigned char>(mapValue( ptr_u[j], -maxDisplacement, maxDisplacement, 0.0f, 255.0f));
-            row[j][3] = 255;
-        }
-    }
-}
diff --git a/samples/gpu/farneback_optical_flow.cpp b/samples/gpu/farneback_optical_flow.cpp
index b8ed55ea6c..798b108a62 100644
--- a/samples/gpu/farneback_optical_flow.cpp
+++ b/samples/gpu/farneback_optical_flow.cpp
@@ -7,6 +7,7 @@
 #include "opencv2/highgui.hpp"
 #include "opencv2/video.hpp"
 #include "opencv2/cudaoptflow.hpp"
+#include "opencv2/cudaarithm.hpp"

 using namespace std;
 using namespace cv;
@@ -70,8 +71,8 @@ int main(int argc, char **argv)
     if (frameL.empty() || frameR.empty()) return -1;

     GpuMat d_frameL(frameL), d_frameR(frameR);
-    GpuMat d_flowx, d_flowy;
-    FarnebackOpticalFlow d_calc;
+    GpuMat d_flow;
+    Ptr<cuda::FarnebackOpticalFlow> d_calc = cuda::FarnebackOpticalFlow::create();
     Mat flowxy, flowx, flowy, image;

     bool running = true, gpuMode = true;
@@ -86,17 +87,21 @@
         if (gpuMode)
         {
             tc0 = getTickCount();
-            d_calc(d_frameL, d_frameR, d_flowx, d_flowy);
+            d_calc->calc(d_frameL, d_frameR, d_flow);
             tc1 = getTickCount();
-            d_flowx.download(flowx);
-            d_flowy.download(flowy);
+
+            GpuMat planes[2];
+            cuda::split(d_flow, planes);
+
+            planes[0].download(flowx);
+            planes[1].download(flowy);
         }
         else
         {
             tc0 = getTickCount();
             calcOpticalFlowFarneback(
-                frameL, frameR, flowxy, d_calc.pyrScale, d_calc.numLevels, d_calc.winSize,
-                d_calc.numIters, d_calc.polyN, d_calc.polySigma, d_calc.flags);
+                frameL, frameR, flowxy, d_calc->getPyrScale(), d_calc->getNumLevels(), d_calc->getWinSize(),
+                d_calc->getNumIters(), d_calc->getPolyN(), d_calc->getPolySigma(), d_calc->getFlags());
             tc1 = getTickCount();

             Mat planes[] = {flowx, flowy};
diff --git a/samples/gpu/optical_flow.cpp b/samples/gpu/optical_flow.cpp
index 7d625de85b..b1b3c8de1e 100644
--- a/samples/gpu/optical_flow.cpp
+++ b/samples/gpu/optical_flow.cpp
@@ -5,6 +5,7 @@
 #include
 #include "opencv2/highgui.hpp"
 #include "opencv2/cudaoptflow.hpp"
+#include "opencv2/cudaarithm.hpp"

 using namespace std;
 using namespace cv;
@@ -122,10 +123,13 @@ static void drawOpticalFlow(const Mat_<float>& flowx, const Mat_<float>& flowy,
     }
 }

-static void showFlow(const char* name, const GpuMat& d_flowx, const GpuMat& d_flowy)
+static void showFlow(const char* name, const GpuMat& d_flow)
 {
-    Mat flowx(d_flowx);
-    Mat flowy(d_flowy);
+    GpuMat planes[2];
+    cuda::split(d_flow, planes);
+
+    Mat flowx(planes[0]);
+    Mat flowy(planes[1]);

     Mat out;
     drawOpticalFlow(flowx, flowy, out, 10);
@@ -171,14 +175,12 @@ int main(int argc, const char* argv[])
     GpuMat d_frame0(frame0);
     GpuMat d_frame1(frame1);

-    GpuMat d_flowx(frame0.size(), CV_32FC1);
-    GpuMat d_flowy(frame0.size(), CV_32FC1);
+    GpuMat d_flow(frame0.size(), CV_32FC2);

-    BroxOpticalFlow brox(0.197f, 50.0f, 0.8f, 10, 77, 10);
-    PyrLKOpticalFlow lk; lk.winSize = Size(7, 7);
-    FarnebackOpticalFlow farn;
-    OpticalFlowDual_TVL1_CUDA tvl1;
-    FastOpticalFlowBM fastBM;
+    Ptr<cuda::BroxOpticalFlow> brox = cuda::BroxOpticalFlow::create(0.197f, 50.0f, 0.8f, 10, 77, 10);
+    Ptr<cuda::DensePyrLKOpticalFlow> lk = cuda::DensePyrLKOpticalFlow::create(Size(7, 7));
+    Ptr<cuda::FarnebackOpticalFlow> farn = cuda::FarnebackOpticalFlow::create();
+    Ptr<cuda::OpticalFlowDual_TVL1> tvl1 = cuda::OpticalFlowDual_TVL1::create();

     {
         GpuMat d_frame0f;
@@ -189,68 +191,45 @@ int main(int argc, const char* argv[])

         const int64 start = getTickCount();

-        brox(d_frame0f, d_frame1f, d_flowx, d_flowy);
+        brox->calc(d_frame0f, d_frame1f, d_flow);

         const double timeSec = (getTickCount() - start) / getTickFrequency();
         cout << "Brox : " << timeSec << " sec" << endl;

-        showFlow("Brox", d_flowx, d_flowy);
+        showFlow("Brox", d_flow);
     }

     {
         const int64 start = getTickCount();

-        lk.dense(d_frame0, d_frame1, d_flowx, d_flowy);
+        lk->calc(d_frame0, d_frame1, d_flow);

         const double timeSec = (getTickCount() - start) / getTickFrequency();
         cout << "LK : " << timeSec << " sec" << endl;

-        showFlow("LK", d_flowx, d_flowy);
+        showFlow("LK", d_flow);
     }

     {
         const int64 start = getTickCount();

-        farn(d_frame0, d_frame1, d_flowx, d_flowy);
+        farn->calc(d_frame0, d_frame1, d_flow);

         const double timeSec = (getTickCount() - start) / getTickFrequency();
         cout << "Farn : " << timeSec << " sec" << endl;

-        showFlow("Farn", d_flowx, d_flowy);
+        showFlow("Farn", d_flow);
     }

     {
         const int64 start = getTickCount();

-        tvl1(d_frame0, d_frame1, d_flowx, d_flowy);
+        tvl1->calc(d_frame0, d_frame1, d_flow);

         const double timeSec = (getTickCount() - start) / getTickFrequency();
         cout << "TVL1 : " << timeSec << " sec" << endl;

-        showFlow("TVL1", d_flowx, d_flowy);
-    }
-
-    {
-        const int64 start = getTickCount();
-
-        GpuMat buf;
-        calcOpticalFlowBM(d_frame0, d_frame1, Size(7, 7), Size(1, 1), Size(21, 21), false, d_flowx, d_flowy, buf);
-
-        const double timeSec = (getTickCount() - start) / getTickFrequency();
-        cout << "BM : " << timeSec << " sec" << endl;
-
-        showFlow("BM", d_flowx, d_flowy);
-    }
-
-    {
-        const int64 start = getTickCount();
-
-        fastBM(d_frame0, d_frame1, d_flowx, d_flowy);
-
-        const double timeSec = (getTickCount() - start) / getTickFrequency();
-        cout << "Fast BM : " << timeSec << " sec" << endl;
-
-        showFlow("Fast BM", d_flowx, d_flowy);
+        showFlow("TVL1", d_flow);
     }

     imshow("Frame 0", frame0);
diff --git a/samples/gpu/performance/performance.cpp b/samples/gpu/performance/performance.cpp
index 082a6383c1..cef979954c 100644
--- a/samples/gpu/performance/performance.cpp
+++ b/samples/gpu/performance/performance.cpp
@@ -2,6 +2,7 @@
 #include
 #include
 #include "performance.h"
+#include "opencv2/core/cuda.hpp"

 using namespace std;
 using namespace cv;
diff --git a/samples/gpu/performance/performance.h b/samples/gpu/performance/performance.h
index d2386284d3..98889f3422 100644
--- a/samples/gpu/performance/performance.h
+++ b/samples/gpu/performance/performance.h
@@ -7,7 +7,6 @@
 #include
 #include
 #include
-#include "opencv2/cuda.hpp"

 #define TAB "    "

diff --git a/samples/gpu/performance/tests.cpp b/samples/gpu/performance/tests.cpp
index 14910f9a38..b4bf4cfbec 100644
--- a/samples/gpu/performance/tests.cpp
+++ b/samples/gpu/performance/tests.cpp
@@ -3,7 +3,7 @@
 #include "opencv2/highgui.hpp"
 #include "opencv2/calib3d.hpp"
 #include "opencv2/video.hpp"
-#include "opencv2/cuda.hpp"
+#include "opencv2/cudalegacy.hpp"
 #include "opencv2/cudaimgproc.hpp"
 #include "opencv2/cudaarithm.hpp"
 #include "opencv2/cudawarping.hpp"
@@ -1187,87 +1187,6 @@ TEST(GoodFeaturesToTrack)
     CUDA_OFF;
 }

-TEST(PyrLKOpticalFlow)
-{
-    Mat frame0 = imread(abspath("../data/rubberwhale1.png"));
-    if (frame0.empty()) throw runtime_error("can't open ../data/rubberwhale1.png");
-
-    Mat frame1 = imread(abspath("../data/rubberwhale2.png"));
-    if (frame1.empty()) throw runtime_error("can't open ../data/rubberwhale2.png");
-
-    Mat gray_frame;
-    cvtColor(frame0, gray_frame, COLOR_BGR2GRAY);
-
-    for (int points = 1000; points <= 8000; points *= 2)
-    {
-        SUBTEST << points;
-
-        vector<Point2f> pts;
-        goodFeaturesToTrack(gray_frame, pts, points, 0.01, 0.0);
-
-        vector<Point2f> nextPts;
-        vector<unsigned char> status;
-
-        vector<float> err;
-
-        calcOpticalFlowPyrLK(frame0, frame1, pts, nextPts, status, err);
-
-        CPU_ON;
-        calcOpticalFlowPyrLK(frame0, frame1, pts, nextPts, status, err);
-        CPU_OFF;
-
-        cuda::PyrLKOpticalFlow d_pyrLK;
-
-        cuda::GpuMat d_frame0(frame0);
-        cuda::GpuMat d_frame1(frame1);
-
-        cuda::GpuMat d_pts;
-        Mat pts_mat(1, (int)pts.size(), CV_32FC2, (void*)&pts[0]);
-        d_pts.upload(pts_mat);
-
-        cuda::GpuMat d_nextPts;
-        cuda::GpuMat d_status;
-        cuda::GpuMat d_err;
-
-        d_pyrLK.sparse(d_frame0, d_frame1, d_pts, d_nextPts, d_status, &d_err);
-
-        CUDA_ON;
-        d_pyrLK.sparse(d_frame0, d_frame1, d_pts, d_nextPts, d_status, &d_err);
-        CUDA_OFF;
-    }
-}
-
-
-TEST(FarnebackOpticalFlow)
-{
-    const string datasets[] = {"../data/rubberwhale", "../data/basketball"};
-    for (size_t i = 0; i < sizeof(datasets)/sizeof(*datasets); ++i) {
-    for (int fastPyramids = 0; fastPyramids < 2; ++fastPyramids) {
-    for (int useGaussianBlur = 0; useGaussianBlur < 2; ++useGaussianBlur) {
-
-    SUBTEST << "dataset=" << datasets[i] << ", fastPyramids=" << fastPyramids << ", useGaussianBlur=" << useGaussianBlur;
-    Mat frame0 = imread(abspath(datasets[i] + "1.png"), IMREAD_GRAYSCALE);
-    Mat frame1 = imread(abspath(datasets[i] + "2.png"), IMREAD_GRAYSCALE);
-    if (frame0.empty()) throw runtime_error("can't open " + datasets[i] + "1.png");
-    if (frame1.empty()) throw runtime_error("can't open " + datasets[i] + "2.png");
-
-    cuda::FarnebackOpticalFlow calc;
-    calc.fastPyramids = fastPyramids != 0;
-    calc.flags |= useGaussianBlur ? OPTFLOW_FARNEBACK_GAUSSIAN : 0;
-
-    cuda::GpuMat d_frame0(frame0), d_frame1(frame1), d_flowx, d_flowy;
-    CUDA_ON;
-    calc(d_frame0, d_frame1, d_flowx, d_flowy);
-    CUDA_OFF;
-
-    Mat flow;
-    CPU_ON;
-    calcOpticalFlowFarneback(frame0, frame1, flow, calc.pyrScale, calc.numLevels, calc.winSize, calc.numIters, calc.polyN, calc.polySigma, calc.flags);
-    CPU_OFF;
-
-    }}}
-}
-
 #ifdef HAVE_OPENCV_BGSEGM

 TEST(MOG)
diff --git a/samples/gpu/pyrlk_optical_flow.cpp b/samples/gpu/pyrlk_optical_flow.cpp
index febc28f28d..f13487b622 100644
--- a/samples/gpu/pyrlk_optical_flow.cpp
+++ b/samples/gpu/pyrlk_optical_flow.cpp
@@ -77,44 +77,6 @@ template <typename T> inline T mapValue(T x, T a, T b, T c, T d)
     return c + (d - c) * (x - a) / (b - a);
 }

-static void getFlowField(const Mat& u, const Mat& v, Mat& flowField)
-{
-    float maxDisplacement = 1.0f;
-
-    for (int i = 0; i < u.rows; ++i)
-    {
-        const float* ptr_u = u.ptr<float>(i);
-        const float* ptr_v = v.ptr<float>(i);
-
-        for (int j = 0; j < u.cols; ++j)
-        {
-            float d = max(fabsf(ptr_u[j]), fabsf(ptr_v[j]));
-
-            if (d > maxDisplacement)
-                maxDisplacement = d;
-        }
-    }
-
-    flowField.create(u.size(), CV_8UC4);
-
-    for (int i = 0; i < flowField.rows; ++i)
-    {
-        const float* ptr_u = u.ptr<float>(i);
-        const float* ptr_v = v.ptr<float>(i);
-
-
-        Vec4b* row = flowField.ptr<Vec4b>(i);
-
-        for (int j = 0; j < flowField.cols; ++j)
-        {
-            row[j][0] = 0;
-            row[j][1] = static_cast<unsigned char>(mapValue(-ptr_v[j], -maxDisplacement, maxDisplacement, 0.0f, 255.0f));
-            row[j][2] = static_cast<unsigned char>(mapValue( ptr_u[j], -maxDisplacement, maxDisplacement, 0.0f, 255.0f));
-            row[j][3] = 255;
-        }
-    }
-}
-
 int main(int argc, const char* argv[])
 {
     const char* keys =
@@ -186,12 +148,8 @@ int main(int argc, const char* argv[])

     // Sparse

-    PyrLKOpticalFlow d_pyrLK;
-
-    d_pyrLK.winSize.width = winSize;
-    d_pyrLK.winSize.height = winSize;
-    d_pyrLK.maxLevel = maxLevel;
-    d_pyrLK.iters = iters;
+    Ptr<cuda::SparsePyrLKOpticalFlow> d_pyrLK = cuda::SparsePyrLKOpticalFlow::create(
+        Size(winSize, winSize), maxLevel, iters);

     GpuMat d_frame0(frame0);
     GpuMat d_frame1(frame1);
@@ -199,7 +157,7 @@ int main(int argc, const char* argv[])
     GpuMat d_nextPts;
     GpuMat d_status;

-    d_pyrLK.sparse(useGray ? d_frame0Gray : d_frame0, useGray ? d_frame1Gray : d_frame1, d_prevPts, d_nextPts, d_status);
+    d_pyrLK->calc(useGray ? d_frame0Gray : d_frame0, useGray ? d_frame1Gray : d_frame1, d_prevPts, d_nextPts, d_status);

     // Draw arrows

@@ -216,20 +174,6 @@ int main(int argc, const char* argv[])

     imshow("PyrLK [Sparse]", frame0);

-    // Dense
-
-    GpuMat d_u;
-    GpuMat d_v;
-
-    d_pyrLK.dense(d_frame0Gray, d_frame1Gray, d_u, d_v);
-
-    // Draw flow field
-
-    Mat flowField;
-    getFlowField(Mat(d_u), Mat(d_v), flowField);
-
-    imshow("PyrLK [Dense] Flow Field", flowField);
-
     waitKey();

     return 0;