Compare commits

...

144 Commits
4.x ... 2.4

Author SHA1 Message Date
Alexander Alekhin 82f8176b06
Merge pull request #14676 from 103yiran:103yiran-clang 6 years ago
103yiran a03b7575ba
make it compatible with clang 6 years ago
Alexander Alekhin 8bae39ce8a
Merge pull request #13669 from alalek:test_eigen_relax_eps_2.4 6 years ago
Alexander Alekhin aa89881321
Merge pull request #13665 from alalek:backport_8294_flann_rand 6 years ago
Alexander Alekhin 30b01a2d29 core(test): relax eigen eps value: 0.01 -> 0.02 6 years ago
Alexander Alekhin dc328451eb flann: use OpenCV theRNG() 6 years ago
Alexander Alekhin a49600cb24
Merge pull request #13510 from knsong:2.4 6 years ago
Kangning Song 0157ff0bc3
fix initial values bug 6 years ago
Alexander Alekhin 7e71666a0b
Merge pull request #13448 from alalek:issue_13445_2.4 6 years ago
Alexander Alekhin 7b677bb017 videoio(dc1394): use lazy initialization on demand 6 years ago
Alexander Alekhin 9b954de175 Merge pull request #13265 from xusiwei:2.4 6 years ago
Siwei Xu 7f3af2b2d9 androidcamera: add a missing header. 6 years ago
Alexander Alekhin fd63c60418
Merge pull request #12181 from alalek:fix_gpu_sparse_multi_definition 7 years ago
Alexander Alekhin 1c34941537
Merge pull request #12179 from alalek:fix_gpu_samples_2.4 7 years ago
Alexander Alekhin b0f0194595 gpu(sparse_multi): fix definition without TBB 7 years ago
Alexander Alekhin 8484c8af7c samples(gpu): fix build (invalid access to cvconfig.h from sample) 7 years ago
Alexander Alekhin 51cfa51924 OpenCV version++ 7 years ago
Alexander Alekhin f1c5d8364f
Merge pull request #11640 from alalek:backport_11617 7 years ago
Alexander Alekhin 19f4c4403a videoio(ffmpeg): specify stream->time_base 7 years ago
Alexander Alekhin e89405d48f
Merge pull request #11534 from juanecito:2.4 7 years ago
juanitov eaf0b04530 Add sample of sparse pyrlk optical flow thread safe 7 years ago
juanitov cbae431752 Fix HAVE_TBB is not defined in pyrlk.cu in spite of CMake TBB option is ON 7 years ago
Juan María Gómez López 0239c195d8 Merge pull request #11060 from juanecito:2.4 7 years ago
Alexander Alekhin a32aec5ba6
Merge pull request #11496 from shengyu7697:tab_to_space 7 years ago
shengyu dd131219b2 tab to space 7 years ago
Alexander Alekhin 2cf58febf8
Merge pull request #11369 from ilovezfs:ffmpeg-4.0 7 years ago
ilovezfs 99091a6246 Fix build with FFmpeg 4.0 7 years ago
Alexander Alekhin 0354d01e79 OpenCV version++ 7 years ago
Alexander Alekhin 45d3aac730
Merge pull request #10913 from alalek:fix_imgcodecs_hang_2.4 7 years ago
Alexander Alekhin 318ac6b8c9 imgcodecs: fix RBaseStream hang on truncated inputs 7 years ago
Alexander Alekhin 7d332100a4
Merge pull request #10901 from alalek:backport_imgcodecs_fixes 7 years ago
Alexander Alekhin 56072c4406 imgcodecs: add more Jasper checks for supported and tested cases 7 years ago
Alexander Alekhin cd64b504b8 imgcodecs: add overflow checks 7 years ago
Alexander Alekhin 443059e371 imgcodecs(pxm): fix memcpy size 7 years ago
Alexander Alekhin 8f9c4d23e0
Merge pull request #10695 from mworchel:blobdetector_mask 7 years ago
mworchel 8b90db3f25 Add mask support to SimpleBlobDetector 7 years ago
Alexander Alekhin ec16307632
Merge pull request #10509 from kislinsk:support-msvc-14.1-minor-upgrades 7 years ago
Alexander Alekhin fdefc4b09d cmake: allow custom OpenCV_ARCH / OpenCV_RUNTIME values 7 years ago
Stefan Dinkelacker 61d8292652 cmake: add support for MSVC 14.1 minor upgrades 7 years ago
Alexander Alekhin 43f1b72e92
Merge pull request #10443 from alalek:backport_10435 7 years ago
Arthur Williams 1f4b8c2785 Fixed #10433 7 years ago
Alexander Alekhin 97eae6b546 OpenCV version++ 7 years ago
Alexander Alekhin 572d6d795d
Merge pull request #10301 from alalek:cmake_timestamp_update_2.4 7 years ago
Alexander Alekhin 7459388dd9 cmake: update timestamp status 7 years ago
Alexander Alekhin 249edb2c99
Merge pull request #10204 from alalek:fix_calib3d_fisheye_rectify_test_2.4 7 years ago
Alexander Alekhin 00cc4aa230 calib3d: fix fisheye stereoRectify test 7 years ago
Alexander Alekhin 7c0193bb75
Merge pull request #10198 from alalek:fix_build_xcode_9.1_2.4 7 years ago
Alexander Alekhin 97507e42a1 build: eliminate Xcode 9.1 warnings 7 years ago
Alexander Alekhin 91fe01beca
Merge pull request #10052 from alalek:fix_macos_pkg_config_2.4 7 years ago
Alexander Alekhin 591a08721e cmake: fix pkg-config generation for MacOSX 7 years ago
Alexander Alekhin a28733148e
Merge pull request #10038 from alalek:update_gitignore_2.4 7 years ago
Alexander Alekhin e10e628036 git: .gitignore update 7 years ago
Alexander Alekhin 738b388a64
Merge pull request #10030 from hosjiu1702:2.4 7 years ago
Alexander Alekhin 9802024f71
Merge pull request #9974 from opalmirror:test_stereo_min_disparity_24 7 years ago
hosjiu1702 1659e96766 change from variance to standard deviation 7 years ago
James Perkins c1dea8465e test_stereomatching.cpp: validate min disparity affect on valid ROI 7 years ago
Alexander Alekhin 678d383f26
Merge pull request #9844 from opalmirror:fix_stereobm_mindisp_truncation_24 7 years ago
Alexander Alekhin e15a56d142 Merge pull request #9915 from alalek:backport_9903 8 years ago
blendin c46521ad65 Fix out of bounds write 8 years ago
Alexander Alekhin d0f3468477 Merge pull request #9873 from alalek:fix_documentation_2.4 8 years ago
Alexander Alekhin 3224b0a75a doc: fix youtube videos handling 8 years ago
James Perkins 7d8110772a fix StereoBM disparity map right margin truncation when minDisparities > 0 8 years ago
Alexander Alekhin fb4c2ba64d OpenCV version++ 8 years ago
Alexander Alekhin e53f1b2532 Merge pull request #9808 from alalek:backport_fixes 8 years ago
Yaron Inger 621a1d3b09 videoio: remove AssetsLibrary dependency. 8 years ago
berak 14b686cdc7 imgcodecs: fix 4 reading channel bmp images 8 years ago
blendin 0202e52747 Fix out of bounds write 8 years ago
Benoit Blanchon 7997e7aee9 cmake: map RelWithDebInfo and MinSizeRel configuration to Release 8 years ago
Guillaume Jacob b43e5e2d21 CMakeLists.txt: fix a typo in a message 8 years ago
KUANG Fangjun c92ecc7f96 Improve the documentation. 8 years ago
Alexander Alekhin ae52d94ceb Merge pull request #9776 from IgWod:matrix-memory-leak-fix 8 years ago
Igor Wodiany 37d4e24806
Fix a memory leak in the Mat copying constructor 8 years ago
Alexander Alekhin 7b861cac9f Merge pull request #9662 from alalek:backport_9448_9504 8 years ago
Alexander Alekhin da9395b592 imgcodecs: fix regression 9376 8 years ago
Alexander Alekhin df1a026329 imgcodesc: fix code problems with integer overflow / address arithmetic / UB 8 years ago
Alexander Alekhin dd9bf1ba1d Merge pull request #9435 from alalek:fix_numpy_warning_2.4 8 years ago
Alexander Alekhin 30f7576029 Merge pull request #9383 from alalek:imgcodecs_refactoring_2.4 8 years ago
Alexander Alekhin f548d660ba Merge pull request #9432 from alalek:cmake_deprecated_policies_2.4 8 years ago
Alexander Alekhin 8a2bbc57e4 python: eliminate -Wundef warning about NPY_INTERNAL_BUILD 8 years ago
Alexander Alekhin e575b5ff9a cmake: remove INSTALL_NAME_DIR 8 years ago
Alexander Alekhin 355553b0a6 cmake: CMP0026 NEW 8 years ago
Alexander Alekhin d25b04149a cmake: CMP0022 NEW 8 years ago
Alexander Alekhin 268d17e086 cmake: drop CMP0017 8 years ago
Alexander Alekhin d567a79581 cmake: use CMAKE_CXX_COMPILER_VERSION (CMake 2.8.8+) 8 years ago
Alexander Alekhin 3ba7c16670 cmake: bump minimal version 8 years ago
Alexander Alekhin f7d99f3f6a Merge pull request #9408 from alalek:backport_9228 8 years ago
neok-m4700 126de0cd95 Update OpenCVCompilerOptions.cmake 8 years ago
Alexander Alekhin 72d29259ca imgcodecs: refactoring, improve code quality 8 years ago
Alexander Alekhin c9488c661f Merge pull request #9380 from StevenPuttemans:fix_doc_9359_2.4 8 years ago
Steven Puttemans 4852f017fa backport of PR 9367 8 years ago
Alexander Alekhin b398b572cc OpenCV version++ 8 years ago
Alexander Alekhin a0520bef42 Merge pull request #9266 from alalek:backport_9238 8 years ago
Alexander Alekhin 7dceebbc4e flann: fix out of buffer access 8 years ago
Alexander Alekhin 0312df4812 cmake: fix compiler flags 8 years ago
Alexander Alekhin d19147bc78 Merge pull request #9218 from alalek:backport_2.4 8 years ago
Alexander Alekhin fc2a71dbab build: enable __STDC_FORMAT_MACROS macro 8 years ago
kvaghel1 6bafc2c598 Fix frame timestamp in VideoCapture::get 8 years ago
Alexander Alekhin 777a0080cb cmake: disallow in-source builds 8 years ago
Christof Kaufmann 572c86176a cmake: Avoid adding default path as system directory for GCC 6.x 8 years ago
Alexander Alekhin 8736ece97d Merge pull request #9116 from alalek:backport_9110_2.4 8 years ago
Patrik Huber ebd961585e Added detection of MSVC1911 8 years ago
Alexander Alekhin 2e343ef631 Merge pull request #9102 from varunagrawal:2.4-vector 8 years ago
Varun Agrawal af3c544483 All vector definitions have correct namespace scopes 8 years ago
Alexander Alekhin 3dedd62f54 Merge pull request #8970 from alalek:fix_libname_2.4 8 years ago
Alexander Alekhin 07b7c03efc Merge pull request #8980 from alalek:fix_typo_8979_2.4 8 years ago
Alexander Alekhin aae7621243 highgui(macos): fix video file reading via AVFoundation 8 years ago
Alexander Alekhin fa36e769cf Merge pull request #8979 from sabzo:issue/8305_unhandled_objective_c_exception_video_avi_playback 8 years ago
Sabelo f71bf21aaf Unhandled Objective-C exception when playing video 8 years ago
Alexander Alekhin 70489b1e22 cmake: fix libname for pkg-config configuration 8 years ago
Alexander Alekhin e397794ae2 Merge pull request #8959 from alalek:gitignore_cache_2.4 8 years ago
Alexander Alekhin 3d4e1bd641 .gitignore: add .cache directory (to be consistent with master branch) 8 years ago
Alexander Alekhin 3649ee3cd3 Merge pull request #8887 from krishraghuram:gpu_reduce_doc 8 years ago
Raghuram Krishnaswami d0f3a14456 Modified doc for gpu::reduce(fixes issue 8628) 8 years ago
krishraghuram 01e34b6a91 correct bug in fastmeans (ref #7899) (#8757) 8 years ago
Alexander Alekhin df5c090f2e Merge pull request #8761 from kvaghel1:Issue-8760 8 years ago
kvaghel1 27213a845f Correct findChessboardCorners flags naming in calib3d doc. 8 years ago
Alexander Alekhin 91a6940930 Merge pull request #8574 from elmewo:fix-affine-constructor 8 years ago
Alexander Alekhin 7577f1420d Merge pull request #8592 from tomoaki0705:fixTypoCalib3d24 8 years ago
Tomoaki Teshima c72a191145 fix typo 8 years ago
André Mewes 34d7b96bfc create homogeneous affine matrix when constructing from 4x3 cv::Mat 8 years ago
Alexander Alekhin 160f26192c Merge pull request #8562 from alalek:fix_ffmpeg_check_2.4 8 years ago
Alexander Alekhin 9e76ba5ccd ffmpeg: add __STDC_CONSTANT_MACROS to check code 8 years ago
Alexander Alekhin ec784331fb Merge pull request #8353 from lpetre:backport_3593 8 years ago
Adam Borowski 61936eb1a4 Get rid of sysctl includes on Linux. 8 years ago
Alexander Alekhin d856604a4a Merge pull request #8322 from StevenPuttemans:backport_8207 8 years ago
StevenPuttemans ab806b63a7 backport of PR #8207 8 years ago
Alexander Alekhin 0051744fa0 Merge pull request #8267 from umbraclet16:fixCalibSample 8 years ago
umbraclet16 b8d99e1ffb Solve issue #8264 8 years ago
Alexander Alekhin 54f65a4672 Merge pull request #8002 from alalek:c_defines_2.4 8 years ago
Alexander Alekhin d103c116db Merge pull request #8031 from terfendail:shortline_fix_2.4 8 years ago
Alexander Alekhin e304795622 cmake: add defines to enable useful macroses 8 years ago
Alexander Alekhin ebae963884 drawing: workaround MSVC2010 32-bit compiler bug 8 years ago
Vadim Pisarevsky 19e4c7727b Merge pull request #7546 from savuor:fix2.4/yuv_channel_order 8 years ago
Alexander Alekhin f2a59b3d94 Merge pull request #7913 from DabeDotCom:CoreImage-pre_Xcode_7 8 years ago
Dabrien 'Dabe' Murphy 88bc0f7838 Fix 7606 "ld: framework not found CoreImage" Bug 8 years ago
Vitaly Tuzov 2346ba7ea2 Backport of PR #7161 fix for drawing beyond 32768 range 8 years ago
parismita f5db748312 Update linux_install.rst (#7776) 8 years ago
Alexander Alekhin 3e66654963 Merge pull request #7973 from sturkmen72:patch-2 8 years ago
Suleyman TURKMEN f108795e2c Update sift.cpp 8 years ago
Alexander Alekhin 6ed571b3c6 Merge pull request #7893 from alalek:fix_ffmpeg_check_2.4 8 years ago
Alexander Alekhin 9d45f15627 cmake: fix ffmpeg check code 8 years ago
Alexander Alekhin d7504ecaed OpenCV version++ 8 years ago
Rostislav Vasilikhin d23190dc3e fixed channel order from YVU to YUV 8 years ago
Rostislav Vasilikhin e3070ed553 fixed YUV channel equivalence 9 years ago
  1. 7
      .gitignore
  2. 2
      3rdparty/libjasper/CMakeLists.txt
  3. 4
      3rdparty/libpng/CMakeLists.txt
  4. 2
      3rdparty/tbb/CMakeLists.txt
  5. 57
      CMakeLists.txt
  6. 4
      apps/traincascade/cascadeclassifier.cpp
  7. 12
      cmake/FindCUDA.cmake
  8. 11
      cmake/OpenCVCompilerOptions.cmake
  9. 6
      cmake/OpenCVConfig.cmake
  10. 5
      cmake/OpenCVDetectAndroidSDK.cmake
  11. 2
      cmake/OpenCVDetectCUDA.cmake
  12. 52
      cmake/OpenCVDetectCXXCompiler.cmake
  13. 26
      cmake/OpenCVFindLibsVideo.cmake
  14. 22
      cmake/OpenCVGenPkgconfig.cmake
  15. 12
      cmake/OpenCVModule.cmake
  16. 6
      cmake/OpenCVPCHSupport.cmake
  17. 2
      cmake/OpenCVPackaging.cmake
  18. 19
      cmake/OpenCVUtils.cmake
  19. 4
      cmake/OpenCVVersion.cmake
  20. 2
      cmake/checks/ffmpeg_test.cpp
  21. 8
      cmake/templates/OpenCVConfig.cmake.in
  22. 2
      doc/tutorials/calib3d/camera_calibration/camera_calibration.rst
  23. 2
      doc/tutorials/core/file_input_output_with_xml_yml/file_input_output_with_xml_yml.rst
  24. 2
      doc/tutorials/core/how_to_scan_images/how_to_scan_images.rst
  25. 2
      doc/tutorials/core/interoperability_with_OpenCV_1/interoperability_with_OpenCV_1.rst
  26. 2
      doc/tutorials/core/mat-mask-operations/mat-mask-operations.rst
  27. 2
      doc/tutorials/core/mat_the_basic_image_container/mat_the_basic_image_container.rst
  28. 2
      doc/tutorials/gpu/gpu-basics-similarity/gpu-basics-similarity.rst
  29. 4
      doc/tutorials/highgui/video-input-psnr-ssim/video-input-psnr-ssim.rst
  30. 2
      doc/tutorials/highgui/video-write/video-write.rst
  31. 2
      doc/tutorials/imgproc/gausian_median_blur_bilateral_filter/gausian_median_blur_bilateral_filter.rst
  32. 2
      doc/tutorials/introduction/display_image/display_image.rst
  33. 4
      doc/tutorials/introduction/how_to_write_a_tutorial/how_to_write_a_tutorial.rst
  34. 2
      doc/tutorials/introduction/linux_install/linux_install.rst
  35. 4
      doc/tutorials/introduction/windows_install/windows_install.rst
  36. 4
      doc/tutorials/ios/image_manipulation/image_manipulation.rst
  37. 2
      doc/tutorials/ml/non_linear_svms/non_linear_svms.rst
  38. 2
      modules/androidcamera/src/camera_activity.cpp
  39. 6
      modules/calib3d/doc/camera_calibration_and_3d_reconstruction.rst
  40. 4
      modules/calib3d/src/stereosgbm.cpp
  41. 110
      modules/calib3d/test/test_fisheye.cpp
  42. 110
      modules/calib3d/test/test_stereomatching.cpp
  43. 12
      modules/core/doc/utility_and_system_functions_and_macros.rst
  44. 8
      modules/core/include/opencv2/core/affine.hpp
  45. 11
      modules/core/include/opencv2/core/core.hpp
  46. 3
      modules/core/include/opencv2/core/operations.hpp
  47. 2
      modules/core/include/opencv2/core/version.hpp
  48. 378
      modules/core/src/drawing.cpp
  49. 1
      modules/core/src/matmul.cpp
  50. 34
      modules/core/src/matrix.cpp
  51. 2
      modules/core/src/parallel.cpp
  52. 2
      modules/core/src/system.cpp
  53. 2
      modules/core/test/test_eigen.cpp
  54. 8
      modules/features2d/src/blobdetector.cpp
  55. 5
      modules/flann/include/opencv2/flann/kdtree_index.h
  56. 5
      modules/flann/include/opencv2/flann/lsh_index.h
  57. 25
      modules/flann/include/opencv2/flann/lsh_table.h
  58. 28
      modules/flann/include/opencv2/flann/random.h
  59. 6
      modules/gpu/doc/matrix_reductions.rst
  60. 41
      modules/gpu/include/opencv2/gpu/gpu.hpp
  61. 82
      modules/gpu/perf/perf_video.cpp
  62. 502
      modules/gpu/src/cuda/pyrlk.cu
  63. 158
      modules/gpu/src/pyrlk.cpp
  64. 132
      modules/gpu/test/test_optflow.cpp
  65. 7
      modules/highgui/CMakeLists.txt
  66. 25
      modules/highgui/src/bitstrm.cpp
  67. 19
      modules/highgui/src/bitstrm.hpp
  68. 10
      modules/highgui/src/cap_avfoundation_mac.mm
  69. 18
      modules/highgui/src/cap_dc1394_v2.cpp
  70. 41
      modules/highgui/src/cap_ffmpeg_impl.hpp
  71. 8
      modules/highgui/src/cap_ios_video_camera.mm
  72. 6
      modules/highgui/src/cap_qt.cpp
  73. 6
      modules/highgui/src/cap_v4l.cpp
  74. 35
      modules/highgui/src/grfmt_bmp.cpp
  75. 10
      modules/highgui/src/grfmt_exr.cpp
  76. 2
      modules/highgui/src/grfmt_jpeg.cpp
  77. 53
      modules/highgui/src/grfmt_jpeg2000.cpp
  78. 121
      modules/highgui/src/grfmt_pxm.cpp
  79. 14
      modules/highgui/src/grfmt_sunras.cpp
  80. 120
      modules/highgui/src/loadsave.cpp
  81. 2
      modules/highgui/src/precomp.hpp
  82. 53
      modules/highgui/src/utils.cpp
  83. 2
      modules/highgui/src/utils.hpp
  84. 134
      modules/highgui/test/test_drawing.cpp
  85. 1
      modules/highgui/test/test_ffmpeg.cpp
  86. 124
      modules/imgproc/src/color.cpp
  87. 17
      modules/java/CMakeLists.txt
  88. 8
      modules/nonfree/src/sift.cpp
  89. 109
      modules/ocl/src/opencl/cvt_color.cl
  90. 4
      modules/photo/doc/denoising.rst
  91. 2
      modules/python/CMakeLists.txt
  92. 1
      modules/stitching/src/matchers.cpp
  93. 2
      modules/viz/CMakeLists.txt
  94. 1
      platforms/ios/build_framework.py
  95. 5
      platforms/ios/cmake/Modules/Platform/iOS.cmake
  96. 5
      platforms/ios/cmake/Toolchains/Toolchain-iPhoneOS_Xcode.cmake
  97. 5
      platforms/ios/cmake/Toolchains/Toolchain-iPhoneSimulator_Xcode.cmake
  98. 12
      samples/cpp/tutorial_code/calib3d/camera_calibration/camera_calibration.cpp
  99. 9
      samples/gpu/CMakeLists.txt
  100. 272
      samples/gpu/pyrlk_optical_flow_multithreading.cpp
  101. Some files were not shown because too many files have changed in this diff Show More

7
.gitignore vendored

@ -1,9 +1,10 @@
# ignore dot files/directories
.*
!.gitignore
*.autosave
*.pyc
*.user
*~
.*.swp
.DS_Store
.sw[a-z]
tags
tegra/

@ -25,7 +25,7 @@ endif(WIN32 AND NOT MINGW)
ocv_warnings_disable(CMAKE_C_FLAGS -Wno-implicit-function-declaration -Wno-uninitialized -Wmissing-prototypes
-Wno-unused-but-set-parameter -Wmissing-declarations -Wunused -Wshadow -Wsign-compare)
ocv_warnings_disable(CMAKE_C_FLAGS -Wunused-parameter) # clang
ocv_warnings_disable(CMAKE_C_FLAGS -Wunused-parameter -Wstrict-prototypes) # clang
ocv_warnings_disable(CMAKE_C_FLAGS /wd4013 /wd4018 /wd4101 /wd4244 /wd4267 /wd4715) # vs2005
if(UNIX)

@ -3,7 +3,7 @@
#
# ----------------------------------------------------------------------------
if(ARM AND ENABLE_NEON AND NOT AARCH64)
if(ARM AND ENABLE_NEON AND NOT AARCH64 AND NOT IOS)
project(${PNG_LIBRARY} ASM)
else()
project(${PNG_LIBRARY})
@ -14,7 +14,7 @@ ocv_include_directories("${CMAKE_CURRENT_SOURCE_DIR}" ${ZLIB_INCLUDE_DIR})
file(GLOB lib_srcs *.c)
file(GLOB lib_hdrs *.h)
if(ARM AND ENABLE_NEON AND NOT AARCH64)
if(ARM AND ENABLE_NEON AND NOT AARCH64 AND NOT IOS)
list(APPEND lib_srcs arm/filter_neon.S arm/arm_init.c)
add_definitions(-DPNG_ARM_NEON_OPT=2)
else()

@ -187,7 +187,7 @@ if (HAVE_LIBPTHREAD)
add_definitions(-DUSE_PTHREAD) #required for Unix
endif()
if (CMAKE_COMPILER_IS_GNUCXX)
if(CMAKE_COMPILER_IS_GNUCXX AND NOT CMAKE_COMPILER_IS_CLANGCXX)
add_definitions(-DTBB_USE_GCC_BUILTINS=1) #required for ARM GCC
endif()

@ -4,11 +4,16 @@
# From the off-tree build directory, invoke:
# $ cmake <PATH_TO_OPENCV_ROOT>
#
#
# - OCT-2008: Initial version <joseluisblancoc@gmail.com>
#
# ----------------------------------------------------------------------------
# Disable in-source builds to prevent source tree corruption.
if(" ${CMAKE_SOURCE_DIR}" STREQUAL " ${CMAKE_BINARY_DIR}")
message(FATAL_ERROR "
FATAL: In-source builds are not allowed.
You should create a separate directory for build files.
")
endif()
set(CMAKE_ALLOW_LOOSE_LOOP_CONSTRUCTS true)
# Following block can broke build in case of cross-compilng
@ -32,24 +37,15 @@ endif(NOT CMAKE_TOOLCHAIN_FILE)
# Top level OpenCV project
# --------------------------------------------------------------
if(CMAKE_GENERATOR MATCHES Xcode AND XCODE_VERSION VERSION_GREATER 4.3)
cmake_minimum_required(VERSION 2.8.8)
cmake_minimum_required(VERSION 3.0)
elseif(IOS)
cmake_minimum_required(VERSION 2.8.0)
cmake_minimum_required(VERSION 3.0)
else()
cmake_minimum_required(VERSION 2.6.3)
endif()
if(POLICY CMP0017)
cmake_policy(SET CMP0017 NEW)
endif()
if(POLICY CMP0022)
cmake_policy(SET CMP0022 OLD)
cmake_minimum_required(VERSION 2.8.12.2)
endif()
if(POLICY CMP0026)
# silence cmake 3.0+ warnings about reading LOCATION attribute
cmake_policy(SET CMP0026 OLD)
cmake_policy(SET CMP0026 NEW)
endif()
if (POLICY CMP0042)
@ -81,8 +77,8 @@ ocv_clear_vars(OpenCVModules_TARGETS)
# Break in case of popular CMake configuration mistakes
# ----------------------------------------------------------------------------
if(NOT CMAKE_SIZEOF_VOID_P GREATER 0)
message(FATAL_ERROR "CMake fails to deterimine the bitness of target platform.
Please check your CMake and compiler installation. If you are crosscompiling then ensure that your CMake toolchain file correctly sets the compiler details.")
message(FATAL_ERROR "CMake fails to determine the bitness of the target platform.
Please check your CMake and compiler installation. If you are cross-compiling then ensure that your CMake toolchain file correctly sets the compiler details.")
endif()
# ----------------------------------------------------------------------------
@ -193,7 +189,7 @@ OCV_OPTION(BUILD_PACKAGE "Enables 'make package_source' command"
OCV_OPTION(BUILD_PERF_TESTS "Build performance tests" ON IF (NOT IOS) )
OCV_OPTION(BUILD_TESTS "Build accuracy & regression tests" ON IF (NOT IOS) )
OCV_OPTION(BUILD_WITH_DEBUG_INFO "Include debug info into debug libs (not MSCV only)" ON )
OCV_OPTION(BUILD_WITH_STATIC_CRT "Enables use of staticaly linked CRT for staticaly linked OpenCV" ON IF MSVC )
OCV_OPTION(BUILD_WITH_STATIC_CRT "Enables use of statically linked CRT for staticaly linked OpenCV" ON IF MSVC )
OCV_OPTION(BUILD_FAT_JAVA_LIB "Create fat java wrapper containing the whole OpenCV library" ON IF NOT BUILD_SHARED_LIBS AND CMAKE_COMPILER_IS_GNUCXX )
OCV_OPTION(BUILD_ANDROID_SERVICE "Build OpenCV Manager for Google Play" OFF IF ANDROID AND ANDROID_SOURCE_TREE )
OCV_OPTION(BUILD_ANDROID_PACKAGE "Build platform-specific package for Google Play" OFF IF ANDROID )
@ -687,6 +683,16 @@ endif()
# ========================== build platform ==========================
status("")
status(" Platform:")
if(NOT DEFINED OPENCV_TIMESTAMP
AND NOT CMAKE_VERSION VERSION_LESS 2.8.11
AND NOT BUILD_INFO_SKIP_TIMESTAMP
)
string(TIMESTAMP OPENCV_TIMESTAMP "" UTC)
set(OPENCV_TIMESTAMP "${OPENCV_TIMESTAMP}" CACHE STRING "Timestamp of OpenCV build configuration" FORCE)
endif()
if(OPENCV_TIMESTAMP)
status(" Timestamp:" ${OPENCV_TIMESTAMP})
endif()
status(" Host:" ${CMAKE_HOST_SYSTEM_NAME} ${CMAKE_HOST_SYSTEM_VERSION} ${CMAKE_HOST_SYSTEM_PROCESSOR})
if(CMAKE_CROSSCOMPILING)
status(" Target:" ${CMAKE_SYSTEM_NAME} ${CMAKE_SYSTEM_VERSION} ${CMAKE_SYSTEM_PROCESSOR})
@ -707,10 +713,6 @@ endif()
# ========================== C/C++ options ==========================
if(CMAKE_CXX_COMPILER_VERSION)
set(OPENCV_COMPILER_STR "${CMAKE_CXX_COMPILER} ${CMAKE_CXX_COMPILER_ARG1} (ver ${CMAKE_CXX_COMPILER_VERSION})")
elseif(CMAKE_COMPILER_IS_CLANGCXX)
set(OPENCV_COMPILER_STR "${CMAKE_CXX_COMPILER} ${CMAKE_CXX_COMPILER_ARG1} (ver ${CMAKE_CLANG_REGEX_VERSION})")
elseif(CMAKE_COMPILER_IS_GNUCXX)
set(OPENCV_COMPILER_STR "${CMAKE_CXX_COMPILER} ${CMAKE_CXX_COMPILER_ARG1} (ver ${CMAKE_GCC_REGEX_VERSION})")
else()
set(OPENCV_COMPILER_STR "${CMAKE_CXX_COMPILER} ${CMAKE_CXX_COMPILER_ARG1}")
endif()
@ -1024,7 +1026,7 @@ if(HAVE_OPENCL)
set(__libs "")
foreach(l ${OPENCL_LIBRARIES})
if(TARGET ${l})
get_target_property(p ${l} LOCATION)
get_target_property(p ${l} IMPORTED_LOCATION)
if(p MATCHES NOTFOUND)
list(APPEND __libs "${l}")
else()
@ -1094,13 +1096,6 @@ status("")
ocv_finalize_status()
# ----------------------------------------------------------------------------
# Warn in the case of in-source build
# ----------------------------------------------------------------------------
if("${CMAKE_CURRENT_SOURCE_DIR}" STREQUAL "${CMAKE_CURRENT_BINARY_DIR}")
message(WARNING "The source directory is the same as binary directory. \"make clean\" may damage the source tree")
endif()
# ----------------------------------------------------------------------------
# CPack stuff
# ----------------------------------------------------------------------------

@ -140,7 +140,7 @@ bool CvCascadeClassifier::train( const string _cascadeDirName,
double acceptanceRatioBreakValue)
{
// Start recording clock ticks for training time output
const clock_t begin_time = clock();
double time = (double)getTickCount();
if( _cascadeDirName.empty() || _posFilename.empty() || _negFilename.empty() )
CV_Error( CV_StsBadArg, "_cascadeDirName or _bgfileName or _vecFileName is NULL" );
@ -268,7 +268,7 @@ bool CvCascadeClassifier::train( const string _cascadeDirName,
fs << "}";
// Output training time up till now
float seconds = float( clock () - begin_time ) / CLOCKS_PER_SEC;
double seconds = ( (double)getTickCount() - time)/ getTickFrequency();
int days = int(seconds) / 60 / 60 / 24;
int hours = (int(seconds) / 60 / 60) % 24;
int minutes = (int(seconds) / 60) % 60;

@ -1553,7 +1553,7 @@ macro(CUDA_ADD_LIBRARY cuda_target)
# variable will have been defined.
CUDA_LINK_SEPARABLE_COMPILATION_OBJECTS("${link_file}" ${cuda_target} "${_options}" "${${cuda_target}_SEPARABLE_COMPILATION_OBJECTS}")
target_link_libraries(${cuda_target}
target_link_libraries(${cuda_target} LINK_PRIVATE
${CUDA_LIBRARIES}
)
@ -1597,7 +1597,7 @@ macro(CUDA_ADD_EXECUTABLE cuda_target)
# variable will have been defined.
CUDA_LINK_SEPARABLE_COMPILATION_OBJECTS("${link_file}" ${cuda_target} "${_options}" "${${cuda_target}_SEPARABLE_COMPILATION_OBJECTS}")
target_link_libraries(${cuda_target}
target_link_libraries(${cuda_target} LINK_PRIVATE
${CUDA_LIBRARIES}
)
@ -1672,9 +1672,9 @@ endmacro()
###############################################################################
macro(CUDA_ADD_CUFFT_TO_TARGET target)
if (CUDA_BUILD_EMULATION)
target_link_libraries(${target} ${CUDA_cufftemu_LIBRARY})
target_link_libraries(${target} LINK_PRIVATE ${CUDA_cufftemu_LIBRARY})
else()
target_link_libraries(${target} ${CUDA_cufft_LIBRARY})
target_link_libraries(${target} LINK_PRIVATE ${CUDA_cufft_LIBRARY})
endif()
endmacro()
@ -1685,9 +1685,9 @@ endmacro()
###############################################################################
macro(CUDA_ADD_CUBLAS_TO_TARGET target)
if (CUDA_BUILD_EMULATION)
target_link_libraries(${target} ${CUDA_cublasemu_LIBRARY})
target_link_libraries(${target} LINK_PRIVATE ${CUDA_cublasemu_LIBRARY})
else()
target_link_libraries(${target} ${CUDA_cublas_LIBRARY})
target_link_libraries(${target} LINK_PRIVATE ${CUDA_cublas_LIBRARY})
endif()
endmacro()

@ -18,9 +18,9 @@ if(ENABLE_CCACHE AND NOT CMAKE_COMPILER_IS_CCACHE)
message(STATUS "Unable to compile program with enabled ccache, reverting...")
set_property(GLOBAL PROPERTY RULE_LAUNCH_COMPILE "${__OLD_RULE_LAUNCH_COMPILE}")
endif()
else()
message(STATUS "Looking for ccache - not found")
endif()
else()
message(STATUS "Looking for ccache - not found")
endif()
endif()
@ -164,7 +164,7 @@ if(CMAKE_COMPILER_IS_GNUCXX)
# Other optimizations
if(ENABLE_OMIT_FRAME_POINTER)
add_extra_compiler_option(-fomit-frame-pointer)
else()
elseif(DEFINED ENABLE_OMIT_FRAME_POINTER)
add_extra_compiler_option(-fno-omit-frame-pointer)
endif()
if(ENABLE_FAST_MATH)
@ -246,7 +246,10 @@ if(CMAKE_COMPILER_IS_GNUCXX)
endif()
set(OPENCV_EXTRA_FLAGS_RELEASE "${OPENCV_EXTRA_FLAGS_RELEASE} -DNDEBUG")
set(OPENCV_EXTRA_FLAGS_DEBUG "${OPENCV_EXTRA_FLAGS_DEBUG} -O0 -DDEBUG -D_DEBUG")
if(NOT " ${CMAKE_CXX_FLAGS} ${CMAKE_CXX_FLAGS_DEBUG} " MATCHES "-O")
set(OPENCV_EXTRA_FLAGS_DEBUG "${OPENCV_EXTRA_FLAGS_DEBUG} -O0")
endif()
set(OPENCV_EXTRA_FLAGS_DEBUG "${OPENCV_EXTRA_FLAGS_DEBUG} -DDEBUG -D_DEBUG")
endif()
if(MSVC)

@ -61,7 +61,9 @@ if(NOT DEFINED OpenCV_CUDA)
endif()
endif()
if(MSVC)
if(DEFINED OpenCV_ARCH AND DEFINED OpenCV_RUNTIME)
# custom overrided values
elseif(MSVC)
if(CMAKE_CL_64)
set(OpenCV_ARCH x64)
set(OpenCV_TBB_ARCH intel64)
@ -81,7 +83,7 @@ if(MSVC)
set(OpenCV_RUNTIME vc12)
elseif(MSVC_VERSION EQUAL 1900)
set(OpenCV_RUNTIME vc14)
elseif(MSVC_VERSION EQUAL 1910)
elseif(MSVC_VERSION MATCHES "^191[0-9]$")
set(OpenCV_RUNTIME vc15)
endif()
elseif(MINGW)

@ -302,16 +302,15 @@ macro(add_android_project target path)
endif()
add_library(${JNI_LIB_NAME} SHARED ${android_proj_jni_files})
target_link_libraries(${JNI_LIB_NAME} ${OPENCV_LINKER_LIBS} ${android_proj_NATIVE_DEPS})
target_link_libraries(${JNI_LIB_NAME} LINK_PRIVATE ${OPENCV_LINKER_LIBS} ${android_proj_NATIVE_DEPS})
set_target_properties(${JNI_LIB_NAME} PROPERTIES
OUTPUT_NAME "${JNI_LIB_NAME}"
LIBRARY_OUTPUT_DIRECTORY "${android_proj_bin_dir}/libs/${ANDROID_NDK_ABI_NAME}"
)
get_target_property(android_proj_jni_location "${JNI_LIB_NAME}" LOCATION)
if (NOT (CMAKE_BUILD_TYPE MATCHES "debug"))
add_custom_command(TARGET ${JNI_LIB_NAME} POST_BUILD COMMAND ${CMAKE_STRIP} --strip-unneeded "${android_proj_jni_location}")
add_custom_command(TARGET ${JNI_LIB_NAME} POST_BUILD COMMAND ${CMAKE_STRIP} --strip-unneeded "$<TARGET_FILE:${JNI_LIB_NAME}>")
endif()
endif()

@ -212,7 +212,7 @@ if(CUDA_FOUND)
endif()
# disabled because of multiple warnings during building nvcc auto generated files
if(CMAKE_COMPILER_IS_GNUCXX AND CMAKE_GCC_REGEX_VERSION VERSION_GREATER "4.6.0")
if(CMAKE_COMPILER_IS_GNUCXX AND CMAKE_CXX_COMPILER_VERSION VERSION_GREATER "4.6.0")
ocv_warnings_disable(CMAKE_CXX_FLAGS -Wunused-but-set-variable)
endif()

@ -42,49 +42,11 @@ if(MSVC AND CMAKE_C_COMPILER MATCHES "icc|icl")
set(CV_ICC __INTEL_COMPILER_FOR_WINDOWS)
endif()
# ----------------------------------------------------------------------------
# Detect GNU version:
# ----------------------------------------------------------------------------
if(CMAKE_COMPILER_IS_CLANGCXX)
set(CMAKE_GCC_REGEX_VERSION "4.2.1")
set(CMAKE_OPENCV_GCC_VERSION_MAJOR 4)
set(CMAKE_OPENCV_GCC_VERSION_MINOR 2)
set(CMAKE_OPENCV_GCC_VERSION 42)
set(CMAKE_OPENCV_GCC_VERSION_NUM 402)
execute_process(COMMAND ${CMAKE_CXX_COMPILER} ${CMAKE_CXX_COMPILER_ARG1} -v
ERROR_VARIABLE CMAKE_OPENCV_CLANG_VERSION_FULL
ERROR_STRIP_TRAILING_WHITESPACE)
string(REGEX MATCH "version.*$" CMAKE_OPENCV_CLANG_VERSION_FULL "${CMAKE_OPENCV_CLANG_VERSION_FULL}")
string(REGEX MATCH "[0-9]+\\.[0-9]+" CMAKE_CLANG_REGEX_VERSION "${CMAKE_OPENCV_CLANG_VERSION_FULL}")
elseif(CMAKE_COMPILER_IS_GNUCXX)
execute_process(COMMAND ${CMAKE_CXX_COMPILER} ${CMAKE_CXX_COMPILER_ARG1} -dumpversion
OUTPUT_VARIABLE CMAKE_OPENCV_GCC_VERSION_FULL
OUTPUT_STRIP_TRAILING_WHITESPACE)
execute_process(COMMAND ${CMAKE_CXX_COMPILER} ${CMAKE_CXX_COMPILER_ARG1} -v
ERROR_VARIABLE CMAKE_OPENCV_GCC_INFO_FULL
OUTPUT_STRIP_TRAILING_WHITESPACE)
# Typical output in CMAKE_OPENCV_GCC_VERSION_FULL: "c+//0 (whatever) 4.2.3 (...)"
# Look for the version number
string(REGEX MATCH "[0-9]+\\.[0-9]+\\.[0-9]+" CMAKE_GCC_REGEX_VERSION "${CMAKE_OPENCV_GCC_VERSION_FULL}")
if(NOT CMAKE_GCC_REGEX_VERSION)
string(REGEX MATCH "[0-9]+\\.[0-9]+" CMAKE_GCC_REGEX_VERSION "${CMAKE_OPENCV_GCC_VERSION_FULL}")
endif()
# Split the three parts:
string(REGEX MATCHALL "[0-9]+" CMAKE_OPENCV_GCC_VERSIONS "${CMAKE_GCC_REGEX_VERSION}")
list(GET CMAKE_OPENCV_GCC_VERSIONS 0 CMAKE_OPENCV_GCC_VERSION_MAJOR)
list(GET CMAKE_OPENCV_GCC_VERSIONS 1 CMAKE_OPENCV_GCC_VERSION_MINOR)
set(CMAKE_OPENCV_GCC_VERSION ${CMAKE_OPENCV_GCC_VERSION_MAJOR}${CMAKE_OPENCV_GCC_VERSION_MINOR})
math(EXPR CMAKE_OPENCV_GCC_VERSION_NUM "${CMAKE_OPENCV_GCC_VERSION_MAJOR}*100 + ${CMAKE_OPENCV_GCC_VERSION_MINOR}")
message(STATUS "Detected version of GNU GCC: ${CMAKE_OPENCV_GCC_VERSION} (${CMAKE_OPENCV_GCC_VERSION_NUM})")
if(NOT DEFINED CMAKE_CXX_COMPILER_VERSION)
message(WARNING "Compiler version is not available: CMAKE_CXX_COMPILER_VERSION is not set")
endif()
if(CMAKE_COMPILER_IS_GNUCXX)
if(WIN32)
execute_process(COMMAND ${CMAKE_CXX_COMPILER} -dumpmachine
OUTPUT_VARIABLE OPENCV_GCC_TARGET_MACHINE
@ -120,7 +82,9 @@ if(NOT DEFINED OpenCV_STATIC)
endif()
endif()
if(MSVC)
if(DEFINED OpenCV_ARCH AND DEFINED OpenCV_RUNTIME)
# custom overrided values
elseif(MSVC)
if(CMAKE_CL_64)
set(OpenCV_ARCH x64)
else()
@ -138,7 +102,7 @@ if(MSVC)
set(OpenCV_RUNTIME vc12)
elseif(MSVC_VERSION EQUAL 1900)
set(OpenCV_RUNTIME vc14)
elseif(MSVC_VERSION EQUAL 1910)
elseif(MSVC_VERSION MATCHES "^191[0-9]$")
set(OpenCV_RUNTIME vc15)
endif()
elseif(MINGW)

@ -84,23 +84,16 @@ if(WITH_PVAPI)
set(PVAPI_SDK_SUBDIR arm)
endif()
get_filename_component(_PVAPI_LIBRARY "${PVAPI_INCLUDE_PATH}/../lib-pc" ABSOLUTE)
if(PVAPI_SDK_SUBDIR)
set(_PVAPI_LIBRARY "${_PVAPI_LIBRARY}/${PVAPI_SDK_SUBDIR}")
endif()
if(NOT WIN32 AND CMAKE_COMPILER_IS_GNUCXX)
set(_PVAPI_LIBRARY "${_PVAPI_LIBRARY}/${CMAKE_OPENCV_GCC_VERSION_MAJOR}.${CMAKE_OPENCV_GCC_VERSION_MINOR}")
endif()
get_filename_component(_PVAPI_LIBRARY_HINT "${PVAPI_INCLUDE_PATH}/../lib-pc" ABSOLUTE)
if(WIN32)
if(MINGW)
set(PVAPI_DEFINITIONS "-DPVDECL=__stdcall")
endif(MINGW)
set(PVAPI_LIBRARY "${_PVAPI_LIBRARY}/PvAPI.lib" CACHE PATH "The PvAPI library")
else(WIN32)
set(PVAPI_LIBRARY "${_PVAPI_LIBRARY}/${CMAKE_STATIC_LIBRARY_PREFIX}PvAPI${CMAKE_STATIC_LIBRARY_SUFFIX}" CACHE PATH "The PvAPI library")
endif(WIN32)
if(EXISTS "${PVAPI_LIBRARY}")
find_library(PVAPI_LIBRARY NAMES "PvAPI" PATHS "${_PVAPI_LIBRARY_HINT}")
if(PVAPI_LIBRARY)
if(WIN32)
if(MINGW)
set(PVAPI_DEFINITIONS "-DPVDECL=__stdcall")
endif(MINGW)
endif()
set(HAVE_PVAPI TRUE)
endif()
endif(PVAPI_INCLUDE_PATH)
@ -213,6 +206,7 @@ if(WITH_FFMPEG)
if(NOT __VALID_FFMPEG)
#message(FATAL_ERROR "FFMPEG: test check build log:\n${TRY_OUT}")
message(STATUS "WARNING: Can't build ffmpeg test code")
set(HAVE_FFMPEG FALSE)
else()
ocv_append_build_options(HIGHGUI FFMPEG)
endif()

@ -46,16 +46,16 @@ endif()
set(OpenCV_LIB_COMPONENTS_)
foreach(CVLib ${OpenCV_LIB_COMPONENTS})
get_target_property(libloc ${CVLib} LOCATION_${CMAKE_BUILD_TYPE})
if(libloc MATCHES "3rdparty")
set(libpath "\${exec_prefix}/share/OpenCV/3rdparty/${OPENCV_LIB_INSTALL_PATH}")
else()
set(libpath "\${exec_prefix}/${OPENCV_LIB_INSTALL_PATH}")
get_target_property(libname ${CVLib} OUTPUT_NAME_${CMAKE_BUILD_TYPE})
if(NOT libname)
get_target_property(libname ${CVLib} OUTPUT_NAME)
endif()
if(NOT libname)
set(libname "${CVLib}")
endif()
list(APPEND OpenCV_LIB_COMPONENTS_ "-L${libpath}")
get_filename_component(libname ${CVLib} NAME_WE)
string(REGEX REPLACE "^lib" "" libname "${libname}")
set(libpath "\${exec_prefix}/${OPENCV_LIB_INSTALL_PATH}")
list(APPEND OpenCV_LIB_COMPONENTS_ "-L${libpath}")
list(APPEND OpenCV_LIB_COMPONENTS_ "-l${libname}")
endforeach()
@ -66,7 +66,7 @@ if(OpenCV_EXTRA_COMPONENTS)
if(TARGET "${extra_component}")
get_target_property(extra_component_is_imported "${extra_component}" IMPORTED)
if(extra_component_is_imported)
get_target_property(extra_component "${extra_component}" LOCATION)
get_target_property(extra_component "${extra_component}" IMPORTED_LOCATION)
endif()
endif()
@ -74,8 +74,8 @@ if(OpenCV_EXTRA_COMPONENTS)
list(APPEND OpenCV_LIB_COMPONENTS_ "${extra_component}")
elseif(extra_component MATCHES "[\\/]")
get_filename_component(libdir "${extra_component}" PATH)
get_filename_component(libname "${extra_component}" NAME_WE)
string(REGEX REPLACE "^lib" "" libname "${libname}")
get_filename_component(libname "${extra_component}" NAME)
ocv_get_libname(libname "${libname}")
list(APPEND OpenCV_LIB_COMPONENTS_ "-L${libdir}" "-l${libname}")
else()
list(APPEND OpenCV_LIB_COMPONENTS_ "-l${extra_component}")

@ -170,6 +170,10 @@ macro(ocv_add_module _name)
return() # extra protection from redefinition
endif()
project(${the_module})
add_definitions(
-D_USE_MATH_DEFINES # M_PI constant in MSVS
-D__STDC_CONSTANT_MACROS -D__STDC_LIMIT_MACROS -D__STDC_FORMAT_MACROS # to use C libraries from C++ code (ffmpeg)
)
endif(OPENCV_INITIAL_PASS)
endmacro()
@ -574,12 +578,12 @@ macro(ocv_create_module)
${${the_module}_pch})
if(NOT "${ARGN}" STREQUAL "SKIP_LINK")
target_link_libraries(${the_module} ${OPENCV_MODULE_${the_module}_DEPS})
target_link_libraries(${the_module} LINK_INTERFACE_LIBRARIES ${OPENCV_MODULE_${the_module}_DEPS})
target_link_libraries(${the_module} LINK_PUBLIC ${OPENCV_MODULE_${the_module}_DEPS})
target_link_libraries(${the_module} LINK_PUBLIC ${OPENCV_MODULE_${the_module}_DEPS})
set(extra_deps ${OPENCV_MODULE_${the_module}_DEPS_EXT} ${OPENCV_LINKER_LIBS} ${IPP_LIBS} ${ARGN})
ocv_extract_simple_libs(extra_deps _simple_deps _other_deps)
target_link_libraries(${the_module} LINK_INTERFACE_LIBRARIES ${_simple_deps}) # this list goes to "export"
target_link_libraries(${the_module} ${extra_deps})
target_link_libraries(${the_module} LINK_PRIVATE ${_simple_deps}) # this list goes to "export"
target_link_libraries(${the_module} LINK_PRIVATE ${extra_deps})
endif()
add_dependencies(opencv_modules ${the_module})

@ -69,6 +69,9 @@ MACRO(_PCH_GET_COMPILE_FLAGS _out_compile_flags)
FOREACH(item ${DIRINC})
if(item MATCHES "^${OpenCV_SOURCE_DIR}/modules/")
LIST(APPEND ${_out_compile_flags} "${_PCH_include_prefix}\"${item}\"")
elseif(CMAKE_COMPILER_IS_GNUCXX AND NOT CMAKE_CXX_COMPILER_VERSION VERSION_LESS "6.0" AND
item MATCHES "/usr/include$")
# workaround for GCC 6.x bug
else()
LIST(APPEND ${_out_compile_flags} "${_PCH_isystem_prefix}\"${item}\"")
endif()
@ -78,6 +81,9 @@ MACRO(_PCH_GET_COMPILE_FLAGS _out_compile_flags)
FOREACH(item ${DIRINC})
if(item MATCHES "^${OpenCV_SOURCE_DIR}/modules/")
LIST(APPEND ${_out_compile_flags} "${_PCH_include_prefix}\"${item}\"")
elseif(CMAKE_COMPILER_IS_GNUCXX AND NOT CMAKE_CXX_COMPILER_VERSION VERSION_LESS "6.0" AND
item MATCHES "/usr/include$")
# workaround for GCC 6.x bug
else()
LIST(APPEND ${_out_compile_flags} "${_PCH_isystem_prefix}\"${item}\"")
endif()

@ -124,7 +124,7 @@ set(STD_OPENCV_DEV libopencv-dev)
set(ABI_VERSION_SUFFIX "")
if(CMAKE_COMPILER_IS_GNUCXX)
if(${CMAKE_OPENCV_GCC_VERSION_MAJOR} EQUAL 5)
if(NOT CMAKE_CXX_COMPILER_VERSION VERSION_LESS 5 AND CMAKE_CXX_COMPILER_VERSION VERSION_LESS 6)
set(ABI_VERSION_SUFFIX "v5")
endif()
endif()

@ -5,8 +5,8 @@ if(UNIX)
find_package(PkgConfig)
endif()
# Search packages for host system instead of packages for target system
# in case of cross compilation thess macro should be defined by toolchain file
# Search packages for the host system instead of packages for target system
# in case of cross compilation these macros should be defined by the toolchain file
if(NOT COMMAND find_host_package)
macro(find_host_package)
find_package(${ARGN})
@ -82,7 +82,7 @@ macro(ocv_check_environment_variables)
endforeach()
endmacro()
# adds include directories in such way that directories from the OpenCV source tree go first
# adds include directories in such a way that directories from the OpenCV source tree go first
function(ocv_include_directories)
set(__add_before "")
foreach(dir ${ARGN})
@ -337,7 +337,7 @@ macro(ocv_check_modules define)
endmacro()
# Macros that checks if module have been installed.
# Macro that checks if module has been installed.
# After it adds module to build and define
# constants passed as second arg
macro(CHECK_MODULE module_name define)
@ -526,7 +526,7 @@ macro(ocv_list_add_suffix LST SUFFIX)
endmacro()
# gets and removes the first element from list
# gets and removes the first element from the list
macro(ocv_list_pop_front LST VAR)
if(${LST})
list(GET ${LST} 0 ${VAR})
@ -749,6 +749,15 @@ function(ocv_source_group group)
source_group(${group} FILES ${srcs})
endfunction()
macro(ocv_get_libname var_name)
get_filename_component(__libname "${ARGN}" NAME)
# libopencv_core.so.3.3 -> opencv_core
string(REGEX REPLACE "^lib(.+)\\.(a|so)(\\.[.0-9]+)?$" "\\1" __libname "${__libname}")
# MacOSX: libopencv_core.3.3.1.dylib -> opencv_core
string(REGEX REPLACE "^lib(.+[^.0-9])\\.([.0-9]+\\.)?dylib$" "\\1" __libname "${__libname}")
set(${var_name} "${__libname}")
endmacro()
# build the list of simple dependencies, that links via "-l"
# _all_libs - name of variable with input list
# _simple - name of variable with output list of simple libs

@ -14,6 +14,6 @@ endif()
set(OPENCV_SOVERSION "${OPENCV_VERSION_MAJOR}.${OPENCV_VERSION_MINOR}")
set(OPENCV_LIBVERSION "${OPENCV_VERSION_MAJOR}.${OPENCV_VERSION_MINOR}.${OPENCV_VERSION_PATCH}")
# create a dependency on version file
# we never use output of the following command but cmake will rerun automatically if the version file changes
# create a dependency on the version file
# we never use the output of the following command but cmake will rerun automatically if the version file changes
configure_file("${OPENCV_VERSION_FILE}" "${CMAKE_BINARY_DIR}/junk/version.junk" COPYONLY)

@ -1,3 +1,5 @@
#define __STDC_CONSTANT_MACROS
#include <stdlib.h>
extern "C" {

@ -219,6 +219,14 @@ foreach(__cvcomponent ${OpenCV_FIND_COMPONENTS})
string(TOUPPER "${__cvcomponent}" __cvcomponent)
set(${__cvcomponent}_FOUND 1)
endif()
# OpenCV supports Debug and Release only.
# RelWithDebInfo and MinSizeRel are mapped to Release
if(TARGET ${__cvcomponent})
set_target_properties(${__cvcomponent} PROPERTIES
MAP_IMPORTED_CONFIG_MINSIZEREL "Release"
MAP_IMPORTED_CONFIG_RELWITHDEBINFO "Release"
)
endif()
endforeach()
set(OpenCV_FIND_COMPONENTS ${OpenCV_FIND_COMPONENTS_})

@ -440,5 +440,5 @@ You may observe a runtime instance of this on the `YouTube here <https://www.you
.. raw:: html
<div align="center">
<iframe title=" Camera calibration With OpenCV - Chessboard or asymmetrical circle pattern." width="560" height="349" src="http://www.youtube.com/embed/ViPN810E0SU?rel=0&loop=1" frameborder="0" allowfullscreen align="middle"></iframe>
<iframe title=" Camera calibration With OpenCV - Chessboard or asymmetrical circle pattern." width="560" height="349" src="https://www.youtube.com/embed/ViPN810E0SU?rel=0&loop=1" frameborder="0" allowfullscreen align="middle"></iframe>
</div>

@ -275,5 +275,5 @@ You may observe a runtime instance of this on the `YouTube here <https://www.you
.. raw:: html
<div align="center">
<iframe title="File Input and Output using XML and YAML files in OpenCV" width="560" height="349" src="http://www.youtube.com/embed/A4yqVnByMMM?rel=0&loop=1" frameborder="0" allowfullscreen align="middle"></iframe>
<iframe title="File Input and Output using XML and YAML files in OpenCV" width="560" height="349" src="https://www.youtube.com/embed/A4yqVnByMMM?rel=0&loop=1" frameborder="0" allowfullscreen align="middle"></iframe>
</div>

@ -179,5 +179,5 @@ Finally, you may watch a sample run of the program on the `video posted <https:/
.. raw:: html
<div align="center">
<iframe title="How to scan images in OpenCV?" width="560" height="349" src="http://www.youtube.com/embed/fB3AN5fjgwc?rel=0&loop=1" frameborder="0" allowfullscreen align="middle"></iframe>
<iframe title="How to scan images in OpenCV?" width="560" height="349" src="https://www.youtube.com/embed/fB3AN5fjgwc?rel=0&loop=1" frameborder="0" allowfullscreen align="middle"></iframe>
</div>

@ -125,5 +125,5 @@ You may observe a runtime instance of this on the `YouTube here <https://www.you
.. raw:: html
<div align="center">
<iframe title="Interoperability with OpenCV 1" width="560" height="349" src="http://www.youtube.com/embed/qckm-zvo31w?rel=0&loop=1" frameborder="0" allowfullscreen align="middle"></iframe>
<iframe title="Interoperability with OpenCV 1" width="560" height="349" src="https://www.youtube.com/embed/qckm-zvo31w?rel=0&loop=1" frameborder="0" allowfullscreen align="middle"></iframe>
</div>

@ -128,7 +128,7 @@ For example:
You can download this source code from :download:`here <../../../../samples/cpp/tutorial_code/core/mat_mask_operations/mat_mask_operations.cpp>` or look in the OpenCV source code libraries sample directory at :file:`samples/cpp/tutorial_code/core/mat_mask_operations/mat_mask_operations.cpp`.
Check out an instance of running the program on our `YouTube channel <http://www.youtube.com/watch?v=7PF1tAU9se4>`_ .
Check out an instance of running the program on our `YouTube channel <https://www.youtube.com/watch?v=7PF1tAU9se4>`_ .
.. raw:: html

@ -307,5 +307,5 @@ You can also find a quick video demonstration of this on `YouTube <https://www.y
.. raw:: html
<div align="center">
<iframe title="Install OpenCV by using its source files - Part 1" width="560" height="349" src="http://www.youtube.com/embed/1tibU7vGWpk?rel=0&loop=1" frameborder="0" allowfullscreen align="middle"></iframe>
<iframe title="Install OpenCV by using its source files - Part 1" width="560" height="349" src="https://www.youtube.com/embed/1tibU7vGWpk?rel=0&loop=1" frameborder="0" allowfullscreen align="middle"></iframe>
</div>

@ -144,5 +144,5 @@ In both cases we managed a performance increase of almost 100% compared to the C
.. raw:: html
<div align="center">
<iframe title="Similarity check (PNSR and SSIM) on the GPU" width="560" height="349" src="http://www.youtube.com/embed/3_ESXmFlnvY?rel=0&loop=1" frameborder="0" allowfullscreen align="middle"></iframe>
<iframe title="Similarity check (PNSR and SSIM) on the GPU" width="560" height="349" src="https://www.youtube.com/embed/3_ESXmFlnvY?rel=0&loop=1" frameborder="0" allowfullscreen align="middle"></iframe>
</div>

@ -111,7 +111,7 @@ Then the PSNR is expressed as:
PSNR = 10 \cdot \log_{10} \left( \frac{MAX_I^2}{MSE} \right)
Here the :math:`MAX_I^2` is the maximum valid value for a pixel. In case of the simple single byte image per pixel per channel this is 255. When two images are the same the MSE will give zero, resulting in an invalid divide by zero operation in the PSNR formula. In this case the PSNR is undefined and as we'll need to handle this case separately. The transition to a logarithmic scale is made because the pixel values have a very wide dynamic range. All this translated to OpenCV and a C++ function looks like:
Here the :math:`MAX_I` is the maximum valid value for a pixel. In case of the simple single byte image per pixel per channel this is 255. When two images are the same the MSE will give zero, resulting in an invalid divide by zero operation in the PSNR formula. In this case the PSNR is undefined and as we'll need to handle this case separately. The transition to a logarithmic scale is made because the pixel values have a very wide dynamic range. All this translated to OpenCV and a C++ function looks like:
.. code-block:: cpp
@ -212,5 +212,5 @@ You may observe a runtime instance of this on the `YouTube here <https://www.you
.. raw:: html
<div align="center">
<iframe title="Video Input with OpenCV (Plus PSNR and MSSIM)" width="560" height="349" src="http://www.youtube.com/embed/iOcNljutOgg?rel=0&loop=1" frameborder="0" allowfullscreen align="middle"></iframe>
<iframe title="Video Input with OpenCV (Plus PSNR and MSSIM)" width="560" height="349" src="https://www.youtube.com/embed/iOcNljutOgg?rel=0&loop=1" frameborder="0" allowfullscreen align="middle"></iframe>
</div>

@ -132,5 +132,5 @@ You may observe a runtime instance of this on the `YouTube here <https://www.you
.. raw:: html
<div align="center">
<iframe title="Creating a video with OpenCV" width="560" height="349" src="http://www.youtube.com/embed/jpBwHxsl1_0?rel=0&loop=1" frameborder="0" allowfullscreen align="middle"></iframe>
<iframe title="Creating a video with OpenCV" width="560" height="349" src="https://www.youtube.com/embed/jpBwHxsl1_0?rel=0&loop=1" frameborder="0" allowfullscreen align="middle"></iframe>
</div>

@ -81,7 +81,7 @@ Gaussian Filter
G_{0}(x, y) = A e^{ \dfrac{ -(x - \mu_{x})^{2} }{ 2\sigma^{2}_{x} } + \dfrac{ -(y - \mu_{y})^{2} }{ 2\sigma^{2}_{y} } }
where :math:`\mu` is the mean (the peak) and :math:`\sigma` represents the variance (per each of the variables :math:`x` and :math:`y`)
where :math:`\mu` is the mean (the peak) and :math:`\sigma` represents the standard deviation (per each of the variables :math:`x` and :math:`y`)
Median Filter

@ -129,5 +129,5 @@ Result
.. raw:: html
<div align="center">
<iframe title="Introduction - Display an Image" width="560" height="349" src="http://www.youtube.com/embed/1OJEqpuaGc4?rel=0&loop=1" frameborder="0" allowfullscreen align="middle"></iframe>
<iframe title="Introduction - Display an Image" width="560" height="349" src="https://www.youtube.com/embed/1OJEqpuaGc4?rel=0&loop=1" frameborder="0" allowfullscreen align="middle"></iframe>
</div>

@ -358,7 +358,7 @@ Now here's our recommendation for the structure of the tutorial (although, remem
.. raw:: html
<div align="center">
<iframe title="Creating a video with OpenCV" width="560" height="349" src="http://www.youtube.com/embed/jpBwHxsl1_0?rel=0&loop=1" frameborder="0" allowfullscreen align="middle"></iframe>
<iframe title="Creating a video with OpenCV" width="560" height="349" src="https://www.youtube.com/embed/jpBwHxsl1_0?rel=0&loop=1" frameborder="0" allowfullscreen align="middle"></iframe>
</div>
This results in the text and video: You may observe a runtime instance of this on the `YouTube here <https://www.youtube.com/watch?v=jpBwHxsl1_0>`_.
@ -366,7 +366,7 @@ Now here's our recommendation for the structure of the tutorial (although, remem
.. raw:: html
<div align="center">
<iframe title="Creating a video with OpenCV" width="560" height="349" src="http://www.youtube.com/embed/jpBwHxsl1_0?rel=0&loop=1" frameborder="0" allowfullscreen align="middle"></iframe>
<iframe title="Creating a video with OpenCV" width="560" height="349" src="https://www.youtube.com/embed/jpBwHxsl1_0?rel=0&loop=1" frameborder="0" allowfullscreen align="middle"></iframe>
</div>
When these aren't self-explanatory make sure to throw in a few guiding lines about what and why we can see.

@ -81,4 +81,6 @@ Building OpenCV from Source Using CMake, Using the Command Line
.. note::
Use ``cmake -DCMAKE_BUILD_TYPE=RELEASE -DCMAKE_INSTALL_PREFIX=/usr/local ..`` , without spaces after -D if step 2 do not work.
If the size of the created library is a critical issue (like in case of an Android build) you can use the ``install/strip`` command to get the smallest size as possible. The *stripped* version appears to be twice as small. However, we do not recommend using this unless those extra megabytes do really matter.

@ -40,8 +40,8 @@ You may find the content of this tutorial also inside the following videos: `Par
.. raw:: html
<div align="center">
<iframe title="Install OpenCV by using its source files - Part 1" width="560" height="349" src="http://www.youtube.com/embed/NnovZ1cTlMs?rel=0&loop=1" frameborder="0" allowfullscreen align="middle"></iframe>
<iframe title="Install OpenCV by using its source files - Part 2" width="560" height="349" src="http://www.youtube.com/embed/qGNWMcfWwPU?rel=0&loop=1" frameborder="0" allowfullscreen align="middle"></iframe>
<iframe title="Install OpenCV by using its source files - Part 1" width="560" height="349" src="https://www.youtube.com/embed/NnovZ1cTlMs?rel=0&loop=1" frameborder="0" allowfullscreen align="middle"></iframe>
<iframe title="Install OpenCV by using its source files - Part 2" width="560" height="349" src="https://www.youtube.com/embed/qGNWMcfWwPU?rel=0&loop=1" frameborder="0" allowfullscreen align="middle"></iframe>
</div>
.. warning:: These videos above are long-obsolete and contain inaccurate information. Be careful, since solutions described in those videos are no longer supported and may even break your install.

@ -120,10 +120,10 @@ After the processing we need to convert it back to UIImage.
:alt: header
:align: center
Check out an instance of running code with more Image Effects on `YouTube <http://www.youtube.com/watch?v=Ko3K_xdhJ1I>`_ .
Check out an instance of running code with more Image Effects on `YouTube <https://www.youtube.com/watch?v=Ko3K_xdhJ1I>`_ .
.. raw:: html
<div align="center">
<iframe width="560" height="350" src="http://www.youtube.com/embed/Ko3K_xdhJ1I" frameborder="0" allowfullscreen></iframe>
<iframe width="560" height="350" src="https://www.youtube.com/embed/Ko3K_xdhJ1I" frameborder="0" allowfullscreen></iframe>
</div>

@ -215,5 +215,5 @@ You may observe a runtime instance of this on the `YouTube here <https://www.you
.. raw:: html
<div align="center">
<iframe title="Support Vector Machines for Non-Linearly Separable Data" width="560" height="349" src="http://www.youtube.com/embed/vFv2yPcSo-Q?rel=0&loop=1" frameborder="0" allowfullscreen align="middle"></iframe>
<iframe title="Support Vector Machines for Non-Linearly Separable Data" width="560" height="349" src="https://www.youtube.com/embed/vFv2yPcSo-Q?rel=0&loop=1" frameborder="0" allowfullscreen align="middle"></iframe>
</div>

@ -5,8 +5,10 @@
#include <android/log.h>
#include <cctype>
#include <string>
#include <cstring>
#include <vector>
#include <algorithm>
#include <functional>
#include <opencv2/core/version.hpp>
#include "camera_activity.hpp"
#include "camera_wrapper.h"

@ -483,11 +483,11 @@ Finds the positions of internal corners of the chessboard.
:param flags: Various operation flags that can be zero or a combination of the following values:
* **CV_CALIB_CB_ADAPTIVE_THRESH** Use adaptive thresholding to convert the image to black and white, rather than a fixed threshold level (computed from the average image brightness).
* **CALIB_CB_ADAPTIVE_THRESH** Use adaptive thresholding to convert the image to black and white, rather than a fixed threshold level (computed from the average image brightness).
* **CV_CALIB_CB_NORMALIZE_IMAGE** Normalize the image gamma with :ocv:func:`equalizeHist` before applying fixed or adaptive thresholding.
* **CALIB_CB_NORMALIZE_IMAGE** Normalize the image gamma with :ocv:func:`equalizeHist` before applying fixed or adaptive thresholding.
* **CV_CALIB_CB_FILTER_QUADS** Use additional criteria (like contour area, perimeter, square-like shape) to filter out false quads extracted at the contour retrieval stage.
* **CALIB_CB_FILTER_QUADS** Use additional criteria (like contour area, perimeter, square-like shape) to filter out false quads extracted at the contour retrieval stage.
* **CALIB_CB_FAST_CHECK** Run a fast check on the image that looks for chessboard corners, and shortcut the call if none is found. This can drastically speed up the call in the degenerate condition when no chessboard is observed.

@ -864,10 +864,10 @@ Rect getValidDisparityROI( Rect roi1, Rect roi2,
int SADWindowSize )
{
int SW2 = SADWindowSize/2;
int minD = minDisparity, maxD = minDisparity + numberOfDisparities - 1;
int maxD = minDisparity + numberOfDisparities - 1;
int xmin = max(roi1.x, roi2.x + maxD) + SW2;
int xmax = min(roi1.x + roi1.width, roi2.x + roi2.width - minD) - SW2;
int xmax = min(roi1.x + roi1.width, roi2.x + roi2.width) - SW2;
int ymin = max(roi1.y, roi2.y) + SW2;
int ymax = min(roi1.y + roi1.height, roi2.y + roi2.height) - SW2;

@ -60,7 +60,7 @@ protected:
protected:
std::string combine(const std::string& _item1, const std::string& _item2);
cv::Mat mergeRectification(const cv::Mat& l, const cv::Mat& r);
static void merge4(const cv::Mat& tl, const cv::Mat& tr, const cv::Mat& bl, const cv::Mat& br, cv::Mat& merged);
};
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
@ -329,7 +329,7 @@ TEST_F(fisheyeTest, Homography)
EXPECT_MAT_NEAR(std_err, correct_std_err, 1e-12);
}
TEST_F(fisheyeTest, EtimateUncertainties)
TEST_F(fisheyeTest, EstimateUncertainties)
{
const int n_images = 34;
@ -385,11 +385,7 @@ TEST_F(fisheyeTest, EtimateUncertainties)
CV_Assert(errors.alpha == 0);
}
#ifdef HAVE_TEGRA_OPTIMIZATION
TEST_F(fisheyeTest, DISABLED_rectify)
#else
TEST_F(fisheyeTest, rectify)
#endif
TEST_F(fisheyeTest, stereoRectify)
{
const std::string folder =combine(datasets_repository_path, "calib-3_stereo_from_JY");
@ -405,20 +401,65 @@ TEST_F(fisheyeTest, rectify)
cv::fisheye::stereoRectify(K1, D1, K2, D2, calibration_size, theR, theT, R1, R2, P1, P2, Q,
cv::CALIB_ZERO_DISPARITY, requested_size, balance, fov_scale);
// Collected with these CMake flags: -DWITH_IPP=OFF -DCMAKE_BUILD_TYPE=Debug
cv::Matx33d R1_ref(
0.9992853269091279, 0.03779164101000276, -0.0007920188690205426,
-0.03778569762983931, 0.9992646472015868, 0.006511981857667881,
0.001037534936357442, -0.006477400933964018, 0.9999784831677112
);
cv::Matx33d R2_ref(
0.9994868963898833, -0.03197579751378937, -0.001868774538573449,
0.03196298186616116, 0.9994677442608699, -0.0065265589947392,
0.002076471801477729, 0.006463478587068991, 0.9999769555891836
);
cv::Matx34d P1_ref(
420.8551870450913, 0, 586.501617798451, 0,
0, 420.8551870450913, 374.7667511986098, 0,
0, 0, 1, 0
);
cv::Matx34d P2_ref(
420.8551870450913, 0, 586.501617798451, -41.77758076597302,
0, 420.8551870450913, 374.7667511986098, 0,
0, 0, 1, 0
);
cv::Matx44d Q_ref(
1, 0, 0, -586.501617798451,
0, 1, 0, -374.7667511986098,
0, 0, 0, 420.8551870450913,
0, 0, 10.07370889670733, -0
);
const double eps = 1e-10;
EXPECT_MAT_NEAR(R1_ref, R1, eps);
EXPECT_MAT_NEAR(R2_ref, R2, eps);
EXPECT_MAT_NEAR(P1_ref, P1, eps);
EXPECT_MAT_NEAR(P2_ref, P2, eps);
EXPECT_MAT_NEAR(Q_ref, Q, eps);
if (::testing::Test::HasFailure())
{
std::cout << "Actual values are:" << std::endl
<< "R1 =" << std::endl << R1 << std::endl
<< "R2 =" << std::endl << R2 << std::endl
<< "P1 =" << std::endl << P1 << std::endl
<< "P2 =" << std::endl << P2 << std::endl
<< "Q =" << std::endl << Q << std::endl;
}
#if 1 // Debug code
cv::Mat lmapx, lmapy, rmapx, rmapy;
//rewrite for fisheye
cv::fisheye::initUndistortRectifyMap(K1, D1, R1, P1, requested_size, CV_32F, lmapx, lmapy);
cv::fisheye::initUndistortRectifyMap(K2, D2, R2, P2, requested_size, CV_32F, rmapx, rmapy);
cv::Mat l, r, lundist, rundist;
cv::VideoCapture lcap(combine(folder, "left/stereo_pair_%03d.jpg")),
rcap(combine(folder, "right/stereo_pair_%03d.jpg"));
for(int i = 0;; ++i)
for (int i = 0; i < 34; ++i)
{
lcap >> l; rcap >> r;
if (l.empty() || r.empty())
break;
SCOPED_TRACE(cv::format("image %d", i));
l = imread(combine(folder, cv::format("left/stereo_pair_%03d.jpg", i)), cv::IMREAD_COLOR);
r = imread(combine(folder, cv::format("right/stereo_pair_%03d.jpg", i)), cv::IMREAD_COLOR);
ASSERT_FALSE(l.empty());
ASSERT_FALSE(r.empty());
int ndisp = 128;
cv::rectangle(l, cv::Rect(255, 0, 829, l.rows-1), CV_RGB(255, 0, 0));
@ -427,15 +468,18 @@ TEST_F(fisheyeTest, rectify)
cv::remap(l, lundist, lmapx, lmapy, cv::INTER_LINEAR);
cv::remap(r, rundist, rmapx, rmapy, cv::INTER_LINEAR);
cv::Mat rectification = mergeRectification(lundist, rundist);
for (int ii = 0; ii < lundist.rows; ii += 20)
{
cv::line(lundist, cv::Point(0, ii), cv::Point(lundist.cols, ii), cv::Scalar(0, 255, 0));
cv::line(rundist, cv::Point(0, ii), cv::Point(lundist.cols, ii), cv::Scalar(0, 255, 0));
}
cv::Mat correct = cv::imread(combine(datasets_repository_path, cv::format("rectification_AB_%03d.png", i)));
cv::Mat rectification;
merge4(l, r, lundist, rundist, rectification);
if (correct.empty())
cv::imwrite(combine(datasets_repository_path, cv::format("rectification_AB_%03d.png", i)), rectification);
else
EXPECT_MAT_NEAR(correct, rectification, 1e-10);
}
cv::imwrite(cv::format("fisheye_rectification_AB_%03d.png", i), rectification);
}
#endif
}
TEST_F(fisheyeTest, stereoCalibrate)
@ -601,17 +645,17 @@ std::string fisheyeTest::combine(const std::string& _item1, const std::string& _
return item1 + (last != '/' ? "/" : "") + item2;
}
cv::Mat fisheyeTest::mergeRectification(const cv::Mat& l, const cv::Mat& r)
void fisheyeTest::merge4(const cv::Mat& tl, const cv::Mat& tr, const cv::Mat& bl, const cv::Mat& br, cv::Mat& merged)
{
CV_Assert(l.type() == r.type() && l.size() == r.size());
cv::Mat merged(l.rows, l.cols * 2, l.type());
cv::Mat lpart = merged.colRange(0, l.cols);
cv::Mat rpart = merged.colRange(l.cols, merged.cols);
l.copyTo(lpart);
r.copyTo(rpart);
for(int i = 0; i < l.rows; i+=20)
cv::line(merged, cv::Point(0, i), cv::Point(merged.cols, i), CV_RGB(0, 255, 0));
return merged;
int type = tl.type();
cv::Size sz = tl.size();
ASSERT_EQ(type, tr.type()); ASSERT_EQ(type, bl.type()); ASSERT_EQ(type, br.type());
ASSERT_EQ(sz.width, tr.cols); ASSERT_EQ(sz.width, bl.cols); ASSERT_EQ(sz.width, br.cols);
ASSERT_EQ(sz.height, tr.rows); ASSERT_EQ(sz.height, bl.rows); ASSERT_EQ(sz.height, br.rows);
merged.create(cv::Size(sz.width * 2, sz.height * 2), type);
tl.copyTo(merged(cv::Rect(0, 0, sz.width, sz.height)));
tr.copyTo(merged(cv::Rect(sz.width, 0, sz.width, sz.height)));
bl.copyTo(merged(cv::Rect(0, sz.height, sz.width, sz.height)));
bl.copyTo(merged(cv::Rect(sz.width, sz.height, sz.width, sz.height)));
}

@ -326,9 +326,15 @@ string ERROR_PREFIXES[] = { "borderedAll",
"borderedTextureless",
"borderedDepthDiscont" }; // size of ERROR_KINDS_COUNT
string ROI_PREFIXES[] = { "roiX",
"roiY",
"roiWidth",
"roiHeight" };
const string RMS_STR = "RMS";
const string BAD_PXLS_FRACTION_STR = "BadPxlsFraction";
const string ROI_STR = "ValidDisparityROI";
class QualityEvalParams
{
@ -366,16 +372,20 @@ public:
protected:
// assumed that left image is a reference image
virtual int runStereoMatchingAlgorithm( const Mat& leftImg, const Mat& rightImg,
Mat& leftDisp, Mat& rightDisp, int caseIdx ) = 0; // return ignored border width
Rect& calcROI, Mat& leftDisp, Mat& rightDisp, int caseIdx ) = 0; // return ignored border width
int readDatasetsParams( FileStorage& fs );
virtual int readRunParams( FileStorage& fs );
void writeErrors( const string& errName, const vector<float>& errors, FileStorage* fs = 0 );
void writeROI( const Rect& calcROI, FileStorage* fs = 0 );
void readErrors( FileNode& fn, const string& errName, vector<float>& errors );
void readROI( FileNode& fn, Rect& trueROI );
int compareErrors( const vector<float>& calcErrors, const vector<float>& validErrors,
const vector<float>& eps, const string& errName );
int compareROI( const Rect& calcROI, const Rect& validROI );
int processStereoMatchingResults( FileStorage& fs, int caseIdx, bool isWrite,
const Mat& leftImg, const Mat& rightImg,
const Rect& calcROI,
const Mat& trueLeftDisp, const Mat& trueRightDisp,
const Mat& leftDisp, const Mat& rightDisp,
const QualityEvalParams& qualityEvalParams );
@ -451,6 +461,7 @@ void CV_StereoMatchingTest::run(int)
Mat rightImg = imread(datasetFullDirName + RIGHT_IMG_NAME);
Mat trueLeftDisp = imread(datasetFullDirName + TRUE_LEFT_DISP_NAME, 0);
Mat trueRightDisp = imread(datasetFullDirName + TRUE_RIGHT_DISP_NAME, 0);
Rect calcROI;
if( leftImg.empty() || rightImg.empty() || trueLeftDisp.empty() )
{
@ -473,7 +484,7 @@ void CV_StereoMatchingTest::run(int)
}
Mat leftDisp, rightDisp;
int ignBorder = max(runStereoMatchingAlgorithm(leftImg, rightImg, leftDisp, rightDisp, ci), EVAL_IGNORE_BORDER);
int ignBorder = max(runStereoMatchingAlgorithm(leftImg, rightImg, calcROI, leftDisp, rightDisp, ci), EVAL_IGNORE_BORDER);
leftDisp.convertTo( tmp, CV_32FC1 );
leftDisp = tmp;
@ -484,7 +495,7 @@ void CV_StereoMatchingTest::run(int)
tmp.release();
int tempCode = processStereoMatchingResults( resFS, ci, isWrite,
leftImg, rightImg, trueLeftDisp, trueRightDisp, leftDisp, rightDisp, QualityEvalParams(ignBorder));
leftImg, rightImg, calcROI, trueLeftDisp, trueRightDisp, leftDisp, rightDisp, QualityEvalParams(ignBorder));
code = tempCode==cvtest::TS::OK ? code : tempCode;
}
@ -538,6 +549,7 @@ void calcErrors( const Mat& leftImg, const Mat& /*rightImg*/,
int CV_StereoMatchingTest::processStereoMatchingResults( FileStorage& fs, int caseIdx, bool isWrite,
const Mat& leftImg, const Mat& rightImg,
const Rect& calcROI,
const Mat& trueLeftDisp, const Mat& trueRightDisp,
const Mat& leftDisp, const Mat& rightDisp,
const QualityEvalParams& qualityEvalParams )
@ -574,6 +586,8 @@ int CV_StereoMatchingTest::processStereoMatchingResults( FileStorage& fs, int ca
writeErrors( RMS_STR, rmss, &fs );
cvWriteComment( fs.fs, BAD_PXLS_FRACTION_STR.c_str(), 0 );
writeErrors( BAD_PXLS_FRACTION_STR, badPxlsFractions, &fs );
cvWriteComment( fs.fs, ROI_STR.c_str(), 0 );
writeROI( calcROI, &fs );
fs << "}"; // datasetName
}
else // compare
@ -583,16 +597,22 @@ int CV_StereoMatchingTest::processStereoMatchingResults( FileStorage& fs, int ca
writeErrors( RMS_STR, rmss );
ts->printf( cvtest::TS::LOG, "%s\n", BAD_PXLS_FRACTION_STR.c_str() );
writeErrors( BAD_PXLS_FRACTION_STR, badPxlsFractions );
ts->printf( cvtest::TS::LOG, "%s\n", ROI_STR.c_str() );
writeROI( calcROI );
FileNode fn = fs.getFirstTopLevelNode()[caseNames[caseIdx]];
vector<float> validRmss, validBadPxlsFractions;
Rect validROI;
readErrors( fn, RMS_STR, validRmss );
readErrors( fn, BAD_PXLS_FRACTION_STR, validBadPxlsFractions );
readROI( fn, validROI );
int tempCode = compareErrors( rmss, validRmss, rmsEps, RMS_STR );
code = tempCode==cvtest::TS::OK ? code : tempCode;
tempCode = compareErrors( badPxlsFractions, validBadPxlsFractions, fracEps, BAD_PXLS_FRACTION_STR );
code = tempCode==cvtest::TS::OK ? code : tempCode;
tempCode = compareROI( calcROI, validROI );
code = tempCode==cvtest::TS::OK ? code : tempCode;
}
return code;
}
@ -642,6 +662,24 @@ void CV_StereoMatchingTest::writeErrors( const string& errName, const vector<flo
ts->printf( cvtest::TS::LOG, "%s = %f\n", string(ERROR_PREFIXES[i]+errName).c_str(), *it );
}
void CV_StereoMatchingTest::writeROI( const Rect& calcROI, FileStorage* fs )
{
if( fs )
{
*fs << ROI_PREFIXES[0] << calcROI.x;
*fs << ROI_PREFIXES[1] << calcROI.y;
*fs << ROI_PREFIXES[2] << calcROI.width;
*fs << ROI_PREFIXES[3] << calcROI.height;
}
else
{
ts->printf( cvtest::TS::LOG, "%s = %d\n", ROI_PREFIXES[0].c_str(), calcROI.x );
ts->printf( cvtest::TS::LOG, "%s = %d\n", ROI_PREFIXES[1].c_str(), calcROI.y );
ts->printf( cvtest::TS::LOG, "%s = %d\n", ROI_PREFIXES[2].c_str(), calcROI.width );
ts->printf( cvtest::TS::LOG, "%s = %d\n", ROI_PREFIXES[3].c_str(), calcROI.height );
}
}
void CV_StereoMatchingTest::readErrors( FileNode& fn, const string& errName, vector<float>& errors )
{
errors.resize( ERROR_KINDS_COUNT );
@ -650,6 +688,14 @@ void CV_StereoMatchingTest::readErrors( FileNode& fn, const string& errName, vec
fn[ERROR_PREFIXES[i]+errName] >> *it;
}
void CV_StereoMatchingTest::readROI( FileNode& fn, Rect& validROI )
{
fn[ROI_PREFIXES[0]] >> validROI.x;
fn[ROI_PREFIXES[1]] >> validROI.y;
fn[ROI_PREFIXES[2]] >> validROI.width;
fn[ROI_PREFIXES[3]] >> validROI.height;
}
int CV_StereoMatchingTest::compareErrors( const vector<float>& calcErrors, const vector<float>& validErrors,
const vector<float>& eps, const string& errName )
{
@ -669,6 +715,26 @@ int CV_StereoMatchingTest::compareErrors( const vector<float>& calcErrors, const
return ok ? cvtest::TS::OK : cvtest::TS::FAIL_BAD_ACCURACY;
}
int CV_StereoMatchingTest::compareROI( const Rect& calcROI, const Rect& validROI )
{
int compare[4][2] = {
{ calcROI.x, validROI.x },
{ calcROI.y, validROI.y },
{ calcROI.width, validROI.width },
{ calcROI.height, validROI.height },
};
bool ok = true;
for (int i = 0; i < 4; i++)
{
if (compare[i][0] != compare[i][1])
{
ts->printf( cvtest::TS::LOG, "bad accuracy of %s (valid=%d; calc=%d)\n", ROI_PREFIXES[i].c_str(), compare[i][1], compare[i][0] );
ok = false;
}
}
return ok ? cvtest::TS::OK : cvtest::TS::FAIL_BAD_ACCURACY;
}
//----------------------------------- StereoBM test -----------------------------------------------------
class CV_StereoBMTest : public CV_StereoMatchingTest
@ -685,6 +751,7 @@ protected:
struct RunParams
{
int ndisp;
int mindisp;
int winSize;
};
vector<RunParams> caseRunParams;
@ -694,12 +761,13 @@ protected:
int code = CV_StereoMatchingTest::readRunParams( fs );
FileNode fn = fs.getFirstTopLevelNode();
assert(fn.isSeq());
for( int i = 0; i < (int)fn.size(); i+=4 )
for( int i = 0; i < (int)fn.size(); i+=5 )
{
string caseName = fn[i], datasetName = fn[i+1];
RunParams params;
string ndisp = fn[i+2]; params.ndisp = atoi(ndisp.c_str());
string winSize = fn[i+3]; params.winSize = atoi(winSize.c_str());
String ndisp = fn[i+2]; params.ndisp = atoi(ndisp.c_str());
String mindisp = fn[i+3]; params.mindisp = atoi(mindisp.c_str());
String winSize = fn[i+4]; params.winSize = atoi(winSize.c_str());
caseNames.push_back( caseName );
caseDatasets.push_back( datasetName );
caseRunParams.push_back( params );
@ -708,7 +776,7 @@ protected:
}
virtual int runStereoMatchingAlgorithm( const Mat& _leftImg, const Mat& _rightImg,
Mat& leftDisp, Mat& /*rightDisp*/, int caseIdx )
Rect& calcROI, Mat& leftDisp, Mat& /*rightDisp*/, int caseIdx )
{
RunParams params = caseRunParams[caseIdx];
assert( params.ndisp%16 == 0 );
@ -717,7 +785,21 @@ protected:
Mat rightImg; cvtColor( _rightImg, rightImg, CV_BGR2GRAY );
StereoBM bm( StereoBM::BASIC_PRESET, params.ndisp, params.winSize );
bm.state->minDisparity = params.mindisp;
Rect cROI(0, 0, _leftImg.cols, _leftImg.rows);
calcROI = getValidDisparityROI(cROI, cROI, params.mindisp, params.ndisp, params.winSize);
bm( leftImg, rightImg, leftDisp, CV_32F );
if (params.mindisp != 0)
for (int y = 0; y < leftDisp.rows; y++)
for (int x = 0; x < leftDisp.cols; x++)
{
if (leftDisp.at<float>(y, x) < params.mindisp)
leftDisp.at<float>(y, x) = -0.0625; // treat disparity < mindisp as no disparity
}
return params.winSize/2;
}
};
@ -754,7 +836,13 @@ protected:
RunParams params;
string ndisp = fn[i+2]; params.ndisp = atoi(ndisp.c_str());
string winSize = fn[i+3]; params.winSize = atoi(winSize.c_str());
string fullDP = fn[i+4]; params.fullDP = atoi(fullDP.c_str()) == 0 ? false : true;
string fullDP = fn[i+4];
int n = atoi(fullDP.c_str());
params.fullDP = n == 0 ? false : true;
if (n > 1)
continue; // OpenCV 3+ compatibility - skip tests for mode > 1
caseNames.push_back( caseName );
caseDatasets.push_back( datasetName );
caseRunParams.push_back( params );
@ -763,12 +851,16 @@ protected:
}
virtual int runStereoMatchingAlgorithm( const Mat& leftImg, const Mat& rightImg,
Mat& leftDisp, Mat& /*rightDisp*/, int caseIdx )
Rect& calcROI, Mat& leftDisp, Mat& /*rightDisp*/, int caseIdx )
{
RunParams params = caseRunParams[caseIdx];
assert( params.ndisp%16 == 0 );
StereoSGBM sgbm( 0, params.ndisp, params.winSize, 10*params.winSize*params.winSize, 40*params.winSize*params.winSize,
1, 63, 10, 100, 32, params.fullDP );
Rect cROI(0, 0, leftImg.cols, leftImg.rows);
calcROI = getValidDisparityROI(cROI, cROI, 0, params.ndisp, params.winSize);
sgbm( leftImg, rightImg, leftDisp );
assert( leftDisp.type() == CV_16SC1 );
leftDisp/=16;

@ -31,7 +31,7 @@ Aligns a buffer size to the specified number of bytes.
:param n: Alignment size that must be a power of two.
The function returns the minimum number that is greater or equal to ``sz`` and is divisible by ``n`` :
The function returns the minimum number that is greater than or equal to ``sz`` and is divisible by ``n`` :
.. math::
@ -363,7 +363,7 @@ Always returns 0 if called outside of parallel region.
.. ocv:function:: int getThreadNum()
The exact meaning of return value depends on the threading framework used by OpenCV library:
The exact meaning of the return value depends on the threading framework used by OpenCV library:
* **TBB** – Unsupported with current 4.1 TBB release. May be will be supported in future.
* **OpenMP** – The thread number, within the current team, of the calling thread.
@ -450,7 +450,7 @@ This operation is used in the simplest or most complex image processing function
setNumThreads
-----------------
OpenCV will try to set the number of threads for the next parallel region.
If ``threads == 0``, OpenCV will disable threading optimizations and run all it's
If ``threads == 0``, OpenCV will disable threading optimizations and run all its
functions sequentially. Passing ``threads < 0`` will reset threads number to system default.
This function must be called outside of parallel region.
@ -458,14 +458,14 @@ This function must be called outside of parallel region.
:param nthreads: Number of threads used by OpenCV.
OpenCV will try to run it's functions with specified threads number, but
OpenCV will try to run its functions with specified threads number, but
some behaviour differs from framework:
* **TBB** – User-defined parallel constructions will run with the same threads number,
if another does not specified. If late on user creates own scheduler, OpenCV will be use it.
if another is not specified. If late on user creates his own scheduler, OpenCV will be use it.
* **OpenMP** – No special defined behaviour.
* **Concurrency** – If ``threads == 1``, OpenCV will disable threading optimizations
and run it's functions sequentially.
and run its functions sequentially.
* **GCD** – Supports only values <= 0.
* **C=** – No special defined behaviour.

@ -203,11 +203,13 @@ cv::Affine3<T>::Affine3(const cv::Mat& data, const Vec3& t)
{
rotation(data(Rect(0, 0, 3, 3)));
translation(data(Rect(3, 0, 1, 3)));
return;
}
else
{
rotation(data);
translation(t);
}
rotation(data);
translation(t);
matrix.val[12] = matrix.val[13] = matrix.val[14] = 0;
matrix.val[15] = 1;
}

@ -407,7 +407,7 @@ public:
The class is specialized for each fundamental numerical data type supported by OpenCV.
It provides DataDepth<T>::value constant.
*/
template<typename _Tp> class DataDepth {};
template<typename _Tp> class DataDepth { public: enum { value = -1, fmt = 0 }; };
template<> class DataDepth<bool> { public: enum { value = CV_8U, fmt=(int)'u' }; };
template<> class DataDepth<uchar> { public: enum { value = CV_8U, fmt=(int)'u' }; };
@ -880,8 +880,10 @@ public:
typedef Point_<int> Point2i;
typedef Point_<int64> Point2l;
typedef Point2i Point;
typedef Size_<int> Size2i;
typedef Size_<int64> Size2l;
typedef Size_<double> Size2d;
typedef Size2i Size;
typedef Rect_<int> Rect;
@ -2711,6 +2713,7 @@ CV_EXPORTS_W void polylines(InputOutputArray img, InputArrayOfArrays pts,
//! clips the line segment by the rectangle Rect(0, 0, imgSize.width, imgSize.height)
CV_EXPORTS bool clipLine(Size imgSize, CV_IN_OUT Point& pt1, CV_IN_OUT Point& pt2);
CV_EXPORTS bool clipLine(Size2l imgSize, CV_IN_OUT Point2l& pt1, CV_IN_OUT Point2l& pt2);
//! clips the line segment by the rectangle imgRect
CV_EXPORTS_W bool clipLine(Rect imgRect, CV_OUT CV_IN_OUT Point& pt1, CV_OUT CV_IN_OUT Point& pt2);
@ -2748,6 +2751,9 @@ public:
CV_EXPORTS_W void ellipse2Poly( Point center, Size axes, int angle,
int arcStart, int arcEnd, int delta,
CV_OUT vector<Point>& pts );
CV_EXPORTS void ellipse2Poly( Point2d center, Size2d axes, int angle,
int arcStart, int arcEnd, int delta,
CV_OUT vector<Point2d>& pts );
enum
{
@ -3242,6 +3248,9 @@ public:
//! returns read-only pointer to the real buffer, stack-allocated or head-allocated
operator const _Tp* () const;
//! returns number of allocated elements
size_t getSize() const;
protected:
//! pointer to the real buffer, can point to buf if the buffer is small enough
_Tp* ptr;

@ -2581,6 +2581,9 @@ template<typename _Tp, size_t fixed_size> inline AutoBuffer<_Tp, fixed_size>::op
template<typename _Tp, size_t fixed_size> inline AutoBuffer<_Tp, fixed_size>::operator const _Tp* () const
{ return ptr; }
template<typename _Tp, size_t fixed_size> inline size_t AutoBuffer<_Tp, fixed_size>::getSize() const
{ return size; }
/////////////////////////////////// Ptr ////////////////////////////////////////

@ -50,7 +50,7 @@
#define CV_VERSION_EPOCH 2
#define CV_VERSION_MAJOR 4
#define CV_VERSION_MINOR 13
#define CV_VERSION_REVISION 1
#define CV_VERSION_REVISION 7
#define CVAUX_STR_EXP(__A) #__A
#define CVAUX_STR(__A) CVAUX_STR_EXP(__A)

@ -51,12 +51,12 @@ struct PolyEdge
//PolyEdge(int _y0, int _y1, int _x, int _dx) : y0(_y0), y1(_y1), x(_x), dx(_dx) {}
int y0, y1;
int x, dx;
int64 x, dx;
PolyEdge *next;
};
static void
CollectPolyEdges( Mat& img, const Point* v, int npts,
CollectPolyEdges( Mat& img, const Point2l* v, int npts,
vector<PolyEdge>& edges, const void* color, int line_type,
int shift, Point offset=Point() );
@ -64,11 +64,11 @@ static void
FillEdgeCollection( Mat& img, vector<PolyEdge>& edges, const void* color );
static void
PolyLine( Mat& img, const Point* v, int npts, bool closed,
PolyLine( Mat& img, const Point2l* v, int npts, bool closed,
const void* color, int thickness, int line_type, int shift );
static void
FillConvexPoly( Mat& img, const Point* v, int npts,
FillConvexPoly( Mat& img, const Point2l* v, int npts,
const void* color, int line_type, int shift );
/****************************************************************************************\
@ -77,14 +77,25 @@ FillConvexPoly( Mat& img, const Point* v, int npts,
bool clipLine( Size img_size, Point& pt1, Point& pt2 )
{
int64 x1, y1, x2, y2;
Point2l p1(pt1.x, pt1.y);
Point2l p2(pt2.x, pt2.y);
bool inside = clipLine(Size2l(img_size.width, img_size.height), p1, p2);
pt1.x = (int)p1.x;
pt1.y = (int)p1.y;
pt2.x = (int)p2.x;
pt2.y = (int)p2.y;
return inside;
}
bool clipLine( Size2l img_size, Point2l& pt1, Point2l& pt2 )
{
int c1, c2;
int64 right = img_size.width-1, bottom = img_size.height-1;
if( img_size.width <= 0 || img_size.height <= 0 )
return false;
x1 = pt1.x; y1 = pt1.y; x2 = pt2.x; y2 = pt2.y;
int64 &x1 = pt1.x, &y1 = pt1.y, &x2 = pt2.x, &y2 = pt2.y;
c1 = (x1 < 0) + (x1 > right) * 2 + (y1 < 0) * 4 + (y1 > bottom) * 8;
c2 = (x2 < 0) + (x2 > right) * 2 + (y2 < 0) * 4 + (y2 > bottom) * 8;
@ -124,11 +135,6 @@ bool clipLine( Size img_size, Point& pt1, Point& pt2 )
}
assert( (c1 & c2) != 0 || (x1 | y1 | x2 | y2) >= 0 );
pt1.x = (int)x1;
pt1.y = (int)y1;
pt2.x = (int)x2;
pt2.y = (int)y2;
}
return (c1 | c2) == 0;
@ -279,25 +285,25 @@ static const int FilterTable[] = {
};
static void
LineAA( Mat& img, Point pt1, Point pt2, const void* color )
LineAA( Mat& img, Point2l pt1, Point2l pt2, const void* color )
{
int dx, dy;
int64 dx, dy;
int ecount, scount = 0;
int slope;
int ax, ay;
int x_step, y_step;
int i, j;
int64 ax, ay;
int64 x_step, y_step;
int64 i, j;
int ep_table[9];
int cb = ((uchar*)color)[0], cg = ((uchar*)color)[1], cr = ((uchar*)color)[2], ca = ((uchar*)color)[3];
int _cb, _cg, _cr, _ca;
int nch = img.channels();
uchar* ptr = img.data;
size_t step = img.step;
Size size = img.size();
Size2l size(img.cols, img.rows);
if( !((nch == 1 || nch == 3 || nch == 4) && img.depth() == CV_8U) )
{
Line(img, pt1, pt2, color);
Line(img, Point((int)(pt1.x<<XY_SHIFT), (int)(pt1.y<<XY_SHIFT)), Point((int)(pt2.x<<XY_SHIFT), (int)(pt2.y<<XY_SHIFT)), color);
return;
}
@ -333,11 +339,11 @@ LineAA( Mat& img, Point pt1, Point pt2, const void* color )
pt1.y ^= pt2.y & j;
x_step = XY_ONE;
y_step = (int) (((int64) dy << XY_SHIFT) / (ax | 1));
y_step = (dy << XY_SHIFT) / (ax | 1);
pt2.x += XY_ONE;
ecount = (pt2.x >> XY_SHIFT) - (pt1.x >> XY_SHIFT);
ecount = (int)((pt2.x >> XY_SHIFT) - (pt1.x >> XY_SHIFT));
j = -(pt1.x & (XY_ONE - 1));
pt1.y += (int) ((((int64) y_step) * j) >> XY_SHIFT) + (XY_ONE >> 1);
pt1.y += ((y_step * j) >> XY_SHIFT) + (XY_ONE >> 1);
slope = (y_step >> (XY_SHIFT - 5)) & 0x3f;
slope ^= (y_step < 0 ? 0x3f : 0);
@ -356,12 +362,12 @@ LineAA( Mat& img, Point pt1, Point pt2, const void* color )
pt2.y ^= pt1.y & i;
pt1.y ^= pt2.y & i;
x_step = (int) (((int64) dx << XY_SHIFT) / (ay | 1));
x_step = (dx << XY_SHIFT) / (ay | 1);
y_step = XY_ONE;
pt2.y += XY_ONE;
ecount = (pt2.y >> XY_SHIFT) - (pt1.y >> XY_SHIFT);
ecount = (int)((pt2.y >> XY_SHIFT) - (pt1.y >> XY_SHIFT));
j = -(pt1.y & (XY_ONE - 1));
pt1.x += (int) ((((int64) x_step) * j) >> XY_SHIFT) + (XY_ONE >> 1);
pt1.x += ((x_step * j) >> XY_SHIFT) + (XY_ONE >> 1);
slope = (x_step >> (XY_SHIFT - 5)) & 0x3f;
slope ^= (x_step < 0 ? 0x3f : 0);
@ -375,8 +381,8 @@ LineAA( Mat& img, Point pt1, Point pt2, const void* color )
/* Calc end point correction table */
{
int t0 = slope << 7;
int t1 = ((0x78 - i) | 4) * slope;
int t2 = (j | 4) * slope;
int t1 = ((0x78 - (int)i) | 4) * slope;
int t2 = ((int)j | 4) * slope;
ep_table[0] = 0;
ep_table[8] = slope;
@ -630,23 +636,25 @@ LineAA( Mat& img, Point pt1, Point pt2, const void* color )
static void
Line2( Mat& img, Point pt1, Point pt2, const void* color )
Line2( Mat& img, Point2l pt1, Point2l pt2, const void* color)
{
int dx, dy;
int64 dx, dy;
int ecount;
int ax, ay;
int i, j, x, y;
int x_step, y_step;
int64 ax, ay;
int64 i, j;
int x, y;
int64 x_step, y_step;
int cb = ((uchar*)color)[0];
int cg = ((uchar*)color)[1];
int cr = ((uchar*)color)[2];
int pix_size = (int)img.elemSize();
uchar *ptr = img.data, *tptr;
size_t step = img.step;
Size size = img.size(), sizeScaled(size.width*XY_ONE, size.height*XY_ONE);
Size size = img.size();
//assert( img && (nch == 1 || nch == 3) && img.depth() == CV_8U );
Size2l sizeScaled(((int64)size.width) << XY_SHIFT, ((int64)size.height) << XY_SHIFT);
if( !clipLine( sizeScaled, pt1, pt2 ))
return;
@ -670,8 +678,8 @@ Line2( Mat& img, Point pt1, Point pt2, const void* color )
pt1.y ^= pt2.y & j;
x_step = XY_ONE;
y_step = (int) (((int64) dy << XY_SHIFT) / (ax | 1));
ecount = (pt2.x - pt1.x) >> XY_SHIFT;
y_step = (dy << XY_SHIFT) / (ax | 1);
ecount = (int)((pt2.x - pt1.x) >> XY_SHIFT);
}
else
{
@ -684,9 +692,9 @@ Line2( Mat& img, Point pt1, Point pt2, const void* color )
pt2.y ^= pt1.y & i;
pt1.y ^= pt2.y & i;
x_step = (int) (((int64) dx << XY_SHIFT) / (ay | 1));
x_step = (dx << XY_SHIFT) / (ay | 1);
y_step = XY_ONE;
ecount = (pt2.y - pt1.y) >> XY_SHIFT;
ecount = (int)((pt2.y - pt1.y) >> XY_SHIFT);
}
pt1.x += (XY_ONE >> 1);
@ -705,8 +713,8 @@ Line2( Mat& img, Point pt1, Point pt2, const void* color )
tptr[2] = (uchar)cr; \
}
ICV_PUT_POINT((pt2.x + (XY_ONE >> 1)) >> XY_SHIFT,
(pt2.y + (XY_ONE >> 1)) >> XY_SHIFT);
ICV_PUT_POINT((int)((pt2.x + (XY_ONE >> 1)) >> XY_SHIFT),
(int)((pt2.y + (XY_ONE >> 1)) >> XY_SHIFT));
if( ax > ay )
{
@ -714,7 +722,7 @@ Line2( Mat& img, Point pt1, Point pt2, const void* color )
while( ecount >= 0 )
{
ICV_PUT_POINT(pt1.x, pt1.y >> XY_SHIFT);
ICV_PUT_POINT((int)(pt1.x), (int)(pt1.y >> XY_SHIFT));
pt1.x++;
pt1.y += y_step;
ecount--;
@ -726,7 +734,7 @@ Line2( Mat& img, Point pt1, Point pt2, const void* color )
while( ecount >= 0 )
{
ICV_PUT_POINT(pt1.x >> XY_SHIFT, pt1.y);
ICV_PUT_POINT((int)(pt1.x >> XY_SHIFT), (int)(pt1.y));
pt1.x += x_step;
pt1.y++;
ecount--;
@ -746,8 +754,8 @@ Line2( Mat& img, Point pt1, Point pt2, const void* color )
tptr[0] = (uchar)cb; \
}
ICV_PUT_POINT((pt2.x + (XY_ONE >> 1)) >> XY_SHIFT,
(pt2.y + (XY_ONE >> 1)) >> XY_SHIFT);
ICV_PUT_POINT((int)((pt2.x + (XY_ONE >> 1)) >> XY_SHIFT),
(int)((pt2.y + (XY_ONE >> 1)) >> XY_SHIFT));
if( ax > ay )
{
@ -755,7 +763,7 @@ Line2( Mat& img, Point pt1, Point pt2, const void* color )
while( ecount >= 0 )
{
ICV_PUT_POINT(pt1.x, pt1.y >> XY_SHIFT);
ICV_PUT_POINT((int)(pt1.x), (int)(pt1.y >> XY_SHIFT));
pt1.x++;
pt1.y += y_step;
ecount--;
@ -767,7 +775,7 @@ Line2( Mat& img, Point pt1, Point pt2, const void* color )
while( ecount >= 0 )
{
ICV_PUT_POINT(pt1.x >> XY_SHIFT, pt1.y);
ICV_PUT_POINT((int)(pt1.x >> XY_SHIFT), (int)(pt1.y));
pt1.x += x_step;
pt1.y++;
ecount--;
@ -788,8 +796,8 @@ Line2( Mat& img, Point pt1, Point pt2, const void* color )
tptr[j] = ((uchar*)color)[j]; \
}
ICV_PUT_POINT((pt2.x + (XY_ONE >> 1)) >> XY_SHIFT,
(pt2.y + (XY_ONE >> 1)) >> XY_SHIFT);
ICV_PUT_POINT((int)((pt2.x + (XY_ONE >> 1)) >> XY_SHIFT),
(int)((pt2.y + (XY_ONE >> 1)) >> XY_SHIFT));
if( ax > ay )
{
@ -797,7 +805,7 @@ Line2( Mat& img, Point pt1, Point pt2, const void* color )
while( ecount >= 0 )
{
ICV_PUT_POINT(pt1.x, pt1.y >> XY_SHIFT);
ICV_PUT_POINT((int)(pt1.x), (int)(pt1.y >> XY_SHIFT));
pt1.x++;
pt1.y += y_step;
ecount--;
@ -809,7 +817,7 @@ Line2( Mat& img, Point pt1, Point pt2, const void* color )
while( ecount >= 0 )
{
ICV_PUT_POINT(pt1.x >> XY_SHIFT, pt1.y);
ICV_PUT_POINT((int)(pt1.x >> XY_SHIFT), (int)(pt1.y));
pt1.x += x_step;
pt1.y++;
ecount--;
@ -917,13 +925,35 @@ sincos( int angle, float& cosval, float& sinval )
constructs polygon that represents elliptic arc.
*/
void ellipse2Poly( Point center, Size axes, int angle,
int arcStart, int arcEnd,
int delta, CV_OUT std::vector<Point>& pts )
{
vector<Point2d> _pts;
ellipse2Poly(Point2d(center.x, center.y), Size2d(axes.width, axes.height), angle,
arcStart, arcEnd, delta, _pts);
Point prevPt(INT_MIN, INT_MIN);
pts.resize(0);
for (unsigned int i = 0; i < _pts.size(); ++i)
{
Point pt;
pt.x = cvRound(_pts[i].x);
pt.y = cvRound(_pts[i].y);
if (pt != prevPt) {
pts.push_back(pt);
prevPt = pt;
}
}
// If there are no points, it's a zero-size polygon
if( pts.size() == 1 )
pts.push_back(pts[0]);
}
void ellipse2Poly( Point2d center, Size2d axes, int angle,
int arc_start, int arc_end,
int delta, vector<Point>& pts )
int delta, vector<Point2d>& pts )
{
float alpha, beta;
double size_a = axes.width, size_b = axes.height;
double cx = center.x, cy = center.y;
Point prevPt(INT_MIN,INT_MIN);
int i;
while( angle < 0 )
@ -964,15 +994,12 @@ void ellipse2Poly( Point center, Size axes, int angle,
if( angle < 0 )
angle += 360;
x = size_a * SinTable[450-angle];
y = size_b * SinTable[angle];
Point pt;
pt.x = cvRound( cx + x * alpha - y * beta );
pt.y = cvRound( cy + x * beta + y * alpha );
if( pt != prevPt ){
pts.push_back(pt);
prevPt = pt;
}
x = axes.width * SinTable[450-angle];
y = axes.height * SinTable[angle];
Point2d pt;
pt.x = center.x + x * alpha - y * beta;
pt.y = center.y + x * beta + y * alpha;
pts.push_back(pt);
}
if( pts.size() == 1 )
@ -981,16 +1008,37 @@ void ellipse2Poly( Point center, Size axes, int angle,
static void
EllipseEx( Mat& img, Point center, Size axes,
EllipseEx( Mat& img, Point2l center, Size2l axes,
int angle, int arc_start, int arc_end,
const void* color, int thickness, int line_type )
{
axes.width = std::abs(axes.width), axes.height = std::abs(axes.height);
int delta = (std::max(axes.width,axes.height)+(XY_ONE>>1))>>XY_SHIFT;
int delta = (int)((std::max(axes.width,axes.height)+(XY_ONE>>1))>>XY_SHIFT);
delta = delta < 3 ? 90 : delta < 10 ? 30 : delta < 15 ? 18 : 5;
vector<Point> v;
ellipse2Poly( center, axes, angle, arc_start, arc_end, delta, v );
vector<Point2d> _v;
ellipse2Poly( Point2d((double)center.x, (double)center.y), Size2d((double)axes.width, (double)axes.height), angle, arc_start, arc_end, delta, _v );
vector<Point2l> v;
Point2l prevPt(0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF);
v.resize(0);
for (unsigned int i = 0; i < _v.size(); ++i)
{
Point2l pt;
pt.x = (int64)cvRound(_v[i].x / XY_ONE) << XY_SHIFT;
pt.y = (int64)cvRound(_v[i].y / XY_ONE) << XY_SHIFT;
pt.x += cvRound(_v[i].x - pt.x);
pt.y += cvRound(_v[i].y - pt.y);
if (pt != prevPt) {
v.push_back(pt);
prevPt = pt;
}
}
// If there are no points, it's a zero-size polygon
if (v.size() == 1) {
v.assign(2, center);
}
if( thickness >= 0 )
PolyLine( img, &v[0], (int)v.size(), false, color, thickness, line_type, XY_SHIFT );
@ -1029,23 +1077,24 @@ EllipseEx( Mat& img, Point center, Size axes,
/* filling convex polygon. v - array of vertices, ntps - number of points */
static void
FillConvexPoly( Mat& img, const Point* v, int npts, const void* color, int line_type, int shift )
FillConvexPoly( Mat& img, const Point2l* v, int npts, const void* color, int line_type, int shift )
{
struct
{
int idx, di;
int x, dx, ye;
int64 x, dx;
int ye;
}
edge[2];
int delta = shift ? 1 << (shift - 1) : 0;
int i, y, imin = 0, left = 0, right = 1, x1, x2;
int delta = 1 << shift >> 1;
int i, y, imin = 0, left = 0, right = 1;
int edges = npts;
int xmin, xmax, ymin, ymax;
int64 xmin, xmax, ymin, ymax;
uchar* ptr = img.data;
Size size = img.size();
int pix_size = (int)img.elemSize();
Point p0;
Point2l p0;
int delta1, delta2;
if( line_type < CV_AA )
@ -1063,7 +1112,7 @@ FillConvexPoly( Mat& img, const Point* v, int npts, const void* color, int line_
for( i = 0; i < npts; i++ )
{
Point p = v[i];
Point2l p = v[i];
if( p.y < ymin )
{
ymin = p.y;
@ -1082,10 +1131,10 @@ FillConvexPoly( Mat& img, const Point* v, int npts, const void* color, int line_
if( shift == 0 )
{
Point pt0, pt1;
pt0.x = p0.x >> XY_SHIFT;
pt0.y = p0.y >> XY_SHIFT;
pt1.x = p.x >> XY_SHIFT;
pt1.y = p.y >> XY_SHIFT;
pt0.x = (int)(p0.x >> XY_SHIFT);
pt0.y = (int)(p0.y >> XY_SHIFT);
pt1.x = (int)(p.x >> XY_SHIFT);
pt1.y = (int)(p.y >> XY_SHIFT);
Line( img, pt0, pt1, color, line_type );
}
else
@ -1101,13 +1150,13 @@ FillConvexPoly( Mat& img, const Point* v, int npts, const void* color, int line_
ymin = (ymin + delta) >> shift;
ymax = (ymax + delta) >> shift;
if( npts < 3 || xmax < 0 || ymax < 0 || xmin >= size.width || ymin >= size.height )
if( npts < 3 || (int)xmax < 0 || (int)ymax < 0 || (int)xmin >= size.width || (int)ymin >= size.height )
return;
ymax = MIN( ymax, size.height - 1 );
edge[0].idx = edge[1].idx = imin;
edge[0].ye = edge[1].ye = y = ymin;
edge[0].ye = edge[1].ye = y = (int)ymin;
edge[0].di = 1;
edge[1].di = npts - 1;
@ -1115,18 +1164,19 @@ FillConvexPoly( Mat& img, const Point* v, int npts, const void* color, int line_
do
{
if( line_type < CV_AA || y < ymax || y == ymin )
if( line_type < CV_AA || y < (int)ymax || y == (int)ymin )
{
for( i = 0; i < 2; i++ )
{
if( y >= edge[i].ye )
{
int idx = edge[i].idx, di = edge[i].di;
int xs = 0, xe, ye, ty = 0;
int64 xs = 0, xe;
int ty = 0;
for(;;)
{
ty = (v[idx].y + delta) >> shift;
ty = (int)((v[idx].y + delta) >> shift);
if( ty > y || edges == 0 )
break;
xs = v[idx].x;
@ -1135,16 +1185,19 @@ FillConvexPoly( Mat& img, const Point* v, int npts, const void* color, int line_
edges--;
}
ye = ty;
xs <<= XY_SHIFT - shift;
xe = v[idx].x << (XY_SHIFT - shift);
xe = v[idx].x;
if (XY_SHIFT - shift != 0)
{
xs <<= XY_SHIFT - shift;
xe <<= XY_SHIFT - shift;
}
/* no more edges */
if( y >= ye )
if( y >= ty)
return;
edge[i].ye = ye;
edge[i].dx = ((xe - xs)*2 + (ye - y)) / (2 * (ye - y));
edge[i].ye = ty;
edge[i].dx = ((xe - xs)*2 + (ty - y)) / (2 * (ty - y));
edge[i].x = xs;
edge[i].idx = idx;
}
@ -1157,13 +1210,10 @@ FillConvexPoly( Mat& img, const Point* v, int npts, const void* color, int line_
right ^= 1;
}
x1 = edge[left].x;
x2 = edge[right].x;
if( y >= 0 )
{
int xx1 = (x1 + delta1) >> XY_SHIFT;
int xx2 = (x2 + delta2) >> XY_SHIFT;
int xx1 = (int)((edge[left].x + delta1) >> XY_SHIFT);
int xx2 = (int)((edge[right].x + delta2) >> XY_SHIFT);
if( xx2 >= 0 && xx1 < size.width )
{
@ -1175,25 +1225,22 @@ FillConvexPoly( Mat& img, const Point* v, int npts, const void* color, int line_
}
}
x1 += edge[left].dx;
x2 += edge[right].dx;
edge[left].x = x1;
edge[right].x = x2;
edge[left].x += edge[left].dx;
edge[right].x += edge[right].dx;
ptr += img.step;
}
while( ++y <= ymax );
while( ++y <= (int)ymax );
}
/******** Arbitrary polygon **********/
static void
CollectPolyEdges( Mat& img, const Point* v, int count, vector<PolyEdge>& edges,
CollectPolyEdges( Mat& img, const Point2l* v, int count, vector<PolyEdge>& edges,
const void* color, int line_type, int shift, Point offset )
{
int i, delta = offset.y + (shift ? 1 << (shift - 1) : 0);
Point pt0 = v[count-1], pt1;
int i, delta = offset.y + ((1 << shift) >> 1);
Point2l pt0 = v[count-1], pt1;
pt0.x = (pt0.x + offset.x) << (XY_SHIFT - shift);
pt0.y = (pt0.y + delta) >> shift;
@ -1201,7 +1248,7 @@ CollectPolyEdges( Mat& img, const Point* v, int count, vector<PolyEdge>& edges,
for( i = 0; i < count; i++, pt0 = pt1 )
{
Point t0, t1;
Point2l t0, t1;
PolyEdge edge;
pt1 = v[i];
@ -1213,7 +1260,7 @@ CollectPolyEdges( Mat& img, const Point* v, int count, vector<PolyEdge>& edges,
t0.y = pt0.y; t1.y = pt1.y;
t0.x = (pt0.x + (XY_ONE >> 1)) >> XY_SHIFT;
t1.x = (pt1.x + (XY_ONE >> 1)) >> XY_SHIFT;
Line( img, t0, t1, color, line_type );
Line( img, Point((int)(t0.x), (int)(t0.y)), Point((int)(t1.x), (int)(t1.y)), color, line_type );
}
else
{
@ -1228,14 +1275,14 @@ CollectPolyEdges( Mat& img, const Point* v, int count, vector<PolyEdge>& edges,
if( pt0.y < pt1.y )
{
edge.y0 = pt0.y;
edge.y1 = pt1.y;
edge.y0 = (int)(pt0.y);
edge.y1 = (int)(pt1.y);
edge.x = pt0.x;
}
else
{
edge.y0 = pt1.y;
edge.y1 = pt0.y;
edge.y0 = (int)(pt1.y);
edge.y1 = (int)(pt0.y);
edge.x = pt1.x;
}
edge.dx = (pt1.x - pt0.x) / (pt1.y - pt0.y);
@ -1261,7 +1308,8 @@ FillEdgeCollection( Mat& img, vector<PolyEdge>& edges, const void* color )
int i, y, total = (int)edges.size();
Size size = img.size();
PolyEdge* e;
int y_max = INT_MIN, x_max = INT_MIN, y_min = INT_MAX, x_min = INT_MAX;
int y_max = INT_MIN, y_min = INT_MAX;
int64 x_max = 0xFFFFFFFFFFFFFFFF, x_min = 0x7FFFFFFFFFFFFFFF;
int pix_size = (int)img.elemSize();
if( total < 2 )
@ -1273,7 +1321,7 @@ FillEdgeCollection( Mat& img, vector<PolyEdge>& edges, const void* color )
assert( e1.y0 < e1.y1 );
// Determine x-coordinate of the end of the edge.
// (This is not necessary x-coordinate of any vertex in the array.)
int x1 = e1.x + (e1.y1 - e1.y0) * e1.dx;
int64 x1 = e1.x + (e1.y1 - e1.y0) * e1.dx;
y_min = std::min( y_min, e1.y0 );
y_max = std::max( y_max, e1.y1 );
x_min = std::min( x_min, e1.x );
@ -1282,7 +1330,7 @@ FillEdgeCollection( Mat& img, vector<PolyEdge>& edges, const void* color )
x_max = std::max( x_max, x1 );
}
if( y_max < 0 || y_min >= size.height || x_max < 0 || x_min >= (size.width<<XY_SHIFT) )
if( y_max < 0 || y_min >= size.height || x_max < 0 || x_min >= ((int64)size.width<<XY_SHIFT) )
return;
std::sort( edges.begin(), edges.end(), CmpEdges() );
@ -1338,19 +1386,18 @@ FillEdgeCollection( Mat& img, vector<PolyEdge>& edges, const void* color )
{
// convert x's from fixed-point to image coordinates
uchar *timg = img.data + y * img.step;
int x1 = keep_prelast->x;
int x2 = prelast->x;
int x1, x2;
if( x1 > x2 )
if (keep_prelast->x > prelast->x)
{
int t = x1;
x1 = x2;
x2 = t;
x1 = (int)((prelast->x + XY_ONE - 1) >> XY_SHIFT);
x2 = (int)(keep_prelast->x >> XY_SHIFT);
}
else
{
x1 = (int)((keep_prelast->x + XY_ONE - 1) >> XY_SHIFT);
x2 = (int)(prelast->x >> XY_SHIFT);
}
x1 = (x1 + XY_ONE - 1) >> XY_SHIFT;
x2 = x2 >> XY_SHIFT;
// clip and draw the line
if( x1 < size.width && x2 >= 0 )
@ -1550,7 +1597,7 @@ Circle( Mat& img, Point center, int radius, const void* color, int fill )
static void
ThickLine( Mat& img, Point p0, Point p1, const void* color,
ThickLine( Mat& img, Point2l p0, Point2l p1, const void* color,
int thickness, int line_type, int flags, int shift )
{
static const double INV_XY_ONE = 1./XY_ONE;
@ -1570,7 +1617,7 @@ ThickLine( Mat& img, Point p0, Point p1, const void* color,
p0.y = (p0.y + (XY_ONE>>1)) >> XY_SHIFT;
p1.x = (p1.x + (XY_ONE>>1)) >> XY_SHIFT;
p1.y = (p1.y + (XY_ONE>>1)) >> XY_SHIFT;
Line( img, p0, p1, color, line_type );
Line( img, Point((int)(p0.x), (int)(p0.y)), Point((int)(p1.x), (int)(p1.y)), color, line_type );
}
else
Line2( img, p0, p1, color );
@ -1580,7 +1627,7 @@ ThickLine( Mat& img, Point p0, Point p1, const void* color,
}
else
{
Point pt[4], dp = Point(0,0);
Point2l pt[4], dp = Point2l(0,0);
double dx = (p0.x - p1.x)*INV_XY_ONE, dy = (p1.y - p0.y)*INV_XY_ONE;
double r = dx * dx + dy * dy;
int i, oddThickness = thickness & 1;
@ -1611,13 +1658,13 @@ ThickLine( Mat& img, Point p0, Point p1, const void* color,
if( line_type < CV_AA )
{
Point center;
center.x = (p0.x + (XY_ONE>>1)) >> XY_SHIFT;
center.y = (p0.y + (XY_ONE>>1)) >> XY_SHIFT;
center.x = (int)((p0.x + (XY_ONE>>1)) >> XY_SHIFT);
center.y = (int)((p0.y + (XY_ONE>>1)) >> XY_SHIFT);
Circle( img, center, (thickness + (XY_ONE>>1)) >> XY_SHIFT, color, 1 );
}
else
{
EllipseEx( img, p0, cvSize(thickness, thickness),
EllipseEx( img, p0, Size2l(thickness, thickness),
0, 0, 360, color, -1, line_type );
}
}
@ -1628,7 +1675,7 @@ ThickLine( Mat& img, Point p0, Point p1, const void* color,
static void
PolyLine( Mat& img, const Point* v, int count, bool is_closed,
PolyLine( Mat& img, const Point2l* v, int count, bool is_closed,
const void* color, int thickness,
int line_type, int shift )
{
@ -1637,13 +1684,13 @@ PolyLine( Mat& img, const Point* v, int count, bool is_closed,
int i = is_closed ? count - 1 : 0;
int flags = 2 + !is_closed;
Point p0;
Point2l p0;
CV_Assert( 0 <= shift && shift <= XY_SHIFT && thickness >= 0 );
p0 = v[i];
for( i = !is_closed; i < count; i++ )
{
Point p = v[i];
Point2l p = v[i];
ThickLine( img, p0, p, color, thickness, line_type, flags, shift );
p0 = p;
flags = 2;
@ -1700,7 +1747,7 @@ void rectangle( Mat& img, Point pt1, Point pt2,
double buf[4];
scalarToRawData(color, buf, img.type(), 0);
Point pt[4];
Point2l pt[4];
pt[0] = pt1;
pt[1].x = pt2.x;
@ -1741,10 +1788,12 @@ void circle( Mat& img, Point center, int radius,
if( thickness > 1 || line_type >= CV_AA || shift > 0 )
{
center.x <<= XY_SHIFT - shift;
center.y <<= XY_SHIFT - shift;
radius <<= XY_SHIFT - shift;
EllipseEx( img, center, Size(radius, radius),
Point2l _center(center.x, center.y);
int64 _radius(radius);
_center.x <<= XY_SHIFT - shift;
_center.y <<= XY_SHIFT - shift;
_radius <<= XY_SHIFT - shift;
EllipseEx( img, _center, Size2l(_radius, _radius),
0, 0, 360, buf, thickness, line_type );
}
else
@ -1768,12 +1817,14 @@ void ellipse( Mat& img, Point center, Size axes,
int _angle = cvRound(angle);
int _start_angle = cvRound(start_angle);
int _end_angle = cvRound(end_angle);
center.x <<= XY_SHIFT - shift;
center.y <<= XY_SHIFT - shift;
axes.width <<= XY_SHIFT - shift;
axes.height <<= XY_SHIFT - shift;
EllipseEx( img, center, axes, _angle, _start_angle,
Point2l _center(center.x, center.y);
Size2l _axes(axes.width, axes.height);
_center.x <<= XY_SHIFT - shift;
_center.y <<= XY_SHIFT - shift;
_axes.width <<= XY_SHIFT - shift;
_axes.height <<= XY_SHIFT - shift;
EllipseEx( img, _center, _axes, _angle, _start_angle,
_end_angle, buf, thickness, line_type );
}
@ -1790,10 +1841,14 @@ void ellipse(Mat& img, const RotatedRect& box, const Scalar& color,
scalarToRawData(color, buf, img.type(), 0);
int _angle = cvRound(box.angle);
Point center(cvRound(box.center.x*(1 << XY_SHIFT)),
cvRound(box.center.y*(1 << XY_SHIFT)));
Size axes(cvRound(box.size.width*(1 << (XY_SHIFT - 1))),
cvRound(box.size.height*(1 << (XY_SHIFT - 1))));
Point2l center(cvRound(box.center.x),
cvRound(box.center.y));
center.x = (center.x << XY_SHIFT) + cvRound((box.center.x - center.x)*XY_ONE);
center.y = (center.y << XY_SHIFT) + cvRound((box.center.y - center.y)*XY_ONE);
Size2l axes(cvRound(box.size.width),
cvRound(box.size.height));
axes.width = (axes.width << (XY_SHIFT - 1)) + cvRound((box.size.width - axes.width)*(XY_ONE>>1));
axes.height = (axes.height << (XY_SHIFT - 1)) + cvRound((box.size.height - axes.height)*(XY_ONE>>1));
EllipseEx( img, center, axes, _angle, 0, 360, buf, thickness, lineType );
}
@ -1876,7 +1931,10 @@ void fillConvexPoly( Mat& img, const Point* pts, int npts,
double buf[4];
CV_Assert( 0 <= shift && shift <= XY_SHIFT );
scalarToRawData(color, buf, img.type(), 0);
FillConvexPoly( img, pts, npts, buf, line_type, shift );
vector<Point2l> _pts;
for( int n = 0; n < npts; n++ )
_pts.push_back(Point2l(pts[n].x, pts[n].y));
FillConvexPoly( img, _pts.data(), npts, buf, line_type, shift );
}
@ -1900,7 +1958,12 @@ void fillPoly( Mat& img, const Point** pts, const int* npts, int ncontours,
edges.reserve( total + 1 );
for( i = 0; i < ncontours; i++ )
CollectPolyEdges( img, pts[i], npts[i], edges, buf, line_type, shift, offset );
{
vector<Point2l> _pts;
for( int n = 0; n < npts[i]; n++ )
_pts.push_back(Point2l(pts[i][n].x, pts[i][n].y));
CollectPolyEdges( img, _pts.data(), npts[i], edges, buf, line_type, shift, offset );
}
FillEdgeCollection(img, edges, buf);
}
@ -1920,7 +1983,12 @@ void polylines( Mat& img, const Point** pts, const int* npts, int ncontours, boo
scalarToRawData( color, buf, img.type(), 0 );
for( int i = 0; i < ncontours; i++ )
PolyLine( img, pts[i], npts[i], isClosed, buf, thickness, line_type, shift );
{
vector<Point2l> _pts;
for( int n = 0; n < npts[i]; n++ )
_pts.push_back(Point2l(pts[i][n].x, pts[i][n].y));
PolyLine( img, _pts.data(), npts[i], isClosed, buf, thickness, line_type, shift );
}
}
@ -2156,23 +2224,23 @@ void putText( Mat& img, const string& text, Point org,
if( bottomLeftOrigin )
vscale = -vscale;
int view_x = org.x << XY_SHIFT;
int view_y = (org.y << XY_SHIFT) + base_line*vscale;
vector<Point> pts;
int64 view_x = (int64)org.x << XY_SHIFT;
int64 view_y = ((int64)org.y << XY_SHIFT) + base_line*vscale;
vector<Point2l> pts;
pts.reserve(1 << 10);
const char **faces = cv::g_HersheyGlyphs;
for( int i = 0; i < (int)text.size(); i++ )
{
int c = (uchar)text[i];
Point p;
Point2l p;
readCheck(c, i, text, fontFace);
const char* ptr = faces[ascii[(c-' ')+1]];
p.x = (uchar)ptr[0] - 'R';
p.y = (uchar)ptr[1] - 'R';
int dx = p.y*hscale;
int64 dx = p.y*hscale;
view_x -= p.x*hscale;
pts.resize(0);
@ -2191,7 +2259,7 @@ void putText( Mat& img, const string& text, Point org,
p.x = (uchar)ptr[0] - 'R';
p.y = (uchar)ptr[1] - 'R';
ptr += 2;
pts.push_back(Point(p.x*hscale + view_x, p.y*vscale + view_y));
pts.push_back(Point2l(p.x*hscale + view_x, p.y*vscale + view_y));
}
}
view_x += dx;
@ -2310,7 +2378,7 @@ cvDrawContours( void* _img, CvSeq* contour,
CvSeq *contour0 = contour, *h_next = 0;
CvTreeNodeIterator iterator;
cv::vector<cv::PolyEdge> edges;
cv::vector<cv::Point> pts;
cv::vector<cv::Point2l> pts;
cv::Scalar externalColor = _externalColor, holeColor = _holeColor;
cv::Mat img = cv::cvarrToMat(_img);
cv::Point offset = _offset;
@ -2434,7 +2502,7 @@ cvEllipse2Poly( CvPoint center, CvSize axes, int angle,
int arc_start, int arc_end, CvPoint* _pts, int delta )
{
cv::vector<cv::Point> pts;
cv::ellipse2Poly( center, axes, angle, arc_start, arc_end, delta, pts );
cv::ellipse2Poly( cv::Point(center), cv::Size(axes), angle, arc_start, arc_end, delta, pts );
memcpy( _pts, &pts[0], pts.size()*sizeof(_pts[0]) );
return (int)pts.size();
}

@ -2994,6 +2994,7 @@ PCA& PCA::computeVar(InputArray _data, InputArray __mean, int flags, double reta
{
CV_Assert( _mean.size() == mean_sz );
_mean.convertTo(mean, ctype);
covar_flags |= CV_COVAR_USE_AVG;
}
calcCovarMatrix( data, covar, mean, covar_flags, ctype );

@ -279,21 +279,31 @@ Mat::Mat(const Mat& m, const Range& _rowRange, const Range& _colRange) : size(&r
}
*this = m;
if( _rowRange != Range::all() && _rowRange != Range(0,rows) )
try
{
CV_Assert( 0 <= _rowRange.start && _rowRange.start <= _rowRange.end && _rowRange.end <= m.rows );
rows = _rowRange.size();
data += step*_rowRange.start;
flags |= SUBMATRIX_FLAG;
}
if( _rowRange != Range::all() && _rowRange != Range(0,rows) )
{
CV_Assert( 0 <= _rowRange.start && _rowRange.start <= _rowRange.end
&& _rowRange.end <= m.rows );
rows = _rowRange.size();
data += step*_rowRange.start;
flags |= SUBMATRIX_FLAG;
}
if( _colRange != Range::all() && _colRange != Range(0,cols) )
if( _colRange != Range::all() && _colRange != Range(0,cols) )
{
CV_Assert( 0 <= _colRange.start && _colRange.start <= _colRange.end
&& _colRange.end <= m.cols );
cols = _colRange.size();
data += _colRange.start*elemSize();
flags &= cols < m.cols ? ~CONTINUOUS_FLAG : -1;
flags |= SUBMATRIX_FLAG;
}
}
catch(...)
{
CV_Assert( 0 <= _colRange.start && _colRange.start <= _colRange.end && _colRange.end <= m.cols );
cols = _colRange.size();
data += _colRange.start*elemSize();
flags &= cols < m.cols ? ~CONTINUOUS_FLAG : -1;
flags |= SUBMATRIX_FLAG;
release();
throw;
}
if( rows == 1 )

@ -56,7 +56,7 @@
#include <sys/types.h>
#if defined ANDROID
#include <sys/sysconf.h>
#else
#elif defined __APPLE__
#include <sys/sysctl.h>
#endif
#endif

@ -163,8 +163,6 @@ std::wstring GetTempFileNameWinRT(std::wstring prefix)
#include <sys/types.h>
#if defined ANDROID
#include <sys/sysconf.h>
#else
#include <sys/sysctl.h>
#endif
#endif

@ -164,7 +164,7 @@ void Core_EigenTest_32::run(int) { check_full(CV_32FC1); }
void Core_EigenTest_64::run(int) { check_full(CV_64FC1); }
Core_EigenTest::Core_EigenTest()
: eps_val_32(1e-3f), eps_vec_32(1e-2f),
: eps_val_32(1e-3f), eps_vec_32(2e-2f),
eps_val_64(1e-4f), eps_vec_64(1e-3f), ntests(100) {}
Core_EigenTest::~Core_EigenTest() {}

@ -272,9 +272,8 @@ void SimpleBlobDetector::findBlobs(const cv::Mat &image, const cv::Mat &binaryIm
#endif
}
void SimpleBlobDetector::detectImpl(const cv::Mat& image, std::vector<cv::KeyPoint>& keypoints, const cv::Mat&) const
void SimpleBlobDetector::detectImpl(const cv::Mat& image, std::vector<cv::KeyPoint>& keypoints, const cv::Mat& mask) const
{
//TODO: support mask
keypoints.clear();
Mat grayscaleImage;
if (image.channels() == 3)
@ -355,6 +354,11 @@ void SimpleBlobDetector::detectImpl(const cv::Mat& image, std::vector<cv::KeyPoi
keypoints.push_back(kpt);
}
if (!mask.empty())
{
KeyPointsFilter::runByPixelsMask(keypoints, mask);
}
#ifdef DEBUG_BLOB_DETECTOR
namedWindow("keypoints", CV_WINDOW_NORMAL);
Mat outImg = image.clone();

@ -132,7 +132,12 @@ public:
/* Construct the randomized trees. */
for (int i = 0; i < trees_; i++) {
/* Randomize the order of vectors to allow for unbiased sampling. */
#ifndef OPENCV_FLANN_USE_STD_RAND
cv::randShuffle(vind_);
#else
std::random_shuffle(vind_.begin(), vind_.end());
#endif
tree_roots_[i] = divideTree(&vind_[0], int(size_) );
}
}

@ -136,7 +136,12 @@ public:
indices.resize( feature_size_ * CHAR_BIT );
for (size_t j = 0; j < feature_size_ * CHAR_BIT; ++j)
indices[j] = j;
#ifndef OPENCV_FLANN_USE_STD_RAND
cv::randShuffle(indices);
#else
std::random_shuffle(indices.begin(), indices.end());
#endif
}
lsh::LshTable<ElementType>& table = tables_[i];

@ -146,6 +146,7 @@ public:
*/
LshTable()
{
feature_size_ = 0;
}
/** Default constructor
@ -156,7 +157,7 @@ public:
*/
LshTable(unsigned int feature_size, unsigned int key_size, std::vector<size_t> & indices)
{
(void)feature_size;
feature_size_ = feature_size;
(void)key_size;
(void)indices;
std::cerr << "LSH is not implemented for that type" << std::endl;
@ -335,6 +336,8 @@ private:
*/
unsigned int key_size_;
unsigned int feature_size_;
// Members only used for the unsigned char specialization
/** The mask to apply to a feature to get the hash key
* Only used in the unsigned char case
@ -350,9 +353,10 @@ inline LshTable<unsigned char>::LshTable( unsigned int feature_size,
unsigned int subsignature_size,
std::vector<size_t> & indices )
{
feature_size_ = feature_size;
initialize(subsignature_size);
// Allocate the mask
mask_ = std::vector<size_t>((size_t)ceil((float)(feature_size * sizeof(char)) / (float)sizeof(size_t)), 0);
mask_ = std::vector<size_t>((feature_size * sizeof(char) + sizeof(size_t) - 1) / sizeof(size_t), 0);
// Generate a random set of order of subsignature_size_ bits
for (unsigned int i = 0; i < key_size_; ++i) {
@ -391,6 +395,7 @@ inline size_t LshTable<unsigned char>::getKey(const unsigned char* feature) cons
{
// no need to check if T is dividable by sizeof(size_t) like in the Hamming
// distance computation as we have a mask
// FIXIT: This is bad assumption, because we reading tail bytes after of the allocated features buffer
const size_t* feature_block_ptr = reinterpret_cast<const size_t*> ((const void*)feature);
// Figure out the subsignature of the feature
@ -399,10 +404,20 @@ inline size_t LshTable<unsigned char>::getKey(const unsigned char* feature) cons
size_t subsignature = 0;
size_t bit_index = 1;
for (std::vector<size_t>::const_iterator pmask_block = mask_.begin(); pmask_block != mask_.end(); ++pmask_block) {
for (unsigned i = 0; i < feature_size_; i += sizeof(size_t)) {
// get the mask and signature blocks
size_t feature_block = *feature_block_ptr;
size_t mask_block = *pmask_block;
size_t feature_block;
if (i <= feature_size_ - sizeof(size_t))
{
feature_block = *feature_block_ptr;
}
else
{
size_t tmp = 0;
memcpy(&tmp, feature_block_ptr, feature_size_ - i); // preserve bytes order
feature_block = tmp;
}
size_t mask_block = mask_[i / sizeof(size_t)];
while (mask_block) {
// Get the lowest set bit in the mask block
size_t lowest_bit = mask_block & (-(ptrdiff_t)mask_block);

@ -40,13 +40,31 @@
namespace cvflann
{
inline int rand()
{
#ifndef OPENCV_FLANN_USE_STD_RAND
# if INT_MAX == RAND_MAX
int v = cv::theRNG().next() & INT_MAX;
# else
int v = cv::theRNG().uniform(0, RAND_MAX + 1);
# endif
#else
int v = std::rand();
#endif // OPENCV_FLANN_USE_STD_RAND
return v;
}
/**
* Seeds the random number generator
* @param seed Random seed
*/
inline void seed_random(unsigned int seed)
{
srand(seed);
#ifndef OPENCV_FLANN_USE_STD_RAND
cv::theRNG() = cv::RNG(seed);
#else
std::srand(seed);
#endif
}
/*
@ -60,7 +78,7 @@ inline void seed_random(unsigned int seed)
*/
inline double rand_double(double high = 1.0, double low = 0)
{
return low + ((high-low) * (std::rand() / (RAND_MAX + 1.0)));
return low + ((high-low) * (rand() / (RAND_MAX + 1.0)));
}
/**
@ -71,7 +89,7 @@ inline double rand_double(double high = 1.0, double low = 0)
*/
inline int rand_int(int high = RAND_MAX, int low = 0)
{
return low + (int) ( double(high-low) * (std::rand() / (RAND_MAX + 1.0)));
return low + (int) ( double(high-low) * (rand() / (RAND_MAX + 1.0)));
}
/**
@ -107,7 +125,11 @@ public:
for (int i = 0; i < size_; ++i) vals_[i] = i;
// shuffle the elements in the array
#ifndef OPENCV_FLANN_USE_STD_RAND
cv::randShuffle(vals_);
#else
std::random_shuffle(vals_.begin(), vals_.end());
#endif
counter_ = 0;
}

@ -186,9 +186,9 @@ Reduces a matrix to a vector.
:param mtx: Source 2D matrix.
:param vec: Destination vector. Its size and type is defined by ``dim`` and ``dtype`` parameters.
:param vec: Destination row vector. Its type is defined by ``dtype`` parameter.
:param dim: Dimension index along which the matrix is reduced. 0 means that the matrix is reduced to a single row. 1 means that the matrix is reduced to a single column.
:param dim: Dimension index along which the matrix is reduced. 0 means that the matrix is reduced to a single row(of length equal to number of matrix columns). 1 means that the matrix is reduced to a single column(of length equal to the number of matrix rows). In either case, the output is always stored as a row vector of appropriate length.
:param reduceOp: Reduction operation that could be one of the following:
@ -202,6 +202,6 @@ Reduces a matrix to a vector.
:param dtype: When it is negative, the destination vector will have the same type as the source matrix. Otherwise, its type will be ``CV_MAKE_TYPE(CV_MAT_DEPTH(dtype), mtx.channels())`` .
The function ``reduce`` reduces the matrix to a vector by treating the matrix rows/columns as a set of 1D vectors and performing the specified operation on the vectors until a single row/column is obtained. For example, the function can be used to compute horizontal and vertical projections of a raster image. In case of ``CV_REDUCE_SUM`` and ``CV_REDUCE_AVG`` , the output may have a larger element bit-depth to preserve accuracy. And multi-channel arrays are also supported in these two reduction modes.
The function ``reduce`` reduces the matrix to a vector by treating the matrix rows/columns as a set of 1D vectors and performing the specified operation on the vectors until a single column/row is obtained. However, the result is always stored as a row vector. For example, the function can be used to compute horizontal and vertical projections of a raster image. In case of ``CV_REDUCE_SUM`` and ``CV_REDUCE_AVG`` , the output may have a larger element bit-depth to preserve accuracy. And multi-channel arrays are also supported in these two reduction modes.
.. seealso:: :ocv:func:`reduce`

@ -435,13 +435,13 @@ CV_EXPORTS void LUT(const GpuMat& src, const Mat& lut, GpuMat& dst, Stream& stre
CV_EXPORTS void merge(const GpuMat* src, size_t n, GpuMat& dst, Stream& stream = Stream::Null());
//! makes multi-channel array out of several single-channel arrays
CV_EXPORTS void merge(const vector<GpuMat>& src, GpuMat& dst, Stream& stream = Stream::Null());
CV_EXPORTS void merge(const std::vector<GpuMat>& src, GpuMat& dst, Stream& stream = Stream::Null());
//! copies each plane of a multi-channel array to a dedicated array
CV_EXPORTS void split(const GpuMat& src, GpuMat* dst, Stream& stream = Stream::Null());
//! copies each plane of a multi-channel array to a dedicated array
CV_EXPORTS void split(const GpuMat& src, vector<GpuMat>& dst, Stream& stream = Stream::Null());
CV_EXPORTS void split(const GpuMat& src, std::vector<GpuMat>& dst, Stream& stream = Stream::Null());
//! computes magnitude of complex (x(i).re, x(i).im) vector
//! supports only CV_32FC2 type
@ -1268,9 +1268,9 @@ private:
struct CV_EXPORTS HOGConfidence
{
double scale;
vector<Point> locations;
vector<double> confidences;
vector<double> part_scores[4];
std::vector<Point> locations;
std::vector<double> confidences;
std::vector<double> part_scores[4];
};
struct CV_EXPORTS HOGDescriptor
@ -1288,27 +1288,27 @@ struct CV_EXPORTS HOGDescriptor
size_t getDescriptorSize() const;
size_t getBlockHistogramSize() const;
void setSVMDetector(const vector<float>& detector);
void setSVMDetector(const std::vector<float>& detector);
static vector<float> getDefaultPeopleDetector();
static vector<float> getPeopleDetector48x96();
static vector<float> getPeopleDetector64x128();
static std::vector<float> getDefaultPeopleDetector();
static std::vector<float> getPeopleDetector48x96();
static std::vector<float> getPeopleDetector64x128();
void detect(const GpuMat& img, vector<Point>& found_locations,
void detect(const GpuMat& img, std::vector<Point>& found_locations,
double hit_threshold=0, Size win_stride=Size(),
Size padding=Size());
void detectMultiScale(const GpuMat& img, vector<Rect>& found_locations,
void detectMultiScale(const GpuMat& img, std::vector<Rect>& found_locations,
double hit_threshold=0, Size win_stride=Size(),
Size padding=Size(), double scale0=1.05,
int group_threshold=2);
void computeConfidence(const GpuMat& img, vector<Point>& hits, double hit_threshold,
Size win_stride, Size padding, vector<Point>& locations, vector<double>& confidences);
void computeConfidence(const GpuMat& img, std::vector<Point>& hits, double hit_threshold,
Size win_stride, Size padding, std::vector<Point>& locations, std::vector<double>& confidences);
void computeConfidenceMultiScale(const GpuMat& img, vector<Rect>& found_locations,
void computeConfidenceMultiScale(const GpuMat& img, std::vector<Rect>& found_locations,
double hit_threshold, Size win_stride, Size padding,
vector<HOGConfidence> &conf_out, int group_threshold);
std::vector<HOGConfidence> &conf_out, int group_threshold);
void getDescriptors(const GpuMat& img, Size win_stride,
GpuMat& descriptors,
@ -1824,6 +1824,9 @@ public:
void sparse(const GpuMat& prevImg, const GpuMat& nextImg, const GpuMat& prevPts, GpuMat& nextPts,
GpuMat& status, GpuMat* err = 0);
void sparse_multi(const GpuMat& prevImg, const GpuMat& nextImg, const GpuMat& prevPts, GpuMat& nextPts,
GpuMat& status, Stream& stream, GpuMat* err = 0);
void dense(const GpuMat& prevImg, const GpuMat& nextImg, GpuMat& u, GpuMat& v, GpuMat* err = 0);
void releaseMemory();
@ -1838,11 +1841,11 @@ public:
private:
GpuMat uPyr_[2];
vector<GpuMat> prevPyr_;
vector<GpuMat> nextPyr_;
std::vector<GpuMat> prevPyr_;
std::vector<GpuMat> nextPyr_;
GpuMat vPyr_[2];
vector<GpuMat> buf_;
vector<GpuMat> unused;
std::vector<GpuMat> buf_;
std::vector<GpuMat> unused;
bool isDeviceArch11_;
};

@ -303,6 +303,88 @@ PERF_TEST_P(ImagePair_Gray_NPts_WinSz_Levels_Iters, Video_PyrLKOpticalFlowSparse
}
}
//////////////////////////////////////////////////////
// PyrLKOpticalFlowSparseMulti
#if defined(HAVE_TBB) && defined(HAVE_CUDA)
DEF_PARAM_TEST(ImagePair_Gray_NPts_WinSz_Levels_Iters, pair_string, bool, int, int, int, int);
PERF_TEST_P(ImagePair_Gray_NPts_WinSz_Levels_Iters, Video_PyrLKOpticalFlowSparseMulti,
Combine(Values<pair_string>(make_pair("gpu/opticalflow/frame0.png", "gpu/opticalflow/frame1.png")),
Bool(),
Values(8000),
Values(21),
Values(1, 3),
Values(1, 30)))
{
declare.time(20.0);
const pair_string imagePair = GET_PARAM(0);
const bool useGray = GET_PARAM(1);
const int points = GET_PARAM(2);
const int winSize = GET_PARAM(3);
const int levels = GET_PARAM(4);
const int iters = GET_PARAM(5);
const cv::Mat frame0 = readImage(imagePair.first, useGray ? cv::IMREAD_GRAYSCALE : cv::IMREAD_COLOR);
ASSERT_FALSE(frame0.empty());
const cv::Mat frame1 = readImage(imagePair.second, useGray ? cv::IMREAD_GRAYSCALE : cv::IMREAD_COLOR);
ASSERT_FALSE(frame1.empty());
cv::Mat gray_frame;
if (useGray)
gray_frame = frame0;
else
cv::cvtColor(frame0, gray_frame, cv::COLOR_BGR2GRAY);
cv::Mat pts;
cv::goodFeaturesToTrack(gray_frame, pts, points, 0.01, 0.0);
if (PERF_RUN_GPU())
{
const cv::gpu::GpuMat d_pts(pts.reshape(2, 1));
cv::gpu::PyrLKOpticalFlow d_pyrLK;
d_pyrLK.winSize = cv::Size(winSize, winSize);
d_pyrLK.maxLevel = levels - 1;
d_pyrLK.iters = iters;
const cv::gpu::GpuMat d_frame0(frame0);
const cv::gpu::GpuMat d_frame1(frame1);
cv::gpu::GpuMat nextPts;
cv::gpu::GpuMat status;
cv::gpu::Stream stream;
TEST_CYCLE()
{
d_pyrLK.sparse_multi(d_frame0, d_frame1, d_pts, nextPts, status, stream);
stream.waitForCompletion();
}
GPU_SANITY_CHECK(nextPts);
GPU_SANITY_CHECK(status);
}
else
{
cv::Mat nextPts;
cv::Mat status;
TEST_CYCLE()
{
cv::calcOpticalFlowPyrLK(frame0, frame1, pts, nextPts, status, cv::noArray(),
cv::Size(winSize, winSize), levels - 1,
cv::TermCriteria(cv::TermCriteria::COUNT + cv::TermCriteria::EPS, iters, 0.01));
}
CPU_SANITY_CHECK(nextPts);
CPU_SANITY_CHECK(status);
}
}
#endif // HAVE_TBB
//////////////////////////////////////////////////////
// PyrLKOpticalFlowDense

@ -49,6 +49,10 @@
#include "opencv2/gpu/device/vec_math.hpp"
#include "opencv2/gpu/device/reduce.hpp"
#include "opencv2/core/core.hpp"
#include "cvconfig.h"
using namespace cv::gpu;
using namespace cv::gpu::device;
@ -60,12 +64,54 @@ namespace pyrlk
__constant__ int c_halfWin_y;
__constant__ int c_iters;
texture<float, cudaTextureType2D, cudaReadModeElementType> tex_If(false, cudaFilterModeLinear, cudaAddressModeClamp);
texture<float4, cudaTextureType2D, cudaReadModeElementType> tex_If4(false, cudaFilterModeLinear, cudaAddressModeClamp);
texture<uchar, cudaTextureType2D, cudaReadModeElementType> tex_Ib(false, cudaFilterModePoint, cudaAddressModeClamp);
texture<float, cudaTextureType2D, cudaReadModeElementType> tex_Jf(false, cudaFilterModeLinear, cudaAddressModeClamp);
texture<float4, cudaTextureType2D, cudaReadModeElementType> tex_Jf4(false, cudaFilterModeLinear, cudaAddressModeClamp);
#define CUDA_CONSTANTS(index) \
__constant__ int c_winSize_x##index; \
__constant__ int c_winSize_y##index; \
__constant__ int c_halfWin_x##index; \
__constant__ int c_halfWin_y##index; \
__constant__ int c_iters##index;
CUDA_CONSTANTS(0)
CUDA_CONSTANTS(1)
CUDA_CONSTANTS(2)
CUDA_CONSTANTS(3)
CUDA_CONSTANTS(4)
template <int index> struct c_multi_winSize_x;
template <int index> struct c_multi_winSize_y;
template <int index> struct c_multi_halfWin_x;
template <int index> struct c_multi_halfWin_y;
template <int index> struct c_multi_iters;
#define CUDA_CONSTANTS_ACCESSOR(index) \
template <> struct c_multi_winSize_x<index> \
{ static __device__ __forceinline__ int get(void){ return c_winSize_x##index;} }; \
template <> struct c_multi_winSize_y<index> \
{ static __device__ __forceinline__ int get(void){ return c_winSize_y##index;} }; \
template <> struct c_multi_halfWin_x<index> \
{ static __device__ __forceinline__ int get(void){ return c_halfWin_x##index;} }; \
template <> struct c_multi_halfWin_y<index> \
{ static __device__ __forceinline__ int get(void){ return c_halfWin_y##index;} }; \
template <> struct c_multi_iters<index> \
{ static __device__ __forceinline__ int get(void){ return c_iters##index;} };
CUDA_CONSTANTS_ACCESSOR(0)
CUDA_CONSTANTS_ACCESSOR(1)
CUDA_CONSTANTS_ACCESSOR(2)
CUDA_CONSTANTS_ACCESSOR(3)
CUDA_CONSTANTS_ACCESSOR(4)
texture<float, cudaTextureType2D, cudaReadModeElementType>
tex_If(false, cudaFilterModeLinear, cudaAddressModeClamp);
texture<float4, cudaTextureType2D, cudaReadModeElementType>
tex_If4(false, cudaFilterModeLinear, cudaAddressModeClamp);
texture<uchar, cudaTextureType2D, cudaReadModeElementType>
tex_Ib(false, cudaFilterModePoint, cudaAddressModeClamp);
texture<float, cudaTextureType2D, cudaReadModeElementType>
tex_Jf(false, cudaFilterModeLinear, cudaAddressModeClamp);
texture<float4, cudaTextureType2D, cudaReadModeElementType>
tex_Jf4(false, cudaFilterModeLinear, cudaAddressModeClamp);
template <int cn> struct Tex_I;
template <> struct Tex_I<1>
@ -99,6 +145,57 @@ namespace pyrlk
}
};
//--------------------------------------------------------------------------
#define CUDA_DECL_TEX_MULTI(texname, type, filtermode) \
texture<type, cudaTextureType2D, cudaReadModeElementType> \
texname##_multi0(false, filtermode, cudaAddressModeClamp); \
texture<type, cudaTextureType2D, cudaReadModeElementType> \
texname##_multi1(false, filtermode, cudaAddressModeClamp); \
texture<type, cudaTextureType2D, cudaReadModeElementType> \
texname##_multi2(false, filtermode, cudaAddressModeClamp); \
texture<type, cudaTextureType2D, cudaReadModeElementType> \
texname##_multi3(false, filtermode, cudaAddressModeClamp); \
texture<type, cudaTextureType2D, cudaReadModeElementType> \
texname##_multi4(false, filtermode, cudaAddressModeClamp); \
CUDA_DECL_TEX_MULTI(tex_If1, float, cudaFilterModeLinear)
CUDA_DECL_TEX_MULTI(tex_If4, float4, cudaFilterModeLinear)
CUDA_DECL_TEX_MULTI(tex_Ib1, uchar, cudaFilterModePoint)
CUDA_DECL_TEX_MULTI(tex_Jf1, float, cudaFilterModeLinear)
CUDA_DECL_TEX_MULTI(tex_Jf4, float4, cudaFilterModeLinear)
template <int cn, int index> struct Tex_I_multi;
template <int cn, int index> struct Tex_J_multi;
template <int cn, int index> struct Tex_B_multi;
#define CUDA_DECL_TEX_MULTI_ACCESS(accessorname, texname, cn, returntype) \
template <> struct accessorname##_multi<cn, 0> \
{ static __device__ __forceinline__ returntype read(float x, float y) \
{ return tex2D(texname##cn##_multi0, x, y); } }; \
template <> struct accessorname##_multi<cn, 1> \
{ static __device__ __forceinline__ returntype read(float x, float y) \
{ return tex2D(texname##cn##_multi1, x, y); } }; \
template <> struct accessorname##_multi<cn, 2> \
{ static __device__ __forceinline__ returntype read(float x, float y) \
{ return tex2D(texname##cn##_multi2, x, y); } }; \
template <> struct accessorname##_multi<cn, 3> \
{ static __device__ __forceinline__ returntype read(float x, float y) \
{ return tex2D(texname##cn##_multi3, x, y); } }; \
template <> struct accessorname##_multi<cn, 4> \
{ static __device__ __forceinline__ returntype read(float x, float y) \
{ return tex2D(texname##cn##_multi4, x, y); } };
CUDA_DECL_TEX_MULTI_ACCESS(Tex_I, tex_If, 1, float)
CUDA_DECL_TEX_MULTI_ACCESS(Tex_I, tex_If, 4, float4)
CUDA_DECL_TEX_MULTI_ACCESS(Tex_B, tex_Ib, 1, uchar)
CUDA_DECL_TEX_MULTI_ACCESS(Tex_J, tex_Jf, 1, float)
CUDA_DECL_TEX_MULTI_ACCESS(Tex_J, tex_Jf, 4, float4)
//--------------------------------------------------------------------------
__device__ __forceinline__ void accum(float& dst, float val)
{
dst += val;
@ -309,6 +406,200 @@ namespace pyrlk
}
}
#if defined(HAVE_TBB)
template <int cn, int index, int PATCH_X, int PATCH_Y, bool calcErr>
__global__ void sparseKernel_multi(const float2* prevPts, float2* nextPts, uchar* status, float* err, const int level, const int rows, const int cols)
{
#if __CUDA_ARCH__ <= 110
const int BLOCK_SIZE = 128;
#else
const int BLOCK_SIZE = 256;
#endif
__shared__ float smem1[BLOCK_SIZE];
__shared__ float smem2[BLOCK_SIZE];
__shared__ float smem3[BLOCK_SIZE];
const unsigned int tid = threadIdx.y * blockDim.x + threadIdx.x;
float2 prevPt = prevPts[blockIdx.x];
prevPt.x *= (1.0f / (1 << level));
prevPt.y *= (1.0f / (1 << level));
if (prevPt.x < 0 || prevPt.x >= cols || prevPt.y < 0 || prevPt.y >= rows)
{
if (tid == 0 && level == 0)
status[blockIdx.x] = 0;
return;
}
prevPt.x -= c_multi_halfWin_x<index>::get();
prevPt.y -= c_multi_halfWin_y<index>::get();
// extract the patch from the first image, compute covariation matrix of derivatives
float A11 = 0;
float A12 = 0;
float A22 = 0;
typedef typename TypeVec<float, cn>::vec_type work_type;
work_type I_patch [PATCH_Y][PATCH_X];
work_type dIdx_patch[PATCH_Y][PATCH_X];
work_type dIdy_patch[PATCH_Y][PATCH_X];
for (int yBase = threadIdx.y, i = 0; yBase < c_multi_winSize_y<index>::get(); yBase += blockDim.y, ++i)
{
for (int xBase = threadIdx.x, j = 0; xBase < c_multi_winSize_x<index>::get(); xBase += blockDim.x, ++j)
{
float x = prevPt.x + xBase + 0.5f;
float y = prevPt.y + yBase + 0.5f;
I_patch[i][j] = Tex_I_multi<cn,index>::read(x, y);
// Sharr Deriv
work_type dIdx = 3.0f * Tex_I_multi<cn,index>::read(x+1, y-1) + 10.0f * Tex_I_multi<cn,index>::read(x+1, y) + 3.0f * Tex_I_multi<cn,index>::read(x+1, y+1) -
(3.0f * Tex_I_multi<cn,index>::read(x-1, y-1) + 10.0f * Tex_I_multi<cn,index>::read(x-1, y) + 3.0f * Tex_I_multi<cn,index>::read(x-1, y+1));
work_type dIdy = 3.0f * Tex_I_multi<cn,index>::read(x-1, y+1) + 10.0f * Tex_I_multi<cn,index>::read(x, y+1) + 3.0f * Tex_I_multi<cn,index>::read(x+1, y+1) -
(3.0f * Tex_I_multi<cn,index>::read(x-1, y-1) + 10.0f * Tex_I_multi<cn,index>::read(x, y-1) + 3.0f * Tex_I_multi<cn,index>::read(x+1, y-1));
dIdx_patch[i][j] = dIdx;
dIdy_patch[i][j] = dIdy;
accum(A11, dIdx * dIdx);
accum(A12, dIdx * dIdy);
accum(A22, dIdy * dIdy);
}
}
reduce<BLOCK_SIZE>(smem_tuple(smem1, smem2, smem3), thrust::tie(A11, A12, A22), tid, thrust::make_tuple(plus<float>(), plus<float>(), plus<float>()));
#if __CUDA_ARCH__ >= 300
if (tid == 0)
{
smem1[0] = A11;
smem2[0] = A12;
smem3[0] = A22;
}
#endif
__syncthreads();
A11 = smem1[0];
A12 = smem2[0];
A22 = smem3[0];
float D = A11 * A22 - A12 * A12;
if (abs_(D) < numeric_limits<float>::epsilon())
{
if (tid == 0 && level == 0)
status[blockIdx.x] = 0;
return;
}
D = 1.f / D;
A11 *= D;
A12 *= D;
A22 *= D;
float2 nextPt = nextPts[blockIdx.x];
nextPt.x *= 2.f;
nextPt.y *= 2.f;
nextPt.x -= c_multi_halfWin_x<index>::get();
nextPt.y -= c_multi_halfWin_y<index>::get();
for (int k = 0; k < c_multi_iters<index>::get(); ++k)
{
if (nextPt.x < -c_multi_halfWin_x<index>::get() || nextPt.x >= cols || nextPt.y < -c_multi_halfWin_y<index>::get() || nextPt.y >= rows)
{
if (tid == 0 && level == 0)
status[blockIdx.x] = 0;
return;
}
float b1 = 0;
float b2 = 0;
for (int y = threadIdx.y, i = 0; y < c_multi_winSize_y<index>::get(); y += blockDim.y, ++i)
{
for (int x = threadIdx.x, j = 0; x < c_multi_winSize_x<index>::get(); x += blockDim.x, ++j)
{
work_type I_val = I_patch[i][j];
work_type J_val = Tex_J_multi<cn,index>::read(nextPt.x + x + 0.5f, nextPt.y + y + 0.5f);
work_type diff = (J_val - I_val) * 32.0f;
accum(b1, diff * dIdx_patch[i][j]);
accum(b2, diff * dIdy_patch[i][j]);
}
}
reduce<BLOCK_SIZE>(smem_tuple(smem1, smem2), thrust::tie(b1, b2), tid, thrust::make_tuple(plus<float>(), plus<float>()));
#if __CUDA_ARCH__ >= 300
if (tid == 0)
{
smem1[0] = b1;
smem2[0] = b2;
}
#endif
__syncthreads();
b1 = smem1[0];
b2 = smem2[0];
float2 delta;
delta.x = A12 * b2 - A22 * b1;
delta.y = A12 * b1 - A11 * b2;
nextPt.x += delta.x;
nextPt.y += delta.y;
if (::fabs(delta.x) < 0.01f && ::fabs(delta.y) < 0.01f)
break;
}
float errval = 0;
if (calcErr)
{
for (int y = threadIdx.y, i = 0; y < c_multi_winSize_y<index>::get(); y += blockDim.y, ++i)
{
for (int x = threadIdx.x, j = 0; x < c_multi_winSize_x<index>::get(); x += blockDim.x, ++j)
{
work_type I_val = I_patch[i][j];
work_type J_val = Tex_J_multi<cn,index>::read(nextPt.x + x + 0.5f, nextPt.y + y + 0.5f);
work_type diff = J_val - I_val;
accum(errval, abs_(diff));
}
}
reduce<BLOCK_SIZE>(smem1, errval, tid, plus<float>());
}
if (tid == 0)
{
nextPt.x += c_multi_halfWin_x<index>::get();
nextPt.y += c_multi_halfWin_y<index>::get();
nextPts[blockIdx.x] = nextPt;
if (calcErr)
err[blockIdx.x] = static_cast<float>(errval) / (cn * c_multi_winSize_x<index>::get() * c_multi_winSize_y<index>::get());
}
}
#endif // defined(HAVE_TBB)
template <int cn, int PATCH_X, int PATCH_Y>
void sparse_caller(int rows, int cols, const float2* prevPts, float2* nextPts, uchar* status, float* err, int ptcount,
int level, dim3 block, cudaStream_t stream)
@ -326,6 +617,26 @@ namespace pyrlk
cudaSafeCall( cudaDeviceSynchronize() );
}
#if defined(HAVE_TBB)
template <int cn, int index, int PATCH_X, int PATCH_Y>
void sparse_caller_multi(int rows, int cols, const float2* prevPts, float2* nextPts, uchar* status, float* err, int ptcount,
int level, dim3 block, cudaStream_t stream)
{
dim3 grid(ptcount);
if (level == 0 && err)
sparseKernel_multi<cn, index, PATCH_X, PATCH_Y, true><<<grid, block>>>(prevPts, nextPts, status, err, level, rows, cols);
else
sparseKernel_multi<cn, index, PATCH_X, PATCH_Y, false><<<grid, block>>>(prevPts, nextPts, status, err, level, rows, cols);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
#endif // defined(HAVE_TBB)
template <bool calcErr>
__global__ void denseKernel(PtrStepf u, PtrStepf v, const PtrStepf prevU, const PtrStepf prevV, PtrStepf err, const int rows, const int cols)
{
@ -484,6 +795,30 @@ namespace pyrlk
cudaSafeCall( cudaMemcpyToSymbol(c_iters, &iters, sizeof(int)) );
}
#if defined(HAVE_TBB)
// Uploads window size, half-window size and iteration count into the
// constant-memory symbols of execution line <index> (0..4).
// Copies are asynchronous on <stream>; an invalid index raises CV_StsBadArg.
void loadConstants_multi(int2 winSize, int iters, int index, cudaStream_t stream = 0)
{
    int2 halfWin;

// Expands to the six symbol uploads for one execution line; the ##index token
// pasting selects the per-line constant symbols (c_winSize_x0, c_winSize_x1, ...).
#define COPY_TO_SYMBOL_CALL(index) \
    cudaSafeCall( cudaMemcpyToSymbolAsync(c_winSize_x##index, &winSize.x, sizeof(int), 0, cudaMemcpyHostToDevice, stream) ); \
    cudaSafeCall( cudaMemcpyToSymbolAsync(c_winSize_y##index, &winSize.y, sizeof(int), 0, cudaMemcpyHostToDevice, stream) ); \
    halfWin = make_int2((winSize.x - 1) / 2, (winSize.y - 1) / 2); \
    cudaSafeCall( cudaMemcpyToSymbolAsync(c_halfWin_x##index, &halfWin.x, sizeof(int), 0, cudaMemcpyHostToDevice, stream) ); \
    cudaSafeCall( cudaMemcpyToSymbolAsync(c_halfWin_y##index, &halfWin.y, sizeof(int), 0, cudaMemcpyHostToDevice, stream) ); \
    cudaSafeCall( cudaMemcpyToSymbolAsync(c_iters##index, &iters, sizeof(int), 0, cudaMemcpyHostToDevice, stream) );

    switch(index)
    {
    case 0: COPY_TO_SYMBOL_CALL(0) break;
    case 1: COPY_TO_SYMBOL_CALL(1) break;
    case 2: COPY_TO_SYMBOL_CALL(2) break;
    case 3: COPY_TO_SYMBOL_CALL(3) break;
    case 4: COPY_TO_SYMBOL_CALL(4) break;
    default: CV_Error(CV_StsBadArg, "invalid execution line index"); break;
    }

// Macro hygiene: do not let the helper leak into the rest of the file.
#undef COPY_TO_SYMBOL_CALL
}
#endif // defined(HAVE_TBB)
void sparse1(PtrStepSzf I, PtrStepSzf J, const float2* prevPts, float2* nextPts, uchar* status, float* err, int ptcount,
int level, dim3 block, dim3 patch, cudaStream_t stream)
{
@ -528,6 +863,161 @@ namespace pyrlk
level, block, stream);
}
#if defined(HAVE_TBB)
// Multi-execution-line entry point for single-channel (float) sparse PyrLK.
// Binds the I/J texture references owned by execution line <index>, then
// dispatches to the sparse_caller_multi instantiation matching the patch size.
void sparse1_multi(PtrStepSzf I, PtrStepSzf J, const float2* prevPts, float2* nextPts, uchar* status, float* err, int ptcount,
    int level, dim3 block, dim3 patch, cudaStream_t stream, int index)
{
    typedef void (*func_t)(int rows, int cols, const float2* prevPts, float2* nextPts, uchar* status, float* err, int ptcount,
        int level, dim3 block, cudaStream_t stream);

    // funcs[index][patch.y - 1][patch.x - 1]: one pre-instantiated caller per
    // (execution line 0..4, patch height 1..5, patch width 1..5).
    static const func_t funcs[5][5][5] =
    {
        { // index 0
            {sparse_caller_multi<1, 0, 1, 1>, sparse_caller_multi<1, 0, 2, 1>, sparse_caller_multi<1, 0, 3, 1>, sparse_caller_multi<1, 0, 4, 1>, sparse_caller_multi<1, 0, 5, 1>},
            {sparse_caller_multi<1, 0, 1, 2>, sparse_caller_multi<1, 0, 2, 2>, sparse_caller_multi<1, 0, 3, 2>, sparse_caller_multi<1, 0, 4, 2>, sparse_caller_multi<1, 0, 5, 2>},
            {sparse_caller_multi<1, 0, 1, 3>, sparse_caller_multi<1, 0, 2, 3>, sparse_caller_multi<1, 0, 3, 3>, sparse_caller_multi<1, 0, 4, 3>, sparse_caller_multi<1, 0, 5, 3>},
            {sparse_caller_multi<1, 0, 1, 4>, sparse_caller_multi<1, 0, 2, 4>, sparse_caller_multi<1, 0, 3, 4>, sparse_caller_multi<1, 0, 4, 4>, sparse_caller_multi<1, 0, 5, 4>},
            {sparse_caller_multi<1, 0, 1, 5>, sparse_caller_multi<1, 0, 2, 5>, sparse_caller_multi<1, 0, 3, 5>, sparse_caller_multi<1, 0, 4, 5>, sparse_caller_multi<1, 0, 5, 5>}
        },
        { // index 1
            {sparse_caller_multi<1, 1, 1, 1>, sparse_caller_multi<1, 1, 2, 1>, sparse_caller_multi<1, 1, 3, 1>, sparse_caller_multi<1, 1, 4, 1>, sparse_caller_multi<1, 1, 5, 1>},
            {sparse_caller_multi<1, 1, 1, 2>, sparse_caller_multi<1, 1, 2, 2>, sparse_caller_multi<1, 1, 3, 2>, sparse_caller_multi<1, 1, 4, 2>, sparse_caller_multi<1, 1, 5, 2>},
            {sparse_caller_multi<1, 1, 1, 3>, sparse_caller_multi<1, 1, 2, 3>, sparse_caller_multi<1, 1, 3, 3>, sparse_caller_multi<1, 1, 4, 3>, sparse_caller_multi<1, 1, 5, 3>},
            {sparse_caller_multi<1, 1, 1, 4>, sparse_caller_multi<1, 1, 2, 4>, sparse_caller_multi<1, 1, 3, 4>, sparse_caller_multi<1, 1, 4, 4>, sparse_caller_multi<1, 1, 5, 4>},
            {sparse_caller_multi<1, 1, 1, 5>, sparse_caller_multi<1, 1, 2, 5>, sparse_caller_multi<1, 1, 3, 5>, sparse_caller_multi<1, 1, 4, 5>, sparse_caller_multi<1, 1, 5, 5>}
        },
        { // index 2
            {sparse_caller_multi<1, 2, 1, 1>, sparse_caller_multi<1, 2, 2, 1>, sparse_caller_multi<1, 2, 3, 1>, sparse_caller_multi<1, 2, 4, 1>, sparse_caller_multi<1, 2, 5, 1>},
            {sparse_caller_multi<1, 2, 1, 2>, sparse_caller_multi<1, 2, 2, 2>, sparse_caller_multi<1, 2, 3, 2>, sparse_caller_multi<1, 2, 4, 2>, sparse_caller_multi<1, 2, 5, 2>},
            {sparse_caller_multi<1, 2, 1, 3>, sparse_caller_multi<1, 2, 2, 3>, sparse_caller_multi<1, 2, 3, 3>, sparse_caller_multi<1, 2, 4, 3>, sparse_caller_multi<1, 2, 5, 3>},
            {sparse_caller_multi<1, 2, 1, 4>, sparse_caller_multi<1, 2, 2, 4>, sparse_caller_multi<1, 2, 3, 4>, sparse_caller_multi<1, 2, 4, 4>, sparse_caller_multi<1, 2, 5, 4>},
            {sparse_caller_multi<1, 2, 1, 5>, sparse_caller_multi<1, 2, 2, 5>, sparse_caller_multi<1, 2, 3, 5>, sparse_caller_multi<1, 2, 4, 5>, sparse_caller_multi<1, 2, 5, 5>}
        },
        { // index 3
            {sparse_caller_multi<1, 3, 1, 1>, sparse_caller_multi<1, 3, 2, 1>, sparse_caller_multi<1, 3, 3, 1>, sparse_caller_multi<1, 3, 4, 1>, sparse_caller_multi<1, 3, 5, 1>},
            {sparse_caller_multi<1, 3, 1, 2>, sparse_caller_multi<1, 3, 2, 2>, sparse_caller_multi<1, 3, 3, 2>, sparse_caller_multi<1, 3, 4, 2>, sparse_caller_multi<1, 3, 5, 2>},
            {sparse_caller_multi<1, 3, 1, 3>, sparse_caller_multi<1, 3, 2, 3>, sparse_caller_multi<1, 3, 3, 3>, sparse_caller_multi<1, 3, 4, 3>, sparse_caller_multi<1, 3, 5, 3>},
            {sparse_caller_multi<1, 3, 1, 4>, sparse_caller_multi<1, 3, 2, 4>, sparse_caller_multi<1, 3, 3, 4>, sparse_caller_multi<1, 3, 4, 4>, sparse_caller_multi<1, 3, 5, 4>},
            {sparse_caller_multi<1, 3, 1, 5>, sparse_caller_multi<1, 3, 2, 5>, sparse_caller_multi<1, 3, 3, 5>, sparse_caller_multi<1, 3, 4, 5>, sparse_caller_multi<1, 3, 5, 5>}
        },
        { // index 4
            {sparse_caller_multi<1, 4, 1, 1>, sparse_caller_multi<1, 4, 2, 1>, sparse_caller_multi<1, 4, 3, 1>, sparse_caller_multi<1, 4, 4, 1>, sparse_caller_multi<1, 4, 5, 1>},
            {sparse_caller_multi<1, 4, 1, 2>, sparse_caller_multi<1, 4, 2, 2>, sparse_caller_multi<1, 4, 3, 2>, sparse_caller_multi<1, 4, 4, 2>, sparse_caller_multi<1, 4, 5, 2>},
            {sparse_caller_multi<1, 4, 1, 3>, sparse_caller_multi<1, 4, 2, 3>, sparse_caller_multi<1, 4, 3, 3>, sparse_caller_multi<1, 4, 4, 3>, sparse_caller_multi<1, 4, 5, 3>},
            {sparse_caller_multi<1, 4, 1, 4>, sparse_caller_multi<1, 4, 2, 4>, sparse_caller_multi<1, 4, 3, 4>, sparse_caller_multi<1, 4, 4, 4>, sparse_caller_multi<1, 4, 5, 4>},
            {sparse_caller_multi<1, 4, 1, 5>, sparse_caller_multi<1, 4, 2, 5>, sparse_caller_multi<1, 4, 3, 5>, sparse_caller_multi<1, 4, 4, 5>, sparse_caller_multi<1, 4, 5, 5>}
        }
    };

    // Each execution line owns a dedicated pair of texture references, so the
    // switch selects which pair to bind; any other index is rejected.
    switch(index)
    {
    case 0:
        bindTexture(&tex_If1_multi0, I);
        bindTexture(&tex_Jf1_multi0, J);
        break;
    case 1:
        bindTexture(&tex_If1_multi1, I);
        bindTexture(&tex_Jf1_multi1, J);
        break;
    case 2:
        bindTexture(&tex_If1_multi2, I);
        bindTexture(&tex_Jf1_multi2, J);
        break;
    case 3:
        bindTexture(&tex_If1_multi3, I);
        bindTexture(&tex_Jf1_multi3, J);
        break;
    case 4:
        bindTexture(&tex_If1_multi4, I);
        bindTexture(&tex_Jf1_multi4, J);
        break;
    default:
        CV_Error(CV_StsBadArg, "invalid execution line index");
        break;
    }

    funcs[index][patch.y - 1][patch.x - 1](I.rows, I.cols, prevPts, nextPts, status, err, ptcount,
        level, block, stream);
}
// Multi-execution-line entry point for 4-channel (float4) sparse PyrLK.
// Same structure as sparse1_multi: binds the float4 I/J textures of execution
// line <index>, then dispatches on the patch size.
void sparse4_multi(PtrStepSz<float4> I, PtrStepSz<float4> J, const float2* prevPts, float2* nextPts, uchar* status, float* err, int ptcount,
    int level, dim3 block, dim3 patch, cudaStream_t stream, int index)
{
    typedef void (*func_t)(int rows, int cols, const float2* prevPts, float2* nextPts, uchar* status, float* err, int ptcount,
        int level, dim3 block, cudaStream_t stream);

    // funcs[index][patch.y - 1][patch.x - 1]: one pre-instantiated caller per
    // (execution line 0..4, patch height 1..5, patch width 1..5).
    static const func_t funcs[5][5][5] =
    {
        { // index 0
            {sparse_caller_multi<4, 0, 1, 1>, sparse_caller_multi<4, 0, 2, 1>, sparse_caller_multi<4, 0, 3, 1>, sparse_caller_multi<4, 0, 4, 1>, sparse_caller_multi<4, 0, 5, 1>},
            {sparse_caller_multi<4, 0, 1, 2>, sparse_caller_multi<4, 0, 2, 2>, sparse_caller_multi<4, 0, 3, 2>, sparse_caller_multi<4, 0, 4, 2>, sparse_caller_multi<4, 0, 5, 2>},
            {sparse_caller_multi<4, 0, 1, 3>, sparse_caller_multi<4, 0, 2, 3>, sparse_caller_multi<4, 0, 3, 3>, sparse_caller_multi<4, 0, 4, 3>, sparse_caller_multi<4, 0, 5, 3>},
            {sparse_caller_multi<4, 0, 1, 4>, sparse_caller_multi<4, 0, 2, 4>, sparse_caller_multi<4, 0, 3, 4>, sparse_caller_multi<4, 0, 4, 4>, sparse_caller_multi<4, 0, 5, 4>},
            {sparse_caller_multi<4, 0, 1, 5>, sparse_caller_multi<4, 0, 2, 5>, sparse_caller_multi<4, 0, 3, 5>, sparse_caller_multi<4, 0, 4, 5>, sparse_caller_multi<4, 0, 5, 5>}
        },
        { // index 1
            {sparse_caller_multi<4, 1, 1, 1>, sparse_caller_multi<4, 1, 2, 1>, sparse_caller_multi<4, 1, 3, 1>, sparse_caller_multi<4, 1, 4, 1>, sparse_caller_multi<4, 1, 5, 1>},
            {sparse_caller_multi<4, 1, 1, 2>, sparse_caller_multi<4, 1, 2, 2>, sparse_caller_multi<4, 1, 3, 2>, sparse_caller_multi<4, 1, 4, 2>, sparse_caller_multi<4, 1, 5, 2>},
            {sparse_caller_multi<4, 1, 1, 3>, sparse_caller_multi<4, 1, 2, 3>, sparse_caller_multi<4, 1, 3, 3>, sparse_caller_multi<4, 1, 4, 3>, sparse_caller_multi<4, 1, 5, 3>},
            {sparse_caller_multi<4, 1, 1, 4>, sparse_caller_multi<4, 1, 2, 4>, sparse_caller_multi<4, 1, 3, 4>, sparse_caller_multi<4, 1, 4, 4>, sparse_caller_multi<4, 1, 5, 4>},
            {sparse_caller_multi<4, 1, 1, 5>, sparse_caller_multi<4, 1, 2, 5>, sparse_caller_multi<4, 1, 3, 5>, sparse_caller_multi<4, 1, 4, 5>, sparse_caller_multi<4, 1, 5, 5>}
        },
        { // index 2
            {sparse_caller_multi<4, 2, 1, 1>, sparse_caller_multi<4, 2, 2, 1>, sparse_caller_multi<4, 2, 3, 1>, sparse_caller_multi<4, 2, 4, 1>, sparse_caller_multi<4, 2, 5, 1>},
            {sparse_caller_multi<4, 2, 1, 2>, sparse_caller_multi<4, 2, 2, 2>, sparse_caller_multi<4, 2, 3, 2>, sparse_caller_multi<4, 2, 4, 2>, sparse_caller_multi<4, 2, 5, 2>},
            {sparse_caller_multi<4, 2, 1, 3>, sparse_caller_multi<4, 2, 2, 3>, sparse_caller_multi<4, 2, 3, 3>, sparse_caller_multi<4, 2, 4, 3>, sparse_caller_multi<4, 2, 5, 3>},
            {sparse_caller_multi<4, 2, 1, 4>, sparse_caller_multi<4, 2, 2, 4>, sparse_caller_multi<4, 2, 3, 4>, sparse_caller_multi<4, 2, 4, 4>, sparse_caller_multi<4, 2, 5, 4>},
            {sparse_caller_multi<4, 2, 1, 5>, sparse_caller_multi<4, 2, 2, 5>, sparse_caller_multi<4, 2, 3, 5>, sparse_caller_multi<4, 2, 4, 5>, sparse_caller_multi<4, 2, 5, 5>}
        },
        { // index 3
            {sparse_caller_multi<4, 3, 1, 1>, sparse_caller_multi<4, 3, 2, 1>, sparse_caller_multi<4, 3, 3, 1>, sparse_caller_multi<4, 3, 4, 1>, sparse_caller_multi<4, 3, 5, 1>},
            {sparse_caller_multi<4, 3, 1, 2>, sparse_caller_multi<4, 3, 2, 2>, sparse_caller_multi<4, 3, 3, 2>, sparse_caller_multi<4, 3, 4, 2>, sparse_caller_multi<4, 3, 5, 2>},
            {sparse_caller_multi<4, 3, 1, 3>, sparse_caller_multi<4, 3, 2, 3>, sparse_caller_multi<4, 3, 3, 3>, sparse_caller_multi<4, 3, 4, 3>, sparse_caller_multi<4, 3, 5, 3>},
            {sparse_caller_multi<4, 3, 1, 4>, sparse_caller_multi<4, 3, 2, 4>, sparse_caller_multi<4, 3, 3, 4>, sparse_caller_multi<4, 3, 4, 4>, sparse_caller_multi<4, 3, 5, 4>},
            {sparse_caller_multi<4, 3, 1, 5>, sparse_caller_multi<4, 3, 2, 5>, sparse_caller_multi<4, 3, 3, 5>, sparse_caller_multi<4, 3, 4, 5>, sparse_caller_multi<4, 3, 5, 5>}
        },
        { // index 4
            {sparse_caller_multi<4, 4, 1, 1>, sparse_caller_multi<4, 4, 2, 1>, sparse_caller_multi<4, 4, 3, 1>, sparse_caller_multi<4, 4, 4, 1>, sparse_caller_multi<4, 4, 5, 1>},
            {sparse_caller_multi<4, 4, 1, 2>, sparse_caller_multi<4, 4, 2, 2>, sparse_caller_multi<4, 4, 3, 2>, sparse_caller_multi<4, 4, 4, 2>, sparse_caller_multi<4, 4, 5, 2>},
            {sparse_caller_multi<4, 4, 1, 3>, sparse_caller_multi<4, 4, 2, 3>, sparse_caller_multi<4, 4, 3, 3>, sparse_caller_multi<4, 4, 4, 3>, sparse_caller_multi<4, 4, 5, 3>},
            {sparse_caller_multi<4, 4, 1, 4>, sparse_caller_multi<4, 4, 2, 4>, sparse_caller_multi<4, 4, 3, 4>, sparse_caller_multi<4, 4, 4, 4>, sparse_caller_multi<4, 4, 5, 4>},
            {sparse_caller_multi<4, 4, 1, 5>, sparse_caller_multi<4, 4, 2, 5>, sparse_caller_multi<4, 4, 3, 5>, sparse_caller_multi<4, 4, 4, 5>, sparse_caller_multi<4, 4, 5, 5>}
        }
    };

    // Each execution line owns a dedicated pair of texture references, so the
    // switch selects which pair to bind; any other index is rejected.
    switch(index)
    {
    case 0:
        bindTexture(&tex_If4_multi0, I);
        bindTexture(&tex_Jf4_multi0, J);
        break;
    case 1:
        bindTexture(&tex_If4_multi1, I);
        bindTexture(&tex_Jf4_multi1, J);
        break;
    case 2:
        bindTexture(&tex_If4_multi2, I);
        bindTexture(&tex_Jf4_multi2, J);
        break;
    case 3:
        bindTexture(&tex_If4_multi3, I);
        bindTexture(&tex_Jf4_multi3, J);
        break;
    case 4:
        bindTexture(&tex_If4_multi4, I);
        bindTexture(&tex_Jf4_multi4, J);
        break;
    default:
        CV_Error(CV_StsBadArg, "invalid execution line index");
        break;
    }

    funcs[index][patch.y - 1][patch.x - 1](I.rows, I.cols, prevPts, nextPts, status, err, ptcount,
        level, block, stream);
}
#endif // defined(HAVE_TBB)
void dense(PtrStepSzb I, PtrStepSzf J, PtrStepSzf u, PtrStepSzf v, PtrStepSzf prevU, PtrStepSzf prevV, PtrStepSzf err, int2 winSize, cudaStream_t stream)
{
dim3 block(16, 16);

@ -42,12 +42,18 @@
#include "precomp.hpp"
#ifdef HAVE_TBB
#include <tbb/compat/condition_variable>
#include <tbb/mutex.h>
#endif
using namespace std;
using namespace cv;
using namespace cv::gpu;
#if !defined (HAVE_CUDA) || defined (CUDA_DISABLER)
cv::gpu::PyrLKOpticalFlow::PyrLKOpticalFlow() { throw_nogpu(); }
void cv::gpu::PyrLKOpticalFlow::sparse(const GpuMat&, const GpuMat&, const GpuMat&, GpuMat&, GpuMat&, GpuMat*) { throw_nogpu(); }
void cv::gpu::PyrLKOpticalFlow::dense(const GpuMat&, const GpuMat&, GpuMat&, GpuMat&, GpuMat*) { throw_nogpu(); }
@ -64,6 +70,23 @@ namespace pyrlk
void sparse4(PtrStepSz<float4> I, PtrStepSz<float4> J, const float2* prevPts, float2* nextPts, uchar* status, float* err, int ptcount,
int level, dim3 block, dim3 patch, cudaStream_t stream = 0);
#if !defined(HAVE_TBB)
#define throw_notbb() CV_Error(CV_StsNotImplemented, "The library is compiled without TBB support")
void loadConstants_multi(int2, int, int, cudaStream_t) { throw_notbb(); }
void sparse1_multi(PtrStepSzf, PtrStepSzf, const float2*, float2*, uchar*, float*, int,
int, dim3, dim3, cudaStream_t, int) { throw_notbb(); }
void sparse4_multi(PtrStepSz<float4>, PtrStepSz<float4>, const float2*, float2*, uchar*, float*, int,
int, dim3, dim3, cudaStream_t, int) { throw_notbb(); }
#else
void loadConstants_multi(int2 winSize, int iters, int index = 0, cudaStream_t stream = 0);
void sparse1_multi(PtrStepSzf I, PtrStepSzf J, const float2* prevPts, float2* nextPts, uchar* status, float* err, int ptcount,
int level, dim3 block, dim3 patch, cudaStream_t stream = 0, int index = 0);
void sparse4_multi(PtrStepSz<float4> I, PtrStepSz<float4> J, const float2* prevPts, float2* nextPts, uchar* status, float* err, int ptcount,
int level, dim3 block, dim3 patch, cudaStream_t stream = 0, int index = 0);
#endif
void dense(PtrStepSzb I, PtrStepSzf J, PtrStepSzf u, PtrStepSzf v, PtrStepSzf prevU, PtrStepSzf prevV,
PtrStepSzf err, int2 winSize, cudaStream_t stream = 0);
}
@ -98,7 +121,9 @@ namespace
}
}
void cv::gpu::PyrLKOpticalFlow::sparse(const GpuMat& prevImg, const GpuMat& nextImg, const GpuMat& prevPts, GpuMat& nextPts, GpuMat& status, GpuMat* err)
void cv::gpu::PyrLKOpticalFlow::sparse(const GpuMat& prevImg,
const GpuMat& nextImg, const GpuMat& prevPts,
GpuMat& nextPts, GpuMat& status, GpuMat* err)
{
if (prevPts.empty())
{
@ -181,6 +206,137 @@ void cv::gpu::PyrLKOpticalFlow::sparse(const GpuMat& prevImg, const GpuMat& next
}
}
#ifdef HAVE_TBB
//--------------------------------------------------------------------------
// Multi-threading support
static bool index_vector_use[5] = {true, true, true, true, true}; // all free
static tbb::mutex s_PyrLKOpticalFlow_Mutex;
static condition_variable s_PyrLKOpticalFlow_ConditionVariable;
// Multi-stream variant of sparse(): same tracking pipeline, but the kernel
// constants and textures come from one of 5 independent "execution lines" so
// several host threads can track concurrently on their own CUDA streams.
// An execution-line index is acquired under a mutex before the GPU work and
// released afterwards; callers block while all 5 lines are in use.
void cv::gpu::PyrLKOpticalFlow::sparse_multi(const GpuMat& prevImg,
    const GpuMat& nextImg, const GpuMat& prevPts, GpuMat& nextPts,
    GpuMat& status, Stream& stream, GpuMat* err)
{
    if (prevPts.empty())
    {
        nextPts.release();
        status.release();
        if (err) err->release();
        return;
    }

    dim3 block, patch;
    calcPatchSize(winSize, block, patch);

    CV_Assert(prevImg.channels() == 1 || prevImg.channels() == 3 || prevImg.channels() == 4);
    CV_Assert(prevImg.size() == nextImg.size() && prevImg.type() == nextImg.type());
    CV_Assert(maxLevel >= 0);
    CV_Assert(winSize.width > 2 && winSize.height > 2);
    CV_Assert(patch.x > 0 && patch.x < 6 && patch.y > 0 && patch.y < 6);
    CV_Assert(prevPts.rows == 1 && prevPts.type() == CV_32FC2);

    if (useInitialFlow)
        CV_Assert(nextPts.size() == prevPts.size() && nextPts.type() == CV_32FC2);
    else
        ensureSizeIsEnough(1, prevPts.cols, prevPts.type(), nextPts);

    // Seed nextPts with the (possibly user-provided) guess, scaled down to the
    // coarsest pyramid level.
    GpuMat temp1 = (useInitialFlow ? nextPts : prevPts).reshape(1);
    GpuMat temp2 = nextPts.reshape(1);
    multiply(temp1, Scalar::all(1.0 / (1 << maxLevel) / 2.0), temp2);

    ensureSizeIsEnough(1, prevPts.cols, CV_8UC1, status);
    status.setTo(Scalar::all(1));

    if (err)
        ensureSizeIsEnough(1, prevPts.cols, CV_32FC1, *err);

    // Build the image pyramids (3-channel input is expanded to BGRA first).
    prevPyr_.resize(maxLevel + 1);
    nextPyr_.resize(maxLevel + 1);

    int cn = prevImg.channels();

    if (cn == 1 || cn == 4)
    {
        prevImg.convertTo(prevPyr_[0], CV_32F);
        nextImg.convertTo(nextPyr_[0], CV_32F);
    }
    else
    {
        buf_.resize(1);

        cvtColor(prevImg, buf_[0], COLOR_BGR2BGRA);
        buf_[0].convertTo(prevPyr_[0], CV_32F);

        cvtColor(nextImg, buf_[0], COLOR_BGR2BGRA);
        buf_[0].convertTo(nextPyr_[0], CV_32F);
    }

    for (int level = 1; level <= maxLevel; ++level)
    {
        pyrDown(prevPyr_[level - 1], prevPyr_[level]);
        pyrDown(nextPyr_[level - 1], nextPyr_[level]);
    }

    //--------------------------------------------------------------------------
    // Acquire a free execution line; block while all 5 are busy.
    int index = -1;
    do
    {
        unique_lock<tbb::mutex> ul(s_PyrLKOpticalFlow_Mutex);

        for (unsigned int uiI = 0; uiI < 5; ++uiI)
        {
            if (index_vector_use[uiI])
            {
                index = uiI;
                index_vector_use[uiI] = false;
                break;
            }
        }

        // Nothing free: sleep until a release notifies us, then re-scan.
        if (index < 0)
            s_PyrLKOpticalFlow_ConditionVariable.wait(ul);

        ul.unlock();
    } while (index < 0);
    //--------------------------------------------------------------------------

    try
    {
        pyrlk::loadConstants_multi(make_int2(winSize.width, winSize.height), iters, index);

        // Coarse-to-fine refinement; error output only on the finest level.
        for (int level = maxLevel; level >= 0; level--)
        {
            if (cn == 1)
            {
                pyrlk::sparse1_multi(prevPyr_[level], nextPyr_[level],
                    prevPts.ptr<float2>(), nextPts.ptr<float2>(), status.ptr(),
                    level == 0 && err ? err->ptr<float>() : 0, prevPts.cols,
                    level, block, patch, StreamAccessor::getStream(stream), index);
            }
            else
            {
                pyrlk::sparse4_multi(prevPyr_[level], nextPyr_[level],
                    prevPts.ptr<float2>(), nextPts.ptr<float2>(), status.ptr(),
                    level == 0 && err ? err->ptr<float>() : 0, prevPts.cols,
                    level, block, patch, StreamAccessor::getStream(stream), index);
            }
        }
    }
    catch (...)
    {
        // BUGFIX: release the execution line even when a GPU call throws
        // (CV_Error / cudaSafeCall); otherwise the slot leaks and waiting
        // threads may block forever.
        unique_lock<tbb::mutex> ul(s_PyrLKOpticalFlow_Mutex);
        index_vector_use[index] = true;
        s_PyrLKOpticalFlow_ConditionVariable.notify_one();
        throw;
    }

    // Release the execution line and wake one waiter.
    unique_lock<tbb::mutex> ul(s_PyrLKOpticalFlow_Mutex);
    index_vector_use[index] = true;
    s_PyrLKOpticalFlow_ConditionVariable.notify_one();
}
#else
// Stub built when OpenCV is compiled without TBB: the multi-stream sparse
// optical flow requires TBB synchronization primitives, so report
// not-implemented instead of tracking.
void cv::gpu::PyrLKOpticalFlow::sparse_multi(const GpuMat& /*prevImg*/,
    const GpuMat& /*nextImg*/, const GpuMat& /*prevPts*/, GpuMat& /*nextPts*/,
    GpuMat& /*status*/, Stream& /*stream*/, GpuMat* /*err*/)
{
    throw_notbb();
}
#endif
void cv::gpu::PyrLKOpticalFlow::dense(const GpuMat& prevImg, const GpuMat& nextImg, GpuMat& u, GpuMat& v, GpuMat* err)
{
CV_Assert(prevImg.type() == CV_8UC1);

@ -44,6 +44,10 @@
#ifdef HAVE_CUDA
#ifdef HAVE_TBB
#include <tbb/tbb.h>
#endif
using namespace cvtest;
//////////////////////////////////////////////////////
@ -322,6 +326,134 @@ GPU_TEST_P(PyrLKOpticalFlow, Sparse)
ASSERT_LE(bad_ratio, 0.01);
}
#ifdef HAVE_TBB
// TBB body functor for the Sparse_Multi test: each iteration of the blocked
// range uploads the shared point set and runs sparse_multi() with its own
// GpuMat slots and CUDA stream, exercising concurrent tracking.
struct Sparse_Multi_Functor
{
    // All arguments are owned by the caller (the test body); the functor only
    // stores non-owning references/pointers, so it must not outlive them.
    explicit Sparse_Multi_Functor(const cv::Mat& in_frame0, const cv::Mat& in_frame1,
        const cv::Mat& in_pts_mat,
        cv::gpu::GpuMat* in_d_pts,
        cv::gpu::GpuMat* in_d_nextPts,
        cv::gpu::GpuMat* in_d_status,
        cv::gpu::Stream* in_streams):
        m_frame0(in_frame0), m_frame1(in_frame1),
        m_pts_mat(in_pts_mat),
        m_d_pts(in_d_pts), m_d_nextPts(in_d_nextPts),
        m_d_status(in_d_status), m_streams(in_streams){}

    void operator()( const tbb::blocked_range<size_t>& r ) const
    {
        for( size_t i = r.begin(); i != r.end(); ++i )
        {
            m_d_pts[i].upload(m_pts_mat);

            // Fresh tracker per lane; results land in the per-lane buffers.
            cv::gpu::PyrLKOpticalFlow pyrLK;
            pyrLK.sparse_multi(loadMat(m_frame0), loadMat(m_frame1), m_d_pts[i],
                m_d_nextPts[i], m_d_status[i], m_streams[i]);
            // Block until this lane's asynchronous GPU work has finished.
            m_streams[i].waitForCompletion();
        }
    }

    const cv::Mat& m_frame0;
    const cv::Mat& m_frame1;
    const cv::Mat& m_pts_mat;
    cv::gpu::GpuMat* m_d_pts;
    cv::gpu::GpuMat* m_d_nextPts;
    cv::gpu::GpuMat* m_d_status;
    cv::gpu::Stream* m_streams;
};
// Runs 27 concurrent sparse_multi() trackings via tbb::parallel_for (more
// lanes than the 5 GPU execution lines, so some must wait), then checks every
// lane's result against the CPU calcOpticalFlowPyrLK gold output.
GPU_TEST_P(PyrLKOpticalFlow, Sparse_Multi)
{
    cv::Mat frame0 = readImage("opticalflow/frame0.png", useGray ? cv::IMREAD_GRAYSCALE : cv::IMREAD_COLOR);
    ASSERT_FALSE(frame0.empty());

    cv::Mat frame1 = readImage("opticalflow/frame1.png", useGray ? cv::IMREAD_GRAYSCALE : cv::IMREAD_COLOR);
    ASSERT_FALSE(frame1.empty());

    cv::Mat gray_frame;
    if (useGray)
        gray_frame = frame0;
    else
        cv::cvtColor(frame0, gray_frame, cv::COLOR_BGR2GRAY);

    std::vector<cv::Point2f> pts;
    cv::goodFeaturesToTrack(gray_frame, pts, 1000, 0.01, 0.0);

    //--------------------------------------------------------------------------
    // GPU: one input/output slot and one stream per concurrent lane.
    const unsigned int NB_EXEC_LINES = 27;
    cv::gpu::GpuMat d_pts[NB_EXEC_LINES];
    cv::gpu::GpuMat d_nextPts[NB_EXEC_LINES];
    cv::gpu::GpuMat d_status[NB_EXEC_LINES];
    cv::gpu::Stream streams[NB_EXEC_LINES];

    cv::Mat pts_mat(1, (int) pts.size(), CV_32FC2, (void*) &pts[0]);

    tbb::parallel_for(tbb::blocked_range<size_t>(0, NB_EXEC_LINES),
        Sparse_Multi_Functor(frame0, frame1, pts_mat,
            d_pts, d_nextPts, d_status, streams));

    // Download every lane's tracked points and status flags.
    std::vector<cv::Point2f> nextPts[NB_EXEC_LINES];
    std::vector<unsigned char> status[NB_EXEC_LINES];
    for (unsigned int i = 0; i < NB_EXEC_LINES; ++i)
    {
        nextPts[i].resize(d_nextPts[i].cols);
        cv::Mat nextPts_mat(1, d_nextPts[i].cols, CV_32FC2, (void*) &(nextPts[i][0]));
        d_nextPts[i].download(nextPts_mat);

        status[i].resize(d_status[i].cols);
        cv::Mat status_mat(1, d_status[i].cols, CV_8UC1, (void*) &(status[i][0]));
        d_status[i].download(status_mat);
    }

    //--------------------------------------------------------------------------
    // CPU gold reference.
    std::vector<cv::Point2f> nextPts_gold;
    std::vector<unsigned char> status_gold;
    cv::calcOpticalFlowPyrLK(frame0, frame1, pts, nextPts_gold, status_gold, cv::noArray());

    //--------------------------------------------------------------------------
    // CHECKS: every lane must agree with the gold result within 1 pixel.
    for (unsigned int uiI = 0; uiI < NB_EXEC_LINES; ++uiI)
    {
        ASSERT_EQ(nextPts_gold.size(), nextPts[uiI].size());
        ASSERT_EQ(status_gold.size(), status[uiI].size());
    }

    size_t mismatch = 0; // was misspelled "mistmatch"

    for (unsigned int uiI = 0; uiI < NB_EXEC_LINES; ++uiI)
    {
        for (size_t i = 0; i < nextPts[uiI].size(); ++i)
        {
            cv::Point2i a = nextPts[uiI][i];
            cv::Point2i b = nextPts_gold[i];

            if (status[uiI][i] != status_gold[i])
            {
                ++mismatch;
                continue;
            }

            if (status[uiI][i])
            {
                bool eq = std::abs(a.x - b.x) <= 1 && std::abs(a.y - b.y) <= 1;

                if (!eq)
                    ++mismatch;
            }
        }
    }

    double bad_ratio = static_cast<double>(mismatch) / (nextPts[0].size() * NB_EXEC_LINES);

    ASSERT_LE(bad_ratio, 0.01);
}
#endif // HAVE_TBB
INSTANTIATE_TEST_CASE_P(GPU_Video, PyrLKOpticalFlow, testing::Combine(
ALL_DEVICES,
testing::Values(UseGray(true), UseGray(false))));

@ -220,7 +220,7 @@ if(HAVE_AVFOUNDATION)
list(APPEND HIGHGUI_LIBRARIES "-framework AVFoundation" "-framework QuartzCore")
else()
list(APPEND highgui_srcs ${CMAKE_CURRENT_LIST_DIR}/src/cap_avfoundation_mac.mm)
list(APPEND HIGHGUI_LIBRARIES "-framework Cocoa" "-framework Accelerate" "-framework AVFoundation" "-framework CoreGraphics" "-framework CoreImage" "-framework CoreMedia" "-framework CoreVideo" "-framework QuartzCore")
list(APPEND HIGHGUI_LIBRARIES "-framework Cocoa" "-framework Accelerate" "-framework AVFoundation" "-framework CoreGraphics" "-framework CoreMedia" "-framework CoreVideo" "-framework QuartzCore")
endif()
endif()
@ -241,7 +241,7 @@ endif(HAVE_INTELPERC)
if(IOS)
add_definitions(-DHAVE_IOS=1)
list(APPEND highgui_srcs src/ios_conversions.mm src/cap_ios_abstract_camera.mm src/cap_ios_photo_camera.mm src/cap_ios_video_camera.mm)
list(APPEND HIGHGUI_LIBRARIES "-framework Accelerate" "-framework AVFoundation" "-framework CoreGraphics" "-framework CoreImage" "-framework CoreMedia" "-framework CoreVideo" "-framework QuartzCore" "-framework AssetsLibrary")
list(APPEND HIGHGUI_LIBRARIES "-framework Accelerate" "-framework AVFoundation" "-framework CoreGraphics" "-framework CoreMedia" "-framework CoreVideo" "-framework QuartzCore" "-framework UIKit")
endif()
if(WIN32)
@ -293,9 +293,6 @@ if(MSVC)
set_target_properties(${the_module} PROPERTIES LINK_FLAGS "/NODEFAULTLIB:atlthunk.lib /NODEFAULTLIB:atlsd.lib /NODEFAULTLIB:libcmt.lib /DEBUG")
endif()
#stop automatic dependencies propagation for this module
set_target_properties(${the_module} PROPERTIES LINK_INTERFACE_LIBRARIES "")
ocv_add_precompiled_headers(${the_module})
ocv_warnings_disable(CMAKE_CXX_FLAGS -Wno-deprecated-declarations -Wno-clobbered)

@ -42,6 +42,7 @@
#include "precomp.hpp"
#include "bitstrm.hpp"
#include "utils.hpp"
namespace cv
{
@ -103,7 +104,6 @@ void RBaseStream::readBlock()
fseek( m_file, m_block_pos, SEEK_SET );
size_t readed = fread( m_start, 1, m_block_size, m_file );
m_end = m_start + readed;
m_current = m_start;
if( readed == 0 || m_current >= m_end )
throw RBS_THROW_EOS;
@ -164,7 +164,7 @@ void RBaseStream::release()
void RBaseStream::setPos( int pos )
{
assert( isOpened() && pos >= 0 );
CV_Assert(isOpened() && pos >= 0);
if( !m_file )
{
@ -181,14 +181,19 @@ void RBaseStream::setPos( int pos )
int RBaseStream::getPos()
{
assert( isOpened() );
return m_block_pos + (int)(m_current - m_start);
CV_Assert(isOpened());
int pos = validateToInt((m_current - m_start) + m_block_pos);
CV_Assert(pos >= m_block_pos); // overflow check
CV_Assert(pos >= 0); // overflow check
return pos;
}
void RBaseStream::skip( int bytes )
{
assert( bytes >= 0 );
CV_Assert(bytes >= 0);
uchar* old = m_current;
m_current += bytes;
CV_Assert(m_current >= old); // overflow check
}
///////////////////////// RLByteStream ////////////////////////////
@ -208,6 +213,8 @@ int RLByteStream::getByte()
current = m_current;
}
CV_Assert(current < m_end);
val = *((uchar*)current);
m_current = current + 1;
return val;
@ -218,7 +225,7 @@ int RLByteStream::getBytes( void* buffer, int count )
{
uchar* data = (uchar*)buffer;
int readed = 0;
assert( count >= 0 );
CV_Assert(count >= 0);
while( count > 0 )
{
@ -369,7 +376,7 @@ void WBaseStream::writeBlock()
{
int size = (int)(m_current - m_start);
assert( isOpened() );
CV_Assert(isOpened());
if( size == 0 )
return;
@ -440,7 +447,7 @@ void WBaseStream::release()
int WBaseStream::getPos()
{
assert( isOpened() );
CV_Assert(isOpened());
return m_block_pos + (int)(m_current - m_start);
}
@ -463,7 +470,7 @@ void WLByteStream::putBytes( const void* buffer, int count )
{
uchar* data = (uchar*)buffer;
assert( data && m_current && count >= 0 );
CV_Assert(data && m_current && count >= 0);
while( count )
{

@ -48,13 +48,20 @@
namespace cv
{
enum
{
RBS_THROW_EOS=-123, // <end of stream> exception code
RBS_THROW_FORB=-124, // <forrbidden huffman code> exception code
RBS_HUFF_FORB=2047, // forrbidden huffman code "value"
RBS_BAD_HEADER=-125 // invalid header
#define DECLARE_RBS_EXCEPTION(name) \
class RBS_ ## name ## _Exception : public cv::Exception \
{ \
public: \
RBS_ ## name ## _Exception(int code_, const String& err_, const String& func_, const String& file_, int line_) : \
cv::Exception(code_, err_, func_, file_, line_) \
{} \
};
DECLARE_RBS_EXCEPTION(THROW_EOS)
#define RBS_THROW_EOS RBS_THROW_EOS_Exception(CV_StsError, "Unexpected end of input stream", CV_Func, __FILE__, __LINE__)
DECLARE_RBS_EXCEPTION(THROW_FORB)
#define RBS_THROW_FORB RBS_THROW_FORB_Exception(CV_StsError, "Forrbidden huffman code", CV_Func, __FILE__, __LINE__)
DECLARE_RBS_EXCEPTION(BAD_HEADER)
#define RBS_BAD_HEADER RBS_BAD_HEADER_Exception(CV_StsError, "Invalid header", CV_Func, __FILE__, __LINE__)
typedef unsigned long ulong;

@ -697,7 +697,15 @@ CvCaptureFile::CvCaptureFile(const char* filename) {
return;
}
mAssetTrack = [[mAsset tracksWithMediaType: AVMediaTypeVideo][0] retain];
NSArray *tracks = [mAsset tracksWithMediaType:AVMediaTypeVideo];
if ([tracks count] == 0) {
fprintf(stderr, "OpenCV: Couldn't read video stream from file \"%s\"\n", filename);
[localpool drain];
started = 0;
return;
}
mAssetTrack = [tracks[0] retain];
if ( ! setupReadingAt(kCMTimeZero) ) {
fprintf(stderr, "OpenCV: Couldn't read movie file \"%s\"\n", filename);

@ -192,7 +192,11 @@ CvDC1394::~CvDC1394()
dc = 0;
}
static CvDC1394 dc1394;
// Returns the process-wide libdc1394 context. The function-local static makes
// construction lazy (on first use) instead of happening at library load time.
static CvDC1394& getDC1394()
{
    static CvDC1394 dc1394;
    return dc1394;
}
class CvCaptureCAM_DC1394_v2_CPP : public CvCapture
{
@ -451,7 +455,7 @@ bool CvCaptureCAM_DC1394_v2_CPP::startCapture()
code = dc1394_capture_setup(dcCam, nDMABufs, DC1394_CAPTURE_FLAGS_DEFAULT);
if (code >= 0)
{
FD_SET(dc1394_capture_get_fileno(dcCam), &dc1394.camFds);
FD_SET(dc1394_capture_get_fileno(dcCam), &getDC1394().camFds);
dc1394_video_set_transmission(dcCam, DC1394_ON);
if (cameraId == VIDERE)
{
@ -477,15 +481,15 @@ bool CvCaptureCAM_DC1394_v2_CPP::open(int index)
close();
if (!dc1394.dc)
if (!getDC1394().dc)
goto _exit_;
err = dc1394_camera_enumerate(dc1394.dc, &cameraList);
err = dc1394_camera_enumerate(getDC1394().dc, &cameraList);
if (err < 0 || !cameraList || (unsigned)index >= (unsigned)cameraList->num)
goto _exit_;
guid = cameraList->ids[index].guid;
dcCam = dc1394_camera_new(dc1394.dc, guid);
dcCam = dc1394_camera_new(getDC1394().dc, guid);
if (!dcCam)
goto _exit_;
@ -510,8 +514,8 @@ void CvCaptureCAM_DC1394_v2_CPP::close()
// check for fileno valid before using
int fileno=dc1394_capture_get_fileno(dcCam);
if (fileno>=0 && FD_ISSET(fileno, &dc1394.camFds))
FD_CLR(fileno, &dc1394.camFds);
if (fileno>=0 && FD_ISSET(fileno, &getDC1394().camFds))
FD_CLR(fileno, &getDC1394().camFds);
dc1394_video_set_transmission(dcCam, DC1394_OFF);
dc1394_capture_stop(dcCam);
dc1394_camera_free(dcCam);

@ -129,9 +129,9 @@ extern "C" {
#include <unistd.h>
#include <stdio.h>
#include <sys/types.h>
#include <sys/sysctl.h>
#include <sys/time.h>
#if defined __APPLE__
#include <sys/sysctl.h>
#include <mach/clock.h>
#include <mach/mach.h>
#endif
@ -173,6 +173,10 @@ extern "C" {
#define AV_PIX_FMT_GRAY16BE PIX_FMT_GRAY16BE
#endif
#ifndef PKT_FLAG_KEY
#define PKT_FLAG_KEY AV_PKT_FLAG_KEY
#endif
#if LIBAVUTIL_BUILD >= (LIBAVUTIL_VERSION_MICRO >= 100 \
? CALC_FFMPEG_VERSION(52, 38, 100) : CALC_FFMPEG_VERSION(52, 13, 0))
#define USE_AV_FRAME_GET_BUFFER 1
@ -1481,13 +1485,20 @@ static AVStream *icv_add_video_stream_FFMPEG(AVFormatContext *oc,
// some formats want stream headers to be separate
if(oc->oformat->flags & AVFMT_GLOBALHEADER)
{
#if LIBAVCODEC_BUILD > CALC_FFMPEG_VERSION(56, 35, 0)
c->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
#else
c->flags |= CODEC_FLAG_GLOBAL_HEADER;
#endif
}
#endif
#if LIBAVCODEC_BUILD >= CALC_FFMPEG_VERSION(52, 42, 0)
st->avg_frame_rate = (AVRational){frame_rate, frame_rate_base};
#endif
#if LIBAVFORMAT_BUILD >= CALC_FFMPEG_VERSION(55, 20, 0)
st->time_base = c->time_base;
#endif
return st;
}
@ -1509,23 +1520,24 @@ static int icv_av_write_frame_FFMPEG( AVFormatContext * oc, AVStream * video_st,
#endif
int ret = OPENCV_NO_FRAMES_WRITTEN_CODE;
if (oc->oformat->flags & AVFMT_RAWPICTURE) {
#if LIBAVFORMAT_BUILD < CALC_FFMPEG_VERSION(57, 0, 0)
if (oc->oformat->flags & AVFMT_RAWPICTURE)
{
/* raw video case. The API will change slightly in the near
future for that */
AVPacket pkt;
av_init_packet(&pkt);
#ifndef PKT_FLAG_KEY
#define PKT_FLAG_KEY AV_PKT_FLAG_KEY
#endif
pkt.flags |= PKT_FLAG_KEY;
pkt.stream_index= video_st->index;
pkt.data= (uint8_t *)picture;
pkt.size= sizeof(AVPicture);
ret = av_write_frame(oc, &pkt);
} else {
}
else
#endif
{
/* encode the image */
AVPacket pkt;
av_init_packet(&pkt);
@ -1683,7 +1695,9 @@ void CvVideoWriter_FFMPEG::close()
/* write the trailer, if any */
if(ok && oc)
{
if( (oc->oformat->flags & AVFMT_RAWPICTURE) == 0 )
#if LIBAVFORMAT_BUILD < CALC_FFMPEG_VERSION(57, 0, 0)
if (!(oc->oformat->flags & AVFMT_RAWPICTURE))
#endif
{
for(;;)
{
@ -1917,7 +1931,10 @@ bool CvVideoWriter_FFMPEG::open( const char * filename, int fourcc,
outbuf = NULL;
if (!(oc->oformat->flags & AVFMT_RAWPICTURE)) {
#if LIBAVFORMAT_BUILD < CALC_FFMPEG_VERSION(57, 0, 0)
if (!(oc->oformat->flags & AVFMT_RAWPICTURE))
#endif
{
/* allocate output buffer */
/* assume we will never get codec output with more than 4 bytes per pixel... */
outbuf_size = width*height*4;
@ -2211,7 +2228,11 @@ AVStream* OutputMediaStream_FFMPEG::addVideoStream(AVFormatContext *oc, CV_CODEC
// some formats want stream headers to be separate
if (oc->oformat->flags & AVFMT_GLOBALHEADER)
{
c->flags |= CODEC_FLAG_GLOBAL_HEADER;
#if LIBAVCODEC_BUILD > CALC_FFMPEG_VERSION(56, 35, 0)
c->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
#else
c->flags |= CODEC_FLAG_GLOBAL_HEADER;
#endif
}
#endif

@ -31,7 +31,7 @@
#import "opencv2/highgui/cap_ios.h"
#include "precomp.hpp"
#import <AssetsLibrary/AssetsLibrary.h>
#import <UIKit/UIKit.h>
static CGFloat DegreesToRadians(CGFloat degrees) {return degrees * M_PI / 180;};
@ -595,11 +595,7 @@ static CGFloat DegreesToRadians(CGFloat degrees) {return degrees * M_PI / 180;};
return;
}
ALAssetsLibrary *library = [[ALAssetsLibrary alloc] init];
if ([library videoAtPathIsCompatibleWithSavedPhotosAlbum:[self videoFileURL]]) {
[library writeVideoAtPathToSavedPhotosAlbum:[self videoFileURL]
completionBlock:^(NSURL *assetURL, NSError *error){ (void)assetURL; (void)error; }];
}
UISaveVideoAtPathToSavedPhotosAlbum([self videoFileString], nil, nil, NULL);
}

@ -634,11 +634,11 @@ static int icvOpenCamera_QT (CvCapture_QT_Cam * capture, const int index)
OPENCV_ASSERT (capture, "icvOpenCamera_QT", "'capture' is a NULL-pointer");
OPENCV_ASSERT (index >=0, "icvOpenCamera_QT", "camera index is negative");
ComponentDescription component_description;
Component component = 0;
ComponentDescription component_description;
Component component = 0;
int number_of_inputs = 0;
Rect myRect;
ComponentResult result = noErr;
ComponentResult result = noErr;
// travers all components and count video digitizer channels

@ -1164,12 +1164,12 @@ static int read_frame_v4l2(CvCaptureCAM_V4L* capture) {
//printf("got data in buff %d, len=%d, flags=0x%X, seq=%d, used=%d)\n",
// buf.index, buf.length, buf.flags, buf.sequence, buf.bytesused);
if (-1 == ioctl (capture->deviceHandle, VIDIOC_QBUF, &buf))
perror ("VIDIOC_QBUF");
//set timestamp in capture struct to be timestamp of most recent frame
capture->timestamp = buf.timestamp;
if (-1 == ioctl (capture->deviceHandle, VIDIOC_QBUF, &buf))
perror ("VIDIOC_QBUF");
return 1;
}

@ -92,6 +92,7 @@ bool BmpDecoder::readHeader()
m_offset = m_strm.getDWord();
int size = m_strm.getDWord();
CV_Assert(size > 0); // overflow, 2Gb limit
if( size >= 36 )
{
@ -115,8 +116,9 @@ bool BmpDecoder::readHeader()
if( m_bpp <= 8 )
{
memset( m_palette, 0, sizeof(m_palette));
m_strm.getBytes( m_palette, (clrused == 0? 1<<m_bpp : clrused)*4 );
CV_Assert(clrused >= 0 && clrused <= 256);
memset(m_palette, 0, sizeof(m_palette));
m_strm.getBytes(m_palette, (clrused == 0? 1<<m_bpp : clrused)*4 );
iscolor = IsColorPalette( m_palette, m_bpp );
}
else if( m_bpp == 16 && m_rle_code == BMP_BITFIELDS )
@ -165,6 +167,7 @@ bool BmpDecoder::readHeader()
}
catch(...)
{
throw;
}
m_type = iscolor ? CV_8UC3 : CV_8UC1;
@ -184,7 +187,7 @@ bool BmpDecoder::readHeader()
bool BmpDecoder::readData( Mat& img )
{
uchar* data = img.data;
int step = (int)img.step;
int step = validateToInt(img.step);
bool color = img.channels() > 1;
uchar gray_palette[256];
bool result = false;
@ -197,7 +200,7 @@ bool BmpDecoder::readData( Mat& img )
if( m_origin == IPL_ORIGIN_BL )
{
data += (m_height - 1)*step;
data += (m_height - 1)*(size_t)step;
step = -step;
}
@ -282,7 +285,9 @@ bool BmpDecoder::readData( Mat& img )
else if( code > 2 ) // absolute mode
{
if( data + code*nch > line_end ) goto decode_rle4_bad;
m_strm.getBytes( src, (((code + 1)>>1) + 1) & -2 );
int sz = (((code + 1)>>1) + 1) & (~1);
CV_Assert((size_t)sz < _src.getSize());
m_strm.getBytes(src, sz);
if( color )
data = FillColorRow4( data, src, code, m_palette );
else
@ -363,6 +368,9 @@ decode_rle4_bad: ;
gray_palette[code] );
line_end_flag = y - prev_y;
if( y >= m_height )
break;
}
else if( code > 2 ) // absolute mode
{
@ -371,7 +379,9 @@ decode_rle4_bad: ;
if( data + code3 > line_end )
goto decode_rle8_bad;
m_strm.getBytes( src, (code + 1) & -2 );
int sz = (code + 1) & (~1);
CV_Assert((size_t)sz < _src.getSize());
m_strm.getBytes(src, sz);
if( color )
data = FillColorRow8( data, src, code, m_palette );
else
@ -464,17 +474,20 @@ decode_rle8_bad: ;
if( !color )
icvCvt_BGRA2Gray_8u_C4C1R( src, 0, data, 0, cvSize(m_width,1) );
else
icvCvt_BGRA2BGR_8u_C4C3R( src, 0, data, 0, cvSize(m_width,1) );
else if( img.channels() == 3 )
icvCvt_BGRA2BGR_8u_C4C3R(src, 0, data, 0, cvSize(m_width, 1));
else if( img.channels() == 4 )
memcpy(data, src, m_width * 4);
}
result = true;
break;
default:
assert(0);
CV_Error(CV_StsError, "Invalid/unsupported mode");
}
}
catch(...)
{
throw;
}
return result;
@ -517,7 +530,7 @@ bool BmpEncoder::write( const Mat& img, const vector<int>& )
int bitmapHeaderSize = 40;
int paletteSize = channels > 1 ? 0 : 1024;
int headerSize = 14 /* fileheader */ + bitmapHeaderSize + paletteSize;
int fileSize = fileStep*height + headerSize;
size_t fileSize = (size_t)fileStep*height + headerSize;
PaletteEntry palette[256];
if( m_buf )
@ -527,7 +540,7 @@ bool BmpEncoder::write( const Mat& img, const vector<int>& )
strm.putBytes( fmtSignBmp, (int)strlen(fmtSignBmp) );
// write file header
strm.putDWord( fileSize ); // file size
strm.putDWord( validateToInt(fileSize) ); // file size
strm.putDWord( 0 );
strm.putDWord( headerSize );

@ -188,7 +188,7 @@ bool ExrDecoder::readData( Mat& img )
bool color = img.channels() > 1;
uchar* data = img.data;
int step = img.step;
size_t step = img.step;
bool justcopy = m_native_depth;
bool chromatorgb = false;
bool rgbtogray = false;
@ -196,8 +196,8 @@ bool ExrDecoder::readData( Mat& img )
FrameBuffer frame;
int xsample[3] = {1, 1, 1};
char *buffer;
int xstep;
int ystep;
size_t xstep = 0;
size_t ystep = 0;
xstep = m_native_depth ? 4 : 1;
@ -589,7 +589,7 @@ bool ExrEncoder::write( const Mat& img, const vector<int>& )
bool isfloat = depth == CV_32F || depth == CV_64F;
depth = CV_ELEM_SIZE1(depth)*8;
uchar* data = img.data;
int step = img.step;
size_t step = img.step;
Header header( width, height );
Imf::PixelType type;
@ -619,7 +619,7 @@ bool ExrEncoder::write( const Mat& img, const vector<int>& )
FrameBuffer frame;
char *buffer;
int bufferstep;
size_t bufferstep;
int size;
if( type == FLOAT && depth == 32 )
{

@ -392,7 +392,7 @@ int my_jpeg_load_dht (struct jpeg_decompress_struct *info, unsigned char *dht,
bool JpegDecoder::readData( Mat& img )
{
bool result = false;
int step = (int)img.step;
size_t step = img.step;
bool color = img.channels() > 1;
if( m_state && m_width && m_height )

@ -77,7 +77,8 @@ static JasperInitializer initialize_jasper;
Jpeg2KDecoder::Jpeg2KDecoder()
{
m_signature = '\0' + string() + '\0' + string() + '\0' + string("\x0cjP \r\n\x87\n");
static const unsigned char signature_[12] = { 0, 0, 0, 0x0c, 'j', 'P', ' ', ' ', 13, 10, 0x87, 10};
m_signature = string((const char*)signature_, (const char*)signature_ + sizeof(signature_));
m_stream = 0;
m_image = 0;
}
@ -121,6 +122,8 @@ bool Jpeg2KDecoder::readHeader()
jas_image_t* image = jas_image_decode( stream, -1, 0 );
m_image = image;
if( image ) {
CV_Assert(0 == (jas_image_tlx(image)) && "not supported");
CV_Assert(0 == (jas_image_tly(image)) && "not supported");
m_width = jas_image_width( image );
m_height = jas_image_height( image );
@ -130,14 +133,31 @@ bool Jpeg2KDecoder::readHeader()
for( int i = 0; i < numcmpts; i++ )
{
int depth_i = jas_image_cmptprec( image, i );
CV_Assert(depth == 0 || depth == depth_i); // component data type mismatch
depth = MAX(depth, depth_i);
if( jas_image_cmpttype( image, i ) > 2 )
continue;
int sgnd = jas_image_cmptsgnd(image, i);
int xstart = jas_image_cmpttlx(image, i);
int xend = jas_image_cmptbrx(image, i);
int xstep = jas_image_cmpthstep(image, i);
int ystart = jas_image_cmpttly(image, i);
int yend = jas_image_cmptbry(image, i);
int ystep = jas_image_cmptvstep(image, i);
CV_Assert(sgnd == 0 && "not supported");
CV_Assert(xstart == 0 && "not supported");
CV_Assert(ystart == 0 && "not supported");
CV_Assert(xstep == 1 && "not supported");
CV_Assert(ystep == 1 && "not supported");
CV_Assert(xend == m_width);
CV_Assert(yend == m_height);
cntcmpts++;
}
if( cntcmpts )
{
CV_Assert(depth == 8 || depth == 16);
CV_Assert(cntcmpts == 1 || cntcmpts == 3);
m_type = CV_MAKETYPE(depth <= 8 ? CV_8U : CV_16U, cntcmpts > 1 ? 3 : 1);
result = true;
}
@ -150,13 +170,19 @@ bool Jpeg2KDecoder::readHeader()
return result;
}
// Free-function wrapper around Jpeg2KDecoder::close() so it can serve as a
// custom release hook for Ptr<Jpeg2KDecoder> (see specialization below).
static void Jpeg2KDecoder_close(Jpeg2KDecoder* ptr)
{
ptr->close();
}
// Specialize smart-pointer cleanup: releasing a Ptr<Jpeg2KDecoder> calls
// close() on the decoder instead of deleting it. This enables the
// `Ptr<Jpeg2KDecoder> close_this(this)` scope-guard idiom used in readData()
// to guarantee close() runs on every exit path (including exceptions).
template<> void Ptr<Jpeg2KDecoder>::delete_obj() { Jpeg2KDecoder_close(obj); }
bool Jpeg2KDecoder::readData( Mat& img )
{
Ptr<Jpeg2KDecoder> close_this(this); // auto cleanup: Jpeg2KDecoder_close
bool result = false;
int color = img.channels() > 1;
uchar* data = img.data;
int step = (int)img.step;
size_t step = img.step;
jas_stream_t* stream = (jas_stream_t*)m_stream;
jas_image_t* image = (jas_image_t*)m_image;
@ -204,11 +230,16 @@ bool Jpeg2KDecoder::readData( Mat& img )
result = true;
}
else
fprintf(stderr, "JPEG 2000 LOADER ERROR: cannot convert colorspace\n");
{
jas_cmprof_destroy(clrprof);
CV_Error(CV_StsError, "JPEG 2000 LOADER ERROR: cannot convert colorspace");
}
jas_cmprof_destroy( clrprof );
}
else
fprintf(stderr, "JPEG 2000 LOADER ERROR: unable to create colorspace\n");
{
CV_Error(CV_StsError, "JPEG 2000 LOADER ERROR: unable to create colorspace");
}
}
else
result = true;
@ -252,13 +283,13 @@ bool Jpeg2KDecoder::readData( Mat& img )
if( !jas_image_readcmpt( image, cmptlut[i], 0, 0, xend / xstep, yend / ystep, buffer ))
{
if( img.depth() == CV_8U )
result = readComponent8u( data + i, buffer, step, cmptlut[i], maxval, offset, ncmpts );
result = readComponent8u( data + i, buffer, validateToInt(step), cmptlut[i], maxval, offset, ncmpts );
else
result = readComponent16u( ((unsigned short *)data) + i, buffer, step / 2, cmptlut[i], maxval, offset, ncmpts );
result = readComponent16u( ((unsigned short *)data) + i, buffer, validateToInt(step / 2), cmptlut[i], maxval, offset, ncmpts );
if( !result )
{
i = ncmpts;
result = false;
jas_matrix_destroy( buffer );
CV_Error(CV_StsError, "JPEG2000 LOADER ERROR: failed to read component");
}
}
jas_matrix_destroy( buffer );
@ -267,10 +298,12 @@ bool Jpeg2KDecoder::readData( Mat& img )
}
}
else
fprintf(stderr, "JPEG2000 LOADER ERROR: colorspace conversion failed\n" );
{
CV_Error(CV_StsError, "JPEG2000 LOADER ERROR: colorspace conversion failed");
}
}
close();
CV_Assert(result == true);
#ifndef WIN32
if (!clr.empty())

@ -43,50 +43,58 @@
#include "precomp.hpp"
#include "utils.hpp"
#include "grfmt_pxm.hpp"
#include <iostream>
namespace cv
{
///////////////////////// P?M reader //////////////////////////////
static int ReadNumber( RLByteStream& strm, int maxdigits )
static int ReadNumber(RLByteStream& strm, int maxdigits = 0)
{
int code;
int val = 0;
int64 val = 0;
int digits = 0;
code = strm.getByte();
if( !isdigit(code))
while (!isdigit(code))
{
do
if (code == '#' )
{
if( code == '#' )
do
{
do
{
code = strm.getByte();
}
while( code != '\n' && code != '\r' );
code = strm.getByte();
}
while (code != '\n' && code != '\r');
code = strm.getByte();
while( isspace(code))
}
else if (isspace(code))
{
while (isspace(code))
code = strm.getByte();
}
while( !isdigit( code ));
else
{
#if 1
CV_Error_(CV_StsError, ("PXM: Unexpected code in ReadNumber(): 0x%x (%d)", code, code));
#else
code = strm.getByte();
#endif
}
}
do
{
val = val*10 + code - '0';
if( ++digits >= maxdigits ) break;
val = val*10 + (code - '0');
CV_Assert(val <= INT_MAX && "PXM: ReadNumber(): result is too large");
digits++;
if (maxdigits != 0 && digits >= maxdigits) break;
code = strm.getByte();
}
while( isdigit(code));
while (isdigit(code));
return val;
return (int)val;
}
@ -119,13 +127,13 @@ ImageDecoder PxMDecoder::newDecoder() const
return new PxMDecoder;
}
void PxMDecoder::close()
void PxMDecoder::close()
{
m_strm.close();
}
bool PxMDecoder::readHeader()
bool PxMDecoder::readHeader()
{
bool result = false;
@ -155,10 +163,10 @@ bool PxMDecoder::readHeader()
m_binary = code >= '4';
m_type = m_bpp > 8 ? CV_8UC3 : CV_8UC1;
m_width = ReadNumber( m_strm, INT_MAX );
m_height = ReadNumber( m_strm, INT_MAX );
m_width = ReadNumber(m_strm);
m_height = ReadNumber(m_strm);
m_maxval = m_bpp == 1 ? 1 : ReadNumber( m_strm, INT_MAX );
m_maxval = m_bpp == 1 ? 1 : ReadNumber(m_strm);
if( m_maxval > 65535 )
throw RBS_BAD_HEADER;
@ -172,8 +180,14 @@ bool PxMDecoder::readHeader()
result = true;
}
}
catch(...)
catch (const cv::Exception&)
{
throw;
}
catch (...)
{
std::cerr << "PXM::readHeader(): unknown C++ exception" << std::endl << std::flush;
throw;
}
if( !result )
@ -193,27 +207,23 @@ bool PxMDecoder::readData( Mat& img )
int step = (int)img.step;
PaletteEntry palette[256];
bool result = false;
int bit_depth = CV_ELEM_SIZE1(m_type)*8;
int src_pitch = (m_width*m_bpp*bit_depth/8 + 7)/8;
const int bit_depth = CV_ELEM_SIZE1(m_type)*8;
const int src_pitch = (m_width*m_bpp*(bit_depth/8) + 7) / 8;
int nch = CV_MAT_CN(m_type);
int width3 = m_width*nch;
int i, x, y;
if( m_offset < 0 || !m_strm.isOpened())
return false;
AutoBuffer<uchar,1024> _src(src_pitch + 32);
uchar* src = _src;
AutoBuffer<uchar,1024> _gray_palette;
uchar* gray_palette = _gray_palette;
uchar gray_palette[256] = {0};
// create LUT for converting colors
if( bit_depth == 8 )
{
_gray_palette.allocate(m_maxval + 1);
gray_palette = _gray_palette;
CV_Assert(m_maxval < 256);
for( i = 0; i <= m_maxval; i++ )
for (int i = 0; i <= m_maxval; i++)
gray_palette[i] = (uchar)((i*255/m_maxval)^(m_bpp == 1 ? 255 : 0));
FillGrayPalette( palette, m_bpp==1 ? 1 : 8 , m_bpp == 1 );
@ -227,12 +237,16 @@ bool PxMDecoder::readData( Mat& img )
{
////////////////////////// 1 BPP /////////////////////////
case 1:
CV_Assert(CV_MAT_DEPTH(m_type) == CV_8U);
if( !m_binary )
{
for( y = 0; y < m_height; y++, data += step )
AutoBuffer<uchar> _src(m_width);
uchar* src = _src;
for (int y = 0; y < m_height; y++, data += step)
{
for( x = 0; x < m_width; x++ )
src[x] = ReadNumber( m_strm, 1 ) != 0;
for (int x = 0; x < m_width; x++)
src[x] = ReadNumber(m_strm, 1) != 0;
if( color )
FillColorRow8( data, src, m_width, palette );
@ -242,7 +256,10 @@ bool PxMDecoder::readData( Mat& img )
}
else
{
for( y = 0; y < m_height; y++, data += step )
AutoBuffer<uchar> _src(src_pitch);
uchar* src = _src;
for (int y = 0; y < m_height; y++, data += step)
{
m_strm.getBytes( src, src_pitch );
@ -258,11 +275,15 @@ bool PxMDecoder::readData( Mat& img )
////////////////////////// 8 BPP /////////////////////////
case 8:
case 24:
for( y = 0; y < m_height; y++, data += step )
{
AutoBuffer<uchar> _src(std::max<size_t>(width3*2, src_pitch));
uchar* src = _src;
for (int y = 0; y < m_height; y++, data += step)
{
if( !m_binary )
{
for( x = 0; x < width3; x++ )
for (int x = 0; x < width3; x++)
{
int code = ReadNumber( m_strm, INT_MAX );
if( (unsigned)code > (unsigned)m_maxval ) code = m_maxval;
@ -277,7 +298,7 @@ bool PxMDecoder::readData( Mat& img )
m_strm.getBytes( src, src_pitch );
if( bit_depth == 16 && !isBigEndian() )
{
for( x = 0; x < width3; x++ )
for (int x = 0; x < width3; x++)
{
uchar v = src[x * 2];
src[x * 2] = src[x * 2 + 1];
@ -288,7 +309,7 @@ bool PxMDecoder::readData( Mat& img )
if( img.depth() == CV_8U && bit_depth == 16 )
{
for( x = 0; x < width3; x++ )
for (int x = 0; x < width3; x++)
{
int v = ((ushort *)src)[x];
src[x] = (uchar)(v >> 8);
@ -310,7 +331,7 @@ bool PxMDecoder::readData( Mat& img )
}
}
else
memcpy( data, src, m_width*(bit_depth/8) );
memcpy(data, src, img.elemSize1()*m_width);
}
else
{
@ -329,12 +350,19 @@ bool PxMDecoder::readData( Mat& img )
}
result = true;
break;
}
default:
assert(0);
CV_Error(CV_StsError, "m_bpp is not supported");
}
}
catch(...)
catch (const cv::Exception&)
{
throw;
}
catch (...)
{
std::cerr << "PXM::readData(): unknown exception" << std::endl << std::flush;
throw;
}
return result;
@ -410,8 +438,9 @@ bool PxMEncoder::write( const Mat& img, const vector<int>& params )
char* buffer = _buffer;
// write header;
sprintf( buffer, "P%c\n%d %d\n%d\n",
sprintf( buffer, "P%c\n# Generated by OpenCV %s\n%d %d\n%d\n",
'2' + (channels > 1 ? 1 : 0) + (isBinary ? 3 : 0),
CV_VERSION,
width, height, (1 << depth) - 1 );
strm.putBytes( buffer, (int)strlen(buffer) );

@ -120,7 +120,7 @@ bool SunRasterDecoder::readHeader()
m_type = IsColorPalette( m_palette, m_bpp ) ? CV_8UC3 : CV_8UC1;
m_offset = m_strm.getPos();
assert( m_offset == 32 + m_maplength );
CV_Assert(m_offset == 32 + m_maplength);
result = true;
}
}
@ -133,7 +133,7 @@ bool SunRasterDecoder::readHeader()
m_offset = m_strm.getPos();
assert( m_offset == 32 + m_maplength );
CV_Assert(m_offset == 32 + m_maplength);
result = true;
}
}
@ -156,7 +156,7 @@ bool SunRasterDecoder::readData( Mat& img )
{
int color = img.channels() > 1;
uchar* data = img.data;
int step = (int)img.step;
size_t step = img.step;
uchar gray_palette[256];
bool result = false;
int src_pitch = ((m_width*m_bpp + 7)/8 + 1) & -2;
@ -226,7 +226,7 @@ bool SunRasterDecoder::readData( Mat& img )
code = m_strm.getByte();
if( len > line_end - tsrc )
{
assert(0);
CV_Error(CV_StsInternal, "");
goto bad_decoding_1bpp;
}
@ -304,11 +304,11 @@ bad_decoding_1bpp:
code = m_strm.getByte();
if( color )
data = FillUniColor( data, line_end, step, width3,
data = FillUniColor( data, line_end, validateToInt(step), width3,
y, m_height, len,
m_palette[code] );
else
data = FillUniGray( data, line_end, step, width3,
data = FillUniGray( data, line_end, validateToInt(step), width3,
y, m_height, len,
gray_palette[code] );
if( y >= m_height )
@ -367,7 +367,7 @@ bad_decoding_end:
result = true;
break;
default:
assert(0);
CV_Error(CV_StsInternal, "");
}
}
catch( ... )

@ -48,12 +48,32 @@
#undef min
#undef max
#include <iostream>
/****************************************************************************************\
* Image Codecs *
\****************************************************************************************/
namespace cv
{
// TODO Add runtime configuration
#define CV_IO_MAX_IMAGE_PARAMS (50)
#define CV_IO_MAX_IMAGE_WIDTH (1<<20)
#define CV_IO_MAX_IMAGE_HEIGHT (1<<20)
#define CV_IO_MAX_IMAGE_PIXELS (1<<30) // 1 Gigapixel
// Sanity-checks decoder-reported image dimensions before any pixel buffer is
// allocated: a crafted file header could otherwise request a huge allocation.
// Fails (via CV_Assert, which throws) when width/height are non-positive or
// exceed the CV_IO_MAX_IMAGE_WIDTH / CV_IO_MAX_IMAGE_HEIGHT limits defined
// above; returns the size unchanged on success.
static Size validateInputImageSize(const Size& size)
{
CV_Assert(size.width > 0);
CV_Assert(size.width <= CV_IO_MAX_IMAGE_WIDTH);
CV_Assert(size.height > 0);
CV_Assert(size.height <= CV_IO_MAX_IMAGE_HEIGHT);
// Also bound the total pixel count: width and height may each pass their
// individual limits yet still multiply to an excessive buffer size. The
// product is computed in 64 bits so the check itself cannot overflow.
uint64 pixels = (uint64)size.width * (uint64)size.height;
CV_Assert(pixels <= CV_IO_MAX_IMAGE_PIXELS);
return size;
}
struct ImageCodecInitializer
{
ImageCodecInitializer()
@ -203,12 +223,26 @@ imread_( const string& filename, int flags, int hdrtype, Mat* mat=0 )
if( decoder.empty() )
return 0;
decoder->setSource(filename);
if( !decoder->readHeader() )
try
{
// read the header to make sure it succeeds
if (!decoder->readHeader())
return 0;
}
catch (const cv::Exception& e)
{
std::cerr << "imread_('" << filename << "'): can't read header: " << e.what() << std::endl << std::flush;
return 0;
}
catch (...)
{
std::cerr << "imread_('" << filename << "'): can't read header: unknown exception" << std::endl << std::flush;
return 0;
}
CvSize size;
size.width = decoder->width();
size.height = decoder->height();
Size size = validateInputImageSize(Size(decoder->width(), decoder->height()));
int type = decoder->type();
if( flags != -1 )
@ -242,7 +276,21 @@ imread_( const string& filename, int flags, int hdrtype, Mat* mat=0 )
temp = cvarrToMat(image);
}
if( !decoder->readData( *data ))
bool success = false;
try
{
if (decoder->readData(*data))
success = true;
}
catch (const cv::Exception& e)
{
std::cerr << "imread_('" << filename << "'): can't read data: " << e.what() << std::endl << std::flush;
}
catch (...)
{
std::cerr << "imread_('" << filename << "'): can't read data: unknown exception" << std::endl << std::flush;
}
if (!success)
{
cvReleaseImage( &image );
cvReleaseMat( &matrix );
@ -288,6 +336,7 @@ static bool imwrite_( const string& filename, const Mat& image,
}
encoder->setDestination( filename );
CV_Assert(params.size() <= CV_IO_MAX_IMAGE_PARAMS*2);
bool code = encoder->write( *pimage, params );
// CV_Assert( code );
@ -326,16 +375,34 @@ imdecode_( const Mat& buf, int flags, int hdrtype, Mat* mat=0 )
decoder->setSource(filename);
}
if( !decoder->readHeader() )
bool success = false;
try
{
if( !filename.empty() )
remove(filename.c_str());
if (decoder->readHeader())
success = true;
}
catch (const cv::Exception& e)
{
std::cerr << "imdecode_('" << filename << "'): can't read header: " << e.what() << std::endl << std::flush;
}
catch (...)
{
std::cerr << "imdecode_('" << filename << "'): can't read header: unknown exception" << std::endl << std::flush;
}
if (!success)
{
if (!filename.empty())
{
if (0 != remove(filename.c_str()))
{
std::cerr << "unable to remove temporary file:" << filename << std::endl << std::flush;
}
}
return 0;
}
CvSize size;
size.width = decoder->width();
size.height = decoder->height();
// established the required input image size
Size size = validateInputImageSize(Size(decoder->width(), decoder->height()));
int type = decoder->type();
if( flags != -1 )
@ -369,11 +436,30 @@ imdecode_( const Mat& buf, int flags, int hdrtype, Mat* mat=0 )
temp = cvarrToMat(image);
}
bool code = decoder->readData( *data );
if( !filename.empty() )
remove(filename.c_str());
success = false;
try
{
if (decoder->readData(*data))
success = true;
}
catch (const cv::Exception& e)
{
std::cerr << "imdecode_('" << filename << "'): can't read data: " << e.what() << std::endl << std::flush;
}
catch (...)
{
std::cerr << "imdecode_('" << filename << "'): can't read data: unknown exception" << std::endl << std::flush;
}
if( !code )
if (!filename.empty())
{
if (0 != remove(filename.c_str()))
{
std::cerr << "unable to remove temporary file:" << filename << std::endl << std::flush;
}
}
if (!success)
{
cvReleaseImage( &image );
cvReleaseMat( &matrix );
@ -490,7 +576,7 @@ cvSaveImage( const char* filename, const CvArr* arr, const int* _params )
if( _params )
{
for( ; _params[i] > 0; i += 2 )
;
CV_Assert(i < CV_IO_MAX_IMAGE_PARAMS*2); // Limit number of params for security reasons
}
return cv::imwrite_(filename, cv::cvarrToMat(arr),
i > 0 ? cv::vector<int>(_params, _params+i) : cv::vector<int>(),
@ -521,7 +607,7 @@ cvEncodeImage( const char* ext, const CvArr* arr, const int* _params )
if( _params )
{
for( ; _params[i] > 0; i += 2 )
;
CV_Assert(i < CV_IO_MAX_IMAGE_PARAMS*2); // Limit number of params for security reasons
}
cv::Mat img = cv::cvarrToMat(arr);
if( CV_IS_IMAGE(arr) && ((const IplImage*)arr)->origin == IPL_ORIGIN_BL )

@ -54,7 +54,7 @@
#include <string.h>
#include <limits.h>
#include <ctype.h>
#include <assert.h>
#include <assert.h> // FIX IT: remove this
#if defined WIN32 || defined WINCE
#if !defined _WIN32_WINNT

@ -42,6 +42,13 @@
#include "precomp.hpp"
#include "utils.hpp"
// Checked narrowing conversion from size_t to int: returns sz as an int, or
// fails (via CV_Assert, which throws) when the value does not survive the
// round-trip cast — i.e. it does not fit in a signed int. Used by the image
// codecs to reject step/size values that would overflow int arithmetic.
int validateToInt(size_t sz)
{
int valueInt = (int)sz;
CV_Assert((size_t)valueInt == sz);
return valueInt;
}
#define SCALE 14
#define cR (int)(0.299*(1 << SCALE) + 0.5)
#define cG (int)(0.587*(1 << SCALE) + 0.5)
@ -537,23 +544,25 @@ uchar* FillColorRow1( uchar* data, uchar* indices, int len, PaletteEntry* palett
{
uchar* end = data + len*3;
const PaletteEntry p0 = palette[0], p1 = palette[1];
while( (data += 24) < end )
{
int idx = *indices++;
*((PaletteEntry*)(data - 24)) = palette[(idx & 128) != 0];
*((PaletteEntry*)(data - 21)) = palette[(idx & 64) != 0];
*((PaletteEntry*)(data - 18)) = palette[(idx & 32) != 0];
*((PaletteEntry*)(data - 15)) = palette[(idx & 16) != 0];
*((PaletteEntry*)(data - 12)) = palette[(idx & 8) != 0];
*((PaletteEntry*)(data - 9)) = palette[(idx & 4) != 0];
*((PaletteEntry*)(data - 6)) = palette[(idx & 2) != 0];
*((PaletteEntry*)(data - 3)) = palette[(idx & 1) != 0];
*((PaletteEntry*)(data - 24)) = (idx & 128) ? p1 : p0;
*((PaletteEntry*)(data - 21)) = (idx & 64) ? p1 : p0;
*((PaletteEntry*)(data - 18)) = (idx & 32) ? p1 : p0;
*((PaletteEntry*)(data - 15)) = (idx & 16) ? p1 : p0;
*((PaletteEntry*)(data - 12)) = (idx & 8) ? p1 : p0;
*((PaletteEntry*)(data - 9)) = (idx & 4) ? p1 : p0;
*((PaletteEntry*)(data - 6)) = (idx & 2) ? p1 : p0;
*((PaletteEntry*)(data - 3)) = (idx & 1) ? p1 : p0;
}
int idx = indices[0] << 24;
int idx = indices[0];
for( data -= 24; data < end; data += 3, idx += idx )
{
PaletteEntry clr = palette[idx < 0];
const PaletteEntry clr = (idx & 128) ? p1 : p0;
WRITE_PIX( data, clr );
}
@ -565,23 +574,25 @@ uchar* FillGrayRow1( uchar* data, uchar* indices, int len, uchar* palette )
{
uchar* end = data + len;
const uchar p0 = palette[0], p1 = palette[1];
while( (data += 8) < end )
{
int idx = *indices++;
*((uchar*)(data - 8)) = palette[(idx & 128) != 0];
*((uchar*)(data - 7)) = palette[(idx & 64) != 0];
*((uchar*)(data - 6)) = palette[(idx & 32) != 0];
*((uchar*)(data - 5)) = palette[(idx & 16) != 0];
*((uchar*)(data - 4)) = palette[(idx & 8) != 0];
*((uchar*)(data - 3)) = palette[(idx & 4) != 0];
*((uchar*)(data - 2)) = palette[(idx & 2) != 0];
*((uchar*)(data - 1)) = palette[(idx & 1) != 0];
*((uchar*)(data - 8)) = (idx & 128) ? p1 : p0;
*((uchar*)(data - 7)) = (idx & 64) ? p1 : p0;
*((uchar*)(data - 6)) = (idx & 32) ? p1 : p0;
*((uchar*)(data - 5)) = (idx & 16) ? p1 : p0;
*((uchar*)(data - 4)) = (idx & 8) ? p1 : p0;
*((uchar*)(data - 3)) = (idx & 4) ? p1 : p0;
*((uchar*)(data - 2)) = (idx & 2) ? p1 : p0;
*((uchar*)(data - 1)) = (idx & 1) ? p1 : p0;
}
int idx = indices[0] << 24;
int idx = indices[0];
for( data -= 8; data < end; data++, idx += idx )
{
data[0] = palette[idx < 0];
data[0] = (idx & 128) ? p1 : p0;
}
return data;
@ -659,7 +670,7 @@ cvConvertImage( const CvArr* srcarr, CvArr* dstarr, int flags )
icvCvt_BGR2Gray_8u_C3C1R( s, s_step, d, d_step, size, swap_rb );
break;
case 33:
assert( swap_rb );
CV_Assert(swap_rb);
icvCvt_RGB2BGR_8u_C3R( s, s_step, d, d_step, size );
break;
case 41:

@ -42,6 +42,8 @@
#ifndef _UTILS_H_
#define _UTILS_H_
int validateToInt(size_t step);
struct PaletteEntry
{
unsigned char b, g, r, a;

@ -409,9 +409,143 @@ int CV_DrawingTest_C::checkLineIterator( Mat& _img )
return 0;
}
// Variant of CV_DrawingTest_CPP whose scene is shifted to x >= 32768,
// exercising the drawing primitives at coordinates beyond the 16-bit
// range (see draw() below). Only draw() differs from the base test.
class CV_DrawingTest_Far : public CV_DrawingTest_CPP
{
public:
CV_DrawingTest_Far() {}
protected:
virtual void draw(Mat& img);
};
// Renders the drawing-primitive regression scene at a 32768-pixel horizontal
// offset on a wide canvas, then crops the drawn 600x400 region back out so it
// can be compared against the same reference image as the base test. Any
// coordinate-overflow bug in the primitives shows up as a pixel mismatch.
void CV_DrawingTest_Far::draw(Mat& img)
{
Size imgSize(32768 + 600, 400);
img.create(imgSize, CV_8UC3);
// White background over the far-right working area.
vector<Point> polyline(4);
polyline[0] = Point(32768 + 0, 0);
polyline[1] = Point(imgSize.width, 0);
polyline[2] = Point(imgSize.width, imgSize.height);
polyline[3] = Point(32768 + 0, imgSize.height);
const Point* pts = &polyline[0];
int n = (int)polyline.size();
fillPoly(img, &pts, &n, 1, Scalar::all(255));
// Circles/ellipses gated on clipLine(): the "draw"/"not draw" comments mark
// whether the clip test is expected to let the primitive through.
Point p1(32768 + 1, 1), p2(32768 + 3, 3);
if (clipLine(Rect(32768 + 0, 0, imgSize.width, imgSize.height), p1, p2) && clipLine(imgSize, p1, p2))
circle(img, Point(32768 + 300, 100), 40, Scalar(0, 0, 255), 3); // draw
p2 = Point(32768 + 3, imgSize.height + 1000);
if (clipLine(Rect(32768 + 0, 0, imgSize.width, imgSize.height), p1, p2) && clipLine(imgSize, p1, p2))
circle(img, Point(65536 + 500, 300), 50, cvColorToScalar(255, CV_8UC3), 5, 8, 1); // draw
p1 = Point(imgSize.width, 1), p2 = Point(imgSize.width, 3);
if (clipLine(Rect(32768 + 0, 0, imgSize.width, imgSize.height), p1, p2) && clipLine(imgSize, p1, p2))
circle(img, Point(32768 + 390, 100), 10, Scalar(0, 0, 255), 3); // not draw
p1 = Point(imgSize.width - 1, 1), p2 = Point(imgSize.width, 3);
if (clipLine(Rect(32768 + 0, 0, imgSize.width, imgSize.height), p1, p2) && clipLine(imgSize, p1, p2))
ellipse(img, Point(32768 + 390, 100), Size(20, 30), 60, 0, 220.0, Scalar(0, 200, 0), 4); //draw
ellipse(img, RotatedRect(Point(32768 + 100, 200), Size(200, 100), 160), Scalar(200, 200, 255), 5);
// Polyline approximations of ellipse arcs, drawn both open and closed.
polyline.clear();
ellipse2Poly(Point(32768 + 430, 180), Size(100, 150), 30, 0, 150, 20, polyline);
pts = &polyline[0];
n = (int)polyline.size();
polylines(img, &pts, &n, 1, false, Scalar(0, 0, 150), 4, CV_AA);
n = 0;
for (vector<Point>::const_iterator it = polyline.begin(); n < (int)polyline.size() - 1; ++it, n++)
{
line(img, *it, *(it + 1), Scalar(50, 250, 100));
}
polyline.clear();
ellipse2Poly(Point(32768 + 500, 300), Size(50, 80), 0, 0, 180, 10, polyline);
pts = &polyline[0];
n = (int)polyline.size();
polylines(img, &pts, &n, 1, true, Scalar(100, 200, 100), 20);
fillConvexPoly(img, pts, n, Scalar(0, 80, 0));
// Multi-contour fillPoly: an outer square with an inner square hole.
polyline.resize(8);
// external rectangle
polyline[0] = Point(32768 + 0, 0);
polyline[1] = Point(32768 + 80, 0);
polyline[2] = Point(32768 + 80, 80);
polyline[3] = Point(32768 + 0, 80);
// internal rectangle
polyline[4] = Point(32768 + 20, 20);
polyline[5] = Point(32768 + 60, 20);
polyline[6] = Point(32768 + 60, 60);
polyline[7] = Point(32768 + 20, 60);
const Point* ppts[] = { &polyline[0], &polyline[0] + 4 };
int pn[] = { 4, 4 };
fillPoly(img, ppts, pn, 2, Scalar(100, 100, 0), 8, 0, Point(500, 20));
rectangle(img, Point(32768 + 0, 300), Point(32768 + 50, 398), Scalar(0, 0, 255));
// Text rendering: boxed title, then one sample line per Hershey font face.
string text1 = "OpenCV";
int baseline = 0, thickness = 3, fontFace = FONT_HERSHEY_SCRIPT_SIMPLEX;
float fontScale = 2;
Size textSize = getTextSize(text1, fontFace, fontScale, thickness, &baseline);
baseline += thickness;
Point textOrg((32768 + img.cols - textSize.width) / 2, (img.rows + textSize.height) / 2);
rectangle(img, textOrg + Point(0, baseline), textOrg + Point(textSize.width, -textSize.height), Scalar(0, 0, 255));
line(img, textOrg + Point(0, thickness), textOrg + Point(textSize.width, thickness), Scalar(0, 0, 255));
putText(img, text1, textOrg, fontFace, fontScale, Scalar(150, 0, 150), thickness, 8);
string text2 = "abcdefghijklmnopqrstuvwxyz1234567890";
Scalar color(200, 0, 0);
fontScale = 0.5, thickness = 1;
int dist = 5;
textSize = getTextSize(text2, FONT_HERSHEY_SIMPLEX, fontScale, thickness, &baseline);
textOrg = Point(32768 + 5, 5) + Point(0, textSize.height + dist);
putText(img, text2, textOrg, FONT_HERSHEY_SIMPLEX, fontScale, color, thickness, CV_AA);
fontScale = 1;
textSize = getTextSize(text2, FONT_HERSHEY_PLAIN, fontScale, thickness, &baseline);
textOrg += Point(0, textSize.height + dist);
putText(img, text2, textOrg, FONT_HERSHEY_PLAIN, fontScale, color, thickness, CV_AA);
fontScale = 0.5;
textSize = getTextSize(text2, FONT_HERSHEY_DUPLEX, fontScale, thickness, &baseline);
textOrg += Point(0, textSize.height + dist);
putText(img, text2, textOrg, FONT_HERSHEY_DUPLEX, fontScale, color, thickness, CV_AA);
textSize = getTextSize(text2, FONT_HERSHEY_COMPLEX, fontScale, thickness, &baseline);
textOrg += Point(0, textSize.height + dist);
putText(img, text2, textOrg, FONT_HERSHEY_COMPLEX, fontScale, color, thickness, CV_AA);
textSize = getTextSize(text2, FONT_HERSHEY_TRIPLEX, fontScale, thickness, &baseline);
textOrg += Point(0, textSize.height + dist);
putText(img, text2, textOrg, FONT_HERSHEY_TRIPLEX, fontScale, color, thickness, CV_AA);
fontScale = 1;
textSize = getTextSize(text2, FONT_HERSHEY_COMPLEX_SMALL, fontScale, thickness, &baseline);
textOrg += Point(0, 180) + Point(0, textSize.height + dist);
putText(img, text2, textOrg, FONT_HERSHEY_COMPLEX_SMALL, fontScale, color, thickness, CV_AA);
textSize = getTextSize(text2, FONT_HERSHEY_SCRIPT_SIMPLEX, fontScale, thickness, &baseline);
textOrg += Point(0, textSize.height + dist);
putText(img, text2, textOrg, FONT_HERSHEY_SCRIPT_SIMPLEX, fontScale, color, thickness, CV_AA);
textSize = getTextSize(text2, FONT_HERSHEY_SCRIPT_COMPLEX, fontScale, thickness, &baseline);
textOrg += Point(0, textSize.height + dist);
putText(img, text2, textOrg, FONT_HERSHEY_SCRIPT_COMPLEX, fontScale, color, thickness, CV_AA);
dist = 15, fontScale = 0.5;
textSize = getTextSize(text2, FONT_ITALIC, fontScale, thickness, &baseline);
textOrg += Point(0, textSize.height + dist);
putText(img, text2, textOrg, FONT_ITALIC, fontScale, color, thickness, CV_AA);
// Crop the shifted 600x400 scene back to the origin so it can be compared
// against the same reference output as the unshifted base test.
img = img(Rect(32768, 0, 600, 400)).clone();
}
#ifdef HAVE_JPEG
// Drawing regression tests (C++ API, C API, and the far-coordinate variant);
// only built when JPEG support is available — presumably because the stored
// reference data requires it (TODO confirm against the test fixtures).
TEST(Highgui_Drawing, cpp_regression) { CV_DrawingTest_CPP test; test.safe_run(); }
TEST(Highgui_Drawing, c_regression) { CV_DrawingTest_C test; test.safe_run(); }
TEST(Highgui_Drawing, far_regression) { CV_DrawingTest_Far test; test.safe_run(); }
#endif
class CV_FillConvexPolyTest : public cvtest::BaseTest

@ -320,6 +320,7 @@ public:
for (unsigned int i = 0; i < frameCount && next; ++i)
{
SCOPED_TRACE(cv::format("frame=%d", (int)i));
Mat actual;
(*capture) >> actual;

@ -102,6 +102,38 @@ static IppStatus sts = ippInit();
namespace cv
{
//constants for conversion from/to RGB and Gray, YUV, YCrCb according to BT.601
const float B2YF = 0.114f;
const float G2YF = 0.587f;
const float R2YF = 0.299f;
//to YCbCr
const float YCBF = 0.564f; // == 1/2/(1-B2YF)
const float YCRF = 0.713f; // == 1/2/(1-R2YF)
const int YCBI = 9241; // == YCBF*16384
const int YCRI = 11682; // == YCRF*16384
//to YUV
const float B2UF = 0.492f;
const float R2VF = 0.877f;
const int B2UI = 8061; // == B2UF*16384
const int R2VI = 14369; // == R2VF*16384
//from YUV
const float U2BF = 2.032f;
const float U2GF = -0.395f;
const float V2GF = -0.581f;
const float V2RF = 1.140f;
const int U2BI = 33292;
const int U2GI = -6472;
const int V2GI = -9519;
const int V2RI = 18678;
//from YCrCb
const float CR2RF = 1.403f;
const float CB2GF = -0.344f;
const float CR2GF = -0.714f;
const float CB2BF = 1.773f;
const int CR2RI = 22987;
const int CB2GI = -5636;
const int CR2GI = -11698;
const int CB2BI = 29049;
// computes cubic spline coefficients for a function: (xi=i, yi=f[i]), i=0..n
template<typename _Tp> static void splineBuild(const _Tp* f, int n, _Tp* tab)
@ -402,9 +434,9 @@ struct IPPColor2GrayFunctor
{
IPPColor2GrayFunctor(ippiColor2GrayFunc _func) : func(_func)
{
coeffs[0] = 0.114f;
coeffs[1] = 0.587f;
coeffs[2] = 0.299f;
coeffs[0] = B2YF;
coeffs[1] = G2YF;
coeffs[2] = R2YF;
}
bool operator()(const void *src, int srcStep, void *dst, int dstStep, int cols, int rows) const
{
@ -668,9 +700,9 @@ enum
{
yuv_shift = 14,
xyz_shift = 12,
R2Y = 4899,
G2Y = 9617,
B2Y = 1868,
R2Y = 4899, // R2YF*16384
G2Y = 9617, // G2YF*16384
B2Y = 1868, // B2YF*16384
BLOCK_SIZE = 256
};
@ -709,7 +741,7 @@ template<typename _Tp> struct RGB2Gray
RGB2Gray(int _srccn, int blueIdx, const float* _coeffs) : srccn(_srccn)
{
static const float coeffs0[] = { 0.299f, 0.587f, 0.114f };
static const float coeffs0[] = { R2YF, G2YF, B2YF };
memcpy( coeffs, _coeffs ? _coeffs : coeffs0, 3*sizeof(coeffs[0]) );
if(blueIdx == 0)
std::swap(coeffs[0], coeffs[2]);
@ -787,16 +819,18 @@ template<typename _Tp> struct RGB2YCrCb_f
{
typedef _Tp channel_type;
RGB2YCrCb_f(int _srccn, int _blueIdx, const float* _coeffs) : srccn(_srccn), blueIdx(_blueIdx)
RGB2YCrCb_f(int _srccn, int _blueIdx, bool _isCrCb) : srccn(_srccn), blueIdx(_blueIdx), isCrCb(_isCrCb)
{
static const float coeffs0[] = {0.299f, 0.587f, 0.114f, 0.713f, 0.564f};
memcpy(coeffs, _coeffs ? _coeffs : coeffs0, 5*sizeof(coeffs[0]));
static const float coeffs_crb[] = { R2YF, G2YF, B2YF, YCRF, YCBF };
static const float coeffs_yuv[] = { R2YF, G2YF, B2YF, R2VF, B2UF };
memcpy(coeffs, isCrCb ? coeffs_crb : coeffs_yuv, 5*sizeof(coeffs[0]));
if(blueIdx==0) std::swap(coeffs[0], coeffs[2]);
}
void operator()(const _Tp* src, _Tp* dst, int n) const
{
int scn = srccn, bidx = blueIdx;
int yuvOrder = !isCrCb; //1 if YUV, 0 if YCrCb
const _Tp delta = ColorChannel<_Tp>::half();
float C0 = coeffs[0], C1 = coeffs[1], C2 = coeffs[2], C3 = coeffs[3], C4 = coeffs[4];
n *= 3;
@ -805,10 +839,11 @@ template<typename _Tp> struct RGB2YCrCb_f
_Tp Y = saturate_cast<_Tp>(src[0]*C0 + src[1]*C1 + src[2]*C2);
_Tp Cr = saturate_cast<_Tp>((src[bidx^2] - Y)*C3 + delta);
_Tp Cb = saturate_cast<_Tp>((src[bidx] - Y)*C4 + delta);
dst[i] = Y; dst[i+1] = Cr; dst[i+2] = Cb;
dst[i] = Y; dst[i+1+yuvOrder] = Cr; dst[i+2-yuvOrder] = Cb;
}
}
int srccn, blueIdx;
bool isCrCb;
float coeffs[5];
};
@ -817,16 +852,18 @@ template<typename _Tp> struct RGB2YCrCb_i
{
typedef _Tp channel_type;
RGB2YCrCb_i(int _srccn, int _blueIdx, const int* _coeffs)
: srccn(_srccn), blueIdx(_blueIdx)
RGB2YCrCb_i(int _srccn, int _blueIdx, bool _isCrCb)
: srccn(_srccn), blueIdx(_blueIdx), isCrCb(_isCrCb)
{
static const int coeffs0[] = {R2Y, G2Y, B2Y, 11682, 9241};
memcpy(coeffs, _coeffs ? _coeffs : coeffs0, 5*sizeof(coeffs[0]));
static const int coeffs_crb[] = { R2Y, G2Y, B2Y, YCRI, YCBI };
static const int coeffs_yuv[] = { R2Y, G2Y, B2Y, R2VI, B2UI };
memcpy(coeffs, isCrCb ? coeffs_crb : coeffs_yuv, 5*sizeof(coeffs[0]));
if(blueIdx==0) std::swap(coeffs[0], coeffs[2]);
}
void operator()(const _Tp* src, _Tp* dst, int n) const
{
int scn = srccn, bidx = blueIdx;
int yuvOrder = !isCrCb; //1 if YUV, 0 if YCrCb
int C0 = coeffs[0], C1 = coeffs[1], C2 = coeffs[2], C3 = coeffs[3], C4 = coeffs[4];
int delta = ColorChannel<_Tp>::half()*(1 << yuv_shift);
n *= 3;
@ -836,11 +873,12 @@ template<typename _Tp> struct RGB2YCrCb_i
int Cr = CV_DESCALE((src[bidx^2] - Y)*C3 + delta, yuv_shift);
int Cb = CV_DESCALE((src[bidx] - Y)*C4 + delta, yuv_shift);
dst[i] = saturate_cast<_Tp>(Y);
dst[i+1] = saturate_cast<_Tp>(Cr);
dst[i+2] = saturate_cast<_Tp>(Cb);
dst[i+1+yuvOrder] = saturate_cast<_Tp>(Cr);
dst[i+2-yuvOrder] = saturate_cast<_Tp>(Cb);
}
}
int srccn, blueIdx;
bool isCrCb;
int coeffs[5];
};
@ -849,23 +887,25 @@ template<typename _Tp> struct YCrCb2RGB_f
{
typedef _Tp channel_type;
YCrCb2RGB_f(int _dstcn, int _blueIdx, const float* _coeffs)
: dstcn(_dstcn), blueIdx(_blueIdx)
YCrCb2RGB_f(int _dstcn, int _blueIdx, bool _isCrCb)
: dstcn(_dstcn), blueIdx(_blueIdx), isCrCb(_isCrCb)
{
static const float coeffs0[] = {1.403f, -0.714f, -0.344f, 1.773f};
memcpy(coeffs, _coeffs ? _coeffs : coeffs0, 4*sizeof(coeffs[0]));
static const float coeffs_cbr[] = {CR2RF, CR2GF, CB2GF, CB2BF};
static const float coeffs_yuv[] = { V2RF, V2GF, U2GF, U2BF};
memcpy(coeffs, isCrCb ? coeffs_cbr : coeffs_yuv, 4*sizeof(coeffs[0]));
}
void operator()(const _Tp* src, _Tp* dst, int n) const
{
int dcn = dstcn, bidx = blueIdx;
int yuvOrder = !isCrCb; //1 if YUV, 0 if YCrCb
const _Tp delta = ColorChannel<_Tp>::half(), alpha = ColorChannel<_Tp>::max();
float C0 = coeffs[0], C1 = coeffs[1], C2 = coeffs[2], C3 = coeffs[3];
n *= 3;
for(int i = 0; i < n; i += 3, dst += dcn)
{
_Tp Y = src[i];
_Tp Cr = src[i+1];
_Tp Cb = src[i+2];
_Tp Cr = src[i+1+yuvOrder];
_Tp Cb = src[i+2-yuvOrder];
_Tp b = saturate_cast<_Tp>(Y + (Cb - delta)*C3);
_Tp g = saturate_cast<_Tp>(Y + (Cb - delta)*C2 + (Cr - delta)*C1);
@ -877,6 +917,7 @@ template<typename _Tp> struct YCrCb2RGB_f
}
}
int dstcn, blueIdx;
bool isCrCb;
float coeffs[4];
};
@ -885,24 +926,26 @@ template<typename _Tp> struct YCrCb2RGB_i
{
typedef _Tp channel_type;
YCrCb2RGB_i(int _dstcn, int _blueIdx, const int* _coeffs)
: dstcn(_dstcn), blueIdx(_blueIdx)
YCrCb2RGB_i(int _dstcn, int _blueIdx, bool _isCrCb)
: dstcn(_dstcn), blueIdx(_blueIdx), isCrCb(_isCrCb)
{
static const int coeffs0[] = {22987, -11698, -5636, 29049};
memcpy(coeffs, _coeffs ? _coeffs : coeffs0, 4*sizeof(coeffs[0]));
static const int coeffs_crb[] = { CR2RI, CR2GI, CB2GI, CB2BI};
static const int coeffs_yuv[] = { V2RI, V2GI, U2GI, U2BI };
memcpy(coeffs, isCrCb ? coeffs_crb : coeffs_yuv, 4*sizeof(coeffs[0]));
}
void operator()(const _Tp* src, _Tp* dst, int n) const
{
int dcn = dstcn, bidx = blueIdx;
int yuvOrder = !isCrCb; //1 if YUV, 0 if YCrCb
const _Tp delta = ColorChannel<_Tp>::half(), alpha = ColorChannel<_Tp>::max();
int C0 = coeffs[0], C1 = coeffs[1], C2 = coeffs[2], C3 = coeffs[3];
n *= 3;
for(int i = 0; i < n; i += 3, dst += dcn)
{
_Tp Y = src[i];
_Tp Cr = src[i+1];
_Tp Cb = src[i+2];
_Tp Cr = src[i+1+yuvOrder];
_Tp Cb = src[i+2-yuvOrder];
int b = Y + CV_DESCALE((Cb - delta)*C3, yuv_shift);
int g = Y + CV_DESCALE((Cb - delta)*C2 + (Cr - delta)*C1, yuv_shift);
@ -916,6 +959,7 @@ template<typename _Tp> struct YCrCb2RGB_i
}
}
int dstcn, blueIdx;
bool isCrCb;
int coeffs[4];
};
@ -3832,10 +3876,7 @@ void cv::cvtColor( InputArray _src, OutputArray _dst, int code, int dcn )
{
CV_Assert( scn == 3 || scn == 4 );
bidx = code == CV_BGR2YCrCb || code == CV_BGR2YUV ? 0 : 2;
static const float yuv_f[] = { 0.114f, 0.587f, 0.299f, 0.492f, 0.877f };
static const int yuv_i[] = { B2Y, G2Y, R2Y, 8061, 14369 };
const float* coeffs_f = code == CV_BGR2YCrCb || code == CV_RGB2YCrCb ? 0 : yuv_f;
const int* coeffs_i = code == CV_BGR2YCrCb || code == CV_RGB2YCrCb ? 0 : yuv_i;
const bool isCrCb = (code == CV_BGR2YCrCb || code == CV_RGB2YCrCb);
_dst.create(sz, CV_MAKETYPE(depth, 3));
dst = _dst.getMat();
@ -3846,12 +3887,12 @@ void cv::cvtColor( InputArray _src, OutputArray _dst, int code, int dcn )
if((code == CV_RGB2YCrCb || code == CV_BGR2YCrCb) && tegra::cvtRGB2YCrCb(src, dst, bidx))
break;
#endif
CvtColorLoop(src, dst, RGB2YCrCb_i<uchar>(scn, bidx, coeffs_i));
CvtColorLoop(src, dst, RGB2YCrCb_i<uchar>(scn, bidx, isCrCb));
}
else if( depth == CV_16U )
CvtColorLoop(src, dst, RGB2YCrCb_i<ushort>(scn, bidx, coeffs_i));
CvtColorLoop(src, dst, RGB2YCrCb_i<ushort>(scn, bidx, isCrCb));
else
CvtColorLoop(src, dst, RGB2YCrCb_f<float>(scn, bidx, coeffs_f));
CvtColorLoop(src, dst, RGB2YCrCb_f<float>(scn, bidx, isCrCb));
}
break;
@ -3861,20 +3902,17 @@ void cv::cvtColor( InputArray _src, OutputArray _dst, int code, int dcn )
if( dcn <= 0 ) dcn = 3;
CV_Assert( scn == 3 && (dcn == 3 || dcn == 4) );
bidx = code == CV_YCrCb2BGR || code == CV_YUV2BGR ? 0 : 2;
static const float yuv_f[] = { 2.032f, -0.395f, -0.581f, 1.140f };
static const int yuv_i[] = { 33292, -6472, -9519, 18678 };
const float* coeffs_f = code == CV_YCrCb2BGR || code == CV_YCrCb2RGB ? 0 : yuv_f;
const int* coeffs_i = code == CV_YCrCb2BGR || code == CV_YCrCb2RGB ? 0 : yuv_i;
const bool isCrCb = (code == CV_YCrCb2BGR || code == CV_YCrCb2RGB);
_dst.create(sz, CV_MAKETYPE(depth, dcn));
dst = _dst.getMat();
if( depth == CV_8U )
CvtColorLoop(src, dst, YCrCb2RGB_i<uchar>(dcn, bidx, coeffs_i));
CvtColorLoop(src, dst, YCrCb2RGB_i<uchar>(dcn, bidx, isCrCb));
else if( depth == CV_16U )
CvtColorLoop(src, dst, YCrCb2RGB_i<ushort>(dcn, bidx, coeffs_i));
CvtColorLoop(src, dst, YCrCb2RGB_i<ushort>(dcn, bidx, isCrCb));
else
CvtColorLoop(src, dst, YCrCb2RGB_f<float>(dcn, bidx, coeffs_f));
CvtColorLoop(src, dst, YCrCb2RGB_f<float>(dcn, bidx, isCrCb));
}
break;

@ -314,25 +314,26 @@ if(BUILD_FAT_JAVA_LIB)
endif()
if(APPLE)
foreach(_dep ${__deps})
target_link_libraries(${the_module} -Wl,-force_load "${_dep}")
target_link_libraries(${the_module} LINK_PRIVATE -Wl,-force_load "${_dep}")
endforeach()
else()
target_link_libraries(${the_module} -Wl,-whole-archive ${__deps} -Wl,-no-whole-archive)
target_link_libraries(${the_module} LINK_PRIVATE -Wl,-whole-archive ${__deps} -Wl,-no-whole-archive)
endif()
target_link_libraries(${the_module} ${__extradeps} ${OPENCV_LINKER_LIBS})
target_link_libraries(${the_module} LINK_PRIVATE ${__extradeps} ${OPENCV_LINKER_LIBS})
else()
target_link_libraries(${the_module} ${OPENCV_MODULE_${the_module}_DEPS} ${OPENCV_LINKER_LIBS})
target_link_libraries(${the_module} LINK_PRIVATE ${OPENCV_MODULE_${the_module}_DEPS} ${OPENCV_LINKER_LIBS})
endif()
if(ANDROID)
target_link_libraries(${the_module} jnigraphics) # for Mat <=> Bitmap converters
target_link_libraries(${the_module} LINK_PUBLIC jnigraphics) # for Mat <=> Bitmap converters
target_link_libraries(${the_module} LINK_PUBLIC log dl z)
target_link_libraries(${the_module} LINK_PRIVATE ${OPENCV_LINKER_LIBS})
# force strip library after the build command
# because samples and tests will make a copy of the library before install
get_target_property(__opencv_java_location ${the_module} LOCATION)
# Turn off stripping in debug build
if ( NOT (CMAKE_BUILD_TYPE MATCHES "Debug"))
add_custom_command(TARGET ${the_module} POST_BUILD COMMAND ${CMAKE_STRIP} --strip-unneeded "${__opencv_java_location}")
add_custom_command(TARGET ${the_module} POST_BUILD COMMAND ${CMAKE_STRIP} --strip-unneeded "$<TARGET_FILE:${the_module}>")
endif()
endif()
@ -342,8 +343,6 @@ set_target_properties(${the_module} PROPERTIES
ARCHIVE_OUTPUT_DIRECTORY ${LIBRARY_OUTPUT_PATH}
LIBRARY_OUTPUT_DIRECTORY ${LIBRARY_OUTPUT_PATH}
RUNTIME_OUTPUT_DIRECTORY ${EXECUTABLE_OUTPUT_PATH}
INSTALL_NAME_DIR ${OPENCV_LIB_INSTALL_PATH}
LINK_INTERFACE_LIBRARIES ""
)
if(WIN32)

@ -170,10 +170,12 @@ static Mat createInitialImage( const Mat& img, bool doubleImageSize, float sigma
{
Mat gray, gray_fpt;
if( img.channels() == 3 || img.channels() == 4 )
{
cvtColor(img, gray, COLOR_BGR2GRAY);
gray.convertTo(gray_fpt, DataType<sift_wt>::type, SIFT_FIXPT_SCALE, 0);
}
else
img.copyTo(gray);
gray.convertTo(gray_fpt, DataType<sift_wt>::type, SIFT_FIXPT_SCALE, 0);
img.convertTo(gray_fpt, DataType<sift_wt>::type, SIFT_FIXPT_SCALE, 0);
float sig_diff;
@ -181,7 +183,7 @@ static Mat createInitialImage( const Mat& img, bool doubleImageSize, float sigma
{
sig_diff = sqrtf( std::max(sigma * sigma - SIFT_INIT_SIGMA * SIFT_INIT_SIGMA * 4, 0.01f) );
Mat dbl;
resize(gray_fpt, dbl, Size(gray.cols*2, gray.rows*2), 0, 0, INTER_LINEAR);
resize(gray_fpt, dbl, Size(gray_fpt.cols*2, gray_fpt.rows*2), 0, 0, INTER_LINEAR);
GaussianBlur(dbl, dbl, Size(), sig_diff, sig_diff);
return dbl;
}

@ -115,9 +115,42 @@ enum
BLOCK_SIZE = 256
};
//constants for conversion from/to RGB and Gray, YUV, YCrCb according to BT.601
#define B2YF 0.114f
#define G2YF 0.587f
#define R2YF 0.299f
//to YCbCr
#define YCBF 0.564f
#define YCRF 0.713f
#define YCBI 9241
#define YCRI 11682
//to YUV
#define B2UF 0.492f
#define R2VF 0.877f
#define B2UI 8061
#define R2VI 14369
//from YUV
#define U2BF 2.032f
#define U2GF -0.395f
#define V2GF -0.581f
#define V2RF 1.140f
#define U2BI 33292
#define U2GI -6472
#define V2GI -9519
#define V2RI 18678
//from YCrCb
#define CR2RF 1.403f
#define CB2GF -0.344f
#define CR2GF -0.714f
#define CB2BF 1.773f
#define CR2RI 22987
#define CB2GI -5636
#define CR2GI -11698
#define CB2BI 29049
///////////////////////////////////// RGB <-> GRAY //////////////////////////////////////
__constant float c_RGB2GrayCoeffs_f[3] = { 0.114f, 0.587f, 0.299f };
__constant float c_RGB2GrayCoeffs_f[3] = { B2YF, G2YF, R2YF };
__constant int c_RGB2GrayCoeffs_i[3] = { B2Y, G2Y, R2Y };
__kernel void RGB2Gray(int cols, int rows, int src_step, int dst_step,
@ -135,7 +168,7 @@ __kernel void RGB2Gray(int cols, int rows, int src_step, int dst_step,
#ifndef INTEL_DEVICE
#ifdef DEPTH_5
dst[dst_idx] = src[src_idx + bidx] * 0.114f + src[src_idx + 1] * 0.587f + src[src_idx + (bidx^2)] * 0.299f;
dst[dst_idx] = src[src_idx + bidx] * B2YF + src[src_idx + 1] * G2YF + src[src_idx + (bidx^2)] * R2YF;
#else
dst[dst_idx] = (DATA_TYPE)CV_DESCALE((src[src_idx + bidx] * B2Y + src[src_idx + 1] * G2Y + src[src_idx + (bidx^2)] * R2Y), yuv_shift);
#endif
@ -221,8 +254,8 @@ __kernel void Gray2RGB(int cols, int rows, int src_step, int dst_step,
///////////////////////////////////// RGB <-> YUV //////////////////////////////////////
__constant float c_RGB2YUVCoeffs_f[5] = { 0.114f, 0.587f, 0.299f, 0.492f, 0.877f };
__constant int c_RGB2YUVCoeffs_i[5] = { B2Y, G2Y, R2Y, 8061, 14369 };
__constant float c_RGB2YUVCoeffs_f[5] = { B2YF, G2YF, R2YF, B2UF, R2VF };
__constant int c_RGB2YUVCoeffs_i[5] = { B2Y, G2Y, R2Y, B2UI, R2VI };
__kernel void RGB2YUV(int cols, int rows, int src_step, int dst_step,
__global const DATA_TYPE* src, __global DATA_TYPE* dst,
@ -252,13 +285,13 @@ __kernel void RGB2YUV(int cols, int rows, int src_step, int dst_step,
const DATA_TYPE rgb[] = {src_ptr[0], src_ptr[1], src_ptr[2]};
#ifdef DEPTH_5
float Y = rgb[0] * coeffs[bidx^2] + rgb[1] * coeffs[1] + rgb[2] * coeffs[bidx];
float U = (rgb[bidx^2] - Y) * coeffs[3] + HALF_MAX;
float V = (rgb[bidx] - Y) * coeffs[4] + HALF_MAX;
float Y = rgb[0] * coeffs[bidx] + rgb[1] * coeffs[1] + rgb[2] * coeffs[bidx^2];
float U = (rgb[bidx] - Y) * coeffs[3] + HALF_MAX;
float V = (rgb[bidx^2] - Y) * coeffs[4] + HALF_MAX;
#else
int Y = CV_DESCALE(rgb[0] * coeffs[bidx^2] + rgb[1] * coeffs[1] + rgb[2] * coeffs[bidx], yuv_shift);
int U = CV_DESCALE((rgb[bidx^2] - Y) * coeffs[3] + delta, yuv_shift);
int V = CV_DESCALE((rgb[bidx] - Y) * coeffs[4] + delta, yuv_shift);
int Y = CV_DESCALE(rgb[0] * coeffs[bidx] + rgb[1] * coeffs[1] + rgb[2] * coeffs[bidx^2], yuv_shift);
int U = CV_DESCALE((rgb[bidx] - Y) * coeffs[3] + delta, yuv_shift);
int V = CV_DESCALE((rgb[bidx^2] - Y) * coeffs[4] + delta, yuv_shift);
#endif
dst_ptr[0] = SAT_CAST( Y );
@ -274,23 +307,22 @@ __kernel void RGB2YUV(int cols, int rows, int src_step, int dst_step,
const float2 c1 = r0.s15;
const float2 c2 = r0.s26;
const float2 Y = (bidx == 0) ? (c0 * coeffs[2] + c1 * coeffs[1] + c2 * coeffs[0]) : (c0 * coeffs[0] + c1 * coeffs[1] + c2 * coeffs[2]);
const float2 U = (bidx == 0) ? ((c2 - Y) * coeffs[3] + HALF_MAX) : ((c0 - Y) * coeffs[3] + HALF_MAX);
const float2 V = (bidx == 0) ? ((c0 - Y) * coeffs[4] + HALF_MAX) : ((c2 - Y) * coeffs[4] + HALF_MAX);
const float2 Y = (bidx == 0) ? (c0 * coeffs[0] + c1 * coeffs[1] + c2 * coeffs[2]) : (c0 * coeffs[2] + c1 * coeffs[1] + c2 * coeffs[0]);
const float2 U = (bidx == 0) ? ((c0 - Y) * coeffs[3] + HALF_MAX) : ((c2 - Y) * coeffs[3] + HALF_MAX);
const float2 V = (bidx == 0) ? ((c2 - Y) * coeffs[4] + HALF_MAX) : ((c0 - Y) * coeffs[4] + HALF_MAX);
#else
const int2 c0 = convert_int2(r0.s04);
const int2 c1 = convert_int2(r0.s15);
const int2 c2 = convert_int2(r0.s26);
const int2 yi = (bidx == 0) ? CV_DESCALE(c0 * coeffs[2] + c1 * coeffs[1] + c2 * coeffs[0], yuv_shift) : CV_DESCALE(c0 * coeffs[0] + c1 * coeffs[1] + c2 * coeffs[2], yuv_shift);
const int2 ui = (bidx == 0) ? CV_DESCALE((c2 - yi) * coeffs[3] + delta, yuv_shift) : CV_DESCALE((c0 - yi) * coeffs[3] + delta, yuv_shift);
const int2 vi = (bidx == 0) ? CV_DESCALE((c0 - yi) * coeffs[4] + delta, yuv_shift) : CV_DESCALE((c2 - yi) * coeffs[4] + delta, yuv_shift);
const int2 yi = (bidx == 0) ? CV_DESCALE(c0 * coeffs[0] + c1 * coeffs[1] + c2 * coeffs[2], yuv_shift) : CV_DESCALE(c0 * coeffs[2] + c1 * coeffs[1] + c2 * coeffs[0], yuv_shift);
const int2 ui = (bidx == 0) ? CV_DESCALE((c0 - yi) * coeffs[3] + delta, yuv_shift) : CV_DESCALE((c2 - yi) * coeffs[3] + delta, yuv_shift);
const int2 vi = (bidx == 0) ? CV_DESCALE((c2 - yi) * coeffs[4] + delta, yuv_shift) : CV_DESCALE((c0 - yi) * coeffs[4] + delta, yuv_shift);
const VECTOR2 Y = SAT_CAST2(yi);
const VECTOR2 U = SAT_CAST2(ui);
const VECTOR2 V = SAT_CAST2(vi);
#endif
vstore8((VECTOR8)(Y.s0, U.s0, V.s0, 0, Y.s1, U.s1, V.s1, 0), 0, dst_ptr);
}
#elif (4 == pixels_per_work_item)
@ -302,14 +334,13 @@ __kernel void RGB2YUV(int cols, int rows, int src_step, int dst_step,
const int4 c1 = convert_int4(r0.s159d);
const int4 c2 = convert_int4(r0.s26ae);
const int4 yi = (bidx == 0) ? CV_DESCALE(c0 * coeffs[2] + c1 * coeffs[1] + c2 * coeffs[0], yuv_shift) : CV_DESCALE(c0 * coeffs[0] + c1 * coeffs[1] + c2 * coeffs[2], yuv_shift);
const int4 ui = (bidx == 0) ? CV_DESCALE((c2 - yi) * coeffs[3] + delta, yuv_shift) : CV_DESCALE((c0 - yi) * coeffs[3] + delta, yuv_shift);
const int4 vi = (bidx == 0) ? CV_DESCALE((c0 - yi) * coeffs[4] + delta, yuv_shift) : CV_DESCALE((c2 - yi) * coeffs[4] + delta, yuv_shift);
const int4 yi = (bidx == 0) ? CV_DESCALE(c0 * coeffs[0] + c1 * coeffs[1] + c2 * coeffs[2], yuv_shift) : CV_DESCALE(c0 * coeffs[2] + c1 * coeffs[1] + c2 * coeffs[0], yuv_shift);
const int4 ui = (bidx == 0) ? CV_DESCALE((c0 - yi) * coeffs[3] + delta, yuv_shift) : CV_DESCALE((c2 - yi) * coeffs[3] + delta, yuv_shift);
const int4 vi = (bidx == 0) ? CV_DESCALE((c2 - yi) * coeffs[4] + delta, yuv_shift) : CV_DESCALE((c0 - yi) * coeffs[4] + delta, yuv_shift);
const VECTOR4 Y = SAT_CAST4(yi);
const VECTOR4 U = SAT_CAST4(ui);
const VECTOR4 V = SAT_CAST4(vi);
vstore16((VECTOR16)(Y.s0, U.s0, V.s0, 0, Y.s1, U.s1, V.s1, 0, Y.s2, U.s2, V.s2, 0, Y.s3, U.s3, V.s3, 0), 0, dst_ptr);
#endif
}
@ -317,8 +348,8 @@ __kernel void RGB2YUV(int cols, int rows, int src_step, int dst_step,
}
}
__constant float c_YUV2RGBCoeffs_f[5] = { 2.032f, -0.395f, -0.581f, 1.140f };
__constant int c_YUV2RGBCoeffs_i[5] = { 33292, -6472, -9519, 18678 };
__constant float c_YUV2RGBCoeffs_f[5] = { U2BF, U2GF, V2GF, V2RF };
__constant int c_YUV2RGBCoeffs_i[5] = { U2BI, U2GI, V2GI, V2RI };
__kernel void YUV2RGB(int cols, int rows, int src_step, int dst_step,
__global const DATA_TYPE* src, __global DATA_TYPE* dst,
@ -347,13 +378,13 @@ __kernel void YUV2RGB(int cols, int rows, int src_step, int dst_step,
const DATA_TYPE yuv[] = {src_ptr[0], src_ptr[1], src_ptr[2]};
#ifdef DEPTH_5
float B = yuv[0] + (yuv[2] - HALF_MAX) * coeffs[3];
float G = yuv[0] + (yuv[2] - HALF_MAX) * coeffs[2] + (yuv[1] - HALF_MAX) * coeffs[1];
float R = yuv[0] + (yuv[1] - HALF_MAX) * coeffs[0];
float B = yuv[0] + (yuv[1] - HALF_MAX) * coeffs[0];
float G = yuv[0] + (yuv[1] - HALF_MAX) * coeffs[1] + (yuv[2] - HALF_MAX) * coeffs[2];
float R = yuv[0] + (yuv[2] - HALF_MAX) * coeffs[3];
#else
int B = yuv[0] + CV_DESCALE((yuv[2] - HALF_MAX) * coeffs[3], yuv_shift);
int G = yuv[0] + CV_DESCALE((yuv[2] - HALF_MAX) * coeffs[2] + (yuv[1] - HALF_MAX) * coeffs[1], yuv_shift);
int R = yuv[0] + CV_DESCALE((yuv[1] - HALF_MAX) * coeffs[0], yuv_shift);
int B = yuv[0] + CV_DESCALE((yuv[1] - HALF_MAX) * coeffs[0], yuv_shift);
int G = yuv[0] + CV_DESCALE((yuv[1] - HALF_MAX) * coeffs[1] + (yuv[2] - HALF_MAX) * coeffs[2], yuv_shift);
int R = yuv[0] + CV_DESCALE((yuv[2] - HALF_MAX) * coeffs[3], yuv_shift);
#endif
dst_ptr[bidx] = SAT_CAST( B );
@ -372,17 +403,17 @@ __kernel void YUV2RGB(int cols, int rows, int src_step, int dst_step,
const float2 U = r0.s15;
const float2 V = r0.s26;
const float2 c0 = (bidx == 0) ? (Y + (V - HALF_MAX) * coeffs[3]) : (Y + (U - HALF_MAX) * coeffs[0]);
const float2 c0 = (bidx != 0) ? (Y + (V - HALF_MAX) * coeffs[3]) : (Y + (U - HALF_MAX) * coeffs[0]);
const float2 c1 = Y + (V - HALF_MAX) * coeffs[2] + (U - HALF_MAX) * coeffs[1];
const float2 c2 = (bidx == 0) ? (Y + (U - HALF_MAX) * coeffs[0]) : (Y + (V - HALF_MAX) * coeffs[3]);
const float2 c2 = (bidx != 0) ? (Y + (U - HALF_MAX) * coeffs[0]) : (Y + (V - HALF_MAX) * coeffs[3]);
#else
const int2 Y = convert_int2(r0.s04);
const int2 U = convert_int2(r0.s15);
const int2 V = convert_int2(r0.s26);
const int2 c0i = (bidx == 0) ? (Y + CV_DESCALE((V - HALF_MAX) * coeffs[3], yuv_shift)) : (Y + CV_DESCALE((U - HALF_MAX) * coeffs[0], yuv_shift));
const int2 c0i = (bidx != 0) ? (Y + CV_DESCALE((V - HALF_MAX) * coeffs[3], yuv_shift)) : (Y + CV_DESCALE((U - HALF_MAX) * coeffs[0], yuv_shift));
const int2 c1i = Y + CV_DESCALE((V - HALF_MAX) * coeffs[2] + (U - HALF_MAX) * coeffs[1], yuv_shift);
const int2 c2i = (bidx == 0) ? (Y + CV_DESCALE((U - HALF_MAX) * coeffs[0], yuv_shift)) : (Y + CV_DESCALE((V - HALF_MAX) * coeffs[3], yuv_shift));
const int2 c2i = (bidx != 0) ? (Y + CV_DESCALE((U - HALF_MAX) * coeffs[0], yuv_shift)) : (Y + CV_DESCALE((V - HALF_MAX) * coeffs[3], yuv_shift));
const VECTOR2 c0 = SAT_CAST2(c0i);
const VECTOR2 c1 = SAT_CAST2(c1i);
@ -404,9 +435,9 @@ __kernel void YUV2RGB(int cols, int rows, int src_step, int dst_step,
const int4 U = convert_int4(r0.s159d);
const int4 V = convert_int4(r0.s26ae);
const int4 c0i = (bidx == 0) ? (Y + CV_DESCALE((V - HALF_MAX) * coeffs[3], yuv_shift)) : (Y + CV_DESCALE((U - HALF_MAX) * coeffs[0], yuv_shift));
const int4 c0i = (bidx != 0) ? (Y + CV_DESCALE((V - HALF_MAX) * coeffs[3], yuv_shift)) : (Y + CV_DESCALE((U - HALF_MAX) * coeffs[0], yuv_shift));
const int4 c1i = Y + CV_DESCALE((V - HALF_MAX) * coeffs[2] + (U - HALF_MAX) * coeffs[1], yuv_shift);
const int4 c2i = (bidx == 0) ? (Y + CV_DESCALE((U - HALF_MAX) * coeffs[0], yuv_shift)) : (Y + CV_DESCALE((V - HALF_MAX) * coeffs[3], yuv_shift));
const int4 c2i = (bidx != 0) ? (Y + CV_DESCALE((U - HALF_MAX) * coeffs[0], yuv_shift)) : (Y + CV_DESCALE((V - HALF_MAX) * coeffs[3], yuv_shift));
const VECTOR4 c0 = SAT_CAST4(c0i);
const VECTOR4 c1 = SAT_CAST4(c1i);
@ -484,8 +515,8 @@ __kernel void YUV2RGBA_NV12(int cols, int rows, int src_step, int dst_step,
///////////////////////////////////// RGB <-> YCrCb //////////////////////////////////////
__constant float c_RGB2YCrCbCoeffs_f[5] = {0.299f, 0.587f, 0.114f, 0.713f, 0.564f};
__constant int c_RGB2YCrCbCoeffs_i[5] = {R2Y, G2Y, B2Y, 11682, 9241};
__constant float c_RGB2YCrCbCoeffs_f[5] = {R2YF, G2YF, B2YF, YCRF, YCBF};
__constant int c_RGB2YCrCbCoeffs_i[5] = {R2Y, G2Y, B2Y, YCRI, YCBI};
__kernel void RGB2YCrCb(int cols, int rows, int src_step, int dst_step,
__global const DATA_TYPE* src, __global DATA_TYPE* dst,
@ -579,8 +610,8 @@ __kernel void RGB2YCrCb(int cols, int rows, int src_step, int dst_step,
}
}
__constant float c_YCrCb2RGBCoeffs_f[4] = { 1.403f, -0.714f, -0.344f, 1.773f };
__constant int c_YCrCb2RGBCoeffs_i[4] = { 22987, -11698, -5636, 29049 };
__constant float c_YCrCb2RGBCoeffs_f[4] = { CR2RF, CR2GF, CB2GF, CB2BF };
__constant int c_YCrCb2RGBCoeffs_i[4] = { CR2RI, CR2GI, CB2GI, CB2BI };
__kernel void YCrCb2RGB(int cols, int rows, int src_step, int dst_step,
__global const DATA_TYPE* src, __global DATA_TYPE* dst,

@ -40,7 +40,7 @@ Modification of ``fastNlMeansDenoising`` function for colored images
:param h: Parameter regulating filter strength for luminance component. Bigger h value perfectly removes noise but also removes image details, smaller h value preserves details but also preserves some noise
:param hForColorComponents: The same as h but for color components. For most images value equals 10 will be enought to remove colored noise and do not distort colors
:param hColor: The same as h but for color components. For most images, a value of 10 will be enough to remove colored noise and not distort colors
The function converts image to CIELAB colorspace and then separately denoise L and AB components with given h parameters using ``fastNlMeansDenoising`` function.
@ -85,6 +85,6 @@ Modification of ``fastNlMeansDenoisingMulti`` function for colored images sequen
:param h: Parameter regulating filter strength for luminance component. Bigger h value perfectly removes noise but also removes image details, smaller h value preserves details but also preserves some noise.
:param hForColorComponents: The same as h but for color components.
:param hColor: The same as h but for color components.
The function converts images to CIELAB colorspace and then separately denoise L and AB components with given h parameters using ``fastNlMeansDenoisingMulti`` function.

@ -97,6 +97,8 @@ if(MSVC AND NOT ENABLE_NOISY_WARNINGS)
string(REPLACE "/W4" "/W3" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
endif()
ocv_warnings_disable(CMAKE_CXX_FLAGS -Wundef) # accurate guard via #pragma doesn't work (C++ preprocessor doesn't handle #pragma)
if(MSVC AND NOT BUILD_SHARED_LIBS)
set_target_properties(${the_module} PROPERTIES LINK_FLAGS "/NODEFAULTLIB:atlthunk.lib /NODEFAULTLIB:atlsd.lib /DEBUG")
endif()

@ -543,6 +543,7 @@ void FeaturesMatcher::operator ()(const vector<ImageFeatures> &features, vector<
if (features[i].keypoints.size() > 0 && features[j].keypoints.size() > 0 && mask_(i, j))
near_pairs.push_back(make_pair(i, j));
pairwise_matches.clear(); // clear history values
pairwise_matches.resize(num_images * num_images);
MatchPairsBody body(*this, features, pairwise_matches, near_pairs);

@ -7,7 +7,7 @@ set(the_description "Viz")
ocv_define_module(viz opencv_core ${VTK_LIBRARIES})
if(APPLE AND BUILD_opencv_viz)
target_link_libraries(opencv_viz "-framework Cocoa")
target_link_libraries(opencv_viz LINK_PRIVATE "-framework Cocoa")
endif()
if(TARGET opencv_test_viz)

@ -49,6 +49,7 @@ def build_opencv(srcroot, buildroot, target, arch):
# for some reason, if you do not specify CMAKE_BUILD_TYPE, it puts libs to "RELEASE" rather than "Release"
cmakeargs = ("-GXcode " +
"-DCMAKE_BUILD_TYPE=Release " +
("-DIOS_ARCH=%s " % arch) +
"-DCMAKE_TOOLCHAIN_FILE=%s/platforms/ios/cmake/Toolchains/Toolchain-%s_Xcode.cmake " +
"-DBUILD_opencv_world=ON " +
"-DCMAKE_C_FLAGS=\"-Wno-implicit-function-declaration\" " +

@ -43,7 +43,10 @@ set (no_warn "-Wno-unused-function -Wno-overloaded-virtual")
set (CMAKE_C_FLAGS "${no_warn}")
set (CMAKE_CXX_FLAGS "-stdlib=libc++ -fvisibility=hidden -fvisibility-inlines-hidden ${no_warn}")
set (CMAKE_CXX_FLAGS_RELEASE "-DNDEBUG -O3 -fomit-frame-pointer -ffast-math")
set(CMAKE_CXX_FLAGS_RELEASE "-DNDEBUG -O3 -ffast-math")
if(NOT IOS_ARCH STREQUAL "armv7" AND NOT IOS_ARCH STREQUAL "armv7s")
set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} -fomit-frame-pointer")
endif()
if (HAVE_FLAG_SEARCH_PATHS_FIRST)
set (CMAKE_C_LINK_FLAGS "-Wl,-search_paths_first ${CMAKE_C_LINK_FLAGS}")

@ -2,7 +2,10 @@ message (STATUS "Setting up iPhoneOS toolchain")
set (IPHONEOS TRUE)
# Standard settings
set (CMAKE_SYSTEM_NAME iOS)
set(CMAKE_SYSTEM_NAME iOS)
set(CMAKE_SYSTEM_VERSION 6.0)
set(CMAKE_SYSTEM_PROCESSOR "${IOS_ARCH}")
# Include extra modules for the iOS platform files
set (CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_CURRENT_SOURCE_DIR}/platforms/ios/cmake/Modules")

@ -2,7 +2,10 @@ message (STATUS "Setting up iPhoneSimulator toolchain")
set (IPHONESIMULATOR TRUE)
# Standard settings
set (CMAKE_SYSTEM_NAME iOS)
set(CMAKE_SYSTEM_NAME iOS)
set(CMAKE_SYSTEM_VERSION 6.0)
set(CMAKE_SYSTEM_PROCESSOR "${IOS_ARCH}")
# Include extra modules for the iOS platform files
set (CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_CURRENT_SOURCE_DIR}/platforms/ios/cmake/Modules")

@ -101,7 +101,7 @@ public:
}
else
{
if (readStringList(input, imageList))
if (isListOfImages(input) && readStringList(input, imageList))
{
inputType = IMAGE_LIST;
nrFrames = (nrFrames < (int)imageList.size()) ? nrFrames : (int)imageList.size();
@ -169,6 +169,16 @@ public:
l.push_back((string)*it);
return true;
}
// Returns true when 'filename' looks like an XML/YAML settings file holding an
// image list (its name contains ".xml", ".yaml" or ".yml").
// Note: this is a substring test, not a strict suffix check — e.g.
// "backup.xml.old" also matches, exactly as in the original logic.
static bool isListOfImages( const std::string& filename)
{
    const bool hasXml  = filename.find(".xml")  != std::string::npos;
    const bool hasYaml = filename.find(".yaml") != std::string::npos;
    const bool hasYml  = filename.find(".yml")  != std::string::npos;
    return hasXml || hasYaml || hasYml;
}
public:
Size boardSize; // The size of the board -> Number of items by width and height
Pattern calibrationPattern;// One of the Chessboard, circles, or asymmetric circle pattern

@ -90,6 +90,12 @@ if(BUILD_EXAMPLES AND OCV_DEPENDENCIES_FOUND)
list(REMOVE_ITEM all_samples "driver_api_multi.cpp")
list(REMOVE_ITEM all_samples "driver_api_stereo_multi.cpp")
endif()
if(NOT HAVE_CUDA
OR NOT HAVE_TBB
OR OpenCV_FOUND # via find_package() there is no access to cvconfig.h
)
list(REMOVE_ITEM all_samples "pyrlk_optical_flow_multithreading.cpp")
endif()
foreach(sample_filename ${all_samples})
get_filename_component(sample ${sample_filename} NAME_WE)
@ -111,6 +117,9 @@ if (OCV_DEPENDENCIES_FOUND AND INSTALL_C_EXAMPLES AND NOT WIN32)
list_filterout(install_list ".*driver_api_multi.cpp")
list_filterout(install_list ".*driver_api_stereo_multi.cpp")
endif()
if(NOT HAVE_CUDA OR NOT HAVE_TBB)
list(REMOVE_ITEM install_list "pyrlk_optical_flow_multithreading.cpp")
endif()
install(FILES ${install_list}
DESTINATION "${OPENCV_SAMPLES_SRC_INSTALL_PATH}/gpu"
PERMISSIONS OWNER_READ OWNER_WRITE GROUP_READ WORLD_READ COMPONENT samples)

@ -0,0 +1,272 @@
#include <iostream>
#include <vector>
#include <sstream>
#include "opencv2/core/core.hpp"
#include "cvconfig.h"
#ifdef HAVE_TBB
#include <tbb/parallel_for_each.h>
#include <tbb/task_scheduler_init.h>
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/video/video.hpp"
#include "opencv2/gpu/gpu.hpp"
using namespace std;
using namespace cv;
using namespace cv::gpu;
// Copies a single-row GPU matrix of tracked points into a host-side vector.
// d_mat is expected to be a 1 x N matrix of CV_32FC2 (N may be 0).
static void download(const GpuMat& d_mat, vector<Point2f>& vec)
{
    vec.resize(d_mat.cols);
    if (vec.empty())
        return; // &vec[0] on an empty vector is undefined behavior
    // Wrap the host buffer so GpuMat::download writes straight into 'vec'.
    Mat mat(1, d_mat.cols, CV_32FC2, (void*)&vec[0]);
    d_mat.download(mat);
}
// Copies a single-row GPU status matrix into a host-side byte vector.
// d_mat is expected to be a 1 x N matrix of CV_8UC1 (N may be 0).
static void download(const GpuMat& d_mat, vector<uchar>& vec)
{
    vec.resize(d_mat.cols);
    if (vec.empty())
        return; // &vec[0] on an empty vector is undefined behavior
    // Wrap the host buffer so GpuMat::download writes straight into 'vec'.
    Mat mat(1, d_mat.cols, CV_8UC1, (void*)&vec[0]);
    d_mat.download(mat);
}
// Visualizes sparse optical flow: for each successfully tracked point, draws
// an arrow from its previous position toward (and past) its new position.
// Displacements shorter than one pixel are skipped; the shaft is stretched to
// three times the displacement so small motions remain visible.
static void drawArrows(Mat& frame, const vector<Point2f>& prevPts, const vector<Point2f>& nextPts, const vector<uchar>& status, Scalar line_color = Scalar(0, 0, 255))
{
    for (size_t i = 0; i < prevPts.size(); ++i)
    {
        if (!status[i])
            continue;

        int line_thickness = 1;

        Point from = prevPts[i];
        Point to = nextPts[i];

        double angle = atan2((double) from.y - to.y, (double) from.x - to.x);

        double hypotenuse = sqrt( (double)(from.y - to.y)*(from.y - to.y) + (double)(from.x - to.x)*(from.x - to.x) );

        if (hypotenuse < 1.0)
            continue;

        // Lengthen the shaft to 3x the displacement.
        to.x = (int) (from.x - 3 * hypotenuse * cos(angle));
        to.y = (int) (from.y - 3 * hypotenuse * sin(angle));

        // Main shaft of the arrow.
        line(frame, from, to, line_color, line_thickness);

        // Arrow head: two short strokes at +/- 45 degrees off the shaft,
        // scaled so the tips look proportional to the shaft.
        from.x = (int) (to.x + 9 * cos(angle + CV_PI / 4));
        from.y = (int) (to.y + 9 * sin(angle + CV_PI / 4));
        line(frame, from, to, line_color, line_thickness);

        from.x = (int) (to.x + 9 * cos(angle - CV_PI / 4));
        from.y = (int) (to.y + 9 * sin(angle - CV_PI / 4));
        line(frame, from, to, line_color, line_thickness);
    }
}
// Restricts x to the closed interval [a, b] (assumes a <= b).
template <typename T> inline T clamp (T x, T a, T b)
{
    if (!(x > a))
        return a;
    if (!(x < b))
        return b;
    return x;
}
template <typename T> inline T mapValue(T x, T a, T b, T c, T d)
{
x = clamp(x, a, b);
return c + (d - c) * (x - a) / (b - a);
}
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
// Per-task inputs/outputs for one pyrLK_task. main() fills one slot per
// TBB task, cloning the frames and GPU buffers so tasks never share
// mutable state.
struct S_Thread_data
{
    Size winSize;        // PyrLK search window size (square: winSize x winSize)
    int maxLevel;        // PyrLK pyramid max level
    int iters;           // PyrLK iteration count
    Stream stream;       // CUDA stream this task's GPU work is submitted on
    Mat frame0;          // first frame (private clone); arrows are drawn into it
    Mat frame1;          // second frame (private clone)
    Mat frame1Gray;      // grayscale frame fed to PyrLK when useGray is set
    GpuMat d_frame0Gray; // grayscale first frame already on the GPU
    GpuMat d_prevPts;    // feature points detected in frame0 (1xN, on GPU)
    bool useGray;        // true: track on grayscale inputs instead of color
};
// One work item for the TBB pool: runs a sparse PyrLK optical-flow
// computation on the GPU using the parameters/buffers of its attached
// S_Thread_data slot, then draws the resulting motion arrows into that
// slot's frame0 image.
struct pyrLK_task
{
    // n is an informational task index; the data slot is attached after
    // construction through the non-owning _thread_data pointer.
    pyrLK_task(size_t n):
        _n(n),
        _thread_data(NULL){}
    // Executed on a TBB worker thread via invoker<pyrLK_task>.
    void operator()()
    {
        // Sparse
        // Per-task flow engine, configured from the shared parameters so
        // no tracker state is shared between tasks.
        PyrLKOpticalFlow d_pyrLK;
        d_pyrLK.winSize.width = _thread_data->winSize.width;
        d_pyrLK.winSize.height = _thread_data->winSize.height;
        d_pyrLK.maxLevel = _thread_data->maxLevel;
        d_pyrLK.iters = _thread_data->iters;
        // Upload this task's private frame copies to the GPU.
        GpuMat d_frame0(_thread_data->frame0);
        GpuMat d_frame1(_thread_data->frame1);
        GpuMat d_frame1Gray(_thread_data->frame1Gray);
        GpuMat d_nextPts;
        GpuMat d_status;
        bool useGray = _thread_data->useGray;
        // NOTE(review): sparse_multi appears to be a stream-aware variant of
        // PyrLKOpticalFlow::sparse specific to this branch -- confirm its
        // contract (esp. the trailing NULL argument) in the gpu module.
        d_pyrLK.sparse_multi(useGray ? _thread_data->d_frame0Gray : d_frame0,
            useGray ? d_frame1Gray : d_frame1,
            _thread_data->d_prevPts, d_nextPts,
            d_status, _thread_data->stream, NULL);
        // Draw arrows
        // Copy the tracked points and status flags back to the host, then
        // visualize the motion on this task's frame0 clone.
        vector<Point2f> prevPts(_thread_data->d_prevPts.cols);
        download(_thread_data->d_prevPts, prevPts);
        vector<Point2f> nextPts(d_nextPts.cols);
        download(d_nextPts, nextPts);
        vector<uchar> status(d_status.cols);
        download(d_status, status);
        drawArrows(_thread_data->frame0, prevPts, nextPts, status, Scalar(255, 0, 0));
    }
    size_t _n;                          // task index (informational)
    struct S_Thread_data* _thread_data; // non-owning; set by main() after construction
};
// Generic adapter for tbb::parallel_for_each: simply invokes each
// element's call operator, passing the element by reference.
template <typename Task> struct invoker {
    void operator()(Task& task) const
    {
        task();
    }
};
#define THREADS_NB 12
// Demo entry point: detects good features to track in the left image, then
// solves the same sparse PyrLK optical-flow problem on THREADS_NB TBB tasks
// in parallel (each with its own data clones and CUDA stream) and shows
// every task's annotated result in its own window.
// Returns 0 on success, -1 on bad arguments or unreadable images.
int main(int argc, const char* argv[])
{
    const char* keys =
        "{ h | help | false | print help message }"
        "{ l | left | | specify left image }"
        "{ r | right | | specify right image }"
        "{ gray | gray | false | use grayscale sources [PyrLK Sparse] }"
        "{ win_size | win_size | 21 | specify windows size [PyrLK] }"
        "{ max_level | max_level | 3 | specify max level [PyrLK] }"
        "{ iters | iters | 30 | specify iterations count [PyrLK] }"
        "{ points | points | 4000 | specify points count [GoodFeatureToTrack] }"
        "{ min_dist | min_dist | 0 | specify minimal distance between points [GoodFeatureToTrack] }";

    CommandLineParser cmd(argc, argv, keys);

    if (cmd.get<bool>("help"))
    {
        cout << "Usage: pyrlk_optical_flow_multithreading [options]" << endl;
        // Fixed typo in user-visible output ("Avaible" -> "Available").
        cout << "Available options:" << endl;
        cmd.printParams();
        return 0;
    }

    string fname0 = cmd.get<string>("left");
    string fname1 = cmd.get<string>("right");
    if (fname0.empty() || fname1.empty())
    {
        cerr << "Missing input file names" << endl;
        return -1;
    }

    bool useGray = cmd.get<bool>("gray");
    int winSize = cmd.get<int>("win_size");
    int maxLevel = cmd.get<int>("max_level");
    int iters = cmd.get<int>("iters");
    int points = cmd.get<int>("points");
    double minDist = cmd.get<double>("min_dist");

    Mat frame0 = imread(fname0);
    Mat frame1 = imread(fname1);
    if (frame0.empty() || frame1.empty())
    {
        cout << "Can't load input images" << endl;
        return -1;
    }

    cout << "Image size : " << frame0.cols << " x " << frame0.rows << endl;
    cout << "Points count : " << points << endl;
    cout << endl;

    Mat frame0Gray;
    cvtColor(frame0, frame0Gray, COLOR_BGR2GRAY);
    Mat frame1Gray;
    cvtColor(frame1, frame1Gray, COLOR_BGR2GRAY);

    // goodFeaturesToTrack: detect the points to track in the first frame.
    GoodFeaturesToTrackDetector_GPU detector(points, 0.01, minDist);
    GpuMat d_frame0Gray(frame0Gray);
    GpuMat d_prevPts;
    detector(d_frame0Gray, d_prevPts);

    // Sparse: one independent data slot per TBB task. Everything is cloned
    // so the tasks never share mutable state.
    tbb::task_scheduler_init init(THREADS_NB);
    std::vector<pyrLK_task> tasks;
    S_Thread_data s_thread_data[THREADS_NB];
    for (unsigned int uiI = 0; uiI < THREADS_NB; ++uiI)
    {
        s_thread_data[uiI].stream = Stream();
        s_thread_data[uiI].frame0 = frame0.clone();
        s_thread_data[uiI].frame1 = frame1.clone();
        // BUGFIX: clone the grayscale of the *second* frame (was
        // frame0Gray), otherwise the grayscale path computed flow between
        // two identical images.
        s_thread_data[uiI].frame1Gray = frame1Gray.clone();
        s_thread_data[uiI].iters = iters;
        s_thread_data[uiI].useGray = useGray;
        s_thread_data[uiI].maxLevel = maxLevel;
        s_thread_data[uiI].winSize.height = winSize;
        s_thread_data[uiI].winSize.width = winSize;
        s_thread_data[uiI].d_frame0Gray = d_frame0Gray.clone();
        s_thread_data[uiI].d_prevPts = d_prevPts.clone();
        tasks.push_back(pyrLK_task(uiI));
        tasks.back()._thread_data = &(s_thread_data[uiI]);
    }

    // Run all tasks concurrently on the TBB worker pool.
    tbb::parallel_for_each(tasks.begin(), tasks.end(), invoker<pyrLK_task>());

    // Each task drew arrows into its own frame0 clone; show them all.
    for (unsigned int uiI = 0; uiI < THREADS_NB; ++uiI)
    {
        stringstream ss; // loop-local, so no reset needed between iterations
        ss << "PyrLK MultiThreading [Sparse] " << uiI;
        imshow(ss.str(), s_thread_data[uiI].frame0);
    }
    waitKey();

    return 0;
}
#else
// Fallback entry point used when OpenCV was configured without TBB
// (HAVE_TBB undefined): the multithreaded demo cannot run, so just report
// the build requirement and exit successfully.
int main(int , const char* [])
{
    std::cout << "This example pyrlk_optical_flow_multithreading must be compiled with TBB Option" << std::endl;
    return 0;
}
#endif // HAVE_TBB

Some files were not shown because too many files have changed in this diff Show More

Loading…
Cancel
Save