Merge remote-tracking branch 'origin/2.4'

Conflicts:
	3rdparty/ffmpeg/ffmpeg_version.cmake
	cmake/OpenCVFindLibsGrfmt.cmake
	cmake/templates/cvconfig.h.cmake
	modules/bioinspired/doc/retina/index.rst
	modules/calib3d/doc/camera_calibration_and_3d_reconstruction.rst
	modules/calib3d/src/precomp.hpp
	modules/contrib/src/inputoutput.cpp
	modules/contrib/src/precomp.hpp
	modules/core/include/opencv2/core/internal.hpp
	modules/core/include/opencv2/core/types_c.h
	modules/core/src/drawing.cpp
	modules/core/src/precomp.hpp
	modules/core/src/system.cpp
	modules/features2d/doc/common_interfaces_of_descriptor_matchers.rst
	modules/features2d/doc/common_interfaces_of_feature_detectors.rst
	modules/features2d/include/opencv2/features2d/features2d.hpp
	modules/features2d/src/precomp.hpp
	modules/flann/src/precomp.hpp
	modules/gpu/doc/camera_calibration_and_3d_reconstruction.rst
	modules/gpu/doc/image_filtering.rst
	modules/gpu/doc/image_processing.rst
	modules/gpu/doc/video.rst
	modules/gpu/perf/perf_imgproc.cpp
	modules/gpu/perf4au/main.cpp
	modules/gpu/src/imgproc.cpp
	modules/gpu/src/precomp.hpp
	modules/gpu/test/test_imgproc.cpp
	modules/highgui/CMakeLists.txt
	modules/highgui/test/test_precomp.hpp
	modules/imgproc/doc/structural_analysis_and_shape_descriptors.rst
	modules/imgproc/src/precomp.hpp
	modules/java/generator/src/cpp/Mat.cpp
	modules/legacy/src/precomp.hpp
	modules/ml/doc/k_nearest_neighbors.rst
	modules/ml/src/precomp.hpp
	modules/nonfree/doc/feature_detection.rst
	modules/nonfree/src/precomp.hpp
	modules/objdetect/include/opencv2/objdetect/objdetect.hpp
	modules/objdetect/src/cascadedetect.cpp
	modules/objdetect/src/hog.cpp
	modules/objdetect/src/precomp.hpp
	modules/objdetect/test/test_latentsvmdetector.cpp
	modules/ocl/src/hog.cpp
	modules/ocl/src/opencl/objdetect_hog.cl
	modules/ocl/src/precomp.hpp
	modules/photo/src/precomp.hpp
	modules/stitching/src/precomp.hpp
	modules/superres/perf/perf_precomp.hpp
	modules/superres/src/optical_flow.cpp
	modules/superres/src/precomp.hpp
	modules/superres/test/test_precomp.hpp
	modules/ts/include/opencv2/ts.hpp
	modules/video/src/precomp.hpp
	modules/videostab/src/precomp.hpp
	modules/world/src/precomp.hpp
pull/1230/head
Roman Donchenko 11 years ago
commit dcaf923517
  1. 1
      .gitignore
  2. 1
      3rdparty/ffmpeg/ffmpeg_version.cmake
  3. 2
      3rdparty/tbb/CMakeLists.txt
  4. 17
      CMakeLists.txt
  5. 17
      README
  6. 23
      README.md
  7. 34
      cmake/OpenCVCRTLinkage.cmake
  8. 9
      cmake/OpenCVFindLibsGUI.cmake
  9. 133
      cmake/OpenCVFindLibsGrfmt.cmake
  10. 19
      cmake/OpenCVFindLibsVideo.cmake
  11. 10
      cmake/OpenCVGenHeaders.cmake
  12. 1
      cmake/OpenCVGenPkgconfig.cmake
  13. 3
      cmake/OpenCVModule.cmake
  14. 6
      cmake/checks/winrttest.cpp
  15. 262
      cmake/templates/cvconfig.h.cmake
  16. 2
      cmake/templates/opencv-XXX.pc.cmake.in
  17. 10
      doc/tutorials/introduction/windows_install/windows_install.rst
  18. 6
      modules/bioinspired/doc/retina/index.rst
  19. 22
      modules/calib3d/doc/camera_calibration_and_3d_reconstruction.rst
  20. 6
      modules/contrib/doc/facerec/facerec_api.rst
  21. 4
      modules/contrib/doc/openfabmap.rst
  22. 83
      modules/contrib/src/inputoutput.cpp
  23. 4
      modules/core/CMakeLists.txt
  24. 3
      modules/core/doc/basic_structures.rst
  25. 6
      modules/core/doc/clustering.rst
  26. 1
      modules/core/doc/core.rst
  27. 11
      modules/core/doc/drawing_functions.rst
  28. 543
      modules/core/doc/opengl_interop.rst
  29. 10
      modules/core/doc/operations_on_arrays.rst
  30. 4
      modules/core/doc/xml_yaml_persistence.rst
  31. 9
      modules/core/include/opencv2/core/cvdef.h
  32. 13
      modules/core/src/alloc.cpp
  33. 6
      modules/core/src/drawing.cpp
  34. 56
      modules/core/src/glob.cpp
  35. 4
      modules/core/src/parallel.cpp
  36. 1
      modules/core/src/persistence.cpp
  37. 39
      modules/core/src/rand.cpp
  38. 108
      modules/core/src/system.cpp
  39. 4
      modules/core/test/test_main.cpp
  40. 10
      modules/features2d/doc/common_interfaces_of_descriptor_extractors.rst
  41. 6
      modules/features2d/doc/common_interfaces_of_descriptor_matchers.rst
  42. 8
      modules/features2d/doc/common_interfaces_of_feature_detectors.rst
  43. 4
      modules/features2d/doc/common_interfaces_of_generic_descriptor_matchers.rst
  44. 12
      modules/features2d/doc/feature_detection_and_description.rst
  45. 6
      modules/features2d/doc/object_categorization.rst
  46. 6
      modules/flann/include/opencv2/flann/dist.h
  47. 1
      modules/flann/src/precomp.hpp
  48. 8
      modules/gpu/doc/object_detection.rst
  49. 7
      modules/gpu/perf4au/main.cpp
  50. 3
      modules/gpubgsegm/doc/background_segmentation.rst
  51. 3
      modules/gpucodec/doc/videodec.rst
  52. 3
      modules/gpucodec/doc/videoenc.rst
  53. 2
      modules/gpufilters/doc/filtering.rst
  54. 3
      modules/gpuimgproc/doc/color.rst
  55. 3
      modules/gpuimgproc/doc/hough.rst
  56. 80
      modules/gpuimgproc/src/cuda/hist.cu
  57. 21
      modules/gpuimgproc/src/histogram.cpp
  58. 59
      modules/gpuimgproc/test/test_histogram.cpp
  59. 10
      modules/gpuoptflow/doc/optflow.rst
  60. 4
      modules/gpustereo/doc/stereo.rst
  61. 52
      modules/highgui/CMakeLists.txt
  62. 9
      modules/highgui/doc/reading_and_writing_images_and_video.rst
  63. 41
      modules/highgui/doc/user_interface.rst
  64. 2
      modules/highgui/src/cap_msmf.cpp
  65. 55
      modules/highgui/src/cap_ximea.cpp
  66. 6
      modules/highgui/src/grfmt_jpeg2000.cpp
  67. 1
      modules/highgui/src/grfmt_png.cpp
  68. 13
      modules/imgproc/doc/feature_detection.rst
  69. 21
      modules/imgproc/doc/filtering.rst
  70. 3
      modules/imgproc/doc/geometric_transformations.rst
  71. 5
      modules/imgproc/doc/histograms.rst
  72. 24
      modules/imgproc/doc/miscellaneous_transformations.rst
  73. 3
      modules/imgproc/doc/object_detection.rst
  74. 20
      modules/imgproc/doc/structural_analysis_and_shape_descriptors.rst
  75. 2
      modules/imgproc/perf/perf_cvt_color.cpp
  76. 6
      modules/imgproc/perf/perf_histogram.cpp
  77. 4
      modules/imgproc/perf/perf_resize.cpp
  78. 4
      modules/imgproc/src/color.cpp
  79. 14
      modules/java/android_test/src/org/opencv/test/core/CoreTest.java
  80. 1560
      modules/java/generator/src/cpp/Mat.cpp
  81. 6
      modules/legacy/doc/expectation_maximization.rst
  82. 4
      modules/legacy/doc/feature_detection_and_description.rst
  83. 8
      modules/ml/doc/k_nearest_neighbors.rst
  84. 6
      modules/ml/doc/support_vector_machines.rst
  85. 1
      modules/ml/src/precomp.hpp
  86. 11
      modules/nonfree/doc/feature_detection.rst
  87. 4
      modules/objdetect/doc/cascade_classification.rst
  88. 18
      modules/objdetect/include/opencv2/objdetect.hpp
  89. 18
      modules/objdetect/src/cascadedetect.cpp
  90. 80
      modules/objdetect/src/hog.cpp
  91. 2
      modules/ocl/doc/feature_detection_and_description.rst
  92. 3
      modules/ocl/doc/image_processing.rst
  93. 4
      modules/ocl/doc/object_detection.rst
  94. 101
      modules/ocl/perf/perf_gftt.cpp
  95. 4
      modules/ocl/perf/perf_opticalflow.cpp
  96. 142
      modules/ocl/src/hog.cpp
  97. 177
      modules/ocl/src/opencl/objdetect_hog.cl
  98. 1045
      modules/ocl/src/opencl/pyrlk.cl
  99. 6
      modules/photo/doc/inpainting.rst
  100. 5
      modules/stitching/doc/high_level.rst
  101. Some files were not shown because too many files have changed in this diff Show More

1
.gitignore vendored

@ -2,6 +2,7 @@
.DS_Store
refman.rst
OpenCV4Tegra/
tegra/
*.user
.sw[a-z]
.*.swp

@ -1,5 +1,4 @@
set(HAVE_FFMPEG 1)
set(NEW_FFMPEG 1)
set(HAVE_FFMPEG_CODEC 1)
set(HAVE_FFMPEG_FORMAT 1)
set(HAVE_FFMPEG_UTIL 1)

@ -11,7 +11,7 @@ if (WIN32 AND ARM)
set(tbb_url "http://threadingbuildingblocks.org/sites/default/files/software_releases/source/tbb41_20130613oss_src.tgz")
set(tbb_md5 "108c8c1e481b0aaea61878289eb28b6a")
set(tbb_version_file "version_string.ver")
ocv_warnings_disable(CMAKE_CXX_FLAGS -Wshadow -Wunused-parameter)
ocv_warnings_disable(CMAKE_CXX_FLAGS /wd4702)
else()
# 4.1 update 2 - works fine
set(tbb_ver "tbb41_20130116oss")

@ -214,7 +214,7 @@ OCV_OPTION(ENABLE_SSE42 "Enable SSE4.2 instructions"
OCV_OPTION(ENABLE_AVX "Enable AVX instructions" OFF IF ((MSVC OR CMAKE_COMPILER_IS_GNUCXX) AND (X86 OR X86_64)) )
OCV_OPTION(ENABLE_NOISY_WARNINGS "Show all warnings even if they are too noisy" OFF )
OCV_OPTION(OPENCV_WARNINGS_ARE_ERRORS "Treat warnings as errors" OFF )
OCV_OPTION(ENABLE_WINRT_MODE "Build with Windows Runtime support" OFF IF WIN32 )
# uncategorized options
# ===================================================
@ -367,9 +367,6 @@ if(UNIX)
include(CheckIncludeFile)
if(NOT APPLE)
CHECK_INCLUDE_FILE(alloca.h HAVE_ALLOCA_H)
CHECK_FUNCTION_EXISTS(alloca HAVE_ALLOCA)
CHECK_INCLUDE_FILE(unistd.h HAVE_UNISTD_H)
CHECK_INCLUDE_FILE(pthread.h HAVE_LIBPTHREAD)
if(ANDROID)
set(OPENCV_LINKER_LIBS ${OPENCV_LINKER_LIBS} dl m log)
@ -379,7 +376,7 @@ if(UNIX)
set(OPENCV_LINKER_LIBS ${OPENCV_LINKER_LIBS} dl m pthread rt)
endif()
else()
add_definitions(-DHAVE_ALLOCA -DHAVE_ALLOCA_H -DHAVE_LIBPTHREAD -DHAVE_UNISTD_H)
set(HAVE_LIBPTHREAD YES)
endif()
endif()
@ -604,6 +601,16 @@ if(ANDROID)
status(" Android examples:" BUILD_ANDROID_EXAMPLES AND CAN_BUILD_ANDROID_PROJECTS THEN YES ELSE NO)
endif()
# ================== Windows RT features ==================
if(WIN32)
status("")
status(" Windows RT support:" HAVE_WINRT THEN YES ELSE NO)
if (ENABLE_WINRT_MODE)
status(" Windows SDK v8.0:" ${WINDOWS_SDK_PATH})
status(" Visual Studio 2012:" ${VISUAL_STUDIO_PATH})
endif()
endif(WIN32)
# ========================== GUI ==========================
status("")
status(" GUI: ")

@ -1,17 +0,0 @@
OpenCV: open source computer vision library
Homepage: http://opencv.org
Online docs: http://docs.opencv.org
Q&A forum: http://answers.opencv.org
Dev zone: http://code.opencv.org
Please read before starting work on a pull request:
http://code.opencv.org/projects/opencv/wiki/How_to_contribute
Summary of guidelines:
* One pull request per issue;
* Choose the right base branch;
* Include tests and documentation;
* Clean up "oops" commits before submitting;
* Follow the coding style guide.

@ -0,0 +1,23 @@
### OpenCV: Open Source Computer Vision Library
#### Resources
* Homepage: <http://opencv.org>
* Docs: <http://docs.opencv.org>
* Q&A forum: <http://answers.opencv.org>
* Issue tracking: <http://code.opencv.org>
#### Contributing
Please read before starting work on a pull request: <http://code.opencv.org/projects/opencv/wiki/How_to_contribute>
Summary of guidelines:
* One pull request per issue;
* Choose the right base branch;
* Include tests and documentation;
* Clean up "oops" commits before submitting;
* Follow the coding style guide.
[![Donate OpenCV project](http://opencv.org/wp-content/uploads/2013/07/gittip1.png)](https://www.gittip.com/OpenCV/)
[![Donate OpenCV project](http://opencv.org/wp-content/uploads/2013/07/paypal-donate-button.png)](https://www.paypal.com/cgi-bin/webscr?item_name=Donation+to+OpenCV&cmd=_donations&business=accountant%40opencv.org)

@ -2,6 +2,40 @@ if(NOT MSVC)
message(FATAL_ERROR "CRT options are available only for MSVC")
endif()
#INCLUDE (CheckIncludeFiles)
if (ENABLE_WINRT_MODE)
set(HAVE_WINRT True)
# search Windows Platform SDK
message(STATUS "Checking for Windows Platfrom SDK")
GET_FILENAME_COMPONENT(WINDOWS_SDK_PATH "[HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Microsoft SDKs\\Windows\\v8.0;InstallationFolder]" ABSOLUTE CACHE)
if (WINDOWS_SDK_PATH STREQUAL "")
message(ERROR "Windows Platform SDK 8.0 was not found!")
set(HAVE_WINRT False)
endif()
#search for Visual Studio 11.0 install directory
message(STATUS "Checking for Visual Studio 2012")
GET_FILENAME_COMPONENT(VISUAL_STUDIO_PATH [HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\VisualStudio\\11.0\\Setup\\VS;ProductDir] REALPATH CACHE)
if (VISUAL_STUDIO_PATH STREQUAL "")
message(ERROR "Visual Studio 2012 was not found!")
set(HAVE_WINRT False)
endif()
if (HAVE_WINRT)
TRY_COMPILE(HAVE_WINRT
"${OPENCV_BINARY_DIR}/CMakeFiles/CMakeTmp"
"${OpenCV_SOURCE_DIR}/cmake/checks/winrttest.cpp"
CMAKE_FLAGS "\"kernel.lib\" \"user32.lib\""
OUTPUT_VARIABLE OUTPUT)
endif()
if (HAVE_WINRT)
add_definitions(/DWINVER=0x0602 /DNTDDI_VERSION=NTDDI_WIN8 /D_WIN32_WINNT=0x0602)
endif()
endif(ENABLE_WINRT_MODE)
if(NOT BUILD_SHARED_LIBS AND BUILD_WITH_STATIC_CRT)
foreach(flag_var
CMAKE_C_FLAGS CMAKE_C_FLAGS_DEBUG CMAKE_C_FLAGS_RELEASE

@ -65,3 +65,12 @@ if(WITH_OPENGL)
endif()
endif()
endif(WITH_OPENGL)
# --- Carbon & Cocoa ---
if(APPLE)
if(WITH_CARBON)
set(HAVE_CARBON YES)
elif(NOT IOS)
set(HAVE_COCOA YES)
endif()
endif()

@ -36,55 +36,58 @@ if(WITH_TIFF)
ocv_parse_header("${TIFF_INCLUDE_DIR}/tiff.h" TIFF_VERSION_LINES TIFF_VERSION_CLASSIC TIFF_VERSION_BIG TIFF_VERSION TIFF_BIGTIFF_VERSION)
endif()
endif()
endif()
if(WITH_TIFF AND NOT TIFF_FOUND)
ocv_clear_vars(TIFF_LIBRARY TIFF_LIBRARIES TIFF_INCLUDE_DIR)
if(NOT TIFF_FOUND)
ocv_clear_vars(TIFF_LIBRARY TIFF_LIBRARIES TIFF_INCLUDE_DIR)
set(TIFF_LIBRARY libtiff)
set(TIFF_LIBRARIES ${TIFF_LIBRARY})
add_subdirectory("${OpenCV_SOURCE_DIR}/3rdparty/libtiff")
set(TIFF_INCLUDE_DIR "${${TIFF_LIBRARY}_SOURCE_DIR}" "${${TIFF_LIBRARY}_BINARY_DIR}")
ocv_parse_header("${${TIFF_LIBRARY}_SOURCE_DIR}/tiff.h" TIFF_VERSION_LINES TIFF_VERSION_CLASSIC TIFF_VERSION_BIG TIFF_VERSION TIFF_BIGTIFF_VERSION)
endif()
set(TIFF_LIBRARY libtiff)
set(TIFF_LIBRARIES ${TIFF_LIBRARY})
add_subdirectory("${OpenCV_SOURCE_DIR}/3rdparty/libtiff")
set(TIFF_INCLUDE_DIR "${${TIFF_LIBRARY}_SOURCE_DIR}" "${${TIFF_LIBRARY}_BINARY_DIR}")
ocv_parse_header("${${TIFF_LIBRARY}_SOURCE_DIR}/tiff.h" TIFF_VERSION_LINES TIFF_VERSION_CLASSIC TIFF_VERSION_BIG TIFF_VERSION TIFF_BIGTIFF_VERSION)
endif()
if(TIFF_VERSION_CLASSIC AND NOT TIFF_VERSION)
set(TIFF_VERSION ${TIFF_VERSION_CLASSIC})
endif()
if(TIFF_VERSION_CLASSIC AND NOT TIFF_VERSION)
set(TIFF_VERSION ${TIFF_VERSION_CLASSIC})
endif()
if(TIFF_BIGTIFF_VERSION AND NOT TIFF_VERSION_BIG)
set(TIFF_VERSION_BIG ${TIFF_BIGTIFF_VERSION})
endif()
if(TIFF_BIGTIFF_VERSION AND NOT TIFF_VERSION_BIG)
set(TIFF_VERSION_BIG ${TIFF_BIGTIFF_VERSION})
endif()
if(NOT TIFF_VERSION_STRING AND TIFF_INCLUDE_DIR)
list(GET TIFF_INCLUDE_DIR 0 _TIFF_INCLUDE_DIR)
if(EXISTS "${_TIFF_INCLUDE_DIR}/tiffvers.h")
file(STRINGS "${_TIFF_INCLUDE_DIR}/tiffvers.h" tiff_version_str REGEX "^#define[\t ]+TIFFLIB_VERSION_STR[\t ]+\"LIBTIFF, Version .*")
string(REGEX REPLACE "^#define[\t ]+TIFFLIB_VERSION_STR[\t ]+\"LIBTIFF, Version +([^ \\n]*).*" "\\1" TIFF_VERSION_STRING "${tiff_version_str}")
unset(tiff_version_str)
if(NOT TIFF_VERSION_STRING AND TIFF_INCLUDE_DIR)
list(GET TIFF_INCLUDE_DIR 0 _TIFF_INCLUDE_DIR)
if(EXISTS "${_TIFF_INCLUDE_DIR}/tiffvers.h")
file(STRINGS "${_TIFF_INCLUDE_DIR}/tiffvers.h" tiff_version_str REGEX "^#define[\t ]+TIFFLIB_VERSION_STR[\t ]+\"LIBTIFF, Version .*")
string(REGEX REPLACE "^#define[\t ]+TIFFLIB_VERSION_STR[\t ]+\"LIBTIFF, Version +([^ \\n]*).*" "\\1" TIFF_VERSION_STRING "${tiff_version_str}")
unset(tiff_version_str)
endif()
unset(_TIFF_INCLUDE_DIR)
endif()
unset(_TIFF_INCLUDE_DIR)
set(HAVE_TIFF YES)
endif()
# --- libjpeg (optional) ---
if(WITH_JPEG AND NOT IOS)
if(WITH_JPEG)
if(BUILD_JPEG)
ocv_clear_vars(JPEG_FOUND)
else()
include(FindJPEG)
endif()
endif()
if(WITH_JPEG AND NOT JPEG_FOUND)
ocv_clear_vars(JPEG_LIBRARY JPEG_LIBRARIES JPEG_INCLUDE_DIR)
if(NOT JPEG_FOUND)
ocv_clear_vars(JPEG_LIBRARY JPEG_LIBRARIES JPEG_INCLUDE_DIR)
set(JPEG_LIBRARY libjpeg)
set(JPEG_LIBRARIES ${JPEG_LIBRARY})
add_subdirectory("${OpenCV_SOURCE_DIR}/3rdparty/libjpeg")
set(JPEG_INCLUDE_DIR "${${JPEG_LIBRARY}_SOURCE_DIR}")
endif()
set(JPEG_LIBRARY libjpeg)
set(JPEG_LIBRARIES ${JPEG_LIBRARY})
add_subdirectory("${OpenCV_SOURCE_DIR}/3rdparty/libjpeg")
set(JPEG_INCLUDE_DIR "${${JPEG_LIBRARY}_SOURCE_DIR}")
endif()
ocv_parse_header("${JPEG_INCLUDE_DIR}/jpeglib.h" JPEG_VERSION_LINES JPEG_LIB_VERSION)
ocv_parse_header("${JPEG_INCLUDE_DIR}/jpeglib.h" JPEG_VERSION_LINES JPEG_LIB_VERSION)
set(HAVE_JPEG YES)
endif()
# --- libwebp (optional) ---
@ -129,19 +132,21 @@ if(WITH_JASPER)
else()
include(FindJasper)
endif()
endif()
if(WITH_JASPER AND NOT JASPER_FOUND)
ocv_clear_vars(JASPER_LIBRARY JASPER_LIBRARIES JASPER_INCLUDE_DIR)
if(NOT JASPER_FOUND)
ocv_clear_vars(JASPER_LIBRARY JASPER_LIBRARIES JASPER_INCLUDE_DIR)
set(JASPER_LIBRARY libjasper)
set(JASPER_LIBRARIES ${JASPER_LIBRARY})
add_subdirectory("${OpenCV_SOURCE_DIR}/3rdparty/libjasper")
set(JASPER_INCLUDE_DIR "${${JASPER_LIBRARY}_SOURCE_DIR}")
endif()
set(JASPER_LIBRARY libjasper)
set(JASPER_LIBRARIES ${JASPER_LIBRARY})
add_subdirectory("${OpenCV_SOURCE_DIR}/3rdparty/libjasper")
set(JASPER_INCLUDE_DIR "${${JASPER_LIBRARY}_SOURCE_DIR}")
endif()
if(NOT JASPER_VERSION_STRING)
ocv_parse_header2(JASPER "${JASPER_INCLUDE_DIR}/jasper/jas_config.h" JAS_VERSION "")
set(HAVE_JASPER YES)
if(NOT JASPER_VERSION_STRING)
ocv_parse_header2(JASPER "${JASPER_INCLUDE_DIR}/jasper/jas_config.h" JAS_VERSION "")
endif()
endif()
# --- libpng (optional, should be searched after zlib) ---
@ -152,29 +157,29 @@ if(WITH_PNG AND NOT IOS)
include(FindPNG)
if(PNG_FOUND)
include(CheckIncludeFile)
check_include_file("${PNG_PNG_INCLUDE_DIR}/png.h" HAVE_PNG_H)
check_include_file("${PNG_PNG_INCLUDE_DIR}/libpng/png.h" HAVE_LIBPNG_PNG_H)
if(HAVE_PNG_H)
ocv_parse_header("${PNG_PNG_INCLUDE_DIR}/png.h" PNG_VERSION_LINES PNG_LIBPNG_VER_MAJOR PNG_LIBPNG_VER_MINOR PNG_LIBPNG_VER_RELEASE)
elseif(HAVE_LIBPNG_PNG_H)
if(HAVE_LIBPNG_PNG_H)
ocv_parse_header("${PNG_PNG_INCLUDE_DIR}/libpng/png.h" PNG_VERSION_LINES PNG_LIBPNG_VER_MAJOR PNG_LIBPNG_VER_MINOR PNG_LIBPNG_VER_RELEASE)
else()
ocv_parse_header("${PNG_PNG_INCLUDE_DIR}/png.h" PNG_VERSION_LINES PNG_LIBPNG_VER_MAJOR PNG_LIBPNG_VER_MINOR PNG_LIBPNG_VER_RELEASE)
endif()
endif()
endif()
endif()
if(WITH_PNG AND NOT PNG_FOUND)
ocv_clear_vars(PNG_LIBRARY PNG_LIBRARIES PNG_INCLUDE_DIR PNG_PNG_INCLUDE_DIR HAVE_PNG_H HAVE_LIBPNG_PNG_H PNG_DEFINITIONS)
if(NOT PNG_FOUND)
ocv_clear_vars(PNG_LIBRARY PNG_LIBRARIES PNG_INCLUDE_DIR PNG_PNG_INCLUDE_DIR HAVE_LIBPNG_PNG_H PNG_DEFINITIONS)
set(PNG_LIBRARY libpng)
set(PNG_LIBRARIES ${PNG_LIBRARY})
add_subdirectory("${OpenCV_SOURCE_DIR}/3rdparty/libpng")
set(PNG_INCLUDE_DIR "${${PNG_LIBRARY}_SOURCE_DIR}")
set(PNG_DEFINITIONS "")
ocv_parse_header("${PNG_INCLUDE_DIR}/png.h" PNG_VERSION_LINES PNG_LIBPNG_VER_MAJOR PNG_LIBPNG_VER_MINOR PNG_LIBPNG_VER_RELEASE)
endif()
set(PNG_LIBRARY libpng)
set(PNG_LIBRARIES ${PNG_LIBRARY})
add_subdirectory("${OpenCV_SOURCE_DIR}/3rdparty/libpng")
set(PNG_INCLUDE_DIR "${${PNG_LIBRARY}_SOURCE_DIR}")
set(PNG_DEFINITIONS "")
ocv_parse_header("${PNG_INCLUDE_DIR}/png.h" PNG_VERSION_LINES PNG_LIBPNG_VER_MAJOR PNG_LIBPNG_VER_MINOR PNG_LIBPNG_VER_RELEASE)
endif()
set(PNG_VERSION "${PNG_LIBPNG_VER_MAJOR}.${PNG_LIBPNG_VER_MINOR}.${PNG_LIBPNG_VER_RELEASE}")
set(HAVE_PNG YES)
set(PNG_VERSION "${PNG_LIBPNG_VER_MAJOR}.${PNG_LIBPNG_VER_MINOR}.${PNG_LIBPNG_VER_RELEASE}")
endif()
# --- OpenEXR (optional) ---
if(WITH_OPENEXR)
@ -183,14 +188,16 @@ if(WITH_OPENEXR)
else()
include("${OpenCV_SOURCE_DIR}/cmake/OpenCVFindOpenEXR.cmake")
endif()
endif()
if(WITH_OPENEXR AND NOT OPENEXR_FOUND)
ocv_clear_vars(OPENEXR_INCLUDE_PATHS OPENEXR_LIBRARIES OPENEXR_ILMIMF_LIBRARY OPENEXR_VERSION)
if(NOT OPENEXR_FOUND)
ocv_clear_vars(OPENEXR_INCLUDE_PATHS OPENEXR_LIBRARIES OPENEXR_ILMIMF_LIBRARY OPENEXR_VERSION)
set(OPENEXR_LIBRARIES IlmImf)
set(OPENEXR_ILMIMF_LIBRARY IlmImf)
add_subdirectory("${OpenCV_SOURCE_DIR}/3rdparty/openexr")
endif()
set(OPENEXR_LIBRARIES IlmImf)
set(OPENEXR_ILMIMF_LIBRARY IlmImf)
add_subdirectory("${OpenCV_SOURCE_DIR}/3rdparty/openexr")
set(HAVE_OPENEXR YES)
endif()
#cmake 2.8.2 bug - it fails to determine zlib version

@ -87,7 +87,14 @@ if(WITH_PVAPI)
set(_PVAPI_LIBRARY "${_PVAPI_LIBRARY}/${CMAKE_OPENCV_GCC_VERSION_MAJOR}.${CMAKE_OPENCV_GCC_VERSION_MINOR}")
endif()
set(PVAPI_LIBRARY "${_PVAPI_LIBRARY}/${CMAKE_STATIC_LIBRARY_PREFIX}PvAPI${CMAKE_STATIC_LIBRARY_SUFFIX}" CACHE PATH "The PvAPI library")
if(WIN32)
if(MINGW)
set(PVAPI_DEFINITIONS "-DPVDECL=__stdcall")
endif(MINGW)
set(PVAPI_LIBRARY "${_PVAPI_LIBRARY}/PvAPI.lib" CACHE PATH "The PvAPI library")
else(WIN32)
set(PVAPI_LIBRARY "${_PVAPI_LIBRARY}/${CMAKE_STATIC_LIBRARY_PREFIX}PvAPI${CMAKE_STATIC_LIBRARY_SUFFIX}" CACHE PATH "The PvAPI library")
endif(WIN32)
if(EXISTS "${PVAPI_LIBRARY}")
set(HAVE_PVAPI TRUE)
endif()
@ -257,3 +264,13 @@ if(WIN32)
list(APPEND HIGHGUI_LIBRARIES winmm)
endif()
endif(WIN32)
# --- Apple AV Foundation ---
if(WITH_AVFOUNDATION)
set(HAVE_AVFOUNDATION YES)
endif()
# --- QuickTime ---
if(WITH_QUICKTIME)
set(HAVE_QUICKTIME YES)
endif()

@ -1,13 +1,3 @@
# ----------------------------------------------------------------------------
# Variables for cvconfig.h.cmake
# ----------------------------------------------------------------------------
set(PACKAGE "opencv")
set(PACKAGE_BUGREPORT "opencvlibrary-devel@lists.sourceforge.net")
set(PACKAGE_NAME "opencv")
set(PACKAGE_STRING "${PACKAGE} ${OPENCV_VERSION}")
set(PACKAGE_TARNAME "${PACKAGE}")
set(PACKAGE_VERSION "${OPENCV_VERSION}")
# platform-specific config file
configure_file("${OpenCV_SOURCE_DIR}/cmake/templates/cvconfig.h.cmake" "${OPENCV_CONFIG_FILE_INCLUDE_DIR}/cvconfig.h")

@ -12,7 +12,6 @@ set(prefix "${CMAKE_INSTALL_PREFIX}")
set(exec_prefix "\${prefix}")
set(libdir "") #TODO: need link paths for OpenCV_EXTRA_COMPONENTS
set(includedir "\${prefix}/${OPENCV_INCLUDE_INSTALL_PATH}")
set(VERSION ${OPENCV_VERSION})
if(CMAKE_BUILD_TYPE MATCHES "Release")
set(ocv_optkind OPT)

@ -511,7 +511,8 @@ macro(ocv_create_module)
)
endif()
if(BUILD_SHARED_LIBS)
if((NOT DEFINED OPENCV_MODULE_TYPE AND BUILD_SHARED_LIBS)
OR (DEFINED OPENCV_MODULE_TYPE AND OPENCV_MODULE_TYPE STREQUAL SHARED))
if(MSVC)
set_target_properties(${the_module} PROPERTIES DEFINE_SYMBOL CVAPI_EXPORTS)
else()

@ -0,0 +1,6 @@
#include <wrl/client.h>
int main(int, char**)
{
return 0;
}

@ -1,20 +1,20 @@
/* Define to one of `_getb67', `GETB67', `getb67' for Cray-2 and Cray-YMP
systems. This function is required for `alloca.c' support on those systems.
*/
#cmakedefine CRAY_STACKSEG_END
/* OpenCV compiled as static or dynamic libs */
#cmakedefine BUILD_SHARED_LIBS
/* Define to 1 if using `alloca.c'. */
#cmakedefine C_ALLOCA
/* Compile for 'real' NVIDIA GPU architectures */
#define CUDA_ARCH_BIN "${OPENCV_CUDA_ARCH_BIN}"
/* Define to 1 if you have `alloca', as a function or macro. */
#cmakedefine HAVE_ALLOCA 1
/* Create PTX or BIN for 1.0 compute capability */
#cmakedefine CUDA_ARCH_BIN_OR_PTX_10
/* Define to 1 if you have <alloca.h> and it should be used (not on Ultrix).
*/
#cmakedefine HAVE_ALLOCA_H 1
/* NVIDIA GPU features are used */
#define CUDA_ARCH_FEATURES "${OPENCV_CUDA_ARCH_FEATURES}"
/* Video for Windows support */
#cmakedefine HAVE_VFW
/* Compile for 'virtual' NVIDIA PTX architectures */
#define CUDA_ARCH_PTX "${OPENCV_CUDA_ARCH_PTX}"
/* AVFoundation video libraries */
#cmakedefine HAVE_AVFOUNDATION
/* V4L capturing support */
#cmakedefine HAVE_CAMV4L
@ -22,15 +22,33 @@
/* V4L2 capturing support */
#cmakedefine HAVE_CAMV4L2
/* V4L2 capturing support in videoio.h */
#cmakedefine HAVE_VIDEOIO
/* V4L/V4L2 capturing support via libv4l */
#cmakedefine HAVE_LIBV4L
/* Carbon windowing environment */
#cmakedefine HAVE_CARBON
/* AMD's Basic Linear Algebra Subprograms Library*/
#cmakedefine HAVE_CLAMDBLAS
/* AMD's OpenCL Fast Fourier Transform Library*/
#cmakedefine HAVE_CLAMDFFT
/* Clp support */
#cmakedefine HAVE_CLP
/* Cocoa API */
#cmakedefine HAVE_COCOA
/* C= */
#cmakedefine HAVE_CSTRIPES
/* NVidia Cuda Basic Linear Algebra Subprograms (BLAS) API*/
#cmakedefine HAVE_CUBLAS
/* NVidia Cuda Runtime API*/
#cmakedefine HAVE_CUDA
/* NVidia Cuda Fast Fourier Transform (FFT) API*/
#cmakedefine HAVE_CUFFT
/* IEEE1394 capturing support */
#cmakedefine HAVE_DC1394
@ -40,194 +58,106 @@
/* IEEE1394 capturing support - libdc1394 v2.x */
#cmakedefine HAVE_DC1394_2
/* ffmpeg in Gentoo */
#cmakedefine HAVE_GENTOO_FFMPEG
/* DirectShow Video Capture library */
#cmakedefine HAVE_DSHOW
/* FFMpeg video library */
#cmakedefine HAVE_FFMPEG
/* Eigen Matrix & Linear Algebra Library */
#cmakedefine HAVE_EIGEN
/* FFMpeg version flag */
#cmakedefine NEW_FFMPEG
/* FFMpeg video library */
#cmakedefine HAVE_FFMPEG
/* ffmpeg's libswscale */
#cmakedefine HAVE_FFMPEG_SWSCALE
#cmakedefine HAVE_FFMPEG_SWSCALE
/* ffmpeg in Gentoo */
#cmakedefine HAVE_GENTOO_FFMPEG
/* GStreamer multimedia framework */
#cmakedefine HAVE_GSTREAMER
#cmakedefine HAVE_GSTREAMER
/* GTK+ 2.0 Thread support */
#cmakedefine HAVE_GTHREAD
/* Win32 UI */
#cmakedefine HAVE_WIN32UI
#cmakedefine HAVE_GTHREAD
/* GTK+ 2.x toolkit */
#cmakedefine HAVE_GTK
/* OpenEXR codec */
#cmakedefine HAVE_ILMIMF
#cmakedefine HAVE_GTK
/* Define to 1 if you have the <inttypes.h> header file. */
#cmakedefine HAVE_INTTYPES_H 1
#cmakedefine HAVE_INTTYPES_H 1
/* Intel Integrated Performance Primitives */
#cmakedefine HAVE_IPP
/* JPEG-2000 codec */
#cmakedefine HAVE_JASPER
#cmakedefine HAVE_JASPER
/* IJG JPEG codec */
#cmakedefine HAVE_JPEG
/* Define to 1 if you have the `dl' library (-ldl). */
#cmakedefine HAVE_LIBDL 1
/* Define to 1 if you have the `gomp' library (-lgomp). */
#cmakedefine HAVE_LIBGOMP 1
/* Define to 1 if you have the `m' library (-lm). */
#cmakedefine HAVE_LIBM 1
#cmakedefine HAVE_JPEG
/* libpng/png.h needs to be included */
#cmakedefine HAVE_LIBPNG_PNG_H
#cmakedefine HAVE_LIBPNG_PNG_H
/* Define to 1 if you have the `pthread' library (-lpthread). */
#cmakedefine HAVE_LIBPTHREAD 1
/* Define to 1 if you have the `lrint' function. */
#cmakedefine HAVE_LRINT 1
/* PNG codec */
#cmakedefine HAVE_PNG
/* Define to 1 if you have the `png_get_valid' function. */
#cmakedefine HAVE_PNG_GET_VALID 1
/* png.h needs to be included */
#cmakedefine HAVE_PNG_H
/* Define to 1 if you have the `png_set_tRNS_to_alpha' function. */
#cmakedefine HAVE_PNG_SET_TRNS_TO_ALPHA 1
/* QuickTime video libraries */
#cmakedefine HAVE_QUICKTIME
/* V4L/V4L2 capturing support via libv4l */
#cmakedefine HAVE_LIBV4L
/* AVFoundation video libraries */
#cmakedefine HAVE_AVFOUNDATION
/* Microsoft Media Foundation Capture library */
#cmakedefine HAVE_MSMF
/* TIFF codec */
#cmakedefine HAVE_TIFF
/* NVidia Video Decoding API*/
#cmakedefine HAVE_NVCUVID
/* Unicap video capture library */
#cmakedefine HAVE_UNICAP
/* OpenCL Support */
#cmakedefine HAVE_OPENCL
/* Define to 1 if you have the <unistd.h> header file. */
#cmakedefine HAVE_UNISTD_H 1
/* OpenEXR codec */
#cmakedefine HAVE_OPENEXR
/* Xine video library */
#cmakedefine HAVE_XINE
/* OpenGL support*/
#cmakedefine HAVE_OPENGL
/* OpenNI library */
#cmakedefine HAVE_OPENNI
/* LZ77 compression/decompression library (used for PNG) */
#cmakedefine HAVE_ZLIB
/* Intel Integrated Performance Primitives */
#cmakedefine HAVE_IPP
/* OpenCV compiled as static or dynamic libs */
#cmakedefine BUILD_SHARED_LIBS
/* Name of package */
#define PACKAGE "${PACKAGE}"
#cmakedefine HAVE_OPENNI
/* Define to the address where bug reports for this package should be sent. */
#define PACKAGE_BUGREPORT "${PACKAGE_BUGREPORT}"
/* Define to the full name of this package. */
#define PACKAGE_NAME "${PACKAGE_NAME}"
/* Define to the full name and version of this package. */
#define PACKAGE_STRING "${PACKAGE_STRING}"
/* Define to the one symbol short name of this package. */
#define PACKAGE_TARNAME "${PACKAGE_TARNAME}"
/* Define to the version of this package. */
#define PACKAGE_VERSION "${PACKAGE_VERSION}"
/* PNG codec */
#cmakedefine HAVE_PNG
/* If using the C implementation of alloca, define if you know the
direction of stack growth for your system; otherwise it will be
automatically deduced at runtime.
STACK_DIRECTION > 0 => grows toward higher addresses
STACK_DIRECTION < 0 => grows toward lower addresses
STACK_DIRECTION = 0 => direction of growth unknown */
#cmakedefine STACK_DIRECTION
/* Qt support */
#cmakedefine HAVE_QT
/* Version number of package */
#define VERSION "${PACKAGE_VERSION}"
/* Qt OpenGL support */
#cmakedefine HAVE_QT_OPENGL
/* Define to 1 if your processor stores words with the most significant byte
first (like Motorola and SPARC, unlike Intel and VAX). */
#cmakedefine WORDS_BIGENDIAN
/* QuickTime video libraries */
#cmakedefine HAVE_QUICKTIME
/* Intel Threading Building Blocks */
#cmakedefine HAVE_TBB
/* C= */
#cmakedefine HAVE_CSTRIPES
/* Eigen Matrix & Linear Algebra Library */
#cmakedefine HAVE_EIGEN
/* NVidia Cuda Runtime API*/
#cmakedefine HAVE_CUDA
/* NVidia Cuda Fast Fourier Transform (FFT) API*/
#cmakedefine HAVE_CUFFT
/* NVidia Cuda Basic Linear Algebra Subprograms (BLAS) API*/
#cmakedefine HAVE_CUBLAS
#cmakedefine HAVE_TBB
/* NVidia Video Decoding API*/
#cmakedefine HAVE_NVCUVID
/* Compile for 'real' NVIDIA GPU architectures */
#define CUDA_ARCH_BIN "${OPENCV_CUDA_ARCH_BIN}"
/* Compile for 'virtual' NVIDIA PTX architectures */
#define CUDA_ARCH_PTX "${OPENCV_CUDA_ARCH_PTX}"
/* NVIDIA GPU features are used */
#define CUDA_ARCH_FEATURES "${OPENCV_CUDA_ARCH_FEATURES}"
/* Create PTX or BIN for 1.0 compute capability */
#cmakedefine CUDA_ARCH_BIN_OR_PTX_10
/* TIFF codec */
#cmakedefine HAVE_TIFF
/* OpenCL Support */
#cmakedefine HAVE_OPENCL
/* Unicap video capture library */
#cmakedefine HAVE_UNICAP
/* AMD's OpenCL Fast Fourier Transform Library*/
#cmakedefine HAVE_CLAMDFFT
/* Video for Windows support */
#cmakedefine HAVE_VFW
/* AMD's Basic Linear Algebra Subprograms Library*/
#cmakedefine HAVE_CLAMDBLAS
/* V4L2 capturing support in videoio.h */
#cmakedefine HAVE_VIDEOIO
/* DirectShow Video Capture library */
#cmakedefine HAVE_DSHOW
/* Win32 UI */
#cmakedefine HAVE_WIN32UI
/* Microsoft Media Foundation Capture library */
#cmakedefine HAVE_MSMF
/* Windows Runtime support */
#cmakedefine HAVE_WINRT
/* XIMEA camera support */
#cmakedefine HAVE_XIMEA
/* OpenGL support*/
#cmakedefine HAVE_OPENGL
/* Clp support */
#cmakedefine HAVE_CLP
/* Xine video library */
#cmakedefine HAVE_XINE
/* Qt support */
#cmakedefine HAVE_QT
/* Define to 1 if your processor stores words with the most significant byte
first (like Motorola and SPARC, unlike Intel and VAX). */
#cmakedefine WORDS_BIGENDIAN
/* Qt OpenGL support */
#cmakedefine HAVE_QT_OPENGL

@ -8,6 +8,6 @@ includedir_new=@includedir@
Name: OpenCV
Description: Open Source Computer Vision Library
Version: @VERSION@
Version: @OPENCV_VERSION@
Libs: @OpenCV_LIB_COMPONENTS@
Cflags: -I${includedir_old} -I${includedir_new}

@ -312,9 +312,13 @@ First we set an enviroment variable to make easier our work. This will hold the
::
setx -m OPENCV_DIR D:\OpenCV\Build\x86\vc10
Here the directory is where you have your OpenCV binaries (*extracted* or *built*). You can have different platform (e.g. x64 instead of x86) or compiler type, so substitute appropriate value. Inside this you should have folders like *bin* and *include*. The -m should be added if you wish to make the settings computer wise, instead of user wise.
setx -m OPENCV_DIR D:\OpenCV\Build\x86\vc10 (suggested for Visual Studio 2010 - 32 bit Windows)
setx -m OPENCV_DIR D:\OpenCV\Build\x64\vc10 (suggested for Visual Studio 2010 - 64 bit Windows)
setx -m OPENCV_DIR D:\OpenCV\Build\x86\vc11 (suggested for Visual Studio 2012 - 32 bit Windows)
setx -m OPENCV_DIR D:\OpenCV\Build\x64\vc11 (suggested for Visual Studio 2012 - 64 bit Windows)
Here the directory is where you have your OpenCV binaries (*extracted* or *built*). You can have different platform (e.g. x64 instead of x86) or compiler type, so substitute appropriate value. Inside this you should have two folders called *lib* and *bin*. The -m should be added if you wish to make the settings computer wise, instead of user wise.
If you built static libraries then you are done. Otherwise, you need to add the *bin* folders path to the systems path. This is cause you will use the OpenCV library in form of *\"Dynamic-link libraries\"* (also known as **DLL**). Inside these are stored all the algorithms and information the OpenCV library contains. The operating system will load them only on demand, during runtime. However, to do this he needs to know where they are. The systems **PATH** contains a list of folders where DLLs can be found. Add the OpenCV library path to this and the OS will know where to look if he ever needs the OpenCV binaries. Otherwise, you will need to copy the used DLLs right beside the applications executable file (*exe*) for the OS to find it, which is highly unpleasent if you work on many projects. To do this start up again the |PathEditor|_ and add the following new entry (right click in the application to bring up the menu):

@ -108,6 +108,12 @@ Here is an overview of the abstract Retina interface, allocate one instance with
cv::Ptr<Retina> createRetina (Size inputSize, const bool colorMode, RETINA_COLORSAMPLINGMETHOD colorSamplingMethod=RETINA_COLOR_BAYER, const bool useRetinaLogSampling=false, const double reductionFactor=1.0, const double samplingStrenght=10.0);
}} // cv and bioinspired namespaces end
.. Sample code::
* : An example on retina tone mapping can be found at opencv_source_code/samples/cpp/OpenEXRimages_HighDynamicRange_Retina_toneMapping.cpp
* : An example on retina tone mapping on video input can be found at opencv_source_code/samples/cpp/OpenEXRimages_HighDynamicRange_Retina_toneMapping_video.cpp
* : A complete example illustrating the retina interface can be found at opencv_source_code/samples/cpp/retinaDemo.cpp
Description
+++++++++++

@ -109,7 +109,16 @@ The functions below use the above model to do the following:
* Estimate the relative position and orientation of the stereo camera "heads" and compute the *rectification* transformation that makes the camera optical axes parallel.
.. Sample code::
* : A calibration sample for 3 cameras in horizontal position can be found at opencv_source_code/samples/cpp/3calibration.cpp
* : A calibration sample based on a sequence of images can be found at opencv_source_code/samples/cpp/calibration.cpp
* : A calibration sample in order to do 3D reconstruction can be found at opencv_source_code/samples/cpp/build3dmodel.cpp
* : A calibration sample of an artificially generated camera and chessboard patterns can be found at opencv_source_code/samples/cpp/calibration_artificial.cpp
* : A calibration example on stereo calibration can be found at opencv_source_code/samples/cpp/stereo_calib.cpp
* : A calibration example on stereo matching can be found at opencv_source_code/samples/cpp/stereo_match.cpp
* : PYTHON : A camera calibration sample can be found at opencv_source_code/samples/python2/calibrate.py
calibrateCamera
---------------
@ -577,7 +586,9 @@ Finds an object pose from 3D-2D point correspondences.
The function estimates the object pose given a set of object points, their corresponding image projections, as well as the camera matrix and the distortion coefficients.
.. Sample code::
* : An example of how to use solvePNP for planar augmented reality can be found at opencv_source_code/samples/python2/plane_ar.py
solvePnPRansac
------------------
@ -879,6 +890,9 @@ Homography matrix is determined up to a scale. Thus, it is normalized so that
:ocv:func:`warpPerspective`,
:ocv:func:`perspectiveTransform`
.. Sample code::
* : An example on calculating a homography for image matching can be found at opencv_source_code/samples/cpp/video_homography.cpp
estimateAffine3D
--------------------
@ -1168,6 +1182,9 @@ StereoBM
Class for computing stereo correspondence using the block matching algorithm, introduced and contributed to OpenCV by K. Konolige.
.. Sample code::
* : OCL : An example for using the stereoBM matching algorithm can be found at opencv_source_code/samples/ocl/stereo_match.cpp
createStereoBM
------------------
@ -1197,8 +1214,11 @@ The class implements the modified H. Hirschmuller algorithm [HH08]_ that differs
* Mutual information cost function is not implemented. Instead, a simpler Birchfield-Tomasi sub-pixel metric from [BT98]_ is used. Though, the color images are supported as well.
* Some pre- and post- processing steps from K. Konolige algorithm ``StereoBM`` are included, for example: pre-filtering (``StereoBM::PREFILTER_XSOBEL`` type) and post-filtering (uniqueness check, quadratic interpolation and speckle filtering).
* Some pre- and post- processing steps from K. Konolige algorithm :ocv:funcx:`StereoBM::operator()` are included, for example: pre-filtering (``CV_STEREO_BM_XSOBEL`` type) and post-filtering (uniqueness check, quadratic interpolation and speckle filtering).
.. Sample code::
* : PYTHON : An example illustrating the use of the StereoSGBM matching algorithm can be found at opencv_source_code/samples/python2/stereo_match.py
createStereoSGBM
--------------------------

@ -3,6 +3,12 @@ FaceRecognizer
.. highlight:: cpp
.. Sample code::
* : An example using the FaceRecognizer class can be found at opencv_source_code/samples/cpp/facerec_demo.cpp
* : PYTHON : An example using the FaceRecognizer class can be found at opencv_source_code/samples/python2/facerec_demo.py
FaceRecognizer
--------------

@ -9,6 +9,10 @@ FAB-MAP is an approach to appearance-based place recognition. FAB-MAP compares i
openFABMAP requires training data (e.g. a collection of images from a similar but not identical environment) to construct a visual vocabulary for the visual bag-of-words model, along with a Chow-Liu tree representation of feature likelihood and for use in the Sampled new place method (see below).
.. Sample code::
* : An example using the openFABMAP package can be found at opencv_source_code/samples/cpp/fabmap_sample.cpp
of2::FabMap
--------------------

@ -1,5 +1,5 @@
#include "opencv2/contrib.hpp"
#include "cvconfig.h"
#if defined(WIN32) || defined(_WIN32)
#include <windows.h>
@ -16,10 +16,22 @@ namespace cv
list.clear();
String path_f = path + "/" + exten;
#ifdef WIN32
WIN32_FIND_DATA FindFileData;
HANDLE hFind;
hFind = FindFirstFile((LPCSTR)path_f.c_str(), &FindFileData);
#ifdef HAVE_WINRT
WIN32_FIND_DATAW FindFileData;
#else
WIN32_FIND_DATAA FindFileData;
#endif
HANDLE hFind;
#ifdef HAVE_WINRT
size_t size = mbstowcs(NULL, path_f.c_str(), path_f.size());
Ptr<wchar_t> wpath = new wchar_t[size+1];
wpath[size] = 0;
mbstowcs(wpath, path_f.c_str(), path_f.size());
hFind = FindFirstFileExW(wpath, FindExInfoStandard, &FindFileData, FindExSearchNameMatch, NULL, 0);
#else
hFind = FindFirstFileA((LPCSTR)path_f.c_str(), &FindFileData);
#endif
if (hFind == INVALID_HANDLE_VALUE)
{
return list;
@ -34,13 +46,26 @@ namespace cv
FindFileData.dwFileAttributes == FILE_ATTRIBUTE_SYSTEM ||
FindFileData.dwFileAttributes == FILE_ATTRIBUTE_READONLY)
{
cv::Ptr<char> fname;
#ifdef HAVE_WINRT
size_t asize = wcstombs(NULL, FindFileData.cFileName, 0);
fname = new char[asize+1];
fname[asize] = 0;
wcstombs(fname, FindFileData.cFileName, asize);
#else
fname = FindFileData.cFileName;
#endif
if (addPath)
list.push_back(path + "/" + FindFileData.cFileName);
list.push_back(path + "/" + std::string(fname));
else
list.push_back(FindFileData.cFileName);
list.push_back(std::string(fname));
}
}
while(FindNextFile(hFind, &FindFileData));
#ifdef HAVE_WINRT
while(FindNextFileW(hFind, &FindFileData));
#else
while(FindNextFileA(hFind, &FindFileData));
#endif
FindClose(hFind);
}
#else
@ -75,10 +100,22 @@ namespace cv
String path_f = path + "/" + exten;
list.clear();
#ifdef WIN32
WIN32_FIND_DATA FindFileData;
#ifdef HAVE_WINRT
WIN32_FIND_DATAW FindFileData;
#else
WIN32_FIND_DATAA FindFileData;
#endif
HANDLE hFind;
hFind = FindFirstFile((LPCSTR)path_f.c_str(), &FindFileData);
#ifdef HAVE_WINRT
size_t size = mbstowcs(NULL, path_f.c_str(), path_f.size());
Ptr<wchar_t> wpath = new wchar_t[size+1];
wpath[size] = 0;
mbstowcs(wpath, path_f.c_str(), path_f.size());
hFind = FindFirstFileExW(wpath, FindExInfoStandard, &FindFileData, FindExSearchNameMatch, NULL, 0);
#else
hFind = FindFirstFileA((LPCSTR)path_f.c_str(), &FindFileData);
#endif
if (hFind == INVALID_HANDLE_VALUE)
{
return list;
@ -87,17 +124,37 @@ namespace cv
{
do
{
#ifdef HAVE_WINRT
if (FindFileData.dwFileAttributes == FILE_ATTRIBUTE_DIRECTORY &&
wcscmp(FindFileData.cFileName, L".") != 0 &&
wcscmp(FindFileData.cFileName, L"..") != 0)
#else
if (FindFileData.dwFileAttributes == FILE_ATTRIBUTE_DIRECTORY &&
strcmp(FindFileData.cFileName, ".") != 0 &&
strcmp(FindFileData.cFileName, "..") != 0)
#endif
{
cv::Ptr<char> fname;
#ifdef HAVE_WINRT
size_t asize = wcstombs(NULL, FindFileData.cFileName, 0);
fname = new char[asize+1];
fname[asize] = 0;
wcstombs(fname, FindFileData.cFileName, asize);
#else
fname = FindFileData.cFileName;
#endif
if (addPath)
list.push_back(path + "/" + FindFileData.cFileName);
list.push_back(path + "/" + std::string(fname));
else
list.push_back(FindFileData.cFileName);
list.push_back(std::string(fname));
}
}
while(FindNextFile(hFind, &FindFileData));
#ifdef HAVE_WINRT
while(FindNextFileW(hFind, &FindFileData));
#else
while(FindNextFileA(hFind, &FindFileData));
#endif
FindClose(hFind);
}

@ -2,6 +2,10 @@ set(the_description "The Core Functionality")
ocv_add_module(core ${ZLIB_LIBRARIES})
ocv_module_include_directories(${ZLIB_INCLUDE_DIR})
if (HAVE_WINRT)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /ZW /GS /Gm- /AI\"${WINDOWS_SDK_PATH}/References/CommonConfiguration/Neutral\" /AI\"${VISUAL_STUDIO_PATH}/vcpackages\"")
endif()
if(HAVE_CUDA)
ocv_warnings_disable(CMAKE_CXX_FLAGS -Wundef)
endif()

@ -884,6 +884,9 @@ Finally, there are STL-style iterators that are smart enough to skip gaps betwee
The matrix iterators are random-access iterators, so they can be passed to any STL algorithm, including ``std::sort()`` .
.. Sample code::
* : An example demonstrating the serial out capabilities of cv::Mat can be found at opencv_source_code/samples/cpp/cout_mat.cpp
.. _MatrixExpressions:

@ -64,6 +64,12 @@ Basically, you can use only the core of the function, set the number of
attempts to 1, initialize labels each time using a custom algorithm, pass them with the
( ``flags`` = ``KMEANS_USE_INITIAL_LABELS`` ) flag, and then choose the best (most-compact) clustering.
.. Sample code::
* : An example on K-means clustering can be found at opencv_source_code/samples/cpp/kmeans.cpp
* : PYTHON : An example on K-means clustering can be found at opencv_source_code/samples/python2/kmeans.py
partition
-------------
Splits an element set into equivalency classes.

@ -15,4 +15,5 @@ core. The Core Functionality
old_xml_yaml_persistence
clustering
utility_and_system_functions_and_macros
opengl_interop

@ -26,6 +26,10 @@ If a drawn figure is partially or completely outside the image, the drawing func
.. note:: The functions do not support alpha-transparency when the target image is 4-channel. In this case, the ``color[3]`` is simply copied to the repainted pixels. Thus, if you want to paint semi-transparent shapes, you can paint them in a separate buffer and then blend it with the main image.
.. Sample code::
* : An example on using variate drawing functions like line, rectangle, ... can be found at opencv_source_code/samples/cpp/drawing.cpp
circle
----------
Draws a circle.
@ -555,6 +559,12 @@ The function draws contour outlines in the image if
waitKey(0);
}
.. Sample code::
* : An example using the drawContour functionality can be found at opencv_source_code/samples/cpp/contours2.cpp
* : An example using drawContours to clean up a background segmentation result at opencv_source_code/samples/cpp/segment_objects.cpp
* : PYTHON : An example using the drawContour functionality can be found at opencv_source_code/samples/python2/contours.py
putText
@ -592,4 +602,3 @@ The function ``putText`` renders the specified text string in the image.
Symbols that cannot be rendered using the specified font are
replaced by question marks. See
:ocv:func:`getTextSize` for a text rendering code example.

@ -0,0 +1,543 @@
OpenGL interoperability
=======================
.. highlight:: cpp
General Information
-------------------
This section describes OpenGL interoperability.
To enable OpenGL support, configure OpenCV using ``CMake`` with ``WITH_OPENGL=ON`` .
Currently OpenGL is supported only with WIN32, GTK and Qt backends on Windows and Linux (MacOS and Android are not supported).
For GTK backend ``gtkglext-1.0`` library is required.
To use OpenGL functionality you should first create OpenGL context (window or frame buffer).
You can do this with :ocv:func:`namedWindow` function or with other OpenGL toolkit (GLUT, for example).
ogl::Buffer
-----------
Smart pointer for OpenGL buffer object with reference counting.
.. ocv:class:: ogl::Buffer
Buffer Objects are OpenGL objects that store an array of unformatted memory allocated by the OpenGL context.
These can be used to store vertex data, pixel data retrieved from images or the framebuffer, and a variety of other things.
``ogl::Buffer`` has interface similar with :ocv:class:`Mat` interface and represents 2D array memory.
``ogl::Buffer`` supports memory transfers between host and device and also can be mapped to CUDA memory.
ogl::Buffer::Target
-------------------
The target defines how you intend to use the buffer object.
.. ocv:enum:: ogl::Buffer::Target
.. ocv:emember:: ARRAY_BUFFER
The buffer will be used as a source for vertex data.
.. ocv:emember:: ELEMENT_ARRAY_BUFFER
The buffer will be used for indices (in ``glDrawElements`` or :ocv:func:`ogl::render`, for example).
.. ocv:emember:: PIXEL_PACK_BUFFER
The buffer will be used for reading from OpenGL textures.
.. ocv:emember:: PIXEL_UNPACK_BUFFER
The buffer will be used for writing to OpenGL textures.
ogl::Buffer::Buffer
-------------------
The constructors.
.. ocv:function:: ogl::Buffer::Buffer()
.. ocv:function:: ogl::Buffer::Buffer(int arows, int acols, int atype, unsigned int abufId, bool autoRelease = false)
.. ocv:function:: ogl::Buffer::Buffer(Size asize, int atype, unsigned int abufId, bool autoRelease = false)
.. ocv:function:: ogl::Buffer::Buffer(int arows, int acols, int atype, Target target = ARRAY_BUFFER, bool autoRelease = false)
.. ocv:function:: ogl::Buffer::Buffer(Size asize, int atype, Target target = ARRAY_BUFFER, bool autoRelease = false)
.. ocv:function:: ogl::Buffer::Buffer(InputArray arr, Target target = ARRAY_BUFFER, bool autoRelease = false)
:param arows: Number of rows in a 2D array.
:param acols: Number of columns in a 2D array.
:param asize: 2D array size.
:param atype: Array type ( ``CV_8UC1, ..., CV_64FC4`` ). See :ocv:class:`Mat` for details.
:param abufId: Buffer object name.
:param arr: Input array (host or device memory, it can be :ocv:class:`Mat` , :ocv:class:`gpu::GpuMat` or ``std::vector`` ).
:param target: Buffer usage. See :ocv:enum:`ogl::Buffer::Target` .
:param autoRelease: Auto release mode (if true, release will be called in object's destructor).
Creates empty ``ogl::Buffer`` object, creates ``ogl::Buffer`` object from existed buffer ( ``abufId`` parameter),
allocates memory for ``ogl::Buffer`` object or copies from host/device memory.
ogl::Buffer::create
-------------------
Allocates memory for ``ogl::Buffer`` object.
.. ocv:function:: void ogl::Buffer::create(int arows, int acols, int atype, Target target = ARRAY_BUFFER, bool autoRelease = false)
.. ocv:function:: void ogl::Buffer::create(Size asize, int atype, Target target = ARRAY_BUFFER, bool autoRelease = false)
:param arows: Number of rows in a 2D array.
:param acols: Number of columns in a 2D array.
:param asize: 2D array size.
:param atype: Array type ( ``CV_8UC1, ..., CV_64FC4`` ). See :ocv:class:`Mat` for details.
:param target: Buffer usage. See :ocv:enum:`ogl::Buffer::Target` .
:param autoRelease: Auto release mode (if true, release will be called in object's destructor).
ogl::Buffer::release
--------------------
Decrements the reference counter and destroys the buffer object if needed.
.. ocv:function:: void ogl::Buffer::release()
ogl::Buffer::setAutoRelease
---------------------------
Sets auto release mode.
.. ocv:function:: void ogl::Buffer::setAutoRelease(bool flag)
:param flag: Auto release mode (if true, release will be called in object's destructor).
The lifetime of the OpenGL object is tied to the lifetime of the context.
If OpenGL context was bound to a window it could be released at any time (user can close a window).
If object's destructor is called after destruction of the context it will cause an error.
Thus ``ogl::Buffer`` doesn't destroy OpenGL object in destructor by default (all OpenGL resources will be released with OpenGL context).
This function can force ``ogl::Buffer`` destructor to destroy OpenGL object.
ogl::Buffer::copyFrom
---------------------
Copies from host/device memory to OpenGL buffer.
.. ocv:function:: void ogl::Buffer::copyFrom(InputArray arr, Target target = ARRAY_BUFFER, bool autoRelease = false)
:param arr: Input array (host or device memory, it can be :ocv:class:`Mat` , :ocv:class:`gpu::GpuMat` or ``std::vector`` ).
:param target: Buffer usage. See :ocv:enum:`ogl::Buffer::Target` .
:param autoRelease: Auto release mode (if true, release will be called in object's destructor).
ogl::Buffer::copyTo
-------------------
Copies from OpenGL buffer to host/device memory or another OpenGL buffer object.
.. ocv:function:: void ogl::Buffer::copyTo(OutputArray arr, Target target = ARRAY_BUFFER, bool autoRelease = false) const
:param arr: Destination array (host or device memory, can be :ocv:class:`Mat` , :ocv:class:`gpu::GpuMat` , ``std::vector`` or ``ogl::Buffer`` ).
:param target: Buffer usage for destination buffer (if ``arr`` is OpenGL buffer).
:param autoRelease: Auto release mode for destination buffer (if ``arr`` is OpenGL buffer).
ogl::Buffer::clone
------------------
Creates a full copy of the buffer object and the underlying data.
.. ocv:function:: Buffer ogl::Buffer::clone(Target target = ARRAY_BUFFER, bool autoRelease = false) const
:param target: Buffer usage for destination buffer.
:param autoRelease: Auto release mode for destination buffer.
ogl::Buffer::bind
-----------------
Binds OpenGL buffer to the specified buffer binding point.
.. ocv:function:: void ogl::Buffer::bind(Target target) const
:param target: Binding point. See :ocv:enum:`ogl::Buffer::Target` .
ogl::Buffer::unbind
-------------------
Unbind any buffers from the specified binding point.
.. ocv:function:: static void ogl::Buffer::unbind(Target target)
:param target: Binding point. See :ocv:enum:`ogl::Buffer::Target` .
ogl::Buffer::mapHost
--------------------
Maps OpenGL buffer to host memory.
.. ocv:function:: Mat ogl::Buffer::mapHost(Access access)
:param access: Access policy, indicating whether it will be possible to read from, write to, or both read from and write to the buffer object's mapped data store. The symbolic constant must be ``ogl::Buffer::READ_ONLY`` , ``ogl::Buffer::WRITE_ONLY`` or ``ogl::Buffer::READ_WRITE`` .
``mapHost`` maps to the client's address space the entire data store of the buffer object.
The data can then be directly read and/or written relative to the returned pointer, depending on the specified ``access`` policy.
A mapped data store must be unmapped with :ocv:func:`ogl::Buffer::unmapHost` before its buffer object is used.
This operation can lead to memory transfers between host and device.
Only one buffer object can be mapped at a time.
ogl::Buffer::unmapHost
----------------------
Unmaps OpenGL buffer.
.. ocv:function:: void ogl::Buffer::unmapHost()
ogl::Buffer::mapDevice
----------------------
Maps OpenGL buffer to CUDA device memory.
.. ocv:function:: gpu::GpuMat ogl::Buffer::mapDevice()
This operation doesn't copy data.
Several buffer objects can be mapped to CUDA memory at a time.
A mapped data store must be unmapped with :ocv:func:`ogl::Buffer::unmapDevice` before its buffer object is used.
ogl::Buffer::unmapDevice
------------------------
Unmaps OpenGL buffer.
.. ocv:function:: void ogl::Buffer::unmapDevice()
ogl::Texture2D
--------------
Smart pointer for OpenGL 2D texture memory with reference counting.
.. ocv:class:: ogl::Texture2D
ogl::Texture2D::Format
----------------------
An Image Format describes the way that the images in Textures store their data.
.. ocv:enum:: ogl::Texture2D::Format
.. ocv:emember:: NONE
.. ocv:emember:: DEPTH_COMPONENT
.. ocv:emember:: RGB
.. ocv:emember:: RGBA
ogl::Texture2D::Texture2D
-------------------------
The constructors.
.. ocv:function:: ogl::Texture2D::Texture2D()
.. ocv:function:: ogl::Texture2D::Texture2D(int arows, int acols, Format aformat, unsigned int atexId, bool autoRelease = false)
.. ocv:function:: ogl::Texture2D::Texture2D(Size asize, Format aformat, unsigned int atexId, bool autoRelease = false)
.. ocv:function:: ogl::Texture2D::Texture2D(int arows, int acols, Format aformat, bool autoRelease = false)
.. ocv:function:: ogl::Texture2D::Texture2D(Size asize, Format aformat, bool autoRelease = false)
.. ocv:function:: ogl::Texture2D::Texture2D(InputArray arr, bool autoRelease = false)
:param arows: Number of rows.
:param acols: Number of columns.
:param asize: 2D array size.
:param aformat: Image format. See :ocv:enum:`ogl::Texture2D::Format` .
:param arr: Input array (host or device memory, it can be :ocv:class:`Mat` , :ocv:class:`gpu::GpuMat` or :ocv:class:`ogl::Buffer` ).
:param autoRelease: Auto release mode (if true, release will be called in object's destructor).
Creates empty ``ogl::Texture2D`` object, allocates memory for ``ogl::Texture2D`` object or copies from host/device memory.
ogl::Texture2D::create
----------------------
Allocates memory for ``ogl::Texture2D`` object.
.. ocv:function:: void ogl::Texture2D::create(int arows, int acols, Format aformat, bool autoRelease = false)
.. ocv:function:: void ogl::Texture2D::create(Size asize, Format aformat, bool autoRelease = false)
:param arows: Number of rows.
:param acols: Number of columns.
:param asize: 2D array size.
:param aformat: Image format. See :ocv:enum:`ogl::Texture2D::Format` .
:param autoRelease: Auto release mode (if true, release will be called in object's destructor).
ogl::Texture2D::release
-----------------------
Decrements the reference counter and destroys the texture object if needed.
.. ocv:function:: void ogl::Texture2D::release()
ogl::Texture2D::setAutoRelease
------------------------------
Sets auto release mode.
.. ocv:function:: void ogl::Texture2D::setAutoRelease(bool flag)
:param flag: Auto release mode (if true, release will be called in object's destructor).
The lifetime of the OpenGL object is tied to the lifetime of the context.
If OpenGL context was bound to a window it could be released at any time (user can close a window).
If object's destructor is called after destruction of the context it will cause an error.
Thus ``ogl::Texture2D`` doesn't destroy OpenGL object in destructor by default (all OpenGL resources will be released with OpenGL context).
This function can force ``ogl::Texture2D`` destructor to destroy OpenGL object.
ogl::Texture2D::copyFrom
------------------------
Copies from host/device memory to OpenGL texture.
.. ocv:function:: void ogl::Texture2D::copyFrom(InputArray arr, bool autoRelease = false)
:param arr: Input array (host or device memory, it can be :ocv:class:`Mat` , :ocv:class:`gpu::GpuMat` or :ocv:class:`ogl::Buffer` ).
:param autoRelease: Auto release mode (if true, release will be called in object's destructor).
ogl::Texture2D::copyTo
----------------------
Copies from OpenGL texture to host/device memory or another OpenGL texture object.
.. ocv:function:: void ogl::Texture2D::copyTo(OutputArray arr, int ddepth = CV_32F, bool autoRelease = false) const
:param arr: Destination array (host or device memory, can be :ocv:class:`Mat` , :ocv:class:`gpu::GpuMat` , :ocv:class:`ogl::Buffer` or ``ogl::Texture2D`` ).
:param ddepth: Destination depth.
:param autoRelease: Auto release mode for destination buffer (if ``arr`` is OpenGL buffer or texture).
ogl::Texture2D::bind
--------------------
Binds texture to current active texture unit for ``GL_TEXTURE_2D`` target.
.. ocv:function:: void ogl::Texture2D::bind() const
ogl::Arrays
-----------
Wrapper for OpenGL Client-Side Vertex arrays.
.. ocv:class:: ogl::Arrays
``ogl::Arrays`` stores vertex data in :ocv:class:`ogl::Buffer` objects.
ogl::Arrays::setVertexArray
---------------------------
Sets an array of vertex coordinates.
.. ocv:function:: void ogl::Arrays::setVertexArray(InputArray vertex)
:param vertex: array with vertex coordinates, can be both host and device memory.
ogl::Arrays::resetVertexArray
-----------------------------
Resets vertex coordinates.
.. ocv:function:: void ogl::Arrays::resetVertexArray()
ogl::Arrays::setColorArray
--------------------------
Sets an array of vertex colors.
.. ocv:function:: void ogl::Arrays::setColorArray(InputArray color)
:param color: array with vertex colors, can be both host and device memory.
ogl::Arrays::resetColorArray
----------------------------
Resets vertex colors.
.. ocv:function:: void ogl::Arrays::resetColorArray()
ogl::Arrays::setNormalArray
---------------------------
Sets an array of vertex normals.
.. ocv:function:: void ogl::Arrays::setNormalArray(InputArray normal)
:param normal: array with vertex normals, can be both host and device memory.
ogl::Arrays::resetNormalArray
-----------------------------
Resets vertex normals.
.. ocv:function:: void ogl::Arrays::resetNormalArray()
ogl::Arrays::setTexCoordArray
-----------------------------
Sets an array of vertex texture coordinates.
.. ocv:function:: void ogl::Arrays::setTexCoordArray(InputArray texCoord)
:param texCoord: array with vertex texture coordinates, can be both host and device memory.
ogl::Arrays::resetTexCoordArray
-------------------------------
Resets vertex texture coordinates.
.. ocv:function:: void ogl::Arrays::resetTexCoordArray()
ogl::Arrays::release
--------------------
Releases all inner buffers.
.. ocv:function:: void ogl::Arrays::release()
ogl::Arrays::setAutoRelease
---------------------------
Sets auto release mode all inner buffers.
.. ocv:function:: void ogl::Arrays::setAutoRelease(bool flag)
:param flag: Auto release mode.
ogl::Arrays::bind
-----------------
Binds all vertex arrays.
.. ocv:function:: void ogl::Arrays::bind() const
ogl::Arrays::size
-----------------
Returns the vertex count.
.. ocv:function:: int ogl::Arrays::size() const
ogl::render
-----------
Render OpenGL texture or primitives.
.. ocv:function:: void ogl::render(const Texture2D& tex, Rect_<double> wndRect = Rect_<double>(0.0, 0.0, 1.0, 1.0), Rect_<double> texRect = Rect_<double>(0.0, 0.0, 1.0, 1.0))
.. ocv:function:: void ogl::render(const Arrays& arr, int mode = POINTS, Scalar color = Scalar::all(255))
.. ocv:function:: void ogl::render(const Arrays& arr, InputArray indices, int mode = POINTS, Scalar color = Scalar::all(255))
:param tex: Texture to draw.
:param wndRect: Region of window, where to draw a texture (normalized coordinates).
:param texRect: Region of texture to draw (normalized coordinates).
:param arr: Array of primitives vertices.
:param indices: Array of vertices indices (host or device memory).
:param mode: Render mode. Available options:
* **POINTS**
* **LINES**
* **LINE_LOOP**
* **LINE_STRIP**
* **TRIANGLES**
* **TRIANGLE_STRIP**
* **TRIANGLE_FAN**
* **QUADS**
* **QUAD_STRIP**
* **POLYGON**
:param color: Color for all vertices. Will be used if ``arr`` doesn't contain color array.
gpu::setGlDevice
----------------
Sets a CUDA device and initializes it for the current thread with OpenGL interoperability.
.. ocv:function:: void gpu::setGlDevice( int device = 0 )
:param device: System index of a GPU device starting with 0.
This function should be explicitly called after OpenGL context creation and before any CUDA calls.

@ -512,7 +512,7 @@ Performs the per-element comparison of two arrays or an array and scalar value.
:param value: scalar value.
:param dst: output array that has the same size as the input arrays and type= ``CV_8UC1`` .
:param dst: output array that has the same size and type as the input arrays.
:param cmpop: a flag, that specifies correspondence between the arrays:
@ -971,6 +971,12 @@ All of the above improvements have been implemented in :ocv:func:`matchTemplate`
.. seealso:: :ocv:func:`dct` , :ocv:func:`getOptimalDFTSize` , :ocv:func:`mulSpectrums`, :ocv:func:`filter2D` , :ocv:func:`matchTemplate` , :ocv:func:`flip` , :ocv:func:`cartToPolar` , :ocv:func:`magnitude` , :ocv:func:`phase`
.. Sample code::
* : An example using the discrete fourier transform can be found at opencv_source_code/samples/cpp/dft.cpp
* : PYTHON : An example using the dft functionality to perform Wiener deconvolution can be found at opencv_source_code/samples/python2/deconvolution.py
* : PYTHON : An example rearranging the quadrants of a Fourier image can be found at opencv_source_code/samples/python2/dft.py
divide
@ -2161,7 +2167,9 @@ The sample below is the function that takes two matrices. The first function sto
:ocv:func:`dft`,
:ocv:func:`dct`
.. Sample code::
* : An example using PCA for dimensionality reduction while maintaining an amount of variance can be found at opencv_source_code/samples/cpp/pca.cpp
PCA::PCA
--------

@ -91,6 +91,10 @@ Several things can be noted by looking at the sample code and the output:
*
In YAML (but not XML), mappings and sequences can be written in a compact Python-like inline form. In the sample above matrix elements, as well as each feature, including its lbp value, is stored in such inline form. To store a mapping/sequence in a compact form, put ":" after the opening character, e.g. use **"{:"** instead of **"{"** and **"[:"** instead of **"["**. When the data is written to XML, those extra ":" are ignored.
.. Sample code::
* : A complete example using the FileStorage interface can be found at opencv_source_code/samples/cpp/filestorage.cpp
Reading data from a file storage.
---------------------------------

@ -149,7 +149,12 @@
# endif
#endif
#ifdef __ARM_NEON__
#if (defined WIN32 || defined _WIN32) && defined(_M_ARM)
# include <Intrin.h>
# include "arm_neon.h"
# define CV_NEON 1
# define CPU_HAS_NEON_FEATURE (true)
#elif defined(__ARM_NEON__)
# include <arm_neon.h>
# define CV_NEON 1
#endif
@ -364,7 +369,7 @@ CV_INLINE int cvRound( double value )
return t;
#elif defined _MSC_VER && defined _M_ARM && defined HAVE_TEGRA_OPTIMIZATION
TEGRA_ROUND(value);
#elif defined HAVE_LRINT || defined CV_ICC || defined __GNUC__
#elif defined CV_ICC || defined __GNUC__
# ifdef HAVE_TEGRA_OPTIMIZATION
TEGRA_ROUND(value);
# else

@ -94,9 +94,20 @@ void fastFree(void* ptr)
#define STAT(stmt)
#ifdef WIN32
#if (_WIN32_WINNT >= 0x0602)
#include <synchapi.h>
#endif
struct CriticalSection
{
CriticalSection() { InitializeCriticalSection(&cs); }
CriticalSection()
{
#if (_WIN32_WINNT >= 0x0600)
InitializeCriticalSectionEx(&cs, 1000, 0);
#else
InitializeCriticalSection(&cs);
#endif
}
~CriticalSection() { DeleteCriticalSection(&cs); }
void lock() { EnterCriticalSection(&cs); }
void unlock() { LeaveCriticalSection(&cs); }

@ -886,12 +886,14 @@ void ellipse2Poly( Point center, Size axes, int angle,
Point pt;
pt.x = cvRound( cx + x * alpha - y * beta );
pt.y = cvRound( cy + x * beta + y * alpha );
if( pt != prevPt )
if( pt != prevPt ){
pts.push_back(pt);
prevPt = pt;
}
}
// If there are no points, it's a zero-size polygon
if( pts.size() < 2) {
if( pts.size() == 1) {
pts.assign(2,center);
}
}

@ -56,16 +56,39 @@ namespace
struct DIR
{
#ifdef HAVE_WINRT
WIN32_FIND_DATAW data;
#else
WIN32_FIND_DATA data;
#endif
HANDLE handle;
dirent ent;
#ifdef HAVE_WINRT
DIR() {};
~DIR()
{
if (ent.d_name)
delete[] ent.d_name;
}
#endif
};
DIR* opendir(const char* path)
{
DIR* dir = new DIR;
dir->ent.d_name = 0;
dir->handle = ::FindFirstFileA((cv::String(path) + "\\*").c_str(), &dir->data);
#ifdef HAVE_WINRT
cv::String full_path = cv::String(path) + "\\*";
size_t size = mbstowcs(NULL, full_path.c_str(), full_path.size());
cv::Ptr<wchar_t> wfull_path = new wchar_t[size+1];
wfull_path[size] = 0;
mbstowcs(wfull_path, full_path.c_str(), full_path.size());
dir->handle = ::FindFirstFileExW(wfull_path, FindExInfoStandard,
&dir->data, FindExSearchNameMatch, NULL, 0);
#else
dir->handle = ::FindFirstFileExA((cv::String(path) + "\\*").c_str(),
FindExInfoStandard, &dir->data, FindExSearchNameMatch, NULL, 0);
#endif
if(dir->handle == INVALID_HANDLE_VALUE)
{
/*closedir will do all cleanup*/
@ -76,12 +99,25 @@ namespace
dirent* readdir(DIR* dir)
{
#ifdef HAVE_WINRT
if (dir->ent.d_name != 0)
{
if (::FindNextFileW(dir->handle, &dir->data) != TRUE)
return 0;
}
size_t asize = wcstombs(NULL, dir->data.cFileName, 0);
char* aname = new char[asize+1];
aname[asize] = 0;
wcstombs(aname, dir->data.cFileName, asize);
dir->ent.d_name = aname;
#else
if (dir->ent.d_name != 0)
{
if (::FindNextFile(dir->handle, &dir->data) != TRUE)
if (::FindNextFileA(dir->handle, &dir->data) != TRUE)
return 0;
}
dir->ent.d_name = dir->data.cFileName;
#endif
return &dir->ent;
}
@ -107,7 +143,19 @@ static bool isDir(const cv::String& path, DIR* dir)
if (dir)
attributes = dir->data.dwFileAttributes;
else
attributes = ::GetFileAttributes(path.c_str());
{
WIN32_FILE_ATTRIBUTE_DATA all_attrs;
#ifdef HAVE_WINRT
size_t size = mbstowcs(NULL, path.c_str(), path.size());
cv::Ptr<wchar_t> wpath = new wchar_t[size+1];
wpath[size] = 0;
mbstowcs(wpath, path.c_str(), path.size());
::GetFileAttributesExW(wpath, GetFileExInfoStandard, &all_attrs);
#else
::GetFileAttributesExA(path.c_str(), GetFileExInfoStandard, &all_attrs);
#endif
attributes = all_attrs.dwFileAttributes;
}
return (attributes != INVALID_FILE_ATTRIBUTES) && ((attributes & FILE_ATTRIBUTE_DIRECTORY) != 0);
#else
@ -241,4 +289,4 @@ void cv::glob(String pattern, std::vector<String>& result, bool recursive)
glob_rec(path, wildchart, result, recursive);
std::sort(result.begin(), result.end());
}
}

@ -453,7 +453,11 @@ int cv::getNumberOfCPUs(void)
{
#if defined WIN32 || defined _WIN32
SYSTEM_INFO sysinfo;
#if defined(_M_ARM) || defined(_M_X64) || defined(HAVE_WINRT)
GetNativeSystemInfo( &sysinfo );
#else
GetSystemInfo( &sysinfo );
#endif
return (int)sysinfo.dwNumberOfProcessors;
#elif defined ANDROID

@ -58,7 +58,6 @@
#endif
#if USE_ZLIB
# undef HAVE_UNISTD_H //to avoid redefinition
# ifndef _LFS64_LARGEFILE
# define _LFS64_LARGEFILE 0
# endif

@ -728,33 +728,54 @@ void RNG::fill( InputOutputArray _mat, int disttype,
}
#ifdef WIN32
#ifdef HAVE_WINRT
// using C++11 thread attribute for local thread data
__declspec( thread ) RNG* rng = NULL;
void deleteThreadRNGData()
{
if (rng)
delete rng;
}
RNG& theRNG()
{
if (!rng)
{
rng = new RNG;
}
return *rng;
}
#else
#ifdef WINCE
# define TLS_OUT_OF_INDEXES ((DWORD)0xFFFFFFFF)
#endif
static DWORD tlsRNGKey = TLS_OUT_OF_INDEXES;
void deleteThreadRNGData()
{
if( tlsRNGKey != TLS_OUT_OF_INDEXES )
delete (RNG*)TlsGetValue( tlsRNGKey );
void deleteThreadRNGData()
{
if( tlsRNGKey != TLS_OUT_OF_INDEXES )
delete (RNG*)TlsGetValue( tlsRNGKey );
}
RNG& theRNG()
{
if( tlsRNGKey == TLS_OUT_OF_INDEXES )
{
tlsRNGKey = TlsAlloc();
CV_Assert(tlsRNGKey != TLS_OUT_OF_INDEXES);
tlsRNGKey = TlsAlloc();
CV_Assert(tlsRNGKey != TLS_OUT_OF_INDEXES);
}
RNG* rng = (RNG*)TlsGetValue( tlsRNGKey );
if( !rng )
{
rng = new RNG;
TlsSetValue( tlsRNGKey, rng );
rng = new RNG;
TlsSetValue( tlsRNGKey, rng );
}
return *rng;
}
#endif //HAVE_WINRT
#else
static pthread_key_t tlsRNGKey = 0;

@ -47,6 +47,9 @@
#define _WIN32_WINNT 0x0400 // http://msdn.microsoft.com/en-us/library/ms686857(VS.85).aspx
#endif
#include <windows.h>
#if (_WIN32_WINNT >= 0x0602)
#include <synchapi.h>
#endif
#undef small
#undef min
#undef max
@ -75,6 +78,30 @@
}
#endif
#endif
#ifdef HAVE_WINRT
#include <wrl/client.h>
// Returns the application's sandboxed temporary-folder path on WinRT,
// where the desktop GetTempPath() API is not available.
std::wstring GetTempPathWinRT()
{
return std::wstring(Windows::Storage::ApplicationData::Current->TemporaryFolder->Path->Data());
}
// Builds a unique temporary file name for WinRT builds: the given prefix
// followed by a freshly generated GUID rendered as underscore-separated hex
// groups. Returns an empty string if GUID creation fails (the caller in
// tempfile() already treats an empty name as an error).
std::wstring GetTempFileNameWinRT(std::wstring prefix)
{
    GUID g;
    if (FAILED(CoCreateGuid(&g)))
        return std::wstring();

    wchar_t guidStr[40];
    // String literals are const; binding them to a non-const pointer is
    // ill-formed in C++11.
    const wchar_t* mask = L"%08x_%04x_%04x_%02x%02x_%02x%02x%02x%02x%02x%02x";
    swprintf(&guidStr[0], sizeof(guidStr)/sizeof(wchar_t), mask,
             g.Data1, g.Data2, g.Data3, UINT(g.Data4[0]), UINT(g.Data4[1]),
             UINT(g.Data4[2]), UINT(g.Data4[3]), UINT(g.Data4[4]),
             UINT(g.Data4[5]), UINT(g.Data4[6]), UINT(g.Data4[7]));
    return prefix + std::wstring(guidStr);
}
#endif
#else
#include <pthread.h>
#include <sys/time.h>
@ -371,10 +398,38 @@ String format( const char* fmt, ... )
String tempfile( const char* suffix )
{
#ifdef HAVE_WINRT
std::wstring temp_dir = L"";
const wchar_t* opencv_temp_dir = _wgetenv(L"OPENCV_TEMP_PATH");
if (opencv_temp_dir)
temp_dir = std::wstring(opencv_temp_dir);
#else
const char *temp_dir = getenv("OPENCV_TEMP_PATH");
String fname;
#endif
#if defined WIN32 || defined _WIN32
#ifdef HAVE_WINRT
RoInitialize(RO_INIT_MULTITHREADED);
std::wstring temp_dir2;
if (temp_dir.empty())
temp_dir = GetTempPathWinRT();
std::wstring temp_file;
temp_file = GetTempFileNameWinRT(L"ocv");
if (temp_file.empty())
return std::string();
temp_file = temp_dir + std::wstring(L"\\") + temp_file;
DeleteFileW(temp_file.c_str());
size_t asize = wcstombs(NULL, temp_file.c_str(), 0);
Ptr<char> aname = new char[asize+1];
aname[asize] = 0;
wcstombs(aname, temp_file.c_str(), asize);
fname = std::string(aname);
RoUninitialize();
#else
char temp_dir2[MAX_PATH + 1] = { 0 };
char temp_file[MAX_PATH + 1] = { 0 };
@ -389,6 +444,7 @@ String tempfile( const char* suffix )
DeleteFileA(temp_file);
fname = temp_file;
#endif
# else
# ifdef ANDROID
//char defaultTemplate[] = "/mnt/sdcard/__opencv_temp.XXXXXX";
@ -486,40 +542,6 @@ redirectError( CvErrorCallback errCallback, void* userdata, void** prevUserdata)
}
/*CV_IMPL int
cvGuiBoxReport( int code, const char *func_name, const char *err_msg,
const char *file, int line, void* )
{
#if (!defined WIN32 && !defined _WIN32) || defined WINCE
return cvStdErrReport( code, func_name, err_msg, file, line, 0 );
#else
if( code != CV_StsBackTrace && code != CV_StsAutoTrace )
{
size_t msg_len = strlen(err_msg ? err_msg : "") + 1024;
char* message = (char*)alloca(msg_len);
char title[100];
wsprintf( message, "%s (%s)\nin function %s, %s(%d)\n\n"
"Press \"Abort\" to terminate application.\n"
"Press \"Retry\" to debug (if the app is running under debugger).\n"
"Press \"Ignore\" to continue (this is not safe).\n",
cvErrorStr(code), err_msg ? err_msg : "no description",
func_name, file, line );
wsprintf( title, "OpenCV GUI Error Handler" );
int answer = MessageBox( NULL, message, title, MB_ICONERROR|MB_ABORTRETRYIGNORE|MB_SYSTEMMODAL );
if( answer == IDRETRY )
{
CV_DBG_BREAK();
}
return answer != IDIGNORE;
}
return 0;
#endif
}*/
CV_IMPL int cvCheckHardwareSupport(int feature)
{
CV_DbgAssert( 0 <= feature && feature <= CV_HARDWARE_MAX_FEATURE );
@ -677,7 +699,11 @@ cvErrorFromIppStatus( int status )
}
#if defined BUILD_SHARED_LIBS && defined CVAPI_EXPORTS && defined WIN32 && !defined WINCE
#if defined CVAPI_EXPORTS && defined WIN32 && !defined WINCE
#ifdef HAVE_WINRT
#pragma warning(disable:4447) // Disable warning 'main' signature found without threading model
#endif
BOOL WINAPI DllMain( HINSTANCE, DWORD fdwReason, LPVOID );
BOOL WINAPI DllMain( HINSTANCE, DWORD fdwReason, LPVOID )
@ -698,7 +724,15 @@ namespace cv
struct Mutex::Impl
{
Impl() { InitializeCriticalSection(&cs); refcount = 1; }
Impl()
{
#if (_WIN32_WINNT >= 0x0600)
::InitializeCriticalSectionEx(&cs, 1000, 0);
#else
::InitializeCriticalSection(&cs);
#endif
refcount = 1;
}
~Impl() { DeleteCriticalSection(&cs); }
void lock() { EnterCriticalSection(&cs); }
@ -791,4 +825,4 @@ bool Mutex::trylock() { return impl->trylock(); }
}
/* End of file. */
/* End of file. */

@ -1,3 +1,7 @@
#ifdef HAVE_WINRT
#pragma warning(disable:4447) // Disable warning 'main' signature found without threading model
#endif
#include "test_precomp.hpp"
CV_TEST_MAIN("cv")

@ -9,7 +9,10 @@ represented as vectors in a multidimensional space. All objects that implement t
descriptor extractors inherit the
:ocv:class:`DescriptorExtractor` interface.
.. Sample code::
* : An example explaining keypoint extraction can be found at opencv_source_code/samples/cpp/descriptor_extractor_matcher.cpp
* : An example on descriptor evaluation can be found at opencv_source_code/samples/cpp/detector_descriptor_evaluation.cpp
DescriptorExtractor
-------------------
@ -82,9 +85,10 @@ The current implementation supports the following types of a descriptor extracto
* ``"SIFT"`` -- :ocv:class:`SIFT`
* ``"SURF"`` -- :ocv:class:`SURF`
* ``"ORB"`` -- :ocv:class:`ORB`
* ``"BRISK"`` -- :ocv:class:`BRISK`
* ``"BRIEF"`` -- :ocv:class:`BriefDescriptorExtractor`
* ``"BRISK"`` -- :ocv:class:`BRISK`
* ``"ORB"`` -- :ocv:class:`ORB`
* ``"FREAK"`` -- :ocv:class:`FREAK`
A combined format is also supported: descriptor extractor adapter name ( ``"Opponent"`` --
:ocv:class:`OpponentColorDescriptorExtractor` ) + descriptor extractor name (see above),
@ -141,4 +145,6 @@ Strecha C., Fua P. *BRIEF: Binary Robust Independent Elementary Features* ,
...
};
.. Sample code::
* : A complete BRIEF extractor sample can be found at opencv_source_code/samples/cpp/brief_match_test.cpp

@ -9,6 +9,11 @@ that are represented as vectors in a multidimensional space. All objects that im
descriptor matchers inherit the
:ocv:class:`DescriptorMatcher` interface.
.. Sample code::
* : An example explaining keypoint matching can be found at opencv_source_code/samples/cpp/descriptor_extractor_matcher.cpp
* : An example on descriptor matching evaluation can be found at opencv_source_code/samples/cpp/detector_descriptor_matcher_evaluation.cpp
* : An example on one to many image matching can be found at opencv_source_code/samples/cpp/matching_to_many_images.cpp
DescriptorMatcher
-----------------
@ -271,4 +276,3 @@ Flann-based descriptor matcher. This matcher trains :ocv:class:`flann::Index_` o
};
..

@ -8,6 +8,9 @@ between different algorithms solving the same problem. All objects that implemen
inherit the
:ocv:class:`FeatureDetector` interface.
.. Sample code::
* : An example explaining keypoint detection can be found at opencv_source_code/samples/cpp/descriptor_extractor_matcher.cpp
FeatureDetector
---------------
@ -166,7 +169,7 @@ StarFeatureDetector
-------------------
.. ocv:class:: StarFeatureDetector : public FeatureDetector
The class implements the keypoint detector introduced by K. Konolige, synonym of ``StarDetector``. ::
The class implements the keypoint detector introduced by [Agrawal08]_, synonym of ``StarDetector``. ::
class StarFeatureDetector : public FeatureDetector
{
@ -180,6 +183,9 @@ The class implements the keypoint detector introduced by K. Konolige, synonym of
...
};
.. [Agrawal08] Agrawal, M., Konolige, K., & Blas, M. R. (2008). Censure: Center surround extremas for realtime feature detection and matching. In Computer Vision–ECCV 2008 (pp. 102-115). Springer Berlin Heidelberg.
DenseFeatureDetector
--------------------
.. ocv:class:: DenseFeatureDetector : public FeatureDetector

@ -11,7 +11,11 @@ Every descriptor with the
:ocv:class:`VectorDescriptorMatcher` ).
There are descriptors such as the One-way descriptor and Ferns that have the ``GenericDescriptorMatcher`` interface implemented but do not support ``DescriptorExtractor``.
.. Sample code::
* : An example explaining keypoint description can be found at opencv_source_code/samples/cpp/descriptor_extractor_matcher.cpp
* : An example on descriptor matching evaluation can be found at opencv_source_code/samples/cpp/detector_descriptor_matcher_evaluation.cpp
* : An example on one to many image matching can be found at opencv_source_code/samples/cpp/matching_to_many_images.cpp
GenericDescriptorMatcher
------------------------

@ -3,6 +3,10 @@ Feature Detection and Description
.. highlight:: cpp
.. Sample code::
* : An example explaining keypoint detection and description can be found at opencv_source_code/samples/cpp/descriptor_extractor_matcher.cpp
FAST
----
Detects corners using the FAST algorithm
@ -58,6 +62,10 @@ Maximally stable extremal region extractor. ::
The class encapsulates all the parameters of the MSER extraction algorithm (see
http://en.wikipedia.org/wiki/Maximally_stable_extremal_regions). Also see http://code.opencv.org/projects/opencv/wiki/MSER for useful comments and parameters description.
.. Sample code::
* : PYTHON : A complete example showing the use of the MSER detector can be found at opencv_source_code/samples/python2/mser.py
ORB
---
@ -182,6 +190,10 @@ Class implementing the FREAK (*Fast Retina Keypoint*) keypoint descriptor, descr
.. [AOV12] A. Alahi, R. Ortiz, and P. Vandergheynst. FREAK: Fast Retina Keypoint. In IEEE Conference on Computer Vision and Pattern Recognition, 2012. CVPR 2012 Open Source Award Winner.
.. Sample code::
* : An example on how to use the FREAK descriptor can be found at opencv_source_code/samples/cpp/freak_demo.cpp
FREAK::FREAK
------------
The FREAK constructor

@ -5,6 +5,12 @@ Object Categorization
This section describes approaches based on local 2D features and used to categorize objects.
.. Sample code::
* : A complete Bag-Of-Words sample can be found at opencv_source_code/samples/cpp/bagofwords_classification.cpp
* : PYTHON : An example using the features2D framework to perform object categorization can be found at opencv_source_code/samples/python2/find_obj.py
BOWTrainer
----------
.. ocv:class:: BOWTrainer

@ -43,8 +43,12 @@ typedef unsigned __int64 uint64_t;
#include "defines.h"
#if (defined WIN32 || defined _WIN32) && defined(_M_ARM)
# include <Intrin.h>
#endif
#ifdef __ARM_NEON__
#include "arm_neon.h"
# include "arm_neon.h"
#endif
namespace cvflann

@ -23,4 +23,3 @@
#include "opencv2/core/private.hpp"
#endif

@ -62,7 +62,12 @@ The class implements Histogram of Oriented Gradients ([Dalal2005]_) object detec
Interfaces of all methods are kept similar to the ``CPU HOG`` descriptor and detector analogues as much as possible.
.. Sample code::
* : An example applying the HOG descriptor for people detection can be found at opencv_source_code/samples/cpp/peopledetect.cpp
* : A GPU example applying the HOG descriptor for people detection can be found at opencv_source_code/samples/gpu/hog.cpp
* : PYTHON : An example applying the HOG descriptor for people detection can be found at opencv_source_code/samples/python2/peopledetect.py
gpu::HOGDescriptor::HOGDescriptor
-------------------------------------
@ -229,7 +234,10 @@ Cascade classifier class used for object detection. Supports HAAR and LBP cascad
Size getClassifierSize() const;
};
.. Sample code::
* : A cascade classifier example can be found at opencv_source_code/samples/gpu/cascadeclassifier.cpp
* : An NVIDIA API specific cascade classifier example can be found at opencv_source_code/samples/gpu/cascadeclassifier_nvidia_api.cpp
gpu::CascadeClassifier_GPU::CascadeClassifier_GPU
-----------------------------------------------------

@ -52,7 +52,12 @@
#include "opencv2/ts.hpp"
#include "opencv2/ts/gpu_perf.hpp"
CV_PERF_TEST_CUDA_MAIN(gpu_perf4au)
static const char * impls[] = {
"cuda",
"plain"
};
CV_PERF_TEST_MAIN_WITH_IMPLS(gpu_perf4au, impls, perf::printCudaInfo())
//////////////////////////////////////////////////////////
// HoughLinesP

@ -15,6 +15,9 @@ The class discriminates between foreground and background pixels by building and
.. seealso:: :ocv:class:`BackgroundSubtractorMOG`
.. Sample code::
* : An example on Gaussian-mixture-based background/foreground segmentation can be found at opencv_source_code/samples/gpu/bgfg_segm.cpp
gpu::createBackgroundSubtractorMOG

@ -11,6 +11,9 @@ Video reader interface.
.. ocv:class:: gpucodec::VideoReader
.. Sample code::
* : An example on how to use the videoReader class can be found at opencv_source_code/samples/gpu/video_reader.cpp
gpucodec::VideoReader::nextFrame

@ -15,6 +15,9 @@ The implementation uses H264 video codec.
.. note:: Currently only Windows platform is supported.
.. Sample code::
* : An example on how to use the videoWriter class can be found at opencv_source_code/samples/gpu/video_writer.cpp
gpucodec::VideoWriter::write

@ -5,7 +5,9 @@ Image Filtering
Functions and classes described in this section are used to perform various linear or non-linear filtering operations on 2D images.
.. Sample code::
* : An example containing all basic morphology operators like erode and dilate can be found at opencv_source_code/samples/gpu/morphology.cpp
gpu::Filter
-----------

@ -123,6 +123,9 @@ Composites two images using alpha opacity values contained in each image.
:param stream: Stream for the asynchronous version.
.. Sample code::
* : An example demonstrating the use of alphaComp can be found at opencv_source_code/samples/gpu/alpha_comp.cpp
.. [MHT2011] Pascal Getreuer, Malvar-He-Cutler Linear Image Demosaicking, Image Processing On Line, 2011

@ -108,6 +108,9 @@ Base class for line segments detector algorithm. ::
virtual int getMaxLines() const = 0;
};
.. Sample code::
* : An example using the Hough segment detector can be found at opencv_source_code/samples/gpu/houghlines.cpp
gpu::HoughSegmentDetector::detect

@ -109,6 +109,86 @@ namespace hist
/////////////////////////////////////////////////////////////////////////
namespace hist
{
// Adds one pixel sample to the shared-memory even-bin histogram.
// Samples outside [lowerLevel, upperLevel] are ignored.
__device__ __forceinline__ void histEvenInc(int* shist, uint data, int binSize, int lowerLevel, int upperLevel)
{
    if (data < lowerLevel || data > upperLevel)
        return;

    const uint bin = (data - lowerLevel) / binSize;
    Emulation::smem::atomicAdd(shist + bin, 1);
}
// Even-bin histogram kernel for 8-bit single-channel images.
// Each block accumulates a partial histogram in dynamically allocated
// shared memory (one int per bin; see smem_size in the launcher) and then
// merges its nonzero bins into the global histogram with atomics.
__global__ void histEven8u(const uchar* src, const size_t step, const int rows, const int cols,
int* hist, const int binCount, const int binSize, const int lowerLevel, const int upperLevel)
{
    extern __shared__ int shist[];

    const int y = blockIdx.x * blockDim.y + threadIdx.y;    // image row handled by this thread
    const int tid = threadIdx.y * blockDim.x + threadIdx.x; // linear thread id within the block

    // Zero the shared histogram, one bin per thread (the launcher uses a
    // 32x8 = 256-thread block; assumes binCount does not exceed that --
    // TODO confirm for large bin counts).
    if (tid < binCount)
        shist[tid] = 0;

    __syncthreads();

    if (y < rows)
    {
        const uchar* rowPtr = src + y * step;

        // Read the row four pixels at a time as packed 32-bit words.
        const uint* rowPtr4 = (uint*) rowPtr;

        const int cols_4 = cols / 4;
        for (int x = threadIdx.x; x < cols_4; x += blockDim.x)
        {
            const uint data = rowPtr4[x];

            histEvenInc(shist, (data >> 0) & 0xFFU, binSize, lowerLevel, upperLevel);
            histEvenInc(shist, (data >> 8) & 0xFFU, binSize, lowerLevel, upperLevel);
            histEvenInc(shist, (data >> 16) & 0xFFU, binSize, lowerLevel, upperLevel);
            histEvenInc(shist, (data >> 24) & 0xFFU, binSize, lowerLevel, upperLevel);
        }

        // Tail pixels (when cols is not a multiple of 4) are processed by
        // thread 0 of the row only.
        if (cols % 4 != 0 && threadIdx.x == 0)
        {
            for (int x = cols_4 * 4; x < cols; ++x)
            {
                const uchar data = rowPtr[x];
                histEvenInc(shist, data, binSize, lowerLevel, upperLevel);
            }
        }
    }

    __syncthreads();

    // Merge this block's partial histogram into the global one; skipping
    // empty bins avoids needless global atomic traffic.
    if (tid < binCount)
    {
        const int histVal = shist[tid];

        if (histVal > 0)
            ::atomicAdd(hist + tid, histVal);
    }
}
// Host-side launcher for the even-bin histogram kernel above.
void histEven8u(PtrStepSzb src, int* hist, int binCount, int lowerLevel, int upperLevel, cudaStream_t stream)
{
    const dim3 block(32, 8);
    // 1-D grid over image rows: each block covers blockDim.y rows.
    const dim3 grid(divUp(src.rows, block.y));

    // Width of each bin, rounded up so binCount bins cover the full range.
    const int binSize = divUp(upperLevel - lowerLevel, binCount);

    // Dynamic shared memory: one int counter per bin.
    const size_t smem_size = binCount * sizeof(int);

    histEven8u<<<grid, block, smem_size, stream>>>(src.data, src.step, src.rows, src.cols, hist, binCount, binSize, lowerLevel, upperLevel);
    cudaSafeCall( cudaGetLastError() );

    // Default-stream (synchronous) calls wait for the kernel to finish.
    if (stream == 0)
        cudaSafeCall( cudaDeviceSynchronize() );
}
}
/////////////////////////////////////////////////////////////////////////
namespace hist
{
__constant__ int c_lut[256];

@ -478,6 +478,21 @@ void cv::gpu::evenLevels(OutputArray _levels, int nLevels, int lowerLevel, int u
_levels.getGpuMatRef().upload(host_levels);
}
namespace hist
{
void histEven8u(PtrStepSzb src, int* hist, int binCount, int lowerLevel, int upperLevel, cudaStream_t stream);
}
namespace
{
    // Zeroes the output histogram and runs the specialized 8-bit even-bin
    // CUDA path (hist::histEven8u). The histogram is created as a
    // 1 x histSize CV_32S row vector.
    void histEven8u(const GpuMat& src, GpuMat& hist, int histSize, int lowerLevel, int upperLevel, cudaStream_t stream)
    {
        hist.create(1, histSize, CV_32S);
        // Counters must start at zero; the kernel only accumulates.
        cudaSafeCall( cudaMemsetAsync(hist.data, 0, histSize * sizeof(int), stream) );
        hist::histEven8u(src, hist.ptr<int>(), histSize, lowerLevel, upperLevel, stream);
    }
}
void cv::gpu::histEven(InputArray _src, OutputArray hist, InputOutputArray buf, int histSize, int lowerLevel, int upperLevel, Stream& stream)
{
typedef void (*hist_t)(const GpuMat& src, OutputArray hist, InputOutputArray buf, int levels, int lowerLevel, int upperLevel, cudaStream_t stream);
@ -491,6 +506,12 @@ void cv::gpu::histEven(InputArray _src, OutputArray hist, InputOutputArray buf,
GpuMat src = _src.getGpuMat();
if (src.depth() == CV_8U && deviceSupports(FEATURE_SET_COMPUTE_30))
{
histEven8u(src, hist.getGpuMatRef(), histSize, lowerLevel, upperLevel, StreamAccessor::getStream(stream));
return;
}
CV_Assert( src.type() == CV_8UC1 || src.type() == CV_16UC1 || src.type() == CV_16SC1 );
hist_callers[src.depth()](src, hist, buf, histSize, lowerLevel, upperLevel, StreamAccessor::getStream(stream));

@ -49,13 +49,16 @@ using namespace cvtest;
///////////////////////////////////////////////////////////////////////////////////////////////////////
// HistEven
struct HistEven : testing::TestWithParam<cv::gpu::DeviceInfo>
PARAM_TEST_CASE(HistEven, cv::gpu::DeviceInfo, cv::Size)
{
cv::gpu::DeviceInfo devInfo;
cv::Size size;
virtual void SetUp()
{
devInfo = GetParam();
devInfo = GET_PARAM(0);
size = GET_PARAM(1);
cv::gpu::setDevice(devInfo.deviceID());
}
@ -63,57 +66,34 @@ struct HistEven : testing::TestWithParam<cv::gpu::DeviceInfo>
GPU_TEST_P(HistEven, Accuracy)
{
cv::Mat img = readImage("stereobm/aloe-L.png");
ASSERT_FALSE(img.empty());
cv::Mat hsv;
cv::cvtColor(img, hsv, cv::COLOR_BGR2HSV);
cv::Mat src = randomMat(size, CV_8UC1);
int hbins = 30;
float hranges[] = {0.0f, 180.0f};
std::vector<cv::Mat> srcs;
cv::split(hsv, srcs);
float hranges[] = {50.0f, 200.0f};
cv::gpu::GpuMat hist;
cv::gpu::histEven(loadMat(srcs[0]), hist, hbins, (int)hranges[0], (int)hranges[1]);
cv::gpu::histEven(loadMat(src), hist, hbins, (int) hranges[0], (int) hranges[1]);
cv::Mat hist_gold;
cv::MatND histnd;
int histSize[] = {hbins};
const float* ranges[] = {hranges};
int channels[] = {0};
cv::calcHist(&hsv, 1, channels, cv::Mat(), histnd, 1, histSize, ranges);
cv::calcHist(&src, 1, channels, cv::Mat(), hist_gold, 1, histSize, ranges);
cv::Mat hist_gold = histnd;
hist_gold = hist_gold.t();
hist_gold.convertTo(hist_gold, CV_32S);
EXPECT_MAT_NEAR(hist_gold, hist, 0.0);
}
INSTANTIATE_TEST_CASE_P(GPU_ImgProc, HistEven, ALL_DEVICES);
INSTANTIATE_TEST_CASE_P(GPU_ImgProc, HistEven, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES));
///////////////////////////////////////////////////////////////////////////////////////////////////////
// CalcHist
namespace
{
    // Reference (CPU) implementation used by the tests: full 256-bin
    // histogram of an 8-bit single-channel image, stored as a 1 x 256
    // CV_32SC1 row vector.
    void calcHistGold(const cv::Mat& src, cv::Mat& hist)
    {
        hist.create(1, 256, CV_32SC1);
        hist.setTo(cv::Scalar::all(0));

        int* hist_row = hist.ptr<int>();

        for (int y = 0; y < src.rows; ++y)
        {
            const uchar* src_row = src.ptr(y);

            for (int x = 0; x < src.cols; ++x)
                ++hist_row[src_row[x]];
        }
    }
}
PARAM_TEST_CASE(CalcHist, cv::gpu::DeviceInfo, cv::Size)
{
cv::gpu::DeviceInfo devInfo;
@ -137,7 +117,16 @@ GPU_TEST_P(CalcHist, Accuracy)
cv::gpu::calcHist(loadMat(src), hist);
cv::Mat hist_gold;
calcHistGold(src, hist_gold);
const int hbins = 256;
const float hranges[] = {0.0f, 256.0f};
const int histSize[] = {hbins};
const float* ranges[] = {hranges};
const int channels[] = {0};
cv::calcHist(&src, 1, channels, cv::Mat(), hist_gold, 1, histSize, ranges);
hist_gold = hist_gold.reshape(1, 1);
hist_gold.convertTo(hist_gold, CV_32S);
EXPECT_MAT_NEAR(hist_gold, hist, 0.0);
}

@ -3,6 +3,10 @@ Optical Flow
.. highlight:: cpp
.. Sample code::
* : A general optical flow example can be found at opencv_source_code/samples/gpu/optical_flow.cpp
* : A general optical flow example using the NVIDIA API can be found at opencv_source_code/samples/gpu/opticalflow_nvidia_api.cpp
gpu::BroxOpticalFlow
@ -44,6 +48,9 @@ Class computing the optical flow for two images using Brox et al Optical Flow al
GpuMat buf;
};
.. Sample code::
* : An example illustrating the Brox et al optical flow algorithm can be found at opencv_source_code/samples/gpu/brox_optical_flow.cpp
gpu::FarnebackOpticalFlow
@ -138,6 +145,9 @@ The class can calculate an optical flow for a sparse feature set or dense optica
.. seealso:: :ocv:func:`calcOpticalFlowPyrLK`
.. Sample code::
* : An example of the Lucas Kanade optical flow algorithm can be found at opencv_source_code/samples/gpu/pyrlk_optical_flow.cpp
gpu::PyrLKOpticalFlow::sparse

@ -3,7 +3,11 @@ Stereo Correspondence
.. highlight:: cpp
.. Sample code::
* : A basic stereo matching example can be found at opencv_source_code/samples/gpu/stereo_match.cpp
* : A stereo matching example using several GPUs can be found at opencv_source_code/samples/gpu/stereo_multi.cpp
* : A stereo matching example using several GPUs and driver API can be found at opencv_source_code/samples/gpu/driver_api_stereo_multi.cpp
gpu::StereoBM
-------------

@ -9,13 +9,12 @@ ocv_add_module(highgui opencv_imgproc OPTIONAL opencv_androidcamera)
ocv_clear_vars(GRFMT_LIBS)
if(WITH_PNG OR WITH_TIFF OR WITH_OPENEXR)
if(HAVE_PNG OR HAVE_TIFF OR HAVE_OPENEXR)
ocv_include_directories(${ZLIB_INCLUDE_DIR})
list(APPEND GRFMT_LIBS ${ZLIB_LIBRARIES})
endif()
if(WITH_JPEG)
add_definitions(-DHAVE_JPEG)
if(HAVE_JPEG)
ocv_include_directories(${JPEG_INCLUDE_DIR})
list(APPEND GRFMT_LIBS ${JPEG_LIBRARIES})
endif()
@ -26,27 +25,23 @@ if(WITH_WEBP)
list(APPEND GRFMT_LIBS ${WEBP_LIBRARIES})
endif()
if(WITH_PNG)
add_definitions(-DHAVE_PNG)
if(HAVE_PNG)
add_definitions(${PNG_DEFINITIONS})
ocv_include_directories(${PNG_INCLUDE_DIR})
list(APPEND GRFMT_LIBS ${PNG_LIBRARIES})
endif()
if(WITH_TIFF)
add_definitions(-DHAVE_TIFF)
if(HAVE_TIFF)
ocv_include_directories(${TIFF_INCLUDE_DIR})
list(APPEND GRFMT_LIBS ${TIFF_LIBRARIES})
endif()
if(WITH_JASPER)
add_definitions(-DHAVE_JASPER)
if(HAVE_JASPER)
ocv_include_directories(${JASPER_INCLUDE_DIR})
list(APPEND GRFMT_LIBS ${JASPER_LIBRARIES})
endif()
if(WITH_OPENEXR)
add_definitions(-DHAVE_OPENEXR)
if(HAVE_OPENEXR)
include_directories(SYSTEM ${OPENEXR_INCLUDE_PATHS})
list(APPEND GRFMT_LIBS ${OPENEXR_LIBRARIES})
endif()
@ -114,16 +109,12 @@ elseif(HAVE_WIN32UI)
list(APPEND highgui_srcs src/window_w32.cpp)
elseif(HAVE_GTK)
list(APPEND highgui_srcs src/window_gtk.cpp)
elseif(APPLE)
if(WITH_CARBON)
add_definitions(-DHAVE_CARBON=1)
list(APPEND highgui_srcs src/window_carbon.cpp)
list(APPEND HIGHGUI_LIBRARIES "-framework Carbon" "-framework QuickTime")
elseif(NOT IOS)
add_definitions(-DHAVE_COCOA=1)
list(APPEND highgui_srcs src/window_cocoa.mm)
list(APPEND HIGHGUI_LIBRARIES "-framework Cocoa")
endif()
elseif(HAVE_CARBON)
list(APPEND highgui_srcs src/window_carbon.cpp)
list(APPEND HIGHGUI_LIBRARIES "-framework Carbon" "-framework QuickTime")
elseif(HAVE_COCOA)
list(APPEND highgui_srcs src/window_cocoa.mm)
list(APPEND HIGHGUI_LIBRARIES "-framework Cocoa")
endif()
if(WIN32 AND NOT ARM)
@ -203,6 +194,7 @@ endif(HAVE_FFMPEG)
if(HAVE_PVAPI)
add_definitions(-DHAVE_PVAPI)
add_definitions(${PVAPI_DEFINITIONS})
ocv_include_directories(${PVAPI_INCLUDE_PATH})
set(highgui_srcs src/cap_pvapi.cpp ${highgui_srcs})
list(APPEND HIGHGUI_LIBRARIES ${PVAPI_LIBRARY})
@ -216,19 +208,17 @@ if(HAVE_GIGE_API)
list(APPEND highgui_srcs src/cap_giganetix.cpp)
endif(HAVE_GIGE_API)
if(WITH_AVFOUNDATION)
add_definitions(-DHAVE_AVFOUNDATION=1)
if(HAVE_AVFOUNDATION)
list(APPEND highgui_srcs src/cap_avfoundation.mm)
list(APPEND HIGHGUI_LIBRARIES "-framework AVFoundation" "-framework QuartzCore")
endif()
if(HAVE_QUICKTIME)
list(APPEND highgui_srcs src/cap_qt.cpp)
list(APPEND HIGHGUI_LIBRARIES "-framework Carbon" "-framework QuickTime" "-framework CoreFoundation" "-framework QuartzCore")
elseif(APPLE)
add_definitions(-DHAVE_QUICKTIME=1)
if(WITH_QUICKTIME)
list(APPEND highgui_srcs src/cap_qt.cpp)
list(APPEND HIGHGUI_LIBRARIES "-framework Carbon" "-framework QuickTime" "-framework CoreFoundation" "-framework QuartzCore")
else()
list(APPEND highgui_srcs src/cap_qtkit.mm)
list(APPEND HIGHGUI_LIBRARIES "-framework QTKit" "-framework QuartzCore" "-framework AppKit")
endif()
list(APPEND highgui_srcs src/cap_qtkit.mm)
list(APPEND HIGHGUI_LIBRARIES "-framework QTKit" "-framework QuartzCore" "-framework AppKit")
endif()
if(IOS)

@ -223,6 +223,15 @@ The class provides C++ API for capturing video from cameras or for reading video
.. note:: In C API the black-box structure ``CvCapture`` is used instead of ``VideoCapture``.
.. Sample code::
* : A basic sample on using the VideoCapture interface can be found at opencv_source_code/samples/cpp/starter_video.cpp
* : Another basic video processing sample can be found at opencv_source_code/samples/cpp/video_dmtx.cpp
* : PYTHON : A basic sample on using the VideoCapture interface can be found at opencv_source_code/samples/python2/video.py
* : PYTHON : A basic video processing sample can be found at opencv_source_code/samples/python2/video_dmtx.py
* : PYTHON : A multi threaded video processing sample can be found at opencv_source_code/samples/python2/video_threaded.py
VideoCapture::VideoCapture
------------------------------

@ -33,6 +33,10 @@ The function ``createTrackbar`` creates a trackbar (a slider or range control) w
Clicking the label of each trackbar enables editing the trackbar values manually.
.. Sample code::
* : An example of using the trackbar functionality can be found at opencv_source_code/samples/cpp/connected_components.cpp
getTrackbarPos
------------------
Returns the trackbar position.
@ -75,6 +79,7 @@ The function ``imshow`` displays an image in the specified window. If the window
* If the image is 32-bit floating-point, the pixel values are multiplied by 255. That is, the value range [0,1] is mapped to [0,255].
If window was created with OpenGL support, ``imshow`` also support :ocv:class:`ogl::Buffer` , :ocv:class:`ogl::Texture2D` and :ocv:class:`gpu::GpuMat` as input.
namedWindow
---------------
@ -88,7 +93,13 @@ Creates a window.
:param name: Name of the window in the window caption that may be used as a window identifier.
:param flags: Flags of the window. Currently the only supported flag is ``CV_WINDOW_AUTOSIZE`` . If this is set, the window size is automatically adjusted to fit the displayed image (see :ocv:func:`imshow` ), and you cannot change the window size manually.
:param flags: Flags of the window. The supported flags are:
* **WINDOW_NORMAL** If this is set, the user can resize the window (no constraint).
* **WINDOW_AUTOSIZE** If this is set, the window size is automatically adjusted to fit the displayed image (see :ocv:func:`imshow` ), and you cannot change the window size manually.
* **WINDOW_OPENGL** If this is set, the window will be created with OpenGL support.
The function ``namedWindow`` creates a window that can be used as a placeholder for images and trackbars. Created windows are referred to by their names.
@ -238,3 +249,31 @@ The function ``waitKey`` waits for a key event infinitely (when
.. note::
The function only works if there is at least one HighGUI window created and the window is active. If there are several HighGUI windows, any of them can be active.
setOpenGlDrawCallback
---------------------
Set OpenGL render handler for the specified window.
.. ocv:function:: void setOpenGlDrawCallback(const string& winname, OpenGlDrawCallback onOpenGlDraw, void* userdata = 0)
:param winname: Window name
:param onOpenGlDraw: Draw callback.
:param userdata: The optional parameter passed to the callback.
setOpenGlContext
----------------
Sets the specified window as current OpenGL context.
.. ocv:function:: void setOpenGlContext(const string& winname)
:param winname: Window name
updateWindow
------------
Force window to redraw its context and call draw callback ( :ocv:func:`setOpenGlDrawCallback` ).
.. ocv:function:: void updateWindow(const string& winname)
:param winname: Window name

@ -2124,7 +2124,7 @@ long videoDevices::initDevices(IMFAttributes *pAttributes)
return hr;
}
size_t videoDevices::getCount()
unsigned int videoDevices::getCount()
{
return vds_Devices.size();
}

@ -52,6 +52,8 @@ void CvCaptureCAM_XIMEA::init()
{
xiGetNumberDevices( &numDevices);
hmv = NULL;
frame = NULL;
timeout = 0;
memset(&image, 0, sizeof(XI_IMG));
}
@ -60,6 +62,8 @@ void CvCaptureCAM_XIMEA::init()
// Initialize camera input
bool CvCaptureCAM_XIMEA::open( int wIndex )
{
#define HandleXiResult(res) if (res!=XI_OK) goto error;
int mvret = XI_OK;
if(numDevices == 0)
@ -73,26 +77,42 @@ bool CvCaptureCAM_XIMEA::open( int wIndex )
// always use auto exposure/gain
mvret = xiSetParamInt( hmv, XI_PRM_AEAG, 1);
if(mvret != XI_OK) goto error;
// always use auto white balance
mvret = xiSetParamInt( hmv, XI_PRM_AUTO_WB, 1);
if(mvret != XI_OK) goto error;
// default image format RGB24
mvret = xiSetParamInt( hmv, XI_PRM_IMAGE_DATA_FORMAT, XI_RGB24);
if(mvret != XI_OK) goto error;
HandleXiResult(mvret);
int width = 0;
mvret = xiGetParamInt( hmv, XI_PRM_WIDTH, &width);
if(mvret != XI_OK) goto error;
HandleXiResult(mvret);
int height = 0;
mvret = xiGetParamInt( hmv, XI_PRM_HEIGHT, &height);
if(mvret != XI_OK) goto error;
HandleXiResult(mvret);
int isColor = 0;
mvret = xiGetParamInt(hmv, XI_PRM_IMAGE_IS_COLOR, &isColor);
HandleXiResult(mvret);
if(isColor) // for color cameras
{
// default image format RGB24
mvret = xiSetParamInt( hmv, XI_PRM_IMAGE_DATA_FORMAT, XI_RGB24);
HandleXiResult(mvret);
// always use auto white ballance for color cameras
mvret = xiSetParamInt( hmv, XI_PRM_AUTO_WB, 1);
HandleXiResult(mvret);
// allocate frame buffer for RGB24 image
frame = cvCreateImage(cvSize( width, height), IPL_DEPTH_8U, 3);
}
else // for mono cameras
{
// default image format MONO8
mvret = xiSetParamInt( hmv, XI_PRM_IMAGE_DATA_FORMAT, XI_MONO8);
HandleXiResult(mvret);
// allocate frame buffer for RGB24 image
frame = cvCreateImage(cvSize( width, height), IPL_DEPTH_8U, 3);
// allocate frame buffer for MONO8 image
frame = cvCreateImage(cvSize( width, height), IPL_DEPTH_8U, 1);
}
//default capture timeout 10s
timeout = 10000;
@ -118,9 +138,12 @@ void CvCaptureCAM_XIMEA::close()
{
if(frame)
cvReleaseImage(&frame);
xiStopAcquisition(hmv);
xiCloseDevice(hmv);
if(hmv)
{
xiStopAcquisition(hmv);
xiCloseDevice(hmv);
}
hmv = NULL;
}

@ -53,12 +53,6 @@
#endif
#endif
#undef PACKAGE
#undef PACKAGE_BUGREPORT
#undef PACKAGE_NAME
#undef PACKAGE_STRING
#undef PACKAGE_TARNAME
#undef PACKAGE_VERSION
#undef VERSION
#include <jasper/jasper.h>

@ -51,7 +51,6 @@
and png2bmp sample from libpng distribution (Copyright (C) 1999-2001 MIYASAKA Masaru)
\****************************************************************************************/
#undef HAVE_UNISTD_H //to avoid redefinition
#ifndef _LFS64_LARGEFILE
# define _LFS64_LARGEFILE 0
#endif

@ -30,7 +30,11 @@ Finds edges in an image using the [Canny86]_ algorithm.
The function finds edges in the input image ``image`` and marks them in the output map ``edges`` using the Canny algorithm. The smallest value between ``threshold1`` and ``threshold2`` is used for edge linking. The largest value is used to find initial segments of strong edges. See
http://en.wikipedia.org/wiki/Canny_edge_detector
.. Sample code::
* : An example on using the canny edge detector can be found at opencv_source_code/samples/cpp/edge.cpp
* : PYTHON : An example on using the canny edge detector can be found at opencv_source_code/samples/cpp/edge.py
cornerEigenValsAndVecs
----------------------
@ -81,7 +85,9 @@ The output of the function can be used for robust edge or corner detection.
:ocv:func:`cornerHarris`,
:ocv:func:`preCornerDetect`
.. Sample code::
* : PYTHON : An example on how to use eigenvectors and eigenvalues to estimate image texture flow direction can be found at opencv_source_code/samples/python2/texture_flow.py
cornerHarris
------------
@ -344,6 +350,9 @@ Example: ::
:ocv:func:`fitEllipse`,
:ocv:func:`minEnclosingCircle`
.. Sample code::
* : An example using the Hough circle detector can be found at opencv_source_code/samples/cpp/houghcircles.cpp
HoughLines
----------
@ -398,6 +407,10 @@ Finds lines in a binary image using the standard Hough transform.
The function implements the standard or standard multi-scale Hough transform algorithm for line detection. See http://homepages.inf.ed.ac.uk/rbf/HIPR2/hough.htm for a good explanation of Hough transform.
See also the example in :ocv:func:`HoughLinesP` description.
.. Sample code::
* : An example using the Hough line detector can be found at opencv_source_code/samples/cpp/houghlines.cpp
HoughLinesP
-----------
Finds line segments in a binary image using the probabilistic Hough transform.

@ -22,6 +22,10 @@ OpenCV enables you to specify the extrapolation method. For details, see the fun
* BORDER_CONSTANT: iiiiii|abcdefgh|iiiiiii with some specified 'i'
*/
.. Sample code::
* : PYTHON : A complete example illustrating different morphological operations like erode/dilate, open/close, blackhat/tophat ... can be found at opencv_source_code/samples/python2/morphology.py
BaseColumnFilter
----------------
.. ocv:class:: BaseColumnFilter
@ -779,6 +783,9 @@ The function supports the in-place mode. Dilation can be applied several ( ``ite
:ocv:func:`morphologyEx`,
:ocv:func:`createMorphologyFilter`
.. Sample code::
* : An example using the morphological dilate operation can be found at opencv_source_code/samples/cpp/morphology2.cpp
erode
-----
@ -818,7 +825,9 @@ The function supports the in-place mode. Erosion can be applied several ( ``iter
:ocv:func:`morphologyEx`,
:ocv:func:`createMorphologyFilter`
.. Sample code::
* : An example using the morphological erode operation can be found at opencv_source_code/samples/cpp/morphology2.cpp
filter2D
--------
@ -1150,6 +1159,9 @@ Any of the operations can be done in-place. In case of multi-channel images, eac
:ocv:func:`erode`,
:ocv:func:`createMorphologyFilter`
.. Sample code::
* : An example using the morphologyEx function for the morphological opening and closing operations can be found at opencv_source_code/samples/cpp/morphology2.cpp
Laplacian
---------
@ -1193,7 +1205,9 @@ This is done when ``ksize > 1`` . When ``ksize == 1`` , the Laplacian is compute
:ocv:func:`Sobel`,
:ocv:func:`Scharr`
.. Sample code::
* : An example using the Laplace transformation for edge detection can be found at opencv_source_code/samples/cpp/laplace.cpp
pyrDown
-------
@ -1250,6 +1264,10 @@ Upsamples an image and then blurs it.
The function performs the upsampling step of the Gaussian pyramid construction, though it can actually be used to construct the Laplacian pyramid. First, it upsamples the source image by injecting even zero rows and columns and then convolves the result with the same kernel as in
:ocv:func:`pyrDown` multiplied by 4.
.. Sample code::
* : PYTHON : An example of Laplacian Pyramid construction and merging can be found at opencv_source_code/samples/python2/lappyr.py
pyrMeanShiftFiltering
---------------------
@ -1297,6 +1315,9 @@ After the iterations over, the color components of the initial pixel (that is, t
When ``maxLevel > 0``, the gaussian pyramid of ``maxLevel+1`` levels is built, and the above procedure is run on the smallest layer first. After that, the results are propagated to the larger layer and the iterations are run again only on those pixels where the layer colors differ by more than ``sr`` from the lower-resolution layer of the pyramid. That makes boundaries of color regions sharper. Note that the results will be actually different from the ones obtained by running the meanshift procedure on the whole original image (i.e. when ``maxLevel==0``).
.. Sample code::
* : An example using mean-shift image segmentation can be found at opencv_source_code/samples/cpp/meanshift_segmentation.cpp
sepFilter2D
-----------

@ -298,6 +298,9 @@ where
The function emulates the human "foveal" vision and can be used for fast scale and rotation-invariant template matching, for object tracking and so forth. The function can not operate in-place.
.. Sample code::
* : An example using the geometric logpolar operation in 4 applications can be found at opencv_source_code/samples/cpp/logpolar_bsm.cpp
remap
-----

@ -98,7 +98,12 @@ input arrays at the same location. The sample below shows how to compute a 2D Hu
waitKey();
}
.. Sample code::
* : An example for creating histograms of an image can be found at opencv_source_code/samples/cpp/demhist.cpp
* : PYTHON : An example for creating color histograms can be found at opencv_source/samples/python2/color_histogram.py
* : PYTHON : An example illustrating RGB and grayscale histogram plotting can be found at opencv_source/samples/python2/hist.py
calcBackProject

@ -476,6 +476,12 @@ In this mode, the complexity is still linear.
That is, the function provides a very fast way to compute the Voronoi diagram for a binary image.
Currently, the second variant can use only the approximate distance transform algorithm, i.e. ``maskSize=CV_DIST_MASK_PRECISE`` is not supported yet.
.. Sample code::
* : An example on using the distance transform can be found at opencv_source_code/samples/cpp/distrans.cpp
* : PYTHON : An example on using the distance transform can be found at opencv_source/samples/python2/distrans.py
floodFill
---------
Fills a connected component with the given color.
@ -574,11 +580,15 @@ where
*
Color/brightness of the seed point in case of a fixed range.
Use these functions to either mark a connected component with the specified color in-place, or build a mask and then extract the contour, or copy the region to another image, and so on. Various modes of the function are demonstrated in the ``floodfill.cpp`` sample.
Use these functions to either mark a connected component with the specified color in-place, or build a mask and then extract the contour, or copy the region to another image, and so on.
.. seealso:: :ocv:func:`findContours`
.. Sample code::
* : An example using the FloodFill technique can be found at opencv_source_code/samples/cpp/ffilldemo.cpp
* : PYTHON : An example using the FloodFill technique can be found at opencv_source_code/samples/python2/floodfill.cpp
integral
--------
@ -738,6 +748,12 @@ Visual demonstration and usage example of the function can be found in the OpenC
.. seealso:: :ocv:func:`findContours`
.. Sample code::
* : An example using the watershed algorithm can be found at opencv_source_code/samples/cpp/watershed.cpp
* : PYTHON : An example using the watershed algorithm can be found at opencv_source_code/samples/python2/watershed.py
grabCut
-------
Runs the GrabCut algorithm.
@ -784,3 +800,9 @@ See the sample ``grabcut.cpp`` to learn how to use the function.
.. [Meyer92] Meyer, F. *Color Image Segmentation*, ICIP92, 1992
.. [Telea04] Alexandru Telea, *An Image Inpainting Technique Based on the Fast Marching Method*. Journal of Graphics, GPU, and Game Tools 9 1, pp 23-34 (2004)
.. Sample code::
* : An example using the GrabCut algorithm can be found at opencv_source_code/samples/cpp/grabcut.cpp
* : PYTHON : An example using the GrabCut algorithm can be found at opencv_source_code/samples/python2/grabcut.py

@ -73,3 +73,6 @@ image patch:
After the function finishes the comparison, the best matches can be found as global minimums (when ``CV_TM_SQDIFF`` was used) or maximums (when ``CV_TM_CCORR`` or ``CV_TM_CCOEFF`` was used) using the
:ocv:func:`minMaxLoc` function. In case of a color image, template summation in the numerator and each sum in the denominator is done over all of the channels and separate mean values are used for each channel. That is, the function can take a color template and a color image. The result will still be a single-channel image, which is easier to analyze.
.. Sample code::
* : PYTHON : An example on how to match mouse selected regions in an image can be found at opencv_source_code/samples/python2/mouse_and_match.py

@ -192,6 +192,14 @@ The function retrieves contours from the binary image using the algorithm
.. note:: If you use the new Python interface then the ``CV_`` prefix has to be omitted in contour retrieval mode and contour approximation method parameters (for example, use ``cv2.RETR_LIST`` and ``cv2.CHAIN_APPROX_NONE`` parameters). If you use the old Python interface then these parameters have the ``CV_`` prefix (for example, use ``cv.CV_RETR_LIST`` and ``cv.CV_CHAIN_APPROX_NONE``).
.. Sample code::
* : An example using the findContour functionality can be found at opencv_source_code/samples/cpp/contours2.cpp
* : An example using findContours to clean up a background segmentation result at opencv_source_code/samples/cpp/segment_objects.cpp
* : PYTHON : An example using the findContour functionality can be found at opencv_source/samples/python2/contours.py
* : PYTHON : An example of detecting squares in an image can be found at opencv_source/samples/python2/squares.py
approxPolyDP
----------------
@ -353,6 +361,10 @@ The functions find the convex hull of a 2D point set using the Sklansky's algori
that has
*O(N logN)* complexity in the current implementation. See the OpenCV sample ``convexhull.cpp`` that demonstrates the usage of different function variants.
.. Sample code::
* : An example using the convexHull functionality can be found at opencv_source_code/samples/cpp/convexhull.cpp
convexityDefects
----------------
@ -406,6 +418,11 @@ Fits an ellipse around a set of 2D points.
The function calculates the ellipse that fits (in a least-squares sense) a set of 2D points best of all. It returns the rotated rectangle in which the ellipse is inscribed. The algorithm [Fitzgibbon95]_ is used.
.. Sample code::
* : An example using the fitEllipse technique can be found at opencv_source_code/samples/cpp/fitellipse.cpp
fitLine
-----------
Fits a line to a 2D or 3D point set.
@ -476,6 +493,9 @@ http://en.wikipedia.org/wiki/M-estimator
:math:`w_i` are adjusted to be inversely proportional to
:math:`\rho(r_i)` .
.. Sample code:
* : PYTHON : An example of robust line fitting can be found at opencv_source_code/samples/python2/fitline.py
isContourConvex

@ -258,7 +258,7 @@ PERF_TEST_P(Size_CvtMode, cvtColor8u,
declare.time(100);
declare.in(src, WARMUP_RNG).out(dst);
int runs = sz.width <= 320 ? 70 : 5;
int runs = sz.width <= 320 ? 100 : 5;
TEST_CYCLE_MULTIRUN(runs) cvtColor(src, dst, mode, ch.dcn);
SANITY_CHECK(dst, 1);

@ -28,14 +28,14 @@ PERF_TEST_P(Size_Source, calcHist1d,
int dims = 1;
int numberOfImages = 1;
const float r[] = {rangeLow, rangeHight};
const float* ranges[] = {r};
const float range[] = {rangeLow, rangeHight};
const float* ranges[] = {range};
randu(source, rangeLow, rangeHight);
declare.in(source);
TEST_CYCLE()
TEST_CYCLE_MULTIRUN(3)
{
calcHist(&source, numberOfImages, channels, Mat(), hist, dims, histSize, ranges);
}

@ -25,7 +25,7 @@ PERF_TEST_P(MatInfo_Size_Size, resizeUpLinear,
cvtest::fillGradient(src);
declare.in(src).out(dst);
TEST_CYCLE() resize(src, dst, to);
TEST_CYCLE_MULTIRUN(10) resize(src, dst, to);
#ifdef ANDROID
SANITY_CHECK(dst, 5);
@ -52,7 +52,7 @@ PERF_TEST_P(MatInfo_Size_Size, resizeDownLinear,
cvtest::fillGradient(src);
declare.in(src).out(dst);
TEST_CYCLE() resize(src, dst, to);
TEST_CYCLE_MULTIRUN(10) resize(src, dst, to);
#ifdef ANDROID
SANITY_CHECK(dst, 5);

@ -2517,7 +2517,7 @@ void cv::cvtColor( InputArray _src, OutputArray _dst, int code, int dcn )
case CV_BGR2YUV: case CV_RGB2YUV:
{
CV_Assert( scn == 3 || scn == 4 );
bidx = code == CV_BGR2YCrCb || code == CV_RGB2YUV ? 0 : 2;
bidx = code == CV_BGR2YCrCb || code == CV_BGR2YUV ? 0 : 2;
static const float yuv_f[] = { 0.114f, 0.587f, 0.299f, 0.492f, 0.877f };
static const int yuv_i[] = { B2Y, G2Y, R2Y, 8061, 14369 };
const float* coeffs_f = code == CV_BGR2YCrCb || code == CV_RGB2YCrCb ? 0 : yuv_f;
@ -2546,7 +2546,7 @@ void cv::cvtColor( InputArray _src, OutputArray _dst, int code, int dcn )
{
if( dcn <= 0 ) dcn = 3;
CV_Assert( scn == 3 && (dcn == 3 || dcn == 4) );
bidx = code == CV_YCrCb2BGR || code == CV_YUV2RGB ? 0 : 2;
bidx = code == CV_YCrCb2BGR || code == CV_YUV2BGR ? 0 : 2;
static const float yuv_f[] = { 2.032f, -0.395f, -0.581f, 1.140f };
static const int yuv_i[] = { 33292, -6472, -9519, 18678 };
const float* coeffs_f = code == CV_YCrCb2BGR || code == CV_YCrCb2RGB ? 0 : yuv_f;

@ -491,20 +491,6 @@ public class CoreTest extends OpenCVTestCase {
Point truth[] = {
new Point(5, 6),
new Point(5, 6),
new Point(5, 6),
new Point(5, 6),
new Point(5, 6),
new Point(5, 6),
new Point(5, 6),
new Point(5, 6),
new Point(4, 6),
new Point(4, 6),
new Point(4, 6),
new Point(4, 6),
new Point(4, 6),
new Point(4, 6),
new Point(4, 6),
new Point(4, 6)
};
assertArrayPointsEquals(truth, pts.toArray(), EPS);

File diff suppressed because it is too large Load Diff

@ -5,6 +5,12 @@ This section describes obsolete ``C`` interface of EM algorithm. Details of the
.. highlight:: cpp
.. Sample code::
* : An example on using the Expectation Maximalization algorithm can be found at opencv_source_code/samples/cpp/em.cpp
* : PYTHON : An example using Expectation Maximalization for Gaussian Mixing can be found at opencv_source_code/samples/python2/gaussian_mix.py
CvEMParams
----------

@ -75,7 +75,9 @@ Class containing a base structure for ``RTreeClassifier``. ::
void estimateQuantPercForPosteriors(float perc[2]);
};
.. Sample code::
* : PYTHON : An example using Randomized Tree training for letter recognition can be found at opencv_source_code/samples/python2/letter_recog.py
RandomizedTree::train
-------------------------
@ -99,7 +101,9 @@ Trains a randomized tree using an input set of keypoints.
:param num_quant_bits: Number of bits used for quantization.
.. Sample code::
* : An example on training a Random Tree Classifier for letter recognition can be found at opencv_source_code\samples\cpp\letter_recog.cpp
RandomizedTree::read
------------------------

@ -11,6 +11,12 @@ CvKNearest
The class implements K-Nearest Neighbors model as described in the beginning of this section.
.. Sample code::
* : PYTHON : An example of digit recognition using KNearest can be found at opencv_source/samples/python2/digits.py
* : PYTHON : An example of grid search digit recognition using KNearest can be found at opencv_source/samples/python2/digits_adjust.py
* : PYTHON : An example of video digit recognition using KNearest can be found at opencv_source/samples/python2/digits_video.py
CvKNearest::CvKNearest
----------------------
Default and training constructors.
@ -188,5 +194,3 @@ The sample below (currently using the obsolete ``CvMat`` structures) demonstrate
cvReleaseMat( &trainData );
return 0;
}

@ -158,6 +158,12 @@ CvSVM
Support Vector Machines.
.. Sample code::
* : PYTHON : An example of digit recognition using SVM can be found at opencv_source/samples/python2/digits.py
* : PYTHON : An example of grid search digit recognition using SVM can be found at opencv_source/samples/python2/digits_adjust.py
* : PYTHON : An example of video digit recognition using SVM can be found at opencv_source/samples/python2/digits_video.py
CvSVM::CvSVM
------------
Default and training constructors.

@ -41,7 +41,6 @@
#ifndef __OPENCV_PRECOMP_H__
#define __OPENCV_PRECOMP_H__
#include "opencv2/core.hpp"
#include "opencv2/ml.hpp"
#include "opencv2/core/core_c.h"

@ -84,6 +84,10 @@ SURF
.. [Bay06] Bay, H. and Tuytelaars, T. and Van Gool, L. "SURF: Speeded Up Robust Features", 9th European Conference on Computer Vision, 2006
.. Sample code::
* : An example using the SURF feature detector can be found at opencv_source_code/samples/cpp/generic_descriptor_match.cpp
* : Another example using the SURF feature detector, extractor and matcher can be found at opencv_source_code/samples/cpp/matcher_simple.cpp
SURF::SURF
----------
@ -239,6 +243,9 @@ The class ``SURF_GPU`` uses some buffers and provides access to it. All buffers
.. seealso:: :ocv:class:`SURF`
.. Sample code::
* : An example for using the SURF keypoint matcher on GPU can be found at opencv_source_code/samples/gpu/surf_keypoint_matcher.cpp
ocl::SURF_OCL
-------------
@ -337,3 +344,7 @@ The ``descriptors`` matrix is :math:`\texttt{nFeatures} \times \texttt{descripto
The class ``SURF_OCL`` uses some buffers and provides access to it. All buffers can be safely released between function calls.
.. seealso:: :ocv:class:`SURF`
.. Sample code::
* : OCL : An example of the SURF detector can be found at opencv_source_code/samples/ocl/surf_matcher.cpp

@ -216,6 +216,10 @@ Detects objects of different sizes in the input image. The detected objects are
The function is parallelized with the TBB library.
.. Sample code::
* : PYTHON : A face detection example using cascade classifiers can be found at opencv_source_code/samples/python2/facedetect.py
CascadeClassifier::setImage
-------------------------------

@ -93,6 +93,23 @@ private:
std::vector<String> classNames;
};
// class for grouping object candidates, detected by Cascade Classifier, HOG etc.
// instance of the class is to be passed to cv::partition (see cxoperations.hpp)
class CV_EXPORTS SimilarRects
{
public:
SimilarRects(double _eps) : eps(_eps) {}
inline bool operator()(const Rect& r1, const Rect& r2) const
{
double delta = eps*(std::min(r1.width, r2.width) + std::min(r1.height, r2.height))*0.5;
return std::abs(r1.x - r2.x) <= delta &&
std::abs(r1.y - r2.y) <= delta &&
std::abs(r1.x + r1.width - r2.x - r2.width) <= delta &&
std::abs(r1.y + r1.height - r2.y - r2.height) <= delta;
}
double eps;
};
CV_EXPORTS void groupRectangles(std::vector<Rect>& rectList, int groupThreshold, double eps = 0.2);
CV_EXPORTS_W void groupRectangles(CV_IN_OUT std::vector<Rect>& rectList, CV_OUT std::vector<int>& weights, int groupThreshold, double eps = 0.2);
CV_EXPORTS void groupRectangles(std::vector<Rect>& rectList, int groupThreshold, double eps, std::vector<int>* weights, std::vector<double>* levelWeights );
@ -393,6 +410,7 @@ public:
// read/parse Dalal's alt model file
void readALTModel(String modelfile);
void groupRectangles(std::vector<cv::Rect>& rectList, std::vector<double>& weights, int groupThreshold, double eps) const;
};

@ -113,24 +113,6 @@ struct Logger
namespace cv
{
// class for grouping object candidates, detected by Cascade Classifier, HOG etc.
// instance of the class is to be passed to cv::partition (see cxoperations.hpp)
class CV_EXPORTS SimilarRects
{
public:
SimilarRects(double _eps) : eps(_eps) {}
inline bool operator()(const Rect& r1, const Rect& r2) const
{
double delta = eps*(std::min(r1.width, r2.width) + std::min(r1.height, r2.height))*0.5;
return std::abs(r1.x - r2.x) <= delta &&
std::abs(r1.y - r2.y) <= delta &&
std::abs(r1.x + r1.width - r2.x - r2.width) <= delta &&
std::abs(r1.y + r1.height - r2.y - r2.height) <= delta;
}
double eps;
};
void groupRectangles(std::vector<Rect>& rectList, int groupThreshold, double eps, std::vector<int>* weights, std::vector<double>* levelWeights)
{
if( groupThreshold <= 0 || rectList.empty() )

@ -1303,7 +1303,7 @@ void HOGDescriptor::detectMultiScale(
if ( useMeanshiftGrouping )
groupRectangles_meanshift(foundLocations, foundWeights, foundScales, finalThreshold, winSize);
else
groupRectangles(foundLocations, (int)finalThreshold, 0.2);
groupRectangles(foundLocations, foundWeights, (int)finalThreshold, 0.2);
}
void HOGDescriptor::detectMultiScale(const Mat& img, std::vector<Rect>& foundLocations,
@ -2944,5 +2944,83 @@ void HOGDescriptor::readALTModel(String modelfile)
fclose(modelfl);
}
void HOGDescriptor::groupRectangles(std::vector<cv::Rect>& rectList, std::vector<double>& weights, int groupThreshold, double eps) const
{
if( groupThreshold <= 0 || rectList.empty() )
{
return;
}
CV_Assert(rectList.size() == weights.size());
std::vector<int> labels;
int nclasses = partition(rectList, labels, SimilarRects(eps));
std::vector<cv::Rect_<double> > rrects(nclasses);
std::vector<int> numInClass(nclasses, 0);
std::vector<double> foundWeights(nclasses, DBL_MIN);
std::vector<double> totalFactorsPerClass(nclasses, 1);
int i, j, nlabels = (int)labels.size();
for( i = 0; i < nlabels; i++ )
{
int cls = labels[i];
rrects[cls].x += rectList[i].x;
rrects[cls].y += rectList[i].y;
rrects[cls].width += rectList[i].width;
rrects[cls].height += rectList[i].height;
foundWeights[cls] = max(foundWeights[cls], weights[i]);
numInClass[cls]++;
}
for( i = 0; i < nclasses; i++ )
{
// find the average of all ROI in the cluster
cv::Rect_<double> r = rrects[i];
double s = 1.0/numInClass[i];
rrects[i] = cv::Rect_<double>(cv::saturate_cast<double>(r.x*s),
cv::saturate_cast<double>(r.y*s),
cv::saturate_cast<double>(r.width*s),
cv::saturate_cast<double>(r.height*s));
}
rectList.clear();
weights.clear();
for( i = 0; i < nclasses; i++ )
{
cv::Rect r1 = rrects[i];
int n1 = numInClass[i];
double w1 = foundWeights[i];
if( n1 <= groupThreshold )
continue;
// filter out small rectangles inside large rectangles
for( j = 0; j < nclasses; j++ )
{
int n2 = numInClass[j];
if( j == i || n2 <= groupThreshold )
continue;
cv::Rect r2 = rrects[j];
int dx = cv::saturate_cast<int>( r2.width * eps );
int dy = cv::saturate_cast<int>( r2.height * eps );
if( r1.x >= r2.x - dx &&
r1.y >= r2.y - dy &&
r1.x + r1.width <= r2.x + r2.width + dx &&
r1.y + r1.height <= r2.y + r2.height + dy &&
(n2 > std::max(3, n1) || n1 < 3) )
break;
}
if( j == nclasses )
{
rectList.push_back(r1);
weights.push_back(w1);
}
}
}
}

@ -363,7 +363,9 @@ The class implements Histogram of Oriented Gradients ([Dalal2005]_) object detec
Interfaces of all methods are kept similar to the ``CPU HOG`` descriptor and detector analogues as much as possible.
.. Sample code::
* : OCL : An example using the HOG descriptor can be found at opencv_source_code/samples/ocl/hog.cpp
ocl::HOGDescriptor::HOGDescriptor
-------------------------------------

@ -257,7 +257,10 @@ The class can calculate an optical flow for a sparse feature set or dense optica
.. seealso:: :ocv:func:`calcOpticalFlowPyrLK`
.. Sample code::
* : OCL : An example the Lucas Kanade optical flow pyramid method can be found at opencv_source_code/samples/ocl/pyrlk_optical_flow.cpp
* : OCL : An example for square detection can be found at opencv_source_code/samples/ocl/squares.cpp
ocl::PyrLKOpticalFlow::sparse
-----------------------------

@ -17,6 +17,10 @@ Cascade classifier class used for object detection. Supports HAAR cascade classi
Size minSize = Size(), Size maxSize = Size());
};
.. Sample code::
* : OCL : A face detection example using cascade classifiers can be found at opencv_source_code/samples/ocl/facedetect.cpp
ocl::OclCascadeClassifier::oclHaarDetectObjects
------------------------------------------------------
Detects objects of different sizes in the input image.

@ -0,0 +1,101 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2010-2012, Multicoreware, Inc., all rights reserved.
// Copyright (C) 2010-2012, Advanced Micro Devices, Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// @Authors
// Peng Xiao, pengxiao@outlook.com
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other oclMaterials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors as is and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
///////////// GoodFeaturesToTrack ////////////////////////
PERFTEST(GoodFeaturesToTrack)
{
using namespace cv;
int maxCorners = 2000;
double qualityLevel = 0.01;
std::string images[] = { "rubberwhale1.png", "aloeL.jpg" };
std::vector<cv::Point2f> pts_gold, pts_ocl;
for(size_t imgIdx = 0; imgIdx < (sizeof(images)/sizeof(std::string)); ++imgIdx)
{
Mat frame = imread(abspath(images[imgIdx]), IMREAD_GRAYSCALE);
CV_Assert(!frame.empty());
for(float minDistance = 0; minDistance < 4; minDistance += 3.0)
{
SUBTEST << "image = " << images[imgIdx] << "; ";
SUBTEST << "minDistance = " << minDistance << "; ";
cv::goodFeaturesToTrack(frame, pts_gold, maxCorners, qualityLevel, minDistance);
CPU_ON;
cv::goodFeaturesToTrack(frame, pts_gold, maxCorners, qualityLevel, minDistance);
CPU_OFF;
cv::ocl::GoodFeaturesToTrackDetector_OCL detector(maxCorners, qualityLevel, minDistance);
ocl::oclMat frame_ocl(frame), pts_oclmat;
WARMUP_ON;
detector(frame_ocl, pts_oclmat);
WARMUP_OFF;
detector.downloadPoints(pts_oclmat, pts_ocl);
double diff = abs(static_cast<float>(pts_gold.size() - pts_ocl.size()));
TestSystem::instance().setAccurate(diff == 0.0, diff);
GPU_ON;
detector(frame_ocl, pts_oclmat);
GPU_OFF;
GPU_FULL_ON;
frame_ocl.upload(frame);
detector(frame_ocl, pts_oclmat);
detector.downloadPoints(pts_oclmat, pts_ocl);
GPU_FULL_OFF;
}
}
}

@ -48,8 +48,8 @@
///////////// PyrLKOpticalFlow ////////////////////////
PERFTEST(PyrLKOpticalFlow)
{
std::string images1[] = {"rubberwhale1.png", "basketball1.png"};
std::string images2[] = {"rubberwhale2.png", "basketball2.png"};
std::string images1[] = {"rubberwhale1.png", "aloeL.jpg"};
std::string images2[] = {"rubberwhale2.png", "aloeR.jpg"};
for (size_t i = 0; i < sizeof(images1) / sizeof(std::string); i++)
{

@ -56,98 +56,6 @@ using namespace cv::ocl;
static oclMat gauss_w_lut;
static bool hog_device_cpu;
/* pre-compute gaussian and interp_weight lookup tables if sigma is 4.0f */
static const float gaussian_interp_lut[] =
{
/* gaussian lut */
0.01831564f, 0.02926831f, 0.04393693f, 0.06196101f, 0.08208500f, 0.10215643f,
0.11943297f, 0.13117145f, 0.13533528f, 0.13117145f, 0.11943297f, 0.10215643f,
0.08208500f, 0.06196101f, 0.04393693f, 0.02926831f, 0.02926831f, 0.04677062f,
0.07021102f, 0.09901341f, 0.13117145f, 0.16324551f, 0.19085334f, 0.20961139f,
0.21626517f, 0.20961139f, 0.19085334f, 0.16324551f, 0.13117145f, 0.09901341f,
0.07021102f, 0.04677062f, 0.04393693f, 0.07021102f, 0.10539922f, 0.14863673f,
0.19691168f, 0.24506053f, 0.28650481f, 0.31466395f, 0.32465246f, 0.31466395f,
0.28650481f, 0.24506053f, 0.19691168f, 0.14863673f, 0.10539922f, 0.07021102f,
0.06196101f, 0.09901341f, 0.14863673f, 0.20961139f, 0.27768996f, 0.34559074f,
0.40403652f, 0.44374731f, 0.45783335f, 0.44374731f, 0.40403652f, 0.34559074f,
0.27768996f, 0.20961139f, 0.14863673f, 0.09901341f, 0.08208500f, 0.13117145f,
0.19691168f, 0.27768996f, 0.36787945f, 0.45783335f, 0.53526145f, 0.58786964f,
0.60653067f, 0.58786964f, 0.53526145f, 0.45783335f, 0.36787945f, 0.27768996f,
0.19691168f, 0.13117145f, 0.10215643f, 0.16324551f, 0.24506053f, 0.34559074f,
0.45783335f, 0.56978285f, 0.66614360f, 0.73161560f, 0.75483960f, 0.73161560f,
0.66614360f, 0.56978285f, 0.45783335f, 0.34559074f, 0.24506053f, 0.16324551f,
0.11943297f, 0.19085334f, 0.28650481f, 0.40403652f, 0.53526145f, 0.66614360f,
0.77880079f, 0.85534531f, 0.88249689f, 0.85534531f, 0.77880079f, 0.66614360f,
0.53526145f, 0.40403652f, 0.28650481f, 0.19085334f, 0.13117145f, 0.20961139f,
0.31466395f, 0.44374731f, 0.58786964f, 0.73161560f, 0.85534531f, 0.93941307f,
0.96923321f, 0.93941307f, 0.85534531f, 0.73161560f, 0.58786964f, 0.44374731f,
0.31466395f, 0.20961139f, 0.13533528f, 0.21626517f, 0.32465246f, 0.45783335f,
0.60653067f, 0.75483960f, 0.88249689f, 0.96923321f, 1.00000000f, 0.96923321f,
0.88249689f, 0.75483960f, 0.60653067f, 0.45783335f, 0.32465246f, 0.21626517f,
0.13117145f, 0.20961139f, 0.31466395f, 0.44374731f, 0.58786964f, 0.73161560f,
0.85534531f, 0.93941307f, 0.96923321f, 0.93941307f, 0.85534531f, 0.73161560f,
0.58786964f, 0.44374731f, 0.31466395f, 0.20961139f, 0.11943297f, 0.19085334f,
0.28650481f, 0.40403652f, 0.53526145f, 0.66614360f, 0.77880079f, 0.85534531f,
0.88249689f, 0.85534531f, 0.77880079f, 0.66614360f, 0.53526145f, 0.40403652f,
0.28650481f, 0.19085334f, 0.10215643f, 0.16324551f, 0.24506053f, 0.34559074f,
0.45783335f, 0.56978285f, 0.66614360f, 0.73161560f, 0.75483960f, 0.73161560f,
0.66614360f, 0.56978285f, 0.45783335f, 0.34559074f, 0.24506053f, 0.16324551f,
0.08208500f, 0.13117145f, 0.19691168f, 0.27768996f, 0.36787945f, 0.45783335f,
0.53526145f, 0.58786964f, 0.60653067f, 0.58786964f, 0.53526145f, 0.45783335f,
0.36787945f, 0.27768996f, 0.19691168f, 0.13117145f, 0.06196101f, 0.09901341f,
0.14863673f, 0.20961139f, 0.27768996f, 0.34559074f, 0.40403652f, 0.44374731f,
0.45783335f, 0.44374731f, 0.40403652f, 0.34559074f, 0.27768996f, 0.20961139f,
0.14863673f, 0.09901341f, 0.04393693f, 0.07021102f, 0.10539922f, 0.14863673f,
0.19691168f, 0.24506053f, 0.28650481f, 0.31466395f, 0.32465246f, 0.31466395f,
0.28650481f, 0.24506053f, 0.19691168f, 0.14863673f, 0.10539922f, 0.07021102f,
0.02926831f, 0.04677062f, 0.07021102f, 0.09901341f, 0.13117145f, 0.16324551f,
0.19085334f, 0.20961139f, 0.21626517f, 0.20961139f, 0.19085334f, 0.16324551f,
0.13117145f, 0.09901341f, 0.07021102f, 0.04677062f,
/* interp_weight lut */
0.00390625f, 0.01171875f, 0.01953125f, 0.02734375f, 0.03515625f, 0.04296875f,
0.05078125f, 0.05859375f, 0.05859375f, 0.05078125f, 0.04296875f, 0.03515625f,
0.02734375f, 0.01953125f, 0.01171875f, 0.00390625f, 0.01171875f, 0.03515625f,
0.05859375f, 0.08203125f, 0.10546875f, 0.12890625f, 0.15234375f, 0.17578125f,
0.17578125f, 0.15234375f, 0.12890625f, 0.10546875f, 0.08203125f, 0.05859375f,
0.03515625f, 0.01171875f, 0.01953125f, 0.05859375f, 0.09765625f, 0.13671875f,
0.17578125f, 0.21484375f, 0.25390625f, 0.29296875f, 0.29296875f, 0.25390625f,
0.21484375f, 0.17578125f, 0.13671875f, 0.09765625f, 0.05859375f, 0.01953125f,
0.02734375f, 0.08203125f, 0.13671875f, 0.19140625f, 0.24609375f, 0.30078125f,
0.35546875f, 0.41015625f, 0.41015625f, 0.35546875f, 0.30078125f, 0.24609375f,
0.19140625f, 0.13671875f, 0.08203125f, 0.02734375f, 0.03515625f, 0.10546875f,
0.17578125f, 0.24609375f, 0.31640625f, 0.38671875f, 0.45703125f, 0.52734375f,
0.52734375f, 0.45703125f, 0.38671875f, 0.31640625f, 0.24609375f, 0.17578125f,
0.10546875f, 0.03515625f, 0.04296875f, 0.12890625f, 0.21484375f, 0.30078125f,
0.38671875f, 0.47265625f, 0.55859375f, 0.64453125f, 0.64453125f, 0.55859375f,
0.47265625f, 0.38671875f, 0.30078125f, 0.21484375f, 0.12890625f, 0.04296875f,
0.05078125f, 0.15234375f, 0.25390625f, 0.35546875f, 0.45703125f, 0.55859375f,
0.66015625f, 0.76171875f, 0.76171875f, 0.66015625f, 0.55859375f, 0.45703125f,
0.35546875f, 0.25390625f, 0.15234375f, 0.05078125f, 0.05859375f, 0.17578125f,
0.29296875f, 0.41015625f, 0.52734375f, 0.64453125f, 0.76171875f, 0.87890625f,
0.87890625f, 0.76171875f, 0.64453125f, 0.52734375f, 0.41015625f, 0.29296875f,
0.17578125f, 0.05859375f, 0.05859375f, 0.17578125f, 0.29296875f, 0.41015625f,
0.52734375f, 0.64453125f, 0.76171875f, 0.87890625f, 0.87890625f, 0.76171875f,
0.64453125f, 0.52734375f, 0.41015625f, 0.29296875f, 0.17578125f, 0.05859375f,
0.05078125f, 0.15234375f, 0.25390625f, 0.35546875f, 0.45703125f, 0.55859375f,
0.66015625f, 0.76171875f, 0.76171875f, 0.66015625f, 0.55859375f, 0.45703125f,
0.35546875f, 0.25390625f, 0.15234375f, 0.05078125f, 0.04296875f, 0.12890625f,
0.21484375f, 0.30078125f, 0.38671875f, 0.47265625f, 0.55859375f, 0.64453125f,
0.64453125f, 0.55859375f, 0.47265625f, 0.38671875f, 0.30078125f, 0.21484375f,
0.12890625f, 0.04296875f, 0.03515625f, 0.10546875f, 0.17578125f, 0.24609375f,
0.31640625f, 0.38671875f, 0.45703125f, 0.52734375f, 0.52734375f, 0.45703125f,
0.38671875f, 0.31640625f, 0.24609375f, 0.17578125f, 0.10546875f, 0.03515625f,
0.02734375f, 0.08203125f, 0.13671875f, 0.19140625f, 0.24609375f, 0.30078125f,
0.35546875f, 0.41015625f, 0.41015625f, 0.35546875f, 0.30078125f, 0.24609375f,
0.19140625f, 0.13671875f, 0.08203125f, 0.02734375f, 0.01953125f, 0.05859375f,
0.09765625f, 0.13671875f, 0.17578125f, 0.21484375f, 0.25390625f, 0.29296875f,
0.29296875f, 0.25390625f, 0.21484375f, 0.17578125f, 0.13671875f, 0.09765625f,
0.05859375f, 0.01953125f, 0.01171875f, 0.03515625f, 0.05859375f, 0.08203125f,
0.10546875f, 0.12890625f, 0.15234375f, 0.17578125f, 0.17578125f, 0.15234375f,
0.12890625f, 0.10546875f, 0.08203125f, 0.05859375f, 0.03515625f, 0.01171875f,
0.00390625f, 0.01171875f, 0.01953125f, 0.02734375f, 0.03515625f, 0.04296875f,
0.05078125f, 0.05859375f, 0.05859375f, 0.05078125f, 0.04296875f, 0.03515625f,
0.02734375f, 0.01953125f, 0.01171875f, 0.00390625f
};
namespace cv
{
@ -180,7 +88,7 @@ namespace cv
int nblocks_win_x, int nblocks_win_y);
void compute_hists(int nbins, int block_stride_x, int blovck_stride_y,
int height, int width, float sigma, const cv::ocl::oclMat &grad,
int height, int width, const cv::ocl::oclMat &grad,
const cv::ocl::oclMat &qangle,
const cv::ocl::oclMat &gauss_w_lut, cv::ocl::oclMat &block_hists);
@ -254,7 +162,7 @@ cv::ocl::HOGDescriptor::HOGDescriptor(Size win_size_, Size block_size_, Size blo
effect_size = Size(0, 0);
if (queryDeviceInfo<IS_CPU_DEVICE, bool>())
if (queryDeviceInfo<IS_CPU_DEVICE, bool>())
hog_device_cpu = true;
else
hog_device_cpu = false;
@ -328,10 +236,18 @@ void cv::ocl::HOGDescriptor::init_buffer(const oclMat &img, Size win_stride)
Size wins_per_img = numPartsWithin(img.size(), win_size, win_stride);
labels.create(1, wins_per_img.area(), CV_8U);
std::vector<float> v_lut = std::vector<float>(gaussian_interp_lut, gaussian_interp_lut +
sizeof(gaussian_interp_lut) / sizeof(gaussian_interp_lut[0]));
Mat m_lut(v_lut);
gauss_w_lut.upload(m_lut.reshape(1,1));
float sigma = getWinSigma();
float scale = 1.f / (2.f * sigma * sigma);
Mat gaussian_lut(1, 512, CV_32FC1);
int idx = 0;
for(int i=-8; i<8; i++)
for(int j=-8; j<8; j++)
gaussian_lut.at<float>(idx++) = std::exp(-(j * j + i * i) * scale);
for(int i=-8; i<8; i++)
for(int j=-8; j<8; j++)
gaussian_lut.at<float>(idx++) = (8.f - fabs(j + 0.5f)) * (8.f - fabs(i + 0.5f)) / 64.f;
gauss_w_lut.upload(gaussian_lut);
}
void cv::ocl::HOGDescriptor::computeGradient(const oclMat &img, oclMat &grad, oclMat &qangle)
@ -358,7 +274,7 @@ void cv::ocl::HOGDescriptor::computeBlockHistograms(const oclMat &img)
computeGradient(img, this->grad, this->qangle);
hog::compute_hists(nbins, block_stride.width, block_stride.height, effect_size.height,
effect_size.width, (float)getWinSigma(), grad, qangle, gauss_w_lut, block_hists);
effect_size.width, grad, qangle, gauss_w_lut, block_hists);
hog::normalize_hists(nbins, block_stride.width, block_stride.height, effect_size.height,
effect_size.width, block_hists, (float)threshold_L2hys);
@ -1707,7 +1623,7 @@ void cv::ocl::device::hog::set_up_constants(int nbins,
void cv::ocl::device::hog::compute_hists(int nbins,
int block_stride_x, int block_stride_y,
int height, int width, float sigma,
int height, int width,
const cv::ocl::oclMat &grad,
const cv::ocl::oclMat &qangle,
const cv::ocl::oclMat &gauss_w_lut,
@ -1715,8 +1631,7 @@ void cv::ocl::device::hog::compute_hists(int nbins,
{
Context *clCxt = Context::getContext();
std::vector< std::pair<size_t, const void *> > args;
String kernelName = (sigma == 4.0f) ? "compute_hists_lut_kernel" :
"compute_hists_kernel";
String kernelName = "compute_hists_lut_kernel";
int img_block_width = (width - CELLS_PER_BLOCK_X * CELL_WIDTH + block_stride_x)
/ block_stride_x;
@ -1726,9 +1641,6 @@ void cv::ocl::device::hog::compute_hists(int nbins,
int grad_quadstep = grad.step >> 2;
int qangle_step = qangle.step;
// Precompute gaussian spatial window parameter
float scale = 1.f / (2.f * sigma * sigma);
int blocks_in_group = 4;
size_t localThreads[3] = { blocks_in_group * 24, 2, 1 };
size_t globalThreads[3] = {
@ -1748,15 +1660,23 @@ void cv::ocl::device::hog::compute_hists(int nbins,
args.push_back( std::make_pair( sizeof(cl_int), (void *)&qangle_step));
args.push_back( std::make_pair( sizeof(cl_mem), (void *)&grad.data));
args.push_back( std::make_pair( sizeof(cl_mem), (void *)&qangle.data));
if (kernelName.compare("compute_hists_lut_kernel") == 0)
args.push_back( std::make_pair( sizeof(cl_mem), (void *)&gauss_w_lut.data));
else
args.push_back( std::make_pair( sizeof(cl_float), (void *)&scale));
args.push_back( std::make_pair( sizeof(cl_mem), (void *)&gauss_w_lut.data));
args.push_back( std::make_pair( sizeof(cl_mem), (void *)&block_hists.data));
args.push_back( std::make_pair( smem, (void *)NULL));
openCLExecuteKernel2(clCxt, &objdetect_hog, kernelName, globalThreads,
localThreads, args, -1, -1);
if(hog_device_cpu)
{
openCLExecuteKernel2(clCxt, &objdetect_hog, kernelName, globalThreads,
localThreads, args, -1, -1, "-D CPU");
}else
{
cl_kernel kernel = openCLGetKernelFromSource(clCxt, &objdetect_hog, kernelName);
int wave_size = queryDeviceInfo<WAVEFRONT_SIZE, int>(kernel);
char opt[32] = {0};
sprintf(opt, "-D WAVE_SIZE=%d", wave_size);
openCLExecuteKernel2(clCxt, &objdetect_hog, kernelName, globalThreads,
localThreads, args, -1, -1, opt);
}
}
void cv::ocl::device::hog::normalize_hists(int nbins,

@ -53,10 +53,10 @@
//----------------------------------------------------------------------------
// Histogram computation
// 12 threads for a cell, 12x4 threads per block
// Use pre-computed gaussian and interp_weight lookup tables if sigma is 4.0f
// Use pre-computed gaussian and interp_weight lookup tables
__kernel void compute_hists_lut_kernel(
const int cblock_stride_x, const int cblock_stride_y,
const int cnbins, const int cblock_hist_size, const int img_block_width,
const int cnbins, const int cblock_hist_size, const int img_block_width,
const int blocks_in_group, const int blocks_total,
const int grad_quadstep, const int qangle_step,
__global const float* grad, __global const uchar* qangle,
@ -76,100 +76,6 @@ __kernel void compute_hists_lut_kernel(
const int cell_y = lidY;
const int cell_thread_x = lidX - cell_x * 12;
__local float* hists = smem + lp * cnbins * (CELLS_PER_BLOCK_X *
CELLS_PER_BLOCK_Y * 12 + CELLS_PER_BLOCK_X * CELLS_PER_BLOCK_Y);
__local float* final_hist = hists + cnbins *
(CELLS_PER_BLOCK_X * CELLS_PER_BLOCK_Y * 12);
const int offset_x = gidX * cblock_stride_x + (cell_x << 2) + cell_thread_x;
const int offset_y = gidY * cblock_stride_y + (cell_y << 2);
__global const float* grad_ptr = (gid < blocks_total) ?
grad + offset_y * grad_quadstep + (offset_x << 1) : grad;
__global const uchar* qangle_ptr = (gid < blocks_total) ?
qangle + offset_y * qangle_step + (offset_x << 1) : qangle;
__local float* hist = hists + 12 * (cell_y * CELLS_PER_BLOCK_Y + cell_x) +
cell_thread_x;
for (int bin_id = 0; bin_id < cnbins; ++bin_id)
hist[bin_id * 48] = 0.f;
const int dist_x = -4 + cell_thread_x - 4 * cell_x;
const int dist_center_x = dist_x - 4 * (1 - 2 * cell_x);
const int dist_y_begin = -4 - 4 * lidY;
for (int dist_y = dist_y_begin; dist_y < dist_y_begin + 12; ++dist_y)
{
float2 vote = (float2) (grad_ptr[0], grad_ptr[1]);
uchar2 bin = (uchar2) (qangle_ptr[0], qangle_ptr[1]);
grad_ptr += grad_quadstep;
qangle_ptr += qangle_step;
int dist_center_y = dist_y - 4 * (1 - 2 * cell_y);
int idx = (dist_center_y + 8) * 16 + (dist_center_x + 8);
float gaussian = gauss_w_lut[idx];
idx = (dist_y + 8) * 16 + (dist_x + 8);
float interp_weight = gauss_w_lut[256+idx];
hist[bin.x * 48] += gaussian * interp_weight * vote.x;
hist[bin.y * 48] += gaussian * interp_weight * vote.y;
}
barrier(CLK_LOCAL_MEM_FENCE);
volatile __local float* hist_ = hist;
for (int bin_id = 0; bin_id < cnbins; ++bin_id, hist_ += 48)
{
if (cell_thread_x < 6)
hist_[0] += hist_[6];
barrier(CLK_LOCAL_MEM_FENCE);
if (cell_thread_x < 3)
hist_[0] += hist_[3];
#ifdef CPU
barrier(CLK_LOCAL_MEM_FENCE);
#endif
if (cell_thread_x == 0)
final_hist[(cell_x * 2 + cell_y) * cnbins + bin_id] =
hist_[0] + hist_[1] + hist_[2];
}
#ifdef CPU
barrier(CLK_LOCAL_MEM_FENCE);
#endif
int tid = (cell_y * CELLS_PER_BLOCK_Y + cell_x) * 12 + cell_thread_x;
if ((tid < cblock_hist_size) && (gid < blocks_total))
{
__global float* block_hist = block_hists +
(gidY * img_block_width + gidX) * cblock_hist_size;
block_hist[tid] = final_hist[tid];
}
}
//----------------------------------------------------------------------------
// Histogram computation
// 12 threads for a cell, 12x4 threads per block
__kernel void compute_hists_kernel(
const int cblock_stride_x, const int cblock_stride_y,
const int cnbins, const int cblock_hist_size, const int img_block_width,
const int blocks_in_group, const int blocks_total,
const int grad_quadstep, const int qangle_step,
__global const float* grad, __global const uchar* qangle,
const float scale, __global float* block_hists, __local float* smem)
{
const int lx = get_local_id(0);
const int lp = lx / 24; /* local group id */
const int gid = get_group_id(0) * blocks_in_group + lp;/* global group id */
const int gidY = gid / img_block_width;
const int gidX = gid - gidY * img_block_width;
const int lidX = lx - lp * 24;
const int lidY = get_local_id(1);
const int cell_x = lidX / 12;
const int cell_y = lidY;
const int cell_thread_x = lidX - cell_x * 12;
__local float* hists = smem + lp * cnbins * (CELLS_PER_BLOCK_X *
CELLS_PER_BLOCK_Y * 12 + CELLS_PER_BLOCK_X * CELLS_PER_BLOCK_Y);
__local float* final_hist = hists + cnbins *
@ -202,10 +108,10 @@ __kernel void compute_hists_kernel(
int dist_center_y = dist_y - 4 * (1 - 2 * cell_y);
float gaussian = exp(-(dist_center_y * dist_center_y + dist_center_x *
dist_center_x) * scale);
float interp_weight = (8.f - fabs(dist_y + 0.5f)) *
(8.f - fabs(dist_x + 0.5f)) / 64.f;
int idx = (dist_center_y + 8) * 16 + (dist_center_x + 8);
float gaussian = gauss_w_lut[idx];
idx = (dist_y + 8) * 16 + (dist_x + 8);
float interp_weight = gauss_w_lut[256+idx];
hist[bin.x * 48] += gaussian * interp_weight * vote.x;
hist[bin.y * 48] += gaussian * interp_weight * vote.y;
@ -230,6 +136,7 @@ __kernel void compute_hists_kernel(
#ifdef CPU
barrier(CLK_LOCAL_MEM_FENCE);
#endif
int tid = (cell_y * CELLS_PER_BLOCK_Y + cell_x) * 12 + cell_thread_x;
if ((tid < cblock_hist_size) && (gid < blocks_total))
{
@ -242,7 +149,7 @@ __kernel void compute_hists_kernel(
//-------------------------------------------------------------
// Normalization of histograms via L2Hys_norm
// optimized for the case of 9 bins
__kernel void normalize_hists_36_kernel(__global float* block_hists,
__kernel void normalize_hists_36_kernel(__global float* block_hists,
const float threshold, __local float *squares)
{
const int tid = get_local_id(0);
@ -298,24 +205,24 @@ float reduce_smem(volatile __local float* smem, int size)
unsigned int tid = get_local_id(0);
float sum = smem[tid];
if (size >= 512) { if (tid < 256) smem[tid] = sum = sum + smem[tid + 256];
if (size >= 512) { if (tid < 256) smem[tid] = sum = sum + smem[tid + 256];
barrier(CLK_LOCAL_MEM_FENCE); }
if (size >= 256) { if (tid < 128) smem[tid] = sum = sum + smem[tid + 128];
if (size >= 256) { if (tid < 128) smem[tid] = sum = sum + smem[tid + 128];
barrier(CLK_LOCAL_MEM_FENCE); }
if (size >= 128) { if (tid < 64) smem[tid] = sum = sum + smem[tid + 64];
if (size >= 128) { if (tid < 64) smem[tid] = sum = sum + smem[tid + 64];
barrier(CLK_LOCAL_MEM_FENCE); }
#ifdef CPU
if (size >= 64) { if (tid < 32) smem[tid] = sum = sum + smem[tid + 32];
if (size >= 64) { if (tid < 32) smem[tid] = sum = sum + smem[tid + 32];
barrier(CLK_LOCAL_MEM_FENCE); }
if (size >= 32) { if (tid < 16) smem[tid] = sum = sum + smem[tid + 16];
barrier(CLK_LOCAL_MEM_FENCE); }
if (size >= 16) { if (tid < 8) smem[tid] = sum = sum + smem[tid + 8];
if (size >= 32) { if (tid < 16) smem[tid] = sum = sum + smem[tid + 16];
barrier(CLK_LOCAL_MEM_FENCE); }
if (size >= 8) { if (tid < 4) smem[tid] = sum = sum + smem[tid + 4];
if (size >= 16) { if (tid < 8) smem[tid] = sum = sum + smem[tid + 8];
barrier(CLK_LOCAL_MEM_FENCE); }
if (size >= 4) { if (tid < 2) smem[tid] = sum = sum + smem[tid + 2];
barrier(CLK_LOCAL_MEM_FENCE); }
if (size >= 2) { if (tid < 1) smem[tid] = sum = sum + smem[tid + 1];
if (size >= 8) { if (tid < 4) smem[tid] = sum = sum + smem[tid + 4];
barrier(CLK_LOCAL_MEM_FENCE); }
if (size >= 4) { if (tid < 2) smem[tid] = sum = sum + smem[tid + 2];
barrier(CLK_LOCAL_MEM_FENCE); }
if (size >= 2) { if (tid < 1) smem[tid] = sum = sum + smem[tid + 1];
barrier(CLK_LOCAL_MEM_FENCE); }
#else
if (tid < 32)
@ -344,7 +251,7 @@ __kernel void normalize_hists_kernel(
const int gidX = get_group_id(0);
const int gidY = get_group_id(1);
__global float* hist = block_hists + (gidY * img_block_width + gidX) *
__global float* hist = block_hists + (gidY * img_block_width + gidX) *
block_hist_size + tid;
float elem = 0.f;
@ -385,14 +292,14 @@ __kernel void classify_hists_180_kernel(
const int gidX = get_group_id(0);
const int gidY = get_group_id(1);
__global const float* hist = block_hists + (gidY * win_block_stride_y *
__global const float* hist = block_hists + (gidY * win_block_stride_y *
img_block_width + gidX * win_block_stride_x) * cblock_hist_size;
float product = 0.f;
for (int i = 0; i < cdescr_height; i++)
{
product += coefs[i * cdescr_width + tid] *
product += coefs[i * cdescr_width + tid] *
hist[i * img_block_width * cblock_hist_size + tid];
}
@ -458,14 +365,14 @@ __kernel void classify_hists_252_kernel(
const int gidX = get_group_id(0);
const int gidY = get_group_id(1);
__global const float* hist = block_hists + (gidY * win_block_stride_y *
__global const float* hist = block_hists + (gidY * win_block_stride_y *
img_block_width + gidX * win_block_stride_x) * cblock_hist_size;
float product = 0.f;
if (tid < cdescr_width)
{
for (int i = 0; i < cdescr_height; i++)
product += coefs[i * cdescr_width + tid] *
product += coefs[i * cdescr_width + tid] *
hist[i * img_block_width * cblock_hist_size + tid];
}
@ -495,7 +402,7 @@ __kernel void classify_hists_252_kernel(
barrier(CLK_LOCAL_MEM_FENCE);
#else
if (tid < 32)
{
{
smem[tid] = product = product + smem[tid + 32];
#if WAVE_SIZE < 32
} barrier(CLK_LOCAL_MEM_FENCE);
@ -527,7 +434,7 @@ __kernel void classify_hists_kernel(
const int gidX = get_group_id(0);
const int gidY = get_group_id(1);
__global const float* hist = block_hists + (gidY * win_block_stride_y *
__global const float* hist = block_hists + (gidY * win_block_stride_y *
img_block_width + gidX * win_block_stride_x) * cblock_hist_size;
float product = 0.f;
@ -535,7 +442,7 @@ __kernel void classify_hists_kernel(
{
int offset_y = i / cdescr_width;
int offset_x = i - offset_y * cdescr_width;
product += coefs[i] *
product += coefs[i] *
hist[offset_y * img_block_width * cblock_hist_size + offset_x];
}
@ -565,7 +472,7 @@ __kernel void classify_hists_kernel(
barrier(CLK_LOCAL_MEM_FENCE);
#else
if (tid < 32)
{
{
smem[tid] = product = product + smem[tid + 32];
#if WAVE_SIZE < 32
} barrier(CLK_LOCAL_MEM_FENCE);
@ -587,8 +494,8 @@ __kernel void classify_hists_kernel(
// Extract descriptors
__kernel void extract_descrs_by_rows_kernel(
const int cblock_hist_size, const int descriptors_quadstep,
const int cdescr_size, const int cdescr_width, const int img_block_width,
const int cblock_hist_size, const int descriptors_quadstep,
const int cdescr_size, const int cdescr_width, const int img_block_width,
const int win_block_stride_x, const int win_block_stride_y,
__global const float* block_hists, __global float* descriptors)
{
@ -597,11 +504,11 @@ __kernel void extract_descrs_by_rows_kernel(
int gidY = get_group_id(1);
// Get left top corner of the window in src
__global const float* hist = block_hists + (gidY * win_block_stride_y *
__global const float* hist = block_hists + (gidY * win_block_stride_y *
img_block_width + gidX * win_block_stride_x) * cblock_hist_size;
// Get left top corner of the window in dst
__global float* descriptor = descriptors +
__global float* descriptor = descriptors +
(gidY * get_num_groups(0) + gidX) * descriptors_quadstep;
// Copy elements from src to dst
@ -615,8 +522,8 @@ __kernel void extract_descrs_by_rows_kernel(
__kernel void extract_descrs_by_cols_kernel(
const int cblock_hist_size, const int descriptors_quadstep, const int cdescr_size,
const int cnblocks_win_x, const int cnblocks_win_y, const int img_block_width,
const int win_block_stride_x, const int win_block_stride_y,
const int cnblocks_win_x, const int cnblocks_win_y, const int img_block_width,
const int win_block_stride_x, const int win_block_stride_y,
__global const float* block_hists, __global float* descriptors)
{
int tid = get_local_id(0);
@ -624,11 +531,11 @@ __kernel void extract_descrs_by_cols_kernel(
int gidY = get_group_id(1);
// Get left top corner of the window in src
__global const float* hist = block_hists + (gidY * win_block_stride_y *
__global const float* hist = block_hists + (gidY * win_block_stride_y *
img_block_width + gidX * win_block_stride_x) * cblock_hist_size;
// Get left top corner of the window in dst
__global float* descriptor = descriptors +
__global float* descriptor = descriptors +
(gidY * get_num_groups(0) + gidX) * descriptors_quadstep;
// Copy elements from src to dst
@ -640,7 +547,7 @@ __kernel void extract_descrs_by_cols_kernel(
int y = block_idx / cnblocks_win_x;
int x = block_idx - y * cnblocks_win_x;
descriptor[(x * cnblocks_win_y + y) * cblock_hist_size + idx_in_block] =
descriptor[(x * cnblocks_win_y + y) * cblock_hist_size + idx_in_block] =
hist[(y * img_block_width + x) * cblock_hist_size + idx_in_block];
}
}
@ -649,7 +556,7 @@ __kernel void extract_descrs_by_cols_kernel(
// Gradients computation
__kernel void compute_gradients_8UC4_kernel(
const int height, const int width,
const int height, const int width,
const int img_step, const int grad_quadstep, const int qangle_step,
const __global uchar4 * img, __global float * grad, __global uchar * qangle,
const float angle_scale, const char correct_gamma, const int cnbins)
@ -693,9 +600,9 @@ __kernel void compute_gradients_8UC4_kernel(
barrier(CLK_LOCAL_MEM_FENCE);
if (x < width)
{
float3 a = (float3) (sh_row[tid], sh_row[tid + (NTHREADS + 2)],
float3 a = (float3) (sh_row[tid], sh_row[tid + (NTHREADS + 2)],
sh_row[tid + 2 * (NTHREADS + 2)]);
float3 b = (float3) (sh_row[tid + 2], sh_row[tid + 2 + (NTHREADS + 2)],
float3 b = (float3) (sh_row[tid + 2], sh_row[tid + 2 + (NTHREADS + 2)],
sh_row[tid + 2 + 2 * (NTHREADS + 2)]);
float3 dx;
@ -752,7 +659,7 @@ __kernel void compute_gradients_8UC4_kernel(
}
__kernel void compute_gradients_8UC1_kernel(
const int height, const int width,
const int height, const int width,
const int img_step, const int grad_quadstep, const int qangle_step,
__global const uchar * img, __global float * grad, __global uchar * qangle,
const float angle_scale, const char correct_gamma, const int cnbins)
@ -810,4 +717,4 @@ __kernel void compute_gradients_8UC1_kernel(
grad[ (gidY * grad_quadstep + x) << 1 ] = mag * (1.f - ang);
grad[ ((gidY * grad_quadstep + x) << 1) + 1 ] = mag * ang;
}
}
}

File diff suppressed because it is too large Load Diff

@ -30,3 +30,9 @@ Restores the selected region in an image using the region neighborhood.
The function reconstructs the selected image area from the pixel near the area boundary. The function may be used to remove dust and scratches from a scanned photo, or to remove undesirable objects from still images or video. See
http://en.wikipedia.org/wiki/Inpainting
for more details.
.. Sample code::
* : An example using the inpainting technique can be found at opencv_source_code/samples/cpp/inpaint.cpp
* : PYTHON : An example using the inpainting technique can be found at opencv_source_code/samples/python2/inpaint.py

@ -88,6 +88,11 @@ High level image stitcher. It's possible to use this class without being aware o
/* hidden */
};
.. Sample code::
* : A basic example on image stitching can be found at opencv_source_code/samples/cpp/stitching.cpp
* : A detailed example on image stitching can be found at opencv_source_code/samples/cpp/stitching_detailed.cpp
Stitcher::createDefault
-----------------------
Creates a stitcher with the default parameters.

Some files were not shown because too many files have changed in this diff Show More

Loading…
Cancel
Save