Merging in master

Branch: pull/3814/head
Author: Erik Karlsson (10 years ago)
Commit: 5a7118a88b
Files changed (number of lines changed in parentheses; BIN = binary):
  1. .gitignore (9)
  2. 3rdparty/libpng/CMakeLists.txt (6)
  3. 3rdparty/libpng/opencv-libpng.patch (4)
  4. 3rdparty/libpng/pngpriv.h (4)
  5. 3rdparty/libtiff/CMakeLists.txt (4)
  6. CMakeLists.txt (98)
  7. apps/CMakeLists.txt (1)
  8. apps/annotation/CMakeLists.txt (37)
  9. apps/annotation/opencv_annotation.cpp (198)
  10. apps/createsamples/CMakeLists.txt (2)
  11. apps/traincascade/CMakeLists.txt (2)
  12. apps/traincascade/cascadeclassifier.cpp (7)
  13. apps/traincascade/old_ml.hpp (97)
  14. cmake/OpenCVCRTLinkage.cmake (60)
  15. cmake/OpenCVConfig.cmake (3)
  16. cmake/OpenCVDetectCUDA.cmake (11)
  17. cmake/OpenCVDetectCXXCompiler.cmake (7)
  18. cmake/OpenCVDetectOpenCL.cmake (9)
  19. cmake/OpenCVGenPkgconfig.cmake (2)
  20. cmake/OpenCVModule.cmake (164)
  21. cmake/OpenCVUtils.cmake (25)
  22. cmake/templates/OpenCVConfig.cmake.in (17)
  23. cmake/templates/cvconfig.h.in (3)
  24. data/haarcascades_cuda/haarcascade_eye.xml (15452)
  25. data/haarcascades_cuda/haarcascade_eye_tree_eyeglasses.xml (33158)
  26. data/haarcascades_cuda/haarcascade_frontalface_alt.xml (26161)
  27. data/haarcascades_cuda/haarcascade_frontalface_alt2.xml (23550)
  28. data/haarcascades_cuda/haarcascade_frontalface_alt_tree.xml (103493)
  29. data/haarcascades_cuda/haarcascade_frontalface_default.xml (35712)
  30. data/haarcascades_cuda/haarcascade_fullbody.xml (18118)
  31. data/haarcascades_cuda/haarcascade_lefteye_2splits.xml (9803)
  32. data/haarcascades_cuda/haarcascade_lowerbody.xml (15085)
  33. data/haarcascades_cuda/haarcascade_profileface.xml (31930)
  34. data/haarcascades_cuda/haarcascade_righteye_2splits.xml (9833)
  35. data/haarcascades_cuda/haarcascade_smile.xml (8353)
  36. data/haarcascades_cuda/haarcascade_upperbody.xml (29767)
  37. doc/CMakeLists.txt (2)
  38. doc/Doxyfile.in (8)
  39. doc/py_tutorials/py_bindings/py_bindings_basics/py_bindings_basics.markdown (2)
  40. doc/py_tutorials/py_imgproc/py_contours/py_contours_begin/py_contours_begin.markdown (2)
  41. doc/py_tutorials/py_imgproc/py_houghlines/py_houghlines.markdown (12)
  42. doc/py_tutorials/py_video/py_lucas_kanade/py_lucas_kanade.markdown (2)
  43. doc/stylesheet.css (4)
  44. doc/tutorials/features2d/feature_flann_matcher/feature_flann_matcher.markdown (18)
  45. doc/tutorials/features2d/feature_homography/feature_homography.markdown (17)
  46. doc/tutorials/imgproc/histograms/template_matching/template_matching.markdown (2)
  47. doc/tutorials/imgproc/images/Morphology_3_Tutorial_Cover.jpg (BIN)
  48. doc/tutorials/imgproc/images/imgtrans/Distance_Transformation_Tutorial_Cover.jpg (BIN)
  49. doc/tutorials/imgproc/morph_lines_detection/images/binary.png (BIN)
  50. doc/tutorials/imgproc/morph_lines_detection/images/gray.png (BIN)
  51. doc/tutorials/imgproc/morph_lines_detection/images/horiz.png (BIN)
  52. doc/tutorials/imgproc/morph_lines_detection/images/linear_horiz.png (BIN)
  53. doc/tutorials/imgproc/morph_lines_detection/images/linear_vert.png (BIN)
  54. doc/tutorials/imgproc/morph_lines_detection/images/morph12.gif (BIN)
  55. doc/tutorials/imgproc/morph_lines_detection/images/morph21.gif (BIN)
  56. doc/tutorials/imgproc/morph_lines_detection/images/morph211.png (BIN)
  57. doc/tutorials/imgproc/morph_lines_detection/images/morph6.gif (BIN)
  58. doc/tutorials/imgproc/morph_lines_detection/images/morph61.png (BIN)
  59. doc/tutorials/imgproc/morph_lines_detection/images/smooth.png (BIN)
  60. doc/tutorials/imgproc/morph_lines_detection/images/src.png (BIN)
  61. doc/tutorials/imgproc/morph_lines_detection/images/vert.png (BIN)
  62. doc/tutorials/imgproc/morph_lines_detection/moprh_lines_detection.md (86)
  63. doc/tutorials/imgproc/table_of_content_imgproc.markdown (8)
  64. doc/tutorials/introduction/biicode/images/bii_lena.png (BIN)
  65. doc/tutorials/introduction/biicode/images/biiapp.png (BIN)
  66. doc/tutorials/introduction/biicode/images/biicode.png (BIN)
  67. doc/tutorials/introduction/biicode/tutorial_biicode.markdown (158)
  68. doc/tutorials/introduction/table_of_content_introduction.markdown (8)
  69. doc/tutorials/ios/hello/hello.markdown (8)
  70. doc/tutorials/ml/images/introduction_to_pca_cover.png (BIN)
  71. doc/tutorials/ml/introduction_to_pca/images/output.png (BIN)
  72. doc/tutorials/ml/introduction_to_pca/images/pca_eigen.png (BIN)
  73. doc/tutorials/ml/introduction_to_pca/images/pca_line.png (BIN)
  74. doc/tutorials/ml/introduction_to_pca/images/pca_test1.jpg (BIN)
  75. doc/tutorials/ml/introduction_to_pca/introduction_to_pca.markdown (133)
  76. doc/tutorials/ml/introduction_to_svm/introduction_to_svm.markdown (103)
  77. doc/tutorials/ml/non_linear_svms/non_linear_svms.markdown (171)
  78. doc/tutorials/ml/table_of_content_ml.markdown (10)
  79. modules/androidcamera/camera_wrapper/camera_wrapper.cpp (12)
  80. modules/androidcamera/src/camera_activity.cpp (4)
  81. modules/calib3d/CMakeLists.txt (2)
  82. modules/calib3d/misc/java/test/Calib3dTest.java (0)
  83. modules/calib3d/misc/java/test/StereoBMTest.java (0)
  84. modules/calib3d/misc/java/test/StereoSGBMTest.java (0)
  85. modules/calib3d/src/circlesgrid.cpp (6)
  86. modules/calib3d/src/circlesgrid.hpp (4)
  87. modules/calib3d/src/dls.h (6)
  88. modules/calib3d/src/fisheye.cpp (10)
  89. modules/calib3d/src/levmarq.cpp (9)
  90. modules/calib3d/src/posit.cpp (22)
  91. modules/calib3d/src/ptsetreg.cpp (16)
  92. modules/calib3d/src/stereobm.cpp (2)
  93. modules/calib3d/src/stereosgbm.cpp (2)
  94. modules/core/CMakeLists.txt (15)
  95. modules/core/include/opencv2/core.hpp (378)
  96. modules/core/include/opencv2/core/cvdef.h (2)
  97. modules/core/include/opencv2/core/matx.hpp (10)
  98. modules/core/include/opencv2/core/ocl.hpp (1)
  99. modules/core/include/opencv2/core/operations.hpp (82)
  100. modules/core/include/opencv2/core/persistence.hpp (24)
Some files were not shown because too many files have changed in this diff.

.gitignore (vendored): 9 changes

@@ -8,3 +8,12 @@
Thumbs.db
tags
tegra/
bin/
CMakeFiles/
*.sdf
*.opensdf
*.obj
*.stamp
*.depend
*.rule
*.tmp

3rdparty/libpng/CMakeLists.txt: 6 changes
@@ -14,7 +14,7 @@ ocv_include_directories("${CMAKE_CURRENT_SOURCE_DIR}" ${ZLIB_INCLUDE_DIRS})
file(GLOB lib_srcs *.c)
file(GLOB lib_hdrs *.h)
if(NEON AND CMAKE_SIZEOF_VOID_P EQUAL 4)
if(NEON AND ARM)
list(APPEND lib_srcs arm/filter_neon.S)
add_definitions(-DPNG_ARM_NEON)
endif()
@@ -29,10 +29,6 @@ if(MSVC)
add_definitions(-D_CRT_SECURE_NO_DEPRECATE)
endif(MSVC)
if (HAVE_WINRT)
add_definitions(-DHAVE_WINRT)
endif()
add_library(${PNG_LIBRARY} STATIC ${lib_srcs} ${lib_hdrs})
target_link_libraries(${PNG_LIBRARY} ${ZLIB_LIBRARIES})

3rdparty/libpng/opencv-libpng.patch: 4 changes
@@ -7,7 +7,7 @@ index 07b2b0b..e7824b8 100644
/* Memory model/platform independent fns */
#ifndef PNG_ABORT
-# ifdef _WINDOWS_
+# if defined(_WINDOWS_) && !defined(HAVE_WINRT)
+# if defined(_WINDOWS_) && !defined(WINRT)
# define PNG_ABORT() ExitProcess(0)
# else
# define PNG_ABORT() abort()
@@ -16,7 +16,7 @@ index 07b2b0b..e7824b8 100644
# define png_memset _fmemset
#else
-# ifdef _WINDOWS_ /* Favor Windows over C runtime fns */
+# if defined(_WINDOWS_) && !defined(HAVE_WINRT) /* Favor Windows over C runtime fns */
+# if defined(_WINDOWS_) && !defined(WINRT) /* Favor Windows over C runtime fns */
# define CVT_PTR(ptr) (ptr)
# define CVT_PTR_NOCHECK(ptr) (ptr)
# define png_strlen lstrlenA

3rdparty/libpng/pngpriv.h: 4 changes
@@ -360,7 +360,7 @@ typedef PNG_CONST png_uint_16p FAR * png_const_uint_16pp;
/* Memory model/platform independent fns */
#ifndef PNG_ABORT
# if defined(_WINDOWS_) && !defined(HAVE_WINRT)
# if defined(_WINDOWS_) && !defined(WINRT)
# define PNG_ABORT() ExitProcess(0)
# else
# define PNG_ABORT() abort()
@@ -378,7 +378,7 @@ typedef PNG_CONST png_uint_16p FAR * png_const_uint_16pp;
# define png_memcpy _fmemcpy
# define png_memset _fmemset
#else
# if defined(_WINDOWS_) && !defined(HAVE_WINRT) /* Favor Windows over C runtime fns */
# if defined(_WINDOWS_) && !defined(WINRT) /* Favor Windows over C runtime fns */
# define CVT_PTR(ptr) (ptr)
# define CVT_PTR_NOCHECK(ptr) (ptr)
# define png_strlen lstrlenA

3rdparty/libtiff/CMakeLists.txt: 4 changes
@@ -17,7 +17,7 @@ check_include_file(string.h HAVE_STRING_H)
check_include_file(sys/types.h HAVE_SYS_TYPES_H)
check_include_file(unistd.h HAVE_UNISTD_H)
if(WIN32 AND NOT HAVE_WINRT)
if(WIN32)
set(USE_WIN32_FILEIO 1)
endif()
@@ -79,7 +79,7 @@ set(lib_srcs
"${CMAKE_CURRENT_BINARY_DIR}/tif_config.h"
)
if(WIN32 AND NOT HAVE_WINRT)
if(WIN32)
list(APPEND lib_srcs tif_win32.c)
else()
list(APPEND lib_srcs tif_unix.c)

CMakeLists.txt: 98 changes
@@ -12,6 +12,12 @@ include(cmake/OpenCVMinDepVersions.cmake)
if(CMAKE_GENERATOR MATCHES Xcode AND XCODE_VERSION VERSION_GREATER 4.3)
cmake_minimum_required(VERSION 2.8.8 FATAL_ERROR)
elseif(CMAKE_SYSTEM_NAME MATCHES WindowsPhone OR CMAKE_SYSTEM_NAME MATCHES WindowsStore)
cmake_minimum_required(VERSION 3.1 FATAL_ERROR)
#Required to resolve linker error issues due to incompatibility with CMake v3.0+ policies.
#CMake fails to find _fseeko() which leads to subsequent linker error.
#See details here: http://www.cmake.org/Wiki/CMake/Policies
cmake_policy(VERSION 2.8)
else()
cmake_minimum_required(VERSION "${MIN_VER_CMAKE}" FATAL_ERROR)
endif()
@@ -33,6 +39,13 @@ else(NOT CMAKE_TOOLCHAIN_FILE)
set(CMAKE_INSTALL_PREFIX "${CMAKE_BINARY_DIR}/install" CACHE PATH "Installation Directory")
endif(NOT CMAKE_TOOLCHAIN_FILE)
if(CMAKE_SYSTEM_NAME MATCHES WindowsPhone OR CMAKE_SYSTEM_NAME MATCHES WindowsStore)
set(WINRT TRUE)
endif(CMAKE_SYSTEM_NAME MATCHES WindowsPhone OR CMAKE_SYSTEM_NAME MATCHES WindowsStore)
if(WINRT)
add_definitions(-DWINRT -DNO_GETENV)
endif()
if(POLICY CMP0022)
cmake_policy(SET CMP0022 OLD)
@@ -120,66 +133,66 @@ endif()
# Optional 3rd party components
# ===================================================
OCV_OPTION(WITH_1394 "Include IEEE1394 support" ON IF (NOT ANDROID AND NOT IOS) )
OCV_OPTION(WITH_1394 "Include IEEE1394 support" ON IF (NOT ANDROID AND NOT IOS AND NOT WINRT) )
OCV_OPTION(WITH_AVFOUNDATION "Use AVFoundation for Video I/O" ON IF IOS)
OCV_OPTION(WITH_CARBON "Use Carbon for UI instead of Cocoa" OFF IF APPLE )
OCV_OPTION(WITH_VTK "Include VTK library support (and build opencv_viz module as well)" ON IF (NOT ANDROID AND NOT IOS) )
OCV_OPTION(WITH_CUDA "Include NVidia Cuda Runtime support" ON IF (NOT IOS) )
OCV_OPTION(WITH_CUFFT "Include NVidia Cuda Fast Fourier Transform (FFT) library support" ON IF (NOT IOS) )
OCV_OPTION(WITH_CUBLAS "Include NVidia Cuda Basic Linear Algebra Subprograms (BLAS) library support" OFF IF (NOT IOS) )
OCV_OPTION(WITH_VTK "Include VTK library support (and build opencv_viz module as well)" ON IF (NOT ANDROID AND NOT IOS AND NOT WINRT) )
OCV_OPTION(WITH_CUDA "Include NVidia Cuda Runtime support" ON IF (NOT IOS AND NOT WINRT) )
OCV_OPTION(WITH_CUFFT "Include NVidia Cuda Fast Fourier Transform (FFT) library support" ON IF (NOT IOS AND NOT WINRT) )
OCV_OPTION(WITH_CUBLAS "Include NVidia Cuda Basic Linear Algebra Subprograms (BLAS) library support" OFF IF (NOT IOS AND NOT WINRT) )
OCV_OPTION(WITH_NVCUVID "Include NVidia Video Decoding library support" OFF IF (NOT IOS AND NOT APPLE) )
OCV_OPTION(WITH_EIGEN "Include Eigen2/Eigen3 support" ON)
OCV_OPTION(WITH_EIGEN "Include Eigen2/Eigen3 support" ON IF (NOT WINRT) )
OCV_OPTION(WITH_VFW "Include Video for Windows support" ON IF WIN32 )
OCV_OPTION(WITH_FFMPEG "Include FFMPEG support" ON IF (NOT ANDROID AND NOT IOS))
OCV_OPTION(WITH_FFMPEG "Include FFMPEG support" ON IF (NOT ANDROID AND NOT IOS AND NOT WINRT) )
OCV_OPTION(WITH_GSTREAMER "Include Gstreamer support" ON IF (UNIX AND NOT ANDROID) )
OCV_OPTION(WITH_GSTREAMER_0_10 "Enable Gstreamer 0.10 support (instead of 1.x)" OFF )
OCV_OPTION(WITH_GTK "Include GTK support" ON IF (UNIX AND NOT APPLE AND NOT ANDROID) )
OCV_OPTION(WITH_GTK_2_X "Use GTK version 2" OFF IF (UNIX AND NOT APPLE AND NOT ANDROID) )
OCV_OPTION(WITH_IPP "Include Intel IPP support" ON IF (X86_64 OR X86) )
OCV_OPTION(WITH_IPP "Include Intel IPP support" ON IF (X86_64 OR X86) AND NOT WINRT)
OCV_OPTION(WITH_JASPER "Include JPEG2K support" ON IF (NOT IOS) )
OCV_OPTION(WITH_JPEG "Include JPEG support" ON)
OCV_OPTION(WITH_WEBP "Include WebP support" ON IF (NOT IOS) )
OCV_OPTION(WITH_OPENEXR "Include ILM support via OpenEXR" ON IF (NOT IOS) )
OCV_OPTION(WITH_OPENGL "Include OpenGL support" OFF IF (NOT ANDROID) )
OCV_OPTION(WITH_OPENNI "Include OpenNI support" OFF IF (NOT ANDROID AND NOT IOS) )
OCV_OPTION(WITH_OPENNI2 "Include OpenNI2 support" OFF IF (NOT ANDROID AND NOT IOS) )
OCV_OPTION(WITH_WEBP "Include WebP support" ON IF (NOT IOS AND NOT WINRT) )
OCV_OPTION(WITH_OPENEXR "Include ILM support via OpenEXR" ON IF (NOT IOS AND NOT WINRT) )
OCV_OPTION(WITH_OPENGL "Include OpenGL support" OFF IF (NOT ANDROID AND NOT WINRT) )
OCV_OPTION(WITH_OPENNI "Include OpenNI support" OFF IF (NOT ANDROID AND NOT IOS AND NOT WINRT) )
OCV_OPTION(WITH_OPENNI2 "Include OpenNI2 support" OFF IF (NOT ANDROID AND NOT IOS AND NOT WINRT) )
OCV_OPTION(WITH_PNG "Include PNG support" ON)
OCV_OPTION(WITH_PVAPI "Include Prosilica GigE support" ON IF (NOT ANDROID AND NOT IOS) )
OCV_OPTION(WITH_GIGEAPI "Include Smartek GigE support" ON IF (NOT ANDROID AND NOT IOS) )
OCV_OPTION(WITH_QT "Build with Qt Backend support" OFF IF (NOT ANDROID AND NOT IOS) )
OCV_OPTION(WITH_WIN32UI "Build with Win32 UI Backend support" ON IF WIN32 )
OCV_OPTION(WITH_PVAPI "Include Prosilica GigE support" ON IF (NOT ANDROID AND NOT IOS AND NOT WINRT) )
OCV_OPTION(WITH_GIGEAPI "Include Smartek GigE support" ON IF (NOT ANDROID AND NOT IOS AND NOT WINRT) )
OCV_OPTION(WITH_QT "Build with Qt Backend support" OFF IF (NOT ANDROID AND NOT IOS AND NOT WINRT) )
OCV_OPTION(WITH_WIN32UI "Build with Win32 UI Backend support" ON IF WIN32 AND NOT WINRT)
OCV_OPTION(WITH_QUICKTIME "Use QuickTime for Video I/O instead of QTKit" OFF IF APPLE )
OCV_OPTION(WITH_TBB "Include Intel TBB support" OFF IF (NOT IOS) )
OCV_OPTION(WITH_TBB "Include Intel TBB support" OFF IF (NOT IOS AND NOT WINRT) )
OCV_OPTION(WITH_OPENMP "Include OpenMP support" OFF)
OCV_OPTION(WITH_CSTRIPES "Include C= support" OFF IF WIN32 )
OCV_OPTION(WITH_CSTRIPES "Include C= support" OFF IF (WIN32 AND NOT WINRT) )
OCV_OPTION(WITH_TIFF "Include TIFF support" ON IF (NOT IOS) )
OCV_OPTION(WITH_UNICAP "Include Unicap support (GPL)" OFF IF (UNIX AND NOT APPLE AND NOT ANDROID) )
OCV_OPTION(WITH_V4L "Include Video 4 Linux support" ON IF (UNIX AND NOT ANDROID) )
OCV_OPTION(WITH_LIBV4L "Use libv4l for Video 4 Linux support" ON IF (UNIX AND NOT ANDROID) )
OCV_OPTION(WITH_DSHOW "Build VideoIO with DirectShow support" ON IF (WIN32 AND NOT ARM) )
OCV_OPTION(WITH_DSHOW "Build VideoIO with DirectShow support" ON IF (WIN32 AND NOT ARM AND NOT WINRT) )
OCV_OPTION(WITH_MSMF "Build VideoIO with Media Foundation support" OFF IF WIN32 )
OCV_OPTION(WITH_XIMEA "Include XIMEA cameras support" OFF IF (NOT ANDROID) )
OCV_OPTION(WITH_XIMEA "Include XIMEA cameras support" OFF IF (NOT ANDROID AND NOT WINRT) )
OCV_OPTION(WITH_XINE "Include Xine support (GPL)" OFF IF (UNIX AND NOT APPLE AND NOT ANDROID) )
OCV_OPTION(WITH_CLP "Include Clp support (EPL)" OFF)
OCV_OPTION(WITH_OPENCL "Include OpenCL Runtime support" ON IF (NOT IOS) )
OCV_OPTION(WITH_OPENCL "Include OpenCL Runtime support" NOT ANDROID IF (NOT IOS AND NOT WINRT) )
OCV_OPTION(WITH_OPENCL_SVM "Include OpenCL Shared Virtual Memory support" OFF ) # experimental
OCV_OPTION(WITH_OPENCLAMDFFT "Include AMD OpenCL FFT library support" ON IF (NOT ANDROID AND NOT IOS) )
OCV_OPTION(WITH_OPENCLAMDBLAS "Include AMD OpenCL BLAS library support" ON IF (NOT ANDROID AND NOT IOS) )
OCV_OPTION(WITH_DIRECTX "Include DirectX support" ON IF WIN32 )
OCV_OPTION(WITH_INTELPERC "Include Intel Perceptual Computing support" OFF IF WIN32 )
OCV_OPTION(WITH_OPENCLAMDFFT "Include AMD OpenCL FFT library support" ON IF (NOT ANDROID AND NOT IOS AND NOT WINRT) )
OCV_OPTION(WITH_OPENCLAMDBLAS "Include AMD OpenCL BLAS library support" ON IF (NOT ANDROID AND NOT IOS AND NOT WINRT) )
OCV_OPTION(WITH_DIRECTX "Include DirectX support" ON IF (WIN32 AND NOT WINRT) )
OCV_OPTION(WITH_INTELPERC "Include Intel Perceptual Computing support" OFF IF (WIN32 AND NOT WINRT) )
OCV_OPTION(WITH_IPP_A "Include Intel IPP_A support" OFF IF (MSVC OR X86 OR X86_64) )
OCV_OPTION(WITH_GDAL "Include GDAL Support" OFF IF (NOT ANDROID AND NOT IOS) )
OCV_OPTION(WITH_GDAL "Include GDAL Support" OFF IF (NOT ANDROID AND NOT IOS AND NOT WINRT) )
# OpenCV build components
# ===================================================
OCV_OPTION(BUILD_SHARED_LIBS "Build shared libraries (.dll/.so) instead of static ones (.lib/.a)" NOT (ANDROID OR IOS) )
OCV_OPTION(BUILD_opencv_apps "Build utility applications (used for example to train classifiers)" (NOT ANDROID) IF (NOT IOS) )
OCV_OPTION(BUILD_opencv_apps "Build utility applications (used for example to train classifiers)" (NOT ANDROID AND NOT WINRT) IF (NOT IOS) )
OCV_OPTION(BUILD_ANDROID_EXAMPLES "Build examples for Android platform" ON IF ANDROID )
OCV_OPTION(BUILD_DOCS "Create build rules for OpenCV Documentation" ON )
OCV_OPTION(BUILD_DOCS "Create build rules for OpenCV Documentation" ON IF NOT WINRT)
OCV_OPTION(BUILD_EXAMPLES "Build all examples" OFF )
OCV_OPTION(BUILD_PACKAGE "Enables 'make package_source' command" ON )
OCV_OPTION(BUILD_PERF_TESTS "Build performance tests" ON IF (NOT IOS) )
OCV_OPTION(BUILD_TESTS "Build accuracy & regression tests" ON IF (NOT IOS) )
OCV_OPTION(BUILD_PACKAGE "Enables 'make package_source' command" ON IF NOT WINRT)
OCV_OPTION(BUILD_PERF_TESTS "Build performance tests" ON IF (NOT IOS AND NOT WINRT) )
OCV_OPTION(BUILD_TESTS "Build accuracy & regression tests" ON IF (NOT IOS AND NOT WINRT) )
OCV_OPTION(BUILD_WITH_DEBUG_INFO "Include debug info into debug libs (not MSVC only)" ON )
OCV_OPTION(BUILD_WITH_STATIC_CRT "Enables use of statically linked CRT for statically linked OpenCV" ON IF MSVC )
OCV_OPTION(BUILD_WITH_DYNAMIC_IPP "Enables dynamic linking of IPP (only for standalone IPP)" OFF )
@@ -194,7 +207,7 @@ OCV_OPTION(BUILD_TIFF "Build libtiff from source" WIN32 O
OCV_OPTION(BUILD_JASPER "Build libjasper from source" WIN32 OR ANDROID OR APPLE )
OCV_OPTION(BUILD_JPEG "Build libjpeg from source" WIN32 OR ANDROID OR APPLE )
OCV_OPTION(BUILD_PNG "Build libpng from source" WIN32 OR ANDROID OR APPLE )
OCV_OPTION(BUILD_OPENEXR "Build openexr from source" WIN32 OR ANDROID OR APPLE )
OCV_OPTION(BUILD_OPENEXR "Build openexr from source" (WIN32 OR ANDROID OR APPLE) AND NOT WINRT)
OCV_OPTION(BUILD_TBB "Download and build TBB from source" ANDROID )
# OpenCV installation options
@@ -225,12 +238,10 @@ OCV_OPTION(ENABLE_POPCNT "Enable POPCNT instructions"
OCV_OPTION(ENABLE_AVX "Enable AVX instructions" OFF IF ((MSVC OR CMAKE_COMPILER_IS_GNUCXX) AND (X86 OR X86_64)) )
OCV_OPTION(ENABLE_AVX2 "Enable AVX2 instructions" OFF IF ((MSVC OR CMAKE_COMPILER_IS_GNUCXX) AND (X86 OR X86_64)) )
OCV_OPTION(ENABLE_FMA3 "Enable FMA3 instructions" OFF IF ((MSVC OR CMAKE_COMPILER_IS_GNUCXX) AND (X86 OR X86_64)) )
OCV_OPTION(ENABLE_NEON "Enable NEON instructions" OFF IF CMAKE_COMPILER_IS_GNUCXX AND (ARM OR IOS) )
OCV_OPTION(ENABLE_VFPV3 "Enable VFPv3-D32 instructions" OFF IF CMAKE_COMPILER_IS_GNUCXX AND (ARM OR IOS) )
OCV_OPTION(ENABLE_NEON "Enable NEON instructions" OFF IF CMAKE_COMPILER_IS_GNUCXX AND (ARM OR AARCH64 OR IOS) )
OCV_OPTION(ENABLE_VFPV3 "Enable VFPv3-D32 instructions" OFF IF CMAKE_COMPILER_IS_GNUCXX AND (ARM OR AARCH64 OR IOS) )
OCV_OPTION(ENABLE_NOISY_WARNINGS "Show all warnings even if they are too noisy" OFF )
OCV_OPTION(OPENCV_WARNINGS_ARE_ERRORS "Treat warnings as errors" OFF )
OCV_OPTION(ENABLE_WINRT_MODE "Build with Windows Runtime support" OFF IF WIN32 )
OCV_OPTION(ENABLE_WINRT_MODE_NATIVE "Build with Windows Runtime native C++ support" OFF IF WIN32 )
OCV_OPTION(ANDROID_EXAMPLES_WITH_LIBS "Build binaries of Android examples with native libraries" OFF IF ANDROID )
OCV_OPTION(ENABLE_IMPL_COLLECTION "Collect implementation data on function call" OFF )
@@ -759,11 +770,12 @@ endif()
# ================== Windows RT features ==================
if(WIN32)
status("")
status(" Windows RT support:" HAVE_WINRT THEN YES ELSE NO)
if (ENABLE_WINRT_MODE OR ENABLE_WINRT_MODE_NATIVE)
status(" Windows SDK v8.0:" ${WINDOWS_SDK_PATH})
status(" Visual Studio 2012:" ${VISUAL_STUDIO_PATH})
endif()
status(" Windows RT support:" WINRT THEN YES ELSE NO)
if(WINRT)
status(" Building for Microsoft platform: " ${CMAKE_SYSTEM_NAME})
status(" Building for architectures: " ${CMAKE_VS_EFFECTIVE_PLATFORMS})
status(" Building for version: " ${CMAKE_SYSTEM_VERSION})
endif()
endif(WIN32)
# ========================== GUI ==========================
@@ -913,7 +925,7 @@ endif(DEFINED WITH_OPENNI)
if(DEFINED WITH_OPENNI2)
status(" OpenNI2:" HAVE_OPENNI2 THEN "YES (ver ${OPENNI2_VERSION_STRING}, build ${OPENNI2_VERSION_BUILD})"
ELSE NO)
ELSE NO)
endif(DEFINED WITH_OPENNI2)
if(DEFINED WITH_PVAPI)

apps/CMakeLists.txt: 1 change
@@ -3,3 +3,4 @@ link_libraries(${OPENCV_LINKER_LIBS})
add_subdirectory(traincascade)
add_subdirectory(createsamples)
add_subdirectory(annotation)

apps/annotation/CMakeLists.txt: 37 changes (new file)
@@ -0,0 +1,37 @@
SET(OPENCV_ANNOTATION_DEPS opencv_core opencv_highgui opencv_imgproc opencv_imgcodecs opencv_videoio)
ocv_check_dependencies(${OPENCV_ANNOTATION_DEPS})
if(NOT OCV_DEPENDENCIES_FOUND)
return()
endif()
project(annotation)
set(the_target opencv_annotation)
ocv_target_include_directories(${the_target} PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}" "${OpenCV_SOURCE_DIR}/include/opencv")
ocv_target_include_modules(${the_target} ${OPENCV_ANNOTATION_DEPS})
file(GLOB SRCS *.cpp)
set(annotation_files ${SRCS})
ocv_add_executable(${the_target} ${annotation_files})
ocv_target_link_libraries(${the_target} ${OPENCV_ANNOTATION_DEPS})
set_target_properties(${the_target} PROPERTIES
DEBUG_POSTFIX "${OPENCV_DEBUG_POSTFIX}"
ARCHIVE_OUTPUT_DIRECTORY ${LIBRARY_OUTPUT_PATH}
RUNTIME_OUTPUT_DIRECTORY ${EXECUTABLE_OUTPUT_PATH}
INSTALL_NAME_DIR lib
OUTPUT_NAME "opencv_annotation")
if(ENABLE_SOLUTION_FOLDERS)
set_target_properties(${the_target} PROPERTIES FOLDER "applications")
endif()
if(INSTALL_CREATE_DISTRIB)
if(BUILD_SHARED_LIBS)
install(TARGETS ${the_target} RUNTIME DESTINATION ${OPENCV_BIN_INSTALL_PATH} CONFIGURATIONS Release COMPONENT dev)
endif()
else()
install(TARGETS ${the_target} RUNTIME DESTINATION ${OPENCV_BIN_INSTALL_PATH} COMPONENT dev)
endif()

apps/annotation/opencv_annotation.cpp: 198 changes (new file)
@@ -0,0 +1,198 @@
/*****************************************************************************************************
USAGE:
./opencv_annotation -images <folder location> -annotations <output file>
Created by: Puttemans Steven
*****************************************************************************************************/
#include <opencv2/core.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/videoio.hpp>
#include <opencv2/imgproc.hpp>
#include <fstream>
#include <iostream>
using namespace std;
using namespace cv;
// Function prototypes
void on_mouse(int, int, int, int, void*);
string int2string(int);
void get_annotations(Mat, stringstream*);
// Public parameters
Mat image;
int roi_x0 = 0, roi_y0 = 0, roi_x1 = 0, roi_y1 = 0, num_of_rec = 0;
bool start_draw = false;
// Window name for visualisation purposes
const string window_name="OpenCV Based Annotation Tool";
// FUNCTION : Mouse response for selecting objects in images
// If left button is clicked, start drawing a rectangle as long as mouse moves
// Stop drawing once a new left click is detected by the on_mouse function
void on_mouse(int event, int x, int y, int , void * )
{
// Action when left button is clicked
if(event == EVENT_LBUTTONDOWN)
{
if(!start_draw)
{
roi_x0 = x;
roi_y0 = y;
start_draw = true;
} else {
roi_x1 = x;
roi_y1 = y;
start_draw = false;
}
}
// Action when mouse is moving
if((event == EVENT_MOUSEMOVE) && start_draw)
{
// Redraw bounding box for annotation
Mat current_view;
image.copyTo(current_view);
rectangle(current_view, Point(roi_x0,roi_y0), Point(x,y), Scalar(0,0,255));
imshow(window_name, current_view);
}
}
// FUNCTION : snippet to convert an integer value to a string using a clean function
// instead of creating a stringstream each time inside the main code
string int2string(int num)
{
stringstream temp_stream;
temp_stream << num;
return temp_stream.str();
}
// FUNCTION : given an image containing positive object instances, add all the object
// annotations to a known stringstream
void get_annotations(Mat input_image, stringstream* output_stream)
{
// Make it possible to exit the annotation
bool stop = false;
// Reset the num_of_rec element at each iteration
// Make sure the global image is set to the current image
num_of_rec = 0;
image = input_image;
// Init window interface and couple mouse actions
namedWindow(window_name, WINDOW_AUTOSIZE);
setMouseCallback(window_name, on_mouse);
imshow(window_name, image);
stringstream temp_stream;
int key_pressed = 0;
do
{
// Keys for processing
// You need to select one for confirming a selection and one to continue to the next image
// Based on the universal ASCII code of the keystroke: http://www.asciitable.com/
// c = 99 add rectangle to current image
// n = 110 save added rectangles and show next image
// <ESC> = 27 exit program
key_pressed = 0xFF & waitKey(0);
switch( key_pressed )
{
case 27:
destroyWindow(window_name);
stop = true;
break; // do not fall through into the add-rectangle case
case 99:
// Add a rectangle to the list
num_of_rec++;
// Draw initiated from top left corner
if(roi_x0<roi_x1 && roi_y0<roi_y1)
{
temp_stream << " " << int2string(roi_x0) << " " << int2string(roi_y0) << " " << int2string(roi_x1-roi_x0) << " " << int2string(roi_y1-roi_y0);
}
// Draw initiated from bottom right corner
if(roi_x0>roi_x1 && roi_y0>roi_y1)
{
temp_stream << " " << int2string(roi_x1) << " " << int2string(roi_y1) << " " << int2string(roi_x0-roi_x1) << " " << int2string(roi_y0-roi_y1);
}
// Draw initiated from top right corner
if(roi_x0>roi_x1 && roi_y0<roi_y1)
{
temp_stream << " " << int2string(roi_x1) << " " << int2string(roi_y0) << " " << int2string(roi_x0-roi_x1) << " " << int2string(roi_y1-roi_y0);
}
// Draw initiated from bottom left corner
if(roi_x0<roi_x1 && roi_y0>roi_y1)
{
temp_stream << " " << int2string(roi_x0) << " " << int2string(roi_y1) << " " << int2string(roi_x1-roi_x0) << " " << int2string(roi_y0-roi_y1);
}
rectangle(input_image, Point(roi_x0,roi_y0), Point(roi_x1,roi_y1), Scalar(0,255,0), 1);
break;
}
// Check if escape has been pressed
if(stop)
{
break;
}
}
// Continue as long as the next image key has not been pressed
while(key_pressed != 110);
// If there are annotations AND the next image key is pressed
// Write the image annotations to the file
if(num_of_rec>0 && key_pressed==110)
{
*output_stream << " " << num_of_rec << temp_stream.str() << endl;
}
// Close down the window
destroyWindow(window_name);
}
int main( int argc, const char** argv )
{
// Read in the input arguments
string image_folder;
string annotations;
for(int i = 1; i < argc; ++i )
{
if( !strcmp( argv[i], "-images" ) )
{
image_folder = argv[++i];
}
else if( !strcmp( argv[i], "-annotations" ) )
{
annotations = argv[++i];
}
}
// Create the output file stream
ofstream output(annotations.c_str());
// Return the image filenames inside the image folder
vector<String> filenames;
String folder(image_folder);
glob(folder, filenames);
// Loop through each image stored in the images folder
// Create and temporarily store the annotations
// At the end write everything to the annotations file
for (size_t i = 0; i < filenames.size(); i++){
// Read in an image
Mat current_image = imread(filenames[i]);
// Perform annotations & generate corresponding output
stringstream output_stream;
get_annotations(current_image, &output_stream);
// Store the annotations, write to the output file
if (output_stream.str() != ""){
output << filenames[i] << output_stream.str();
}
}
return 0;
}
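
For reference, each line the tool writes has the form "<image path> <count> <x> <y> <w> <h> [x y w h ...]", the space-separated layout that opencv_createsamples expects for its -info file. A minimal Python sketch for reading such a file (assuming image paths contain no spaces; "annotations.txt" is a placeholder name):

# parse_annotations.py: read the file produced by opencv_annotation
def parse_annotations(path):
    boxes = {}
    with open(path) as f:
        for line in f:
            parts = line.split()
            if not parts:
                continue
            name, count = parts[0], int(parts[1])
            # four integers (x, y, width, height) per annotated rectangle
            boxes[name] = [tuple(map(int, parts[2 + 4*i : 6 + 4*i])) for i in range(count)]
    return boxes

if __name__ == "__main__":
    for image, rects in parse_annotations("annotations.txt").items():
        print(image, rects)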

apps/createsamples/CMakeLists.txt: 2 changes
@@ -35,5 +35,5 @@ if(INSTALL_CREATE_DISTRIB)
install(TARGETS ${the_target} RUNTIME DESTINATION ${OPENCV_BIN_INSTALL_PATH} CONFIGURATIONS Release COMPONENT dev)
endif()
else()
install(TARGETS ${the_target} RUNTIME DESTINATION ${OPENCV_BIN_INSTALL_PATH} COMPONENT dev)
install(TARGETS ${the_target} OPTIONAL RUNTIME DESTINATION ${OPENCV_BIN_INSTALL_PATH} COMPONENT dev)
endif()

apps/traincascade/CMakeLists.txt: 2 changes
@@ -35,5 +35,5 @@ if(INSTALL_CREATE_DISTRIB)
install(TARGETS ${the_target} RUNTIME DESTINATION ${OPENCV_BIN_INSTALL_PATH} CONFIGURATIONS Release COMPONENT dev)
endif()
else()
install(TARGETS ${the_target} RUNTIME DESTINATION ${OPENCV_BIN_INSTALL_PATH} COMPONENT dev)
install(TARGETS ${the_target} OPTIONAL RUNTIME DESTINATION ${OPENCV_BIN_INSTALL_PATH} COMPONENT dev)
endif()

apps/traincascade/cascadeclassifier.cpp: 7 changes
@@ -168,6 +168,13 @@ bool CvCascadeClassifier::train( const string _cascadeDirName,
featureEvaluator = CvFeatureEvaluator::create(cascadeParams.featureType);
featureEvaluator->init( featureParams, numPos + numNeg, cascadeParams.winSize );
stageClassifiers.reserve( numStages );
}else{
// Make sure that, if model parameters are preloaded, people are aware of this,
// even when passing other parameters to the training command
cout << "---------------------------------------------------------------------------------" << endl;
cout << "Training parameters are pre-loaded from the parameter file in data folder!" << endl;
cout << "Please empty this folder if you want to use a NEW set of training parameters." << endl;
cout << "---------------------------------------------------------------------------------" << endl;
}
cout << "PARAMETERS:" << endl;
cout << "cascadeDirName: " << _cascadeDirName << endl;

apps/traincascade/old_ml.hpp: 97 changes
@@ -122,7 +122,6 @@ CV_INLINE CvParamLattice cvDefaultParamLattice( void )
#define CV_TYPE_NAME_ML_SVM "opencv-ml-svm"
#define CV_TYPE_NAME_ML_KNN "opencv-ml-knn"
#define CV_TYPE_NAME_ML_NBAYES "opencv-ml-bayesian"
#define CV_TYPE_NAME_ML_EM "opencv-ml-em"
#define CV_TYPE_NAME_ML_BOOSTING "opencv-ml-boost-tree"
#define CV_TYPE_NAME_ML_TREE "opencv-ml-tree"
#define CV_TYPE_NAME_ML_ANN_MLP "opencv-ml-ann-mlp"
@@ -562,100 +561,6 @@ private:
CvSVM& operator = (const CvSVM&);
};
/****************************************************************************************\
* Expectation - Maximization *
\****************************************************************************************/
namespace cv
{
class EM : public Algorithm
{
public:
// Type of covariation matrices
enum {COV_MAT_SPHERICAL=0, COV_MAT_DIAGONAL=1, COV_MAT_GENERIC=2, COV_MAT_DEFAULT=COV_MAT_DIAGONAL};
// Default parameters
enum {DEFAULT_NCLUSTERS=5, DEFAULT_MAX_ITERS=100};
// The initial step
enum {START_E_STEP=1, START_M_STEP=2, START_AUTO_STEP=0};
CV_WRAP EM(int nclusters=EM::DEFAULT_NCLUSTERS, int covMatType=EM::COV_MAT_DIAGONAL,
const TermCriteria& termCrit=TermCriteria(TermCriteria::COUNT+TermCriteria::EPS,
EM::DEFAULT_MAX_ITERS, FLT_EPSILON));
virtual ~EM();
CV_WRAP virtual void clear();
CV_WRAP virtual bool train(InputArray samples,
OutputArray logLikelihoods=noArray(),
OutputArray labels=noArray(),
OutputArray probs=noArray());
CV_WRAP virtual bool trainE(InputArray samples,
InputArray means0,
InputArray covs0=noArray(),
InputArray weights0=noArray(),
OutputArray logLikelihoods=noArray(),
OutputArray labels=noArray(),
OutputArray probs=noArray());
CV_WRAP virtual bool trainM(InputArray samples,
InputArray probs0,
OutputArray logLikelihoods=noArray(),
OutputArray labels=noArray(),
OutputArray probs=noArray());
CV_WRAP Vec2d predict(InputArray sample,
OutputArray probs=noArray()) const;
CV_WRAP bool isTrained() const;
AlgorithmInfo* info() const;
virtual void read(const FileNode& fn);
protected:
virtual void setTrainData(int startStep, const Mat& samples,
const Mat* probs0,
const Mat* means0,
const std::vector<Mat>* covs0,
const Mat* weights0);
bool doTrain(int startStep,
OutputArray logLikelihoods,
OutputArray labels,
OutputArray probs);
virtual void eStep();
virtual void mStep();
void clusterTrainSamples();
void decomposeCovs();
void computeLogWeightDivDet();
Vec2d computeProbabilities(const Mat& sample, Mat* probs) const;
// all inner matrices have type CV_64FC1
CV_PROP_RW int nclusters;
CV_PROP_RW int covMatType;
CV_PROP_RW int maxIters;
CV_PROP_RW double epsilon;
Mat trainSamples;
Mat trainProbs;
Mat trainLogLikelihoods;
Mat trainLabels;
CV_PROP Mat weights;
CV_PROP Mat means;
CV_PROP std::vector<Mat> covs;
std::vector<Mat> covsEigenValues;
std::vector<Mat> covsRotateMats;
std::vector<Mat> invCovsEigenValues;
Mat logWeightDivDet;
};
} // namespace cv
/****************************************************************************************\
* Decision Tree *
\****************************************************************************************/
@@ -2155,8 +2060,6 @@ typedef CvGBTreesParams GradientBoostingTreeParams;
typedef CvGBTrees GradientBoostingTrees;
template<> void DefaultDeleter<CvDTreeSplit>::operator ()(CvDTreeSplit* obj) const;
bool initModule_ml(void);
}
#endif // __cplusplus

cmake/OpenCVCRTLinkage.cmake: 60 changes
@@ -2,47 +2,37 @@ if(NOT MSVC)
message(FATAL_ERROR "CRT options are available only for MSVC")
endif()
#INCLUDE (CheckIncludeFiles)
#if (${CMAKE_SYSTEM_NAME} MATCHES "WindowsStore" OR ${CMAKE_SYSTEM_NAME} MATCHES "WindowsPhone")
# set(WINRT TRUE)
set(HAVE_WINRT FALSE)
# search Windows Platform SDK
message(STATUS "Checking for Windows Platform SDK")
GET_FILENAME_COMPONENT(WINDOWS_SDK_PATH "[HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Microsoft SDKs\\Windows\\v8.0;InstallationFolder]" ABSOLUTE CACHE)
if(WINDOWS_SDK_PATH STREQUAL "")
set(HAVE_MSPDK FALSE)
message(STATUS "Windows Platform SDK 8.0 was not found")
else()
set(HAVE_MSPDK TRUE)
if (WINRT)
add_definitions(/DWINVER=_WIN32_WINNT_WIN8 /DNTDDI_VERSION=NTDDI_WIN8 /D_WIN32_WINNT=_WIN32_WINNT_WIN8)
endif()
#search for Visual Studio 11.0 install directory
message(STATUS "Checking for Visual Studio 2012")
GET_FILENAME_COMPONENT(VISUAL_STUDIO_PATH [HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\VisualStudio\\11.0\\Setup\\VS;ProductDir] REALPATH CACHE)
if(VISUAL_STUDIO_PATH STREQUAL "")
set(HAVE_MSVC2012 FALSE)
message(STATUS "Visual Studio 2012 was not found")
else()
set(HAVE_MSVC2012 TRUE)
endif()
# Removing LNK4075 warnings for debug WinRT builds
# "LNK4075: ignoring '/INCREMENTAL' due to '/OPT:ICF' specification"
# "LNK4075: ignoring '/INCREMENTAL' due to '/OPT:REF' specification"
if(MSVC AND WINRT)
# Optional verification checks since we don't know existing contents of variables below
string(REPLACE "/OPT:ICF " "/OPT:NOICF " CMAKE_EXE_LINKER_FLAGS_DEBUG "${CMAKE_EXE_LINKER_FLAGS_DEBUG}")
string(REPLACE "/OPT:REF " "/OPT:NOREF " CMAKE_EXE_LINKER_FLAGS_DEBUG "${CMAKE_EXE_LINKER_FLAGS_DEBUG}")
string(REPLACE "/INCREMENTAL:YES " "/INCREMENTAL:NO " CMAKE_EXE_LINKER_FLAGS_DEBUG "${CMAKE_EXE_LINKER_FLAGS_DEBUG}")
string(REPLACE "/INCREMENTAL " "/INCREMENTAL:NO " CMAKE_EXE_LINKER_FLAGS_DEBUG "${CMAKE_EXE_LINKER_FLAGS_DEBUG}")
try_compile(HAVE_WINRT_SDK
"${OpenCV_BINARY_DIR}"
"${OpenCV_SOURCE_DIR}/cmake/checks/winrttest.cpp")
string(REPLACE "/OPT:ICF " "/OPT:NOICF " CMAKE_MODULE_LINKER_FLAGS_DEBUG "${CMAKE_MODULE_LINKER_FLAGS_DEBUG}")
string(REPLACE "/OPT:REF " "/OPT:NORE F" CMAKE_MODULE_LINKER_FLAGS_DEBUG "${CMAKE_MODULE_LINKER_FLAGS_DEBUG}")
string(REPLACE "/INCREMENTAL:YES " "/INCREMENTAL:NO " CMAKE_MODULE_LINKER_FLAGS_DEBUG "${CMAKE_MODULE_LINKER_FLAGS_DEBUG}")
string(REPLACE "/INCREMENTAL " "/INCREMENTAL:NO " CMAKE_MODULE_LINKER_FLAGS_DEBUG "${CMAKE_MODULE_LINKER_FLAGS_DEBUG}")
if(ENABLE_WINRT_MODE AND HAVE_WINRT_SDK AND HAVE_MSVC2012 AND HAVE_MSPDK)
set(HAVE_WINRT TRUE)
set(HAVE_WINRT_CX TRUE)
elseif(ENABLE_WINRT_MODE_NATIVE AND HAVE_WINRT_SDK AND HAVE_MSVC2012 AND HAVE_MSPDK)
set(HAVE_WINRT TRUE)
set(HAVE_WINRT_CX FALSE)
endif()
string(REPLACE "/OPT:ICF " "/OPT:NOICF " CMAKE_SHARED_LINKER_FLAGS_DEBUG "${CMAKE_SHARED_LINKER_FLAGS_DEBUG}")
string(REPLACE "/OPT:REF " "/OPT:NOREF " CMAKE_SHARED_LINKER_FLAGS_DEBUG "${CMAKE_SHARED_LINKER_FLAGS_DEBUG}")
string(REPLACE "/INCREMENTAL:YES " "/INCREMENTAL:NO " CMAKE_SHARED_LINKER_FLAGS_DEBUG "${CMAKE_SHARED_LINKER_FLAGS_DEBUG}")
string(REPLACE "/INCREMENTAL " "/INCREMENTAL:NO " CMAKE_SHARED_LINKER_FLAGS_DEBUG "${CMAKE_SHARED_LINKER_FLAGS_DEBUG}")
if(HAVE_WINRT)
add_definitions(/DWINVER=0x0602 /DNTDDI_VERSION=NTDDI_WIN8 /D_WIN32_WINNT=0x0602)
set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} /appcontainer")
set(CMAKE_MODULE_LINKER_FLAGS "${CMAKE_MODULE_LINKER_FLAGS} /appcontainer")
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} /appcontainer")
# Mandatory
set(CMAKE_MODULE_LINKER_FLAGS_DEBUG "${CMAKE_MODULE_LINKER_FLAGS_DEBUG} /INCREMENTAL:NO /OPT:NOREF /OPT:NOICF")
set(CMAKE_EXE_LINKER_FLAGS_DEBUG "${CMAKE_EXE_LINKER_FLAGS_DEBUG} /INCREMENTAL:NO /OPT:NOREF /OPT:NOICF")
set(CMAKE_SHARED_LINKER_FLAGS_DEBUG "${CMAKE_SHARED_LINKER_FLAGS_DEBUG} /INCREMENTAL:NO /OPT:NOREF /OPT:NOICF")
endif()
if(NOT BUILD_SHARED_LIBS AND BUILD_WITH_STATIC_CRT)

cmake/OpenCVConfig.cmake: 3 changes
@@ -65,6 +65,9 @@ if(MSVC)
if(CMAKE_CL_64)
set(OpenCV_ARCH x64)
set(OpenCV_TBB_ARCH intel64)
elseif((CMAKE_GENERATOR MATCHES "ARM") OR ("${arch_hint}" STREQUAL "ARM") OR (CMAKE_VS_EFFECTIVE_PLATFORMS MATCHES "ARM|arm"))
# see Modules/CmakeGenericSystem.cmake
set(OpenCV_ARCH ARM)
else()
set(OpenCV_ARCH x86)
set(OpenCV_TBB_ARCH ia32)

cmake/OpenCVDetectCUDA.cmake: 11 changes
@@ -80,11 +80,18 @@ if(CUDA_FOUND)
if(NOT DEFINED __cuda_arch_bin)
if(ANDROID)
set(__cuda_arch_bin "3.2")
set(__cuda_arch_ptx "")
if(ARM)
set(__cuda_arch_bin "3.2")
set(__cuda_arch_ptx "")
elseif(AARCH64)
set(__cuda_arch_bin "5.2")
set(__cuda_arch_ptx "")
endif()
else()
if(${CUDA_VERSION} VERSION_LESS "5.0")
set(__cuda_arch_bin "1.1 1.2 1.3 2.0 2.1(2.0) 3.0")
elseif(${CUDA_VERSION} VERSION_GREATER "6.5")
set(__cuda_arch_bin "2.0 2.1(2.0) 3.0 3.5")
else()
set(__cuda_arch_bin "1.1 1.2 1.3 2.0 2.1(2.0) 3.0 3.5")
endif()

cmake/OpenCVDetectCXXCompiler.cmake: 7 changes
@@ -107,8 +107,10 @@ elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "amd64.*|x86_64.*|AMD64.*")
set(X86_64 1)
elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "i686.*|i386.*|x86.*|amd64.*|AMD64.*")
set(X86 1)
elseif (CMAKE_SYSTEM_PROCESSOR MATCHES "arm.*|ARM.*")
elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "^(arm.*|ARM.*)")
set(ARM 1)
elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "^(aarch64.*|AARCH64.*)")
set(AARCH64 1)
endif()
@@ -125,6 +127,9 @@ endif()
if(MSVC)
if(CMAKE_CL_64)
set(OpenCV_ARCH x64)
elseif((CMAKE_GENERATOR MATCHES "ARM") OR ("${arch_hint}" STREQUAL "ARM") OR (CMAKE_VS_EFFECTIVE_PLATFORMS MATCHES "ARM|arm"))
# see Modules/CmakeGenericSystem.cmake
set(OpenCV_ARCH ARM)
else()
set(OpenCV_ARCH x86)
endif()

cmake/OpenCVDetectOpenCL.cmake: 9 changes
@@ -10,6 +10,11 @@ else(APPLE)
set(OPENCL_INCLUDE_DIR "${OpenCV_SOURCE_DIR}/3rdparty/include/opencl/1.2")
endif(APPLE)
if(WINRT)
set(OPENCL_FOUND NO)
set(HAVE_OPENCL_STATIC OFF)
endif(WINRT)
if(OPENCL_FOUND)
if(NOT HAVE_OPENCL_STATIC)
try_compile(__VALID_OPENCL
@@ -24,7 +29,9 @@ if(OPENCL_FOUND)
endif()
endif()
set(HAVE_OPENCL 1)
if(NOT WINRT)
set(HAVE_OPENCL 1)
endif()
if(WITH_OPENCL_SVM)
set(HAVE_OPENCL_SVM 1)

cmake/OpenCVGenPkgconfig.cmake: 2 changes
@@ -37,6 +37,7 @@ ocv_list_reverse(OpenCV_EXTRA_COMPONENTS)
#build the list of components
set(OpenCV_LIB_COMPONENTS_ "")
foreach(CVLib ${OpenCV_LIB_COMPONENTS})
if (TARGET ${CVLib})
get_target_property(libpath ${CVLib} LOCATION_${CMAKE_BUILD_TYPE})
get_filename_component(libname "${libpath}" NAME)
@@ -52,6 +53,7 @@ foreach(CVLib ${OpenCV_LIB_COMPONENTS})
endif()
set(OpenCV_LIB_COMPONENTS_ "${OpenCV_LIB_COMPONENTS_} \${exec_prefix}/${installDir}/${libname}")
endif()
endforeach()
# add extra dependencies required for OpenCV

cmake/OpenCVModule.cmake: 164 changes
@@ -19,6 +19,9 @@
# OPENCV_MODULE_${the_module}_PRIVATE_REQ_DEPS
# OPENCV_MODULE_${the_module}_PRIVATE_OPT_DEPS
# OPENCV_MODULE_${the_module}_IS_PART_OF_WORLD
# OPENCV_MODULE_${the_module}_CUDA_OBJECTS - compiled CUDA objects list
# OPENCV_MODULE_${the_module}_CHILDREN - list of submodules for compound modules (cmake >= 2.8.8)
# OPENCV_MODULE_${the_module}_WRAPPERS - list of wrappers supporting this module
# HAVE_${the_module} - for fast check of module availability
# To control the setup of the module you could also set:
@@ -26,6 +29,7 @@
# OPENCV_MODULE_TYPE - STATIC|SHARED - set to force override global settings for current module
# OPENCV_MODULE_IS_PART_OF_WORLD - ON|OFF (default ON) - should the module be added to the opencv_world?
# BUILD_${the_module}_INIT - ON|OFF (default ON) - initial value for BUILD_${the_module}
# OPENCV_MODULE_CHILDREN - list of submodules
# The verbose template for OpenCV module:
#
@@ -57,6 +61,7 @@ foreach(mod ${OPENCV_MODULES_BUILD} ${OPENCV_MODULES_DISABLED_USER} ${OPENCV_MOD
unset(OPENCV_MODULE_${mod}_PRIVATE_REQ_DEPS CACHE)
unset(OPENCV_MODULE_${mod}_PRIVATE_OPT_DEPS CACHE)
unset(OPENCV_MODULE_${mod}_LINK_DEPS CACHE)
unset(OPENCV_MODULE_${mod}_WRAPPERS CACHE)
endforeach()
# clean modules info which needs to be recalculated
@@ -69,7 +74,7 @@ unset(OPENCV_WORLD_MODULES CACHE)
# adds dependencies to OpenCV module
# Usage:
# add_dependencies(opencv_<name> [REQUIRED] [<list of dependencies>] [OPTIONAL <list of modules>])
# add_dependencies(opencv_<name> [REQUIRED] [<list of dependencies>] [OPTIONAL <list of modules>] [WRAP <list of wrappers>])
# Notes:
# * <list of dependencies> - can include full names of modules or full paths to shared/static libraries or cmake targets
macro(ocv_add_dependencies full_modname)
@@ -84,16 +89,28 @@ macro(ocv_add_dependencies full_modname)
set(__depsvar OPENCV_MODULE_${full_modname}_PRIVATE_REQ_DEPS)
elseif(d STREQUAL "PRIVATE_OPTIONAL")
set(__depsvar OPENCV_MODULE_${full_modname}_PRIVATE_OPT_DEPS)
elseif(d STREQUAL "WRAP")
set(__depsvar OPENCV_MODULE_${full_modname}_WRAPPERS)
else()
list(APPEND ${__depsvar} "${d}")
endif()
endforeach()
unset(__depsvar)
# hack for python
set(__python_idx)
list(FIND OPENCV_MODULE_${full_modname}_WRAPPERS "python" __python_idx)
if (NOT __python_idx EQUAL -1)
list(REMOVE_ITEM OPENCV_MODULE_${full_modname}_WRAPPERS "python")
list(APPEND OPENCV_MODULE_${full_modname}_WRAPPERS "python2" "python3")
endif()
unset(__python_idx)
ocv_list_unique(OPENCV_MODULE_${full_modname}_REQ_DEPS)
ocv_list_unique(OPENCV_MODULE_${full_modname}_OPT_DEPS)
ocv_list_unique(OPENCV_MODULE_${full_modname}_PRIVATE_REQ_DEPS)
ocv_list_unique(OPENCV_MODULE_${full_modname}_PRIVATE_OPT_DEPS)
ocv_list_unique(OPENCV_MODULE_${full_modname}_WRAPPERS)
set(OPENCV_MODULE_${full_modname}_REQ_DEPS ${OPENCV_MODULE_${full_modname}_REQ_DEPS}
CACHE INTERNAL "Required dependencies of ${full_modname} module")
@@ -103,11 +120,13 @@ macro(ocv_add_dependencies full_modname)
CACHE INTERNAL "Required private dependencies of ${full_modname} module")
set(OPENCV_MODULE_${full_modname}_PRIVATE_OPT_DEPS ${OPENCV_MODULE_${full_modname}_PRIVATE_OPT_DEPS}
CACHE INTERNAL "Optional private dependencies of ${full_modname} module")
set(OPENCV_MODULE_${full_modname}_WRAPPERS ${OPENCV_MODULE_${full_modname}_WRAPPERS}
CACHE INTERNAL "List of wrappers supporting module ${full_modname}")
endmacro()
# declare new OpenCV module in current folder
# Usage:
# ocv_add_module(<name> [INTERNAL|BINDINGS] [REQUIRED] [<list of dependencies>] [OPTIONAL <list of optional dependencies>])
# ocv_add_module(<name> [INTERNAL|BINDINGS] [REQUIRED] [<list of dependencies>] [OPTIONAL <list of optional dependencies>] [WRAP <list of wrappers>])
# Example:
# ocv_add_module(yaom INTERNAL opencv_core opencv_highgui opencv_flann OPTIONAL opencv_cudev)
macro(ocv_add_module _name)
@@ -158,13 +177,9 @@ macro(ocv_add_module _name)
endif()
# add self to the world dependencies
# add to world only extra modules (ON) or only main modules (OFF)
set(__expected_extra 0)
if (OPENCV_EXTRA_WORLD)
set(__expected_extra 1)
endif()
if((NOT DEFINED OPENCV_MODULE_IS_PART_OF_WORLD AND NOT OPENCV_MODULE_${the_module}_CLASS STREQUAL "BINDINGS"
AND __expected_extra EQUAL OPENCV_PROCESSING_EXTRA_MODULES)
if((NOT DEFINED OPENCV_MODULE_IS_PART_OF_WORLD
AND NOT OPENCV_MODULE_${the_module}_CLASS STREQUAL "BINDINGS"
AND NOT OPENCV_PROCESSING_EXTRA_MODULES)
OR OPENCV_MODULE_IS_PART_OF_WORLD
)
set(OPENCV_MODULE_${the_module}_IS_PART_OF_WORLD ON CACHE INTERNAL "")
@@ -179,7 +194,13 @@ macro(ocv_add_module _name)
set(OPENCV_MODULES_DISABLED_USER ${OPENCV_MODULES_DISABLED_USER} "${the_module}" CACHE INTERNAL "List of OpenCV modules explicitly disabled by user")
endif()
# TODO: add submodules if any
# add submodules if any
set(OPENCV_MODULE_${the_module}_CHILDREN "${OPENCV_MODULE_CHILDREN}" CACHE INTERNAL "List of ${the_module} submodules")
# add reverse wrapper dependencies
foreach (wrapper ${OPENCV_MODULE_${the_module}_WRAPPERS})
ocv_add_dependencies(opencv_${wrapper} OPTIONAL ${the_module})
endforeach()
# stop processing of current file
return()
@@ -306,27 +327,42 @@ endfunction()
# sort modules by dependencies
function(__ocv_sort_modules_by_deps __lst)
ocv_list_sort(${__lst})
set(${__lst}_ORDERED ${${__lst}} CACHE INTERNAL "")
set(__result "")
foreach (m ${${__lst}})
list(LENGTH __result __lastindex)
set(__index ${__lastindex})
foreach (__d ${__result})
set(__deps "${OPENCV_MODULE_${__d}_DEPS}")
if(";${__deps};" MATCHES ";${m};")
list(FIND __result "${__d}" __i)
if(__i LESS "${__index}")
set(__index "${__i}")
set(input ${${__lst}})
set(result "")
while(input)
list(LENGTH input length_before)
foreach (m ${input})
# check if module is in the result already
if (NOT ";${result};" MATCHES ";${m};")
# scan through module dependencies...
set(unresolved_deps_found FALSE)
foreach (d ${OPENCV_MODULE_${m}_CHILDREN} ${OPENCV_MODULE_${m}_DEPS})
# ... which are not already in the result and are enabled
if ((NOT ";${result};" MATCHES ";${d};") AND HAVE_${d})
set(unresolved_deps_found TRUE)
break()
endif()
endforeach()
# check if all dependencies for this module have been resolved
if (NOT unresolved_deps_found)
list(APPEND result ${m})
list(REMOVE_ITEM input ${m})
endif()
endif()
endforeach()
if(__index STREQUAL __lastindex)
list(APPEND __result "${m}")
else()
list(INSERT __result ${__index} "${m}")
list(LENGTH input length_after)
# check for infinite loop or unresolved dependencies
if (NOT length_after LESS length_before)
message(WARNING "Unresolved dependencies or loop in dependency graph (${length_after})\n"
"Processed ${__lst}: ${${__lst}}\n"
"Good modules: ${result}\n"
"Bad modules: ${input}"
)
list(APPEND result ${input})
break()
endif()
endforeach()
set(${__lst} "${__result}" PARENT_SCOPE)
endwhile()
set(${__lst} "${result}" PARENT_SCOPE)
endfunction()
# resolve dependencies
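
The rewritten __ocv_sort_modules_by_deps above is an iterative topological sort: each pass moves every module whose enabled dependencies are already placed into the result, and a pass that makes no progress signals a dependency cycle or an unresolved module, which triggers the warning path. A minimal Python sketch of the same idea (the deps map below is hypothetical):

# topological ordering with stall detection, mirroring the CMake loop above
def sort_by_deps(deps):
    result, pending = [], list(deps)
    while pending:
        before = len(pending)
        for m in list(pending):
            # a module is ready once all of its known dependencies are placed
            if all(d in result for d in deps[m] if d in deps):
                result.append(m)
                pending.remove(m)
        if len(pending) == before:   # no progress: cycle or missing dependency
            result.extend(pending)   # append the rest anyway, as the warning path does
            break
    return result

print(sort_by_deps({"core": [], "imgproc": ["core"], "python": ["imgproc", "core"]}))
# -> ['core', 'imgproc', 'python']
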
@@ -645,13 +681,43 @@ macro(_ocv_create_module)
get_native_precompiled_header(${the_module} precomp.hpp)
endif()
set(sub_objs "")
set(sub_links "")
set(cuda_objs "")
if (OPENCV_MODULE_${the_module}_CHILDREN)
status("Complex module ${the_module}")
foreach (m ${OPENCV_MODULE_${the_module}_CHILDREN})
if (BUILD_${m} AND TARGET ${m}_object)
get_target_property(_sub_links ${m} LINK_LIBRARIES)
list(APPEND sub_objs $<TARGET_OBJECTS:${m}_object>)
list(APPEND sub_links ${_sub_links})
status(" + ${m}")
else()
status(" - ${m}")
endif()
list(APPEND cuda_objs ${OPENCV_MODULE_${m}_CUDA_OBJECTS})
endforeach()
endif()
ocv_add_library(${the_module} ${OPENCV_MODULE_TYPE} ${OPENCV_MODULE_${the_module}_HEADERS} ${OPENCV_MODULE_${the_module}_SOURCES}
"${OPENCV_CONFIG_FILE_INCLUDE_DIR}/cvconfig.h" "${OPENCV_CONFIG_FILE_INCLUDE_DIR}/opencv2/opencv_modules.hpp"
${${the_module}_pch})
if(NOT the_module STREQUAL opencv_ts)
set_target_properties(${the_module} PROPERTIES COMPILE_DEFINITIONS OPENCV_NOSTL)
${${the_module}_pch} ${sub_objs})
if (cuda_objs)
target_link_libraries(${the_module} ${cuda_objs})
endif()
# TODO: is it needed?
if (sub_links)
ocv_list_filterout(sub_links "^opencv_")
ocv_list_unique(sub_links)
target_link_libraries(${the_module} ${sub_links})
endif()
unset(sub_objs)
unset(sub_links)
unset(cuda_objs)
ocv_target_link_libraries(${the_module} ${OPENCV_MODULE_${the_module}_DEPS_TO_LINK})
ocv_target_link_libraries(${the_module} LINK_INTERFACE_LIBRARIES ${OPENCV_MODULE_${the_module}_DEPS_TO_LINK})
ocv_target_link_libraries(${the_module} ${OPENCV_MODULE_${the_module}_DEPS_EXT} ${OPENCV_LINKER_LIBS} ${IPP_LIBS} ${ARGN})
@@ -686,6 +752,7 @@ macro(_ocv_create_module)
if((NOT DEFINED OPENCV_MODULE_TYPE AND BUILD_SHARED_LIBS)
OR (DEFINED OPENCV_MODULE_TYPE AND OPENCV_MODULE_TYPE STREQUAL SHARED))
set_target_properties(${the_module} PROPERTIES COMPILE_DEFINITIONS CVAPI_EXPORTS)
set_target_properties(${the_module} PROPERTIES DEFINE_SYMBOL CVAPI_EXPORTS)
endif()
@@ -696,22 +763,37 @@ macro(_ocv_create_module)
set_target_properties(${the_module} PROPERTIES LINK_FLAGS "/NODEFAULTLIB:libc /DEBUG")
endif()
ocv_install_target(${the_module} EXPORT OpenCVModules
ocv_install_target(${the_module} EXPORT OpenCVModules OPTIONAL
RUNTIME DESTINATION ${OPENCV_BIN_INSTALL_PATH} COMPONENT libs
LIBRARY DESTINATION ${OPENCV_LIB_INSTALL_PATH} COMPONENT libs
ARCHIVE DESTINATION ${OPENCV_LIB_INSTALL_PATH} COMPONENT dev
)
# only "public" headers need to be installed
if(OPENCV_MODULE_${the_module}_HEADERS AND ";${OPENCV_MODULES_PUBLIC};" MATCHES ";${the_module};")
foreach(hdr ${OPENCV_MODULE_${the_module}_HEADERS})
string(REGEX REPLACE "^.*opencv2/" "opencv2/" hdr2 "${hdr}")
if(NOT hdr2 MATCHES "opencv2/${the_module}/private.*" AND hdr2 MATCHES "^(opencv2/?.*)/[^/]+.h(..)?$" )
install(FILES ${hdr} DESTINATION "${OPENCV_INCLUDE_INSTALL_PATH}/${CMAKE_MATCH_1}" COMPONENT dev)
endif()
endforeach()
endif()
foreach(m ${OPENCV_MODULE_${the_module}_CHILDREN} ${the_module})
# only "public" headers need to be installed
if(OPENCV_MODULE_${m}_HEADERS AND ";${OPENCV_MODULES_PUBLIC};" MATCHES ";${m};")
foreach(hdr ${OPENCV_MODULE_${m}_HEADERS})
string(REGEX REPLACE "^.*opencv2/" "opencv2/" hdr2 "${hdr}")
if(NOT hdr2 MATCHES "opencv2/${m}/private.*" AND hdr2 MATCHES "^(opencv2/?.*)/[^/]+.h(..)?$" )
install(FILES ${hdr} OPTIONAL DESTINATION "${OPENCV_INCLUDE_INSTALL_PATH}/${CMAKE_MATCH_1}" COMPONENT dev)
endif()
endforeach()
endif()
endforeach()
_ocv_add_precompiled_headers(${the_module})
if (TARGET ${the_module}_object)
# copy COMPILE_DEFINITIONS
get_target_property(main_defs ${the_module} COMPILE_DEFINITIONS)
if (main_defs)
set_target_properties(${the_module}_object PROPERTIES COMPILE_DEFINITIONS ${main_defs})
endif()
# use same PCH
if (TARGET pch_Generate_${the_module})
add_dependencies(${the_module}_object pch_Generate_${the_module} )
endif()
endif()
endmacro()
# opencv precompiled headers macro (can add pch to modules and tests)
@@ -735,7 +817,7 @@ endmacro()
# short command for adding simple OpenCV module
# see ocv_add_module for argument details
# Usage:
# ocv_define_module(module_name [INTERNAL] [EXCLUDE_CUDA] [REQUIRED] [<list of dependencies>] [OPTIONAL <list of optional dependencies>])
# ocv_define_module(module_name [INTERNAL] [EXCLUDE_CUDA] [REQUIRED] [<list of dependencies>] [OPTIONAL <list of optional dependencies>] [WRAP <list of wrappers>])
macro(ocv_define_module module_name)
ocv_debug_message("ocv_define_module(" ${module_name} ${ARGN} ")")
set(_argn ${ARGN})

cmake/OpenCVUtils.cmake: 25 changes
@@ -756,6 +756,9 @@ endfunction()
function(_ocv_append_target_includes target)
if(DEFINED OCV_TARGET_INCLUDE_DIRS_${target})
target_include_directories(${target} PRIVATE ${OCV_TARGET_INCLUDE_DIRS_${target}})
if (TARGET ${target}_object)
target_include_directories(${target}_object PRIVATE ${OCV_TARGET_INCLUDE_DIRS_${target}})
endif()
unset(OCV_TARGET_INCLUDE_DIRS_${target} CACHE)
endif()
endfunction()
@@ -780,8 +783,30 @@ function(ocv_add_library target)
ocv_include_directories(${CUDA_INCLUDE_DIRS})
ocv_cuda_compile(cuda_objs ${lib_cuda_srcs} ${lib_cuda_hdrs})
endif()
set(OPENCV_MODULE_${target}_CUDA_OBJECTS ${cuda_objs} CACHE INTERNAL "Compiled CUDA object files")
endif()
add_library(${target} ${ARGN} ${cuda_objs})
# Add OBJECT library (added in cmake 2.8.8) to use in compound modules
if (NOT CMAKE_VERSION VERSION_LESS "2.8.8"
AND NOT OPENCV_MODULE_${target}_CHILDREN
AND NOT OPENCV_MODULE_${target}_CLASS STREQUAL "BINDINGS"
AND NOT ${target} STREQUAL "opencv_ts"
)
set(sources ${ARGN})
ocv_list_filterout(sources "\\\\.(cl|inc)$")
add_library(${target}_object OBJECT ${sources})
set_target_properties(${target}_object PROPERTIES
EXCLUDE_FROM_ALL True
EXCLUDE_FROM_DEFAULT_BUILD True
POSITION_INDEPENDENT_CODE True
)
if (ENABLE_SOLUTION_FOLDERS)
set_target_properties(${target}_object PROPERTIES FOLDER "object_libraries")
endif()
unset(sources)
endif()
_ocv_append_target_includes(${target})
endfunction()

cmake/templates/OpenCVConfig.cmake.in: 17 changes
@@ -41,6 +41,21 @@
#
# ===================================================================================
# Search packages for host system instead of packages for target system.
# In case of cross compilation these macros should be defined by the toolchain file.
if(NOT COMMAND find_host_package)
macro(find_host_package)
find_package(${ARGN})
endmacro()
endif()
if(NOT COMMAND find_host_program)
macro(find_host_program)
find_program(${ARGN})
endmacro()
endif()
if(NOT DEFINED OpenCV_MODULES_SUFFIX)
if(ANDROID)
string(REPLACE - _ OpenCV_MODULES_SUFFIX "_${ANDROID_NDK_ABI_NAME}")
@@ -255,7 +270,7 @@ foreach(__opttype OPT DBG)
# CUDA
if(OpenCV_CUDA_VERSION)
if(NOT CUDA_FOUND)
find_package(CUDA ${OpenCV_CUDA_VERSION} EXACT REQUIRED)
find_host_package(CUDA ${OpenCV_CUDA_VERSION} EXACT REQUIRED)
else()
if(NOT CUDA_VERSION_STRING VERSION_EQUAL OpenCV_CUDA_VERSION)
message(FATAL_ERROR "OpenCV static library was compiled with CUDA ${OpenCV_CUDA_VERSION} support. Please, use the same version or rebuild OpenCV with CUDA ${CUDA_VERSION_STRING}")

cmake/templates/cvconfig.h.in: 3 changes
@@ -169,9 +169,6 @@
/* Win32 UI */
#cmakedefine HAVE_WIN32UI
/* Windows Runtime support */
#cmakedefine HAVE_WINRT
/* XIMEA camera support */
#cmakedefine HAVE_XIMEA

13 file diffs suppressed because they are too large (the data/haarcascades_cuda/*.xml cascades, items 24-36 in the file list above).

doc/CMakeLists.txt: 2 changes
@@ -38,7 +38,7 @@ endif(HAVE_DOC_GENERATOR)
if(BUILD_DOCS AND DOXYGEN_FOUND)
# not documented modules list
list(APPEND blacklist "ts" "java" "python2" "python3" "world")
list(APPEND blacklist "ts" "java" "python2" "python3" "world" "contrib_world")
# gathering headers
set(paths_include)

doc/Doxyfile.in: 8 changes
@@ -100,7 +100,7 @@ RECURSIVE = YES
EXCLUDE =
EXCLUDE_SYMLINKS = NO
EXCLUDE_PATTERNS = *.inl.hpp *.impl.hpp *_detail.hpp */cudev/**/detail/*.hpp
EXCLUDE_SYMBOLS = cv::DataType<*> int
EXCLUDE_SYMBOLS = cv::DataType<*> int void
EXAMPLE_PATH = @CMAKE_DOXYGEN_EXAMPLE_PATH@
EXAMPLE_PATTERNS = *
EXAMPLE_RECURSIVE = YES
@@ -243,7 +243,11 @@ PREDEFINED = __cplusplus=1 \
CV_NORETURN= \
CV_DEFAULT(x)=" = x" \
CV_NEON=1 \
FLANN_DEPRECATED=
FLANN_DEPRECATED= \
"CV_PURE_PROPERTY(type, name)= /** \@see set##name */ virtual type get##name() const = 0; /** \@copybrief get##name \@see get##name */ virtual void set##name(type val) = 0;" \
"CV_IMPL_PROPERTY(type, name, x)= /** \@see set##name */ virtual type get##name() const = 0; /** \@copybrief get##name \@see get##name */ virtual void set##name(type val) = 0;" \
"CV_IMPL_PROPERTY_S(type, name, x)= /** \@see set##name */ virtual type get##name() const = 0; /** \@copybrief get##name \@see get##name */ virtual void set##name(const type & val);" \
"CV_IMPL_PROPERTY_RO(type, name, x)= virtual type get##name() const;"
EXPAND_AS_DEFINED =
SKIP_FUNCTION_MACROS = YES
TAGFILES =

doc/py_tutorials/py_bindings/py_bindings_basics/py_bindings_basics.markdown: 2 changes
@@ -49,7 +49,7 @@ pyopencv_generated_*.h files). But there may be some basic OpenCV datatypes like Mat,
Size. They need to be extended manually. For example, a Mat type should be extended to Numpy array,
Size should be extended to a tuple of two integers etc. Similarly, there may be some complex
structs/classes/functions etc. which need to be extended manually. All such manual wrapper functions
are placed in modules/python/src2/pycv2.hpp.
are placed in modules/python/src2/cv2.cpp.
So now only thing left is the compilation of these wrapper files which gives us **cv2** module. So
when you call a function, say res = equalizeHist(img1,img2) in Python, you pass two numpy arrays and

doc/py_tutorials/py_imgproc/py_contours/py_contours_begin/py_contours_begin.markdown: 2 changes
@@ -30,7 +30,7 @@ import cv2
im = cv2.imread('test.jpg')
imgray = cv2.cvtColor(im,cv2.COLOR_BGR2GRAY)
ret,thresh = cv2.threshold(imgray,127,255,0)
contours, hierarchy = cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
im2, contours, hierarchy = cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
@endcode
See, there are three arguments in **cv2.findContours()** function, first one is source image, second
is contour retrieval mode, third is contour approximation method. And it outputs the contours and

doc/py_tutorials/py_imgproc/py_houghlines/py_houghlines.markdown: 12 changes
@@ -59,7 +59,7 @@ denotes they are the parameters of possible lines in the image. (Image courtesy:
![](images/houghlines2.jpg)
Hough Tranform in OpenCV
Hough Transform in OpenCV
=========================
Everything explained above is encapsulated in the OpenCV function, \*\*cv2.HoughLines()\*\*. It simply returns an array of \f$(\rho, \theta)\f$ values.
@ -78,7 +78,8 @@ gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
edges = cv2.Canny(gray,50,150,apertureSize = 3)
lines = cv2.HoughLines(edges,1,np.pi/180,200)
for rho,theta in lines[0]:
for line in lines:
rho,theta = line[0]
a = np.cos(theta)
b = np.sin(theta)
x0 = a*rho
@ -123,10 +124,9 @@ import numpy as np
img = cv2.imread('dave.jpg')
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
edges = cv2.Canny(gray,50,150,apertureSize = 3)
minLineLength = 100
maxLineGap = 10
lines = cv2.HoughLinesP(edges,1,np.pi/180,100,minLineLength,maxLineGap)
for x1,y1,x2,y2 in lines[0]:
lines = cv2.HoughLinesP(edges,1,np.pi/180,100,minLineLength=100,maxLineGap=10)
for line in lines:
x1,y1,x2,y2 = line[0]
cv2.line(img,(x1,y1),(x2,y2),(0,255,0),2)
cv2.imwrite('houghlines5.jpg',img)

@ -46,7 +46,7 @@ get the following equation:
where:
\f[f_x = \frac{\partial f}{\partial x} \; ; \; f_y = \frac{\partial f}{\partial x}\f]\f[u = \frac{dx}{dt} \; ; \; v = \frac{dy}{dt}\f]
\f[f_x = \frac{\partial f}{\partial x} \; ; \; f_y = \frac{\partial f}{\partial y}\f]\f[u = \frac{dx}{dt} \; ; \; v = \frac{dy}{dt}\f]
The above equation is called the Optical Flow equation. In it, we can find \f$f_x\f$ and \f$f_y\f$; they are the image
gradients. Similarly, \f$f_t\f$ is the gradient along time. But \f$(u,v)\f$ is unknown. We cannot solve this

@ -31,3 +31,7 @@ div.contents {
span.arrow {
height: 13px;
}
div.image img{
max-width: 900px;
}

@ -54,25 +54,19 @@ int main( int argc, char** argv )
if( !img_1.data || !img_2.data )
{ std::cout<< " --(!) Error reading images " << std::endl; return -1; }
//-- Step 1: Detect the keypoints using SURF Detector
//-- Step 1: Detect the keypoints using SURF Detector, compute the descriptors
int minHessian = 400;
SurfFeatureDetector detector( minHessian );
Ptr<SURF> detector = SURF::create();
detector->setMinHessian(minHessian);
std::vector<KeyPoint> keypoints_1, keypoints_2;
detector.detect( img_1, keypoints_1 );
detector.detect( img_2, keypoints_2 );
//-- Step 2: Calculate descriptors (feature vectors)
SurfDescriptorExtractor extractor;
Mat descriptors_1, descriptors_2;
extractor.compute( img_1, keypoints_1, descriptors_1 );
extractor.compute( img_2, keypoints_2, descriptors_2 );
detector->detectAndCompute( img_1, Mat(), keypoints_1, descriptors_1 );
detector->detectAndCompute( img_2, Mat(), keypoints_2, descriptors_2 );
//-- Step 3: Matching descriptor vectors using FLANN matcher
//-- Step 2: Matching descriptor vectors using FLANN matcher
FlannBasedMatcher matcher;
std::vector< DMatch > matches;
matcher.match( descriptors_1, descriptors_2, matches );
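For orientation, the whole updated pipeline in one place: a minimal, self-contained sketch of the OpenCV 3.0-style flow this diff introduces (the xfeatures2d header layout is assumed from the contrib repository; error handling omitted):
@code{.cpp}
#include "opencv2/features2d.hpp"
#include "opencv2/xfeatures2d.hpp"
using namespace cv;
using namespace cv::xfeatures2d;

void matchWithFlann( const Mat& img_1, const Mat& img_2 )
{
    //-- Step 1: detect keypoints and compute descriptors in a single call
    Ptr<SURF> detector = SURF::create( 400 ); // 400 = minHessian
    std::vector<KeyPoint> keypoints_1, keypoints_2;
    Mat descriptors_1, descriptors_2;
    detector->detectAndCompute( img_1, Mat(), keypoints_1, descriptors_1 );
    detector->detectAndCompute( img_2, Mat(), keypoints_2, descriptors_2 );

    //-- Step 2: match descriptor vectors using the FLANN matcher
    FlannBasedMatcher matcher;
    std::vector< DMatch > matches;
    matcher.match( descriptors_1, descriptors_2, matches );
}
@endcode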

@ -42,25 +42,18 @@ int main( int argc, char** argv )
if( !img_object.data || !img_scene.data )
{ std::cout<< " --(!) Error reading images " << std::endl; return -1; }
//-- Step 1: Detect the keypoints using SURF Detector
//-- Step 1: Detect the keypoints and extract descriptors using SURF
int minHessian = 400;
SurfFeatureDetector detector( minHessian );
Ptr<SURF> detector = SURF::create( minHessian );
std::vector<KeyPoint> keypoints_object, keypoints_scene;
detector.detect( img_object, keypoints_object );
detector.detect( img_scene, keypoints_scene );
//-- Step 2: Calculate descriptors (feature vectors)
SurfDescriptorExtractor extractor;
Mat descriptors_object, descriptors_scene;
extractor.compute( img_object, keypoints_object, descriptors_object );
extractor.compute( img_scene, keypoints_scene, descriptors_scene );
detector->detectAndCompute( img_object, Mat(), keypoints_object, descriptors_object );
detector->detectAndCompute( img_scene, Mat(), keypoints_scene, descriptors_scene );
//-- Step 3: Matching descriptor vectors using FLANN matcher
//-- Step 2: Matching descriptor vectors using FLANN matcher
FlannBasedMatcher matcher;
std::vector< DMatch > matches;
matcher.match( descriptors_object, descriptors_scene, matches );

@ -146,7 +146,7 @@ Explanation
int result_cols = img.cols - templ.cols + 1;
int result_rows = img.rows - templ.rows + 1;
result.create( result_cols, result_rows, CV_32FC1 );
result.create( result_rows, result_cols, CV_32FC1 );
@endcode
-# Perform the template matching operation:
@code{.cpp}
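// (hunk truncated here; a sketch of the call this step performs, assuming
//  match_method holds one of the cv::TemplateMatchModes)
matchTemplate( img, templ, result, match_method );
normalize( result, result, 0, 1, NORM_MINMAX, -1, Mat() );
@endcode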

@ -0,0 +1,86 @@
Extract horizontal and vertical lines by using morphological operations {#tutorial_moprh_lines_detection}
=============
Goal
----
In this tutorial you will learn how to:
- Apply two very common morphology operators (i.e. Dilation and Erosion), with the creation of custom kernels, in order to extract straight lines on the horizontal and vertical axes. For this purpose, you will use the following OpenCV functions:
- @ref cv::erode
- @ref cv::dilate
- @ref cv::getStructuringElement
in an example where your goal will be to extract the music notes from a music sheet.
Theory
------
### Morphology Operations
Morphology is a set of image processing operations that process images based on predefined *structuring elements*, also known as kernels. The value of each pixel in the output image is based on a comparison of the corresponding pixel in the input image with its neighbors. By choosing the size and shape of the kernel, you can construct a morphological operation that is sensitive to specific shapes in the input image.
Two of the most basic morphological operations are dilation and erosion. Dilation adds pixels to the boundaries of the objects in an image, while erosion does exactly the opposite. The number of pixels added or removed, respectively, depends on the size and shape of the structuring element used to process the image. In general, the rules these two operations follow are:
- __Dilation__: The value of the output pixel is the <b><em>maximum</em></b> value of all the pixels that fall within the structuring element's size and shape. For example, in a binary image, if any of the input pixels falling within the range of the kernel is set to the value 1, the corresponding pixel of the output image will be set to 1 as well. The same applies to any type of image (e.g. grayscale, RGB, etc.).
![Dilation on a Binary Image](images/morph21.gif)
![Dilation on a Grayscale Image](images/morph6.gif)
- __Erosion__: The converse applies to the erosion operation. The value of the output pixel is the <b><em>minimum</em></b> value of all the pixels that fall within the structuring element's size and shape. Look at the example figures below:
![Erosion on a Binary Image](images/morph211.png)
![Erosion on a Grayscale Image](images/morph61.png)
### Structuring Elements
As can be seen above, and in any morphological operation in general, the structuring element used to probe the input image is the most important part.
A structuring element is a matrix consisting of only 0's and 1's that can have any arbitrary shape and size. It is typically much smaller than the image being processed, and the pixels with values of 1 define the neighborhood. The center pixel of the structuring element, called the origin, identifies the pixel of interest -- the pixel being processed.
For example, the following illustrates a diamond-shaped structuring element of 7x7 size.
![A Diamond-Shaped Structuring Element and its Origin](images/morph12.gif)
A structuring element can have many common shapes and sizes, such as lines, diamonds, disks, periodic lines, and circles. You typically choose a structuring element with the same size and shape as the objects you want to process/extract from the input image. For example, to find lines in an image, create a linear structuring element as you will see later.
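As a concrete sketch (the kernel size and variable names here are illustrative, not the tutorial's exact values):
@code{.cpp}
// binary is the inverted binary image produced earlier in the tutorial
Mat horizontal = binary.clone();
int horizontalsize = horizontal.cols / 30;
// a 1-pixel-high horizontal line kernel
Mat horizontalStructure = getStructuringElement( MORPH_RECT, Size( horizontalsize, 1 ) );
// erode keeps only horizontal runs at least as long as the kernel; dilate restores their extent
erode( horizontal, horizontal, horizontalStructure, Point(-1, -1) );
dilate( horizontal, horizontal, horizontalStructure, Point(-1, -1) );
@endcode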
Code
----
The code for this tutorial is shown below. You can also download it from [here](https://github.com/Itseez/opencv/tree/master/samples/cpp/tutorial_code/ImgProc/Morphology_3.cpp).
@includelineno samples/cpp/tutorial_code/ImgProc/Morphology_3.cpp
Explanation / Result
--------------------
-# Load the source image and check if it is loaded without any problem, then show it:
@snippet samples/cpp/tutorial_code/ImgProc/Morphology_3.cpp load_image
![](images/src.png)
-# Then transform the image to grayscale, if it is not grayscale already:
@snippet samples/cpp/tutorial_code/ImgProc/Morphology_3.cpp gray
![](images/gray.png)
-# Afterwards, transform the grayscale image to binary. Notice the ~ symbol, which indicates that we use the inverse (i.e. bitwise_not) version of it:
@snippet samples/cpp/tutorial_code/ImgProc/Morphology_3.cpp bin
![](images/binary.png)
-# Now we are ready to apply morphological operations in order to extract the horizontal and vertical lines, and as a consequence separate the music notes from the music sheet. But first, let's initialize the output images that we will use for that purpose:
@snippet samples/cpp/tutorial_code/ImgProc/Morphology_3.cpp init
-# As we specified in the theory, in order to extract the object that we desire, we need to create the corresponding structuring element. Since we want to extract the horizontal lines here, a corresponding structuring element for that purpose will have the following shape:
![](images/linear_horiz.png)
and in the source code this is represented by the following code snippet:
@snippet samples/cpp/tutorial_code/ImgProc/Morphology_3.cpp horiz
![](images/horiz.png)
-# The same applies for the vertical lines, with the corresponding structuring element:
![](images/linear_vert.png)
and again this is represented as follows:
@snippet samples/cpp/tutorial_code/ImgProc/Morphology_3.cpp vert
![](images/vert.png)
-# As you can see, we are almost there. However, at this point you will notice that the edges of the notes are a bit rough. For that reason we need to refine the edges in order to obtain a smoother result:
@snippet samples/cpp/tutorial_code/ImgProc/Morphology_3.cpp smooth
![](images/smooth.png)

@ -27,6 +27,14 @@ In this section you will learn about the image processing (manipulation) functio
Here we investigate different morphology operators
- @subpage tutorial_moprh_lines_detection
*Compatibility:* \> OpenCV 2.0
*Author:* Theodore Tsesmelis
Here we will show how we can use different morphology operators to extract horizontal and vertical lines
- @subpage tutorial_pyramids
*Compatibility:* \> OpenCV 2.0

@ -0,0 +1,158 @@
Using OpenCV with biicode dependency manager {#tutorial_biicode}
============================================
Goals
-----
In this tutorial you will learn how to:
* Get started with OpenCV using biicode.
* Develop your own application in OpenCV with biicode.
* Switch between OpenCV versions.
What is biicode?
----------------
![](images/biicode.png)
[biicode](http://opencv.org/biicode.html) resolves and keeps track of dependencies and version compatibilities in C/C++ projects.
Using biicode's *hooks feature*, **getting started with OpenCV in C++ and C** is pretty straightforward. **Just write an include to the OpenCV headers** and biicode will retrieve and install OpenCV on your computer and configure your project.
Prerequisites
-------------
* biicode. Here is a [link to install it at any OS](http://www.biicode.com/downloads).
* Windows users: Any Visual Studio version (Visual Studio 12 preferred).
Explanation
-----------
### Example: Detect faces in images using the Objdetect module from OpenCV
Once biicode is installed, execute in your terminal/console:
@code{.bash}
$ bii init mycvproject
$ cd mycvproject
$ bii open diego/opencvex
@endcode
Windows users also execute:
@code{.bash}
$ bii cpp:configure -G "Visual Studio 12"
@endcode
Now execute ``bii cpp:build`` to build the project. **Note** that this can take a while, as it downloads and builds OpenCV. However, OpenCV is downloaded just once to the "user/.biicode" folder on your machine. If the OpenCV installation process fails, simply go there, delete the OpenCV files inside "user/.biicode", and repeat.
@code{.bash}
$ bii cpp:build
@endcode
Find your binaries in the bin folder:
@code{.bash}
$ cd bin
$ ./diego_opencvex_main
@endcode
![](images/biiapp.png)
@code{.bash}
$ ./diego_opencvex_mainfaces
@endcode
![](images/bii_lena.png)
### Developing your own application
**biicode works with the include headers in your source-code files**: it reads them and retrieves all the dependencies from its database. So it is as simple as typing:
@code{.cpp}
#include "diego/opencv/opencv/cv.h"
@endcode
in the headers of your ``.cpp`` file.
To start a new project using OpenCV, execute:
@code{.bash}
$ bii init mycvproject
$ cd mycvproject
@endcode
The next command creates a *myuser/myblock* folder inside "blocks" with a simple "Hello World" *main.cpp* in it. You can also do it manually:
@code{.bash}
$ bii new myuser/myblock --hello=cpp
@endcode
Now replace your *main.cpp* contents inside *blocks/myuser/myblock* with **your app code**.
Put the includes as:
@code{.cpp}
#include "diego/opencv/opencv/cv.h
@endcode
If you type:
@code{.bash}
$ bii deps
@endcode
You will see that ``opencv/cv.h`` is an "unresolved" dependency. You can find it with:
@code{.bash}
$ bii find
@endcode
Now, you can just `bii cpp:configure` and `bii cpp:build` your project as described above.
**To use regular include directives**, configure them in your **biicode.conf** file. Let your includes be:
@code{.cpp}
#include "opencv/cv.h"
@endcode
And write in your **biicode.conf**:
@code{.cpp}
[includes]
opencv/cv.h: diego/opencv
[requirements]
diego/opencv: 0
@endcode
### Switching OpenCV versions
If you want to try or develop your application against **OpenCV 2.4.10** and also against **3.0-beta**, change it in your **biicode.conf** file by simply switching the track in your `[requirements]`:
@code{.cpp}
[requirements]
diego/opencv: 0
@endcode
replace with:
@code{.cpp}
[requirements]
diego/opencv(beta): 0
@endcode
**Note** that the first time you switch to 3.0-beta, it will also take a while to download and build the 3.0-beta release. From that point you can change back and forth between versions, just modifying your *biicode.conf requirements*.
Find the hooks and examples:
* [OpenCV 2.4.10](http://www.biicode.com/diego/opencv)
* [OpenCV 3.0 beta](http://www.biicode.com/diego/diego/opencv/beta)
* [objdetect module from OpenCV](@ref tutorial_table_of_content_objdetect)
This is just an example of how it can be done with biicode Python hooks. Now that CMake file reuse is possible with biicode, it might be better to implement it with CMake, in order to get more control over the build of OpenCV.
Results and conclusion
----------------------
Installing OpenCV with biicode is straightforward on any OS.
Run any example like you just did with the *objdetect module* from OpenCV, or develop your own application. It only needs a *biicode.conf* file to get the OpenCV library working on your computer.
Switching between OpenCV versions is effortless too.
For any doubts or further information regarding biicode, feel free to ask at [Stackoverflow](http://stackoverflow.com/questions/tagged/biicode?sort=newest), biicode's [forum](http://forum.biicode.com/) or [ask biicode](http://web.biicode.com/contact-us/); we will be glad to help you.

@ -134,6 +134,14 @@ Additionally you can find very basic sample source code to introduce you to the
We will learn how to save an Image in OpenCV...plus a small conversion to grayscale
- @subpage tutorial_biicode
_Compatibility:_ \> OpenCV 2.4
_Author:_ biicode
We will learn how to set up and use OpenCV on Mac OS X, Linux and Windows
- @subpage tutorial_documentation
_Compatibility:_ \> OpenCV 3.0

@ -51,3 +51,11 @@ Output
------
![](images/output.png)
Changes for Xcode 5+ and iOS 8+
-----------------------------
With newer Xcode and iOS versions you need to watch out for some specific details:
- The *.m file in your project should be renamed to *.mm.
- You have to manually include AssetsLibrary.framework in your project, as this is no longer done by default.

@ -0,0 +1,133 @@
Introduction to Principal Component Analysis (PCA) {#tutorial_introduction_to_pca}
=======================================
Goal
----
In this tutorial you will learn how to:
- Use the OpenCV class @ref cv::PCA to calculate the orientation of an object.
What is PCA?
--------------
Principal Component Analysis (PCA) is a statistical procedure that extracts the most important features of a dataset.
![](images/pca_line.png)
Consider that you have a set of 2D points, as shown in the figure above. Each dimension corresponds to a feature you are interested in. Some might argue here that the points are placed in a random order. However, if you take a better look, you will see that there is a linear pattern (indicated by the blue line) which is hard to dismiss. A key point of PCA is Dimensionality Reduction: the process of reducing the number of dimensions of the given dataset. For example, in the above case it is possible to approximate the set of points by a single line and therefore reduce the dimensionality of the given points from 2D to 1D.
Moreover, you could also see that the points vary the most along the blue line, more than they vary along the Feature 1 or Feature 2 axes. This means that if you know the position of a point along the blue line you have more information about the point than if you only knew where it was on Feature 1 axis or Feature 2 axis.
Hence, PCA allows us to find the direction along which our data varies the most. In fact, the result of running PCA on the set of points in the diagram consists of 2 vectors called _eigenvectors_ which are the _principal components_ of the data set.
![](images/pca_eigen.png)
The size of each eigenvector is encoded in the corresponding eigenvalue and indicates how much the data vary along the principal component. The beginning of the eigenvectors is the center of all points in the data set. Applying PCA to an N-dimensional data set yields N N-dimensional eigenvectors, N eigenvalues and 1 N-dimensional center point. Enough theory, let's see how we can put these ideas into code.
How are the eigenvectors and eigenvalues computed?
--------------------------------------------------
The goal is to transform a given data set __X__ of dimension _p_ to an alternative data set __Y__ of smaller dimension _L_. Equivalently, we are seeking to find the matrix __Y__, where __Y__ is the _Karhunen–Loève transform_ (KLT) of matrix __X__:
\f[ \mathbf{Y} = \mathbb{K} \mathbb{L} \mathbb{T} \{\mathbf{X}\} \f]
__Organize the data set__
Suppose you have data comprising a set of observations of _p_ variables, and you want to reduce the data so that each observation can be described with only _L_ variables, _L_ < _p_. Suppose further, that the data are arranged as a set of _n_ data vectors \f$ x_1...x_n \f$ with each \f$ x_i \f$ representing a single grouped observation of the _p_ variables.
- Write \f$ x_1...x_n \f$ as row vectors, each of which has _p_ columns.
- Place the row vectors into a single matrix __X__ of dimensions \f$ n\times p \f$.
__Calculate the empirical mean__
- Find the empirical mean along each dimension \f$ j = 1, ..., p \f$.
- Place the calculated mean values into an empirical mean vector __u__ of dimensions \f$ p\times 1 \f$.
\f[ \mathbf{u[j]} = \frac{1}{n}\sum_{i=1}^{n}\mathbf{X[i,j]} \f]
__Calculate the deviations from the mean__
Mean subtraction is an integral part of the solution towards finding a principal component basis that minimizes the mean square error of approximating the data. Hence, we proceed by centering the data as follows:
- Subtract the empirical mean vector __u__ from each row of the data matrix __X__.
- Store mean-subtracted data in the \f$ n\times p \f$ matrix __B__.
\f[ \mathbf{B} = \mathbf{X} - \mathbf{h}\mathbf{u^{T}} \f]
where __h__ is an \f$ n\times 1 \f$ column vector of all 1s:
\f[ h[i] = 1, i = 1, ..., n \f]
__Find the covariance matrix__
- Find the \f$ p\times p \f$ empirical covariance matrix __C__ from the outer product of matrix __B__ with itself:
\f[ \mathbf{C} = \frac{1}{n-1} \mathbf{B^{*}} \cdot \mathbf{B} \f]
where * is the conjugate transpose operator. Note that if B consists entirely of real numbers, which is the case in many applications, the "conjugate transpose" is the same as the regular transpose.
__Find the eigenvectors and eigenvalues of the covariance matrix__
- Compute the matrix __V__ of eigenvectors which diagonalizes the covariance matrix __C__:
\f[ \mathbf{V^{-1}} \mathbf{C} \mathbf{V} = \mathbf{D} \f]
where __D__ is the diagonal matrix of eigenvalues of __C__.
- Matrix __D__ will take the form of a \f$ p \times p \f$ diagonal matrix:
\f[ D[k,l] = \left\{\begin{matrix} \lambda_k, k = l \\ 0, k \neq l \end{matrix}\right. \f]
here, \f$ \lambda_j \f$ is the _j_-th eigenvalue of the covariance matrix __C__
- Matrix __V__, also of dimension _p_ x _p_, contains _p_ column vectors, each of length _p_, which represent the _p_ eigenvectors of the covariance matrix __C__.
- The eigenvalues and eigenvectors are ordered and paired: the _j_-th eigenvalue corresponds to the _j_-th eigenvector.
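In OpenCV all of the above is wrapped by the @ref cv::PCA class; a minimal sketch (the matrix contents are illustrative):
@code{.cpp}
// each row is one observation, each column one variable (DATA_AS_ROW)
Mat data = (Mat_<float>(4, 2) << 1.0f, 2.1f,
                                 2.0f, 3.9f,
                                 3.0f, 6.2f,
                                 4.0f, 7.8f);
PCA pca(data, Mat(), PCA::DATA_AS_ROW); // empty Mat(): the mean is computed internally
Mat mean = pca.mean;                    // the empirical mean vector u
Mat eigenvectors = pca.eigenvectors;    // rows are the principal components
Mat eigenvalues = pca.eigenvalues;      // the diagonal of D, ordered and paired
@endcode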
@note sources [[1]](https://robospace.wordpress.com/2013/10/09/object-orientation-principal-component-analysis-opencv/), [[2]](http://en.wikipedia.org/wiki/Principal_component_analysis) and special thanks to Svetlin Penkov for the original tutorial.
Source Code
-----------
The code for this tutorial is shown below. You can also download it from
[here](https://github.com/Itseez/opencv/tree/master/samples/cpp/tutorial_code/ml/introduction_to_pca/introduction_to_pca.cpp).
@includelineno cpp/tutorial_code/ml/introduction_to_pca/introduction_to_pca.cpp
@note Another example using PCA for dimensionality reduction while maintaining an amount of variance can be found at [opencv_source_code/samples/cpp/pca.cpp](https://github.com/Itseez/opencv/tree/master/samples/cpp/pca.cpp)
Explanation
-----------
-# __Read image and convert it to binary__
Here we apply the necessary pre-processing procedures in order to be able to detect the objects of interest.
@snippet samples/cpp/tutorial_code/ml/introduction_to_pca/introduction_to_pca.cpp pre-process
-# __Extract objects of interest__
Then find and filter contours by size and obtain the orientation of the remaining ones.
@snippet samples/cpp/tutorial_code/ml/introduction_to_pca/introduction_to_pca.cpp contours
-# __Extract orientation__
Orientation is extracted by calling the getOrientation() function, which performs the whole PCA procedure.
@snippet samples/cpp/tutorial_code/ml/introduction_to_pca/introduction_to_pca.cpp pca
First, the data need to be arranged in a matrix of size n x 2, where n is the number of data points we have. Then we can perform the PCA analysis. The calculated mean (i.e. center of mass) is stored in the _cntr_ variable, and the eigenvectors and eigenvalues are stored in the corresponding std::vector's.
-# __Visualize result__
The final result is visualized through the drawAxis() function, where the principal components are drawn as lines, and each eigenvector is multiplied by its eigenvalue and translated to the mean position.
@snippet samples/cpp/tutorial_code/ml/introduction_to_pca/introduction_to_pca.cpp visualization
@snippet samples/cpp/tutorial_code/ml/introduction_to_pca/introduction_to_pca.cpp visualization1
Results
-------
The code opens an image, finds the orientation of the detected objects of interest, and then visualizes the result by drawing the contours of the detected objects of interest, the center point, and the x and y axes of the extracted orientation.
![](images/pca_test1.jpg)
![](images/output.png)

@ -1,8 +1,6 @@
Introduction to Support Vector Machines {#tutorial_introduction_to_svm}
=======================================
@todo update this tutorial
Goal
----
@ -31,13 +29,11 @@ understand that this is done only because our intuition is better built from exa
to imagine. However, the same concepts apply to tasks where the examples to classify lie in a space
whose dimension is higher than two.
In the above picture you can see that there exists multiple
lines that offer a solution to the problem. Is any of them better than the others? We can
intuitively define a criterion to estimate the worth of the lines:
- A line is bad if it passes too close to the points because it will be noise sensitive and it will
not generalize correctly. Therefore, our goal should be to find the line passing as far as
possible from all points.
In the above picture you can see that there exists multiple lines that offer a solution to the
problem. Is any of them better than the others? We can intuitively define a criterion to estimate
the worth of the lines: <em> A line is bad if it passes too close to the points because it will be
noise sensitive and it will not generalize correctly. </em> Therefore, our goal should be to find
the line passing as far as possible from all points.
Then, the operation of the SVM algorithm is based on finding the hyperplane that gives the largest
minimum distance to the training examples. Twice, this distance receives the important name of
@ -57,7 +53,7 @@ where \f$\beta\f$ is known as the *weight vector* and \f$\beta_{0}\f$ as the *bi
@sa You can find a more in-depth description of this and of hyperplanes in section 4.5 (*Separating
Hyperplanes*) of the book: *Elements of Statistical Learning* by T. Hastie, R. Tibshirani and J. H.
Friedman.
Friedman (@cite HTF01).
The optimal hyperplane can be represented in an infinite number of different ways by
scaling of \f$\beta\f$ and \f$\beta_{0}\f$. As a matter of convention, among all the possible
@ -107,17 +103,14 @@ Explanation
The training data of this exercise is formed by a set of labeled 2D-points that belong to one of
two different classes; one of the classes consists of one point and the other of three points.
@code{.cpp}
float labels[4] = {1.0, -1.0, -1.0, -1.0};
float trainingData[4][2] = {{501, 10}, {255, 10}, {501, 255}, {10, 501}};
@endcode
@snippet cpp/tutorial_code/ml/introduction_to_svm/introduction_to_svm.cpp setup1
The function @ref cv::ml::SVM::train that will be used afterwards requires the training data to be
stored as @ref cv::Mat objects of floats. Therefore, we create these objects from the arrays
defined above:
@code{.cpp}
Mat trainingDataMat(4, 2, CV_32FC1, trainingData);
Mat labelsMat (4, 1, CV_32FC1, labels);
@endcode
@snippet cpp/tutorial_code/ml/introduction_to_svm/introduction_to_svm.cpp setup2
-# **Set up SVM's parameters**
@ -126,42 +119,35 @@ Explanation
used in a wide variety of problems (e.g. problems with non-linearly separable data, a SVM using
a kernel function to raise the dimensionality of the examples, etc). As a consequence of this,
we have to define some parameters before training the SVM. These parameters are stored in an
object of the class @ref cv::ml::SVM::Params .
@code{.cpp}
ml::SVM::Params params;
params.svmType = ml::SVM::C_SVC;
params.kernelType = ml::SVM::LINEAR;
params.termCrit = TermCriteria(TermCriteria::MAX_ITER, 100, 1e-6);
@endcode
- *Type of SVM*. We choose here the type **ml::SVM::C_SVC** that can be used for n-class
classification (n \f$\geq\f$ 2). This parameter is defined in the attribute
*ml::SVM::Params.svmType*.
The important feature of the type of SVM **CvSVM::C_SVC** deals with imperfect separation of classes (i.e. when the training data is non-linearly separable). This feature is not important here since the data is linearly separable and we chose this SVM type only for being the most commonly used.
object of the class @ref cv::ml::SVM.
@snippet cpp/tutorial_code/ml/introduction_to_svm/introduction_to_svm.cpp init
Here:
- *Type of SVM*. We choose here the type @ref cv::ml::SVM::C_SVC "C_SVC" that can be used for
n-class classification (n \f$\geq\f$ 2). The important feature of this type is that it deals
with imperfect separation of classes (i.e. when the training data is non-linearly separable).
This feature is not important here since the data is linearly separable and we chose this SVM
type only for being the most commonly used.
- *Type of SVM kernel*. We have not talked about kernel functions since they are not
interesting for the training data we are dealing with. Nevertheless, let's explain briefly
now the main idea behind a kernel function. It is a mapping done to the training data to
improve its resemblance to a linearly separable set of data. This mapping consists of
increasing the dimensionality of the data and is done efficiently using a kernel function.
We choose here the type **ml::SVM::LINEAR** which means that no mapping is done. This
parameter is defined in the attribute *ml::SVMParams.kernel_type*.
interesting for the training data we are dealing with. Nevertheless, let's explain briefly now
the main idea behind a kernel function. It is a mapping done to the training data to improve
its resemblance to a linearly separable set of data. This mapping consists of increasing the
dimensionality of the data and is done efficiently using a kernel function. We choose here the
type @ref cv::ml::SVM::LINEAR "LINEAR" which means that no mapping is done. This parameter is
defined using cv::ml::SVM::setKernel.
- *Termination criteria of the algorithm*. The SVM training procedure is implemented by solving a
constrained quadratic optimization problem in an **iterative** fashion. Here we specify a
maximum number of iterations and a tolerance error so we allow the algorithm to finish in
fewer steps even if the optimal hyperplane has not been computed yet. This
parameter is defined in a structure @ref cv::cvTermCriteria .
parameter is defined in a structure @ref cv::TermCriteria .
-# **Train the SVM**
We call the method @ref cv::ml::SVM::train to build the SVM model.
We call the method
[CvSVM::train](http://docs.opencv.org/modules/ml/doc/support_vector_machines.html#cvsvm-train)
to build the SVM model (a combined sketch of the setup and training calls follows this list).
@code{.cpp}
CvSVM SVM;
SVM.train(trainingDataMat, labelsMat, Mat(), Mat(), params);
@endcode
@snippet cpp/tutorial_code/ml/introduction_to_svm/introduction_to_svm.cpp train
-# **Regions classified by the SVM**
@ -170,22 +156,8 @@ Explanation
by the SVM. In other words, an image is traversed interpreting its pixels as points of the
Cartesian plane. Each of the points is colored depending on the class predicted by the SVM; in
green if it is the class with label 1 and in blue if it is the class with label -1.
@code{.cpp}
Vec3b green(0,255,0), blue (255,0,0);
for (int i = 0; i < image.rows; ++i)
for (int j = 0; j < image.cols; ++j)
{
Mat sampleMat = (Mat_<float>(1,2) << i,j);
float response = SVM.predict(sampleMat);
if (response == 1)
image.at<Vec3b>(j, i) = green;
else
if (response == -1)
image.at<Vec3b>(j, i) = blue;
}
@endcode
@snippet cpp/tutorial_code/ml/introduction_to_svm/introduction_to_svm.cpp show
-# **Support vectors**
@ -193,15 +165,8 @@ Explanation
The method @ref cv::ml::SVM::getSupportVectors obtains all of the support
vectors. We have used this method here to find the training examples that are
support vectors and highlight them.
@code{.cpp}
int c = SVM.get_support_vector_count();
for (int i = 0; i < c; ++i)
{
const float* v = SVM.get_support_vector(i); // get and then highlight with grayscale
circle( image, Point( (int) v[0], (int) v[1]), 6, Scalar(128, 128, 128), thickness, lineType);
}
@endcode
@snippet cpp/tutorial_code/ml/introduction_to_svm/introduction_to_svm.cpp show_vectors
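Putting the setup and training steps together, a minimal sketch of the setter-based ml API referenced above (the training arrays mirror the ones shown earlier; cv::ml::ROW_SAMPLE tells train() that each row is one sample):
@code{.cpp}
using namespace cv::ml;
int labels[4] = {1, -1, -1, -1};
float trainingData[4][2] = { {501, 10}, {255, 10}, {501, 255}, {10, 501} };
Mat trainingDataMat(4, 2, CV_32FC1, trainingData);
Mat labelsMat(4, 1, CV_32SC1, labels); // class labels are integers in the new API

Ptr<SVM> svm = SVM::create();
svm->setType(SVM::C_SVC);
svm->setKernel(SVM::LINEAR);
svm->setTermCriteria(TermCriteria(TermCriteria::MAX_ITER, 100, 1e-6));
svm->train(trainingDataMat, ROW_SAMPLE, labelsMat);
@endcode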
Results
-------

@ -1,8 +1,6 @@
Support Vector Machines for Non-Linearly Separable Data {#tutorial_non_linear_svms}
=======================================================
@todo update this tutorial
Goal
----
@ -10,21 +8,20 @@ In this tutorial you will learn how to:
- Define the optimization problem for SVMs when it is not possible to separate linearly the
training data.
- How to configure the parameters in @ref cv::ml::SVM::Params to adapt your SVM for this class of
problems.
- How to configure the parameters to adapt your SVM for this class of problems.
Motivation
----------
Why is it interesting to extend the SVM optimization problem in order to handle non-linearly separable
training data? Most of the applications in which SVMs are used in computer vision require a more
powerful tool than a simple linear classifier. This stems from the fact that in these tasks **the
training data can be rarely separated using an hyperplane**.
powerful tool than a simple linear classifier. This stems from the fact that in these tasks __the
training data can rarely be separated using a hyperplane__.
Consider one of these tasks, for example, face detection. The training data in this case is composed
by a set of images that are faces and another set of images that are non-faces (*every other thing
in the world except from faces*). This training data is too complex so as to find a representation
of each sample (*feature vector*) that could make the whole set of faces linearly separable from the
by a set of images that are faces and another set of images that are non-faces (_every other thing
in the world except faces_). This training data is too complex to find a representation
of each sample (_feature vector_) that could make the whole set of faces linearly separable from the
whole set of non-faces.
Extension of the Optimization Problem
@ -32,13 +29,13 @@ Extension of the Optimization Problem
Remember that using SVMs we obtain a separating hyperplane. Therefore, since the training data is
now non-linearly separable, we must admit that the hyperplane found will misclassify some of the
samples. This *misclassification* is a new variable in the optimization that must be taken into
samples. This _misclassification_ is a new variable in the optimization that must be taken into
account. The new model has to include both the old requirement of finding the hyperplane that gives
the biggest margin and the new one of generalizing the training data correctly by not allowing too
many classification errors.
We start here from the formulation of the optimization problem of finding the hyperplane which
maximizes the **margin** (this is explained in the previous tutorial (@ref tutorial_introduction_to_svm):
maximizes the __margin__ (this is explained in the previous tutorial @ref tutorial_introduction_to_svm):
\f[\min_{\beta, \beta_{0}} L(\beta) = \frac{1}{2}||\beta||^{2} \text{ subject to } y_{i}(\beta^{T} x_{i} + \beta_{0}) \geq 1 \text{ } \forall i\f]
@ -50,8 +47,8 @@ constant times the number of misclassification errors in the training data, i.e.
However, this one is not a very good solution since, among some other reasons, we do not distinguish
between samples that are misclassified with a small distance to their appropriate decision region or
samples that are not. Therefore, a better solution will take into account the *distance of the
misclassified samples to their correct decision regions*, i.e.:
samples that are not. Therefore, a better solution will take into account the _distance of the
misclassified samples to their correct decision regions_, i.e.:
\f[\min ||\beta||^{2} + C \text{(distance of misclassified samples to their correct regions)}\f]
@ -68,7 +65,7 @@ distances of the rest of the samples are zero since they lay already in their co
region.
The red and blue lines that appear on the picture are the margins to each one of the
decision regions. It is very **important** to realize that each of the \f$\xi_{i}\f$ goes from a
decision regions. It is very __important__ to realize that each of the \f$\xi_{i}\f$ goes from a
misclassified training sample to the margin of its appropriate region.
Finally, the new formulation for the optimization problem is:
@ -79,26 +76,25 @@ How should the parameter C be chosen? It is obvious that the answer to this ques
the training data is distributed. Although there is no general answer, it is useful to take into
account these rules:
- Large values of C give solutions with *less misclassification errors* but a *smaller margin*.
- Large values of C give solutions with _less misclassification errors_ but a _smaller margin_.
Consider that in this case it is expensive to make misclassification errors. Since the aim of
the optimization is to minimize the argument, few misclassification errors are allowed.
- Small values of C give solutions with *bigger margin* and *more classification errors*. In this
- Small values of C give solutions with _bigger margin_ and _more classification errors_. In this
case the minimization does not consider that much the term of the sum so it focuses more on
finding a hyperplane with big margin.
Source Code
-----------
You may also find the source code and these video file in the
`samples/cpp/tutorial_code/gpu/non_linear_svms/non_linear_svms` folder of the OpenCV source library
or [download it from here ](https://github.com/Itseez/opencv/tree/master/samples/cpp/tutorial_code/ml/non_linear_svms/non_linear_svms.cpp).
You may also find the source code in `samples/cpp/tutorial_code/ml/non_linear_svms` folder of the OpenCV source library or
[download it from here](https://github.com/Itseez/opencv/tree/master/samples/cpp/tutorial_code/ml/non_linear_svms/non_linear_svms.cpp).
@includelineno cpp/tutorial_code/ml/non_linear_svms/non_linear_svms.cpp
Explanation
-----------
-# **Set up the training data**
-# __Set up the training data__
The training data of this exercise is formed by a set of labeled 2D-points that belong to one of
two different classes. To make the exercise more appealing, the training data is generated
@ -107,136 +103,67 @@ Explanation
We have divided the generation of the training data into two main parts.
In the first part we generate data for both classes that is linearly separable.
@code{.cpp}
// Generate random points for the class 1
Mat trainClass = trainData.rowRange(0, nLinearSamples);
// The x coordinate of the points is in [0, 0.4)
Mat c = trainClass.colRange(0, 1);
rng.fill(c, RNG::UNIFORM, Scalar(1), Scalar(0.4 * WIDTH));
// The y coordinate of the points is in [0, 1)
c = trainClass.colRange(1,2);
rng.fill(c, RNG::UNIFORM, Scalar(1), Scalar(HEIGHT));
// Generate random points for the class 2
trainClass = trainData.rowRange(2*NTRAINING_SAMPLES-nLinearSamples, 2*NTRAINING_SAMPLES);
// The x coordinate of the points is in [0.6, 1]
c = trainClass.colRange(0 , 1);
rng.fill(c, RNG::UNIFORM, Scalar(0.6*WIDTH), Scalar(WIDTH));
// The y coordinate of the points is in [0, 1)
c = trainClass.colRange(1,2);
rng.fill(c, RNG::UNIFORM, Scalar(1), Scalar(HEIGHT));
@endcode
@snippet cpp/tutorial_code/ml/non_linear_svms/non_linear_svms.cpp setup1
In the second part we create data for both classes that is non-linearly separable, data that
overlaps.
@code{.cpp}
// Generate random points for the classes 1 and 2
trainClass = trainData.rowRange( nLinearSamples, 2*NTRAINING_SAMPLES-nLinearSamples);
// The x coordinate of the points is in [0.4, 0.6)
c = trainClass.colRange(0,1);
rng.fill(c, RNG::UNIFORM, Scalar(0.4*WIDTH), Scalar(0.6*WIDTH));
// The y coordinate of the points is in [0, 1)
c = trainClass.colRange(1,2);
rng.fill(c, RNG::UNIFORM, Scalar(1), Scalar(HEIGHT));
@endcode
-# **Set up SVM's parameters**
@sa
In the previous tutorial @ref tutorial_introduction_to_svm there is an explanation of the atributes of the
class @ref cv::ml::SVM::Params that we configure here before training the SVM.
@code{.cpp}
CvSVMParams params;
params.svm_type = SVM::C_SVC;
params.C = 0.1;
params.kernel_type = SVM::LINEAR;
params.term_crit = TermCriteria(TermCriteria::ITER, (int)1e7, 1e-6);
@endcode
@snippet cpp/tutorial_code/ml/non_linear_svms/non_linear_svms.cpp setup2
-# __Set up SVM's parameters__
@note In the previous tutorial @ref tutorial_introduction_to_svm there is an explanation of the
attributes of the class @ref cv::ml::SVM that we configure here before training the SVM.
@snippet cpp/tutorial_code/ml/non_linear_svms/non_linear_svms.cpp init
There are just two differences between the configuration we do here and the one that was done in
the previous tutorial (tutorial_introduction_to_svm) that we use as reference.
the previous tutorial (@ref tutorial_introduction_to_svm) that we use as reference.
- *CvSVM::C_SVC*. We chose here a small value of this parameter in order not to punish too much
the misclassification errors in the optimization. The idea of doing this stems from the will
of obtaining a solution close to the one intuitively expected. However, we recommend to get a
- _C_. We chose here a small value of this parameter in order not to punish too much the
misclassification errors in the optimization. The idea of doing this stems from the will of
obtaining a solution close to the one intuitively expected. However, we recommend getting a
better insight into the problem by making adjustments to this parameter.
@note Here there are just very few points in the overlapping region between classes, giving a smaller value to **FRAC_LINEAR_SEP** the density of points can be incremented and the impact of the parameter **CvSVM::C_SVC** explored deeply.
@note In this case there are just very few points in the overlapping region between classes.
By giving a smaller value to __FRAC_LINEAR_SEP__ the density of points can be incremented and the
impact of the parameter _C_ explored deeply.
- *Termination Criteria of the algorithm*. The maximum number of iterations has to be
- _Termination Criteria of the algorithm_. The maximum number of iterations has to be
increased considerably in order to correctly solve a problem with non-linearly separable
training data. In particular, we have increased this value by five orders of magnitude (see the parameter sketch after this list).
-# **Train the SVM**
-# __Train the SVM__
We call the method @ref cv::ml::SVM::train to build the SVM model. Watch out: the training
process may take quite a long time. Have patience when you run the program.
@code{.cpp}
CvSVM svm;
svm.train(trainData, labels, Mat(), Mat(), params);
@endcode
-# **Show the Decision Regions**
@snippet cpp/tutorial_code/ml/non_linear_svms/non_linear_svms.cpp train
-# __Show the Decision Regions__
The method @ref cv::ml::SVM::predict is used to classify an input sample using a trained SVM. In
this example we have used this method in order to color the space depending on the prediction done
by the SVM. In other words, an image is traversed interpreting its pixels as points of the
Cartesian plane. Each of the points is colored depending on the class predicted by the SVM; in
dark green if it is the class with label 1 and in dark blue if it is the class with label 2.
@code{.cpp}
Vec3b green(0,100,0), blue (100,0,0);
for (int i = 0; i < I.rows; ++i)
for (int j = 0; j < I.cols; ++j)
{
Mat sampleMat = (Mat_<float>(1,2) << i, j);
float response = svm.predict(sampleMat);
if (response == 1) I.at<Vec3b>(j, i) = green;
else if (response == 2) I.at<Vec3b>(j, i) = blue;
}
@endcode
@snippet cpp/tutorial_code/ml/non_linear_svms/non_linear_svms.cpp show
-# **Show the training data**
-# __Show the training data__
The method @ref cv::circle is used to show the samples that compose the training data. The samples
of the class labeled with 1 are shown in light green and in light blue the samples of the class
labeled with 2.
@code{.cpp}
int thick = -1;
int lineType = 8;
float px, py;
// Class 1
for (int i = 0; i < NTRAINING_SAMPLES; ++i)
{
px = trainData.at<float>(i,0);
py = trainData.at<float>(i,1);
circle(I, Point( (int) px, (int) py ), 3, Scalar(0, 255, 0), thick, lineType);
}
// Class 2
for (int i = NTRAINING_SAMPLES; i <2*NTRAINING_SAMPLES; ++i)
{
px = trainData.at<float>(i,0);
py = trainData.at<float>(i,1);
circle(I, Point( (int) px, (int) py ), 3, Scalar(255, 0, 0), thick, lineType);
}
@endcode
-# **Support vectors**
@snippet cpp/tutorial_code/ml/non_linear_svms/non_linear_svms.cpp show_data
-# __Support vectors__
We use here a couple of methods to obtain information about the support vectors. The method
@ref cv::ml::SVM::getSupportVectors obtain all support vectors.
We have used this methods here to find the training examples that are
support vectors and highlight them.
@code{.cpp}
thick = 2;
lineType = 8;
int x = svm.get_support_vector_count();
for (int i = 0; i < x; ++i)
{
const float* v = svm.get_support_vector(i);
circle( I, Point( (int) v[0], (int) v[1]), 6, Scalar(128, 128, 128), thick, lineType);
}
@endcode
@ref cv::ml::SVM::getSupportVectors obtains all support vectors. We have used this method here
to find the training examples that are support vectors and highlight them.
@snippet cpp/tutorial_code/ml/non_linear_svms/non_linear_svms.cpp show_vectors
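For reference, a minimal sketch of the two parameter differences called out above (a small C and a much larger iteration budget), using the same setter-based interface:
@code{.cpp}
Ptr<SVM> svm = SVM::create();
svm->setType(SVM::C_SVC);
svm->setC(0.1); // small C: misclassification errors are punished lightly
svm->setKernel(SVM::LINEAR);
// far more iterations than in the linearly separable case
svm->setTermCriteria(TermCriteria(TermCriteria::MAX_ITER, (int)1e7, 1e-6));
@endcode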
Results
-------

@ -1,7 +1,7 @@
Machine Learning (ml module) {#tutorial_table_of_content_ml}
============================
Use the powerfull machine learning classes for statistical classification, regression and clustering
Use the powerful machine learning classes for statistical classification, regression and clustering
of data.
- @subpage tutorial_introduction_to_svm
@ -20,3 +20,11 @@ of data.
Here you will learn how to define the optimization problem for SVMs when it is not possible to
separate linearly the training data.
- @subpage tutorial_introduction_to_pca
*Compatibility:* \> OpenCV 2.0
*Author:* Theodore Tsesmelis
Learn what a Principal Component Analysis (PCA) is.

@ -1047,14 +1047,14 @@ void CameraHandler::applyProperties(CameraHandler** ppcameraHandler)
return;
}
CameraHandler* handler=*ppcameraHandler;
// delayed resolution setup to exclude errors during setup of other parameters on the fly
// without camera restart
if (((*ppcameraHandler)->width != 0) && ((*ppcameraHandler)->height != 0))
(*ppcameraHandler)->params->setPreviewSize((*ppcameraHandler)->width, (*ppcameraHandler)->height);
if ((handler->width != 0) && (handler->height != 0))
handler->params->setPreviewSize(handler->width, handler->height);
#if defined(ANDROID_r4_0_0) || defined(ANDROID_r4_0_3) || defined(ANDROID_r4_1_1) || defined(ANDROID_r4_2_0) \
|| defined(ANDROID_r4_3_0) || defined(ANDROID_r4_4_0)
CameraHandler* handler=*ppcameraHandler;
handler->camera->stopPreview();
handler->camera->setPreviewCallbackFlags(CAMERA_FRAME_CALLBACK_FLAG_NOOP);
@ -1066,7 +1066,7 @@ void CameraHandler::applyProperties(CameraHandler** ppcameraHandler)
return;
}
handler->camera->setParameters((*ppcameraHandler)->params->flatten());
handler->camera->setParameters(handler->params->flatten());
status_t bufferStatus;
# if defined(ANDROID_r4_0_0) || defined(ANDROID_r4_0_3)
@ -1107,7 +1107,7 @@ void CameraHandler::applyProperties(CameraHandler** ppcameraHandler)
LOGD("Preview started successfully");
}
#else
CameraHandler* previousCameraHandler=*ppcameraHandler;
CameraHandler* previousCameraHandler=handler;
CameraCallback cameraCallback=previousCameraHandler->cameraCallback;
void* userData=previousCameraHandler->userData;
int cameraId=previousCameraHandler->cameraId;
@ -1117,7 +1117,7 @@ void CameraHandler::applyProperties(CameraHandler** ppcameraHandler)
LOGD("CameraHandler::applyProperties(): after previousCameraHandler->closeCameraConnect");
LOGD("CameraHandler::applyProperties(): before initCameraConnect");
CameraHandler* handler=initCameraConnect(cameraCallback, cameraId, userData, (*ppcameraHandler)->params);
handler=initCameraConnect(cameraCallback, cameraId, userData, handler->params);
LOGD("CameraHandler::applyProperties(): after initCameraConnect, handler=0x%x", (int)handler);
if (handler == NULL) {
LOGE("ERROR in applyProperties --- cannot reinit camera");

@ -309,13 +309,13 @@ cv::String CameraWrapperConnector::getPathLibFolder()
const char* libName=dl_info.dli_fname;
while( ((*libName)=='/') || ((*libName)=='.') )
libName++;
libName++;
char lineBuf[2048];
FILE* file = fopen("/proc/self/smaps", "rt");
if(file)
{
char lineBuf[2048];
while (fgets(lineBuf, sizeof lineBuf, file) != NULL)
{
//verify that line ends with library name

@ -1,2 +1,2 @@
set(the_description "Camera Calibration and 3D Reconstruction")
ocv_define_module(calib3d opencv_imgproc opencv_features2d)
ocv_define_module(calib3d opencv_imgproc opencv_features2d WRAP java python)

@ -66,10 +66,10 @@ void drawPoints(const std::vector<Point2f> &points, Mat &outImage, int radius =
}
#endif
void CirclesGridClusterFinder::hierarchicalClustering(const std::vector<Point2f> points, const Size &patternSz, std::vector<Point2f> &patternPoints)
void CirclesGridClusterFinder::hierarchicalClustering(const std::vector<Point2f> &points, const Size &patternSz, std::vector<Point2f> &patternPoints)
{
#ifdef HAVE_TEGRA_OPTIMIZATION
if(tegra::hierarchicalClustering(points, patternSz, patternPoints))
if(tegra::useTegra() && tegra::hierarchicalClustering(points, patternSz, patternPoints))
return;
#endif
int j, n = (int)points.size();
@ -135,7 +135,7 @@ void CirclesGridClusterFinder::hierarchicalClustering(const std::vector<Point2f>
}
}
void CirclesGridClusterFinder::findGrid(const std::vector<cv::Point2f> points, cv::Size _patternSize, std::vector<Point2f>& centers)
void CirclesGridClusterFinder::findGrid(const std::vector<cv::Point2f> &points, cv::Size _patternSize, std::vector<Point2f>& centers)
{
patternSize = _patternSize;
centers.clear();

@ -62,10 +62,10 @@ public:
squareSize = 1.0f;
maxRectifiedDistance = (float)(squareSize / 2.0);
}
void findGrid(const std::vector<cv::Point2f> points, cv::Size patternSize, std::vector<cv::Point2f>& centers);
void findGrid(const std::vector<cv::Point2f> &points, cv::Size patternSize, std::vector<cv::Point2f>& centers);
//cluster 2d points by geometric coordinates
void hierarchicalClustering(const std::vector<cv::Point2f> points, const cv::Size &patternSize, std::vector<cv::Point2f> &patternPoints);
void hierarchicalClustering(const std::vector<cv::Point2f> &points, const cv::Size &patternSize, std::vector<cv::Point2f> &patternPoints);
private:
void findCorners(const std::vector<cv::Point2f> &hull2f, std::vector<cv::Point2f> &corners);
void findOutsideCorners(const std::vector<cv::Point2f> &corners, std::vector<cv::Point2f> &outsideCorners);

@ -504,7 +504,7 @@ private:
H[n1][n1 - 1] = 0.0;
H[n1][n1] = 1.0;
for (int i = n1 - 2; i >= 0; i--) {
double ra, sa, vr, vi;
double ra, sa;
ra = 0.0;
sa = 0.0;
for (int j = l; j <= n1; j++) {
@ -529,8 +529,8 @@ private:
x = H[i][i + 1];
y = H[i + 1][i];
vr = (d[i] - p) * (d[i] - p) + e[i] * e[i] - q * q;
vi = (d[i] - p) * 2.0 * q;
double vr = (d[i] - p) * (d[i] - p) + e[i] * e[i] - q * q;
double vi = (d[i] - p) * 2.0 * q;
if (vr == 0.0 && vi == 0.0) {
vr = eps * norm * (std::abs(w) + std::abs(q) + std::abs(x)
+ std::abs(y) + std::abs(z));

@ -872,8 +872,8 @@ double cv::fisheye::stereoCalibrate(InputArrayOfArrays objectPoints, InputArrayO
if ((flags & CALIB_FIX_INTRINSIC))
{
internal::CalibrateExtrinsics(objectPoints, imagePoints1, intrinsicLeft, check_cond, thresh_cond, rvecs1, tvecs1);
internal::CalibrateExtrinsics(objectPoints, imagePoints2, intrinsicRight, check_cond, thresh_cond, rvecs2, tvecs2);
cv::internal::CalibrateExtrinsics(objectPoints, imagePoints1, intrinsicLeft, check_cond, thresh_cond, rvecs1, tvecs1);
cv::internal::CalibrateExtrinsics(objectPoints, imagePoints2, intrinsicRight, check_cond, thresh_cond, rvecs2, tvecs2);
}
intrinsicLeft.isEstimate[0] = flags & CALIB_FIX_INTRINSIC ? 0 : 1;
@ -918,8 +918,8 @@ double cv::fisheye::stereoCalibrate(InputArrayOfArrays objectPoints, InputArrayO
om_ref.reshape(3, 1).copyTo(om_list.col(image_idx));
T_ref.reshape(3, 1).copyTo(T_list.col(image_idx));
}
cv::Vec3d omcur = internal::median3d(om_list);
cv::Vec3d Tcur = internal::median3d(T_list);
cv::Vec3d omcur = cv::internal::median3d(om_list);
cv::Vec3d Tcur = cv::internal::median3d(T_list);
cv::Mat J = cv::Mat::zeros(4 * n_points * n_images, 18 + 6 * (n_images + 1), CV_64FC1),
e = cv::Mat::zeros(4 * n_points * n_images, 1, CV_64FC1), Jkk, ekk;
@ -961,7 +961,7 @@ double cv::fisheye::stereoCalibrate(InputArrayOfArrays objectPoints, InputArrayO
jacobians.col(14).copyTo(Jkk.col(4).rowRange(0, 2 * n_points));
//right camera jacobian
internal::compose_motion(rvec, tvec, omcur, Tcur, omr, Tr, domrdomckk, domrdTckk, domrdom, domrdT, dTrdomckk, dTrdTckk, dTrdom, dTrdT);
cv::internal::compose_motion(rvec, tvec, omcur, Tcur, omr, Tr, domrdomckk, domrdTckk, domrdom, domrdT, dTrdomckk, dTrdTckk, dTrdom, dTrdT);
rvec = cv::Mat(rvecs2[image_idx]);
tvec = cv::Mat(tvecs2[image_idx]);

@ -200,8 +200,6 @@ public:
void setCallback(const Ptr<LMSolver::Callback>& _cb) { cb = _cb; }
AlgorithmInfo* info() const;
Ptr<LMSolver::Callback> cb;
double epsx;
@ -211,15 +209,8 @@ public:
};
CV_INIT_ALGORITHM(LMSolverImpl, "LMSolver",
obj.info()->addParam(obj, "epsx", obj.epsx);
obj.info()->addParam(obj, "epsf", obj.epsf);
obj.info()->addParam(obj, "maxIters", obj.maxIters);
obj.info()->addParam(obj, "printInterval", obj.printInterval))
Ptr<LMSolver> createLMSolver(const Ptr<LMSolver::Callback>& cb, int maxIters)
{
CV_Assert( !LMSolverImpl_info_auto.name().empty() );
return makePtr<LMSolverImpl>(cb, maxIters);
}

@ -116,7 +116,7 @@ static CvStatus icvPOSIT( CvPOSITObject *pObject, CvPoint2D32f *imagePoints,
{
int i, j, k;
int count = 0, converged = 0;
float inorm, jnorm, invInorm, invJnorm, invScale, scale = 0, inv_Z = 0;
float scale = 0, inv_Z = 0;
float diff = (float)criteria.epsilon;
/* Check bad arguments */
@ -195,16 +195,18 @@ static CvStatus icvPOSIT( CvPOSITObject *pObject, CvPoint2D32f *imagePoints,
}
}
inorm = rotation[0] /*[0][0]*/ * rotation[0] /*[0][0]*/ +
float inorm =
rotation[0] /*[0][0]*/ * rotation[0] /*[0][0]*/ +
rotation[1] /*[0][1]*/ * rotation[1] /*[0][1]*/ +
rotation[2] /*[0][2]*/ * rotation[2] /*[0][2]*/;
jnorm = rotation[3] /*[1][0]*/ * rotation[3] /*[1][0]*/ +
float jnorm =
rotation[3] /*[1][0]*/ * rotation[3] /*[1][0]*/ +
rotation[4] /*[1][1]*/ * rotation[4] /*[1][1]*/ +
rotation[5] /*[1][2]*/ * rotation[5] /*[1][2]*/;
invInorm = cvInvSqrt( inorm );
invJnorm = cvInvSqrt( jnorm );
const float invInorm = cvInvSqrt( inorm );
const float invJnorm = cvInvSqrt( jnorm );
inorm *= invInorm;
jnorm *= invJnorm;
@ -234,7 +236,7 @@ static CvStatus icvPOSIT( CvPOSITObject *pObject, CvPoint2D32f *imagePoints,
converged = ((criteria.type & CV_TERMCRIT_EPS) && (diff < criteria.epsilon));
converged |= ((criteria.type & CV_TERMCRIT_ITER) && (count == criteria.max_iter));
}
invScale = 1 / scale;
const float invScale = 1 / scale;
translation[0] = imagePoints[0].x * invScale;
translation[1] = imagePoints[0].y * invScale;
translation[2] = 1 / inv_Z;
@ -266,8 +268,6 @@ static CvStatus icvReleasePOSITObject( CvPOSITObject ** ppObject )
void
icvPseudoInverse3D( float *a, float *b, int n, int method )
{
int k;
if( method == 0 )
{
float ata00 = 0;
@ -276,8 +276,8 @@ icvPseudoInverse3D( float *a, float *b, int n, int method )
float ata01 = 0;
float ata02 = 0;
float ata12 = 0;
float det = 0;
int k;
/* compute matrix ata = transpose(a) * a */
for( k = 0; k < n; k++ )
{
@ -295,7 +295,6 @@ icvPseudoInverse3D( float *a, float *b, int n, int method )
}
/* inverse matrix ata */
{
float inv_det;
float p00 = ata11 * ata22 - ata12 * ata12;
float p01 = -(ata01 * ata22 - ata12 * ata02);
float p02 = ata12 * ata01 - ata11 * ata02;
@ -304,11 +303,12 @@ icvPseudoInverse3D( float *a, float *b, int n, int method )
float p12 = -(ata00 * ata12 - ata01 * ata02);
float p22 = ata00 * ata11 - ata01 * ata01;
float det = 0;
det += ata00 * p00;
det += ata01 * p01;
det += ata02 * p02;
inv_det = 1 / det;
const float inv_det = 1 / det;
/* compute resultant matrix */
for( k = 0; k < n; k++ )

@ -256,8 +256,6 @@ public:
void setCallback(const Ptr<PointSetRegistrator::Callback>& _cb) { cb = _cb; }
AlgorithmInfo* info() const;
Ptr<PointSetRegistrator::Callback> cb;
int modelPoints;
bool checkPartialSubsets;
@@ -378,25 +376,12 @@ public:
return result;
}
AlgorithmInfo* info() const;
};
CV_INIT_ALGORITHM(RANSACPointSetRegistrator, "PointSetRegistrator.RANSAC",
obj.info()->addParam(obj, "threshold", obj.threshold);
obj.info()->addParam(obj, "confidence", obj.confidence);
obj.info()->addParam(obj, "maxIters", obj.maxIters))
CV_INIT_ALGORITHM(LMeDSPointSetRegistrator, "PointSetRegistrator.LMeDS",
obj.info()->addParam(obj, "confidence", obj.confidence);
obj.info()->addParam(obj, "maxIters", obj.maxIters))
Ptr<PointSetRegistrator> createRANSACPointSetRegistrator(const Ptr<PointSetRegistrator::Callback>& _cb,
int _modelPoints, double _threshold,
double _confidence, int _maxIters)
{
CV_Assert( !RANSACPointSetRegistrator_info_auto.name().empty() );
return Ptr<PointSetRegistrator>(
new RANSACPointSetRegistrator(_cb, _modelPoints, _threshold, _confidence, _maxIters));
}
@@ -405,7 +390,6 @@ Ptr<PointSetRegistrator> createRANSACPointSetRegistrator(const Ptr<PointSetRegis
Ptr<PointSetRegistrator> createLMeDSPointSetRegistrator(const Ptr<PointSetRegistrator::Callback>& _cb,
int _modelPoints, double _confidence, int _maxIters)
{
CV_Assert( !LMeDSPointSetRegistrator_info_auto.name().empty() );
return Ptr<PointSetRegistrator>(
new LMeDSPointSetRegistrator(_cb, _modelPoints, _confidence, _maxIters));
}

@@ -1010,8 +1010,6 @@ public:
disp.convertTo(disp0, disp0.type(), 1./(1 << DISPARITY_SHIFT), 0);
}
AlgorithmInfo* info() const { return 0; }
int getMinDisparity() const { return params.minDisparity; }
void setMinDisparity(int minDisparity) { params.minDisparity = minDisparity; }

@@ -865,8 +865,6 @@ public:
StereoMatcher::DISP_SCALE*params.speckleRange, buffer);
}
AlgorithmInfo* info() const { return 0; }
int getMinDisparity() const { return params.minDisparity; }
void setMinDisparity(int minDisparity) { params.minDisparity = minDisparity; }
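These hunks drop the AlgorithmInfo plumbing from the block matchers in favor of plain typed accessors. A minimal usage sketch of the accessor style, assuming the OpenCV 3.x StereoBM::create() factory:
@code{.cpp}
#include "opencv2/calib3d.hpp"

void configureMatcher()
{
    // create(numDisparities, blockSize); the values here are illustrative
    cv::Ptr<cv::StereoBM> bm = cv::StereoBM::create(64, 21);
    bm->setMinDisparity(0);               // typed setter replaces set("minDisparity", 0)
    int minDisp = bm->getMinDisparity();  // typed getter replaces get<int>("minDisparity")
    (void)minDisp;
}
@endcode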

@@ -1,11 +1,12 @@
set(the_description "The Core Functionality")
ocv_add_module(core PRIVATE_REQUIRED ${ZLIB_LIBRARIES} "${OPENCL_LIBRARIES}" OPTIONAL opencv_cudev)
ocv_add_module(core PRIVATE_REQUIRED ${ZLIB_LIBRARIES} "${OPENCL_LIBRARIES}"
OPTIONAL opencv_cudev
WRAP java python)
if(HAVE_WINRT_CX)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /ZW")
endif()
if(HAVE_WINRT)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /GS /Gm- /AI\"${WINDOWS_SDK_PATH}/References/CommonConfiguration/Neutral\" /AI\"${VISUAL_STUDIO_PATH}/vcpackages\"")
set(extra_libs "")
if(WINRT AND CMAKE_SYSTEM_NAME MATCHES WindowsStore AND CMAKE_SYSTEM_VERSION MATCHES "8.0")
list(APPEND extra_libs ole32.lib)
endif()
if(HAVE_CUDA)
@@ -22,7 +23,7 @@ ocv_glob_module_sources(SOURCES "${OPENCV_MODULE_opencv_core_BINARY_DIR}/version
HEADERS ${lib_cuda_hdrs} ${lib_cuda_hdrs_detail})
ocv_module_include_directories(${the_module} ${ZLIB_INCLUDE_DIRS})
ocv_create_module()
ocv_create_module(${extra_libs})
ocv_add_accuracy_tests()
ocv_add_perf_tests()

@@ -545,8 +545,31 @@ The function returns the number of non-zero elements in src :
*/
CV_EXPORTS_W int countNonZero( InputArray src );
/** @brief returns the list of locations of non-zero pixels
@todo document
/** @brief Returns the list of locations of non-zero pixels
Given a binary matrix (likely returned from an operation such
as threshold(), compare(), >, ==, etc.), returns all of
the non-zero indices as a cv::Mat or std::vector<cv::Point> (x,y).
For example:
@code{.cpp}
cv::Mat binaryImage; // input, binary image
cv::Mat locations; // output, locations of non-zero pixels
cv::findNonZero(binaryImage, locations);
// access pixel coordinates
Point pnt = locations.at<Point>(i);
@endcode
or
@code{.cpp}
cv::Mat binaryImage; // input, binary image
vector<Point> locations; // output, locations of non-zero pixels
cv::findNonZero(binaryImage, locations);
// access pixel coordinates
Point pnt = locations[i];
@endcode
@param src single-channel array (type CV_8UC1)
@param idx the output array, of type cv::Mat or std::vector<Point>, holding the indices of the non-zero pixels in the input
*/
CV_EXPORTS_W void findNonZero( InputArray src, OutputArray idx );
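For completeness, a small end-to-end sketch; the file name is illustrative, and the thresholding step merely produces the binary mask that findNonZero expects:
@code{.cpp}
#include <vector>
#include "opencv2/imgcodecs.hpp"
#include "opencv2/imgproc.hpp"

void listForeground()
{
    cv::Mat gray = cv::imread("input.png", cv::IMREAD_GRAYSCALE); // hypothetical input file
    cv::Mat mask;
    cv::threshold(gray, mask, 128, 255, cv::THRESH_BINARY);       // CV_8UC1 binary image
    std::vector<cv::Point> locations;                             // one (x,y) entry per non-zero pixel
    cv::findNonZero(mask, locations);
}
@endcode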
@@ -2745,8 +2768,6 @@ public:
//////////////////////////////////////// Algorithm ////////////////////////////////////
class CV_EXPORTS Algorithm;
class CV_EXPORTS AlgorithmInfo;
struct CV_EXPORTS AlgorithmInfoData;
template<typename _Tp> struct ParamType {};
@@ -2759,32 +2780,13 @@ matching, graph-cut etc.), background subtraction (which can be done using mixtu
models, codebook-based algorithm etc.), optical flow (block matching, Lucas-Kanade, Horn-Schunck
etc.).
The class provides the following features for all derived classes:
- a so-called "virtual constructor". That is, each Algorithm derivative is registered at program
start and you can get the list of registered algorithms and create an instance of a particular
algorithm by its name (see Algorithm::create). If you plan to add your own algorithms, it is
good practice to add a unique prefix to your algorithms to distinguish them from other
algorithms.
- setting/retrieving algorithm parameters by name. If you have used the video capturing
functionality from the OpenCV videoio module, you are probably familiar with cvSetCaptureProperty(),
cvGetCaptureProperty(), VideoCapture::set() and VideoCapture::get(). Algorithm provides
similar methods where instead of integer ids you specify the parameter names as text strings.
See Algorithm::set and Algorithm::get for details.
- reading and writing parameters from/to XML or YAML files. Every Algorithm derivative can store
all its parameters and then read them back. There is no need to re-implement it each time.
Here is an example of using SIFT in your application via the Algorithm interface:
@code
#include "opencv2/opencv.hpp"
#include "opencv2/xfeatures2d.hpp"
using namespace cv::xfeatures2d;
...
Ptr<Feature2D> sift = SIFT::create();
FileStorage fs("sift_params.xml", FileStorage::READ);
if( fs.isOpened() ) // if we have a file with parameters, read them
{
@@ -2794,323 +2796,73 @@ Here is example of SIFT use in your application via Algorithm interface:
else // modify the parameters and store them; the user can later edit the file to use different parameters
{
sift->setContrastThreshold(0.01f); // lower the contrast threshold, compared to the default value
{
WriteStructContext ws(fs, "sift_params", CV_NODE_MAP);
sift->write(fs);
}
}
Mat image = imread("myimage.png", 0), descriptors;
vector<KeyPoint> keypoints;
sift->detectAndCompute(image, noArray(), keypoints, descriptors);
@endcode
Creating Own Algorithms
-----------------------
If you want to make your own algorithm derived from Algorithm, you should follow a few
conventions and add a small semi-standard piece of code to your class (see the sketch right after this comment block):
- Make a class and specify Algorithm as its base class.
- The algorithm parameters should be the class members. See Algorithm::get() for the list of
possible types of the parameters.
- Add public virtual method `AlgorithmInfo* info() const;` to your class.
- Add constructor function, AlgorithmInfo instance and implement the info() method. The simplest
way is to take <https://github.com/Itseez/opencv/tree/master/modules/ml/src/ml_init.cpp> as
the reference and modify it according to the list of your parameters.
- Add some public function (e.g. `initModule_<mymodule>()`) that calls info() of your algorithm
and put it into the same source file as info() implementation. This is to force C++ linker to
include this object file into the target application. See Algorithm::create() for details.
*/
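A compressed sketch of those conventions, mirroring the CV_INIT_ALGORITHM pattern visible elsewhere in this diff; MyAlgo, the registered name and initModule_mymodule() are all hypothetical:
@code{.cpp}
// Hypothetical Algorithm derivative following the conventions above.
class MyAlgo : public cv::Algorithm
{
public:
    MyAlgo() : thresh(0.5) {}
    double thresh;                    // a parameter exposed by name below
    cv::AlgorithmInfo* info() const;  // implemented by CV_INIT_ALGORITHM
};

// Registers the class under "MyModule.MyAlgo" and binds its parameter.
CV_INIT_ALGORITHM(MyAlgo, "MyModule.MyAlgo",
                  obj.info()->addParam(obj, "thresh", obj.thresh))

// Calling this from the application forces the linker to keep this object file.
bool initModule_mymodule() { return !MyAlgo_info_auto.name().empty(); }
@endcode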
class CV_EXPORTS_W Algorithm
{
public:
Algorithm();
virtual ~Algorithm();
/** Returns the algorithm name */
String name() const;
/** @brief Returns the algorithm parameter
The method returns the value of the particular parameter. Since the compiler cannot deduce the
type of the returned parameter, you should specify it explicitly in angle brackets. Here are
the allowed forms of get:
- myalgo.get\<int\>("param_name")
- myalgo.get\<double\>("param_name")
- myalgo.get\<bool\>("param_name")
- myalgo.get\<String\>("param_name")
- myalgo.get\<Mat\>("param_name")
- myalgo.get\<vector\<Mat\> \>("param_name")
- myalgo.get\<Algorithm\>("param_name") (it returns Ptr\<Algorithm\>).
In some cases the actual type of the parameter can be cast to the specified type, e.g. an integer
parameter can be cast to double, and bool can be cast to int. But "dangerous" transformations
(string\<-\>number, double-\>int, 1x1 Mat\<-\>number, ...) are not performed and the method
will throw an exception. In the case of Mat or vector\<Mat\> parameters the method does not
clone the matrix data, so do not modify the matrices. Use Algorithm::set instead; it is slower,
but safer.
@param name The parameter name.
*/
template<typename _Tp> typename ParamType<_Tp>::member_type get(const String& name) const;
/** @overload */
template<typename _Tp> typename ParamType<_Tp>::member_type get(const char* name) const;
CV_WRAP int getInt(const String& name) const;
CV_WRAP double getDouble(const String& name) const;
CV_WRAP bool getBool(const String& name) const;
CV_WRAP String getString(const String& name) const;
CV_WRAP Mat getMat(const String& name) const;
CV_WRAP std::vector<Mat> getMatVector(const String& name) const;
CV_WRAP Ptr<Algorithm> getAlgorithm(const String& name) const;
/** @brief Sets the algorithm parameter
The method sets the value of the particular parameter. Some of the algorithm
parameters may be declared as read-only. If you try to set such a
parameter, you will get an exception with the corresponding error message. A combined set/get sketch follows the declarations below.
@param name The parameter name.
@param value The parameter value.
*/
void set(const String& name, int value);
void set(const String& name, double value);
void set(const String& name, bool value);
void set(const String& name, const String& value);
void set(const String& name, const Mat& value);
void set(const String& name, const std::vector<Mat>& value);
void set(const String& name, const Ptr<Algorithm>& value);
template<typename _Tp> void set(const String& name, const Ptr<_Tp>& value);
CV_WRAP void setInt(const String& name, int value);
CV_WRAP void setDouble(const String& name, double value);
CV_WRAP void setBool(const String& name, bool value);
CV_WRAP void setString(const String& name, const String& value);
CV_WRAP void setMat(const String& name, const Mat& value);
CV_WRAP void setMatVector(const String& name, const std::vector<Mat>& value);
CV_WRAP void setAlgorithm(const String& name, const Ptr<Algorithm>& value);
template<typename _Tp> void setAlgorithm(const String& name, const Ptr<_Tp>& value);
void set(const char* name, int value);
void set(const char* name, double value);
void set(const char* name, bool value);
void set(const char* name, const String& value);
void set(const char* name, const Mat& value);
void set(const char* name, const std::vector<Mat>& value);
void set(const char* name, const Ptr<Algorithm>& value);
template<typename _Tp> void set(const char* name, const Ptr<_Tp>& value);
void setInt(const char* name, int value);
void setDouble(const char* name, double value);
void setBool(const char* name, bool value);
void setString(const char* name, const String& value);
void setMat(const char* name, const Mat& value);
void setMatVector(const char* name, const std::vector<Mat>& value);
void setAlgorithm(const char* name, const Ptr<Algorithm>& value);
template<typename _Tp> void setAlgorithm(const char* name, const Ptr<_Tp>& value);
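A combined sketch of set and get; the algorithm name and parameter come straight from the LMSolver registration shown earlier in this diff, but any registered name works:
@code{.cpp}
void tuneSolver()
{
    cv::Ptr<cv::Algorithm> algo = cv::Algorithm::_create("LMSolver"); // name must be registered
    if (algo)
    {
        algo->set("maxIters", 200);              // overload chosen from the argument type
        int iters = algo->get<int>("maxIters");  // return type spelled out explicitly
        (void)iters;
    }
}
@endcode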
CV_WRAP String paramHelp(const String& name) const;
int paramType(const char* name) const;
CV_WRAP int paramType(const String& name) const;
CV_WRAP void getParams(CV_OUT std::vector<String>& names) const;
/** @brief Stores algorithm parameters in a file storage
The method stores all the algorithm parameters (in alphabetical order) to
the file storage. The method is virtual. If you define your own
Algorithm derivative, you can override the method and store some extra
information. However, this is rarely needed. Here are some examples:
- SIFT feature detector (from xfeatures2d module). The class only
stores algorithm parameters and no keypoints or their descriptors.
Therefore, it's enough to store the algorithm parameters, which is
what Algorithm::write() does. Therefore, there is no dedicated
SIFT::write().
- Background subtractor (from the video module). It has the algorithm
parameters and also the current background model. However,
the background model is not stored. First, it is rather big. Second,
if the background model were stored, it would likely become
irrelevant on the next run (because of a shifted camera, changed
background, different lighting, etc.). Therefore,
BackgroundSubtractorMOG and BackgroundSubtractorMOG2 also rely on
the standard Algorithm::write() to store just the algorithm
parameters.
- Expectation Maximization (from the ml module). The algorithm finds a
mixture of Gaussians that best approximates the user data. In
this case the model may be reused on the next run to test new
data against the trained statistical model. So EM needs to store
the model. However, since the model is described by a few
parameters that are available as read-only algorithm parameters
(i.e. they are available via EM::get()), EM also relies on
Algorithm::write() to store both the EM parameters and the model
(represented by read-only algorithm parameters).
@param fs File storage.
*/
virtual void write(FileStorage& fs) const;
virtual void write(FileStorage& fs) const { (void)fs; }
/** @brief Reads algorithm parameters from a file storage
The method reads all the algorithm parameters from the specified node of
a file storage. Similarly to Algorithm::write(), if you implement an
algorithm that needs to read some extra data and/or re-compute some
internal data, you may override the method.
@param fn File node of the file storage.
*/
virtual void read(const FileNode& fn);
virtual void read(const FileNode& fn) { (void)fn; }
};
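A short round-trip sketch of write() and read() with cv::FileStorage; algo is any Algorithm instance and the file name is illustrative:
@code{.cpp}
void saveAndRestore(const cv::Ptr<cv::Algorithm>& algo)
{
    cv::FileStorage out("algo_params.yml", cv::FileStorage::WRITE);
    algo->write(out);                 // parameters stored in alphabetical order
    out.release();

    cv::FileStorage in("algo_params.yml", cv::FileStorage::READ);
    algo->read(in.root());            // read them back from the root node
}
@endcode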
typedef Algorithm* (*Constructor)(void);
typedef int (Algorithm::*Getter)() const;
typedef void (Algorithm::*Setter)(int);
// define properties
/** @brief Returns the list of registered algorithms
#define CV_PURE_PROPERTY(type, name) \
CV_WRAP virtual type get##name() const = 0; \
CV_WRAP virtual void set##name(type val) = 0;
This static method returns the list of registered algorithms in
alphabetical order. Here is how to use it:
@code{.cpp}
vector<String> algorithms;
Algorithm::getList(algorithms);
cout << "Algorithms: " << algorithms.size() << endl;
for (size_t i=0; i < algorithms.size(); i++)
cout << algorithms[i] << endl;
@endcode
@param algorithms The output vector of algorithm names.
*/
CV_WRAP static void getList(CV_OUT std::vector<String>& algorithms);
CV_WRAP static Ptr<Algorithm> _create(const String& name);
#define CV_PURE_PROPERTY_S(type, name) \
CV_WRAP virtual type get##name() const = 0; \
CV_WRAP virtual void set##name(const type & val) = 0;
/** @brief Creates algorithm instance by name
#define CV_PURE_PROPERTY_RO(type, name) \
CV_WRAP virtual type get##name() const = 0;
This static method creates a new instance of the specified algorithm. If
there is no such algorithm, the method will silently return a null
pointer. Also, you should specify the particular Algorithm subclass as
_Tp (or simply Algorithm if you do not know it at that point):
@code{.cpp}
Ptr<BackgroundSubtractor> bgfg = Algorithm::create<BackgroundSubtractor>("BackgroundSubtractor.MOG2");
@endcode
@note This is an important note about the seemingly mysterious behavior of
Algorithm::create() when it returns NULL while it should not. The reason
is simple: Algorithm::create() resides in OpenCV's core module and the
algorithms are implemented in other modules. If you create algorithms
dynamically, the C++ linker may decide to throw away the modules where the
actual algorithms are implemented, since you do not call any functions
from those modules. To avoid this problem, you need to call
initModule_\<modulename\>(); somewhere at the beginning of the program,
before Algorithm::create(). For example, call initModule_xfeatures2d()
in order to use SURF/SIFT, call initModule_ml() to use expectation
maximization, etc.
@param name The algorithm name, one of the names returned by Algorithm::getList().
*/
template<typename _Tp> static Ptr<_Tp> create(const String& name);
// basic property implementation
virtual AlgorithmInfo* info() const /* TODO: make it = 0;*/ { return 0; }
};
#define CV_IMPL_PROPERTY_RO(type, name, member) \
inline type get##name() const { return member; }
/** @todo document */
class CV_EXPORTS AlgorithmInfo
{
public:
friend class Algorithm;
AlgorithmInfo(const String& name, Algorithm::Constructor create);
~AlgorithmInfo();
void get(const Algorithm* algo, const char* name, int argType, void* value) const;
void addParam_(Algorithm& algo, const char* name, int argType,
void* value, bool readOnly,
Algorithm::Getter getter, Algorithm::Setter setter,
const String& help=String());
String paramHelp(const char* name) const;
int paramType(const char* name) const;
void getParams(std::vector<String>& names) const;
void write(const Algorithm* algo, FileStorage& fs) const;
void read(Algorithm* algo, const FileNode& fn) const;
String name() const;
void addParam(Algorithm& algo, const char* name,
int& value, bool readOnly=false,
int (Algorithm::*getter)()=0,
void (Algorithm::*setter)(int)=0,
const String& help=String());
void addParam(Algorithm& algo, const char* name,
bool& value, bool readOnly=false,
int (Algorithm::*getter)()=0,
void (Algorithm::*setter)(int)=0,
const String& help=String());
void addParam(Algorithm& algo, const char* name,
double& value, bool readOnly=false,
double (Algorithm::*getter)()=0,
void (Algorithm::*setter)(double)=0,
const String& help=String());
void addParam(Algorithm& algo, const char* name,
String& value, bool readOnly=false,
String (Algorithm::*getter)()=0,
void (Algorithm::*setter)(const String&)=0,
const String& help=String());
void addParam(Algorithm& algo, const char* name,
Mat& value, bool readOnly=false,
Mat (Algorithm::*getter)()=0,
void (Algorithm::*setter)(const Mat&)=0,
const String& help=String());
void addParam(Algorithm& algo, const char* name,
std::vector<Mat>& value, bool readOnly=false,
std::vector<Mat> (Algorithm::*getter)()=0,
void (Algorithm::*setter)(const std::vector<Mat>&)=0,
const String& help=String());
void addParam(Algorithm& algo, const char* name,
Ptr<Algorithm>& value, bool readOnly=false,
Ptr<Algorithm> (Algorithm::*getter)()=0,
void (Algorithm::*setter)(const Ptr<Algorithm>&)=0,
const String& help=String());
void addParam(Algorithm& algo, const char* name,
float& value, bool readOnly=false,
float (Algorithm::*getter)()=0,
void (Algorithm::*setter)(float)=0,
const String& help=String());
void addParam(Algorithm& algo, const char* name,
unsigned int& value, bool readOnly=false,
unsigned int (Algorithm::*getter)()=0,
void (Algorithm::*setter)(unsigned int)=0,
const String& help=String());
void addParam(Algorithm& algo, const char* name,
uint64& value, bool readOnly=false,
uint64 (Algorithm::*getter)()=0,
void (Algorithm::*setter)(uint64)=0,
const String& help=String());
void addParam(Algorithm& algo, const char* name,
uchar& value, bool readOnly=false,
uchar (Algorithm::*getter)()=0,
void (Algorithm::*setter)(uchar)=0,
const String& help=String());
template<typename _Tp, typename _Base> void addParam(Algorithm& algo, const char* name,
Ptr<_Tp>& value, bool readOnly=false,
Ptr<_Tp> (Algorithm::*getter)()=0,
void (Algorithm::*setter)(const Ptr<_Tp>&)=0,
const String& help=String());
template<typename _Tp> void addParam(Algorithm& algo, const char* name,
Ptr<_Tp>& value, bool readOnly=false,
Ptr<_Tp> (Algorithm::*getter)()=0,
void (Algorithm::*setter)(const Ptr<_Tp>&)=0,
const String& help=String());
protected:
AlgorithmInfoData* data;
void set(Algorithm* algo, const char* name, int argType,
const void* value, bool force=false) const;
};
#define CV_HELP_IMPL_PROPERTY(r_type, w_type, name, member) \
CV_IMPL_PROPERTY_RO(r_type, name, member) \
inline void set##name(w_type val) { member = val; }
/** @todo document */
struct CV_EXPORTS Param
{
enum { INT=0, BOOLEAN=1, REAL=2, STRING=3, MAT=4, MAT_VECTOR=5, ALGORITHM=6, FLOAT=7, UNSIGNED_INT=8, UINT64=9, UCHAR=11 };
Param();
Param(int _type, bool _readonly, int _offset,
Algorithm::Getter _getter=0,
Algorithm::Setter _setter=0,
const String& _help=String());
int type;
int offset;
bool readonly;
Algorithm::Getter getter;
Algorithm::Setter setter;
String help;
#define CV_HELP_WRAP_PROPERTY(r_type, w_type, name, internal_name, internal_obj) \
r_type get##name() const { return internal_obj.get##internal_name(); } \
void set##name(w_type val) { internal_obj.set##internal_name(val); }
#define CV_IMPL_PROPERTY(type, name, member) CV_HELP_IMPL_PROPERTY(type, type, name, member)
#define CV_IMPL_PROPERTY_S(type, name, member) CV_HELP_IMPL_PROPERTY(type, const type &, name, member)
#define CV_WRAP_PROPERTY(type, name, internal_name, internal_obj) CV_HELP_WRAP_PROPERTY(type, type, name, internal_name, internal_obj)
#define CV_WRAP_PROPERTY_S(type, name, internal_name, internal_obj) CV_HELP_WRAP_PROPERTY(type, const type &, name, internal_name, internal_obj)
#define CV_WRAP_SAME_PROPERTY(type, name, internal_obj) CV_WRAP_PROPERTY(type, name, name, internal_obj)
#define CV_WRAP_SAME_PROPERTY_S(type, name, internal_obj) CV_WRAP_PROPERTY_S(type, name, name, internal_obj)
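The new CV_*_PROPERTY macros replace the string-based parameter tables; a sketch of how an interface and its implementation would use them, with MyFilter and its Thresh property being hypothetical:
@code{.cpp}
#include "opencv2/core.hpp"

// Interface side: declares pure virtual getThresh()/setThresh().
class MyFilter : public cv::Algorithm
{
public:
    CV_PURE_PROPERTY(double, Thresh)
};

// Implementation side: generates the inline accessors over a member,
// which implicitly override the base class virtuals.
class MyFilterImpl : public MyFilter
{
public:
    CV_IMPL_PROPERTY(double, Thresh, thresh)
private:
    double thresh;
};
@endcode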
struct Param {
enum { INT=0, BOOLEAN=1, REAL=2, STRING=3, MAT=4, MAT_VECTOR=5, ALGORITHM=6, FLOAT=7,
UNSIGNED_INT=8, UINT64=9, UCHAR=11 };
};
template<> struct ParamType<bool>
{
typedef bool const_param_type;

@@ -191,7 +191,7 @@
# include "arm_neon.h"
# define CV_NEON 1
# define CPU_HAS_NEON_FEATURE (true)
#elif defined(__ARM_NEON__)
#elif defined(__ARM_NEON__) || (defined (__ARM_NEON) && defined(__aarch64__))
# include <arm_neon.h>
# define CV_NEON 1
#endif
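The widened condition also enables CV_NEON for AArch64 builds, where compilers predefine __ARM_NEON (without the trailing underscores) instead of __ARM_NEON__. Code guarded by the macro then builds on both 32- and 64-bit ARM; a minimal guarded snippet:
@code{.cpp}
static void neonAdd()
{
#if CV_NEON
    float32x4_t a = vdupq_n_f32(1.0f);  // broadcast 1.0 into all four lanes
    float32x4_t b = vdupq_n_f32(2.0f);
    float32x4_t c = vaddq_f32(a, b);    // lane-wise addition
    (void)c;
#endif
}
@endcode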

@@ -817,7 +817,7 @@ Vec<_Tp, n> Matx<_Tp, m, n>::solve(const Vec<_Tp, m>& rhs, int method) const
template<typename _Tp, int m> static inline
double determinant(const Matx<_Tp, m, m>& a)
{
return internal::Matx_DetOp<_Tp, m>()(a);
return cv::internal::Matx_DetOp<_Tp, m>()(a);
}
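This hunk, and the similar ones below, spell out cv::internal:: instead of the unqualified internal::. The diff gives no rationale; presumably the explicit qualification guards against ambiguity when another namespace named internal is visible in client code, as in this illustrative failure case:
@code{.cpp}
#include "opencv2/core.hpp"

namespace internal { struct Tag {}; }  // e.g. pulled in from a third-party header

using namespace cv;                    // cv::internal is now visible here as well

void demo()
{
    // A plain 'internal::Tag t;' would be ambiguous between ::internal
    // and cv::internal at this scope; full qualification resolves it.
    ::internal::Tag t;
    (void)t;
}
@endcode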
template<typename _Tp, int m, int n> static inline
@@ -960,25 +960,25 @@ Vec<_Tp, cn> Vec<_Tp, cn>::mul(const Vec<_Tp, cn>& v) const
template<> inline
Vec<float, 2> Vec<float, 2>::conj() const
{
return internal::conjugate(*this);
return cv::internal::conjugate(*this);
}
template<> inline
Vec<double, 2> Vec<double, 2>::conj() const
{
return internal::conjugate(*this);
return cv::internal::conjugate(*this);
}
template<> inline
Vec<float, 4> Vec<float, 4>::conj() const
{
return internal::conjugate(*this);
return cv::internal::conjugate(*this);
}
template<> inline
Vec<double, 4> Vec<double, 4>::conj() const
{
return internal::conjugate(*this);
return cv::internal::conjugate(*this);
}
template<typename _Tp, int cn> inline

@@ -184,6 +184,7 @@ public:
// After fix restore code in arithm.cpp: ocl_compare()
inline bool isAMD() const { return vendorID() == VENDOR_AMD; }
inline bool isIntel() const { return vendorID() == VENDOR_INTEL; }
inline bool isNVidia() const { return vendorID() == VENDOR_NVIDIA; }
int maxClockFrequency() const;
int maxComputeUnits() const;
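With isNVidia() added alongside isAMD() and isIntel(), the vendor checks cover the three major GPU vendors. A minimal query sketch against the default OpenCL device:
@code{.cpp}
#include "opencv2/core/ocl.hpp"

void inspectDevice()
{
    cv::ocl::Device dev = cv::ocl::Device::getDefault();
    if (dev.isNVidia())
    {
        // vendor-specific tuning could go here
    }
    int mhz   = dev.maxClockFrequency();  // reported in MHz
    int units = dev.maxComputeUnits();
    (void)mhz; (void)units;
}
@endcode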

@@ -193,7 +193,7 @@ Matx<_Tp, n, m> Matx<_Tp, m, n>::inv(int method, bool *p_is_ok /*= NULL*/) const
Matx<_Tp, n, m> b;
bool ok;
if( method == DECOMP_LU || method == DECOMP_CHOLESKY )
ok = internal::Matx_FastInvOp<_Tp, m>()(*this, b, method);
ok = cv::internal::Matx_FastInvOp<_Tp, m>()(*this, b, method);
else
{
Mat A(*this, false), B(b, false);
@@ -209,7 +209,7 @@ Matx<_Tp, n, l> Matx<_Tp, m, n>::solve(const Matx<_Tp, m, l>& rhs, int method) c
Matx<_Tp, n, l> x;
bool ok;
if( method == DECOMP_LU || method == DECOMP_CHOLESKY )
ok = internal::Matx_FastSolveOp<_Tp, m, l>()(*this, rhs, x, method);
ok = cv::internal::Matx_FastSolveOp<_Tp, m, l>()(*this, rhs, x, method);
else
{
Mat A(*this, false), B(rhs, false), X(x, false);
@@ -412,84 +412,6 @@ int print(const Matx<_Tp, m, n>& matx, FILE* stream = stdout)
return print(Formatter::get()->format(cv::Mat(matx)), stream);
}
////////////////////////////////////////// Algorithm //////////////////////////////////////////
template<typename _Tp> inline
Ptr<_Tp> Algorithm::create(const String& name)
{
return _create(name).dynamicCast<_Tp>();
}
template<typename _Tp> inline
void Algorithm::set(const char* _name, const Ptr<_Tp>& value)
{
Ptr<Algorithm> algo_ptr = value. template dynamicCast<cv::Algorithm>();
if (!algo_ptr) {
CV_Error( Error::StsUnsupportedFormat, "unknown/unsupported Ptr type of the second parameter of the method Algorithm::set");
}
info()->set(this, _name, ParamType<Algorithm>::type, &algo_ptr);
}
template<typename _Tp> inline
void Algorithm::set(const String& _name, const Ptr<_Tp>& value)
{
this->set<_Tp>(_name.c_str(), value);
}
template<typename _Tp> inline
void Algorithm::setAlgorithm(const char* _name, const Ptr<_Tp>& value)
{
Ptr<Algorithm> algo_ptr = value. template ptr<cv::Algorithm>();
if (!algo_ptr) {
CV_Error( Error::StsUnsupportedFormat, "unknown/unsupported Ptr type of the second parameter of the method Algorithm::set");
}
info()->set(this, _name, ParamType<Algorithm>::type, &algo_ptr);
}
template<typename _Tp> inline
void Algorithm::setAlgorithm(const String& _name, const Ptr<_Tp>& value)
{
this->set<_Tp>(_name.c_str(), value);
}
template<typename _Tp> inline
typename ParamType<_Tp>::member_type Algorithm::get(const String& _name) const
{
typename ParamType<_Tp>::member_type value;
info()->get(this, _name.c_str(), ParamType<_Tp>::type, &value);
return value;
}
template<typename _Tp> inline
typename ParamType<_Tp>::member_type Algorithm::get(const char* _name) const
{
typename ParamType<_Tp>::member_type value;
info()->get(this, _name, ParamType<_Tp>::type, &value);
return value;
}
template<typename _Tp, typename _Base> inline
void AlgorithmInfo::addParam(Algorithm& algo, const char* parameter, Ptr<_Tp>& value, bool readOnly,
Ptr<_Tp> (Algorithm::*getter)(), void (Algorithm::*setter)(const Ptr<_Tp>&),
const String& help)
{
//TODO: static assert: _Tp inherits from _Base
addParam_(algo, parameter, ParamType<_Base>::type, &value, readOnly,
(Algorithm::Getter)getter, (Algorithm::Setter)setter, help);
}
template<typename _Tp> inline
void AlgorithmInfo::addParam(Algorithm& algo, const char* parameter, Ptr<_Tp>& value, bool readOnly,
Ptr<_Tp> (Algorithm::*getter)(), void (Algorithm::*setter)(const Ptr<_Tp>&),
const String& help)
{
//TODO: static assert: _Tp inherits from Algorithm
addParam_(algo, parameter, ParamType<Algorithm>::type, &value, readOnly,
(Algorithm::Getter)getter, (Algorithm::Setter)setter, help);
}
//! @endcond
/****************************************************************************************\

@@ -914,7 +914,7 @@ void write(FileStorage& fs, const Range& r )
template<typename _Tp> static inline
void write( FileStorage& fs, const std::vector<_Tp>& vec )
{
internal::VecWriterProxy<_Tp, DataType<_Tp>::fmt != 0> w(&fs);
cv::internal::VecWriterProxy<_Tp, DataType<_Tp>::fmt != 0> w(&fs);
w(vec);
}
@@ -922,63 +922,63 @@ void write( FileStorage& fs, const std::vector<_Tp>& vec )
template<typename _Tp> static inline
void write(FileStorage& fs, const String& name, const Point_<_Tp>& pt )
{
internal::WriteStructContext ws(fs, name, FileNode::SEQ+FileNode::FLOW);
cv::internal::WriteStructContext ws(fs, name, FileNode::SEQ+FileNode::FLOW);
write(fs, pt);
}
template<typename _Tp> static inline
void write(FileStorage& fs, const String& name, const Point3_<_Tp>& pt )
{
internal::WriteStructContext ws(fs, name, FileNode::SEQ+FileNode::FLOW);
cv::internal::WriteStructContext ws(fs, name, FileNode::SEQ+FileNode::FLOW);
write(fs, pt);
}
template<typename _Tp> static inline
void write(FileStorage& fs, const String& name, const Size_<_Tp>& sz )
{
internal::WriteStructContext ws(fs, name, FileNode::SEQ+FileNode::FLOW);
cv::internal::WriteStructContext ws(fs, name, FileNode::SEQ+FileNode::FLOW);
write(fs, sz);
}
template<typename _Tp> static inline
void write(FileStorage& fs, const String& name, const Complex<_Tp>& c )
{
internal::WriteStructContext ws(fs, name, FileNode::SEQ+FileNode::FLOW);
cv::internal::WriteStructContext ws(fs, name, FileNode::SEQ+FileNode::FLOW);
write(fs, c);
}
template<typename _Tp> static inline
void write(FileStorage& fs, const String& name, const Rect_<_Tp>& r )
{
internal::WriteStructContext ws(fs, name, FileNode::SEQ+FileNode::FLOW);
cv::internal::WriteStructContext ws(fs, name, FileNode::SEQ+FileNode::FLOW);
write(fs, r);
}
template<typename _Tp, int cn> static inline
void write(FileStorage& fs, const String& name, const Vec<_Tp, cn>& v )
{
internal::WriteStructContext ws(fs, name, FileNode::SEQ+FileNode::FLOW);
cv::internal::WriteStructContext ws(fs, name, FileNode::SEQ+FileNode::FLOW);
write(fs, v);
}
template<typename _Tp> static inline
void write(FileStorage& fs, const String& name, const Scalar_<_Tp>& s )
{
internal::WriteStructContext ws(fs, name, FileNode::SEQ+FileNode::FLOW);
cv::internal::WriteStructContext ws(fs, name, FileNode::SEQ+FileNode::FLOW);
write(fs, s);
}
static inline
void write(FileStorage& fs, const String& name, const Range& r )
{
internal::WriteStructContext ws(fs, name, FileNode::SEQ+FileNode::FLOW);
cv::internal::WriteStructContext ws(fs, name, FileNode::SEQ+FileNode::FLOW);
write(fs, r);
}
template<typename _Tp> static inline
void write( FileStorage& fs, const String& name, const std::vector<_Tp>& vec )
{
internal::WriteStructContext ws(fs, name, FileNode::SEQ+(DataType<_Tp>::fmt != 0 ? FileNode::FLOW : 0));
cv::internal::WriteStructContext ws(fs, name, FileNode::SEQ+(DataType<_Tp>::fmt != 0 ? FileNode::FLOW : 0));
write(fs, vec);
}
@@ -1030,7 +1030,7 @@ void read(const FileNode& node, short& value, short default_value)
template<typename _Tp> static inline
void read( FileNodeIterator& it, std::vector<_Tp>& vec, size_t maxCount = (size_t)INT_MAX )
{
internal::VecReaderProxy<_Tp, DataType<_Tp>::fmt != 0> r(&it);
cv::internal::VecReaderProxy<_Tp, DataType<_Tp>::fmt != 0> r(&it);
r(vec, maxCount);
}
@@ -1101,7 +1101,7 @@ FileNodeIterator& operator >> (FileNodeIterator& it, _Tp& value)
template<typename _Tp> static inline
FileNodeIterator& operator >> (FileNodeIterator& it, std::vector<_Tp>& vec)
{
internal::VecReaderProxy<_Tp, DataType<_Tp>::fmt != 0> r(&it);
cv::internal::VecReaderProxy<_Tp, DataType<_Tp>::fmt != 0> r(&it);
r(vec, (size_t)INT_MAX);
return it;
}
