diff --git a/3rdparty/tbb/CMakeLists.txt b/3rdparty/tbb/CMakeLists.txt index af1581349e..03183d1c2a 100644 --- a/3rdparty/tbb/CMakeLists.txt +++ b/3rdparty/tbb/CMakeLists.txt @@ -1,12 +1,30 @@ #Cross compile TBB from source project(tbb) -# 4.1 update 2 - works fine -set(tbb_ver "tbb41_20130116oss") -set(tbb_url "http://threadingbuildingblocks.org/sites/default/files/software_releases/source/tbb41_20130116oss_src.tgz") -set(tbb_md5 "3809790e1001a1b32d59c9fee590ee85") +if (WIN32 AND NOT ARM) + message(FATAL_ERROR "BUILD_TBB option supports Windows on ARM only!\nUse regular official TBB build instead of the BUILD_TBB option!") +endif() + +# 4.1 update 4 - works fine +set(tbb_ver "tbb41_20130613oss") +set(tbb_url "http://threadingbuildingblocks.org/sites/default/files/software_releases/source/tbb41_20130613oss_src.tgz") +set(tbb_md5 "108c8c1e481b0aaea61878289eb28b6a") set(tbb_version_file "version_string.ver") -ocv_warnings_disable(CMAKE_CXX_FLAGS -Wshadow) +ocv_warnings_disable(CMAKE_CXX_FLAGS -Wshadow -Wunused-parameter) + +# 4.1 update 3 dev - works fine +#set(tbb_ver "tbb41_20130401oss") +#set(tbb_url "http://threadingbuildingblocks.org/sites/default/files/software_releases/source/tbb41_20130401oss_src.tgz") +#set(tbb_md5 "f2f591a0d2ca8f801e221ce7d9ea84bb") +#set(tbb_version_file "version_string.ver") +#ocv_warnings_disable(CMAKE_CXX_FLAGS -Wshadow) + +# 4.1 update 2 - works fine +#set(tbb_ver "tbb41_20130116oss") +#set(tbb_url "http://threadingbuildingblocks.org/sites/default/files/software_releases/source/tbb41_20130116oss_src.tgz") +#set(tbb_md5 "3809790e1001a1b32d59c9fee590ee85") +#set(tbb_version_file "version_string.ver") +#ocv_warnings_disable(CMAKE_CXX_FLAGS -Wshadow) # 4.1 update 1 - works fine #set(tbb_ver "tbb41_20121003oss") @@ -107,7 +125,7 @@ if(NOT EXISTS "${tbb_src_dir}") RESULT_VARIABLE tbb_untar_RESULT) if(NOT tbb_untar_RESULT EQUAL 0 OR NOT EXISTS "${tbb_src_dir}") - message(FATAL_ERROR "Failed to unpack TBB sources") + message(FATAL_ERROR "Failed to unpack TBB sources from ${tbb_tarball} to ${tbb_src_dir} with error ${tbb_untar_RESULT}") endif() endif() @@ -123,13 +141,22 @@ file(GLOB lib_hdrs "${tbb_src_dir}/src/tbb/*.h") list(APPEND lib_srcs "${tbb_src_dir}/src/rml/client/rml_tbb.cpp") if (WIN32) - add_definitions(-D__TBB_DYNAMIC_LOAD_ENABLED=0 - -D__TBB_BUILD=1 - -D_UNICODE - -DUNICODE - -DWINAPI_FAMILY=WINAPI_FAMILY_APP - -DDO_ITT_NOTIFY=0 + add_definitions(/D__TBB_DYNAMIC_LOAD_ENABLED=0 + /D__TBB_BUILD=1 + /DTBB_NO_LEGACY=1 + /D_UNICODE + /DUNICODE + /DWINAPI_FAMILY=WINAPI_FAMILY_APP + /DDO_ITT_NOTIFY=0 + /DUSE_WINTHREAD ) # defines were copied from windows.cl.inc + + if (ARM) + add_definitions(/D_WIN32_WINNT=0x0602 + /D__TBB_WIN32_USE_CL_BUILTINS + ) + endif() + set(CMAKE_LINKER_FLAGS "${CMAKE_LINKER_FLAGS} /APPCONTAINER") else() add_definitions(-D__TBB_DYNAMIC_LOAD_ENABLED=0 #required @@ -173,7 +200,23 @@ endif() set(TBB_SOURCE_FILES ${TBB_SOURCE_FILES} "${CMAKE_CURRENT_SOURCE_DIR}/${tbb_version_file}") add_library(tbb ${TBB_SOURCE_FILES}) -target_link_libraries(tbb c m dl) + +if (WIN32) + if (ARM) + set(platform_macro /D_M_ARM=1) + endif() + + add_custom_command(TARGET tbb + PRE_BUILD + COMMAND ${CMAKE_C_COMPILER} /nologo /TC /EP ${tbb_src_dir}\\src\\tbb\\win32-tbb-export.def /DTBB_NO_LEGACY=1 /D_CRT_SECURE_NO_DEPRECATE /D__TBB_BUILD=1 ${platform_macro} /I${tbb_src_dir}\\src /I${tbb_src_dir}\\include > "${tbb_src_dir}\\src\\tbb\\tbb.def" + WORKING_DIRECTORY ${tbb_src_dir}\\src\\tbb + COMMENT "Generating tbb.def file" VERBATIM + ) + + 
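+  # The PRE_BUILD step above only preprocesses win32-tbb-export.def (cl /TC /EP
+  # expands it as C source) and redirects the output to tbb.def; the
+  # /DEF:<tbb_src_dir>/src/tbb/tbb.def linker flag set just below is what makes
+  # the linker export the TBB symbols from the resulting DLL.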
set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} /DEF:${tbb_src_dir}/src/tbb/tbb.def /DLL /MAP /fixed:no /INCREMENTAL:NO") +else() + target_link_libraries(tbb c m dl) +endif() ocv_warnings_disable(CMAKE_CXX_FLAGS -Wundef -Wmissing-declarations) string(REPLACE "-Werror=non-virtual-dtor" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}") @@ -182,6 +225,7 @@ set_target_properties(tbb PROPERTIES OUTPUT_NAME tbb DEBUG_POSTFIX "${OPENCV_DEBUG_POSTFIX}" ARCHIVE_OUTPUT_DIRECTORY ${3P_LIBRARY_OUTPUT_PATH} + RUNTIME_OUTPUT_DIRECTORY ${EXECUTABLE_OUTPUT_PATH} ) if(ENABLE_SOLUTION_FOLDERS) diff --git a/android/android.toolchain.cmake b/android/android.toolchain.cmake deleted file mode 100644 index 9db174a138..0000000000 --- a/android/android.toolchain.cmake +++ /dev/null @@ -1,1747 +0,0 @@ -message(STATUS "Android toolchain was moved to platfroms/android!") -message(STATUS "This file is depricated and will be removed!") - -# Copyright (c) 2010-2011, Ethan Rublee -# Copyright (c) 2011-2013, Andrey Kamaev -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are met: -# -# 1. Redistributions of source code must retain the above copyright notice, -# this list of conditions and the following disclaimer. -# -# 2. Redistributions in binary form must reproduce the above copyright notice, -# this list of conditions and the following disclaimer in the documentation -# and/or other materials provided with the distribution. -# -# 3. The name of the copyright holders may be used to endorse or promote -# products derived from this software without specific prior written -# permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE -# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -# POSSIBILITY OF SUCH DAMAGE. - -# ------------------------------------------------------------------------------ -# Android CMake toolchain file, for use with the Android NDK r5-r8 -# Requires cmake 2.6.3 or newer (2.8.5 or newer is recommended). -# See home page: https://github.com/taka-no-me/android-cmake -# -# The file is mantained by the OpenCV project. The latest version can be get at -# http://code.opencv.org/projects/opencv/repository/revisions/master/changes/android/android.toolchain.cmake -# -# Usage Linux: -# $ export ANDROID_NDK=/absolute/path/to/the/android-ndk -# $ mkdir build && cd build -# $ cmake -DCMAKE_TOOLCHAIN_FILE=path/to/the/android.toolchain.cmake .. -# $ make -j8 -# -# Usage Linux (using standalone toolchain): -# $ export ANDROID_STANDALONE_TOOLCHAIN=/absolute/path/to/android-toolchain -# $ mkdir build && cd build -# $ cmake -DCMAKE_TOOLCHAIN_FILE=path/to/the/android.toolchain.cmake .. -# $ make -j8 -# -# Usage Windows: -# You need native port of make to build your project. -# Android NDK r7 (or newer) already has make.exe on board. 
-# For older NDK you have to install it separately. -# For example, this one: http://gnuwin32.sourceforge.net/packages/make.htm -# -# $ SET ANDROID_NDK=C:\absolute\path\to\the\android-ndk -# $ mkdir build && cd build -# $ cmake.exe -G"MinGW Makefiles" -# -DCMAKE_TOOLCHAIN_FILE=path\to\the\android.toolchain.cmake -# -DCMAKE_MAKE_PROGRAM="%ANDROID_NDK%\prebuilt\windows\bin\make.exe" .. -# $ cmake.exe --build . -# -# -# Options (can be set as cmake parameters: -D=): -# ANDROID_NDK=/opt/android-ndk - path to the NDK root. -# Can be set as environment variable. Can be set only at first cmake run. -# -# ANDROID_STANDALONE_TOOLCHAIN=/opt/android-toolchain - path to the -# standalone toolchain. This option is not used if full NDK is found -# (ignored if ANDROID_NDK is set). -# Can be set as environment variable. Can be set only at first cmake run. -# -# ANDROID_ABI=armeabi-v7a - specifies the target Application Binary -# Interface (ABI). This option nearly matches to the APP_ABI variable -# used by ndk-build tool from Android NDK. -# -# Possible targets are: -# "armeabi" - matches to the NDK ABI with the same name. -# See ${ANDROID_NDK}/docs/CPU-ARCH-ABIS.html for the documentation. -# "armeabi-v7a" - matches to the NDK ABI with the same name. -# See ${ANDROID_NDK}/docs/CPU-ARCH-ABIS.html for the documentation. -# "armeabi-v7a with NEON" - same as armeabi-v7a, but -# sets NEON as floating-point unit -# "armeabi-v7a with VFPV3" - same as armeabi-v7a, but -# sets VFPV3 as floating-point unit (has 32 registers instead of 16). -# "armeabi-v6 with VFP" - tuned for ARMv6 processors having VFP. -# "x86" - matches to the NDK ABI with the same name. -# See ${ANDROID_NDK}/docs/CPU-ARCH-ABIS.html for the documentation. -# "mips" - matches to the NDK ABI with the same name -# (It is not tested on real devices by the authos of this toolchain) -# See ${ANDROID_NDK}/docs/CPU-ARCH-ABIS.html for the documentation. -# -# ANDROID_NATIVE_API_LEVEL=android-8 - level of Android API compile for. -# Option is read-only when standalone toolchain is used. -# -# ANDROID_TOOLCHAIN_NAME=arm-linux-androideabi-4.6 - the name of compiler -# toolchain to be used. The list of possible values depends on the NDK -# version. For NDK r8c the possible values are: -# -# * arm-linux-androideabi-4.4.3 -# * arm-linux-androideabi-4.6 -# * arm-linux-androideabi-clang3.1 -# * mipsel-linux-android-4.4.3 -# * mipsel-linux-android-4.6 -# * mipsel-linux-android-clang3.1 -# * x86-4.4.3 -# * x86-4.6 -# * x86-clang3.1 -# -# ANDROID_FORCE_ARM_BUILD=OFF - set ON to generate 32-bit ARM instructions -# instead of Thumb. Is not available for "x86" (inapplicable) and -# "armeabi-v6 with VFP" (is forced to be ON) ABIs. -# -# ANDROID_NO_UNDEFINED=ON - set ON to show all undefined symbols as linker -# errors even if they are not used. -# -# ANDROID_SO_UNDEFINED=OFF - set ON to allow undefined symbols in shared -# libraries. Automatically turned for NDK r5x and r6x due to GLESv2 -# problems. -# -# LIBRARY_OUTPUT_PATH_ROOT=${CMAKE_SOURCE_DIR} - where to output binary -# files. See additional details below. -# -# ANDROID_SET_OBSOLETE_VARIABLES=ON - if set, then toolchain defines some -# obsolete variables which were used by previous versions of this file for -# backward compatibility. -# -# ANDROID_STL=gnustl_static - specify the runtime to use. -# -# Possible values are: -# none -> Do not configure the runtime. -# system -> Use the default minimal system C++ runtime library. -# Implies -fno-rtti -fno-exceptions. -# Is not available for standalone toolchain. 
-# system_re -> Use the default minimal system C++ runtime library. -# Implies -frtti -fexceptions. -# Is not available for standalone toolchain. -# gabi++_static -> Use the GAbi++ runtime as a static library. -# Implies -frtti -fno-exceptions. -# Available for NDK r7 and newer. -# Is not available for standalone toolchain. -# gabi++_shared -> Use the GAbi++ runtime as a shared library. -# Implies -frtti -fno-exceptions. -# Available for NDK r7 and newer. -# Is not available for standalone toolchain. -# stlport_static -> Use the STLport runtime as a static library. -# Implies -fno-rtti -fno-exceptions for NDK before r7. -# Implies -frtti -fno-exceptions for NDK r7 and newer. -# Is not available for standalone toolchain. -# stlport_shared -> Use the STLport runtime as a shared library. -# Implies -fno-rtti -fno-exceptions for NDK before r7. -# Implies -frtti -fno-exceptions for NDK r7 and newer. -# Is not available for standalone toolchain. -# gnustl_static -> Use the GNU STL as a static library. -# Implies -frtti -fexceptions. -# gnustl_shared -> Use the GNU STL as a shared library. -# Implies -frtti -fno-exceptions. -# Available for NDK r7b and newer. -# Silently degrades to gnustl_static if not available. -# -# ANDROID_STL_FORCE_FEATURES=ON - turn rtti and exceptions support based on -# chosen runtime. If disabled, then the user is responsible for settings -# these options. -# -# What?: -# android-cmake toolchain searches for NDK/toolchain in the following order: -# ANDROID_NDK - cmake parameter -# ANDROID_NDK - environment variable -# ANDROID_STANDALONE_TOOLCHAIN - cmake parameter -# ANDROID_STANDALONE_TOOLCHAIN - environment variable -# ANDROID_NDK - default locations -# ANDROID_STANDALONE_TOOLCHAIN - default locations -# -# Make sure to do the following in your scripts: -# SET( CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${my_cxx_flags}" ) -# SET( CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${my_cxx_flags}" ) -# The flags will be prepopulated with critical flags, so don't loose them. -# Also be aware that toolchain also sets configuration-specific compiler -# flags and linker flags. -# -# ANDROID and BUILD_ANDROID will be set to true, you may test any of these -# variables to make necessary Android-specific configuration changes. -# -# Also ARMEABI or ARMEABI_V7A or X86 or MIPS will be set true, mutually -# exclusive. NEON option will be set true if VFP is set to NEON. -# -# LIBRARY_OUTPUT_PATH_ROOT should be set in cache to determine where Android -# libraries will be installed. -# Default is ${CMAKE_SOURCE_DIR}, and the android libs will always be -# under the ${LIBRARY_OUTPUT_PATH_ROOT}/libs/${ANDROID_NDK_ABI_NAME} -# (depending on the target ABI). This is convenient for Android packaging. 
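The option block above can be combined into a single configure call; as a sketch, using only values documented in this file (the NDK path shown is the documented default location, not a requirement):

    $ cmake -DCMAKE_TOOLCHAIN_FILE=path/to/the/android.toolchain.cmake \
            -DANDROID_NDK=/opt/android-ndk \
            -DANDROID_ABI="armeabi-v7a with NEON" \
            -DANDROID_NATIVE_API_LEVEL=android-8 \
            -DANDROID_STL=gnustl_static ..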
-# -# Change Log: -# - initial version December 2010 -# - April 2011 -# [+] added possibility to build with NDK (without standalone toolchain) -# [+] support cross-compilation on Windows (native, no cygwin support) -# [+] added compiler option to force "char" type to be signed -# [+] added toolchain option to compile to 32-bit ARM instructions -# [+] added toolchain option to disable SWIG search -# [+] added platform "armeabi-v7a with VFPV3" -# [~] ARM_TARGETS renamed to ARM_TARGET -# [+] EXECUTABLE_OUTPUT_PATH is set by toolchain (required on Windows) -# [~] Fixed bug with ANDROID_API_LEVEL variable -# [~] turn off SWIG search if it is not found first time -# - May 2011 -# [~] ANDROID_LEVEL is renamed to ANDROID_API_LEVEL -# [+] ANDROID_API_LEVEL is detected by toolchain if not specified -# [~] added guard to prevent changing of output directories on the first -# cmake pass -# [~] toolchain exits with error if ARM_TARGET is not recognized -# - June 2011 -# [~] default NDK path is updated for version r5c -# [+] variable CMAKE_SYSTEM_PROCESSOR is set based on ARM_TARGET -# [~] toolchain install directory is added to linker paths -# [-] removed SWIG-related stuff from toolchain -# [+] added macro find_host_package, find_host_program to search -# packages/programs on the host system -# [~] fixed path to STL library -# - July 2011 -# [~] fixed options caching -# [~] search for all supported NDK versions -# [~] allowed spaces in NDK path -# - September 2011 -# [~] updated for NDK r6b -# - November 2011 -# [*] rewritten for NDK r7 -# [+] x86 toolchain support (experimental) -# [+] added "armeabi-v6 with VFP" ABI for ARMv6 processors. -# [~] improved compiler and linker flags management -# [+] support different build flags for Release and Debug configurations -# [~] by default compiler flags the same as used by ndk-build (but only -# where reasonable) -# [~] ANDROID_NDK_TOOLCHAIN_ROOT is splitted to ANDROID_STANDALONE_TOOLCHAIN -# and ANDROID_TOOLCHAIN_ROOT -# [~] ARM_TARGET is renamed to ANDROID_ABI -# [~] ARMEABI_NDK_NAME is renamed to ANDROID_NDK_ABI_NAME -# [~] ANDROID_API_LEVEL is renamed to ANDROID_NATIVE_API_LEVEL -# - January 2012 -# [+] added stlport_static support (experimental) -# [+] added special check for cygwin -# [+] filtered out hidden files (starting with .) 
while globbing inside NDK -# [+] automatically applied GLESv2 linkage fix for NDK revisions 5-6 -# [+] added ANDROID_GET_ABI_RAWNAME to get NDK ABI names by CMake flags -# - February 2012 -# [+] updated for NDK r7b -# [~] fixed cmake try_compile() command -# [~] Fix for missing install_name_tool on OS X -# - March 2012 -# [~] fixed incorrect C compiler flags -# [~] fixed CMAKE_SYSTEM_PROCESSOR change on ANDROID_ABI change -# [+] improved toolchain loading speed -# [+] added assembler language support (.S) -# [+] allowed preset search paths and extra search suffixes -# - April 2012 -# [+] updated for NDK r7c -# [~] fixed most of problems with compiler/linker flags and caching -# [+] added option ANDROID_FUNCTION_LEVEL_LINKING -# - May 2012 -# [+] updated for NDK r8 -# [+] added mips architecture support -# - August 2012 -# [+] updated for NDK r8b -# [~] all intermediate files generated by toolchain are moved to CMakeFiles -# [~] libstdc++ and libsupc are removed from explicit link libraries -# [+] added CCache support (via NDK_CCACHE environment or cmake variable) -# [+] added gold linker support for NDK r8b -# [~] fixed mips linker flags for NDK r8b -# - September 2012 -# [+] added NDK release name detection (see ANDROID_NDK_RELEASE) -# [+] added support for all C++ runtimes from NDK -# (system, gabi++, stlport, gnustl) -# [+] improved warnings on known issues of NDKs -# [~] use gold linker as default if available (NDK r8b) -# [~] globally turned off rpath -# [~] compiler options are aligned with NDK r8b -# - October 2012 -# [~] fixed C++ linking: explicitly link with math library (OpenCV #2426) -# - November 2012 -# [+] updated for NDK r8c -# [+] added support for clang compiler -# - December 2012 -# [+] suppress warning about unused CMAKE_TOOLCHAIN_FILE variable -# [+] adjust API level to closest compatible as NDK does -# [~] fixed ccache full path search -# [+] updated for NDK r8d -# [~] compiler options are aligned with NDK r8d -# - March 2013 -# [+] updated for NDK r8e (x86 version) -# [+] support x86_64 version of NDK -# - April 2013 -# [+] support non-release NDK layouts (from Linaro git and Android git) -# [~] automatically detect if explicit link to crtbegin_*.o is needed -# ------------------------------------------------------------------------------ - -cmake_minimum_required( VERSION 2.6.3 ) - -if( DEFINED CMAKE_CROSSCOMPILING ) - # subsequent toolchain loading is not really needed - return() -endif() - -if( CMAKE_TOOLCHAIN_FILE ) - # touch toolchain variable only to suppress "unused variable" warning -endif() - -get_property( _CMAKE_IN_TRY_COMPILE GLOBAL PROPERTY IN_TRY_COMPILE ) -if( _CMAKE_IN_TRY_COMPILE ) - include( "${CMAKE_CURRENT_SOURCE_DIR}/../android.toolchain.config.cmake" OPTIONAL ) -endif() - -# this one is important -set( CMAKE_SYSTEM_NAME Linux ) -# this one not so much -set( CMAKE_SYSTEM_VERSION 1 ) - -# rpath makes low sence for Android -set( CMAKE_SKIP_RPATH TRUE CACHE BOOL "If set, runtime paths are not added when using shared libraries." 
) - -set( ANDROID_SUPPORTED_NDK_VERSIONS ${ANDROID_EXTRA_NDK_VERSIONS} -r8e -r8d -r8c -r8b -r8 -r7c -r7b -r7 -r6b -r6 -r5c -r5b -r5 "" ) -if(NOT DEFINED ANDROID_NDK_SEARCH_PATHS) - if( CMAKE_HOST_WIN32 ) - file( TO_CMAKE_PATH "$ENV{PROGRAMFILES}" ANDROID_NDK_SEARCH_PATHS ) - set( ANDROID_NDK_SEARCH_PATHS "${ANDROID_NDK_SEARCH_PATHS}/android-ndk" "$ENV{SystemDrive}/NVPACK/android-ndk" ) - else() - file( TO_CMAKE_PATH "$ENV{HOME}" ANDROID_NDK_SEARCH_PATHS ) - set( ANDROID_NDK_SEARCH_PATHS /opt/android-ndk "${ANDROID_NDK_SEARCH_PATHS}/NVPACK/android-ndk" ) - endif() -endif() -if(NOT DEFINED ANDROID_STANDALONE_TOOLCHAIN_SEARCH_PATH) - set( ANDROID_STANDALONE_TOOLCHAIN_SEARCH_PATH /opt/android-toolchain ) -endif() - -set( ANDROID_SUPPORTED_ABIS_arm "armeabi-v7a;armeabi;armeabi-v7a with NEON;armeabi-v7a with VFPV3;armeabi-v6 with VFP" ) -set( ANDROID_SUPPORTED_ABIS_x86 "x86" ) -set( ANDROID_SUPPORTED_ABIS_mipsel "mips" ) - -set( ANDROID_DEFAULT_NDK_API_LEVEL 8 ) -set( ANDROID_DEFAULT_NDK_API_LEVEL_x86 9 ) -set( ANDROID_DEFAULT_NDK_API_LEVEL_mips 9 ) - - -macro( __LIST_FILTER listvar regex ) - if( ${listvar} ) - foreach( __val ${${listvar}} ) - if( __val MATCHES "${regex}" ) - list( REMOVE_ITEM ${listvar} "${__val}" ) - endif() - endforeach() - endif() -endmacro() - -macro( __INIT_VARIABLE var_name ) - set( __test_path 0 ) - foreach( __var ${ARGN} ) - if( __var STREQUAL "PATH" ) - set( __test_path 1 ) - break() - endif() - endforeach() - if( __test_path AND NOT EXISTS "${${var_name}}" ) - unset( ${var_name} CACHE ) - endif() - if( "${${var_name}}" STREQUAL "" ) - set( __values 0 ) - foreach( __var ${ARGN} ) - if( __var STREQUAL "VALUES" ) - set( __values 1 ) - elseif( NOT __var STREQUAL "PATH" ) - set( __obsolete 0 ) - if( __var MATCHES "^OBSOLETE_.*$" ) - string( REPLACE "OBSOLETE_" "" __var "${__var}" ) - set( __obsolete 1 ) - endif() - if( __var MATCHES "^ENV_.*$" ) - string( REPLACE "ENV_" "" __var "${__var}" ) - set( __value "$ENV{${__var}}" ) - elseif( DEFINED ${__var} ) - set( __value "${${__var}}" ) - else() - if( __values ) - set( __value "${__var}" ) - else() - set( __value "" ) - endif() - endif() - if( NOT "${__value}" STREQUAL "" ) - if( __test_path ) - if( EXISTS "${__value}" ) - file( TO_CMAKE_PATH "${__value}" ${var_name} ) - if( __obsolete AND NOT _CMAKE_IN_TRY_COMPILE ) - message( WARNING "Using value of obsolete variable ${__var} as initial value for ${var_name}. Please note, that ${__var} can be completely removed in future versions of the toolchain." ) - endif() - break() - endif() - else() - set( ${var_name} "${__value}" ) - if( __obsolete AND NOT _CMAKE_IN_TRY_COMPILE ) - message( WARNING "Using value of obsolete variable ${__var} as initial value for ${var_name}. Please note, that ${__var} can be completely removed in future versions of the toolchain." ) - endif() - break() - endif() - endif() - endif() - endforeach() - unset( __value ) - unset( __values ) - unset( __obsolete ) - elseif( __test_path ) - file( TO_CMAKE_PATH "${${var_name}}" ${var_name} ) - endif() - unset( __test_path ) -endmacro() - -macro( __DETECT_NATIVE_API_LEVEL _var _path ) - SET( __ndkApiLevelRegex "^[\t ]*#define[\t ]+__ANDROID_API__[\t ]+([0-9]+)[\t ]*$" ) - FILE( STRINGS ${_path} __apiFileContent REGEX "${__ndkApiLevelRegex}" ) - if( NOT __apiFileContent ) - message( SEND_ERROR "Could not get Android native API level. Probably you have specified invalid level value, or your copy of NDK/toolchain is broken." 
) - endif() - string( REGEX REPLACE "${__ndkApiLevelRegex}" "\\1" ${_var} "${__apiFileContent}" ) - unset( __apiFileContent ) - unset( __ndkApiLevelRegex ) -endmacro() - -macro( __DETECT_TOOLCHAIN_MACHINE_NAME _var _root ) - if( EXISTS "${_root}" ) - file( GLOB __gccExePath RELATIVE "${_root}/bin/" "${_root}/bin/*-gcc${TOOL_OS_SUFFIX}" ) - __LIST_FILTER( __gccExePath "^[.].*" ) - list( LENGTH __gccExePath __gccExePathsCount ) - if( NOT __gccExePathsCount EQUAL 1 AND NOT _CMAKE_IN_TRY_COMPILE ) - message( WARNING "Could not determine machine name for compiler from ${_root}" ) - set( ${_var} "" ) - else() - get_filename_component( __gccExeName "${__gccExePath}" NAME_WE ) - string( REPLACE "-gcc" "" ${_var} "${__gccExeName}" ) - endif() - unset( __gccExePath ) - unset( __gccExePathsCount ) - unset( __gccExeName ) - else() - set( ${_var} "" ) - endif() -endmacro() - - -# fight against cygwin -set( ANDROID_FORBID_SYGWIN TRUE CACHE BOOL "Prevent cmake from working under cygwin and using cygwin tools") -mark_as_advanced( ANDROID_FORBID_SYGWIN ) -if( ANDROID_FORBID_SYGWIN ) - if( CYGWIN ) - message( FATAL_ERROR "Android NDK and android-cmake toolchain are not welcome Cygwin. It is unlikely that this cmake toolchain will work under cygwin. But if you want to try then you can set cmake variable ANDROID_FORBID_SYGWIN to FALSE and rerun cmake." ) - endif() - - if( CMAKE_HOST_WIN32 ) - # remove cygwin from PATH - set( __new_path "$ENV{PATH}") - __LIST_FILTER( __new_path "cygwin" ) - set(ENV{PATH} "${__new_path}") - unset(__new_path) - endif() -endif() - - -# detect current host platform -if( NOT DEFINED ANDROID_NDK_HOST_X64 AND CMAKE_HOST_SYSTEM_PROCESSOR MATCHES "amd64|x86_64|AMD64") - set( ANDROID_NDK_HOST_X64 1 CACHE BOOL "Try to use 64-bit compiler toolchain" ) - mark_as_advanced( ANDROID_NDK_HOST_X64 ) -endif() - -set( TOOL_OS_SUFFIX "" ) -if( CMAKE_HOST_APPLE ) - set( ANDROID_NDK_HOST_SYSTEM_NAME "darwin-x86_64" ) - set( ANDROID_NDK_HOST_SYSTEM_NAME2 "darwin-x86" ) -elseif( CMAKE_HOST_WIN32 ) - set( ANDROID_NDK_HOST_SYSTEM_NAME "windows-x86_64" ) - set( ANDROID_NDK_HOST_SYSTEM_NAME2 "windows" ) - set( TOOL_OS_SUFFIX ".exe" ) -elseif( CMAKE_HOST_UNIX ) - set( ANDROID_NDK_HOST_SYSTEM_NAME "linux-x86_64" ) - set( ANDROID_NDK_HOST_SYSTEM_NAME2 "linux-x86" ) -else() - message( FATAL_ERROR "Cross-compilation on your platform is not supported by this cmake toolchain" ) -endif() - -if( NOT ANDROID_NDK_HOST_X64 ) - set( ANDROID_NDK_HOST_SYSTEM_NAME ${ANDROID_NDK_HOST_SYSTEM_NAME2} ) -endif() - -# see if we have path to Android NDK -__INIT_VARIABLE( ANDROID_NDK PATH ENV_ANDROID_NDK ) -if( NOT ANDROID_NDK ) - # see if we have path to Android standalone toolchain - __INIT_VARIABLE( ANDROID_STANDALONE_TOOLCHAIN PATH ENV_ANDROID_STANDALONE_TOOLCHAIN OBSOLETE_ANDROID_NDK_TOOLCHAIN_ROOT OBSOLETE_ENV_ANDROID_NDK_TOOLCHAIN_ROOT ) - - if( NOT ANDROID_STANDALONE_TOOLCHAIN ) - #try to find Android NDK in one of the the default locations - set( __ndkSearchPaths ) - foreach( __ndkSearchPath ${ANDROID_NDK_SEARCH_PATHS} ) - foreach( suffix ${ANDROID_SUPPORTED_NDK_VERSIONS} ) - list( APPEND __ndkSearchPaths "${__ndkSearchPath}${suffix}" ) - endforeach() - endforeach() - __INIT_VARIABLE( ANDROID_NDK PATH VALUES ${__ndkSearchPaths} ) - unset( __ndkSearchPaths ) - - if( ANDROID_NDK ) - message( STATUS "Using default path for Android NDK: ${ANDROID_NDK}" ) - message( STATUS " If you prefer to use a different location, please define a cmake or environment variable: ANDROID_NDK" ) - else() - #try to find Android standalone 
toolchain in one of the the default locations - __INIT_VARIABLE( ANDROID_STANDALONE_TOOLCHAIN PATH ANDROID_STANDALONE_TOOLCHAIN_SEARCH_PATH ) - - if( ANDROID_STANDALONE_TOOLCHAIN ) - message( STATUS "Using default path for standalone toolchain ${ANDROID_STANDALONE_TOOLCHAIN}" ) - message( STATUS " If you prefer to use a different location, please define the variable: ANDROID_STANDALONE_TOOLCHAIN" ) - endif( ANDROID_STANDALONE_TOOLCHAIN ) - endif( ANDROID_NDK ) - endif( NOT ANDROID_STANDALONE_TOOLCHAIN ) -endif( NOT ANDROID_NDK ) - -# remember found paths -if( ANDROID_NDK ) - get_filename_component( ANDROID_NDK "${ANDROID_NDK}" ABSOLUTE ) - set( ANDROID_NDK "${ANDROID_NDK}" CACHE INTERNAL "Path of the Android NDK" FORCE ) - set( BUILD_WITH_ANDROID_NDK True ) - if( EXISTS "${ANDROID_NDK}/RELEASE.TXT" ) - file( STRINGS "${ANDROID_NDK}/RELEASE.TXT" ANDROID_NDK_RELEASE_FULL LIMIT_COUNT 1 REGEX r[0-9]+[a-z]? ) - string( REGEX MATCH r[0-9]+[a-z]? ANDROID_NDK_RELEASE "${ANDROID_NDK_RELEASE_FULL}" ) - else() - set( ANDROID_NDK_RELEASE "r1x" ) - set( ANDROID_NDK_RELEASE_FULL "unreleased" ) - endif() -elseif( ANDROID_STANDALONE_TOOLCHAIN ) - get_filename_component( ANDROID_STANDALONE_TOOLCHAIN "${ANDROID_STANDALONE_TOOLCHAIN}" ABSOLUTE ) - # try to detect change - if( CMAKE_AR ) - string( LENGTH "${ANDROID_STANDALONE_TOOLCHAIN}" __length ) - string( SUBSTRING "${CMAKE_AR}" 0 ${__length} __androidStandaloneToolchainPreviousPath ) - if( NOT __androidStandaloneToolchainPreviousPath STREQUAL ANDROID_STANDALONE_TOOLCHAIN ) - message( FATAL_ERROR "It is not possible to change path to the Android standalone toolchain on subsequent run." ) - endif() - unset( __androidStandaloneToolchainPreviousPath ) - unset( __length ) - endif() - set( ANDROID_STANDALONE_TOOLCHAIN "${ANDROID_STANDALONE_TOOLCHAIN}" CACHE INTERNAL "Path of the Android standalone toolchain" FORCE ) - set( BUILD_WITH_STANDALONE_TOOLCHAIN True ) -else() - list(GET ANDROID_NDK_SEARCH_PATHS 0 ANDROID_NDK_SEARCH_PATH) - message( FATAL_ERROR "Could not find neither Android NDK nor Android standalone toolchain. 
- You should either set an environment variable: - export ANDROID_NDK=~/my-android-ndk - or - export ANDROID_STANDALONE_TOOLCHAIN=~/my-android-toolchain - or put the toolchain or NDK in the default path: - sudo ln -s ~/my-android-ndk ${ANDROID_NDK_SEARCH_PATH} - sudo ln -s ~/my-android-toolchain ${ANDROID_STANDALONE_TOOLCHAIN_SEARCH_PATH}" ) -endif() - -# android NDK layout -if( BUILD_WITH_ANDROID_NDK ) - if( NOT DEFINED ANDROID_NDK_LAYOUT ) - # try to automatically detect the layout - if( EXISTS "${ANDROID_NDK}/RELEASE.TXT") - set( ANDROID_NDK_LAYOUT "RELEASE" ) - elseif( EXISTS "${ANDROID_NDK}/../../linux-x86/toolchain/" ) - set( ANDROID_NDK_LAYOUT "LINARO" ) - elseif( EXISTS "${ANDROID_NDK}/../../gcc/" ) - set( ANDROID_NDK_LAYOUT "ANDROID" ) - endif() - endif() - set( ANDROID_NDK_LAYOUT "${ANDROID_NDK_LAYOUT}" CACHE STRING "The inner layout of NDK" ) - mark_as_advanced( ANDROID_NDK_LAYOUT ) - if( ANDROID_NDK_LAYOUT STREQUAL "LINARO" ) - set( ANDROID_NDK_HOST_SYSTEM_NAME ${ANDROID_NDK_HOST_SYSTEM_NAME2} ) # only 32-bit at the moment - set( ANDROID_NDK_TOOLCHAINS_PATH "${ANDROID_NDK}/../../${ANDROID_NDK_HOST_SYSTEM_NAME}/toolchain" ) - set( ANDROID_NDK_TOOLCHAINS_SUBPATH "" ) - set( ANDROID_NDK_TOOLCHAINS_SUBPATH2 "" ) - elseif( ANDROID_NDK_LAYOUT STREQUAL "ANDROID" ) - set( ANDROID_NDK_HOST_SYSTEM_NAME ${ANDROID_NDK_HOST_SYSTEM_NAME2} ) # only 32-bit at the moment - set( ANDROID_NDK_TOOLCHAINS_PATH "${ANDROID_NDK}/../../gcc/${ANDROID_NDK_HOST_SYSTEM_NAME}/arm" ) - set( ANDROID_NDK_TOOLCHAINS_SUBPATH "" ) - set( ANDROID_NDK_TOOLCHAINS_SUBPATH2 "" ) - else() # ANDROID_NDK_LAYOUT STREQUAL "RELEASE" - set( ANDROID_NDK_TOOLCHAINS_PATH "${ANDROID_NDK}/toolchains" ) - set( ANDROID_NDK_TOOLCHAINS_SUBPATH "/prebuilt/${ANDROID_NDK_HOST_SYSTEM_NAME}" ) - set( ANDROID_NDK_TOOLCHAINS_SUBPATH2 "/prebuilt/${ANDROID_NDK_HOST_SYSTEM_NAME2}" ) - endif() - get_filename_component( ANDROID_NDK_TOOLCHAINS_PATH "${ANDROID_NDK_TOOLCHAINS_PATH}" ABSOLUTE ) - - # try to detect change of NDK - if( CMAKE_AR ) - string( LENGTH "${ANDROID_NDK_TOOLCHAINS_PATH}" __length ) - string( SUBSTRING "${CMAKE_AR}" 0 ${__length} __androidNdkPreviousPath ) - if( NOT __androidNdkPreviousPath STREQUAL ANDROID_NDK_TOOLCHAINS_PATH ) - message( FATAL_ERROR "It is not possible to change the path to the NDK on subsequent CMake run. You must remove all generated files from your build folder first. - " ) - endif() - unset( __androidNdkPreviousPath ) - unset( __length ) - endif() -endif() - - -# get all the details about standalone toolchain -if( BUILD_WITH_STANDALONE_TOOLCHAIN ) - __DETECT_NATIVE_API_LEVEL( ANDROID_SUPPORTED_NATIVE_API_LEVELS "${ANDROID_STANDALONE_TOOLCHAIN}/sysroot/usr/include/android/api-level.h" ) - set( ANDROID_STANDALONE_TOOLCHAIN_API_LEVEL ${ANDROID_SUPPORTED_NATIVE_API_LEVELS} ) - set( __availableToolchains "standalone" ) - __DETECT_TOOLCHAIN_MACHINE_NAME( __availableToolchainMachines "${ANDROID_STANDALONE_TOOLCHAIN}" ) - if( NOT __availableToolchainMachines ) - message( FATAL_ERROR "Could not determine machine name of your toolchain. Probably your Android standalone toolchain is broken." 
) - endif() - if( __availableToolchainMachines MATCHES i686 ) - set( __availableToolchainArchs "x86" ) - elseif( __availableToolchainMachines MATCHES arm ) - set( __availableToolchainArchs "arm" ) - elseif( __availableToolchainMachines MATCHES mipsel ) - set( __availableToolchainArchs "mipsel" ) - endif() - execute_process( COMMAND "${ANDROID_STANDALONE_TOOLCHAIN}/bin/${__availableToolchainMachines}-gcc${TOOL_OS_SUFFIX}" -dumpversion - OUTPUT_VARIABLE __availableToolchainCompilerVersions OUTPUT_STRIP_TRAILING_WHITESPACE ) - string( REGEX MATCH "[0-9]+[.][0-9]+([.][0-9]+)?" __availableToolchainCompilerVersions "${__availableToolchainCompilerVersions}" ) - if( EXISTS "${ANDROID_STANDALONE_TOOLCHAIN}/bin/clang${TOOL_OS_SUFFIX}" ) - list( APPEND __availableToolchains "standalone-clang" ) - list( APPEND __availableToolchainMachines ${__availableToolchainMachines} ) - list( APPEND __availableToolchainArchs ${__availableToolchainArchs} ) - list( APPEND __availableToolchainCompilerVersions ${__availableToolchainCompilerVersions} ) - endif() -endif() - -macro( __GLOB_NDK_TOOLCHAINS __availableToolchainsVar __availableToolchainsLst __toolchain_subpath ) - foreach( __toolchain ${${__availableToolchainsLst}} ) - if( "${__toolchain}" MATCHES "-clang3[.][0-9]$" AND NOT EXISTS "${ANDROID_NDK_TOOLCHAINS_PATH}/${__toolchain}${__toolchain_subpath}" ) - string( REGEX REPLACE "-clang3[.][0-9]$" "-4.6" __gcc_toolchain "${__toolchain}" ) - else() - set( __gcc_toolchain "${__toolchain}" ) - endif() - __DETECT_TOOLCHAIN_MACHINE_NAME( __machine "${ANDROID_NDK_TOOLCHAINS_PATH}/${__gcc_toolchain}${__toolchain_subpath}" ) - if( __machine ) - string( REGEX MATCH "[0-9]+[.][0-9]+([.][0-9x]+)?$" __version "${__gcc_toolchain}" ) - if( __machine MATCHES i686 ) - set( __arch "x86" ) - elseif( __machine MATCHES arm ) - set( __arch "arm" ) - elseif( __machine MATCHES mipsel ) - set( __arch "mipsel" ) - endif() - list( APPEND __availableToolchainMachines "${__machine}" ) - list( APPEND __availableToolchainArchs "${__arch}" ) - list( APPEND __availableToolchainCompilerVersions "${__version}" ) - list( APPEND ${__availableToolchainsVar} "${__toolchain}" ) - endif() - unset( __gcc_toolchain ) - endforeach() -endmacro() - -# get all the details about NDK -if( BUILD_WITH_ANDROID_NDK ) - file( GLOB ANDROID_SUPPORTED_NATIVE_API_LEVELS RELATIVE "${ANDROID_NDK}/platforms" "${ANDROID_NDK}/platforms/android-*" ) - string( REPLACE "android-" "" ANDROID_SUPPORTED_NATIVE_API_LEVELS "${ANDROID_SUPPORTED_NATIVE_API_LEVELS}" ) - set( __availableToolchains "" ) - set( __availableToolchainMachines "" ) - set( __availableToolchainArchs "" ) - set( __availableToolchainCompilerVersions "" ) - if( ANDROID_TOOLCHAIN_NAME AND EXISTS "${ANDROID_NDK_TOOLCHAINS_PATH}/${ANDROID_TOOLCHAIN_NAME}/" ) - # do not go through all toolchains if we know the name - set( __availableToolchainsLst "${ANDROID_TOOLCHAIN_NAME}" ) - __GLOB_NDK_TOOLCHAINS( __availableToolchains __availableToolchainsLst "${ANDROID_NDK_TOOLCHAINS_SUBPATH}" ) - if( NOT __availableToolchains AND NOT ANDROID_NDK_TOOLCHAINS_SUBPATH STREQUAL ANDROID_NDK_TOOLCHAINS_SUBPATH2 ) - __GLOB_NDK_TOOLCHAINS( __availableToolchains __availableToolchainsLst "${ANDROID_NDK_TOOLCHAINS_SUBPATH2}" ) - if( __availableToolchains ) - set( ANDROID_NDK_TOOLCHAINS_SUBPATH ${ANDROID_NDK_TOOLCHAINS_SUBPATH2} ) - endif() - endif() - endif() - if( NOT __availableToolchains ) - file( GLOB __availableToolchainsLst RELATIVE "${ANDROID_NDK_TOOLCHAINS_PATH}" "${ANDROID_NDK_TOOLCHAINS_PATH}/*" ) - if( __availableToolchains ) - 
list(SORT __availableToolchainsLst) # we need clang to go after gcc - endif() - __LIST_FILTER( __availableToolchainsLst "^[.]" ) - __LIST_FILTER( __availableToolchainsLst "llvm" ) - __GLOB_NDK_TOOLCHAINS( __availableToolchains __availableToolchainsLst "${ANDROID_NDK_TOOLCHAINS_SUBPATH}" ) - if( NOT __availableToolchains AND NOT ANDROID_NDK_TOOLCHAINS_SUBPATH STREQUAL ANDROID_NDK_TOOLCHAINS_SUBPATH2 ) - __GLOB_NDK_TOOLCHAINS( __availableToolchains __availableToolchainsLst "${ANDROID_NDK_TOOLCHAINS_SUBPATH2}" ) - if( __availableToolchains ) - set( ANDROID_NDK_TOOLCHAINS_SUBPATH ${ANDROID_NDK_TOOLCHAINS_SUBPATH2} ) - endif() - endif() - endif() - if( NOT __availableToolchains ) - message( FATAL_ERROR "Could not find any working toolchain in the NDK. Probably your Android NDK is broken." ) - endif() -endif() - -# build list of available ABIs -set( ANDROID_SUPPORTED_ABIS "" ) -set( __uniqToolchainArchNames ${__availableToolchainArchs} ) -list( REMOVE_DUPLICATES __uniqToolchainArchNames ) -list( SORT __uniqToolchainArchNames ) -foreach( __arch ${__uniqToolchainArchNames} ) - list( APPEND ANDROID_SUPPORTED_ABIS ${ANDROID_SUPPORTED_ABIS_${__arch}} ) -endforeach() -unset( __uniqToolchainArchNames ) -if( NOT ANDROID_SUPPORTED_ABIS ) - message( FATAL_ERROR "No one of known Android ABIs is supported by this cmake toolchain." ) -endif() - -# choose target ABI -__INIT_VARIABLE( ANDROID_ABI OBSOLETE_ARM_TARGET OBSOLETE_ARM_TARGETS VALUES ${ANDROID_SUPPORTED_ABIS} ) -# verify that target ABI is supported -list( FIND ANDROID_SUPPORTED_ABIS "${ANDROID_ABI}" __androidAbiIdx ) -if( __androidAbiIdx EQUAL -1 ) - string( REPLACE ";" "\", \"", PRINTABLE_ANDROID_SUPPORTED_ABIS "${ANDROID_SUPPORTED_ABIS}" ) - message( FATAL_ERROR "Specified ANDROID_ABI = \"${ANDROID_ABI}\" is not supported by this cmake toolchain or your NDK/toolchain. 
- Supported values are: \"${PRINTABLE_ANDROID_SUPPORTED_ABIS}\" - " ) -endif() -unset( __androidAbiIdx ) - -# set target ABI options -if( ANDROID_ABI STREQUAL "x86" ) - set( X86 true ) - set( ANDROID_NDK_ABI_NAME "x86" ) - set( ANDROID_ARCH_NAME "x86" ) - set( ANDROID_ARCH_FULLNAME "x86" ) - set( ANDROID_LLVM_TRIPLE "i686-none-linux-android" ) - set( CMAKE_SYSTEM_PROCESSOR "i686" ) -elseif( ANDROID_ABI STREQUAL "mips" ) - set( MIPS true ) - set( ANDROID_NDK_ABI_NAME "mips" ) - set( ANDROID_ARCH_NAME "mips" ) - set( ANDROID_ARCH_FULLNAME "mipsel" ) - set( ANDROID_LLVM_TRIPLE "mipsel-none-linux-android" ) - set( CMAKE_SYSTEM_PROCESSOR "mips" ) -elseif( ANDROID_ABI STREQUAL "armeabi" ) - set( ARMEABI true ) - set( ANDROID_NDK_ABI_NAME "armeabi" ) - set( ANDROID_ARCH_NAME "arm" ) - set( ANDROID_ARCH_FULLNAME "arm" ) - set( ANDROID_LLVM_TRIPLE "armv5te-none-linux-androideabi" ) - set( CMAKE_SYSTEM_PROCESSOR "armv5te" ) -elseif( ANDROID_ABI STREQUAL "armeabi-v6 with VFP" ) - set( ARMEABI_V6 true ) - set( ANDROID_NDK_ABI_NAME "armeabi" ) - set( ANDROID_ARCH_NAME "arm" ) - set( ANDROID_ARCH_FULLNAME "arm" ) - set( ANDROID_LLVM_TRIPLE "armv5te-none-linux-androideabi" ) - set( CMAKE_SYSTEM_PROCESSOR "armv6" ) - # need always fallback to older platform - set( ARMEABI true ) -elseif( ANDROID_ABI STREQUAL "armeabi-v7a") - set( ARMEABI_V7A true ) - set( ANDROID_NDK_ABI_NAME "armeabi-v7a" ) - set( ANDROID_ARCH_NAME "arm" ) - set( ANDROID_ARCH_FULLNAME "arm" ) - set( ANDROID_LLVM_TRIPLE "armv7-none-linux-androideabi" ) - set( CMAKE_SYSTEM_PROCESSOR "armv7-a" ) -elseif( ANDROID_ABI STREQUAL "armeabi-v7a with VFPV3" ) - set( ARMEABI_V7A true ) - set( ANDROID_NDK_ABI_NAME "armeabi-v7a" ) - set( ANDROID_ARCH_NAME "arm" ) - set( ANDROID_ARCH_FULLNAME "arm" ) - set( ANDROID_LLVM_TRIPLE "armv7-none-linux-androideabi" ) - set( CMAKE_SYSTEM_PROCESSOR "armv7-a" ) - set( VFPV3 true ) -elseif( ANDROID_ABI STREQUAL "armeabi-v7a with NEON" ) - set( ARMEABI_V7A true ) - set( ANDROID_NDK_ABI_NAME "armeabi-v7a" ) - set( ANDROID_ARCH_NAME "arm" ) - set( ANDROID_ARCH_FULLNAME "arm" ) - set( ANDROID_LLVM_TRIPLE "armv7-none-linux-androideabi" ) - set( CMAKE_SYSTEM_PROCESSOR "armv7-a" ) - set( VFPV3 true ) - set( NEON true ) -else() - message( SEND_ERROR "Unknown ANDROID_ABI=\"${ANDROID_ABI}\" is specified." ) -endif() - -if( CMAKE_BINARY_DIR AND EXISTS "${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/CMakeSystem.cmake" ) - # really dirty hack - # it is not possible to change CMAKE_SYSTEM_PROCESSOR after the first run... - file( APPEND "${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/CMakeSystem.cmake" "SET(CMAKE_SYSTEM_PROCESSOR \"${CMAKE_SYSTEM_PROCESSOR}\")\n" ) -endif() - -if( ANDROID_ARCH_NAME STREQUAL "arm" AND NOT ARMEABI_V6 ) - __INIT_VARIABLE( ANDROID_FORCE_ARM_BUILD OBSOLETE_FORCE_ARM VALUES OFF ) - set( ANDROID_FORCE_ARM_BUILD ${ANDROID_FORCE_ARM_BUILD} CACHE BOOL "Use 32-bit ARM instructions instead of Thumb-1" FORCE ) - mark_as_advanced( ANDROID_FORCE_ARM_BUILD ) -else() - unset( ANDROID_FORCE_ARM_BUILD CACHE ) -endif() - -# choose toolchain -if( ANDROID_TOOLCHAIN_NAME ) - list( FIND __availableToolchains "${ANDROID_TOOLCHAIN_NAME}" __toolchainIdx ) - if( __toolchainIdx EQUAL -1 ) - list( SORT __availableToolchains ) - string( REPLACE ";" "\n * " toolchains_list "${__availableToolchains}" ) - set( toolchains_list " * ${toolchains_list}") - message( FATAL_ERROR "Specified toolchain \"${ANDROID_TOOLCHAIN_NAME}\" is missing in your NDK or broken. Please verify that your NDK is working or select another compiler toolchain. 
-To configure the toolchain set CMake variable ANDROID_TOOLCHAIN_NAME to one of the following values:\n${toolchains_list}\n" ) - endif() - list( GET __availableToolchainArchs ${__toolchainIdx} __toolchainArch ) - if( NOT __toolchainArch STREQUAL ANDROID_ARCH_FULLNAME ) - message( SEND_ERROR "Selected toolchain \"${ANDROID_TOOLCHAIN_NAME}\" is not able to compile binaries for the \"${ANDROID_ARCH_NAME}\" platform." ) - endif() -else() - set( __toolchainIdx -1 ) - set( __applicableToolchains "" ) - set( __toolchainMaxVersion "0.0.0" ) - list( LENGTH __availableToolchains __availableToolchainsCount ) - math( EXPR __availableToolchainsCount "${__availableToolchainsCount}-1" ) - foreach( __idx RANGE ${__availableToolchainsCount} ) - list( GET __availableToolchainArchs ${__idx} __toolchainArch ) - if( __toolchainArch STREQUAL ANDROID_ARCH_FULLNAME ) - list( GET __availableToolchainCompilerVersions ${__idx} __toolchainVersion ) - string( REPLACE "x" "99" __toolchainVersion "${__toolchainVersion}") - if( __toolchainVersion VERSION_GREATER __toolchainMaxVersion ) - set( __toolchainMaxVersion "${__toolchainVersion}" ) - set( __toolchainIdx ${__idx} ) - endif() - endif() - endforeach() - unset( __availableToolchainsCount ) - unset( __toolchainMaxVersion ) - unset( __toolchainVersion ) -endif() -unset( __toolchainArch ) -if( __toolchainIdx EQUAL -1 ) - message( FATAL_ERROR "No one of available compiler toolchains is able to compile for ${ANDROID_ARCH_NAME} platform." ) -endif() -list( GET __availableToolchains ${__toolchainIdx} ANDROID_TOOLCHAIN_NAME ) -list( GET __availableToolchainMachines ${__toolchainIdx} ANDROID_TOOLCHAIN_MACHINE_NAME ) -list( GET __availableToolchainCompilerVersions ${__toolchainIdx} ANDROID_COMPILER_VERSION ) - -unset( __toolchainIdx ) -unset( __availableToolchains ) -unset( __availableToolchainMachines ) -unset( __availableToolchainArchs ) -unset( __availableToolchainCompilerVersions ) - -# choose native API level -__INIT_VARIABLE( ANDROID_NATIVE_API_LEVEL ENV_ANDROID_NATIVE_API_LEVEL ANDROID_API_LEVEL ENV_ANDROID_API_LEVEL ANDROID_STANDALONE_TOOLCHAIN_API_LEVEL ANDROID_DEFAULT_NDK_API_LEVEL_${ANDROID_ARCH_NAME} ANDROID_DEFAULT_NDK_API_LEVEL ) -string( REGEX MATCH "[0-9]+" ANDROID_NATIVE_API_LEVEL "${ANDROID_NATIVE_API_LEVEL}" ) -# adjust API level -set( __real_api_level ${ANDROID_DEFAULT_NDK_API_LEVEL_${ANDROID_ARCH_NAME}} ) -foreach( __level ${ANDROID_SUPPORTED_NATIVE_API_LEVELS} ) - if( NOT __level GREATER ANDROID_NATIVE_API_LEVEL AND NOT __level LESS __real_api_level ) - set( __real_api_level ${__level} ) - endif() -endforeach() -if( __real_api_level AND NOT ANDROID_NATIVE_API_LEVEL EQUAL __real_api_level ) - message( STATUS "Adjusting Android API level 'android-${ANDROID_NATIVE_API_LEVEL}' to 'android-${__real_api_level}'") - set( ANDROID_NATIVE_API_LEVEL ${__real_api_level} ) -endif() -unset(__real_api_level) -# validate -list( FIND ANDROID_SUPPORTED_NATIVE_API_LEVELS "${ANDROID_NATIVE_API_LEVEL}" __levelIdx ) -if( __levelIdx EQUAL -1 ) - message( SEND_ERROR "Specified Android native API level 'android-${ANDROID_NATIVE_API_LEVEL}' is not supported by your NDK/toolchain." ) -else() - if( BUILD_WITH_ANDROID_NDK ) - __DETECT_NATIVE_API_LEVEL( __realApiLevel "${ANDROID_NDK}/platforms/android-${ANDROID_NATIVE_API_LEVEL}/arch-${ANDROID_ARCH_NAME}/usr/include/android/api-level.h" ) - if( NOT __realApiLevel EQUAL ANDROID_NATIVE_API_LEVEL ) - message( SEND_ERROR "Specified Android API level (${ANDROID_NATIVE_API_LEVEL}) does not match to the level found (${__realApiLevel}). 
Probably your copy of NDK is broken." ) - endif() - unset( __realApiLevel ) - endif() - set( ANDROID_NATIVE_API_LEVEL "${ANDROID_NATIVE_API_LEVEL}" CACHE STRING "Android API level for native code" FORCE ) - if( CMAKE_VERSION VERSION_GREATER "2.8" ) - list( SORT ANDROID_SUPPORTED_NATIVE_API_LEVELS ) - set_property( CACHE ANDROID_NATIVE_API_LEVEL PROPERTY STRINGS ${ANDROID_SUPPORTED_NATIVE_API_LEVELS} ) - endif() -endif() -unset( __levelIdx ) - - -# remember target ABI -set( ANDROID_ABI "${ANDROID_ABI}" CACHE STRING "The target ABI for Android. If arm, then armeabi-v7a is recommended for hardware floating point." FORCE ) -if( CMAKE_VERSION VERSION_GREATER "2.8" ) - list( SORT ANDROID_SUPPORTED_ABIS_${ANDROID_ARCH_FULLNAME} ) - set_property( CACHE ANDROID_ABI PROPERTY STRINGS ${ANDROID_SUPPORTED_ABIS_${ANDROID_ARCH_FULLNAME}} ) -endif() - - -# runtime choice (STL, rtti, exceptions) -if( NOT ANDROID_STL ) - # honor legacy ANDROID_USE_STLPORT - if( DEFINED ANDROID_USE_STLPORT ) - if( ANDROID_USE_STLPORT ) - set( ANDROID_STL stlport_static ) - endif() - message( WARNING "You are using an obsolete variable ANDROID_USE_STLPORT to select the STL variant. Use -DANDROID_STL=stlport_static instead." ) - endif() - if( NOT ANDROID_STL ) - set( ANDROID_STL gnustl_static ) - endif() -endif() -set( ANDROID_STL "${ANDROID_STL}" CACHE STRING "C++ runtime" ) -set( ANDROID_STL_FORCE_FEATURES ON CACHE BOOL "automatically configure rtti and exceptions support based on C++ runtime" ) -mark_as_advanced( ANDROID_STL ANDROID_STL_FORCE_FEATURES ) - -if( BUILD_WITH_ANDROID_NDK ) - if( NOT "${ANDROID_STL}" MATCHES "^(none|system|system_re|gabi\\+\\+_static|gabi\\+\\+_shared|stlport_static|stlport_shared|gnustl_static|gnustl_shared)$") - message( FATAL_ERROR "ANDROID_STL is set to invalid value \"${ANDROID_STL}\". -The possible values are: - none -> Do not configure the runtime. - system -> Use the default minimal system C++ runtime library. - system_re -> Same as system but with rtti and exceptions. - gabi++_static -> Use the GAbi++ runtime as a static library. - gabi++_shared -> Use the GAbi++ runtime as a shared library. - stlport_static -> Use the STLport runtime as a static library. - stlport_shared -> Use the STLport runtime as a shared library. - gnustl_static -> (default) Use the GNU STL as a static library. - gnustl_shared -> Use the GNU STL as a shared library. -" ) - endif() -elseif( BUILD_WITH_STANDALONE_TOOLCHAIN ) - if( NOT "${ANDROID_STL}" MATCHES "^(none|gnustl_static|gnustl_shared)$") - message( FATAL_ERROR "ANDROID_STL is set to invalid value \"${ANDROID_STL}\". -The possible values are: - none -> Do not configure the runtime. - gnustl_static -> (default) Use the GNU STL as a static library. - gnustl_shared -> Use the GNU STL as a shared library. -" ) - endif() -endif() - -unset( ANDROID_RTTI ) -unset( ANDROID_EXCEPTIONS ) -unset( ANDROID_STL_INCLUDE_DIRS ) -unset( __libstl ) -unset( __libsupcxx ) - -if( NOT _CMAKE_IN_TRY_COMPILE AND ANDROID_NDK_RELEASE STREQUAL "r7b" AND ARMEABI_V7A AND NOT VFPV3 AND ANDROID_STL MATCHES "gnustl" ) - message( WARNING "The GNU STL armeabi-v7a binaries from NDK r7b can crash non-NEON devices. The files provided with NDK r7b were not configured properly, resulting in crashes on Tegra2-based devices and others when trying to use certain floating-point functions (e.g., cosf, sinf, expf). -You are strongly recommended to switch to another NDK release. 
-" ) -endif() - -if( NOT _CMAKE_IN_TRY_COMPILE AND X86 AND ANDROID_STL MATCHES "gnustl" AND ANDROID_NDK_RELEASE STREQUAL "r6" ) - message( WARNING "The x86 system header file from NDK r6 has incorrect definition for ptrdiff_t. You are recommended to upgrade to a newer NDK release or manually patch the header: -See https://android.googlesource.com/platform/development.git f907f4f9d4e56ccc8093df6fee54454b8bcab6c2 - diff --git a/ndk/platforms/android-9/arch-x86/include/machine/_types.h b/ndk/platforms/android-9/arch-x86/include/machine/_types.h - index 5e28c64..65892a1 100644 - --- a/ndk/platforms/android-9/arch-x86/include/machine/_types.h - +++ b/ndk/platforms/android-9/arch-x86/include/machine/_types.h - @@ -51,7 +51,11 @@ typedef long int ssize_t; - #endif - #ifndef _PTRDIFF_T - #define _PTRDIFF_T - -typedef long ptrdiff_t; - +# ifdef __ANDROID__ - + typedef int ptrdiff_t; - +# else - + typedef long ptrdiff_t; - +# endif - #endif -" ) -endif() - - -# setup paths and STL for standalone toolchain -if( BUILD_WITH_STANDALONE_TOOLCHAIN ) - set( ANDROID_TOOLCHAIN_ROOT "${ANDROID_STANDALONE_TOOLCHAIN}" ) - set( ANDROID_CLANG_TOOLCHAIN_ROOT "${ANDROID_STANDALONE_TOOLCHAIN}" ) - set( ANDROID_SYSROOT "${ANDROID_STANDALONE_TOOLCHAIN}/sysroot" ) - - if( NOT ANDROID_STL STREQUAL "none" ) - set( ANDROID_STL_INCLUDE_DIRS "${ANDROID_STANDALONE_TOOLCHAIN}/${ANDROID_TOOLCHAIN_MACHINE_NAME}/include/c++/${ANDROID_COMPILER_VERSION}" ) - if( ARMEABI_V7A AND EXISTS "${ANDROID_STL_INCLUDE_DIRS}/${ANDROID_TOOLCHAIN_MACHINE_NAME}/${CMAKE_SYSTEM_PROCESSOR}/bits" ) - list( APPEND ANDROID_STL_INCLUDE_DIRS "${ANDROID_STL_INCLUDE_DIRS}/${ANDROID_TOOLCHAIN_MACHINE_NAME}/${CMAKE_SYSTEM_PROCESSOR}" ) - elseif( ARMEABI AND NOT ANDROID_FORCE_ARM_BUILD AND EXISTS "${ANDROID_STL_INCLUDE_DIRS}/${ANDROID_TOOLCHAIN_MACHINE_NAME}/thumb/bits" ) - list( APPEND ANDROID_STL_INCLUDE_DIRS "${ANDROID_STL_INCLUDE_DIRS}/${ANDROID_TOOLCHAIN_MACHINE_NAME}/thumb" ) - else() - list( APPEND ANDROID_STL_INCLUDE_DIRS "${ANDROID_STL_INCLUDE_DIRS}/${ANDROID_TOOLCHAIN_MACHINE_NAME}" ) - endif() - # always search static GNU STL to get the location of libsupc++.a - if( ARMEABI_V7A AND NOT ANDROID_FORCE_ARM_BUILD AND EXISTS "${ANDROID_STANDALONE_TOOLCHAIN}/${ANDROID_TOOLCHAIN_MACHINE_NAME}/lib/${CMAKE_SYSTEM_PROCESSOR}/thumb/libstdc++.a" ) - set( __libstl "${ANDROID_STANDALONE_TOOLCHAIN}/${ANDROID_TOOLCHAIN_MACHINE_NAME}/lib/${CMAKE_SYSTEM_PROCESSOR}/thumb" ) - elseif( ARMEABI_V7A AND EXISTS "${ANDROID_STANDALONE_TOOLCHAIN}/${ANDROID_TOOLCHAIN_MACHINE_NAME}/lib/${CMAKE_SYSTEM_PROCESSOR}/libstdc++.a" ) - set( __libstl "${ANDROID_STANDALONE_TOOLCHAIN}/${ANDROID_TOOLCHAIN_MACHINE_NAME}/lib/${CMAKE_SYSTEM_PROCESSOR}" ) - elseif( ARMEABI AND NOT ANDROID_FORCE_ARM_BUILD AND EXISTS "${ANDROID_STANDALONE_TOOLCHAIN}/${ANDROID_TOOLCHAIN_MACHINE_NAME}/lib/thumb/libstdc++.a" ) - set( __libstl "${ANDROID_STANDALONE_TOOLCHAIN}/${ANDROID_TOOLCHAIN_MACHINE_NAME}/lib/thumb" ) - elseif( EXISTS "${ANDROID_STANDALONE_TOOLCHAIN}/${ANDROID_TOOLCHAIN_MACHINE_NAME}/lib/libstdc++.a" ) - set( __libstl "${ANDROID_STANDALONE_TOOLCHAIN}/${ANDROID_TOOLCHAIN_MACHINE_NAME}/lib" ) - endif() - if( __libstl ) - set( __libsupcxx "${__libstl}/libsupc++.a" ) - set( __libstl "${__libstl}/libstdc++.a" ) - endif() - if( NOT EXISTS "${__libsupcxx}" ) - message( FATAL_ERROR "The required libstdsupc++.a is missing in your standalone toolchain. - Usually it happens because of bug in make-standalone-toolchain.sh script from NDK r7, r7b and r7c. 
- You need to either upgrade to newer NDK or manually copy - $ANDROID_NDK/sources/cxx-stl/gnu-libstdc++/libs/${ANDROID_NDK_ABI_NAME}/libsupc++.a - to - ${__libsupcxx} - " ) - endif() - if( ANDROID_STL STREQUAL "gnustl_shared" ) - if( ARMEABI_V7A AND EXISTS "${ANDROID_STANDALONE_TOOLCHAIN}/${ANDROID_TOOLCHAIN_MACHINE_NAME}/lib/${CMAKE_SYSTEM_PROCESSOR}/libgnustl_shared.so" ) - set( __libstl "${ANDROID_STANDALONE_TOOLCHAIN}/${ANDROID_TOOLCHAIN_MACHINE_NAME}/lib/${CMAKE_SYSTEM_PROCESSOR}/libgnustl_shared.so" ) - elseif( ARMEABI AND NOT ANDROID_FORCE_ARM_BUILD AND EXISTS "${ANDROID_STANDALONE_TOOLCHAIN}/${ANDROID_TOOLCHAIN_MACHINE_NAME}/lib/thumb/libgnustl_shared.so" ) - set( __libstl "${ANDROID_STANDALONE_TOOLCHAIN}/${ANDROID_TOOLCHAIN_MACHINE_NAME}/lib/thumb/libgnustl_shared.so" ) - elseif( EXISTS "${ANDROID_STANDALONE_TOOLCHAIN}/${ANDROID_TOOLCHAIN_MACHINE_NAME}/lib/libgnustl_shared.so" ) - set( __libstl "${ANDROID_STANDALONE_TOOLCHAIN}/${ANDROID_TOOLCHAIN_MACHINE_NAME}/lib/libgnustl_shared.so" ) - endif() - endif() - endif() -endif() - -# clang -if( "${ANDROID_TOOLCHAIN_NAME}" STREQUAL "standalone-clang" ) - set( ANDROID_COMPILER_IS_CLANG 1 ) - execute_process( COMMAND "${ANDROID_CLANG_TOOLCHAIN_ROOT}/bin/clang${TOOL_OS_SUFFIX}" --version OUTPUT_VARIABLE ANDROID_CLANG_VERSION OUTPUT_STRIP_TRAILING_WHITESPACE ) - string( REGEX MATCH "[0-9]+[.][0-9]+" ANDROID_CLANG_VERSION "${ANDROID_CLANG_VERSION}") -elseif( "${ANDROID_TOOLCHAIN_NAME}" MATCHES "-clang3[.][0-9]?$" ) - string( REGEX MATCH "3[.][0-9]$" ANDROID_CLANG_VERSION "${ANDROID_TOOLCHAIN_NAME}") - string( REGEX REPLACE "-clang${ANDROID_CLANG_VERSION}$" "-4.6" ANDROID_GCC_TOOLCHAIN_NAME "${ANDROID_TOOLCHAIN_NAME}" ) - if( NOT EXISTS "${ANDROID_NDK_TOOLCHAINS_PATH}/llvm-${ANDROID_CLANG_VERSION}${ANDROID_NDK_TOOLCHAINS_SUBPATH}/bin/clang${TOOL_OS_SUFFIX}" ) - message( FATAL_ERROR "Could not find the Clang compiler driver" ) - endif() - set( ANDROID_COMPILER_IS_CLANG 1 ) - set( ANDROID_CLANG_TOOLCHAIN_ROOT "${ANDROID_NDK_TOOLCHAINS_PATH}/llvm-${ANDROID_CLANG_VERSION}${ANDROID_NDK_TOOLCHAINS_SUBPATH}" ) -else() - set( ANDROID_GCC_TOOLCHAIN_NAME "${ANDROID_TOOLCHAIN_NAME}" ) - unset( ANDROID_COMPILER_IS_CLANG CACHE ) -endif() - -string( REPLACE "." "" _clang_name "clang${ANDROID_CLANG_VERSION}" ) -if( NOT EXISTS "${ANDROID_CLANG_TOOLCHAIN_ROOT}/bin/${_clang_name}${TOOL_OS_SUFFIX}" ) - set( _clang_name "clang" ) -endif() - - -# setup paths and STL for NDK -if( BUILD_WITH_ANDROID_NDK ) - set( ANDROID_TOOLCHAIN_ROOT "${ANDROID_NDK_TOOLCHAINS_PATH}/${ANDROID_GCC_TOOLCHAIN_NAME}${ANDROID_NDK_TOOLCHAINS_SUBPATH}" ) - set( ANDROID_SYSROOT "${ANDROID_NDK}/platforms/android-${ANDROID_NATIVE_API_LEVEL}/arch-${ANDROID_ARCH_NAME}" ) - - if( ANDROID_STL STREQUAL "none" ) - # do nothing - elseif( ANDROID_STL STREQUAL "system" ) - set( ANDROID_RTTI OFF ) - set( ANDROID_EXCEPTIONS OFF ) - set( ANDROID_STL_INCLUDE_DIRS "${ANDROID_NDK}/sources/cxx-stl/system/include" ) - elseif( ANDROID_STL STREQUAL "system_re" ) - set( ANDROID_RTTI ON ) - set( ANDROID_EXCEPTIONS ON ) - set( ANDROID_STL_INCLUDE_DIRS "${ANDROID_NDK}/sources/cxx-stl/system/include" ) - elseif( ANDROID_STL MATCHES "gabi" ) - if( ANDROID_NDK_RELEASE STRLESS "r7" ) - message( FATAL_ERROR "gabi++ is not awailable in your NDK. 
You have to upgrade to NDK r7 or newer to use gabi++.") - endif() - set( ANDROID_RTTI ON ) - set( ANDROID_EXCEPTIONS OFF ) - set( ANDROID_STL_INCLUDE_DIRS "${ANDROID_NDK}/sources/cxx-stl/gabi++/include" ) - set( __libstl "${ANDROID_NDK}/sources/cxx-stl/gabi++/libs/${ANDROID_NDK_ABI_NAME}/libgabi++_static.a" ) - elseif( ANDROID_STL MATCHES "stlport" ) - if( NOT ANDROID_NDK_RELEASE STRLESS "r8d" ) - set( ANDROID_EXCEPTIONS ON ) - else() - set( ANDROID_EXCEPTIONS OFF ) - endif() - if( ANDROID_NDK_RELEASE STRLESS "r7" ) - set( ANDROID_RTTI OFF ) - else() - set( ANDROID_RTTI ON ) - endif() - set( ANDROID_STL_INCLUDE_DIRS "${ANDROID_NDK}/sources/cxx-stl/stlport/stlport" ) - set( __libstl "${ANDROID_NDK}/sources/cxx-stl/stlport/libs/${ANDROID_NDK_ABI_NAME}/libstlport_static.a" ) - elseif( ANDROID_STL MATCHES "gnustl" ) - set( ANDROID_EXCEPTIONS ON ) - set( ANDROID_RTTI ON ) - if( EXISTS "${ANDROID_NDK}/sources/cxx-stl/gnu-libstdc++/${ANDROID_COMPILER_VERSION}" ) - if( ARMEABI_V7A AND ANDROID_COMPILER_VERSION VERSION_EQUAL "4.7" AND ANDROID_NDK_RELEASE STREQUAL "r8d" ) - # gnustl binary for 4.7 compiler is buggy :( - # TODO: look for right fix - set( __libstl "${ANDROID_NDK}/sources/cxx-stl/gnu-libstdc++/4.6" ) - else() - set( __libstl "${ANDROID_NDK}/sources/cxx-stl/gnu-libstdc++/${ANDROID_COMPILER_VERSION}" ) - endif() - else() - set( __libstl "${ANDROID_NDK}/sources/cxx-stl/gnu-libstdc++" ) - endif() - set( ANDROID_STL_INCLUDE_DIRS "${__libstl}/include" "${__libstl}/libs/${ANDROID_NDK_ABI_NAME}/include" ) - if( EXISTS "${__libstl}/libs/${ANDROID_NDK_ABI_NAME}/libgnustl_static.a" ) - set( __libstl "${__libstl}/libs/${ANDROID_NDK_ABI_NAME}/libgnustl_static.a" ) - else() - set( __libstl "${__libstl}/libs/${ANDROID_NDK_ABI_NAME}/libstdc++.a" ) - endif() - else() - message( FATAL_ERROR "Unknown runtime: ${ANDROID_STL}" ) - endif() - # find libsupc++.a - rtti & exceptions - if( ANDROID_STL STREQUAL "system_re" OR ANDROID_STL MATCHES "gnustl" ) - set( __libsupcxx "${ANDROID_NDK}/sources/cxx-stl/gnu-libstdc++/${ANDROID_COMPILER_VERSION}/libs/${ANDROID_NDK_ABI_NAME}/libsupc++.a" ) # r8b or newer - if( NOT EXISTS "${__libsupcxx}" ) - set( __libsupcxx "${ANDROID_NDK}/sources/cxx-stl/gnu-libstdc++/libs/${ANDROID_NDK_ABI_NAME}/libsupc++.a" ) # r7-r8 - endif() - if( NOT EXISTS "${__libsupcxx}" ) # before r7 - if( ARMEABI_V7A ) - if( ANDROID_FORCE_ARM_BUILD ) - set( __libsupcxx "${ANDROID_TOOLCHAIN_ROOT}/${ANDROID_TOOLCHAIN_MACHINE_NAME}/lib/${CMAKE_SYSTEM_PROCESSOR}/libsupc++.a" ) - else() - set( __libsupcxx "${ANDROID_TOOLCHAIN_ROOT}/${ANDROID_TOOLCHAIN_MACHINE_NAME}/lib/${CMAKE_SYSTEM_PROCESSOR}/thumb/libsupc++.a" ) - endif() - elseif( ARMEABI AND NOT ANDROID_FORCE_ARM_BUILD ) - set( __libsupcxx "${ANDROID_TOOLCHAIN_ROOT}/${ANDROID_TOOLCHAIN_MACHINE_NAME}/lib/thumb/libsupc++.a" ) - else() - set( __libsupcxx "${ANDROID_TOOLCHAIN_ROOT}/${ANDROID_TOOLCHAIN_MACHINE_NAME}/lib/libsupc++.a" ) - endif() - endif() - if( NOT EXISTS "${__libsupcxx}") - message( ERROR "Could not find libsupc++.a for a chosen platform. 
Either your NDK is not supported or is broken.") - endif() - endif() -endif() - - -# case of shared STL linkage -if( ANDROID_STL MATCHES "shared" AND DEFINED __libstl ) - string( REPLACE "_static.a" "_shared.so" __libstl "${__libstl}" ) - if( NOT _CMAKE_IN_TRY_COMPILE AND __libstl MATCHES "[.]so$" ) - get_filename_component( __libstlname "${__libstl}" NAME ) - execute_process( COMMAND "${CMAKE_COMMAND}" -E copy_if_different "${__libstl}" "${LIBRARY_OUTPUT_PATH}/${__libstlname}" RESULT_VARIABLE __fileCopyProcess ) - if( NOT __fileCopyProcess EQUAL 0 OR NOT EXISTS "${LIBRARY_OUTPUT_PATH}/${__libstlname}") - message( SEND_ERROR "Failed copying of ${__libstl} to the ${LIBRARY_OUTPUT_PATH}/${__libstlname}" ) - endif() - unset( __fileCopyProcess ) - unset( __libstlname ) - endif() -endif() - - -# ccache support -__INIT_VARIABLE( _ndk_ccache NDK_CCACHE ENV_NDK_CCACHE ) -if( _ndk_ccache ) - if( DEFINED NDK_CCACHE AND NOT EXISTS NDK_CCACHE ) - unset( NDK_CCACHE CACHE ) - endif() - find_program( NDK_CCACHE "${_ndk_ccache}" DOC "The path to ccache binary") -else() - unset( NDK_CCACHE CACHE ) -endif() -unset( _ndk_ccache ) - - -# setup the cross-compiler -if( NOT CMAKE_C_COMPILER ) - if( NDK_CCACHE AND NOT ANDROID_SYSROOT MATCHES "[ ;\"]" ) - set( CMAKE_C_COMPILER "${NDK_CCACHE}" CACHE PATH "ccache as C compiler" ) - set( CMAKE_CXX_COMPILER "${NDK_CCACHE}" CACHE PATH "ccache as C++ compiler" ) - if( ANDROID_COMPILER_IS_CLANG ) - set( CMAKE_C_COMPILER_ARG1 "${ANDROID_CLANG_TOOLCHAIN_ROOT}/bin/${_clang_name}${TOOL_OS_SUFFIX}" CACHE PATH "C compiler") - set( CMAKE_CXX_COMPILER_ARG1 "${ANDROID_CLANG_TOOLCHAIN_ROOT}/bin/${_clang_name}++${TOOL_OS_SUFFIX}" CACHE PATH "C++ compiler") - else() - set( CMAKE_C_COMPILER_ARG1 "${ANDROID_TOOLCHAIN_ROOT}/bin/${ANDROID_TOOLCHAIN_MACHINE_NAME}-gcc${TOOL_OS_SUFFIX}" CACHE PATH "C compiler") - set( CMAKE_CXX_COMPILER_ARG1 "${ANDROID_TOOLCHAIN_ROOT}/bin/${ANDROID_TOOLCHAIN_MACHINE_NAME}-g++${TOOL_OS_SUFFIX}" CACHE PATH "C++ compiler") - endif() - else() - if( ANDROID_COMPILER_IS_CLANG ) - set( CMAKE_C_COMPILER "${ANDROID_CLANG_TOOLCHAIN_ROOT}/bin/${_clang_name}${TOOL_OS_SUFFIX}" CACHE PATH "C compiler") - set( CMAKE_CXX_COMPILER "${ANDROID_CLANG_TOOLCHAIN_ROOT}/bin/${_clang_name}++${TOOL_OS_SUFFIX}" CACHE PATH "C++ compiler") - else() - set( CMAKE_C_COMPILER "${ANDROID_TOOLCHAIN_ROOT}/bin/${ANDROID_TOOLCHAIN_MACHINE_NAME}-gcc${TOOL_OS_SUFFIX}" CACHE PATH "C compiler" ) - set( CMAKE_CXX_COMPILER "${ANDROID_TOOLCHAIN_ROOT}/bin/${ANDROID_TOOLCHAIN_MACHINE_NAME}-g++${TOOL_OS_SUFFIX}" CACHE PATH "C++ compiler" ) - endif() - endif() - set( CMAKE_ASM_COMPILER "${ANDROID_TOOLCHAIN_ROOT}/bin/${ANDROID_TOOLCHAIN_MACHINE_NAME}-gcc${TOOL_OS_SUFFIX}" CACHE PATH "assembler" ) - set( CMAKE_STRIP "${ANDROID_TOOLCHAIN_ROOT}/bin/${ANDROID_TOOLCHAIN_MACHINE_NAME}-strip${TOOL_OS_SUFFIX}" CACHE PATH "strip" ) - set( CMAKE_AR "${ANDROID_TOOLCHAIN_ROOT}/bin/${ANDROID_TOOLCHAIN_MACHINE_NAME}-ar${TOOL_OS_SUFFIX}" CACHE PATH "archive" ) - set( CMAKE_LINKER "${ANDROID_TOOLCHAIN_ROOT}/bin/${ANDROID_TOOLCHAIN_MACHINE_NAME}-ld${TOOL_OS_SUFFIX}" CACHE PATH "linker" ) - set( CMAKE_NM "${ANDROID_TOOLCHAIN_ROOT}/bin/${ANDROID_TOOLCHAIN_MACHINE_NAME}-nm${TOOL_OS_SUFFIX}" CACHE PATH "nm" ) - set( CMAKE_OBJCOPY "${ANDROID_TOOLCHAIN_ROOT}/bin/${ANDROID_TOOLCHAIN_MACHINE_NAME}-objcopy${TOOL_OS_SUFFIX}" CACHE PATH "objcopy" ) - set( CMAKE_OBJDUMP "${ANDROID_TOOLCHAIN_ROOT}/bin/${ANDROID_TOOLCHAIN_MACHINE_NAME}-objdump${TOOL_OS_SUFFIX}" CACHE PATH "objdump" ) - set( CMAKE_RANLIB 
"${ANDROID_TOOLCHAIN_ROOT}/bin/${ANDROID_TOOLCHAIN_MACHINE_NAME}-ranlib${TOOL_OS_SUFFIX}" CACHE PATH "ranlib" ) -endif() - -set( _CMAKE_TOOLCHAIN_PREFIX "${ANDROID_TOOLCHAIN_MACHINE_NAME}-" ) -if( CMAKE_VERSION VERSION_LESS 2.8.5 ) - set( CMAKE_ASM_COMPILER_ARG1 "-c" ) -endif() -if( APPLE ) - find_program( CMAKE_INSTALL_NAME_TOOL NAMES install_name_tool ) - if( NOT CMAKE_INSTALL_NAME_TOOL ) - message( FATAL_ERROR "Could not find install_name_tool, please check your installation." ) - endif() - mark_as_advanced( CMAKE_INSTALL_NAME_TOOL ) -endif() - -# Force set compilers because standard identification works badly for us -include( CMakeForceCompiler ) -CMAKE_FORCE_C_COMPILER( "${CMAKE_C_COMPILER}" GNU ) -if( ANDROID_COMPILER_IS_CLANG ) - set( CMAKE_C_COMPILER_ID Clang) -endif() -set( CMAKE_C_PLATFORM_ID Linux ) -set( CMAKE_C_SIZEOF_DATA_PTR 4 ) -set( CMAKE_C_HAS_ISYSROOT 1 ) -set( CMAKE_C_COMPILER_ABI ELF ) -CMAKE_FORCE_CXX_COMPILER( "${CMAKE_CXX_COMPILER}" GNU ) -if( ANDROID_COMPILER_IS_CLANG ) - set( CMAKE_CXX_COMPILER_ID Clang) -endif() -set( CMAKE_CXX_PLATFORM_ID Linux ) -set( CMAKE_CXX_SIZEOF_DATA_PTR 4 ) -set( CMAKE_CXX_HAS_ISYSROOT 1 ) -set( CMAKE_CXX_COMPILER_ABI ELF ) -set( CMAKE_CXX_SOURCE_FILE_EXTENSIONS cc cp cxx cpp CPP c++ C ) -# force ASM compiler (required for CMake < 2.8.5) -set( CMAKE_ASM_COMPILER_ID_RUN TRUE ) -set( CMAKE_ASM_COMPILER_ID GNU ) -set( CMAKE_ASM_COMPILER_WORKS TRUE ) -set( CMAKE_ASM_COMPILER_FORCED TRUE ) -set( CMAKE_COMPILER_IS_GNUASM 1) -set( CMAKE_ASM_SOURCE_FILE_EXTENSIONS s S asm ) - -# flags and definitions -remove_definitions( -DANDROID ) -add_definitions( -DANDROID ) - -if( ANDROID_SYSROOT MATCHES "[ ;\"]" ) - if( CMAKE_HOST_WIN32 ) - # try to convert path to 8.3 form - file( WRITE "${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/cvt83.cmd" "@echo %~s1" ) - execute_process( COMMAND "$ENV{ComSpec}" /c "${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/cvt83.cmd" "${ANDROID_SYSROOT}" - OUTPUT_VARIABLE __path OUTPUT_STRIP_TRAILING_WHITESPACE - RESULT_VARIABLE __result ERROR_QUIET ) - if( __result EQUAL 0 ) - file( TO_CMAKE_PATH "${__path}" ANDROID_SYSROOT ) - set( ANDROID_CXX_FLAGS "--sysroot=${ANDROID_SYSROOT}" ) - else() - set( ANDROID_CXX_FLAGS "--sysroot=\"${ANDROID_SYSROOT}\"" ) - endif() - else() - set( ANDROID_CXX_FLAGS "'--sysroot=${ANDROID_SYSROOT}'" ) - endif() - if( NOT _CMAKE_IN_TRY_COMPILE ) - # quotes can break try_compile and compiler identification - message(WARNING "Path to your Android NDK (or toolchain) has non-alphanumeric symbols.\nThe build might be broken.\n") - endif() -else() - set( ANDROID_CXX_FLAGS "--sysroot=${ANDROID_SYSROOT}" ) -endif() - -# NDK flags -if( ARMEABI OR ARMEABI_V7A ) - set( ANDROID_CXX_FLAGS "${ANDROID_CXX_FLAGS} -fpic -funwind-tables" ) - if( NOT ANDROID_FORCE_ARM_BUILD AND NOT ARMEABI_V6 ) - set( ANDROID_CXX_FLAGS_RELEASE "-mthumb -fomit-frame-pointer -fno-strict-aliasing" ) - set( ANDROID_CXX_FLAGS_DEBUG "-marm -fno-omit-frame-pointer -fno-strict-aliasing" ) - if( NOT ANDROID_COMPILER_IS_CLANG ) - set( ANDROID_CXX_FLAGS "${ANDROID_CXX_FLAGS} -finline-limit=64" ) - endif() - else() - # always compile ARMEABI_V6 in arm mode; otherwise there is no difference from ARMEABI - set( ANDROID_CXX_FLAGS_RELEASE "-marm -fomit-frame-pointer -fstrict-aliasing" ) - set( ANDROID_CXX_FLAGS_DEBUG "-marm -fno-omit-frame-pointer -fno-strict-aliasing" ) - if( NOT ANDROID_COMPILER_IS_CLANG ) - set( ANDROID_CXX_FLAGS "${ANDROID_CXX_FLAGS} -funswitch-loops -finline-limit=300" ) - endif() - endif() -elseif( X86 ) - set( ANDROID_CXX_FLAGS 
"${ANDROID_CXX_FLAGS} -funwind-tables" ) - if( NOT ANDROID_COMPILER_IS_CLANG ) - set( ANDROID_CXX_FLAGS "${ANDROID_CXX_FLAGS} -funswitch-loops -finline-limit=300" ) - else() - set( ANDROID_CXX_FLAGS "${ANDROID_CXX_FLAGS} -fPIC" ) - endif() - set( ANDROID_CXX_FLAGS_RELEASE "-fomit-frame-pointer -fstrict-aliasing" ) - set( ANDROID_CXX_FLAGS_DEBUG "-fno-omit-frame-pointer -fno-strict-aliasing" ) -elseif( MIPS ) - set( ANDROID_CXX_FLAGS "${ANDROID_CXX_FLAGS} -fpic -fno-strict-aliasing -finline-functions -ffunction-sections -funwind-tables -fmessage-length=0" ) - set( ANDROID_CXX_FLAGS_RELEASE "-fomit-frame-pointer" ) - set( ANDROID_CXX_FLAGS_DEBUG "-fno-omit-frame-pointer" ) - if( NOT ANDROID_COMPILER_IS_CLANG ) - set( ANDROID_CXX_FLAGS "${ANDROID_CXX_FLAGS} -fno-inline-functions-called-once -fgcse-after-reload -frerun-cse-after-loop -frename-registers" ) - set( ANDROID_CXX_FLAGS_RELEASE "${ANDROID_CXX_FLAGS_RELEASE} -funswitch-loops -finline-limit=300" ) - endif() -elseif() - set( ANDROID_CXX_FLAGS_RELEASE "" ) - set( ANDROID_CXX_FLAGS_DEBUG "" ) -endif() - -set( ANDROID_CXX_FLAGS "${ANDROID_CXX_FLAGS} -fsigned-char" ) # good/necessary when porting desktop libraries - -if( NOT X86 AND NOT ANDROID_COMPILER_IS_CLANG ) - set( ANDROID_CXX_FLAGS "-Wno-psabi ${ANDROID_CXX_FLAGS}" ) -endif() - -if( NOT ANDROID_COMPILER_VERSION VERSION_LESS "4.6" ) - set( ANDROID_CXX_FLAGS "${ANDROID_CXX_FLAGS} -no-canonical-prefixes" ) # see https://android-review.googlesource.com/#/c/47564/ -endif() - -# ABI-specific flags -if( ARMEABI_V7A ) - set( ANDROID_CXX_FLAGS "${ANDROID_CXX_FLAGS} -march=armv7-a -mfloat-abi=softfp" ) - if( NEON ) - set( ANDROID_CXX_FLAGS "${ANDROID_CXX_FLAGS} -mfpu=neon" ) - elseif( VFPV3 ) - set( ANDROID_CXX_FLAGS "${ANDROID_CXX_FLAGS} -mfpu=vfpv3" ) - else() - set( ANDROID_CXX_FLAGS "${ANDROID_CXX_FLAGS} -mfpu=vfpv3-d16" ) - endif() -elseif( ARMEABI_V6 ) - set( ANDROID_CXX_FLAGS "${ANDROID_CXX_FLAGS} -march=armv6 -mfloat-abi=softfp -mfpu=vfp" ) # vfp == vfpv2 -elseif( ARMEABI ) - set( ANDROID_CXX_FLAGS "${ANDROID_CXX_FLAGS} -march=armv5te -mtune=xscale -msoft-float" ) -endif() - -if( ANDROID_STL MATCHES "gnustl" AND (EXISTS "${__libstl}" OR EXISTS "${__libsupcxx}") ) - set( CMAKE_CXX_CREATE_SHARED_LIBRARY " -o " ) - set( CMAKE_CXX_CREATE_SHARED_MODULE " -o " ) - set( CMAKE_CXX_LINK_EXECUTABLE " -o " ) -else() - set( CMAKE_CXX_CREATE_SHARED_LIBRARY " -o " ) - set( CMAKE_CXX_CREATE_SHARED_MODULE " -o " ) - set( CMAKE_CXX_LINK_EXECUTABLE " -o " ) -endif() - -# STL -if( EXISTS "${__libstl}" OR EXISTS "${__libsupcxx}" ) - if( EXISTS "${__libstl}" ) - set( CMAKE_CXX_CREATE_SHARED_LIBRARY "${CMAKE_CXX_CREATE_SHARED_LIBRARY} \"${__libstl}\"" ) - set( CMAKE_CXX_CREATE_SHARED_MODULE "${CMAKE_CXX_CREATE_SHARED_MODULE} \"${__libstl}\"" ) - set( CMAKE_CXX_LINK_EXECUTABLE "${CMAKE_CXX_LINK_EXECUTABLE} \"${__libstl}\"" ) - endif() - if( EXISTS "${__libsupcxx}" ) - set( CMAKE_CXX_CREATE_SHARED_LIBRARY "${CMAKE_CXX_CREATE_SHARED_LIBRARY} \"${__libsupcxx}\"" ) - set( CMAKE_CXX_CREATE_SHARED_MODULE "${CMAKE_CXX_CREATE_SHARED_MODULE} \"${__libsupcxx}\"" ) - set( CMAKE_CXX_LINK_EXECUTABLE "${CMAKE_CXX_LINK_EXECUTABLE} \"${__libsupcxx}\"" ) - # C objects: - set( CMAKE_C_CREATE_SHARED_LIBRARY " -o " ) - set( CMAKE_C_CREATE_SHARED_MODULE " -o " ) - set( CMAKE_C_LINK_EXECUTABLE " -o " ) - set( CMAKE_C_CREATE_SHARED_LIBRARY "${CMAKE_C_CREATE_SHARED_LIBRARY} \"${__libsupcxx}\"" ) - set( CMAKE_C_CREATE_SHARED_MODULE "${CMAKE_C_CREATE_SHARED_MODULE} \"${__libsupcxx}\"" ) - set( CMAKE_C_LINK_EXECUTABLE 
"${CMAKE_C_LINK_EXECUTABLE} \"${__libsupcxx}\"" ) - endif() - if( ANDROID_STL MATCHES "gnustl" ) - if( NOT EXISTS "${ANDROID_LIBM_PATH}" ) - set( ANDROID_LIBM_PATH -lm ) - endif() - set( CMAKE_CXX_CREATE_SHARED_LIBRARY "${CMAKE_CXX_CREATE_SHARED_LIBRARY} ${ANDROID_LIBM_PATH}" ) - set( CMAKE_CXX_CREATE_SHARED_MODULE "${CMAKE_CXX_CREATE_SHARED_MODULE} ${ANDROID_LIBM_PATH}" ) - set( CMAKE_CXX_LINK_EXECUTABLE "${CMAKE_CXX_LINK_EXECUTABLE} ${ANDROID_LIBM_PATH}" ) - endif() -endif() - -# variables controlling optional build flags -if (ANDROID_NDK_RELEASE STRLESS "r7") - # libGLESv2.so in NDK's prior to r7 refers to missing external symbols. - # So this flag option is required for all projects using OpenGL from native. - __INIT_VARIABLE( ANDROID_SO_UNDEFINED VALUES ON ) -else() - __INIT_VARIABLE( ANDROID_SO_UNDEFINED VALUES OFF ) -endif() -__INIT_VARIABLE( ANDROID_NO_UNDEFINED OBSOLETE_NO_UNDEFINED VALUES ON ) -__INIT_VARIABLE( ANDROID_FUNCTION_LEVEL_LINKING VALUES ON ) -__INIT_VARIABLE( ANDROID_GOLD_LINKER VALUES ON ) -__INIT_VARIABLE( ANDROID_NOEXECSTACK VALUES ON ) -__INIT_VARIABLE( ANDROID_RELRO VALUES ON ) - -set( ANDROID_NO_UNDEFINED ${ANDROID_NO_UNDEFINED} CACHE BOOL "Show all undefined symbols as linker errors" ) -set( ANDROID_SO_UNDEFINED ${ANDROID_SO_UNDEFINED} CACHE BOOL "Allows or disallows undefined symbols in shared libraries" ) -set( ANDROID_FUNCTION_LEVEL_LINKING ${ANDROID_FUNCTION_LEVEL_LINKING} CACHE BOOL "Allows or disallows undefined symbols in shared libraries" ) -set( ANDROID_GOLD_LINKER ${ANDROID_GOLD_LINKER} CACHE BOOL "Enables gold linker (only avaialble for NDK r8b for ARM and x86 architectures on linux-86 and darwin-x86 hosts)" ) -set( ANDROID_NOEXECSTACK ${ANDROID_NOEXECSTACK} CACHE BOOL "Allows or disallows undefined symbols in shared libraries" ) -set( ANDROID_RELRO ${ANDROID_RELRO} CACHE BOOL "Enables RELRO - a memory corruption mitigation technique" ) -mark_as_advanced( ANDROID_NO_UNDEFINED ANDROID_SO_UNDEFINED ANDROID_FUNCTION_LEVEL_LINKING ANDROID_GOLD_LINKER ANDROID_NOEXECSTACK ANDROID_RELRO ) - -# linker flags -set( ANDROID_LINKER_FLAGS "" ) - -if( ARMEABI_V7A ) - # this is *required* to use the following linker flags that routes around - # a CPU bug in some Cortex-A8 implementations: - set( ANDROID_LINKER_FLAGS "${ANDROID_LINKER_FLAGS} -Wl,--fix-cortex-a8" ) -endif() - -if( ANDROID_NO_UNDEFINED ) - if( MIPS ) - # there is some sysroot-related problem in mips linker... 
- if( NOT ANDROID_SYSROOT MATCHES "[ ;\"]" ) - set( ANDROID_LINKER_FLAGS "${ANDROID_LINKER_FLAGS} -Wl,--no-undefined -Wl,-rpath-link,${ANDROID_SYSROOT}/usr/lib" ) - endif() - else() - set( ANDROID_LINKER_FLAGS "${ANDROID_LINKER_FLAGS} -Wl,--no-undefined" ) - endif() -endif() - -if( ANDROID_SO_UNDEFINED ) - set( ANDROID_LINKER_FLAGS "${ANDROID_LINKER_FLAGS} -Wl,-allow-shlib-undefined" ) -endif() - -if( ANDROID_FUNCTION_LEVEL_LINKING ) - set( ANDROID_CXX_FLAGS "${ANDROID_CXX_FLAGS} -fdata-sections -ffunction-sections" ) - set( ANDROID_LINKER_FLAGS "${ANDROID_LINKER_FLAGS} -Wl,--gc-sections" ) -endif() - -if( ANDROID_COMPILER_VERSION VERSION_EQUAL "4.6" ) - if( ANDROID_GOLD_LINKER AND (CMAKE_HOST_UNIX OR ANDROID_NDK_RELEASE STRGREATER "r8b") AND (ARMEABI OR ARMEABI_V7A OR X86) ) - set( ANDROID_LINKER_FLAGS "${ANDROID_LINKER_FLAGS} -fuse-ld=gold" ) - elseif( ANDROID_NDK_RELEASE STRGREATER "r8b") - set( ANDROID_LINKER_FLAGS "${ANDROID_LINKER_FLAGS} -fuse-ld=bfd" ) - elseif( ANDROID_NDK_RELEASE STREQUAL "r8b" AND ARMEABI AND NOT _CMAKE_IN_TRY_COMPILE ) - message( WARNING "The default bfd linker from arm GCC 4.6 toolchain can fail with 'unresolvable R_ARM_THM_CALL relocation' error message. See https://code.google.com/p/android/issues/detail?id=35342 - On Linux and OS X host platform you can workaround this problem using gold linker (default). - Rerun cmake with -DANDROID_GOLD_LINKER=ON option in case of problems. -" ) - endif() -endif() # version 4.6 - -if( ANDROID_NOEXECSTACK ) - if( ANDROID_COMPILER_IS_CLANG ) - set( ANDROID_CXX_FLAGS "${ANDROID_CXX_FLAGS} -Xclang -mnoexecstack" ) - else() - set( ANDROID_CXX_FLAGS "${ANDROID_CXX_FLAGS} -Wa,--noexecstack" ) - endif() - set( ANDROID_LINKER_FLAGS "${ANDROID_LINKER_FLAGS} -Wl,-z,noexecstack" ) -endif() - -if( ANDROID_RELRO ) - set( ANDROID_LINKER_FLAGS "${ANDROID_LINKER_FLAGS} -Wl,-z,relro -Wl,-z,now" ) -endif() - -if( ANDROID_COMPILER_IS_CLANG ) - set( ANDROID_CXX_FLAGS "-Qunused-arguments ${ANDROID_CXX_FLAGS}" ) - if( ARMEABI_V7A AND NOT ANDROID_FORCE_ARM_BUILD ) - set( ANDROID_CXX_FLAGS_RELEASE "-target thumbv7-none-linux-androideabi ${ANDROID_CXX_FLAGS_RELEASE}" ) - set( ANDROID_CXX_FLAGS_DEBUG "-target ${ANDROID_LLVM_TRIPLE} ${ANDROID_CXX_FLAGS_DEBUG}" ) - else() - set( ANDROID_CXX_FLAGS "-target ${ANDROID_LLVM_TRIPLE} ${ANDROID_CXX_FLAGS}" ) - endif() - if( BUILD_WITH_ANDROID_NDK ) - set( ANDROID_CXX_FLAGS "-gcc-toolchain ${ANDROID_TOOLCHAIN_ROOT} ${ANDROID_CXX_FLAGS}" ) - endif() -endif() - -# cache flags -set( CMAKE_CXX_FLAGS "" CACHE STRING "c++ flags" ) -set( CMAKE_C_FLAGS "" CACHE STRING "c flags" ) -set( CMAKE_CXX_FLAGS_RELEASE "-O3 -DNDEBUG" CACHE STRING "c++ Release flags" ) -set( CMAKE_C_FLAGS_RELEASE "-O3 -DNDEBUG" CACHE STRING "c Release flags" ) -set( CMAKE_CXX_FLAGS_DEBUG "-O0 -g -DDEBUG -D_DEBUG" CACHE STRING "c++ Debug flags" ) -set( CMAKE_C_FLAGS_DEBUG "-O0 -g -DDEBUG -D_DEBUG" CACHE STRING "c Debug flags" ) -set( CMAKE_SHARED_LINKER_FLAGS "" CACHE STRING "shared linker flags" ) -set( CMAKE_MODULE_LINKER_FLAGS "" CACHE STRING "module linker flags" ) -set( CMAKE_EXE_LINKER_FLAGS "-Wl,-z,nocopyreloc" CACHE STRING "executable linker flags" ) - -# put flags to cache (for debug purpose only) -set( ANDROID_CXX_FLAGS "${ANDROID_CXX_FLAGS}" CACHE INTERNAL "Android specific c/c++ flags" ) -set( ANDROID_CXX_FLAGS_RELEASE "${ANDROID_CXX_FLAGS_RELEASE}" CACHE INTERNAL "Android specific c/c++ Release flags" ) -set( ANDROID_CXX_FLAGS_DEBUG "${ANDROID_CXX_FLAGS_DEBUG}" CACHE INTERNAL "Android specific c/c++ Debug flags" ) -set( 
ANDROID_LINKER_FLAGS "${ANDROID_LINKER_FLAGS}" CACHE INTERNAL "Android specific c/c++ linker flags" ) - -# finish flags -set( CMAKE_CXX_FLAGS "${ANDROID_CXX_FLAGS} ${CMAKE_CXX_FLAGS}" ) -set( CMAKE_C_FLAGS "${ANDROID_CXX_FLAGS} ${CMAKE_C_FLAGS}" ) -set( CMAKE_CXX_FLAGS_RELEASE "${ANDROID_CXX_FLAGS_RELEASE} ${CMAKE_CXX_FLAGS_RELEASE}" ) -set( CMAKE_C_FLAGS_RELEASE "${ANDROID_CXX_FLAGS_RELEASE} ${CMAKE_C_FLAGS_RELEASE}" ) -set( CMAKE_CXX_FLAGS_DEBUG "${ANDROID_CXX_FLAGS_DEBUG} ${CMAKE_CXX_FLAGS_DEBUG}" ) -set( CMAKE_C_FLAGS_DEBUG "${ANDROID_CXX_FLAGS_DEBUG} ${CMAKE_C_FLAGS_DEBUG}" ) -set( CMAKE_SHARED_LINKER_FLAGS "${ANDROID_LINKER_FLAGS} ${CMAKE_SHARED_LINKER_FLAGS}" ) -set( CMAKE_MODULE_LINKER_FLAGS "${ANDROID_LINKER_FLAGS} ${CMAKE_MODULE_LINKER_FLAGS}" ) -set( CMAKE_EXE_LINKER_FLAGS "${ANDROID_LINKER_FLAGS} ${CMAKE_EXE_LINKER_FLAGS}" ) - -if( MIPS AND BUILD_WITH_ANDROID_NDK AND ANDROID_NDK_RELEASE STREQUAL "r8" ) - set( CMAKE_SHARED_LINKER_FLAGS "-Wl,-T,${ANDROID_NDK_TOOLCHAINS_PATH}/${ANDROID_GCC_TOOLCHAIN_NAME}/mipself.xsc ${CMAKE_SHARED_LINKER_FLAGS}" ) - set( CMAKE_MODULE_LINKER_FLAGS "-Wl,-T,${ANDROID_NDK_TOOLCHAINS_PATH}/${ANDROID_GCC_TOOLCHAIN_NAME}/mipself.xsc ${CMAKE_MODULE_LINKER_FLAGS}" ) - set( CMAKE_EXE_LINKER_FLAGS "-Wl,-T,${ANDROID_NDK_TOOLCHAINS_PATH}/${ANDROID_GCC_TOOLCHAIN_NAME}/mipself.x ${CMAKE_EXE_LINKER_FLAGS}" ) -endif() - -# configure rtti -if( DEFINED ANDROID_RTTI AND ANDROID_STL_FORCE_FEATURES ) - if( ANDROID_RTTI ) - set( CMAKE_CXX_FLAGS "-frtti ${CMAKE_CXX_FLAGS}" ) - else() - set( CMAKE_CXX_FLAGS "-fno-rtti ${CMAKE_CXX_FLAGS}" ) - endif() -endif() - -# configure exceptios -if( DEFINED ANDROID_EXCEPTIONS AND ANDROID_STL_FORCE_FEATURES ) - if( ANDROID_EXCEPTIONS ) - set( CMAKE_CXX_FLAGS "-fexceptions ${CMAKE_CXX_FLAGS}" ) - set( CMAKE_C_FLAGS "-fexceptions ${CMAKE_C_FLAGS}" ) - else() - set( CMAKE_CXX_FLAGS "-fno-exceptions ${CMAKE_CXX_FLAGS}" ) - set( CMAKE_C_FLAGS "-fno-exceptions ${CMAKE_C_FLAGS}" ) - endif() -endif() - -# global includes and link directories -include_directories( SYSTEM "${ANDROID_SYSROOT}/usr/include" ${ANDROID_STL_INCLUDE_DIRS} ) -link_directories( "${CMAKE_INSTALL_PREFIX}/libs/${ANDROID_NDK_ABI_NAME}" ) - -# detect if need link crtbegin_so.o explicitly -if( NOT DEFINED ANDROID_EXPLICIT_CRT_LINK ) - set( __cmd "${CMAKE_CXX_CREATE_SHARED_LIBRARY}" ) - string( REPLACE "" "${CMAKE_CXX_COMPILER} ${CMAKE_CXX_COMPILER_ARG1}" __cmd "${__cmd}" ) - string( REPLACE "" "${CMAKE_C_COMPILER} ${CMAKE_C_COMPILER_ARG1}" __cmd "${__cmd}" ) - string( REPLACE "" "${CMAKE_CXX_FLAGS}" __cmd "${__cmd}" ) - string( REPLACE "" "" __cmd "${__cmd}" ) - string( REPLACE "" "${CMAKE_SHARED_LINKER_FLAGS}" __cmd "${__cmd}" ) - string( REPLACE "" "-shared" __cmd "${__cmd}" ) - string( REPLACE "" "" __cmd "${__cmd}" ) - string( REPLACE "" "" __cmd "${__cmd}" ) - string( REPLACE "" "${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/toolchain_crtlink_test.so" __cmd "${__cmd}" ) - string( REPLACE "" "\"${ANDROID_SYSROOT}/usr/lib/crtbegin_so.o\"" __cmd "${__cmd}" ) - string( REPLACE "" "" __cmd "${__cmd}" ) - separate_arguments( __cmd ) - foreach( __var ANDROID_NDK ANDROID_NDK_TOOLCHAINS_PATH ANDROID_STANDALONE_TOOLCHAIN ) - if( ${__var} ) - set( __tmp "${${__var}}" ) - separate_arguments( __tmp ) - string( REPLACE "${__tmp}" "${${__var}}" __cmd "${__cmd}") - endif() - endforeach() - string( REPLACE "'" "" __cmd "${__cmd}" ) - string( REPLACE "\"" "" __cmd "${__cmd}" ) - execute_process( COMMAND ${__cmd} RESULT_VARIABLE __cmd_result OUTPUT_QUIET ERROR_QUIET ) - if( __cmd_result 
EQUAL 0 ) - set( ANDROID_EXPLICIT_CRT_LINK ON ) - else() - set( ANDROID_EXPLICIT_CRT_LINK OFF ) - endif() -endif() - -if( ANDROID_EXPLICIT_CRT_LINK ) - set( CMAKE_CXX_CREATE_SHARED_LIBRARY "${CMAKE_CXX_CREATE_SHARED_LIBRARY} \"${ANDROID_SYSROOT}/usr/lib/crtbegin_so.o\"" ) - set( CMAKE_CXX_CREATE_SHARED_MODULE "${CMAKE_CXX_CREATE_SHARED_MODULE} \"${ANDROID_SYSROOT}/usr/lib/crtbegin_so.o\"" ) -endif() - -# setup output directories -set( LIBRARY_OUTPUT_PATH_ROOT ${CMAKE_SOURCE_DIR} CACHE PATH "root for library output, set this to change where android libs are installed to" ) -set( CMAKE_INSTALL_PREFIX "${ANDROID_TOOLCHAIN_ROOT}/user" CACHE STRING "path for installing" ) - -if(NOT _CMAKE_IN_TRY_COMPILE) - if( EXISTS "${CMAKE_SOURCE_DIR}/jni/CMakeLists.txt" ) - set( EXECUTABLE_OUTPUT_PATH "${LIBRARY_OUTPUT_PATH_ROOT}/bin/${ANDROID_NDK_ABI_NAME}" CACHE PATH "Output directory for applications" ) - else() - set( EXECUTABLE_OUTPUT_PATH "${LIBRARY_OUTPUT_PATH_ROOT}/bin" CACHE PATH "Output directory for applications" ) - endif() - set( LIBRARY_OUTPUT_PATH "${LIBRARY_OUTPUT_PATH_ROOT}/libs/${ANDROID_NDK_ABI_NAME}" CACHE PATH "path for android libs" ) -endif() - -# set these global flags for cmake client scripts to change behavior -set( ANDROID True ) -set( BUILD_ANDROID True ) - -# where is the target environment -set( CMAKE_FIND_ROOT_PATH "${ANDROID_TOOLCHAIN_ROOT}/bin" "${ANDROID_TOOLCHAIN_ROOT}/${ANDROID_TOOLCHAIN_MACHINE_NAME}" "${ANDROID_SYSROOT}" "${CMAKE_INSTALL_PREFIX}" "${CMAKE_INSTALL_PREFIX}/share" ) - -# only search for libraries and includes in the ndk toolchain -set( CMAKE_FIND_ROOT_PATH_MODE_PROGRAM ONLY ) -set( CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY ) -set( CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY ) - - -# macro to find packages on the host OS -macro( find_host_package ) - set( CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER ) - set( CMAKE_FIND_ROOT_PATH_MODE_LIBRARY NEVER ) - set( CMAKE_FIND_ROOT_PATH_MODE_INCLUDE NEVER ) - if( CMAKE_HOST_WIN32 ) - SET( WIN32 1 ) - SET( UNIX ) - elseif( CMAKE_HOST_APPLE ) - SET( APPLE 1 ) - SET( UNIX ) - endif() - find_package( ${ARGN} ) - SET( WIN32 ) - SET( APPLE ) - SET( UNIX 1 ) - set( CMAKE_FIND_ROOT_PATH_MODE_PROGRAM ONLY ) - set( CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY ) - set( CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY ) -endmacro() - - -# macro to find programs on the host OS -macro( find_host_program ) - set( CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER ) - set( CMAKE_FIND_ROOT_PATH_MODE_LIBRARY NEVER ) - set( CMAKE_FIND_ROOT_PATH_MODE_INCLUDE NEVER ) - if( CMAKE_HOST_WIN32 ) - SET( WIN32 1 ) - SET( UNIX ) - elseif( CMAKE_HOST_APPLE ) - SET( APPLE 1 ) - SET( UNIX ) - endif() - find_program( ${ARGN} ) - SET( WIN32 ) - SET( APPLE ) - SET( UNIX 1 ) - set( CMAKE_FIND_ROOT_PATH_MODE_PROGRAM ONLY ) - set( CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY ) - set( CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY ) -endmacro() - - -macro( ANDROID_GET_ABI_RAWNAME TOOLCHAIN_FLAG VAR ) - if( "${TOOLCHAIN_FLAG}" STREQUAL "ARMEABI" ) - set( ${VAR} "armeabi" ) - elseif( "${TOOLCHAIN_FLAG}" STREQUAL "ARMEABI_V7A" ) - set( ${VAR} "armeabi-v7a" ) - elseif( "${TOOLCHAIN_FLAG}" STREQUAL "X86" ) - set( ${VAR} "x86" ) - elseif( "${TOOLCHAIN_FLAG}" STREQUAL "MIPS" ) - set( ${VAR} "mips" ) - else() - set( ${VAR} "unknown" ) - endif() -endmacro() - - -# export toolchain settings for the try_compile() command -if( NOT PROJECT_NAME STREQUAL "CMAKE_TRY_COMPILE" ) - set( __toolchain_config "") - foreach( __var NDK_CCACHE LIBRARY_OUTPUT_PATH_ROOT ANDROID_FORBID_SYGWIN ANDROID_SET_OBSOLETE_VARIABLES - 
ANDROID_NDK_HOST_X64 - ANDROID_NDK - ANDROID_NDK_LAYOUT - ANDROID_STANDALONE_TOOLCHAIN - ANDROID_TOOLCHAIN_NAME - ANDROID_ABI - ANDROID_NATIVE_API_LEVEL - ANDROID_STL - ANDROID_STL_FORCE_FEATURES - ANDROID_FORCE_ARM_BUILD - ANDROID_NO_UNDEFINED - ANDROID_SO_UNDEFINED - ANDROID_FUNCTION_LEVEL_LINKING - ANDROID_GOLD_LINKER - ANDROID_NOEXECSTACK - ANDROID_RELRO - ANDROID_LIBM_PATH - ANDROID_EXPLICIT_CRT_LINK - ) - if( DEFINED ${__var} ) - if( "${__var}" MATCHES " ") - set( __toolchain_config "${__toolchain_config}set( ${__var} \"${${__var}}\" CACHE INTERNAL \"\" )\n" ) - else() - set( __toolchain_config "${__toolchain_config}set( ${__var} ${${__var}} CACHE INTERNAL \"\" )\n" ) - endif() - endif() - endforeach() - file( WRITE "${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/android.toolchain.config.cmake" "${__toolchain_config}" ) - unset( __toolchain_config ) -endif() - - -# set some obsolete variables for backward compatibility -set( ANDROID_SET_OBSOLETE_VARIABLES ON CACHE BOOL "Define obsolete Andrid-specific cmake variables" ) -mark_as_advanced( ANDROID_SET_OBSOLETE_VARIABLES ) -if( ANDROID_SET_OBSOLETE_VARIABLES ) - set( ANDROID_API_LEVEL ${ANDROID_NATIVE_API_LEVEL} ) - set( ARM_TARGET "${ANDROID_ABI}" ) - set( ARMEABI_NDK_NAME "${ANDROID_NDK_ABI_NAME}" ) -endif() - - -# Variables controlling behavior or set by cmake toolchain: -# ANDROID_ABI : "armeabi-v7a" (default), "armeabi", "armeabi-v7a with NEON", "armeabi-v7a with VFPV3", "armeabi-v6 with VFP", "x86", "mips" -# ANDROID_NATIVE_API_LEVEL : 3,4,5,8,9,14 (depends on NDK version) -# ANDROID_STL : gnustl_static/gnustl_shared/stlport_static/stlport_shared/gabi++_static/gabi++_shared/system_re/system/none -# ANDROID_FORBID_SYGWIN : ON/OFF -# ANDROID_NO_UNDEFINED : ON/OFF -# ANDROID_SO_UNDEFINED : OFF/ON (default depends on NDK version) -# ANDROID_FUNCTION_LEVEL_LINKING : ON/OFF -# ANDROID_GOLD_LINKER : ON/OFF -# ANDROID_NOEXECSTACK : ON/OFF -# ANDROID_RELRO : ON/OFF -# ANDROID_FORCE_ARM_BUILD : ON/OFF -# ANDROID_STL_FORCE_FEATURES : ON/OFF -# ANDROID_SET_OBSOLETE_VARIABLES : ON/OFF -# Can be set only at the first run: -# ANDROID_NDK -# ANDROID_STANDALONE_TOOLCHAIN -# ANDROID_TOOLCHAIN_NAME : the NDK name of compiler toolchain -# ANDROID_NDK_HOST_X64 : try to use x86_64 toolchain (default for x64 host systems) -# ANDROID_NDK_LAYOUT : the inner NDK structure (RELEASE, LINARO, ANDROID) -# LIBRARY_OUTPUT_PATH_ROOT : -# NDK_CCACHE : -# Obsolete: -# ANDROID_API_LEVEL : superseded by ANDROID_NATIVE_API_LEVEL -# ARM_TARGET : superseded by ANDROID_ABI -# ARM_TARGETS : superseded by ANDROID_ABI (can be set only) -# ANDROID_NDK_TOOLCHAIN_ROOT : superseded by ANDROID_STANDALONE_TOOLCHAIN (can be set only) -# ANDROID_USE_STLPORT : superseded by ANDROID_STL=stlport_static -# ANDROID_LEVEL : superseded by ANDROID_NATIVE_API_LEVEL (completely removed) -# -# Primary read-only variables: -# ANDROID : always TRUE -# ARMEABI : TRUE for arm v6 and older devices -# ARMEABI_V6 : TRUE for arm v6 -# ARMEABI_V7A : TRUE for arm v7a -# NEON : TRUE if NEON unit is enabled -# VFPV3 : TRUE if VFP version 3 is enabled -# X86 : TRUE if configured for x86 -# MIPS : TRUE if configured for mips -# BUILD_ANDROID : always TRUE -# BUILD_WITH_ANDROID_NDK : TRUE if NDK is used -# BUILD_WITH_STANDALONE_TOOLCHAIN : TRUE if standalone toolchain is used -# ANDROID_NDK_HOST_SYSTEM_NAME : "windows", "linux-x86" or "darwin-x86" depending on host platform -# ANDROID_NDK_ABI_NAME : "armeabi", "armeabi-v7a", "x86" or "mips" depending on ANDROID_ABI -# ANDROID_NDK_RELEASE : one of r5, r5b, 
r5c, r6, r6b, r7, r7b, r7c, r8, r8b, r8c, r8d, r8e; set only for NDK -# ANDROID_ARCH_NAME : "arm" or "x86" or "mips" depending on ANDROID_ABI -# ANDROID_SYSROOT : path to the compiler sysroot -# TOOL_OS_SUFFIX : "" or ".exe" depending on host platform -# ANDROID_COMPILER_IS_CLANG : TRUE if clang compiler is used -# Obsolete: -# ARMEABI_NDK_NAME : superseded by ANDROID_NDK_ABI_NAME -# -# Secondary (less stable) read-only variables: -# ANDROID_COMPILER_VERSION : GCC version used -# ANDROID_CXX_FLAGS : C/C++ compiler flags required by Android platform -# ANDROID_SUPPORTED_ABIS : list of currently allowed values for ANDROID_ABI -# ANDROID_TOOLCHAIN_MACHINE_NAME : "arm-linux-androideabi", "arm-eabi" or "i686-android-linux" -# ANDROID_TOOLCHAIN_ROOT : path to the top level of toolchain (standalone or placed inside NDK) -# ANDROID_CLANG_TOOLCHAIN_ROOT : path to clang tools -# ANDROID_SUPPORTED_NATIVE_API_LEVELS : list of native API levels found inside NDK -# ANDROID_STL_INCLUDE_DIRS : stl include paths -# ANDROID_RTTI : if rtti is enabled by the runtime -# ANDROID_EXCEPTIONS : if exceptions are enabled by the runtime -# ANDROID_GCC_TOOLCHAIN_NAME : read-only, differs from ANDROID_TOOLCHAIN_NAME only if clang is used -# ANDROID_CLANG_VERSION : version of clang compiler if clang is used -# ANDROID_LIBM_PATH : path to libm.so (set to something like $(TOP)/out/target/product//obj/lib/libm.so) to workaround unresolved `sincos` -# -# Defaults: -# ANDROID_DEFAULT_NDK_API_LEVEL -# ANDROID_DEFAULT_NDK_API_LEVEL_${ARCH} -# ANDROID_NDK_SEARCH_PATHS -# ANDROID_STANDALONE_TOOLCHAIN_SEARCH_PATH -# ANDROID_SUPPORTED_ABIS_${ARCH} -# ANDROID_SUPPORTED_NDK_VERSIONS diff --git a/android/readme.txt b/android/readme.txt deleted file mode 100644 index 2d5f3962fe..0000000000 --- a/android/readme.txt +++ /dev/null @@ -1 +0,0 @@ -All Android specific sources are moved to platforms/android. 
\ No newline at end of file diff --git a/cmake/OpenCVDetectCUDA.cmake b/cmake/OpenCVDetectCUDA.cmake index c44c8129e4..2f159a337e 100644 --- a/cmake/OpenCVDetectCUDA.cmake +++ b/cmake/OpenCVDetectCUDA.cmake @@ -29,10 +29,42 @@ if(CUDA_FOUND) if(${CUDA_VERSION} VERSION_LESS "5.5") find_cuda_helper_libs(npp) else() - find_cuda_helper_libs(nppc) - find_cuda_helper_libs(nppi) - find_cuda_helper_libs(npps) - set(CUDA_npp_LIBRARY ${CUDA_nppc_LIBRARY} ${CUDA_nppi_LIBRARY} ${CUDA_npps_LIBRARY}) + # hack for CUDA 5.5 + if(${CMAKE_SYSTEM_PROCESSOR} STREQUAL "arm") + unset(CUDA_TOOLKIT_INCLUDE CACHE) + unset(CUDA_CUDART_LIBRARY CACHE) + unset(CUDA_cublas_LIBRARY CACHE) + unset(CUDA_cufft_LIBRARY CACHE) + unset(CUDA_npp_LIBRARY CACHE) + + if(SOFTFP) + set(cuda_arm_path "${CUDA_TOOLKIT_ROOT_DIR}/targets/armv7-linux-gnueabi") + else() + set(cuda_arm_path "${CUDA_TOOLKIT_ROOT_DIR}/targets/armv7-linux-gnueabihf") + endif() + + set(CUDA_TOOLKIT_INCLUDE "${cuda_arm_path}/include" CACHE PATH "include path") + set(CUDA_INCLUDE_DIRS ${CUDA_TOOLKIT_INCLUDE}) + + set(cuda_arm_library_path "${cuda_arm_path}/lib") + + set(CUDA_CUDART_LIBRARY "${cuda_arm_library_path}/libcudart.so" CACHE FILEPATH "cudart library") + set(CUDA_LIBRARIES ${CUDA_CUDART_LIBRARY}) + set(CUDA_cublas_LIBRARY "${cuda_arm_library_path}/libcublas.so" CACHE FILEPATH "cublas library") + set(CUDA_cufft_LIBRARY "${cuda_arm_library_path}/libcufft.so" CACHE FILEPATH "cufft library") + set(CUDA_nppc_LIBRARY "${cuda_arm_library_path}/libnppc.so" CACHE FILEPATH "nppc library") + set(CUDA_nppi_LIBRARY "${cuda_arm_library_path}/libnppi.so" CACHE FILEPATH "nppi library") + set(CUDA_npps_LIBRARY "${cuda_arm_library_path}/libnpps.so" CACHE FILEPATH "npps library") + set(CUDA_npp_LIBRARY "${CUDA_nppc_LIBRARY};${CUDA_nppi_LIBRARY};${CUDA_npps_LIBRARY}" CACHE STRING "npp library") + else() + unset(CUDA_npp_LIBRARY CACHE) + + find_cuda_helper_libs(nppc) + find_cuda_helper_libs(nppi) + find_cuda_helper_libs(npps) + + set(CUDA_npp_LIBRARY "${CUDA_nppc_LIBRARY};${CUDA_nppi_LIBRARY};${CUDA_npps_LIBRARY}" CACHE STRING "npp library") + endif() endif() if(WITH_NVCUVID) diff --git a/cmake/OpenCVFindLibsGUI.cmake b/cmake/OpenCVFindLibsGUI.cmake index 59ce1cd05d..d685d23feb 100644 --- a/cmake/OpenCVFindLibsGUI.cmake +++ b/cmake/OpenCVFindLibsGUI.cmake @@ -24,7 +24,6 @@ if(WITH_QT) if(Qt5Core_FOUND AND Qt5Gui_FOUND AND Qt5Widgets_FOUND AND Qt5Test_FOUND AND Qt5Concurrent_FOUND) set(HAVE_QT5 ON) set(HAVE_QT ON) - add_definitions(-DHAVE_QT) find_package(Qt5OpenGL) if(Qt5OpenGL_FOUND) set(QT_QTOPENGL_FOUND ON) @@ -36,7 +35,6 @@ if(WITH_QT) find_package(Qt4 REQUIRED QtCore QtGui QtTest) if(QT4_FOUND) set(HAVE_QT TRUE) - add_definitions(-DHAVE_QT) # We need to define the macro this way, using cvconfig.h does not work endif() endif() endif() @@ -61,7 +59,6 @@ if(WITH_OPENGL) list(APPEND OPENCV_LINKER_LIBS ${OPENGL_LIBRARIES}) if(QT_QTOPENGL_FOUND) set(HAVE_QT_OPENGL TRUE) - add_definitions(-DHAVE_QT_OPENGL) else() ocv_include_directories(${OPENGL_INCLUDE_DIR}) endif() diff --git a/cmake/templates/cvconfig.h.cmake b/cmake/templates/cvconfig.h.cmake index a419b0c3f2..2d6f9a4cb0 100644 --- a/cmake/templates/cvconfig.h.cmake +++ b/cmake/templates/cvconfig.h.cmake @@ -225,3 +225,9 @@ /* Clp support */ #cmakedefine HAVE_CLP + +/* Qt support */ +#cmakedefine HAVE_QT + +/* Qt OpenGL support */ +#cmakedefine HAVE_QT_OPENGL diff --git a/doc/_themes/blue/layout.html b/doc/_themes/blue/layout.html index a376c97592..d0d43031c8 100644 --- a/doc/_themes/blue/layout.html +++ 
b/doc/_themes/blue/layout.html @@ -183,7 +183,7 @@ {% if theme_lang == 'c' %} {% endif %} {% if theme_lang == 'cpp' %} -
Try the Cheatsheet.
+
Try the Cheatsheet.
{% endif %} {% if theme_lang == 'py' %}
Try the Cookbook.
  • diff --git a/doc/conf.py b/doc/conf.py index f3f7aec58a..312a1c2b9e 100755 --- a/doc/conf.py +++ b/doc/conf.py @@ -284,120 +284,122 @@ latex_domain_indices = True # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'opencv', u'The OpenCV Reference Manual', - [u'opencv-dev@itseez.com'], 1) + [u'admin@opencv.org'], 1) ] # ---- External links for tutorials ----------------- extlinks = { - 'basicstructures' : ('http://opencv.itseez.com/modules/core/doc/basic_structures.html#%s', None), - 'oldbasicstructures' : ('http://opencv.itseez.com/modules/core/doc/old_basic_structures.html#%s', None), - 'readwriteimagevideo' : ('http://opencv.itseez.com/modules/highgui/doc/reading_and_writing_images_and_video.html#%s', None), - 'operationsonarrays' : ('http://opencv.itseez.com/modules/core/doc/operations_on_arrays.html#%s', None), - 'utilitysystemfunctions':('http://opencv.itseez.com/modules/core/doc/utility_and_system_functions_and_macros.html#%s', None), - 'imgprocfilter':('http://opencv.itseez.com/modules/imgproc/doc/filtering.html#%s', None), - 'svms':('http://opencv.itseez.com/modules/ml/doc/support_vector_machines.html#%s', None), - 'drawingfunc':('http://opencv.itseez.com/modules/core/doc/drawing_functions.html#%s', None), - 'xmlymlpers':('http://opencv.itseez.com/modules/core/doc/xml_yaml_persistence.html#%s', None), - 'hgvideo' : ('http://opencv.itseez.com/modules/highgui/doc/reading_and_writing_images_and_video.html#%s', None), - 'gpuinit' : ('http://opencv.itseez.com/modules/gpu/doc/initalization_and_information.html#%s', None), - 'gpudatastructure' : ('http://opencv.itseez.com/modules/gpu/doc/data_structures.html#%s', None), - 'gpuopmatrices' : ('http://opencv.itseez.com/modules/gpu/doc/operations_on_matrices.html#%s', None), - 'gpuperelement' : ('http://opencv.itseez.com/modules/gpu/doc/per_element_operations.html#%s', None), - 'gpuimgproc' : ('http://opencv.itseez.com/modules/gpu/doc/image_processing.html#%s', None), - 'gpumatrixreduct' : ('http://opencv.itseez.com/modules/gpu/doc/matrix_reductions.html#%s', None), - 'filtering':('http://opencv.itseez.com/modules/imgproc/doc/filtering.html#%s', None), - 'flann' : ('http://opencv.itseez.com/modules/flann/doc/flann_fast_approximate_nearest_neighbor_search.html#%s', None ), - 'calib3d' : ('http://opencv.itseez.com/trunk/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.html#%s', None ), - 'feature2d' : ('http://opencv.itseez.com/trunk/modules/imgproc/doc/feature_detection.html#%s', None ), - 'imgproc_geometric' : ('http://opencv.itseez.com/trunk/modules/imgproc/doc/geometric_transformations.html#%s', None ), - - 'opencv_group' : ('http://tech.groups.yahoo.com/group/OpenCV/%s', None), - - 'cvt_color': ('http://opencv.itseez.com/modules/imgproc/doc/miscellaneous_transformations.html?highlight=cvtcolor#cvtcolor%s', None), - 'imread': ('http://opencv.itseez.com/modules/highgui/doc/reading_and_writing_images_and_video.html?highlight=imread#imread%s', None), - 'imwrite': ('http://opencv.itseez.com/modules/highgui/doc/reading_and_writing_images_and_video.html?highlight=imwrite#imwrite%s', None), - 'imshow': ('http://opencv.itseez.com/modules/highgui/doc/user_interface.html?highlight=imshow#imshow%s', None), - 'named_window': ('http://opencv.itseez.com/modules/highgui/doc/user_interface.html?highlight=namedwindow#namedwindow%s', None), - 'wait_key': ('http://opencv.itseez.com/modules/highgui/doc/user_interface.html?highlight=waitkey#waitkey%s', None), - 'add_weighted': 
('http://opencv.itseez.com/modules/core/doc/operations_on_arrays.html?highlight=addweighted#addweighted%s', None), - 'saturate_cast': ('http://opencv.itseez.com/modules/core/doc/utility_and_system_functions_and_macros.html?highlight=saturate_cast#saturate-cast%s', None), - 'mat_zeros': ('http://opencv.itseez.com/modules/core/doc/basic_structures.html?highlight=zeros#mat-zeros%s', None), - 'convert_to': ('http://opencv.itseez.com/modules/core/doc/basic_structures.html#mat-convertto%s', None), - 'create_trackbar': ('http://opencv.itseez.com/modules/highgui/doc/user_interface.html?highlight=createtrackbar#createtrackbar%s', None), - 'point': ('http://opencv.itseez.com/modules/core/doc/basic_structures.html#point%s', None), - 'scalar': ('http://opencv.itseez.com/modules/core/doc/basic_structures.html#scalar%s', None), - 'line': ('http://opencv.itseez.com/modules/core/doc/drawing_functions.html#line%s', None), - 'ellipse': ('http://opencv.itseez.com/modules/core/doc/drawing_functions.html#ellipse%s', None), - 'rectangle': ('http://opencv.itseez.com/modules/core/doc/drawing_functions.html#rectangle%s', None), - 'circle': ('http://opencv.itseez.com/modules/core/doc/drawing_functions.html#circle%s', None), - 'fill_poly': ('http://opencv.itseez.com/modules/core/doc/drawing_functions.html#fillpoly%s', None), - 'rng': ('http://opencv.itseez.com/modules/core/doc/operations_on_arrays.html?highlight=rng#rng%s', None), - 'put_text': ('http://opencv.itseez.com/modules/core/doc/drawing_functions.html#puttext%s', None), - 'gaussian_blur': ('http://opencv.itseez.com/modules/imgproc/doc/filtering.html?highlight=gaussianblur#gaussianblur%s', None), - 'blur': ('http://opencv.itseez.com/modules/imgproc/doc/filtering.html?highlight=blur#blur%s', None), - 'median_blur': ('http://opencv.itseez.com/modules/imgproc/doc/filtering.html?highlight=medianblur#medianblur%s', None), - 'bilateral_filter': ('http://opencv.itseez.com/modules/imgproc/doc/filtering.html?highlight=bilateralfilter#bilateralfilter%s', None), - 'erode': ('http://opencv.itseez.com/modules/imgproc/doc/filtering.html?highlight=erode#erode%s', None), - 'dilate': ('http://opencv.itseez.com/modules/imgproc/doc/filtering.html?highlight=dilate#dilate%s', None), - 'get_structuring_element': ('http://opencv.itseez.com/modules/imgproc/doc/filtering.html?highlight=getstructuringelement#getstructuringelement%s', None), - 'flood_fill': ( 'http://opencv.itseez.com/modules/imgproc/doc/miscellaneous_transformations.html?highlight=floodfill#floodfill%s', None), - 'morphology_ex': ('http://opencv.itseez.com/modules/imgproc/doc/filtering.html?highlight=morphologyex#morphologyex%s', None), - 'pyr_down': ('http://opencv.itseez.com/modules/imgproc/doc/filtering.html?highlight=pyrdown#pyrdown%s', None), - 'pyr_up': ('http://opencv.itseez.com/modules/imgproc/doc/filtering.html?highlight=pyrup#pyrup%s', None), - 'resize': ('http://opencv.itseez.com/modules/imgproc/doc/geometric_transformations.html?highlight=resize#resize%s', None), - 'threshold': ('http://opencv.itseez.com/modules/imgproc/doc/miscellaneous_transformations.html?highlight=threshold#threshold%s', None), - 'filter2d': ('http://opencv.itseez.com/modules/imgproc/doc/filtering.html?highlight=filter2d#filter2d%s', None), - 'copy_make_border': ('http://opencv.itseez.com/modules/imgproc/doc/filtering.html?highlight=copymakeborder#copymakeborder%s', None), - 'sobel': ('http://opencv.itseez.com/modules/imgproc/doc/filtering.html?highlight=sobel#sobel%s', None), - 'scharr': 
('http://opencv.itseez.com/modules/imgproc/doc/filtering.html?highlight=scharr#scharr%s', None), - 'laplacian': ('http://opencv.itseez.com/modules/imgproc/doc/filtering.html?highlight=laplacian#laplacian%s', None), - 'canny': ('http://opencv.itseez.com/modules/imgproc/doc/feature_detection.html?highlight=canny#canny%s', None), - 'copy_to': ('http://opencv.itseez.com/modules/core/doc/basic_structures.html?highlight=copyto#mat-copyto%s', None), - 'hough_lines' : ('http://opencv.itseez.com/modules/imgproc/doc/feature_detection.html?highlight=houghlines#houghlines%s', None), - 'hough_lines_p' : ('http://opencv.itseez.com/modules/imgproc/doc/feature_detection.html?highlight=houghlinesp#houghlinesp%s', None), - 'hough_circles' : ('http://opencv.itseez.com/modules/imgproc/doc/feature_detection.html?highlight=houghcircles#houghcircles%s', None), - 'remap' : ('http://opencv.itseez.com/modules/imgproc/doc/geometric_transformations.html?highlight=remap#remap%s', None), - 'warp_affine' : ('http://opencv.itseez.com/modules/imgproc/doc/geometric_transformations.html?highlight=warpaffine#warpaffine%s' , None), - 'get_rotation_matrix_2d' : ('http://opencv.itseez.com/modules/imgproc/doc/geometric_transformations.html?highlight=getrotationmatrix2d#getrotationmatrix2d%s', None), - 'get_affine_transform' : ('http://opencv.itseez.com/modules/imgproc/doc/geometric_transformations.html?highlight=getaffinetransform#getaffinetransform%s', None), - 'equalize_hist' : ('http://opencv.itseez.com/modules/imgproc/doc/histograms.html?highlight=equalizehist#equalizehist%s', None), - 'split' : ('http://opencv.itseez.com/modules/core/doc/operations_on_arrays.html?highlight=split#split%s', None), - 'calc_hist' : ('http://opencv.itseez.com/modules/imgproc/doc/histograms.html?highlight=calchist#calchist%s', None), - 'normalize' : ('http://opencv.itseez.com/modules/core/doc/operations_on_arrays.html?highlight=normalize#normalize%s', None), - 'match_template' : ('http://opencv.itseez.com/modules/imgproc/doc/object_detection.html?highlight=matchtemplate#matchtemplate%s', None), - 'min_max_loc' : ('http://opencv.itseez.com/modules/core/doc/operations_on_arrays.html?highlight=minmaxloc#minmaxloc%s', None), - 'mix_channels' : ( 'http://opencv.itseez.com/modules/core/doc/operations_on_arrays.html?highlight=mixchannels#mixchannels%s', None), - 'calc_back_project' : ('http://opencv.itseez.com/modules/imgproc/doc/histograms.html?highlight=calcbackproject#calcbackproject%s', None), - 'compare_hist' : ('http://opencv.itseez.com/modules/imgproc/doc/histograms.html?highlight=comparehist#comparehist%s', None), - 'corner_harris' : ('http://opencv.itseez.com/modules/imgproc/doc/feature_detection.html?highlight=cornerharris#cornerharris%s', None), - 'good_features_to_track' : ('http://opencv.itseez.com/modules/imgproc/doc/feature_detection.html?highlight=goodfeaturestotrack#goodfeaturestotrack%s', None), - 'corner_min_eigenval' : ('http://opencv.itseez.com/modules/imgproc/doc/feature_detection.html?highlight=cornermineigenval#cornermineigenval%s', None), - 'corner_eigenvals_and_vecs' : ('http://opencv.itseez.com/modules/imgproc/doc/feature_detection.html?highlight=cornereigenvalsandvecs#cornereigenvalsandvecs%s', None), - 'corner_sub_pix' : ('http://opencv.itseez.com/modules/imgproc/doc/feature_detection.html?highlight=cornersubpix#cornersubpix%s', None), - 'find_contours' : ('http://opencv.itseez.com/modules/imgproc/doc/structural_analysis_and_shape_descriptors.html?highlight=findcontours#findcontours%s', None), - 'convex_hull' : 
('http://opencv.itseez.com/modules/imgproc/doc/structural_analysis_and_shape_descriptors.html?highlight=convexhull#convexhull%s', None), - 'draw_contours' : ('http://opencv.itseez.com/modules/imgproc/doc/structural_analysis_and_shape_descriptors.html?highlight=drawcontours#drawcontours%s', None), - 'bounding_rect' : ('http://opencv.itseez.com/modules/imgproc/doc/structural_analysis_and_shape_descriptors.html?highlight=boundingrect#boundingrect%s', None), - 'min_enclosing_circle' : ('http://opencv.itseez.com/modules/imgproc/doc/structural_analysis_and_shape_descriptors.html?highlight=minenclosingcircle#minenclosingcircle%s', None), - 'min_area_rect' : ('http://opencv.itseez.com/modules/imgproc/doc/structural_analysis_and_shape_descriptors.html?highlight=minarearect#minarearect%s', None), - 'fit_ellipse' : ('http://opencv.itseez.com/modules/imgproc/doc/structural_analysis_and_shape_descriptors.html?highlight=fitellipse#fitellipse%s', None), - 'moments' : ('http://opencv.itseez.com/modules/imgproc/doc/structural_analysis_and_shape_descriptors.html?highlight=moments#moments%s', None), - 'contour_area' : ('http://opencv.itseez.com/modules/imgproc/doc/structural_analysis_and_shape_descriptors.html?highlight=contourarea#contourarea%s', None), - 'arc_length' : ('http://opencv.itseez.com/modules/imgproc/doc/structural_analysis_and_shape_descriptors.html?highlight=arclength#arclength%s', None), - 'point_polygon_test' : ('http://opencv.itseez.com/modules/imgproc/doc/structural_analysis_and_shape_descriptors.html?highlight=pointpolygontest#pointpolygontest%s', None), - 'feature_detector' : ( 'http://opencv.itseez.com/modules/features2d/doc/common_interfaces_of_feature_detectors.html?highlight=featuredetector#FeatureDetector%s', None), - 'feature_detector_detect' : ('http://opencv.itseez.com/modules/features2d/doc/common_interfaces_of_feature_detectors.html?highlight=detect#featuredetector-detect%s', None ), - 'surf_feature_detector' : ('http://opencv.itseez.com/modules/features2d/doc/common_interfaces_of_feature_detectors.html?highlight=surffeaturedetector#surffeaturedetector%s', None ), - 'draw_keypoints' : ('http://opencv.itseez.com/modules/features2d/doc/drawing_function_of_keypoints_and_matches.html?highlight=drawkeypoints#drawkeypoints%s', None ), - 'descriptor_extractor': ( 'http://opencv.itseez.com/modules/features2d/doc/common_interfaces_of_descriptor_extractors.html?highlight=descriptorextractor#descriptorextractor%s', None ), - 'descriptor_extractor_compute' : ( 'http://opencv.itseez.com/modules/features2d/doc/common_interfaces_of_descriptor_extractors.html?highlight=compute#descriptorextractor-compute%s', None ), - 'surf_descriptor_extractor' : ( 'http://opencv.itseez.com/modules/features2d/doc/common_interfaces_of_descriptor_extractors.html?highlight=surfdescriptorextractor#surfdescriptorextractor%s', None ), - 'draw_matches' : ( 'http://opencv.itseez.com/modules/features2d/doc/drawing_function_of_keypoints_and_matches.html?highlight=drawmatches#drawmatches%s', None ), - 'find_homography' : ('http://opencv.itseez.com/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.html?highlight=findhomography#findhomography%s', None), - 'perspective_transform' : ('http://opencv.itseez.com/modules/core/doc/operations_on_arrays.html?highlight=perspectivetransform#perspectivetransform%s', None ), - 'flann_based_matcher' : ('http://opencv.itseez.com/modules/features2d/doc/common_interfaces_of_descriptor_matchers.html?highlight=flannbasedmatcher#flannbasedmatcher%s', None), - 'brute_force_matcher' 
: ('http://opencv.itseez.com/modules/features2d/doc/common_interfaces_of_descriptor_matchers.html?highlight=bruteforcematcher#bruteforcematcher%s', None ), - 'cascade_classifier' : ('http://opencv.itseez.com/modules/objdetect/doc/cascade_classification.html?highlight=cascadeclassifier#cascadeclassifier%s', None ), - 'cascade_classifier_load' : ('http://opencv.itseez.com/modules/objdetect/doc/cascade_classification.html?highlight=load#cascadeclassifier-load%s', None ), - 'cascade_classifier_detect_multiscale' : ('http://opencv.itseez.com/modules/objdetect/doc/cascade_classification.html?highlight=detectmultiscale#cascadeclassifier-detectmultiscale%s', None ) + 'basicstructures' : ('http://docs.opencv.org/modules/core/doc/basic_structures.html#%s', None), + 'oldbasicstructures' : ('http://docs.opencv.org/modules/core/doc/old_basic_structures.html#%s', None), + 'readwriteimagevideo' : ('http://docs.opencv.org/modules/highgui/doc/reading_and_writing_images_and_video.html#%s', None), + 'operationsonarrays' : ('http://docs.opencv.org/modules/core/doc/operations_on_arrays.html#%s', None), + 'utilitysystemfunctions':('http://docs.opencv.org/modules/core/doc/utility_and_system_functions_and_macros.html#%s', None), + 'imgprocfilter':('http://docs.opencv.org/modules/imgproc/doc/filtering.html#%s', None), + 'svms':('http://docs.opencv.org/modules/ml/doc/support_vector_machines.html#%s', None), + 'drawingfunc':('http://docs.opencv.org/modules/core/doc/drawing_functions.html#%s', None), + 'xmlymlpers':('http://docs.opencv.org/modules/core/doc/xml_yaml_persistence.html#%s', None), + 'hgvideo' : ('http://docs.opencv.org/modules/highgui/doc/reading_and_writing_images_and_video.html#%s', None), + 'gpuinit' : ('http://docs.opencv.org/modules/gpu/doc/initalization_and_information.html#%s', None), + 'gpudatastructure' : ('http://docs.opencv.org/modules/gpu/doc/data_structures.html#%s', None), + 'gpuopmatrices' : ('http://docs.opencv.org/modules/gpu/doc/operations_on_matrices.html#%s', None), + 'gpuperelement' : ('http://docs.opencv.org/modules/gpu/doc/per_element_operations.html#%s', None), + 'gpuimgproc' : ('http://docs.opencv.org/modules/gpu/doc/image_processing.html#%s', None), + 'gpumatrixreduct' : ('http://docs.opencv.org/modules/gpu/doc/matrix_reductions.html#%s', None), + 'filtering':('http://docs.opencv.org/modules/imgproc/doc/filtering.html#%s', None), + 'flann' : ('http://docs.opencv.org/modules/flann/doc/flann_fast_approximate_nearest_neighbor_search.html#%s', None ), + 'calib3d' : ('http://docs.opencv.org/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.html#%s', None ), + 'feature2d' : ('http://docs.opencv.org/modules/imgproc/doc/feature_detection.html#%s', None ), + 'imgproc_geometric' : ('http://docs.opencv.org/modules/imgproc/doc/geometric_transformations.html#%s', None ), + + # 'opencv_group' : ('http://answers.opencv.org/%s', None), + 'opencv_qa' : ('http://answers.opencv.org/%s', None), + 'how_to_contribute' : ('http://code.opencv.org/projects/opencv/wiki/How_to_contribute/%s', None), + + 'cvt_color': ('http://docs.opencv.org/modules/imgproc/doc/miscellaneous_transformations.html?highlight=cvtcolor#cvtcolor%s', None), + 'imread': ('http://docs.opencv.org/modules/highgui/doc/reading_and_writing_images_and_video.html?highlight=imread#imread%s', None), + 'imwrite': ('http://docs.opencv.org/modules/highgui/doc/reading_and_writing_images_and_video.html?highlight=imwrite#imwrite%s', None), + 'imshow': 
('http://docs.opencv.org/modules/highgui/doc/user_interface.html?highlight=imshow#imshow%s', None), + 'named_window': ('http://docs.opencv.org/modules/highgui/doc/user_interface.html?highlight=namedwindow#namedwindow%s', None), + 'wait_key': ('http://docs.opencv.org/modules/highgui/doc/user_interface.html?highlight=waitkey#waitkey%s', None), + 'add_weighted': ('http://docs.opencv.org/modules/core/doc/operations_on_arrays.html?highlight=addweighted#addweighted%s', None), + 'saturate_cast': ('http://docs.opencv.org/modules/core/doc/utility_and_system_functions_and_macros.html?highlight=saturate_cast#saturate-cast%s', None), + 'mat_zeros': ('http://docs.opencv.org/modules/core/doc/basic_structures.html?highlight=zeros#mat-zeros%s', None), + 'convert_to': ('http://docs.opencv.org/modules/core/doc/basic_structures.html#mat-convertto%s', None), + 'create_trackbar': ('http://docs.opencv.org/modules/highgui/doc/user_interface.html?highlight=createtrackbar#createtrackbar%s', None), + 'point': ('http://docs.opencv.org/modules/core/doc/basic_structures.html#point%s', None), + 'scalar': ('http://docs.opencv.org/modules/core/doc/basic_structures.html#scalar%s', None), + 'line': ('http://docs.opencv.org/modules/core/doc/drawing_functions.html#line%s', None), + 'ellipse': ('http://docs.opencv.org/modules/core/doc/drawing_functions.html#ellipse%s', None), + 'rectangle': ('http://docs.opencv.org/modules/core/doc/drawing_functions.html#rectangle%s', None), + 'circle': ('http://docs.opencv.org/modules/core/doc/drawing_functions.html#circle%s', None), + 'fill_poly': ('http://docs.opencv.org/modules/core/doc/drawing_functions.html#fillpoly%s', None), + 'rng': ('http://docs.opencv.org/modules/core/doc/operations_on_arrays.html?highlight=rng#rng%s', None), + 'put_text': ('http://docs.opencv.org/modules/core/doc/drawing_functions.html#puttext%s', None), + 'gaussian_blur': ('http://docs.opencv.org/modules/imgproc/doc/filtering.html?highlight=gaussianblur#gaussianblur%s', None), + 'blur': ('http://docs.opencv.org/modules/imgproc/doc/filtering.html?highlight=blur#blur%s', None), + 'median_blur': ('http://docs.opencv.org/modules/imgproc/doc/filtering.html?highlight=medianblur#medianblur%s', None), + 'bilateral_filter': ('http://docs.opencv.org/modules/imgproc/doc/filtering.html?highlight=bilateralfilter#bilateralfilter%s', None), + 'erode': ('http://docs.opencv.org/modules/imgproc/doc/filtering.html?highlight=erode#erode%s', None), + 'dilate': ('http://docs.opencv.org/modules/imgproc/doc/filtering.html?highlight=dilate#dilate%s', None), + 'get_structuring_element': ('http://docs.opencv.org/modules/imgproc/doc/filtering.html?highlight=getstructuringelement#getstructuringelement%s', None), + 'flood_fill': ( 'http://docs.opencv.org/modules/imgproc/doc/miscellaneous_transformations.html?highlight=floodfill#floodfill%s', None), + 'morphology_ex': ('http://docs.opencv.org/modules/imgproc/doc/filtering.html?highlight=morphologyex#morphologyex%s', None), + 'pyr_down': ('http://docs.opencv.org/modules/imgproc/doc/filtering.html?highlight=pyrdown#pyrdown%s', None), + 'pyr_up': ('http://docs.opencv.org/modules/imgproc/doc/filtering.html?highlight=pyrup#pyrup%s', None), + 'resize': ('http://docs.opencv.org/modules/imgproc/doc/geometric_transformations.html?highlight=resize#resize%s', None), + 'threshold': ('http://docs.opencv.org/modules/imgproc/doc/miscellaneous_transformations.html?highlight=threshold#threshold%s', None), + 'filter2d': ('http://docs.opencv.org/modules/imgproc/doc/filtering.html?highlight=filter2d#filter2d%s', 
None), + 'copy_make_border': ('http://docs.opencv.org/modules/imgproc/doc/filtering.html?highlight=copymakeborder#copymakeborder%s', None), + 'sobel': ('http://docs.opencv.org/modules/imgproc/doc/filtering.html?highlight=sobel#sobel%s', None), + 'scharr': ('http://docs.opencv.org/modules/imgproc/doc/filtering.html?highlight=scharr#scharr%s', None), + 'laplacian': ('http://docs.opencv.org/modules/imgproc/doc/filtering.html?highlight=laplacian#laplacian%s', None), + 'canny': ('http://docs.opencv.org/modules/imgproc/doc/feature_detection.html?highlight=canny#canny%s', None), + 'copy_to': ('http://docs.opencv.org/modules/core/doc/basic_structures.html?highlight=copyto#mat-copyto%s', None), + 'hough_lines' : ('http://docs.opencv.org/modules/imgproc/doc/feature_detection.html?highlight=houghlines#houghlines%s', None), + 'hough_lines_p' : ('http://docs.opencv.org/modules/imgproc/doc/feature_detection.html?highlight=houghlinesp#houghlinesp%s', None), + 'hough_circles' : ('http://docs.opencv.org/modules/imgproc/doc/feature_detection.html?highlight=houghcircles#houghcircles%s', None), + 'remap' : ('http://docs.opencv.org/modules/imgproc/doc/geometric_transformations.html?highlight=remap#remap%s', None), + 'warp_affine' : ('http://docs.opencv.org/modules/imgproc/doc/geometric_transformations.html?highlight=warpaffine#warpaffine%s' , None), + 'get_rotation_matrix_2d' : ('http://docs.opencv.org/modules/imgproc/doc/geometric_transformations.html?highlight=getrotationmatrix2d#getrotationmatrix2d%s', None), + 'get_affine_transform' : ('http://docs.opencv.org/modules/imgproc/doc/geometric_transformations.html?highlight=getaffinetransform#getaffinetransform%s', None), + 'equalize_hist' : ('http://docs.opencv.org/modules/imgproc/doc/histograms.html?highlight=equalizehist#equalizehist%s', None), + 'split' : ('http://docs.opencv.org/modules/core/doc/operations_on_arrays.html?highlight=split#split%s', None), + 'calc_hist' : ('http://docs.opencv.org/modules/imgproc/doc/histograms.html?highlight=calchist#calchist%s', None), + 'normalize' : ('http://docs.opencv.org/modules/core/doc/operations_on_arrays.html?highlight=normalize#normalize%s', None), + 'match_template' : ('http://docs.opencv.org/modules/imgproc/doc/object_detection.html?highlight=matchtemplate#matchtemplate%s', None), + 'min_max_loc' : ('http://docs.opencv.org/modules/core/doc/operations_on_arrays.html?highlight=minmaxloc#minmaxloc%s', None), + 'mix_channels' : ( 'http://docs.opencv.org/modules/core/doc/operations_on_arrays.html?highlight=mixchannels#mixchannels%s', None), + 'calc_back_project' : ('http://docs.opencv.org/modules/imgproc/doc/histograms.html?highlight=calcbackproject#calcbackproject%s', None), + 'compare_hist' : ('http://docs.opencv.org/modules/imgproc/doc/histograms.html?highlight=comparehist#comparehist%s', None), + 'corner_harris' : ('http://docs.opencv.org/modules/imgproc/doc/feature_detection.html?highlight=cornerharris#cornerharris%s', None), + 'good_features_to_track' : ('http://docs.opencv.org/modules/imgproc/doc/feature_detection.html?highlight=goodfeaturestotrack#goodfeaturestotrack%s', None), + 'corner_min_eigenval' : ('http://docs.opencv.org/modules/imgproc/doc/feature_detection.html?highlight=cornermineigenval#cornermineigenval%s', None), + 'corner_eigenvals_and_vecs' : ('http://docs.opencv.org/modules/imgproc/doc/feature_detection.html?highlight=cornereigenvalsandvecs#cornereigenvalsandvecs%s', None), + 'corner_sub_pix' : 
('http://docs.opencv.org/modules/imgproc/doc/feature_detection.html?highlight=cornersubpix#cornersubpix%s', None), + 'find_contours' : ('http://docs.opencv.org/modules/imgproc/doc/structural_analysis_and_shape_descriptors.html?highlight=findcontours#findcontours%s', None), + 'convex_hull' : ('http://docs.opencv.org/modules/imgproc/doc/structural_analysis_and_shape_descriptors.html?highlight=convexhull#convexhull%s', None), + 'draw_contours' : ('http://docs.opencv.org/modules/imgproc/doc/structural_analysis_and_shape_descriptors.html?highlight=drawcontours#drawcontours%s', None), + 'bounding_rect' : ('http://docs.opencv.org/modules/imgproc/doc/structural_analysis_and_shape_descriptors.html?highlight=boundingrect#boundingrect%s', None), + 'min_enclosing_circle' : ('http://docs.opencv.org/modules/imgproc/doc/structural_analysis_and_shape_descriptors.html?highlight=minenclosingcircle#minenclosingcircle%s', None), + 'min_area_rect' : ('http://docs.opencv.org/modules/imgproc/doc/structural_analysis_and_shape_descriptors.html?highlight=minarearect#minarearect%s', None), + 'fit_ellipse' : ('http://docs.opencv.org/modules/imgproc/doc/structural_analysis_and_shape_descriptors.html?highlight=fitellipse#fitellipse%s', None), + 'moments' : ('http://docs.opencv.org/modules/imgproc/doc/structural_analysis_and_shape_descriptors.html?highlight=moments#moments%s', None), + 'contour_area' : ('http://docs.opencv.org/modules/imgproc/doc/structural_analysis_and_shape_descriptors.html?highlight=contourarea#contourarea%s', None), + 'arc_length' : ('http://docs.opencv.org/modules/imgproc/doc/structural_analysis_and_shape_descriptors.html?highlight=arclength#arclength%s', None), + 'point_polygon_test' : ('http://docs.opencv.org/modules/imgproc/doc/structural_analysis_and_shape_descriptors.html?highlight=pointpolygontest#pointpolygontest%s', None), + 'feature_detector' : ( 'http://docs.opencv.org/modules/features2d/doc/common_interfaces_of_feature_detectors.html?highlight=featuredetector#FeatureDetector%s', None), + 'feature_detector_detect' : ('http://docs.opencv.org/modules/features2d/doc/common_interfaces_of_feature_detectors.html?highlight=detect#featuredetector-detect%s', None ), + 'surf_feature_detector' : ('http://docs.opencv.org/modules/features2d/doc/common_interfaces_of_feature_detectors.html?highlight=surffeaturedetector#surffeaturedetector%s', None ), + 'draw_keypoints' : ('http://docs.opencv.org/modules/features2d/doc/drawing_function_of_keypoints_and_matches.html?highlight=drawkeypoints#drawkeypoints%s', None ), + 'descriptor_extractor': ( 'http://docs.opencv.org/modules/features2d/doc/common_interfaces_of_descriptor_extractors.html?highlight=descriptorextractor#descriptorextractor%s', None ), + 'descriptor_extractor_compute' : ( 'http://docs.opencv.org/modules/features2d/doc/common_interfaces_of_descriptor_extractors.html?highlight=compute#descriptorextractor-compute%s', None ), + 'surf_descriptor_extractor' : ( 'http://docs.opencv.org/modules/features2d/doc/common_interfaces_of_descriptor_extractors.html?highlight=surfdescriptorextractor#surfdescriptorextractor%s', None ), + 'draw_matches' : ( 'http://docs.opencv.org/modules/features2d/doc/drawing_function_of_keypoints_and_matches.html?highlight=drawmatches#drawmatches%s', None ), + 'find_homography' : ('http://docs.opencv.org/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.html?highlight=findhomography#findhomography%s', None), + 'perspective_transform' : 
('http://docs.opencv.org/modules/core/doc/operations_on_arrays.html?highlight=perspectivetransform#perspectivetransform%s', None ), + 'flann_based_matcher' : ('http://docs.opencv.org/modules/features2d/doc/common_interfaces_of_descriptor_matchers.html?highlight=flannbasedmatcher#flannbasedmatcher%s', None), + 'brute_force_matcher' : ('http://docs.opencv.org/modules/features2d/doc/common_interfaces_of_descriptor_matchers.html?highlight=bruteforcematcher#bruteforcematcher%s', None ), + 'cascade_classifier' : ('http://docs.opencv.org/modules/objdetect/doc/cascade_classification.html?highlight=cascadeclassifier#cascadeclassifier%s', None ), + 'cascade_classifier_load' : ('http://docs.opencv.org/modules/objdetect/doc/cascade_classification.html?highlight=load#cascadeclassifier-load%s', None ), + 'cascade_classifier_detect_multiscale' : ('http://docs.opencv.org/modules/objdetect/doc/cascade_classification.html?highlight=detectmultiscale#cascadeclassifier-detectmultiscale%s', None ) } diff --git a/doc/tutorials/definitions/noContent.rst b/doc/tutorials/definitions/noContent.rst index ded9e78fa1..c2780c266d 100644 --- a/doc/tutorials/definitions/noContent.rst +++ b/doc/tutorials/definitions/noContent.rst @@ -1,3 +1,3 @@ .. note:: - Unfortunetly we have no tutorials into this section. Nevertheless, our tutorial writting team is working on it. If you have a tutorial suggestion or you have writen yourself a tutorial (or coded a sample code) that you would like to see here please contact us via our :opencv_group:`user group <>`. \ No newline at end of file + Unfortunetly we have no tutorials into this section. And you can help us with that, since OpenCV is a community effort. If you have a tutorial suggestion or you have written a tutorial yourself (or coded a sample code) that you would like to see here, please contact follow these instructions: :ref:`howToWriteTutorial` and :how_to_contribute:`How to contribute <>`. diff --git a/doc/tutorials/gpu/gpu-basics-similarity/gpu-basics-similarity.rst b/doc/tutorials/gpu/gpu-basics-similarity/gpu-basics-similarity.rst index 86400bfae4..2e8fe9559b 100644 --- a/doc/tutorials/gpu/gpu-basics-similarity/gpu-basics-similarity.rst +++ b/doc/tutorials/gpu/gpu-basics-similarity/gpu-basics-similarity.rst @@ -1 +1,234 @@ -.. _gpuBasicsSimilarity: Similarity check (PNSR and SSIM) on the GPU ******************************************* Goal ==== In the :ref:`videoInputPSNRMSSIM` tutorial I already presented the PSNR and SSIM methods for checking the similarity between the two images. And as you could see there performing these takes quite some time, especially in the case of the SSIM. However, if the performance numbers of an OpenCV implementation for the CPU do not satisfy you and you happen to have an NVidia CUDA GPU device in your system all is not lost. You may try to port or write your algorithm for the video card. This tutorial will give a good grasp on how to approach coding by using the GPU module of OpenCV. As a prerequisite you should already know how to handle the core, highgui and imgproc modules. So, our goals are: .. container:: enumeratevisibleitemswithsquare + What's different compared to the CPU? 
+ Create the GPU code for the PSNR and SSIM + Optimize the code for maximal performance The source code =============== You may also find the source code and these video file in the :file:`samples/cpp/tutorial_code/gpu/gpu-basics-similarity/gpu-basics-similarity` folder of the OpenCV source library or :download:`download it from here <../../../../samples/cpp/tutorial_code/gpu/gpu-basics-similarity/gpu-basics-similarity.cpp>`. The full source code is quite long (due to the controlling of the application via the command line arguments and performance measurement). Therefore, to avoid cluttering up these sections with those you'll find here only the functions itself. The PSNR returns a float number, that if the two inputs are similar between 30 and 50 (higher is better). .. literalinclude:: ../../../../samples/cpp/tutorial_code/gpu/gpu-basics-similarity/gpu-basics-similarity.cpp :language: cpp :linenos: :tab-width: 4 :lines: 165-210, 18-23, 210-235 The SSIM returns the MSSIM of the images. This is too a float number between zero and one (higher is better), however we have one for each channel. Therefore, we return a *Scalar* OpenCV data structure: .. literalinclude:: ../../../../samples/cpp/tutorial_code/gpu/gpu-basics-similarity/gpu-basics-similarity.cpp :language: cpp :linenos: :tab-width: 4 :lines: 235-355, 26-42, 357- How to do it? - The GPU ======================= Now as you can see we have three types of functions for each operation. One for the CPU and two for the GPU. The reason I made two for the GPU is too illustrate that often simple porting your CPU to GPU will actually make it slower. If you want some performance gain you will need to remember a few rules, whose I'm going to detail later on. The development of the GPU module was made so that it resembles as much as possible its CPU counterpart. This is to make porting easy. The first thing you need to do before writing any code is to link the GPU module to your project, and include the header file for the module. All the functions and data structures of the GPU are in a *gpu* sub namespace of the *cv* namespace. You may add this to the default one via the *use namespace* keyword, or mark it everywhere explicitly via the cv:: to avoid confusion. I'll do the later. .. code-block:: cpp #include // GPU structures and methods GPU stands for **g**\ raphics **p**\ rocessing **u**\ nit. It was originally build to render graphical scenes. These scenes somehow build on a lot of data. Nevertheless, these aren't all dependent one from another in a sequential way and as it is possible a parallel processing of them. Due to this a GPU will contain multiple smaller processing units. These aren't the state of the art processors and on a one on one test with a CPU it will fall behind. However, its strength lies in its numbers. In the last years there has been an increasing trend to harvest these massive parallel powers of the GPU in non-graphical scene rendering too. This gave birth to the general-purpose computation on graphics processing units (GPGPU). The GPU has its own memory. When you read data from the hard drive with OpenCV into a *Mat* object that takes place in your systems memory. The CPU works somehow directly on this (via its cache), however the GPU cannot. He has too transferred the information he will use for calculations from the system memory to its own. This is done via an upload process and takes time. In the end the result will have to be downloaded back to your system memory for your CPU to see it and use it. 
Porting small functions to GPU is not recommended as the upload/download time will be larger than the amount you gain by a parallel execution. Mat objects are stored only in the system memory (or the CPU cache). For getting an OpenCV matrix to the GPU you'll need to use its GPU counterpart :gpudatastructure:`GpuMat `. It works similar to the Mat with a 2D only limitation and no reference returning for its functions (cannot mix GPU references with CPU ones). To upload a Mat object to the GPU you need to call the upload function after creating an instance of the class. To download you may use simple assignment to a Mat object or use the download function. .. code-block:: cpp Mat I1; // Main memory item - read image into with imread for example gpu::GpuMat gI; // GPU matrix - for now empty gI1.upload(I1); // Upload a data from the system memory to the GPU memory I1 = gI1; // Download, gI1.download(I1) will work too Once you have your data up in the GPU memory you may call GPU enabled functions of OpenCV. Most of the functions keep the same name just as on the CPU, with the difference that they only accept *GpuMat* inputs. A full list of these you will find in the documentation: `online here `_ or the OpenCV reference manual that comes with the source code. Another thing to keep in mind is that not for all channel numbers you can make efficient algorithms on the GPU. Generally, I found that the input images for the GPU images need to be either one or four channel ones and one of the char or float type for the item sizes. No double support on the GPU, sorry. Passing other types of objects for some functions will result in an exception thrown, and an error message on the error output. The documentation details in most of the places the types accepted for the inputs. If you have three channel images as an input you can do two things: either adds a new channel (and use char elements) or split up the image and call the function for each image. The first one isn't really recommended as you waste memory. For some functions, where the position of the elements (neighbor items) doesn't matter quick solution is to just reshape it into a single channel image. This is the case for the PSNR implementation where for the *absdiff* method the value of the neighbors is not important. However, for the *GaussianBlur* this isn't an option and such need to use the split method for the SSIM. With this knowledge you can already make a GPU viable code (like mine GPU one) and run it. You'll be surprised to see that it might turn out slower than your CPU implementation. Optimization ============ The reason for this is that you're throwing out on the window the price for memory allocation and data transfer. And on the GPU this is damn high. Another possibility for optimization is to introduce asynchronous OpenCV GPU calls too with the help of the :gpudatastructure:`gpu::Stream`. 1. Memory allocation on the GPU is considerable. Therefore, if it’s possible allocate new memory as few times as possible. If you create a function what you intend to call multiple times it is a good idea to allocate any local parameters for the function only once, during the first call. To do this you create a data structure containing all the local variables you will use. For instance in case of the PSNR these are: .. code-block:: cpp struct BufferPSNR // Optimized GPU versions { // Data allocations are very expensive on GPU. Use a buffer to solve: allocate once reuse later. 
gpu::GpuMat gI1, gI2, gs, t1,t2; gpu::GpuMat buf; }; Then create an instance of this in the main program: .. code-block:: cpp BufferPSNR bufferPSNR; And finally pass this to the function each time you call it: .. code-block:: cpp double getPSNR_GPU_optimized(const Mat& I1, const Mat& I2, BufferPSNR& b) Now you access these local parameters as: *b.gI1*, *b.buf* and so on. The GpuMat will only reallocate itself on a new call if the new matrix size is different from the previous one. #. Avoid unnecessary function data transfers. Any small data transfer will be significant one once you go to the GPU. Therefore, if possible make all calculations in-place (in other words do not create new memory objects - for reasons explained at the previous point). For example, although expressing arithmetical operations may be easier to express in one line formulas, it will be slower. In case of the SSIM at one point I need to calculate: .. code-block:: cpp b.t1 = 2 * b.mu1_mu2 + C1; Although the upper call will succeed observe that there is a hidden data transfer present. Before it makes the addition it needs to store somewhere the multiplication. Therefore, it will create a local matrix in the background, add to that the *C1* value and finally assign that to *t1*. To avoid this we use the gpu functions, instead of the arithmetic operators: .. code-block:: cpp gpu::multiply(b.mu1_mu2, 2, b.t1); //b.t1 = 2 * b.mu1_mu2 + C1; gpu::add(b.t1, C1, b.t1); #. Use asynchronous calls (the :gpudatastructure:`gpu::Stream `). By default whenever you call a gpu function it will wait for the call to finish and return with the result afterwards. However, it is possible to make asynchronous calls, meaning it will call for the operation execution, make the costly data allocations for the algorithm and return back right away. Now you can call another function if you wish to do so. For the MSSIM this is a small optimization point. In our default implementation we split up the image into channels and call then for each channel the gpu functions. A small degree of parallelization is possible with the stream. By using a stream we can make the data allocation, upload operations while the GPU is already executing a given method. For example we need to upload two images. We queue these one after another and call already the function that processes it. The functions will wait for the upload to finish, however while that happens makes the output buffer allocations for the function to be executed next. .. code-block:: cpp gpu::Stream stream; stream.enqueueConvert(b.gI1, b.t1, CV_32F); // Upload gpu::split(b.t1, b.vI1, stream); // Methods (pass the stream as final parameter). gpu::multiply(b.vI1[i], b.vI1[i], b.I1_2, stream); // I1^2 Result and conclusion ===================== On an Intel P8700 laptop CPU paired with a low end NVidia GT220M here are the performance numbers: .. code-block:: cpp Time of PSNR CPU (averaged for 10 runs): 41.4122 milliseconds. With result of: 19.2506 Time of PSNR GPU (averaged for 10 runs): 158.977 milliseconds. With result of: 19.2506 Initial call GPU optimized: 31.3418 milliseconds. With result of: 19.2506 Time of PSNR GPU OPTIMIZED ( / 10 runs): 24.8171 milliseconds. With result of: 19.2506 Time of MSSIM CPU (averaged for 10 runs): 484.343 milliseconds. With result of B0.890964 G0.903845 R0.936934 Time of MSSIM GPU (averaged for 10 runs): 745.105 milliseconds. With result of B0.89922 G0.909051 R0.968223 Time of MSSIM GPU Initial Call 357.746 milliseconds. 
With result of B0.890964 G0.903845 R0.936934 Time of MSSIM GPU OPTIMIZED ( / 10 runs): 203.091 milliseconds. With result of B0.890964 G0.903845 R0.936934 In both cases we managed a performance increase of almost 100% compared to the CPU implementation. It may be just the improvement needed for your application to work. You may observe a runtime instance of this on the `YouTube here `_. .. raw:: html
\ No newline at end of file
+.. _gpuBasicsSimilarity:
+
+Similarity check (PSNR and SSIM) on the GPU
+*******************************************
+
+Goal
+====
+
+In the :ref:`videoInputPSNRMSSIM` tutorial I already presented the PSNR and SSIM methods for
+checking the similarity between two images. As you could see there, performing these computations
+takes quite some time, especially in the case of the SSIM. However, if the performance numbers of
+the OpenCV CPU implementation do not satisfy you and you happen to have an NVidia CUDA GPU device
+in your system, all is not lost. You may try to port or rewrite your algorithm for the video card.
+
+This tutorial will give you a good grasp of how to approach coding with the GPU module of OpenCV.
+As a prerequisite you should already know how to handle the core, highgui and imgproc modules. So,
+our goals are:
+
+.. container:: enumeratevisibleitemswithsquare
+
+   + What's different compared to the CPU?
+   + Create the GPU code for the PSNR and SSIM
+   + Optimize the code for maximal performance
+
+The source code
+===============
+
+You may also find the source code and the video file in the
+:file:`samples/cpp/tutorial_code/gpu/gpu-basics-similarity/gpu-basics-similarity` folder of the
+OpenCV source library, or :download:`download it from here
+<../../../../samples/cpp/tutorial_code/gpu/gpu-basics-similarity/gpu-basics-similarity.cpp>`. The
+full source code is quite long (due to controlling the application via command line arguments and
+performance measurement). Therefore, to avoid cluttering these sections you'll find here only the
+functions themselves.
+
+The PSNR returns a float number that lies between 30 and 50 if the two inputs are similar (higher
+is better).
+
+.. literalinclude:: ../../../../samples/cpp/tutorial_code/gpu/gpu-basics-similarity/gpu-basics-similarity.cpp
+   :language: cpp
+   :linenos:
+   :tab-width: 4
+   :lines: 165-210, 18-23, 210-235
+
+The SSIM returns the MSSIM of the images. This too is a float number between zero and one (higher
+is better), however we have one value for each channel. Therefore, we return a *Scalar* OpenCV
+data structure:
+
+.. literalinclude:: ../../../../samples/cpp/tutorial_code/gpu/gpu-basics-similarity/gpu-basics-similarity.cpp
+   :language: cpp
+   :linenos:
+   :tab-width: 4
+   :lines: 235-355, 26-42, 357-
+
+How to do it? - The GPU
+=======================
+
+Now, as you can see, we have three types of functions for each operation: one for the CPU and two
+for the GPU. The reason I made two for the GPU is to illustrate that simply porting your CPU code
+to the GPU will often actually make it slower. If you want some performance gain you will need to
+remember a few rules, which I'm going to detail later on.
+
+The GPU module was developed so that it resembles its CPU counterpart as much as possible. This is
+to make porting easy. The first thing you need to do before writing any code is to link the GPU
+module to your project and include the header file for the module. All the functions and data
+structures of the GPU module are in a *gpu* sub namespace of the *cv* namespace. You may add this
+to the default one via the *use namespace* keyword, or mark it everywhere explicitly via the cv::
+prefix to avoid confusion. I'll do the latter.
+
+.. code-block:: cpp
+
+   #include // GPU structures and methods
+
+GPU stands for **g**\ raphics **p**\ rocessing **u**\ nit. It was originally built to render
+graphical scenes. These scenes are built from a lot of data. Nevertheless, the data elements are
+not all dependent on one another in a sequential way, so they can be processed in parallel.
+Because of this, a GPU contains multiple smaller processing units. These are not state-of-the-art
+processors, and in a one-on-one test against a CPU each of them will fall behind. However, their
+strength lies in their numbers. In recent years there has been an increasing trend to harness this
+massive parallel power of the GPU for non-graphical scene rendering too. This gave birth to
+general-purpose computation on graphics processing units (GPGPU).
+
+The GPU has its own memory. When you read data from the hard drive with OpenCV into a *Mat* object,
+that takes place in your system memory. The CPU works more or less directly on this (via its
+cache); the GPU, however, cannot. It has to transfer the information it will use for its
+calculations from the system memory to its own. This is done via an upload process and takes time.
+In the end the result will have to be downloaded back to your system memory for your CPU to see
+and use it. Porting small functions to the GPU is not recommended, as the upload/download time
+will be larger than what you gain from parallel execution.
+
+Mat objects are stored only in the system memory (or the CPU cache). To get an OpenCV matrix to
+the GPU you'll need to use its GPU counterpart, :gpudatastructure:`GpuMat `. It works similarly to
+the Mat, with a 2D-only limitation and no reference returning from its functions (you cannot mix
+GPU references with CPU ones). To upload a Mat object to the GPU you need to call the upload
+function after creating an instance of the class. To download you may use simple assignment to a
+Mat object, or use the download function.
+
+.. code-block:: cpp
+
+   Mat I1;          // Main memory item - read an image into it with imread, for example
+   gpu::GpuMat gI1; // GPU matrix - for now empty
+   gI1.upload(I1);  // Upload data from the system memory to the GPU memory
+
+   I1 = gI1;        // Download; gI1.download(I1) will work too
+
+Once you have your data up in the GPU memory you may call the GPU-enabled functions of OpenCV.
+Most of the functions keep the same name as on the CPU, with the difference that they only accept
+*GpuMat* inputs. A full list of these you will find in the documentation: `online here `_ or in
+the OpenCV reference manual that comes with the source code.
+
+Another thing to keep in mind is that not all channel counts allow for efficient algorithms on the
+GPU. Generally, I found that the input images for the GPU need to have either one or four channels
+and be of the char or float type for the element size. No double support on the GPU, sorry.
+Passing other types of objects to some functions will result in an exception being thrown and an
+error message on the error output. The documentation details in most places the types accepted for
+the inputs. If you have three channel images as an input you can do two things: either add a new
+channel (and use char elements) or split up the image and call the function for each channel
+image. The first one isn't really recommended, as it wastes memory.
+
+For some functions, where the position of the elements (neighbor items) doesn't matter, a quick
+solution is to just reshape the image into a single channel one. This is the case for the PSNR
+implementation, where for the *absdiff* method the values of the neighbors are not important.
+However, for *GaussianBlur* this isn't an option, and so we need to use the split method for the
+SSIM. With this knowledge you can already write GPU-viable code (like my GPU version) and run it.
+You'll be surprised to see that it might turn out slower than your CPU implementation.
+
+Optimization
+============
+
+The reason for this is that you're disregarding the price of memory allocation and data transfer,
+and on the GPU this price is quite high. Another possibility for optimization is to introduce
+asynchronous OpenCV GPU calls too, with the help of the :gpudatastructure:`gpu::Stream`.
+
+1. Memory allocation on the GPU is considerably expensive. Therefore, if possible, allocate new
+   memory as few times as possible. If you create a function that you intend to call multiple
+   times, it is a good idea to allocate any local parameters for the function only once, during
+   the first call. To do this you create a data structure containing all the local variables you
+   will use. For instance, in the case of the PSNR these are:
+
+   .. code-block:: cpp
+
+      struct BufferPSNR // Optimized GPU versions
+      { // Data allocations are very expensive on GPU. Use a buffer to solve: allocate once, reuse later.
+        gpu::GpuMat gI1, gI2, gs, t1, t2;
+
+        gpu::GpuMat buf;
+      };
+
+   Then create an instance of this in the main program:
+
+   .. code-block:: cpp
+
+      BufferPSNR bufferPSNR;
+
+   And finally pass this to the function each time you call it:
+
+   .. code-block:: cpp
+
+      double getPSNR_GPU_optimized(const Mat& I1, const Mat& I2, BufferPSNR& b)
+
+   Now you access these local parameters as *b.gI1*, *b.buf* and so on. The GpuMat will only
+   reallocate itself on a new call if the new matrix size differs from the previous one.
+
+#. Avoid unnecessary function data transfers. Any small data transfer becomes significant once you
+   go to the GPU. Therefore, if possible, make all calculations in-place (in other words, do not
+   create new memory objects, for the reasons explained in the previous point). For example,
+   although arithmetic operations may be easier to express as one-line formulas, they will be
+   slower. In the case of the SSIM at one point I need to calculate:
+
+   .. code-block:: cpp
+
+      b.t1 = 2 * b.mu1_mu2 + C1;
+
+   Although the above call will succeed, observe that there is a hidden data transfer present.
+   Before it makes the addition it needs to store the result of the multiplication somewhere.
+   Therefore, it will create a local matrix in the background, add the *C1* value to that, and
+   finally assign the result to *t1*. To avoid this we use the gpu functions instead of the
+   arithmetic operators:
+
+   .. code-block:: cpp
+
+      gpu::multiply(b.mu1_mu2, 2, b.t1); //b.t1 = 2 * b.mu1_mu2 + C1;
+      gpu::add(b.t1, C1, b.t1);
+
+#. Use asynchronous calls (the :gpudatastructure:`gpu::Stream `). By default, whenever you call a
+   gpu function it will wait for the call to finish and return with the result afterwards.
+   However, it is possible to make asynchronous calls, meaning it will queue the operation for
+   execution, make the costly data allocations for the algorithm and return right away. Now you
+   can call another function if you wish to do so. For the MSSIM this is a small optimization
+   point. In our default implementation we split up the image into channels and then call the gpu
+   functions for each channel. A small degree of parallelization is possible with the stream. By
+   using a stream we can perform the data allocation and upload operations while the GPU is
+   already executing a given method. For example, we need to upload two images.
We queue these one + after another and call already the function that processes it. The functions will wait for the + upload to finish, however while that happens makes the output buffer allocations for the function + to be executed next. + + .. code-block:: cpp + + gpu::Stream stream; + + stream.enqueueConvert(b.gI1, b.t1, CV_32F); // Upload + + gpu::split(b.t1, b.vI1, stream); // Methods (pass the stream as final parameter). + gpu::multiply(b.vI1[i], b.vI1[i], b.I1_2, stream); // I1^2 + +Result and conclusion +===================== + +On an Intel P8700 laptop CPU paired with a low end NVidia GT220M here are the performance numbers: + +.. code-block:: cpp + + Time of PSNR CPU (averaged for 10 runs): 41.4122 milliseconds. With result of: 19.2506 + Time of PSNR GPU (averaged for 10 runs): 158.977 milliseconds. With result of: 19.2506 + Initial call GPU optimized: 31.3418 milliseconds. With result of: 19.2506 + Time of PSNR GPU OPTIMIZED ( / 10 runs): 24.8171 milliseconds. With result of: 19.2506 + + Time of MSSIM CPU (averaged for 10 runs): 484.343 milliseconds. With result of B0.890964 G0.903845 R0.936934 + Time of MSSIM GPU (averaged for 10 runs): 745.105 milliseconds. With result of B0.89922 G0.909051 R0.968223 + Time of MSSIM GPU Initial Call 357.746 milliseconds. With result of B0.890964 G0.903845 R0.936934 + Time of MSSIM GPU OPTIMIZED ( / 10 runs): 203.091 milliseconds. With result of B0.890964 G0.903845 R0.936934 + +In both cases we managed a performance increase of almost 100% compared to the CPU implementation. +It may be just the improvement needed for your application to work. You may observe a runtime +instance of this on the `YouTube here `_. + +.. raw:: html + +
    + +
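The tutorial above shows the upload/download pattern, the buffer-reuse trick and the in-place gpu arithmetic only as isolated fragments. The sketch below ties them together in one small program, assuming the 2.4-era cv::gpu API that the tutorial targets; the function name getPSNR_GPU, the buffer layout and the input names img1.png / img2.png are illustrative choices, not part of the patch or of the sample file it references.

.. code-block:: cpp

   // Illustrative sketch (not part of the patch): PSNR on the GPU with a reusable buffer.
   #include <cmath>
   #include <iostream>
   #include <opencv2/core/core.hpp>
   #include <opencv2/highgui/highgui.hpp>
   #include <opencv2/gpu/gpu.hpp>

   using namespace cv;

   struct BufferPSNR                             // allocated on the first call, reused afterwards
   {
       gpu::GpuMat gI1, gI2, t1, t2, diff2;
   };

   double getPSNR_GPU(const Mat& I1, const Mat& I2, BufferPSNR& b)
   {
       // Reshape to one channel: for absdiff the position of the neighbours does not matter.
       b.gI1.upload(I1.reshape(1));
       b.gI2.upload(I2.reshape(1));

       b.gI1.convertTo(b.t1, CV_32F);            // work in float to avoid overflow
       b.gI2.convertTo(b.t2, CV_32F);

       gpu::absdiff(b.t1, b.t2, b.diff2);        // |I1 - I2|
       gpu::multiply(b.diff2, b.diff2, b.diff2); // (I1 - I2)^2, in place

       double sse = gpu::sum(b.diff2)[0];        // sum of squared errors, returned as a Scalar
       if (sse <= 1e-10) return 0;               // practically identical images

       double mse = sse / (double)(I1.channels() * I1.total());
       return 10.0 * log10(255 * 255 / mse);
   }

   int main()
   {
       if (gpu::getCudaEnabledDeviceCount() == 0)
       {
           std::cout << "No CUDA capable device found." << std::endl;
           return -1;
       }

       Mat I1 = imread("img1.png");              // illustrative input names
       Mat I2 = imread("img2.png");
       if (I1.empty() || I2.empty() || I1.size() != I2.size())
           return -1;

       BufferPSNR buf;                           // first call allocates, later calls reuse
       for (int i = 0; i < 10; ++i)
           std::cout << "PSNR: " << getPSNR_GPU(I1, I2, buf) << std::endl;

       return 0;
   }

On the first call the GpuMat members allocate; later calls with same-sized inputs reuse that memory, which is exactly what optimization point 1 of the tutorial recommends. The same buffer idea carries over to the asynchronous gpu::Stream variant sketched next.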
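For optimization point 3, here is a compact sketch of the asynchronous pattern with gpu::Stream, again assuming the 2.4-era cv::gpu API; the helper name squareBothAsync and the toy inputs are hypothetical and only illustrate the queue-then-wait idea.

.. code-block:: cpp

   // Illustrative sketch (not part of the patch): overlapping uploads with work via gpu::Stream.
   #include <opencv2/core/core.hpp>
   #include <opencv2/gpu/gpu.hpp>

   using namespace cv;

   // Squares two single-channel float images on the GPU; the uploads and the multiplications
   // are queued on one stream, so the host blocks only once at the end.
   void squareBothAsync(const Mat& I1, const Mat& I2,
                        gpu::GpuMat& g1, gpu::GpuMat& g2,
                        gpu::GpuMat& sq1, gpu::GpuMat& sq2)
   {
       gpu::Stream stream;

       stream.enqueueUpload(I1, g1);                // queue the uploads ...
       stream.enqueueUpload(I2, g2);

       gpu::multiply(g1, g1, sq1, 1, -1, stream);   // ... and the work that consumes them
       gpu::multiply(g2, g2, sq2, 1, -1, stream);

       stream.waitForCompletion();                  // synchronize once, at the end
   }

   int main()
   {
       if (gpu::getCudaEnabledDeviceCount() == 0)
           return -1;

       Mat I1 = Mat::ones(480, 640, CV_32FC1) * 3.f;  // toy inputs
       Mat I2 = Mat::ones(480, 640, CV_32FC1) * 5.f;

       gpu::GpuMat g1, g2, sq1, sq2;
       squareBothAsync(I1, I2, g1, g2, sq1, sq2);

       Mat result;
       sq2.download(result);                          // result is filled with 25
       return 0;
   }

The single waitForCompletion() at the end replaces the implicit synchronization that each blocking call would otherwise perform.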
    diff --git a/doc/tutorials/introduction/android_binary_package/O4A_SDK.rst b/doc/tutorials/introduction/android_binary_package/O4A_SDK.rst index b6c859dc34..d2d38ab943 100644 --- a/doc/tutorials/introduction/android_binary_package/O4A_SDK.rst +++ b/doc/tutorials/introduction/android_binary_package/O4A_SDK.rst @@ -48,10 +48,10 @@ The structure of package contents looks as follows: :: - OpenCV-2.4.5-android-sdk + OpenCV-2.4.6-android-sdk |_ apk - | |_ OpenCV_2.4.5_binary_pack_armv7a.apk - | |_ OpenCV_2.4.5_Manager_2.7_XXX.apk + | |_ OpenCV_2.4.6_binary_pack_armv7a.apk + | |_ OpenCV_2.4.6_Manager_2.8_XXX.apk | |_ doc |_ samples @@ -98,7 +98,7 @@ The structure of package contents looks as follows: * :file:`doc` folder contains various OpenCV documentation in PDF format. It's also available online at http://docs.opencv.org. - .. note:: The most recent docs (nightly build) are at http://docs.opencv.org/trunk/. + .. note:: The most recent docs (nightly build) are at http://docs.opencv.org/2.4. Generally, it's more up-to-date, but can refer to not-yet-released functionality. .. TODO: I'm not sure that this is the best place to talk about OpenCV Manager @@ -157,10 +157,10 @@ Get the OpenCV4Android SDK .. code-block:: bash - unzip ~/Downloads/OpenCV-2.4.5-android-sdk.zip + unzip ~/Downloads/OpenCV-2.4.6-android-sdk.zip -.. |opencv_android_bin_pack| replace:: :file:`OpenCV-2.4.5-android-sdk.zip` -.. _opencv_android_bin_pack_url: http://sourceforge.net/projects/opencvlibrary/files/opencv-android/2.4.5/OpenCV-2.4.5-android-sdk.zip/download +.. |opencv_android_bin_pack| replace:: :file:`OpenCV-2.4.6-android-sdk.zip` +.. _opencv_android_bin_pack_url: http://sourceforge.net/projects/opencvlibrary/files/opencv-android/2.4.6/OpenCV-2.4.6-android-sdk.zip/download .. |opencv_android_bin_pack_url| replace:: |opencv_android_bin_pack| .. |seven_zip| replace:: 7-Zip .. _seven_zip: http://www.7-zip.org/ @@ -295,7 +295,7 @@ Well, running samples from Eclipse is very simple: .. code-block:: sh :linenos: - /platform-tools/adb install /apk/OpenCV_2.4.5_Manager_2.7_armv7a-neon.apk + /platform-tools/adb install /apk/OpenCV_2.4.6_Manager_2.8_armv7a-neon.apk .. note:: ``armeabi``, ``armv7a-neon``, ``arm7a-neon-android8``, ``mips`` and ``x86`` stand for platform targets: diff --git a/doc/tutorials/introduction/android_binary_package/dev_with_OCV_on_Android.rst b/doc/tutorials/introduction/android_binary_package/dev_with_OCV_on_Android.rst index 231fe5afa1..c86ae378ca 100644 --- a/doc/tutorials/introduction/android_binary_package/dev_with_OCV_on_Android.rst +++ b/doc/tutorials/introduction/android_binary_package/dev_with_OCV_on_Android.rst @@ -55,14 +55,14 @@ Manager to access OpenCV libraries externally installed in the target system. :guilabel:`File -> Import -> Existing project in your workspace`. Press :guilabel:`Browse` button and locate OpenCV4Android SDK - (:file:`OpenCV-2.4.5-android-sdk/sdk`). + (:file:`OpenCV-2.4.6-android-sdk/sdk`). .. image:: images/eclipse_opencv_dependency0.png :alt: Add dependency from OpenCV library :align: center #. In application project add a reference to the OpenCV Java SDK in - :guilabel:`Project -> Properties -> Android -> Library -> Add` select ``OpenCV Library - 2.4.5``. + :guilabel:`Project -> Properties -> Android -> Library -> Add` select ``OpenCV Library - 2.4.6``. .. image:: images/eclipse_opencv_dependency1.png :alt: Add dependency from OpenCV library @@ -101,7 +101,7 @@ See the "15-puzzle" OpenCV sample for details. 
public void onResume() { super.onResume(); - OpenCVLoader.initAsync(OpenCVLoader.OPENCV_VERSION_2_4_5, this, mLoaderCallback); + OpenCVLoader.initAsync(OpenCVLoader.OPENCV_VERSION_2_4_6, this, mLoaderCallback); } ... @@ -125,30 +125,30 @@ designed mostly for development purposes. This approach is deprecated for the pr release package is recommended to communicate with OpenCV Manager via the async initialization described above. -#. Add the OpenCV library project to your workspace the same way as for the async initialization +#. Add the OpenCV library project to your workspace the same way as for the async initialization above. Use menu :guilabel:`File -> Import -> Existing project in your workspace`, - press :guilabel:`Browse` button and select OpenCV SDK path - (:file:`OpenCV-2.4.5-android-sdk/sdk`). + press :guilabel:`Browse` button and select OpenCV SDK path + (:file:`OpenCV-2.4.6-android-sdk/sdk`). .. image:: images/eclipse_opencv_dependency0.png :alt: Add dependency from OpenCV library :align: center #. In the application project add a reference to the OpenCV4Android SDK in - :guilabel:`Project -> Properties -> Android -> Library -> Add` select ``OpenCV Library - 2.4.5``; + :guilabel:`Project -> Properties -> Android -> Library -> Add` select ``OpenCV Library - 2.4.6``; .. image:: images/eclipse_opencv_dependency1.png :alt: Add dependency from OpenCV library :align: center #. If your application project **doesn't have a JNI part**, just copy the corresponding OpenCV - native libs from :file:`/sdk/native/libs/` to your + native libs from :file:`/sdk/native/libs/` to your project directory to folder :file:`libs/`. In case of the application project **with a JNI part**, instead of manual libraries copying you need to modify your ``Android.mk`` file: add the following two code lines after the ``"include $(CLEAR_VARS)"`` and before - ``"include path_to_OpenCV-2.4.5-android-sdk/sdk/native/jni/OpenCV.mk"`` + ``"include path_to_OpenCV-2.4.6-android-sdk/sdk/native/jni/OpenCV.mk"`` .. code-block:: make :linenos: @@ -221,7 +221,7 @@ taken: .. code-block:: make - include C:\Work\OpenCV4Android\OpenCV-2.4.5-android-sdk\sdk\native\jni\OpenCV.mk + include C:\Work\OpenCV4Android\OpenCV-2.4.6-android-sdk\sdk\native\jni\OpenCV.mk Should be inserted into the :file:`jni/Android.mk` file **after** this line: @@ -379,7 +379,7 @@ result. public void onResume() { super.onResume(); - OpenCVLoader.initAsync(OpenCVLoader.OPENCV_VERSION_2_4_3, this, mLoaderCallback); + OpenCVLoader.initAsync(OpenCVLoader.OPENCV_VERSION_2_4_6, this, mLoaderCallback); } #. Defines that your activity implements ``CvViewFrameListener2`` interface and fix activity related diff --git a/doc/tutorials/introduction/how_to_write_a_tutorial/how_to_write_a_tutorial.rst b/doc/tutorials/introduction/how_to_write_a_tutorial/how_to_write_a_tutorial.rst index 26ec076f50..5ae5062492 100644 --- a/doc/tutorials/introduction/how_to_write_a_tutorial/how_to_write_a_tutorial.rst +++ b/doc/tutorials/introduction/how_to_write_a_tutorial/how_to_write_a_tutorial.rst @@ -1 +1,440 @@ -.. _howToWriteTutorial: How to write a tutorial for OpenCV? *********************************** Okay, so assume you have just finished a project of yours implementing something based on OpenCV and you want to present/share it with the community. Luckily, OpenCV is an *open source project*. This means that in theory anyone has access to the full source code and may extend it. 
While making a robust and practical library (like OpenCV) is great, the success of a library also depends on how user friendly it is. To improve on this aspect, the OpenCV team has already been listening to user feedback from its :opencv_group:`Yahoo user group <>` and by making samples you can find in the source directories sample folder. The addition of the tutorials (in both online and PDF format) is an extension of these efforts. Goal ==== .. _reST: http://docutils.sourceforge.net/rst.html .. |reST| replace:: reStructuredText .. |Sphinx| replace:: Sphinx .. _Sphinx: http://sphinx.pocoo.org/ The tutorials are just as an important part of the library as the implementation of those crafty data structures and algorithms you can find in OpenCV. Therefore, the source codes for the tutorials are part of the library. And yes, I meant source codes. The reason for this formulation is that the tutorials are written by using the |Sphinx|_ documentation generation system. This is based on the popular python documentation system called |reST|_ (reST). ReStructuredText is a really neat language that by using a few simple conventions (indentation, directives) and emulating old school e-mail writing techniques (text only) tries to offer a simple way to create and edit documents. Sphinx extends this with some new features and creates the resulting document in both HTML (for web) and PDF (for offline usage) format. Usually, an OpenCV tutorial has the following parts: 1. A source code demonstration of an OpenCV feature: a. One or more CPP, Python, Java or other type of files depending for what OpenCV offers support and for what language you make the tutorial. #. Occasionaly, input resource files required for running your tutorials application. #. A table of content entry (so people may easily find the tutorial): a. Adding your stuff to the tutorials table of content (**reST** file). #. Add an image file near the TOC entry. #. The content of the tutorial itself: a. The **reST** text of the tutorial #. Images following the idea that "*A picture is worth a thousand words*". #. For more complex demonstrations you may create a video. As you can see you will need at least some basic knowledge of the *reST* system in order to complete the task at hand with success. However, don't worry *reST* (and *Sphinx*) was made with simplicity in mind. It is easy to grasp its basics. I found that the `OpenAlea documentations introduction on this subject `_ (or the `Thomas Cokelaer one `_ ) should enough for this. If for some directive or feature you need a more in-depth description look it up in the official |reST|_ help files or at the |Sphinx|_ documentation. In our world achieving some tasks is possible in multiple ways. However, some of the roads to take may have obvious or hidden advantages over others. Then again, in some other cases it may come down to just simple user preference. Here, I'll present how I decided to write the tutorials, based on my personal experience. If for some of them you know a better solution and you can back it up feel free to use that. I've nothing against it, as long as it gets the job done in an elegant fashion. Now the best would be if you could make the integration yourself. For this you need first to have the source code. I recommend following the guides for your operating system on acquiring OpenCV sources. For Linux users look :ref:`here ` and for :ref:`Windows here `. You must also install python and sphinx with its dependencies in order to be able to build the documentation. 
Once you have downloaded the repository to your hard drive you can take a look in the OpenCV directory to make sure you have both the samples and doc folder present. Anyone may download the trunk source files from :file:`git://code.opencv.org/opencv.git` . Nevertheless, not everyone has upload (commit/submit) rights. This is to protect the integrity of the library. If you plan doing more than one tutorial, and would like to have an account with commit user rights you should first register an account at http://code.opencv.org/ and then contact dr. Gary Bradski at -delete-bradski@-delete-willowgarage.com. Otherwise, you can just send the resulting files to us via the :opencv_group:`Yahoo user group <>` or to me at -delete-bernat@-delete-primeranks.net and I'll add it. If you have questions, suggestions or constructive critics I will gladly listen to them. If you send it to the OpenCV group please tag its subject with a **[Tutorial]** entry. Format the Source Code ====================== Before I start this let it be clear: the main goal is to have a working sample code. However, for your tutorial to be of a top notch quality you should follow a few guide lines I am going to present here. In case you have an application by using the older interface (with *IplImage*, *CVMat*, *cvLoadImage* and such) consider migrating it to the new C++ interface. The tutorials are intended to be an up to date help for our users. And as of OpenCV 2 the OpenCV emphasis on using the less error prone and clearer C++ interface. Therefore, if possible please convert your code to the C++ interface. For this it may help to read the :ref:`InteroperabilityWithOpenCV1` tutorial. However, once you have an OpenCV 2 working code, then you should make your source code snippet as easy to read as possible. Here're a couple of advices for this: .. container:: enumeratevisibleitemswithsquare + Add a standard output with the description of what your program does. Keep it short and yet, descriptive. This output is at the start of the program. In my example files this usually takes the form of a *help* function containing the output. This way both the source file viewer and application runner can see what all is about in your sample. Here's an instance of this: .. code-block:: cpp void help() { cout << "--------------------------------------------------------------------------" << endl << "This program shows how to write video files. You can extract the R or G or B color channel " << " of the input video. You can choose to use the source codec (Y) or select a custom one. (N)"<< endl << "Usage:" << endl << "./video-write inputvideoName [ R | G | B] [Y | N]" << endl << "--------------------------------------------------------------------------" << endl << endl; } // ... int main(int argc, char *argv[], char *window_name) { help(); // here comes the actual source code } Additionally, finalize the description with a short usage guide. This way the user will know how to call your programs, what leads us to the next point. + Prefer command line argument controlling instead of hard coded one. If your program has some variables that may be changed use command line arguments for this. The tutorials, can be a simple try-out ground for the user. If you offer command line controlling for the input image (for example), then you offer the possibility for the user to try it out with his/her own images, without the need to mess in the source code. 
In the upper example you can see that the input image, channel and codec selection may all be changed from the command line. Just compile the program and run it with your own input arguments. + Be as verbose as possible. There is no shame in filling the source code with comments. This way the more advanced user may figure out what's happening right from the sample code. This advice goes for the output console too. Specify to the user what's happening. Never leave the user hanging there and thinking on: "Is this program now crashing or just doing some computationally intensive task?." So, if you do a training task that may take some time, make sure you print out a message about this before starting and after finishing it. + Throw out unnecessary stuff from your source code. This is a warning to not take the previous point too seriously. Balance is the key. If it's something that can be done in a fewer lines or simpler than that's the way you should do it. Nevertheless, if for some reason you have such sections notify the user why you have chosen to do so. Keep the amount of information as low as possible, while still getting the job done in an elegant way. + Put your sample file into the :file:`opencv/samples/cpp/tutorial_code/sectionName` folder. If you write a tutorial for other languages than cpp, then change that part of the path. Before completing this you need to decide that to what section (module) does your tutorial goes. Think about on what module relies most heavily your code and that is the one to use. If the answer to this question is more than one modules then the *general* section is the one to use. For finding the *opencv* directory open up your file system and navigate where you downloaded our repository. + If the input resources are hard to acquire for the end user consider adding a few of them to the :file:`opencv/samples/cpp/tutorial_code/images`. Make sure that who reads your code can try it out! Add the TOC entry ================= For this you will need to know some |reST|_. There is no going around this. |reST|_ files have **rst** extensions. However, these are simple text files. Use any text editor you like. Finding a text editor that offers syntax highlighting for |reST|_ was quite a challenge at the time of writing this tutorial. In my experience, `Intype `_ is a solid option on Windows, although there is still place for improvement. Adding your source code to a table of content is important for multiple reasons. First and foremost this will allow for the user base to find your tutorial from our websites tutorial table of content. Secondly, if you omit this *Sphinx* will throw a warning that your tutorial file isn't part of any TOC tree entry. And there is nothing more than the developer team hates than an ever increasing warning/error list for their builds. *Sphinx* also uses this to build up the previous-back-up buttons on the website. Finally, omitting this step will lead to that your tutorial will **not** be added to the PDF version of the tutorials. Navigate to the :file:`opencv/doc/tutorials/section/table_of_content_section` folder (where the section is the module to which you're adding the tutorial). Open the *table_of_content_section* file. Now this may have two forms. If no prior tutorials are present in this section that there is a template message about this and has the following form: .. code-block:: rst .. _Table-Of-Content-Section: Section title ----------------------------------------------------------- Description about the section. .. 
include:: ../../definitions/noContent.rst .. raw:: latex \pagebreak The first line is a reference to the section title in the reST system. The section title will be a link and you may refer to it via the ``:ref:`` directive. The *include* directive imports the template text from the definitions directories *noContent.rst* file. *Sphinx* does not creates the PDF from scratch. It does this by first creating a latex file. Then creates the PDF from the latex file. With the *raw* directive you can directly add to this output commands. Its unique argument is for what kind of output to add the content of the directive. For the PDFs it may happen that multiple sections will overlap on a single page. To avoid this at the end of the TOC we add a *pagebreak* latex command, that hints to the LATEX system that the next line should be on a new page. If you have one of this, try to transform it to the following form: .. include:: ../../definitions/tocDefinitions.rst .. code-block:: rst .. _Table-Of-Content-Section: Section title ----------------------------------------------------------- .. include:: ../../definitions/tocDefinitions.rst + .. tabularcolumns:: m{100pt} m{300pt} .. cssclass:: toctableopencv =============== ====================================================== |MatBasicIma| **Title:** :ref:`matTheBasicImageContainer` *Compatibility:* > OpenCV 2.0 *Author:* |Author_BernatG| You will learn how to store images in the memory and how to print out their content to the console. =============== ===================================================== .. |MatBasicIma| image:: images/matTheBasicImageStructure.jpg :height: 90pt :width: 90pt .. raw:: latex \pagebreak .. toctree:: :hidden: ../mat - the basic image container/mat - the basic image container If this is already present just add a new section of the content between the include and the raw directives (excluding those lines). Here you'll see a new include directive. This should be present only once in a TOC tree and the reST file contains the definitions of all the authors contributing to the OpenCV tutorials. We are a multicultural community and some of our name may contain some funky characters. However, reST **only supports** ANSI characters. Luckily we can specify Unicode characters with the *unicode* directive. Doing this for all of your tutorials is a troublesome procedure. Therefore, the tocDefinitions file contains the definition of your author name. Add it here once and afterwards just use the replace construction. For example here's the definition for my name: .. code-block:: rst .. |Author_BernatG| unicode:: Bern U+00E1 t U+0020 G U+00E1 bor The ``|Author_BernatG|`` is the text definitions alias. I can use later this to add the definition, like I've done in the TOCs *Author* part. After the ``::`` and a space you start the definition. If you want to add an UNICODE character (non-ASCI) leave an empty space and specify it in the format U+(UNICODE code). To find the UNICODE code of a character I recommend using the `FileFormat `_ websites service. Spaces are trimmed from the definition, therefore we add a space by its UNICODE character (U+0020). Until the *raw* directive what you can see is a TOC tree entry. Here's how a TOC entry will look like: + .. tabularcolumns:: m{100pt} m{300pt} .. 
cssclass:: toctableopencv =============== ====================================================== |MatBasicIma| **Title:** :ref:`matTheBasicImageContainer` *Compatibility:* > OpenCV 2.0 *Author:* |Author_BernatG| You will learn how to store images in the memory and how to print out their content to the console. =============== ====================================================== .. |MatBasicIma| image:: images/matTheBasicImageStructure.jpg :height: 90pt :width: 90pt As you can see we have an image to the left and a description box to the right. To create two boxes we use a table with two columns and a single row. In the left column is the image and in the right one the description. However, the image directive is way too long to fit in a column. Therefore, we need to use the substitution definition system. We add this definition after the TOC tree. All images for the TOC tree are to be put in the images folder near its |reST|_ file. We use the point measurement system because we are also creating PDFs. PDFs are printable documents, where there is no such thing that pixels (px), just points (pt). And while generally space is no problem for web pages (we have monitors with **huge** resolutions) the size of the paper (A4 or letter) is constant and will be for a long time in the future. Therefore, size constrains come in play more like for the PDF, than the generated HTML code. Now your images should be as small as possible, while still offering the intended information for the user. Remember that the tutorial will become part of the OpenCV source code. If you add large images (that manifest in form of large image size) it will just increase the size of the repository pointlessly. If someone wants to download it later, its download time will be that much longer. Not to mention the larger PDF size for the tutorials and the longer load time for the web pages. In terms of pixels a TOC image should not be larger than 120 X 120 pixels. Resize your images if they are larger! .. note:: If you add a larger image and specify a smaller image size, *Sphinx* will not resize that. At build time will add the full size image and the resize will be done by your browser after the image is loaded. A 120 X 120 image is somewhere below 10KB. If you add a 110KB image, you have just pointlessly added a 100KB extra data to transfer over the internet for every user! Generally speaking you shouldn't need to specify your images size (excluding the TOC entries). If no such is found *Sphinx* will use the size of the image itself (so no resize occurs). Then again if for some reason you decide to specify a size that should be the **width** of the image rather than its height. The reason for this again goes back to the PDFs. On a PDF page the height is larger than the width. In the PDF the images will not be resized. If you specify a size that does not fit in the page, then what does not fits in **will be cut off**. When creating your images for your tutorial you should try to keep the image widths below 500 pixels, and calculate with around 400 point page width when specifying image widths. The image format depends on the content of the image. If you have some complex scene (many random like colors) then use *jpg*. Otherwise, prefer using *png*. They are even some tools out there that optimize the size of *PNG* images, such as `PNGGauntlet `_. Use them to make your images as small as possible in size. Now on the right side column of the table we add the information about the tutorial: .. 
container:: enumeratevisibleitemswithsquare + In the first line it is the title of the tutorial. However, there is no need to specify it explicitly. We use the reference system. We'll start up our tutorial with a reference specification, just like in case of this TOC entry with its `` .. _Table-Of-Content-Section:`` . If after this you have a title (pointed out by the following line of -), then Sphinx will replace the ``:ref:`Table-Of-Content-Section``` directive with the tile of the section in reference form (creates a link in web page). Here's how the definition looks in my case: .. code-block:: rst .. _matTheBasicImageContainer: Mat - The Basic Image Container ******************************* Note, that according to the |reST|_ rules the * should be as long as your title. + Compatibility. What version of OpenCV is required to run your sample code. + Author. Use the substitution markup of |reST|_. + A short sentence describing the essence of your tutorial. Now before each TOC entry you need to add the three lines of: .. code-block:: cpp + .. tabularcolumns:: m{100pt} m{300pt} .. cssclass:: toctableopencv The plus sign (+) is to enumerate tutorials by using bullet points. So for every TOC entry we have a corresponding bullet point represented by the +. Sphinx is highly indenting sensitive. Indentation is used to express from which point until to which point does a construction last. Un-indentation means end of that construction. So to keep all the bullet points to the same group the following TOC entries (until the next +) should be indented by two spaces. Here, I should also mention that **always** prefer using spaces instead of tabs. Working with only spaces makes possible that if we both use monotype fonts we will see the same thing. Tab size is text editor dependent and as should be avoided. *Sphinx* translates all tabs into 8 spaces before interpreting it. It turns out that the automatic formatting of both the HTML and PDF(LATEX) system messes up our tables. Therefore, we need to help them out a little. For the PDF generation we add the ``.. tabularcolumns:: m{100pt} m{300pt}`` directive. This means that the first column should be 100 points wide and middle aligned. For the HTML look we simply name the following table of a *toctableopencv* class type. Then, we can modify the look of the table by modifying the CSS of our web page. The CSS definitions go into the :file:`opencv/doc/_themes/blue/static/default.css_t` file. .. code-block:: css .toctableopencv { width: 100% ; table-layout: fixed; } .toctableopencv colgroup col:first-child { width: 100pt !important; max-width: 100pt !important; min-width: 100pt !important; } .toctableopencv colgroup col:nth-child(2) { width: 100% !important; } However, you should not need to modify this. Just add these three lines (plus keep the two space indentation) for all TOC entries you add. At the end of the TOC file you'll find: .. code-block:: rst .. raw:: latex \pagebreak .. toctree:: :hidden: ../mat - the basic image container/mat - the basic image container The page break entry comes for separating sections and should be only one in a TOC tree |reST|_ file. Finally, at the end of the TOC tree we need to add our tutorial to the *Sphinx* TOC tree system. *Sphinx* will generate from this the previous-next-up information for the HTML file and add items to the PDF according to the order here. By default this TOC tree directive generates a simple table of contents. However, we already created a fancy looking one so we no longer need this basic one. 
Therefore, we add the *hidden* option to do not show it. The path is of a relative type. We step back in the file system and then go into the :file:`mat - the basic image container` directory for the :file:`mat - the basic image container.rst` file. Putting out the *rst* extension for the file is optional. Write the tutorial ================== Create a folder with the name of your tutorial. Preferably, use small letters only. Then create a text file in this folder with *rst* extension and the same name. If you have images for the tutorial create an :file:`images` folder and add your images there. When creating your images follow the guidelines described in the previous part! Now here's our recommendation for the structure of the tutorial (although, remember that this is not carved in the stone; if you have a better idea, use it!): .. container:: enumeratevisibleitemswithsquare + Create the reference point and the title. .. code-block:: rst .. _matTheBasicImageContainer: Mat - The Basic Image Container ******************************* You start the tutorial by specifying a reference point by the ``.. _matTheBasicImageContainer:`` and then its title. The name of the reference point should be a unique one over the whole documentation. Therefore, do not use general names like *tutorial1*. Use the * character to underline the title for its full width. The subtitles of the tutorial should be underlined with = charachter. + Goals. You start your tutorial by specifying what you will present. You can also enumerate the sub jobs to be done. For this you can use a bullet point construction. There is a single configuration file for both the reference manual and the tutorial documentation. In the reference manuals at the argument enumeration we do not want any kind of bullet point style enumeration. Therefore, by default all the bullet points at this level are set to do not show the dot before the entries in the HTML. You can override this by putting the bullet point in a container. I've defined a square type bullet point view under the name *enumeratevisibleitemswithsquare*. The CSS style definition for this is again in the :file:`opencv\doc\_themes\blue\static\default.css_t` file. Here's a quick example of using it: .. code-block:: rst .. container:: enumeratevisibleitemswithsquare + Create the reference point and the title. + Second entry + Third entry Note that you need the keep the indentation of the container directive. Directive indentations are always three (3) spaces. Here you may even give usage tips for your sample code. + Source code. Present your samples code to the user. It's a good idea to offer a quick download link for the HTML page by using the *download* directive and pointing out where the user may find your source code in the file system by using the *file* directive: .. code-block:: rst Text :file:`samples/cpp/tutorial_code/highgui/video-write/` folder of the OpenCV source library or :download:`text to appear in the webpage <../../../../samples/cpp/tutorial_code/HighGUI/video-write/video-write.cpp>`. For the download link the path is a relative one, hence the multiple back stepping operations (..). Then you can add the source code either by using the *code block* directive or the *literal include* one. In case of the code block you will need to actually add all the source code text into your |reST|_ text and also apply the required indentation: .. code-block:: rst .. code-block:: cpp int i = 0; l = ++j; The only argument of the directive is the language used (here CPP). 
Then you add the source code into its content (meaning one empty line after the directive) by keeping the indentation of the directive (3 spaces). With the *literal include* directive you do not need to add the source code of the sample. You just specify the sample and *Sphinx* will load it for you, during build time. Here's an example usage: .. code-block:: rst .. literalinclude:: ../../../../samples/cpp/tutorial_code/HighGUI/video-write/video-write.cpp :language: cpp :linenos: :tab-width: 4 :lines: 1-8, 21-22, 24- After the directive you specify a relative path to the file from what to import. It has four options: the language to use, if you add the ``:linenos:`` the line numbers will be shown, you can specify the tab size with the ``:tab-width:`` and you do not need to load the whole file, you can show just the important lines. Use the *lines* option to do not show redundant information (such as the *help* function). Here basically you specify ranges, if the second range line number is missing than that means that until the end of the file. The ranges specified here do no need to be in an ascending order, you may even reorganize the structure of how you want to show your sample inside the tutorial. + The tutorial. Well here goes the explanation for why and what have you used. Try to be short, clear, concise and yet a thorough one. There's no magic formula. Look into a few already made tutorials and start out from there. Try to mix sample OpenCV code with your explanations. If with words is hard to describe something do not hesitate to add in a reasonable size image, to overcome this issue. When you present OpenCV functionality it's a good idea to give a link to the used OpenCV data structure or function. Because the OpenCV tutorials and reference manual are in separate PDF files it is not possible to make this link work for the PDF format. Therefore, we use here only web page links to the **opencv.itseez.com** website. The OpenCV functions and data structures may be used for multiple tasks. Nevertheless, we want to avoid that every users creates its own reference to a commonly used function. So for this we use the global link collection of *Sphinx*. This is defined in the file:`opencv/doc/conf.py` configuration file. Open it and go all the way down to the last entry: .. code-block:: py # ---- External links for tutorials ----------------- extlinks = { 'hgvideo' : ('http://opencv.itseez.com/modules/highgui/doc/reading_and_writing_images_and_video.html#%s', None) } In short here we defined a new **hgvideo** directive that refers to an external webpage link. Its usage is: .. code-block:: rst A sample function of the highgui modules image write and read page is the :hgvideo:`imread() function `. Which turns to: A sample function of the highgui modules image write and read page is the :hgvideo:`imread() function `. The argument you give between the <> will be put in place of the ``%s`` in the upper definition, and as the link will anchor to the correct function. To find out the anchor of a given function just open up a web page, search for the function and click on it. In the address bar it should appear like: ``http://opencv.itseez.com/modules/highgui/doc/reading_and_writing_images_and_video.html#imread`` . Look here for the name of the directives for each page of the OpenCV reference manual. If none present for one of them feel free to add one for it. For formulas you can add LATEX code that will translate in the web pages into images. You do this by using the *math* directive. 
A usage tip: .. code-block:: latex .. math:: MSE = \frac{1}{c*i*j} \sum{(I_1-I_2)^2} That after build turns into: .. math:: MSE = \frac{1}{c*i*j} \sum{(I_1-I_2)^2} You can even use it inline as ``:math:` MSE = \frac{1}{c*i*j} \sum{(I_1-I_2)^2}``` that turns into :math:`MSE = \frac{1}{c*i*j} \sum{(I_1-I_2)^2}`. If you use some crazy LATEX library extension you need to add those to the ones to use at build time. Look into the file:`opencv/doc/conf.py` configuration file for more information on this. + Results. Well, here depending on your program show one of more of the following: - Console outputs by using the code block directive. - Output images. - Runtime videos, visualization. For this use your favorite screens capture software. `Camtasia Studio `_ certainly is one of the better choices, however their prices are out of this world. `CamStudio `_ is a free alternative, but less powerful. If you do a video you can upload it to YouTube and then use the raw directive with HTML option to embed it into the generated web page: .. code-block:: rst You may observe a runtime instance of this on the `YouTube here `_. .. raw:: html
    This results in the text and video: You may observe a runtime instance of this on the `YouTube here `_. .. raw:: html
    When these aren't self-explanatory make sure to throw in a few guiding lines about what and why we can see. + Build the documentation and check for errors or warnings. In the CMake make sure you check or pass the option for building documentation. Then simply build the **docs** project for the PDF file and the **docs_html** project for the web page. Read the output of the build and check for errors/warnings for what you have added. This is also the time to observe and correct any kind of *not so good looking* parts. Remember to keep clean our build logs. + Read again your tutorial and check for both programming and spelling errors. If found any, please correct them. Take home the pride and joy of a job well done! =============================================== Once you are done contact me or dr. Gary Bradski with the tutorial. We may submit the tutorial ourselves to the trunk branch of our repository or ask you to do so. Now, to see your work **live** you may need to wait some time. The PDFs are updated usually at the launch of a new OpenCV version. The web pages are a little more diverse. They are automatically rebuilt in each evening. However, the **opencv.itseez.com** website contains only the most recent **stable branch** of OpenCV. Currently this is 2.3. When we add something new (like a tutorial) that first goes to the **trunk branch** of our repository. A build of this you may find on the **opencv.itseez.com/trunk** website. Although, we try to make a build every night occasionally we might freeze any of the branches to fix upcoming issues. During this it may take a little longer to see your work *live*, however if you submited it, be sure that eventually it will show up. If you have any questions or advices relating to this tutorial you can contact me at -delete-bernat@-delete-primeranks.net. Of course, delete the -delete- parts of that e-mail address. \ No newline at end of file +.. _howToWriteTutorial: + +How to write a tutorial for OpenCV +********************************** + +Okay, so assume you have just finished a project of yours implementing something +based on OpenCV and you want to present/share it with the community. Luckily, OpenCV +is an *open source project*. This means that anyone has access to the full source +code and may propose extensions. And a good tutorial is a valuable addition to the +library! Please read instructions on contribution process here: +http://opencv.org/contribute.html. You may also find this page helpful: +:how_to_contribute:`How to contribute <>`. + +While making a robust and practical library (like OpenCV) is great, the success of a +library also depends on how user friendly it is. To improve on this aspect, the +OpenCV team has already been listening to user feedback at :opencv_qa:`OpenCV Q&A +forum <>` and by making samples you can find in the source directories +:file:`samples` folder. The addition of the tutorials (in both online and PDF format) +is an extension of these efforts. + +Goal +==== + +.. _reST: http://docutils.sourceforge.net/rst.html +.. |reST| replace:: reStructuredText +.. |Sphinx| replace:: Sphinx +.. _Sphinx: http://sphinx.pocoo.org/ + +The tutorials are just as an important part of the library as the implementation of +those crafty data structures and algorithms you can find in OpenCV. Therefore, the +source codes for the tutorials are part of the library. And yes, I meant source +codes. The reason for this formulation is that the tutorials are written by using the +|Sphinx|_ documentation generation system. 
This is based on the popular Python +documentation system called |reST|_ (reST). ReStructuredText is a really neat +language that by using a few simple conventions (indentation, directives) and +emulating old school email writing techniques (text only) tries to offer a simple +way to create and edit documents. Sphinx extends this with some new features and +creates the resulting document in both HTML (for web) and PDF (for offline usage) +format. + + +Usually, an OpenCV tutorial has the following parts: + +1. A source code demonstration of an OpenCV feature: + + a. One or more CPP, Python, Java or other type of files depending for what OpenCV offers support and for what language you make the tutorial. + #. Occasionaly, input resource files required for running your tutorials application. + + +#. A table of content entry (so people may easily find the tutorial): + + a. Adding your stuff to the tutorials table of content (**reST** file). + #. Add an image file near the TOC entry. + + +#. The content of the tutorial itself: + + a. The **reST** text of the tutorial + #. Images following the idea that "*A picture is worth a thousand words*". + #. For more complex demonstrations you may create a video. + +As you can see you will need at least some basic knowledge of the *reST* system in order to complete the task at hand with success. However, don't worry *reST* (and *Sphinx*) was made with simplicity in mind. It is easy to grasp its basics. I found that the `OpenAlea documentations introduction on this subject `_ (or the `Thomas Cokelaer one `_ ) should enough for this. If for some directive or feature you need a more in-depth description look it up in the official |reST|_ help files or at the |Sphinx|_ documentation. + +In our world achieving some tasks is possible in multiple ways. However, some of the roads to take may have obvious or hidden advantages over others. Then again, in some other cases it may come down to just simple user preference. Here, I'll present how I decided to write the tutorials, based on my personal experience. If for some of them you know a better solution and you can back it up feel free to use that. I've nothing against it, as long as it gets the job done in an elegant fashion. + +Now the best would be if you could make the integration yourself. For this you need first to have the source code. I recommend following the guides for your operating system on acquiring OpenCV sources. For Linux users look :ref:`here ` and for :ref:`Windows here `. You must also install python and sphinx with its dependencies in order to be able to build the documentation. + +Once you have downloaded the repository to your hard drive you can take a look in the OpenCV directory to make sure you have both the samples and doc folder present. Anyone may download the latest source files from :file:`git://github.com/Itseez/opencv.git` . Nevertheless, not everyone has upload (commit/submit) rights. This is to protect the integrity of the library. If you plan doing more than one tutorial, and would like to have an account with commit user rights you should first register an account at http://code.opencv.org/ and then contact OpenCV administrator -delete-admin@-delete-opencv.org. Otherwise, you can just send the resulting files to us at -delete-admin@-delete-opencv.org and we'll add it. + + +Format the Source Code +====================== + +Before I start this let it be clear: the main goal is to have a working sample code. 
However, for your tutorial to be of top notch quality you should follow the few guidelines I am going to present here. In case you have an application that uses the older interface (with *IplImage*, *cvMat*, *cvLoadImage* and such) consider migrating it to the new C++ interface. The tutorials are intended to be an up to date help for our users. As of OpenCV 2 the emphasis is on using the less error prone and clearer C++ interface. Therefore, if possible please convert your code to the C++ interface. For this it may help to read the :ref:`InteroperabilityWithOpenCV1` tutorial. However, once you have working OpenCV 2 code, you should make your source code snippet as easy to read as possible. Here are a few tips for this: + + +.. container:: enumeratevisibleitemswithsquare + + + Add a standard output with the description of what your program does. Keep it short and yet descriptive. This output is printed at the start of the program. In my example files this usually takes the form of a *help* function containing the output. This way both the source file viewer and the application runner can see what your sample is all about. Here's an instance of this: + + .. code-block:: cpp + + void help() + { + cout + << "--------------------------------------------------------------------------" << endl + << "This program shows how to write video files. You can extract the R or G or B color channel " + << " of the input video. You can choose to use the source codec (Y) or select a custom one. (N)"<< endl + << "Usage:" << endl + << "./video-write inputvideoName [ R | G | B] [Y | N]" << endl + << "--------------------------------------------------------------------------" << endl + << endl; + } + // ... + int main(int argc, char *argv[], char *window_name) + { + help(); + // here comes the actual source code + } + + Additionally, finalize the description with a short usage guide. This way the user will know how to call your program, which leads us to the next point. + + + Prefer command line argument control instead of hard coded values. If your program has some variables that may be changed, use command line arguments for this. The tutorials can be a simple try-out ground for the user. If you offer command line control for the input image (for example), then you offer the user the possibility to try it out with his/her own images, without the need to mess with the source code (a minimal sketch of such argument handling follows a little further below). In the upper example you can see that the input image, channel and codec selection may all be changed from the command line. Just compile the program and run it with your own input arguments. + + + Be as verbose as possible. There is no shame in filling the source code with comments. This way the more advanced user may figure out what's happening right from the sample code. This advice goes for the console output too. Tell the user what's happening. Never leave the user hanging there and thinking: "Is this program now crashing or just doing some computationally intensive task?" So, if you do a training task that may take some time, make sure you print out a message about this before starting and after finishing it. + + + Throw out unnecessary stuff from your source code. This is a warning not to take the previous point too seriously. Balance is the key. If something can be done in fewer lines or more simply, then that's the way you should do it. Nevertheless, if for some reason you keep such sections, notify the user why you have chosen to do so. Keep the amount of information as low as possible, while still getting the job done in an elegant way.
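For instance, a bare-bones sketch of such command line handling (the default file name and the variable names here are only placeholders, not taken from an existing sample) could look like this:

.. code-block:: cpp

   #include <iostream>
   #include <string>
   #include <opencv2/core/core.hpp>
   #include <opencv2/highgui/highgui.hpp>

   int main(int argc, char* argv[])
   {
       // fall back to a default input when the user gives no argument,
       // so the sample still runs out of the box
       std::string inputName = (argc > 1) ? argv[1] : "lena.jpg";

       cv::Mat image = cv::imread(inputName);
       if (image.empty())
       {
           std::cout << "Could not open " << inputName << std::endl;
           return -1;
       }

       std::cout << "Loaded " << inputName << " ("
                 << image.cols << "x" << image.rows << ")" << std::endl;

       // here would come the actual processing demonstrated by the tutorial
       return 0;
   }

This way the user can point the sample at his/her own image simply by changing the command line, not the source code.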
+ + + Put your sample file into the :file:`opencv/samples/cpp/tutorial_code/sectionName` folder. If you write a tutorial for a language other than cpp, then change that part of the path. Before completing this you need to decide to which section (module) your tutorial belongs. Think about which module your code relies on most heavily, and that is the one to use. If the answer is more than one module, then the *general* section is the one to use. To find the *opencv* directory open up your file system and navigate to where you downloaded our repository. + + + If the input resources are hard to acquire for the end user, consider adding a few of them to the :file:`opencv/samples/cpp/tutorial_code/images` folder. Make sure that whoever reads your code can try it out! + +Add the TOC entry +================= + +For this you will need to know some |reST|_. There is no going around this. |reST|_ files have **rst** extensions. However, these are simple text files. Use any text editor you like. Finding a text editor that offers syntax highlighting for |reST|_ was quite a challenge at the time of writing this tutorial. In my experience, `Intype `_ is a solid option on Windows, although there is still room for improvement. + +Adding your source code to a table of content is important for multiple reasons. First and foremost this will allow the user base to find your tutorial from our website's tutorial table of contents. Secondly, if you omit this, *Sphinx* will throw a warning that your tutorial file isn't part of any TOC tree entry. And there is nothing the developer team hates more than an ever increasing warning/error list for its builds. *Sphinx* also uses this to build up the previous-next-up buttons on the website. Finally, omitting this step means that your tutorial will **not** be added to the PDF version of the tutorials. + +Navigate to the :file:`opencv/doc/tutorials/section/table_of_content_section` folder (where the section is the module to which you're adding the tutorial). Open the *table_of_content_section* file. Now this may have two forms. If no prior tutorials are present in this section, there is a template message about this, which has the following form: + +.. code-block:: rst + + .. _Table-Of-Content-Section: + + Section title + ----------------------------------------------------------- + + Description about the section. + + .. include:: ../../definitions/noContent.rst + + .. raw:: latex + + \pagebreak + +The first line is a reference to the section title in the reST system. The section title will be a link and you may refer to it via the ``:ref:`` directive. The *include* directive imports the template text from the definitions directory's *noContent.rst* file. *Sphinx* does not create the PDF from scratch. It first creates a LaTeX file and then creates the PDF from that. With the *raw* directive you can add commands directly to this output. Its single argument specifies for which kind of output the content of the directive should be added. For the PDFs it may happen that multiple sections overlap on a single page. To avoid this, at the end of the TOC we add a *pagebreak* LaTeX command that hints to the LaTeX system that the next content should start on a new page. + +If you have one of these, try to transform it into the following form: + +.. include:: ../../definitions/tocDefinitions.rst + +.. code-block:: rst + + ..
_Table-Of-Content-Section: + + Section title + ----------------------------------------------------------- + + .. include:: ../../definitions/tocDefinitions.rst + + + + .. tabularcolumns:: m{100pt} m{300pt} + .. cssclass:: toctableopencv + + =============== ====================================================== + |MatBasicIma| **Title:** :ref:`matTheBasicImageContainer` + + *Compatibility:* > OpenCV 2.0 + + *Author:* |Author_BernatG| + + You will learn how to store images in the memory and how to print out their content to the console. + + =============== ===================================================== + + .. |MatBasicIma| image:: images/matTheBasicImageStructure.jpg + :height: 90pt + :width: 90pt + + .. raw:: latex + + \pagebreak + + .. toctree:: + :hidden: + + ../mat - the basic image container/mat - the basic image container + +If this is already present just add a new section of the content between the include and the raw directives (excluding those lines). Here you'll see a new include directive. This should be present only once in a TOC tree and the reST file contains the definitions of all the authors contributing to the OpenCV tutorials. We are a multicultural community and some of our name may contain some funky characters. However, reST **only supports** ANSI characters. Luckily we can specify Unicode characters with the *unicode* directive. Doing this for all of your tutorials is a troublesome procedure. Therefore, the tocDefinitions file contains the definition of your author name. Add it here once and afterwards just use the replace construction. For example here's the definition for my name: + +.. code-block:: rst + + .. |Author_BernatG| unicode:: Bern U+00E1 t U+0020 G U+00E1 bor + +The ``|Author_BernatG|`` is the text definitions alias. I can use later this to add the definition, like I've done in the TOCs *Author* part. After the ``::`` and a space you start the definition. If you want to add an UNICODE character (non-ASCI) leave an empty space and specify it in the format U+(UNICODE code). To find the UNICODE code of a character I recommend using the `FileFormat `_ websites service. Spaces are trimmed from the definition, therefore we add a space by its UNICODE character (U+0020). + +Until the *raw* directive what you can see is a TOC tree entry. Here's how a TOC entry will look like: + ++ + .. tabularcolumns:: m{100pt} m{300pt} + .. cssclass:: toctableopencv + + =============== ====================================================== + |MatBasicIma| **Title:** :ref:`matTheBasicImageContainer` + + *Compatibility:* > OpenCV 2.0 + + *Author:* |Author_BernatG| + + You will learn how to store images in the memory and how to print out their content to the console. + + =============== ====================================================== + + .. |MatBasicIma| image:: images/matTheBasicImageStructure.jpg + :height: 90pt + :width: 90pt + +As you can see we have an image to the left and a description box to the right. To create two boxes we use a table with two columns and a single row. In the left column is the image and in the right one the description. However, the image directive is way too long to fit in a column. Therefore, we need to use the substitution definition system. We add this definition after the TOC tree. All images for the TOC tree are to be put in the images folder near its |reST|_ file. We use the point measurement system because we are also creating PDFs. PDFs are printable documents, where there is no such thing that pixels (px), just points (pt). 
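As a rough rule of thumb (assuming the usual 72 points per inch and a typical 96 pixel per inch screen), the 90 pt used above corresponds to about 120 px:

.. math::

   90\,\mathrm{pt} \times \frac{96\,\mathrm{px/inch}}{72\,\mathrm{pt/inch}} = 120\,\mathrm{px}

This lines up with the 120 pixel limit for TOC images mentioned below.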
And while generally space is no problem for web pages (we have monitors with **huge** resolutions), the size of the paper (A4 or letter) is constant and will be for a long time in the future. Therefore, size constraints come into play more for the PDF than for the generated HTML code. + +Now your images should be as small as possible, while still offering the intended information for the user. Remember that the tutorial will become part of the OpenCV source code. If you add large images (meaning large file sizes) you will just increase the size of the repository pointlessly. If someone wants to download it later, the download time will be that much longer. Not to mention the larger PDF size for the tutorials and the longer load time for the web pages. In terms of pixels a TOC image should not be larger than 120 X 120 pixels. Resize your images if they are larger! + +.. note:: + + If you add a larger image and specify a smaller image size, *Sphinx* will not resize it. At build time it will add the full size image and the resizing will be done by your browser after the image is loaded. A 120 X 120 image is somewhere below 10KB. If you add a 110KB image, you have just pointlessly added an extra 100KB of data to transfer over the internet for every user! + +Generally speaking you shouldn't need to specify your image size (excluding the TOC entries). If none is specified, *Sphinx* will use the size of the image itself (so no resize occurs). Then again, if for some reason you decide to specify a size, it should be the **width** of the image rather than its height. The reason for this again goes back to the PDFs. On a PDF page the height is larger than the width. In the PDF the images will not be resized. If you specify a size that does not fit on the page, then whatever does not fit **will be cut off**. When creating the images for your tutorial you should try to keep the image widths below 500 pixels, and calculate with around 400 points of page width when specifying image widths. + +The image format depends on the content of the image. If you have some complex scene (many random-like colors) then use *jpg*. Otherwise, prefer using *png*. There are even some tools out there that optimize the size of *PNG* images, such as `PNGGauntlet `_. Use them to make your images as small as possible in size. + +Now in the right side column of the table we add the information about the tutorial: + +.. container:: enumeratevisibleitemswithsquare + + + The first line is the title of the tutorial. However, there is no need to specify it explicitly. We use the reference system. We'll start up our tutorial with a reference specification, just like in the case of this TOC entry with its ``.. _Table-Of-Content-Section:``. If after this you have a title (pointed out by the following line of -), then Sphinx will replace the ``:ref:`Table-Of-Content-Section``` directive with the title of the section in reference form (it creates a link in the web page). Here's how the definition looks in my case: + + .. code-block:: rst + + .. _matTheBasicImageContainer: + + Mat - The Basic Image Container + ******************************* + + Note that, according to the |reST|_ rules, the row of * should be as long as your title. + + + Compatibility. What version of OpenCV is required to run your sample code. + + + Author. Use the substitution markup of |reST|_. + + + A short sentence describing the essence of your tutorial. + +Now before each TOC entry you need to add the three lines of: + +.. code-block:: cpp + + + + ..
tabularcolumns:: m{100pt} m{300pt} + .. cssclass:: toctableopencv + +The plus sign (+) is to enumerate tutorials by using bullet points. So for every TOC entry we have a corresponding bullet point represented by the +. Sphinx is highly indenting sensitive. Indentation is used to express from which point until to which point does a construction last. Un-indentation means end of that construction. So to keep all the bullet points to the same group the following TOC entries (until the next +) should be indented by two spaces. + +Here, I should also mention that **always** prefer using spaces instead of tabs. Working with only spaces makes possible that if we both use monotype fonts we will see the same thing. Tab size is text editor dependent and as should be avoided. *Sphinx* translates all tabs into 8 spaces before interpreting it. + +It turns out that the automatic formatting of both the HTML and PDF(LATEX) system messes up our tables. Therefore, we need to help them out a little. For the PDF generation we add the ``.. tabularcolumns:: m{100pt} m{300pt}`` directive. This means that the first column should be 100 points wide and middle aligned. For the HTML look we simply name the following table of a *toctableopencv* class type. Then, we can modify the look of the table by modifying the CSS of our web page. The CSS definitions go into the :file:`opencv/doc/_themes/blue/static/default.css_t` file. + +.. code-block:: css + + .toctableopencv + { + width: 100% ; + table-layout: fixed; + } + + + .toctableopencv colgroup col:first-child + { + width: 100pt !important; + max-width: 100pt !important; + min-width: 100pt !important; + } + + .toctableopencv colgroup col:nth-child(2) + { + width: 100% !important; + } + +However, you should not need to modify this. Just add these three lines (plus keep the two space indentation) for all TOC entries you add. At the end of the TOC file you'll find: + +.. code-block:: rst + + .. raw:: latex + + \pagebreak + + .. toctree:: + :hidden: + + ../mat - the basic image container/mat - the basic image container + +The page break entry comes for separating sections and should be only one in a TOC tree |reST|_ file. Finally, at the end of the TOC tree we need to add our tutorial to the *Sphinx* TOC tree system. *Sphinx* will generate from this the previous-next-up information for the HTML file and add items to the PDF according to the order here. By default this TOC tree directive generates a simple table of contents. However, we already created a fancy looking one so we no longer need this basic one. Therefore, we add the *hidden* option to do not show it. + +The path is of a relative type. We step back in the file system and then go into the :file:`mat - the basic image container` directory for the :file:`mat - the basic image container.rst` file. Putting out the *rst* extension for the file is optional. + +Write the tutorial +================== + +Create a folder with the name of your tutorial. Preferably, use small letters only. Then create a text file in this folder with *rst* extension and the same name. If you have images for the tutorial create an :file:`images` folder and add your images there. When creating your images follow the guidelines described in the previous part! + +Now here's our recommendation for the structure of the tutorial (although, remember that this is not carved in the stone; if you have a better idea, use it!): + + +.. container:: enumeratevisibleitemswithsquare + + + Create the reference point and the title. + + .. 
code-block:: rst + + .. _matTheBasicImageContainer: + + Mat - The Basic Image Container + ******************************* + + You start the tutorial by specifying a reference point by the ``.. _matTheBasicImageContainer:`` and then its title. The name of the reference point should be a unique one over the whole documentation. Therefore, do not use general names like *tutorial1*. Use the * character to underline the title for its full width. The subtitles of the tutorial should be underlined with = charachter. + + + Goals. You start your tutorial by specifying what you will present. You can also enumerate the sub jobs to be done. For this you can use a bullet point construction. There is a single configuration file for both the reference manual and the tutorial documentation. In the reference manuals at the argument enumeration we do not want any kind of bullet point style enumeration. Therefore, by default all the bullet points at this level are set to do not show the dot before the entries in the HTML. You can override this by putting the bullet point in a container. I've defined a square type bullet point view under the name *enumeratevisibleitemswithsquare*. The CSS style definition for this is again in the :file:`opencv\doc\_themes\blue\static\default.css_t` file. Here's a quick example of using it: + + .. code-block:: rst + + .. container:: enumeratevisibleitemswithsquare + + + Create the reference point and the title. + + Second entry + + Third entry + + Note that you need the keep the indentation of the container directive. Directive indentations are always three (3) spaces. Here you may even give usage tips for your sample code. + + + Source code. Present your samples code to the user. It's a good idea to offer a quick download link for the HTML page by using the *download* directive and pointing out where the user may find your source code in the file system by using the *file* directive: + + .. code-block:: rst + + Text :file:`samples/cpp/tutorial_code/highgui/video-write/` folder of the OpenCV source library + or :download:`text to appear in the webpage + <../../../../samples/cpp/tutorial_code/HighGUI/video-write/video-write.cpp>`. + + For the download link the path is a relative one, hence the multiple back stepping operations (..). Then you can add the source code either by using the *code block* directive or the *literal include* one. In case of the code block you will need to actually add all the source code text into your |reST|_ text and also apply the required indentation: + + .. code-block:: rst + + .. code-block:: cpp + + int i = 0; + l = ++j; + + The only argument of the directive is the language used (here CPP). Then you add the source code into its content (meaning one empty line after the directive) by keeping the indentation of the directive (3 spaces). With the *literal include* directive you do not need to add the source code of the sample. You just specify the sample and *Sphinx* will load it for you, during build time. Here's an example usage: + + .. code-block:: rst + + .. literalinclude:: ../../../../samples/cpp/tutorial_code/HighGUI/video-write/video-write.cpp + :language: cpp + :linenos: + :tab-width: 4 + :lines: 1-8, 21-22, 24- + + After the directive you specify a relative path to the file from what to import. It has four options: the language to use, if you add the ``:linenos:`` the line numbers will be shown, you can specify the tab size with the ``:tab-width:`` and you do not need to load the whole file, you can show just the important lines. 
Use the *lines* option so as not to show redundant information (such as the *help* function). Here you basically specify ranges; if the second line number of a range is missing, it means everything until the end of the file. The ranges specified here do not need to be in ascending order, so you may even reorganize the structure of how you want to show your sample inside the tutorial. + + + The tutorial. Here goes the explanation of why and what you have used. Try to be short, clear and concise, yet thorough. There's no magic formula. Look into a few already made tutorials and start out from there. Try to mix sample OpenCV code with your explanations. If something is hard to describe with words, do not hesitate to add a reasonably sized image to overcome this issue. + + When you present OpenCV functionality it's a good idea to give a link to the OpenCV data structure or function used. Because the OpenCV tutorials and reference manual are in separate PDF files it is not possible to make this link work for the PDF format. Therefore, we use here only web page links to the http://docs.opencv.org website. The OpenCV functions and data structures may be used for multiple tasks. Nevertheless, we want to avoid every user creating his/her own reference to a commonly used function. So for this we use the global link collection of *Sphinx*. This is defined in the :file:`opencv/doc/conf.py` configuration file. Open it and go all the way down to the last entry: + + .. code-block:: py + + # ---- External links for tutorials ----------------- + extlinks = { + 'hgvideo' : ('http://docs.opencv.org/modules/highgui/doc/reading_and_writing_images_and_video.html#%s', None) + } + + In short, here we defined a new **hgvideo** directive that refers to an external webpage link. Its usage is: + + .. code-block:: rst + + A sample function of the highgui modules image write and read page is the :hgvideo:`imread() function `. + + Which turns into: A sample function of the highgui modules image write and read page is the :hgvideo:`imread() function `. The argument you give between the <> will be put in place of the ``%s`` in the upper definition, and the link will anchor to the correct function. To find out the anchor of a given function just open up a web page, search for the function and click on it. In the address bar it should appear like: ``http://docs.opencv.org/modules/highgui/doc/reading_and_writing_images_and_video.html#imread`` . Look here for the names of the directives for each page of the OpenCV reference manual. If none is present for one of them, feel free to add one. + + For formulas you can add LaTeX code that will be translated into images on the web pages. You do this by using the *math* directive. A usage tip: + + .. code-block:: latex + + .. math:: + + MSE = \frac{1}{c*i*j} \sum{(I_1-I_2)^2} + + After the build this turns into: + + .. math:: + + MSE = \frac{1}{c*i*j} \sum{(I_1-I_2)^2} + + You can even use it inline as ``:math:` MSE = \frac{1}{c*i*j} \sum{(I_1-I_2)^2}``` which turns into :math:`MSE = \frac{1}{c*i*j} \sum{(I_1-I_2)^2}`. + + If you use some exotic LaTeX library extension you need to add it to the ones used at build time. Look into the :file:`opencv/doc/conf.py` configuration file for more information on this. + + + Results. Here, depending on your program, show one or more of the following: + + - Console outputs by using the code block directive. + - Output images. + - Runtime videos, visualization. For this use your favorite screen capture software. 
`Camtasia Studio `_ certainly is one of the better choices, however their prices are out of this world. `CamStudio `_ is a free alternative, but less powerful. If you do a video you can upload it to YouTube and then use the raw directive with HTML option to embed it into the generated web page: + + .. code-block:: rst + + You may observe a runtime instance of this on the `YouTube here `_. + + .. raw:: html + +
    + +
    + + This results in the text and video: You may observe a runtime instance of this on the `YouTube here `_. + + .. raw:: html + +
    + +
    + + When these aren't self-explanatory make sure to throw in a few guiding lines about what and why we can see. + + + Build the documentation and check for errors or warnings. In the CMake make sure you check or pass the option for building documentation. Then simply build the **docs** project for the PDF file and the **docs_html** project for the web page. Read the output of the build and check for errors/warnings for what you have added. This is also the time to observe and correct any kind of *not so good looking* parts. Remember to keep clean our build logs. + + + Read again your tutorial and check for both programming and spelling errors. If found any, please correct them. + + +Take home the pride and joy of a job well done! +=============================================== + +Once you are done please make a GitHub pull request with the tutorial. Now, to see +your work **live** you may need to wait some time. The PDFs are updated usually at +the launch of a new OpenCV version. The web pages are a little more diverse. They are +automatically rebuilt nightly. Currently we use ``2.4`` and ``master`` branches for +daily builds. So, if your pull request was merged to any of these branches, your +material will be published at `docs.opencv.org/2.4 `_ or +`docs.opencv.org/master `_ correspondingly. Everything +that was added to ``2.4`` is merged to ``master`` branch every week. Although, we try +to make a build every night, occasionally we might freeze any of the branches to fix +upcoming issues. During this it may take a little longer to see your work online, +however if you submitted it, be sure that eventually it will show up. + +If you have any questions or advices relating to this tutorial you can contact us at +-delete-admin@-delete-opencv.org (delete the -delete- parts of that email address). diff --git a/doc/tutorials/introduction/ios_install/ios_install.rst b/doc/tutorials/introduction/ios_install/ios_install.rst index cb9e0650b2..2973b7ec2b 100644 --- a/doc/tutorials/introduction/ios_install/ios_install.rst +++ b/doc/tutorials/introduction/ios_install/ios_install.rst @@ -37,7 +37,7 @@ Building OpenCV from Source, using CMake and Command Line .. code-block:: bash cd ~/ - python opencv/ios/build_framework.py ios + python opencv/platforms/ios/build_framework.py ios If everything's fine, a few minutes later you will get ~//ios/opencv2.framework. You can add this framework to your Xcode projects. diff --git a/doc/tutorials/introduction/windows_install/windows_install.rst b/doc/tutorials/introduction/windows_install/windows_install.rst index eebda7b066..28b2c81fe5 100644 --- a/doc/tutorials/introduction/windows_install/windows_install.rst +++ b/doc/tutorials/introduction/windows_install/windows_install.rst @@ -292,7 +292,7 @@ Building the library This will create an *Install* directory inside the *Build* one collecting all the built binaries into a single place. Use this only after you built both the *Release* and *Debug* versions. - To test your build just go into the :file:`Build/bin/Debug` or :file:`Build/bin/Release` directory and start a couple of applications like the *contours.exe*. If they run, you are done. Otherwise, something definitely went awfully wrong. In this case you should contact us via our :opencv_group:`user group <>`. + To test your build just go into the :file:`Build/bin/Debug` or :file:`Build/bin/Release` directory and start a couple of applications like the *contours.exe*. If they run, you are done. Otherwise, something definitely went awfully wrong. 
In this case you should contact us at our :opencv_qa:`Q&A forum <>`. If everything is okay the *contours.exe* output should resemble the following image (if built with Qt support): .. image:: images/WindowsQtContoursOutput.png diff --git a/doc/tutorials/introduction/windows_visual_studio_Opencv/windows_visual_studio_Opencv.rst b/doc/tutorials/introduction/windows_visual_studio_Opencv/windows_visual_studio_Opencv.rst index ec227e7241..c9988df343 100644 --- a/doc/tutorials/introduction/windows_visual_studio_Opencv/windows_visual_studio_Opencv.rst +++ b/doc/tutorials/introduction/windows_visual_studio_Opencv/windows_visual_studio_Opencv.rst @@ -86,7 +86,7 @@ The names of the libraries are as follow: opencv_(The Name of the module)(The version Number of the library you use)d.lib -A full list, for the currently latest trunk version would contain: +A full list, for the latest version would contain: .. code-block:: bash diff --git a/doc/tutorials/ml/introduction_to_svm/introduction_to_svm.rst b/doc/tutorials/ml/introduction_to_svm/introduction_to_svm.rst index 051765def1..1b3abd660a 100644 --- a/doc/tutorials/ml/introduction_to_svm/introduction_to_svm.rst +++ b/doc/tutorials/ml/introduction_to_svm/introduction_to_svm.rst @@ -129,7 +129,7 @@ Explanation 3. **Train the SVM** - We call the method `CvSVM::train `_ to build the SVM model. + We call the method `CvSVM::train `_ to build the SVM model. .. code-block:: cpp diff --git a/ios/configure-device_xcode.sh b/ios/configure-device_xcode.sh deleted file mode 100755 index 8c28a3e909..0000000000 --- a/ios/configure-device_xcode.sh +++ /dev/null @@ -1 +0,0 @@ -cmake -GXcode -DCMAKE_TOOLCHAIN_FILE=../opencv/ios/cmake/Toolchains/Toolchain-iPhoneOS_Xcode.cmake -DCMAKE_INSTALL_PREFIX=../OpenCV_iPhoneOS ../opencv diff --git a/ios/configure-simulator_xcode.sh b/ios/configure-simulator_xcode.sh deleted file mode 100755 index 50e00261db..0000000000 --- a/ios/configure-simulator_xcode.sh +++ /dev/null @@ -1 +0,0 @@ -cmake -GXcode -DCMAKE_TOOLCHAIN_FILE=../opencv/ios/cmake/Toolchains/Toolchain-iPhoneSimulator_Xcode.cmake -DCMAKE_INSTALL_PREFIX=../OpenCV_iPhoneSimulator ../opencv diff --git a/ios/readme.txt b/ios/readme.txt deleted file mode 100644 index 1441b241b7..0000000000 --- a/ios/readme.txt +++ /dev/null @@ -1,15 +0,0 @@ -Assuming that your build directory is on the same level that opencv source, -From the build directory run - ../opencv/ios/configure-device_xcode.sh -or - ../opencv/ios/configure-simulator_xcode.sh - -Then from the same folder invoke - -xcodebuild -sdk iphoneos -configuration Release -target ALL_BUILD -xcodebuild -sdk iphoneos -configuration Release -target install install - -or - -xcodebuild -sdk iphonesimulator -configuration Release -target ALL_BUILD -xcodebuild -sdk iphonesimulator -configuration Release -target install install \ No newline at end of file diff --git a/modules/contrib/src/inputoutput.cpp b/modules/contrib/src/inputoutput.cpp index f64502e433..f59075631c 100644 --- a/modules/contrib/src/inputoutput.cpp +++ b/modules/contrib/src/inputoutput.cpp @@ -1,7 +1,7 @@ #include "opencv2/contrib.hpp" -#ifdef WIN32 +#if defined(WIN32) || defined(_WIN32) #include #include #else diff --git a/modules/core/doc/basic_structures.rst b/modules/core/doc/basic_structures.rst index 70c7c0ebe2..647a9a2e66 100644 --- a/modules/core/doc/basic_structures.rst +++ b/modules/core/doc/basic_structures.rst @@ -570,6 +570,9 @@ Various Ptr constructors. .. ocv:function:: Ptr::Ptr(_Tp* _obj) .. 
ocv:function:: Ptr::Ptr(const Ptr& ptr) + :param _obj: Object for copy. + :param ptr: Object for copy. + Ptr::~Ptr --------- The Ptr destructor. @@ -582,6 +585,8 @@ Assignment operator. .. ocv:function:: Ptr& Ptr::operator = (const Ptr& ptr) + :param ptr: Object for assignment. + Decrements own reference counter (with ``release()``) and increments ptr's reference counter. Ptr::addref @@ -1542,6 +1547,7 @@ Adds elements to the bottom of the matrix. .. ocv:function:: void Mat::push_back( const Mat& m ) :param elem: Added element(s). + :param m: Added line(s). The methods add one or more elements to the bottom of the matrix. They emulate the corresponding method of the STL vector class. When ``elem`` is ``Mat`` , its type and the number of columns must be the same as in the container matrix. @@ -2209,7 +2215,6 @@ Various SparseMat constructors. :param dims: Array dimensionality. :param _sizes: Sparce matrix size on all dementions. :param _type: Sparse matrix data type. - :param try1d: if try1d is true and matrix is a single-column matrix (Nx1), then the sparse matrix will be 1-dimensional. SparseMat::~SparseMat --------------------- @@ -2224,6 +2229,8 @@ Provides sparse matrix assignment operators. .. ocv:function:: SparseMat& SparseMat::operator = (const SparseMat& m) .. ocv:function:: SparseMat& SparseMat::operator = (const Mat& m) + :param m: Matrix for assignment. + The last variant is equivalent to the corresponding constructor with try1d=false. @@ -2251,6 +2258,10 @@ Convert sparse matrix with possible type change and scaling. .. ocv:function:: void SparseMat::convertTo( SparseMat& m, int rtype, double alpha=1 ) const .. ocv:function:: void SparseMat::convertTo( Mat& m, int rtype, double alpha=1, double beta=0 ) const + :param m: Destination matrix. + :param rtype: Destination matrix type. + :param alpha: Conversion multiplier. + The first version converts arbitrary sparse matrix to dense matrix and multiplies all the matrix elements by the specified scalar. The second versiob converts sparse matrix to dense matrix with optional type conversion and scaling. When rtype=-1, the destination element type will be the same as the sparse matrix element type. @@ -2343,7 +2354,7 @@ The method returns the number of matrix channels. SparseMat::size --------------- -Returns the array of sizes or matrix size by i dimention and 0 if the matrix is not allocated. +Returns the array of sizes or matrix size by i dimension and 0 if the matrix is not allocated. .. ocv:function:: const int* SparseMat::size() const .. ocv:function:: int SparseMat::size(int i) const @@ -2371,6 +2382,11 @@ Compute element hash value from the element indices. .. ocv:function:: size_t SparseMat::hash(int i0, int i1, int i2) const .. ocv:function:: size_t SparseMat::hash(const int* idx) const + :param i0: The first dimension index. + :param i1: The second dimension index. + :param i2: The third dimension index. + :param idx: Array of element indices for multidimensional matices. + SparseMat::ptr -------------- Low-level element-access functions, special variants for 1D, 2D, 3D cases, and the generic one for n-D case. @@ -2380,6 +2396,12 @@ Low-level element-access functions, special variants for 1D, 2D, 3D cases, and t .. ocv:function:: uchar* SparseMat::ptr(int i0, int i1, int i2, bool createMissing, size_t* hashval=0) .. ocv:function:: uchar* SparseMat::ptr(const int* idx, bool createMissing, size_t* hashval=0) + :param i0: The first dimension index. + :param i1: The second dimension index. 
+ :param i2: The third dimension index. + :param idx: Array of element indices for multidimensional matices. + :param createMissing: Create new element with 0 value if it does not exist in SparseMat. + Return pointer to the matrix element. If the element is there (it is non-zero), the pointer to it is returned. If it is not there and ``createMissing=false``, NULL pointer is returned. If it is not there and ``createMissing=true``, the new elementis created and initialized with 0. Pointer to it is returned. If the optional hashval pointer is not ``NULL``, @@ -2393,6 +2415,11 @@ Erase the specified matrix element. When there is no such an element, the method .. ocv:function:: void SparseMat::erase(int i0, int i1, int i2, size_t* hashval=0) .. ocv:function:: void SparseMat::erase(const int* idx, size_t* hashval=0) + :param i0: The first dimension index. + :param i1: The second dimension index. + :param i2: The third dimension index. + :param idx: Array of element indices for multidimensional matices. + SparseMat\_ ----------- .. ocv:class:: SparseMat_ diff --git a/modules/core/doc/clustering.rst b/modules/core/doc/clustering.rst index 557e92eb44..f078e44f59 100644 --- a/modules/core/doc/clustering.rst +++ b/modules/core/doc/clustering.rst @@ -15,12 +15,18 @@ Finds centers of clusters and groups input samples around the clusters. :param samples: Floating-point matrix of input samples, one row per sample. + :param data: Data for clustering. + :param cluster_count: Number of clusters to split the set by. + :param K: Number of clusters to split the set by. + :param labels: Input/output integer array that stores the cluster indices for every sample. :param criteria: The algorithm termination criteria, that is, the maximum number of iterations and/or the desired accuracy. The accuracy is specified as ``criteria.epsilon``. As soon as each of the cluster centers moves by less than ``criteria.epsilon`` on some iteration, the algorithm stops. + :param termcrit: The algorithm termination criteria, that is, the maximum number of iterations and/or the desired accuracy. + :param attempts: Flag to specify the number of times the algorithm is executed using different initial labellings. The algorithm returns the labels that yield the best compactness (see the last function parameter). :param rng: CvRNG state initialized by RNG(). @@ -35,6 +41,8 @@ Finds centers of clusters and groups input samples around the clusters. :param centers: Output matrix of the cluster centers, one row per each cluster center. + :param _centers: Output matrix of the cluster centers, one row per each cluster center. + :param compactness: The returned value that is described below. The function ``kmeans`` implements a k-means algorithm that finds the diff --git a/modules/core/doc/drawing_functions.rst b/modules/core/doc/drawing_functions.rst index 17e983b6b2..53f4527410 100644 --- a/modules/core/doc/drawing_functions.rst +++ b/modules/core/doc/drawing_functions.rst @@ -225,6 +225,8 @@ Calculates the width and height of a text string. :param text: Input text string. + :param text_string: Input text string in C format. + :param fontFace: Font to use. See the :ocv:func:`putText` for details. :param fontScale: Font scale. See the :ocv:func:`putText` for details. @@ -233,6 +235,12 @@ Calculates the width and height of a text string. :param baseLine: Output parameter - y-coordinate of the baseline relative to the bottom-most text point. 
+ :param baseline: Output parameter - y-coordinate of the baseline relative to the bottom-most text point. + + :param font: Font description in terms of old C API. + + :param text_size: Output parameter - The size of a box that contains the specified text. + The function ``getTextSize`` calculates and returns the size of a box that contains the specified text. That is, the following code renders some text, the tight box surrounding it, and the baseline: :: diff --git a/modules/core/doc/operations_on_arrays.rst b/modules/core/doc/operations_on_arrays.rst index d2c0bba14a..2e844c7e97 100644 --- a/modules/core/doc/operations_on_arrays.rst +++ b/modules/core/doc/operations_on_arrays.rst @@ -1033,6 +1033,8 @@ Returns the determinant of a square floating-point matrix. :param mtx: input matrix that must have ``CV_32FC1`` or ``CV_64FC1`` type and square size. + :param mat: input matrix that must have ``CV_32FC1`` or ``CV_64FC1`` type and square size. + The function ``determinant`` calculates and returns the determinant of the specified matrix. For small matrices ( ``mtx.cols=mtx.rows<=3`` ), the direct method is used. For larger matrices, the function uses LU factorization with partial pivoting. diff --git a/modules/core/doc/utility_and_system_functions_and_macros.rst b/modules/core/doc/utility_and_system_functions_and_macros.rst index 9cecb11a0f..d5052fa5ac 100644 --- a/modules/core/doc/utility_and_system_functions_and_macros.rst +++ b/modules/core/doc/utility_and_system_functions_and_macros.rst @@ -165,6 +165,8 @@ Checks a condition at runtime and throws exception if it fails .. ocv:function:: CV_Assert(expr) + :param expr: Expression for check. + The macros ``CV_Assert`` (and ``CV_DbgAssert``) evaluate the specified expression. If it is 0, the macros raise an error (see :ocv:func:`error` ). The macro ``CV_Assert`` checks the condition in both Debug and Release configurations while ``CV_DbgAssert`` is only retained in the Debug configuration. @@ -180,8 +182,14 @@ Signals an error and raises an exception. :param status: Error code. Normally, it is a negative value. The list of pre-defined error codes can be found in ``cxerror.h`` . + :param func_name: The function name where error occurs. + :param err_msg: Text of the error message. + :param file_name: The file name where error occurs. + + :param line: The line number where error occurs. + :param args: ``printf`` -like formatted error message in parentheses. The function and the helper macros ``CV_Error`` and ``CV_Error_``: :: @@ -241,6 +249,7 @@ Allocates an aligned memory buffer. .. ocv:cfunction:: void* cvAlloc( size_t size ) :param size: Allocated buffer size. + :param bufSize: Allocated buffer size. The function allocates the buffer of the specified size and returns it. When the buffer size is 16 bytes or more, the returned buffer is aligned to 16 bytes. diff --git a/modules/core/doc/xml_yaml_persistence.rst b/modules/core/doc/xml_yaml_persistence.rst index b3938b75c7..fb8a1fff3e 100644 --- a/modules/core/doc/xml_yaml_persistence.rst +++ b/modules/core/doc/xml_yaml_persistence.rst @@ -181,6 +181,17 @@ Opens a file. .. ocv:function:: bool FileStorage::open(const String& filename, int flags, const String& encoding=String()) + :param filename: Name of the file to open or the text string to read the data from. + Extension of the file (``.xml`` or ``.yml``/``.yaml``) determines its format (XML or YAML respectively). + Also you can append ``.gz`` to work with compressed files, for example ``myHugeMatrix.xml.gz``. 
+ If both ``FileStorage::WRITE`` and ``FileStorage::MEMORY`` flags are specified, ``source`` + is used just to specify the output file format (e.g. ``mydata.xml``, ``.yml`` etc.). + + :param flags: Mode of operation. See FileStorage constructor for more details. + + :param encoding: Encoding of the file. Note that UTF-16 XML encoding is not supported currently and you should use 8-bit encoding instead of it. + + See description of parameters in :ocv:func:`FileStorage::FileStorage`. The method calls :ocv:func:`FileStorage::release` before opening the file. diff --git a/modules/core/include/opencv2/core/mat.hpp b/modules/core/include/opencv2/core/mat.hpp index b1162a1426..d5826a9b61 100644 --- a/modules/core/include/opencv2/core/mat.hpp +++ b/modules/core/include/opencv2/core/mat.hpp @@ -1130,8 +1130,6 @@ public: //! converts dense 2d matrix to the sparse form /*! \param m the input matrix - \param try1d if true and m is a single-column matrix (Nx1), - then the sparse matrix will be 1-dimensional. */ explicit SparseMat(const Mat& m); //! converts old-style sparse matrix to the new-style. All the data is copied diff --git a/modules/core/include/opencv2/core/private.hpp b/modules/core/include/opencv2/core/private.hpp index 6d3cd9b41d..c81efafb1f 100644 --- a/modules/core/include/opencv2/core/private.hpp +++ b/modules/core/include/opencv2/core/private.hpp @@ -71,30 +71,6 @@ # endif #endif -#ifdef _OPENMP -# define HAVE_OPENMP -#endif - -#ifdef __APPLE__ -# define HAVE_GCD -#endif - -#if defined _MSC_VER && _MSC_VER >= 1600 -# define HAVE_CONCURRENCY -#endif - -#if defined HAVE_TBB -# define CV_PARALLEL_FRAMEWORK "tbb" -#elif defined HAVE_CSTRIPES -# define CV_PARALLEL_FRAMEWORK "cstripes" -#elif defined HAVE_OPENMP -# define CV_PARALLEL_FRAMEWORK "openmp" -#elif defined HAVE_GCD -# define CV_PARALLEL_FRAMEWORK "gcd" -#elif defined HAVE_CONCURRENCY -# define CV_PARALLEL_FRAMEWORK "ms-concurrency" -#endif - namespace cv { #ifdef HAVE_TBB @@ -145,6 +121,10 @@ namespace cv body(range); } #endif + + // Returns a static string if there is a parallel framework, + // NULL otherwise. + CV_EXPORTS const char* currentParallelFramework(); } //namespace cv #define CV_INIT_ALGORITHM(classname, algname, memberinit) \ diff --git a/modules/core/src/parallel.cpp b/modules/core/src/parallel.cpp index 51b165275f..0a9ed09871 100644 --- a/modules/core/src/parallel.cpp +++ b/modules/core/src/parallel.cpp @@ -61,6 +61,17 @@ #endif #endif +#ifdef _OPENMP + #define HAVE_OPENMP +#endif + +#ifdef __APPLE__ + #define HAVE_GCD +#endif + +#if defined _MSC_VER && _MSC_VER >= 1600 + #define HAVE_CONCURRENCY +#endif /* IMPORTANT: always use the same order of defines 1. 
HAVE_TBB - 3rdparty library, should be explicitly enabled @@ -99,6 +110,18 @@ #endif #endif +#if defined HAVE_TBB && TBB_VERSION_MAJOR*100 + TBB_VERSION_MINOR >= 202 +# define CV_PARALLEL_FRAMEWORK "tbb" +#elif defined HAVE_CSTRIPES +# define CV_PARALLEL_FRAMEWORK "cstripes" +#elif defined HAVE_OPENMP +# define CV_PARALLEL_FRAMEWORK "openmp" +#elif defined HAVE_GCD +# define CV_PARALLEL_FRAMEWORK "gcd" +#elif defined HAVE_CONCURRENCY +# define CV_PARALLEL_FRAMEWORK "ms-concurrency" +#endif + namespace cv { ParallelLoopBody::~ParallelLoopBody() {} @@ -465,6 +488,14 @@ int cv::getNumberOfCPUs(void) #endif } +const char* cv::currentParallelFramework() { +#ifdef CV_PARALLEL_FRAMEWORK + return CV_PARALLEL_FRAMEWORK; +#else + return NULL; +#endif +} + CV_IMPL void cvSetNumThreads(int nt) { cv::setNumThreads(nt); diff --git a/modules/gpu/perf/perf_main.cpp b/modules/gpu/perf/perf_main.cpp index a7ac1ccce8..53a19ca412 100644 --- a/modules/gpu/perf/perf_main.cpp +++ b/modules/gpu/perf/perf_main.cpp @@ -44,4 +44,11 @@ using namespace perf; -CV_PERF_TEST_MAIN(gpu, printCudaInfo()) +static const char * impls[] = { +#ifdef HAVE_CUDA + "cuda", +#endif + "plain" +}; + +CV_PERF_TEST_MAIN_WITH_IMPLS(gpu, impls, printCudaInfo()) diff --git a/modules/highgui/CMakeLists.txt b/modules/highgui/CMakeLists.txt index 824466095c..7de271622a 100644 --- a/modules/highgui/CMakeLists.txt +++ b/modules/highgui/CMakeLists.txt @@ -233,7 +233,7 @@ endif() if(IOS) add_definitions(-DHAVE_IOS=1) - list(APPEND highgui_srcs src/cap_ios_abstract_camera.mm src/cap_ios_photo_camera.mm src/cap_ios_video_camera.mm) + list(APPEND highgui_srcs src/ios_conversions.mm src/cap_ios_abstract_camera.mm src/cap_ios_photo_camera.mm src/cap_ios_video_camera.mm) list(APPEND HIGHGUI_LIBRARIES "-framework Accelerate" "-framework AVFoundation" "-framework CoreGraphics" "-framework CoreImage" "-framework CoreMedia" "-framework CoreVideo" "-framework QuartzCore" "-framework AssetsLibrary") endif() diff --git a/modules/highgui/include/opencv2/highgui/highgui_c.h b/modules/highgui/include/opencv2/highgui/highgui_c.h index 5c9fde38dc..2ebea0d30b 100644 --- a/modules/highgui/include/opencv2/highgui/highgui_c.h +++ b/modules/highgui/include/opencv2/highgui/highgui_c.h @@ -559,9 +559,11 @@ CVAPI(int) cvGetCaptureDomain( CvCapture* capture); /* "black box" video file writer structure */ typedef struct CvVideoWriter CvVideoWriter; +#define CV_FOURCC_MACRO(c1, c2, c3, c4) (((c1) & 255) + (((c2) & 255) << 8) + (((c3) & 255) << 16) + (((c4) & 255) << 24)) + CV_INLINE int CV_FOURCC(char c1, char c2, char c3, char c4) { - return (c1 & 255) + ((c2 & 255) << 8) + ((c3 & 255) << 16) + ((c4 & 255) << 24); + return CV_FOURCC_MACRO(c1, c2, c3, c4); } #define CV_FOURCC_PROMPT -1 /* Open Codec Selection Dialog (Windows only) */ diff --git a/modules/highgui/include/opencv2/highgui/ios.h b/modules/highgui/include/opencv2/highgui/ios.h new file mode 100644 index 0000000000..a78fbf72c7 --- /dev/null +++ b/modules/highgui/include/opencv2/highgui/ios.h @@ -0,0 +1,49 @@ + +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. 
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#include "opencv2/core/core.hpp" +#import "opencv2/highgui/cap_ios.h" + +UIImage* MatToUIImage(const cv::Mat& image); +void UIImageToMat(const UIImage* image, + cv::Mat& m, bool alphaExist = false); \ No newline at end of file diff --git a/modules/highgui/perf/perf_input.cpp b/modules/highgui/perf/perf_input.cpp index b32f88da97..9dbc27ee7d 100644 --- a/modules/highgui/perf/perf_input.cpp +++ b/modules/highgui/perf/perf_input.cpp @@ -11,11 +11,21 @@ using std::tr1::get; typedef perf::TestBaseWithParam VideoCapture_Reading; +#if defined(HAVE_MSMF) +// MPEG2 is not supported by Media Foundation yet +// http://social.msdn.microsoft.com/Forums/en-US/mediafoundationdevelopment/thread/39a36231-8c01-40af-9af5-3c105d684429 +PERF_TEST_P(VideoCapture_Reading, ReadFile, testing::Values( "highgui/video/big_buck_bunny.avi", + "highgui/video/big_buck_bunny.mov", + "highgui/video/big_buck_bunny.mp4", + "highgui/video/big_buck_bunny.wmv" ) ) + +#else PERF_TEST_P(VideoCapture_Reading, ReadFile, testing::Values( "highgui/video/big_buck_bunny.avi", "highgui/video/big_buck_bunny.mov", "highgui/video/big_buck_bunny.mp4", "highgui/video/big_buck_bunny.mpg", "highgui/video/big_buck_bunny.wmv" ) ) +#endif { string filename = getDataPath(GetParam()); diff --git a/modules/highgui/perf/perf_output.cpp b/modules/highgui/perf/perf_output.cpp index ee26f3a425..8e7ec54a97 100644 --- a/modules/highgui/perf/perf_output.cpp +++ b/modules/highgui/perf/perf_output.cpp @@ -22,13 +22,19 @@ PERF_TEST_P(VideoWriter_Writing, WriteFrame, { string filename = getDataPath(get<0>(GetParam())); bool isColor = get<1>(GetParam()); + Mat image = imread(filename, 1); +#if defined(HAVE_MSMF) && !defined(HAVE_VFW) && !defined(HAVE_FFMPEG) // VFW has greater priority + VideoWriter writer(cv::tempfile(".wmv"), VideoWriter::fourcc('W', 'M', 'V', '3'), + 25, cv::Size(image.cols, image.rows), isColor); +#else + VideoWriter 
writer(cv::tempfile(".avi"), VideoWriter::fourcc('X', 'V', 'I', 'D'), + 25, cv::Size(image.cols, image.rows), isColor); +#endif - VideoWriter writer(cv::tempfile(".avi"), VideoWriter::fourcc('X', 'V', 'I', 'D'), 25, cv::Size(640, 480), isColor); - - TEST_CYCLE() { Mat image = imread(filename, 1); writer << image; } + TEST_CYCLE() { image = imread(filename, 1); writer << image; } bool dummy = writer.isOpened(); SANITY_CHECK(dummy); } -#endif // BUILD_WITH_VIDEO_OUTPUT_SUPPORT \ No newline at end of file +#endif // BUILD_WITH_VIDEO_OUTPUT_SUPPORT diff --git a/modules/highgui/perf/perf_precomp.hpp b/modules/highgui/perf/perf_precomp.hpp index 16880d1e42..f8e02fa70c 100644 --- a/modules/highgui/perf/perf_precomp.hpp +++ b/modules/highgui/perf/perf_precomp.hpp @@ -21,6 +21,7 @@ defined(HAVE_QUICKTIME) || \ defined(HAVE_AVFOUNDATION) || \ defined(HAVE_FFMPEG) || \ + defined(HAVE_MSMF) || \ defined(HAVE_VFW) /*defined(HAVE_OPENNI) too specialized */ \ @@ -34,6 +35,7 @@ defined(HAVE_QUICKTIME) || \ defined(HAVE_AVFOUNDATION) || \ defined(HAVE_FFMPEG) || \ + defined(HAVE_MSMF) || \ defined(HAVE_VFW) # define BUILD_WITH_VIDEO_OUTPUT_SUPPORT 1 #else diff --git a/modules/highgui/src/cap.cpp b/modules/highgui/src/cap.cpp index 8eb9d0d6ee..a76cb428ea 100644 --- a/modules/highgui/src/cap.cpp +++ b/modules/highgui/src/cap.cpp @@ -117,6 +117,9 @@ CV_IMPL CvCapture * cvCreateCameraCapture (int index) #ifdef HAVE_DSHOW CV_CAP_DSHOW, #endif +#ifdef HAVE_MSMF + CV_CAP_MSMF, +#endif #if 1 CV_CAP_IEEE1394, // identical to CV_CAP_DC1394 #endif @@ -196,21 +199,20 @@ CV_IMPL CvCapture * cvCreateCameraCapture (int index) switch (domains[i]) { -#ifdef HAVE_MSMF - case CV_CAP_MSMF: - capture = cvCreateCameraCapture_MSMF (index); +#ifdef HAVE_DSHOW + case CV_CAP_DSHOW: + capture = cvCreateCameraCapture_DShow (index); if (capture) return capture; break; #endif -#ifdef HAVE_DSHOW - case CV_CAP_DSHOW: - capture = cvCreateCameraCapture_DShow (index); +#ifdef HAVE_MSMF + case CV_CAP_MSMF: + capture = cvCreateCameraCapture_MSMF (index); if (capture) return capture; break; #endif - #ifdef HAVE_TYZX case CV_CAP_STEREO: capture = cvCreateCameraCapture_TYZX (index); @@ -218,14 +220,12 @@ CV_IMPL CvCapture * cvCreateCameraCapture (int index) return capture; break; #endif - - case CV_CAP_VFW: #ifdef HAVE_VFW + case CV_CAP_VFW: capture = cvCreateCameraCapture_VFW (index); if (capture) return capture; #endif - #if defined HAVE_LIBV4L || defined HAVE_CAMV4L || defined HAVE_CAMV4L2 || defined HAVE_VIDEOIO capture = cvCreateCameraCapture_V4L (index); if (capture) @@ -359,6 +359,16 @@ CV_IMPL CvCapture * cvCreateFileCapture (const char * filename) if (! result) result = cvCreateFileCapture_FFMPEG_proxy (filename); +#ifdef HAVE_VFW + if (! result) + result = cvCreateFileCapture_VFW (filename); +#endif + +#ifdef HAVE_MSMF + if (! result) + result = cvCreateFileCapture_MSMF (filename); +#endif + #ifdef HAVE_XINE if (! 
result) result = cvCreateFileCapture_XINE (filename); @@ -407,6 +417,16 @@ CV_IMPL CvVideoWriter* cvCreateVideoWriter( const char* filename, int fourcc, if(!result) result = cvCreateVideoWriter_FFMPEG_proxy (filename, fourcc, fps, frameSize, is_color); +#ifdef HAVE_VFW + if(!result) + result = cvCreateVideoWriter_VFW(filename, fourcc, fps, frameSize, is_color); +#endif + +#ifdef HAVE_MSMF + if (!result) + result = cvCreateVideoWriter_MSMF(filename, fourcc, fps, frameSize, is_color); +#endif + /* #ifdef HAVE_XINE if(!result) result = cvCreateVideoWriter_XINE(filename, fourcc, fps, frameSize, is_color); diff --git a/modules/highgui/src/cap_dc1394_v2.cpp b/modules/highgui/src/cap_dc1394_v2.cpp index ea7e4b2b86..0d5f898186 100644 --- a/modules/highgui/src/cap_dc1394_v2.cpp +++ b/modules/highgui/src/cap_dc1394_v2.cpp @@ -304,6 +304,11 @@ bool CvCaptureCAM_DC1394_v2_CPP::startCapture() return false; if (isoSpeed > 0) { + // if capable set operation mode to 1394b for iso speeds above 400 + if (isoSpeed > 400 && dcCam->bmode_capable == DC1394_TRUE) + { + dc1394_video_set_operation_mode(dcCam, DC1394_OPERATION_MODE_1394B); + } code = dc1394_video_set_iso_speed(dcCam, isoSpeed <= 100 ? DC1394_ISO_SPEED_100 : isoSpeed <= 200 ? DC1394_ISO_SPEED_200 : diff --git a/modules/highgui/src/cap_ffmpeg.cpp b/modules/highgui/src/cap_ffmpeg.cpp index 57f67dab9c..3734bb646d 100644 --- a/modules/highgui/src/cap_ffmpeg.cpp +++ b/modules/highgui/src/cap_ffmpeg.cpp @@ -209,11 +209,7 @@ CvCapture* cvCreateFileCapture_FFMPEG_proxy(const char * filename) if( result->open( filename )) return result; delete result; -#ifdef HAVE_VFW - return cvCreateFileCapture_VFW(filename); -#else return 0; -#endif } class CvVideoWriter_FFMPEG_proxy : @@ -263,9 +259,5 @@ CvVideoWriter* cvCreateVideoWriter_FFMPEG_proxy( const char* filename, int fourc if( result->open( filename, fourcc, fps, frameSize, isColor != 0 )) return result; delete result; -#ifdef HAVE_VFW - return cvCreateVideoWriter_VFW(filename, fourcc, fps, frameSize, isColor); - #else return 0; -#endif } diff --git a/modules/highgui/src/cap_libv4l.cpp b/modules/highgui/src/cap_libv4l.cpp index b081621b18..6027caec26 100644 --- a/modules/highgui/src/cap_libv4l.cpp +++ b/modules/highgui/src/cap_libv4l.cpp @@ -14,7 +14,9 @@ It has been tested with the motempl sample program First Patch: August 24, 2004 Travis Wood TravisOCV@tkwood.com For Release: OpenCV-Linux Beta4 opencv-0.9.6 Tested On: LMLBT44 with 8 video inputs -Problems? Post problems/fixes to OpenCV group on groups.yahoo.com +Problems? 
Post your questions at answers.opencv.org, + Report bugs at code.opencv.org, + Submit your fixes at https://github.com/Itseez/opencv/ Patched Comments: TW: The cv cam utils that came with the initial release of OpenCV for LINUX Beta4 diff --git a/modules/highgui/src/cap_msmf.cpp b/modules/highgui/src/cap_msmf.cpp index 52b780463a..09f65b7e87 100644 --- a/modules/highgui/src/cap_msmf.cpp +++ b/modules/highgui/src/cap_msmf.cpp @@ -53,7 +53,8 @@ #include #include #include -#include "Strsafe.h" +#include +#include #include #include #include @@ -61,18 +62,27 @@ #include #include #include + #pragma warning(disable:4503) #pragma comment(lib, "mfplat") #pragma comment(lib, "mf") #pragma comment(lib, "mfuuid") #pragma comment(lib, "Strmiids") +#pragma comment(lib, "Mfreadwrite") #pragma comment(lib, "MinCore_Downlevel") + +// for ComPtr usage +#include +using namespace Microsoft::WRL; + struct IMFMediaType; struct IMFActivate; struct IMFMediaSource; struct IMFAttributes; + namespace { + template void SafeRelease(T **ppT) { if (*ppT) @@ -81,7 +91,8 @@ template void SafeRelease(T **ppT) *ppT = NULL; } } - /// Class for printing info into consol + +/// Class for printing info into consol class DebugPrintOut { public: @@ -93,6 +104,7 @@ public: private: DebugPrintOut(void); }; + // Structure for collecting info about types of video, which are supported by current video device struct MediaType { @@ -101,14 +113,14 @@ struct MediaType unsigned int width; unsigned int MF_MT_YUV_MATRIX; unsigned int MF_MT_VIDEO_LIGHTING; - unsigned int MF_MT_DEFAULT_STRIDE; + int MF_MT_DEFAULT_STRIDE; // stride is negative if image is bottom-up unsigned int MF_MT_VIDEO_CHROMA_SITING; GUID MF_MT_AM_FORMAT_TYPE; wchar_t *pMF_MT_AM_FORMAT_TYPEName; unsigned int MF_MT_FIXED_SIZE_SAMPLES; unsigned int MF_MT_VIDEO_NOMINAL_RANGE; - unsigned int MF_MT_FRAME_RATE; - unsigned int MF_MT_FRAME_RATE_low; + unsigned int MF_MT_FRAME_RATE_NUMERATOR; + unsigned int MF_MT_FRAME_RATE_DENOMINATOR; unsigned int MF_MT_PIXEL_ASPECT_RATIO; unsigned int MF_MT_PIXEL_ASPECT_RATIO_low; unsigned int MF_MT_ALL_SAMPLES_INDEPENDENT; @@ -127,6 +139,7 @@ struct MediaType ~MediaType(); void Clear(); }; + /// Class for parsing info from IMFMediaType into the local MediaType class FormatReader { @@ -136,9 +149,10 @@ public: private: FormatReader(void); }; + DWORD WINAPI MainThreadFunction( LPVOID lpParam ); typedef void(*emergensyStopEventCallback)(int, void *); -typedef unsigned char BYTE; + class RawImage { public: @@ -156,6 +170,7 @@ private: unsigned char *ri_pixels; RawImage(unsigned int size); }; + // Class for grabbing image from video stream class ImageGrabber : public IMFSampleGrabberSinkCallback { @@ -163,13 +178,21 @@ public: ~ImageGrabber(void); HRESULT initImageGrabber(IMFMediaSource *pSource, GUID VideoFormat); HRESULT startGrabbing(void); + void pauseGrabbing(); + void resumeGrabbing(); void stopGrabbing(); RawImage *getRawImage(); // Function of creation of the instance of the class - static HRESULT CreateInstance(ImageGrabber **ppIG,unsigned int deviceID); + static HRESULT CreateInstance(ImageGrabber **ppIG, unsigned int deviceID, bool synchronous = false); + + const HANDLE ig_hFrameReady; + const HANDLE ig_hFrameGrabbed; + const HANDLE ig_hFinish; + private: bool ig_RIE; bool ig_Close; + bool ig_Synchronous; long m_cRef; unsigned int ig_DeviceID; IMFMediaSource *ig_pSource; @@ -178,19 +201,11 @@ private: RawImage *ig_RIFirst; RawImage *ig_RISecond; RawImage *ig_RIOut; - ImageGrabber(unsigned int deviceID); + ImageGrabber(unsigned int 
deviceID, bool synchronous); HRESULT CreateTopology(IMFMediaSource *pSource, IMFActivate *pSinkActivate, IMFTopology **ppTopo); - HRESULT AddSourceNode( - IMFTopology *pTopology, - IMFMediaSource *pSource, - IMFPresentationDescriptor *pPD, - IMFStreamDescriptor *pSD, - IMFTopologyNode **ppNode); - HRESULT AddOutputNode( - IMFTopology *pTopology, - IMFActivate *pActivate, - DWORD dwId, - IMFTopologyNode **ppNode); + HRESULT AddSourceNode(IMFTopology *pTopology, IMFMediaSource *pSource, + IMFPresentationDescriptor *pPD, IMFStreamDescriptor *pSD, IMFTopologyNode **ppNode); + HRESULT AddOutputNode(IMFTopology *pTopology, IMFActivate *pActivate, DWORD dwId, IMFTopologyNode **ppNode); // IUnknown methods STDMETHODIMP QueryInterface(REFIID iid, void** ppv); STDMETHODIMP_(ULONG) AddRef(); @@ -208,13 +223,14 @@ private: DWORD dwSampleSize); STDMETHODIMP OnShutdown(); }; + /// Class for controlling of thread of the grabbing raw data from video device class ImageGrabberThread { friend DWORD WINAPI MainThreadFunction( LPVOID lpParam ); public: ~ImageGrabberThread(void); - static HRESULT CreateInstance(ImageGrabberThread **ppIGT, IMFMediaSource *pSource, unsigned int deviceID); + static HRESULT CreateInstance(ImageGrabberThread **ppIGT, IMFMediaSource *pSource, unsigned int deviceID, bool synchronious = false); void start(); void stop(); void setEmergencyStopEvent(void *userData, void(*func)(int, void *)); @@ -222,7 +238,7 @@ public: protected: virtual void run(); private: - ImageGrabberThread(IMFMediaSource *pSource, unsigned int deviceID); + ImageGrabberThread(IMFMediaSource *pSource, unsigned int deviceID, bool synchronious); HANDLE igt_Handle; DWORD igt_ThreadIdArray; ImageGrabber *igt_pImageGrabber; @@ -231,6 +247,7 @@ private: bool igt_stop; unsigned int igt_DeviceID; }; + // Structure for collecting info about one parametr of current video device struct Parametr { @@ -242,6 +259,7 @@ struct Parametr long Flag; Parametr(); }; + // Structure for collecting info about 17 parametrs of current video device struct CamParametrs { @@ -263,11 +281,13 @@ struct CamParametrs Parametr Iris; Parametr Focus; }; + typedef std::wstring String; typedef std::vector vectorNum; typedef std::map SUBTYPEMap; typedef std::map FrameRateMap; typedef void(*emergensyStopEventCallback)(int, void *); + /// Class for controlling of video device class videoDevice { @@ -311,7 +331,7 @@ private: IMFMediaSource *vd_pSource; emergensyStopEventCallback vd_func; void *vd_userData; - long enumerateCaptureFormats(IMFMediaSource *pSource); + HRESULT enumerateCaptureFormats(IMFMediaSource *pSource); long setDeviceFormat(IMFMediaSource *pSource, unsigned long dwFormatIndex); void buildLibraryofTypes(); int findType(unsigned int size, unsigned int frameRate = 0); @@ -319,6 +339,7 @@ private: long initDevice(); long checkDevice(IMFAttributes *pAttributes, IMFActivate **pDevice); }; + /// Class for managing of list of video devices class videoDevices { @@ -334,6 +355,7 @@ private: std::vector vds_Devices; videoDevices(void); }; + // Class for creating of Media Foundation context class Media_Foundation { @@ -344,6 +366,7 @@ public: private: Media_Foundation(void); }; + /// The only visiable class for controlling of video devices in format singelton class videoInput { @@ -393,23 +416,27 @@ public: bool isFrameNew(int deviceID); // Writing of Raw Data pixels from video device with deviceID with correction of RedAndBlue flipping flipRedAndBlue and vertical flipping flipImage bool getPixels(int deviceID, unsigned char * pixels, bool 
flipRedAndBlue = false, bool flipImage = false); + static void processPixels(unsigned char * src, unsigned char * dst, unsigned int width, unsigned int height, unsigned int bpp, bool bRGB, bool bFlip); private: bool accessToDevices; videoInput(void); - void processPixels(unsigned char * src, unsigned char * dst, unsigned int width, unsigned int height, unsigned int bpp, bool bRGB, bool bFlip); void updateListOfDevices(); }; + DebugPrintOut::DebugPrintOut(void):verbose(true) { } + DebugPrintOut::~DebugPrintOut(void) { } + DebugPrintOut& DebugPrintOut::getInstance() { static DebugPrintOut instance; return instance; } + void DebugPrintOut::printOut(const wchar_t *format, ...) { if(verbose) @@ -430,14 +457,17 @@ void DebugPrintOut::printOut(const wchar_t *format, ...) va_end (args); } } + void DebugPrintOut::setVerbose(bool state) { verbose = state; } + LPCWSTR GetGUIDNameConstNew(const GUID& guid); HRESULT GetGUIDNameNew(const GUID& guid, WCHAR **ppwsz); HRESULT LogAttributeValueByIndexNew(IMFAttributes *pAttr, DWORD index); HRESULT SpecialCaseAttributeValueNew(GUID guid, const PROPVARIANT& var, MediaType &out); + unsigned int *GetParametr(GUID guid, MediaType &out) { if(guid == MF_MT_YUV_MATRIX) @@ -445,7 +475,7 @@ unsigned int *GetParametr(GUID guid, MediaType &out) if(guid == MF_MT_VIDEO_LIGHTING) return &(out.MF_MT_VIDEO_LIGHTING); if(guid == MF_MT_DEFAULT_STRIDE) - return &(out.MF_MT_DEFAULT_STRIDE); + return (unsigned int*)&(out.MF_MT_DEFAULT_STRIDE); if(guid == MF_MT_VIDEO_CHROMA_SITING) return &(out.MF_MT_VIDEO_CHROMA_SITING); if(guid == MF_MT_VIDEO_NOMINAL_RANGE) @@ -462,6 +492,7 @@ unsigned int *GetParametr(GUID guid, MediaType &out) return &(out.MF_MT_INTERLACE_MODE); return NULL; } + HRESULT LogAttributeValueByIndexNew(IMFAttributes *pAttr, DWORD index, MediaType &out) { WCHAR *pGuidName = NULL; @@ -548,6 +579,7 @@ done: PropVariantClear(&var); return hr; } + HRESULT GetGUIDNameNew(const GUID& guid, WCHAR **ppwsz) { HRESULT hr = S_OK; @@ -589,14 +621,17 @@ done: } return hr; } + void LogUINT32AsUINT64New(const PROPVARIANT& var, UINT32 &uHigh, UINT32 &uLow) { Unpack2UINT32AsUINT64(var.uhVal.QuadPart, &uHigh, &uLow); } + float OffsetToFloatNew(const MFOffset& offset) { return offset.value + (static_cast(offset.fract) / 65536.0f); } + HRESULT LogVideoAreaNew(const PROPVARIANT& var) { if (var.caub.cElems < sizeof(MFVideoArea)) @@ -605,8 +640,13 @@ HRESULT LogVideoAreaNew(const PROPVARIANT& var) } return S_OK; } + HRESULT SpecialCaseAttributeValueNew(GUID guid, const PROPVARIANT& var, MediaType &out) { + if (guid == MF_MT_DEFAULT_STRIDE) + { + out.MF_MT_DEFAULT_STRIDE = var.intVal; + } else if (guid == MF_MT_FRAME_SIZE) { UINT32 uHigh = 0, uLow = 0; @@ -620,8 +660,8 @@ HRESULT SpecialCaseAttributeValueNew(GUID guid, const PROPVARIANT& var, MediaTyp { UINT32 uHigh = 0, uLow = 0; LogUINT32AsUINT64New(var, uHigh, uLow); - out.MF_MT_FRAME_RATE = uHigh; - out.MF_MT_FRAME_RATE_low = uLow; + out.MF_MT_FRAME_RATE_NUMERATOR = uHigh; + out.MF_MT_FRAME_RATE_DENOMINATOR = uLow; } else if (guid == MF_MT_FRAME_RATE_RANGE_MAX) @@ -653,9 +693,11 @@ HRESULT SpecialCaseAttributeValueNew(GUID guid, const PROPVARIANT& var, MediaTyp } return S_OK; } + #ifndef IF_EQUAL_RETURN #define IF_EQUAL_RETURN(param, val) if(val == param) return L#val #endif + LPCWSTR GetGUIDNameConstNew(const GUID& guid) { IF_EQUAL_RETURN(guid, MF_MT_MAJOR_TYPE); @@ -800,9 +842,11 @@ LPCWSTR GetGUIDNameConstNew(const GUID& guid) IF_EQUAL_RETURN(guid, MFAudioFormat_ADTS); // WAVE_FORMAT_MPEG_ADTS_AAC return NULL; } + 
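(Illustrative aside, not taken from the patch: the ``MediaType`` changes above replace the single ``MF_MT_FRAME_RATE``/``MF_MT_FRAME_RATE_low`` pair with explicit ``MF_MT_FRAME_RATE_NUMERATOR``/``MF_MT_FRAME_RATE_DENOMINATOR`` fields, unpacked from the high and low 32 bits of the 64-bit ``MF_MT_FRAME_RATE`` attribute, and ``CvCaptureFile_MSMF::getProperty`` later divides the two to report ``CV_CAP_PROP_FPS``. A minimal standalone C++ sketch of that arithmetic, using hypothetical 30000/1001 values: ::

    #include <cstdio>

    int main()
    {
        // Hypothetical values, as they would be unpacked from MF_MT_FRAME_RATE
        // (numerator in the high 32 bits, denominator in the low 32 bits).
        unsigned int numerator   = 30000;
        unsigned int denominator = 1001;

        // Same division the capture code performs for CV_CAP_PROP_FPS;
        // keeping both halves preserves non-integer rates such as ~29.97 fps.
        double fps = (double)numerator / (double)denominator;
        std::printf("%u/%u -> %.3f fps\n", numerator, denominator, fps); // 30000/1001 -> 29.970 fps

        return 0;
    })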
FormatReader::FormatReader(void) { } + MediaType FormatReader::Read(IMFMediaType *pType) { UINT32 count = 0; @@ -833,32 +877,57 @@ MediaType FormatReader::Read(IMFMediaType *pType) } return out; } + FormatReader::~FormatReader(void) { } + #define CHECK_HR(x) if (FAILED(x)) { goto done; } -ImageGrabber::ImageGrabber(unsigned int deviceID): m_cRef(1), ig_DeviceID(deviceID), ig_pSource(NULL), ig_pSession(NULL), ig_pTopology(NULL), ig_RIE(true), ig_Close(false) -{ -} + +ImageGrabber::ImageGrabber(unsigned int deviceID, bool synchronous): + m_cRef(1), + ig_DeviceID(deviceID), + ig_pSource(NULL), + ig_pSession(NULL), + ig_pTopology(NULL), + ig_RIE(true), + ig_Close(false), + ig_Synchronous(synchronous), + ig_hFrameReady(synchronous ? CreateEvent(NULL, FALSE, FALSE, NULL): 0), + ig_hFrameGrabbed(synchronous ? CreateEvent(NULL, FALSE, TRUE, NULL): 0), + ig_hFinish(CreateEvent(NULL, TRUE, FALSE, NULL)) +{} + ImageGrabber::~ImageGrabber(void) { if (ig_pSession) { ig_pSession->Shutdown(); } - //SafeRelease(&ig_pSession); - //SafeRelease(&ig_pTopology); + + CloseHandle(ig_hFinish); + + if (ig_Synchronous) + { + CloseHandle(ig_hFrameReady); + CloseHandle(ig_hFrameGrabbed); + } + + SafeRelease(&ig_pSession); + SafeRelease(&ig_pTopology); DebugPrintOut *DPO = &DebugPrintOut::getInstance(); - DPO->printOut(L"IMAGEGRABBER VIDEODEVICE %i: Destroing instance of the ImageGrabber class \n", ig_DeviceID); + + DPO->printOut(L"IMAGEGRABBER VIDEODEVICE %i: Destroing instance of the ImageGrabber class\n", ig_DeviceID); } + HRESULT ImageGrabber::initImageGrabber(IMFMediaSource *pSource, GUID VideoFormat) { - IMFActivate *pSinkActivate = NULL; - IMFMediaType *pType = NULL; - IMFPresentationDescriptor *pPD = NULL; - IMFStreamDescriptor *pSD = NULL; - IMFMediaTypeHandler *pHandler = NULL; - IMFMediaType *pCurrentType = NULL; + ComPtr pSinkActivate = NULL; + ComPtr pType = NULL; + ComPtr pPD = NULL; + ComPtr pSD = NULL; + ComPtr pHandler = NULL; + ComPtr pCurrentType = NULL; HRESULT hr = S_OK; MediaType MT; // Clean up. @@ -871,30 +940,32 @@ HRESULT ImageGrabber::initImageGrabber(IMFMediaSource *pSource, GUID VideoFormat ig_pSource = pSource; hr = pSource->CreatePresentationDescriptor(&pPD); if (FAILED(hr)) + { goto err; + } BOOL fSelected; hr = pPD->GetStreamDescriptorByIndex(0, &fSelected, &pSD); - if (FAILED(hr)) + if (FAILED(hr)) { goto err; + } hr = pSD->GetMediaTypeHandler(&pHandler); - if (FAILED(hr)) + if (FAILED(hr)) { goto err; + } DWORD cTypes = 0; hr = pHandler->GetMediaTypeCount(&cTypes); - if (FAILED(hr)) + if (FAILED(hr)) { goto err; + } if(cTypes > 0) { hr = pHandler->GetCurrentMediaType(&pCurrentType); - if (FAILED(hr)) + if (FAILED(hr)) { goto err; - MT = FormatReader::Read(pCurrentType); + } + MT = FormatReader::Read(pCurrentType.Get()); } err: - SafeRelease(&pPD); - SafeRelease(&pSD); - SafeRelease(&pHandler); - SafeRelease(&pCurrentType); unsigned int sizeRawImage = 0; if(VideoFormat == MFVideoFormat_RGB24) { @@ -910,17 +981,17 @@ err: // Configure the media type that the Sample Grabber will receive. // Setting the major and subtype is usually enough for the topology loader // to resolve the topology. - CHECK_HR(hr = MFCreateMediaType(&pType)); + CHECK_HR(hr = MFCreateMediaType(pType.GetAddressOf())); CHECK_HR(hr = pType->SetGUID(MF_MT_MAJOR_TYPE, MFMediaType_Video)); CHECK_HR(hr = pType->SetGUID(MF_MT_SUBTYPE, VideoFormat)); // Create the sample grabber sink. 
- CHECK_HR(hr = MFCreateSampleGrabberSinkActivate(pType, this, &pSinkActivate)); + CHECK_HR(hr = MFCreateSampleGrabberSinkActivate(pType.Get(), this, pSinkActivate.GetAddressOf())); // To run as fast as possible, set this attribute (requires Windows 7): CHECK_HR(hr = pSinkActivate->SetUINT32(MF_SAMPLEGRABBERSINK_IGNORE_CLOCK, TRUE)); // Create the Media Session. CHECK_HR(hr = MFCreateMediaSession(NULL, &ig_pSession)); // Create the topology. - CHECK_HR(hr = CreateTopology(pSource, pSinkActivate, &ig_pTopology)); + CHECK_HR(hr = CreateTopology(pSource, pSinkActivate.Get(), &ig_pTopology)); done: // Clean up. if (FAILED(hr)) @@ -932,10 +1003,10 @@ done: SafeRelease(&ig_pSession); SafeRelease(&ig_pTopology); } - SafeRelease(&pSinkActivate); - SafeRelease(&pType); + return hr; } + void ImageGrabber::stopGrabbing() { if(ig_pSession) @@ -943,16 +1014,17 @@ void ImageGrabber::stopGrabbing() DebugPrintOut *DPO = &DebugPrintOut::getInstance(); DPO->printOut(L"IMAGEGRABBER VIDEODEVICE %i: Stopping of of grabbing of images\n", ig_DeviceID); } + HRESULT ImageGrabber::startGrabbing(void) { DebugPrintOut *DPO = &DebugPrintOut::getInstance(); - IMFMediaEvent *pEvent = NULL; + ComPtr pEvent = NULL; PROPVARIANT var; PropVariantInit(&var); HRESULT hr = S_OK; - CHECK_HR(hr = ig_pSession->SetTopology(0, ig_pTopology)); - CHECK_HR(hr = ig_pSession->Start(&GUID_NULL, &var)); + hr = ig_pSession->SetTopology(0, ig_pTopology); DPO->printOut(L"IMAGEGRABBER VIDEODEVICE %i: Start Grabbing of the images\n", ig_DeviceID); + hr = ig_pSession->Start(&GUID_NULL, &var); for(;;) { HRESULT hrStatus = S_OK; @@ -992,27 +1064,41 @@ HRESULT ImageGrabber::startGrabbing(void) DPO->printOut(L"IMAGEGRABBER VIDEODEVICE %i: MEVideoCaptureDeviceRemoved \n", ig_DeviceID); break; } - SafeRelease(&pEvent); + if ((met == MEError) || (met == MENonFatalError)) + { + pEvent->GetStatus(&hrStatus); + DPO->printOut(L"IMAGEGRABBER VIDEODEVICE %i: MEError | MENonFatalError: %u\n", ig_DeviceID, hrStatus); + break; + } } DPO->printOut(L"IMAGEGRABBER VIDEODEVICE %i: Finish startGrabbing \n", ig_DeviceID); + done: - SafeRelease(&pEvent); - SafeRelease(&ig_pSession); - SafeRelease(&ig_pTopology); + SetEvent(ig_hFinish); + return hr; } + +void ImageGrabber::pauseGrabbing() +{ +} + +void ImageGrabber::resumeGrabbing() +{ +} + HRESULT ImageGrabber::CreateTopology(IMFMediaSource *pSource, IMFActivate *pSinkActivate, IMFTopology **ppTopo) { - IMFTopology *pTopology = NULL; - IMFPresentationDescriptor *pPD = NULL; - IMFStreamDescriptor *pSD = NULL; - IMFMediaTypeHandler *pHandler = NULL; - IMFTopologyNode *pNode1 = NULL; - IMFTopologyNode *pNode2 = NULL; + IMFTopology* pTopology = NULL; + ComPtr pPD = NULL; + ComPtr pSD = NULL; + ComPtr pHandler = NULL; + ComPtr pNode1 = NULL; + ComPtr pNode2 = NULL; HRESULT hr = S_OK; DWORD cStreams = 0; CHECK_HR(hr = MFCreateTopology(&pTopology)); - CHECK_HR(hr = pSource->CreatePresentationDescriptor(&pPD)); + CHECK_HR(hr = pSource->CreatePresentationDescriptor(pPD.GetAddressOf())); CHECK_HR(hr = pPD->GetStreamDescriptorCount(&cStreams)); for (DWORD i = 0; i < cStreams; i++) { @@ -1024,29 +1110,23 @@ HRESULT ImageGrabber::CreateTopology(IMFMediaSource *pSource, IMFActivate *pSink CHECK_HR(hr = pHandler->GetMajorType(&majorType)); if (majorType == MFMediaType_Video && fSelected) { - CHECK_HR(hr = AddSourceNode(pTopology, pSource, pPD, pSD, &pNode1)); - CHECK_HR(hr = AddOutputNode(pTopology, pSinkActivate, 0, &pNode2)); - CHECK_HR(hr = pNode1->ConnectOutput(0, pNode2, 0)); + CHECK_HR(hr = AddSourceNode(pTopology, pSource, 
pPD.Get(), pSD.Get(), pNode1.GetAddressOf())); + CHECK_HR(hr = AddOutputNode(pTopology, pSinkActivate, 0, pNode2.GetAddressOf())); + CHECK_HR(hr = pNode1->ConnectOutput(0, pNode2.Get(), 0)); break; } else { CHECK_HR(hr = pPD->DeselectStream(i)); } - SafeRelease(&pSD); - SafeRelease(&pHandler); } *ppTopo = pTopology; (*ppTopo)->AddRef(); + done: - SafeRelease(&pTopology); - SafeRelease(&pNode1); - SafeRelease(&pNode2); - SafeRelease(&pPD); - SafeRelease(&pSD); - SafeRelease(&pHandler); return hr; } + HRESULT ImageGrabber::AddSourceNode( IMFTopology *pTopology, // Topology. IMFMediaSource *pSource, // Media source. @@ -1054,43 +1134,45 @@ HRESULT ImageGrabber::AddSourceNode( IMFStreamDescriptor *pSD, // Stream descriptor. IMFTopologyNode **ppNode) // Receives the node pointer. { - IMFTopologyNode *pNode = NULL; + ComPtr pNode = NULL; HRESULT hr = S_OK; - CHECK_HR(hr = MFCreateTopologyNode(MF_TOPOLOGY_SOURCESTREAM_NODE, &pNode)); + CHECK_HR(hr = MFCreateTopologyNode(MF_TOPOLOGY_SOURCESTREAM_NODE, pNode.GetAddressOf())); CHECK_HR(hr = pNode->SetUnknown(MF_TOPONODE_SOURCE, pSource)); CHECK_HR(hr = pNode->SetUnknown(MF_TOPONODE_PRESENTATION_DESCRIPTOR, pPD)); CHECK_HR(hr = pNode->SetUnknown(MF_TOPONODE_STREAM_DESCRIPTOR, pSD)); - CHECK_HR(hr = pTopology->AddNode(pNode)); + CHECK_HR(hr = pTopology->AddNode(pNode.Get())); // Return the pointer to the caller. - *ppNode = pNode; + *ppNode = pNode.Get(); (*ppNode)->AddRef(); + done: - SafeRelease(&pNode); return hr; } + HRESULT ImageGrabber::AddOutputNode( IMFTopology *pTopology, // Topology. IMFActivate *pActivate, // Media sink activation object. DWORD dwId, // Identifier of the stream sink. IMFTopologyNode **ppNode) // Receives the node pointer. { - IMFTopologyNode *pNode = NULL; + ComPtr pNode = NULL; HRESULT hr = S_OK; - CHECK_HR(hr = MFCreateTopologyNode(MF_TOPOLOGY_OUTPUT_NODE, &pNode)); + CHECK_HR(hr = MFCreateTopologyNode(MF_TOPOLOGY_OUTPUT_NODE, pNode.GetAddressOf())); CHECK_HR(hr = pNode->SetObject(pActivate)); CHECK_HR(hr = pNode->SetUINT32(MF_TOPONODE_STREAMID, dwId)); CHECK_HR(hr = pNode->SetUINT32(MF_TOPONODE_NOSHUTDOWN_ON_REMOVE, FALSE)); - CHECK_HR(hr = pTopology->AddNode(pNode)); + CHECK_HR(hr = pTopology->AddNode(pNode.Get())); // Return the pointer to the caller. 
- *ppNode = pNode; + *ppNode = pNode.Get(); (*ppNode)->AddRef(); + done: - SafeRelease(&pNode); return hr; } -HRESULT ImageGrabber::CreateInstance(ImageGrabber **ppIG, unsigned int deviceID) + +HRESULT ImageGrabber::CreateInstance(ImageGrabber **ppIG, unsigned int deviceID, bool synchronious) { - *ppIG = new (std::nothrow) ImageGrabber(deviceID); + *ppIG = new (std::nothrow) ImageGrabber(deviceID, synchronious); if (ppIG == NULL) { return E_OUTOFMEMORY; @@ -1099,6 +1181,7 @@ HRESULT ImageGrabber::CreateInstance(ImageGrabber **ppIG, unsigned int deviceID) DPO->printOut(L"IMAGEGRABBER VIDEODEVICE %i: Creating instance of ImageGrabber\n", deviceID); return S_OK; } + STDMETHODIMP ImageGrabber::QueryInterface(REFIID riid, void** ppv) { HRESULT hr = E_NOINTERFACE; @@ -1119,10 +1202,12 @@ STDMETHODIMP ImageGrabber::QueryInterface(REFIID riid, void** ppv) } return hr; } + STDMETHODIMP_(ULONG) ImageGrabber::AddRef() { return InterlockedIncrement(&m_cRef); } + STDMETHODIMP_(ULONG) ImageGrabber::Release() { ULONG cRef = InterlockedDecrement(&m_cRef); @@ -1132,38 +1217,45 @@ STDMETHODIMP_(ULONG) ImageGrabber::Release() } return cRef; } + STDMETHODIMP ImageGrabber::OnClockStart(MFTIME hnsSystemTime, LONGLONG llClockStartOffset) { (void)hnsSystemTime; (void)llClockStartOffset; return S_OK; } + STDMETHODIMP ImageGrabber::OnClockStop(MFTIME hnsSystemTime) { (void)hnsSystemTime; return S_OK; } + STDMETHODIMP ImageGrabber::OnClockPause(MFTIME hnsSystemTime) { (void)hnsSystemTime; return S_OK; } + STDMETHODIMP ImageGrabber::OnClockRestart(MFTIME hnsSystemTime) { (void)hnsSystemTime; return S_OK; } + STDMETHODIMP ImageGrabber::OnClockSetRate(MFTIME hnsSystemTime, float flRate) { (void)flRate; (void)hnsSystemTime; return S_OK; } + STDMETHODIMP ImageGrabber::OnSetPresentationClock(IMFPresentationClock* pClock) { (void)pClock; return S_OK; } + STDMETHODIMP ImageGrabber::OnProcessSample(REFGUID guidMajorMediaType, DWORD dwSampleFlags, LONGLONG llSampleTime, LONGLONG llSampleDuration, const BYTE * pSampleBuffer, DWORD dwSampleSize) @@ -1173,6 +1265,16 @@ STDMETHODIMP ImageGrabber::OnProcessSample(REFGUID guidMajorMediaType, DWORD dwS (void)dwSampleFlags; (void)llSampleDuration; (void)dwSampleSize; + + HANDLE tmp[] = {ig_hFinish, ig_hFrameGrabbed, NULL}; + + DWORD status = WaitForMultipleObjects(2, tmp, FALSE, INFINITE); + if (status == WAIT_OBJECT_0) + { + printf("OnProcessFrame called after ig_hFinish event\n"); + return S_OK; + } + if(ig_RIE) { ig_RIFirst->fastCopy(pSampleBuffer); @@ -1183,27 +1285,41 @@ STDMETHODIMP ImageGrabber::OnProcessSample(REFGUID guidMajorMediaType, DWORD dwS ig_RISecond->fastCopy(pSampleBuffer); ig_RIOut = ig_RISecond; } - ig_RIE = !ig_RIE; + + if (ig_Synchronous) + { + SetEvent(ig_hFrameReady); + } + else + { + ig_RIE = !ig_RIE; + } + return S_OK; } + STDMETHODIMP ImageGrabber::OnShutdown() { + SetEvent(ig_hFinish); return S_OK; } + RawImage *ImageGrabber::getRawImage() { return ig_RIOut; } + DWORD WINAPI MainThreadFunction( LPVOID lpParam ) { ImageGrabberThread *pIGT = (ImageGrabberThread *)lpParam; pIGT->run(); return 0; } -HRESULT ImageGrabberThread::CreateInstance(ImageGrabberThread **ppIGT, IMFMediaSource *pSource, unsigned int deviceID) + +HRESULT ImageGrabberThread::CreateInstance(ImageGrabberThread **ppIGT, IMFMediaSource *pSource, unsigned int deviceID, bool synchronious) { DebugPrintOut *DPO = &DebugPrintOut::getInstance(); - *ppIGT = new (std::nothrow) ImageGrabberThread(pSource, deviceID); + *ppIGT = new (std::nothrow) ImageGrabberThread(pSource, deviceID, synchronious); if 
(ppIGT == NULL) { DPO->printOut(L"IMAGEGRABBERTHREAD VIDEODEVICE %i: Memory cannot be allocated\n", deviceID); @@ -1213,10 +1329,14 @@ HRESULT ImageGrabberThread::CreateInstance(ImageGrabberThread **ppIGT, IMFMediaS DPO->printOut(L"IMAGEGRABBERTHREAD VIDEODEVICE %i: Creating of the instance of ImageGrabberThread\n", deviceID); return S_OK; } -ImageGrabberThread::ImageGrabberThread(IMFMediaSource *pSource, unsigned int deviceID): igt_Handle(NULL), igt_stop(false) + +ImageGrabberThread::ImageGrabberThread(IMFMediaSource *pSource, unsigned int deviceID, bool synchronious): + igt_func(NULL), + igt_Handle(NULL), + igt_stop(false) { DebugPrintOut *DPO = &DebugPrintOut::getInstance(); - HRESULT hr = ImageGrabber::CreateInstance(&igt_pImageGrabber, deviceID); + HRESULT hr = ImageGrabber::CreateInstance(&igt_pImageGrabber, deviceID, synchronious); igt_DeviceID = deviceID; if(SUCCEEDED(hr)) { @@ -1235,6 +1355,7 @@ ImageGrabberThread::ImageGrabberThread(IMFMediaSource *pSource, unsigned int dev DPO->printOut(L"IMAGEGRABBERTHREAD VIDEODEVICE %i There is a problem with creation of the instance of the ImageGrabber class\n", deviceID); } } + void ImageGrabberThread::setEmergencyStopEvent(void *userData, void(*func)(int, void *)) { if(func) @@ -1243,12 +1364,16 @@ void ImageGrabberThread::setEmergencyStopEvent(void *userData, void(*func)(int, igt_userData = userData; } } + ImageGrabberThread::~ImageGrabberThread(void) { DebugPrintOut *DPO = &DebugPrintOut::getInstance(); DPO->printOut(L"IMAGEGRABBERTHREAD VIDEODEVICE %i: Destroing ImageGrabberThread\n", igt_DeviceID); + if (igt_Handle) + WaitForSingleObject(igt_Handle, INFINITE); delete igt_pImageGrabber; } + void ImageGrabberThread::stop() { igt_stop = true; @@ -1257,16 +1382,18 @@ void ImageGrabberThread::stop() igt_pImageGrabber->stopGrabbing(); } } + void ImageGrabberThread::start() { igt_Handle = CreateThread( - NULL, // default security attributes - 0, // use default stack size - MainThreadFunction, // thread function name - this, // argument to thread function - 0, // use default creation flags + NULL, // default security attributes + 0, // use default stack size + MainThreadFunction, // thread function name + this, // argument to thread function + 0, // use default creation flags &igt_ThreadIdArray); // returns the thread identifier } + void ImageGrabberThread::run() { DebugPrintOut *DPO = &DebugPrintOut::getInstance(); @@ -1294,10 +1421,12 @@ void ImageGrabberThread::run() else DPO->printOut(L"IMAGEGRABBERTHREAD VIDEODEVICE %i: Finish thread\n", igt_DeviceID); } + ImageGrabber *ImageGrabberThread::getImageGrabber() { return igt_pImageGrabber; } + Media_Foundation::Media_Foundation(void) { HRESULT hr = MFStartup(MF_VERSION); @@ -1307,6 +1436,7 @@ Media_Foundation::Media_Foundation(void) DPO->printOut(L"MEDIA FOUNDATION: It cannot be created!!!\n"); } } + Media_Foundation::~Media_Foundation(void) { HRESULT hr = MFShutdown(); @@ -1316,12 +1446,13 @@ Media_Foundation::~Media_Foundation(void) DPO->printOut(L"MEDIA FOUNDATION: Resources cannot be released\n"); } } + bool Media_Foundation::buildListOfDevices() { HRESULT hr = S_OK; - IMFAttributes *pAttributes = NULL; + ComPtr pAttributes = NULL; CoInitialize(NULL); - hr = MFCreateAttributes(&pAttributes, 1); + hr = MFCreateAttributes(pAttributes.GetAddressOf(), 1); if (SUCCEEDED(hr)) { hr = pAttributes->SetGUID( @@ -1332,40 +1463,46 @@ bool Media_Foundation::buildListOfDevices() if (SUCCEEDED(hr)) { videoDevices *vDs = &videoDevices::getInstance(); - hr = vDs->initDevices(pAttributes); + hr = 
vDs->initDevices(pAttributes.Get()); } else { DebugPrintOut *DPO = &DebugPrintOut::getInstance(); DPO->printOut(L"MEDIA FOUNDATION: The access to the video cameras denied\n"); } - SafeRelease(&pAttributes); + return (SUCCEEDED(hr)); } + Media_Foundation& Media_Foundation::getInstance() { static Media_Foundation instance; return instance; } + RawImage::RawImage(unsigned int size): ri_new(false), ri_pixels(NULL) { ri_size = size; ri_pixels = new unsigned char[size]; memset((void *)ri_pixels,0,ri_size); } + bool RawImage::isNew() { return ri_new; } + unsigned int RawImage::getSize() { return ri_size; } + RawImage::~RawImage(void) { delete []ri_pixels; ri_pixels = NULL; } + long RawImage::CreateInstance(RawImage **ppRImage,unsigned int size) { *ppRImage = new (std::nothrow) RawImage(size); @@ -1375,25 +1512,30 @@ long RawImage::CreateInstance(RawImage **ppRImage,unsigned int size) } return S_OK; } + void RawImage::setCopy(const BYTE * pSampleBuffer) { memcpy(ri_pixels, pSampleBuffer, ri_size); ri_new = true; } + void RawImage::fastCopy(const BYTE * pSampleBuffer) { memcpy(ri_pixels, pSampleBuffer, ri_size); ri_new = true; } + unsigned char * RawImage::getpPixels() { ri_new = false; return ri_pixels; } + videoDevice::videoDevice(void): vd_IsSetuped(false), vd_LockOut(OpenLock), vd_pFriendlyName(NULL), vd_Width(0), vd_Height(0), vd_pSource(NULL), vd_func(NULL), vd_userData(NULL) { } + void videoDevice::setParametrs(CamParametrs parametrs) { if(vd_IsSetuped) @@ -1428,6 +1570,7 @@ void videoDevice::setParametrs(CamParametrs parametrs) } } } + CamParametrs videoDevice::getParametrs() { CamParametrs out; @@ -1472,6 +1615,7 @@ CamParametrs videoDevice::getParametrs() } return out; } + long videoDevice::resetDevice(IMFActivate *pActivate) { HRESULT hr = -1; @@ -1503,6 +1647,7 @@ long videoDevice::resetDevice(IMFActivate *pActivate) } return hr; } + long videoDevice::readInfoOfDevice(IMFActivate *pActivate, unsigned int Num) { HRESULT hr = -1; @@ -1510,6 +1655,7 @@ long videoDevice::readInfoOfDevice(IMFActivate *pActivate, unsigned int Num) hr = resetDevice(pActivate); return hr; } + long videoDevice::checkDevice(IMFAttributes *pAttributes, IMFActivate **pDevice) { HRESULT hr = S_OK; @@ -1568,14 +1714,15 @@ long videoDevice::checkDevice(IMFAttributes *pAttributes, IMFActivate **pDevice) } return hr; } + long videoDevice::initDevice() { HRESULT hr = -1; - IMFAttributes *pAttributes = NULL; - IMFActivate * vd_pActivate= NULL; + ComPtr pAttributes = NULL; + IMFActivate *vd_pActivate = NULL; DebugPrintOut *DPO = &DebugPrintOut::getInstance(); CoInitialize(NULL); - hr = MFCreateAttributes(&pAttributes, 1); + hr = MFCreateAttributes(pAttributes.GetAddressOf(), 1); if (SUCCEEDED(hr)) { hr = pAttributes->SetGUID( @@ -1585,7 +1732,7 @@ long videoDevice::initDevice() } if (SUCCEEDED(hr)) { - hr = checkDevice(pAttributes, &vd_pActivate); + hr = checkDevice(pAttributes.Get(), &vd_pActivate); if (SUCCEEDED(hr) && vd_pActivate) { SafeRelease(&vd_pSource); @@ -1607,9 +1754,10 @@ long videoDevice::initDevice() { DPO->printOut(L"VIDEODEVICE %i: The attribute of video cameras cannot be getting \n", vd_CurrentNumber); } - SafeRelease(&pAttributes); + return hr; } + MediaType videoDevice::getFormat(unsigned int id) { if(id < vd_CurrentFormats.size()) @@ -1713,6 +1861,7 @@ int videoDevice::findType(unsigned int size, unsigned int frameRate) return 0; return VN[0]; } + void videoDevice::buildLibraryofTypes() { unsigned int size; @@ -1722,7 +1871,7 @@ void videoDevice::buildLibraryofTypes() for(; i != 
vd_CurrentFormats.end(); i++) { size = (*i).MF_MT_FRAME_SIZE; - framerate = (*i).MF_MT_FRAME_RATE; + framerate = (*i).MF_MT_FRAME_RATE_NUMERATOR; FrameRateMap FRM = vd_CaptureFormats[size]; SUBTYPEMap STM = FRM[framerate]; String subType((*i).pMF_MT_SUBTYPEName); @@ -1734,45 +1883,45 @@ void videoDevice::buildLibraryofTypes() count++; } } + long videoDevice::setDeviceFormat(IMFMediaSource *pSource, unsigned long dwFormatIndex) { - IMFPresentationDescriptor *pPD = NULL; - IMFStreamDescriptor *pSD = NULL; - IMFMediaTypeHandler *pHandler = NULL; - IMFMediaType *pType = NULL; - HRESULT hr = pSource->CreatePresentationDescriptor(&pPD); + ComPtr pPD = NULL; + ComPtr pSD = NULL; + ComPtr pHandler = NULL; + ComPtr pType = NULL; + HRESULT hr = pSource->CreatePresentationDescriptor(pPD.GetAddressOf()); if (FAILED(hr)) { goto done; } BOOL fSelected; - hr = pPD->GetStreamDescriptorByIndex(0, &fSelected, &pSD); + hr = pPD->GetStreamDescriptorByIndex(0, &fSelected, pSD.GetAddressOf()); if (FAILED(hr)) { goto done; } - hr = pSD->GetMediaTypeHandler(&pHandler); + hr = pSD->GetMediaTypeHandler(pHandler.GetAddressOf()); if (FAILED(hr)) { goto done; } - hr = pHandler->GetMediaTypeByIndex((DWORD)dwFormatIndex, &pType); + hr = pHandler->GetMediaTypeByIndex((DWORD)dwFormatIndex, pType.GetAddressOf()); if (FAILED(hr)) { goto done; } - hr = pHandler->SetCurrentMediaType(pType); + hr = pHandler->SetCurrentMediaType(pType.Get()); + done: - SafeRelease(&pPD); - SafeRelease(&pSD); - SafeRelease(&pHandler); - SafeRelease(&pType); return hr; } + bool videoDevice::isDeviceSetup() { return vd_IsSetuped; } + RawImage * videoDevice::getRawImageOut() { if(!vd_IsSetuped) return NULL; @@ -1785,6 +1934,7 @@ RawImage * videoDevice::getRawImageOut() } return NULL; } + bool videoDevice::isFrameNew() { if(!vd_IsSetuped) return false; @@ -1809,16 +1959,19 @@ bool videoDevice::isFrameNew() } return false; } + bool videoDevice::isDeviceMediaSource() { if(vd_LockOut == MediaSourceLock) return true; return false; } + bool videoDevice::isDeviceRawDataSource() { if(vd_LockOut == RawDataLock) return true; return false; } + bool videoDevice::setupDevice(unsigned int id) { DebugPrintOut *DPO = &DebugPrintOut::getInstance(); @@ -1849,15 +2002,18 @@ bool videoDevice::setupDevice(unsigned int id) return false; } } + bool videoDevice::setupDevice(unsigned int w, unsigned int h, unsigned int idealFramerate) { unsigned int id = findType(w * h, idealFramerate); return setupDevice(id); } + wchar_t *videoDevice::getName() { return vd_pFriendlyName; } + videoDevice::~videoDevice(void) { closeDevice(); @@ -1865,24 +2021,25 @@ videoDevice::~videoDevice(void) if(vd_pFriendlyName) CoTaskMemFree(vd_pFriendlyName); } -long videoDevice::enumerateCaptureFormats(IMFMediaSource *pSource) + +HRESULT videoDevice::enumerateCaptureFormats(IMFMediaSource *pSource) { - IMFPresentationDescriptor *pPD = NULL; - IMFStreamDescriptor *pSD = NULL; - IMFMediaTypeHandler *pHandler = NULL; - IMFMediaType *pType = NULL; - HRESULT hr = pSource->CreatePresentationDescriptor(&pPD); + ComPtr pPD = NULL; + ComPtr pSD = NULL; + ComPtr pHandler = NULL; + ComPtr pType = NULL; + HRESULT hr = pSource->CreatePresentationDescriptor(pPD.GetAddressOf()); if (FAILED(hr)) { goto done; } BOOL fSelected; - hr = pPD->GetStreamDescriptorByIndex(0, &fSelected, &pSD); + hr = pPD->GetStreamDescriptorByIndex(0, &fSelected, pSD.GetAddressOf()); if (FAILED(hr)) { goto done; } - hr = pSD->GetMediaTypeHandler(&pHandler); + hr = pSD->GetMediaTypeHandler(pHandler.GetAddressOf()); if (FAILED(hr)) { goto 
done; @@ -1895,24 +2052,22 @@ long videoDevice::enumerateCaptureFormats(IMFMediaSource *pSource) } for (DWORD i = 0; i < cTypes; i++) { - hr = pHandler->GetMediaTypeByIndex(i, &pType); + hr = pHandler->GetMediaTypeByIndex(i, pType.GetAddressOf()); if (FAILED(hr)) { goto done; } - MediaType MT = FormatReader::Read(pType); + MediaType MT = FormatReader::Read(pType.Get()); vd_CurrentFormats.push_back(MT); - SafeRelease(&pType); } + done: - SafeRelease(&pPD); - SafeRelease(&pSD); - SafeRelease(&pHandler); - SafeRelease(&pType); return hr; } + videoDevices::videoDevices(void): count(0) {} + void videoDevices::clearDevices() { std::vector::iterator i = vds_Devices.begin(); @@ -1920,10 +2075,12 @@ void videoDevices::clearDevices() delete (*i); vds_Devices.clear(); } + videoDevices::~videoDevices(void) { clearDevices(); } + videoDevice * videoDevices::getDevice(unsigned int i) { if(i >= vds_Devices.size()) @@ -1936,6 +2093,7 @@ videoDevice * videoDevices::getDevice(unsigned int i) } return vds_Devices[i]; } + long videoDevices::initDevices(IMFAttributes *pAttributes) { HRESULT hr = S_OK; @@ -1965,15 +2123,18 @@ long videoDevices::initDevices(IMFAttributes *pAttributes) } return hr; } + size_t videoDevices::getCount() { return vds_Devices.size(); } + videoDevices& videoDevices::getInstance() { static videoDevices instance; return instance; } + Parametr::Parametr() { CurrentValue = 0; @@ -1983,6 +2144,7 @@ Parametr::Parametr() Default = 0; Flag = 0; } + MediaType::MediaType() { pMF_MT_AM_FORMAT_TYPEName = NULL; @@ -1990,10 +2152,12 @@ MediaType::MediaType() pMF_MT_SUBTYPEName = NULL; Clear(); } + MediaType::~MediaType() { Clear(); } + void MediaType::Clear() { MF_MT_FRAME_SIZE = 0; @@ -2005,8 +2169,8 @@ void MediaType::Clear() MF_MT_VIDEO_CHROMA_SITING = 0; MF_MT_FIXED_SIZE_SAMPLES = 0; MF_MT_VIDEO_NOMINAL_RANGE = 0; - MF_MT_FRAME_RATE = 0; - MF_MT_FRAME_RATE_low = 0; + MF_MT_FRAME_RATE_NUMERATOR = 0; + MF_MT_FRAME_RATE_DENOMINATOR = 0; MF_MT_PIXEL_ASPECT_RATIO = 0; MF_MT_PIXEL_ASPECT_RATIO_low = 0; MF_MT_ALL_SAMPLES_INDEPENDENT = 0; @@ -2021,6 +2185,7 @@ void MediaType::Clear() memset(&MF_MT_AM_FORMAT_TYPE, 0, sizeof(GUID)); memset(&MF_MT_SUBTYPE, 0, sizeof(GUID)); } + videoInput::videoInput(void): accessToDevices(false) { DebugPrintOut *DPO = &DebugPrintOut::getInstance(); @@ -2029,6 +2194,7 @@ videoInput::videoInput(void): accessToDevices(false) if(!accessToDevices) DPO->printOut(L"INITIALIZATION: Ther is not any suitable video device\n"); } + void videoInput::updateListOfDevices() { DebugPrintOut *DPO = &DebugPrintOut::getInstance(); @@ -2037,11 +2203,13 @@ void videoInput::updateListOfDevices() if(!accessToDevices) DPO->printOut(L"UPDATING: Ther is not any suitable video device\n"); } + videoInput::~videoInput(void) { DebugPrintOut *DPO = &DebugPrintOut::getInstance(); DPO->printOut(L"\n***** CLOSE VIDEOINPUT LIBRARY - 2013 *****\n\n"); } + IMFMediaSource *videoInput::getMediaSource(int deviceID) { DebugPrintOut *DPO = &DebugPrintOut::getInstance(); @@ -2063,6 +2231,7 @@ IMFMediaSource *videoInput::getMediaSource(int deviceID) } return NULL; } + bool videoInput::setupDevice(int deviceID, unsigned int id) { DebugPrintOut *DPO = &DebugPrintOut::getInstance(); @@ -2089,6 +2258,7 @@ bool videoInput::setupDevice(int deviceID, unsigned int id) } return false; } + bool videoInput::setupDevice(int deviceID, unsigned int w, unsigned int h, unsigned int idealFramerate) { DebugPrintOut *DPO = &DebugPrintOut::getInstance(); @@ -2115,6 +2285,7 @@ bool videoInput::setupDevice(int deviceID, unsigned int w, 
unsigned int h, unsig } return false; } + MediaType videoInput::getFormat(int deviceID, unsigned int id) { DebugPrintOut *DPO = &DebugPrintOut::getInstance(); @@ -2136,6 +2307,7 @@ MediaType videoInput::getFormat(int deviceID, unsigned int id) } return MediaType(); } + bool videoInput::isDeviceSetup(int deviceID) { DebugPrintOut *DPO = &DebugPrintOut::getInstance(); @@ -2157,6 +2329,7 @@ bool videoInput::isDeviceSetup(int deviceID) } return false; } + bool videoInput::isDeviceMediaSource(int deviceID) { DebugPrintOut *DPO = &DebugPrintOut::getInstance(); @@ -2178,6 +2351,7 @@ bool videoInput::isDeviceMediaSource(int deviceID) } return false; } + bool videoInput::isDeviceRawDataSource(int deviceID) { DebugPrintOut *DPO = &DebugPrintOut::getInstance(); @@ -2202,6 +2376,7 @@ bool videoInput::isDeviceRawDataSource(int deviceID) } return false; } + bool videoInput::isFrameNew(int deviceID) { DebugPrintOut *DPO = &DebugPrintOut::getInstance(); @@ -2230,6 +2405,7 @@ bool videoInput::isFrameNew(int deviceID) } return false; } + unsigned int videoInput::getCountFormats(int deviceID) { DebugPrintOut *DPO = &DebugPrintOut::getInstance(); @@ -2251,12 +2427,14 @@ unsigned int videoInput::getCountFormats(int deviceID) } return 0; } + void videoInput::closeAllDevices() { videoDevices *VDS = &videoDevices::getInstance(); for(unsigned int i = 0; i < VDS->getCount(); i++) closeDevice(i); } + void videoInput::setParametrs(int deviceID, CamParametrs parametrs) { DebugPrintOut *DPO = &DebugPrintOut::getInstance(); @@ -2277,6 +2455,7 @@ void videoInput::setParametrs(int deviceID, CamParametrs parametrs) DPO->printOut(L"VIDEODEVICE(s): There is not any suitable video device\n"); } } + CamParametrs videoInput::getParametrs(int deviceID) { DebugPrintOut *DPO = &DebugPrintOut::getInstance(); @@ -2299,6 +2478,7 @@ CamParametrs videoInput::getParametrs(int deviceID) } return out; } + void videoInput::closeDevice(int deviceID) { DebugPrintOut *DPO = &DebugPrintOut::getInstance(); @@ -2319,6 +2499,7 @@ void videoInput::closeDevice(int deviceID) DPO->printOut(L"VIDEODEVICE(s): There is not any suitable video device\n"); } } + unsigned int videoInput::getWidth(int deviceID) { DebugPrintOut *DPO = &DebugPrintOut::getInstance(); @@ -2340,6 +2521,7 @@ unsigned int videoInput::getWidth(int deviceID) } return 0; } + unsigned int videoInput::getHeight(int deviceID) { DebugPrintOut *DPO = &DebugPrintOut::getInstance(); @@ -2361,6 +2543,7 @@ unsigned int videoInput::getHeight(int deviceID) } return 0; } + wchar_t *videoInput::getNameVideoDevice(int deviceID) { DebugPrintOut *DPO = &DebugPrintOut::getInstance(); @@ -2382,6 +2565,7 @@ wchar_t *videoInput::getNameVideoDevice(int deviceID) } return L"Empty"; } + unsigned int videoInput::listDevices(bool silent) { DebugPrintOut *DPO = &DebugPrintOut::getInstance(); @@ -2405,20 +2589,24 @@ unsigned int videoInput::listDevices(bool silent) } return out; } + videoInput& videoInput::getInstance() { static videoInput instance; return instance; } + bool videoInput::isDevicesAcceable() { return accessToDevices; } + void videoInput::setVerbose(bool state) { DebugPrintOut *DPO = &DebugPrintOut::getInstance(); DPO->setVerbose(state); } + void videoInput::setEmergencyStopEvent(int deviceID, void *userData, void(*func)(int, void *)) { DebugPrintOut *DPO = &DebugPrintOut::getInstance(); @@ -2442,6 +2630,7 @@ void videoInput::setEmergencyStopEvent(int deviceID, void *userData, void(*func) DPO->printOut(L"VIDEODEVICE(s): There is not any suitable video device\n"); } } + bool 
videoInput::getPixels(int deviceID, unsigned char * dstBuffer, bool flipRedAndBlue, bool flipImage) { bool success = false; @@ -2491,6 +2680,7 @@ bool videoInput::getPixels(int deviceID, unsigned char * dstBuffer, bool flipRed } return success; } + void videoInput::processPixels(unsigned char * src, unsigned char * dst, unsigned int width, unsigned int height, unsigned int bpp, bool bRGB, bool bFlip) { @@ -2553,6 +2743,7 @@ void videoInput::processPixels(unsigned char * src, unsigned char * dst, unsigne } } } + /******* Capturing video from camera via Microsoft Media Foundation **********/ class CvCaptureCAM_MSMF : public CvCapture { @@ -2568,33 +2759,35 @@ public: virtual int getCaptureDomain() { return CV_CAP_MSMF; } // Return the type of the capture object: CV_CAP_VFW, etc... protected: void init(); - int index, width, height,fourcc; - int widthSet, heightSet; + int index, width, height, fourcc; IplImage* frame; videoInput VI; }; + struct SuppressVideoInputMessages { SuppressVideoInputMessages() { videoInput::setVerbose(true); } }; + static SuppressVideoInputMessages do_it; + CvCaptureCAM_MSMF::CvCaptureCAM_MSMF(): index(-1), width(-1), height(-1), fourcc(-1), - widthSet(-1), - heightSet(-1), - frame(0), + frame(NULL), VI(videoInput::getInstance()) { CoInitialize(0); } + CvCaptureCAM_MSMF::~CvCaptureCAM_MSMF() { close(); CoUninitialize(); } + void CvCaptureCAM_MSMF::close() { if( index >= 0 ) @@ -2603,8 +2796,9 @@ void CvCaptureCAM_MSMF::close() index = -1; cvReleaseImage(&frame); } - widthSet = heightSet = width = height = -1; + width = height = -1; } + // Initialize camera input bool CvCaptureCAM_MSMF::open( int _index ) { @@ -2621,10 +2815,14 @@ bool CvCaptureCAM_MSMF::open( int _index ) index = try_index; return true; } + bool CvCaptureCAM_MSMF::grabFrame() { - return true; + while (VI.isDeviceSetup(index) && !VI.isFrameNew(index)) + Sleep(1); + return VI.isDeviceSetup(index); } + IplImage* CvCaptureCAM_MSMF::retrieveFrame(int) { if( !frame || (int)VI.getWidth(index) != frame->width || (int)VI.getHeight(index) != frame->height ) @@ -2637,6 +2835,7 @@ IplImage* CvCaptureCAM_MSMF::retrieveFrame(int) VI.getPixels( index, (uchar*)frame->imageData, false, true ); return frame; } + double CvCaptureCAM_MSMF::getProperty( int property_id ) { // image format proprrties @@ -2646,50 +2845,7 @@ double CvCaptureCAM_MSMF::getProperty( int property_id ) return VI.getWidth(index); case CV_CAP_PROP_FRAME_HEIGHT: return VI.getHeight(index); - case CV_CAP_PROP_FOURCC: - // FIXME: implement method in VideoInput back end - //return VI.getFourcc(index); - ; - case CV_CAP_PROP_FPS: - // FIXME: implement method in VideoInput back end - //return VI.getFPS(index); - ; - } - // video filter properties - switch( property_id ) - { - case CV_CAP_PROP_BRIGHTNESS: - case CV_CAP_PROP_CONTRAST: - case CV_CAP_PROP_HUE: - case CV_CAP_PROP_SATURATION: - case CV_CAP_PROP_SHARPNESS: - case CV_CAP_PROP_GAMMA: - case CV_CAP_PROP_MONOCROME: - case CV_CAP_PROP_WHITE_BALANCE_BLUE_U: - case CV_CAP_PROP_BACKLIGHT: - case CV_CAP_PROP_GAIN: - // FIXME: implement method in VideoInput back end - // if ( VI.getVideoSettingFilter(index, VI.getVideoPropertyFromCV(property_id), min_value, - // max_value, stepping_delta, current_value, flags,defaultValue) ) - // return (double)current_value; - return 0.; - } - // camera properties - switch( property_id ) - { - case CV_CAP_PROP_PAN: - case CV_CAP_PROP_TILT: - case CV_CAP_PROP_ROLL: - case CV_CAP_PROP_ZOOM: - case CV_CAP_PROP_EXPOSURE: - case CV_CAP_PROP_IRIS: - case CV_CAP_PROP_FOCUS: - 
// FIXME: implement method in VideoInput back end - // if (VI.getVideoSettingCamera(index,VI.getCameraPropertyFromCV(property_id),min_value, - // max_value,stepping_delta,current_value,flags,defaultValue) ) return (double)current_value; - return 0.; } - // unknown parameter or value not available return -1; } bool CvCaptureCAM_MSMF::setProperty( int property_id, double value ) @@ -2706,91 +2862,272 @@ bool CvCaptureCAM_MSMF::setProperty( int property_id, double value ) height = cvRound(value); handled = true; break; - case CV_CAP_PROP_FOURCC: - fourcc = (int)(unsigned long)(value); - if ( fourcc == -1 ) { - // following cvCreateVideo usage will pop up caprturepindialog here if fourcc=-1 - // TODO - how to create a capture pin dialog - } - handled = true; - break; - case CV_CAP_PROP_FPS: - // FIXME: implement method in VideoInput back end - // int fps = cvRound(value); - // if (fps != VI.getFPS(index)) - // { - // VI.stopDevice(index); - // VI.setIdealFramerate(index,fps); - // if (widthSet > 0 && heightSet > 0) - // VI.setupDevice(index, widthSet, heightSet); - // else - // VI.setupDevice(index); - // } - // return VI.isDeviceSetup(index); - ; } + if ( handled ) { - // a stream setting if( width > 0 && height > 0 ) { - if( width != (int)VI.getWidth(index) || height != (int)VI.getHeight(index) )//|| fourcc != VI.getFourcc(index) ) - { - // FIXME: implement method in VideoInput back end - // int fps = static_cast(VI.getFPS(index)); - // VI.stopDevice(index); - // VI.setIdealFramerate(index, fps); - // VI.setupDeviceFourcc(index, width, height, fourcc); - } - bool success = VI.isDeviceSetup(index); - if (success) + if( width != (int)VI.getWidth(index) || height != (int)VI.getHeight(index) && VI.isDeviceSetup(index))//|| fourcc != VI.getFourcc(index) ) { - widthSet = width; - heightSet = height; - width = height = fourcc = -1; + VI.closeDevice(index); + VI.setupDevice(index, width, height); } - return success; + return VI.isDeviceSetup(index); } return true; } - // show video/camera filter dialog + + return false; +} + +class CvCaptureFile_MSMF : public CvCapture +{ +public: + CvCaptureFile_MSMF(); + virtual ~CvCaptureFile_MSMF(); + + virtual bool open( const char* filename ); + virtual void close(); + + virtual double getProperty(int); + virtual bool setProperty(int, double); + virtual bool grabFrame(); + virtual IplImage* retrieveFrame(int); + virtual int getCaptureDomain() { return CV_CAP_MSMF; } +protected: + ImageGrabberThread* grabberThread; + IMFMediaSource* videoFileSource; + std::vector captureFormats; + int captureFormatIndex; + IplImage* frame; + bool isOpened; + + HRESULT enumerateCaptureFormats(IMFMediaSource *pSource); + HRESULT getSourceDuration(IMFMediaSource *pSource, MFTIME *pDuration); +}; + +CvCaptureFile_MSMF::CvCaptureFile_MSMF(): + grabberThread(NULL), + videoFileSource(NULL), + captureFormatIndex(0), + frame(NULL), + isOpened(false) +{ + MFStartup(MF_VERSION); +} + +CvCaptureFile_MSMF::~CvCaptureFile_MSMF() +{ + close(); + MFShutdown(); +} + +bool CvCaptureFile_MSMF::open(const char* filename) +{ + if (!filename) + return false; + + wchar_t* unicodeFileName = new wchar_t[strlen(filename)+1]; + MultiByteToWideChar(CP_ACP, 0, filename, -1, unicodeFileName, strlen(filename)+1); + + HRESULT hr = S_OK; + + MF_OBJECT_TYPE ObjectType = MF_OBJECT_INVALID; + + ComPtr pSourceResolver = NULL; + IUnknown* pUnkSource = NULL; + + hr = MFCreateSourceResolver(pSourceResolver.GetAddressOf()); + + if (SUCCEEDED(hr)) + { + hr = pSourceResolver->CreateObjectFromURL( + unicodeFileName, + 
MF_RESOLUTION_MEDIASOURCE, + NULL, // Optional property store. + &ObjectType, + &pUnkSource + ); + } + + // Get the IMFMediaSource from the IUnknown pointer. + if (SUCCEEDED(hr)) + { + hr = pUnkSource->QueryInterface(IID_PPV_ARGS(&videoFileSource)); + } + + SafeRelease(&pUnkSource); + + if (SUCCEEDED(hr)) + { + hr = enumerateCaptureFormats(videoFileSource); + } + + if (SUCCEEDED(hr)) + { + hr = ImageGrabberThread::CreateInstance(&grabberThread, videoFileSource, (unsigned int)-2, true); + } + + if (SUCCEEDED(hr)) + { + grabberThread->start(); + } + + isOpened = SUCCEEDED(hr); + + return isOpened; +} + +void CvCaptureFile_MSMF::close() +{ + if (grabberThread) + { + isOpened = false; + SetEvent(grabberThread->getImageGrabber()->ig_hFinish); + grabberThread->stop(); + delete grabberThread; + } + + if (videoFileSource) + { + videoFileSource->Shutdown(); + } +} + +bool CvCaptureFile_MSMF::setProperty(int property_id, double value) +{ + // image capture properties // FIXME: implement method in VideoInput back end - // if ( property_id == CV_CAP_PROP_SETTINGS ) { - // VI.showSettingsWindow(index); - // return true; - // } - //video Filter properties + (void) property_id; + (void) value; + return false; +} + +double CvCaptureFile_MSMF::getProperty(int property_id) +{ + // image format proprrties switch( property_id ) { - case CV_CAP_PROP_BRIGHTNESS: - case CV_CAP_PROP_CONTRAST: - case CV_CAP_PROP_HUE: - case CV_CAP_PROP_SATURATION: - case CV_CAP_PROP_SHARPNESS: - case CV_CAP_PROP_GAMMA: - case CV_CAP_PROP_MONOCROME: - case CV_CAP_PROP_WHITE_BALANCE_BLUE_U: - case CV_CAP_PROP_BACKLIGHT: - case CV_CAP_PROP_GAIN: - // FIXME: implement method in VideoInput back end - //return VI.setVideoSettingFilter(index,VI.getVideoPropertyFromCV(property_id),(long)value); - ; - } - //camera properties - switch( property_id ) + case CV_CAP_PROP_FRAME_WIDTH: + return captureFormats[captureFormatIndex].width; + case CV_CAP_PROP_FRAME_HEIGHT: + return captureFormats[captureFormatIndex].height; + case CV_CAP_PROP_FRAME_COUNT: + { + MFTIME duration; + getSourceDuration(this->videoFileSource, &duration); + double fps = ((double)captureFormats[captureFormatIndex].MF_MT_FRAME_RATE_NUMERATOR) / + ((double)captureFormats[captureFormatIndex].MF_MT_FRAME_RATE_DENOMINATOR); + return (double)floor(((double)duration/1e7)*fps+0.5); + } + case CV_CAP_PROP_FOURCC: + return captureFormats[captureFormatIndex].MF_MT_SUBTYPE.Data1; + case CV_CAP_PROP_FPS: + return ((double)captureFormats[captureFormatIndex].MF_MT_FRAME_RATE_NUMERATOR) / + ((double)captureFormats[captureFormatIndex].MF_MT_FRAME_RATE_DENOMINATOR); + } + + return -1; +} + +bool CvCaptureFile_MSMF::grabFrame() +{ + DWORD waitResult = (DWORD)-1; + if (isOpened) { - case CV_CAP_PROP_PAN: - case CV_CAP_PROP_TILT: - case CV_CAP_PROP_ROLL: - case CV_CAP_PROP_ZOOM: - case CV_CAP_PROP_EXPOSURE: - case CV_CAP_PROP_IRIS: - case CV_CAP_PROP_FOCUS: - // FIXME: implement method in VideoInput back end - //return VI.setVideoSettingCamera(index,VI.getCameraPropertyFromCV(property_id),(long)value); - ; + SetEvent(grabberThread->getImageGrabber()->ig_hFrameGrabbed); + HANDLE tmp[] = {grabberThread->getImageGrabber()->ig_hFrameReady, grabberThread->getImageGrabber()->ig_hFinish, 0}; + waitResult = WaitForMultipleObjects(2, tmp, FALSE, INFINITE); } - return false; + + return isOpened && grabberThread->getImageGrabber()->getRawImage()->isNew() && (waitResult == WAIT_OBJECT_0); +} + +IplImage* CvCaptureFile_MSMF::retrieveFrame(int) +{ + unsigned int width = 
captureFormats[captureFormatIndex].width; + unsigned int height = captureFormats[captureFormatIndex].height; + unsigned int bytes = 3; + if( !frame || (int)width != frame->width || (int)height != frame->height ) + { + if (frame) + cvReleaseImage( &frame ); + frame = cvCreateImage( cvSize(width,height), 8, 3 ); + } + + RawImage *RIOut = grabberThread->getImageGrabber()->getRawImage(); + unsigned int size = bytes * width * height; + + bool verticalFlip = captureFormats[captureFormatIndex].MF_MT_DEFAULT_STRIDE < 0; + + if(RIOut && size == RIOut->getSize()) + { + videoInput::processPixels(RIOut->getpPixels(), (unsigned char*)frame->imageData, width, + height, bytes, false, verticalFlip); + } + + return frame; } + +HRESULT CvCaptureFile_MSMF::enumerateCaptureFormats(IMFMediaSource *pSource) +{ + ComPtr pPD = NULL; + ComPtr pSD = NULL; + ComPtr pHandler = NULL; + ComPtr pType = NULL; + HRESULT hr = pSource->CreatePresentationDescriptor(pPD.GetAddressOf()); + if (FAILED(hr)) + { + goto done; + } + + BOOL fSelected; + hr = pPD->GetStreamDescriptorByIndex(0, &fSelected, pSD.GetAddressOf()); + if (FAILED(hr)) + { + goto done; + } + hr = pSD->GetMediaTypeHandler(pHandler.GetAddressOf()); + if (FAILED(hr)) + { + goto done; + } + DWORD cTypes = 0; + hr = pHandler->GetMediaTypeCount(&cTypes); + if (FAILED(hr)) + { + goto done; + } + for (DWORD i = 0; i < cTypes; i++) + { + hr = pHandler->GetMediaTypeByIndex(i, pType.GetAddressOf()); + if (FAILED(hr)) + { + goto done; + } + MediaType MT = FormatReader::Read(pType.Get()); + captureFormats.push_back(MT); + } + +done: + return hr; +} + +HRESULT CvCaptureFile_MSMF::getSourceDuration(IMFMediaSource *pSource, MFTIME *pDuration) +{ + *pDuration = 0; + + IMFPresentationDescriptor *pPD = NULL; + + HRESULT hr = pSource->CreatePresentationDescriptor(&pPD); + if (SUCCEEDED(hr)) + { + hr = pPD->GetUINT64(MF_PD_DURATION, (UINT64*)pDuration); + pPD->Release(); + } + return hr; +} + CvCapture* cvCreateCameraCapture_MSMF( int index ) { CvCaptureCAM_MSMF* capture = new CvCaptureCAM_MSMF; @@ -2807,4 +3144,392 @@ CvCapture* cvCreateCameraCapture_MSMF( int index ) delete capture; return 0; } + +CvCapture* cvCreateFileCapture_MSMF (const char* filename) +{ + CvCaptureFile_MSMF* capture = new CvCaptureFile_MSMF; + try + { + if( capture->open(filename) ) + return capture; + else + { + delete capture; + return NULL; + } + } + catch(...) 
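+        // open() may throw; free the partially constructed capture and re-throw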
+ { + delete capture; + throw; + } +} + +// +// +// Media Foundation-based Video Writer +// +// + +class CvVideoWriter_MSMF : public CvVideoWriter +{ +public: + CvVideoWriter_MSMF(); + virtual ~CvVideoWriter_MSMF(); + virtual bool open(const char* filename, int fourcc, + double fps, CvSize frameSize, bool isColor); + virtual void close(); + virtual bool writeFrame(const IplImage* img); + +private: + UINT32 videoWidth; + UINT32 videoHeight; + double fps; + UINT32 bitRate; + UINT32 frameSize; + GUID encodingFormat; + GUID inputFormat; + + DWORD streamIndex; + ComPtr sinkWriter; + + bool initiated; + + LONGLONG rtStart; + UINT64 rtDuration; + + HRESULT InitializeSinkWriter(const char* filename); + static const GUID FourCC2GUID(int fourcc); + HRESULT WriteFrame(DWORD *videoFrameBuffer, const LONGLONG& rtStart, const LONGLONG& rtDuration); +}; + +CvVideoWriter_MSMF::CvVideoWriter_MSMF(): + initiated(false) +{ +} + +CvVideoWriter_MSMF::~CvVideoWriter_MSMF() +{ + close(); +} + +const GUID CvVideoWriter_MSMF::FourCC2GUID(int fourcc) +{ + switch(fourcc) + { + case CV_FOURCC_MACRO('d', 'v', '2', '5'): + return MFVideoFormat_DV25; break; + case CV_FOURCC_MACRO('d', 'v', '5', '0'): + return MFVideoFormat_DV50; break; + case CV_FOURCC_MACRO('d', 'v', 'c', ' '): + return MFVideoFormat_DVC; break; + case CV_FOURCC_MACRO('d', 'v', 'h', '1'): + return MFVideoFormat_DVH1; break; + case CV_FOURCC_MACRO('d', 'v', 'h', 'd'): + return MFVideoFormat_DVHD; break; + case CV_FOURCC_MACRO('d', 'v', 's', 'd'): + return MFVideoFormat_DVSD; break; + case CV_FOURCC_MACRO('d', 'v', 's', 'l'): + return MFVideoFormat_DVSL; break; + case CV_FOURCC_MACRO('H', '2', '6', '3'): + return MFVideoFormat_H263; break; + case CV_FOURCC_MACRO('H', '2', '6', '4'): + return MFVideoFormat_H264; break; + case CV_FOURCC_MACRO('M', '4', 'S', '2'): + return MFVideoFormat_M4S2; break; + case CV_FOURCC_MACRO('M', 'J', 'P', 'G'): + return MFVideoFormat_MJPG; break; + case CV_FOURCC_MACRO('M', 'P', '4', '3'): + return MFVideoFormat_MP43; break; + case CV_FOURCC_MACRO('M', 'P', '4', 'S'): + return MFVideoFormat_MP4S; break; + case CV_FOURCC_MACRO('M', 'P', '4', 'V'): + return MFVideoFormat_MP4V; break; + case CV_FOURCC_MACRO('M', 'P', 'G', '1'): + return MFVideoFormat_MPG1; break; + case CV_FOURCC_MACRO('M', 'S', 'S', '1'): + return MFVideoFormat_MSS1; break; + case CV_FOURCC_MACRO('M', 'S', 'S', '2'): + return MFVideoFormat_MSS2; break; + case CV_FOURCC_MACRO('W', 'M', 'V', '1'): + return MFVideoFormat_WMV1; break; + case CV_FOURCC_MACRO('W', 'M', 'V', '2'): + return MFVideoFormat_WMV2; break; + case CV_FOURCC_MACRO('W', 'M', 'V', '3'): + return MFVideoFormat_WMV3; break; + case CV_FOURCC_MACRO('W', 'V', 'C', '1'): + return MFVideoFormat_WVC1; break; + default: + return MFVideoFormat_H264; + } +} + +bool CvVideoWriter_MSMF::open( const char* filename, int fourcc, + double _fps, CvSize frameSize, bool /*isColor*/ ) +{ + videoWidth = frameSize.width; + videoHeight = frameSize.height; + fps = _fps; + bitRate = (UINT32)fps*videoWidth*videoHeight; // 1-bit per pixel + encodingFormat = FourCC2GUID(fourcc); + inputFormat = MFVideoFormat_RGB32; + + HRESULT hr = CoInitializeEx(NULL, COINIT_APARTMENTTHREADED); + if (SUCCEEDED(hr)) + { + hr = MFStartup(MF_VERSION); + if (SUCCEEDED(hr)) + { + hr = InitializeSinkWriter(filename); + if (SUCCEEDED(hr)) + { + initiated = true; + rtStart = 0; + MFFrameRateToAverageTimePerFrame((UINT32)fps, 1, &rtDuration); + } + } + } + + return SUCCEEDED(hr); +} + +void CvVideoWriter_MSMF::close() +{ + if (!initiated) + { + 
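+        // open() never completed (or close() already ran), so there is nothing to finalize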
return; + } + + initiated = false; + sinkWriter->Finalize(); + MFShutdown(); +} + +bool CvVideoWriter_MSMF::writeFrame(const IplImage* img) +{ + if (!img) + return false; + + int length = img->width * img->height * 4; + DWORD* target = new DWORD[length]; + + for (int rowIdx = 0; rowIdx < img->height; rowIdx++) + { + char* rowStart = img->imageData + rowIdx*img->widthStep; + for (int colIdx = 0; colIdx < img->width; colIdx++) + { + BYTE b = rowStart[colIdx * img->nChannels + 0]; + BYTE g = rowStart[colIdx * img->nChannels + 1]; + BYTE r = rowStart[colIdx * img->nChannels + 2]; + + target[rowIdx*img->width+colIdx] = (r << 16) + (g << 8) + b; + } + } + + // Send frame to the sink writer. + HRESULT hr = WriteFrame(target, rtStart, rtDuration); + if (FAILED(hr)) + { + delete[] target; + return false; + } + rtStart += rtDuration; + + delete[] target; + + return true; +} + +HRESULT CvVideoWriter_MSMF::InitializeSinkWriter(const char* filename) +{ + ComPtr spAttr; + ComPtr mediaTypeOut; + ComPtr mediaTypeIn; + ComPtr spByteStream; + + MFCreateAttributes(&spAttr, 10); + spAttr->SetUINT32(MF_READWRITE_ENABLE_HARDWARE_TRANSFORMS, true); + + wchar_t* unicodeFileName = new wchar_t[strlen(filename)+1]; + MultiByteToWideChar(CP_ACP, 0, filename, -1, unicodeFileName, strlen(filename)+1); + + HRESULT hr = MFCreateSinkWriterFromURL(unicodeFileName, NULL, spAttr.Get(), &sinkWriter); + + delete[] unicodeFileName; + + // Set the output media type. + if (SUCCEEDED(hr)) + { + hr = MFCreateMediaType(&mediaTypeOut); + } + if (SUCCEEDED(hr)) + { + hr = mediaTypeOut->SetGUID(MF_MT_MAJOR_TYPE, MFMediaType_Video); + } + if (SUCCEEDED(hr)) + { + hr = mediaTypeOut->SetGUID(MF_MT_SUBTYPE, encodingFormat); + } + if (SUCCEEDED(hr)) + { + hr = mediaTypeOut->SetUINT32(MF_MT_AVG_BITRATE, bitRate); + } + if (SUCCEEDED(hr)) + { + hr = mediaTypeOut->SetUINT32(MF_MT_INTERLACE_MODE, MFVideoInterlace_Progressive); + } + if (SUCCEEDED(hr)) + { + hr = MFSetAttributeSize(mediaTypeOut.Get(), MF_MT_FRAME_SIZE, videoWidth, videoHeight); + } + if (SUCCEEDED(hr)) + { + hr = MFSetAttributeRatio(mediaTypeOut.Get(), MF_MT_FRAME_RATE, (UINT32)fps, 1); + } + if (SUCCEEDED(hr)) + { + hr = MFSetAttributeRatio(mediaTypeOut.Get(), MF_MT_PIXEL_ASPECT_RATIO, 1, 1); + } + + if (SUCCEEDED(hr)) + { + hr = sinkWriter->AddStream(mediaTypeOut.Get(), &streamIndex); + } + + // Set the input media type. + if (SUCCEEDED(hr)) + { + hr = MFCreateMediaType(&mediaTypeIn); + } + if (SUCCEEDED(hr)) + { + hr = mediaTypeIn->SetGUID(MF_MT_MAJOR_TYPE, MFMediaType_Video); + } + if (SUCCEEDED(hr)) + { + hr = mediaTypeIn->SetGUID(MF_MT_SUBTYPE, inputFormat); + } + if (SUCCEEDED(hr)) + { + hr = mediaTypeIn->SetUINT32(MF_MT_INTERLACE_MODE, MFVideoInterlace_Progressive); + } + if (SUCCEEDED(hr)) + { + hr = MFSetAttributeSize(mediaTypeIn.Get(), MF_MT_FRAME_SIZE, videoWidth, videoHeight); + } + if (SUCCEEDED(hr)) + { + hr = MFSetAttributeRatio(mediaTypeIn.Get(), MF_MT_FRAME_RATE, (UINT32)fps, 1); + } + if (SUCCEEDED(hr)) + { + hr = MFSetAttributeRatio(mediaTypeIn.Get(), MF_MT_PIXEL_ASPECT_RATIO, 1, 1); + } + + if (SUCCEEDED(hr)) + { + hr = sinkWriter->SetInputMediaType(streamIndex, mediaTypeIn.Get(), NULL); + } + + // Tell the sink writer to start accepting data. 
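+    // hr has been threaded through every configuration step above, so BeginWriting()
+    // only runs when both the output and input media types were accepted.
+    // A minimal usage sketch of the finished writer (file name and size are illustrative):
+    //   CvVideoWriter_MSMF writer;
+    //   if (writer.open("out.wmv", CV_FOURCC_MACRO('W','M','V','3'), 30.0, cvSize(640, 480), true))
+    //   {
+    //       writer.writeFrame(img);  // img: 8-bit, 3-channel IplImage of the same size
+    //       writer.close();          // Finalize() flushes the remaining samples
+    //   }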
+ if (SUCCEEDED(hr)) + { + hr = sinkWriter->BeginWriting(); + } + + return hr; +} + +HRESULT CvVideoWriter_MSMF::WriteFrame(DWORD *videoFrameBuffer, const LONGLONG& Start, const LONGLONG& Duration) +{ + ComPtr sample; + ComPtr buffer; + + const LONG cbWidth = 4 * videoWidth; + const DWORD cbBuffer = cbWidth * videoHeight; + + BYTE *pData = NULL; + + // Create a new memory buffer. + HRESULT hr = MFCreateMemoryBuffer(cbBuffer, &buffer); + + // Lock the buffer and copy the video frame to the buffer. + if (SUCCEEDED(hr)) + { + hr = buffer->Lock(&pData, NULL, NULL); + } + + if (SUCCEEDED(hr)) + { +#if defined(_M_ARM) + hr = MFCopyImage( + pData, // Destination buffer. + -cbWidth, // Destination stride. + (BYTE*)videoFrameBuffer, // First row in source image. + cbWidth, // Source stride. + cbWidth, // Image width in bytes. + videoHeight // Image height in pixels. + ); +#else + hr = MFCopyImage( + pData, // Destination buffer. + cbWidth, // Destination stride. + (BYTE*)videoFrameBuffer, // First row in source image. + cbWidth, // Source stride. + cbWidth, // Image width in bytes. + videoHeight // Image height in pixels. + ); +#endif + } + + if (buffer) + { + buffer->Unlock(); + } + + // Set the data length of the buffer. + if (SUCCEEDED(hr)) + { + hr = buffer->SetCurrentLength(cbBuffer); + } + + // Create a media sample and add the buffer to the sample. + if (SUCCEEDED(hr)) + { + hr = MFCreateSample(&sample); + } + if (SUCCEEDED(hr)) + { + hr = sample->AddBuffer(buffer.Get()); + } + + // Set the time stamp and the duration. + if (SUCCEEDED(hr)) + { + hr = sample->SetSampleTime(Start); + } + if (SUCCEEDED(hr)) + { + hr = sample->SetSampleDuration(Duration); + } + + // Send the sample to the Sink Writer. + if (SUCCEEDED(hr)) + { + hr = sinkWriter->WriteSample(streamIndex, sample.Get()); + } + + return hr; +} + +CvVideoWriter* cvCreateVideoWriter_MSMF( const char* filename, int fourcc, + double fps, CvSize frameSize, int isColor ) +{ + CvVideoWriter_MSMF* writer = new CvVideoWriter_MSMF; + if( writer->open( filename, fourcc, fps, frameSize, isColor != 0 )) + return writer; + delete writer; + return NULL; +} + #endif \ No newline at end of file diff --git a/modules/highgui/src/cap_v4l.cpp b/modules/highgui/src/cap_v4l.cpp index 6fc122fd0c..b4461ff25f 100644 --- a/modules/highgui/src/cap_v4l.cpp +++ b/modules/highgui/src/cap_v4l.cpp @@ -14,7 +14,9 @@ It has been tested with the motempl sample program First Patch: August 24, 2004 Travis Wood TravisOCV@tkwood.com For Release: OpenCV-Linux Beta4 opencv-0.9.6 Tested On: LMLBT44 with 8 video inputs -Problems? Post problems/fixes to OpenCV group on groups.yahoo.com +Problems? 
Post your questions at answers.opencv.org, + Report bugs at code.opencv.org, + Submit your fixes at https://github.com/Itseez/opencv/ Patched Comments: TW: The cv cam utils that came with the initial release of OpenCV for LINUX Beta4 diff --git a/modules/highgui/src/cap_vfw.cpp b/modules/highgui/src/cap_vfw.cpp index d419a48912..d845953f8e 100644 --- a/modules/highgui/src/cap_vfw.cpp +++ b/modules/highgui/src/cap_vfw.cpp @@ -613,8 +613,10 @@ bool CvVideoWriter_VFW::open( const char* filename, int _fourcc, double _fps, Cv close(); return false; } + return true; } - return true; + else + return false; } diff --git a/modules/highgui/src/ios_conversions.mm b/modules/highgui/src/ios_conversions.mm new file mode 100644 index 0000000000..7295743c58 --- /dev/null +++ b/modules/highgui/src/ios_conversions.mm @@ -0,0 +1,117 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+// +//M*/ + +#import "opencv2/highgui/cap_ios.h" +#include "precomp.hpp" + +UIImage* MatToUIImage(const cv::Mat& image) { + + NSData *data = [NSData dataWithBytes:image.data + length:image.elemSize()*image.total()]; + + CGColorSpaceRef colorSpace; + + if (image.elemSize() == 1) { + colorSpace = CGColorSpaceCreateDeviceGray(); + } else { + colorSpace = CGColorSpaceCreateDeviceRGB(); + } + + CGDataProviderRef provider = + CGDataProviderCreateWithCFData((__bridge CFDataRef)data); + + // Creating CGImage from cv::Mat + CGImageRef imageRef = CGImageCreate(image.cols, + image.rows, + 8, + 8 * image.elemSize(), + image.step.p[0], + colorSpace, + kCGImageAlphaNone| + kCGBitmapByteOrderDefault, + provider, + NULL, + false, + kCGRenderingIntentDefault + ); + + + // Getting UIImage from CGImage + UIImage *finalImage = [UIImage imageWithCGImage:imageRef]; + CGImageRelease(imageRef); + CGDataProviderRelease(provider); + CGColorSpaceRelease(colorSpace); + + return finalImage; +} + +void UIImageToMat(const UIImage* image, + cv::Mat& m, bool alphaExist) { + CGColorSpaceRef colorSpace = CGImageGetColorSpace(image.CGImage); + CGFloat cols = image.size.width, rows = image.size.height; + CGContextRef contextRef; + CGBitmapInfo bitmapInfo = kCGImageAlphaPremultipliedLast; + if (CGColorSpaceGetModel(colorSpace) == 0) + { + m.create(rows, cols, CV_8UC1); // 8 bits per component, 1 channel + bitmapInfo = kCGImageAlphaNone; + if (!alphaExist) + bitmapInfo = kCGImageAlphaNone; + contextRef = CGBitmapContextCreate(m.data, m.cols, m.rows, 8, + m.step[0], colorSpace, + bitmapInfo); + } + else + { + m.create(rows, cols, CV_8UC4); // 8 bits per component, 4 channels + if (!alphaExist) + bitmapInfo = kCGImageAlphaNoneSkipLast | + kCGBitmapByteOrderDefault; + contextRef = CGBitmapContextCreate(m.data, m.cols, m.rows, 8, + m.step[0], colorSpace, + bitmapInfo); + } + CGContextDrawImage(contextRef, CGRectMake(0, 0, cols, rows), + image.CGImage); + CGContextRelease(contextRef); + CGColorSpaceRelease(colorSpace); +} \ No newline at end of file diff --git a/modules/highgui/src/precomp.hpp b/modules/highgui/src/precomp.hpp index 81ccb32030..d225cb3146 100644 --- a/modules/highgui/src/precomp.hpp +++ b/modules/highgui/src/precomp.hpp @@ -120,6 +120,9 @@ CvVideoWriter* cvCreateVideoWriter_VFW( const char* filename, int fourcc, double fps, CvSize frameSize, int is_color ); CvCapture* cvCreateCameraCapture_DShow( int index ); CvCapture* cvCreateCameraCapture_MSMF( int index ); +CvCapture* cvCreateFileCapture_MSMF (const char* filename); +CvVideoWriter* cvCreateVideoWriter_MSMF( const char* filename, int fourcc, + double fps, CvSize frameSize, int is_color ); CvCapture* cvCreateCameraCapture_OpenNI( int index ); CvCapture* cvCreateFileCapture_OpenNI( const char* filename ); CvCapture* cvCreateCameraCapture_Android( int index ); diff --git a/modules/highgui/src/window_QT.cpp b/modules/highgui/src/window_QT.cpp index 0a4e8df0f7..42fe502d4f 100644 --- a/modules/highgui/src/window_QT.cpp +++ b/modules/highgui/src/window_QT.cpp @@ -38,6 +38,7 @@ //--------------------Google Code 2010 -- Yannick Verdie--------------------// +#include "precomp.hpp" #if defined(HAVE_QT) diff --git a/modules/highgui/test/test_precomp.hpp b/modules/highgui/test/test_precomp.hpp index d904b4cb45..34e9f974bb 100644 --- a/modules/highgui/test/test_precomp.hpp +++ b/modules/highgui/test/test_precomp.hpp @@ -46,7 +46,8 @@ defined(HAVE_QUICKTIME) || \ defined(HAVE_AVFOUNDATION) || \ /*defined(HAVE_OPENNI) || too specialized */ \ - defined(HAVE_FFMPEG) + 
defined(HAVE_FFMPEG) || \ + defined(HAVE_MSMF) # define BUILD_WITH_VIDEO_INPUT_SUPPORT 1 #else # define BUILD_WITH_VIDEO_INPUT_SUPPORT 0 @@ -56,7 +57,8 @@ defined(HAVE_GSTREAMER) || \ defined(HAVE_QUICKTIME) || \ defined(HAVE_AVFOUNDATION) || \ - defined(HAVE_FFMPEG) + defined(HAVE_FFMPEG) || \ + defined(HAVE_MSMF) # define BUILD_WITH_VIDEO_OUTPUT_SUPPORT 1 #else # define BUILD_WITH_VIDEO_OUTPUT_SUPPORT 0 diff --git a/modules/highgui/test/test_video_io.cpp b/modules/highgui/test/test_video_io.cpp index 8738cef3ee..059d1e4fbd 100644 --- a/modules/highgui/test/test_video_io.cpp +++ b/modules/highgui/test/test_video_io.cpp @@ -54,6 +54,35 @@ string fourccToString(int fourcc) return format("%c%c%c%c", fourcc & 255, (fourcc >> 8) & 255, (fourcc >> 16) & 255, (fourcc >> 24) & 255); } +#ifdef HAVE_MSMF +const VideoFormat g_specific_fmt_list[] = +{ + /*VideoFormat("wmv", CV_FOURCC_MACRO('d', 'v', '2', '5')), + VideoFormat("wmv", CV_FOURCC_MACRO('d', 'v', '5', '0')), + VideoFormat("wmv", CV_FOURCC_MACRO('d', 'v', 'c', ' ')), + VideoFormat("wmv", CV_FOURCC_MACRO('d', 'v', 'h', '1')), + VideoFormat("wmv", CV_FOURCC_MACRO('d', 'v', 'h', 'd')), + VideoFormat("wmv", CV_FOURCC_MACRO('d', 'v', 's', 'd')), + VideoFormat("wmv", CV_FOURCC_MACRO('d', 'v', 's', 'l')), + VideoFormat("wmv", CV_FOURCC_MACRO('H', '2', '6', '3')), + VideoFormat("wmv", CV_FOURCC_MACRO('M', '4', 'S', '2')), + VideoFormat("avi", CV_FOURCC_MACRO('M', 'J', 'P', 'G')), + VideoFormat("mp4", CV_FOURCC_MACRO('M', 'P', '4', 'S')), + VideoFormat("mp4", CV_FOURCC_MACRO('M', 'P', '4', 'V')), + VideoFormat("wmv", CV_FOURCC_MACRO('M', 'P', '4', '3')), + VideoFormat("wmv", CV_FOURCC_MACRO('M', 'P', 'G', '1')), + VideoFormat("wmv", CV_FOURCC_MACRO('M', 'S', 'S', '1')), + VideoFormat("wmv", CV_FOURCC_MACRO('M', 'S', 'S', '2')),*/ +#if !defined(_M_ARM) + VideoFormat("wmv", CV_FOURCC_MACRO('W', 'M', 'V', '1')), + VideoFormat("wmv", CV_FOURCC_MACRO('W', 'M', 'V', '2')), +#endif + VideoFormat("wmv", CV_FOURCC_MACRO('W', 'M', 'V', '3')), + VideoFormat("avi", CV_FOURCC_MACRO('H', '2', '6', '4')), + //VideoFormat("wmv", CV_FOURCC_MACRO('W', 'V', 'C', '1')), + VideoFormat() +}; +#else const VideoFormat g_specific_fmt_list[] = { VideoFormat("avi", VideoWriter::fourcc('X', 'V', 'I', 'D')), @@ -63,17 +92,17 @@ const VideoFormat g_specific_fmt_list[] = VideoFormat("mkv", VideoWriter::fourcc('X', 'V', 'I', 'D')), VideoFormat("mkv", VideoWriter::fourcc('M', 'P', 'E', 'G')), VideoFormat("mkv", VideoWriter::fourcc('M', 'J', 'P', 'G')), - VideoFormat("mov", VideoWriter::fourcc('m', 'p', '4', 'v')), VideoFormat() }; +#endif } class CV_HighGuiTest : public cvtest::BaseTest { protected: - void ImageTest(const string& dir); + void ImageTest (const string& dir); void VideoTest (const string& dir, const cvtest::VideoFormat& fmt); void SpecificImageTest (const string& dir); void SpecificVideoTest (const string& dir, const cvtest::VideoFormat& fmt); @@ -242,19 +271,19 @@ void CV_HighGuiTest::VideoTest(const string& dir, const cvtest::VideoFormat& fmt for(;;) { - IplImage * img = cvQueryFrame( cap ); + IplImage* img = cvQueryFrame( cap ); if (!img) break; frames.push_back(cv::cvarrToMat(img, true)); - if (writer == 0) + if (writer == NULL) { writer = cvCreateVideoWriter(tmp_name.c_str(), fmt.fourcc, 24, cvGetSize(img)); - if (writer == 0) + if (writer == NULL) { - ts->printf(ts->LOG, "can't create writer (with fourcc : %d)\n", + ts->printf(ts->LOG, "can't create writer (with fourcc : %s)\n", cvtest::fourccToString(fmt.fourcc).c_str()); cvReleaseCapture( &cap ); 
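+            // writer creation failed for this container/codec pair: report it and fail the test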
ts->set_failed_test_info(ts->FAIL_MISMATCH); @@ -290,15 +319,22 @@ void CV_HighGuiTest::VideoTest(const string& dir, const cvtest::VideoFormat& fmt double psnr = PSNR(img1, img); if (psnr < thresDbell) { - printf("Too low psnr = %gdb\n", psnr); - // imwrite("img.png", img); - // imwrite("img1.png", img1); + ts->printf(ts->LOG, "Too low frame %d psnr = %gdb\n", i, psnr); ts->set_failed_test_info(ts->FAIL_MISMATCH); + + //imwrite("original.png", img); + //imwrite("after_test.png", img1); + //Mat diff; + //absdiff(img, img1, diff); + //imwrite("diff.png", diff); + break; } } + printf("Before saved release for %s\n", tmp_name.c_str()); cvReleaseCapture( &saved ); + printf("After release\n"); ts->printf(ts->LOG, "end test function : ImagesVideo \n"); } diff --git a/modules/imgproc/perf/perf_cvt_color.cpp b/modules/imgproc/perf/perf_cvt_color.cpp index a8932dd736..854ed6075d 100644 --- a/modules/imgproc/perf/perf_cvt_color.cpp +++ b/modules/imgproc/perf/perf_cvt_color.cpp @@ -258,7 +258,7 @@ PERF_TEST_P(Size_CvtMode, cvtColor8u, declare.time(100); declare.in(src, WARMUP_RNG).out(dst); - int runs = sz.width <= 320 ? 70 : 1; + int runs = sz.width <= 320 ? 70 : 5; TEST_CYCLE_MULTIRUN(runs) cvtColor(src, dst, mode, ch.dcn); SANITY_CHECK(dst, 1); diff --git a/modules/imgproc/perf/perf_resize.cpp b/modules/imgproc/perf/perf_resize.cpp index 3e6ecbf1fd..ae2ae5487b 100644 --- a/modules/imgproc/perf/perf_resize.cpp +++ b/modules/imgproc/perf/perf_resize.cpp @@ -85,7 +85,8 @@ PERF_TEST_P(MatInfo_Size_Scale, ResizeAreaFast, declare.in(src, WARMUP_RNG).out(dst); - TEST_CYCLE() resize(src, dst, dst.size(), 0, 0, INTER_AREA); + int runs = 15; + TEST_CYCLE_MULTIRUN(runs) resize(src, dst, dst.size(), 0, 0, INTER_AREA); //difference equal to 1 is allowed because of different possible rounding modes: round-to-nearest vs bankers' rounding SANITY_CHECK(dst, 1); diff --git a/modules/imgproc/perf/perf_threshold.cpp b/modules/imgproc/perf/perf_threshold.cpp index 01fff2e8cc..9ccafd6b54 100644 --- a/modules/imgproc/perf/perf_threshold.cpp +++ b/modules/imgproc/perf/perf_threshold.cpp @@ -51,7 +51,8 @@ PERF_TEST_P(Size_Only, threshold_otsu, testing::Values(TYPICAL_MAT_SIZES)) declare.in(src, WARMUP_RNG).out(dst); - TEST_CYCLE() threshold(src, dst, 0, maxval, THRESH_BINARY|THRESH_OTSU); + int runs = 15; + TEST_CYCLE_MULTIRUN(runs) threshold(src, dst, 0, maxval, THRESH_BINARY|THRESH_OTSU); SANITY_CHECK(dst); } diff --git a/modules/java/android_test/src/org/opencv/test/OpenCVTestRunner.java b/modules/java/android_test/src/org/opencv/test/OpenCVTestRunner.java index 2dc4aead8d..9425e2644c 100644 --- a/modules/java/android_test/src/org/opencv/test/OpenCVTestRunner.java +++ b/modules/java/android_test/src/org/opencv/test/OpenCVTestRunner.java @@ -17,8 +17,6 @@ import android.util.Log; /** * This only class is Android specific. 
- * - * @see OpenCV */ public class OpenCVTestRunner extends InstrumentationTestRunner { diff --git a/modules/java/generator/src/java/android+JavaCameraView.java b/modules/java/generator/src/java/android+JavaCameraView.java index 0dcdad2fb1..3bef2e0c38 100644 --- a/modules/java/generator/src/java/android+JavaCameraView.java +++ b/modules/java/generator/src/java/android+JavaCameraView.java @@ -6,7 +6,6 @@ import android.content.Context; import android.graphics.ImageFormat; import android.graphics.SurfaceTexture; import android.hardware.Camera; -import android.hardware.Camera.CameraInfo; import android.hardware.Camera.PreviewCallback; import android.os.Build; import android.util.AttributeSet; diff --git a/modules/java/generator/src/java/android+OpenCVLoader.java b/modules/java/generator/src/java/android+OpenCVLoader.java index fb05b826ca..a76471eac9 100644 --- a/modules/java/generator/src/java/android+OpenCVLoader.java +++ b/modules/java/generator/src/java/android+OpenCVLoader.java @@ -27,6 +27,11 @@ public class OpenCVLoader */ public static final String OPENCV_VERSION_2_4_5 = "2.4.5"; + /** + * OpenCV Library version 2.4.6. + */ + public static final String OPENCV_VERSION_2_4_6 = "2.4.6"; + /** * Loads and initializes OpenCV library from current application package. Roughly, it's an analog of system.loadLibrary("opencv_java"). diff --git a/modules/java/generator/src/java/highgui+VideoCapture.java b/modules/java/generator/src/java/highgui+VideoCapture.java index b8569bb9c0..6f3b03540d 100644 --- a/modules/java/generator/src/java/highgui+VideoCapture.java +++ b/modules/java/generator/src/java/highgui+VideoCapture.java @@ -56,7 +56,7 @@ public class VideoCapture { * * CV_CAP_PROP_FRAME_WIDTH width of the frames in the video stream. * * CV_CAP_PROP_FRAME_HEIGHT height of the frames in the video stream. * - * @see org.opencv.highgui.VideoCapture.get + * @see org.opencv.highgui.VideoCapture.get */ public double get(int propId) { @@ -178,7 +178,7 @@ public class VideoCapture { * * CV_CAP_PROP_FRAME_HEIGHT height of the frames in the video stream. * @param value value of the property. * - * @see org.opencv.highgui.VideoCapture.set + * @see org.opencv.highgui.VideoCapture.set */ public boolean set(int propId, double value) { diff --git a/modules/nonfree/perf/perf_main.cpp b/modules/nonfree/perf/perf_main.cpp index de1242149e..d5f4a1a512 100644 --- a/modules/nonfree/perf/perf_main.cpp +++ b/modules/nonfree/perf/perf_main.cpp @@ -1,4 +1,11 @@ #include "perf_precomp.hpp" #include "opencv2/ts/gpu_perf.hpp" -CV_PERF_TEST_MAIN(nonfree, perf::printCudaInfo()) +static const char * impls[] = { +#ifdef HAVE_CUDA + "cuda", +#endif + "plain" +}; + +CV_PERF_TEST_MAIN_WITH_IMPLS(nonfree, impls, perf::printCudaInfo()) diff --git a/modules/ocl/include/opencv2/ocl.hpp b/modules/ocl/include/opencv2/ocl.hpp index a59aae1a5e..3da935275e 100644 --- a/modules/ocl/include/opencv2/ocl.hpp +++ b/modules/ocl/include/opencv2/ocl.hpp @@ -180,8 +180,8 @@ namespace cv //! Enable or disable OpenCL program binary caching onto local disk // After a program (*.cl files in opencl/ folder) is built at runtime, we allow the - // compiled OpenCL program to be cached to the path automatically as "path/*.clb" - // binary file, which will be reused when the OpenCV executable is started again. + // compiled OpenCL program to be cached to the path automatically as "path/*.clb" + // binary file, which will be reused when the OpenCV executable is started again. 
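+        // A usage sketch (the path is illustrative): refresh the cache once, e.g. after a
+        // driver update, and keep the generated *.clb files in a dedicated directory:
+        //     cv::ocl::setBinaryDiskCache(cv::ocl::CACHE_UPDATE, "./ocl_cache/");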
// // Caching mode is controlled by the following enums // Notes @@ -198,7 +198,7 @@ namespace cv }; CV_EXPORTS void setBinaryDiskCache(int mode = CACHE_RELEASE, cv::String path = "./"); - //! set where binary cache to be saved to + //! set where binary cache to be saved to CV_EXPORTS void setBinpath(const char *path); class CV_EXPORTS oclMatExpr; @@ -513,25 +513,10 @@ namespace cv CV_EXPORTS void calcHist(const oclMat &mat_src, oclMat &mat_hist); //! only 8UC1 and 256 bins is supported now CV_EXPORTS void equalizeHist(const oclMat &mat_src, oclMat &mat_dst); - - //! only 8UC1 is supported now - class CV_EXPORTS CLAHE - { - public: - virtual void apply(const oclMat &src, oclMat &dst) = 0; - virtual void setClipLimit(double clipLimit) = 0; - virtual double getClipLimit() const = 0; - - virtual void setTilesGridSize(Size tileGridSize) = 0; - virtual Size getTilesGridSize() const = 0; - - virtual void collectGarbage() = 0; + //! only 8UC1 is supported now + CV_EXPORTS Ptr createCLAHE(double clipLimit = 40.0, Size tileGridSize = Size(8, 8)); - virtual ~CLAHE() {} - }; - CV_EXPORTS Ptr createCLAHE(double clipLimit = 40.0, Size tileGridSize = Size(8, 8)); - //! bilateralFilter // supports 8UC1 8UC4 CV_EXPORTS void bilateralFilter(const oclMat& src, oclMat& dst, int d, double sigmaColor, double sigmaSpave, int borderType=BORDER_DEFAULT); @@ -1554,6 +1539,45 @@ namespace cv bool isDeviceArch11_; }; + + class CV_EXPORTS FarnebackOpticalFlow + { + public: + FarnebackOpticalFlow(); + + int numLevels; + double pyrScale; + bool fastPyramids; + int winSize; + int numIters; + int polyN; + double polySigma; + int flags; + + void operator ()(const oclMat &frame0, const oclMat &frame1, oclMat &flowx, oclMat &flowy); + + void releaseMemory(); + + private: + void prepareGaussian( + int n, double sigma, float *g, float *xg, float *xxg, + double &ig11, double &ig03, double &ig33, double &ig55); + + void setPolynomialExpansionConsts(int n, double sigma); + + void updateFlow_boxFilter( + const oclMat& R0, const oclMat& R1, oclMat& flowx, oclMat &flowy, + oclMat& M, oclMat &bufM, int blockSize, bool updateMatrices); + + void updateFlow_gaussianBlur( + const oclMat& R0, const oclMat& R1, oclMat& flowx, oclMat& flowy, + oclMat& M, oclMat &bufM, int blockSize, bool updateMatrices); + + oclMat frames_[2]; + oclMat pyrLevel_[2], M_, bufM_, R_[2], blurredFrame_[2]; + std::vector pyramid0_, pyramid1_; + }; + //////////////// build warping maps //////////////////// //! 
builds plane warping maps CV_EXPORTS void buildWarpPlaneMaps(Size src_size, Rect dst_roi, const Mat &K, const Mat &R, const Mat &T, float scale, oclMat &map_x, oclMat &map_y); diff --git a/modules/ocl/perf/main.cpp b/modules/ocl/perf/main.cpp index 8b0a406462..bbc6716041 100644 --- a/modules/ocl/perf/main.cpp +++ b/modules/ocl/perf/main.cpp @@ -42,43 +42,31 @@ #include "precomp.hpp" -int main(int argc, const char *argv[]) +static int cvErrorCallback(int /*status*/, const char * /*func_name*/, + const char *err_msg, const char * /*file_name*/, + int /*line*/, void * /*userdata*/) { - vector oclinfo; - int num_devices = getDevice(oclinfo); - - if (num_devices < 1) - { - cerr << "no device found\n"; - return -1; - } - // set this to overwrite binary cache every time the test starts - ocl::setBinaryDiskCache(ocl::CACHE_UPDATE); - - int devidx = 0; - - for (size_t i = 0; i < oclinfo.size(); i++) - { - for (size_t j = 0; j < oclinfo[i].DeviceName.size(); j++) - { - printf("device %d: %s\n", devidx++, oclinfo[i].DeviceName[j].c_str()); - } - } + TestSystem::instance().printError(err_msg); + return 0; +} +int main(int argc, const char *argv[]) +{ const char *keys = "{ h help | false | print help message }" "{ f filter | | filter for test }" "{ w workdir | | set working directory }" "{ l list | false | show all tests }" "{ d device | 0 | device id }" + "{ c cpu_ocl | false | use cpu as ocl device}" "{ i iters | 10 | iteration count }" "{ m warmup | 1 | gpu warm up iteration count}" - "{ t xtop | 1.1 | xfactor top boundary}" - "{ b xbottom | 0.9 | xfactor bottom boundary}" + "{ t xtop | 1.1 | xfactor top boundary}" + "{ b xbottom | 0.9 | xfactor bottom boundary}" "{ v verify | false | only run gpu once to verify if problems occur}"; + redirectError(cvErrorCallback); CommandLineParser cmd(argc, argv, keys); - if (cmd.has("help")) { cout << "Avaible options:" << endl; @@ -86,14 +74,40 @@ int main(int argc, const char *argv[]) return 0; } - int device = cmd.get("device"); + // get ocl devices + bool use_cpu = cmd.get("c"); + vector oclinfo; + int num_devices = 0; + if(use_cpu) + num_devices = getDevice(oclinfo, ocl::CVCL_DEVICE_TYPE_CPU); + else + num_devices = getDevice(oclinfo); + if (num_devices < 1) + { + cerr << "no device found\n"; + return -1; + } + // show device info + int devidx = 0; + for (size_t i = 0; i < oclinfo.size(); i++) + { + for (size_t j = 0; j < oclinfo[i].DeviceName.size(); j++) + { + cout << "device " << devidx++ << ": " << oclinfo[i].DeviceName[j] << endl; + } + } + + int device = cmd.get("device"); if (device < 0 || device >= num_devices) { cerr << "Invalid device ID" << endl; return -1; } + // set this to overwrite binary cache every time the test starts + ocl::setBinaryDiskCache(ocl::CACHE_UPDATE); + if (cmd.get("verify")) { TestSystem::instance().setNumIters(1); @@ -102,7 +116,6 @@ int main(int argc, const char *argv[]) } devidx = 0; - for (size_t i = 0; i < oclinfo.size(); i++) { for (size_t j = 0; j < oclinfo[i].DeviceName.size(); j++, devidx++) @@ -111,7 +124,7 @@ int main(int argc, const char *argv[]) { ocl::setDevice(oclinfo[i], (int)j); TestSystem::instance().setRecordName(oclinfo[i].DeviceName[j]); - printf("\nuse %d: %s\n", devidx, oclinfo[i].DeviceName[j].c_str()); + cout << "use " << devidx << ": " < clahe = cv::createCLAHE(clipLimit); - cv::Ptr d_clahe = cv::ocl::createCLAHE(clipLimit); + cv::Ptr clahe = cv::createCLAHE(clipLimit); + cv::Ptr d_clahe = cv::ocl::createCLAHE(clipLimit); for (int size = Min_Size; size <= Max_Size; size *= Multiple) { diff --git 
a/modules/ocl/perf/perf_opticalflow.cpp b/modules/ocl/perf/perf_opticalflow.cpp index 97283b206c..936d7a77fc 100644 --- a/modules/ocl/perf/perf_opticalflow.cpp +++ b/modules/ocl/perf/perf_opticalflow.cpp @@ -136,11 +136,13 @@ PERFTEST(PyrLKOpticalFlow) size_t mismatch = 0; for (int i = 0; i < (int)nextPts.size(); ++i) { - if(status[i] != ocl_status.at(0, i)){ + if(status[i] != ocl_status.at(0, i)) + { mismatch++; continue; } - if(status[i]){ + if(status[i]) + { Point2f gpu_rst = ocl_nextPts.at(0, i); Point2f cpu_rst = nextPts[i]; if(fabs(gpu_rst.x - cpu_rst.x) >= 1. || fabs(gpu_rst.y - cpu_rst.y) >= 1.) @@ -193,24 +195,24 @@ PERFTEST(tvl1flow) WARMUP_ON; d_alg(d0, d1, d_flowx, d_flowy); WARMUP_OFF; -/* - double diff1 = 0.0, diff2 = 0.0; - if(ExceptedMatSimilar(gold[0], cv::Mat(d_flowx), 3e-3, diff1) == 1 - &&ExceptedMatSimilar(gold[1], cv::Mat(d_flowy), 3e-3, diff2) == 1) - TestSystem::instance().setAccurate(1); - else - TestSystem::instance().setAccurate(0); + /* + double diff1 = 0.0, diff2 = 0.0; + if(ExceptedMatSimilar(gold[0], cv::Mat(d_flowx), 3e-3, diff1) == 1 + &&ExceptedMatSimilar(gold[1], cv::Mat(d_flowy), 3e-3, diff2) == 1) + TestSystem::instance().setAccurate(1); + else + TestSystem::instance().setAccurate(0); - TestSystem::instance().setDiff(diff1); - TestSystem::instance().setDiff(diff2); -*/ + TestSystem::instance().setDiff(diff1); + TestSystem::instance().setDiff(diff2); + */ GPU_ON; d_alg(d0, d1, d_flowx, d_flowy); d_alg.collectGarbage(); GPU_OFF; - + cv::Mat flowx, flowy; @@ -225,4 +227,130 @@ PERFTEST(tvl1flow) TestSystem::instance().ExceptedMatSimilar(gold[0], flowx, 3e-3); TestSystem::instance().ExceptedMatSimilar(gold[1], flowy, 3e-3); -} \ No newline at end of file +} + +///////////// FarnebackOpticalFlow //////////////////////// +PERFTEST(FarnebackOpticalFlow) +{ + cv::Mat frame0 = imread("rubberwhale1.png", cv::IMREAD_GRAYSCALE); + ASSERT_FALSE(frame0.empty()); + + cv::Mat frame1 = imread("rubberwhale2.png", cv::IMREAD_GRAYSCALE); + ASSERT_FALSE(frame1.empty()); + + cv::ocl::oclMat d_frame0(frame0), d_frame1(frame1); + + int polyNs[2] = { 5, 7 }; + double polySigmas[2] = { 1.1, 1.5 }; + int farneFlags[2] = { 0, cv::OPTFLOW_FARNEBACK_GAUSSIAN }; + bool UseInitFlows[2] = { false, true }; + double pyrScale = 0.5; + + string farneFlagStrs[2] = { "BoxFilter", "GaussianBlur" }; + string useInitFlowStrs[2] = { "", "UseInitFlow" }; + + for ( int i = 0; i < 2; ++i) + { + int polyN = polyNs[i]; + double polySigma = polySigmas[i]; + + for ( int j = 0; j < 2; ++j) + { + int flags = farneFlags[j]; + + for ( int k = 0; k < 2; ++k) + { + bool useInitFlow = UseInitFlows[k]; + SUBTEST << "polyN(" << polyN << "); " << farneFlagStrs[j] << "; " << useInitFlowStrs[k]; + + cv::ocl::FarnebackOpticalFlow farn; + farn.pyrScale = pyrScale; + farn.polyN = polyN; + farn.polySigma = polySigma; + farn.flags = flags; + + cv::ocl::oclMat d_flowx, d_flowy; + cv::Mat flow, flowBuf, flowxBuf, flowyBuf; + + WARMUP_ON; + farn(d_frame0, d_frame1, d_flowx, d_flowy); + + if (useInitFlow) + { + cv::Mat flowxy[] = {cv::Mat(d_flowx), cv::Mat(d_flowy)}; + cv::merge(flowxy, 2, flow); + flow.copyTo(flowBuf); + flowxy[0].copyTo(flowxBuf); + flowxy[1].copyTo(flowyBuf); + + farn.flags |= cv::OPTFLOW_USE_INITIAL_FLOW; + farn(d_frame0, d_frame1, d_flowx, d_flowy); + } + WARMUP_OFF; + + cv::calcOpticalFlowFarneback( + frame0, frame1, flow, farn.pyrScale, farn.numLevels, farn.winSize, + farn.numIters, farn.polyN, farn.polySigma, farn.flags); + + std::vector flowxy; + cv::split(flow, flowxy); + + Mat md_flowx = 
cv::Mat(d_flowx); + Mat md_flowy = cv::Mat(d_flowy); + TestSystem::instance().ExceptedMatSimilar(flowxy[0], md_flowx, 0.1); + TestSystem::instance().ExceptedMatSimilar(flowxy[1], md_flowy, 0.1); + + if (useInitFlow) + { + cv::Mat flowx, flowy; + farn.flags = (flags | cv::OPTFLOW_USE_INITIAL_FLOW); + + CPU_ON; + cv::calcOpticalFlowFarneback( + frame0, frame1, flowBuf, farn.pyrScale, farn.numLevels, farn.winSize, + farn.numIters, farn.polyN, farn.polySigma, farn.flags); + CPU_OFF; + + GPU_ON; + farn(d_frame0, d_frame1, d_flowx, d_flowy); + GPU_OFF; + + GPU_FULL_ON; + d_frame0.upload(frame0); + d_frame1.upload(frame1); + d_flowx.upload(flowxBuf); + d_flowy.upload(flowyBuf); + farn(d_frame0, d_frame1, d_flowx, d_flowy); + d_flowx.download(flowx); + d_flowy.download(flowy); + GPU_FULL_OFF; + } + else + { + cv::Mat flow, flowx, flowy; + cv::ocl::oclMat d_flowx, d_flowy; + + farn.flags = flags; + + CPU_ON; + cv::calcOpticalFlowFarneback( + frame0, frame1, flow, farn.pyrScale, farn.numLevels, farn.winSize, + farn.numIters, farn.polyN, farn.polySigma, farn.flags); + CPU_OFF; + + GPU_ON; + farn(d_frame0, d_frame1, d_flowx, d_flowy); + GPU_OFF; + + GPU_FULL_ON; + d_frame0.upload(frame0); + d_frame1.upload(frame1); + farn(d_frame0, d_frame1, d_flowx, d_flowy); + d_flowx.download(flowx); + d_flowy.download(flowy); + GPU_FULL_OFF; + } + } + } + } +} diff --git a/modules/ocl/perf/precomp.cpp b/modules/ocl/perf/precomp.cpp index 9601cda58d..daeaad6dbc 100644 --- a/modules/ocl/perf/precomp.cpp +++ b/modules/ocl/perf/precomp.cpp @@ -42,7 +42,9 @@ #include "precomp.hpp" #if GTEST_OS_WINDOWS +#ifndef NOMINMAX #define NOMINMAX +#endif # include #endif @@ -278,7 +280,7 @@ enum GTestColor { }; #if GTEST_OS_WINDOWS&&!GTEST_OS_WINDOWS_MOBILE // Returns the character attribute for the given color. 
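+// The helper below is presumably marked static to give it internal linkage, so it no
+// longer triggers missing-declaration warnings or clashes with gtest's identically named helper.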
-WORD GetColorAttribute(GTestColor color) { +static WORD GetColorAttribute(GTestColor color) { switch (color) { case COLOR_RED: return FOREGROUND_RED; case COLOR_GREEN: return FOREGROUND_GREEN; diff --git a/modules/ocl/src/haar.cpp b/modules/ocl/src/haar.cpp index 8fb69567af..6f028317d3 100644 --- a/modules/ocl/src/haar.cpp +++ b/modules/ocl/src/haar.cpp @@ -141,7 +141,7 @@ typedef struct int imgoff; float factor; } detect_piramid_info; -#ifdef WIN32 +#ifdef _MSC_VER #define _ALIGNED_ON(_ALIGNMENT) __declspec(align(_ALIGNMENT)) typedef _ALIGNED_ON(128) struct GpuHidHaarTreeNode diff --git a/modules/ocl/src/hog.cpp b/modules/ocl/src/hog.cpp index ff8f09157b..dc799d0419 100644 --- a/modules/ocl/src/hog.cpp +++ b/modules/ocl/src/hog.cpp @@ -1812,8 +1812,14 @@ void cv::ocl::device::hog::normalize_hists(int nbins, openCLExecuteKernel2(clCxt, &objdetect_hog, kernelName, globalThreads, localThreads, args, -1, -1, "-D CPU"); else + { + cl_kernel kernel = openCLGetKernelFromSource(clCxt, &objdetect_hog, kernelName); + int wave_size = queryDeviceInfo(kernel); + char opt[32] = {0}; + sprintf(opt, "-D WAVE_SIZE=%d", wave_size); openCLExecuteKernel2(clCxt, &objdetect_hog, kernelName, globalThreads, - localThreads, args, -1, -1); + localThreads, args, -1, -1, opt); + } } void cv::ocl::device::hog::classify_hists(int win_height, int win_width, @@ -1875,8 +1881,14 @@ void cv::ocl::device::hog::classify_hists(int win_height, int win_width, openCLExecuteKernel2(clCxt, &objdetect_hog, kernelName, globalThreads, localThreads, args, -1, -1, "-D CPU"); else + { + cl_kernel kernel = openCLGetKernelFromSource(clCxt, &objdetect_hog, kernelName); + int wave_size = queryDeviceInfo(kernel); + char opt[32] = {0}; + sprintf(opt, "-D WAVE_SIZE=%d", wave_size); openCLExecuteKernel2(clCxt, &objdetect_hog, kernelName, globalThreads, - localThreads, args, -1, -1); + localThreads, args, -1, -1, opt); + } } void cv::ocl::device::hog::extract_descrs_by_rows(int win_height, int win_width, diff --git a/modules/ocl/src/imgproc.cpp b/modules/ocl/src/imgproc.cpp index 3366cf7261..143e0e8a91 100644 --- a/modules/ocl/src/imgproc.cpp +++ b/modules/ocl/src/imgproc.cpp @@ -1585,14 +1585,14 @@ namespace cv namespace { - class CLAHE_Impl : public cv::ocl::CLAHE + class CLAHE_Impl : public cv::CLAHE { public: CLAHE_Impl(double clipLimit = 40.0, int tilesX = 8, int tilesY = 8); cv::AlgorithmInfo* info() const; - void apply(const oclMat &src, oclMat &dst); + void apply(cv::InputArray src, cv::OutputArray dst); void setClipLimit(double clipLimit); double getClipLimit() const; @@ -1610,14 +1610,19 @@ namespace cv oclMat srcExt_; oclMat lut_; }; - CLAHE_Impl::CLAHE_Impl(double clipLimit, int tilesX, int tilesY) : clipLimit_(clipLimit), tilesX_(tilesX), tilesY_(tilesY) { } - void CLAHE_Impl::apply(const oclMat &src, oclMat &dst) + CV_INIT_ALGORITHM(CLAHE_Impl, "CLAHE_OCL", + obj.info()->addParam(obj, "clipLimit", obj.clipLimit_); + obj.info()->addParam(obj, "tilesX", obj.tilesX_); + obj.info()->addParam(obj, "tilesY", obj.tilesY_)) + void CLAHE_Impl::apply(cv::InputArray src_raw, cv::OutputArray dst_raw) { + oclMat& src = getOclMatRef(src_raw); + oclMat& dst = getOclMatRef(dst_raw); CV_Assert( src.type() == CV_8UC1 ); dst.create( src.size(), src.type() ); @@ -1685,7 +1690,7 @@ namespace cv } } - cv::Ptr createCLAHE(double clipLimit, cv::Size tileGridSize) + cv::Ptr createCLAHE(double clipLimit, cv::Size tileGridSize) { return new CLAHE_Impl(clipLimit, tileGridSize.width, tileGridSize.height); } diff --git a/modules/ocl/src/initialization.cpp 
b/modules/ocl/src/initialization.cpp index 5d02423a21..7c26dab8b8 100644 --- a/modules/ocl/src/initialization.cpp +++ b/modules/ocl/src/initialization.cpp @@ -120,9 +120,6 @@ namespace cv codeCache.clear(); cacheSize = 0; } - - // not to be exported to dynamic lib - void setBinaryDiskCacheImpl(int mode, String path, Info::Impl * impl); struct Info::Impl { cl_platform_id oclplatform; @@ -141,9 +138,6 @@ namespace cv char extra_options[512]; int double_support; int unified_memory; //1 means integrated GPU, otherwise this value is 0 - bool enable_disk_cache; - bool update_disk_cache; - String binpath; int refcounter; Impl(); @@ -171,6 +165,16 @@ namespace cv void releaseResources(); }; + // global variables to hold binary cache properties + static int enable_disk_cache = +#ifdef _DEBUG + false; +#else + true; +#endif + static int update_disk_cache = false; + static String binpath = ""; + Info::Impl::Impl() :oclplatform(0), oclcontext(0), @@ -181,13 +185,9 @@ namespace cv maxComputeUnits(0), double_support(0), unified_memory(0), - enable_disk_cache(false), - update_disk_cache(false), - binpath("./"), refcounter(1) { memset(extra_options, 0, 512); - setBinaryDiskCacheImpl(CACHE_RELEASE, String("./"), this); } void Info::Impl::releaseResources() @@ -196,7 +196,8 @@ namespace cv if(clCmdQueue) { - openCLSafeCall(clReleaseCommandQueue(clCmdQueue)); + //temporarily disable command queue release as it causes program hang at exit + //openCLSafeCall(clReleaseCommandQueue(clCmdQueue)); clCmdQueue = 0; } @@ -508,29 +509,30 @@ namespace cv return openCLGetKernelFromSource(clCxt, source, kernelName, NULL); } - void setBinaryDiskCacheImpl(int mode, String path, Info::Impl * impl) + void setBinaryDiskCache(int mode, String path) { - impl->update_disk_cache = (mode & CACHE_UPDATE) == CACHE_UPDATE; - impl->enable_disk_cache = -#ifdef _DEBUG + if(mode == CACHE_NONE) + { + update_disk_cache = 0; + enable_disk_cache = 0; + return; + } + update_disk_cache |= (mode & CACHE_UPDATE) == CACHE_UPDATE; + enable_disk_cache |= +#ifdef _DEBUG (mode & CACHE_DEBUG) == CACHE_DEBUG; #else (mode & CACHE_RELEASE) == CACHE_RELEASE; #endif - if(impl->enable_disk_cache && !path.empty()) + if(enable_disk_cache && !path.empty()) { - impl->binpath = path; + binpath = path; } } - void setBinaryDiskCache(int mode, cv::String path) - { - setBinaryDiskCacheImpl(mode, path, Context::getContext()->impl); - } void setBinpath(const char *path) { - Context *clcxt = Context::getContext(); - clcxt->impl->binpath = path; + binpath = path; } int savetofile(const Context*, cl_program &program, const char *fileName) @@ -598,15 +600,15 @@ namespace cv strcat(all_build_options, build_options); if(all_build_options != NULL) { - filename = clCxt->impl->binpath + kernelName + "_" + clCxt->impl->devName[clCxt->impl->devnum] + all_build_options + ".clb"; + filename = binpath + kernelName + "_" + clCxt->impl->devName[clCxt->impl->devnum] + all_build_options + ".clb"; } else { - filename = clCxt->impl->binpath + kernelName + "_" + clCxt->impl->devName[clCxt->impl->devnum] + ".clb"; + filename = binpath + kernelName + "_" + clCxt->impl->devName[clCxt->impl->devnum] + ".clb"; } - FILE *fp = clCxt->impl->enable_disk_cache ? fopen(filename.c_str(), "rb") : NULL; - if(fp == NULL || clCxt->impl->update_disk_cache) + FILE *fp = enable_disk_cache ? 
fopen(filename.c_str(), "rb") : NULL; + if(fp == NULL || update_disk_cache) { if(fp != NULL) fclose(fp); @@ -615,7 +617,7 @@ namespace cv clCxt->impl->oclcontext, 1, source, NULL, &status); openCLVerifyCall(status); status = clBuildProgram(program, 1, &(clCxt->impl->devices[clCxt->impl->devnum]), all_build_options, NULL, NULL); - if(status == CL_SUCCESS && clCxt->impl->enable_disk_cache) + if(status == CL_SUCCESS && enable_disk_cache) savetofile(clCxt, program, filename.c_str()); } else @@ -952,8 +954,8 @@ namespace cv bool initialized() { - return *((volatile int*)&Context::val) != 0 && - Context::clCxt->impl->clCmdQueue != NULL&& + return *((volatile int*)&Context::val) != 0 && + Context::clCxt->impl->clCmdQueue != NULL&& Context::clCxt->impl->oclcontext != NULL; } @@ -1087,26 +1089,3 @@ namespace cv }//namespace ocl }//namespace cv - -#if defined BUILD_SHARED_LIBS && defined CVAPI_EXPORTS && defined WIN32 && !defined WINCE -#include -BOOL WINAPI DllMain( HINSTANCE, DWORD fdwReason, LPVOID ); - -BOOL WINAPI DllMain( HINSTANCE, DWORD fdwReason, LPVOID ) -{ - if( fdwReason == DLL_PROCESS_DETACH ) - { - // application hangs if call clReleaseCommandQueue here, so release context only - // without context release application hangs as well - context_tear_down = 1; - Context* cv_ctx = Context::getContext(); - if(cv_ctx) - { - cl_context ctx = cv_ctx->impl->oclcontext; - if(ctx) - openCLSafeCall(clReleaseContext(ctx)); - } - } - return TRUE; -} -#endif diff --git a/modules/ocl/src/mcwutil.cpp b/modules/ocl/src/mcwutil.cpp index d4fd47b722..612165165c 100644 --- a/modules/ocl/src/mcwutil.cpp +++ b/modules/ocl/src/mcwutil.cpp @@ -149,7 +149,7 @@ namespace cv cl_image_format format; int err; int depth = mat.depth(); - int channels = mat.channels(); + int channels = mat.oclchannels(); switch(depth) { diff --git a/modules/ocl/src/moments.cpp b/modules/ocl/src/moments.cpp index 66a1a3355d..d9247e388c 100644 --- a/modules/ocl/src/moments.cpp +++ b/modules/ocl/src/moments.cpp @@ -16,7 +16,7 @@ // Third party copyrights are property of their respective owners. 
// // @Authors -// Sen Liu, sen@multicorewareinc.com +// Sen Liu, swjtuls1987@126.com // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: @@ -280,8 +280,8 @@ static void ocl_cvMoments( const void* array, CvMoments* mom, int binary ) blocky = size.height/TILE_SIZE; else blocky = size.height/TILE_SIZE + 1; - cv::ocl::oclMat dst_m(blocky * 10, blockx, CV_64FC1); - cl_mem sum = openCLCreateBuffer(src.clCxt,CL_MEM_READ_WRITE,10*sizeof(double)); + oclMat dst_m(blocky * 10, blockx, CV_64FC1); + oclMat sum(1, 10, CV_64FC1); int tile_width = std::min(size.width,TILE_SIZE); int tile_height = std::min(size.height,TILE_SIZE); size_t localThreads[3] = { tile_height, 1, 1}; @@ -291,19 +291,16 @@ static void ocl_cvMoments( const void* array, CvMoments* mom, int binary ) args.push_back( std::make_pair( sizeof(cl_int) , (void *)&src.rows )); args.push_back( std::make_pair( sizeof(cl_int) , (void *)&src.cols )); args.push_back( std::make_pair( sizeof(cl_int) , (void *)&src.step )); - args.push_back( std::make_pair( sizeof(cl_int) , (void *)&tileSize.width )); - args.push_back( std::make_pair( sizeof(cl_int) , (void *)&tileSize.height )); args.push_back( std::make_pair( sizeof(cl_mem) , (void *)&dst_m.data )); args.push_back( std::make_pair( sizeof(cl_int) , (void *)&dst_m.cols )); args.push_back( std::make_pair( sizeof(cl_int) , (void *)&dst_m.step )); args.push_back( std::make_pair( sizeof(cl_int) , (void *)&blocky )); - args.push_back( std::make_pair( sizeof(cl_int) , (void *)&type )); args.push_back( std::make_pair( sizeof(cl_int) , (void *)&depth )); args.push_back( std::make_pair( sizeof(cl_int) , (void *)&cn )); args.push_back( std::make_pair( sizeof(cl_int) , (void *)&coi )); args.push_back( std::make_pair( sizeof(cl_int) , (void *)&binary )); args.push_back( std::make_pair( sizeof(cl_int) , (void *)&TILE_SIZE )); - openCLExecuteKernel(dst_m.clCxt, &moments, "CvMoments", globalThreads, localThreads, args, -1, depth); + openCLExecuteKernel(Context::getContext(), &moments, "CvMoments", globalThreads, localThreads, args, -1, depth); size_t localThreadss[3] = { 128, 1, 1}; size_t globalThreadss[3] = { 128, 1, 1}; @@ -315,10 +312,9 @@ static void ocl_cvMoments( const void* array, CvMoments* mom, int binary ) args_sum.push_back( std::make_pair( sizeof(cl_mem) , (void *)&sum )); args_sum.push_back( std::make_pair( sizeof(cl_mem) , (void *)&dst_m.data )); args_sum.push_back( std::make_pair( sizeof(cl_int) , (void *)&dst_m.step )); - openCLExecuteKernel(dst_m.clCxt, &moments, "dst_sum", globalThreadss, localThreadss, args_sum, -1, -1); - double* dstsum = new double[10]; - memset(dstsum,0,10*sizeof(double)); - openCLReadBuffer(dst_m.clCxt,sum,(void *)dstsum,10*sizeof(double)); + openCLExecuteKernel(Context::getContext(), &moments, "dst_sum", globalThreadss, localThreadss, args_sum, -1, -1); + + Mat dstsum(sum); mom->m00 = dstsum[0]; mom->m10 = dstsum[1]; mom->m01 = dstsum[2]; @@ -329,8 +325,7 @@ static void ocl_cvMoments( const void* array, CvMoments* mom, int binary ) mom->m21 = dstsum[7]; mom->m12 = dstsum[8]; mom->m03 = dstsum[9]; - delete [] dstsum; - openCLSafeCall(clReleaseMemObject(sum)); + icvCompleteMomentState( mom ); } diff --git a/modules/ocl/src/opencl/moments.cl b/modules/ocl/src/opencl/moments.cl index c86ae494d2..5ea94e20c8 100644 --- a/modules/ocl/src/opencl/moments.cl +++ b/modules/ocl/src/opencl/moments.cl @@ -173,10 +173,10 @@ __kernel void dst_sum(int src_rows, int src_cols, int tile_height, 
int tile_widt sum[i] = dst_sum[i][0]; } -__kernel void CvMoments_D0(__global uchar16* src_data, int src_rows, int src_cols, int src_step, int tileSize_width, int tileSize_height, +__kernel void CvMoments_D0(__global uchar16* src_data, int src_rows, int src_cols, int src_step, __global F* dst_m, int dst_cols, int dst_step, int blocky, - int type, int depth, int cn, int coi, int binary, int TILE_SIZE) + int depth, int cn, int coi, int binary, int TILE_SIZE) { uchar tmp_coi[16]; // get the coi data uchar16 tmp[16]; @@ -192,35 +192,43 @@ __kernel void CvMoments_D0(__global uchar16* src_data, int src_rows, int src_col int x = wgidx*TILE_SIZE; // vector length of uchar int kcn = (cn==2)?2:4; int rstep = min(src_step, TILE_SIZE); - tileSize_height = min(TILE_SIZE, src_rows - y); - tileSize_width = min(TILE_SIZE, src_cols - x); - - if( tileSize_width < TILE_SIZE ) - for(int i = tileSize_width; i < rstep; i++ ) - *((__global uchar*)src_data+(y+lidy)*src_step+x+i) = 0; - if( coi > 0 ) //channel of interest - for(int i = 0; i < tileSize_width; i += VLEN_C) - { - for(int j=0; j 0 ) //channel of interest + for(int i = 0; i < tileSize_width; i += VLEN_C) + { + for(int j=0; j TILE_SIZE && tileSize_width < TILE_SIZE) - for(int i=tileSize_width; i < rstep; i++ ) - *((__global ushort*)src_data+(y+lidy)*src_step/2+x+i) = 0; - if( coi > 0 ) - for(int i=0; i < tileSize_width; i+=VLEN_US) - { - for(int j=0; j TILE_SIZE && tileSize_width < TILE_SIZE) + for(int i=tileSize_width; i < rstep && (x+i) < src_cols; i++ ) + *((__global ushort*)src_data+(y+lidy)*src_step/2+x+i) = 0; + if( coi > 0 ) + for(int i=0; i < tileSize_width; i+=VLEN_US) + { + for(int j=0; j= 1; j = j/2 ) { if(lidy < j) for( int i = 0; i < 10; i++ ) lm[i] = lm[i] + m[i][lidy]; - barrier(CLK_LOCAL_MEM_FENCE); + } + barrier(CLK_LOCAL_MEM_FENCE); + for( int j = TILE_SIZE/2; j >= 1; j = j/2 ) + { if(lidy >= j/2&&lidy < j) for( int i = 0; i < 10; i++ ) m[i][lidy-j/2] = lm[i]; - barrier(CLK_LOCAL_MEM_FENCE); } + barrier(CLK_LOCAL_MEM_FENCE); + if(lidy == 0&&lidx == 0) { for(int mt = 0; mt < 10; mt++ ) @@ -482,10 +501,10 @@ __kernel void CvMoments_D2(__global ushort8* src_data, int src_rows, int src_col } } -__kernel void CvMoments_D3(__global short8* src_data, int src_rows, int src_cols, int src_step, int tileSize_width, int tileSize_height, +__kernel void CvMoments_D3(__global short8* src_data, int src_rows, int src_cols, int src_step, __global F* dst_m, int dst_cols, int dst_step, int blocky, - int type, int depth, int cn, int coi, int binary, const int TILE_SIZE) + int depth, int cn, int coi, int binary, const int TILE_SIZE) { short tmp_coi[8]; // get the coi data short8 tmp[32]; @@ -500,21 +519,26 @@ __kernel void CvMoments_D3(__global short8* src_data, int src_rows, int src_cols int x = wgidx*TILE_SIZE; // real X index of pixel int kcn = (cn==2)?2:4; int rstep = min(src_step/2, TILE_SIZE); - tileSize_height = min(TILE_SIZE, src_rows - y); - tileSize_width = min(TILE_SIZE, src_cols -x); - if(tileSize_width < TILE_SIZE) - for(int i = tileSize_width; i < rstep; i++ ) - *((__global short*)src_data+(y+lidy)*src_step/2+x+i) = 0; - if( coi > 0 ) - for(int i=0; i < tileSize_width; i+=VLEN_S) - { - for(int j=0; j 0 ) + for(int i=0; i < tileSize_width; i+=VLEN_S) + { + for(int j=0; j 0 ) - for(int i=0; i < tileSize_width; i+=VLEN_F) - { -#pragma unroll - for(int j=0; j<4; j++) + + if ( y+lidy < src_rows ) + { + if(tileSize_width < TILE_SIZE) + for(int i = tileSize_width; i < rstep && (x+i) < src_cols; i++ ) + *((__global 
float*)src_data+(y+lidy)*src_step/4+x+i) = 0; + if( coi > 0 ) + for(int i=0; i < tileSize_width; i+=VLEN_F) { - index = yOff+(x+i+j)*kcn+coi-1; - if (index < maxIdx) - tmp_coi[j] = *(src_data+index); - else - tmp_coi[j] = 0; + for(int j=0; j<4; j++) + tmp_coi[j] = *(src_data+(y+lidy)*src_step/4+(x+i+j)*kcn+coi-1); + tmp[i/VLEN_F] = (float4)(tmp_coi[0],tmp_coi[1],tmp_coi[2],tmp_coi[3]); } - tmp[i/VLEN_F] = (float4)(tmp_coi[0],tmp_coi[1],tmp_coi[2],tmp_coi[3]); - } - else - for(int i=0; i < tileSize_width && (yOff+x+i) < maxIdx; i+=VLEN_F) - tmp[i/VLEN_F] = (*(__global float4 *)(src_data+yOff+x+i)); + else + for(int i=0; i < tileSize_width; i+=VLEN_F) + tmp[i/VLEN_F] = (float4)(*(src_data+(y+lidy)*src_step/4+x+i),*(src_data+(y+lidy)*src_step/4+x+i+1),*(src_data+(y+lidy)*src_step/4+x+i+2),*(src_data+(y+lidy)*src_step/4+x+i+3)); + } + float4 zero = (float4)(0); float4 full = (float4)(255); if( binary ) @@ -688,10 +708,9 @@ __kernel void CvMoments_D5( __global float* src_data, int src_rows, int src_cols tmp[i/VLEN_F] = (tmp[i/VLEN_F]!=zero)?full:zero; F mom[10]; __local F m[10][128]; - if(lidy == 0) + if(lidy < 128) for(int i = 0; i < 10; i ++) - for(int j = 0; j < 128; j ++) - m[i][j] = 0; + m[i][lidy] = 0; barrier(CLK_LOCAL_MEM_FENCE); F lm[10] = {0}; F4 x0 = (F4)(0); @@ -770,66 +789,42 @@ __kernel void CvMoments_D5( __global float* src_data, int src_rows, int src_cols // accumulate moments computed in each tile dst_step /= sizeof(F); - int dst_x_off = mad24(wgidy, dst_cols, wgidx); - int dst_off = 0; - int max_dst_index = 10 * blocky * get_global_size(1); - // + m00 ( = m00' ) - dst_off = mad24(DST_ROW_00 * blocky, dst_step, dst_x_off); - if (dst_off < max_dst_index) - *(dst_m + dst_off) = mom[0]; + *(dst_m + mad24(DST_ROW_00 * blocky, dst_step, mad24(wgidy, dst_cols, wgidx))) = mom[0]; // + m10 ( = m10' + x*m00' ) - dst_off = mad24(DST_ROW_10 * blocky, dst_step, dst_x_off); - if (dst_off < max_dst_index) - *(dst_m + dst_off) = mom[1] + xm; + *(dst_m + mad24(DST_ROW_10 * blocky, dst_step, mad24(wgidy, dst_cols, wgidx))) = mom[1] + xm; // + m01 ( = m01' + y*m00' ) - dst_off = mad24(DST_ROW_01 * blocky, dst_step, dst_x_off); - if (dst_off < max_dst_index) - *(dst_m + dst_off) = mom[2] + ym; + *(dst_m + mad24(DST_ROW_01 * blocky, dst_step, mad24(wgidy, dst_cols, wgidx))) = mom[2] + ym; // + m20 ( = m20' + 2*x*m10' + x*x*m00' ) - dst_off = mad24(DST_ROW_20 * blocky, dst_step, dst_x_off); - if (dst_off < max_dst_index) - *(dst_m + dst_off) = mom[3] + x * (mom[1] * 2 + xm); + *(dst_m + mad24(DST_ROW_20 * blocky, dst_step, mad24(wgidy, dst_cols, wgidx))) = mom[3] + x * (mom[1] * 2 + xm); // + m11 ( = m11' + x*m01' + y*m10' + x*y*m00' ) - dst_off = mad24(DST_ROW_11 * blocky, dst_step, dst_x_off); - if (dst_off < max_dst_index) - *(dst_m + dst_off) = mom[4] + x * (mom[2] + ym) + y * mom[1]; + *(dst_m + mad24(DST_ROW_11 * blocky, dst_step, mad24(wgidy, dst_cols, wgidx))) = mom[4] + x * (mom[2] + ym) + y * mom[1]; // + m02 ( = m02' + 2*y*m01' + y*y*m00' ) - dst_off = mad24(DST_ROW_02 * blocky, dst_step, dst_x_off); - if (dst_off < max_dst_index) - *(dst_m + dst_off) = mom[5] + y * (mom[2] * 2 + ym); + *(dst_m + mad24(DST_ROW_02 * blocky, dst_step, mad24(wgidy, dst_cols, wgidx))) = mom[5] + y * (mom[2] * 2 + ym); // + m30 ( = m30' + 3*x*m20' + 3*x*x*m10' + x*x*x*m00' ) - dst_off = mad24(DST_ROW_30 * blocky, dst_step, dst_x_off); - if (dst_off < max_dst_index) - *(dst_m + dst_off) = mom[6] + x * (3. * mom[3] + x * (3. 
* mom[1] + xm)); + *(dst_m + mad24(DST_ROW_30 * blocky, dst_step, mad24(wgidy, dst_cols, wgidx))) = mom[6] + x * (3. * mom[3] + x * (3. * mom[1] + xm)); // + m21 ( = m21' + x*(2*m11' + 2*y*m10' + x*m01' + x*y*m00') + y*m20') - dst_off = mad24(DST_ROW_21 * blocky, dst_step, dst_x_off); - if (dst_off < max_dst_index) - *(dst_m + dst_off) = mom[7] + x * (2 * (mom[4] + y * mom[1]) + x * (mom[2] + ym)) + y * mom[3]; + *(dst_m + mad24(DST_ROW_21 * blocky, dst_step, mad24(wgidy, dst_cols, wgidx))) = mom[7] + x * (2 * (mom[4] + y * mom[1]) + x * (mom[2] + ym)) + y * mom[3]; // + m12 ( = m12' + y*(2*m11' + 2*x*m01' + y*m10' + x*y*m00') + x*m02') - dst_off = mad24(DST_ROW_12 * blocky, dst_step, dst_x_off); - if (dst_off < max_dst_index) - *(dst_m + dst_off) = mom[8] + y * (2 * (mom[4] + x * mom[2]) + y * (mom[1] + xm)) + x * mom[5]; + *(dst_m + mad24(DST_ROW_12 * blocky, dst_step, mad24(wgidy, dst_cols, wgidx))) = mom[8] + y * (2 * (mom[4] + x * mom[2]) + y * (mom[1] + xm)) + x * mom[5]; // + m03 ( = m03' + 3*y*m02' + 3*y*y*m01' + y*y*y*m00' ) - dst_off = mad24(DST_ROW_03 * blocky, dst_step, dst_x_off); - if (dst_off < max_dst_index) - *(dst_m + dst_off) = mom[9] + y * (3. * mom[5] + y * (3. * mom[2] + ym)); + *(dst_m + mad24(DST_ROW_03 * blocky, dst_step, mad24(wgidy, dst_cols, wgidx))) = mom[9] + y * (3. * mom[5] + y * (3. * mom[2] + ym)); } } -__kernel void CvMoments_D6(__global F* src_data, int src_rows, int src_cols, int src_step, int tileSize_width, int tileSize_height, +__kernel void CvMoments_D6(__global F* src_data, int src_rows, int src_cols, int src_step, __global F* dst_m, int dst_cols, int dst_step, int blocky, - int type, int depth, int cn, int coi, int binary, const int TILE_SIZE) + int depth, int cn, int coi, int binary, const int TILE_SIZE) { F tmp_coi[4]; // get the coi data F4 tmp[64]; @@ -844,22 +839,26 @@ __kernel void CvMoments_D6(__global F* src_data, int src_rows, int src_cols, in int x = wgidx*TILE_SIZE; // real X index of pixel int kcn = (cn==2)?2:4; int rstep = min(src_step/8, TILE_SIZE); - tileSize_height = min(TILE_SIZE, src_rows - y); - tileSize_width = min(TILE_SIZE, src_cols - x); + int tileSize_height = min(TILE_SIZE, src_rows - y); + int tileSize_width = min(TILE_SIZE, src_cols - x); + + if ( y+lidy < src_rows ) + { + if(tileSize_width < TILE_SIZE) + for(int i = tileSize_width; i < rstep && (x+i) < src_cols; i++ ) + *((__global F*)src_data+(y+lidy)*src_step/8+x+i) = 0; + if( coi > 0 ) + for(int i=0; i < tileSize_width; i+=VLEN_D) + { + for(int j=0; j<4 && ((x+i+j)*kcn+coi-1) 0 ) - for(int i=0; i < tileSize_width; i+=VLEN_D) - { - for(int j=0; j<4; j++) - tmp_coi[j] = *(src_data+(y+lidy)*src_step/8+(x+i+j)*kcn+coi-1); - tmp[i/VLEN_D] = (F4)(tmp_coi[0],tmp_coi[1],tmp_coi[2],tmp_coi[3]); - } - else - for(int i=0; i < tileSize_width; i+=VLEN_D) - tmp[i/VLEN_D] = (F4)(*(src_data+(y+lidy)*src_step/8+x+i),*(src_data+(y+lidy)*src_step/8+x+i+1),*(src_data+(y+lidy)*src_step/8+x+i+2),*(src_data+(y+lidy)*src_step/8+x+i+3)); F4 zero = (F4)(0); F4 full = (F4)(255); if( binary ) @@ -867,10 +866,9 @@ __kernel void CvMoments_D6(__global F* src_data, int src_rows, int src_cols, in tmp[i/VLEN_D] = (tmp[i/VLEN_D]!=zero)?full:zero; F mom[10]; __local F m[10][128]; - if(lidy == 0) + if(lidy < 128) for(int i=0; i<10; i++) - for(int j=0; j<128; j++) - m[i][j]=0; + m[i][lidy]=0; barrier(CLK_LOCAL_MEM_FENCE); F lm[10] = {0}; F4 x0 = (F4)(0); @@ -907,7 +905,6 @@ __kernel void CvMoments_D6(__global F* src_data, int src_rows, int src_cols, in m[1][lidy-bheight] = x1.s0; // m10 
m[0][lidy-bheight] = x0.s0; // m00 } - else if(lidy < bheight) { lm[9] = ((F)py) * sy; // m03 @@ -922,6 +919,7 @@ __kernel void CvMoments_D6(__global F* src_data, int src_rows, int src_cols, in lm[0] = x0.s0; // m00 } barrier(CLK_LOCAL_MEM_FENCE); + for( int j = TILE_SIZE/2; j >= 1; j = j/2 ) { if(lidy < j) diff --git a/modules/ocl/src/opencl/objdetect_hog.cl b/modules/ocl/src/opencl/objdetect_hog.cl index 509cf13ade..db78eff7b7 100644 --- a/modules/ocl/src/opencl/objdetect_hog.cl +++ b/modules/ocl/src/opencl/objdetect_hog.cl @@ -133,7 +133,9 @@ __kernel void compute_hists_lut_kernel( final_hist[(cell_x * 2 + cell_y) * cnbins + bin_id] = hist_[0] + hist_[1] + hist_[2]; } +#ifdef CPU barrier(CLK_LOCAL_MEM_FENCE); +#endif int tid = (cell_y * CELLS_PER_BLOCK_Y + cell_x) * 12 + cell_thread_x; if ((tid < cblock_hist_size) && (gid < blocks_total)) @@ -225,8 +227,9 @@ __kernel void compute_hists_kernel( final_hist[(cell_x * 2 + cell_y) * cnbins + bin_id] = hist_[0] + hist_[1] + hist_[2]; } +#ifdef CPU barrier(CLK_LOCAL_MEM_FENCE); - +#endif int tid = (cell_y * CELLS_PER_BLOCK_Y + cell_x) * 12 + cell_thread_x; if ((tid < cblock_hist_size) && (gid < blocks_total)) { @@ -318,6 +321,10 @@ float reduce_smem(volatile __local float* smem, int size) if (tid < 32) { if (size >= 64) smem[tid] = sum = sum + smem[tid + 32]; +#if WAVE_SIZE < 32 + } barrier(CLK_LOCAL_MEM_FENCE); + if (tid < 16) { +#endif if (size >= 32) smem[tid] = sum = sum + smem[tid + 16]; if (size >= 16) smem[tid] = sum = sum + smem[tid + 8]; if (size >= 8) smem[tid] = sum = sum + smem[tid + 4]; @@ -418,6 +425,9 @@ __kernel void classify_hists_180_kernel( { smem[tid] = product = product + smem[tid + 32]; } +#if WAVE_SIZE < 32 + barrier(CLK_LOCAL_MEM_FENCE); +#endif if (tid < 16) { smem[tid] = product = product + smem[tid + 16]; @@ -487,6 +497,10 @@ __kernel void classify_hists_252_kernel( if (tid < 32) { smem[tid] = product = product + smem[tid + 32]; +#if WAVE_SIZE < 32 + } barrier(CLK_LOCAL_MEM_FENCE); + if (tid < 16) { +#endif smem[tid] = product = product + smem[tid + 16]; smem[tid] = product = product + smem[tid + 8]; smem[tid] = product = product + smem[tid + 4]; @@ -553,6 +567,10 @@ __kernel void classify_hists_kernel( if (tid < 32) { smem[tid] = product = product + smem[tid + 32]; +#if WAVE_SIZE < 32 + } barrier(CLK_LOCAL_MEM_FENCE); + if (tid < 16) { +#endif smem[tid] = product = product + smem[tid + 16]; smem[tid] = product = product + smem[tid + 8]; smem[tid] = product = product + smem[tid + 4]; diff --git a/modules/ocl/src/opencl/optical_flow_farneback.cl b/modules/ocl/src/opencl/optical_flow_farneback.cl new file mode 100644 index 0000000000..7cc564eded --- /dev/null +++ b/modules/ocl/src/opencl/optical_flow_farneback.cl @@ -0,0 +1,450 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2010-2012, Multicoreware, Inc., all rights reserved. +// Copyright (C) 2010-2012, Advanced Micro Devices, Inc., all rights reserved. +// Third party copyrights are property of their respective owners. 
+// +// @Authors +// Sen Liu, swjtuls1987@126.com +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other oclMaterials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors as is and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + + +#define tx get_local_id(0) +#define ty get_local_id(1) +#define bx get_group_id(0) +#define bdx get_local_size(0) + +#define BORDER_SIZE 5 +#define MAX_KSIZE_HALF 100 + +#ifndef polyN +#define polyN 5 +#endif + +__kernel void polynomialExpansion(__global float * dst, + __global __const float * src, + __global __const float * c_g, + __global __const float * c_xg, + __global __const float * c_xxg, + __local float * smem, + const float4 ig, + const int height, const int width, + int dstStep, int srcStep) +{ + const int y = get_global_id(1); + const int x = bx * (bdx - 2*polyN) + tx - polyN; + + dstStep /= sizeof(*dst); + srcStep /= sizeof(*src); + + int xWarped; + __local float *row = smem + tx; + + if (y < height && y >= 0) + { + xWarped = min(max(x, 0), width - 1); + + row[0] = src[mad24(y, srcStep, xWarped)] * c_g[0]; + row[bdx] = 0.f; + row[2*bdx] = 0.f; + +#pragma unroll + for (int k = 1; k <= polyN; ++k) + { + float t0 = src[mad24(max(y - k, 0), srcStep, xWarped)]; + float t1 = src[mad24(min(y + k, height - 1), srcStep, xWarped)]; + + row[0] += c_g[k] * (t0 + t1); + row[bdx] += c_xg[k] * (t1 - t0); + row[2*bdx] += c_xxg[k] * (t0 + t1); + } + } + + barrier(CLK_LOCAL_MEM_FENCE); + + if (y < height && y >= 0 && tx >= polyN && tx + polyN < bdx && x < width) + { + float b1 = c_g[0] * row[0]; + float b3 = c_g[0] * row[bdx]; + float b5 = c_g[0] * row[2*bdx]; + float b2 = 0, b4 = 0, b6 = 0; + +#pragma unroll + for (int k = 1; k <= polyN; ++k) + { + b1 += (row[k] + row[-k]) * c_g[k]; + b4 += (row[k] + row[-k]) * c_xxg[k]; + b2 += (row[k] - row[-k]) * c_xg[k]; + b3 += (row[k + bdx] + row[-k + bdx]) * c_g[k]; + b6 += (row[k + bdx] - row[-k + bdx]) * c_xg[k]; + b5 += (row[k + 2*bdx] + row[-k + 2*bdx]) * c_g[k]; + } + + dst[mad24(y, dstStep, xWarped)] = b3*ig.s0; + dst[mad24(height + y, dstStep, xWarped)] = b2*ig.s0; + dst[mad24(2*height + y, dstStep, xWarped)] = b1*ig.s1 + b5*ig.s2; + dst[mad24(3*height + y, dstStep, xWarped)] = b1*ig.s1 + b4*ig.s2; + dst[mad24(4*height + y, 
dstStep, xWarped)] = b6*ig.s3; + } +} + +inline int idx_row_low(const int y, const int last_row) +{ + return abs(y) % (last_row + 1); +} + +inline int idx_row_high(const int y, const int last_row) +{ + return abs(last_row - abs(last_row - y)) % (last_row + 1); +} + +inline int idx_row(const int y, const int last_row) +{ + return idx_row_low(idx_row_high(y, last_row), last_row); +} + +inline int idx_col_low(const int x, const int last_col) +{ + return abs(x) % (last_col + 1); +} + +inline int idx_col_high(const int x, const int last_col) +{ + return abs(last_col - abs(last_col - x)) % (last_col + 1); +} + +inline int idx_col(const int x, const int last_col) +{ + return idx_col_low(idx_col_high(x, last_col), last_col); +} + +__kernel void gaussianBlur(__global float * dst, + __global const float * src, + __global const float * c_gKer, + __local float * smem, + const int height, const int width, + int dstStep, int srcStep, + const int ksizeHalf) +{ + const int y = get_global_id(1); + const int x = get_global_id(0); + + dstStep /= sizeof(*dst); + srcStep /= sizeof(*src); + + __local float *row = smem + ty * (bdx + 2*ksizeHalf); + + if (y < height) + { + // Vertical pass + for (int i = tx; i < bdx + 2*ksizeHalf; i += bdx) + { + int xExt = (int)(bx * bdx) + i - ksizeHalf; + xExt = idx_col(xExt, width - 1); + row[i] = src[mad24(y, srcStep, xExt)] * c_gKer[0]; + for (int j = 1; j <= ksizeHalf; ++j) + row[i] += (src[mad24(idx_row_low(y - j, height - 1), srcStep, xExt)] + + src[mad24(idx_row_high(y + j, height - 1), srcStep, xExt)]) * c_gKer[j]; + } + } + + barrier(CLK_LOCAL_MEM_FENCE); + + if (y < height && y >= 0 && x < width && x >= 0) + { + // Horizontal pass + row += tx + ksizeHalf; + float res = row[0] * c_gKer[0]; + for (int i = 1; i <= ksizeHalf; ++i) + res += (row[-i] + row[i]) * c_gKer[i]; + + dst[mad24(y, dstStep, x)] = res; + } +} + +__constant float c_border[BORDER_SIZE + 1] = { 0.14f, 0.14f, 0.4472f, 0.4472f, 0.4472f, 1.f }; + +__kernel void updateMatrices(__global float * M, + __global const float * flowx, __global const float * flowy, + __global const float * R0, __global const float * R1, + const int height, const int width, + int mStep, int xStep, int yStep, int R0Step, int R1Step) +{ + const int y = get_global_id(1); + const int x = get_global_id(0); + + mStep /= sizeof(*M); + xStep /= sizeof(*flowx); + yStep /= sizeof(*flowy); + R0Step /= sizeof(*R0); + R1Step /= sizeof(*R1); + + if (y < height && y >= 0 && x < width && x >= 0) + { + float dx = flowx[mad24(y, xStep, x)]; + float dy = flowy[mad24(y, yStep, x)]; + float fx = x + dx; + float fy = y + dy; + + int x1 = convert_int(floor(fx)); + int y1 = convert_int(floor(fy)); + fx -= x1; + fy -= y1; + + float r2, r3, r4, r5, r6; + + if (x1 >= 0 && y1 >= 0 && x1 < width - 1 && y1 < height - 1) + { + float a00 = (1.f - fx) * (1.f - fy); + float a01 = fx * (1.f - fy); + float a10 = (1.f - fx) * fy; + float a11 = fx * fy; + + r2 = a00 * R1[mad24(y1, R1Step, x1)] + + a01 * R1[mad24(y1, R1Step, x1 + 1)] + + a10 * R1[mad24(y1 + 1, R1Step, x1)] + + a11 * R1[mad24(y1 + 1, R1Step, x1 + 1)]; + + r3 = a00 * R1[mad24(height + y1, R1Step, x1)] + + a01 * R1[mad24(height + y1, R1Step, x1 + 1)] + + a10 * R1[mad24(height + y1 + 1, R1Step, x1)] + + a11 * R1[mad24(height + y1 + 1, R1Step, x1 + 1)]; + + r4 = a00 * R1[mad24(2*height + y1, R1Step, x1)] + + a01 * R1[mad24(2*height + y1, R1Step, x1 + 1)] + + a10 * R1[mad24(2*height + y1 + 1, R1Step, x1)] + + a11 * R1[mad24(2*height + y1 + 1, R1Step, x1 + 1)]; + + r5 = a00 * R1[mad24(3*height + y1, R1Step, 
x1)] + + a01 * R1[mad24(3*height + y1, R1Step, x1 + 1)] + + a10 * R1[mad24(3*height + y1 + 1, R1Step, x1)] + + a11 * R1[mad24(3*height + y1 + 1, R1Step, x1 + 1)]; + + r6 = a00 * R1[mad24(4*height + y1, R1Step, x1)] + + a01 * R1[mad24(4*height + y1, R1Step, x1 + 1)] + + a10 * R1[mad24(4*height + y1 + 1, R1Step, x1)] + + a11 * R1[mad24(4*height + y1 + 1, R1Step, x1 + 1)]; + + r4 = (R0[mad24(2*height + y, R0Step, x)] + r4) * 0.5f; + r5 = (R0[mad24(3*height + y, R0Step, x)] + r5) * 0.5f; + r6 = (R0[mad24(4*height + y, R0Step, x)] + r6) * 0.25f; + } + else + { + r2 = r3 = 0.f; + r4 = R0[mad24(2*height + y, R0Step, x)]; + r5 = R0[mad24(3*height + y, R0Step, x)]; + r6 = R0[mad24(4*height + y, R0Step, x)] * 0.5f; + } + + r2 = (R0[mad24(y, R0Step, x)] - r2) * 0.5f; + r3 = (R0[mad24(height + y, R0Step, x)] - r3) * 0.5f; + + r2 += r4*dy + r6*dx; + r3 += r6*dy + r5*dx; + + float scale = + c_border[min(x, BORDER_SIZE)] * + c_border[min(y, BORDER_SIZE)] * + c_border[min(width - x - 1, BORDER_SIZE)] * + c_border[min(height - y - 1, BORDER_SIZE)]; + + r2 *= scale; + r3 *= scale; + r4 *= scale; + r5 *= scale; + r6 *= scale; + + M[mad24(y, mStep, x)] = r4*r4 + r6*r6; + M[mad24(height + y, mStep, x)] = (r4 + r5)*r6; + M[mad24(2*height + y, mStep, x)] = r5*r5 + r6*r6; + M[mad24(3*height + y, mStep, x)] = r4*r2 + r6*r3; + M[mad24(4*height + y, mStep, x)] = r6*r2 + r5*r3; + } +} + +__kernel void boxFilter5(__global float * dst, + __global const float * src, + __local float * smem, + const int height, const int width, + int dstStep, int srcStep, + const int ksizeHalf) +{ + const int y = get_global_id(1); + const int x = get_global_id(0); + + const float boxAreaInv = 1.f / ((1 + 2*ksizeHalf) * (1 + 2*ksizeHalf)); + const int smw = bdx + 2*ksizeHalf; // shared memory "width" + __local float *row = smem + 5 * ty * smw; + + dstStep /= sizeof(*dst); + srcStep /= sizeof(*src); + + if (y < height) + { + // Vertical pass + for (int i = tx; i < bdx + 2*ksizeHalf; i += bdx) + { + int xExt = (int)(bx * bdx) + i - ksizeHalf; + xExt = min(max(xExt, 0), width - 1); + +#pragma unroll + for (int k = 0; k < 5; ++k) + row[k*smw + i] = src[mad24(k*height + y, srcStep, xExt)]; + + for (int j = 1; j <= ksizeHalf; ++j) +#pragma unroll + for (int k = 0; k < 5; ++k) + row[k*smw + i] += + src[mad24(k*height + max(y - j, 0), srcStep, xExt)] + + src[mad24(k*height + min(y + j, height - 1), srcStep, xExt)]; + } + } + + barrier(CLK_LOCAL_MEM_FENCE); + + if (y < height && y >= 0 && x < width && x >= 0) + { + // Horizontal pass + + row += tx + ksizeHalf; + float res[5]; + +#pragma unroll + for (int k = 0; k < 5; ++k) + res[k] = row[k*smw]; + + for (int i = 1; i <= ksizeHalf; ++i) +#pragma unroll + for (int k = 0; k < 5; ++k) + res[k] += row[k*smw - i] + row[k*smw + i]; + +#pragma unroll + for (int k = 0; k < 5; ++k) + dst[mad24(k*height + y, dstStep, x)] = res[k] * boxAreaInv; + } +} + +__kernel void updateFlow(__global float4 * flowx, __global float4 * flowy, + __global const float4 * M, + const int height, const int width, + int xStep, int yStep, int mStep) +{ + const int y = get_global_id(1); + const int x = get_global_id(0); + + xStep /= sizeof(*flowx); + yStep /= sizeof(*flowy); + mStep /= sizeof(*M); + + if (y < height && y >= 0 && x < width && x >= 0) + { + float4 g11 = M[mad24(y, mStep, x)]; + float4 g12 = M[mad24(height + y, mStep, x)]; + float4 g22 = M[mad24(2*height + y, mStep, x)]; + float4 h1 = M[mad24(3*height + y, mStep, x)]; + float4 h2 = M[mad24(4*height + y, mStep, x)]; + + float4 detInv = (float4)(1.f) / (g11*g22 - g12*g12 
+ (float4)(1e-3f)); + + flowx[mad24(y, xStep, x)] = (g11*h2 - g12*h1) * detInv; + flowy[mad24(y, yStep, x)] = (g22*h1 - g12*h2) * detInv; + } +} + +__kernel void gaussianBlur5(__global float * dst, + __global const float * src, + __global const float * c_gKer, + __local float * smem, + const int height, const int width, + int dstStep, int srcStep, + const int ksizeHalf) +{ + const int y = get_global_id(1); + const int x = get_global_id(0); + + const int smw = bdx + 2*ksizeHalf; // shared memory "width" + __local volatile float *row = smem + 5 * ty * smw; + + dstStep /= sizeof(*dst); + srcStep /= sizeof(*src); + + if (y < height) + { + // Vertical pass + for (int i = tx; i < bdx + 2*ksizeHalf; i += bdx) + { + int xExt = (int)(bx * bdx) + i - ksizeHalf; + xExt = idx_col(xExt, width - 1); + +#pragma unroll + for (int k = 0; k < 5; ++k) + row[k*smw + i] = src[mad24(k*height + y, srcStep, xExt)] * c_gKer[0]; + + for (int j = 1; j <= ksizeHalf; ++j) +#pragma unroll + for (int k = 0; k < 5; ++k) + row[k*smw + i] += + (src[mad24(k*height + idx_row_low(y - j, height - 1), srcStep, xExt)] + + src[mad24(k*height + idx_row_high(y + j, height - 1), srcStep, xExt)]) * c_gKer[j]; + } + } + + barrier(CLK_LOCAL_MEM_FENCE); + + if (y < height && y >= 0 && x < width && x >= 0) + { + // Horizontal pass + + row += tx + ksizeHalf; + float res[5]; + +#pragma unroll + for (int k = 0; k < 5; ++k) + res[k] = row[k*smw] * c_gKer[0]; + + for (int i = 1; i <= ksizeHalf; ++i) +#pragma unroll + for (int k = 0; k < 5; ++k) + res[k] += (row[k*smw - i] + row[k*smw + i]) * c_gKer[i]; + +#pragma unroll + for (int k = 0; k < 5; ++k) + dst[mad24(k*height + y, dstStep, x)] = res[k]; + } +} diff --git a/modules/ocl/src/opencl/stereobm.cl b/modules/ocl/src/opencl/stereobm.cl index bd86a7f3fb..f1b958812f 100644 --- a/modules/ocl/src/opencl/stereobm.cl +++ b/modules/ocl/src/opencl/stereobm.cl @@ -162,8 +162,8 @@ __kernel void stereoKernel(__global unsigned char *left, __global unsigned char int y_tex; int x_tex = X - radius; - if (x_tex >= cwidth) - return; + //if (x_tex >= cwidth) + // return; for(int d = STEREO_MIND; d < maxdisp; d += STEREO_DISP_STEP) { @@ -258,27 +258,13 @@ float sobel(__global unsigned char *input, int x, int y, int rows, int cols) float CalcSums(__local float *cols, __local float *cols_cache, int winsz) { - float cache = 0; - float cache2 = 0; - int winsz2 = winsz/2; - - int x = get_local_id(0); - int group_size_x = get_local_size(0); + unsigned int cache = cols[0]; - for(int i = 1; i <= winsz2; i++) +#pragma unroll + for(int i = 1; i <= winsz; i++) cache += cols[i]; - cols_cache[0] = cache; - - barrier(CLK_LOCAL_MEM_FENCE); - - if (x < group_size_x - winsz2) - cache2 = cols_cache[winsz2]; - else - for(int i = winsz2 + 1; i < winsz; i++) - cache2 += cols[i]; - - return cols[0] + cache + cache2; + return cache; } #define RpT (2 * ROWSperTHREAD) // got experimentally @@ -301,8 +287,7 @@ __kernel void textureness_kernel(__global unsigned char *disp, int disp_rows, in int beg_row = group_id_y * RpT; int end_row = min(beg_row + RpT, disp_rows); -// if (x < disp_cols) -// { + int y = beg_row; float sum = 0; @@ -340,11 +325,15 @@ __kernel void textureness_kernel(__global unsigned char *disp, int disp_rows, in } barrier(CLK_LOCAL_MEM_FENCE); - float sum_win = CalcSums(cols, cols_cache + local_id_x, winsz) * 255; - if (sum_win < threshold) - disp[y * disp_step + x] = 0; + + if (x < disp_cols) + { + float sum_win = CalcSums(cols, cols_cache + local_id_x, winsz) * 255; + if (sum_win < threshold) + disp[y * disp_step + 
x] = 0; + } barrier(CLK_LOCAL_MEM_FENCE); } - // } + } diff --git a/modules/ocl/src/optical_flow_farneback.cpp b/modules/ocl/src/optical_flow_farneback.cpp new file mode 100644 index 0000000000..e622446bb8 --- /dev/null +++ b/modules/ocl/src/optical_flow_farneback.cpp @@ -0,0 +1,540 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2010-2012, Multicoreware, Inc., all rights reserved. +// Copyright (C) 2010-2012, Advanced Micro Devices, Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// @Authors +// Sen Liu, swjtuls1987@126.com +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other oclMaterials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+// +//M*/ + + +#include "precomp.hpp" +#include "opencv2/video/tracking.hpp" + +using namespace std; +using namespace cv; +using namespace cv::ocl; + +#define MIN_SIZE 32 + +namespace cv +{ +namespace ocl +{ +///////////////////////////OpenCL kernel strings/////////////////////////// +extern const char *optical_flow_farneback; +} +} + +namespace cv { +namespace ocl { +namespace optflow_farneback +{ +oclMat g; +oclMat xg; +oclMat xxg; +oclMat gKer; + +float ig[4]; + +inline int divUp(int total, int grain) +{ + return (total + grain - 1) / grain; +} + +inline void setGaussianBlurKernel(const float *c_gKer, int ksizeHalf) +{ + cv::Mat t_gKer(1, ksizeHalf + 1, CV_32FC1, const_cast(c_gKer)); + gKer.upload(t_gKer); +} + +static void gaussianBlurOcl(const oclMat &src, int ksizeHalf, oclMat &dst) +{ + string kernelName("gaussianBlur"); + size_t localThreads[3] = { 256, 1, 1 }; + size_t globalThreads[3] = { divUp(src.cols, localThreads[0]) * localThreads[0], src.rows, 1 }; + int smem_size = (localThreads[0] + 2*ksizeHalf) * sizeof(float); + + CV_Assert(dst.size() == src.size()); + std::vector< std::pair > args; + args.push_back(std::make_pair(sizeof(cl_mem), (void *)&dst.data)); + args.push_back(std::make_pair(sizeof(cl_mem), (void *)&src.data)); + args.push_back(std::make_pair(sizeof(cl_mem), (void *)&gKer.data)); + args.push_back(std::make_pair(smem_size, (void *)NULL)); + args.push_back(std::make_pair(sizeof(cl_int), (void *)&dst.rows)); + args.push_back(std::make_pair(sizeof(cl_int), (void *)&dst.cols)); + args.push_back(std::make_pair(sizeof(cl_int), (void *)&dst.step)); + args.push_back(std::make_pair(sizeof(cl_int), (void *)&src.step)); + args.push_back(std::make_pair(sizeof(cl_int), (void *)&ksizeHalf)); + + openCLExecuteKernel(Context::getContext(), &optical_flow_farneback, kernelName, + globalThreads, localThreads, args, -1, -1); +} + +static void polynomialExpansionOcl(const oclMat &src, int polyN, oclMat &dst) +{ + string kernelName("polynomialExpansion"); + size_t localThreads[3] = { 256, 1, 1 }; + size_t globalThreads[3] = { divUp(src.cols, localThreads[0] - 2*polyN) * localThreads[0], src.rows, 1 }; + int smem_size = 3 * localThreads[0] * sizeof(float); + + std::vector< std::pair > args; + args.push_back(std::make_pair(sizeof(cl_mem), (void *)&dst.data)); + args.push_back(std::make_pair(sizeof(cl_mem), (void *)&src.data)); + args.push_back(std::make_pair(sizeof(cl_mem), (void *)&g.data)); + args.push_back(std::make_pair(sizeof(cl_mem), (void *)&xg.data)); + args.push_back(std::make_pair(sizeof(cl_mem), (void *)&xxg.data)); + args.push_back(std::make_pair(smem_size, (void *)NULL)); + args.push_back(std::make_pair(sizeof(cl_float4), (void *)&ig)); + args.push_back(std::make_pair(sizeof(cl_int), (void *)&src.rows)); + args.push_back(std::make_pair(sizeof(cl_int), (void *)&src.cols)); + args.push_back(std::make_pair(sizeof(cl_int), (void *)&dst.step)); + args.push_back(std::make_pair(sizeof(cl_int), (void *)&src.step)); + + char opt [128]; + sprintf(opt, "-D polyN=%d", polyN); + + openCLExecuteKernel(Context::getContext(), &optical_flow_farneback, kernelName, + globalThreads, localThreads, args, -1, -1, opt); +} + +static void updateMatricesOcl(const oclMat &flowx, const oclMat &flowy, const oclMat &R0, const oclMat &R1, oclMat &M) +{ + string kernelName("updateMatrices"); + size_t localThreads[3] = { 32, 8, 1 }; + size_t globalThreads[3] = { divUp(flowx.cols, localThreads[0]) * localThreads[0], + divUp(flowx.rows, localThreads[1]) * localThreads[1], + 1 + }; + + std::vector< std::pair 
> args; + args.push_back(std::make_pair(sizeof(cl_mem), (void *)&M.data)); + args.push_back(std::make_pair(sizeof(cl_mem), (void *)&flowx.data)); + args.push_back(std::make_pair(sizeof(cl_mem), (void *)&flowy.data)); + args.push_back(std::make_pair(sizeof(cl_mem), (void *)&R0.data)); + args.push_back(std::make_pair(sizeof(cl_mem), (void *)&R1.data)); + args.push_back(std::make_pair(sizeof(cl_int), (void *)&flowx.rows)); + args.push_back(std::make_pair(sizeof(cl_int), (void *)&flowx.cols)); + args.push_back(std::make_pair(sizeof(cl_int), (void *)&M.step)); + args.push_back(std::make_pair(sizeof(cl_int), (void *)&flowx.step)); + args.push_back(std::make_pair(sizeof(cl_int), (void *)&flowy.step)); + args.push_back(std::make_pair(sizeof(cl_int), (void *)&R0.step)); + args.push_back(std::make_pair(sizeof(cl_int), (void *)&R1.step)); + + openCLExecuteKernel(Context::getContext(), &optical_flow_farneback, kernelName, + globalThreads, localThreads, args, -1, -1); +} + +static void boxFilter5Ocl(const oclMat &src, int ksizeHalf, oclMat &dst) +{ + string kernelName("boxFilter5"); + int height = src.rows / 5; + size_t localThreads[3] = { 256, 1, 1 }; + size_t globalThreads[3] = { divUp(src.cols, localThreads[0]) * localThreads[0], height, 1 }; + int smem_size = (localThreads[0] + 2*ksizeHalf) * 5 * sizeof(float); + + std::vector< std::pair > args; + args.push_back(std::make_pair(sizeof(cl_mem), (void *)&dst.data)); + args.push_back(std::make_pair(sizeof(cl_mem), (void *)&src.data)); + args.push_back(std::make_pair(smem_size, (void *)NULL)); + args.push_back(std::make_pair(sizeof(cl_int), (void *)&height)); + args.push_back(std::make_pair(sizeof(cl_int), (void *)&src.cols)); + args.push_back(std::make_pair(sizeof(cl_int), (void *)&dst.step)); + args.push_back(std::make_pair(sizeof(cl_int), (void *)&src.step)); + args.push_back(std::make_pair(sizeof(cl_int), (void *)&ksizeHalf)); + + openCLExecuteKernel(Context::getContext(), &optical_flow_farneback, kernelName, + globalThreads, localThreads, args, -1, -1); +} + +static void updateFlowOcl(const oclMat &M, oclMat &flowx, oclMat &flowy) +{ + string kernelName("updateFlow"); + int cols = divUp(flowx.cols, 4); + size_t localThreads[3] = { 32, 8, 1 }; + size_t globalThreads[3] = { divUp(cols, localThreads[0]) * localThreads[0], + divUp(flowx.rows, localThreads[1]) * localThreads[0], + 1 + }; + + std::vector< std::pair > args; + args.push_back(std::make_pair(sizeof(cl_mem), (void *)&flowx.data)); + args.push_back(std::make_pair(sizeof(cl_mem), (void *)&flowy.data)); + args.push_back(std::make_pair(sizeof(cl_mem), (void *)&M.data)); + args.push_back(std::make_pair(sizeof(cl_int), (void *)&flowx.rows)); + args.push_back(std::make_pair(sizeof(cl_int), (void *)&cols)); + args.push_back(std::make_pair(sizeof(cl_int), (void *)&flowx.step)); + args.push_back(std::make_pair(sizeof(cl_int), (void *)&flowy.step)); + args.push_back(std::make_pair(sizeof(cl_int), (void *)&M.step)); + + openCLExecuteKernel(Context::getContext(), &optical_flow_farneback, kernelName, + globalThreads, localThreads, args, -1, -1); +} + +static void gaussianBlur5Ocl(const oclMat &src, int ksizeHalf, oclMat &dst) +{ + string kernelName("gaussianBlur5"); + int height = src.rows / 5; + int width = src.cols; + size_t localThreads[3] = { 256, 1, 1 }; + size_t globalThreads[3] = { divUp(width, localThreads[0]) * localThreads[0], height, 1 }; + int smem_size = (localThreads[0] + 2*ksizeHalf) * 5 * sizeof(float); + + std::vector< std::pair > args; + args.push_back(std::make_pair(sizeof(cl_mem), (void 
*)&dst.data)); + args.push_back(std::make_pair(sizeof(cl_mem), (void *)&src.data)); + args.push_back(std::make_pair(sizeof(cl_mem), (void *)&gKer.data)); + args.push_back(std::make_pair(smem_size, (void *)NULL)); + args.push_back(std::make_pair(sizeof(cl_int), (void *)&height)); + args.push_back(std::make_pair(sizeof(cl_int), (void *)&width)); + args.push_back(std::make_pair(sizeof(cl_int), (void *)&dst.step)); + args.push_back(std::make_pair(sizeof(cl_int), (void *)&src.step)); + args.push_back(std::make_pair(sizeof(cl_int), (void *)&ksizeHalf)); + + openCLExecuteKernel(Context::getContext(), &optical_flow_farneback, kernelName, + globalThreads, localThreads, args, -1, -1); +} +} +} +} // namespace cv { namespace ocl { namespace optflow_farneback + +static oclMat allocMatFromBuf(int rows, int cols, int type, oclMat &mat) +{ + if (!mat.empty() && mat.type() == type && mat.rows >= rows && mat.cols >= cols) + return mat(Rect(0, 0, cols, rows)); + return mat = oclMat(rows, cols, type); +} + +cv::ocl::FarnebackOpticalFlow::FarnebackOpticalFlow() +{ + numLevels = 5; + pyrScale = 0.5; + fastPyramids = false; + winSize = 13; + numIters = 10; + polyN = 5; + polySigma = 1.1; + flags = 0; +} + +void cv::ocl::FarnebackOpticalFlow::releaseMemory() +{ + frames_[0].release(); + frames_[1].release(); + pyrLevel_[0].release(); + pyrLevel_[1].release(); + M_.release(); + bufM_.release(); + R_[0].release(); + R_[1].release(); + blurredFrame_[0].release(); + blurredFrame_[1].release(); + pyramid0_.clear(); + pyramid1_.clear(); +} + +void cv::ocl::FarnebackOpticalFlow::prepareGaussian( + int n, double sigma, float *g, float *xg, float *xxg, + double &ig11, double &ig03, double &ig33, double &ig55) +{ + double s = 0.; + for (int x = -n; x <= n; x++) + { + g[x] = (float)std::exp(-x*x/(2*sigma*sigma)); + s += g[x]; + } + + s = 1./s; + for (int x = -n; x <= n; x++) + { + g[x] = (float)(g[x]*s); + xg[x] = (float)(x*g[x]); + xxg[x] = (float)(x*x*g[x]); + } + + Mat_ G(6, 6); + G.setTo(0); + + for (int y = -n; y <= n; y++) + { + for (int x = -n; x <= n; x++) + { + G(0,0) += g[y]*g[x]; + G(1,1) += g[y]*g[x]*x*x; + G(3,3) += g[y]*g[x]*x*x*x*x; + G(5,5) += g[y]*g[x]*x*x*y*y; + } + } + + //G[0][0] = 1.; + G(2,2) = G(0,3) = G(0,4) = G(3,0) = G(4,0) = G(1,1); + G(4,4) = G(3,3); + G(3,4) = G(4,3) = G(5,5); + + // invG: + // [ x e e ] + // [ y ] + // [ y ] + // [ e z ] + // [ e z ] + // [ u ] + Mat_ invG = G.inv(DECOMP_CHOLESKY); + + ig11 = invG(1,1); + ig03 = invG(0,3); + ig33 = invG(3,3); + ig55 = invG(5,5); +} + +void cv::ocl::FarnebackOpticalFlow::setPolynomialExpansionConsts(int n, double sigma) +{ + vector buf(n*6 + 3); + float* g = &buf[0] + n; + float* xg = g + n*2 + 1; + float* xxg = xg + n*2 + 1; + + if (sigma < FLT_EPSILON) + sigma = n*0.3; + + double ig11, ig03, ig33, ig55; + prepareGaussian(n, sigma, g, xg, xxg, ig11, ig03, ig33, ig55); + + cv::Mat t_g(1, n + 1, CV_32FC1, g); + cv::Mat t_xg(1, n + 1, CV_32FC1, xg); + cv::Mat t_xxg(1, n + 1, CV_32FC1, xxg); + + optflow_farneback::g.upload(t_g); + optflow_farneback::xg.upload(t_xg); + optflow_farneback::xxg.upload(t_xxg); + + optflow_farneback::ig[0] = static_cast(ig11); + optflow_farneback::ig[1] = static_cast(ig03); + optflow_farneback::ig[2] = static_cast(ig33); + optflow_farneback::ig[3] = static_cast(ig55); +} + +void cv::ocl::FarnebackOpticalFlow::updateFlow_boxFilter( + const oclMat& R0, const oclMat& R1, oclMat& flowx, oclMat &flowy, + oclMat& M, oclMat &bufM, int blockSize, bool updateMatrices) +{ + optflow_farneback::boxFilter5Ocl(M, blockSize/2, bufM); 
+ + swap(M, bufM); + + finish(); + + optflow_farneback::updateFlowOcl(M, flowx, flowy); + + if (updateMatrices) + optflow_farneback::updateMatricesOcl(flowx, flowy, R0, R1, M); +} + + +void cv::ocl::FarnebackOpticalFlow::updateFlow_gaussianBlur( + const oclMat& R0, const oclMat& R1, oclMat& flowx, oclMat& flowy, + oclMat& M, oclMat &bufM, int blockSize, bool updateMatrices) +{ + optflow_farneback::gaussianBlur5Ocl(M, blockSize/2, bufM); + + swap(M, bufM); + + optflow_farneback::updateFlowOcl(M, flowx, flowy); + + if (updateMatrices) + optflow_farneback::updateMatricesOcl(flowx, flowy, R0, R1, M); +} + + +void cv::ocl::FarnebackOpticalFlow::operator ()( + const oclMat &frame0, const oclMat &frame1, oclMat &flowx, oclMat &flowy) +{ + CV_Assert(frame0.channels() == 1 && frame1.channels() == 1); + CV_Assert(frame0.size() == frame1.size()); + CV_Assert(polyN == 5 || polyN == 7); + CV_Assert(!fastPyramids || std::abs(pyrScale - 0.5) < 1e-6); + + Size size = frame0.size(); + oclMat prevFlowX, prevFlowY, curFlowX, curFlowY; + + flowx.create(size, CV_32F); + flowy.create(size, CV_32F); + oclMat flowx0 = flowx; + oclMat flowy0 = flowy; + + // Crop unnecessary levels + double scale = 1; + int numLevelsCropped = 0; + for (; numLevelsCropped < numLevels; numLevelsCropped++) + { + scale *= pyrScale; + if (size.width*scale < MIN_SIZE || size.height*scale < MIN_SIZE) + break; + } + + frame0.convertTo(frames_[0], CV_32F); + frame1.convertTo(frames_[1], CV_32F); + + if (fastPyramids) + { + // Build Gaussian pyramids using pyrDown() + pyramid0_.resize(numLevelsCropped + 1); + pyramid1_.resize(numLevelsCropped + 1); + pyramid0_[0] = frames_[0]; + pyramid1_[0] = frames_[1]; + for (int i = 1; i <= numLevelsCropped; ++i) + { + pyrDown(pyramid0_[i - 1], pyramid0_[i]); + pyrDown(pyramid1_[i - 1], pyramid1_[i]); + } + } + + setPolynomialExpansionConsts(polyN, polySigma); + + for (int k = numLevelsCropped; k >= 0; k--) + { + scale = 1; + for (int i = 0; i < k; i++) + scale *= pyrScale; + + double sigma = (1./scale - 1) * 0.5; + int smoothSize = cvRound(sigma*5) | 1; + smoothSize = std::max(smoothSize, 3); + + int width = cvRound(size.width*scale); + int height = cvRound(size.height*scale); + + if (fastPyramids) + { + width = pyramid0_[k].cols; + height = pyramid0_[k].rows; + } + + if (k > 0) + { + curFlowX.create(height, width, CV_32F); + curFlowY.create(height, width, CV_32F); + } + else + { + curFlowX = flowx0; + curFlowY = flowy0; + } + + if (!prevFlowX.data) + { + if (flags & cv::OPTFLOW_USE_INITIAL_FLOW) + { + resize(flowx0, curFlowX, Size(width, height), 0, 0, INTER_LINEAR); + resize(flowy0, curFlowY, Size(width, height), 0, 0, INTER_LINEAR); + multiply(scale, curFlowX, curFlowX); + multiply(scale, curFlowY, curFlowY); + } + else + { + curFlowX.setTo(0); + curFlowY.setTo(0); + } + } + else + { + resize(prevFlowX, curFlowX, Size(width, height), 0, 0, INTER_LINEAR); + resize(prevFlowY, curFlowY, Size(width, height), 0, 0, INTER_LINEAR); + multiply(1./pyrScale, curFlowX, curFlowX); + multiply(1./pyrScale, curFlowY, curFlowY); + } + + oclMat M = allocMatFromBuf(5*height, width, CV_32F, M_); + oclMat bufM = allocMatFromBuf(5*height, width, CV_32F, bufM_); + oclMat R[2] = + { + allocMatFromBuf(5*height, width, CV_32F, R_[0]), + allocMatFromBuf(5*height, width, CV_32F, R_[1]) + }; + + if (fastPyramids) + { + optflow_farneback::polynomialExpansionOcl(pyramid0_[k], polyN, R[0]); + optflow_farneback::polynomialExpansionOcl(pyramid1_[k], polyN, R[1]); + } + else + { + oclMat blurredFrame[2] = + { + 
allocMatFromBuf(size.height, size.width, CV_32F, blurredFrame_[0]), + allocMatFromBuf(size.height, size.width, CV_32F, blurredFrame_[1]) + }; + oclMat pyrLevel[2] = + { + allocMatFromBuf(height, width, CV_32F, pyrLevel_[0]), + allocMatFromBuf(height, width, CV_32F, pyrLevel_[1]) + }; + + Mat g = getGaussianKernel(smoothSize, sigma, CV_32F); + optflow_farneback::setGaussianBlurKernel(g.ptr(smoothSize/2), smoothSize/2); + + for (int i = 0; i < 2; i++) + { + optflow_farneback::gaussianBlurOcl(frames_[i], smoothSize/2, blurredFrame[i]); + resize(blurredFrame[i], pyrLevel[i], Size(width, height), INTER_LINEAR); + optflow_farneback::polynomialExpansionOcl(pyrLevel[i], polyN, R[i]); + } + } + + optflow_farneback::updateMatricesOcl(curFlowX, curFlowY, R[0], R[1], M); + + if (flags & OPTFLOW_FARNEBACK_GAUSSIAN) + { + Mat g = getGaussianKernel(winSize, winSize/2*0.3f, CV_32F); + optflow_farneback::setGaussianBlurKernel(g.ptr(winSize/2), winSize/2); + } + for (int i = 0; i < numIters; i++) + { + if (flags & OPTFLOW_FARNEBACK_GAUSSIAN) + updateFlow_gaussianBlur(R[0], R[1], curFlowX, curFlowY, M, bufM, winSize, i < numIters-1); + else + updateFlow_boxFilter(R[0], R[1], curFlowX, curFlowY, M, bufM, winSize, i < numIters-1); + } + + prevFlowX = curFlowX; + prevFlowY = curFlowY; + } + + flowx = curFlowX; + flowy = curFlowY; +} diff --git a/modules/ocl/test/main.cpp b/modules/ocl/test/main.cpp index 3b16cffdd1..5f7b2bae61 100644 --- a/modules/ocl/test/main.cpp +++ b/modules/ocl/test/main.cpp @@ -117,6 +117,8 @@ int main(int argc, char **argv) } setDevice(oclinfo[pid], device); + setBinaryDiskCache(CACHE_UPDATE); + cout << "Platform name:" << oclinfo[pid].PlatformName << endl; cout << "Device type:" << type << endl << "Device name:" << oclinfo[pid].DeviceName[device] << endl; return RUN_ALL_TESTS(); diff --git a/modules/ocl/test/test_imgproc.cpp b/modules/ocl/test/test_imgproc.cpp index 2fb29318fa..2734871d17 100644 --- a/modules/ocl/test/test_imgproc.cpp +++ b/modules/ocl/test/test_imgproc.cpp @@ -1425,7 +1425,7 @@ PARAM_TEST_CASE(CLAHE, cv::Size, ClipLimit) TEST_P(CLAHE, Accuracy) { - cv::Ptr clahe = cv::ocl::createCLAHE(clipLimit); + cv::Ptr clahe = cv::ocl::createCLAHE(clipLimit); clahe->apply(g_src, g_dst); cv::Mat dst(g_dst); diff --git a/modules/ocl/test/test_moments.cpp b/modules/ocl/test/test_moments.cpp index 23a9a8eb2b..16237a05f5 100644 --- a/modules/ocl/test/test_moments.cpp +++ b/modules/ocl/test/test_moments.cpp @@ -44,12 +44,12 @@ TEST_P(MomentsTest, Mat) { if(test_contours) { - Mat src = imread( workdir + "../cpp/pic3.png", 1 ); - Mat src_gray, canny_output; - cvtColor( src, src_gray, COLOR_BGR2GRAY ); + Mat src = imread( workdir + "../cpp/pic3.png", IMREAD_GRAYSCALE ); + ASSERT_FALSE(src.empty()); + Mat canny_output; vector > contours; vector hierarchy; - Canny( src_gray, canny_output, 100, 200, 3 ); + Canny( src, canny_output, 100, 200, 3 ); findContours( canny_output, contours, hierarchy, RETR_TREE, CHAIN_APPROX_SIMPLE, Point(0, 0) ); for( size_t i = 0; i < contours.size(); i++ ) { diff --git a/modules/ocl/test/test_objdetect.cpp b/modules/ocl/test/test_objdetect.cpp index ad35270c92..91ec165d6b 100644 --- a/modules/ocl/test/test_objdetect.cpp +++ b/modules/ocl/test/test_objdetect.cpp @@ -149,17 +149,17 @@ TEST_P(HOG, Detect) if (winSize.width == 48 && winSize.height == 96) { // daimler's base - ocl_hog.setSVMDetector(ocl_hog.getPeopleDetector48x96()); + ocl_hog.setSVMDetector(hog.getDaimlerPeopleDetector()); hog.setSVMDetector(hog.getDaimlerPeopleDetector()); } else if (winSize.width == 
64 && winSize.height == 128) { - ocl_hog.setSVMDetector(ocl_hog.getPeopleDetector64x128()); + ocl_hog.setSVMDetector(hog.getDefaultPeopleDetector()); hog.setSVMDetector(hog.getDefaultPeopleDetector()); } else { - ocl_hog.setSVMDetector(ocl_hog.getDefaultPeopleDetector()); + ocl_hog.setSVMDetector(hog.getDefaultPeopleDetector()); hog.setSVMDetector(hog.getDefaultPeopleDetector()); } diff --git a/modules/ocl/test/test_optflow.cpp b/modules/ocl/test/test_optflow.cpp index 0121be8f9e..34adb352c2 100644 --- a/modules/ocl/test/test_optflow.cpp +++ b/modules/ocl/test/test_optflow.cpp @@ -272,6 +272,78 @@ TEST_P(Sparse, Mat) INSTANTIATE_TEST_CASE_P(OCL_Video, Sparse, Combine( Values(false, true), Values(false, true))); +////////////////////////////////////////////////////// +// FarnebackOpticalFlow + +namespace +{ + IMPLEMENT_PARAM_CLASS(PyrScale, double) + IMPLEMENT_PARAM_CLASS(PolyN, int) + CV_FLAGS(FarnebackOptFlowFlags, 0, OPTFLOW_FARNEBACK_GAUSSIAN) + IMPLEMENT_PARAM_CLASS(UseInitFlow, bool) +} + +PARAM_TEST_CASE(Farneback, PyrScale, PolyN, FarnebackOptFlowFlags, UseInitFlow) +{ + double pyrScale; + int polyN; + int flags; + bool useInitFlow; + + virtual void SetUp() + { + pyrScale = GET_PARAM(0); + polyN = GET_PARAM(1); + flags = GET_PARAM(2); + useInitFlow = GET_PARAM(3); + } +}; + +TEST_P(Farneback, Accuracy) +{ + cv::Mat frame0 = imread(workdir + "/rubberwhale1.png", cv::IMREAD_GRAYSCALE); + ASSERT_FALSE(frame0.empty()); + + cv::Mat frame1 = imread(workdir + "/rubberwhale2.png", cv::IMREAD_GRAYSCALE); + ASSERT_FALSE(frame1.empty()); + + double polySigma = polyN <= 5 ? 1.1 : 1.5; + + cv::ocl::FarnebackOpticalFlow farn; + farn.pyrScale = pyrScale; + farn.polyN = polyN; + farn.polySigma = polySigma; + farn.flags = flags; + + cv::ocl::oclMat d_flowx, d_flowy; + farn(oclMat(frame0), oclMat(frame1), d_flowx, d_flowy); + + cv::Mat flow; + if (useInitFlow) + { + cv::Mat flowxy[] = {cv::Mat(d_flowx), cv::Mat(d_flowy)}; + cv::merge(flowxy, 2, flow); + + farn.flags |= cv::OPTFLOW_USE_INITIAL_FLOW; + farn(oclMat(frame0), oclMat(frame1), d_flowx, d_flowy); + } + + cv::calcOpticalFlowFarneback( + frame0, frame1, flow, farn.pyrScale, farn.numLevels, farn.winSize, + farn.numIters, farn.polyN, farn.polySigma, farn.flags); + + std::vector flowxy; + cv::split(flow, flowxy); + + EXPECT_MAT_SIMILAR(flowxy[0], d_flowx, 0.1); + EXPECT_MAT_SIMILAR(flowxy[1], d_flowy, 0.1); +} + +INSTANTIATE_TEST_CASE_P(OCL_Video, Farneback, testing::Combine( + testing::Values(PyrScale(0.3), PyrScale(0.5), PyrScale(0.8)), + testing::Values(PolyN(5), PolyN(7)), + testing::Values(FarnebackOptFlowFlags(0), FarnebackOptFlowFlags(cv::OPTFLOW_FARNEBACK_GAUSSIAN)), + testing::Values(UseInitFlow(false), UseInitFlow(true)))); #endif // HAVE_OPENCL diff --git a/modules/superres/perf/perf_main.cpp b/modules/superres/perf/perf_main.cpp index adc69e6e8b..0a8ab5deaa 100644 --- a/modules/superres/perf/perf_main.cpp +++ b/modules/superres/perf/perf_main.cpp @@ -44,4 +44,11 @@ using namespace perf; -CV_PERF_TEST_MAIN(superres, printCudaInfo()) +static const char * impls[] = { +#ifdef HAVE_CUDA + "cuda", +#endif + "plain" +}; + +CV_PERF_TEST_MAIN_WITH_IMPLS(superres, impls, printCudaInfo()) diff --git a/modules/ts/include/opencv2/ts/ts_gtest.h b/modules/ts/include/opencv2/ts/ts_gtest.h index 3ccf48584a..60f30b8f20 100644 --- a/modules/ts/include/opencv2/ts/ts_gtest.h +++ b/modules/ts/include/opencv2/ts/ts_gtest.h @@ -17566,6 +17566,9 @@ GTEST_DECLARE_string_(color); // the tests to run. If the filter is not given all tests are executed. 
GTEST_DECLARE_string_(filter); +// OpenCV extension: same as filter, but for the parameters string. +GTEST_DECLARE_string_(param_filter); + // This flag causes the Google Test to list tests. None of the tests listed // are actually run if the flag is provided. GTEST_DECLARE_bool_(list_tests); diff --git a/modules/ts/include/opencv2/ts/ts_perf.hpp b/modules/ts/include/opencv2/ts/ts_perf.hpp index 2faba1263b..29e440d8db 100644 --- a/modules/ts/include/opencv2/ts/ts_perf.hpp +++ b/modules/ts/include/opencv2/ts/ts_perf.hpp @@ -218,8 +218,7 @@ public: static bool targetDevice(); }; -# define PERF_RUN_GPU() ::perf::GpuPerf::targetDevice() - +#define PERF_RUN_GPU() ::perf::GpuPerf::targetDevice() /*****************************************************************************************\ * Container for performance metrics * @@ -261,7 +260,11 @@ public: TestBase(); static void Init(int argc, const char* const argv[]); + static void Init(const std::vector & availableImpls, + int argc, const char* const argv[]); + static void RecordRunParameters(); static std::string getDataPath(const std::string& relativePath); + static std::string getSelectedImpl(); protected: virtual void PerfTestBody() = 0; @@ -475,15 +478,29 @@ CV_EXPORTS void PrintTo(const Size& sz, ::std::ostream* os); void fixture##_##name::PerfTestBody() -#define CV_PERF_TEST_MAIN(testsuitname, ...) \ -int main(int argc, char **argv)\ -{\ +#define CV_PERF_TEST_MAIN_INTERNALS(modulename, impls, ...) \ while (++argc >= (--argc,-1)) {__VA_ARGS__; break;} /*this ugly construction is needed for VS 2005*/\ - ::perf::Regression::Init(#testsuitname);\ - ::perf::TestBase::Init(argc, argv);\ + ::perf::Regression::Init(#modulename);\ + ::perf::TestBase::Init(std::vector(impls, impls + sizeof impls / sizeof *impls),\ + argc, argv);\ ::testing::InitGoogleTest(&argc, argv);\ cvtest::printVersionInfo();\ - return RUN_ALL_TESTS();\ + ::testing::Test::RecordProperty("cv_module_name", #modulename);\ + ::perf::TestBase::RecordRunParameters();\ + return RUN_ALL_TESTS(); + +// impls must be an array, not a pointer; "plain" should always be one of the implementations +#define CV_PERF_TEST_MAIN_WITH_IMPLS(modulename, impls, ...) \ +int main(int argc, char **argv)\ +{\ + CV_PERF_TEST_MAIN_INTERNALS(modulename, impls, __VA_ARGS__)\ +} + +#define CV_PERF_TEST_MAIN(modulename, ...) \ +int main(int argc, char **argv)\ +{\ + const char * plain_only[] = { "plain" };\ + CV_PERF_TEST_MAIN_INTERNALS(modulename, plain_only, __VA_ARGS__)\ } #define TEST_CYCLE_N(n) for(declare.iterations(n); startTimer(), next(); stopTimer()) diff --git a/modules/ts/misc/testlog_parser.py b/modules/ts/misc/testlog_parser.py index 8ab21417ca..5d478645b2 100755 --- a/modules/ts/misc/testlog_parser.py +++ b/modules/ts/misc/testlog_parser.py @@ -1,6 +1,9 @@ #!/usr/bin/env python -import sys, re, os.path +import collections +import re +import os.path +import sys from xml.dom.minidom import parse class TestInfo(object): @@ -159,12 +162,31 @@ class TestInfo(object): return 1 return 0 +# This is a Sequence for compatibility with old scripts, +# which treat parseLogFile's return value as a list. 
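As the comment above points out, parseLogFile() now returns a TestRunInfo object instead of a bare list: run.properties holds the cv_* attributes recorded in the XML root element (with the cv_ prefix stripped), while run.tests -- and the object itself, since it is a Sequence -- still behaves like the old list of TestInfo entries. A minimal sketch of both access styles follows; the log file name and the 'implementation' property are hypothetical, and 'module_name' is the property written by the new CV_PERF_TEST_MAIN macros:

    from testlog_parser import parseLogFile

    run = parseLogFile("opencv_perf_core.xml")    # hypothetical file name

    # New-style access: run-level properties recorded in the XML root element.
    print run.properties["module_name"]
    print run.properties.get("implementation")    # hypothetical property name

    # Old-style access keeps working, because TestRunInfo is a Sequence:
    print len(run)
    for test in sorted(run.tests):
        test.dump()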
+class TestRunInfo(collections.Sequence): + def __init__(self, properties, tests): + self.properties = properties + self.tests = tests + + def __len__(self): + return len(self.tests) + + def __getitem__(self, key): + return self.tests[key] + def parseLogFile(filename): - tests = [] log = parse(filename) - for case in log.getElementsByTagName("testcase"): - tests.append(TestInfo(case)) - return tests + + properties = { + attr_name[3:]: attr_value + for (attr_name, attr_value) in log.documentElement.attributes.items() + if attr_name.startswith('cv_') + } + + tests = map(TestInfo, log.getElementsByTagName("testcase")) + + return TestRunInfo(properties, tests) if __name__ == "__main__": @@ -173,8 +195,18 @@ if __name__ == "__main__": exit(0) for arg in sys.argv[1:]: - print "Tests found in", arg - tests = parseLogFile(arg) - for t in sorted(tests): + print "Processing {}...".format(arg) + + run = parseLogFile(arg) + + print "Properties:" + + for (prop_name, prop_value) in run.properties.items(): + print "\t{} = {}".format(prop_name, prop_value) + + print "Tests:" + + for t in sorted(run.tests): t.dump() + print diff --git a/modules/ts/misc/xls-report.py b/modules/ts/misc/xls-report.py index e79bb123dd..e911314e92 100755 --- a/modules/ts/misc/xls-report.py +++ b/modules/ts/misc/xls-report.py @@ -1,8 +1,73 @@ #!/usr/bin/env python +""" + This script can generate XLS reports from OpenCV tests' XML output files. + + To use it, first, create a directory for each machine you ran tests on. + Each such directory will become a sheet in the report. Put each XML file + into the corresponding directory. + + Then, create your configuration file(s). You can have a global configuration + file (specified with the -c option), and per-sheet configuration files, which + must be called sheet.conf and placed in the directory corresponding to the sheet. + The settings in the per-sheet configuration file will override those in the + global configuration file, if both are present. + + A configuration file must consist of a Python dictionary. The following keys + will be recognized: + + * 'comparisons': [{'from': string, 'to': string}] + List of configurations to compare performance between. For each item, + the sheet will have a column showing speedup from configuration named + 'from' to configuration named "to". + + * 'configuration_matchers': [{'properties': {string: object}, 'name': string}] + Instructions for matching test run property sets to configuration names. + + For each found XML file: + + 1) All attributes of the root element starting with the prefix 'cv_' are + placed in a dictionary, with the cv_ prefix stripped and the cv_module_name + element deleted. + + 2) The first matcher for which the XML's file property set contains the same + keys with equal values as its 'properties' dictionary is searched for. + A missing property can be matched by using None as the value. + + Corollary 1: you should place more specific matchers before less specific + ones. + + Corollary 2: an empty 'properties' dictionary matches every property set. + + 3) If a matching matcher is found, its 'name' string is presumed to be the name + of the configuration the XML file corresponds to. Otherwise, a warning is + printed. A warning is also printed if two different property sets match to the + same configuration name. + + * 'configurations': [string] + List of names for compile-time and runtime configurations of OpenCV. + Each item will correspond to a column of the sheet. 
+ + * 'module_colors': {string: string} + Mapping from module name to color name. In the sheet, cells containing module + names from this mapping will be colored with the corresponding color. You can + find the list of available colors here: + . + + * 'sheet_name': string + Name for the sheet. If this parameter is missing, the name of sheet's directory + will be used. + + Note that all keys are optional, although to get useful results, you'll want to + specify at least 'configurations' and 'configuration_matchers'. + + Finally, run the script. Use the --help option for usage information. +""" + from __future__ import division import ast +import fnmatch import logging import numbers import os, os.path @@ -17,21 +82,6 @@ import xlwt from testlog_parser import parseLogFile -# To build XLS report you neet to put your xmls (OpenCV tests output) in the -# following way: -# -# "root" --- folder, representing the whole XLS document. It contains several -# subfolders --- sheet-paths of the XLS document. Each sheet-path contains it's -# subfolders --- config-paths. Config-paths are columns of the sheet and -# they contains xmls files --- output of OpenCV modules testing. -# Config-path means OpenCV build configuration, including different -# options such as NEON, TBB, GPU enabling/disabling. -# -# root -# root\sheet_path -# root\sheet_path\configuration1 (column 1) -# root\sheet_path\configuration2 (column 2) - re_image_size = re.compile(r'^ \d+ x \d+$', re.VERBOSE) re_data_type = re.compile(r'^ (?: 8 | 16 | 32 | 64 ) [USF] C [1234] $', re.VERBOSE) @@ -45,15 +95,64 @@ no_speedup_style = no_time_style error_speedup_style = xlwt.easyxf('pattern: pattern solid, fore_color orange') header_style = xlwt.easyxf('font: bold true; alignment: horizontal centre, vertical top, wrap True') -def collect_xml(collection, configuration, xml_fullname): - xml_fname = os.path.split(xml_fullname)[1] - module = xml_fname[:xml_fname.index('_')] +class Collector(object): + def __init__(self, config_match_func): + self.__config_cache = {} + self.config_match_func = config_match_func + self.tests = {} + + # Format a sorted sequence of pairs as if it was a dictionary. + # We can't just use a dictionary instead, since we want to preserve the sorted order of the keys. 
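As a purely illustrative companion to the docstring at the top of this script, a sheet.conf might look like the dictionary below. The configuration names are made up; the 'implementation' and 'parallel_framework' keys correspond to the cv_-prefixed properties that the test binaries record, and None is used to match a property that is absent:

    {
        'sheet_name': 'ubuntu-x64',
        'configurations': ['plain', 'tbb'],
        'configuration_matchers': [
            # more specific matcher first (see Corollary 1 above)
            {'properties': {'implementation': 'plain', 'parallel_framework': 'tbb'}, 'name': 'tbb'},
            {'properties': {'implementation': 'plain', 'parallel_framework': None}, 'name': 'plain'},
        ],
        'comparisons': [{'from': 'plain', 'to': 'tbb'}],
        'module_colors': {'core': 'yellow'},
    }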
+ @staticmethod + def __format_config_cache_key(pairs): + return '{' + ', '.join(repr(k) + ': ' + repr(v) for (k, v) in pairs) + '}' + + def collect_from(self, xml_path): + run = parseLogFile(xml_path) + + module = run.properties['module_name'] + + properties = run.properties.copy() + del properties['module_name'] + + props_key = tuple(sorted(properties.iteritems())) # dicts can't be keys + + if props_key in self.__config_cache: + configuration = self.__config_cache[props_key] + else: + configuration = self.config_match_func(properties) + + if configuration is None: + logging.warning('failed to match properties to a configuration: %s', + Collector.__format_config_cache_key(props_key)) + else: + same_config_props = [it[0] for it in self.__config_cache.iteritems() if it[1] == configuration] + if len(same_config_props) > 0: + logging.warning('property set %s matches the same configuration %r as property set %s', + Collector.__format_config_cache_key(props_key), + configuration, + Collector.__format_config_cache_key(same_config_props[0])) + + self.__config_cache[props_key] = configuration + + if configuration is None: return - module_tests = collection.setdefault(module, OrderedDict()) + module_tests = self.tests.setdefault(module, OrderedDict()) - for test in sorted(parseLogFile(xml_fullname)): - test_results = module_tests.setdefault((test.shortName(), test.param()), {}) - test_results[configuration] = test.get("gmean") if test.status == 'run' else test.status + for test in run.tests: + test_results = module_tests.setdefault((test.shortName(), test.param()), {}) + test_results[configuration] = test.get("gmean") if test.status == 'run' else test.status + +def make_match_func(matchers): + def match_func(properties): + for matcher in matchers: + if all(properties.get(name) == value + for (name, value) in matcher['properties'].iteritems()): + return matcher['name'] + + return None + + return match_func def main(): arg_parser = ArgumentParser(description='Build an XLS performance report.') @@ -83,23 +182,15 @@ def main(): sheet_conf = dict(global_conf.items() + sheet_conf.items()) - if 'configurations' in sheet_conf: - config_names = sheet_conf['configurations'] - else: - try: - config_names = [p for p in os.listdir(sheet_path) - if os.path.isdir(os.path.join(sheet_path, p))] - except Exception as e: - logging.warning('error while determining configuration names for %s: %s', sheet_path, e) - continue + config_names = sheet_conf.get('configurations', []) + config_matchers = sheet_conf.get('configuration_matchers', []) - collection = {} + collector = Collector(make_match_func(config_matchers)) - for configuration, configuration_path in \ - [(c, os.path.join(sheet_path, c)) for c in config_names]: - logging.info('processing %s', configuration_path) - for xml_fullname in glob(os.path.join(configuration_path, '*.xml')): - collect_xml(collection, configuration, xml_fullname) + for root, _, filenames in os.walk(sheet_path): + logging.info('looking in %s', root) + for filename in fnmatch.filter(filenames, '*.xml'): + collector.collect_from(os.path.join(root, filename)) sheet = wb.add_sheet(sheet_conf.get('sheet_name', os.path.basename(os.path.abspath(sheet_path)))) @@ -126,7 +217,7 @@ def main(): module_styles = {module: xlwt.easyxf('pattern: pattern solid, fore_color {}'.format(color)) for module, color in module_colors.iteritems()} - for module, tests in sorted(collection.iteritems()): + for module, tests in sorted(collector.tests.iteritems()): for ((test, param), configs) in tests.iteritems(): 
sheet.write(row, 0, module, module_styles.get(module, xlwt.Style.default_style)) sheet.write(row, 1, test) diff --git a/modules/ts/src/precomp.hpp b/modules/ts/src/precomp.hpp index 3fef1804a7..d719472d02 100644 --- a/modules/ts/src/precomp.hpp +++ b/modules/ts/src/precomp.hpp @@ -1,6 +1,6 @@ -#include "opencv2/ts.hpp" #include "opencv2/core/utility.hpp" #include "opencv2/core/private.hpp" +#include "opencv2/ts.hpp" #ifdef GTEST_LINKED_AS_SHARED_LIBRARY #error ts module should not have GTEST_LINKED_AS_SHARED_LIBRARY defined diff --git a/modules/ts/src/ts_func.cpp b/modules/ts/src/ts_func.cpp index 3b1c7cac29..35482f3124 100644 --- a/modules/ts/src/ts_func.cpp +++ b/modules/ts/src/ts_func.cpp @@ -2968,13 +2968,12 @@ void printVersionInfo(bool useStdOut) if(useStdOut) std::cout << "Inner VCS version: " << ver << std::endl; } -#ifdef CV_PARALLEL_FRAMEWORK - ::testing::Test::RecordProperty("cv_parallel_framework", CV_PARALLEL_FRAMEWORK); - if (useStdOut) - { - std::cout << "Parallel framework: " << CV_PARALLEL_FRAMEWORK << std::endl; + const char* parallel_framework = currentParallelFramework(); + + if (parallel_framework) { + ::testing::Test::RecordProperty("cv_parallel_framework", parallel_framework); + if (useStdOut) std::cout << "Parallel framework: " << parallel_framework << std::endl; } -#endif std::string cpu_features; diff --git a/modules/ts/src/ts_gtest.cpp b/modules/ts/src/ts_gtest.cpp index 7c388cbd4a..48870913c3 100644 --- a/modules/ts/src/ts_gtest.cpp +++ b/modules/ts/src/ts_gtest.cpp @@ -497,6 +497,7 @@ const char kBreakOnFailureFlag[] = "break_on_failure"; const char kCatchExceptionsFlag[] = "catch_exceptions"; const char kColorFlag[] = "color"; const char kFilterFlag[] = "filter"; +const char kParamFilterFlag[] = "param_filter"; const char kListTestsFlag[] = "list_tests"; const char kOutputFlag[] = "output"; const char kPrintTimeFlag[] = "print_time"; @@ -575,6 +576,7 @@ class GTestFlagSaver { death_test_style_ = GTEST_FLAG(death_test_style); death_test_use_fork_ = GTEST_FLAG(death_test_use_fork); filter_ = GTEST_FLAG(filter); + param_filter_ = GTEST_FLAG(param_filter); internal_run_death_test_ = GTEST_FLAG(internal_run_death_test); list_tests_ = GTEST_FLAG(list_tests); output_ = GTEST_FLAG(output); @@ -596,6 +598,7 @@ class GTestFlagSaver { GTEST_FLAG(death_test_style) = death_test_style_; GTEST_FLAG(death_test_use_fork) = death_test_use_fork_; GTEST_FLAG(filter) = filter_; + GTEST_FLAG(param_filter) = param_filter_; GTEST_FLAG(internal_run_death_test) = internal_run_death_test_; GTEST_FLAG(list_tests) = list_tests_; GTEST_FLAG(output) = output_; @@ -617,6 +620,7 @@ class GTestFlagSaver { std::string death_test_style_; bool death_test_use_fork_; std::string filter_; + std::string param_filter_; std::string internal_run_death_test_; bool list_tests_; std::string output_; @@ -1699,6 +1703,12 @@ GTEST_DEFINE_string_( "exclude). A test is run if it matches one of the positive " "patterns and does not match any of the negative patterns."); +GTEST_DEFINE_string_( + param_filter, + internal::StringFromGTestEnv("param_filter", kUniversalFilter), + "Same syntax and semantics as for param, but these patterns " + "have to match the test's parameters."); + GTEST_DEFINE_bool_(list_tests, false, "List all tests without running them."); @@ -4188,6 +4198,14 @@ void PrettyUnitTestResultPrinter::OnTestIterationStart( "Note: %s filter = %s\n", GTEST_NAME_, filter); } + const char* const param_filter = GTEST_FLAG(param_filter).c_str(); + + // Ditto. 
+ if (!String::CStringEquals(param_filter, kUniversalFilter)) { + ColoredPrintf(COLOR_YELLOW, + "Note: %s parameter filter = %s\n", GTEST_NAME_, param_filter); + } + if (internal::ShouldShard(kTestTotalShards, kTestShardIndex, false)) { const Int32 shard_index = Int32FromEnvOrDie(kTestShardIndex, -1); ColoredPrintf(COLOR_YELLOW, @@ -5873,9 +5891,15 @@ int UnitTestImpl::FilterTests(ReactionToSharding shard_tests) { kDisableTestFilter); test_info->is_disabled_ = is_disabled; + const std::string value_param(test_info->value_param() == NULL ? + "" : test_info->value_param()); + const bool matches_filter = internal::UnitTestOptions::FilterMatchesTest(test_case_name, - test_name); + test_name) && + internal::UnitTestOptions::MatchesFilter(value_param, + GTEST_FLAG(param_filter).c_str()); + test_info->matches_filter_ = matches_filter; const bool is_runnable = @@ -6223,6 +6247,12 @@ static const char kColorEncodedHelpMessage[] = " Run only the tests whose name matches one of the positive patterns but\n" " none of the negative patterns. '?' matches any single character; '*'\n" " matches any substring; ':' separates two patterns.\n" +" @G--" GTEST_FLAG_PREFIX_ "param_filter=@YPOSITIVE_PATTERNS" + "[@G-@YNEGATIVE_PATTERNS]@D\n" +" Like @G--" GTEST_FLAG_PREFIX_ + "filter@D, but applies to the test's parameter. If a\n" +" test is not parameterized, its parameter is considered to be the\n" +" empty string.\n" " @G--" GTEST_FLAG_PREFIX_ "also_run_disabled_tests@D\n" " Run all disabled tests too.\n" "\n" @@ -6300,6 +6330,7 @@ void ParseGoogleTestFlagsOnlyImpl(int* argc, CharType** argv) { ParseBoolFlag(arg, kDeathTestUseFork, >EST_FLAG(death_test_use_fork)) || ParseStringFlag(arg, kFilterFlag, >EST_FLAG(filter)) || + ParseStringFlag(arg, kParamFilterFlag, >EST_FLAG(param_filter)) || ParseStringFlag(arg, kInternalRunDeathTestFlag, >EST_FLAG(internal_run_death_test)) || ParseBoolFlag(arg, kListTestsFlag, >EST_FLAG(list_tests)) || diff --git a/modules/ts/src/ts_perf.cpp b/modules/ts/src/ts_perf.cpp index cd5a239d57..aafdf30c30 100644 --- a/modules/ts/src/ts_perf.cpp +++ b/modules/ts/src/ts_perf.cpp @@ -14,30 +14,10 @@ int64 TestBase::timeLimitDefault = 0; unsigned int TestBase::iterationsLimitDefault = (unsigned int)(-1); int64 TestBase::_timeadjustment = 0; -const std::string command_line_keys = - "{ perf_max_outliers |8 |percent of allowed outliers}" - "{ perf_min_samples |10 |minimal required numer of samples}" - "{ perf_force_samples |100 |force set maximum number of samples for all tests}" - "{ perf_seed |809564 |seed for random numbers generator}" - "{ perf_threads |-1 |the number of worker threads, if parallel execution is enabled}" - "{ perf_write_sanity | |create new records for sanity checks}" - "{ perf_verify_sanity | |fail tests having no regression data for sanity checks}" -#ifdef ANDROID - "{ perf_time_limit |6.0 |default time limit for a single test (in seconds)}" - "{ perf_affinity_mask |0 |set affinity mask for the main thread}" - "{ perf_log_power_checkpoints | |additional xml logging for power measurement}" -#else - "{ perf_time_limit |3.0 |default time limit for a single test (in seconds)}" -#endif - "{ perf_max_deviation |1.0 |}" - "{ help h | |print help info}" -#ifdef HAVE_CUDA - "{ perf_run_cpu |false |run GPU performance tests for analogical CPU functions}" - "{ perf_cuda_device |0 |run GPU test suite onto specific CUDA capable device}" - "{ perf_cuda_info_only |false |print an information about system and an available CUDA devices and then exit.}" -#endif -; +// Item [0] will be 
considered the default implementation. +static std::vector available_impls; +static std::string param_impl; static double param_max_outliers; static double param_max_deviation; static unsigned int param_min_samples; @@ -48,7 +28,6 @@ static int param_threads; static bool param_write_sanity; static bool param_verify_sanity; #ifdef HAVE_CUDA -static bool param_run_cpu; static int param_cuda_device; #endif @@ -573,11 +552,12 @@ Regression& Regression::operator() (const std::string& name, cv::InputArray arra std::string nodename = getCurrentTestNodeName(); -#ifdef HAVE_CUDA - static const std::string prefix = (param_run_cpu)? "CPU_" : "GPU_"; + // This is a hack for compatibility and it should eventually get removed. + // gpu's tests don't even have CPU sanity data anymore. if(suiteName == "gpu") - nodename = prefix + nodename; -#endif + { + nodename = (PERF_RUN_GPU() ? "GPU_" : "CPU_") + nodename; + } cv::FileNode n = rootIn[nodename]; if(n.isNone()) @@ -642,6 +622,43 @@ performance_metrics::performance_metrics() void TestBase::Init(int argc, const char* const argv[]) { + std::vector plain_only; + plain_only.push_back("plain"); + TestBase::Init(plain_only, argc, argv); +} + +void TestBase::Init(const std::vector & availableImpls, + int argc, const char* const argv[]) +{ + available_impls = availableImpls; + + const std::string command_line_keys = + "{ perf_max_outliers |8 |percent of allowed outliers}" + "{ perf_min_samples |10 |minimal required numer of samples}" + "{ perf_force_samples |100 |force set maximum number of samples for all tests}" + "{ perf_seed |809564 |seed for random numbers generator}" + "{ perf_threads |-1 |the number of worker threads, if parallel execution is enabled}" + "{ perf_write_sanity |false |create new records for sanity checks}" + "{ perf_verify_sanity |false |fail tests having no regression data for sanity checks}" + "{ perf_impl |" + available_impls[0] + + "|the implementation variant of functions under test}" + "{ perf_list_impls |false |list available implementation variants and exit}" + "{ perf_run_cpu |false |deprecated, equivalent to --perf_impl=plain}" +#ifdef ANDROID + "{ perf_time_limit |6.0 |default time limit for a single test (in seconds)}" + "{ perf_affinity_mask |0 |set affinity mask for the main thread}" + "{ perf_log_power_checkpoints | |additional xml logging for power measurement}" +#else + "{ perf_time_limit |3.0 |default time limit for a single test (in seconds)}" +#endif + "{ perf_max_deviation |1.0 |}" + "{ help h |false |print help info}" +#ifdef HAVE_CUDA + "{ perf_cuda_device |0 |run GPU test suite onto specific CUDA capable device}" + "{ perf_cuda_info_only |false |print an information about system and an available CUDA devices and then exit.}" +#endif + ; + cv::CommandLineParser args(argc, argv, command_line_keys); if (args.has("help")) { @@ -651,6 +668,7 @@ void TestBase::Init(int argc, const char* const argv[]) ::testing::AddGlobalTestEnvironment(new PerfEnvironment); + param_impl = args.has("perf_run_cpu") ? 
"plain" : args.get("perf_impl"); param_max_outliers = std::min(100., std::max(0., args.get("perf_max_outliers"))); param_min_samples = std::max(1u, args.get("perf_min_samples")); param_max_deviation = std::max(0., args.get("perf_max_deviation")); @@ -665,19 +683,41 @@ void TestBase::Init(int argc, const char* const argv[]) log_power_checkpoints = args.has("perf_log_power_checkpoints"); #endif + bool param_list_impls = args.has("perf_list_impls"); + + if (param_list_impls) + { + fputs("Available implementation variants:", stdout); + for (size_t i = 0; i < available_impls.size(); ++i) { + putchar(' '); + fputs(available_impls[i].c_str(), stdout); + } + putchar('\n'); + exit(0); + } + + if (std::find(available_impls.begin(), available_impls.end(), param_impl) == available_impls.end()) + { + printf("No such implementation: %s\n", param_impl.c_str()); + exit(1); + } + #ifdef HAVE_CUDA bool printOnly = args.has("perf_cuda_info_only"); if (printOnly) exit(0); +#endif + + if (available_impls.size() > 1) + printf("[----------]\n[ INFO ] \tImplementation variant: %s.\n[----------]\n", param_impl.c_str()), fflush(stdout); + +#ifdef HAVE_CUDA - param_run_cpu = args.has("perf_run_cpu"); param_cuda_device = std::max(0, std::min(cv::gpu::getCudaEnabledDeviceCount(), args.get("perf_cuda_device"))); - if (param_run_cpu) - printf("[----------]\n[ GPU INFO ] \tRun test suite on CPU.\n[----------]\n"), fflush(stdout); - else + if (param_impl == "cuda") { cv::gpu::DeviceInfo info(param_cuda_device); if (!info.isCompatible()) @@ -703,6 +743,18 @@ void TestBase::Init(int argc, const char* const argv[]) _timeadjustment = _calibrate(); } +void TestBase::RecordRunParameters() +{ + ::testing::Test::RecordProperty("cv_implementation", param_impl); + ::testing::Test::RecordProperty("cv_num_threads", param_threads); +} + +std::string TestBase::getSelectedImpl() +{ + return param_impl; +} + + int64 TestBase::_calibrate() { class _helper : public ::perf::TestBase @@ -1322,11 +1374,7 @@ void perf::sort(std::vector& pts, cv::InputOutputArray descriptors \*****************************************************************************************/ bool perf::GpuPerf::targetDevice() { -#ifdef HAVE_CUDA - return !param_run_cpu; -#else - return false; -#endif + return param_impl == "cuda"; } /*****************************************************************************************\ @@ -1365,4 +1413,3 @@ void PrintTo(const Size& sz, ::std::ostream* os) } } // namespace cv - diff --git a/platforms/android/service/doc/BaseLoaderCallback.rst b/platforms/android/service/doc/BaseLoaderCallback.rst index 71915c449d..f8c30a8008 100644 --- a/platforms/android/service/doc/BaseLoaderCallback.rst +++ b/platforms/android/service/doc/BaseLoaderCallback.rst @@ -48,7 +48,7 @@ See the "15-puzzle" OpenCV sample for details. super.onResume(); Log.i(TAG, "Trying to load OpenCV library"); - if (!OpenCVLoader.initAsync(OpenCVLoader.OPENCV_VERSION_2_4_4, this, mOpenCVCallBack)) + if (!OpenCVLoader.initAsync(OpenCVLoader.OPENCV_VERSION_2_4_6, this, mOpenCVCallBack)) { Log.e(TAG, "Cannot connect to OpenCV Manager"); } diff --git a/platforms/android/service/doc/JavaHelper.rst b/platforms/android/service/doc/JavaHelper.rst index e90b016e54..9262a7cf73 100644 --- a/platforms/android/service/doc/JavaHelper.rst +++ b/platforms/android/service/doc/JavaHelper.rst @@ -55,3 +55,7 @@ OpenCV version constants .. data:: OPENCV_VERSION_2_4_5 OpenCV Library version 2.4.5 + +.. 
data:: OPENCV_VERSION_2_4_6 + + OpenCV Library version 2.4.6 diff --git a/platforms/android/service/engine/AndroidManifest.xml b/platforms/android/service/engine/AndroidManifest.xml index 9549556782..b2141e0ce3 100644 --- a/platforms/android/service/engine/AndroidManifest.xml +++ b/platforms/android/service/engine/AndroidManifest.xml @@ -1,8 +1,8 @@ + android:versionCode="28@ANDROID_PLATFORM_VERSION_CODE@" + android:versionName="2.8" > diff --git a/platforms/android/service/engine/jni/BinderComponent/OpenCVEngine.cpp b/platforms/android/service/engine/jni/BinderComponent/OpenCVEngine.cpp index 274e36a4b5..b0b2b5137f 100644 --- a/platforms/android/service/engine/jni/BinderComponent/OpenCVEngine.cpp +++ b/platforms/android/service/engine/jni/BinderComponent/OpenCVEngine.cpp @@ -15,7 +15,7 @@ using namespace android; const int OpenCVEngine::Platform = DetectKnownPlatforms(); const int OpenCVEngine::CpuID = GetCpuID(); -const int OpenCVEngine::KnownVersions[] = {2040000, 2040100, 2040200, 2040300, 2040301, 2040302, 2040400, 2040500}; +const int OpenCVEngine::KnownVersions[] = {2040000, 2040100, 2040200, 2040300, 2040301, 2040302, 2040400, 2040500, 2040600}; bool OpenCVEngine::ValidateVersion(int version) { diff --git a/platforms/android/service/readme.txt b/platforms/android/service/readme.txt index f4e65eb369..e68bf5249c 100644 --- a/platforms/android/service/readme.txt +++ b/platforms/android/service/readme.txt @@ -14,20 +14,20 @@ manually using adb tool: .. code-block:: sh - adb install OpenCV-2.4.5-android-sdk/apk/OpenCV_2.4.5_Manager_2.7_.apk + adb install OpenCV-2.4.6-android-sdk/apk/OpenCV_2.4.6_Manager_2.8_.apk Use the table below to determine proper OpenCV Manager package for your device: +------------------------------+--------------+---------------------------------------------------+ | Hardware Platform | Android ver. 
| Package name | +==============================+==============+===================================================+ -| armeabi-v7a (ARMv7-A + NEON) | >= 2.3 | OpenCV_2.4.5_Manager_2.7_armv7a-neon.apk | +| armeabi-v7a (ARMv7-A + NEON) | >= 2.3 | OpenCV_2.4.6_Manager_2.8_armv7a-neon.apk | +------------------------------+--------------+---------------------------------------------------+ -| armeabi-v7a (ARMv7-A + NEON) | = 2.2 | OpenCV_2.4.5_Manager_2.7_armv7a-neon-android8.apk | +| armeabi-v7a (ARMv7-A + NEON) | = 2.2 | OpenCV_2.4.6_Manager_2.8_armv7a-neon-android8.apk | +------------------------------+--------------+---------------------------------------------------+ -| armeabi (ARMv5, ARMv6) | >= 2.3 | OpenCV_2.4.5_Manager_2.7_armeabi.apk | +| armeabi (ARMv5, ARMv6) | >= 2.3 | OpenCV_2.4.6_Manager_2.8_armeabi.apk | +------------------------------+--------------+---------------------------------------------------+ -| Intel x86 | >= 2.3 | OpenCV_2.4.5_Manager_2.7_x86.apk | +| Intel x86 | >= 2.3 | OpenCV_2.4.6_Manager_2.8_x86.apk | +------------------------------+--------------+---------------------------------------------------+ -| MIPS | >= 2.3 | OpenCV_2.4.5_Manager_2.7_mips.apk | +| MIPS | >= 2.3 | OpenCV_2.4.6_Manager_2.8_mips.apk | +------------------------------+--------------+---------------------------------------------------+ diff --git a/ios/Info.plist.in b/platforms/ios/Info.plist.in similarity index 93% rename from ios/Info.plist.in rename to platforms/ios/Info.plist.in index 89ef38625d..6bcfe862d0 100644 --- a/ios/Info.plist.in +++ b/platforms/ios/Info.plist.in @@ -5,7 +5,7 @@ CFBundleName OpenCV CFBundleIdentifier - com.itseez.opencv + org.opencv CFBundleVersion ${VERSION} CFBundleShortVersionString diff --git a/ios/build_framework.py b/platforms/ios/build_framework.py similarity index 95% rename from ios/build_framework.py rename to platforms/ios/build_framework.py index ceef4b71d7..bc385bb1bb 100755 --- a/ios/build_framework.py +++ b/platforms/ios/build_framework.py @@ -38,7 +38,7 @@ def build_opencv(srcroot, buildroot, target, arch): # for some reason, if you do not specify CMAKE_BUILD_TYPE, it puts libs to "RELEASE" rather than "Release" cmakeargs = ("-GXcode " + "-DCMAKE_BUILD_TYPE=Release " + - "-DCMAKE_TOOLCHAIN_FILE=%s/ios/cmake/Toolchains/Toolchain-%s_Xcode.cmake " + + "-DCMAKE_TOOLCHAIN_FILE=%s/platforms/ios/cmake/Toolchains/Toolchain-%s_Xcode.cmake " + "-DBUILD_opencv_world=ON " + "-DCMAKE_INSTALL_PREFIX=install") % (srcroot, target) # if cmake cache exists, just rerun cmake to update OpenCV.xproj if necessary @@ -92,16 +92,13 @@ def put_framework_together(srcroot, dstroot): os.system("lipo -create " + wlist + " -o " + dstdir + "/opencv2") # form Info.plist - srcfile = open(srcroot + "/ios/Info.plist.in", "rt") + srcfile = open(srcroot + "/platforms/ios/Info.plist.in", "rt") dstfile = open(dstdir + "/Resources/Info.plist", "wt") for l in srcfile.readlines(): dstfile.write(l.replace("${VERSION}", opencv_version)) srcfile.close() dstfile.close() - # copy cascades - # TODO ... 
- # make symbolic links os.symlink("A", "Versions/Current") os.symlink("Versions/Current/Headers", "Headers") @@ -125,4 +122,4 @@ if __name__ == "__main__": print "Usage:\n\t./build_framework.py \n\n" sys.exit(0) - build_framework(os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), "..")), os.path.abspath(sys.argv[1])) \ No newline at end of file + build_framework(os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), "../..")), os.path.abspath(sys.argv[1])) \ No newline at end of file diff --git a/ios/cmake/Modules/Platform/iOS.cmake b/platforms/ios/cmake/Modules/Platform/iOS.cmake similarity index 100% rename from ios/cmake/Modules/Platform/iOS.cmake rename to platforms/ios/cmake/Modules/Platform/iOS.cmake diff --git a/ios/cmake/Toolchains/Toolchain-iPhoneOS_Xcode.cmake b/platforms/ios/cmake/Toolchains/Toolchain-iPhoneOS_Xcode.cmake similarity index 80% rename from ios/cmake/Toolchains/Toolchain-iPhoneOS_Xcode.cmake rename to platforms/ios/cmake/Toolchains/Toolchain-iPhoneOS_Xcode.cmake index 67343253bd..6493deb459 100644 --- a/ios/cmake/Toolchains/Toolchain-iPhoneOS_Xcode.cmake +++ b/platforms/ios/cmake/Toolchains/Toolchain-iPhoneOS_Xcode.cmake @@ -4,12 +4,12 @@ set (IPHONEOS TRUE) # Standard settings set (CMAKE_SYSTEM_NAME iOS) # Include extra modules for the iOS platform files -set (CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_CURRENT_SOURCE_DIR}/ios/cmake/Modules") +set (CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_CURRENT_SOURCE_DIR}/platforms/ios/cmake/Modules") -# Force the compilers to gcc for iOS +# Force the compilers to clang for iOS include (CMakeForceCompiler) -#CMAKE_FORCE_C_COMPILER (gcc gcc) -#CMAKE_FORCE_CXX_COMPILER (g++ g++) +#CMAKE_FORCE_C_COMPILER (clang GNU) +#CMAKE_FORCE_CXX_COMPILER (clang++ GNU) set (CMAKE_C_SIZEOF_DATA_PTR 4) set (CMAKE_C_HAS_ISYSROOT 1) diff --git a/ios/cmake/Toolchains/Toolchain-iPhoneSimulator_Xcode.cmake b/platforms/ios/cmake/Toolchains/Toolchain-iPhoneSimulator_Xcode.cmake similarity index 80% rename from ios/cmake/Toolchains/Toolchain-iPhoneSimulator_Xcode.cmake rename to platforms/ios/cmake/Toolchains/Toolchain-iPhoneSimulator_Xcode.cmake index 7ef8113edb..0056c8dbd4 100644 --- a/ios/cmake/Toolchains/Toolchain-iPhoneSimulator_Xcode.cmake +++ b/platforms/ios/cmake/Toolchains/Toolchain-iPhoneSimulator_Xcode.cmake @@ -4,12 +4,12 @@ set (IPHONESIMULATOR TRUE) # Standard settings set (CMAKE_SYSTEM_NAME iOS) # Include extra modules for the iOS platform files -set (CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_CURRENT_SOURCE_DIR}/ios/cmake/Modules") +set (CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_CURRENT_SOURCE_DIR}/platforms/ios/cmake/Modules") -# Force the compilers to gcc for iOS +# Force the compilers to clang for iOS include (CMakeForceCompiler) -#CMAKE_FORCE_C_COMPILER (gcc gcc) -#CMAKE_FORCE_CXX_COMPILER (g++ g++) +#CMAKE_FORCE_C_COMPILER (clang GNU) +#CMAKE_FORCE_CXX_COMPILER (clang++ GNU) set (CMAKE_C_SIZEOF_DATA_PTR 4) set (CMAKE_C_HAS_ISYSROOT 1) diff --git a/platforms/ios/readme.txt b/platforms/ios/readme.txt new file mode 100644 index 0000000000..8f1f206b03 --- /dev/null +++ b/platforms/ios/readme.txt @@ -0,0 +1,7 @@ +Building OpenCV from Source, using CMake and Command Line +========================================================= + +cd ~/ +python opencv/platforms/ios/build_framework.py ios + +If everything's fine, a few minutes later you will get ~//ios/opencv2.framework. You can add this framework to your Xcode projects. 
\ No newline at end of file diff --git a/platforms/readme.txt b/platforms/readme.txt index 7e1c4555c5..dfe0461422 100644 --- a/platforms/readme.txt +++ b/platforms/readme.txt @@ -1 +1,3 @@ -This folder contains toolchains and additional files that are needed for cross compitation. \ No newline at end of file +This folder contains toolchains and additional files that are needed for cross compilation. +For more information see introduction tutorials for target platform in documentation: +http://docs.opencv.org/doc/tutorials/introduction/table_of_content_introduction/table_of_content_introduction.html#table-of-content-introduction \ No newline at end of file diff --git a/platforms/scripts/ABI_compat_generator.py b/platforms/scripts/ABI_compat_generator.py index fdabf00611..d7cc3728aa 100755 --- a/platforms/scripts/ABI_compat_generator.py +++ b/platforms/scripts/ABI_compat_generator.py @@ -6,7 +6,7 @@ import os architecture = 'armeabi' -excludedHeaders = set(['hdf5.h', 'cap_ios.h', 'eigen.hpp', 'cxeigen.hpp']) #TOREMOVE +excludedHeaders = set(['hdf5.h', 'cap_ios.h', 'ios.h', 'eigen.hpp', 'cxeigen.hpp']) #TOREMOVE systemIncludes = ['sources/cxx-stl/gnu-libstdc++/4.6/include', \ '/opt/android-ndk-r8c/platforms/android-8/arch-arm', # TODO: check if this one could be passed as command line arg 'sources/cxx-stl/gnu-libstdc++/4.6/libs/armeabi-v7a/include'] diff --git a/platforms/scripts/cmake_winrt.cmd b/platforms/scripts/cmake_winrt.cmd index aafed7d09d..df70e856c5 100644 --- a/platforms/scripts/cmake_winrt.cmd +++ b/platforms/scripts/cmake_winrt.cmd @@ -1,6 +1,6 @@ -mkdir build -cd build +mkdir build_winrt_arm +cd build_winrt_arm rem call "C:\Program Files\Microsoft Visual Studio 11.0\VC\bin\x86_arm\vcvarsx86_arm.bat" -cmake.exe -GNinja -DCMAKE_BUILD_TYPE=Release -DWITH_FFMPEG=OFF -DBUILD_opencv_gpu=OFF -DBUILD_opencv_python=OFF -DCMAKE_TOOLCHAIN_FILE=..\..\winrt\arm.winrt.toolchain.cmake ..\..\.. +cmake.exe -GNinja -DWITH_TBB=ON -DBUILD_TBB=ON -DCMAKE_BUILD_TYPE=Release -DWITH_FFMPEG=OFF -DBUILD_opencv_gpu=OFF -DBUILD_opencv_python=OFF -DCMAKE_TOOLCHAIN_FILE=..\winrt\arm.winrt.toolchain.cmake ..\.. 
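Since cmake_winrt.cmd now configures with the Ninja generator, the build itself is a separate step. A minimal sketch of the follow-up command, assuming ninja.exe is available on the PATH (not part of this patch):

    rem run from inside build_winrt_arm after cmake.exe has finished successfully
    ninja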
diff --git a/samples/cpp/freak_demo.cpp b/samples/cpp/freak_demo.cpp index 41604865b2..fa58d20385 100644 --- a/samples/cpp/freak_demo.cpp +++ b/samples/cpp/freak_demo.cpp @@ -56,7 +56,7 @@ static void help( char** argv ) } int main( int argc, char** argv ) { - // check http://opencv.itseez.com/doc/tutorials/features2d/table_of_content_features2d/table_of_content_features2d.html + // check http://docs.opencv.org/doc/tutorials/features2d/table_of_content_features2d/table_of_content_features2d.html // for OpenCV general detection/matching framework details if( argc != 3 ) { diff --git a/samples/cpp/latentsvm_multidetect.cpp b/samples/cpp/latentsvm_multidetect.cpp index 4da5506d8d..f5a8bc56da 100644 --- a/samples/cpp/latentsvm_multidetect.cpp +++ b/samples/cpp/latentsvm_multidetect.cpp @@ -3,7 +3,7 @@ #include "opencv2/highgui/highgui.hpp" #include "opencv2/contrib/contrib.hpp" -#ifdef WIN32 +#if defined(WIN32) || defined(_WIN32) #include #else #include @@ -59,7 +59,7 @@ static void readDirectory( const string& directoryName, vector& filename { filenames.clear(); -#ifdef WIN32 +#if defined(WIN32) | defined(_WIN32) struct _finddata_t s_file; string str = directoryName + "\\*.*"; diff --git a/samples/cpp/stereo_calib.cpp b/samples/cpp/stereo_calib.cpp index 44be2d64e0..39863adaa4 100644 --- a/samples/cpp/stereo_calib.cpp +++ b/samples/cpp/stereo_calib.cpp @@ -14,15 +14,12 @@ Or: http://oreilly.com/catalog/9780596516130/ ISBN-10: 0596516134 or: ISBN-13: 978-0596516130 - OTHER OPENCV SITES: - * The source code is on sourceforge at: - http://sourceforge.net/projects/opencvlibrary/ - * The OpenCV wiki page (As of Oct 1, 2008 this is down for changing over servers, but should come back): - http://opencvlibrary.sourceforge.net/ - * An active user group is at: - http://tech.groups.yahoo.com/group/OpenCV/ - * The minutes of weekly OpenCV development meetings are at: - http://code.opencv.org/projects/opencv/wiki/Meeting_notes + OPENCV WEBSITES: + Homepage: http://opencv.org + Online docs: http://docs.opencv.org + Q&A forum: http://answers.opencv.org + Issue tracker: http://code.opencv.org + GitHub: https://github.com/Itseez/opencv/ ************************************************** */ #include "opencv2/calib3d/calib3d.hpp" diff --git a/samples/ocl/CMakeLists.txt b/samples/ocl/CMakeLists.txt index cdcf2f3e51..a201d8338d 100644 --- a/samples/ocl/CMakeLists.txt +++ b/samples/ocl/CMakeLists.txt @@ -27,7 +27,7 @@ if(BUILD_EXAMPLES AND OCV_DEPENDENCIES_FOUND) target_link_libraries(${the_target} ${OPENCV_LINKER_LIBS} ${OPENCV_OCL_SAMPLES_REQUIRED_DEPS}) set_target_properties(${the_target} PROPERTIES - OUTPUT_NAME "${name}_${project}" + OUTPUT_NAME "${project}-example-${name}" PROJECT_LABEL "(EXAMPLE_${project_upper}) ${name}") if(ENABLE_SOLUTION_FOLDERS) diff --git a/samples/ocl/clahe.cpp b/samples/ocl/clahe.cpp new file mode 100644 index 0000000000..b3d2ebd65f --- /dev/null +++ b/samples/ocl/clahe.cpp @@ -0,0 +1,109 @@ +#include +#include "opencv2/core/core.hpp" +#include "opencv2/core/utility.hpp" +#include "opencv2/imgproc/imgproc.hpp" +#include "opencv2/highgui/highgui.hpp" +#include "opencv2/ocl/ocl.hpp" +using namespace cv; +using namespace std; + +Ptr pFilter; +int tilesize; +int cliplimit; +string outfile; + +static void TSize_Callback(int pos) +{ + if(pos==0) + { + pFilter->setTilesGridSize(Size(1,1)); + } + pFilter->setTilesGridSize(Size(tilesize,tilesize)); +} + +static void Clip_Callback(int) +{ + pFilter->setClipLimit(cliplimit); +} + +int main(int argc, char** argv) +{ + const char* keys = + "{ i 
input | | specify input image }" + "{ c camera | 0 | specify camera id }" + "{ s use_cpu | false | use cpu algorithm }" + "{ o output | clahe_output.jpg | specify output save path}"; + + CommandLineParser cmd(argc, argv, keys); + string infile = cmd.get("i"); + outfile = cmd.get("o"); + int camid = cmd.get("c"); + bool use_cpu = cmd.get("s"); + VideoCapture capture; + bool running = true; + + namedWindow("CLAHE"); + createTrackbar("Tile Size", "CLAHE", &tilesize, 32, (TrackbarCallback)TSize_Callback); + createTrackbar("Clip Limit", "CLAHE", &cliplimit, 20, (TrackbarCallback)Clip_Callback); + Mat frame, outframe; + ocl::oclMat d_outframe; + + int cur_clip; + Size cur_tilesize; + if(use_cpu) + { + pFilter = createCLAHE(); + } + else + { + pFilter = ocl::createCLAHE(); + } + cur_clip = (int)pFilter->getClipLimit(); + cur_tilesize = pFilter->getTilesGridSize(); + setTrackbarPos("Tile Size", "CLAHE", cur_tilesize.width); + setTrackbarPos("Clip Limit", "CLAHE", cur_clip); + if(infile != "") + { + frame = imread(infile); + if(frame.empty()) + { + cout << "error read image: " << infile << endl; + return -1; + } + } + else + { + capture.open(camid); + } + cout << "\nControls:\n" + << "\to - save output image\n" + << "\tESC - exit\n"; + while(running) + { + if(capture.isOpened()) + capture.read(frame); + else + frame = imread(infile); + if(frame.empty()) + { + continue; + } + if(use_cpu) + { + cvtColor(frame, frame, COLOR_BGR2GRAY); + pFilter->apply(frame, outframe); + } + else + { + ocl::oclMat d_frame(frame); + ocl::cvtColor(d_frame, d_outframe, COLOR_BGR2GRAY); + pFilter->apply(d_outframe, d_outframe); + d_outframe.download(outframe); + } + imshow("CLAHE", outframe); + char key = (char)waitKey(3); + if(key == 'o') imwrite(outfile, outframe); + else if(key == 27) running = false; + } + return 0; +} diff --git a/samples/ocl/facedetect.cpp b/samples/ocl/facedetect.cpp index 4ab92af5e0..34aeeeaf52 100644 --- a/samples/ocl/facedetect.cpp +++ b/samples/ocl/facedetect.cpp @@ -257,8 +257,13 @@ void Draw(Mat& img, vector& faces, double scale) radius = cvRound((r->width + r->height)*0.25*scale); circle( img, center, radius, color, 3, 8, 0 ); } - imshow( "result", img ); imwrite( outputName, img ); + if(abs(scale-1.0)>.001) + { + resize(img, img, Size((int)(img.cols/scale), (int)(img.rows/scale))); + } + imshow( "result", img ); + } diff --git a/samples/ocl/hog.cpp b/samples/ocl/hog.cpp index b26c98c337..12280b1f80 100644 --- a/samples/ocl/hog.cpp +++ b/samples/ocl/hog.cpp @@ -58,6 +58,7 @@ private: string vdo_source; string output; int camera_id; + bool write_once; }; int main(int argc, char** argv) @@ -98,6 +99,7 @@ App::App(CommandLineParser& cmd) << "\tESC - exit\n" << "\tm - change mode GPU <-> CPU\n" << "\tg - convert image to gray or not\n" + << "\to - save output image once, or switch on/off video save\n" << "\t1/q - increase/decrease HOG scale\n" << "\t2/w - increase/decrease levels count\n" << "\t3/e - increase/decrease HOG group threshold\n" @@ -121,6 +123,7 @@ App::App(CommandLineParser& cmd) hit_threshold = win_width == 48 ? 
1.4 : 0.; scale = 1.05; gamma_corr = true; + write_once = false; cout << "Group threshold: " << gr_threshold << endl; cout << "Levels number: " << nlevels << endl; @@ -255,10 +258,11 @@ void App::run() workEnd(); - if (output!="") + if (output!="" && write_once) { if (img_source!="") // wirte image { + write_once = false; imwrite(output, img_to_show); } else //write video @@ -341,6 +345,10 @@ void App::handleKey(char key) gamma_corr = !gamma_corr; cout << "Gamma correction: " << gamma_corr << endl; break; + case 'o': + case 'O': + write_once = !write_once; + break; } } diff --git a/samples/ocl/stereo_match.cpp b/samples/ocl/stereo_match.cpp index 8737a047aa..100e62a5ba 100644 --- a/samples/ocl/stereo_match.cpp +++ b/samples/ocl/stereo_match.cpp @@ -51,7 +51,7 @@ struct App return ss.str(); } private: - bool running; + bool running, write_once; Mat left_src, right_src; Mat left, right; @@ -117,6 +117,7 @@ App::App(CommandLineParser& cmd) cout << "stereo_match_ocl sample\n"; cout << "\nControls:\n" << "\tesc - exit\n" + << "\to - save output image once\n" << "\tp - print current parameters\n" << "\tg - convert source images into gray\n" << "\tm - change stereo match method\n" @@ -134,6 +135,7 @@ App::App(CommandLineParser& cmd) else cout << "unknown method!\n"; ndisp = cmd.get("n"); out_img = cmd.get("o"); + write_once = false; } @@ -163,10 +165,8 @@ void App::run() printParams(); running = true; - bool written = false; while (running) { - // Prepare disparity map of specified type Mat disp; oclMat d_disp; @@ -194,19 +194,21 @@ void App::run() csbp(d_left, d_right, d_disp); break; } + // Show results d_disp.download(disp); workEnd(); + if (method != BM) { disp.convertTo(disp, 0); } putText(disp, text(), Point(5, 25), FONT_HERSHEY_SIMPLEX, 1.0, Scalar::all(255)); imshow("disparity", disp); - if(!written) + if(write_once) { imwrite(out_img, disp); - written = true; + write_once = false; } handleKey((char)waitKey(3)); } @@ -380,5 +382,9 @@ void App::handleKey(char key) cout << "level_count: " << csbp.levels << endl; } break; + case 'o': + case 'O': + write_once = true; + break; } } diff --git a/samples/winrt/ImageManipulations/AdvancedCapture.xaml b/samples/winrt/ImageManipulations/AdvancedCapture.xaml new file mode 100644 index 0000000000..07db96f275 --- /dev/null +++ b/samples/winrt/ImageManipulations/AdvancedCapture.xaml @@ -0,0 +1,75 @@ + + + + + + + + + + + + + + + + + + +