diff --git a/CMakeLists.txt b/CMakeLists.txt index 36622ee7e1..c19689f8e4 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -295,8 +295,8 @@ OCV_OPTION(BUILD_ANDROID_EXAMPLES "Build examples for Android platform" OCV_OPTION(BUILD_DOCS "Create build rules for OpenCV Documentation" OFF IF (NOT WINRT AND NOT APPLE_FRAMEWORK)) OCV_OPTION(BUILD_EXAMPLES "Build all examples" OFF ) OCV_OPTION(BUILD_PACKAGE "Enables 'make package_source' command" ON IF NOT WINRT) -OCV_OPTION(BUILD_PERF_TESTS "Build performance tests" ON IF (NOT APPLE_FRAMEWORK) ) -OCV_OPTION(BUILD_TESTS "Build accuracy & regression tests" ON IF (NOT APPLE_FRAMEWORK) ) +OCV_OPTION(BUILD_PERF_TESTS "Build performance tests" NOT INSTALL_CREATE_DISTRIB IF (NOT APPLE_FRAMEWORK) ) +OCV_OPTION(BUILD_TESTS "Build accuracy & regression tests" NOT INSTALL_CREATE_DISTRIB IF (NOT APPLE_FRAMEWORK) ) OCV_OPTION(BUILD_WITH_DEBUG_INFO "Include debug info into release binaries ('OFF' means default settings)" OFF ) OCV_OPTION(BUILD_WITH_STATIC_CRT "Enables use of statically linked CRT for statically linked OpenCV" ON IF MSVC ) OCV_OPTION(BUILD_WITH_DYNAMIC_IPP "Enables dynamic linking of IPP (only for standalone IPP)" OFF ) @@ -461,6 +461,7 @@ else() ocv_update(OPENCV_OTHER_INSTALL_PATH "${CMAKE_INSTALL_DATAROOTDIR}/opencv4") ocv_update(OPENCV_LICENSES_INSTALL_PATH "${CMAKE_INSTALL_DATAROOTDIR}/licenses/opencv4") endif() + ocv_update(OPENCV_PYTHON_INSTALL_PATH "python") endif() ocv_update(CMAKE_INSTALL_RPATH "${CMAKE_INSTALL_PREFIX}/${OPENCV_LIB_INSTALL_PATH}") diff --git a/cmake/OpenCVDetectInferenceEngine.cmake b/cmake/OpenCVDetectInferenceEngine.cmake index e36eb0852b..d41f9243b4 100644 --- a/cmake/OpenCVDetectInferenceEngine.cmake +++ b/cmake/OpenCVDetectInferenceEngine.cmake @@ -78,9 +78,9 @@ endif() if(INF_ENGINE_TARGET) if(NOT INF_ENGINE_RELEASE) - message(WARNING "InferenceEngine version have not been set, 2018R3 will be used by default. 
Set INF_ENGINE_RELEASE variable if you experience build errors.") + message(WARNING "InferenceEngine version have not been set, 2018R4 will be used by default. Set INF_ENGINE_RELEASE variable if you experience build errors.") endif() - set(INF_ENGINE_RELEASE "2018030000" CACHE STRING "Force IE version, should be in form YYYYAABBCC (e.g. 2018R2.0.2 -> 2018020002)") + set(INF_ENGINE_RELEASE "2018040000" CACHE STRING "Force IE version, should be in form YYYYAABBCC (e.g. 2018R2.0.2 -> 2018020002)") set_target_properties(${INF_ENGINE_TARGET} PROPERTIES INTERFACE_COMPILE_DEFINITIONS "HAVE_INF_ENGINE=1;INF_ENGINE_RELEASE=${INF_ENGINE_RELEASE}" ) diff --git a/cmake/OpenCVGenSetupVars.cmake b/cmake/OpenCVGenSetupVars.cmake index 23c785f1bb..977d5d8326 100644 --- a/cmake/OpenCVGenSetupVars.cmake +++ b/cmake/OpenCVGenSetupVars.cmake @@ -43,7 +43,12 @@ else() endif() file(RELATIVE_PATH OPENCV_PYTHON_DIR_RELATIVE_CMAKECONFIG "${CMAKE_INSTALL_PREFIX}/${OPENCV_SETUPVARS_INSTALL_PATH}/" "${CMAKE_INSTALL_PREFIX}/") -ocv_path_join(OPENCV_PYTHON_DIR_RELATIVE_CMAKECONFIG "${OPENCV_PYTHON_DIR_RELATIVE_CMAKECONFIG}" "python_loader") # https://github.com/opencv/opencv/pull/12977 +if(IS_ABSOLUTE "${OPENCV_PYTHON_INSTALL_PATH}") + set(OPENCV_PYTHON_DIR_RELATIVE_CMAKECONFIG "${OPENCV_PYTHON_INSTALL_PATH}") + message(WARNING "CONFIGURATION IS NOT SUPPORTED: validate setupvars script in install directory") +else() + ocv_path_join(OPENCV_PYTHON_DIR_RELATIVE_CMAKECONFIG "${OPENCV_PYTHON_DIR_RELATIVE_CMAKECONFIG}" "${OPENCV_PYTHON_INSTALL_PATH}") +endif() configure_file("${OpenCV_SOURCE_DIR}/cmake/templates/${OPENCV_SETUPVARS_TEMPLATE}" "${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/install/${OPENCV_SETUPVARS_FILENAME}" @ONLY) install(FILES "${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/install/${OPENCV_SETUPVARS_FILENAME}" DESTINATION "${OPENCV_SETUPVARS_INSTALL_PATH}" diff --git a/cmake/OpenCVUtils.cmake b/cmake/OpenCVUtils.cmake index 1a0e292e57..124d32262d 100644 --- a/cmake/OpenCVUtils.cmake 
+++ b/cmake/OpenCVUtils.cmake @@ -605,7 +605,9 @@ macro(OCV_OPTION variable description value) option(${variable} "${description}" ${__value}) endif() else() - if(DEFINED ${variable} AND NOT OPENCV_HIDE_WARNING_UNSUPPORTED_OPTION) + if(DEFINED ${variable} AND "${${variable}}" # emit warnings about turned ON options only. + AND NOT (OPENCV_HIDE_WARNING_UNSUPPORTED_OPTION OR "$ENV{OPENCV_HIDE_WARNING_UNSUPPORTED_OPTION}") + ) message(WARNING "Unexpected option: ${variable} (=${${variable}})\nCondition: IF (${__condition})") endif() if(OPENCV_UNSET_UNSUPPORTED_OPTION) diff --git a/cmake/templates/setup_vars_win32.cmd.in b/cmake/templates/setup_vars_win32.cmd.in index f6722535ac..b0dc8d2261 100644 --- a/cmake/templates/setup_vars_win32.cmd.in +++ b/cmake/templates/setup_vars_win32.cmd.in @@ -1,18 +1,36 @@ @ECHO OFF -SETLOCAL EnableDelayedExpansion SET "SCRIPT_DIR=%~dp0" IF NOT DEFINED OPENCV_QUIET ( ECHO Setting vars for OpenCV @OPENCV_VERSION@ ) -SET "PATH=!SCRIPT_DIR!\@OPENCV_LIB_RUNTIME_DIR_RELATIVE_CMAKECONFIG@;%PATH%" +SET "PATH=%SCRIPT_DIR%\@OPENCV_LIB_RUNTIME_DIR_RELATIVE_CMAKECONFIG@;%PATH%" -IF NOT DEFINED OPENCV_SKIP_PYTHON ( - SET "PYTHONPATH_OPENCV=!SCRIPT_DIR!\@OPENCV_PYTHON_DIR_RELATIVE_CMAKECONFIG@" - IF NOT DEFINED OPENCV_QUIET ( ECHO Append PYTHONPATH: !PYTHONPATH_OPENCV! ) - SET "PYTHONPATH=!PYTHONPATH_OPENCV!;%PYTHONPATH%" -) +IF NOT DEFINED OPENCV_SKIP_PYTHON CALL :SET_PYTHON + +SET SCRIPT_DIR= + +IF NOT [%1] == [] GOTO :RUN_COMMAND + +GOTO :EOF -IF NOT [%1] == [] ( - %* - EXIT /B !errorlevel! 
+:RUN_COMMAND +SET RUN_INTERACTIVE=1 +echo %CMDCMDLINE% | find /i "%~0" >nul +IF NOT errorlevel 1 set RUN_INTERACTIVE=0 + +%* +SET RESULT=%ERRORLEVEL% +IF %RESULT% NEQ 0 ( + IF _%RUN_INTERACTIVE%_==_0_ ( IF NOT DEFINED OPENCV_BATCH_MODE ( pause ) ) ) +EXIT /B %RESULT% + +:SET_PYTHON +SET "PYTHONPATH_OPENCV=%SCRIPT_DIR%\@OPENCV_PYTHON_DIR_RELATIVE_CMAKECONFIG@" +IF NOT DEFINED OPENCV_QUIET ( ECHO Append PYTHONPATH: %PYTHONPATH_OPENCV% ) +SET "PYTHONPATH=%PYTHONPATH_OPENCV%;%PYTHONPATH%" +SET PYTHONPATH_OPENCV= +EXIT /B + + +:EOF diff --git a/doc/Doxyfile.in b/doc/Doxyfile.in index 3127733313..d051d60b1e 100644 --- a/doc/Doxyfile.in +++ b/doc/Doxyfile.in @@ -257,6 +257,7 @@ PREDEFINED = __cplusplus=1 \ CV_SSE2=1 \ CV__DEBUG_NS_BEGIN= \ CV__DEBUG_NS_END= \ + CV_DEPRECATED_EXTERNAL= \ CV_DEPRECATED= EXPAND_AS_DEFINED = SKIP_FUNCTION_MACROS = YES diff --git a/doc/tutorials/dnn/dnn_android/dnn_android.markdown b/doc/tutorials/dnn/dnn_android/dnn_android.markdown index a432b38204..5dd6e2d664 100644 --- a/doc/tutorials/dnn/dnn_android/dnn_android.markdown +++ b/doc/tutorials/dnn/dnn_android/dnn_android.markdown @@ -12,7 +12,7 @@ Tutorial was written for the following versions of corresponding software: - Download and install Android Studio from https://developer.android.com/studio. -- Get the latest pre-built OpenCV for Android release from https://github.com/opencv/opencv/releases and unpack it (for example, `opencv-3.4.3-android-sdk.zip`). +- Get the latest pre-built OpenCV for Android release from https://github.com/opencv/opencv/releases and unpack it (for example, `opencv-3.4.4-android-sdk.zip`). - Download MobileNet object detection model from https://github.com/chuanqi305/MobileNet-SSD. We need a configuration file `MobileNetSSD_deploy.prototxt` and weights `MobileNetSSD_deploy.caffemodel`. 
diff --git a/doc/tutorials/introduction/cross_referencing/tutorial_cross_referencing.markdown b/doc/tutorials/introduction/cross_referencing/tutorial_cross_referencing.markdown index ec44a0f59f..b3e161cae6 100644 --- a/doc/tutorials/introduction/cross_referencing/tutorial_cross_referencing.markdown +++ b/doc/tutorials/introduction/cross_referencing/tutorial_cross_referencing.markdown @@ -36,14 +36,14 @@ Open your Doxyfile using your favorite text editor and search for the key `TAGFILES`. Change it as follows: @code -TAGFILES = ./docs/doxygen-tags/opencv.tag=http://docs.opencv.org/3.4.3 +TAGFILES = ./docs/doxygen-tags/opencv.tag=http://docs.opencv.org/4.0.0 @endcode If you had other definitions already, you can append the line using a `\`: @code TAGFILES = ./docs/doxygen-tags/libstdc++.tag=https://gcc.gnu.org/onlinedocs/libstdc++/latest-doxygen \ - ./docs/doxygen-tags/opencv.tag=http://docs.opencv.org/3.4.3 + ./docs/doxygen-tags/opencv.tag=http://docs.opencv.org/4.0.0 @endcode Doxygen can now use the information from the tag file to link to the OpenCV diff --git a/doc/tutorials/videoio/intelperc.markdown b/doc/tutorials/videoio/intelperc.markdown index a36511a978..69e316d535 100644 --- a/doc/tutorials/videoio/intelperc.markdown +++ b/doc/tutorials/videoio/intelperc.markdown @@ -78,5 +78,5 @@ there are two flags that should be used to set/get property of the needed genera flag value is assumed by default if neither of the two possible values of the property is set. For more information please refer to the example of usage -[intelperc_capture.cpp](https://github.com/opencv/opencv/tree/master/samples/cpp/intelperc_capture.cpp) +[videocapture_intelperc.cpp](https://github.com/opencv/opencv/tree/master/samples/cpp/videocapture_intelperc.cpp) in opencv/samples/cpp folder. 
diff --git a/doc/tutorials/videoio/kinect_openni.markdown b/doc/tutorials/videoio/kinect_openni.markdown index 97fbd7ed2b..b815970f7a 100644 --- a/doc/tutorials/videoio/kinect_openni.markdown +++ b/doc/tutorials/videoio/kinect_openni.markdown @@ -134,5 +134,5 @@ property. The following properties of cameras available through OpenNI interface - CAP_OPENNI_DEPTH_GENERATOR_REGISTRATION = CAP_OPENNI_DEPTH_GENERATOR + CAP_PROP_OPENNI_REGISTRATION For more information please refer to the example of usage -[openni_capture.cpp](https://github.com/opencv/opencv/tree/master/samples/cpp/openni_capture.cpp) in +[videocapture_openni.cpp](https://github.com/opencv/opencv/tree/master/samples/cpp/videocapture_openni.cpp) in opencv/samples/cpp folder. diff --git a/modules/core/CMakeLists.txt b/modules/core/CMakeLists.txt index 332595897d..20fdf96c59 100644 --- a/modules/core/CMakeLists.txt +++ b/modules/core/CMakeLists.txt @@ -86,3 +86,47 @@ ocv_add_accuracy_tests() ocv_add_perf_tests() ocv_install_3rdparty_licenses(SoftFloat "${CMAKE_CURRENT_SOURCE_DIR}/3rdparty/SoftFloat/COPYING.txt") + + +# generate data (samples data) config file +set(OPENCV_DATA_CONFIG_FILE "${CMAKE_BINARY_DIR}/opencv_data_config.hpp") +set(OPENCV_DATA_CONFIG_STR "") + +if(CMAKE_INSTALL_PREFIX) + set(OPENCV_DATA_CONFIG_STR "${OPENCV_DATA_CONFIG_STR} +#define OPENCV_INSTALL_PREFIX \"${CMAKE_INSTALL_PREFIX}\" +") +endif() +if(OPENCV_OTHER_INSTALL_PATH) + set(OPENCV_DATA_CONFIG_STR "${OPENCV_DATA_CONFIG_STR} +#define OPENCV_DATA_INSTALL_PATH \"${OPENCV_OTHER_INSTALL_PATH}\" +") +endif() + +set(OPENCV_DATA_CONFIG_STR "${OPENCV_DATA_CONFIG_STR} +#define OPENCV_BUILD_DIR \"${CMAKE_BINARY_DIR}\" +") + +file(RELATIVE_PATH SOURCE_DIR_RELATIVE ${CMAKE_BINARY_DIR} ${CMAKE_SOURCE_DIR}) +set(OPENCV_DATA_CONFIG_STR "${OPENCV_DATA_CONFIG_STR} +#define OPENCV_DATA_BUILD_DIR_SEARCH_PATHS \\ + \"${SOURCE_DIR_RELATIVE}/\" +") + +if(WIN32) + file(RELATIVE_PATH INSTALL_DATA_DIR_RELATIVE 
"${CMAKE_INSTALL_PREFIX}/${OPENCV_BIN_INSTALL_PATH}" "${CMAKE_INSTALL_PREFIX}/${OPENCV_OTHER_INSTALL_PATH}") +else() + file(RELATIVE_PATH INSTALL_DATA_DIR_RELATIVE "${CMAKE_INSTALL_PREFIX}/${OPENCV_LIB_INSTALL_PATH}" "${CMAKE_INSTALL_PREFIX}/${OPENCV_OTHER_INSTALL_PATH}") +endif() +list(APPEND OPENCV_INSTALL_DATA_DIR_RELATIVE "${INSTALL_DATA_DIR_RELATIVE}") +string(REPLACE ";" "\",\\\n \"" OPENCV_INSTALL_DATA_DIR_RELATIVE_STR "\"${OPENCV_INSTALL_DATA_DIR_RELATIVE}\"") +set(OPENCV_DATA_CONFIG_STR "${OPENCV_DATA_CONFIG_STR} +#define OPENCV_INSTALL_DATA_DIR_RELATIVE ${OPENCV_INSTALL_DATA_DIR_RELATIVE_STR} +") + +if(EXISTS "${OPENCV_DATA_CONFIG_FILE}") + file(READ "${OPENCV_DATA_CONFIG_FILE}" __content) +endif() +if(NOT OPENCV_DATA_CONFIG_STR STREQUAL "${__content}") + file(WRITE "${OPENCV_DATA_CONFIG_FILE}" "${OPENCV_DATA_CONFIG_STR}") +endif() diff --git a/modules/core/include/opencv2/core.hpp b/modules/core/include/opencv2/core.hpp index 9660269828..24490e0d7b 100644 --- a/modules/core/include/opencv2/core.hpp +++ b/modules/core/include/opencv2/core.hpp @@ -75,6 +75,7 @@ @defgroup core_utils_sse SSE utilities @defgroup core_utils_neon NEON utilities @defgroup core_utils_softfloat Softfloat support + @defgroup core_utils_samples Utility functions for OpenCV samples @} @defgroup core_opengl OpenGL interoperability @defgroup core_ipp Intel IPP Asynchronous C/C++ Converters diff --git a/modules/core/include/opencv2/core/cvdef.h b/modules/core/include/opencv2/core/cvdef.h index 7341598af1..6623a1c2d4 100644 --- a/modules/core/include/opencv2/core/cvdef.h +++ b/modules/core/include/opencv2/core/cvdef.h @@ -349,6 +349,15 @@ Cv64suf; # endif #endif +#ifndef CV_DEPRECATED_EXTERNAL +# if defined(__OPENCV_BUILD) +# define CV_DEPRECATED_EXTERNAL /* nothing */ +# else +# define CV_DEPRECATED_EXTERNAL CV_DEPRECATED +# endif +#endif + + #ifndef CV_EXTERN_C # ifdef __cplusplus # define CV_EXTERN_C extern "C" diff --git a/modules/core/include/opencv2/core/hal/intrin_avx.hpp 
b/modules/core/include/opencv2/core/hal/intrin_avx.hpp index 58b3e7fae7..f8cc7a4d00 100644 --- a/modules/core/include/opencv2/core/hal/intrin_avx.hpp +++ b/modules/core/include/opencv2/core/hal/intrin_avx.hpp @@ -1363,25 +1363,22 @@ inline v_float64x4 v_cvt_f64_high(const v_float32x8& a) inline v_int32x8 v_lut(const int* tab, const v_int32x8& idxvec) { - int CV_DECL_ALIGNED(32) idx[8]; - v_store_aligned(idx, idxvec); - return v_int32x8(_mm256_setr_epi32(tab[idx[0]], tab[idx[1]], tab[idx[2]], tab[idx[3]], - tab[idx[4]], tab[idx[5]], tab[idx[6]], tab[idx[7]])); + return v_int32x8(_mm256_i32gather_epi32(tab, idxvec.val, 4)); +} + +inline v_uint32x8 v_lut(const unsigned* tab, const v_int32x8& idxvec) +{ + return v_reinterpret_as_u32(v_lut((const int *)tab, idxvec)); } inline v_float32x8 v_lut(const float* tab, const v_int32x8& idxvec) { - int CV_DECL_ALIGNED(32) idx[8]; - v_store_aligned(idx, idxvec); - return v_float32x8(_mm256_setr_ps(tab[idx[0]], tab[idx[1]], tab[idx[2]], tab[idx[3]], - tab[idx[4]], tab[idx[5]], tab[idx[6]], tab[idx[7]])); + return v_float32x8(_mm256_i32gather_ps(tab, idxvec.val, 4)); } inline v_float64x4 v_lut(const double* tab, const v_int32x8& idxvec) { - int CV_DECL_ALIGNED(32) idx[8]; - v_store_aligned(idx, idxvec); - return v_float64x4(_mm256_setr_pd(tab[idx[0]], tab[idx[1]], tab[idx[2]], tab[idx[3]])); + return v_float64x4(_mm256_i32gather_pd(tab, _mm256_castsi256_si128(idxvec.val), 8)); } inline void v_lut_deinterleave(const float* tab, const v_int32x8& idxvec, v_float32x8& x, v_float32x8& y) diff --git a/modules/core/include/opencv2/core/private.hpp b/modules/core/include/opencv2/core/private.hpp index 73096aed31..b451875a2b 100644 --- a/modules/core/include/opencv2/core/private.hpp +++ b/modules/core/include/opencv2/core/private.hpp @@ -794,6 +794,82 @@ CV_EXPORTS InstrNode* getCurrentNode(); #define CV_INSTRUMENT_REGION(); CV_INSTRUMENT_REGION_(); #endif +namespace cv { + +namespace utils { + +//! @addtogroup core_utils +//! 
@{ + +/** @brief Try to find requested data file + +Search directories: + +1. Directories passed via `addDataSearchPath()` +2. Check path specified by configuration parameter with "_HINT" suffix (name of environment variable). +3. Check path specified by configuration parameter (name of environment variable). + If parameter value is not empty and nothing is found then stop searching. +4. Detects build/install path based on: + a. current working directory (CWD) + b. and/or binary module location (opencv_core/opencv_world, doesn't work with static linkage) +5. Scan `/{,data}` directories if build directory is detected or the current directory is in source tree. +6. Scan `/share/OpenCV` directory if install directory is detected. + +@param relative_path Relative path to data file +@param required Specify "file not found" handling. + If true, function prints information message and raises cv::Exception. + If false, function returns empty result +@param configuration_parameter specify configuration parameter name. Default NULL value means "OPENCV_DATA_PATH". +@return Returns path (absolute or relative to the current directory) or empty string if file is not found + +@note Implementation is not thread-safe. +*/ +CV_EXPORTS +cv::String findDataFile(const cv::String& relative_path, bool required = true, + const char* configuration_parameter = NULL); + +/** @overload +@param relative_path Relative path to data file +@param configuration_parameter specify configuration parameter name. Default NULL value means "OPENCV_DATA_PATH". +@param search_paths override addDataSearchPath() settings. +@param subdir_paths override addDataSearchSubDirectory() settings. +@return Returns path (absolute or relative to the current directory) or empty string if file is not found + +@note Implementation is not thread-safe. 
+*/ +CV_EXPORTS +cv::String findDataFile(const cv::String& relative_path, + const char* configuration_parameter, + const std::vector<cv::String>* search_paths, + const std::vector<cv::String>* subdir_paths); + +/** @brief Override default search data path by adding new search location + +Use this only to override default behavior +Passed paths are used in LIFO order. + +@param path Path to used samples data + +@note Implementation is not thread-safe. +*/ +CV_EXPORTS void addDataSearchPath(const cv::String& path); + +/** @brief Append default search data sub directory + +General usage is to add OpenCV modules name (`<opencv_root>/modules/<name>/data` -> `modules/<name>/data` + `<name>/data`). +Passed subdirectories are used in LIFO order. + +@param subdir samples data sub directory + +@note Implementation is not thread-safe. +*/ +CV_EXPORTS void addDataSearchSubDirectory(const cv::String& subdir); + +//! @} + +} // namespace utils +} // namespace cv + //! @endcond #endif // OPENCV_CORE_PRIVATE_HPP diff --git a/modules/core/include/opencv2/core/utility.hpp b/modules/core/include/opencv2/core/utility.hpp index ae08a2609e..29dd8fb2c2 100644 --- a/modules/core/include/opencv2/core/utility.hpp +++ b/modules/core/include/opencv2/core/utility.hpp @@ -1234,8 +1234,75 @@ enum FLAGS CV_EXPORTS void setFlags(FLAGS modeFlags); static inline void setFlags(int modeFlags) { setFlags((FLAGS)modeFlags); } CV_EXPORTS FLAGS getFlags(); + +} // namespace instr + + +namespace samples { + +//! @addtogroup core_utils_samples +// This section describes utility functions for OpenCV samples. +// +// @note Implementation of these utilities is not thread-safe. +// +//! @{ + +/** @brief Try to find requested data file + +Search directories: + +1. Directories passed via `addSamplesDataSearchPath()` +2. OPENCV_SAMPLES_DATA_PATH_HINT environment variable +3. OPENCV_SAMPLES_DATA_PATH environment variable + If parameter value is not empty and nothing is found then stop searching. +4. Detects build/install path based on: + a.
current working directory (CWD) + b. and/or binary module location (opencv_core/opencv_world, doesn't work with static linkage) +5. Scan `/{,data,samples/data}` directories if build directory is detected or the current directory is in source tree. +6. Scan `/share/OpenCV` directory if install directory is detected. + +@see cv::utils::findDataFile + +@param relative_path Relative path to data file +@param required Specify "file not found" handling. + If true, function prints information message and raises cv::Exception. + If false, function returns empty result +@param silentMode Disables messages +@return Returns path (absolute or relative to the current directory) or empty string if file is not found +*/ +CV_EXPORTS_W cv::String findFile(const cv::String& relative_path, bool required = true, bool silentMode = false); + +CV_EXPORTS_W cv::String findFileOrKeep(const cv::String& relative_path, bool silentMode = false); + +inline cv::String findFileOrKeep(const cv::String& relative_path, bool silentMode) +{ + cv::String res = findFile(relative_path, false, silentMode); + if (res.empty()) + return relative_path; + return res; } +/** @brief Override search data path by adding new search location + +Use this only to override default behavior +Passed paths are used in LIFO order. + +@param path Path to used samples data +*/ +CV_EXPORTS_W void addSamplesDataSearchPath(const cv::String& path); + +/** @brief Append samples search data sub directory + +General usage is to add OpenCV modules name (`/modules//samples/data` -> `/samples/data` + `modules//samples/data`). +Passed subdirectories are used in LIFO order. + +@param subdir samples data sub directory +*/ +CV_EXPORTS_W void addSamplesDataSearchSubDirectory(const cv::String& subdir); + +//! 
@} +} // namespace samples + namespace utils { CV_EXPORTS int getThreadID(); diff --git a/modules/core/include/opencv2/core/utils/filesystem.hpp b/modules/core/include/opencv2/core/utils/filesystem.hpp index 12b10a76ae..00b0dd1c12 100644 --- a/modules/core/include/opencv2/core/utils/filesystem.hpp +++ b/modules/core/include/opencv2/core/utils/filesystem.hpp @@ -16,6 +16,13 @@ CV_EXPORTS void remove_all(const cv::String& path); CV_EXPORTS cv::String getcwd(); +/** @brief Converts path p to a canonical absolute path + * Symlinks are processed if there is support for them on running platform. + * + * @param path input path. Target file/directory should exist. + */ +CV_EXPORTS cv::String canonical(const cv::String& path); + /** Join path components */ CV_EXPORTS cv::String join(const cv::String& base, const cv::String& path); diff --git a/modules/core/src/batch_distance.cpp b/modules/core/src/batch_distance.cpp index 71d0e9e3ff..1ce2edb769 100644 --- a/modules/core/src/batch_distance.cpp +++ b/modules/core/src/batch_distance.cpp @@ -297,19 +297,21 @@ void cv::batchDistance( InputArray _src1, InputArray _src2, nidx = Scalar::all(-1); } + if( crosscheck ) { CV_Assert( K == 1 && update == 0 && mask.empty() ); CV_Assert(!nidx.empty()); - Mat tdist, tidx; + Mat tdist, tidx, sdist, sidx; batchDistance(src2, src1, tdist, dtype, tidx, normType, K, mask, 0, false); + batchDistance(src1, src2, sdist, dtype, sidx, normType, K, mask, 0, false); // if an idx-th element from src1 appeared to be the nearest to i-th element of src2, // we update the minimum mutual distance between idx-th element of src1 and the whole src2 set. // As a result, if nidx[idx] = i*, it means that idx-th element of src1 is the nearest // to i*-th element of src2 and i*-th element of src2 is the closest to idx-th element of src1. // If nidx[idx] = -1, it means that there is no such ideal couple for it in src2. - // This O(N) procedure is called cross-check and it helps to eliminate some false matches. 
+ // This O(2N) procedure is called cross-check and it helps to eliminate some false matches. if( dtype == CV_32S ) { for( int i = 0; i < tdist.rows; i++ ) @@ -336,6 +338,13 @@ void cv::batchDistance( InputArray _src1, InputArray _src2, } } } + for( int i = 0; i < sdist.rows; i++ ) + { + if( tidx.at<int>(sidx.at<int>(i)) != i ) + { + nidx.at<int>(i) = -1; + } + } return; } diff --git a/modules/core/src/copy.cpp b/modules/core/src/copy.cpp index 6d6aaff112..487d6f51da 100644 --- a/modules/core/src/copy.cpp +++ b/modules/core/src/copy.cpp @@ -1183,9 +1183,9 @@ void cv::copyMakeBorder( InputArray _src, OutputArray _dst, int top, int bottom, { CV_INSTRUMENT_REGION(); - CV_Assert( top >= 0 && bottom >= 0 && left >= 0 && right >= 0 ); + CV_Assert( top >= 0 && bottom >= 0 && left >= 0 && right >= 0 && _src.dims() <= 2); - CV_OCL_RUN(_dst.isUMat() && _src.dims() <= 2, + CV_OCL_RUN(_dst.isUMat(), ocl_copyMakeBorder(_src, _dst, top, bottom, left, right, borderType, value)) Mat src = _src.getMat(); diff --git a/modules/core/src/matmul.cpp b/modules/core/src/matmul.cpp index 9d1a220997..19ab907c9e 100644 --- a/modules/core/src/matmul.cpp +++ b/modules/core/src/matmul.cpp @@ -1699,7 +1699,7 @@ transform_( const T* src, T* dst, const WT* m, int len, int scn, int dcn ) } } -#if CV_SIMD128 +#if CV_SIMD128 && !defined(__aarch64__) static inline void load3x3Matrix(const float* m, v_float32x4& m0, v_float32x4& m1, v_float32x4& m2, v_float32x4& m3) { @@ -1708,7 +1708,9 @@ load3x3Matrix(const float* m, v_float32x4& m0, v_float32x4& m1, v_float32x4& m2, m2 = v_float32x4(m[2], m[6], m[10], 0); m3 = v_float32x4(m[3], m[7], m[11], 0); } +#endif +#if CV_SIMD128 static inline v_int16x8 v_matmulvec(const v_int16x8 &v0, const v_int16x8 &m0, const v_int16x8 &m1, const v_int16x8 &m2, const v_int32x4 &m3, const int BITS) { diff --git a/modules/core/src/utils/datafile.cpp b/modules/core/src/utils/datafile.cpp new file mode 100644 index 0000000000..4973a55422 --- /dev/null +++
b/modules/core/src/utils/datafile.cpp @@ -0,0 +1,398 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. + +#include "../precomp.hpp" + +#include "opencv_data_config.hpp" + +#include +#include + +#include +#undef CV_LOG_STRIP_LEVEL +#define CV_LOG_STRIP_LEVEL CV_LOG_LEVEL_VERBOSE + 1 +#include "opencv2/core/utils/logger.hpp" +#include "opencv2/core/utils/filesystem.hpp" + +#include + +#ifdef _WIN32 +#define WIN32_LEAN_AND_MEAN +#include +#undef small +#undef min +#undef max +#undef abs +#elif defined(__APPLE__) +#include +#if TARGET_OS_MAC +#include +#endif +#endif + +namespace cv { namespace utils { + +static cv::Ptr< std::vector > g_data_search_path; +static cv::Ptr< std::vector > g_data_search_subdir; + +static std::vector& _getDataSearchPath() +{ + if (g_data_search_path.empty()) + g_data_search_path.reset(new std::vector()); + return *(g_data_search_path.get()); +} + +static std::vector& _getDataSearchSubDirectory() +{ + if (g_data_search_subdir.empty()) + { + g_data_search_subdir.reset(new std::vector()); + g_data_search_subdir->push_back("data"); + g_data_search_subdir->push_back(""); + } + return *(g_data_search_subdir.get()); +} + + +CV_EXPORTS void addDataSearchPath(const cv::String& path) +{ + if (utils::fs::isDirectory(path)) + _getDataSearchPath().push_back(path); +} +CV_EXPORTS void addDataSearchSubDirectory(const cv::String& subdir) +{ + _getDataSearchSubDirectory().push_back(subdir); +} + +static bool isPathSep(char c) +{ + return c == '/' || c == '\\'; +} +static bool isSubDirectory_(const cv::String& base_path, const cv::String& path) +{ + size_t N = base_path.size(); + if (N == 0) + return false; + if (isPathSep(base_path[N - 1])) + N--; + if (path.size() < N) + return false; + for (size_t i = 0; i < N; i++) + { + if (path[i] == base_path[i]) + continue; + if (isPathSep(path[i]) && 
isPathSep(base_path[i])) + continue; + return false; + } + size_t M = path.size(); + if (M > N) + { + if (!isPathSep(path[N])) + return false; + } + return true; +} +static bool isSubDirectory(const cv::String& base_path, const cv::String& path) +{ + bool res = isSubDirectory_(base_path, path); + CV_LOG_VERBOSE(NULL, 0, "isSubDirectory(): base: " << base_path << " path: " << path << " => result: " << (res ? "TRUE" : "FALSE")); + return res; +} + +static cv::String getModuleLocation(const void* addr) +{ + CV_UNUSED(addr); +#ifdef _WIN32 + HMODULE m = 0; +#if _WIN32_WINNT >= 0x0501 + ::GetModuleHandleEx(GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS | GET_MODULE_HANDLE_EX_FLAG_UNCHANGED_REFCOUNT, + reinterpret_cast(addr), + &m); +#endif + if (m) + { + char path[MAX_PATH]; + const size_t path_size = sizeof(path)/sizeof(*path); + size_t sz = GetModuleFileNameA(m, path, path_size); // no unicode support + if (sz > 0 && sz < path_size) + { + path[sz] = '\0'; + return cv::String(path); + } + } +#elif defined(__linux__) + std::ifstream fs("/proc/self/maps"); + std::string line; + while (std::getline(fs, line, '\n')) + { + long long int addr_begin = 0, addr_end = 0; + if (2 == sscanf(line.c_str(), "%llx-%llx", &addr_begin, &addr_end)) + { + if ((intptr_t)addr >= (intptr_t)addr_begin && (intptr_t)addr < (intptr_t)addr_end) + { + size_t pos = line.rfind(" "); // 2 spaces + if (pos == cv::String::npos) + pos = line.rfind(' '); // 1 spaces + else + pos++; + if (pos == cv::String::npos) + { + CV_LOG_DEBUG(NULL, "Can't parse module path: '" << line << '\''); + } + return line.substr(pos + 1); + } + } + } +#elif defined(__APPLE__) +# if TARGET_OS_MAC + Dl_info info; + if (0 != dladdr(addr, &info)) + { + return cv::String(info.dli_fname); + } +# endif +#else + // not supported, skip +#endif + return cv::String(); +} + +cv::String findDataFile(const cv::String& relative_path, + const char* configuration_parameter, + const std::vector* search_paths, + const std::vector* subdir_paths) +{ + 
configuration_parameter = configuration_parameter ? configuration_parameter : "OPENCV_DATA_PATH"; + CV_LOG_DEBUG(NULL, cv::format("utils::findDataFile('%s', %s)", relative_path.c_str(), configuration_parameter)); + +#define TRY_FILE_WITH_PREFIX(prefix) \ +{ \ + cv::String path = utils::fs::join(prefix, relative_path); \ + CV_LOG_DEBUG(NULL, cv::format("... Line %d: trying open '%s'", __LINE__, path.c_str())); \ + FILE* f = fopen(path.c_str(), "rb"); \ + if(f) { \ + fclose(f); \ + return path; \ + } \ +} + + + // Step 0: check current directory or absolute path at first + TRY_FILE_WITH_PREFIX(""); + + + // Step 1 + const std::vector& search_path = search_paths ? *search_paths : _getDataSearchPath(); + for(size_t i = search_path.size(); i > 0; i--) + { + const cv::String& prefix = search_path[i - 1]; + TRY_FILE_WITH_PREFIX(prefix); + } + + const std::vector& search_subdir = subdir_paths ? *subdir_paths : _getDataSearchSubDirectory(); + + + // Step 2 + const cv::String configuration_parameter_s(configuration_parameter ? configuration_parameter : ""); + const cv::utils::Paths& search_hint = configuration_parameter_s.empty() ? cv::utils::Paths() + : getConfigurationParameterPaths((configuration_parameter_s + "_HINT").c_str()); + for (size_t k = 0; k < search_hint.size(); k++) + { + cv::String datapath = search_hint[k]; + if (datapath.empty()) + continue; + if (utils::fs::isDirectory(datapath)) + { + CV_LOG_DEBUG(NULL, "utils::findDataFile(): trying " << configuration_parameter << "_HINT=" << datapath); + for(size_t i = search_subdir.size(); i > 0; i--) + { + const cv::String& subdir = search_subdir[i - 1]; + cv::String prefix = utils::fs::join(datapath, subdir); + TRY_FILE_WITH_PREFIX(prefix); + } + } + else + { + CV_LOG_WARNING(NULL, configuration_parameter << "_HINT is specified but it is not a directory: " << datapath); + } + } + + + // Step 3 + const cv::utils::Paths& override_paths = configuration_parameter_s.empty() ? 
cv::utils::Paths() + : getConfigurationParameterPaths(configuration_parameter); + for (size_t k = 0; k < override_paths.size(); k++) + { + cv::String datapath = override_paths[k]; + if (datapath.empty()) + continue; + if (utils::fs::isDirectory(datapath)) + { + CV_LOG_DEBUG(NULL, "utils::findDataFile(): trying " << configuration_parameter << "=" << datapath); + for(size_t i = search_subdir.size(); i > 0; i--) + { + const cv::String& subdir = search_subdir[i - 1]; + cv::String prefix = utils::fs::join(datapath, subdir); + TRY_FILE_WITH_PREFIX(prefix); + } + } + else + { + CV_LOG_WARNING(NULL, configuration_parameter << " is specified but it is not a directory: " << datapath); + } + } + if (!override_paths.empty()) + { + CV_LOG_INFO(NULL, "utils::findDataFile(): can't find data file via " << configuration_parameter << " configuration override: " << relative_path); + return cv::String(); + } + + + // Steps: 4, 5, 6 + cv::String cwd = utils::fs::getcwd(); + cv::String build_dir(OPENCV_BUILD_DIR); + bool has_tested_build_directory = false; + if (isSubDirectory(build_dir, cwd) || isSubDirectory(utils::fs::canonical(build_dir), utils::fs::canonical(cwd))) + { + CV_LOG_DEBUG(NULL, "utils::findDataFile(): the current directory is build sub-directory: " << cwd); + const char* build_subdirs[] = { OPENCV_DATA_BUILD_DIR_SEARCH_PATHS }; + for (size_t k = 0; k < sizeof(build_subdirs)/sizeof(build_subdirs[0]); k++) + { + CV_LOG_DEBUG(NULL, "utils::findDataFile(): /" << build_subdirs[k]); + cv::String datapath = utils::fs::join(build_dir, build_subdirs[k]); + if (utils::fs::isDirectory(datapath)) + { + for(size_t i = search_subdir.size(); i > 0; i--) + { + const cv::String& subdir = search_subdir[i - 1]; + cv::String prefix = utils::fs::join(datapath, subdir); + TRY_FILE_WITH_PREFIX(prefix); + } + } + } + has_tested_build_directory = true; + } + + cv::String source_dir; + cv::String try_source_dir = cwd; + for (int levels = 0; levels < 3; ++levels) + { + if 
(utils::fs::exists(utils::fs::join(try_source_dir, "modules/core/include/opencv2/core/version.hpp"))) + { + source_dir = try_source_dir; + break; + } + try_source_dir = utils::fs::join(try_source_dir, "/.."); + } + if (!source_dir.empty()) + { + CV_LOG_DEBUG(NULL, "utils::findDataFile(): the current directory is source sub-directory: " << source_dir); + CV_LOG_DEBUG(NULL, "utils::findDataFile(): " << source_dir); + cv::String datapath = source_dir; + if (utils::fs::isDirectory(datapath)) + { + for(size_t i = search_subdir.size(); i > 0; i--) + { + const cv::String& subdir = search_subdir[i - 1]; + cv::String prefix = utils::fs::join(datapath, subdir); + TRY_FILE_WITH_PREFIX(prefix); + } + } + } + + cv::String module_path = getModuleLocation((void*)getModuleLocation); // use code addr, doesn't work with static linkage! + CV_LOG_DEBUG(NULL, "Detected module path: '" << module_path << '\''); + + if (!has_tested_build_directory && + (isSubDirectory(build_dir, module_path) || isSubDirectory(utils::fs::canonical(build_dir), utils::fs::canonical(module_path))) + ) + { + CV_LOG_DEBUG(NULL, "utils::findDataFile(): the binary module directory is build sub-directory: " << module_path); + const char* build_subdirs[] = { OPENCV_DATA_BUILD_DIR_SEARCH_PATHS }; + for (size_t k = 0; k < sizeof(build_subdirs)/sizeof(build_subdirs[0]); k++) + { + CV_LOG_DEBUG(NULL, "utils::findDataFile(): /" << build_subdirs[k]); + cv::String datapath = utils::fs::join(build_dir, build_subdirs[k]); + if (utils::fs::isDirectory(datapath)) + { + for(size_t i = search_subdir.size(); i > 0; i--) + { + const cv::String& subdir = search_subdir[i - 1]; + cv::String prefix = utils::fs::join(datapath, subdir); + TRY_FILE_WITH_PREFIX(prefix); + } + } + } + } + +#if defined OPENCV_INSTALL_DATA_DIR_RELATIVE + if (!module_path.empty()) // require module path + { + size_t pos = module_path.rfind('/'); + if (pos == cv::String::npos) + pos = module_path.rfind('\\'); + cv::String module_dir = (pos == 
cv::String::npos) ? module_path : module_path.substr(0, pos); + const char* install_subdirs[] = { OPENCV_INSTALL_DATA_DIR_RELATIVE }; + for (size_t k = 0; k < sizeof(install_subdirs)/sizeof(install_subdirs[0]); k++) + { + cv::String datapath = utils::fs::join(module_dir, install_subdirs[k]); + CV_LOG_DEBUG(NULL, "utils::findDataFile(): trying install path (from binary path): " << datapath); + if (utils::fs::isDirectory(datapath)) + { + for(size_t i = search_subdir.size(); i > 0; i--) + { + const cv::String& subdir = search_subdir[i - 1]; + cv::String prefix = utils::fs::join(datapath, subdir); + TRY_FILE_WITH_PREFIX(prefix); + } + } + else + { + CV_LOG_DEBUG(NULL, "utils::findDataFile(): ... skip, not a valid directory: " << datapath); + } + } + } +#endif + +#if defined OPENCV_INSTALL_PREFIX && defined OPENCV_DATA_INSTALL_PATH + cv::String install_dir(OPENCV_INSTALL_PREFIX); + // use core/world module path and verify that library is running from installation directory + // It is neccessary to avoid touching of unrelated common /usr/local path + if (module_path.empty()) // can't determine + module_path = install_dir; + if (isSubDirectory(install_dir, module_path) || isSubDirectory(utils::fs::canonical(install_dir), utils::fs::canonical(module_path))) + { + cv::String datapath = utils::fs::join(install_dir, OPENCV_DATA_INSTALL_PATH); + if (utils::fs::isDirectory(datapath)) + { + CV_LOG_DEBUG(NULL, "utils::findDataFile(): trying install path: " << datapath); + for(size_t i = search_subdir.size(); i > 0; i--) + { + const cv::String& subdir = search_subdir[i - 1]; + cv::String prefix = utils::fs::join(datapath, subdir); + TRY_FILE_WITH_PREFIX(prefix); + } + } + } +#endif + + return cv::String(); // not found +} + +cv::String findDataFile(const cv::String& relative_path, bool required, const char* configuration_parameter) +{ + CV_LOG_DEBUG(NULL, cv::format("cv::utils::findDataFile('%s', %s, %s)", + relative_path.c_str(), required ? 
"true" : "false", + configuration_parameter ? configuration_parameter : "NULL")); + cv::String result = cv::utils::findDataFile(relative_path, + configuration_parameter, + NULL, + NULL); + if (result.empty() && required) + CV_Error(cv::Error::StsError, cv::format("OpenCV: Can't find required data file: %s", relative_path.c_str())); + return result; +} + +}} // namespace diff --git a/modules/core/src/utils/filesystem.cpp b/modules/core/src/utils/filesystem.cpp index deb13872c8..606819eb17 100644 --- a/modules/core/src/utils/filesystem.cpp +++ b/modules/core/src/utils/filesystem.cpp @@ -85,6 +85,23 @@ cv::String join(const cv::String& base, const cv::String& path) #if OPENCV_HAVE_FILESYSTEM_SUPPORT +cv::String canonical(const cv::String& path) +{ + cv::String result; +#ifdef _WIN32 + const char* result_str = _fullpath(NULL, path.c_str(), 0); +#else + const char* result_str = realpath(path.c_str(), NULL); +#endif + if (result_str) + { + result = cv::String(result_str); + free((void*)result_str); + } + return result.empty() ? 
path : result; +} + + bool exists(const cv::String& path) { CV_INSTRUMENT_REGION(); @@ -543,11 +560,12 @@ cv::String getCacheDirectory(const char* sub_directory_name, const char* configu #else #define NOT_IMPLEMENTED CV_Error(Error::StsNotImplemented, ""); -CV_EXPORTS bool exists(const cv::String& /*path*/) { NOT_IMPLEMENTED } -CV_EXPORTS void remove_all(const cv::String& /*path*/) { NOT_IMPLEMENTED } -CV_EXPORTS bool createDirectory(const cv::String& /*path*/) { NOT_IMPLEMENTED } -CV_EXPORTS bool createDirectories(const cv::String& /*path*/) { NOT_IMPLEMENTED } -CV_EXPORTS cv::String getCacheDirectory(const char* /*sub_directory_name*/, const char* /*configuration_name = NULL*/) { NOT_IMPLEMENTED } +cv::String canonical(const cv::String& /*path*/) { NOT_IMPLEMENTED } +bool exists(const cv::String& /*path*/) { NOT_IMPLEMENTED } +void remove_all(const cv::String& /*path*/) { NOT_IMPLEMENTED } +bool createDirectory(const cv::String& /*path*/) { NOT_IMPLEMENTED } +bool createDirectories(const cv::String& /*path*/) { NOT_IMPLEMENTED } +cv::String getCacheDirectory(const char* /*sub_directory_name*/, const char* /*configuration_name = NULL*/) { NOT_IMPLEMENTED } #undef NOT_IMPLEMENTED #endif // OPENCV_HAVE_FILESYSTEM_SUPPORT diff --git a/modules/core/src/utils/samples.cpp b/modules/core/src/utils/samples.cpp new file mode 100644 index 0000000000..c1162f85fe --- /dev/null +++ b/modules/core/src/utils/samples.cpp @@ -0,0 +1,67 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. 
+ +#include "../precomp.hpp" + +#include + +#include +#undef CV_LOG_STRIP_LEVEL +#define CV_LOG_STRIP_LEVEL CV_LOG_LEVEL_VERBOSE + 1 +#include "opencv2/core/utils/logger.hpp" +#include "opencv2/core/utils/filesystem.hpp" + +namespace cv { namespace samples { + +static cv::Ptr< std::vector > g_data_search_path; +static cv::Ptr< std::vector > g_data_search_subdir; + +static std::vector& _getDataSearchPath() +{ + if (g_data_search_path.empty()) + g_data_search_path.reset(new std::vector()); + return *(g_data_search_path.get()); +} + +static std::vector& _getDataSearchSubDirectory() +{ + if (g_data_search_subdir.empty()) + { + g_data_search_subdir.reset(new std::vector()); + g_data_search_subdir->push_back("samples/data"); + g_data_search_subdir->push_back("data"); + g_data_search_subdir->push_back(""); + } + return *(g_data_search_subdir.get()); +} + + +CV_EXPORTS void addSamplesDataSearchPath(const cv::String& path) +{ + if (utils::fs::isDirectory(path)) + _getDataSearchPath().push_back(path); +} +CV_EXPORTS void addSamplesDataSearchSubDirectory(const cv::String& subdir) +{ + _getDataSearchSubDirectory().push_back(subdir); +} + +cv::String findFile(const cv::String& relative_path, bool required, bool silentMode) +{ + CV_LOG_DEBUG(NULL, cv::format("cv::samples::findFile('%s', %s)", relative_path.c_str(), required ? 
"true" : "false")); + cv::String result = cv::utils::findDataFile(relative_path, + "OPENCV_SAMPLES_DATA_PATH", + &_getDataSearchPath(), + &_getDataSearchSubDirectory()); + if (result != relative_path && !silentMode) + { + CV_LOG_WARNING(NULL, "cv::samples::findFile('" << relative_path << "') => '" << result << "'"); + } + if (result.empty() && required) + CV_Error(cv::Error::StsError, cv::format("OpenCV samples: Can't find required data file: %s", relative_path.c_str())); + return result; +} + + +}} // namespace diff --git a/modules/core/test/test_utils.cpp b/modules/core/test/test_utils.cpp index 2a1503a33c..c566762925 100644 --- a/modules/core/test/test_utils.cpp +++ b/modules/core/test/test_utils.cpp @@ -2,6 +2,7 @@ // It is subject to the license terms in the LICENSE file found in the top-level directory // of this distribution and at http://opencv.org/license.html. #include "test_precomp.hpp" +#include "opencv2/core/utils/logger.hpp" namespace opencv_test { namespace { @@ -283,4 +284,21 @@ TEST(CommandLineParser, testScalar) EXPECT_EQ(parser.get("s5"), Scalar(5, -4, 3, 2)); } +TEST(Samples, findFile) +{ + cv::utils::logging::LogLevel prev = cv::utils::logging::setLogLevel(cv::utils::logging::LOG_LEVEL_VERBOSE); + cv::String path; + ASSERT_NO_THROW(path = samples::findFile("lena.jpg", false)); + EXPECT_NE(std::string(), path.c_str()); + cv::utils::logging::setLogLevel(prev); +} + +TEST(Samples, findFile_missing) +{ + cv::utils::logging::LogLevel prev = cv::utils::logging::setLogLevel(cv::utils::logging::LOG_LEVEL_VERBOSE); + cv::String path; + ASSERT_ANY_THROW(path = samples::findFile("non-existed.file", true)); + cv::utils::logging::setLogLevel(prev); +} + }} // namespace diff --git a/modules/dnn/CMakeLists.txt b/modules/dnn/CMakeLists.txt index 720f1a89ab..00554b2b69 100644 --- a/modules/dnn/CMakeLists.txt +++ b/modules/dnn/CMakeLists.txt @@ -20,11 +20,6 @@ else() ocv_cmake_hook_append(INIT_MODULE_SOURCES_opencv_dnn 
"${CMAKE_CURRENT_LIST_DIR}/cmake/hooks/INIT_MODULE_SOURCES_opencv_dnn.cmake") endif() -ocv_warnings_disable(CMAKE_CXX_FLAGS -Wno-shadow -Wno-parentheses -Wmaybe-uninitialized -Wsign-promo - -Wmissing-declarations -Wmissing-prototypes -) -ocv_warnings_disable(CMAKE_CXX_FLAGS /wd4701 /wd4100) - if(MSVC) add_definitions( -D_CRT_SECURE_NO_WARNINGS=1 ) ocv_warnings_disable(CMAKE_CXX_FLAGS /wd4244 /wd4267 /wd4018 /wd4355 /wd4800 /wd4251 /wd4996 /wd4146 @@ -33,12 +28,14 @@ if(MSVC) ) else() ocv_warnings_disable(CMAKE_CXX_FLAGS -Wno-deprecated -Wmissing-prototypes -Wmissing-declarations -Wshadow - -Wunused-parameter -Wunused-local-typedefs -Wsign-compare -Wsign-promo - -Wundef -Wtautological-undefined-compare -Wignored-qualifiers -Wextra - -Wunused-function -Wunused-const-variable -Wdeprecated-declarations + -Wunused-parameter -Wsign-compare ) endif() +if(NOT HAVE_CXX11) + ocv_warnings_disable(CMAKE_CXX_FLAGS -Wno-undef) # LANG_CXX11 from protobuf files +endif() + if(APPLE_FRAMEWORK) ocv_warnings_disable(CMAKE_CXX_FLAGS -Wshorten-64-to-32) endif() @@ -55,8 +52,6 @@ add_definitions(-DHAVE_PROTOBUF=1) #suppress warnings in autogenerated caffe.pb.* files ocv_warnings_disable(CMAKE_CXX_FLAGS - -Wunused-parameter -Wundef -Wignored-qualifiers -Wno-enum-compare - -Wdeprecated-declarations /wd4125 /wd4267 /wd4127 /wd4244 /wd4512 /wd4702 /wd4456 /wd4510 /wd4610 /wd4800 /wd4701 /wd4703 # potentially uninitialized local/pointer variable 'value' used diff --git a/modules/dnn/include/opencv2/dnn/all_layers.hpp b/modules/dnn/include/opencv2/dnn/all_layers.hpp index d77dd181dc..5a07be6419 100644 --- a/modules/dnn/include/opencv2/dnn/all_layers.hpp +++ b/modules/dnn/include/opencv2/dnn/all_layers.hpp @@ -236,7 +236,7 @@ CV__DNN_INLINE_NS_BEGIN int type; Size kernel, stride; int pad_l, pad_t, pad_r, pad_b; - CV_DEPRECATED Size pad; + CV_DEPRECATED_EXTERNAL Size pad; bool globalPooling; bool computeMaxIdx; String padMode; @@ -578,7 +578,7 @@ CV__DNN_INLINE_NS_BEGIN { public: float pnorm, 
epsilon; - CV_DEPRECATED bool acrossSpatial; + CV_DEPRECATED_EXTERNAL bool acrossSpatial; static Ptr create(const LayerParams& params); }; diff --git a/modules/dnn/include/opencv2/dnn/dict.hpp b/modules/dnn/include/opencv2/dnn/dict.hpp index 72ae5e3a15..463d314bee 100644 --- a/modules/dnn/include/opencv2/dnn/dict.hpp +++ b/modules/dnn/include/opencv2/dnn/dict.hpp @@ -60,12 +60,13 @@ CV__DNN_INLINE_NS_BEGIN struct CV_EXPORTS_W DictValue { DictValue(const DictValue &r); + DictValue(bool i) : type(Param::INT), pi(new AutoBuffer) { (*pi)[0] = i ? 1 : 0; } //!< Constructs integer scalar DictValue(int64 i = 0) : type(Param::INT), pi(new AutoBuffer) { (*pi)[0] = i; } //!< Constructs integer scalar - CV_WRAP DictValue(int i) : type(Param::INT), pi(new AutoBuffer) { (*pi)[0] = i; } //!< Constructs integer scalar + CV_WRAP DictValue(int i) : type(Param::INT), pi(new AutoBuffer) { (*pi)[0] = i; } //!< Constructs integer scalar DictValue(unsigned p) : type(Param::INT), pi(new AutoBuffer) { (*pi)[0] = p; } //!< Constructs integer scalar CV_WRAP DictValue(double p) : type(Param::REAL), pd(new AutoBuffer) { (*pd)[0] = p; } //!< Constructs floating point scalar CV_WRAP DictValue(const String &s) : type(Param::STRING), ps(new AutoBuffer) { (*ps)[0] = s; } //!< Constructs string scalar - DictValue(const char *s) : type(Param::STRING), ps(new AutoBuffer) { (*ps)[0] = s; } //!< @overload + DictValue(const char *s) : type(Param::STRING), ps(new AutoBuffer) { (*ps)[0] = s; } //!< @overload template static DictValue arrayInt(TypeIter begin, int size); //!< Constructs integer array diff --git a/modules/dnn/include/opencv2/dnn/dnn.hpp b/modules/dnn/include/opencv2/dnn/dnn.hpp index 1a214f384a..bb8e761311 100644 --- a/modules/dnn/include/opencv2/dnn/dnn.hpp +++ b/modules/dnn/include/opencv2/dnn/dnn.hpp @@ -181,7 +181,8 @@ CV__DNN_INLINE_NS_BEGIN * If this method is called after network has allocated all memory for input and output blobs * and before inferencing. 
*/ - CV_DEPRECATED virtual void finalize(const std::vector &input, std::vector &output); + CV_DEPRECATED_EXTERNAL + virtual void finalize(const std::vector &input, std::vector &output); /** @brief Computes and sets internal parameters according to inputs, outputs and blobs. * @param[in] inputs vector of already allocated input blobs @@ -198,7 +199,8 @@ CV__DNN_INLINE_NS_BEGIN * @param[out] output allocated output blobs, which will store results of the computation. * @param[out] internals allocated internal blobs */ - CV_DEPRECATED virtual void forward(std::vector &input, std::vector &output, std::vector &internals); + CV_DEPRECATED_EXTERNAL + virtual void forward(std::vector &input, std::vector &output, std::vector &internals); /** @brief Given the @p input blobs, computes the output @p blobs. * @param[in] inputs the input blobs. @@ -218,7 +220,8 @@ CV__DNN_INLINE_NS_BEGIN * @overload * @deprecated Use Layer::finalize(InputArrayOfArrays, OutputArrayOfArrays) instead */ - CV_DEPRECATED void finalize(const std::vector &inputs, CV_OUT std::vector &outputs); + CV_DEPRECATED_EXTERNAL + void finalize(const std::vector &inputs, CV_OUT std::vector &outputs); /** @brief * @overload diff --git a/modules/dnn/perf/perf_net.cpp b/modules/dnn/perf/perf_net.cpp index 192604b861..1647db3b31 100644 --- a/modules/dnn/perf/perf_net.cpp +++ b/modules/dnn/perf/perf_net.cpp @@ -175,8 +175,7 @@ PERF_TEST_P_(DNNTestNetwork, MobileNet_SSD_v2_TensorFlow) PERF_TEST_P_(DNNTestNetwork, DenseNet_121) { if (backend == DNN_BACKEND_HALIDE || - backend == DNN_BACKEND_INFERENCE_ENGINE && (target == DNN_TARGET_OPENCL_FP16 || - target == DNN_TARGET_MYRIAD)) + (backend == DNN_BACKEND_INFERENCE_ENGINE && (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD))) throw SkipTestException(""); processNet("dnn/DenseNet_121.caffemodel", "dnn/DenseNet_121.prototxt", "", Mat(cv::Size(224, 224), CV_32FC3)); @@ -185,7 +184,7 @@ PERF_TEST_P_(DNNTestNetwork, DenseNet_121) PERF_TEST_P_(DNNTestNetwork, 
OpenPose_pose_coco) { if (backend == DNN_BACKEND_HALIDE || - backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD) + (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)) throw SkipTestException(""); processNet("dnn/openpose_pose_coco.caffemodel", "dnn/openpose_pose_coco.prototxt", "", Mat(cv::Size(368, 368), CV_32FC3)); @@ -194,7 +193,7 @@ PERF_TEST_P_(DNNTestNetwork, OpenPose_pose_coco) PERF_TEST_P_(DNNTestNetwork, OpenPose_pose_mpi) { if (backend == DNN_BACKEND_HALIDE || - backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD) + (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)) throw SkipTestException(""); processNet("dnn/openpose_pose_mpi.caffemodel", "dnn/openpose_pose_mpi.prototxt", "", Mat(cv::Size(368, 368), CV_32FC3)); @@ -203,7 +202,7 @@ PERF_TEST_P_(DNNTestNetwork, OpenPose_pose_mpi) PERF_TEST_P_(DNNTestNetwork, OpenPose_pose_mpi_faster_4_stages) { if (backend == DNN_BACKEND_HALIDE || - backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD) + (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)) throw SkipTestException(""); // The same .caffemodel but modified .prototxt // See https://github.com/CMU-Perceptual-Computing-Lab/openpose/blob/master/src/openpose/pose/poseParameters.cpp @@ -230,7 +229,7 @@ PERF_TEST_P_(DNNTestNetwork, Inception_v2_SSD_TensorFlow) PERF_TEST_P_(DNNTestNetwork, YOLOv3) { if (backend == DNN_BACKEND_HALIDE || - backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD) + (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)) throw SkipTestException(""); Mat sample = imread(findDataFile("dnn/dog416.png", false)); Mat inp; @@ -241,7 +240,7 @@ PERF_TEST_P_(DNNTestNetwork, YOLOv3) PERF_TEST_P_(DNNTestNetwork, EAST_text_detection) { if (backend == DNN_BACKEND_HALIDE || - backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD) + (backend == DNN_BACKEND_INFERENCE_ENGINE && 
target == DNN_TARGET_MYRIAD)) throw SkipTestException(""); processNet("dnn/frozen_east_text_detection.pb", "", "", Mat(cv::Size(320, 320), CV_32FC3)); } diff --git a/modules/dnn/src/caffe/caffe_io.cpp b/modules/dnn/src/caffe/caffe_io.cpp index 9f4e31c7b0..501e49e72f 100644 --- a/modules/dnn/src/caffe/caffe_io.cpp +++ b/modules/dnn/src/caffe/caffe_io.cpp @@ -404,7 +404,7 @@ bool UpgradeV0LayerParameter(V1LayerParameter* v0_layer_connection_, PoolingParameter_PoolMethod_STOCHASTIC); break; default: - LOG(ERROR) << "Unknown pool method " << pool; + LOG(ERROR) << "Unknown pool method " << (int)pool; is_fully_compatible = false; } } else { @@ -863,7 +863,7 @@ bool UpgradeV1LayerParameter(V1LayerParameter* v1_layer_param_, while (layer_param->param_size() <= i) { layer_param->add_param(); } layer_param->mutable_param(i)->set_name(v1_layer_param.param(i)); } - ParamSpec_DimCheckMode mode; + ParamSpec_DimCheckMode mode = ParamSpec_DimCheckMode_STRICT; for (int i = 0; i < v1_layer_param.blob_share_mode_size(); ++i) { while (layer_param->param_size() <= i) { layer_param->add_param(); } switch (v1_layer_param.blob_share_mode(i)) { @@ -875,8 +875,8 @@ bool UpgradeV1LayerParameter(V1LayerParameter* v1_layer_param_, break; default: LOG(FATAL) << "Unknown blob_share_mode: " - << v1_layer_param.blob_share_mode(i); - break; + << (int)v1_layer_param.blob_share_mode(i); + CV_Error_(Error::StsError, ("Unknown blob_share_mode: %d", (int)v1_layer_param.blob_share_mode(i))); } layer_param->mutable_param(i)->set_share_mode(mode); } @@ -1102,12 +1102,12 @@ const char* UpgradeV1LayerType(const V1LayerParameter_LayerType type) { case V1LayerParameter_LayerType_THRESHOLD: return "Threshold"; default: - LOG(FATAL) << "Unknown V1LayerParameter layer type: " << type; + LOG(FATAL) << "Unknown V1LayerParameter layer type: " << (int)type; return ""; } } -const int kProtoReadBytesLimit = INT_MAX; // Max size of 2 GB minus 1 byte. 
+static const int kProtoReadBytesLimit = INT_MAX; // Max size of 2 GB minus 1 byte. bool ReadProtoFromBinary(ZeroCopyInputStream* input, Message *proto) { CodedInputStream coded_input(input); diff --git a/modules/dnn/src/dnn.cpp b/modules/dnn/src/dnn.cpp index 11c65ce9b4..a8b1c40b4b 100644 --- a/modules/dnn/src/dnn.cpp +++ b/modules/dnn/src/dnn.cpp @@ -353,7 +353,7 @@ struct LayerPin bool operator<(const LayerPin &r) const { - return lid < r.lid || lid == r.lid && oid < r.oid; + return lid < r.lid || (lid == r.lid && oid < r.oid); } bool operator ==(const LayerPin &r) const @@ -428,7 +428,7 @@ struct DataLayer : public Layer virtual bool supportBackend(int backendId) CV_OVERRIDE { return backendId == DNN_BACKEND_OPENCV || - backendId == DNN_BACKEND_INFERENCE_ENGINE && inputsData.size() == 1; + (backendId == DNN_BACKEND_INFERENCE_ENGINE && inputsData.size() == 1); } void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr) CV_OVERRIDE @@ -1665,6 +1665,23 @@ struct Net::Impl if (!ieNode->net->isInitialized()) { +#if INF_ENGINE_VER_MAJOR_GT(INF_ENGINE_RELEASE_2018R3) + // For networks which is built in runtime we need to specify a + // version of it's hyperparameters. 
+ std::string versionTrigger = "" + "" + "" + "" + "" + "1" + "" + "" + "" + "" + ""; + InferenceEngine::CNNNetReader reader; + reader.ReadNetwork(versionTrigger.data(), versionTrigger.size()); +#endif ieNode->net->init(preferableTarget); ld.skip = false; } @@ -1787,8 +1804,8 @@ struct Net::Impl void fuseLayers(const std::vector& blobsToKeep_) { - if( !fusion || preferableBackend != DNN_BACKEND_OPENCV && - preferableBackend != DNN_BACKEND_INFERENCE_ENGINE) + if( !fusion || (preferableBackend != DNN_BACKEND_OPENCV && + preferableBackend != DNN_BACKEND_INFERENCE_ENGINE)) return; CV_TRACE_FUNCTION(); diff --git a/modules/dnn/src/layers/batch_norm_layer.cpp b/modules/dnn/src/layers/batch_norm_layer.cpp index 01ea493fda..9a1707a3e8 100644 --- a/modules/dnn/src/layers/batch_norm_layer.cpp +++ b/modules/dnn/src/layers/batch_norm_layer.cpp @@ -151,8 +151,8 @@ public: virtual bool supportBackend(int backendId) CV_OVERRIDE { return backendId == DNN_BACKEND_OPENCV || - backendId == DNN_BACKEND_HALIDE && haveHalide() || - backendId == DNN_BACKEND_INFERENCE_ENGINE && haveInfEngine(); + (backendId == DNN_BACKEND_HALIDE && haveHalide()) || + (backendId == DNN_BACKEND_INFERENCE_ENGINE && haveInfEngine()); } #ifdef HAVE_OPENCL diff --git a/modules/dnn/src/layers/blank_layer.cpp b/modules/dnn/src/layers/blank_layer.cpp index b85621f9a0..178a2a4f2d 100644 --- a/modules/dnn/src/layers/blank_layer.cpp +++ b/modules/dnn/src/layers/blank_layer.cpp @@ -57,7 +57,7 @@ public: virtual bool supportBackend(int backendId) CV_OVERRIDE { return backendId == DNN_BACKEND_OPENCV || - backendId == DNN_BACKEND_INFERENCE_ENGINE && haveInfEngine(); + (backendId == DNN_BACKEND_INFERENCE_ENGINE && haveInfEngine()); } bool getMemoryShapes(const std::vector &inputs, @@ -107,14 +107,21 @@ public: inputs[i].copyTo(outputs[i]); } - virtual Ptr initInfEngine(const std::vector >&) CV_OVERRIDE + virtual Ptr initInfEngine(const std::vector >& inputs) CV_OVERRIDE { #ifdef HAVE_INF_ENGINE + InferenceEngine::DataPtr 
input = infEngineDataNode(inputs[0]); + CV_Assert(!input->dims.empty()); + InferenceEngine::LayerParams lp; lp.name = name; lp.type = "Split"; lp.precision = InferenceEngine::Precision::FP32; std::shared_ptr ieLayer(new InferenceEngine::SplitLayer(lp)); +#if INF_ENGINE_VER_MAJOR_GT(INF_ENGINE_RELEASE_2018R3) + ieLayer->params["axis"] = format("%d", input->dims.size() - 1); + ieLayer->params["out_sizes"] = format("%d", input->dims[0]); +#endif return Ptr(new InfEngineBackendNode(ieLayer)); #endif // HAVE_INF_ENGINE return Ptr(); diff --git a/modules/dnn/src/layers/concat_layer.cpp b/modules/dnn/src/layers/concat_layer.cpp index 7389684d53..bea2017729 100644 --- a/modules/dnn/src/layers/concat_layer.cpp +++ b/modules/dnn/src/layers/concat_layer.cpp @@ -105,9 +105,9 @@ public: virtual bool supportBackend(int backendId) CV_OVERRIDE { return backendId == DNN_BACKEND_OPENCV || - backendId == DNN_BACKEND_HALIDE && haveHalide() && axis == 1 && !padding || // By channels - backendId == DNN_BACKEND_INFERENCE_ENGINE && haveInfEngine() && !padding || - backendId == DNN_BACKEND_VKCOM && haveVulkan() && !padding; + (backendId == DNN_BACKEND_HALIDE && haveHalide() && axis == 1 && !padding) || // By channels + (backendId == DNN_BACKEND_INFERENCE_ENGINE && haveInfEngine() && !padding) || + (backendId == DNN_BACKEND_VKCOM && haveVulkan() && !padding); } class ChannelConcatInvoker : public ParallelLoopBody diff --git a/modules/dnn/src/layers/convolution_layer.cpp b/modules/dnn/src/layers/convolution_layer.cpp index cffb5d3637..9af8f436ac 100644 --- a/modules/dnn/src/layers/convolution_layer.cpp +++ b/modules/dnn/src/layers/convolution_layer.cpp @@ -225,7 +225,7 @@ public: else return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_HALIDE || - backendId == DNN_BACKEND_VKCOM && haveVulkan(); + (backendId == DNN_BACKEND_VKCOM && haveVulkan()); } bool getMemoryShapes(const std::vector &inputs, @@ -530,6 +530,12 @@ public: ieLayer->_pads_end.insert(InferenceEngine::Y_AXIS, 
pad.height); ieLayer->_dilation.insert(InferenceEngine::X_AXIS, dilation.width); ieLayer->_dilation.insert(InferenceEngine::Y_AXIS, dilation.height); + ieLayer->params["output"] = format("%d", outCn); + ieLayer->params["kernel"] = format("%d,%d,%d,%d", outCn, inpGroupCn, kernel.height, kernel.width); + ieLayer->params["pads_begin"] = format("%d,%d", pad.height, pad.width); + ieLayer->params["pads_end"] = format("%d,%d", pad.height, pad.width); + ieLayer->params["strides"] = format("%d,%d", stride.height, stride.width); + ieLayer->params["dilations"] = format("%d,%d", dilation.height, dilation.width); #else ieLayer->_kernel_x = kernel.width; ieLayer->_kernel_y = kernel.height; diff --git a/modules/dnn/src/layers/crop_layer.cpp b/modules/dnn/src/layers/crop_layer.cpp index fb878bb5e6..32cdbbaa00 100644 --- a/modules/dnn/src/layers/crop_layer.cpp +++ b/modules/dnn/src/layers/crop_layer.cpp @@ -68,7 +68,7 @@ public: virtual bool supportBackend(int backendId) CV_OVERRIDE { return backendId == DNN_BACKEND_OPENCV || - backendId == DNN_BACKEND_INFERENCE_ENGINE && crop_ranges.size() == 4; + (backendId == DNN_BACKEND_INFERENCE_ENGINE && crop_ranges.size() == 4); } bool getMemoryShapes(const std::vector &inputs, @@ -156,6 +156,14 @@ public: CV_Assert(crop_ranges.size() == 4); +#if INF_ENGINE_VER_MAJOR_GT(INF_ENGINE_RELEASE_2018R3) + for (int i = 0; i < 4; ++i) + { + ieLayer->axis.push_back(i); + ieLayer->offset.push_back(crop_ranges[i].start); + ieLayer->dim.push_back(crop_ranges[i].end - crop_ranges[i].start); + } +#else ieLayer->axis.push_back(0); // batch ieLayer->offset.push_back(crop_ranges[0].start); ieLayer->dim.push_back(crop_ranges[0].end - crop_ranges[0].start); @@ -171,7 +179,7 @@ public: ieLayer->axis.push_back(2); // width ieLayer->offset.push_back(crop_ranges[3].start); ieLayer->dim.push_back(crop_ranges[3].end - crop_ranges[3].start); - +#endif return Ptr(new InfEngineBackendNode(ieLayer)); #endif // HAVE_INF_ENGINE return Ptr(); diff --git 
a/modules/dnn/src/layers/detection_output_layer.cpp b/modules/dnn/src/layers/detection_output_layer.cpp index 6361343f9a..2a21619d6c 100644 --- a/modules/dnn/src/layers/detection_output_layer.cpp +++ b/modules/dnn/src/layers/detection_output_layer.cpp @@ -198,7 +198,7 @@ public: virtual bool supportBackend(int backendId) CV_OVERRIDE { return backendId == DNN_BACKEND_OPENCV || - backendId == DNN_BACKEND_INFERENCE_ENGINE && !_locPredTransposed && _bboxesNormalized && !_clip; + (backendId == DNN_BACKEND_INFERENCE_ENGINE && !_locPredTransposed && _bboxesNormalized && !_clip); } bool getMemoryShapes(const std::vector &inputs, diff --git a/modules/dnn/src/layers/eltwise_layer.cpp b/modules/dnn/src/layers/eltwise_layer.cpp index 8e2e96b424..61eb5e405a 100644 --- a/modules/dnn/src/layers/eltwise_layer.cpp +++ b/modules/dnn/src/layers/eltwise_layer.cpp @@ -98,7 +98,7 @@ public: { return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_HALIDE || - backendId == DNN_BACKEND_INFERENCE_ENGINE && (op != SUM || coeffs.empty()); + (backendId == DNN_BACKEND_INFERENCE_ENGINE && (op != SUM || coeffs.empty())); } bool getMemoryShapes(const std::vector &inputs, diff --git a/modules/dnn/src/layers/flatten_layer.cpp b/modules/dnn/src/layers/flatten_layer.cpp index 632cb7aace..e3382f2d53 100644 --- a/modules/dnn/src/layers/flatten_layer.cpp +++ b/modules/dnn/src/layers/flatten_layer.cpp @@ -65,7 +65,7 @@ public: virtual bool supportBackend(int backendId) CV_OVERRIDE { return backendId == DNN_BACKEND_OPENCV || - backendId == DNN_BACKEND_INFERENCE_ENGINE && haveInfEngine(); + (backendId == DNN_BACKEND_INFERENCE_ENGINE && haveInfEngine()); } bool getMemoryShapes(const std::vector &inputs, diff --git a/modules/dnn/src/layers/fully_connected_layer.cpp b/modules/dnn/src/layers/fully_connected_layer.cpp index f36813ff18..78d3e809b5 100644 --- a/modules/dnn/src/layers/fully_connected_layer.cpp +++ b/modules/dnn/src/layers/fully_connected_layer.cpp @@ -123,8 +123,8 @@ public: virtual 
bool supportBackend(int backendId) CV_OVERRIDE { return backendId == DNN_BACKEND_OPENCV || - backendId == DNN_BACKEND_HALIDE && haveHalide() && axis == 1 || - backendId == DNN_BACKEND_INFERENCE_ENGINE && haveInfEngine() && axis == 1; + (backendId == DNN_BACKEND_HALIDE && haveHalide() && axis == 1) || + (backendId == DNN_BACKEND_INFERENCE_ENGINE && haveInfEngine() && axis == 1); } virtual bool setActivation(const Ptr& layer) CV_OVERRIDE @@ -449,6 +449,9 @@ public: std::shared_ptr ieLayer(new InferenceEngine::FullyConnectedLayer(lp)); ieLayer->_out_num = blobs[0].size[0]; +#if INF_ENGINE_VER_MAJOR_GT(INF_ENGINE_RELEASE_2018R3) + ieLayer->params["out-size"] = format("%d", blobs[0].size[0]); +#endif ieLayer->_weights = wrapToInfEngineBlob(blobs[0], {(size_t)blobs[0].size[0], (size_t)blobs[0].size[1], 1, 1}, InferenceEngine::Layout::OIHW); if (blobs.size() > 1) ieLayer->_biases = wrapToInfEngineBlob(blobs[1], {(size_t)ieLayer->_out_num}, InferenceEngine::Layout::C); diff --git a/modules/dnn/src/layers/lrn_layer.cpp b/modules/dnn/src/layers/lrn_layer.cpp index d3a4c26547..b92610272b 100644 --- a/modules/dnn/src/layers/lrn_layer.cpp +++ b/modules/dnn/src/layers/lrn_layer.cpp @@ -93,8 +93,8 @@ public: { return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_HALIDE || - backendId == DNN_BACKEND_INFERENCE_ENGINE && (preferableTarget != DNN_TARGET_MYRIAD || type == CHANNEL_NRM) || - backendId == DNN_BACKEND_VKCOM && haveVulkan() && (size % 2 == 1) && (type == CHANNEL_NRM); + (backendId == DNN_BACKEND_INFERENCE_ENGINE && (preferableTarget != DNN_TARGET_MYRIAD || type == CHANNEL_NRM)) || + (backendId == DNN_BACKEND_VKCOM && haveVulkan() && (size % 2 == 1) && (type == CHANNEL_NRM)); } #ifdef HAVE_OPENCL diff --git a/modules/dnn/src/layers/max_unpooling_layer.cpp b/modules/dnn/src/layers/max_unpooling_layer.cpp index 0d9d62c44e..b9c1f2da73 100644 --- a/modules/dnn/src/layers/max_unpooling_layer.cpp +++ b/modules/dnn/src/layers/max_unpooling_layer.cpp @@ -35,8 +35,7 @@ 
public: virtual bool supportBackend(int backendId) CV_OVERRIDE { return backendId == DNN_BACKEND_OPENCV || - backendId == DNN_BACKEND_HALIDE && haveHalide() && - !poolPad.width && !poolPad.height; + (backendId == DNN_BACKEND_HALIDE && haveHalide() && !poolPad.width && !poolPad.height); } bool getMemoryShapes(const std::vector &inputs, diff --git a/modules/dnn/src/layers/padding_layer.cpp b/modules/dnn/src/layers/padding_layer.cpp index b837d4ccd5..30f04e029a 100644 --- a/modules/dnn/src/layers/padding_layer.cpp +++ b/modules/dnn/src/layers/padding_layer.cpp @@ -91,7 +91,7 @@ public: virtual bool supportBackend(int backendId) CV_OVERRIDE { return backendId == DNN_BACKEND_OPENCV || - backendId == DNN_BACKEND_HALIDE && haveHalide() && dstRanges.size() == 4; + (backendId == DNN_BACKEND_HALIDE && haveHalide() && dstRanges.size() == 4); } void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr) CV_OVERRIDE diff --git a/modules/dnn/src/layers/permute_layer.cpp b/modules/dnn/src/layers/permute_layer.cpp index bdf27b5f96..ace567e182 100644 --- a/modules/dnn/src/layers/permute_layer.cpp +++ b/modules/dnn/src/layers/permute_layer.cpp @@ -106,8 +106,8 @@ public: virtual bool supportBackend(int backendId) CV_OVERRIDE { return backendId == DNN_BACKEND_OPENCV || - backendId == DNN_BACKEND_INFERENCE_ENGINE && haveInfEngine() || - backendId == DNN_BACKEND_VKCOM && haveVulkan(); + (backendId == DNN_BACKEND_INFERENCE_ENGINE && haveInfEngine()) || + (backendId == DNN_BACKEND_VKCOM && haveVulkan()); } bool getMemoryShapes(const std::vector &inputs, diff --git a/modules/dnn/src/layers/pooling_layer.cpp b/modules/dnn/src/layers/pooling_layer.cpp index 40a87dbbe4..11fa7eaeab 100644 --- a/modules/dnn/src/layers/pooling_layer.cpp +++ b/modules/dnn/src/layers/pooling_layer.cpp @@ -155,10 +155,10 @@ public: } else return backendId == DNN_BACKEND_OPENCV || - backendId == DNN_BACKEND_HALIDE && haveHalide() && - (type == MAX || type == AVE 
&& !pad_t && !pad_l && !pad_b && !pad_r) || - backendId == DNN_BACKEND_VKCOM && haveVulkan() && - (type == MAX || type == AVE); + (backendId == DNN_BACKEND_HALIDE && haveHalide() && + (type == MAX || (type == AVE && !pad_t && !pad_l && !pad_b && !pad_r))) || + (backendId == DNN_BACKEND_VKCOM && haveVulkan() && + (type == MAX || type == AVE)); } #ifdef HAVE_OPENCL @@ -313,6 +313,10 @@ public: poolLayer->_padding.insert(InferenceEngine::Y_AXIS, pad_t); poolLayer->_pads_end.insert(InferenceEngine::X_AXIS, pad_r); poolLayer->_pads_end.insert(InferenceEngine::Y_AXIS, pad_b); + poolLayer->params["kernel"] = format("%d,%d", kernel.height, kernel.width); + poolLayer->params["pads_begin"] = format("%d,%d", pad_t, pad_l); + poolLayer->params["pads_end"] = format("%d,%d", pad_b, pad_r); + poolLayer->params["strides"] = format("%d,%d", stride.height, stride.width); #else poolLayer->_kernel_x = kernel.width; poolLayer->_kernel_y = kernel.height; @@ -380,8 +384,8 @@ public: src.isContinuous(), dst.isContinuous(), src.type() == CV_32F, src.type() == dst.type(), src.dims == 4, dst.dims == 4, - ((poolingType == ROI || poolingType == PSROI) && dst.size[0] ==rois.size[0] || src.size[0] == dst.size[0]), - poolingType == PSROI || src.size[1] == dst.size[1], + (((poolingType == ROI || poolingType == PSROI) && dst.size[0] == rois.size[0]) || src.size[0] == dst.size[0]), + poolingType == PSROI || src.size[1] == dst.size[1], (mask.empty() || (mask.type() == src.type() && mask.size == dst.size))); PoolingInvoker p; diff --git a/modules/dnn/src/layers/prior_box_layer.cpp b/modules/dnn/src/layers/prior_box_layer.cpp index 3b7faa7320..93b39827d6 100644 --- a/modules/dnn/src/layers/prior_box_layer.cpp +++ b/modules/dnn/src/layers/prior_box_layer.cpp @@ -272,8 +272,8 @@ public: virtual bool supportBackend(int backendId) CV_OVERRIDE { return backendId == DNN_BACKEND_OPENCV || - backendId == DNN_BACKEND_INFERENCE_ENGINE && haveInfEngine() || - backendId == DNN_BACKEND_VKCOM && haveVulkan(); + 
(backendId == DNN_BACKEND_INFERENCE_ENGINE && haveInfEngine()) || + (backendId == DNN_BACKEND_VKCOM && haveVulkan()); } bool getMemoryShapes(const std::vector &inputs, diff --git a/modules/dnn/src/layers/proposal_layer.cpp b/modules/dnn/src/layers/proposal_layer.cpp index ad9ea9a97e..f559ee40e2 100644 --- a/modules/dnn/src/layers/proposal_layer.cpp +++ b/modules/dnn/src/layers/proposal_layer.cpp @@ -87,7 +87,7 @@ public: virtual bool supportBackend(int backendId) CV_OVERRIDE { return backendId == DNN_BACKEND_OPENCV || - backendId == DNN_BACKEND_INFERENCE_ENGINE && preferableTarget != DNN_TARGET_MYRIAD; + (backendId == DNN_BACKEND_INFERENCE_ENGINE && preferableTarget != DNN_TARGET_MYRIAD); } bool getMemoryShapes(const std::vector &inputs, diff --git a/modules/dnn/src/layers/recurrent_layers.cpp b/modules/dnn/src/layers/recurrent_layers.cpp index 6a6cf0ce81..f5ba861c47 100644 --- a/modules/dnn/src/layers/recurrent_layers.cpp +++ b/modules/dnn/src/layers/recurrent_layers.cpp @@ -175,7 +175,7 @@ public: std::vector &outputs, std::vector &internals) const CV_OVERRIDE { - CV_Assert(!usePeephole && blobs.size() == 3 || usePeephole && blobs.size() == 6); + CV_Assert((!usePeephole && blobs.size() == 3) || (usePeephole && blobs.size() == 6)); CV_Assert(inputs.size() == 1); const MatShape& inp0 = inputs[0]; @@ -221,7 +221,7 @@ public: std::vector input; inputs_arr.getMatVector(input); - CV_Assert(!usePeephole && blobs.size() == 3 || usePeephole && blobs.size() == 6); + CV_Assert((!usePeephole && blobs.size() == 3) || (usePeephole && blobs.size() == 6)); CV_Assert(input.size() == 1); const Mat& inp0 = input[0]; diff --git a/modules/dnn/src/layers/reshape_layer.cpp b/modules/dnn/src/layers/reshape_layer.cpp index ec1f8cf4a8..4109802a66 100644 --- a/modules/dnn/src/layers/reshape_layer.cpp +++ b/modules/dnn/src/layers/reshape_layer.cpp @@ -178,7 +178,7 @@ public: virtual bool supportBackend(int backendId) CV_OVERRIDE { return backendId == DNN_BACKEND_OPENCV || - backendId == 
DNN_BACKEND_INFERENCE_ENGINE && haveInfEngine(); + (backendId == DNN_BACKEND_INFERENCE_ENGINE && haveInfEngine()); } bool getMemoryShapes(const std::vector &inputs, diff --git a/modules/dnn/src/layers/resize_layer.cpp b/modules/dnn/src/layers/resize_layer.cpp index c090ad82ff..6aa32150b6 100644 --- a/modules/dnn/src/layers/resize_layer.cpp +++ b/modules/dnn/src/layers/resize_layer.cpp @@ -51,9 +51,14 @@ public: virtual bool supportBackend(int backendId) CV_OVERRIDE { +#ifdef HAVE_INF_ENGINE if (backendId == DNN_BACKEND_INFERENCE_ENGINE) - return interpolation == "nearest" && preferableTarget != DNN_TARGET_MYRIAD; + { + return (interpolation == "nearest" && preferableTarget != DNN_TARGET_MYRIAD) || + (interpolation == "bilinear" && INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R4)); + } else +#endif return backendId == DNN_BACKEND_OPENCV; } @@ -160,15 +165,27 @@ public: #ifdef HAVE_INF_ENGINE InferenceEngine::LayerParams lp; lp.name = name; - lp.type = "Resample"; lp.precision = InferenceEngine::Precision::FP32; - - std::shared_ptr ieLayer(new InferenceEngine::CNNLayer(lp)); - ieLayer->params["type"] = "caffe.ResampleParameter.NEAREST"; - ieLayer->params["antialias"] = "0"; + std::shared_ptr ieLayer; + if (interpolation == "nearest") + { + lp.type = "Resample"; + ieLayer = std::shared_ptr(new InferenceEngine::CNNLayer(lp)); + ieLayer->params["type"] = "caffe.ResampleParameter.NEAREST"; + ieLayer->params["antialias"] = "0"; + } + else if (interpolation == "bilinear") + { + lp.type = "Interp"; + ieLayer = std::shared_ptr(new InferenceEngine::CNNLayer(lp)); + ieLayer->params["pad_beg"] = "0"; + ieLayer->params["pad_end"] = "0"; + ieLayer->params["align_corners"] = "0"; + } + else + CV_Error(Error::StsNotImplemented, "Unsupported interpolation: " + interpolation); ieLayer->params["width"] = cv::format("%d", outWidth); ieLayer->params["height"] = cv::format("%d", outHeight); - return Ptr(new InfEngineBackendNode(ieLayer)); #endif // HAVE_INF_ENGINE return Ptr(); diff 
--git a/modules/dnn/src/layers/scale_layer.cpp b/modules/dnn/src/layers/scale_layer.cpp index b554c2274a..b217632584 100644 --- a/modules/dnn/src/layers/scale_layer.cpp +++ b/modules/dnn/src/layers/scale_layer.cpp @@ -45,13 +45,13 @@ public: std::vector inputs; inputs_arr.getMatVector(inputs); hasWeights = blobs.size() == 2 || (blobs.size() == 1 && !hasBias); - CV_Assert(inputs.size() == 2 && blobs.empty() || blobs.size() == (int)hasWeights + (int)hasBias); + CV_Assert((inputs.size() == 2 && blobs.empty()) || blobs.size() == (int)hasWeights + (int)hasBias); } virtual bool supportBackend(int backendId) CV_OVERRIDE { return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_HALIDE || - backendId == DNN_BACKEND_INFERENCE_ENGINE && axis == 1; + (backendId == DNN_BACKEND_INFERENCE_ENGINE && axis == 1); } void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr) CV_OVERRIDE diff --git a/modules/dnn/src/layers/slice_layer.cpp b/modules/dnn/src/layers/slice_layer.cpp index 4818d9dfc7..66f9aea440 100644 --- a/modules/dnn/src/layers/slice_layer.cpp +++ b/modules/dnn/src/layers/slice_layer.cpp @@ -111,7 +111,7 @@ public: virtual bool supportBackend(int backendId) CV_OVERRIDE { return backendId == DNN_BACKEND_OPENCV || - backendId == DNN_BACKEND_INFERENCE_ENGINE && sliceRanges.size() == 1 && sliceRanges[0].size() == 4; + (backendId == DNN_BACKEND_INFERENCE_ENGINE && sliceRanges.size() == 1 && sliceRanges[0].size() == 4); } bool getMemoryShapes(const std::vector &inputs, diff --git a/modules/dnn/src/layers/softmax_layer.cpp b/modules/dnn/src/layers/softmax_layer.cpp index 7e2026531b..ab4fd6d7ce 100644 --- a/modules/dnn/src/layers/softmax_layer.cpp +++ b/modules/dnn/src/layers/softmax_layer.cpp @@ -90,9 +90,9 @@ public: virtual bool supportBackend(int backendId) CV_OVERRIDE { return backendId == DNN_BACKEND_OPENCV || - backendId == DNN_BACKEND_HALIDE && haveHalide() && axisRaw == 1 || - backendId == 
DNN_BACKEND_INFERENCE_ENGINE && haveInfEngine() && !logSoftMax || - backendId == DNN_BACKEND_VKCOM && haveVulkan(); + (backendId == DNN_BACKEND_HALIDE && haveHalide() && axisRaw == 1) || + (backendId == DNN_BACKEND_INFERENCE_ENGINE && haveInfEngine() && !logSoftMax) || + (backendId == DNN_BACKEND_VKCOM && haveVulkan()); } #ifdef HAVE_OPENCL diff --git a/modules/dnn/src/ocl4dnn/src/ocl4dnn_conv_spatial.cpp b/modules/dnn/src/ocl4dnn/src/ocl4dnn_conv_spatial.cpp index f8893b4ad2..2b253d067e 100644 --- a/modules/dnn/src/ocl4dnn/src/ocl4dnn_conv_spatial.cpp +++ b/modules/dnn/src/ocl4dnn/src/ocl4dnn_conv_spatial.cpp @@ -638,7 +638,7 @@ void OCL4DNNConvSpatial::generateKey() << "p" << pad_w_ << "x" << pad_h_ << "_" << "num" << num_ << "_" << "M" << M_ << "_" - << "activ" << fused_activ_ << "_" + << "activ" << (int)fused_activ_ << "_" << "eltwise" << fused_eltwise_ << "_" << precision; diff --git a/modules/dnn/src/onnx/onnx_importer.cpp b/modules/dnn/src/onnx/onnx_importer.cpp index 2730054df7..6b720458ed 100644 --- a/modules/dnn/src/onnx/onnx_importer.cpp +++ b/modules/dnn/src/onnx/onnx_importer.cpp @@ -508,6 +508,16 @@ void ONNXImporter::populateNet(Net dstNet) layerParams.set("num_output", layerParams.blobs[0].size[0]); layerParams.set("bias_term", node_proto.input_size() == 3); } + else if (layer_type == "ConvTranspose") + { + CV_Assert(node_proto.input_size() >= 2); + layerParams.type = "Deconvolution"; + for (int j = 1; j < node_proto.input_size(); j++) { + layerParams.blobs.push_back(getBlob(node_proto, constBlobs, j)); + } + layerParams.set("num_output", layerParams.blobs[0].size[1]); + layerParams.set("bias_term", node_proto.input_size() == 3); + } else if (layer_type == "Transpose") { layerParams.type = "Permute"; diff --git a/modules/dnn/src/op_inf_engine.cpp b/modules/dnn/src/op_inf_engine.cpp index df44ab2303..81d6c67dcc 100644 --- a/modules/dnn/src/op_inf_engine.cpp +++ b/modules/dnn/src/op_inf_engine.cpp @@ -309,7 +309,7 @@ void 
InfEngineBackendNet::setTargetDevice(InferenceEngine::TargetDevice device) InferenceEngine::TargetDevice InfEngineBackendNet::getTargetDevice() CV_NOEXCEPT { - return targetDevice; + return const_cast(this)->getTargetDevice(); } InferenceEngine::TargetDevice InfEngineBackendNet::getTargetDevice() const CV_NOEXCEPT @@ -387,6 +387,27 @@ void InfEngineBackendNet::init(int targetId) } } CV_Assert(!inputs.empty()); + +#if INF_ENGINE_VER_MAJOR_GT(INF_ENGINE_RELEASE_2018R3) + for (const auto& inp : inputs) + { + InferenceEngine::LayerParams lp; + lp.name = inp.first; + lp.type = "Input"; + lp.precision = InferenceEngine::Precision::FP32; + std::shared_ptr inpLayer(new InferenceEngine::CNNLayer(lp)); + + layers.push_back(inpLayer); + + InferenceEngine::DataPtr dataPtr = inp.second->getInputData(); + // TODO: remove precision dependency (see setInput.normalization tests) + if (dataPtr->precision == InferenceEngine::Precision::FP32) + { + inpLayer->outData.assign(1, dataPtr); + dataPtr->creatorLayer = InferenceEngine::CNNLayerWeakPtr(inpLayer); + } + } +#endif } if (outputs.empty()) @@ -559,7 +580,7 @@ bool InfEngineBackendLayer::getMemoryShapes(const std::vector &inputs, bool InfEngineBackendLayer::supportBackend(int backendId) { return backendId == DNN_BACKEND_DEFAULT || - backendId == DNN_BACKEND_INFERENCE_ENGINE && haveInfEngine(); + (backendId == DNN_BACKEND_INFERENCE_ENGINE && haveInfEngine()); } void InfEngineBackendLayer::forward(InputArrayOfArrays inputs, OutputArrayOfArrays outputs, diff --git a/modules/dnn/src/op_inf_engine.hpp b/modules/dnn/src/op_inf_engine.hpp index 34d4755765..20b13a390b 100644 --- a/modules/dnn/src/op_inf_engine.hpp +++ b/modules/dnn/src/op_inf_engine.hpp @@ -25,10 +25,11 @@ #define INF_ENGINE_RELEASE_2018R1 2018010000 #define INF_ENGINE_RELEASE_2018R2 2018020000 #define INF_ENGINE_RELEASE_2018R3 2018030000 +#define INF_ENGINE_RELEASE_2018R4 2018040000 #ifndef INF_ENGINE_RELEASE -#warning("IE version have not been provided via command-line. 
Using 2018R2 by default") -#define INF_ENGINE_RELEASE INF_ENGINE_RELEASE_2018R2 +#warning("IE version have not been provided via command-line. Using 2018R4 by default") +#define INF_ENGINE_RELEASE INF_ENGINE_RELEASE_2018R4 #endif #define INF_ENGINE_VER_MAJOR_GT(ver) (((INF_ENGINE_RELEASE) / 10000) > ((ver) / 10000)) diff --git a/modules/dnn/src/tensorflow/tf_importer.cpp b/modules/dnn/src/tensorflow/tf_importer.cpp index 145af366f8..237e9750f1 100644 --- a/modules/dnn/src/tensorflow/tf_importer.cpp +++ b/modules/dnn/src/tensorflow/tf_importer.cpp @@ -156,6 +156,7 @@ void blobFromTensor(const tensorflow::TensorProto &tensor, Mat &dstBlob) } } +#if 0 void printList(const tensorflow::AttrValue::ListValue &val) { std::cout << "("; @@ -235,6 +236,7 @@ void printLayerAttr(const tensorflow::NodeDef &layer) std::cout << std::endl; } } +#endif bool hasLayerAttr(const tensorflow::NodeDef &layer, const std::string &name) { diff --git a/modules/dnn/src/tensorflow/tf_io.cpp b/modules/dnn/src/tensorflow/tf_io.cpp index 41fb1a68b2..de6852f234 100644 --- a/modules/dnn/src/tensorflow/tf_io.cpp +++ b/modules/dnn/src/tensorflow/tf_io.cpp @@ -37,8 +37,6 @@ using namespace tensorflow; using namespace ::google::protobuf; using namespace ::google::protobuf::io; -const int kProtoReadBytesLimit = INT_MAX; // Max size of 2 GB minus 1 byte. - void ReadTFNetParamsFromBinaryFileOrDie(const char* param_file, tensorflow::GraphDef* param) { CHECK(ReadProtoFromBinaryFile(param_file, param)) diff --git a/modules/dnn/test/test_backends.cpp b/modules/dnn/test/test_backends.cpp index 919f5779e7..f23917c6d3 100644 --- a/modules/dnn/test/test_backends.cpp +++ b/modules/dnn/test/test_backends.cpp @@ -174,7 +174,7 @@ TEST_P(DNNTestNetwork, MobileNet_SSD_v2_TensorFlow) throw SkipTestException(""); Mat sample = imread(findDataFile("dnn/street.png", false)); Mat inp = blobFromImage(sample, 1.0f, Size(300, 300), Scalar(), false); - float l1 = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 
0.011 : 0.0; + float l1 = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.013 : 0.0; float lInf = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.062 : 0.0; processNet("dnn/ssd_mobilenet_v2_coco_2018_03_29.pb", "dnn/ssd_mobilenet_v2_coco_2018_03_29.pbtxt", inp, "detection_out", "", l1, lInf, 0.25); @@ -184,7 +184,7 @@ TEST_P(DNNTestNetwork, SSD_VGG16) { if (backend == DNN_BACKEND_HALIDE && target == DNN_TARGET_CPU) throw SkipTestException(""); - double scoreThreshold = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.0252 : 0.0; + double scoreThreshold = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.0325 : 0.0; Mat sample = imread(findDataFile("dnn/street.png", false)); Mat inp = blobFromImage(sample, 1.0f, Size(300, 300), Scalar(), false); processNet("dnn/VGG_ILSVRC2016_SSD_300x300_iter_440000.caffemodel", @@ -194,7 +194,7 @@ TEST_P(DNNTestNetwork, SSD_VGG16) TEST_P(DNNTestNetwork, OpenPose_pose_coco) { if (backend == DNN_BACKEND_HALIDE || - backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD) + (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)) throw SkipTestException(""); processNet("dnn/openpose_pose_coco.caffemodel", "dnn/openpose_pose_coco.prototxt", Size(368, 368)); @@ -203,7 +203,7 @@ TEST_P(DNNTestNetwork, OpenPose_pose_coco) TEST_P(DNNTestNetwork, OpenPose_pose_mpi) { if (backend == DNN_BACKEND_HALIDE || - backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD) + (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)) throw SkipTestException(""); processNet("dnn/openpose_pose_mpi.caffemodel", "dnn/openpose_pose_mpi.prototxt", Size(368, 368)); @@ -212,7 +212,7 @@ TEST_P(DNNTestNetwork, OpenPose_pose_mpi) TEST_P(DNNTestNetwork, OpenPose_pose_mpi_faster_4_stages) { if (backend == DNN_BACKEND_HALIDE || - backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD) + (backend == 
DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)) throw SkipTestException(""); // The same .caffemodel but modified .prototxt // See https://github.com/CMU-Perceptual-Computing-Lab/openpose/blob/master/src/openpose/pose/poseParameters.cpp diff --git a/modules/dnn/test/test_caffe_importer.cpp b/modules/dnn/test/test_caffe_importer.cpp index 6563ac0663..4ad3d54067 100644 --- a/modules/dnn/test/test_caffe_importer.cpp +++ b/modules/dnn/test/test_caffe_importer.cpp @@ -512,7 +512,11 @@ INSTANTIATE_TEST_CASE_P(Test_Caffe, opencv_face_detector, TEST_P(Test_Caffe_nets, FasterRCNN_vgg16) { - if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD) + if ((backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD) +#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE > 2018030000 + || (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_OPENCL_FP16) +#endif + ) throw SkipTestException(""); static Mat ref = (Mat_(3, 7) << 0, 2, 0.949398, 99.2454, 210.141, 601.205, 462.849, 0, 7, 0.997022, 481.841, 92.3218, 722.685, 175.953, diff --git a/modules/dnn/test/test_common.hpp b/modules/dnn/test/test_common.hpp index 69ade1a50e..987a68116e 100644 --- a/modules/dnn/test/test_common.hpp +++ b/modules/dnn/test/test_common.hpp @@ -57,7 +57,7 @@ static inline void PrintTo(const cv::dnn::Backend& v, std::ostream* os) case DNN_BACKEND_OPENCV: *os << "OCV"; return; case DNN_BACKEND_VKCOM: *os << "VKCOM"; return; } // don't use "default:" to emit compiler warnings - *os << "DNN_BACKEND_UNKNOWN(" << v << ")"; + *os << "DNN_BACKEND_UNKNOWN(" << (int)v << ")"; } static inline void PrintTo(const cv::dnn::Target& v, std::ostream* os) @@ -69,7 +69,7 @@ static inline void PrintTo(const cv::dnn::Target& v, std::ostream* os) case DNN_TARGET_MYRIAD: *os << "MYRIAD"; return; case DNN_TARGET_VULKAN: *os << "VULKAN"; return; } // don't use "default:" to emit compiler warnings - *os << "DNN_TARGET_UNKNOWN(" << v << ")"; + *os << "DNN_TARGET_UNKNOWN(" 
<< (int)v << ")"; } using opencv_test::tuple; @@ -237,7 +237,8 @@ namespace opencv_test { using namespace cv::dnn; -static testing::internal::ParamGenerator > dnnBackendsAndTargets( +static inline +testing::internal::ParamGenerator > dnnBackendsAndTargets( bool withInferenceEngine = true, bool withHalide = false, bool withCpuOCV = true, @@ -290,4 +291,103 @@ static testing::internal::ParamGenerator > dnnBackendsAnd } // namespace + +namespace opencv_test { +using namespace cv::dnn; + +static inline +testing::internal::ParamGenerator availableDnnTargets() +{ + static std::vector targets; + if (targets.empty()) + { + targets.push_back(DNN_TARGET_CPU); +#ifdef HAVE_OPENCL + if (cv::ocl::useOpenCL()) + targets.push_back(DNN_TARGET_OPENCL); +#endif + } + return testing::ValuesIn(targets); +} + +class DNNTestLayer : public TestWithParam > +{ +public: + dnn::Backend backend; + dnn::Target target; + double default_l1, default_lInf; + + DNNTestLayer() + { + backend = (dnn::Backend)(int)get<0>(GetParam()); + target = (dnn::Target)(int)get<1>(GetParam()); + getDefaultThresholds(backend, target, &default_l1, &default_lInf); + } + + static void getDefaultThresholds(int backend, int target, double* l1, double* lInf) + { + if (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) + { + *l1 = 4e-3; + *lInf = 2e-2; + } + else + { + *l1 = 1e-5; + *lInf = 1e-4; + } + } + + static void checkBackend(int backend, int target, Mat* inp = 0, Mat* ref = 0) + { + if (backend == DNN_BACKEND_OPENCV && (target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16)) + { +#ifdef HAVE_OPENCL + if (!cv::ocl::useOpenCL()) +#endif + { + throw SkipTestException("OpenCL is not available/disabled in OpenCV"); + } + } + if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD) + { + if (!checkMyriadTarget()) + { + throw SkipTestException("Myriad is not available/disabled in OpenCV"); + } +#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE < 2018030000 + if (inp && ref 
&& inp->size[0] != 1) + { + // Myriad plugin supports only batch size 1. Slice a single sample. + if (inp->size[0] == ref->size[0]) + { + std::vector range(inp->dims, Range::all()); + range[0] = Range(0, 1); + *inp = inp->operator()(range); + + range = std::vector(ref->dims, Range::all()); + range[0] = Range(0, 1); + *ref = ref->operator()(range); + } + else + throw SkipTestException("Myriad plugin supports only batch size 1"); + } +#else + if (inp && ref && inp->dims == 4 && ref->dims == 4 && + inp->size[0] != 1 && inp->size[0] != ref->size[0]) + throw SkipTestException("Inconsistent batch size of input and output blobs for Myriad plugin"); + +#endif + } + } + +protected: + void checkBackend(Mat* inp = 0, Mat* ref = 0) + { + checkBackend(backend, target, inp, ref); + } +}; + +} // namespace + #endif diff --git a/modules/dnn/test/test_darknet_importer.cpp b/modules/dnn/test/test_darknet_importer.cpp index ab4a0e708c..415e7780fc 100644 --- a/modules/dnn/test/test_darknet_importer.cpp +++ b/modules/dnn/test/test_darknet_importer.cpp @@ -306,6 +306,9 @@ TEST_P(Test_Darknet_nets, TinyYoloVoc) // batch size 1 testDarknetModel(config_file, weights_file, ref.rowRange(0, 2), scoreDiff, iouDiff); +#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE == 2018040000 + if (backend == DNN_BACKEND_INFERENCE_ENGINE && target != DNN_TARGET_MYRIAD) +#endif // batch size 2 testDarknetModel(config_file, weights_file, ref, scoreDiff, iouDiff); } diff --git a/modules/dnn/test/test_halide_layers.cpp b/modules/dnn/test/test_halide_layers.cpp index 1be30cda64..082cf62314 100644 --- a/modules/dnn/test/test_halide_layers.cpp +++ b/modules/dnn/test/test_halide_layers.cpp @@ -166,6 +166,11 @@ TEST_P(Deconvolution, Accuracy) if (backendId == DNN_BACKEND_INFERENCE_ENGINE && targetId == DNN_TARGET_CPU && dilation.width == 2 && dilation.height == 2) throw SkipTestException(""); +#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE == 2018040000 + if (backendId == DNN_BACKEND_INFERENCE_ENGINE && 
targetId == DNN_TARGET_CPU && + hasBias && group != 1) + throw SkipTestException("Test is disabled for OpenVINO 2018R4"); +#endif int sz[] = {inChannels, outChannels / group, kernel.height, kernel.width}; Mat weights(4, &sz[0], CV_32F); diff --git a/modules/dnn/test/test_ie_models.cpp b/modules/dnn/test/test_ie_models.cpp index 01ecb72986..a50fed8d58 100644 --- a/modules/dnn/test/test_ie_models.cpp +++ b/modules/dnn/test/test_ie_models.cpp @@ -177,10 +177,20 @@ TEST_P(DNNTestOpenVINO, models) Target target = (dnn::Target)(int)get<0>(GetParam()); std::string modelName = get<1>(GetParam()); +#ifdef INF_ENGINE_RELEASE +#if INF_ENGINE_RELEASE <= 2018030000 if (target == DNN_TARGET_MYRIAD && (modelName == "landmarks-regression-retail-0001" || modelName == "semantic-segmentation-adas-0001" || modelName == "face-reidentification-retail-0001")) throw SkipTestException(""); +#elif INF_ENGINE_RELEASE == 2018040000 + if (modelName == "single-image-super-resolution-0034" || + (target == DNN_TARGET_MYRIAD && (modelName == "license-plate-recognition-barrier-0001" || + modelName == "landmarks-regression-retail-0009" || + modelName == "semantic-segmentation-adas-0001"))) + throw SkipTestException(""); +#endif +#endif std::string precision = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 
"FP16" : "FP32"; std::string prefix = utils::fs::join("intel_models", diff --git a/modules/dnn/test/test_layers.cpp b/modules/dnn/test/test_layers.cpp index be0e37e294..cf94fad701 100644 --- a/modules/dnn/test/test_layers.cpp +++ b/modules/dnn/test/test_layers.cpp @@ -137,6 +137,10 @@ TEST_P(Test_Caffe_layers, Convolution) TEST_P(Test_Caffe_layers, DeConvolution) { +#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE == 2018040000 + if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_CPU) + throw SkipTestException("Test is disabled for OpenVINO 2018R4"); +#endif testLayerUsingCaffeModels("layer_deconvolution", true, false); } @@ -558,7 +562,9 @@ TEST_P(Test_Caffe_layers, FasterRCNN_Proposal) normAssert(outs[i].rowRange(0, numDets), ref); if (numDets < outs[i].size[0]) + { EXPECT_EQ(countNonZero(outs[i].rowRange(numDets, outs[i].size[0])), 0); + } } } diff --git a/modules/dnn/test/test_misc.cpp b/modules/dnn/test/test_misc.cpp index 07f97ed4ef..859a47b26c 100644 --- a/modules/dnn/test/test_misc.cpp +++ b/modules/dnn/test/test_misc.cpp @@ -140,9 +140,9 @@ TEST(LayerFactory, custom_layers) net.setPreferableBackend(DNN_BACKEND_OPENCV); Mat output = net.forward(); - if (i == 0) EXPECT_EQ(output.at(0), 1); - else if (i == 1) EXPECT_EQ(output.at(0), 2); - else if (i == 2) EXPECT_EQ(output.at(0), 1); + if (i == 0) { EXPECT_EQ(output.at(0), 1); } + else if (i == 1) { EXPECT_EQ(output.at(0), 2); } + else if (i == 2) { EXPECT_EQ(output.at(0), 1); } } LayerFactory::unregisterLayer("CustomType"); } diff --git a/modules/dnn/test/test_onnx_importer.cpp b/modules/dnn/test/test_onnx_importer.cpp index c7a48fe139..61b06cc7cf 100644 --- a/modules/dnn/test/test_onnx_importer.cpp +++ b/modules/dnn/test/test_onnx_importer.cpp @@ -68,6 +68,12 @@ TEST_P(Test_ONNX_layers, Convolution) testONNXModels("two_convolution"); } +TEST_P(Test_ONNX_layers, Deconvolution) +{ + testONNXModels("deconvolution"); + testONNXModels("two_deconvolution"); +} + TEST_P(Test_ONNX_layers, 
Dropout) { testONNXModels("dropout"); @@ -118,8 +124,8 @@ TEST_P(Test_ONNX_layers, Transpose) TEST_P(Test_ONNX_layers, Multiplication) { - if (backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16 || - backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD) + if ((backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16) || + (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)) throw SkipTestException(""); testONNXModels("mul"); } @@ -296,7 +302,7 @@ TEST_P(Test_ONNX_nets, ResNet101_DUC_HDC) TEST_P(Test_ONNX_nets, TinyYolov2) { if (cvtest::skipUnstableTests || - backend == DNN_BACKEND_INFERENCE_ENGINE && (target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16)) { + (backend == DNN_BACKEND_INFERENCE_ENGINE && (target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16))) { throw SkipTestException(""); } // output range: [-11; 8] diff --git a/modules/dnn/test/test_precomp.hpp b/modules/dnn/test/test_precomp.hpp index 46299908d8..cc1ea639f7 100644 --- a/modules/dnn/test/test_precomp.hpp +++ b/modules/dnn/test/test_precomp.hpp @@ -49,100 +49,4 @@ #include "opencv2/dnn.hpp" #include "test_common.hpp" -namespace opencv_test { -using namespace cv::dnn; - -static testing::internal::ParamGenerator availableDnnTargets() -{ - static std::vector targets; - if (targets.empty()) - { - targets.push_back(DNN_TARGET_CPU); -#ifdef HAVE_OPENCL - if (cv::ocl::useOpenCL()) - targets.push_back(DNN_TARGET_OPENCL); -#endif - } - return testing::ValuesIn(targets); -} - -class DNNTestLayer : public TestWithParam > -{ -public: - dnn::Backend backend; - dnn::Target target; - double default_l1, default_lInf; - - DNNTestLayer() - { - backend = (dnn::Backend)(int)get<0>(GetParam()); - target = (dnn::Target)(int)get<1>(GetParam()); - getDefaultThresholds(backend, target, &default_l1, &default_lInf); - } - - static void getDefaultThresholds(int backend, int target, double* l1, double* lInf) - { - if (target == 
DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) - { - *l1 = 4e-3; - *lInf = 2e-2; - } - else - { - *l1 = 1e-5; - *lInf = 1e-4; - } - } - - static void checkBackend(int backend, int target, Mat* inp = 0, Mat* ref = 0) - { - if (backend == DNN_BACKEND_OPENCV && (target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16)) - { -#ifdef HAVE_OPENCL - if (!cv::ocl::useOpenCL()) -#endif - { - throw SkipTestException("OpenCL is not available/disabled in OpenCV"); - } - } - if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD) - { - if (!checkMyriadTarget()) - { - throw SkipTestException("Myriad is not available/disabled in OpenCV"); - } -#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE < 2018030000 - if (inp && ref && inp->size[0] != 1) - { - // Myriad plugin supports only batch size 1. Slice a single sample. - if (inp->size[0] == ref->size[0]) - { - std::vector range(inp->dims, Range::all()); - range[0] = Range(0, 1); - *inp = inp->operator()(range); - - range = std::vector(ref->dims, Range::all()); - range[0] = Range(0, 1); - *ref = ref->operator()(range); - } - else - throw SkipTestException("Myriad plugin supports only batch size 1"); - } -#else - if (inp && ref && inp->dims == 4 && ref->dims == 4 && - inp->size[0] != 1 && inp->size[0] != ref->size[0]) - throw SkipTestException("Inconsistent batch size of input and output blobs for Myriad plugin"); - -#endif - } - } - -protected: - void checkBackend(Mat* inp = 0, Mat* ref = 0) - { - checkBackend(backend, target, inp, ref); - } -}; - -} // namespace #endif diff --git a/modules/dnn/test/test_tf_importer.cpp b/modules/dnn/test/test_tf_importer.cpp index f98d78c3bb..adb45b86f0 100644 --- a/modules/dnn/test/test_tf_importer.cpp +++ b/modules/dnn/test/test_tf_importer.cpp @@ -101,7 +101,9 @@ public: string dataConfig; if (hasText) + { ASSERT_TRUE(readFileInMemory(netConfig, dataConfig)); + } net = readNetFromTensorflow(dataModel.c_str(), dataModel.size(), dataConfig.c_str(), 
dataConfig.size()); @@ -473,7 +475,7 @@ TEST_P(Test_TensorFlow_nets, EAST_text_detection) double l1_geometry = default_l1, lInf_geometry = default_lInf; if (target == DNN_TARGET_OPENCL_FP16) { - lInf_scores = 0.11; + lInf_scores = backend == DNN_BACKEND_INFERENCE_ENGINE ? 0.16 : 0.11; l1_geometry = 0.28; lInf_geometry = 5.94; } else if (target == DNN_TARGET_MYRIAD) diff --git a/modules/dnn/test/test_torch_importer.cpp b/modules/dnn/test/test_torch_importer.cpp index 0b844452e2..c640c90ed3 100644 --- a/modules/dnn/test/test_torch_importer.cpp +++ b/modules/dnn/test/test_torch_importer.cpp @@ -136,6 +136,10 @@ TEST_P(Test_Torch_layers, run_reshape_change_batch_size) TEST_P(Test_Torch_layers, run_reshape) { +#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE == 2018040000 + if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD) + throw SkipTestException("Test is disabled for OpenVINO 2018R4"); +#endif runTorchNet("net_reshape_batch"); runTorchNet("net_reshape_channels", "", false, true); } @@ -168,6 +172,10 @@ TEST_P(Test_Torch_layers, run_depth_concat) TEST_P(Test_Torch_layers, run_deconv) { +#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE == 2018040000 + if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD) + throw SkipTestException("Test is disabled for OpenVINO 2018R4"); +#endif runTorchNet("net_deconv"); } diff --git a/modules/features2d/test/test_matchers_algorithmic.cpp b/modules/features2d/test/test_matchers_algorithmic.cpp index f8a17995ca..4210f9ef9d 100644 --- a/modules/features2d/test/test_matchers_algorithmic.cpp +++ b/modules/features2d/test/test_matchers_algorithmic.cpp @@ -595,4 +595,23 @@ TEST( Features2d_FlannBasedMatcher, read_write ) EXPECT_EQ(ymlfile, out); } + +TEST(Features2d_DMatch, issue_11855) +{ + Mat sources = (Mat_(2, 3) << 1, 1, 0, + 1, 1, 1); + Mat targets = (Mat_(2, 3) << 1, 1, 1, + 0, 0, 0); + + Ptr bf = BFMatcher::create(NORM_HAMMING, true); + vector > match; + bf->knnMatch(sources, 
targets, match, 1, noArray(), true); + + ASSERT_EQ((size_t)1, match.size()); + ASSERT_EQ((size_t)1, match[0].size()); + EXPECT_EQ(1, match[0][0].queryIdx); + EXPECT_EQ(0, match[0][0].trainIdx); + EXPECT_EQ(0.0f, match[0][0].distance); +} + }} // namespace diff --git a/modules/imgproc/src/bilateral_filter.cpp b/modules/imgproc/src/bilateral_filter.cpp new file mode 100644 index 0000000000..5e39fa4de5 --- /dev/null +++ b/modules/imgproc/src/bilateral_filter.cpp @@ -0,0 +1,759 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, 2018, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Copyright (C) 2014-2015, Itseez Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. 
+// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#include "precomp.hpp" + +#include + +#include "opencv2/core/hal/intrin.hpp" +#include "opencl_kernels_imgproc.hpp" + +/****************************************************************************************\ + Bilateral Filtering +\****************************************************************************************/ + +namespace cv +{ + +class BilateralFilter_8u_Invoker : + public ParallelLoopBody +{ +public: + BilateralFilter_8u_Invoker(Mat& _dest, const Mat& _temp, int _radius, int _maxk, + int* _space_ofs, float *_space_weight, float *_color_weight) : + temp(&_temp), dest(&_dest), radius(_radius), + maxk(_maxk), space_ofs(_space_ofs), space_weight(_space_weight), color_weight(_color_weight) + { + } + + virtual void operator() (const Range& range) const CV_OVERRIDE + { + int i, j, cn = dest->channels(), k; + Size size = dest->size(); + + for( i = range.start; i < range.end; i++ ) + { + const uchar* sptr = temp->ptr(i+radius) + radius*cn; + uchar* dptr = dest->ptr(i); + + if( cn == 1 ) + { + AutoBuffer buf(alignSize(size.width, CV_SIMD_WIDTH) + size.width + CV_SIMD_WIDTH - 1); + memset(buf.data(), 0, buf.size() * sizeof(float)); + float *sum = 
alignPtr(buf.data(), CV_SIMD_WIDTH); + float *wsum = sum + alignSize(size.width, CV_SIMD_WIDTH); + for( k = 0; k < maxk; k++ ) + { + const uchar* ksptr = sptr + space_ofs[k]; + j = 0; +#if CV_SIMD + v_float32 kweight = vx_setall_f32(space_weight[k]); + for (; j <= size.width - v_float32::nlanes; j += v_float32::nlanes) + { + v_uint32 val = vx_load_expand_q(ksptr + j); + v_float32 w = kweight * v_lut(color_weight, v_reinterpret_as_s32(v_absdiff(val, vx_load_expand_q(sptr + j)))); + v_store_aligned(wsum + j, vx_load_aligned(wsum + j) + w); + v_store_aligned(sum + j, v_muladd(v_cvt_f32(v_reinterpret_as_s32(val)), w, vx_load_aligned(sum + j))); + } +#endif + for (; j < size.width; j++) + { + int val = ksptr[j]; + float w = space_weight[k] * color_weight[std::abs(val - sptr[j])]; + wsum[j] += w; + sum[j] += val * w; + } + } + j = 0; +#if CV_SIMD + for (; j <= size.width - 2*v_float32::nlanes; j += 2*v_float32::nlanes) + v_pack_u_store(dptr + j, v_pack(v_round(vx_load_aligned(sum + j ) / vx_load_aligned(wsum + j )), + v_round(vx_load_aligned(sum + j + v_float32::nlanes) / vx_load_aligned(wsum + j + v_float32::nlanes)))); +#endif + for (; j < size.width; j++) + { + // overflow is not possible here => there is no need to use cv::saturate_cast + CV_DbgAssert(fabs(wsum[j]) > 0); + dptr[j] = (uchar)cvRound(sum[j]/wsum[j]); + } + } + else + { + assert( cn == 3 ); + AutoBuffer buf(alignSize(size.width, CV_SIMD_WIDTH)*3 + size.width + CV_SIMD_WIDTH - 1); + memset(buf.data(), 0, buf.size() * sizeof(float)); + float *sum_b = alignPtr(buf.data(), CV_SIMD_WIDTH); + float *sum_g = sum_b + alignSize(size.width, CV_SIMD_WIDTH); + float *sum_r = sum_g + alignSize(size.width, CV_SIMD_WIDTH); + float *wsum = sum_r + alignSize(size.width, CV_SIMD_WIDTH); + for(k = 0; k < maxk; k++ ) + { + const uchar* ksptr = sptr + space_ofs[k]; + const uchar* rsptr = sptr; + j = 0; +#if CV_SIMD + v_float32 kweight = vx_setall_f32(space_weight[k]); + for (; j <= size.width - v_uint8::nlanes; j += 
v_uint8::nlanes, ksptr += 3*v_uint8::nlanes, rsptr += 3*v_uint8::nlanes) + { + v_uint8 kb, kg, kr, rb, rg, rr; + v_load_deinterleave(ksptr, kb, kg, kr); + v_load_deinterleave(rsptr, rb, rg, rr); + + v_uint16 b_l, b_h, g_l, g_h, r_l, r_h; + v_expand(v_absdiff(kb, rb), b_l, b_h); + v_expand(v_absdiff(kg, rg), g_l, g_h); + v_expand(v_absdiff(kr, rr), r_l, r_h); + + v_uint32 val0, val1, val2, val3; + v_expand(b_l + g_l + r_l, val0, val1); + v_expand(b_h + g_h + r_h, val2, val3); + + v_expand(kb, b_l, b_h); + v_expand(kg, g_l, g_h); + v_expand(kr, r_l, r_h); + + v_float32 w0 = kweight * v_lut(color_weight, v_reinterpret_as_s32(val0)); + v_float32 w1 = kweight * v_lut(color_weight, v_reinterpret_as_s32(val1)); + v_float32 w2 = kweight * v_lut(color_weight, v_reinterpret_as_s32(val2)); + v_float32 w3 = kweight * v_lut(color_weight, v_reinterpret_as_s32(val3)); + v_store_aligned(wsum + j , w0 + vx_load_aligned(wsum + j)); + v_store_aligned(wsum + j + v_float32::nlanes, w1 + vx_load_aligned(wsum + j + v_float32::nlanes)); + v_store_aligned(wsum + j + 2*v_float32::nlanes, w2 + vx_load_aligned(wsum + j + 2*v_float32::nlanes)); + v_store_aligned(wsum + j + 3*v_float32::nlanes, w3 + vx_load_aligned(wsum + j + 3*v_float32::nlanes)); + v_expand(b_l, val0, val1); + v_expand(b_h, val2, val3); + v_store_aligned(sum_b + j , v_muladd(v_cvt_f32(v_reinterpret_as_s32(val0)), w0, vx_load_aligned(sum_b + j))); + v_store_aligned(sum_b + j + v_float32::nlanes, v_muladd(v_cvt_f32(v_reinterpret_as_s32(val1)), w1, vx_load_aligned(sum_b + j + v_float32::nlanes))); + v_store_aligned(sum_b + j + 2*v_float32::nlanes, v_muladd(v_cvt_f32(v_reinterpret_as_s32(val2)), w2, vx_load_aligned(sum_b + j + 2*v_float32::nlanes))); + v_store_aligned(sum_b + j + 3*v_float32::nlanes, v_muladd(v_cvt_f32(v_reinterpret_as_s32(val3)), w3, vx_load_aligned(sum_b + j + 3*v_float32::nlanes))); + v_expand(g_l, val0, val1); + v_expand(g_h, val2, val3); + v_store_aligned(sum_g + j , 
v_muladd(v_cvt_f32(v_reinterpret_as_s32(val0)), w0, vx_load_aligned(sum_g + j))); + v_store_aligned(sum_g + j + v_float32::nlanes, v_muladd(v_cvt_f32(v_reinterpret_as_s32(val1)), w1, vx_load_aligned(sum_g + j + v_float32::nlanes))); + v_store_aligned(sum_g + j + 2*v_float32::nlanes, v_muladd(v_cvt_f32(v_reinterpret_as_s32(val2)), w2, vx_load_aligned(sum_g + j + 2*v_float32::nlanes))); + v_store_aligned(sum_g + j + 3*v_float32::nlanes, v_muladd(v_cvt_f32(v_reinterpret_as_s32(val3)), w3, vx_load_aligned(sum_g + j + 3*v_float32::nlanes))); + v_expand(r_l, val0, val1); + v_expand(r_h, val2, val3); + v_store_aligned(sum_r + j , v_muladd(v_cvt_f32(v_reinterpret_as_s32(val0)), w0, vx_load_aligned(sum_r + j))); + v_store_aligned(sum_r + j + v_float32::nlanes, v_muladd(v_cvt_f32(v_reinterpret_as_s32(val1)), w1, vx_load_aligned(sum_r + j + v_float32::nlanes))); + v_store_aligned(sum_r + j + 2*v_float32::nlanes, v_muladd(v_cvt_f32(v_reinterpret_as_s32(val2)), w2, vx_load_aligned(sum_r + j + 2*v_float32::nlanes))); + v_store_aligned(sum_r + j + 3*v_float32::nlanes, v_muladd(v_cvt_f32(v_reinterpret_as_s32(val3)), w3, vx_load_aligned(sum_r + j + 3*v_float32::nlanes))); + } +#endif + for(; j < size.width; j++, ksptr += 3, rsptr += 3) + { + int b = ksptr[0], g = ksptr[1], r = ksptr[2]; + float w = space_weight[k]*color_weight[std::abs(b - rsptr[0]) + std::abs(g - rsptr[1]) + std::abs(r - rsptr[2])]; + wsum[j] += w; + sum_b[j] += b*w; sum_g[j] += g*w; sum_r[j] += r*w; + } + } + j = 0; +#if CV_SIMD + v_float32 v_one = vx_setall_f32(1.f); + for(; j <= size.width - v_uint8::nlanes; j += v_uint8::nlanes, dptr += 3*v_uint8::nlanes) + { + v_float32 w0 = v_one / vx_load_aligned(wsum + j); + v_float32 w1 = v_one / vx_load_aligned(wsum + j + v_float32::nlanes); + v_float32 w2 = v_one / vx_load_aligned(wsum + j + 2*v_float32::nlanes); + v_float32 w3 = v_one / vx_load_aligned(wsum + j + 3*v_float32::nlanes); + + v_store_interleave(dptr, v_pack_u(v_pack(v_round(w0 * vx_load_aligned(sum_b + 
j)), + v_round(w1 * vx_load_aligned(sum_b + j + v_float32::nlanes))), + v_pack(v_round(w2 * vx_load_aligned(sum_b + j + 2*v_float32::nlanes)), + v_round(w3 * vx_load_aligned(sum_b + j + 3*v_float32::nlanes)))), + v_pack_u(v_pack(v_round(w0 * vx_load_aligned(sum_g + j)), + v_round(w1 * vx_load_aligned(sum_g + j + v_float32::nlanes))), + v_pack(v_round(w2 * vx_load_aligned(sum_g + j + 2*v_float32::nlanes)), + v_round(w3 * vx_load_aligned(sum_g + j + 3*v_float32::nlanes)))), + v_pack_u(v_pack(v_round(w0 * vx_load_aligned(sum_r + j)), + v_round(w1 * vx_load_aligned(sum_r + j + v_float32::nlanes))), + v_pack(v_round(w2 * vx_load_aligned(sum_r + j + 2*v_float32::nlanes)), + v_round(w3 * vx_load_aligned(sum_r + j + 3*v_float32::nlanes))))); + } +#endif + for(; j < size.width; j++) + { + CV_DbgAssert(fabs(wsum[j]) > 0); + wsum[j] = 1.f/wsum[j]; + *(dptr++) = (uchar)cvRound(sum_b[j]*wsum[j]); + *(dptr++) = (uchar)cvRound(sum_g[j]*wsum[j]); + *(dptr++) = (uchar)cvRound(sum_r[j]*wsum[j]); + } + } + } +#if CV_SIMD + vx_cleanup(); +#endif + } + +private: + const Mat *temp; + Mat *dest; + int radius, maxk, *space_ofs; + float *space_weight, *color_weight; +}; + +#ifdef HAVE_OPENCL + +static bool ocl_bilateralFilter_8u(InputArray _src, OutputArray _dst, int d, + double sigma_color, double sigma_space, + int borderType) +{ +#ifdef __ANDROID__ + if (ocl::Device::getDefault().isNVidia()) + return false; +#endif + + int type = _src.type(), depth = CV_MAT_DEPTH(type), cn = CV_MAT_CN(type); + int i, j, maxk, radius; + + if (depth != CV_8U || cn > 4) + return false; + + if (sigma_color <= 0) + sigma_color = 1; + if (sigma_space <= 0) + sigma_space = 1; + + double gauss_color_coeff = -0.5 / (sigma_color * sigma_color); + double gauss_space_coeff = -0.5 / (sigma_space * sigma_space); + + if ( d <= 0 ) + radius = cvRound(sigma_space * 1.5); + else + radius = d / 2; + radius = MAX(radius, 1); + d = radius * 2 + 1; + + UMat src = _src.getUMat(), dst = _dst.getUMat(), temp; + if (src.u == 
dst.u) + return false; + + copyMakeBorder(src, temp, radius, radius, radius, radius, borderType); + std::vector _space_weight(d * d); + std::vector _space_ofs(d * d); + float * const space_weight = &_space_weight[0]; + int * const space_ofs = &_space_ofs[0]; + + // initialize space-related bilateral filter coefficients + for( i = -radius, maxk = 0; i <= radius; i++ ) + for( j = -radius; j <= radius; j++ ) + { + double r = std::sqrt((double)i * i + (double)j * j); + if ( r > radius ) + continue; + space_weight[maxk] = (float)std::exp(r * r * gauss_space_coeff); + space_ofs[maxk++] = (int)(i * temp.step + j * cn); + } + + char cvt[3][40]; + String cnstr = cn > 1 ? format("%d", cn) : ""; + String kernelName("bilateral"); + size_t sizeDiv = 1; + if ((ocl::Device::getDefault().isIntel()) && + (ocl::Device::getDefault().type() == ocl::Device::TYPE_GPU)) + { + //Intel GPU + if (dst.cols % 4 == 0 && cn == 1) // For single channel x4 sized images. + { + kernelName = "bilateral_float4"; + sizeDiv = 4; + } + } + ocl::Kernel k(kernelName.c_str(), ocl::imgproc::bilateral_oclsrc, + format("-D radius=%d -D maxk=%d -D cn=%d -D int_t=%s -D uint_t=uint%s -D convert_int_t=%s" + " -D uchar_t=%s -D float_t=%s -D convert_float_t=%s -D convert_uchar_t=%s -D gauss_color_coeff=(float)%f", + radius, maxk, cn, ocl::typeToStr(CV_32SC(cn)), cnstr.c_str(), + ocl::convertTypeStr(CV_8U, CV_32S, cn, cvt[0]), + ocl::typeToStr(type), ocl::typeToStr(CV_32FC(cn)), + ocl::convertTypeStr(CV_32S, CV_32F, cn, cvt[1]), + ocl::convertTypeStr(CV_32F, CV_8U, cn, cvt[2]), gauss_color_coeff)); + if (k.empty()) + return false; + + Mat mspace_weight(1, d * d, CV_32FC1, space_weight); + Mat mspace_ofs(1, d * d, CV_32SC1, space_ofs); + UMat ucolor_weight, uspace_weight, uspace_ofs; + + mspace_weight.copyTo(uspace_weight); + mspace_ofs.copyTo(uspace_ofs); + + k.args(ocl::KernelArg::ReadOnlyNoSize(temp), ocl::KernelArg::WriteOnly(dst), + ocl::KernelArg::PtrReadOnly(uspace_weight), + 
ocl::KernelArg::PtrReadOnly(uspace_ofs)); + + size_t globalsize[2] = { (size_t)dst.cols / sizeDiv, (size_t)dst.rows }; + return k.run(2, globalsize, NULL, false); +} + +#endif +static void +bilateralFilter_8u( const Mat& src, Mat& dst, int d, + double sigma_color, double sigma_space, + int borderType ) +{ + int cn = src.channels(); + int i, j, maxk, radius; + Size size = src.size(); + + CV_Assert( (src.type() == CV_8UC1 || src.type() == CV_8UC3) && src.data != dst.data ); + + if( sigma_color <= 0 ) + sigma_color = 1; + if( sigma_space <= 0 ) + sigma_space = 1; + + double gauss_color_coeff = -0.5/(sigma_color*sigma_color); + double gauss_space_coeff = -0.5/(sigma_space*sigma_space); + + if( d <= 0 ) + radius = cvRound(sigma_space*1.5); + else + radius = d/2; + radius = MAX(radius, 1); + d = radius*2 + 1; + + Mat temp; + copyMakeBorder( src, temp, radius, radius, radius, radius, borderType ); + + std::vector _color_weight(cn*256); + std::vector _space_weight(d*d); + std::vector _space_ofs(d*d); + float* color_weight = &_color_weight[0]; + float* space_weight = &_space_weight[0]; + int* space_ofs = &_space_ofs[0]; + + // initialize color-related bilateral filter coefficients + + for( i = 0; i < 256*cn; i++ ) + color_weight[i] = (float)std::exp(i*i*gauss_color_coeff); + + // initialize space-related bilateral filter coefficients + for( i = -radius, maxk = 0; i <= radius; i++ ) + { + j = -radius; + + for( ; j <= radius; j++ ) + { + double r = std::sqrt((double)i*i + (double)j*j); + if( r > radius ) + continue; + space_weight[maxk] = (float)std::exp(r*r*gauss_space_coeff); + space_ofs[maxk++] = (int)(i*temp.step + j*cn); + } + } + + BilateralFilter_8u_Invoker body(dst, temp, radius, maxk, space_ofs, space_weight, color_weight); + parallel_for_(Range(0, size.height), body, dst.total()/(double)(1<<16)); +} + + +class BilateralFilter_32f_Invoker : + public ParallelLoopBody +{ +public: + + BilateralFilter_32f_Invoker(int _cn, int _radius, int _maxk, int *_space_ofs, + const 
Mat& _temp, Mat& _dest, float _scale_index, float *_space_weight, float *_expLUT) : + cn(_cn), radius(_radius), maxk(_maxk), space_ofs(_space_ofs), + temp(&_temp), dest(&_dest), scale_index(_scale_index), space_weight(_space_weight), expLUT(_expLUT) + { + } + + virtual void operator() (const Range& range) const CV_OVERRIDE + { + int i, j, k; + Size size = dest->size(); + + for( i = range.start; i < range.end; i++ ) + { + const float* sptr = temp->ptr(i+radius) + radius*cn; + float* dptr = dest->ptr(i); + + if( cn == 1 ) + { + AutoBuffer buf(alignSize(size.width, CV_SIMD_WIDTH) + size.width + CV_SIMD_WIDTH - 1); + memset(buf.data(), 0, buf.size() * sizeof(float)); + float *sum = alignPtr(buf.data(), CV_SIMD_WIDTH); + float *wsum = sum + alignSize(size.width, CV_SIMD_WIDTH); +#if CV_SIMD + v_float32 v_one = vx_setall_f32(1.f); + v_float32 sindex = vx_setall_f32(scale_index); +#endif + for( k = 0; k < maxk; k++ ) + { + const float* ksptr = sptr + space_ofs[k]; + j = 0; +#if CV_SIMD + v_float32 kweight = vx_setall_f32(space_weight[k]); + for (; j <= size.width - v_float32::nlanes; j += v_float32::nlanes) + { + v_float32 val = vx_load(ksptr + j); + + v_float32 alpha = v_absdiff(val, vx_load(sptr + j)) * sindex; + v_int32 idx = v_trunc(alpha); + alpha -= v_cvt_f32(idx); + + v_float32 w = kweight * v_muladd(v_lut(expLUT + 1, idx), alpha, v_lut(expLUT, idx) * (v_one-alpha)); + v_store_aligned(wsum + j, vx_load_aligned(wsum + j) + w); + v_store_aligned(sum + j, v_muladd(val, w, vx_load_aligned(sum + j))); + } +#endif + for (; j < size.width; j++) + { + float val = ksptr[j]; + float alpha = std::abs(val - sptr[j]) * scale_index; + int idx = cvFloor(alpha); + alpha -= idx; + float w = space_weight[k] * (expLUT[idx] + alpha*(expLUT[idx+1] - expLUT[idx])); + wsum[j] += w; + sum[j] += val * w; + } + } + j = 0; +#if CV_SIMD + for (; j <= size.width - v_float32::nlanes; j += v_float32::nlanes) + v_store(dptr + j, vx_load_aligned(sum + j) / vx_load_aligned(wsum + j)); +#endif + for 
(; j < size.width; j++) + { + CV_DbgAssert(fabs(wsum[j]) > 0); + dptr[j] = sum[j] / wsum[j]; + } + } + else + { + CV_Assert( cn == 3 ); + AutoBuffer buf(alignSize(size.width, CV_SIMD_WIDTH)*3 + size.width + CV_SIMD_WIDTH - 1); + memset(buf.data(), 0, buf.size() * sizeof(float)); + float *sum_b = alignPtr(buf.data(), CV_SIMD_WIDTH); + float *sum_g = sum_b + alignSize(size.width, CV_SIMD_WIDTH); + float *sum_r = sum_g + alignSize(size.width, CV_SIMD_WIDTH); + float *wsum = sum_r + alignSize(size.width, CV_SIMD_WIDTH); +#if CV_SIMD + v_float32 v_one = vx_setall_f32(1.f); + v_float32 sindex = vx_setall_f32(scale_index); +#endif + for (k = 0; k < maxk; k++) + { + const float* ksptr = sptr + space_ofs[k]; + const float* rsptr = sptr; + j = 0; +#if CV_SIMD + v_float32 kweight = vx_setall_f32(space_weight[k]); + for (; j <= size.width - v_float32::nlanes; j += v_float32::nlanes, ksptr += 3*v_float32::nlanes, rsptr += 3*v_float32::nlanes) + { + v_float32 kb, kg, kr, rb, rg, rr; + v_load_deinterleave(ksptr, kb, kg, kr); + v_load_deinterleave(rsptr, rb, rg, rr); + + v_float32 alpha = (v_absdiff(kb, rb) + v_absdiff(kg, rg) + v_absdiff(kr, rr)) * sindex; + v_int32 idx = v_trunc(alpha); + alpha -= v_cvt_f32(idx); + + v_float32 w = kweight * v_muladd(v_lut(expLUT + 1, idx), alpha, v_lut(expLUT, idx) * (v_one - alpha)); + v_store_aligned(wsum + j, vx_load_aligned(wsum + j) + w); + v_store_aligned(sum_b + j, v_muladd(kb, w, vx_load_aligned(sum_b + j))); + v_store_aligned(sum_g + j, v_muladd(kg, w, vx_load_aligned(sum_g + j))); + v_store_aligned(sum_r + j, v_muladd(kr, w, vx_load_aligned(sum_r + j))); + } +#endif + for (; j < size.width; j++, ksptr += 3, rsptr += 3) + { + float b = ksptr[0], g = ksptr[1], r = ksptr[2]; + float alpha = (std::abs(b - rsptr[0]) + std::abs(g - rsptr[1]) + std::abs(r - rsptr[2])) * scale_index; + int idx = cvFloor(alpha); + alpha -= idx; + float w = space_weight[k] * (expLUT[idx] + alpha*(expLUT[idx + 1] - expLUT[idx])); + wsum[j] += w; + sum_b[j] += 
b*w; + sum_g[j] += g*w; + sum_r[j] += r*w; + } + } + j = 0; +#if CV_SIMD + for (; j <= size.width - v_float32::nlanes; j += v_float32::nlanes, dptr += 3*v_float32::nlanes) + { + v_float32 w = v_one / vx_load_aligned(wsum + j); + v_store_interleave(dptr, vx_load_aligned(sum_b + j) * w, vx_load_aligned(sum_g + j) * w, vx_load_aligned(sum_r + j) * w); + } +#endif + for (; j < size.width; j++) + { + CV_DbgAssert(fabs(wsum[j]) > 0); + wsum[j] = 1.f / wsum[j]; + *(dptr++) = sum_b[j] * wsum[j]; + *(dptr++) = sum_g[j] * wsum[j]; + *(dptr++) = sum_r[j] * wsum[j]; + } + } + } +#if CV_SIMD + vx_cleanup(); +#endif + } + +private: + int cn, radius, maxk, *space_ofs; + const Mat* temp; + Mat *dest; + float scale_index, *space_weight, *expLUT; +}; + + +static void +bilateralFilter_32f( const Mat& src, Mat& dst, int d, + double sigma_color, double sigma_space, + int borderType ) +{ + int cn = src.channels(); + int i, j, maxk, radius; + double minValSrc=-1, maxValSrc=1; + const int kExpNumBinsPerChannel = 1 << 12; + int kExpNumBins = 0; + float lastExpVal = 1.f; + float len, scale_index; + Size size = src.size(); + + CV_Assert( (src.type() == CV_32FC1 || src.type() == CV_32FC3) && src.data != dst.data ); + + if( sigma_color <= 0 ) + sigma_color = 1; + if( sigma_space <= 0 ) + sigma_space = 1; + + double gauss_color_coeff = -0.5/(sigma_color*sigma_color); + double gauss_space_coeff = -0.5/(sigma_space*sigma_space); + + if( d <= 0 ) + radius = cvRound(sigma_space*1.5); + else + radius = d/2; + radius = MAX(radius, 1); + d = radius*2 + 1; + // compute the min/max range for the input image (even if multichannel) + + minMaxLoc( src.reshape(1), &minValSrc, &maxValSrc ); + if(std::abs(minValSrc - maxValSrc) < FLT_EPSILON) + { + src.copyTo(dst); + return; + } + + // temporary copy of the image with borders for easy processing + Mat temp; + copyMakeBorder( src, temp, radius, radius, radius, radius, borderType ); + minValSrc -= 5. 
* sigma_color; + patchNaNs( temp, minValSrc ); // this replacement of NaNs makes the assumption that depth values are nonnegative + // TODO: make replacement parameter avalible in the outside function interface + // allocate lookup tables + std::vector _space_weight(d*d); + std::vector _space_ofs(d*d); + float* space_weight = &_space_weight[0]; + int* space_ofs = &_space_ofs[0]; + + // assign a length which is slightly more than needed + len = (float)(maxValSrc - minValSrc) * cn; + kExpNumBins = kExpNumBinsPerChannel * cn; + std::vector _expLUT(kExpNumBins+2); + float* expLUT = &_expLUT[0]; + + scale_index = kExpNumBins/len; + + // initialize the exp LUT + for( i = 0; i < kExpNumBins+2; i++ ) + { + if( lastExpVal > 0.f ) + { + double val = i / scale_index; + expLUT[i] = (float)std::exp(val * val * gauss_color_coeff); + lastExpVal = expLUT[i]; + } + else + expLUT[i] = 0.f; + } + + // initialize space-related bilateral filter coefficients + for( i = -radius, maxk = 0; i <= radius; i++ ) + for( j = -radius; j <= radius; j++ ) + { + double r = std::sqrt((double)i*i + (double)j*j); + if( r > radius ) + continue; + space_weight[maxk] = (float)std::exp(r*r*gauss_space_coeff); + space_ofs[maxk++] = (int)(i*(temp.step/sizeof(float)) + j*cn); + } + + // parallel_for usage + + BilateralFilter_32f_Invoker body(cn, radius, maxk, space_ofs, temp, dst, scale_index, space_weight, expLUT); + parallel_for_(Range(0, size.height), body, dst.total()/(double)(1<<16)); +} + +#ifdef HAVE_IPP +#define IPP_BILATERAL_PARALLEL 1 + +#ifdef HAVE_IPP_IW +class ipp_bilateralFilterParallel: public ParallelLoopBody +{ +public: + ipp_bilateralFilterParallel(::ipp::IwiImage &_src, ::ipp::IwiImage &_dst, int _radius, Ipp32f _valSquareSigma, Ipp32f _posSquareSigma, ::ipp::IwiBorderType _borderType, bool *_ok): + src(_src), dst(_dst) + { + pOk = _ok; + + radius = _radius; + valSquareSigma = _valSquareSigma; + posSquareSigma = _posSquareSigma; + borderType = _borderType; + + *pOk = true; + } + 
~ipp_bilateralFilterParallel() {} + + virtual void operator() (const Range& range) const CV_OVERRIDE + { + if(*pOk == false) + return; + + try + { + ::ipp::IwiTile tile = ::ipp::IwiRoi(0, range.start, dst.m_size.width, range.end - range.start); + CV_INSTRUMENT_FUN_IPP(::ipp::iwiFilterBilateral, src, dst, radius, valSquareSigma, posSquareSigma, ::ipp::IwDefault(), borderType, tile); + } + catch(const ::ipp::IwException &) + { + *pOk = false; + return; + } + } +private: + ::ipp::IwiImage &src; + ::ipp::IwiImage &dst; + + int radius; + Ipp32f valSquareSigma; + Ipp32f posSquareSigma; + ::ipp::IwiBorderType borderType; + + bool *pOk; + const ipp_bilateralFilterParallel& operator= (const ipp_bilateralFilterParallel&); +}; +#endif + +static bool ipp_bilateralFilter(Mat &src, Mat &dst, int d, double sigmaColor, double sigmaSpace, int borderType) +{ +#ifdef HAVE_IPP_IW + CV_INSTRUMENT_REGION_IPP(); + + int radius = IPP_MAX(((d <= 0)?cvRound(sigmaSpace*1.5):d/2), 1); + Ipp32f valSquareSigma = (Ipp32f)((sigmaColor <= 0)?1:sigmaColor*sigmaColor); + Ipp32f posSquareSigma = (Ipp32f)((sigmaSpace <= 0)?1:sigmaSpace*sigmaSpace); + + // Acquire data and begin processing + try + { + ::ipp::IwiImage iwSrc = ippiGetImage(src); + ::ipp::IwiImage iwDst = ippiGetImage(dst); + ::ipp::IwiBorderSize borderSize(radius); + ::ipp::IwiBorderType ippBorder(ippiGetBorder(iwSrc, borderType, borderSize)); + if(!ippBorder) + return false; + + const int threads = ippiSuggestThreadsNum(iwDst, 2); + if(IPP_BILATERAL_PARALLEL && threads > 1) { + bool ok = true; + Range range(0, (int)iwDst.m_size.height); + ipp_bilateralFilterParallel invoker(iwSrc, iwDst, radius, valSquareSigma, posSquareSigma, ippBorder, &ok); + if(!ok) + return false; + + parallel_for_(range, invoker, threads*4); + + if(!ok) + return false; + } else { + CV_INSTRUMENT_FUN_IPP(::ipp::iwiFilterBilateral, iwSrc, iwDst, radius, valSquareSigma, posSquareSigma, ::ipp::IwDefault(), ippBorder); + } + } + catch (const ::ipp::IwException &) + { + 
return false; + } + return true; +#else + CV_UNUSED(src); CV_UNUSED(dst); CV_UNUSED(d); CV_UNUSED(sigmaColor); CV_UNUSED(sigmaSpace); CV_UNUSED(borderType); + return false; +#endif +} +#endif + +} + +void cv::bilateralFilter( InputArray _src, OutputArray _dst, int d, + double sigmaColor, double sigmaSpace, + int borderType ) +{ + CV_INSTRUMENT_REGION(); + + _dst.create( _src.size(), _src.type() ); + + CV_OCL_RUN(_src.dims() <= 2 && _dst.isUMat(), + ocl_bilateralFilter_8u(_src, _dst, d, sigmaColor, sigmaSpace, borderType)) + + Mat src = _src.getMat(), dst = _dst.getMat(); + + CV_IPP_RUN_FAST(ipp_bilateralFilter(src, dst, d, sigmaColor, sigmaSpace, borderType)); + + if( src.depth() == CV_8U ) + bilateralFilter_8u( src, dst, d, sigmaColor, sigmaSpace, borderType ); + else if( src.depth() == CV_32F ) + bilateralFilter_32f( src, dst, d, sigmaColor, sigmaSpace, borderType ); + else + CV_Error( CV_StsUnsupportedFormat, + "Bilateral filtering is only implemented for 8u and 32f images" ); +} + +/* End of file. 
*/ diff --git a/modules/imgproc/src/smooth.cpp b/modules/imgproc/src/smooth.cpp index 4cb1914241..ed3e93c6cb 100644 --- a/modules/imgproc/src/smooth.cpp +++ b/modules/imgproc/src/smooth.cpp @@ -2505,762 +2505,6 @@ void cv::GaussianBlur( InputArray _src, OutputArray _dst, Size ksize, sepFilter2D(src, dst, sdepth, kx, ky, Point(-1, -1), 0, borderType); } -/****************************************************************************************\ - Bilateral Filtering -\****************************************************************************************/ - -namespace cv -{ - -class BilateralFilter_8u_Invoker : - public ParallelLoopBody -{ -public: - BilateralFilter_8u_Invoker(Mat& _dest, const Mat& _temp, int _radius, int _maxk, - int* _space_ofs, float *_space_weight, float *_color_weight) : - temp(&_temp), dest(&_dest), radius(_radius), - maxk(_maxk), space_ofs(_space_ofs), space_weight(_space_weight), color_weight(_color_weight) - { - } - - virtual void operator() (const Range& range) const CV_OVERRIDE - { - int i, j, cn = dest->channels(), k; - Size size = dest->size(); -#if CV_SIMD128 - int CV_DECL_ALIGNED(16) buf[4]; - bool haveSIMD128 = hasSIMD128(); -#endif - - for( i = range.start; i < range.end; i++ ) - { - const uchar* sptr = temp->ptr(i+radius) + radius*cn; - uchar* dptr = dest->ptr(i); - - if( cn == 1 ) - { - for( j = 0; j < size.width; j++ ) - { - float sum = 0, wsum = 0; - int val0 = sptr[j]; - k = 0; -#if CV_SIMD128 - if( haveSIMD128 ) - { - v_float32x4 _val0 = v_setall_f32(static_cast(val0)); - v_float32x4 vsumw = v_setzero_f32(); - v_float32x4 vsumc = v_setzero_f32(); - - for( ; k <= maxk - 4; k += 4 ) - { - v_float32x4 _valF = v_float32x4(sptr[j + space_ofs[k]], - sptr[j + space_ofs[k + 1]], - sptr[j + space_ofs[k + 2]], - sptr[j + space_ofs[k + 3]]); - v_float32x4 _val = v_abs(_valF - _val0); - v_store(buf, v_round(_val)); - - v_float32x4 _cw = v_float32x4(color_weight[buf[0]], - color_weight[buf[1]], - color_weight[buf[2]], - 
color_weight[buf[3]]); - v_float32x4 _sw = v_load(space_weight+k); -#if defined(_MSC_VER) && _MSC_VER == 1700/* MSVS 2012 */ && CV_AVX - // details: https://github.com/opencv/opencv/issues/11004 - vsumw += _cw * _sw; - vsumc += _cw * _sw * _valF; -#else - v_float32x4 _w = _cw * _sw; - _cw = _w * _valF; - - vsumw += _w; - vsumc += _cw; -#endif - } - float *bufFloat = (float*)buf; - v_float32x4 sum4 = v_reduce_sum4(vsumw, vsumc, vsumw, vsumc); - v_store(bufFloat, sum4); - sum += bufFloat[1]; - wsum += bufFloat[0]; - } -#endif - for( ; k < maxk; k++ ) - { - int val = sptr[j + space_ofs[k]]; - float w = space_weight[k]*color_weight[std::abs(val - val0)]; - sum += val*w; - wsum += w; - } - // overflow is not possible here => there is no need to use cv::saturate_cast - CV_DbgAssert(fabs(wsum) > 0); - dptr[j] = (uchar)cvRound(sum/wsum); - } - } - else - { - assert( cn == 3 ); - for( j = 0; j < size.width*3; j += 3 ) - { - float sum_b = 0, sum_g = 0, sum_r = 0, wsum = 0; - int b0 = sptr[j], g0 = sptr[j+1], r0 = sptr[j+2]; - k = 0; -#if CV_SIMD128 - if( haveSIMD128 ) - { - v_float32x4 vsumw = v_setzero_f32(); - v_float32x4 vsumb = v_setzero_f32(); - v_float32x4 vsumg = v_setzero_f32(); - v_float32x4 vsumr = v_setzero_f32(); - const v_float32x4 _b0 = v_setall_f32(static_cast(b0)); - const v_float32x4 _g0 = v_setall_f32(static_cast(g0)); - const v_float32x4 _r0 = v_setall_f32(static_cast(r0)); - - for( ; k <= maxk - 4; k += 4 ) - { - const uchar* const sptr_k0 = sptr + j + space_ofs[k]; - const uchar* const sptr_k1 = sptr + j + space_ofs[k+1]; - const uchar* const sptr_k2 = sptr + j + space_ofs[k+2]; - const uchar* const sptr_k3 = sptr + j + space_ofs[k+3]; - - v_float32x4 __b = v_cvt_f32(v_reinterpret_as_s32(v_load_expand_q(sptr_k0))); - v_float32x4 __g = v_cvt_f32(v_reinterpret_as_s32(v_load_expand_q(sptr_k1))); - v_float32x4 __r = v_cvt_f32(v_reinterpret_as_s32(v_load_expand_q(sptr_k2))); - v_float32x4 __z = v_cvt_f32(v_reinterpret_as_s32(v_load_expand_q(sptr_k3))); - 
v_float32x4 _b, _g, _r, _z; - - v_transpose4x4(__b, __g, __r, __z, _b, _g, _r, _z); - - v_float32x4 bt = v_abs(_b -_b0); - v_float32x4 gt = v_abs(_g -_g0); - v_float32x4 rt = v_abs(_r -_r0); - - bt = rt + bt + gt; - v_store(buf, v_round(bt)); - - v_float32x4 _w = v_float32x4(color_weight[buf[0]],color_weight[buf[1]], - color_weight[buf[2]],color_weight[buf[3]]); - v_float32x4 _sw = v_load(space_weight+k); - -#if defined(_MSC_VER) && _MSC_VER == 1700/* MSVS 2012 */ && CV_AVX - // details: https://github.com/opencv/opencv/issues/11004 - vsumw += _w * _sw; - vsumb += _w * _sw * _b; - vsumg += _w * _sw * _g; - vsumr += _w * _sw * _r; -#else - _w *= _sw; - _b *= _w; - _g *= _w; - _r *= _w; - - vsumw += _w; - vsumb += _b; - vsumg += _g; - vsumr += _r; -#endif - } - float *bufFloat = (float*)buf; - v_float32x4 sum4 = v_reduce_sum4(vsumw, vsumb, vsumg, vsumr); - v_store(bufFloat, sum4); - wsum += bufFloat[0]; - sum_b += bufFloat[1]; - sum_g += bufFloat[2]; - sum_r += bufFloat[3]; - } -#endif - - for( ; k < maxk; k++ ) - { - const uchar* sptr_k = sptr + j + space_ofs[k]; - int b = sptr_k[0], g = sptr_k[1], r = sptr_k[2]; - float w = space_weight[k]*color_weight[std::abs(b - b0) + - std::abs(g - g0) + std::abs(r - r0)]; - sum_b += b*w; sum_g += g*w; sum_r += r*w; - wsum += w; - } - CV_DbgAssert(fabs(wsum) > 0); - wsum = 1.f/wsum; - b0 = cvRound(sum_b*wsum); - g0 = cvRound(sum_g*wsum); - r0 = cvRound(sum_r*wsum); - dptr[j] = (uchar)b0; dptr[j+1] = (uchar)g0; dptr[j+2] = (uchar)r0; - } - } - } - } - -private: - const Mat *temp; - Mat *dest; - int radius, maxk, *space_ofs; - float *space_weight, *color_weight; -}; - -#ifdef HAVE_OPENCL - -static bool ocl_bilateralFilter_8u(InputArray _src, OutputArray _dst, int d, - double sigma_color, double sigma_space, - int borderType) -{ -#ifdef __ANDROID__ - if (ocl::Device::getDefault().isNVidia()) - return false; -#endif - - int type = _src.type(), depth = CV_MAT_DEPTH(type), cn = CV_MAT_CN(type); - int i, j, maxk, radius; - - if (depth 
!= CV_8U || cn > 4) - return false; - - if (sigma_color <= 0) - sigma_color = 1; - if (sigma_space <= 0) - sigma_space = 1; - - double gauss_color_coeff = -0.5 / (sigma_color * sigma_color); - double gauss_space_coeff = -0.5 / (sigma_space * sigma_space); - - if ( d <= 0 ) - radius = cvRound(sigma_space * 1.5); - else - radius = d / 2; - radius = MAX(radius, 1); - d = radius * 2 + 1; - - UMat src = _src.getUMat(), dst = _dst.getUMat(), temp; - if (src.u == dst.u) - return false; - - copyMakeBorder(src, temp, radius, radius, radius, radius, borderType); - std::vector _space_weight(d * d); - std::vector _space_ofs(d * d); - float * const space_weight = &_space_weight[0]; - int * const space_ofs = &_space_ofs[0]; - - // initialize space-related bilateral filter coefficients - for( i = -radius, maxk = 0; i <= radius; i++ ) - for( j = -radius; j <= radius; j++ ) - { - double r = std::sqrt((double)i * i + (double)j * j); - if ( r > radius ) - continue; - space_weight[maxk] = (float)std::exp(r * r * gauss_space_coeff); - space_ofs[maxk++] = (int)(i * temp.step + j * cn); - } - - char cvt[3][40]; - String cnstr = cn > 1 ? format("%d", cn) : ""; - String kernelName("bilateral"); - size_t sizeDiv = 1; - if ((ocl::Device::getDefault().isIntel()) && - (ocl::Device::getDefault().type() == ocl::Device::TYPE_GPU)) - { - //Intel GPU - if (dst.cols % 4 == 0 && cn == 1) // For single channel x4 sized images. 
- { - kernelName = "bilateral_float4"; - sizeDiv = 4; - } - } - ocl::Kernel k(kernelName.c_str(), ocl::imgproc::bilateral_oclsrc, - format("-D radius=%d -D maxk=%d -D cn=%d -D int_t=%s -D uint_t=uint%s -D convert_int_t=%s" - " -D uchar_t=%s -D float_t=%s -D convert_float_t=%s -D convert_uchar_t=%s -D gauss_color_coeff=(float)%f", - radius, maxk, cn, ocl::typeToStr(CV_32SC(cn)), cnstr.c_str(), - ocl::convertTypeStr(CV_8U, CV_32S, cn, cvt[0]), - ocl::typeToStr(type), ocl::typeToStr(CV_32FC(cn)), - ocl::convertTypeStr(CV_32S, CV_32F, cn, cvt[1]), - ocl::convertTypeStr(CV_32F, CV_8U, cn, cvt[2]), gauss_color_coeff)); - if (k.empty()) - return false; - - Mat mspace_weight(1, d * d, CV_32FC1, space_weight); - Mat mspace_ofs(1, d * d, CV_32SC1, space_ofs); - UMat ucolor_weight, uspace_weight, uspace_ofs; - - mspace_weight.copyTo(uspace_weight); - mspace_ofs.copyTo(uspace_ofs); - - k.args(ocl::KernelArg::ReadOnlyNoSize(temp), ocl::KernelArg::WriteOnly(dst), - ocl::KernelArg::PtrReadOnly(uspace_weight), - ocl::KernelArg::PtrReadOnly(uspace_ofs)); - - size_t globalsize[2] = { (size_t)dst.cols / sizeDiv, (size_t)dst.rows }; - return k.run(2, globalsize, NULL, false); -} - -#endif -static void -bilateralFilter_8u( const Mat& src, Mat& dst, int d, - double sigma_color, double sigma_space, - int borderType ) -{ - int cn = src.channels(); - int i, j, maxk, radius; - Size size = src.size(); - - CV_Assert( (src.type() == CV_8UC1 || src.type() == CV_8UC3) && src.data != dst.data ); - - if( sigma_color <= 0 ) - sigma_color = 1; - if( sigma_space <= 0 ) - sigma_space = 1; - - double gauss_color_coeff = -0.5/(sigma_color*sigma_color); - double gauss_space_coeff = -0.5/(sigma_space*sigma_space); - - if( d <= 0 ) - radius = cvRound(sigma_space*1.5); - else - radius = d/2; - radius = MAX(radius, 1); - d = radius*2 + 1; - - Mat temp; - copyMakeBorder( src, temp, radius, radius, radius, radius, borderType ); - - std::vector _color_weight(cn*256); - std::vector _space_weight(d*d); - 
std::vector _space_ofs(d*d); - float* color_weight = &_color_weight[0]; - float* space_weight = &_space_weight[0]; - int* space_ofs = &_space_ofs[0]; - - // initialize color-related bilateral filter coefficients - - for( i = 0; i < 256*cn; i++ ) - color_weight[i] = (float)std::exp(i*i*gauss_color_coeff); - - // initialize space-related bilateral filter coefficients - for( i = -radius, maxk = 0; i <= radius; i++ ) - { - j = -radius; - - for( ; j <= radius; j++ ) - { - double r = std::sqrt((double)i*i + (double)j*j); - if( r > radius ) - continue; - space_weight[maxk] = (float)std::exp(r*r*gauss_space_coeff); - space_ofs[maxk++] = (int)(i*temp.step + j*cn); - } - } - - BilateralFilter_8u_Invoker body(dst, temp, radius, maxk, space_ofs, space_weight, color_weight); - parallel_for_(Range(0, size.height), body, dst.total()/(double)(1<<16)); -} - - -class BilateralFilter_32f_Invoker : - public ParallelLoopBody -{ -public: - - BilateralFilter_32f_Invoker(int _cn, int _radius, int _maxk, int *_space_ofs, - const Mat& _temp, Mat& _dest, float _scale_index, float *_space_weight, float *_expLUT) : - cn(_cn), radius(_radius), maxk(_maxk), space_ofs(_space_ofs), - temp(&_temp), dest(&_dest), scale_index(_scale_index), space_weight(_space_weight), expLUT(_expLUT) - { - } - - virtual void operator() (const Range& range) const CV_OVERRIDE - { - int i, j, k; - Size size = dest->size(); -#if CV_SIMD128 - int CV_DECL_ALIGNED(16) idxBuf[4]; - bool haveSIMD128 = hasSIMD128(); -#endif - - for( i = range.start; i < range.end; i++ ) - { - const float* sptr = temp->ptr(i+radius) + radius*cn; - float* dptr = dest->ptr(i); - - if( cn == 1 ) - { - for( j = 0; j < size.width; j++ ) - { - float sum = 0, wsum = 0; - float val0 = sptr[j]; - k = 0; -#if CV_SIMD128 - if( haveSIMD128 ) - { - v_float32x4 vecwsum = v_setzero_f32(); - v_float32x4 vecvsum = v_setzero_f32(); - const v_float32x4 _val0 = v_setall_f32(sptr[j]); - const v_float32x4 _scale_index = v_setall_f32(scale_index); - - for (; k <= 
maxk - 4; k += 4) - { - v_float32x4 _sw = v_load(space_weight + k); - v_float32x4 _val = v_float32x4(sptr[j + space_ofs[k]], - sptr[j + space_ofs[k + 1]], - sptr[j + space_ofs[k + 2]], - sptr[j + space_ofs[k + 3]]); - v_float32x4 _alpha = v_abs(_val - _val0) * _scale_index; - - v_int32x4 _idx = v_round(_alpha); - v_store(idxBuf, _idx); - _alpha -= v_cvt_f32(_idx); - - v_float32x4 _explut = v_float32x4(expLUT[idxBuf[0]], - expLUT[idxBuf[1]], - expLUT[idxBuf[2]], - expLUT[idxBuf[3]]); - v_float32x4 _explut1 = v_float32x4(expLUT[idxBuf[0] + 1], - expLUT[idxBuf[1] + 1], - expLUT[idxBuf[2] + 1], - expLUT[idxBuf[3] + 1]); - - v_float32x4 _w = _sw * (_explut + (_alpha * (_explut1 - _explut))); - _val *= _w; - - vecwsum += _w; - vecvsum += _val; - } - float *bufFloat = (float*)idxBuf; - v_float32x4 sum4 = v_reduce_sum4(vecwsum, vecvsum, vecwsum, vecvsum); - v_store(bufFloat, sum4); - sum += bufFloat[1]; - wsum += bufFloat[0]; - } -#endif - - for( ; k < maxk; k++ ) - { - float val = sptr[j + space_ofs[k]]; - float alpha = (float)(std::abs(val - val0)*scale_index); - int idx = cvFloor(alpha); - alpha -= idx; - float w = space_weight[k]*(expLUT[idx] + alpha*(expLUT[idx+1] - expLUT[idx])); - sum += val*w; - wsum += w; - } - CV_DbgAssert(fabs(wsum) > 0); - dptr[j] = (float)(sum/wsum); - } - } - else - { - CV_Assert( cn == 3 ); - for( j = 0; j < size.width*3; j += 3 ) - { - float sum_b = 0, sum_g = 0, sum_r = 0, wsum = 0; - float b0 = sptr[j], g0 = sptr[j+1], r0 = sptr[j+2]; - k = 0; -#if CV_SIMD128 - if( haveSIMD128 ) - { - v_float32x4 sumw = v_setzero_f32(); - v_float32x4 sumb = v_setzero_f32(); - v_float32x4 sumg = v_setzero_f32(); - v_float32x4 sumr = v_setzero_f32(); - const v_float32x4 _b0 = v_setall_f32(b0); - const v_float32x4 _g0 = v_setall_f32(g0); - const v_float32x4 _r0 = v_setall_f32(r0); - const v_float32x4 _scale_index = v_setall_f32(scale_index); - - for( ; k <= maxk-4; k += 4 ) - { - v_float32x4 _sw = v_load(space_weight + k); - - const float* const sptr_k0 = 
sptr + j + space_ofs[k]; - const float* const sptr_k1 = sptr + j + space_ofs[k+1]; - const float* const sptr_k2 = sptr + j + space_ofs[k+2]; - const float* const sptr_k3 = sptr + j + space_ofs[k+3]; - - v_float32x4 _v0 = v_load(sptr_k0); - v_float32x4 _v1 = v_load(sptr_k1); - v_float32x4 _v2 = v_load(sptr_k2); - v_float32x4 _v3 = v_load(sptr_k3); - v_float32x4 _b, _g, _r, _dummy; - - v_transpose4x4(_v0, _v1, _v2, _v3, _b, _g, _r, _dummy); - - v_float32x4 _bt = v_abs(_b - _b0); - v_float32x4 _gt = v_abs(_g - _g0); - v_float32x4 _rt = v_abs(_r - _r0); - v_float32x4 _alpha = _scale_index * (_bt + _gt + _rt); - - v_int32x4 _idx = v_round(_alpha); - v_store((int*)idxBuf, _idx); - _alpha -= v_cvt_f32(_idx); - - v_float32x4 _explut = v_float32x4(expLUT[idxBuf[0]], - expLUT[idxBuf[1]], - expLUT[idxBuf[2]], - expLUT[idxBuf[3]]); - v_float32x4 _explut1 = v_float32x4(expLUT[idxBuf[0] + 1], - expLUT[idxBuf[1] + 1], - expLUT[idxBuf[2] + 1], - expLUT[idxBuf[3] + 1]); - - v_float32x4 _w = _sw * (_explut + (_alpha * (_explut1 - _explut))); - - _b *= _w; - _g *= _w; - _r *= _w; - sumw += _w; - sumb += _b; - sumg += _g; - sumr += _r; - } - v_float32x4 sum4 = v_reduce_sum4(sumw, sumb, sumg, sumr); - float *bufFloat = (float*)idxBuf; - v_store(bufFloat, sum4); - wsum += bufFloat[0]; - sum_b += bufFloat[1]; - sum_g += bufFloat[2]; - sum_r += bufFloat[3]; - } -#endif - - for(; k < maxk; k++ ) - { - const float* sptr_k = sptr + j + space_ofs[k]; - float b = sptr_k[0], g = sptr_k[1], r = sptr_k[2]; - float alpha = (float)((std::abs(b - b0) + - std::abs(g - g0) + std::abs(r - r0))*scale_index); - int idx = cvFloor(alpha); - alpha -= idx; - float w = space_weight[k]*(expLUT[idx] + alpha*(expLUT[idx+1] - expLUT[idx])); - sum_b += b*w; sum_g += g*w; sum_r += r*w; - wsum += w; - } - CV_DbgAssert(fabs(wsum) > 0); - wsum = 1.f/wsum; - b0 = sum_b*wsum; - g0 = sum_g*wsum; - r0 = sum_r*wsum; - dptr[j] = b0; dptr[j+1] = g0; dptr[j+2] = r0; - } - } - } - } - -private: - int cn, radius, maxk, 
*space_ofs; - const Mat* temp; - Mat *dest; - float scale_index, *space_weight, *expLUT; -}; - - -static void -bilateralFilter_32f( const Mat& src, Mat& dst, int d, - double sigma_color, double sigma_space, - int borderType ) -{ - int cn = src.channels(); - int i, j, maxk, radius; - double minValSrc=-1, maxValSrc=1; - const int kExpNumBinsPerChannel = 1 << 12; - int kExpNumBins = 0; - float lastExpVal = 1.f; - float len, scale_index; - Size size = src.size(); - - CV_Assert( (src.type() == CV_32FC1 || src.type() == CV_32FC3) && src.data != dst.data ); - - if( sigma_color <= 0 ) - sigma_color = 1; - if( sigma_space <= 0 ) - sigma_space = 1; - - double gauss_color_coeff = -0.5/(sigma_color*sigma_color); - double gauss_space_coeff = -0.5/(sigma_space*sigma_space); - - if( d <= 0 ) - radius = cvRound(sigma_space*1.5); - else - radius = d/2; - radius = MAX(radius, 1); - d = radius*2 + 1; - // compute the min/max range for the input image (even if multichannel) - - minMaxLoc( src.reshape(1), &minValSrc, &maxValSrc ); - if(std::abs(minValSrc - maxValSrc) < FLT_EPSILON) - { - src.copyTo(dst); - return; - } - - // temporary copy of the image with borders for easy processing - Mat temp; - copyMakeBorder( src, temp, radius, radius, radius, radius, borderType ); - const double insteadNaNValue = -5. 
* sigma_color; - patchNaNs( temp, insteadNaNValue ); // this replacement of NaNs makes the assumption that depth values are nonnegative - // TODO: make insteadNaNValue avalible in the outside function interface to control the cases breaking the assumption - // allocate lookup tables - std::vector _space_weight(d*d); - std::vector _space_ofs(d*d); - float* space_weight = &_space_weight[0]; - int* space_ofs = &_space_ofs[0]; - - // assign a length which is slightly more than needed - len = (float)(maxValSrc - minValSrc) * cn; - kExpNumBins = kExpNumBinsPerChannel * cn; - std::vector _expLUT(kExpNumBins+2); - float* expLUT = &_expLUT[0]; - - scale_index = kExpNumBins/len; - - // initialize the exp LUT - for( i = 0; i < kExpNumBins+2; i++ ) - { - if( lastExpVal > 0.f ) - { - double val = i / scale_index; - expLUT[i] = (float)std::exp(val * val * gauss_color_coeff); - lastExpVal = expLUT[i]; - } - else - expLUT[i] = 0.f; - } - - // initialize space-related bilateral filter coefficients - for( i = -radius, maxk = 0; i <= radius; i++ ) - for( j = -radius; j <= radius; j++ ) - { - double r = std::sqrt((double)i*i + (double)j*j); - if( r > radius ) - continue; - space_weight[maxk] = (float)std::exp(r*r*gauss_space_coeff); - space_ofs[maxk++] = (int)(i*(temp.step/sizeof(float)) + j*cn); - } - - // parallel_for usage - - BilateralFilter_32f_Invoker body(cn, radius, maxk, space_ofs, temp, dst, scale_index, space_weight, expLUT); - parallel_for_(Range(0, size.height), body, dst.total()/(double)(1<<16)); -} - -#ifdef HAVE_IPP -#define IPP_BILATERAL_PARALLEL 1 - -#ifdef HAVE_IPP_IW -class ipp_bilateralFilterParallel: public ParallelLoopBody -{ -public: - ipp_bilateralFilterParallel(::ipp::IwiImage &_src, ::ipp::IwiImage &_dst, int _radius, Ipp32f _valSquareSigma, Ipp32f _posSquareSigma, ::ipp::IwiBorderType _borderType, bool *_ok): - src(_src), dst(_dst) - { - pOk = _ok; - - radius = _radius; - valSquareSigma = _valSquareSigma; - posSquareSigma = _posSquareSigma; - borderType = 
_borderType; - - *pOk = true; - } - ~ipp_bilateralFilterParallel() {} - - virtual void operator() (const Range& range) const CV_OVERRIDE - { - if(*pOk == false) - return; - - try - { - ::ipp::IwiTile tile = ::ipp::IwiRoi(0, range.start, dst.m_size.width, range.end - range.start); - CV_INSTRUMENT_FUN_IPP(::ipp::iwiFilterBilateral, src, dst, radius, valSquareSigma, posSquareSigma, ::ipp::IwDefault(), borderType, tile); - } - catch(const ::ipp::IwException &) - { - *pOk = false; - return; - } - } -private: - ::ipp::IwiImage &src; - ::ipp::IwiImage &dst; - - int radius; - Ipp32f valSquareSigma; - Ipp32f posSquareSigma; - ::ipp::IwiBorderType borderType; - - bool *pOk; - const ipp_bilateralFilterParallel& operator= (const ipp_bilateralFilterParallel&); -}; -#endif - -static bool ipp_bilateralFilter(Mat &src, Mat &dst, int d, double sigmaColor, double sigmaSpace, int borderType) -{ -#ifdef HAVE_IPP_IW - CV_INSTRUMENT_REGION_IPP(); - - int radius = IPP_MAX(((d <= 0)?cvRound(sigmaSpace*1.5):d/2), 1); - Ipp32f valSquareSigma = (Ipp32f)((sigmaColor <= 0)?1:sigmaColor*sigmaColor); - Ipp32f posSquareSigma = (Ipp32f)((sigmaSpace <= 0)?1:sigmaSpace*sigmaSpace); - - // Acquire data and begin processing - try - { - ::ipp::IwiImage iwSrc = ippiGetImage(src); - ::ipp::IwiImage iwDst = ippiGetImage(dst); - ::ipp::IwiBorderSize borderSize(radius); - ::ipp::IwiBorderType ippBorder(ippiGetBorder(iwSrc, borderType, borderSize)); - if(!ippBorder) - return false; - - const int threads = ippiSuggestThreadsNum(iwDst, 2); - if(IPP_BILATERAL_PARALLEL && threads > 1) { - bool ok = true; - Range range(0, (int)iwDst.m_size.height); - ipp_bilateralFilterParallel invoker(iwSrc, iwDst, radius, valSquareSigma, posSquareSigma, ippBorder, &ok); - if(!ok) - return false; - - parallel_for_(range, invoker, threads*4); - - if(!ok) - return false; - } else { - CV_INSTRUMENT_FUN_IPP(::ipp::iwiFilterBilateral, iwSrc, iwDst, radius, valSquareSigma, posSquareSigma, ::ipp::IwDefault(), ippBorder); - } - } - 
catch (const ::ipp::IwException &) - { - return false; - } - return true; -#else - CV_UNUSED(src); CV_UNUSED(dst); CV_UNUSED(d); CV_UNUSED(sigmaColor); CV_UNUSED(sigmaSpace); CV_UNUSED(borderType); - return false; -#endif -} -#endif - -} - -void cv::bilateralFilter( InputArray _src, OutputArray _dst, int d, - double sigmaColor, double sigmaSpace, - int borderType ) -{ - CV_INSTRUMENT_REGION(); - - _dst.create( _src.size(), _src.type() ); - - CV_OCL_RUN(_src.dims() <= 2 && _dst.isUMat(), - ocl_bilateralFilter_8u(_src, _dst, d, sigmaColor, sigmaSpace, borderType)) - - Mat src = _src.getMat(), dst = _dst.getMat(); - - CV_IPP_RUN_FAST(ipp_bilateralFilter(src, dst, d, sigmaColor, sigmaSpace, borderType)); - - if( src.depth() == CV_8U ) - bilateralFilter_8u( src, dst, d, sigmaColor, sigmaSpace, borderType ); - else if( src.depth() == CV_32F ) - bilateralFilter_32f( src, dst, d, sigmaColor, sigmaSpace, borderType ); - else - CV_Error( CV_StsUnsupportedFormat, - "Bilateral filtering is only implemented for 8u and 32f images" ); -} - ////////////////////////////////////////////////////////////////////////////////////////// CV_IMPL void diff --git a/modules/python/CMakeLists.txt b/modules/python/CMakeLists.txt index bcaa7d957a..27874283e7 100644 --- a/modules/python/CMakeLists.txt +++ b/modules/python/CMakeLists.txt @@ -3,11 +3,10 @@ # ---------------------------------------------------------------------------- if(DEFINED OPENCV_INITIAL_PASS) # OpenCV build -add_subdirectory(bindings) - if(ANDROID OR APPLE_FRAMEWORK OR WINRT) - set(__disable_python2 ON) - set(__disable_python3 ON) + ocv_module_disable_(python2) + ocv_module_disable_(python3) + return() elseif(BUILD_opencv_world OR (WIN32 AND CMAKE_BUILD_TYPE STREQUAL "Debug")) if(NOT DEFINED BUILD_opencv_python2) set(__disable_python2 ON) @@ -17,6 +16,12 @@ elseif(BUILD_opencv_world OR (WIN32 AND CMAKE_BUILD_TYPE STREQUAL "Debug")) endif() endif() +add_subdirectory(bindings) + +if(NOT OPENCV_SKIP_PYTHON_LOADER) + 
include("./python_loader.cmake") +endif() + if(__disable_python2) ocv_module_disable_(python2) endif() diff --git a/modules/python/bindings/CMakeLists.txt b/modules/python/bindings/CMakeLists.txt index dd9caacbf8..4e777e9043 100644 --- a/modules/python/bindings/CMakeLists.txt +++ b/modules/python/bindings/CMakeLists.txt @@ -42,6 +42,7 @@ ocv_list_filterout(opencv_hdrs "modules/.*\\\\.inl\\\\.h*") ocv_list_filterout(opencv_hdrs "modules/.*_inl\\\\.h*") ocv_list_filterout(opencv_hdrs "modules/.*\\\\.details\\\\.h*") ocv_list_filterout(opencv_hdrs "modules/.*\\\\.private\\\\.h*") +ocv_list_filterout(opencv_hdrs "modules/.*/private\\\\.h*") ocv_list_filterout(opencv_hdrs "modules/.*/detection_based_tracker\\\\.hpp") # Conditional compilation if(NOT HAVE_CUDA) ocv_list_filterout(opencv_hdrs "modules/cuda.*") @@ -104,6 +105,7 @@ ocv_cmake_script_append_var(PYTHON_CONFIG_SCRIPT CMAKE_MODULE_LINKER_FLAGS CMAKE_INSTALL_PREFIX + OPENCV_PYTHON_INSTALL_PATH OpenCV_SOURCE_DIR diff --git a/modules/python/common.cmake b/modules/python/common.cmake index 4300abecc9..14912297ea 100644 --- a/modules/python/common.cmake +++ b/modules/python/common.cmake @@ -56,8 +56,10 @@ else() endif() endif() +ocv_update(OPENCV_PYTHON_EXTENSION_BUILD_PATH "${LIBRARY_OUTPUT_PATH}/${MODULE_INSTALL_SUBDIR}") + set_target_properties(${the_module} PROPERTIES - LIBRARY_OUTPUT_DIRECTORY "${LIBRARY_OUTPUT_PATH}/${MODULE_INSTALL_SUBDIR}" + LIBRARY_OUTPUT_DIRECTORY "${OPENCV_PYTHON_EXTENSION_BUILD_PATH}" ARCHIVE_OUTPUT_NAME ${the_module} # prevent name conflict for python2/3 outputs DEFINE_SYMBOL CVAPI_EXPORTS PREFIX "" @@ -110,33 +112,67 @@ else() set(PYTHON_INSTALL_ARCHIVE ARCHIVE DESTINATION ${${PYTHON}_PACKAGES_PATH} COMPONENT python) endif() -if(DEFINED OPENCV_${PYTHON}_INSTALL_PATH) - set(__dst "${OPENCV_${PYTHON}_INSTALL_PATH}") -elseif(NOT INSTALL_CREATE_DISTRIB AND DEFINED ${PYTHON}_PACKAGES_PATH) - set(__dst "${${PYTHON}_PACKAGES_PATH}") +ocv_assert(${PYTHON}_VERSION_MAJOR) 
+ocv_assert(${PYTHON}_VERSION_MINOR) + +set(__python_loader_subdir "") +if(NOT OPENCV_SKIP_PYTHON_LOADER) + set(__python_loader_subdir "cv2/") endif() -if(NOT __dst) - if(DEFINED ${PYTHON}_VERSION_MAJOR) - set(__ver "${${PYTHON}_VERSION_MAJOR}.${${PYTHON}_VERSION_MINOR}") - elseif(DEFINED ${PYTHON}_VERSION_STRING) - set(__ver "${${PYTHON}_VERSION_STRING}") - else() - set(__ver "unknown") - endif() - if(INSTALL_CREATE_DISTRIB) - set(__dst "python/${__ver}/${OpenCV_ARCH}") - else() - set(__dst "python/${__ver}") - endif() + +if(NOT " ${PYTHON}" STREQUAL " PYTHON" AND DEFINED OPENCV_${PYTHON}_INSTALL_PATH) + set(__python_binary_install_path "${OPENCV_${PYTHON}_INSTALL_PATH}") +else() + ocv_assert(DEFINED OPENCV_PYTHON_INSTALL_PATH) + set(__python_binary_install_path "${OPENCV_PYTHON_INSTALL_PATH}/${__python_loader_subdir}python-${${PYTHON}_VERSION_MAJOR}.${${PYTHON}_VERSION_MINOR}") endif() install(TARGETS ${the_module} ${PYTHON_INSTALL_CONFIGURATIONS} - RUNTIME DESTINATION "${__dst}" COMPONENT python - LIBRARY DESTINATION "${__dst}" COMPONENT python + RUNTIME DESTINATION "${__python_binary_install_path}" COMPONENT python + LIBRARY DESTINATION "${__python_binary_install_path}" COMPONENT python ${PYTHON_INSTALL_ARCHIVE} ) +if(NOT OPENCV_SKIP_PYTHON_LOADER) + ocv_assert(DEFINED OPENCV_PYTHON_INSTALL_PATH) + if(OpenCV_FOUND) + set(__loader_path "${OpenCV_BINARY_DIR}/python_loader") + else() + set(__loader_path "${CMAKE_BINARY_DIR}/python_loader") + endif() + + set(__python_loader_install_tmp_path "${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/install/python_loader/") + if(IS_ABSOLUTE "${OPENCV_PYTHON_INSTALL_PATH}") + set(OpenCV_PYTHON_INSTALL_PATH_RELATIVE_CONFIGCMAKE "${CMAKE_INSTALL_PREFIX}/") + set(CMAKE_PYTHON_EXTENSION_INSTALL_PATH_BASE "'${CMAKE_INSTALL_PREFIX}'") + else() + file(RELATIVE_PATH OpenCV_PYTHON_INSTALL_PATH_RELATIVE_CONFIGCMAKE "${CMAKE_INSTALL_PREFIX}/${OPENCV_PYTHON_INSTALL_PATH}/cv2" ${CMAKE_INSTALL_PREFIX}) + 
set(CMAKE_PYTHON_EXTENSION_INSTALL_PATH_BASE "os.path.join(LOADER_DIR, '${OpenCV_PYTHON_INSTALL_PATH_RELATIVE_CONFIGCMAKE}')") + endif() + + if(DEFINED ${PYTHON}_VERSION_MINOR) + set(__target_config "config-${${PYTHON}_VERSION_MAJOR}.${${PYTHON}_VERSION_MINOR}.py") + else() + set(__target_config "config-${${PYTHON}_VERSION_MAJOR}.py") + endif() + + if(CMAKE_GENERATOR MATCHES "Visual Studio") + set(CMAKE_PYTHON_EXTENSION_PATH "'${OPENCV_PYTHON_EXTENSION_BUILD_PATH}/Release'") # TODO: CMAKE_BUILD_TYPE is not defined + else() + set(CMAKE_PYTHON_EXTENSION_PATH "'${OPENCV_PYTHON_EXTENSION_BUILD_PATH}'") + endif() + configure_file("${PYTHON_SOURCE_DIR}/package/template/config-x.y.py.in" "${__loader_path}/cv2/${__target_config}" @ONLY) + + if(IS_ABSOLUTE __python_binary_install_path) + set(CMAKE_PYTHON_EXTENSION_PATH "'${__python_binary_install_path}'") + else() + set(CMAKE_PYTHON_EXTENSION_PATH "os.path.join(${CMAKE_PYTHON_EXTENSION_INSTALL_PATH_BASE}, '${__python_binary_install_path}')") + endif() + configure_file("${PYTHON_SOURCE_DIR}/package/template/config-x.y.py.in" "${__python_loader_install_tmp_path}/cv2/${__target_config}" @ONLY) + install(FILES "${__python_loader_install_tmp_path}/cv2/${__target_config}" DESTINATION "${OPENCV_PYTHON_INSTALL_PATH}/cv2/" COMPONENT python) +endif() # NOT OPENCV_SKIP_PYTHON_LOADER + unset(PYTHON_SRC_DIR) unset(PYTHON_CVPY_PROCESS) unset(CVPY_SUFFIX) diff --git a/modules/python/package/.gitignore b/modules/python/package/.gitignore new file mode 100644 index 0000000000..6030dc226d --- /dev/null +++ b/modules/python/package/.gitignore @@ -0,0 +1,4 @@ +__pycache__ +*.pyc +*.egg-info +*dist diff --git a/modules/python/package/cv2/__init__.py b/modules/python/package/cv2/__init__.py new file mode 100644 index 0000000000..b176c0d954 --- /dev/null +++ b/modules/python/package/cv2/__init__.py @@ -0,0 +1,89 @@ +''' +OpenCV Python binary extension loader +''' +import os +import sys + +try: + import numpy + import numpy.core.multiarray +except 
ImportError: + print('OpenCV bindings requires "numpy" package.') + print('Install it via command:') + print(' pip install numpy') + raise + +# TODO +# is_x64 = sys.maxsize > 2**32 + +def bootstrap(): + import sys + if hasattr(sys, 'OpenCV_LOADER'): + print(sys.path) + raise ImportError('ERROR: recursion is detected during loading of "cv2" binary extensions. Check OpenCV installation.') + sys.OpenCV_LOADER = True + + DEBUG = False + if hasattr(sys, 'OpenCV_LOADER_DEBUG'): + DEBUG = True + + import platform + if DEBUG: print('OpenCV loader: os.name="{}" platform.system()="{}"'.format(os.name, str(platform.system()))) + + LOADER_DIR=os.path.dirname(os.path.abspath(__file__)) + + PYTHON_EXTENSIONS_PATHS = [] + BINARIES_PATHS = [] + + g_vars = globals() + l_vars = locals() + + if sys.version_info[:2] < (3, 0): + from cv2.load_config_py2 import exec_file_wrapper + else: + from . load_config_py3 import exec_file_wrapper + + def load_first_config(fnames, required=True): + for fname in fnames: + fpath = os.path.join(LOADER_DIR, fname) + if not os.path.exists(fpath): + if DEBUG: print('OpenCV loader: config not found, skip: {}'.format(fpath)) + continue + if DEBUG: print('OpenCV loader: loading config: {}'.format(fpath)) + exec_file_wrapper(fpath, g_vars, l_vars) + return True + if required: + raise ImportError('OpenCV loader: missing configuration file: {}. 
Check OpenCV installation.'.format(fnames)) + + load_first_config(['config.py'], True) + load_first_config([ + 'config-{}.{}.py'.format(sys.version_info[0], sys.version_info[1]), + 'config-{}.py'.format(sys.version_info[0]) + ], True) + + if DEBUG: print('OpenCV loader: PYTHON_EXTENSIONS_PATHS={}'.format(str(l_vars['PYTHON_EXTENSIONS_PATHS']))) + if DEBUG: print('OpenCV loader: BINARIES_PATHS={}'.format(str(l_vars['BINARIES_PATHS']))) + + for p in reversed(l_vars['PYTHON_EXTENSIONS_PATHS']): + sys.path.insert(0, p) + + if os.name == 'nt': + os.environ['PATH'] = ';'.join(l_vars['BINARIES_PATHS']) + ';' + os.environ.get('PATH', '') + if DEBUG: print('OpenCV loader: PATH={}'.format(str(os.environ['PATH']))) + else: + # amending of LD_LIBRARY_PATH works for sub-processes only + os.environ['LD_LIBRARY_PATH'] = ':'.join(l_vars['BINARIES_PATHS']) + ':' + os.environ.get('LD_LIBRARY_PATH', '') + + if DEBUG: print('OpenCV loader: replacing cv2 module') + del sys.modules['cv2'] + import cv2 + + try: + import sys + del sys.OpenCV_LOADER + except: + pass + + if DEBUG: print('OpenCV loader: DONE') + +bootstrap() diff --git a/modules/python/package/cv2/load_config_py2.py b/modules/python/package/cv2/load_config_py2.py new file mode 100644 index 0000000000..07fbae9f7a --- /dev/null +++ b/modules/python/package/cv2/load_config_py2.py @@ -0,0 +1,6 @@ +# flake8: noqa +import sys + +if sys.version_info[:2] < (3, 0): + def exec_file_wrapper(fpath, g_vars, l_vars): + execfile(fpath, g_vars, l_vars) diff --git a/modules/python/package/cv2/load_config_py3.py b/modules/python/package/cv2/load_config_py3.py new file mode 100644 index 0000000000..6f3b21ab86 --- /dev/null +++ b/modules/python/package/cv2/load_config_py3.py @@ -0,0 +1,9 @@ +# flake8: noqa +import os +import sys + +if sys.version_info[:2] >= (3, 0): + def exec_file_wrapper(fpath, g_vars, l_vars): + with open(fpath) as f: + code = compile(f.read(), os.path.basename(fpath), 'exec') + exec(code, g_vars, l_vars) diff --git 
a/modules/python/package/setup.py b/modules/python/package/setup.py new file mode 100644 index 0000000000..1c0637c81a --- /dev/null +++ b/modules/python/package/setup.py @@ -0,0 +1,57 @@ +import os +import sys +import platform +import setuptools + +SCRIPT_DIR=os.path.dirname(os.path.abspath(__file__)) + +def main(): + os.chdir(SCRIPT_DIR) + + package_name = 'opencv' + package_version = os.environ.get('OPENCV_VERSION', '4.0.0') # TODO + + long_description = 'Open Source Computer Vision Library Python bindings' # TODO + + setuptools.setup( + name=package_name, + version=package_version, + url='https://github.com/opencv/opencv', + license='BSD', + description='OpenCV python bindings', + long_description=long_description, + long_description_content_type="text/markdown", + packages=setuptools.find_packages(), + maintainer="OpenCV Team", + install_requires="numpy", + classifiers=[ + 'Development Status :: 5 - Production/Stable', + 'Environment :: Console', + 'Intended Audience :: Developers', + 'Intended Audience :: Education', + 'Intended Audience :: Information Technology', + 'Intended Audience :: Science/Research', + 'License :: BSD License', + 'Operating System :: MacOS', + 'Operating System :: Microsoft :: Windows', + 'Operating System :: POSIX', + 'Operating System :: Unix', + 'Programming Language :: Python', + 'Programming Language :: Python :: 2', + 'Programming Language :: Python :: 2.7', + 'Programming Language :: Python :: 3', + 'Programming Language :: Python :: 3.4', + 'Programming Language :: Python :: 3.5', + 'Programming Language :: Python :: 3.6', + 'Programming Language :: Python :: 3.7', + 'Programming Language :: C++', + 'Programming Language :: Python :: Implementation :: CPython', + 'Topic :: Scientific/Engineering', + 'Topic :: Scientific/Engineering :: Image Recognition', + 'Topic :: Software Development', + 'Topic :: Software Development :: Libraries', + ], + ) + +if __name__ == '__main__': + main() diff --git 
a/modules/python/package/template/config-x.y.py.in b/modules/python/package/template/config-x.y.py.in new file mode 100644 index 0000000000..d2fc72db41 --- /dev/null +++ b/modules/python/package/template/config-x.y.py.in @@ -0,0 +1,3 @@ +PYTHON_EXTENSIONS_PATHS = [ + @CMAKE_PYTHON_EXTENSION_PATH@ +] + PYTHON_EXTENSIONS_PATHS diff --git a/modules/python/package/template/config.py.in b/modules/python/package/template/config.py.in new file mode 100644 index 0000000000..5fc444f175 --- /dev/null +++ b/modules/python/package/template/config.py.in @@ -0,0 +1,3 @@ +BINARIES_PATHS = [ + @CMAKE_PYTHON_BINARIES_PATH@ +] + BINARIES_PATHS diff --git a/modules/python/python_loader.cmake b/modules/python/python_loader.cmake new file mode 100644 index 0000000000..59ce8e5d69 --- /dev/null +++ b/modules/python/python_loader.cmake @@ -0,0 +1,54 @@ +ocv_assert(NOT OPENCV_SKIP_PYTHON_LOADER) + +set(PYTHON_SOURCE_DIR "${CMAKE_CURRENT_LIST_DIR}") + +ocv_assert(DEFINED OPENCV_PYTHON_INSTALL_PATH) +if(OpenCV_FOUND) + set(__loader_path "${OpenCV_BINARY_DIR}/python_loader") +else() + set(__loader_path "${CMAKE_BINARY_DIR}/python_loader") +endif() + +set(__python_loader_install_tmp_path "${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/install/python_loader/") +if(IS_ABSOLUTE "${OPENCV_PYTHON_INSTALL_PATH}") + set(OpenCV_PYTHON_INSTALL_PATH_RELATIVE_CONFIGCMAKE "${CMAKE_INSTALL_PREFIX}/") + set(CMAKE_PYTHON_EXTENSION_INSTALL_PATH_BASE "'${CMAKE_INSTALL_PREFIX}'") +else() + file(RELATIVE_PATH OpenCV_PYTHON_INSTALL_PATH_RELATIVE_CONFIGCMAKE "${CMAKE_INSTALL_PREFIX}/${OPENCV_PYTHON_INSTALL_PATH}/cv2" ${CMAKE_INSTALL_PREFIX}) + set(CMAKE_PYTHON_EXTENSION_INSTALL_PATH_BASE "os.path.join(LOADER_DIR, '${OpenCV_PYTHON_INSTALL_PATH_RELATIVE_CONFIGCMAKE}')") +endif() + +set(PYTHON_LOADER_FILES + "setup.py" "cv2/__init__.py" + "cv2/load_config_py2.py" "cv2/load_config_py3.py" +) +foreach(fname ${PYTHON_LOADER_FILES}) + get_filename_component(__dir "${fname}" DIRECTORY) + file(COPY 
"${PYTHON_SOURCE_DIR}/package/${fname}" DESTINATION "${__loader_path}/${__dir}") + install(FILES "${PYTHON_SOURCE_DIR}/package/${fname}" DESTINATION "${OPENCV_PYTHON_INSTALL_PATH}/${__dir}" COMPONENT python) +endforeach() + +if(NOT OpenCV_FOUND) # Ignore "standalone" builds of Python bindings + if(WIN32) + if(CMAKE_GENERATOR MATCHES "Visual Studio") + list(APPEND CMAKE_PYTHON_BINARIES_PATH "'${EXECUTABLE_OUTPUT_PATH}/Release'") # TODO: CMAKE_BUILD_TYPE is not defined + else() + list(APPEND CMAKE_PYTHON_BINARIES_PATH "'${EXECUTABLE_OUTPUT_PATH}'") + endif() + else() + list(APPEND CMAKE_PYTHON_BINARIES_PATH "'${LIBRARY_OUTPUT_PATH}'") + endif() + string(REPLACE ";" ",\n " CMAKE_PYTHON_BINARIES_PATH "${CMAKE_PYTHON_BINARIES_PATH}") + configure_file("${PYTHON_SOURCE_DIR}/package/template/config.py.in" "${__loader_path}/cv2/config.py" @ONLY) + + if(WIN32) + list(APPEND CMAKE_PYTHON_BINARIES_INSTALL_PATH "os.path.join(${CMAKE_PYTHON_EXTENSION_INSTALL_PATH_BASE}, '${OPENCV_BIN_INSTALL_PATH}')") + else() + list(APPEND CMAKE_PYTHON_BINARIES_INSTALL_PATH "os.path.join(${CMAKE_PYTHON_EXTENSION_INSTALL_PATH_BASE}, '${OPENCV_LIB_INSTALL_PATH}')") + endif() + string(REPLACE ";" ",\n " CMAKE_PYTHON_BINARIES_PATH "${CMAKE_PYTHON_BINARIES_INSTALL_PATH}") + configure_file("${PYTHON_SOURCE_DIR}/package/template/config.py.in" "${__python_loader_install_tmp_path}/cv2/config.py" @ONLY) + install(FILES "${__python_loader_install_tmp_path}/cv2/config.py" DESTINATION "${OPENCV_PYTHON_INSTALL_PATH}/cv2/" COMPONENT python) + + message(STATUS "OpenCV Python: during development append to PYTHONPATH: ${__loader_path}") +endif() diff --git a/modules/python/src2/hdr_parser.py b/modules/python/src2/hdr_parser.py index f5364fc46a..9d339afebd 100755 --- a/modules/python/src2/hdr_parser.py +++ b/modules/python/src2/hdr_parser.py @@ -432,7 +432,7 @@ class CppHeaderParser(object): # it means class methods, not instance methods decl_str = self.batch_replace(decl_str, [("static inline", ""), ("inline", 
""),\ ("CV_EXPORTS_W", ""), ("CV_EXPORTS", ""), ("CV_CDECL", ""), ("CV_WRAP ", " "), ("CV_INLINE", ""), - ("CV_DEPRECATED", "")]).strip() + ("CV_DEPRECATED", ""), ("CV_DEPRECATED_EXTERNAL", "")]).strip() if decl_str.strip().startswith('virtual'): diff --git a/modules/python/test/test_misc.py b/modules/python/test/test_misc.py index e84de50247..892215b9a1 100644 --- a/modules/python/test/test_misc.py +++ b/modules/python/test/test_misc.py @@ -84,6 +84,23 @@ class Arguments(NewOpenCVTests): self.assertEqual(res4, "InputArrayOfArrays: empty()=false kind=0x00050000 flags=0x01050000 total(-1)=3 dims(-1)=1 size(-1)=3x1 type(0)=CV_32FC2 dims(0)=2 size(0)=3x1 type(0)=CV_32FC2") +class SamplesFindFile(NewOpenCVTests): + + def test_ExistedFile(self): + res = cv.samples.findFile('lena.jpg', False) + self.assertNotEqual(res, '') + + def test_MissingFile(self): + res = cv.samples.findFile('non_existed.file', False) + self.assertEqual(res, '') + + def test_MissingFileException(self): + try: + res = cv.samples.findFile('non_existed.file', True) + self.assertEqual("Dead code", 0) + except cv.error as _e: + pass + if __name__ == '__main__': NewOpenCVTests.bootstrap() diff --git a/platforms/android/build_sdk.py b/platforms/android/build_sdk.py index df3503e09f..4a5b8b19d6 100755 --- a/platforms/android/build_sdk.py +++ b/platforms/android/build_sdk.py @@ -210,7 +210,7 @@ class Builder: # Add extra data apkxmldest = check_dir(os.path.join(apkdest, "res", "xml"), create=True) apklibdest = check_dir(os.path.join(apkdest, "libs", abi.name), create=True) - for ver, d in self.extra_packs + [("3.4.3", os.path.join(self.libdest, "lib"))]: + for ver, d in self.extra_packs + [("3.4.4", os.path.join(self.libdest, "lib"))]: r = ET.Element("library", attrib={"version": ver}) log.info("Adding libraries from %s", d) diff --git a/platforms/android/service/engine/AndroidManifest.xml b/platforms/android/service/engine/AndroidManifest.xml index 6b78d5cfa7..04f9ca568a 100644 --- 
a/platforms/android/service/engine/AndroidManifest.xml +++ b/platforms/android/service/engine/AndroidManifest.xml @@ -1,8 +1,8 @@ + android:versionCode="344@ANDROID_PLATFORM_ID@" + android:versionName="3.44"> diff --git a/platforms/android/service/engine/src/org/opencv/engine/OpenCVEngineService.java b/platforms/android/service/engine/src/org/opencv/engine/OpenCVEngineService.java index 5ecc107197..1b810029d4 100644 --- a/platforms/android/service/engine/src/org/opencv/engine/OpenCVEngineService.java +++ b/platforms/android/service/engine/src/org/opencv/engine/OpenCVEngineService.java @@ -137,7 +137,7 @@ public class OpenCVEngineService extends Service { @Override public int getEngineVersion() throws RemoteException { - int version = 3430; + int version = 3440; try { version = getPackageManager().getPackageInfo(getPackageName(), 0).versionCode; } catch (NameNotFoundException e) { diff --git a/platforms/android/service/readme.txt b/platforms/android/service/readme.txt index 144869052d..c9c2c66096 100644 --- a/platforms/android/service/readme.txt +++ b/platforms/android/service/readme.txt @@ -12,7 +12,7 @@ manually using adb tool: adb install /apk/OpenCV__Manager__.apk -Example: OpenCV_3.4.3-dev_Manager_3.43_armeabi-v7a.apk +Example: OpenCV_3.4.4-dev_Manager_3.44_armeabi-v7a.apk Use the list of platforms below to determine proper OpenCV Manager package for your device: diff --git a/platforms/maven/opencv-it/pom.xml b/platforms/maven/opencv-it/pom.xml index f6d495a75b..d039d47fa7 100644 --- a/platforms/maven/opencv-it/pom.xml +++ b/platforms/maven/opencv-it/pom.xml @@ -4,7 +4,7 @@ org.opencv opencv-parent - 3.4.3 + 4.0.0 org.opencv opencv-it diff --git a/platforms/maven/opencv/pom.xml b/platforms/maven/opencv/pom.xml index f0e1e43c6c..65d72af83a 100644 --- a/platforms/maven/opencv/pom.xml +++ b/platforms/maven/opencv/pom.xml @@ -4,7 +4,7 @@ org.opencv opencv-parent - 3.4.3 + 4.0.0 org.opencv opencv diff --git a/platforms/maven/pom.xml b/platforms/maven/pom.xml index 
fe4c22db7a..2488a025fa 100644 --- a/platforms/maven/pom.xml +++ b/platforms/maven/pom.xml @@ -3,7 +3,7 @@ 4.0.0 org.opencv opencv-parent - 3.4.3 + 4.0.0 pom OpenCV Parent POM diff --git a/platforms/scripts/valgrind.supp b/platforms/scripts/valgrind.supp index 074c2013c5..7a4341467f 100644 --- a/platforms/scripts/valgrind.supp +++ b/platforms/scripts/valgrind.supp @@ -134,6 +134,13 @@ fun:_ZNK2cv7TLSDataINS_11CoreTLSDataEE18createDataInstanceEv } +{ + OpenCV-UMatDataAutoLockerTLS + Memcheck:Leak + ... + fun:_ZN2cvL21getUMatDataAutoLockerEv +} + { OpenCV-haveOpenCL Memcheck:Leak diff --git a/samples/_winpack_build_sample.cmd b/samples/_winpack_build_sample.cmd index 84a3b56428..f48143d95a 100644 --- a/samples/_winpack_build_sample.cmd +++ b/samples/_winpack_build_sample.cmd @@ -7,7 +7,7 @@ :: - MSVS 2015/2017 :: (tools are searched on default paths or environment should be pre-configured) @echo off -setlocal ENABLEEXTENSIONS ENABLEDELAYEDEXPANSION +setlocal set SCRIPTDIR=%~dp0 if NOT exist "%SCRIPTDIR%\..\..\build" ( @@ -28,20 +28,20 @@ if NOT "%~x1" == ".cpp" ( goto die ) set SRC_FILENAME=%~dpnx1 -echo SRC_FILENAME=!SRC_FILENAME! -call :dirname "!SRC_FILENAME!" SRC_DIR -echo SRC_DIR=!SRC_DIR! +echo SRC_FILENAME=%SRC_FILENAME% +call :dirname "%SRC_FILENAME%" SRC_DIR +echo SRC_DIR=%SRC_DIR% set "SRC_NAME=%~n1" -echo SRC_NAME=!SRC_NAME! +echo SRC_NAME=%SRC_NAME% echo ================================================================================ :: Path to FFMPEG binary files -set "PATH=!PATH!;!SCRIPTDIR!\..\..\build\bin\" +set "PATH=%PATH%;%SCRIPTDIR%\..\..\build\bin\" :: Detect compiler cl /? >NUL 2>NUL NUL 2>NUL NUL 2>NUL -if !ERRORLEVEL! EQU 0 ( - set CMAKE_FOUND=1 -) else ( - if exist "C:\Program Files\CMake\bin" ( - set "PATH=!PATH!;C:\Program Files\CMake\bin" - cmake --version >NUL 2>NUL - if !ERRORLEVEL! EQU 0 ( - set CMAKE_FOUND=1 - ) - ) -) -if NOT DEFINED CMAKE_FOUND ( - set "MSG=CMake is required to build OpenCV samples. 
Download it from here: https://cmake.org/download/ and install into 'C:\Program Files\CMake'" - goto die -) else ( - call :execute cmake --version - echo CMake is detected -) +if %ERRORLEVEL% EQU 0 GOTO :CMAKE_FOUND + +if NOT exist "C:\Program Files\CMake\bin" GOTO CMAKE_NOT_FOUND +set "PATH=%PATH%;C:\Program Files\CMake\bin" +cmake --version >NUL 2>NUL +if %ERRORLEVEL% EQU 0 GOTO :CMAKE_FOUND + +:CMAKE_NOT_FOUND +set "MSG=CMake is required to build OpenCV samples. Download it from here: https://cmake.org/download/ and install into 'C:\Program Files\CMake'" +goto die + +:CMAKE_FOUND +set CMAKE_FOUND=1 +call :execute cmake --version +echo CMake is detected :: Detect available MSVS version if NOT DEFINED VisualStudioVersion ( set "MSG=Can't determine MSVS version. 'VisualStudioVersion' is not defined" goto die ) -if "!VisualStudioVersion!" == "14.0" ( +if "%VisualStudioVersion%" == "14.0" ( set CMAKE_GENERATOR="Visual Studio 14 Win64" - set "PATH=!PATH!;!SCRIPTDIR!\..\..\build\x64\vc14\bin\" + set "PATH=%PATH%;%SCRIPTDIR%\..\..\build\x64\vc14\bin\" ) else ( - if "!VisualStudioVersion!" == "15.0" ( + if "%VisualStudioVersion%" == "15.0" ( set CMAKE_GENERATOR="Visual Studio 15 Win64" - set "PATH=!PATH!;!SCRIPTDIR!\..\..\build\x64\vc15\bin\" + set "PATH=%PATH%;%SCRIPTDIR%\..\..\build\x64\vc15\bin\" ) else ( - set "MSG=Unsupported MSVS version. VisualStudioVersion=!VisualStudioVersion!" + set "MSG=Unsupported MSVS version. VisualStudioVersion=%VisualStudioVersion%" goto die ) ) -set "BUILD_DIR=!SRC_DIR!\build_!SRC_NAME!" +set "BUILD_DIR=%SRC_DIR%\build_%SRC_NAME%" call :set_title Create build directory -if NOT exist "!BUILD_DIR!" ( call :execute md "!BUILD_DIR!" ) -PUSHD "!BUILD_DIR!" 
-if NOT exist "!BUILD_DIR!/sample" ( call :execute md "!BUILD_DIR!/sample" ) -call :execute copy /Y "!SCRIPTDIR!/CMakeLists.example.in" "!BUILD_DIR!/sample/CMakeLists.txt" +if NOT exist "%BUILD_DIR%" ( call :execute md "%BUILD_DIR%" ) +PUSHD "%BUILD_DIR%" +if NOT exist "%BUILD_DIR%/sample" ( call :execute md "%BUILD_DIR%/sample" ) +call :execute copy /Y "%SCRIPTDIR%/CMakeLists.example.in" "%BUILD_DIR%/sample/CMakeLists.txt" call :set_title Configuring via CMake -call :execute cmake -G!CMAKE_GENERATOR! "!BUILD_DIR!\sample" -DEXAMPLE_NAME=!SRC_NAME! "-DEXAMPLE_FILE=!SRC_FILENAME!" "-DOpenCV_DIR=!SCRIPTDIR!\..\..\build" -if !ERRORLEVEL! NEQ 0 ( - set "MSG=CMake configuration step failed: !BUILD_DIR!" +call :execute cmake -G%CMAKE_GENERATOR% "%BUILD_DIR%\sample" -DEXAMPLE_NAME=%SRC_NAME% "-DEXAMPLE_FILE=%SRC_FILENAME%" "-DOpenCV_DIR=%SCRIPTDIR%\..\..\build" +if %ERRORLEVEL% NEQ 0 ( + set "MSG=CMake configuration step failed: %BUILD_DIR%" goto die ) call :set_title Build sample project via CMake call :execute cmake --build . --config Release -if !ERRORLEVEL! NEQ 0 ( - set "MSG=Build step failed: !BUILD_DIR!" +if %ERRORLEVEL% NEQ 0 ( + set "MSG=Build step failed: %BUILD_DIR%" goto die ) -call :set_title Launch !SRC_NAME! -if NOT exist "!BUILD_DIR!\Release\!SRC_NAME!.exe" ( - echo. "ERROR: Can't find executable file (build seems broken): !SRC_NAME!.exe" +call :set_title Launch %SRC_NAME% +if NOT exist "%BUILD_DIR%\Release\%SRC_NAME%.exe" ( + echo. 
"ERROR: Can't find executable file (build seems broken): %SRC_NAME%.exe" ) else ( - cd "!BUILD_DIR!\Release" - call :execute "!SRC_NAME!.exe" --help + cd "%BUILD_DIR%\Release" + call :execute "%SRC_NAME%.exe" --help echo ================================================================================ - echo ** Type '!SRC_NAME!.exe' to run sample application - echo ** Type '!SRC_NAME!.exe --help' to get list of available options (if available) - echo ** Type 'start ..\!SRC_NAME!.sln' to launch MSVS IDE + echo ** Type '%SRC_NAME%.exe' to run sample application + echo ** Type '%SRC_NAME%.exe --help' to get list of available options (if available) + echo ** Type 'start ..\%SRC_NAME%.sln' to launch MSVS IDE echo ** Type 'cmake --build .. --config Release' to rebuild sample echo ** Type 'exit' to exit from interactive shell and open the build directory echo ================================================================================ ) -call :set_title Hands-on: !SRC_NAME! -cmd /k echo Current directory: !CD! +call :set_title Hands-on: %SRC_NAME% +cmd /k echo Current directory: %CD% -call :set_title Done: !SRC_NAME! +call :set_title Done: %SRC_NAME% echo Opening build directory with project files... -explorer "!BUILD_DIR!" +explorer "%BUILD_DIR%" POPD -echo Done! 
+echo Done% pause exit /B 0 @@ -166,7 +163,7 @@ exit /B 0 :execute echo ================================================================================= - setlocal enableextensions disabledelayedexpansion + setlocal echo %* call %* endlocal diff --git a/samples/_winpack_run_python_sample.cmd b/samples/_winpack_run_python_sample.cmd new file mode 100644 index 0000000000..b7075e322a --- /dev/null +++ b/samples/_winpack_run_python_sample.cmd @@ -0,0 +1,124 @@ +@ECHO OFF +SETLOCAL + +SET SCRIPT_DIR=%~dp0 +IF NOT EXIST "%SCRIPT_DIR%\..\..\build\setup_vars_opencv4.cmd" ( + ECHO ERROR: OpenCV Winpack installation is required + pause + exit +) + +:: Detect Python binary +python -V 2>nul +IF %ERRORLEVEL% EQU 0 ( + SET PYTHON=python + GOTO :PYTHON_FOUND +) + +CALL :QUERY_PYTHON 3.7 +IF %ERRORLEVEL% EQU 0 GOTO :PYTHON_FOUND +CALL :QUERY_PYTHON 3.6 +IF %ERRORLEVEL% EQU 0 GOTO :PYTHON_FOUND +CALL :QUERY_PYTHON 3.5 +IF %ERRORLEVEL% EQU 0 GOTO :PYTHON_FOUND +CALL :QUERY_PYTHON 3.4 +IF %ERRORLEVEL% EQU 0 GOTO :PYTHON_FOUND +CALL :QUERY_PYTHON 2.7 +IF %ERRORLEVEL% EQU 0 GOTO :PYTHON_FOUND +GOTO :PYTHON_NOT_FOUND + +:QUERY_PYTHON +SETLOCAL +SET PY_VERSION=%1 +SET PYTHON_DIR= +CALL :regquery "HKCU\SOFTWARE\Python\PythonCore\%PY_VERSION%\InstallPath" PYTHON_DIR +IF EXIST "%PYTHON_DIR%\python.exe" ( + SET "PYTHON=%PYTHON_DIR%\python.exe" + GOTO :QUERY_PYTHON_FOUND +) +CALL :regquery "HKLM\SOFTWARE\Python\PythonCore\%PY_VERSION%\InstallPath" PYTHON_DIR +IF EXIST "%PYTHON_DIR%\python.exe" ( + SET "PYTHON=%PYTHON_DIR%\python.exe" + GOTO :QUERY_PYTHON_FOUND +) + +::echo Python %PY_VERSION% is not detected +ENDLOCAL +EXIT /B 1 + +:QUERY_PYTHON_FOUND +ECHO Found Python %PY_VERSION% from Windows Registry: %PYTHON% +ENDLOCAL & SET PYTHON=%PYTHON% +EXIT /B 0 + +IF exist C:\Python27-x64\python.exe ( + SET PYTHON=C:\Python27-x64\python.exe + GOTO :PYTHON_FOUND +) +IF exist C:\Python27\python.exe ( + SET PYTHON=C:\Python27\python.exe + GOTO :PYTHON_FOUND +) + +:PYTHON_NOT_FOUND +ECHO ERROR: 
Python not found +IF NOT DEFINED OPENCV_BATCH_MODE ( pause ) +EXIT /B + +:PYTHON_FOUND +ECHO Using Python: %PYTHON% + +:: Don't generate unnecessary .pyc cache files +SET PYTHONDONTWRITEBYTECODE=1 + +IF [%1]==[] goto rundemo + +set SRC_FILENAME=%~dpnx1 +echo SRC_FILENAME=%SRC_FILENAME% +call :dirname "%SRC_FILENAME%" SRC_DIR +call :dirname "%PYTHON%" PYTHON_DIR +PUSHD %SRC_DIR% +CALL "%SCRIPT_DIR%\..\..\build\setup_vars_opencv4.cmd" +ECHO Run: %* +%PYTHON% %* +SET result=%errorlevel% +IF %result% NEQ 0 ( + IF NOT DEFINED OPENCV_BATCH_MODE ( + SET "PATH=%PYTHON_DIR%;%PATH%" + echo ================================================================================ + echo ** Type 'python sample_name.py' to run sample + echo ** Type 'exit' to exit from interactive shell and open the build directory + echo ================================================================================ + cmd /k echo Current directory: %CD% + ) +) +POPD +EXIT /B %result% + +:rundemo +PUSHD "%SCRIPT_DIR%\python" +CALL "%SCRIPT_DIR%\..\..\build\setup_vars_opencv4.cmd" +%PYTHON% demo.py +SET result=%errorlevel% +IF %result% NEQ 0 ( + IF NOT DEFINED OPENCV_BATCH_MODE ( pause ) +) +POPD +EXIT /B %result% + + +:dirname file resultVar + setlocal + set _dir=%~dp1 + set _dir=%_dir:~0,-1% + endlocal & set %2=%_dir% + EXIT /B 0 + +:regquery name resultVar + SETLOCAL + FOR /F "tokens=*" %%A IN ('REG QUERY "%1" /reg:64 /ve 2^>NUL ^| FIND "REG_SZ"') DO SET _val=%%A + IF "x%_val%x"=="xx" EXIT /B 1 + SET _val=%_val:*REG_SZ=% + FOR /F "tokens=*" %%A IN ("%_val%") DO SET _val=%%A + ENDLOCAL & SET %2=%_val% + EXIT /B 0 diff --git a/samples/cpp/application_trace.cpp b/samples/cpp/application_trace.cpp index fa77542dbd..4c03e48c0c 100644 --- a/samples/cpp/application_trace.cpp +++ b/samples/cpp/application_trace.cpp @@ -41,7 +41,7 @@ int main(int argc, char** argv) if (video.size() == 1 && isdigit(video[0])) capture.open(parser.get("@video")); else - capture.open(video); + 
capture.open(samples::findFileOrKeep(video)); // keep GStreamer pipelines int nframes = 0; if (capture.isOpened()) { diff --git a/samples/cpp/bgfg_segm.cpp b/samples/cpp/bgfg_segm.cpp index 9fc2780b07..0537775731 100644 --- a/samples/cpp/bgfg_segm.cpp +++ b/samples/cpp/bgfg_segm.cpp @@ -38,7 +38,10 @@ int main(int argc, const char** argv) if (file.empty()) cap.open(camera); else + { + file = samples::findFileOrKeep(file); // ignore gstreamer pipelines cap.open(file.c_str()); + } if (!cap.isOpened()) { cout << "Can not open video stream: '" << (file.empty() ? "" : file) << "'" << endl; diff --git a/samples/cpp/calibration.cpp b/samples/cpp/calibration.cpp index 5ff08d7c23..3f38c8bc38 100644 --- a/samples/cpp/calibration.cpp +++ b/samples/cpp/calibration.cpp @@ -285,12 +285,31 @@ static bool readStringList( const string& filename, vector& l ) FileStorage fs(filename, FileStorage::READ); if( !fs.isOpened() ) return false; + size_t dir_pos = filename.rfind('/'); + if (dir_pos == string::npos) + dir_pos = filename.rfind('\\'); FileNode n = fs.getFirstTopLevelNode(); if( n.type() != FileNode::SEQ ) return false; FileNodeIterator it = n.begin(), it_end = n.end(); for( ; it != it_end; ++it ) - l.push_back((string)*it); + { + string fname = (string)*it; + if (dir_pos != string::npos) + { + string fpath = samples::findFile(filename.substr(0, dir_pos + 1) + fname, false); + if (fpath.empty()) + { + fpath = samples::findFile(fname); + } + fname = fpath; + } + else + { + fname = samples::findFile(fname); + } + l.push_back(fname); + } return true; } @@ -427,10 +446,10 @@ int main( int argc, char** argv ) if( !inputFilename.empty() ) { - if( !videofile && readStringList(inputFilename, imageList) ) + if( !videofile && readStringList(samples::findFile(inputFilename), imageList) ) mode = CAPTURING; else - capture.open(inputFilename); + capture.open(samples::findFileOrKeep(inputFilename)); } else capture.open(cameraId); diff --git a/samples/cpp/cloning_demo.cpp 
b/samples/cpp/cloning_demo.cpp index 25729db296..43f6e20255 100644 --- a/samples/cpp/cloning_demo.cpp +++ b/samples/cpp/cloning_demo.cpp @@ -17,8 +17,7 @@ * The program takes as input a source and a destination image (for 1-3 methods) * and outputs the cloned image. * -* Download test images from opencv_extra folder @github. -* +* Download test images from opencv_extra repository. */ #include "opencv2/photo.hpp" @@ -27,7 +26,6 @@ #include "opencv2/highgui.hpp" #include "opencv2/core.hpp" #include -#include using namespace std; using namespace cv; @@ -35,6 +33,7 @@ using namespace cv; int main() { cout << endl; + cout << "Note: specify OPENCV_SAMPLES_DATA_PATH_HINT=/testdata/cv" << endl << endl; cout << "Cloning Module" << endl; cout << "---------------" << endl; cout << "Options: " << endl; @@ -54,9 +53,9 @@ int main() if(num == 1) { string folder = "cloning/Normal_Cloning/"; - string original_path1 = folder + "source1.png"; - string original_path2 = folder + "destination1.png"; - string original_path3 = folder + "mask.png"; + string original_path1 = samples::findFile(folder + "source1.png"); + string original_path2 = samples::findFile(folder + "destination1.png"); + string original_path3 = samples::findFile(folder + "mask.png"); Mat source = imread(original_path1, IMREAD_COLOR); Mat destination = imread(original_path2, IMREAD_COLOR); @@ -86,14 +85,14 @@ int main() seamlessClone(source, destination, mask, p, result, 1); imshow("Output",result); - imwrite(folder + "cloned.png", result); + imwrite("cloned.png", result); } else if(num == 2) { string folder = "cloning/Mixed_Cloning/"; - string original_path1 = folder + "source1.png"; - string original_path2 = folder + "destination1.png"; - string original_path3 = folder + "mask.png"; + string original_path1 = samples::findFile(folder + "source1.png"); + string original_path2 = samples::findFile(folder + "destination1.png"); + string original_path3 = samples::findFile(folder + "mask.png"); Mat source = 
imread(original_path1, IMREAD_COLOR); Mat destination = imread(original_path2, IMREAD_COLOR); @@ -123,14 +122,14 @@ int main() seamlessClone(source, destination, mask, p, result, 2); imshow("Output",result); - imwrite(folder + "cloned.png", result); + imwrite("cloned.png", result); } else if(num == 3) { string folder = "cloning/Monochrome_Transfer/"; - string original_path1 = folder + "source1.png"; - string original_path2 = folder + "destination1.png"; - string original_path3 = folder + "mask.png"; + string original_path1 = samples::findFile(folder + "source1.png"); + string original_path2 = samples::findFile(folder + "destination1.png"); + string original_path3 = samples::findFile(folder + "mask.png"); Mat source = imread(original_path1, IMREAD_COLOR); Mat destination = imread(original_path2, IMREAD_COLOR); @@ -160,13 +159,13 @@ int main() seamlessClone(source, destination, mask, p, result, 3); imshow("Output",result); - imwrite(folder + "cloned.png", result); + imwrite("cloned.png", result); } else if(num == 4) { - string folder = "cloning/Color_Change/"; - string original_path1 = folder + "source1.png"; - string original_path2 = folder + "mask.png"; + string folder = "cloning/color_change/"; + string original_path1 = samples::findFile(folder + "source1.png"); + string original_path2 = samples::findFile(folder + "mask.png"); Mat source = imread(original_path1, IMREAD_COLOR); Mat mask = imread(original_path2, IMREAD_COLOR); @@ -187,13 +186,13 @@ int main() colorChange(source, mask, result, 1.5, .5, .5); imshow("Output",result); - imwrite(folder + "cloned.png", result); + imwrite("cloned.png", result); } else if(num == 5) { string folder = "cloning/Illumination_Change/"; - string original_path1 = folder + "source1.png"; - string original_path2 = folder + "mask.png"; + string original_path1 = samples::findFile(folder + "source1.png"); + string original_path2 = samples::findFile(folder + "mask.png"); Mat source = imread(original_path1, IMREAD_COLOR); Mat mask = 
imread(original_path2, IMREAD_COLOR); @@ -214,13 +213,13 @@ int main() illuminationChange(source, mask, result, 0.2f, 0.4f); imshow("Output",result); - imwrite(folder + "cloned.png", result); + imwrite("cloned.png", result); } else if(num == 6) { string folder = "cloning/Texture_Flattening/"; - string original_path1 = folder + "source1.png"; - string original_path2 = folder + "mask.png"; + string original_path1 = samples::findFile(folder + "source1.png"); + string original_path2 = samples::findFile(folder + "mask.png"); Mat source = imread(original_path1, IMREAD_COLOR); Mat mask = imread(original_path2, IMREAD_COLOR); @@ -241,7 +240,12 @@ int main() textureFlattening(source, mask, result, 30, 45, 3); imshow("Output",result); - imwrite(folder + "cloned.png", result); + imwrite("cloned.png", result); + } + else + { + cerr << "Invalid selection: " << num << endl; + exit(1); } waitKey(0); } diff --git a/samples/cpp/cloning_gui.cpp b/samples/cpp/cloning_gui.cpp index 14a5079ac0..c4e4de7351 100644 --- a/samples/cpp/cloning_gui.cpp +++ b/samples/cpp/cloning_gui.cpp @@ -30,14 +30,12 @@ * Result: The cloned image will be displayed. 
*/ -#include #include "opencv2/photo.hpp" #include "opencv2/imgproc.hpp" #include "opencv2/imgcodecs.hpp" #include "opencv2/highgui.hpp" #include "opencv2/core.hpp" #include -#include // we're NOT "using namespace std;" here, to avoid collisions between the beta variable and std::beta in c++17 using std::cin; @@ -320,9 +318,9 @@ int main() cout << "Enter Destination Image: "; cin >> dest; - img0 = imread(src); + img0 = imread(samples::findFile(src)); - img2 = imread(dest); + img2 = imread(samples::findFile(dest)); if(img0.empty()) { @@ -370,7 +368,7 @@ int main() cout << "Blue: "; cin >> blue; - img0 = imread(src); + img0 = imread(samples::findFile(src)); if(img0.empty()) { @@ -400,7 +398,7 @@ int main() cout << "beta: "; cin >> beta; - img0 = imread(src); + img0 = imread(samples::findFile(src)); if(img0.empty()) { @@ -433,7 +431,7 @@ int main() cout << "kernel_size: "; cin >> kernel_size; - img0 = imread(src); + img0 = imread(samples::findFile(src)); if(img0.empty()) { diff --git a/samples/cpp/connected_components.cpp b/samples/cpp/connected_components.cpp index 711b0a97c8..74afb29d6c 100644 --- a/samples/cpp/connected_components.cpp +++ b/samples/cpp/connected_components.cpp @@ -35,14 +35,14 @@ static void on_trackbar(int, void*) int main( int argc, const char** argv ) { - CommandLineParser parser(argc, argv, "{@image|../data/stuff.jpg|image for converting to a grayscale}"); + CommandLineParser parser(argc, argv, "{@image|stuff.jpg|image for converting to a grayscale}"); parser.about("\nThis program demonstrates connected components and use of the trackbar\n"); parser.printMessage(); cout << "\nThe image is converted to grayscale and displayed, another image has a trackbar\n" "that controls thresholding and thereby the extracted contours which are drawn in color\n"; String inputImage = parser.get(0); - img = imread(inputImage, IMREAD_GRAYSCALE); + img = imread(samples::findFile(inputImage), IMREAD_GRAYSCALE); if(img.empty()) { diff --git 
a/samples/cpp/create_mask.cpp b/samples/cpp/create_mask.cpp index b90a7b70f2..dc54953678 100644 --- a/samples/cpp/create_mask.cpp +++ b/samples/cpp/create_mask.cpp @@ -95,7 +95,7 @@ void mouseHandler(int event, int x, int y, int, void*) int main(int argc, char **argv) { - CommandLineParser parser(argc, argv, "{@input | ../data/lena.jpg | input image}"); + CommandLineParser parser(argc, argv, "{@input | lena.jpg | input image}"); parser.about("This program demonstrates using mouse events\n"); parser.printMessage(); cout << "\n\tleft mouse button - set a point to create mask shape\n" @@ -103,13 +103,13 @@ int main(int argc, char **argv) "\tmiddle mouse button - reset\n"; String input_image = parser.get("@input"); - src = imread(input_image); + src = imread(samples::findFile(input_image)); if (src.empty()) - { + { printf("Error opening image: %s\n", input_image.c_str()); return 0; - } + } namedWindow("Source", WINDOW_AUTOSIZE); setMouseCallback("Source", mouseHandler, NULL); diff --git a/samples/cpp/dbt_face_detection.cpp b/samples/cpp/dbt_face_detection.cpp index 8e5112f121..d5707d4b7d 100644 --- a/samples/cpp/dbt_face_detection.cpp +++ b/samples/cpp/dbt_face_detection.cpp @@ -49,7 +49,7 @@ int main(int , char** ) return 1; } - std::string cascadeFrontalfilename = "../../data/lbpcascades/lbpcascade_frontalface.xml"; + std::string cascadeFrontalfilename = samples::findFile("data/lbpcascades/lbpcascade_frontalface.xml"); cv::Ptr cascade = makePtr(cascadeFrontalfilename); cv::Ptr MainDetector = makePtr(cascade); if ( cascade->empty() ) diff --git a/samples/cpp/demhist.cpp b/samples/cpp/demhist.cpp index d4a4c92042..3e416b32ad 100644 --- a/samples/cpp/demhist.cpp +++ b/samples/cpp/demhist.cpp @@ -59,12 +59,12 @@ static void updateBrightnessContrast( int /*arg*/, void* ) static void help() { std::cout << "\nThis program demonstrates the use of calcHist() -- histogram creation.\n" - << "Usage: \n" << "demhist [image_name -- Defaults to ../data/baboon.jpg]" << std::endl; + 
<< "Usage: \n" << "demhist [image_name -- Defaults to baboon.jpg]" << std::endl; } const char* keys = { - "{help h||}{@image|../data/baboon.jpg|input image file}" + "{help h||}{@image|baboon.jpg|input image file}" }; int main( int argc, const char** argv ) @@ -78,7 +78,7 @@ int main( int argc, const char** argv ) string inputImage = parser.get(0); // Load the source image. HighGUI use. - image = imread( inputImage, 0 ); + image = imread(samples::findFile(inputImage), IMREAD_GRAYSCALE); if(image.empty()) { std::cerr << "Cannot read image file: " << inputImage << std::endl; diff --git a/samples/cpp/detect_blob.cpp b/samples/cpp/detect_blob.cpp index a53743f44e..e969cc0684 100644 --- a/samples/cpp/detect_blob.cpp +++ b/samples/cpp/detect_blob.cpp @@ -14,7 +14,7 @@ static void help() { cout << "\n This program demonstrates how to use BLOB to detect and filter region \n" "Usage: \n" - " ./detect_blob \n" + " ./detect_blob \n" "Press a key when image window is active to change descriptor"; } @@ -70,20 +70,19 @@ static String Legende(SimpleBlobDetector::Params &pAct) int main(int argc, char *argv[]) { - vector fileName; - Mat img(600, 800, CV_8UC1); - cv::CommandLineParser parser(argc, argv, "{@input |../data/detect_blob.png| }{h help | | }"); + String fileName; + cv::CommandLineParser parser(argc, argv, "{@input |detect_blob.png| }{h help | | }"); if (parser.has("h")) { help(); return 0; } - fileName.push_back(parser.get("@input")); - img = imread(fileName[0], IMREAD_COLOR); - if (img.rows*img.cols <= 0) + fileName = parser.get("@input"); + Mat img = imread(samples::findFile(fileName), IMREAD_COLOR); + if (img.empty()) { - cout << "Image " << fileName[0] << " is empty or cannot be found\n"; - return(0); + cout << "Image " << fileName << " is empty or cannot be found\n"; + return 1; } SimpleBlobDetector::Params pDefaultBLOB; @@ -116,14 +115,17 @@ int main(int argc, char *argv[]) vector< Vec3b > palette; for (int i = 0; i<65536; i++) { - 
palette.push_back(Vec3b((uchar)rand(), (uchar)rand(), (uchar)rand())); + uchar c1 = (uchar)rand(); + uchar c2 = (uchar)rand(); + uchar c3 = (uchar)rand(); + palette.push_back(Vec3b(c1, c2, c3)); } help(); // These descriptors are going to be detecting and computing BLOBS with 6 different params // Param for first BLOB detector we want all - typeDesc.push_back("BLOB"); // see http://docs.opencv.org/trunk/d0/d7a/classcv_1_1SimpleBlobDetector.html + typeDesc.push_back("BLOB"); // see http://docs.opencv.org/master/d0/d7a/classcv_1_1SimpleBlobDetector.html pBLOB.push_back(pDefaultBLOB); pBLOB.back().filterByArea = true; pBLOB.back().minArea = 1; @@ -150,7 +152,7 @@ int main(int argc, char *argv[]) pBLOB.back().filterByConvexity = true; pBLOB.back().minConvexity = 0.; pBLOB.back().maxConvexity = (float)0.9; - // Param for six BLOB detector we want blob with gravity center color equal to 0 bug #4321 must be fixed + // Param for six BLOB detector we want blob with gravity center color equal to 0 typeDesc.push_back("BLOB"); pBLOB.push_back(pDefaultBLOB); pBLOB.back().filterByColor = true; diff --git a/samples/cpp/detect_mser.cpp b/samples/cpp/detect_mser.cpp index d42e18b5b0..4f6f9ef4de 100644 --- a/samples/cpp/detect_mser.cpp +++ b/samples/cpp/detect_mser.cpp @@ -412,7 +412,7 @@ int main(int argc, char *argv[]) string input = parser.get("@input"); if (!input.empty()) { - imgOrig = imread(input, IMREAD_GRAYSCALE); + imgOrig = imread(samples::findFile(input), IMREAD_GRAYSCALE); blur(imgOrig, img, blurSize); } else diff --git a/samples/cpp/dft.cpp b/samples/cpp/dft.cpp index ddb56acf09..652ffb3dca 100644 --- a/samples/cpp/dft.cpp +++ b/samples/cpp/dft.cpp @@ -14,12 +14,12 @@ static void help() printf("\nThis program demonstrated the use of the discrete Fourier transform (dft)\n" "The dft of an image is taken and it's power spectrum is displayed.\n" "Usage:\n" - "./dft [image_name -- default ../data/lena.jpg]\n"); + "./dft [image_name -- default lena.jpg]\n"); } const char* 
keys = { - "{help h||}{@image|../data/lena.jpg|input image file}" + "{help h||}{@image|lena.jpg|input image file}" }; int main(int argc, const char ** argv) @@ -32,7 +32,7 @@ int main(int argc, const char ** argv) return 0; } string filename = parser.get(0); - Mat img = imread(filename, IMREAD_GRAYSCALE); + Mat img = imread(samples::findFile(filename), IMREAD_GRAYSCALE); if( img.empty() ) { help(); diff --git a/samples/cpp/distrans.cpp b/samples/cpp/distrans.cpp index 21432642ec..67547f25e3 100644 --- a/samples/cpp/distrans.cpp +++ b/samples/cpp/distrans.cpp @@ -91,7 +91,7 @@ static void help() { printf("\nProgram to demonstrate the use of the distance transform function between edge images.\n" "Usage:\n" - "./distrans [image_name -- default image is ../data/stuff.jpg]\n" + "./distrans [image_name -- default image is stuff.jpg]\n" "\nHot keys: \n" "\tESC - quit the program\n" "\tC - use C/Inf metric\n" @@ -107,7 +107,7 @@ static void help() const char* keys = { - "{help h||}{@image |../data/stuff.jpg|input image file}" + "{help h||}{@image |stuff.jpg|input image file}" }; int main( int argc, const char** argv ) @@ -117,7 +117,7 @@ int main( int argc, const char** argv ) if (parser.has("help")) return 0; string filename = parser.get(0); - gray = imread(filename, 0); + gray = imread(samples::findFile(filename), 0); if(gray.empty()) { printf("Cannot read image file: %s\n", filename.c_str()); diff --git a/samples/cpp/edge.cpp b/samples/cpp/edge.cpp index 78bc4b6f7b..ba21cb512a 100644 --- a/samples/cpp/edge.cpp +++ b/samples/cpp/edge.cpp @@ -43,12 +43,12 @@ static void help() { printf("\nThis sample demonstrates Canny edge detection\n" "Call:\n" - " /.edge [image_name -- Default is ../data/fruits.jpg]\n\n"); + " /.edge [image_name -- Default is fruits.jpg]\n\n"); } const char* keys = { - "{help h||}{@image |../data/fruits.jpg|input image name}" + "{help h||}{@image |fruits.jpg|input image name}" }; int main( int argc, const char** argv ) @@ -57,7 +57,7 @@ int main( int 
argc, const char** argv ) CommandLineParser parser(argc, argv, keys); string filename = parser.get(0); - image = imread(filename, IMREAD_COLOR); + image = imread(samples::findFile(filename), IMREAD_COLOR); if(image.empty()) { printf("Cannot read image file: %s\n", filename.c_str()); diff --git a/samples/cpp/facedetect.cpp b/samples/cpp/facedetect.cpp index f6de738d43..88d632b408 100644 --- a/samples/cpp/facedetect.cpp +++ b/samples/cpp/facedetect.cpp @@ -18,7 +18,7 @@ static void help() " [--try-flip]\n" " [filename|camera_index]\n\n" "see facedetect.cmd for one call:\n" - "./facedetect --cascade=\"../../data/haarcascades/haarcascade_frontalface_alt.xml\" --nested-cascade=\"../../data/haarcascades/haarcascade_eye_tree_eyeglasses.xml\" --scale=1.3\n\n" + "./facedetect --cascade=\"data/haarcascades/haarcascade_frontalface_alt.xml\" --nested-cascade=\"data/haarcascades/haarcascade_eye_tree_eyeglasses.xml\" --scale=1.3\n\n" "During execution:\n\tHit any key to quit.\n" "\tUsing OpenCV version " << CV_VERSION << "\n" << endl; } @@ -41,8 +41,8 @@ int main( int argc, const char** argv ) cv::CommandLineParser parser(argc, argv, "{help h||}" - "{cascade|../../data/haarcascades/haarcascade_frontalface_alt.xml|}" - "{nested-cascade|../../data/haarcascades/haarcascade_eye_tree_eyeglasses.xml|}" + "{cascade|data/haarcascades/haarcascade_frontalface_alt.xml|}" + "{nested-cascade|data/haarcascades/haarcascade_eye_tree_eyeglasses.xml|}" "{scale|1|}{try-flip||}{@filename||}" ); if (parser.has("help")) @@ -62,9 +62,9 @@ int main( int argc, const char** argv ) parser.printErrors(); return 0; } - if ( !nestedCascade.load( nestedCascadeName ) ) + if (!nestedCascade.load(samples::findFileOrKeep(nestedCascadeName))) cerr << "WARNING: Could not load classifier cascade for nested objects" << endl; - if( !cascade.load( cascadeName ) ) + if (!cascade.load(samples::findFile(cascadeName))) { cerr << "ERROR: Could not load classifier cascade" << endl; help(); @@ -74,21 +74,31 @@ int main( int 
argc, const char** argv ) { int camera = inputName.empty() ? 0 : inputName[0] - '0'; if(!capture.open(camera)) + { cout << "Capture from camera #" << camera << " didn't work" << endl; + return 1; + } } - else if( inputName.size() ) + else if (!inputName.empty()) { - image = imread( inputName, 1 ); - if( image.empty() ) + image = imread(samples::findFileOrKeep(inputName), IMREAD_COLOR); + if (image.empty()) { - if(!capture.open( inputName )) + if (!capture.open(samples::findFileOrKeep(inputName))) + { cout << "Could not read " << inputName << endl; + return 1; + } } } else { - image = imread( "../data/lena.jpg", 1 ); - if(image.empty()) cout << "Couldn't read ../data/lena.jpg" << endl; + image = imread(samples::findFile("lena.jpg"), IMREAD_COLOR); + if (image.empty()) + { + cout << "Couldn't read lena.jpg" << endl; + return 1; + } } if( capture.isOpened() ) diff --git a/samples/cpp/facial_features.cpp b/samples/cpp/facial_features.cpp index 6dbef75c49..5495bbbf66 100644 --- a/samples/cpp/facial_features.cpp +++ b/samples/cpp/facial_features.cpp @@ -32,14 +32,14 @@ string face_cascade_path, eye_cascade_path, nose_cascade_path, mouth_cascade_pat int main(int argc, char** argv) { cv::CommandLineParser parser(argc, argv, - "{eyes||}{nose||}{mouth||}{help h||}"); + "{eyes||}{nose||}{mouth||}{help h||}{@image||}{@facexml||}"); if (parser.has("help")) { help(); return 0; } - input_image_path = parser.get(0); - face_cascade_path = parser.get(1); + input_image_path = parser.get("@image"); + face_cascade_path = parser.get("@facexml"); eye_cascade_path = parser.has("eyes") ? parser.get("eyes") : ""; nose_cascade_path = parser.has("nose") ? parser.get("nose") : ""; mouth_cascade_path = parser.has("mouth") ? 
parser.get("mouth") : ""; @@ -50,7 +50,7 @@ int main(int argc, char** argv) } // Load image and cascade classifier files Mat image; - image = imread(input_image_path); + image = imread(samples::findFile(input_image_path)); // Detect faces and facial features vector > faces; @@ -98,9 +98,10 @@ static void help() static void detectFaces(Mat& img, vector >& faces, string cascade_path) { CascadeClassifier face_cascade; - face_cascade.load(cascade_path); + face_cascade.load(samples::findFile(cascade_path)); - face_cascade.detectMultiScale(img, faces, 1.15, 3, 0|CASCADE_SCALE_IMAGE, Size(30, 30)); + if (!face_cascade.empty()) + face_cascade.detectMultiScale(img, faces, 1.15, 3, 0|CASCADE_SCALE_IMAGE, Size(30, 30)); return; } @@ -186,26 +187,29 @@ static void detectFacialFeaures(Mat& img, const vector > faces, strin static void detectEyes(Mat& img, vector >& eyes, string cascade_path) { CascadeClassifier eyes_cascade; - eyes_cascade.load(cascade_path); + eyes_cascade.load(samples::findFile(cascade_path, !cascade_path.empty())); - eyes_cascade.detectMultiScale(img, eyes, 1.20, 5, 0|CASCADE_SCALE_IMAGE, Size(30, 30)); + if (!eyes_cascade.empty()) + eyes_cascade.detectMultiScale(img, eyes, 1.20, 5, 0|CASCADE_SCALE_IMAGE, Size(30, 30)); return; } static void detectNose(Mat& img, vector >& nose, string cascade_path) { CascadeClassifier nose_cascade; - nose_cascade.load(cascade_path); + nose_cascade.load(samples::findFile(cascade_path, !cascade_path.empty())); - nose_cascade.detectMultiScale(img, nose, 1.20, 5, 0|CASCADE_SCALE_IMAGE, Size(30, 30)); + if (!nose_cascade.empty()) + nose_cascade.detectMultiScale(img, nose, 1.20, 5, 0|CASCADE_SCALE_IMAGE, Size(30, 30)); return; } static void detectMouth(Mat& img, vector >& mouth, string cascade_path) { CascadeClassifier mouth_cascade; - mouth_cascade.load(cascade_path); + mouth_cascade.load(samples::findFile(cascade_path, !cascade_path.empty())); - mouth_cascade.detectMultiScale(img, mouth, 1.20, 5, 0|CASCADE_SCALE_IMAGE, Size(30, 
30)); + if (!mouth_cascade.empty()) + mouth_cascade.detectMultiScale(img, mouth, 1.20, 5, 0|CASCADE_SCALE_IMAGE, Size(30, 30)); return; } diff --git a/samples/cpp/falsecolor.cpp b/samples/cpp/falsecolor.cpp index 1a2c0726b6..cb074bdcf0 100644 --- a/samples/cpp/falsecolor.cpp +++ b/samples/cpp/falsecolor.cpp @@ -87,7 +87,7 @@ int main(int argc, char** argv) Mat img; if (argc > 1) - img = imread(argv[1], IMREAD_GRAYSCALE); + img = imread(samples::findFile(argv[1]), IMREAD_GRAYSCALE); else img = DrawMyImage(2,256); diff --git a/samples/cpp/ffilldemo.cpp b/samples/cpp/ffilldemo.cpp index 0cf315568d..074a9ae340 100644 --- a/samples/cpp/ffilldemo.cpp +++ b/samples/cpp/ffilldemo.cpp @@ -12,7 +12,7 @@ static void help() { cout << "\nThis program demonstrated the floodFill() function\n" "Call:\n" - "./ffilldemo [image_name -- Default: ../data/fruits.jpg]\n" << endl; + "./ffilldemo [image_name -- Default: fruits.jpg]\n" << endl; cout << "Hot keys: \n" "\tESC - quit the program\n" @@ -74,7 +74,7 @@ static void onMouse( int event, int x, int y, int, void* ) int main( int argc, char** argv ) { cv::CommandLineParser parser (argc, argv, - "{help h | | show help message}{@image|../data/fruits.jpg| input image}" + "{help h | | show help message}{@image|fruits.jpg| input image}" ); if (parser.has("help")) { @@ -82,7 +82,7 @@ int main( int argc, char** argv ) return 0; } string filename = parser.get("@image"); - image0 = imread(filename, 1); + image0 = imread(samples::findFile(filename), 1); if( image0.empty() ) { diff --git a/samples/cpp/filestorage.cpp b/samples/cpp/filestorage.cpp index 3315455f48..e0b462bba6 100644 --- a/samples/cpp/filestorage.cpp +++ b/samples/cpp/filestorage.cpp @@ -92,8 +92,8 @@ int main(int ac, char** av) cout << "writing images\n"; fs << "images" << "["; - fs << "image1.jpg" << "myfi.png" << "../data/baboon.jpg"; - cout << "image1.jpg" << " myfi.png" << " ../data/baboon.jpg" << endl; + fs << "image1.jpg" << "myfi.png" << "baboon.jpg"; + cout << "image1.jpg" 
<< " myfi.png" << " baboon.jpg" << endl; fs << "]"; diff --git a/samples/cpp/fitellipse.cpp b/samples/cpp/fitellipse.cpp index 8aaf0bae5c..6d3346be1a 100644 --- a/samples/cpp/fitellipse.cpp +++ b/samples/cpp/fitellipse.cpp @@ -171,7 +171,7 @@ static void help() "contours and approximate it by ellipses. Three methods are used to find the \n" "elliptical fits: fitEllipse, fitEllipseAMS and fitEllipseDirect.\n" "Call:\n" - "./fitellipse [image_name -- Default ../data/stuff.jpg]\n" << endl; + "./fitellipse [image_name -- Default ellipses.jpg]\n" << endl; } int sliderPos = 70; @@ -192,14 +192,14 @@ int main( int argc, char** argv ) fitEllipseAMSQ = true; fitEllipseDirectQ = true; - cv::CommandLineParser parser(argc, argv,"{help h||}{@image|../data/ellipses.jpg|}"); + cv::CommandLineParser parser(argc, argv,"{help h||}{@image|ellipses.jpg|}"); if (parser.has("help")) { help(); return 0; } string filename = parser.get("@image"); - image = imread(filename, 0); + image = imread(samples::findFile(filename), 0); if( image.empty() ) { cout << "Couldn't open image " << filename << "\n"; diff --git a/samples/cpp/grabcut.cpp b/samples/cpp/grabcut.cpp index c5b68fca72..03ca7be23b 100644 --- a/samples/cpp/grabcut.cpp +++ b/samples/cpp/grabcut.cpp @@ -276,7 +276,7 @@ static void on_mouse( int event, int x, int y, int flags, void* param ) int main( int argc, char** argv ) { - cv::CommandLineParser parser(argc, argv, "{@input| ../data/messi5.jpg |}"); + cv::CommandLineParser parser(argc, argv, "{@input| messi5.jpg |}"); help(); string filename = parser.get("@input"); @@ -285,7 +285,7 @@ int main( int argc, char** argv ) cout << "\nDurn, empty filename" << endl; return 1; } - Mat image = imread( filename, 1 ); + Mat image = imread(samples::findFile(filename), IMREAD_COLOR); if( image.empty() ) { cout << "\n Durn, couldn't read image filename " << filename << endl; diff --git a/samples/cpp/image_alignment.cpp b/samples/cpp/image_alignment.cpp index c55d1d6ac2..ab0c129219 100644 --- 
a/samples/cpp/image_alignment.cpp +++ b/samples/cpp/image_alignment.cpp @@ -3,7 +3,7 @@ * findTransformECC that implements the image alignment ECC algorithm * * -* The demo loads an image (defaults to ../data/fruits.jpg) and it artificially creates +* The demo loads an image (defaults to fruits.jpg) and it artificially creates * a template image based on the given motion type. When two images are given, * the first image is the input image and the second one defines the template image. * In the latter case, you can also parse the warp's initialization. @@ -44,7 +44,7 @@ static void draw_warped_roi(Mat& image, const int width, const int height, Mat& const std::string keys = - "{@inputImage | ../data/fruits.jpg | input image filename }" + "{@inputImage | fruits.jpg | input image filename }" "{@templateImage | | template image filename (optional)}" "{@inputWarp | | input warp (matrix) filename (optional)}" "{n numOfIter | 50 | ECC's iterations }" @@ -65,10 +65,10 @@ static void help(void) " are given, the initialization of the warp by command line parsing is possible. " "If inputWarp is missing, the identity transformation initializes the algorithm. 
\n" << endl; - cout << "\nUsage example (one image): \n./ecc ../data/fruits.jpg -o=outWarp.ecc " + cout << "\nUsage example (one image): \n./image_alignment fruits.jpg -o=outWarp.ecc " "-m=euclidean -e=1e-6 -N=70 -v=1 \n" << endl; - cout << "\nUsage example (two images with initialization): \n./ecc yourInput.png yourTemplate.png " + cout << "\nUsage example (two images with initialization): \n./image_alignment yourInput.png yourTemplate.png " "yourInitialWarp.ecc -o=outWarp.ecc -m=homography -e=1e-6 -N=70 -v=1 -w=yourFinalImage.png \n" << endl; } @@ -212,7 +212,7 @@ int main (const int argc, const char * argv[]) else mode_temp = MOTION_HOMOGRAPHY; - Mat inputImage = imread(imgFile,0); + Mat inputImage = imread(samples::findFile(imgFile), IMREAD_GRAYSCALE); if (inputImage.empty()) { cerr << "Unable to load the inputImage" << endl; @@ -224,7 +224,7 @@ int main (const int argc, const char * argv[]) if (tempImgFile!="") { inputImage.copyTo(target_image); - template_image = imread(tempImgFile,0); + template_image = imread(samples::findFile(tempImgFile), IMREAD_GRAYSCALE); if (template_image.empty()){ cerr << "Unable to load the template image" << endl; return -1; diff --git a/samples/cpp/starter_imagelist.cpp b/samples/cpp/imagelist_reader.cpp similarity index 87% rename from samples/cpp/starter_imagelist.cpp rename to samples/cpp/imagelist_reader.cpp index 6f4f71466c..1d209b93a7 100644 --- a/samples/cpp/starter_imagelist.cpp +++ b/samples/cpp/imagelist_reader.cpp @@ -1,6 +1,4 @@ /* - * starter_imagelist.cpp - * * Created on: Nov 23, 2010 * Author: Ethan Rublee * @@ -16,10 +14,7 @@ using namespace cv; using namespace std; -//hide the local functions in an unnamed namespace -namespace -{ -void help(char** av) +static void help(char** av) { cout << "\nThis program gets you started being able to read images from a list in a file\n" "Usage:\n./" << av[0] << " image_list.yaml\n" @@ -30,7 +25,7 @@ void help(char** av) "Using OpenCV version %s\n" << CV_VERSION << "\n" << endl; 
} -bool readStringList(const string& filename, vector& l) +static bool readStringList(const string& filename, vector& l) { l.resize(0); FileStorage fs(filename, FileStorage::READ); @@ -45,7 +40,7 @@ bool readStringList(const string& filename, vector& l) return true; } -int process(vector images) +static int process(const vector& images) { namedWindow("image", WINDOW_KEEPRATIO); //resizable window; for (size_t i = 0; i < images.size(); i++) @@ -53,13 +48,11 @@ int process(vector images) Mat image = imread(images[i], IMREAD_GRAYSCALE); // do grayscale processing? imshow("image",image); cout << "Press a key to see the next image in the list." << endl; - waitKey(); // wait indefinitely for a key to be pressed + waitKey(); // wait indefinitely for a key to be pressed } return 0; } -} - int main(int ac, char** av) { cv::CommandLineParser parser(ac, av, "{help h||}{@input||}"); diff --git a/samples/cpp/inpaint.cpp b/samples/cpp/inpaint.cpp index 61c4c45cbd..175bff828c 100644 --- a/samples/cpp/inpaint.cpp +++ b/samples/cpp/inpaint.cpp @@ -14,7 +14,7 @@ static void help() << "with surrounding image areas.\n" "Using OpenCV version %s\n" << CV_VERSION << "\n" "Usage:\n" - "./inpaint [image_name -- Default ../data/fruits.jpg]\n" << endl; + "./inpaint [image_name -- Default fruits.jpg]\n" << endl; cout << "Hot keys: \n" "\tESC - quit the program\n" @@ -47,24 +47,24 @@ int main( int argc, char** argv ) { - cv::CommandLineParser parser(argc, argv, "{@image|../data/fruits.jpg|}"); + cv::CommandLineParser parser(argc, argv, "{@image|fruits.jpg|}"); help(); - string filename = parser.get("@image"); - Mat img0 = imread(filename, -1); + string filename = samples::findFile(parser.get("@image")); + Mat img0 = imread(filename, IMREAD_COLOR); if(img0.empty()) { cout << "Couldn't open the image " << filename << ".
Usage: inpaint \n" << endl; return 0; } - namedWindow( "image", 1 ); + namedWindow("image", WINDOW_AUTOSIZE); img = img0.clone(); inpaintMask = Mat::zeros(img.size(), CV_8U); imshow("image", img); - setMouseCallback( "image", onMouse, 0 ); + setMouseCallback( "image", onMouse, NULL); for(;;) { diff --git a/samples/cpp/laplace.cpp b/samples/cpp/laplace.cpp index d50029cb31..1bf3e518d1 100644 --- a/samples/cpp/laplace.cpp +++ b/samples/cpp/laplace.cpp @@ -25,39 +25,46 @@ int smoothType = GAUSSIAN; int main( int argc, char** argv ) { - VideoCapture cap; cv::CommandLineParser parser(argc, argv, "{ c | 0 | }{ p | | }"); help(); - if( parser.get("c").size() == 1 && isdigit(parser.get("c")[0]) ) + VideoCapture cap; + string camera = parser.get("c"); + if (camera.size() == 1 && isdigit(camera[0])) cap.open(parser.get("c")); else - cap.open(parser.get("c")); - if( cap.isOpened() ) - cout << "Video " << parser.get("c") << - ": width=" << cap.get(CAP_PROP_FRAME_WIDTH) << - ", height=" << cap.get(CAP_PROP_FRAME_HEIGHT) << - ", nframes=" << cap.get(CAP_PROP_FRAME_COUNT) << endl; - if( parser.has("p") ) + cap.open(samples::findFileOrKeep(camera)); + if (!cap.isOpened()) { - int pos = parser.get("p"); - if (!parser.check()) - { - parser.printErrors(); - return -1; - } - cout << "seeking to frame #" << pos << endl; - cap.set(CAP_PROP_POS_FRAMES, pos); + cerr << "Can't open camera/video stream: " << camera << endl; + return 1; } - - if( !cap.isOpened() ) + cout << "Video " << parser.get("c") << + ": width=" << cap.get(CAP_PROP_FRAME_WIDTH) << + ", height=" << cap.get(CAP_PROP_FRAME_HEIGHT) << + ", nframes=" << cap.get(CAP_PROP_FRAME_COUNT) << endl; + int pos = 0; + if (parser.has("p")) + { + pos = parser.get("p"); + } + if (!parser.check()) { - cout << "Could not initialize capturing...\n"; + parser.printErrors(); return -1; } - namedWindow( "Laplacian", 0 ); - createTrackbar( "Sigma", "Laplacian", &sigma, 15, 0 ); + if (pos != 0) + { + cout << "seeking to frame #" << pos << endl; 
+ if (!cap.set(CAP_PROP_POS_FRAMES, pos)) + { + cerr << "ERROR: seeking is not supported" << endl; + } + } + + namedWindow("Laplacian", WINDOW_AUTOSIZE); + createTrackbar("Sigma", "Laplacian", &sigma, 15, 0); Mat smoothed, laplace, result; diff --git a/samples/cpp/letter_recog.cpp b/samples/cpp/letter_recog.cpp index 32ccf6b09b..66756b61cb 100644 --- a/samples/cpp/letter_recog.cpp +++ b/samples/cpp/letter_recog.cpp @@ -520,13 +520,13 @@ int main( int argc, char *argv[] ) string data_filename; int method = 0; - cv::CommandLineParser parser(argc, argv, "{data|../data/letter-recognition.data|}{save||}{load||}{boost||}" + cv::CommandLineParser parser(argc, argv, "{data|letter-recognition.data|}{save||}{load||}{boost||}" "{mlp||}{knn knearest||}{nbayes||}{svm||}"); - data_filename = parser.get("data"); + data_filename = samples::findFile(parser.get("data")); if (parser.has("save")) filename_to_save = parser.get("save"); if (parser.has("load")) - filename_to_load = parser.get("load"); + filename_to_load = samples::findFile(parser.get("load")); if (parser.has("boost")) method = 1; else if (parser.has("mlp")) diff --git a/samples/cpp/logistic_regression.cpp b/samples/cpp/logistic_regression.cpp index b567dd2d25..365b32e523 100644 --- a/samples/cpp/logistic_regression.cpp +++ b/samples/cpp/logistic_regression.cpp @@ -83,7 +83,7 @@ static float calculateAccuracyPercent(const Mat &original, const Mat &predicted) int main() { - const String filename = "../data/data01.xml"; + const String filename = samples::findFile("data01.xml"); cout << "**********************************************************************" << endl; cout << filename << " contains digits 0 and 1 of 20 samples each, collected on an Android device" << endl; diff --git a/samples/cpp/lsd_lines.cpp b/samples/cpp/lsd_lines.cpp index a5c7326639..3feed9cbc2 100644 --- a/samples/cpp/lsd_lines.cpp +++ b/samples/cpp/lsd_lines.cpp @@ -9,7 +9,7 @@ using namespace cv; int main(int argc, char** argv) {
cv::CommandLineParser parser(argc, argv, - "{input i|../data/building.jpg|input image}" + "{input i|building.jpg|input image}" "{refine r|false|if true use LSD_REFINE_STD method, if false use LSD_REFINE_NONE method}" "{canny c|false|use Canny edge detector}" "{overlay o|false|show result on input image}" @@ -23,7 +23,7 @@ int main(int argc, char** argv) parser.printMessage(); - String filename = parser.get("input"); + String filename = samples::findFile(parser.get("input")); bool useRefine = parser.get("refine"); bool useCanny = parser.get("canny"); bool overlay = parser.get("overlay"); diff --git a/samples/cpp/mask_tmpl.cpp b/samples/cpp/mask_tmpl.cpp index 7216cbd842..dbc9f44600 100644 --- a/samples/cpp/mask_tmpl.cpp +++ b/samples/cpp/mask_tmpl.cpp @@ -8,17 +8,27 @@ using namespace cv; int main( int argc, const char** argv ) { CommandLineParser parser(argc, argv, - "{ i | ../data/lena_tmpl.jpg |image name }" - "{ t | ../data/tmpl.png |template name }" - "{ m | ../data/mask.png |mask name }" + "{ i | lena_tmpl.jpg |image name }" + "{ t | tmpl.png |template name }" + "{ m | mask.png |mask name }" "{ cm| 3 |comparison method }"); - cout << "This program demonstrates the use of template matching with mask.\n\n"; + cout << "This program demonstrates the use of template matching with mask." 
<< endl + << endl + << "Available methods: https://docs.opencv.org/master/df/dfb/group__imgproc__object.html#ga3a7850640f1fe1f58fe91a2d7583695d" << endl + << " TM_SQDIFF = " << (int)TM_SQDIFF << endl + << " TM_SQDIFF_NORMED = " << (int)TM_SQDIFF_NORMED << endl + << " TM_CCORR = " << (int)TM_CCORR << endl + << " TM_CCORR_NORMED = " << (int)TM_CCORR_NORMED << endl + << " TM_CCOEFF = " << (int)TM_CCOEFF << endl + << " TM_CCOEFF_NORMED = " << (int)TM_CCOEFF_NORMED << endl + << endl; + parser.printMessage(); - string filename = parser.get("i"); - string tmplname = parser.get("t"); - string maskname = parser.get("m"); + string filename = samples::findFile(parser.get("i")); + string tmplname = samples::findFile(parser.get("t")); + string maskname = samples::findFile(parser.get("m")); Mat img = imread(filename); Mat tmpl = imread(tmplname); Mat mask = imread(maskname); diff --git a/samples/cpp/matchmethod_orb_akaze_brisk.cpp b/samples/cpp/matchmethod_orb_akaze_brisk.cpp index 1eb0ded535..890d673e91 100644 --- a/samples/cpp/matchmethod_orb_akaze_brisk.cpp +++ b/samples/cpp/matchmethod_orb_akaze_brisk.cpp @@ -12,7 +12,7 @@ static void help() { cout << "\n This program demonstrates how to detect compute and match ORB BRISK and AKAZE descriptors \n" "Usage: \n" - " ./matchmethod_orb_akaze_brisk --image1= --image2=\n" + " ./matchmethod_orb_akaze_brisk --image1= --image2=\n" "Press a key when image window is active to change algorithm or descriptor"; } @@ -28,34 +28,34 @@ int main(int argc, char *argv[]) typeDesc.push_back("AKAZE"); // see http://docs.opencv.org/trunk/d8/d30/classcv_1_1AKAZE.html typeDesc.push_back("ORB"); // see http://docs.opencv.org/trunk/de/dbf/classcv_1_1BRISK.html typeDesc.push_back("BRISK"); // see http://docs.opencv.org/trunk/db/d95/classcv_1_1ORB.html - // This algorithm would be used to match descriptors see http://docs.opencv.org/trunk/db/d39/classcv_1_1DescriptorMatcher.html#ab5dc5036569ecc8d47565007fa518257 + // This algorithm would be used to match 
descriptors see http://docs.opencv.org/trunk/db/d39/classcv_1_1DescriptorMatcher.html#ab5dc5036569ecc8d47565007fa518257 typeAlgoMatch.push_back("BruteForce"); typeAlgoMatch.push_back("BruteForce-L1"); typeAlgoMatch.push_back("BruteForce-Hamming"); typeAlgoMatch.push_back("BruteForce-Hamming(2)"); cv::CommandLineParser parser(argc, argv, - "{ @image1 | ../data/basketball1.png | }" - "{ @image2 | ../data/basketball2.png | }" + "{ @image1 | basketball1.png | }" + "{ @image2 | basketball2.png | }" "{help h ||}"); if (parser.has("help")) { help(); return 0; } - fileName.push_back(parser.get(0)); - fileName.push_back(parser.get(1)); + fileName.push_back(samples::findFile(parser.get(0))); + fileName.push_back(samples::findFile(parser.get(1))); Mat img1 = imread(fileName[0], IMREAD_GRAYSCALE); Mat img2 = imread(fileName[1], IMREAD_GRAYSCALE); - if (img1.rows*img1.cols <= 0) - { - cout << "Image " << fileName[0] << " is empty or cannot be found\n"; - return(0); - } - if (img2.rows*img2.cols <= 0) - { - cout << "Image " << fileName[1] << " is empty or cannot be found\n"; - return(0); - } + if (img1.empty()) + { + cerr << "Image " << fileName[0] << " is empty or cannot be found" << endl; + return 1; + } + if (img2.empty()) + { + cerr << "Image " << fileName[1] << " is empty or cannot be found" << endl; + return 1; + } vector desMethCmp; Ptr b; @@ -74,10 +74,10 @@ int main(int argc, char *argv[]) vector::iterator itMatcher = typeAlgoMatch.end(); if (*itDesc == "AKAZE-DESCRIPTOR_KAZE_UPRIGHT"){ b = AKAZE::create(AKAZE::DESCRIPTOR_KAZE_UPRIGHT); - } + } if (*itDesc == "AKAZE"){ b = AKAZE::create(); - } + } if (*itDesc == "ORB"){ b = ORB::create(); } @@ -157,12 +157,12 @@ int main(int argc, char *argv[]) } catch (const Exception& e) { + cerr << "Exception: " << e.what() << endl; cout << "Feature : " << *itDesc << "\n"; if (itMatcher != typeAlgoMatch.end()) { cout << "Matcher : " << *itMatcher << "\n"; } - cout << e.msg << endl; } } int i=0; diff --git a/samples/cpp/minarea.cpp 
b/samples/cpp/minarea.cpp index 133b684e69..97264721bf 100644 --- a/samples/cpp/minarea.cpp +++ b/samples/cpp/minarea.cpp @@ -18,7 +18,7 @@ int main( int /*argc*/, char** /*argv*/ ) { help(); - Mat img(500, 500, CV_8UC3); + Mat img(500, 500, CV_8UC3, Scalar::all(0)); RNG& rng = theRNG(); for(;;) diff --git a/samples/cpp/morphology2.cpp b/samples/cpp/morphology2.cpp index b7dc68f6c0..2464e30328 100644 --- a/samples/cpp/morphology2.cpp +++ b/samples/cpp/morphology2.cpp @@ -33,8 +33,8 @@ int erode_dilate_pos = 0; // callback function for open/close trackbar static void OpenClose(int, void*) { - int n = open_close_pos - max_iters; - int an = n > 0 ? n : -n; + int n = open_close_pos; + int an = abs(n); Mat element = getStructuringElement(element_shape, Size(an*2+1, an*2+1), Point(an, an) ); if( n < 0 ) morphologyEx(src, dst, MORPH_OPEN, element); @@ -46,8 +46,8 @@ static void OpenClose(int, void*) // callback function for erode/dilate trackbar static void ErodeDilate(int, void*) { - int n = erode_dilate_pos - max_iters; - int an = n > 0 ? 
n : -n; + int n = erode_dilate_pos; + int an = abs(n); Mat element = getStructuringElement(element_shape, Size(an*2+1, an*2+1), Point(an, an) ); if( n < 0 ) erode(src, dst, element); @@ -59,13 +59,13 @@ static void ErodeDilate(int, void*) int main( int argc, char** argv ) { - cv::CommandLineParser parser(argc, argv, "{help h||}{ @image | ../data/baboon.jpg | }"); + cv::CommandLineParser parser(argc, argv, "{help h||}{ @image | baboon.jpg | }"); if (parser.has("help")) { help(); return 0; } - std::string filename = parser.get("@image"); + std::string filename = samples::findFile(parser.get("@image")); if( (src = imread(filename,IMREAD_COLOR)).empty() ) { help(); @@ -78,7 +78,14 @@ int main( int argc, char** argv ) open_close_pos = erode_dilate_pos = max_iters; createTrackbar("iterations", "Open/Close",&open_close_pos,max_iters*2+1,OpenClose); + setTrackbarMin("iterations", "Open/Close", -max_iters); + setTrackbarMax("iterations", "Open/Close", max_iters); + setTrackbarPos("iterations", "Open/Close", 0); + createTrackbar("iterations", "Erode/Dilate",&erode_dilate_pos,max_iters*2+1,ErodeDilate); + setTrackbarMin("iterations", "Erode/Dilate", -max_iters); + setTrackbarMax("iterations", "Erode/Dilate", max_iters); + setTrackbarPos("iterations", "Erode/Dilate", 0); for(;;) { diff --git a/samples/cpp/npr_demo.cpp b/samples/cpp/npr_demo.cpp index c343969ff6..4d8d7879bb 100644 --- a/samples/cpp/npr_demo.cpp +++ b/samples/cpp/npr_demo.cpp @@ -28,26 +28,22 @@ using namespace cv; int main(int argc, char* argv[]) { - cv::CommandLineParser parser(argc, argv, "{help h||show help message}{@image|../data/lena.jpg|input image}"); + cv::CommandLineParser parser(argc, argv, "{help h||show help message}{@image|lena.jpg|input image}"); if (parser.has("help")) { parser.printMessage(); - exit(0); - } - if (parser.get("@image").empty()) - { - parser.printMessage(); - exit(0); + return 0; } + string filename = samples::findFile(parser.get("@image")); - Mat I = imread(parser.get("@image")); 
+ Mat I = imread(filename); int num,type; if(I.empty()) { cout << "Image not found" << endl; - exit(0); + return 1; } cout << endl; diff --git a/samples/cpp/peopledetect.cpp b/samples/cpp/peopledetect.cpp index ea45ae9c9b..c7640db0b7 100644 --- a/samples/cpp/peopledetect.cpp +++ b/samples/cpp/peopledetect.cpp @@ -72,7 +72,10 @@ int main(int argc, char** argv) if (file.empty()) cap.open(camera); else - cap.open(file.c_str()); + { + file = samples::findFileOrKeep(file); + cap.open(file); + } if (!cap.isOpened()) { cout << "Can not open video stream: '" << (file.empty() ? "" : file) << "'" << endl; diff --git a/samples/cpp/points_classifier.cpp b/samples/cpp/points_classifier.cpp index 9945ba24f6..02e393495d 100644 --- a/samples/cpp/points_classifier.cpp +++ b/samples/cpp/points_classifier.cpp @@ -2,11 +2,6 @@ #include "opencv2/imgproc.hpp" #include "opencv2/ml.hpp" #include "opencv2/highgui.hpp" -#ifdef HAVE_OPENCV_OCL -#define _OCL_KNN_ 1 // select whether using ocl::KNN method or not, default is using -#define _OCL_SVM_ 1 // select whether using ocl::svm method or not, default is using -#include "opencv2/ocl/ocl.hpp" -#endif #include diff --git a/samples/cpp/polar_transforms.cpp b/samples/cpp/polar_transforms.cpp index 4a6014f433..b9b73c64cf 100644 --- a/samples/cpp/polar_transforms.cpp +++ b/samples/cpp/polar_transforms.cpp @@ -24,7 +24,7 @@ int main( int argc, char** argv ) if( arg.size() == 1 && isdigit(arg[0]) ) capture.open( arg[0] - '0' ); else - capture.open( arg.c_str() ); + capture.open(samples::findFileOrKeep(arg)); if( !capture.isOpened() ) { diff --git a/samples/cpp/live_detect_qrcode.cpp b/samples/cpp/qrcode.cpp similarity index 61% rename from samples/cpp/live_detect_qrcode.cpp rename to samples/cpp/qrcode.cpp index 07101da9fa..41c19f1e5d 100644 --- a/samples/cpp/live_detect_qrcode.cpp +++ b/samples/cpp/qrcode.cpp @@ -7,10 +7,10 @@ using namespace std; using namespace cv; -void getMatWithQRCodeContour(Mat &color_image, vector transform); -void 
getMatWithFPS(Mat &color_image, double fps); -int liveQRCodeDetect(); -int showImageQRCodeDetect(string in, string out); +static void drawQRCodeContour(Mat &color_image, vector transform); +static void drawFPS(Mat &color_image, double fps); +static int liveQRCodeDetect(const string& out_file); +static int imageQRCodeDetect(const string& in_file, const string& out_file); int main(int argc, char *argv[]) { @@ -28,7 +28,9 @@ int main(int argc, char *argv[]) } string in_file_name = cmd_parser.get("in"); // input path to image - string out_file_name = cmd_parser.get("out"); // output path to image + string out_file_name; + if (cmd_parser.has("out")) + out_file_name = cmd_parser.get("out"); // output path to image if (!cmd_parser.check()) { @@ -39,16 +41,16 @@ int main(int argc, char *argv[]) int return_code = 0; if (in_file_name.empty()) { - return_code = liveQRCodeDetect(); + return_code = liveQRCodeDetect(out_file_name); } else { - return_code = showImageQRCodeDetect(in_file_name, out_file_name); + return_code = imageQRCodeDetect(samples::findFile(in_file_name), out_file_name); } return return_code; } -void getMatWithQRCodeContour(Mat &color_image, vector transform) +void drawQRCodeContour(Mat &color_image, vector transform) { if (!transform.empty()) { @@ -70,19 +72,19 @@ void getMatWithQRCodeContour(Mat &color_image, vector transform) } } -void getMatWithFPS(Mat &color_image, double fps) +void drawFPS(Mat &color_image, double fps) { ostringstream convert; - convert << cvRound(fps) << " FPS."; + convert << cvRound(fps) << " FPS (QR detection)"; putText(color_image, convert.str(), Point(25, 25), FONT_HERSHEY_DUPLEX, 1, Scalar(0, 0, 255), 2); } -int liveQRCodeDetect() +int liveQRCodeDetect(const string& out_file) { VideoCapture cap(0); if(!cap.isOpened()) { - cout << "Cannot open a camera" << '\n'; + cout << "Cannot open a camera" << endl; return -4; } @@ -94,7 +96,11 @@ int liveQRCodeDetect() string decode_info; vector transform; cap >> frame; - if(frame.empty()) { 
break; } + if (frame.empty()) + { + cout << "End of video stream" << endl; + break; + } cvtColor(frame, src, COLOR_BGR2GRAY); total.start(); @@ -102,24 +108,30 @@ int liveQRCodeDetect() if (result_detection) { decode_info = qrcode.decode(src, transform, straight_barcode); - if (!decode_info.empty()) { cout << decode_info << '\n'; } + if (!decode_info.empty()) { cout << decode_info << endl; } } total.stop(); double fps = 1 / total.getTimeSec(); total.reset(); - if (result_detection) { getMatWithQRCodeContour(frame, transform); } - getMatWithFPS(frame, fps); + if (result_detection) { drawQRCodeContour(frame, transform); } + drawFPS(frame, fps); imshow("Live QR code detector", frame); - if( waitKey(30) > 0 ) { break; } + char c = (char)waitKey(30); + if (c == 27) + break; + if (c == ' ' && !out_file.empty()) + imwrite(out_file, frame); // TODO write original frame too } return 0; } -int showImageQRCodeDetect(string in, string out) +int imageQRCodeDetect(const string& in_file, const string& out_file) { - Mat src = imread(in, IMREAD_GRAYSCALE), straight_barcode; + Mat color_src = imread(in_file, IMREAD_COLOR), src; + cvtColor(color_src, src, COLOR_BGR2GRAY); + Mat straight_barcode; string decoded_info; vector transform; const int count_experiments = 10; @@ -135,54 +147,40 @@ int showImageQRCodeDetect(string in, string out) total.stop(); transform_time += total.getTimeSec(); total.reset(); - if (!result_detection) { break; } + if (!result_detection) + continue; total.start(); decoded_info = qrcode.decode(src, transform, straight_barcode); total.stop(); transform_time += total.getTimeSec(); total.reset(); - if (decoded_info.empty()) { break; } - } double fps = count_experiments / transform_time; - if (!result_detection) { cout << "QR code not found\n"; return -2; } - if (decoded_info.empty()) { cout << "QR code cannot be decoded\n"; return -3; } + if (!result_detection) + cout << "QR code not found" << endl; + if (decoded_info.empty()) + cout << "QR code cannot be 
decoded" << endl; - Mat color_src = imread(in); - getMatWithQRCodeContour(color_src, transform); - getMatWithFPS(color_src, fps); + drawQRCodeContour(color_src, transform); + drawFPS(color_src, fps); - for(;;) + cout << "Input image file path: " << in_file << endl; + cout << "Output image file path: " << out_file << endl; + cout << "Size: " << color_src.size() << endl; + cout << "FPS: " << fps << endl; + cout << "Decoded info: " << decoded_info << endl; + + if (!out_file.empty()) { - imshow("Detect QR code on image", color_src); - if( waitKey(30) > 0 ) { break; } + imwrite(out_file, color_src); } - if (!out.empty()) + for(;;) { - getMatWithQRCodeContour(color_src, transform); - getMatWithFPS(color_src, fps); - - cout << "Input image file path: " << in << '\n'; - cout << "Output image file path: " << out << '\n'; - cout << "Size: " << color_src.size() << '\n'; - cout << "FPS: " << fps << '\n'; - cout << "Decoded info: " << decoded_info << '\n'; - - vector compression_params; - compression_params.push_back(IMWRITE_PNG_COMPRESSION); - compression_params.push_back(9); - try - { - imwrite(out, color_src, compression_params); - } - catch (const cv::Exception& ex) - { - cout << "Exception converting image to PNG format: "; - cout << ex.what() << '\n'; - return -3; - } + imshow("Detect QR code on image", color_src); + if (waitKey(0) == 27) + break; } return 0; } diff --git a/samples/cpp/segment_objects.cpp b/samples/cpp/segment_objects.cpp index 32b2598740..3053bb8efe 100644 --- a/samples/cpp/segment_objects.cpp +++ b/samples/cpp/segment_objects.cpp @@ -73,7 +73,7 @@ int main(int argc, char** argv) if (input.empty()) cap.open(0); else - cap.open(input); + cap.open(samples::findFileOrKeep(input)); if( !cap.isOpened() ) { diff --git a/samples/cpp/select3dobj.cpp b/samples/cpp/select3dobj.cpp index 2953c1b294..4f32f195a4 100644 --- a/samples/cpp/select3dobj.cpp +++ b/samples/cpp/select3dobj.cpp @@ -416,7 +416,7 @@ int main(int argc, char** argv) if ( 
parser.get("@input").size() == 1 && isdigit(parser.get("@input")[0]) ) cameraId = parser.get("@input"); else - inputName = parser.get("@input"); + inputName = samples::findFileOrKeep(parser.get("@input")); if (!parser.check()) { puts(help); diff --git a/samples/cpp/smiledetect.cpp b/samples/cpp/smiledetect.cpp index a60d64283a..ceac5e9686 100644 --- a/samples/cpp/smiledetect.cpp +++ b/samples/cpp/smiledetect.cpp @@ -16,7 +16,7 @@ static void help() " [--try-flip]\n" " [video_filename|camera_index]\n\n" "Example:\n" - "./smiledetect --cascade=\"../../data/haarcascades/haarcascade_frontalface_alt.xml\" --smile-cascade=\"../../data/haarcascades/haarcascade_smile.xml\" --scale=2.0\n\n" + "./smiledetect --cascade=\"data/haarcascades/haarcascade_frontalface_alt.xml\" --smile-cascade=\"data/haarcascades/haarcascade_smile.xml\" --scale=2.0\n\n" "During execution:\n\tHit any key to quit.\n" "\tUsing OpenCV version " << CV_VERSION << "\n" << endl; } @@ -41,16 +41,16 @@ int main( int argc, const char** argv ) double scale; cv::CommandLineParser parser(argc, argv, "{help h||}{scale|1|}" - "{cascade|../../data/haarcascades/haarcascade_frontalface_alt.xml|}" - "{smile-cascade|../../data/haarcascades/haarcascade_smile.xml|}" + "{cascade|data/haarcascades/haarcascade_frontalface_alt.xml|}" + "{smile-cascade|data/haarcascades/haarcascade_smile.xml|}" "{try-flip||}{@input||}"); if (parser.has("help")) { help(); return 0; } - cascadeName = parser.get("cascade"); - nestedCascadeName = parser.get("smile-cascade"); + cascadeName = samples::findFile(parser.get("cascade")); + nestedCascadeName = samples::findFile(parser.get("smile-cascade")); tryflip = parser.has("try-flip"); inputName = parser.get("@input"); scale = parser.get("scale"); @@ -81,6 +81,7 @@ int main( int argc, const char** argv ) } else if( inputName.size() ) { + inputName = samples::findFileOrKeep(inputName); if(!capture.open( inputName )) cout << "Could not read " << inputName << endl; } diff --git 
a/samples/cpp/squares.cpp b/samples/cpp/squares.cpp index 480fe0a029..b466e32e97 100644 --- a/samples/cpp/squares.cpp +++ b/samples/cpp/squares.cpp @@ -138,8 +138,8 @@ static void drawSquares( Mat& image, const vector >& squares ) int main(int argc, char** argv) { - static const char* names[] = { "../data/pic1.png", "../data/pic2.png", "../data/pic3.png", - "../data/pic4.png", "../data/pic5.png", "../data/pic6.png", 0 }; + static const char* names[] = { "data/pic1.png", "data/pic2.png", "data/pic3.png", + "data/pic4.png", "data/pic5.png", "data/pic6.png", 0 }; help(argv[0]); if( argc > 1) @@ -152,10 +152,11 @@ int main(int argc, char** argv) for( int i = 0; names[i] != 0; i++ ) { - Mat image = imread(names[i], IMREAD_COLOR); + string filename = samples::findFile(names[i]); + Mat image = imread(filename, IMREAD_COLOR); if( image.empty() ) { - cout << "Couldn't load " << names[i] << endl; + cout << "Couldn't load " << filename << endl; continue; } diff --git a/samples/cpp/stereo_calib.cpp b/samples/cpp/stereo_calib.cpp index ddc3a795e2..894261dab8 100644 --- a/samples/cpp/stereo_calib.cpp +++ b/samples/cpp/stereo_calib.cpp @@ -18,7 +18,6 @@ Homepage: http://opencv.org Online docs: http://docs.opencv.org Q&A forum: http://answers.opencv.org - Issue tracker: http://code.opencv.org GitHub: https://github.com/opencv/opencv/ ************************************************** */ @@ -46,11 +45,11 @@ static int print_help() " on the chessboards, and a flag: useCalibrated for \n" " calibrated (0) or\n" " uncalibrated \n" - " (1: use cvStereoCalibrate(), 2: compute fundamental\n" + " (1: use stereoCalibrate(), 2: compute fundamental\n" " matrix separately) stereo. \n" " Calibrate the cameras and display the\n" " rectified results along with the computed disparity images. 
\n" << endl; - cout << "Usage:\n ./stereo_calib -w= -h= -s= \n" << endl; + cout << "Usage:\n ./stereo_calib -w= -h= -s= \n" << endl; return 0; } @@ -347,11 +346,11 @@ int main(int argc, char** argv) Size boardSize; string imagelistfn; bool showRectified; - cv::CommandLineParser parser(argc, argv, "{w|9|}{h|6|}{s|1.0|}{nr||}{help||}{@input|../data/stereo_calib.xml|}"); + cv::CommandLineParser parser(argc, argv, "{w|9|}{h|6|}{s|1.0|}{nr||}{help||}{@input|stereo_calib.xml|}"); if (parser.has("help")) return print_help(); showRectified = !parser.has("nr"); - imagelistfn = parser.get("@input"); + imagelistfn = samples::findFile(parser.get("@input")); boardSize.width = parser.get("w"); boardSize.height = parser.get("h"); float squareSize = parser.get("s"); diff --git a/samples/cpp/stereo_match.cpp b/samples/cpp/stereo_match.cpp index 4868a63950..166a45086c 100644 --- a/samples/cpp/stereo_match.cpp +++ b/samples/cpp/stereo_match.cpp @@ -65,8 +65,8 @@ int main(int argc, char** argv) print_help(); return 0; } - img1_filename = parser.get(0); - img2_filename = parser.get(1); + img1_filename = samples::findFile(parser.get(0)); + img2_filename = samples::findFile(parser.get(1)); if (parser.has("algorithm")) { std::string _alg = parser.get("algorithm"); diff --git a/samples/cpp/stitching.cpp b/samples/cpp/stitching.cpp index fca31dc188..5bf34f45b1 100644 --- a/samples/cpp/stitching.cpp +++ b/samples/cpp/stitching.cpp @@ -95,7 +95,7 @@ int parseCmdArgs(int argc, char** argv) } else { - Mat img = imread(argv[i]); + Mat img = imread(samples::findFile(argv[i])); if (img.empty()) { cout << "Can't read image '" << argv[i] << "'\n"; diff --git a/samples/cpp/stitching_detailed.cpp b/samples/cpp/stitching_detailed.cpp index 7806446141..9e3103f954 100644 --- a/samples/cpp/stitching_detailed.cpp +++ b/samples/cpp/stitching_detailed.cpp @@ -406,7 +406,7 @@ int main(int argc, char* argv[]) for (int i = 0; i < num_images; ++i) { - full_img = imread(img_names[i]); + full_img = 
imread(samples::findFile(img_names[i])); full_img_sizes[i] = full_img.size(); if (full_img.empty()) @@ -727,7 +727,7 @@ int main(int argc, char* argv[]) LOGLN("Compositing image #" << indices[img_idx]+1); // Read image and resize it if necessary - full_img = imread(img_names[img_idx]); + full_img = imread(samples::findFile(img_names[img_idx])); if (!is_compose_scale_set) { if (compose_megapix > 0) diff --git a/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/src/main_detection.cpp b/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/src/main_detection.cpp index 2ed4ce2d90..7d6ab999fe 100644 --- a/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/src/main_detection.cpp +++ b/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/src/main_detection.cpp @@ -2,7 +2,7 @@ #include #include // OpenCV -#include +#include #include #include #include diff --git a/samples/cpp/autofocus.cpp b/samples/cpp/videocapture_gphoto2_autofocus.cpp similarity index 100% rename from samples/cpp/autofocus.cpp rename to samples/cpp/videocapture_gphoto2_autofocus.cpp diff --git a/samples/cpp/gstreamer_pipeline.cpp b/samples/cpp/videocapture_gstreamer_pipeline.cpp similarity index 99% rename from samples/cpp/gstreamer_pipeline.cpp rename to samples/cpp/videocapture_gstreamer_pipeline.cpp index 4ad1daa4c2..bc6b6590f0 100644 --- a/samples/cpp/gstreamer_pipeline.cpp +++ b/samples/cpp/videocapture_gstreamer_pipeline.cpp @@ -266,6 +266,7 @@ int main(int argc, char *argv[]) cout << "Unsupported mode: " << mode << endl; return -1; } + file_name = samples::findFile(file_name); cout << "Mode: " << mode << ", Backend: " << backend << ", File: " << file_name << ", Codec: " << codec << endl; TickMeter total; diff --git a/samples/cpp/image_sequence.cpp b/samples/cpp/videocapture_image_sequence.cpp similarity index 100% rename from samples/cpp/image_sequence.cpp rename to samples/cpp/videocapture_image_sequence.cpp diff --git a/samples/cpp/intelperc_capture.cpp 
b/samples/cpp/videocapture_intelperc.cpp similarity index 99% rename from samples/cpp/intelperc_capture.cpp rename to samples/cpp/videocapture_intelperc.cpp index b6e66745c4..7ac3a7c184 100644 --- a/samples/cpp/intelperc_capture.cpp +++ b/samples/cpp/videocapture_intelperc.cpp @@ -1,6 +1,3 @@ -// testOpenCVCam.cpp : Defines the entry point for the console application. -// - #include "opencv2/videoio.hpp" #include "opencv2/highgui.hpp" diff --git a/samples/cpp/openni_capture.cpp b/samples/cpp/videocapture_openni.cpp similarity index 100% rename from samples/cpp/openni_capture.cpp rename to samples/cpp/videocapture_openni.cpp diff --git a/samples/cpp/warpPerspective_demo.cpp b/samples/cpp/warpPerspective_demo.cpp index 591e03d59b..04e1e95d74 100644 --- a/samples/cpp/warpPerspective_demo.cpp +++ b/samples/cpp/warpPerspective_demo.cpp @@ -20,7 +20,7 @@ static void help(char** argv) cout << "\nThis is a demo program shows how perspective transformation applied on an image, \n" "Using OpenCV version " << CV_VERSION << endl; - cout << "\nUsage:\n" << argv[0] << " [image_name -- Default ../data/right.jpg]\n" << endl; + cout << "\nUsage:\n" << argv[0] << " [image_name -- Default data/right.jpg]\n" << endl; cout << "\nHot keys: \n" "\tESC, q - quit the program\n" @@ -45,9 +45,9 @@ bool validation_needed = true; int main(int argc, char** argv) { help(argv); - CommandLineParser parser(argc, argv, "{@input| ../data/right.jpg |}"); + CommandLineParser parser(argc, argv, "{@input| data/right.jpg |}"); - string filename = parser.get("@input"); + string filename = samples::findFile(parser.get("@input")); Mat original_image = imread( filename ); Mat image; diff --git a/samples/cpp/watershed.cpp b/samples/cpp/watershed.cpp index 0991bb0424..f3fefa7d4e 100644 --- a/samples/cpp/watershed.cpp +++ b/samples/cpp/watershed.cpp @@ -13,7 +13,7 @@ static void help() { cout << "\nThis program demonstrates the famous watershed segmentation algorithm in OpenCV: watershed()\n" "Usage:\n" - 
"./watershed [image_name -- default is ../data/fruits.jpg]\n" << endl; + "./watershed [image_name -- default is fruits.jpg]\n" << endl; cout << "Hot keys: \n" @@ -48,18 +48,18 @@ static void onMouse( int event, int x, int y, int flags, void* ) int main( int argc, char** argv ) { - cv::CommandLineParser parser(argc, argv, "{help h | | }{ @input | ../data/fruits.jpg | }"); + cv::CommandLineParser parser(argc, argv, "{help h | | }{ @input | fruits.jpg | }"); if (parser.has("help")) { help(); return 0; } - string filename = parser.get("@input"); + string filename = samples::findFile(parser.get("@input")); Mat img0 = imread(filename, 1), imgGray; if( img0.empty() ) { - cout << "Couldn'g open image " << filename << ". Usage: watershed \n"; + cout << "Couldn't open image " << filename << ". Usage: watershed \n"; return 0; } help(); diff --git a/samples/data/calibration.yml b/samples/data/calibration.yml new file mode 100644 index 0000000000..8d30f53e50 --- /dev/null +++ b/samples/data/calibration.yml @@ -0,0 +1,15 @@ +%YAML:1.0 +images: + - left01.jpg + - left02.jpg + - left03.jpg + - left04.jpg + - left05.jpg + - left06.jpg + - left07.jpg + - left08.jpg + - left09.jpg + - left11.jpg + - left12.jpg + - left13.jpg + - left14.jpg diff --git a/samples/dnn/colorization.cpp b/samples/dnn/colorization.cpp index 3f1c66127f..b68e0ec4d8 100644 --- a/samples/dnn/colorization.cpp +++ b/samples/dnn/colorization.cpp @@ -64,9 +64,9 @@ int main(int argc, char **argv) parser.printMessage(); return 0; } - string modelTxt = parser.get("proto"); - string modelBin = parser.get("model"); - string imageFile = parser.get("image"); + string modelTxt = samples::findFile(parser.get("proto")); + string modelBin = samples::findFile(parser.get("model")); + string imageFile = samples::findFile(parser.get("image")); bool useOpenCL = parser.has("opencl"); if (!parser.check()) { diff --git a/samples/dnn/common.py b/samples/dnn/common.py index feafdc9d02..db9283b5d8 100644 --- a/samples/dnn/common.py +++ 
b/samples/dnn/common.py @@ -86,6 +86,10 @@ def findFile(filename): if os.path.exists(filename): return filename + fpath = cv.samples.findFile(filename, False) + if fpath: + return fpath + samplesDataDir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'data', diff --git a/samples/dnn/edge_detection.py b/samples/dnn/edge_detection.py index 26119782db..f242aab238 100644 --- a/samples/dnn/edge_detection.py +++ b/samples/dnn/edge_detection.py @@ -43,7 +43,7 @@ cv.dnn_registerLayer('Crop', CropLayer) #! [Register] # Load the model. -net = cv.dnn.readNet(args.prototxt, args.caffemodel) +net = cv.dnn.readNet(cv.samples.findFile(args.prototxt), cv.samples.findFile(args.caffemodel)) kWinName = 'Holistically-Nested Edge Detection' cv.namedWindow('Input', cv.WINDOW_NORMAL) diff --git a/samples/dnn/fast_neural_style.py b/samples/dnn/fast_neural_style.py index ab5d67f5fd..6afd166be5 100644 --- a/samples/dnn/fast_neural_style.py +++ b/samples/dnn/fast_neural_style.py @@ -13,7 +13,7 @@ parser.add_argument('--height', default=-1, type=int, help='Resize input to spec parser.add_argument('--median_filter', default=0, type=int, help='Kernel size of postprocessing blurring.') args = parser.parse_args() -net = cv.dnn.readNetFromTorch(args.model) +net = cv.dnn.readNetFromTorch(cv.samples.findFile(args.model)) net.setPreferableBackend(cv.dnn.DNN_BACKEND_OPENCV); if args.input: diff --git a/samples/dnn/mask_rcnn.py b/samples/dnn/mask_rcnn.py index cac8d6d1f0..a67f19519e 100644 --- a/samples/dnn/mask_rcnn.py +++ b/samples/dnn/mask_rcnn.py @@ -68,13 +68,13 @@ def drawBox(frame, classId, conf, left, top, right, bottom): # Load a network -net = cv.dnn.readNet(args.model, args.config) +net = cv.dnn.readNet(cv.samples.findFile(args.model), cv.samples.findFile(args.config)) net.setPreferableBackend(cv.dnn.DNN_BACKEND_OPENCV) winName = 'Mask-RCNN in OpenCV' cv.namedWindow(winName, cv.WINDOW_NORMAL) -cap = cv.VideoCapture(args.input if args.input else 0) +cap = 
cv.VideoCapture(cv.samples.findFileOrKeep(args.input) if args.input else 0) legend = None while cv.waitKey(1) < 0: hasFrame, frame = cap.read() diff --git a/samples/dnn/mobilenet_ssd_accuracy.py b/samples/dnn/mobilenet_ssd_accuracy.py index c522c5a3c9..58395acbdf 100644 --- a/samples/dnn/mobilenet_ssd_accuracy.py +++ b/samples/dnn/mobilenet_ssd_accuracy.py @@ -26,12 +26,12 @@ parser.add_argument('--annotations', help='Path to COCO annotations file.', requ args = parser.parse_args() ### Get OpenCV predictions ##################################################### -net = cv.dnn.readNetFromTensorflow(args.weights, args.prototxt) +net = cv.dnn.readNetFromTensorflow(cv.samples.findFile(args.weights), cv.samples.findFile(args.prototxt)) net.setPreferableBackend(cv.dnn.DNN_BACKEND_OPENCV); detections = [] for imgName in os.listdir(args.images): - inp = cv.imread(os.path.join(args.images, imgName)) + inp = cv.imread(cv.samples.findFile(os.path.join(args.images, imgName))) rows = inp.shape[0] cols = inp.shape[1] inp = cv.resize(inp, (300, 300)) diff --git a/samples/dnn/object_detection.py b/samples/dnn/object_detection.py index 2a89b8c66c..bf1c2e4236 100644 --- a/samples/dnn/object_detection.py +++ b/samples/dnn/object_detection.py @@ -67,7 +67,7 @@ if args.classes: classes = f.read().rstrip('\n').split('\n') # Load a network -net = cv.dnn.readNet(args.model, args.config, args.framework) +net = cv.dnn.readNet(cv.samples.findFile(args.model), cv.samples.findFile(args.config), args.framework) net.setPreferableBackend(args.backend) net.setPreferableTarget(args.target) outNames = net.getUnconnectedOutLayersNames() @@ -182,7 +182,7 @@ def callback(pos): cv.createTrackbar('Confidence threshold, %', winName, int(confThreshold * 100), 99, callback) -cap = cv.VideoCapture(args.input if args.input else 0) +cap = cv.VideoCapture(cv.samples.findFileOrKeep(args.input) if args.input else 0) while cv.waitKey(1) < 0: hasFrame, frame = cap.read() if not hasFrame: diff --git 
a/samples/dnn/openpose.cpp b/samples/dnn/openpose.cpp index da9315426a..b4934d76e4 100644 --- a/samples/dnn/openpose.cpp +++ b/samples/dnn/openpose.cpp @@ -66,9 +66,9 @@ int main(int argc, char **argv) "{ t threshold | 0.1 | threshold or confidence value for the heatmap }" ); - String modelTxt = parser.get("proto"); - String modelBin = parser.get("model"); - String imageFile = parser.get("image"); + String modelTxt = samples::findFile(parser.get("proto")); + String modelBin = samples::findFile(parser.get("model")); + String imageFile = samples::findFile(parser.get("image")); int W_in = parser.get("width"); int H_in = parser.get("height"); float thresh = parser.get("threshold"); diff --git a/samples/dnn/openpose.py b/samples/dnn/openpose.py index 4f367c10ec..9fcca1350a 100644 --- a/samples/dnn/openpose.py +++ b/samples/dnn/openpose.py @@ -45,7 +45,7 @@ else: inWidth = args.width inHeight = args.height -net = cv.dnn.readNetFromCaffe(args.proto, args.model) +net = cv.dnn.readNetFromCaffe(cv.samples.findFile(args.proto), cv.samples.findFile(args.model)) cap = cv.VideoCapture(args.input if args.input else 0) diff --git a/samples/python/_run_winpack_demo.cmd b/samples/python/_run_winpack_demo.cmd new file mode 100644 index 0000000000..afc314bd11 --- /dev/null +++ b/samples/python/_run_winpack_demo.cmd @@ -0,0 +1,3 @@ +@echo off +call ..\_winpack_run_python_sample.cmd %* +exit /B diff --git a/samples/python/_run_winpack_demo_python27.cmd b/samples/python/_run_winpack_demo_python27.cmd deleted file mode 100644 index 5ff7db97d0..0000000000 --- a/samples/python/_run_winpack_demo_python27.cmd +++ /dev/null @@ -1,62 +0,0 @@ -@echo off -setlocal enableDelayedExpansion - -set SCRIPTDIR=%~dp0 -if NOT exist %SCRIPTDIR%\..\..\..\build ( - echo ERROR: OpenCV Winpack installation is required - pause - exit -) - -:: Path to FFMPEG binary files -set PATH=%PATH%;%SCRIPTDIR%\..\..\..\build\bin\ - -:: Detect Python binary -python -V -if %ERRORLEVEL% EQU 0 ( - set PYTHON=python -) else ( - 
if exist C:\Python27-x64\python.exe ( - set PYTHON=C:\Python27-x64\python.exe - ) else ( - if exist C:\Python27\python.exe ( - set PYTHON=C:\Python27\python.exe - ) else ( - echo ERROR: Python not found - pause - exit - ) - ) -) -echo Using python: %PYTHON% - -:: Detect python architecture -%PYTHON% -c "import platform; exit(64 if platform.architecture()[0] == '64bit' else 32)" -if %ERRORLEVEL% EQU 32 ( - echo Detected: Python 32-bit - set PYTHONPATH=%CD%\..\..\..\build\python\2.7\x86 -) else ( - if %ERRORLEVEL% EQU 64 ( - echo Detected: Python 64-bit - set PYTHONPATH=%CD%\..\..\..\build\python\2.7\x64 - ) else ( - echo ERROR: Unknown python arch - pause - exit - ) -) - -:: Don't generate unnecessary .pyc cache files -set PYTHONDONTWRITEBYTECODE=1 - -if [%1]==[] goto rundemo -%PYTHON% %* -set result=%errorlevel% -IF %result% NEQ 0 (pause) -EXIT /B %result% - -:rundemo -%PYTHON% demo.py -set result=%errorlevel% -IF %result% NEQ 0 (pause) -EXIT /B %result% diff --git a/samples/python/asift.py b/samples/python/asift.py index 042e9bda3f..8555ed7019 100755 --- a/samples/python/asift.py +++ b/samples/python/asift.py @@ -116,11 +116,11 @@ if __name__ == '__main__': try: fn1, fn2 = args except: - fn1 = '../data/aero1.jpg' - fn2 = '../data/aero3.jpg' + fn1 = 'aero1.jpg' + fn2 = 'aero3.jpg' - img1 = cv.imread(fn1, 0) - img2 = cv.imread(fn2, 0) + img1 = cv.imread(cv.samples.findFile(fn1), cv.IMREAD_GRAYSCALE) + img2 = cv.imread(cv.samples.findFile(fn2), cv.IMREAD_GRAYSCALE) detector, matcher = init_feature(feature_name) if img1 is None: diff --git a/samples/python/browse.py b/samples/python/browse.py index 261d154253..35d1c27280 100755 --- a/samples/python/browse.py +++ b/samples/python/browse.py @@ -32,7 +32,7 @@ if __name__ == '__main__': print() if len(sys.argv) > 1: - fn = sys.argv[1] + fn = cv.samples.findFile(sys.argv[1]) print('loading %s ...' 
% fn) img = cv.imread(fn) if img is None: diff --git a/samples/python/calibrate.py b/samples/python/calibrate.py index a2970a95e7..e0896ea7ef 100755 --- a/samples/python/calibrate.py +++ b/samples/python/calibrate.py @@ -53,7 +53,7 @@ if __name__ == '__main__': obj_points = [] img_points = [] - h, w = cv.imread(img_names[0], 0).shape[:2] # TODO: use imquery call to retrieve results + h, w = cv.imread(img_names[0], cv.IMREAD_GRAYSCALE).shape[:2] # TODO: use imquery call to retrieve results def processImage(fn): print('processing %s... ' % fn) diff --git a/samples/python/camera_calibration_show_extrinsics.py b/samples/python/camera_calibration_show_extrinsics.py index 75274aea9e..6304aa7931 100755 --- a/samples/python/camera_calibration_show_extrinsics.py +++ b/samples/python/camera_calibration_show_extrinsics.py @@ -160,7 +160,7 @@ def draw_camera_boards(ax, camera_matrix, cam_width, cam_height, scale_focal, def main(): parser = argparse.ArgumentParser(description='Plot camera calibration extrinsics.', formatter_class=argparse.ArgumentDefaultsHelpFormatter) - parser.add_argument('--calibration', type=str, default="../data/left_intrinsics.yml", + parser.add_argument('--calibration', type=str, default='left_intrinsics.yml', help='YAML camera calibration file.') parser.add_argument('--cam_width', type=float, default=0.064/2, help='Width/2 of the displayed camera.') @@ -172,7 +172,7 @@ def main(): help='The calibration board is static and the camera is moving.') args = parser.parse_args() - fs = cv.FileStorage(args.calibration, cv.FILE_STORAGE_READ) + fs = cv.FileStorage(cv.samples.findFile(args.calibration), cv.FILE_STORAGE_READ) board_width = int(fs.getNode('board_width').real()) board_height = int(fs.getNode('board_height').real()) square_size = fs.getNode('square_size').real() diff --git a/samples/python/coherence.py b/samples/python/coherence.py index 225fc13f38..59db351d8d 100755 --- a/samples/python/coherence.py +++ b/samples/python/coherence.py @@ -51,9 +51,9 @@ 
if __name__ == '__main__': try: fn = sys.argv[1] except: - fn = '../data/baboon.jpg' + fn = 'baboon.jpg' - src = cv.imread(fn) + src = cv.imread(cv.samples.findFile(fn)) def nothing(*argv): pass diff --git a/samples/python/color_histogram.py b/samples/python/color_histogram.py index d997241f50..3a2e532c7e 100755 --- a/samples/python/color_histogram.py +++ b/samples/python/color_histogram.py @@ -39,7 +39,7 @@ if __name__ == '__main__': fn = sys.argv[1] except: fn = 0 - cam = video.create_capture(fn, fallback='synth:bg=../data/baboon.jpg:class=chess:noise=0.05') + cam = video.create_capture(fn, fallback='synth:bg=baboon.jpg:class=chess:noise=0.05') while True: flag, frame = cam.read() diff --git a/samples/python/deconvolution.py b/samples/python/deconvolution.py index 91d10243bb..109f46cddb 100755 --- a/samples/python/deconvolution.py +++ b/samples/python/deconvolution.py @@ -19,11 +19,11 @@ Usage: ESC - exit Examples: - deconvolution.py --angle 135 --d 22 ../data/licenseplate_motion.jpg + deconvolution.py --angle 135 --d 22 licenseplate_motion.jpg (image source: http://www.topazlabs.com/infocus/_images/licenseplate_compare.jpg) - deconvolution.py --angle 86 --d 31 ../data/text_motion.jpg - deconvolution.py --circle --d 19 ../data/text_defocus.jpg + deconvolution.py --angle 86 --d 31 text_motion.jpg + deconvolution.py --circle --d 19 text_defocus.jpg (image source: compact digital photo camera, no artificial distortion) @@ -73,11 +73,11 @@ if __name__ == '__main__': try: fn = args[0] except: - fn = '../data/licenseplate_motion.jpg' + fn = 'licenseplate_motion.jpg' win = 'deconvolution' - img = cv.imread(fn, 0) + img = cv.imread(cv.samples.findFile(fn), cv.IMREAD_GRAYSCALE) if img is None: print('Failed to load file:', fn) sys.exit(1) diff --git a/samples/python/dft.py b/samples/python/dft.py index 51206cf7ab..d5a0f03e06 100755 --- a/samples/python/dft.py +++ b/samples/python/dft.py @@ -38,8 +38,8 @@ def shift_dft(src, dst=None): h, w = src.shape[:2] - cx1 = cx2 = w/2 
- cy1 = cy2 = h/2 + cx1 = cx2 = w // 2 + cy1 = cy2 = h // 2 # if the size is odd, then adjust the bottom/right quadrants if w % 2 != 0: @@ -65,11 +65,13 @@ def shift_dft(src, dst=None): if __name__ == "__main__": if len(sys.argv) > 1: - im = cv.imread(sys.argv[1]) + fname = sys.argv[1] else: - im = cv.imread('../data/baboon.jpg') + fname = 'baboon.jpg' print("usage : python dft.py ") + im = cv.imread(cv.samples.findFile(fname)) + # convert to grayscale im = cv.cvtColor(im, cv.COLOR_BGR2GRAY) h, w = im.shape[:2] diff --git a/samples/python/digits.py b/samples/python/digits.py index f9f1e0be39..ea9844e312 100755 --- a/samples/python/digits.py +++ b/samples/python/digits.py @@ -3,7 +3,7 @@ ''' SVM and KNearest digit recognition. -Sample loads a dataset of handwritten digits from '../data/digits.png'. +Sample loads a dataset of handwritten digits from 'digits.png'. Then it trains a SVM and KNearest classifiers on it and evaluates their accuracy. @@ -42,7 +42,7 @@ from common import clock, mosaic SZ = 20 # size of each digit is SZ x SZ CLASS_N = 10 -DIGITS_FN = '../data/digits.png' +DIGITS_FN = 'digits.png' def split2d(img, cell_size, flatten=True): h, w = img.shape[:2] @@ -54,8 +54,9 @@ def split2d(img, cell_size, flatten=True): return cells def load_digits(fn): + fn = cv.samples.findFile(fn) print('loading "%s" ...' 
% fn) - digits_img = cv.imread(fn, 0) + digits_img = cv.imread(fn, cv.IMREAD_GRAYSCALE) digits = split2d(digits_img, (SZ, SZ)) labels = np.repeat(np.arange(CLASS_N), len(digits)/CLASS_N) return digits, labels diff --git a/samples/python/distrans.py b/samples/python/distrans.py index 02a51d500d..939ecfdc82 100755 --- a/samples/python/distrans.py +++ b/samples/python/distrans.py @@ -24,10 +24,11 @@ if __name__ == '__main__': try: fn = sys.argv[1] except: - fn = '../data/fruits.jpg' + fn = 'fruits.jpg' print(__doc__) - img = cv.imread(fn, 0) + fn = cv.samples.findFile(fn) + img = cv.imread(fn, cv.IMREAD_GRAYSCALE) if img is None: print('Failed to load fn:', fn) sys.exit(1) diff --git a/samples/python/facedetect.py b/samples/python/facedetect.py index 4067dc81c3..73d9341273 100755 --- a/samples/python/facedetect.py +++ b/samples/python/facedetect.py @@ -40,13 +40,13 @@ if __name__ == '__main__': except: video_src = 0 args = dict(args) - cascade_fn = args.get('--cascade', "../../data/haarcascades/haarcascade_frontalface_alt.xml") - nested_fn = args.get('--nested-cascade', "../../data/haarcascades/haarcascade_eye.xml") + cascade_fn = args.get('--cascade', "data/haarcascades/haarcascade_frontalface_alt.xml") + nested_fn = args.get('--nested-cascade', "data/haarcascades/haarcascade_eye.xml") - cascade = cv.CascadeClassifier(cascade_fn) - nested = cv.CascadeClassifier(nested_fn) + cascade = cv.CascadeClassifier(cv.samples.findFile(cascade_fn)) + nested = cv.CascadeClassifier(cv.samples.findFile(nested_fn)) - cam = create_capture(video_src, fallback='synth:bg=../data/lena.jpg:noise=0.05') + cam = create_capture(video_src, fallback='synth:bg={}:noise=0.05'.format(cv.samples.findFile('samples/data/lena.jpg'))) while True: ret, img = cam.read() diff --git a/samples/python/find_obj.py b/samples/python/find_obj.py index c5cd7c66a9..d0c67b085c 100755 --- a/samples/python/find_obj.py +++ b/samples/python/find_obj.py @@ -147,11 +147,11 @@ if __name__ == '__main__': try: fn1, fn2 = 
args except: - fn1 = '../data/box.png' - fn2 = '../data/box_in_scene.png' + fn1 = 'box.png' + fn2 = 'box_in_scene.png' - img1 = cv.imread(fn1, 0) - img2 = cv.imread(fn2, 0) + img1 = cv.imread(cv.samples.findFile(fn1), cv.IMREAD_GRAYSCALE) + img2 = cv.imread(cv.samples.findFile(fn2), cv.IMREAD_GRAYSCALE) detector, matcher = init_feature(feature_name) if img1 is None: diff --git a/samples/python/floodfill.py b/samples/python/floodfill.py index f03beef928..29986c5d84 100755 --- a/samples/python/floodfill.py +++ b/samples/python/floodfill.py @@ -25,10 +25,10 @@ if __name__ == '__main__': try: fn = sys.argv[1] except: - fn = '../data/fruits.jpg' + fn = 'fruits.jpg' print(__doc__) - img = cv.imread(fn, True) + img = cv.imread(cv.samples.findFile(fn)) if img is None: print('Failed to load image file:', fn) sys.exit(1) diff --git a/samples/python/gabor_threads.py b/samples/python/gabor_threads.py index 5c1cf11e1e..377fa9ad0b 100755 --- a/samples/python/gabor_threads.py +++ b/samples/python/gabor_threads.py @@ -55,9 +55,9 @@ if __name__ == '__main__': try: img_fn = sys.argv[1] except: - img_fn = '../data/baboon.jpg' + img_fn = 'baboon.jpg' - img = cv.imread(img_fn) + img = cv.imread(cv.samples.findFile(img_fn)) if img is None: print('Failed to load image file:', img_fn) sys.exit(1) diff --git a/samples/python/grabcut.py b/samples/python/grabcut.py index 37bc2e0f17..318e23453f 100644 --- a/samples/python/grabcut.py +++ b/samples/python/grabcut.py @@ -107,11 +107,11 @@ if __name__ == '__main__': if len(sys.argv) == 2: filename = sys.argv[1] # for drawing purposes else: - print("No input image given, so loading default image, ../data/lena.jpg \n") + print("No input image given, so loading default image, lena.jpg \n") print("Correct Usage: python grabcut.py \n") - filename = '../data/lena.jpg' + filename = 'lena.jpg' - img = cv.imread(filename) + img = cv.imread(cv.samples.findFile(filename)) img2 = img.copy() # a copy of original image mask = np.zeros(img.shape[:2],dtype = 
np.uint8) # mask initialized to PR_BG output = np.zeros(img.shape,np.uint8) # output image to be shown diff --git a/samples/python/hist.py b/samples/python/hist.py index 266fa12e0d..e085fdc55e 100755 --- a/samples/python/hist.py +++ b/samples/python/hist.py @@ -60,10 +60,10 @@ if __name__ == '__main__': if len(sys.argv)>1: fname = sys.argv[1] else : - fname = '../data/lena.jpg' + fname = 'lena.jpg' print("usage : python hist.py ") - im = cv.imread(fname) + im = cv.imread(cv.samples.findFile(fname)) if im is None: print('Failed to load image file:', fname) diff --git a/samples/python/houghcircles.py b/samples/python/houghcircles.py index 2c24c002c6..bf0aa43de3 100755 --- a/samples/python/houghcircles.py +++ b/samples/python/houghcircles.py @@ -5,7 +5,7 @@ This example illustrates how to use cv.HoughCircles() function. Usage: houghcircles.py [] - image argument defaults to ../data/board.jpg + image argument defaults to board.jpg ''' # Python 2/3 compatibility @@ -21,9 +21,9 @@ if __name__ == '__main__': try: fn = sys.argv[1] except IndexError: - fn = "../data/board.jpg" + fn = 'board.jpg' - src = cv.imread(fn, 1) + src = cv.imread(cv.samples.findFile(fn)) img = cv.cvtColor(src, cv.COLOR_BGR2GRAY) img = cv.medianBlur(img, 5) cimg = src.copy() # numpy function diff --git a/samples/python/houghlines.py b/samples/python/houghlines.py index 84351072dd..e662854693 100755 --- a/samples/python/houghlines.py +++ b/samples/python/houghlines.py @@ -5,7 +5,7 @@ This example illustrates how to use Hough Transform to find lines Usage: houghlines.py [] - image argument defaults to ../data/pic1.png + image argument defaults to pic1.png ''' # Python 2/3 compatibility @@ -22,9 +22,9 @@ if __name__ == '__main__': try: fn = sys.argv[1] except IndexError: - fn = "../data/pic1.png" + fn = 'pic1.png' - src = cv.imread(fn) + src = cv.imread(cv.samples.findFile(fn)) dst = cv.Canny(src, 50, 200) cdst = cv.cvtColor(dst, cv.COLOR_GRAY2BGR) diff --git a/samples/python/inpaint.py 
b/samples/python/inpaint.py index fb0140cc77..3e2fd5635e 100755 --- a/samples/python/inpaint.py +++ b/samples/python/inpaint.py @@ -27,11 +27,11 @@ if __name__ == '__main__': try: fn = sys.argv[1] except: - fn = '../data/fruits.jpg' + fn = 'fruits.jpg' print(__doc__) - img = cv.imread(fn) + img = cv.imread(cv.samples.findFile(fn)) if img is None: print('Failed to load image file:', fn) sys.exit(1) diff --git a/samples/python/letter_recog.py b/samples/python/letter_recog.py index d4987369bc..67e4266f28 100755 --- a/samples/python/letter_recog.py +++ b/samples/python/letter_recog.py @@ -41,7 +41,7 @@ class LetterStatModel(object): train_ratio = 0.5 def load(self, fn): - self.model.load(fn) + self.model = self.model.load(fn) def save(self, fn): self.model.save(fn) @@ -158,10 +158,12 @@ if __name__ == '__main__': args, dummy = getopt.getopt(sys.argv[1:], '', ['model=', 'data=', 'load=', 'save=']) args = dict(args) args.setdefault('--model', 'svm') - args.setdefault('--data', '../data/letter-recognition.data') + args.setdefault('--data', 'letter-recognition.data') - print('loading data %s ...' % args['--data']) - samples, responses = load_base(args['--data']) + datafile = cv.samples.findFile(args['--data']) + + print('loading data %s ...' 
% datafile) + samples, responses = load_base(datafile) Model = models[args['--model']] model = Model() diff --git a/samples/python/logpolar.py b/samples/python/logpolar.py index 1af0f11753..09b2cbc99f 100644 --- a/samples/python/logpolar.py +++ b/samples/python/logpolar.py @@ -22,9 +22,9 @@ if __name__ == '__main__': try: fn = sys.argv[1] except IndexError: - fn = '../data/fruits.jpg' + fn = 'fruits.jpg' - img = cv.imread(fn) + img = cv.imread(cv.samples.findFile(fn)) if img is None: print('Failed to load image file:', fn) sys.exit(1) diff --git a/samples/python/morphology.py b/samples/python/morphology.py index 1d95fa9b6d..e368c28576 100755 --- a/samples/python/morphology.py +++ b/samples/python/morphology.py @@ -31,9 +31,9 @@ if __name__ == '__main__': try: fn = sys.argv[1] except: - fn = '../data/baboon.jpg' + fn = 'baboon.jpg' - img = cv.imread(fn) + img = cv.imread(cv.samples.findFile(fn)) if img is None: print('Failed to load image file:', fn) diff --git a/samples/python/peopledetect.py b/samples/python/peopledetect.py index d0ddc1b7b2..a6f0538daf 100755 --- a/samples/python/peopledetect.py +++ b/samples/python/peopledetect.py @@ -40,7 +40,7 @@ if __name__ == '__main__': hog = cv.HOGDescriptor() hog.setSVMDetector( cv.HOGDescriptor_getDefaultPeopleDetector() ) - default = ['../data/basketball2.png '] if len(sys.argv[1:]) == 0 else [] + default = [cv.samples.findFile('basketball2.png')] if len(sys.argv[1:]) == 0 else [] for fn in it.chain(*map(glob, default + sys.argv[1:])): print(fn, ' - ',) diff --git a/samples/python/stereo_match.py b/samples/python/stereo_match.py index 5b0867003a..2d539318f5 100755 --- a/samples/python/stereo_match.py +++ b/samples/python/stereo_match.py @@ -35,8 +35,8 @@ def write_ply(fn, verts, colors): if __name__ == '__main__': print('loading images...') - imgL = cv.pyrDown( cv.imread('../data/aloeL.jpg') ) # downscale images for faster processing - imgR = cv.pyrDown( cv.imread('../data/aloeR.jpg') ) + imgL = 
cv.pyrDown(cv.imread(cv.samples.findFile('aloeL.jpg'))) # downscale images for faster processing + imgR = cv.pyrDown(cv.imread(cv.samples.findFile('aloeR.jpg'))) # disparity range is tuned for 'aloe' image pair window_size = 3 diff --git a/samples/python/texture_flow.py b/samples/python/texture_flow.py index c3220805b3..70259a8129 100755 --- a/samples/python/texture_flow.py +++ b/samples/python/texture_flow.py @@ -21,9 +21,9 @@ if __name__ == '__main__': try: fn = sys.argv[1] except: - fn = '../data/starry_night.jpg' + fn = 'starry_night.jpg' - img = cv.imread(fn) + img = cv.imread(cv.samples.findFile(fn)) if img is None: print('Failed to load image file:', fn) sys.exit(1) diff --git a/samples/python/tst_scene_render.py b/samples/python/tst_scene_render.py index 6955d16012..33cbd0dc1c 100644 --- a/samples/python/tst_scene_render.py +++ b/samples/python/tst_scene_render.py @@ -98,8 +98,8 @@ class TestSceneRender(): if __name__ == '__main__': - backGr = cv.imread('../data/graf1.png') - fgr = cv.imread('../data/box.png') + backGr = cv.imread(cv.samples.findFile('graf1.png')) + fgr = cv.imread(cv.samples.findFile('box.png')) render = TestSceneRender(backGr, fgr) diff --git a/samples/python/tutorial_code/Histograms_Matching/histogram_calculation/calcHist_Demo.py b/samples/python/tutorial_code/Histograms_Matching/histogram_calculation/calcHist_Demo.py index 63302abbbe..992ca90141 100644 --- a/samples/python/tutorial_code/Histograms_Matching/histogram_calculation/calcHist_Demo.py +++ b/samples/python/tutorial_code/Histograms_Matching/histogram_calculation/calcHist_Demo.py @@ -6,10 +6,10 @@ import argparse ## [Load image] parser = argparse.ArgumentParser(description='Code for Histogram Calculation tutorial.') -parser.add_argument('--input', help='Path to input image.', default='../data/lena.jpg') +parser.add_argument('--input', help='Path to input image.', default='lena.jpg') args = parser.parse_args() -src = cv.imread(args.input) +src = 
cv.imread(cv.samples.findFile(args.input)) if src is None: print('Could not open or find the image:', args.input) exit(0) diff --git a/samples/python/tutorial_code/Histograms_Matching/histogram_equalization/EqualizeHist_Demo.py b/samples/python/tutorial_code/Histograms_Matching/histogram_equalization/EqualizeHist_Demo.py index fb87cce75a..9bb82cee40 100644 --- a/samples/python/tutorial_code/Histograms_Matching/histogram_equalization/EqualizeHist_Demo.py +++ b/samples/python/tutorial_code/Histograms_Matching/histogram_equalization/EqualizeHist_Demo.py @@ -4,10 +4,10 @@ import argparse ## [Load image] parser = argparse.ArgumentParser(description='Code for Histogram Equalization tutorial.') -parser.add_argument('--input', help='Path to input image.', default='../data/lena.jpg') +parser.add_argument('--input', help='Path to input image.', default='lena.jpg') args = parser.parse_args() -src = cv.imread(args.input) +src = cv.imread(cv.samples.findFile(args.input)) if src is None: print('Could not open or find the image:', args.input) exit(0) diff --git a/samples/python/tutorial_code/ImgTrans/Filter2D/filter2D.py b/samples/python/tutorial_code/ImgTrans/Filter2D/filter2D.py index 407cd8b2fc..658e4636a7 100644 --- a/samples/python/tutorial_code/ImgTrans/Filter2D/filter2D.py +++ b/samples/python/tutorial_code/ImgTrans/Filter2D/filter2D.py @@ -11,15 +11,15 @@ def main(argv): window_name = 'filter2D Demo' ## [load] - imageName = argv[0] if len(argv) > 0 else "../data/lena.jpg" + imageName = argv[0] if len(argv) > 0 else 'lena.jpg' # Loads an image - src = cv.imread(imageName, cv.IMREAD_COLOR) + src = cv.imread(cv.samples.findFile(imageName), cv.IMREAD_COLOR) # Check if image is loaded fine if src is None: print ('Error opening image!') - print ('Usage: filter2D.py [image_name -- default ../data/lena.jpg] \n') + print ('Usage: filter2D.py [image_name -- default lena.jpg] \n') return -1 ## [load] ## [init_arguments] diff --git 
a/samples/python/tutorial_code/ImgTrans/HoughCircle/hough_circle.py b/samples/python/tutorial_code/ImgTrans/HoughCircle/hough_circle.py index 1f0a74cbf8..8da7eefed6 100644 --- a/samples/python/tutorial_code/ImgTrans/HoughCircle/hough_circle.py +++ b/samples/python/tutorial_code/ImgTrans/HoughCircle/hough_circle.py @@ -5,11 +5,11 @@ import numpy as np def main(argv): ## [load] - default_file = "../../../../data/smarties.png" + default_file = 'smarties.png' filename = argv[0] if len(argv) > 0 else default_file # Loads an image - src = cv.imread(filename, cv.IMREAD_COLOR) + src = cv.imread(cv.samples.findFile(filename), cv.IMREAD_COLOR) # Check if image is loaded fine if src is None: diff --git a/samples/python/tutorial_code/ImgTrans/HoughLine/hough_lines.py b/samples/python/tutorial_code/ImgTrans/HoughLine/hough_lines.py index 697f388e2f..71e5a92239 100644 --- a/samples/python/tutorial_code/ImgTrans/HoughLine/hough_lines.py +++ b/samples/python/tutorial_code/ImgTrans/HoughLine/hough_lines.py @@ -10,11 +10,11 @@ import numpy as np def main(argv): ## [load] - default_file = "../../../../data/sudoku.png" + default_file = 'sudoku.png' filename = argv[0] if len(argv) > 0 else default_file # Loads an image - src = cv.imread(filename, cv.IMREAD_GRAYSCALE) + src = cv.imread(cv.samples.findFile(filename), cv.IMREAD_GRAYSCALE) # Check if image is loaded fine if src is None: diff --git a/samples/python/tutorial_code/ImgTrans/LaPlace/laplace_demo.py b/samples/python/tutorial_code/ImgTrans/LaPlace/laplace_demo.py index 1cff041d36..a90af4da1d 100644 --- a/samples/python/tutorial_code/ImgTrans/LaPlace/laplace_demo.py +++ b/samples/python/tutorial_code/ImgTrans/LaPlace/laplace_demo.py @@ -14,14 +14,14 @@ def main(argv): # [variables] # [load] - imageName = argv[0] if len(argv) > 0 else "../data/lena.jpg" + imageName = argv[0] if len(argv) > 0 else 'lena.jpg' - src = cv.imread(imageName, cv.IMREAD_COLOR) # Load an image + src = cv.imread(cv.samples.findFile(imageName), 
cv.IMREAD_COLOR) # Load an image # Check if image is loaded fine if src is None: print ('Error opening image') - print ('Program Arguments: [image_name -- default ../data/lena.jpg]') + print ('Program Arguments: [image_name -- default lena.jpg]') return -1 # [load] diff --git a/samples/python/tutorial_code/ImgTrans/MakeBorder/copy_make_border.py b/samples/python/tutorial_code/ImgTrans/MakeBorder/copy_make_border.py index 453037fa37..ff1f3669c7 100644 --- a/samples/python/tutorial_code/ImgTrans/MakeBorder/copy_make_border.py +++ b/samples/python/tutorial_code/ImgTrans/MakeBorder/copy_make_border.py @@ -14,15 +14,15 @@ def main(argv): window_name = "copyMakeBorder Demo" ## [variables] ## [load] - imageName = argv[0] if len(argv) > 0 else "../data/lena.jpg" + imageName = argv[0] if len(argv) > 0 else 'lena.jpg' # Loads an image - src = cv.imread(imageName, cv.IMREAD_COLOR) + src = cv.imread(cv.samples.findFile(imageName), cv.IMREAD_COLOR) # Check if image is loaded fine if src is None: print ('Error opening image!') - print ('Usage: copy_make_border.py [image_name -- default ../data/lena.jpg] \n') + print ('Usage: copy_make_border.py [image_name -- default lena.jpg] \n') return -1 ## [load] # Brief how-to for this program diff --git a/samples/python/tutorial_code/ImgTrans/canny_detector/CannyDetector_Demo.py b/samples/python/tutorial_code/ImgTrans/canny_detector/CannyDetector_Demo.py index 6699512975..391572816b 100644 --- a/samples/python/tutorial_code/ImgTrans/canny_detector/CannyDetector_Demo.py +++ b/samples/python/tutorial_code/ImgTrans/canny_detector/CannyDetector_Demo.py @@ -17,10 +17,10 @@ def CannyThreshold(val): cv.imshow(window_name, dst) parser = argparse.ArgumentParser(description='Code for Canny Edge Detector tutorial.') -parser.add_argument('--input', help='Path to input image.', default='../data/fruits.jpg') +parser.add_argument('--input', help='Path to input image.', default='fruits.jpg') args = parser.parse_args() -src = cv.imread(args.input) +src = 
cv.imread(cv.samples.findFile(args.input)) if src is None: print('Could not open or find the image: ', args.input) exit(0) diff --git a/samples/python/tutorial_code/ImgTrans/distance_transformation/imageSegmentation.py b/samples/python/tutorial_code/ImgTrans/distance_transformation/imageSegmentation.py index eafd588a44..2388a414c0 100644 --- a/samples/python/tutorial_code/ImgTrans/distance_transformation/imageSegmentation.py +++ b/samples/python/tutorial_code/ImgTrans/distance_transformation/imageSegmentation.py @@ -11,10 +11,10 @@ rng.seed(12345) parser = argparse.ArgumentParser(description='Code for Image Segmentation with Distance Transform and Watershed Algorithm.\ Sample code showing how to segment overlapping objects using Laplacian filtering, \ in addition to Watershed and Distance Transformation') -parser.add_argument('--input', help='Path to input image.', default='../data/cards.png') +parser.add_argument('--input', help='Path to input image.', default='cards.png') args = parser.parse_args() -src = cv.imread(args.input) +src = cv.imread(cv.samples.findFile(args.input)) if src is None: print('Could not open or find the image:', args.input) exit(0) diff --git a/samples/python/tutorial_code/ImgTrans/remap/Remap_Demo.py b/samples/python/tutorial_code/ImgTrans/remap/Remap_Demo.py index e9c764d2e0..f50aa36571 100644 --- a/samples/python/tutorial_code/ImgTrans/remap/Remap_Demo.py +++ b/samples/python/tutorial_code/ImgTrans/remap/Remap_Demo.py @@ -32,11 +32,11 @@ def update_map(ind, map_x, map_y): ## [Update] parser = argparse.ArgumentParser(description='Code for Remapping tutorial.') -parser.add_argument('--input', help='Path to input image.', default='../data/chicky_512.png') +parser.add_argument('--input', help='Path to input image.', default='chicky_512.png') args = parser.parse_args() ## [Load] -src = cv.imread(args.input, cv.IMREAD_COLOR) +src = cv.imread(cv.samples.findFile(args.input), cv.IMREAD_COLOR) if src is None: print('Could not open or find the 
image: ', args.input) exit(0) diff --git a/samples/python/tutorial_code/ImgTrans/warp_affine/Geometric_Transforms_Demo.py b/samples/python/tutorial_code/ImgTrans/warp_affine/Geometric_Transforms_Demo.py index 462a412f47..d141470c92 100644 --- a/samples/python/tutorial_code/ImgTrans/warp_affine/Geometric_Transforms_Demo.py +++ b/samples/python/tutorial_code/ImgTrans/warp_affine/Geometric_Transforms_Demo.py @@ -5,10 +5,10 @@ import argparse ## [Load the image] parser = argparse.ArgumentParser(description='Code for Affine Transformations tutorial.') -parser.add_argument('--input', help='Path to input image.', default='../data/lena.jpg') +parser.add_argument('--input', help='Path to input image.', default='lena.jpg') args = parser.parse_args() -src = cv.imread(args.input) +src = cv.imread(cv.samples.findFile(args.input)) if src is None: print('Could not open or find the image:', args.input) exit(0) diff --git a/samples/python/tutorial_code/ShapeDescriptors/bounding_rects_circles/generalContours_demo1.py b/samples/python/tutorial_code/ShapeDescriptors/bounding_rects_circles/generalContours_demo1.py index 1549b33ce5..0f6cb8ec99 100644 --- a/samples/python/tutorial_code/ShapeDescriptors/bounding_rects_circles/generalContours_demo1.py +++ b/samples/python/tutorial_code/ShapeDescriptors/bounding_rects_circles/generalContours_demo1.py @@ -53,10 +53,10 @@ def thresh_callback(val): ## [setup] # Load source image parser = argparse.ArgumentParser(description='Code for Creating Bounding boxes and circles for contours tutorial.') -parser.add_argument('--input', help='Path to input image.', default='../data/stuff.jpg') +parser.add_argument('--input', help='Path to input image.', default='stuff.jpg') args = parser.parse_args() -src = cv.imread(args.input) +src = cv.imread(cv.samples.findFile(args.input)) if src is None: print('Could not open or find the image:', args.input) exit(0) diff --git 
a/samples/python/tutorial_code/ShapeDescriptors/bounding_rotated_ellipses/generalContours_demo2.py b/samples/python/tutorial_code/ShapeDescriptors/bounding_rotated_ellipses/generalContours_demo2.py index 2bc46635a1..3d123f0569 100644 --- a/samples/python/tutorial_code/ShapeDescriptors/bounding_rotated_ellipses/generalContours_demo2.py +++ b/samples/python/tutorial_code/ShapeDescriptors/bounding_rotated_ellipses/generalContours_demo2.py @@ -53,10 +53,10 @@ def thresh_callback(val): ## [setup] # Load source image parser = argparse.ArgumentParser(description='Code for Creating Bounding rotated boxes and ellipses for contours tutorial.') -parser.add_argument('--input', help='Path to input image.', default='../data/stuff.jpg') +parser.add_argument('--input', help='Path to input image.', default='stuff.jpg') args = parser.parse_args() -src = cv.imread(args.input) +src = cv.imread(cv.samples.findFile(args.input)) if src is None: print('Could not open or find the image:', args.input) exit(0) diff --git a/samples/python/tutorial_code/ShapeDescriptors/find_contours/findContours_demo.py b/samples/python/tutorial_code/ShapeDescriptors/find_contours/findContours_demo.py index 23d2f467bb..c4480a65f4 100644 --- a/samples/python/tutorial_code/ShapeDescriptors/find_contours/findContours_demo.py +++ b/samples/python/tutorial_code/ShapeDescriptors/find_contours/findContours_demo.py @@ -26,10 +26,10 @@ def thresh_callback(val): # Load source image parser = argparse.ArgumentParser(description='Code for Finding contours in your image tutorial.') -parser.add_argument('--input', help='Path to input image.', default='../data/HappyFish.jpg') +parser.add_argument('--input', help='Path to input image.', default='HappyFish.jpg') args = parser.parse_args() -src = cv.imread(args.input) +src = cv.imread(cv.samples.findFile(args.input)) if src is None: print('Could not open or find the image:', args.input) exit(0) diff --git a/samples/python/tutorial_code/ShapeDescriptors/hull/hull_demo.py 
b/samples/python/tutorial_code/ShapeDescriptors/hull/hull_demo.py index e9806a749f..cd00131cbc 100644 --- a/samples/python/tutorial_code/ShapeDescriptors/hull/hull_demo.py +++ b/samples/python/tutorial_code/ShapeDescriptors/hull/hull_demo.py @@ -33,10 +33,10 @@ def thresh_callback(val): # Load source image parser = argparse.ArgumentParser(description='Code for Convex Hull tutorial.') -parser.add_argument('--input', help='Path to input image.', default='../data/stuff.jpg') +parser.add_argument('--input', help='Path to input image.', default='stuff.jpg') args = parser.parse_args() -src = cv.imread(args.input) +src = cv.imread(cv.samples.findFile(args.input)) if src is None: print('Could not open or find the image:', args.input) exit(0) diff --git a/samples/python/tutorial_code/ShapeDescriptors/moments/moments_demo.py b/samples/python/tutorial_code/ShapeDescriptors/moments/moments_demo.py index 13d915b883..eac12c8a08 100644 --- a/samples/python/tutorial_code/ShapeDescriptors/moments/moments_demo.py +++ b/samples/python/tutorial_code/ShapeDescriptors/moments/moments_demo.py @@ -54,10 +54,10 @@ def thresh_callback(val): ## [setup] # Load source image parser = argparse.ArgumentParser(description='Code for Image Moments tutorial.') -parser.add_argument('--input', help='Path to input image.', default='../data/stuff.jpg') +parser.add_argument('--input', help='Path to input image.', default='stuff.jpg') args = parser.parse_args() -src = cv.imread(args.input) +src = cv.imread(cv.samples.findFile(args.input)) if src is None: print('Could not open or find the image:', args.input) exit(0) diff --git a/samples/python/tutorial_code/TrackingMotion/corner_subpixels/cornerSubPix_Demo.py b/samples/python/tutorial_code/TrackingMotion/corner_subpixels/cornerSubPix_Demo.py index 72ce96b1a6..6d8738b310 100644 --- a/samples/python/tutorial_code/TrackingMotion/corner_subpixels/cornerSubPix_Demo.py +++ b/samples/python/tutorial_code/TrackingMotion/corner_subpixels/cornerSubPix_Demo.py @@ 
-50,10 +50,10 @@ def goodFeaturesToTrack_Demo(val): # Load source image and convert it to gray parser = argparse.ArgumentParser(description='Code for Shi-Tomasi corner detector tutorial.') -parser.add_argument('--input', help='Path to input image.', default='../data/pic3.png') +parser.add_argument('--input', help='Path to input image.', default='pic3.png') args = parser.parse_args() -src = cv.imread(args.input) +src = cv.imread(cv.samples.findFile(args.input)) if src is None: print('Could not open or find the image:', args.input) exit(0) diff --git a/samples/python/tutorial_code/TrackingMotion/generic_corner_detector/cornerDetector_Demo.py b/samples/python/tutorial_code/TrackingMotion/generic_corner_detector/cornerDetector_Demo.py index d135367fc2..653307879c 100644 --- a/samples/python/tutorial_code/TrackingMotion/generic_corner_detector/cornerDetector_Demo.py +++ b/samples/python/tutorial_code/TrackingMotion/generic_corner_detector/cornerDetector_Demo.py @@ -35,10 +35,10 @@ def myShiTomasi_function(val): # Load source image and convert it to gray parser = argparse.ArgumentParser(description='Code for Creating your own corner detector tutorial.') -parser.add_argument('--input', help='Path to input image.', default='../data/building.jpg') +parser.add_argument('--input', help='Path to input image.', default='building.jpg') args = parser.parse_args() -src = cv.imread(args.input) +src = cv.imread(cv.samples.findFile(args.input)) if src is None: print('Could not open or find the image:', args.input) exit(0) diff --git a/samples/python/tutorial_code/TrackingMotion/good_features_to_track/goodFeaturesToTrack_Demo.py b/samples/python/tutorial_code/TrackingMotion/good_features_to_track/goodFeaturesToTrack_Demo.py index 57e767ccee..3fb8441d92 100644 --- a/samples/python/tutorial_code/TrackingMotion/good_features_to_track/goodFeaturesToTrack_Demo.py +++ b/samples/python/tutorial_code/TrackingMotion/good_features_to_track/goodFeaturesToTrack_Demo.py @@ -38,10 +38,10 @@ def 
goodFeaturesToTrack_Demo(val): # Load source image and convert it to gray parser = argparse.ArgumentParser(description='Code for Shi-Tomasi corner detector tutorial.') -parser.add_argument('--input', help='Path to input image.', default='../data/pic3.png') +parser.add_argument('--input', help='Path to input image.', default='pic3.png') args = parser.parse_args() -src = cv.imread(args.input) +src = cv.imread(cv.samples.findFile(args.input)) if src is None: print('Could not open or find the image:', args.input) exit(0) diff --git a/samples/python/tutorial_code/TrackingMotion/harris_detector/cornerHarris_Demo.py b/samples/python/tutorial_code/TrackingMotion/harris_detector/cornerHarris_Demo.py index cee7679adf..63d765f202 100644 --- a/samples/python/tutorial_code/TrackingMotion/harris_detector/cornerHarris_Demo.py +++ b/samples/python/tutorial_code/TrackingMotion/harris_detector/cornerHarris_Demo.py @@ -35,10 +35,10 @@ def cornerHarris_demo(val): # Load source image and convert it to gray parser = argparse.ArgumentParser(description='Code for Harris corner detector tutorial.') -parser.add_argument('--input', help='Path to input image.', default='../data/building.jpg') +parser.add_argument('--input', help='Path to input image.', default='building.jpg') args = parser.parse_args() -src = cv.imread(args.input) +src = cv.imread(cv.samples.findFile(args.input)) if src is None: print('Could not open or find the image:', args.input) exit(0) diff --git a/samples/python/tutorial_code/core/AddingImages/adding_images.py b/samples/python/tutorial_code/core/AddingImages/adding_images.py index 65c6154d62..928686d18e 100644 --- a/samples/python/tutorial_code/core/AddingImages/adding_images.py +++ b/samples/python/tutorial_code/core/AddingImages/adding_images.py @@ -16,8 +16,8 @@ input_alpha = float(raw_input().strip()) if 0 <= alpha <= 1: alpha = input_alpha # [load] -src1 = cv.imread('../../../../data/LinuxLogo.jpg') -src2 = cv.imread('../../../../data/WindowsLogo.jpg') +src1 = 
cv.imread(cv.samples.findFile('LinuxLogo.jpg')) +src2 = cv.imread(cv.samples.findFile('WindowsLogo.jpg')) # [load] if src1 is None: print("Error loading src1") diff --git a/samples/python/tutorial_code/core/discrete_fourier_transform/discrete_fourier_transform.py b/samples/python/tutorial_code/core/discrete_fourier_transform/discrete_fourier_transform.py index 96535acbcd..a89a1097ac 100644 --- a/samples/python/tutorial_code/core/discrete_fourier_transform/discrete_fourier_transform.py +++ b/samples/python/tutorial_code/core/discrete_fourier_transform/discrete_fourier_transform.py @@ -10,16 +10,16 @@ def print_help(): This program demonstrated the use of the discrete Fourier transform (DFT). The dft of an image is taken and it's power spectrum is displayed. Usage: - discrete_fourier_transform.py [image_name -- default ../../../../data/lena.jpg]''') + discrete_fourier_transform.py [image_name -- default lena.jpg]''') def main(argv): print_help() - filename = argv[0] if len(argv) > 0 else "../../../../data/lena.jpg" + filename = argv[0] if len(argv) > 0 else 'lena.jpg' - I = cv.imread(filename, cv.IMREAD_GRAYSCALE) + I = cv.imread(cv.samples.findFile(filename), cv.IMREAD_GRAYSCALE) if I is None: print('Error opening image') return -1 diff --git a/samples/python/tutorial_code/core/mat_mask_operations/mat_mask_operations.py b/samples/python/tutorial_code/core/mat_mask_operations/mat_mask_operations.py index b151575f15..3b9385020e 100644 --- a/samples/python/tutorial_code/core/mat_mask_operations/mat_mask_operations.py +++ b/samples/python/tutorial_code/core/mat_mask_operations/mat_mask_operations.py @@ -45,7 +45,7 @@ def sharpen(my_image): ## [basic_method] def main(argv): - filename = "../../../../data/lena.jpg" + filename = 'lena.jpg' img_codec = cv.IMREAD_COLOR if argv: @@ -53,12 +53,12 @@ def main(argv): if len(argv) >= 2 and sys.argv[2] == "G": img_codec = cv.IMREAD_GRAYSCALE - src = cv.imread(filename, img_codec) + src = cv.imread(cv.samples.findFile(filename), 
img_codec) if src is None: print("Can't open image [" + filename + "]") print("Usage:") - print("mat_mask_operations.py [image_path -- default ../../../../data/lena.jpg] [G -- grayscale]") + print("mat_mask_operations.py [image_path -- default lena.jpg] [G -- grayscale]") return -1 cv.namedWindow("Input", cv.WINDOW_AUTOSIZE) diff --git a/samples/python/tutorial_code/features2D/akaze_matching/AKAZE_match.py b/samples/python/tutorial_code/features2D/akaze_matching/AKAZE_match.py index 0d9818a3c1..61d1e93a67 100644 --- a/samples/python/tutorial_code/features2D/akaze_matching/AKAZE_match.py +++ b/samples/python/tutorial_code/features2D/akaze_matching/AKAZE_match.py @@ -6,18 +6,18 @@ from math import sqrt ## [load] parser = argparse.ArgumentParser(description='Code for AKAZE local features matching tutorial.') -parser.add_argument('--input1', help='Path to input image 1.', default='../data/graf1.png') -parser.add_argument('--input2', help='Path to input image 2.', default='../data/graf3.png') -parser.add_argument('--homography', help='Path to the homography matrix.', default='../data/H1to3p.xml') +parser.add_argument('--input1', help='Path to input image 1.', default='graf1.png') +parser.add_argument('--input2', help='Path to input image 2.', default='graf3.png') +parser.add_argument('--homography', help='Path to the homography matrix.', default='H1to3p.xml') args = parser.parse_args() -img1 = cv.imread(args.input1, cv.IMREAD_GRAYSCALE) -img2 = cv.imread(args.input2, cv.IMREAD_GRAYSCALE) +img1 = cv.imread(cv.samples.findFile(args.input1), cv.IMREAD_GRAYSCALE) +img2 = cv.imread(cv.samples.findFile(args.input2), cv.IMREAD_GRAYSCALE) if img1 is None or img2 is None: print('Could not open or find the images!') exit(0) -fs = cv.FileStorage(args.homography, cv.FILE_STORAGE_READ) +fs = cv.FileStorage(cv.samples.findFile(args.homography), cv.FILE_STORAGE_READ) homography = fs.getFirstTopLevelNode().mat() ## [load] diff --git 
a/samples/python/tutorial_code/features2D/feature_description/SURF_matching_Demo.py b/samples/python/tutorial_code/features2D/feature_description/SURF_matching_Demo.py index f50e48d858..d7f84814a7 100644 --- a/samples/python/tutorial_code/features2D/feature_description/SURF_matching_Demo.py +++ b/samples/python/tutorial_code/features2D/feature_description/SURF_matching_Demo.py @@ -4,12 +4,12 @@ import numpy as np import argparse parser = argparse.ArgumentParser(description='Code for Feature Detection tutorial.') -parser.add_argument('--input1', help='Path to input image 1.', default='../data/box.png') -parser.add_argument('--input2', help='Path to input image 2.', default='../data/box_in_scene.png') +parser.add_argument('--input1', help='Path to input image 1.', default='box.png') +parser.add_argument('--input2', help='Path to input image 2.', default='box_in_scene.png') args = parser.parse_args() -img1 = cv.imread(args.input1, cv.IMREAD_GRAYSCALE) -img2 = cv.imread(args.input2, cv.IMREAD_GRAYSCALE) +img1 = cv.imread(cv.samples.findFile(args.input1), cv.IMREAD_GRAYSCALE) +img2 = cv.imread(cv.samples.findFile(args.input2), cv.IMREAD_GRAYSCALE) if img1 is None or img2 is None: print('Could not open or find the images!') exit(0) diff --git a/samples/python/tutorial_code/features2D/feature_detection/SURF_detection_Demo.py b/samples/python/tutorial_code/features2D/feature_detection/SURF_detection_Demo.py index 717d9f13c0..7a30e11577 100644 --- a/samples/python/tutorial_code/features2D/feature_detection/SURF_detection_Demo.py +++ b/samples/python/tutorial_code/features2D/feature_detection/SURF_detection_Demo.py @@ -4,10 +4,10 @@ import numpy as np import argparse parser = argparse.ArgumentParser(description='Code for Feature Detection tutorial.') -parser.add_argument('--input', help='Path to input image.', default='../data/box.png') +parser.add_argument('--input', help='Path to input image.', default='box.png') args = parser.parse_args() -src = cv.imread(args.input, 
cv.IMREAD_GRAYSCALE) +src = cv.imread(cv.samples.findFile(args.input), cv.IMREAD_GRAYSCALE) if src is None: print('Could not open or find the image:', args.input) exit(0) diff --git a/samples/python/tutorial_code/features2D/feature_flann_matcher/SURF_FLANN_matching_Demo.py b/samples/python/tutorial_code/features2D/feature_flann_matcher/SURF_FLANN_matching_Demo.py index fbe35675d7..e0a684cd8e 100644 --- a/samples/python/tutorial_code/features2D/feature_flann_matcher/SURF_FLANN_matching_Demo.py +++ b/samples/python/tutorial_code/features2D/feature_flann_matcher/SURF_FLANN_matching_Demo.py @@ -4,12 +4,12 @@ import numpy as np import argparse parser = argparse.ArgumentParser(description='Code for Feature Matching with FLANN tutorial.') -parser.add_argument('--input1', help='Path to input image 1.', default='../data/box.png') -parser.add_argument('--input2', help='Path to input image 2.', default='../data/box_in_scene.png') +parser.add_argument('--input1', help='Path to input image 1.', default='box.png') +parser.add_argument('--input2', help='Path to input image 2.', default='box_in_scene.png') args = parser.parse_args() -img1 = cv.imread(args.input1, cv.IMREAD_GRAYSCALE) -img2 = cv.imread(args.input2, cv.IMREAD_GRAYSCALE) +img1 = cv.imread(cv.samples.findFile(args.input1), cv.IMREAD_GRAYSCALE) +img2 = cv.imread(cv.samples.findFile(args.input2), cv.IMREAD_GRAYSCALE) if img1 is None or img2 is None: print('Could not open or find the images!') exit(0) diff --git a/samples/python/tutorial_code/features2D/feature_homography/SURF_FLANN_matching_homography_Demo.py b/samples/python/tutorial_code/features2D/feature_homography/SURF_FLANN_matching_homography_Demo.py index 8ef500e7af..72cc4633e4 100644 --- a/samples/python/tutorial_code/features2D/feature_homography/SURF_FLANN_matching_homography_Demo.py +++ b/samples/python/tutorial_code/features2D/feature_homography/SURF_FLANN_matching_homography_Demo.py @@ -4,12 +4,12 @@ import numpy as np import argparse parser = 
argparse.ArgumentParser(description='Code for Feature Matching with FLANN tutorial.') -parser.add_argument('--input1', help='Path to input image 1.', default='../data/box.png') -parser.add_argument('--input2', help='Path to input image 2.', default='../data/box_in_scene.png') +parser.add_argument('--input1', help='Path to input image 1.', default='box.png') +parser.add_argument('--input2', help='Path to input image 2.', default='box_in_scene.png') args = parser.parse_args() -img_object = cv.imread(args.input1, cv.IMREAD_GRAYSCALE) -img_scene = cv.imread(args.input2, cv.IMREAD_GRAYSCALE) +img_object = cv.imread(cv.samples.findFile(args.input1), cv.IMREAD_GRAYSCALE) +img_scene = cv.imread(cv.samples.findFile(args.input2), cv.IMREAD_GRAYSCALE) if img_object is None or img_scene is None: print('Could not open or find the images!') exit(0) diff --git a/samples/python/tutorial_code/highgui/trackbar/AddingImagesTrackbar.py b/samples/python/tutorial_code/highgui/trackbar/AddingImagesTrackbar.py index 2ccc978a8e..ffb978049c 100644 --- a/samples/python/tutorial_code/highgui/trackbar/AddingImagesTrackbar.py +++ b/samples/python/tutorial_code/highgui/trackbar/AddingImagesTrackbar.py @@ -15,14 +15,14 @@ def on_trackbar(val): ## [on_trackbar] parser = argparse.ArgumentParser(description='Code for Adding a Trackbar to our applications tutorial.') -parser.add_argument('--input1', help='Path to the first input image.', default='../data/LinuxLogo.jpg') -parser.add_argument('--input2', help='Path to the second input image.', default='../data/WindowsLogo.jpg') +parser.add_argument('--input1', help='Path to the first input image.', default='LinuxLogo.jpg') +parser.add_argument('--input2', help='Path to the second input image.', default='WindowsLogo.jpg') args = parser.parse_args() ## [load] # Read images ( both have to be of the same size and type ) -src1 = cv.imread(args.input1) -src2 = cv.imread(args.input2) +src1 = cv.imread(cv.samples.findFile(args.input1)) +src2 = 
cv.imread(cv.samples.findFile(args.input2)) ## [load] if src1 is None: print('Could not open or find the image: ', args.input1) diff --git a/samples/python/tutorial_code/imgProc/Pyramids/pyramids.py b/samples/python/tutorial_code/imgProc/Pyramids/pyramids.py index 387e26cc94..429156b573 100644 --- a/samples/python/tutorial_code/imgProc/Pyramids/pyramids.py +++ b/samples/python/tutorial_code/imgProc/Pyramids/pyramids.py @@ -11,10 +11,10 @@ def main(argv): * [ESC] -> Close program """) ## [load] - filename = argv[0] if len(argv) > 0 else "../data/chicky_512.png" + filename = argv[0] if len(argv) > 0 else 'chicky_512.png' # Load the image - src = cv.imread(filename) + src = cv.imread(cv.samples.findFile(filename)) # Check if image is loaded fine if src is None: diff --git a/samples/python/tutorial_code/imgProc/Smoothing/smoothing.py b/samples/python/tutorial_code/imgProc/Smoothing/smoothing.py index e7096580ad..a67b92dc5e 100644 --- a/samples/python/tutorial_code/imgProc/Smoothing/smoothing.py +++ b/samples/python/tutorial_code/imgProc/Smoothing/smoothing.py @@ -17,10 +17,10 @@ def main(argv): cv.namedWindow(window_name, cv.WINDOW_AUTOSIZE) # Load the source image - imageName = argv[0] if len(argv) > 0 else "../data/lena.jpg" + imageName = argv[0] if len(argv) > 0 else 'lena.jpg' global src - src = cv.imread(imageName, 1) + src = cv.imread(cv.samples.findFile(imageName)) if src is None: print ('Error opening image') print ('Usage: smoothing.py [image_name -- default ../data/lena.jpg] \n') diff --git a/samples/python/tutorial_code/imgProc/changing_contrast_brightness_image/BasicLinearTransforms.py b/samples/python/tutorial_code/imgProc/changing_contrast_brightness_image/BasicLinearTransforms.py index 28baf3f8a0..6b299a6203 100644 --- a/samples/python/tutorial_code/imgProc/changing_contrast_brightness_image/BasicLinearTransforms.py +++ b/samples/python/tutorial_code/imgProc/changing_contrast_brightness_image/BasicLinearTransforms.py @@ -7,10 +7,10 @@ import argparse # 
Read image given by user ## [basic-linear-transform-load] parser = argparse.ArgumentParser(description='Code for Changing the contrast and brightness of an image! tutorial.') -parser.add_argument('--input', help='Path to input image.', default='../data/lena.jpg') +parser.add_argument('--input', help='Path to input image.', default='lena.jpg') args = parser.parse_args() -image = cv.imread(args.input) +image = cv.imread(cv.samples.findFile(args.input)) if image is None: print('Could not open or find the image: ', args.input) exit(0) diff --git a/samples/python/tutorial_code/imgProc/changing_contrast_brightness_image/changing_contrast_brightness_image.py b/samples/python/tutorial_code/imgProc/changing_contrast_brightness_image/changing_contrast_brightness_image.py index 704df1aecb..b3f316396a 100644 --- a/samples/python/tutorial_code/imgProc/changing_contrast_brightness_image/changing_contrast_brightness_image.py +++ b/samples/python/tutorial_code/imgProc/changing_contrast_brightness_image/changing_contrast_brightness_image.py @@ -44,10 +44,10 @@ def on_gamma_correction_trackbar(val): gammaCorrection() parser = argparse.ArgumentParser(description='Code for Changing the contrast and brightness of an image! 
tutorial.') -parser.add_argument('--input', help='Path to input image.', default='../data/lena.jpg') +parser.add_argument('--input', help='Path to input image.', default='lena.jpg') args = parser.parse_args() -img_original = cv.imread(args.input) +img_original = cv.imread(cv.samples.findFile(args.input)) if img_original is None: print('Could not open or find the image: ', args.input) exit(0) diff --git a/samples/python/tutorial_code/imgProc/erosion_dilatation/morphology_1.py b/samples/python/tutorial_code/imgProc/erosion_dilatation/morphology_1.py index cb3af732e8..502457b471 100644 --- a/samples/python/tutorial_code/imgProc/erosion_dilatation/morphology_1.py +++ b/samples/python/tutorial_code/imgProc/erosion_dilatation/morphology_1.py @@ -42,10 +42,10 @@ def dilatation(val): cv.imshow(title_dilatation_window, dilatation_dst) parser = argparse.ArgumentParser(description='Code for Eroding and Dilating tutorial.') -parser.add_argument('--input', help='Path to input image.', default='../data/LinuxLogo.jpg') +parser.add_argument('--input', help='Path to input image.', default='LinuxLogo.jpg') args = parser.parse_args() -src = cv.imread(args.input) +src = cv.imread(cv.samples.findFile(args.input)) if src is None: print('Could not open or find the image: ', args.input) exit(0) diff --git a/samples/python/tutorial_code/imgProc/hough_line_transform/hough_line_transform.py b/samples/python/tutorial_code/imgProc/hough_line_transform/hough_line_transform.py index 6af4e2865b..9a30923c7b 100644 --- a/samples/python/tutorial_code/imgProc/hough_line_transform/hough_line_transform.py +++ b/samples/python/tutorial_code/imgProc/hough_line_transform/hough_line_transform.py @@ -1,7 +1,7 @@ import cv2 as cv import numpy as np -img = cv.imread('../data/sudoku.png') +img = cv.imread(cv.samples.findFile('sudoku.png')) gray = cv.cvtColor(img,cv.COLOR_BGR2GRAY) edges = cv.Canny(gray,50,150,apertureSize = 3) diff --git 
a/samples/python/tutorial_code/imgProc/hough_line_transform/probabilistic_hough_line_transform.py b/samples/python/tutorial_code/imgProc/hough_line_transform/probabilistic_hough_line_transform.py index 7e510db32b..d01fe8952f 100644 --- a/samples/python/tutorial_code/imgProc/hough_line_transform/probabilistic_hough_line_transform.py +++ b/samples/python/tutorial_code/imgProc/hough_line_transform/probabilistic_hough_line_transform.py @@ -1,7 +1,7 @@ import cv2 as cv import numpy as np -img = cv.imread('../data/sudoku.png') +img = cv.imread(cv.samples.findFile('sudoku.png')) gray = cv.cvtColor(img,cv.COLOR_BGR2GRAY) edges = cv.Canny(gray,50,150,apertureSize = 3) lines = cv.HoughLinesP(edges,1,np.pi/180,100,minLineLength=100,maxLineGap=10) diff --git a/samples/python/tutorial_code/imgProc/opening_closing_hats/morphology_2.py b/samples/python/tutorial_code/imgProc/opening_closing_hats/morphology_2.py index 5dfdece1b6..e0fc758467 100644 --- a/samples/python/tutorial_code/imgProc/opening_closing_hats/morphology_2.py +++ b/samples/python/tutorial_code/imgProc/opening_closing_hats/morphology_2.py @@ -31,10 +31,10 @@ def morphology_operations(val): cv.imshow(title_window, dst) parser = argparse.ArgumentParser(description='Code for More Morphology Transformations tutorial.') -parser.add_argument('--input', help='Path to input image.', default='../data/LinuxLogo.jpg') +parser.add_argument('--input', help='Path to input image.', default='LinuxLogo.jpg') args = parser.parse_args() -src = cv.imread(args.input) +src = cv.imread(cv.samples.findFile(args.input)) if src is None: print('Could not open or find the image: ', args.input) exit(0) diff --git a/samples/python/tutorial_code/imgProc/threshold/threshold.py b/samples/python/tutorial_code/imgProc/threshold/threshold.py index 1ba38126c9..209ff75705 100644 --- a/samples/python/tutorial_code/imgProc/threshold/threshold.py +++ b/samples/python/tutorial_code/imgProc/threshold/threshold.py @@ -23,12 +23,12 @@ def Threshold_Demo(val): 
## [Threshold_Demo] parser = argparse.ArgumentParser(description='Code for Basic Thresholding Operations tutorial.') -parser.add_argument('--input', help='Path to input image.', default='../data/stuff.jpg') +parser.add_argument('--input', help='Path to input image.', default='stuff.jpg') args = parser.parse_args() ## [load] # Load an image -src = cv.imread(args.input) +src = cv.imread(cv.samples.findFile(args.input)) if src is None: print('Could not open or find the image: ', args.input) exit(0) diff --git a/samples/python/tutorial_code/ml/introduction_to_pca/introduction_to_pca.py b/samples/python/tutorial_code/ml/introduction_to_pca/introduction_to_pca.py index dcadbe6b62..c83f7980f5 100644 --- a/samples/python/tutorial_code/ml/introduction_to_pca/introduction_to_pca.py +++ b/samples/python/tutorial_code/ml/introduction_to_pca/introduction_to_pca.py @@ -61,10 +61,10 @@ def getOrientation(pts, img): # Load image parser = argparse.ArgumentParser(description='Code for Introduction to Principal Component Analysis (PCA) tutorial.\ This program demonstrates how to use OpenCV PCA to extract the orientation of an object.') -parser.add_argument('--input', help='Path to input image.', default='../data/pca_test1.jpg') +parser.add_argument('--input', help='Path to input image.', default='pca_test1.jpg') args = parser.parse_args() -src = cv.imread(args.input) +src = cv.imread(cv.samples.findFile(args.input)) # Check if image is loaded successfully if src is None: print('Could not open or find the image: ', args.input) diff --git a/samples/python/tutorial_code/objectDetection/cascade_classifier/objectDetection.py b/samples/python/tutorial_code/objectDetection/cascade_classifier/objectDetection.py index 343c9d66fa..5ac5575a9e 100644 --- a/samples/python/tutorial_code/objectDetection/cascade_classifier/objectDetection.py +++ b/samples/python/tutorial_code/objectDetection/cascade_classifier/objectDetection.py @@ -23,8 +23,8 @@ def detectAndDisplay(frame): cv.imshow('Capture - 
Face detection', frame) parser = argparse.ArgumentParser(description='Code for Cascade Classifier tutorial.') -parser.add_argument('--face_cascade', help='Path to face cascade.', default='../../data/haarcascades/haarcascade_frontalface_alt.xml') -parser.add_argument('--eyes_cascade', help='Path to eyes cascade.', default='../../data/haarcascades/haarcascade_eye_tree_eyeglasses.xml') +parser.add_argument('--face_cascade', help='Path to face cascade.', default='data/haarcascades/haarcascade_frontalface_alt.xml') +parser.add_argument('--eyes_cascade', help='Path to eyes cascade.', default='data/haarcascades/haarcascade_eye_tree_eyeglasses.xml') parser.add_argument('--camera', help='Camera devide number.', type=int, default=0) args = parser.parse_args() @@ -35,10 +35,10 @@ face_cascade = cv.CascadeClassifier() eyes_cascade = cv.CascadeClassifier() #-- 1. Load the cascades -if not face_cascade.load(face_cascade_name): +if not face_cascade.load(cv.samples.findFile(face_cascade_name)): print('--(!)Error loading face cascade') exit(0) -if not eyes_cascade.load(eyes_cascade_name): +if not eyes_cascade.load(cv.samples.findFile(eyes_cascade_name)): print('--(!)Error loading eyes cascade') exit(0) diff --git a/samples/python/tutorial_code/video/background_subtraction/bg_sub.py b/samples/python/tutorial_code/video/background_subtraction/bg_sub.py index dd14c11539..15330fc8b0 100644 --- a/samples/python/tutorial_code/video/background_subtraction/bg_sub.py +++ b/samples/python/tutorial_code/video/background_subtraction/bg_sub.py @@ -4,7 +4,7 @@ import argparse parser = argparse.ArgumentParser(description='This program shows how to use background subtraction methods provided by \ OpenCV. 
You can process both videos and images.') -parser.add_argument('--input', type=str, help='Path to a video or a sequence of image.', default='../data/vtest.avi') +parser.add_argument('--input', type=str, help='Path to a video or a sequence of image.', default='vtest.avi') parser.add_argument('--algo', type=str, help='Background subtraction method (KNN, MOG2).', default='MOG2') args = parser.parse_args() @@ -17,7 +17,7 @@ else: ## [create] ## [capture] -capture = cv.VideoCapture(args.input) +capture = cv.VideoCapture(cv.samples.findFileOrKeep(args.input)) if not capture.isOpened: print('Unable to open: ' + args.input) exit(0) diff --git a/samples/python/video.py b/samples/python/video.py index e4eb2d39e1..50ba4743f9 100755 --- a/samples/python/video.py +++ b/samples/python/video.py @@ -20,8 +20,8 @@ Usage: - synth: for procedural video Synth examples: - synth:bg=../data/lena.jpg:noise=0.1 - synth:class=chess:bg=../data/lena.jpg:noise=0.1:size=640x480 + synth:bg=lena.jpg:noise=0.1 + synth:class=chess:bg=lena.jpg:noise=0.1:size=640x480 Keys: ESC - exit @@ -32,6 +32,8 @@ Keys: # Python 2/3 compatibility from __future__ import print_function +import re + import numpy as np from numpy import pi, sin, cos @@ -49,7 +51,7 @@ class VideoSynthBase(object): self.bg = None self.frame_size = (640, 480) if bg is not None: - self.bg = cv.imread(bg, 1) + self.bg = cv.imread(cv.samples.findFile(bg)) h, w = self.bg.shape[:2] self.frame_size = (w, h) @@ -85,8 +87,8 @@ class VideoSynthBase(object): class Book(VideoSynthBase): def __init__(self, **kw): super(Book, self).__init__(**kw) - backGr = cv.imread('../data/graf1.png') - fgr = cv.imread('../data/box.png') + backGr = cv.imread(cv.samples.findFile('graf1.png')) + fgr = cv.imread(cv.samples.findFile('box.png')) self.render = TestSceneRender(backGr, fgr, speed = 1) def read(self, dst=None): @@ -98,7 +100,7 @@ class Book(VideoSynthBase): class Cube(VideoSynthBase): def __init__(self, **kw): super(Cube, self).__init__(**kw) - 
self.render = TestSceneRender(cv.imread('../data/pca_test1.jpg'), deformation = True, speed = 1) + self.render = TestSceneRender(cv.imread(cv.samples.findFile('pca_test1.jpg')), deformation = True, speed = 1) def read(self, dst=None): noise = np.zeros(self.render.sceneBg.shape, np.int8) @@ -158,10 +160,10 @@ classes = dict(chess=Chess, book=Book, cube=Cube) presets = dict( empty = 'synth:', - lena = 'synth:bg=../data/lena.jpg:noise=0.1', - chess = 'synth:class=chess:bg=../data/lena.jpg:noise=0.1:size=640x480', - book = 'synth:class=book:bg=../data/graf1.png:noise=0.1:size=640x480', - cube = 'synth:class=cube:bg=../data/pca_test1.jpg:noise=0.0:size=640x480' + lena = 'synth:bg=lena.jpg:noise=0.1', + chess = 'synth:class=chess:bg=lena.jpg:noise=0.1:size=640x480', + book = 'synth:class=book:bg=graf1.png:noise=0.1:size=640x480', + cube = 'synth:class=cube:bg=pca_test1.jpg:noise=0.0:size=640x480' ) @@ -169,11 +171,11 @@ def create_capture(source = 0, fallback = presets['chess']): '''source: or '||synth [:= [:...]]' ''' source = str(source).strip() + + # Win32: handle drive letter ('c:', ...) + source = re.sub(r'(^|=)([a-zA-Z]):([/\\a-zA-Z0-9])', r'\1?disk\2?\3', source) chunks = source.split(':') - # handle drive letter ('c:', ...) 
- if len(chunks) > 1 and len(chunks[0]) == 1 and chunks[0].isalpha(): - chunks[1] = chunks[0] + ':' + chunks[1] - del chunks[0] + chunks = [re.sub(r'\?disk([a-zA-Z])\?', r'\1:', s) for s in chunks] source = chunks[0] try: source = int(source) diff --git a/samples/python/watershed.py b/samples/python/watershed.py index 9ca254ed8a..b2bfbddcf9 100755 --- a/samples/python/watershed.py +++ b/samples/python/watershed.py @@ -80,6 +80,6 @@ if __name__ == '__main__': try: fn = sys.argv[1] except: - fn = '../data/fruits.jpg' + fn = 'fruits.jpg' print(__doc__) - App(fn).run() + App(cv.samples.findFile(fn)).run() diff --git a/samples/tapi/clahe.cpp b/samples/tapi/clahe.cpp index 0b7de45a60..ebef65f4e4 100644 --- a/samples/tapi/clahe.cpp +++ b/samples/tapi/clahe.cpp @@ -63,8 +63,9 @@ int main(int argc, char** argv) setTrackbarPos("Tile Size", "CLAHE", cur_tilesize.width); setTrackbarPos("Clip Limit", "CLAHE", cur_clip); - if(infile != "") + if(!infile.empty()) { + infile = samples::findFile(infile); imread(infile).copyTo(frame); if(frame.empty()) { @@ -87,7 +88,10 @@ int main(int argc, char** argv) else imread(infile).copyTo(frame); if(frame.empty()) - continue; + { + waitKey(); + break; + } cvtColor(frame, frame, COLOR_BGR2GRAY); pFilter->apply(frame, outframe); diff --git a/samples/tapi/hog.cpp b/samples/tapi/hog.cpp index 657036cfde..9ed93cb238 100644 --- a/samples/tapi/hog.cpp +++ b/samples/tapi/hog.cpp @@ -61,7 +61,7 @@ int main(int argc, char** argv) "{ h help | | print help message }" "{ i input | | specify input image}" "{ c camera | -1 | enable camera capturing }" - "{ v video | ../data/vtest.avi | use video as input }" + "{ v video | vtest.avi | use video as input }" "{ g gray | | convert image to gray one or not}" "{ s scale | 1.0 | resize the image before detect}" "{ o output | output.avi | specify output path when input is images}"; @@ -107,7 +107,7 @@ App::App(CommandLineParser& cmd) make_gray = cmd.has("gray"); resize_scale = cmd.get("s"); - vdo_source = 
cmd.get("v"); + vdo_source = samples::findFileOrKeep(cmd.get("v")); img_source = cmd.get("i"); output = cmd.get("o"); camera_id = cmd.get("c"); diff --git a/samples/tapi/opencl_custom_kernel.cpp b/samples/tapi/opencl_custom_kernel.cpp index 2395061a9a..c4e893a8a8 100644 --- a/samples/tapi/opencl_custom_kernel.cpp +++ b/samples/tapi/opencl_custom_kernel.cpp @@ -72,7 +72,7 @@ int main(int argc, char** argv) string image_file = args.get("i"); if (!image_file.empty()) { - Mat image = imread(image_file); + Mat image = imread(samples::findFile(image_file)); if (image.empty()) { cout << "error read image: " << image_file << endl; diff --git a/samples/tapi/pyrlk_optical_flow.cpp b/samples/tapi/pyrlk_optical_flow.cpp index bb426cbf76..9c1364d180 100644 --- a/samples/tapi/pyrlk_optical_flow.cpp +++ b/samples/tapi/pyrlk_optical_flow.cpp @@ -96,8 +96,8 @@ int main(int argc, const char* argv[]) } bool defaultPicturesFail = true; - string fname0 = cmd.get("left"); - string fname1 = cmd.get("right"); + string fname0 = samples::findFile(cmd.get("left")); + string fname1 = samples::findFile(cmd.get("right")); string vdofile = cmd.get("video"); string outfile = cmd.get("output"); int points = cmd.get("points"); @@ -105,9 +105,9 @@ int main(int argc, const char* argv[]) int inputName = cmd.get("c"); UMat frame0; - imread(fname0, cv::IMREAD_GRAYSCALE).copyTo(frame0); + imread(fname0, IMREAD_GRAYSCALE).copyTo(frame0); UMat frame1; - imread(fname1, cv::IMREAD_GRAYSCALE).copyTo(frame1); + imread(fname1, IMREAD_GRAYSCALE).copyTo(frame1); vector pts(points); vector nextPts(points); diff --git a/samples/tapi/squares.cpp b/samples/tapi/squares.cpp index 42a40c3f6f..4cdce2251b 100644 --- a/samples/tapi/squares.cpp +++ b/samples/tapi/squares.cpp @@ -156,7 +156,7 @@ int main(int argc, char** argv) cout << "OpenCL was disabled" << endl; } - string inputName = cmd.get("i"); + string inputName = samples::findFile(cmd.get("i")); string outfile = cmd.get("o"); int iterations = 10; @@ -164,7 +164,7 
@@ int main(int argc, char** argv) vector > squares; UMat image; - imread(inputName, 1).copyTo(image); + imread(inputName, IMREAD_COLOR).copyTo(image); if( image.empty() ) { cout << "Couldn't load " << inputName << endl; diff --git a/samples/tapi/ufacedetect.cpp b/samples/tapi/ufacedetect.cpp index 3eeddb94b1..0a6d91c3d6 100644 --- a/samples/tapi/ufacedetect.cpp +++ b/samples/tapi/ufacedetect.cpp @@ -28,9 +28,6 @@ void detectAndDraw( UMat& img, Mat& canvas, CascadeClassifier& cascade, CascadeClassifier& nestedCascade, double scale, bool tryflip ); -string cascadeName = "../../data/haarcascades/haarcascade_frontalface_alt.xml"; -string nestedCascadeName = "../../data/haarcascades/haarcascade_eye_tree_eyeglasses.xml"; - int main( int argc, const char** argv ) { VideoCapture capture; @@ -44,8 +41,8 @@ int main( int argc, const char** argv ) double scale; cv::CommandLineParser parser(argc, argv, - "{cascade|../../data/haarcascades/haarcascade_frontalface_alt.xml|}" - "{nested-cascade|../../data/haarcascades/haarcascade_eye_tree_eyeglasses.xml|}" + "{cascade|data/haarcascades/haarcascade_frontalface_alt.xml|}" + "{nested-cascade|data/haarcascades/haarcascade_eye_tree_eyeglasses.xml|}" "{help h ||}{scale|1|}{try-flip||}{@filename||}" ); if (parser.has("help")) @@ -53,8 +50,8 @@ int main( int argc, const char** argv ) help(); return 0; } - cascadeName = parser.get("cascade"); - nestedCascadeName = parser.get("nested-cascade"); + string cascadeName = samples::findFile(parser.get("cascade")); + string nestedCascadeName = samples::findFileOrKeep(parser.get("nested-cascade")); scale = parser.get("scale"); tryflip = parser.has("try-flip"); inputName = parser.get("@filename"); @@ -66,10 +63,10 @@ int main( int argc, const char** argv ) } if ( !nestedCascade.load( nestedCascadeName ) ) - cerr << "WARNING: Could not load classifier cascade for nested objects" << endl; + cerr << "WARNING: Could not load classifier cascade for nested objects: " << nestedCascadeName << endl; if( 
!cascade.load( cascadeName ) ) { - cerr << "ERROR: Could not load classifier cascade" << endl; + cerr << "ERROR: Could not load classifier cascade: " << cascadeName << endl; help(); return -1; } @@ -84,9 +81,8 @@ int main( int argc, const char** argv ) } else { - if( inputName.empty() ) - inputName = "../data/lena.jpg"; - image = imread( inputName, 1 ).getUMat(ACCESS_READ); + inputName = samples::findFileOrKeep(inputName); + imread(inputName, IMREAD_COLOR).copyTo(image); if( image.empty() ) { if(!capture.open( inputName )) @@ -133,7 +129,7 @@ int main( int argc, const char** argv ) len--; buf[len] = '\0'; cout << "file " << buf << endl; - image = imread( buf, 1 ).getUMat(ACCESS_READ); + imread(samples::findFile(buf), IMREAD_COLOR).copyTo(image); if( !image.empty() ) { detectAndDraw( image, canvas, cascade, nestedCascade, scale, tryflip );