Merge pull request #25197 from invarrow:invbranch-cleanup

Remove OpenVX #25197

Resolves https://github.com/opencv/opencv/issues/24995
Part of OpenCV cleanup: https://github.com/opencv/opencv/issues/25007
Abdul Rahman ArM 8 months ago committed by GitHub
parent 5319772a56
commit 55426ee195
46 changed files (number of changed lines in parentheses):
  1. 3rdparty/openvx/CMakeLists.txt (7)
  2. 3rdparty/openvx/README.md (97)
  3. 3rdparty/openvx/hal/CMakeLists.txt (18)
  4. 3rdparty/openvx/hal/README.md (4)
  5. 3rdparty/openvx/hal/openvx_hal.cpp (1147)
  6. 3rdparty/openvx/hal/openvx_hal.hpp (145)
  7. 3rdparty/openvx/include/ivx.hpp (3277)
  8. 3rdparty/openvx/include/ivx_lib_debug.hpp (42)
  9. CMakeLists.txt (21)
  10. cmake/FindOpenVX.cmake (46)
  11. cmake/OpenCVFindTIMVX.cmake (2)
  12. cmake/checks/openvx_refenum_test.cpp (5)
  13. cmake/templates/cvconfig.h.in (3)
  14. doc/tutorials/introduction/config_reference/config_reference.markdown (1)
  15. modules/core/include/opencv2/core.hpp (1)
  16. modules/core/include/opencv2/core/openvx/ovx_defs.hpp (48)
  17. modules/core/include/opencv2/core/ovx.hpp (28)
  18. modules/core/include/opencv2/core/utils/trace.hpp (1)
  19. modules/core/include/opencv2/core/utils/trace.private.hpp (34)
  20. modules/core/src/lut.cpp (36)
  21. modules/core/src/mean.dispatch.cpp (67)
  22. modules/core/src/minmax.dispatch.cpp (1)
  23. modules/core/src/ovx.cpp (105)
  24. modules/core/src/precomp.hpp (6)
  25. modules/core/src/trace.cpp (32)
  26. modules/features2d/src/fast.cpp (69)
  27. modules/imgproc/src/accum.cpp (84)
  28. modules/imgproc/src/box_filter.dispatch.cpp (78)
  29. modules/imgproc/src/canny.cpp (76)
  30. modules/imgproc/src/deriv.cpp (80)
  31. modules/imgproc/src/featureselect.cpp (96)
  32. modules/imgproc/src/histogram.cpp (108)
  33. modules/imgproc/src/imgwarp.cpp (100)
  34. modules/imgproc/src/median_blur.dispatch.cpp (96)
  35. modules/imgproc/src/pyramids.cpp (83)
  36. modules/imgproc/src/resize.cpp (1)
  37. modules/imgproc/src/smooth.dispatch.cpp (87)
  38. modules/imgproc/src/thresh.cpp (96)
  39. modules/imgproc/test/test_canny.cpp (103)
  40. modules/video/src/lkpyramid.cpp (154)
  41. platforms/js/build_js.py (1)
  42. samples/CMakeLists.txt (4)
  43. samples/openvx/CMakeLists.txt (25)
  44. samples/openvx/no_wrappers.cpp (385)
  45. samples/openvx/wrappers.cpp (214)
  46. samples/openvx/wrappers_video.cpp (250)

@ -1,7 +0,0 @@
if(NOT HAVE_OPENVX)
message(STATUS "OpenVX is not available, disabling openvx-related HAL and stuff")
return()
endif()
set(OPENCV_3P_OPENVX_DIR ${CMAKE_CURRENT_SOURCE_DIR})
add_subdirectory(hal)

@ -1,97 +0,0 @@
# C++ wrappers for OpenVX-1.x C API
## Core ideas:
* lightweight - minimal overhead vs standard C API
* automatic reference counting
* exceptions instead of return codes
* object-oriented design
* (NYI) helpers for user-defined kernels & nodes
* C++ 11 friendly
## Quick start sample
The following short sample gives a basic idea of how the wrappers are used:
```cpp
#include "ivx.hpp"
#include "ivx_lib_debug.hpp" // ivx::debug::*
int main()
{
vx_uint32 width = 640, height = 480;
try
{
ivx::Context context = ivx::Context::create();
ivx::Graph graph = ivx::Graph::create(context);
ivx::Image
gray = ivx::Image::create(context, width, height, VX_DF_IMAGE_U8),
gb = ivx::Image::createVirtual(graph),
res = ivx::Image::create(context, width, height, VX_DF_IMAGE_U8);
context.loadKernels("openvx-debug"); // ivx::debug::*
ivx::debug::fReadImage(context, inputPath, gray);
ivx::Node::create(graph, VX_KERNEL_GAUSSIAN_3x3, gray, gb);
ivx::Node::create(
graph,
VX_KERNEL_THRESHOLD,
gb,
ivx::Threshold::createBinary(context, VX_TYPE_UINT8, 50),
res
);
graph.verify();
graph.process();
ivx::debug::fWriteImage(context, res, "ovx-res-cpp.pgm");
}
catch (const ivx::RuntimeError& e)
{
printf("ErrorRuntime: code = %d(%x), message = %s\n", e.status(), e.status(), e.what());
return e.status();
}
catch (const ivx::WrapperError& e)
{
printf("ErrorWrapper: message = %s\n", e.what());
return -1;
}
catch(const std::exception& e)
{
printf("runtime_error: message = %s\n", e.what());
return -1;
}
return 0;
}
```
## C++ API overview
The wrappers are a **header-only** implementation, which simplifies their integration into projects.
All of the API lives in the `ivx` namespace (e.g. `class ivx::Graph`).
While the C++ API is essentially the same for the underlying OpenVX versions **1.0** and **1.1**, some features have alternative code branches that are selected at **compile time** via `#ifdef` preprocessor directives.
E.g. external ref-counting is implemented for version 1.0, while the native OpenVX mechanism (via `vxRetainReference()` and `vxReleaseXYZ()`) is used for version 1.1.
Some **C++ 11** features (e.g. rvalue references) are also used when their availability is detected at ***compile time***.
C++ exceptions are used to indicate errors instead of return codes. Two exception types are defined: `RuntimeError` is thrown when an OpenVX C call returns an unsuccessful result, and `WrapperError` is thrown when a problem occurs in the wrapper code itself. Both exception classes are derived from `std::exception` (actually from its descendants).
The so-called **OpenVX objects** (e.g. `vx_image`) are represented as C++ classes in the wrappers.
All these classes use automatic ref-counting, which allows development of exception-safe code.
All these classes have `create()` or `createXYZ()` `static` methods for instance creation (e.g. `Image::create()`, `Image::createVirtual()` and `Image::createFromHandle()`).
Most of the wrapped OpenVX functions are represented as methods of the corresponding C++ classes, but in most cases they still accept C "object" types (e.g. `vx_image` or `vx_context`), which allows mixing C and C++ OpenVX API use.
E.g.:
```cpp
class Image
{
static Image create(vx_context context, vx_uint32 width, vx_uint32 height, vx_df_image format);
static Image createVirtual(vx_graph graph, vx_uint32 width = 0, vx_uint32 height = 0, vx_df_image format = VX_DF_IMAGE_VIRT);
// ...
}
```
Instances of all these classes can be converted automatically to the corresponding C "object" types.
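For example, since the wrapper instances convert implicitly to their C handles, they can be passed directly to plain OpenVX C calls. Below is a minimal sketch (not part of the original README) assuming an OpenVX implementation that ships the standard `vxu` immediate-mode API:
```cpp
#include <VX/vxu.h>   // vxuGaussian3x3()
#include "ivx.hpp"

// Blur an image with the C immediate-mode API while the C++ wrappers manage lifetime.
void blurU8(vx_uint32 width, vx_uint32 height)
{
    ivx::Context context = ivx::Context::create();
    ivx::Image src = ivx::Image::create(context, width, height, VX_DF_IMAGE_U8);
    ivx::Image dst = ivx::Image::create(context, width, height, VX_DF_IMAGE_U8);

    // ivx::Context and ivx::Image convert implicitly to vx_context / vx_image,
    // so the plain C function accepts the wrapper objects directly.
    ivx::IVX_CHECK_STATUS( vxuGaussian3x3(context, src, dst) );

    // No explicit vxReleaseImage()/vxReleaseContext() calls are needed:
    // the wrappers drop their references automatically when they go out of scope.
}
```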
For more details, please refer to the C++ wrappers reference manual or directly to their source code.

@ -1,18 +0,0 @@
add_library(openvx_hal STATIC openvx_hal.cpp openvx_hal.hpp ${OPENCV_3P_OPENVX_DIR}/include/ivx.hpp ${OPENCV_3P_OPENVX_DIR}/include/ivx_lib_debug.hpp)
target_include_directories(openvx_hal PUBLIC
${CMAKE_CURRENT_SOURCE_DIR}
${OPENCV_3P_OPENVX_DIR}/include
${CMAKE_SOURCE_DIR}/modules/core/include
${CMAKE_SOURCE_DIR}/modules/imgproc/include
${OPENVX_INCLUDE_DIR})
target_link_libraries(openvx_hal PUBLIC ${OPENVX_LIBRARIES})
set_target_properties(openvx_hal PROPERTIES ARCHIVE_OUTPUT_DIRECTORY ${3P_LIBRARY_OUTPUT_PATH})
if(NOT BUILD_SHARED_LIBS)
ocv_install_target(openvx_hal EXPORT OpenCVModules ARCHIVE DESTINATION ${OPENCV_3P_LIB_INSTALL_PATH} COMPONENT dev)
endif()
set(OPENVX_HAL_FOUND TRUE CACHE INTERNAL "")
set(OPENVX_HAL_VERSION 0.0.1 CACHE INTERNAL "")
set(OPENVX_HAL_LIBRARIES "openvx_hal" CACHE INTERNAL "")
set(OPENVX_HAL_HEADERS "${CMAKE_CURRENT_SOURCE_DIR}/openvx_hal.hpp" CACHE INTERNAL "")
set(OPENVX_HAL_INCLUDE_DIRS "${CMAKE_CURRENT_SOURCE_DIR}" "${OPENCV_3P_OPENVX_DIR}/include" "${OPENVX_INCLUDE_DIR}" CACHE INTERNAL "")

@ -1,4 +0,0 @@
# OpenVX-based HAL implementation
It's built when OpenVX is available (`HAVE_OPENVX`).
To build OpenCV with OpenVX support, add the following **cmake** options:
`-DOPENVX_ROOT=/path/to/prebuilt/openvx -DWITH_OPENVX=YES`

File diff suppressed because it is too large.

@ -1,145 +0,0 @@
#ifndef OPENCV_OPENVX_HAL_HPP_INCLUDED
#define OPENCV_OPENVX_HAL_HPP_INCLUDED
#include "opencv2/core/hal/interface.h"
#include "VX/vx.h"
template <typename T>
int ovx_hal_add(const T *a, size_t astep, const T *b, size_t bstep, T *c, size_t cstep, int w, int h);
template <typename T>
int ovx_hal_sub(const T *a, size_t astep, const T *b, size_t bstep, T *c, size_t cstep, int w, int h);
template <typename T>
int ovx_hal_absdiff(const T *a, size_t astep, const T *b, size_t bstep, T *c, size_t cstep, int w, int h);
template <typename T>
int ovx_hal_and(const T *a, size_t astep, const T *b, size_t bstep, T *c, size_t cstep, int w, int h);
template <typename T>
int ovx_hal_or(const T *a, size_t astep, const T *b, size_t bstep, T *c, size_t cstep, int w, int h);
template <typename T>
int ovx_hal_xor(const T *a, size_t astep, const T *b, size_t bstep, T *c, size_t cstep, int w, int h);
int ovx_hal_not(const uchar *a, size_t astep, uchar *c, size_t cstep, int w, int h);
template <typename T>
int ovx_hal_mul(const T *a, size_t astep, const T *b, size_t bstep, T *c, size_t cstep, int w, int h, double scale);
int ovx_hal_merge8u(const uchar **src_data, uchar *dst_data, int len, int cn);
int ovx_hal_resize(int atype, const uchar *a, size_t astep, int aw, int ah, uchar *b, size_t bstep, int bw, int bh, double inv_scale_x, double inv_scale_y, int interpolation);
int ovx_hal_warpAffine(int atype, const uchar *a, size_t astep, int aw, int ah, uchar *b, size_t bstep, int bw, int bh, const double M[6], int interpolation, int borderType, const double borderValue[4]);
int ovx_hal_warpPerspective(int atype, const uchar *a, size_t astep, int aw, int ah, uchar *b, size_t bstep, int bw, int bh, const double M[9], int interpolation, int borderType, const double borderValue[4]);
struct cvhalFilter2D;
int ovx_hal_filterInit(cvhalFilter2D **filter_context, uchar *kernel_data, size_t kernel_step, int kernel_type, int kernel_width, int kernel_height,
int, int, int src_type, int dst_type, int borderType, double delta, int anchor_x, int anchor_y, bool allowSubmatrix, bool allowInplace);
int ovx_hal_filterFree(cvhalFilter2D *filter_context);
int ovx_hal_filter(cvhalFilter2D *filter_context, uchar *a, size_t astep, uchar *b, size_t bstep, int w, int h, int, int, int, int);
int ovx_hal_sepFilterInit(cvhalFilter2D **filter_context, int src_type, int dst_type,
int kernel_type, uchar *kernelx_data, int kernelx_length, uchar *kernely_data, int kernely_length,
int anchor_x, int anchor_y, double delta, int borderType);
#if VX_VERSION > VX_VERSION_1_0
int ovx_hal_morphInit(cvhalFilter2D **filter_context, int operation, int src_type, int dst_type, int , int ,
int kernel_type, uchar *kernel_data, size_t kernel_step, int kernel_width, int kernel_height, int anchor_x, int anchor_y,
int borderType, const double borderValue[4], int iterations, bool allowSubmatrix, bool allowInplace);
int ovx_hal_morphFree(cvhalFilter2D *filter_context);
int ovx_hal_morph(cvhalFilter2D *filter_context, uchar *a, size_t astep, uchar *b, size_t bstep, int w, int h, int , int , int , int , int , int , int , int );
#endif // 1.0 guard
int ovx_hal_cvtBGRtoBGR(const uchar * a, size_t astep, uchar * b, size_t bstep, int w, int h, int depth, int acn, int bcn, bool swapBlue);
int ovx_hal_cvtGraytoBGR(const uchar * a, size_t astep, uchar * b, size_t bstep, int w, int h, int depth, int bcn);
int ovx_hal_cvtTwoPlaneYUVtoBGR(const uchar * a, size_t astep, uchar * b, size_t bstep, int w, int h, int bcn, bool swapBlue, int uIdx);
int ovx_hal_cvtTwoPlaneYUVtoBGREx(const uchar * a, size_t astep, const uchar * b, size_t bstep, uchar * c, size_t cstep, int w, int h, int bcn, bool swapBlue, int uIdx);
int ovx_hal_cvtThreePlaneYUVtoBGR(const uchar * a, size_t astep, uchar * b, size_t bstep, int w, int h, int bcn, bool swapBlue, int uIdx);
int ovx_hal_cvtBGRtoThreePlaneYUV(const uchar * a, size_t astep, uchar * b, size_t bstep, int w, int h, int acn, bool swapBlue, int uIdx);
int ovx_hal_cvtOnePlaneYUVtoBGR(const uchar * a, size_t astep, uchar * b, size_t bstep, int w, int h, int bcn, bool swapBlue, int uIdx, int ycn);
int ovx_hal_integral(int depth, int sdepth, int, const uchar * a, size_t astep, uchar * b, size_t bstep, uchar * c, size_t, uchar * d, size_t, int w, int h, int cn);
//==================================================================================================
// functions redefinition
// ...
#undef cv_hal_add8u
#define cv_hal_add8u ovx_hal_add<uchar>
#undef cv_hal_add16s
#define cv_hal_add16s ovx_hal_add<short>
#undef cv_hal_sub8u
#define cv_hal_sub8u ovx_hal_sub<uchar>
#undef cv_hal_sub16s
#define cv_hal_sub16s ovx_hal_sub<short>
#undef cv_hal_absdiff8u
#define cv_hal_absdiff8u ovx_hal_absdiff<uchar>
#undef cv_hal_absdiff16s
#define cv_hal_absdiff16s ovx_hal_absdiff<short>
#undef cv_hal_and8u
#define cv_hal_and8u ovx_hal_and<uchar>
#undef cv_hal_or8u
#define cv_hal_or8u ovx_hal_or<uchar>
#undef cv_hal_xor8u
#define cv_hal_xor8u ovx_hal_xor<uchar>
#undef cv_hal_not8u
#define cv_hal_not8u ovx_hal_not
#undef cv_hal_mul8u
#define cv_hal_mul8u ovx_hal_mul<uchar>
#undef cv_hal_mul16s
#define cv_hal_mul16s ovx_hal_mul<short>
#undef cv_hal_merge8u
#define cv_hal_merge8u ovx_hal_merge8u
//#undef cv_hal_resize
//#define cv_hal_resize ovx_hal_resize
//OpenVX warps use a round-to-zero policy, at least in the sample implementation,
//while OpenCV requires round-to-nearest
//#undef cv_hal_warpAffine
//#define cv_hal_warpAffine ovx_hal_warpAffine
//#undef cv_hal_warpPerspective
//#define cv_hal_warpPerspective ovx_hal_warpPerspective
#undef cv_hal_filterInit
#define cv_hal_filterInit ovx_hal_filterInit
#undef cv_hal_filter
#define cv_hal_filter ovx_hal_filter
#undef cv_hal_filterFree
#define cv_hal_filterFree ovx_hal_filterFree
//#undef cv_hal_sepFilterInit
//#define cv_hal_sepFilterInit ovx_hal_sepFilterInit
//#undef cv_hal_sepFilter
//#define cv_hal_sepFilter ovx_hal_filter
//#undef cv_hal_sepFilterFree
//#define cv_hal_sepFilterFree ovx_hal_filterFree
#if VX_VERSION > VX_VERSION_1_0
#undef cv_hal_morphInit
#define cv_hal_morphInit ovx_hal_morphInit
#undef cv_hal_morph
#define cv_hal_morph ovx_hal_morph
#undef cv_hal_morphFree
#define cv_hal_morphFree ovx_hal_morphFree
#endif // 1.0 guard
#undef cv_hal_cvtBGRtoBGR
#define cv_hal_cvtBGRtoBGR ovx_hal_cvtBGRtoBGR
#undef cv_hal_cvtGraytoBGR
#define cv_hal_cvtGraytoBGR ovx_hal_cvtGraytoBGR
#undef cv_hal_cvtTwoPlaneYUVtoBGR
#define cv_hal_cvtTwoPlaneYUVtoBGR ovx_hal_cvtTwoPlaneYUVtoBGR
#undef cv_hal_cvtTwoPlaneYUVtoBGREx
#define cv_hal_cvtTwoPlaneYUVtoBGREx ovx_hal_cvtTwoPlaneYUVtoBGREx
#undef cv_hal_cvtThreePlaneYUVtoBGR
#define cv_hal_cvtThreePlaneYUVtoBGR ovx_hal_cvtThreePlaneYUVtoBGR
#undef cv_hal_cvtBGRtoThreePlaneYUV
#define cv_hal_cvtBGRtoThreePlaneYUV ovx_hal_cvtBGRtoThreePlaneYUV
#undef cv_hal_cvtOnePlaneYUVtoBGR
#define cv_hal_cvtOnePlaneYUVtoBGR ovx_hal_cvtOnePlaneYUVtoBGR
#undef cv_hal_integral
#define cv_hal_integral ovx_hal_integral
#endif

File diff suppressed because it is too large.

@ -1,42 +0,0 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
// Copyright (C) 2016, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
/*
C++ wrappers over OpenVX 1.x C API ("openvx-debug" module)
Details: TBD
*/
#pragma once
#ifndef IVX_LIB_DEBUG_HPP
#define IVX_LIB_DEBUG_HPP
#include "ivx.hpp"
namespace ivx
{
namespace debug
{
/*
* "openvx-debug" module
*/
//
void fReadImage(vx_context c, const std::string& path, vx_image img)
{
IVX_CHECK_STATUS( vxuFReadImage(c, (vx_char*)path.c_str(), img) );
}
//
void fWriteImage(vx_context c, vx_image img, const std::string& path)
{
IVX_CHECK_STATUS( vxuFWriteImage(c, img, (vx_char*)path.c_str()) );
}
} // namespace debug
} // namespace ivx
#endif //IVX_LIB_DEBUG_HPP

@ -332,9 +332,6 @@ OCV_OPTION(WITH_OPENEXR "Include ILM support via OpenEXR" ((WIN32 OR ANDROID OR
OCV_OPTION(WITH_OPENGL "Include OpenGL support" OFF
VISIBLE_IF NOT ANDROID AND NOT WINRT
VERIFY HAVE_OPENGL)
OCV_OPTION(WITH_OPENVX "Include OpenVX support" OFF
VISIBLE_IF TRUE
VERIFY HAVE_OPENVX)
OCV_OPTION(WITH_OPENNI "Include OpenNI support" OFF
VISIBLE_IF NOT ANDROID AND NOT IOS AND NOT XROS AND NOT WINRT
VERIFY HAVE_OPENNI)
@ -913,10 +910,6 @@ if(WITH_VTK)
include(cmake/OpenCVDetectVTK.cmake)
endif()
if(WITH_OPENVX)
include(cmake/FindOpenVX.cmake)
endif()
if(WITH_QUIRC)
add_subdirectory(3rdparty/quirc)
set(HAVE_QUIRC TRUE)
@ -950,12 +943,6 @@ if(NOT DEFINED OpenCV_HAL)
set(OpenCV_HAL "OpenCV_HAL")
endif()
if(HAVE_OPENVX)
if(NOT ";${OpenCV_HAL};" MATCHES ";openvx;")
set(OpenCV_HAL "openvx;${OpenCV_HAL}")
endif()
endif()
if(WITH_CAROTENE)
ocv_debug_message(STATUS "Enable carotene acceleration")
if(NOT ";${OpenCV_HAL};" MATCHES ";carotene;")
@ -980,10 +967,6 @@ foreach(hal ${OpenCV_HAL})
else()
message(STATUS "Carotene: NEON is not available, disabling carotene...")
endif()
elseif(hal STREQUAL "openvx")
add_subdirectory(3rdparty/openvx)
ocv_hal_register(OPENVX_HAL_LIBRARIES OPENVX_HAL_HEADERS OPENVX_HAL_INCLUDE_DIRS)
list(APPEND OpenCV_USED_HAL "openvx (ver ${OPENVX_HAL_VERSION})")
else()
ocv_debug_message(STATUS "OpenCV HAL: ${hal} ...")
ocv_clear_vars(OpenCV_HAL_LIBRARIES OpenCV_HAL_HEADERS OpenCV_HAL_INCLUDE_DIRS)
@ -1751,10 +1734,6 @@ if(WITH_EIGEN OR HAVE_EIGEN)
status(" Eigen:" HAVE_EIGEN THEN "YES (ver ${EIGEN_WORLD_VERSION}.${EIGEN_MAJOR_VERSION}.${EIGEN_MINOR_VERSION})" ELSE NO)
endif()
if(WITH_OPENVX OR HAVE_OPENVX)
status(" OpenVX:" HAVE_OPENVX THEN "YES (${OPENVX_LIBRARIES})" ELSE "NO")
endif()
status(" Custom HAL:" OpenCV_USED_HAL THEN "YES (${OpenCV_USED_HAL})" ELSE "NO")
foreach(s ${CUSTOM_STATUS})

@ -1,46 +0,0 @@
ocv_clear_vars(HAVE_OPENVX)
set(OPENVX_ROOT "" CACHE PATH "OpenVX install directory")
set(OPENVX_LIB_CANDIDATES "openvx;vxu" CACHE STRING "OpenVX library candidates list")
function(find_openvx_libs _found)
foreach(one ${OPENVX_LIB_CANDIDATES})
find_library(OPENVX_${one}_LIBRARY ${one} PATHS "${OPENVX_ROOT}/lib" "${OPENVX_ROOT}/bin")
if(OPENVX_${one}_LIBRARY)
list(APPEND _list ${OPENVX_${one}_LIBRARY})
endif()
endforeach()
set(${_found} ${_list} PARENT_SCOPE)
endfunction()
if(OPENVX_ROOT)
find_path(OPENVX_INCLUDE_DIR "VX/vx.h" PATHS "${OPENVX_ROOT}/include" DOC "OpenVX include path")
if(NOT DEFINED OPENVX_LIBRARIES)
find_openvx_libs(found)
if(found)
set(OPENVX_LIBRARIES "${found}" CACHE STRING "OpenVX libraries")
endif()
endif()
endif()
if(OPENVX_INCLUDE_DIR AND OPENVX_LIBRARIES)
set(HAVE_OPENVX TRUE)
try_compile(OPENVX_RENAMED_REF
"${OpenCV_BINARY_DIR}"
"${OpenCV_SOURCE_DIR}/cmake/checks/openvx_refenum_test.cpp"
CMAKE_FLAGS "-DINCLUDE_DIRECTORIES:STRING=${OPENVX_INCLUDE_DIR}"
LINK_LIBRARIES ${OPENVX_LIBRARIES}
OUTPUT_VARIABLE OUTPUT
)
if(OPENVX_RENAMED_REF)
add_definitions(-DIVX_RENAMED_REFS=1)
message(STATUS "OpenVX: Checking reference attribute name convention... New")
else()
message(STATUS "OpenVX: Checking reference attribute name convention... Old")
endif()
endif()
if(NOT HAVE_OPENVX)
ocv_clear_vars(HAVE_OPENVX OPENVX_LIBRARIES OPENVX_INCLUDE_DIR)
endif()

@ -1,6 +1,6 @@
set(TIMVX_INSTALL_DIR "" CACHE PATH "Path to libtim-vx installation")
set(VIVANTE_SDK_DIR "" CACHE PATH "Path to VIVANTE SDK needed by TIM-VX.")
set(VIVANTE_SDK_LIB_CANDIDATES "OpenVX;VSC;GAL;ArchModelSw;NNArchPerf" CACHE STRING "VIVANTE SDK library candidates")
set(VIVANTE_SDK_LIB_CANDIDATES "VSC;GAL;ArchModelSw;NNArchPerf" CACHE STRING "VIVANTE SDK library candidates")
# Ensure VIVANTE SDK library candidates are present in given search path
function(find_vivante_sdk_libs _viv_notfound _viv_search_path)

@ -1,5 +0,0 @@
#include <VX/vx.h>
int main()
{
return VX_REFERENCE_COUNT == VX_REFERENCE_TYPE ? VX_REFERENCE_NAME : 0;
}

@ -137,9 +137,6 @@
/* Library was compiled with functions instrumentation */
#cmakedefine ENABLE_INSTRUMENTATION
/* OpenVX */
#cmakedefine HAVE_OPENVX
/* OpenCV trace utilities */
#cmakedefine OPENCV_TRACE

@ -623,7 +623,6 @@ Following build options are utilized in `opencv_contrib` modules, as stated [pre
`WITH_CAROTENE`
`WITH_CPUFEATURES`
`WITH_EIGEN`
`WITH_OPENVX`
`WITH_DIRECTX`
`WITH_VA`
`WITH_LAPACK`

@ -3404,6 +3404,5 @@ struct ParamType<_Tp, typename std::enable_if< std::is_enum<_Tp>::value >::type>
#include "opencv2/core/cvstd.inl.hpp"
#include "opencv2/core/utility.hpp"
#include "opencv2/core/optim.hpp"
#include "opencv2/core/ovx.hpp"
#endif /*OPENCV_CORE_HPP*/

@ -1,48 +0,0 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
// Copyright (C) 2016, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
// OpenVX related definitions and declarations
#pragma once
#ifndef OPENCV_OVX_DEFS_HPP
#define OPENCV_OVX_DEFS_HPP
#include "cvconfig.h"
// utility macro for running OpenVX-based implementations
#ifdef HAVE_OPENVX
#define IVX_HIDE_INFO_WARNINGS
#define IVX_USE_OPENCV
#include "ivx.hpp"
namespace cv{
namespace ovx{
// Get common thread local OpenVX context
CV_EXPORTS_W ivx::Context& getOpenVXContext();
template <int kernel_id> inline bool skipSmallImages(int w, int h) { return w*h < 3840 * 2160; }
}}
#define CV_OVX_RUN(condition, func, ...) \
if (cv::useOpenVX() && (condition) && func) \
{ \
return __VA_ARGS__; \
}
#else
#define CV_OVX_RUN(condition, func, ...)
#endif // HAVE_OPENVX
// Throw an error in debug mode or try another implementation in release
#ifdef _DEBUG
#define VX_DbgThrow(s) CV_Error(cv::Error::StsInternal, (s))
#else
#define VX_DbgThrow(s) return false
#endif
#endif // OPENCV_OVX_DEFS_HPP
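Note on usage (not part of the removed header): call sites guarded their OpenVX-backed helpers with `CV_OVX_RUN`, falling through to the default code path whenever OpenVX was disabled, the condition failed, or the helper returned `false`. A hedged sketch of that pattern follows; `openvx_box3x3` and `box3x3_default` are hypothetical helpers, while the real call sites appear in the module diffs further below.
```cpp
#include "opencv2/imgproc.hpp"
#include "opencv2/core/openvx/ovx_defs.hpp"   // CV_OVX_RUN, cv::ovx::skipSmallImages

// Hypothetical OpenVX-backed path; returning false makes the caller fall through.
static bool openvx_box3x3(const cv::Mat& src, cv::Mat& dst)
{
    (void)src; (void)dst;
    return false; // stub for illustration only
}

// Hypothetical plain fallback implementation.
static void box3x3_default(const cv::Mat& src, cv::Mat& dst)
{
    cv::blur(src, dst, cv::Size(3, 3));
}

void box3x3(const cv::Mat& src, cv::Mat& dst)
{
    CV_OVX_RUN(
        src.type() == CV_8UC1 &&
        !cv::ovx::skipSmallImages<VX_KERNEL_BOX_3x3>(src.cols, src.rows),
        openvx_box3x3(src, dst))   // on success the macro returns from box3x3()

    box3x3_default(src, dst);      // reached only when the OpenVX path was not taken
}
```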

@ -1,28 +0,0 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
// Copyright (C) 2016, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
// OpenVX related definitions and declarations
#pragma once
#ifndef OPENCV_OVX_HPP
#define OPENCV_OVX_HPP
#include "cvdef.h"
namespace cv
{
/// Check if use of OpenVX is possible
CV_EXPORTS_W bool haveOpenVX();
/// Check if use of OpenVX is enabled
CV_EXPORTS_W bool useOpenVX();
/// Enable/disable use of OpenVX
CV_EXPORTS_W void setUseOpenVX(bool flag);
} // namespace cv
#endif // OPENCV_OVX_HPP
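For reference (not part of this diff, and assuming a build from before this removal where `opencv2/core/ovx.hpp` still exists): the run-time control API declared above was typically driven from application code along these lines.
```cpp
#include <opencv2/core/ovx.hpp>
#include <cstdio>

int main()
{
    // Report whether OpenCV was built with OpenVX support and a context could be created.
    std::printf("OpenVX available: %s\n", cv::haveOpenVX() ? "yes" : "no");

    // Dispatch to OpenVX is enabled by default when available; it can be switched
    // off at run time, e.g. to benchmark the plain code paths.
    cv::setUseOpenVX(false);
    std::printf("OpenVX in use: %s\n", cv::useOpenVX() ? "yes" : "no");
    return 0;
}
```
Because `useOpenVX()` is backed by thread-local state (see the `CoreTLSData` changes below), the setting applies per thread.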

@ -106,7 +106,6 @@ enum RegionLocationFlag {
REGION_FLAG_IMPL_IPP = (1 << 16), ///< region is part of IPP code path
REGION_FLAG_IMPL_OPENCL = (2 << 16), ///< region is part of OpenCL code path
REGION_FLAG_IMPL_OPENVX = (3 << 16), ///< region is part of OpenVX code path
REGION_FLAG_IMPL_MASK = (15 << 16),

@ -79,9 +79,6 @@ struct RegionStatistics
#ifdef HAVE_OPENCL
int64 durationImplOpenCL;
#endif
#ifdef HAVE_OPENVX
int64 durationImplOpenVX;
#endif
RegionStatistics() :
currentSkippedRegions(0),
@ -91,9 +88,6 @@ struct RegionStatistics
#endif
#ifdef HAVE_OPENCL
,durationImplOpenCL(0)
#endif
#ifdef HAVE_OPENVX
,durationImplOpenVX(0)
#endif
{}
@ -106,9 +100,6 @@ struct RegionStatistics
#endif
#ifdef HAVE_OPENCL
result.durationImplOpenCL = durationImplOpenCL; durationImplOpenCL = 0;
#endif
#ifdef HAVE_OPENVX
result.durationImplOpenVX = durationImplOpenVX; durationImplOpenVX = 0;
#endif
}
@ -121,9 +112,6 @@ struct RegionStatistics
#endif
#ifdef HAVE_OPENCL
durationImplOpenCL += stat.durationImplOpenCL;
#endif
#ifdef HAVE_OPENVX
durationImplOpenVX += stat.durationImplOpenVX;
#endif
}
@ -135,9 +123,6 @@ struct RegionStatistics
#endif
#ifdef HAVE_OPENCL
durationImplOpenCL = (int64)(durationImplOpenCL * c);
#endif
#ifdef HAVE_OPENVX
durationImplOpenVX = (int64)(durationImplOpenVX * c);
#endif
}
};
@ -152,9 +137,6 @@ std::ostream& operator<<(std::ostream& out, const RegionStatistics& stat)
#endif
#ifdef HAVE_OPENCL
<< " durationImplOpenCL=" << stat.durationImplOpenCL
#endif
#ifdef HAVE_OPENVX
<< " durationImplOpenVX=" << stat.durationImplOpenVX
#endif
;
return out;
@ -169,9 +151,6 @@ struct RegionStatisticsStatus
#ifdef HAVE_OPENCL
int ignoreDepthImplOpenCL;
#endif
#ifdef HAVE_OPENVX
int ignoreDepthImplOpenVX;
#endif
RegionStatisticsStatus() { reset(); }
@ -183,9 +162,6 @@ struct RegionStatisticsStatus
#endif
#ifdef HAVE_OPENCL
ignoreDepthImplOpenCL = 0;
#endif
#ifdef HAVE_OPENVX
ignoreDepthImplOpenVX = 0;
#endif
}
@ -199,9 +175,6 @@ struct RegionStatisticsStatus
#endif
#ifdef HAVE_OPENCL
ignoreDepthImplOpenCL = src.ignoreDepthImplOpenCL ? 1 : 0;
#endif
#ifdef HAVE_OPENVX
ignoreDepthImplOpenVX = src.ignoreDepthImplOpenVX ? 1 : 0;
#endif
}
@ -222,10 +195,6 @@ std::ostream& operator<<(std::ostream& out, const RegionStatisticsStatus& s)
#ifdef HAVE_OPENCL
if (s.ignoreDepthImplOpenCL)
out << " OpenCL=" << s.ignoreDepthImplOpenCL;
#endif
#ifdef HAVE_OPENVX
if (s.ignoreDepthImplOpenVX)
out << " OpenVX=" << s.ignoreDepthImplOpenVX;
#endif
out << "}";
return out;
@ -389,8 +358,7 @@ public:
enum OptimizationPath {
CODE_PATH_PLAIN = 0,
CODE_PATH_IPP,
CODE_PATH_OPENCL,
CODE_PATH_OPENVX
CODE_PATH_OPENCL
};
#ifdef OPENCV_WITH_ITT

@ -6,7 +6,6 @@
#include "precomp.hpp"
#include "opencl_kernels_core.hpp"
#include "convert.hpp"
#include "opencv2/core/openvx/ovx_defs.hpp"
/****************************************************************************************\
* LUT Transform *
@ -100,39 +99,6 @@ static bool ocl_LUT(InputArray _src, InputArray _lut, OutputArray _dst)
#endif
#ifdef HAVE_OPENVX
static bool openvx_LUT(Mat src, Mat dst, Mat _lut)
{
if (src.type() != CV_8UC1 || dst.type() != src.type() || _lut.type() != src.type() || !_lut.isContinuous())
return false;
try
{
ivx::Context ctx = ovx::getOpenVXContext();
ivx::Image
ia = ivx::Image::createFromHandle(ctx, VX_DF_IMAGE_U8,
ivx::Image::createAddressing(src.cols, src.rows, 1, (vx_int32)(src.step)), src.data),
ib = ivx::Image::createFromHandle(ctx, VX_DF_IMAGE_U8,
ivx::Image::createAddressing(dst.cols, dst.rows, 1, (vx_int32)(dst.step)), dst.data);
ivx::LUT lut = ivx::LUT::create(ctx);
lut.copyFrom(_lut);
ivx::IVX_CHECK_STATUS(vxuTableLookup(ctx, ia, lut, ib));
}
catch (const ivx::RuntimeError& e)
{
VX_DbgThrow(e.what());
}
catch (const ivx::WrapperError& e)
{
VX_DbgThrow(e.what());
}
return true;
}
#endif
#if defined(HAVE_IPP)
#if !IPP_DISABLE_PERF_LUT // there are no performance benefits (PR #2653)
namespace ipp {
@ -374,8 +340,6 @@ void cv::LUT( InputArray _src, InputArray _lut, OutputArray _dst )
_dst.createSameSize(_src, CV_MAKETYPE(_lut.depth(), cn));
Mat dst = _dst.getMat();
CV_OVX_RUN(!ovx::skipSmallImages<VX_KERNEL_TABLE_LOOKUP>(src.cols, src.rows),
openvx_LUT(src, dst, lut))
#if !IPP_DISABLE_PERF_LUT
CV_IPP_RUN(_src.dims() <= 2, ipp_lut(src, lut, dst));

@ -5,7 +5,6 @@
#include "precomp.hpp"
#include "opencl_kernels_core.hpp"
#include "opencv2/core/openvx/ovx_defs.hpp"
#include "stat.hpp"
#ifndef OPENCV_IPP_MEAN
@ -319,69 +318,6 @@ static bool ocl_meanStdDev( InputArray _src, OutputArray _mean, OutputArray _sdv
}
#endif
#ifdef HAVE_OPENVX
static bool openvx_meanStdDev(Mat& src, OutputArray _mean, OutputArray _sdv, Mat& mask)
{
size_t total_size = src.total();
int rows = src.size[0], cols = rows ? (int)(total_size / rows) : 0;
if (src.type() != CV_8UC1|| !mask.empty() ||
(src.dims != 2 && !(src.isContinuous() && cols > 0 && (size_t)rows*cols == total_size))
)
return false;
try
{
ivx::Context ctx = ovx::getOpenVXContext();
#ifndef VX_VERSION_1_1
if (ctx.vendorID() == VX_ID_KHRONOS)
return false; // Do not use OpenVX meanStdDev estimation for sample 1.0.1 implementation due to lack of accuracy
#endif
ivx::Image
ia = ivx::Image::createFromHandle(ctx, VX_DF_IMAGE_U8,
ivx::Image::createAddressing(cols, rows, 1, (vx_int32)(src.step[0])), src.ptr());
vx_float32 mean_temp, stddev_temp;
ivx::IVX_CHECK_STATUS(vxuMeanStdDev(ctx, ia, &mean_temp, &stddev_temp));
if (_mean.needed())
{
if (!_mean.fixedSize())
_mean.create(1, 1, CV_64F, -1, true);
Mat mean = _mean.getMat();
CV_Assert(mean.type() == CV_64F && mean.isContinuous() &&
(mean.cols == 1 || mean.rows == 1) && mean.total() >= 1);
double *pmean = mean.ptr<double>();
pmean[0] = mean_temp;
for (int c = 1; c < (int)mean.total(); c++)
pmean[c] = 0;
}
if (_sdv.needed())
{
if (!_sdv.fixedSize())
_sdv.create(1, 1, CV_64F, -1, true);
Mat stddev = _sdv.getMat();
CV_Assert(stddev.type() == CV_64F && stddev.isContinuous() &&
(stddev.cols == 1 || stddev.rows == 1) && stddev.total() >= 1);
double *pstddev = stddev.ptr<double>();
pstddev[0] = stddev_temp;
for (int c = 1; c < (int)stddev.total(); c++)
pstddev[c] = 0;
}
}
catch (const ivx::RuntimeError & e)
{
VX_DbgThrow(e.what());
}
catch (const ivx::WrapperError & e)
{
VX_DbgThrow(e.what());
}
return true;
}
#endif
#ifdef HAVE_IPP
static bool ipp_meanStdDev(Mat& src, OutputArray _mean, OutputArray _sdv, Mat& mask)
@ -532,9 +468,6 @@ void meanStdDev(InputArray _src, OutputArray _mean, OutputArray _sdv, InputArray
Mat src = _src.getMat(), mask = _mask.getMat();
CV_OVX_RUN(!ovx::skipSmallImages<VX_KERNEL_MEAN_STDDEV>(src.cols, src.rows),
openvx_meanStdDev(src, _mean, _sdv, mask))
CV_IPP_RUN(IPP_VERSION_X100 >= 700, ipp_meanStdDev(src, _mean, _sdv, mask));
int k, cn = src.channels(), depth = src.depth();

@ -5,7 +5,6 @@
#include "precomp.hpp"
#include "opencl_kernels_core.hpp"
#include "opencv2/core/openvx/ovx_defs.hpp"
#include "stat.hpp"
#include "opencv2/core/detail/dispatch_helper.impl.hpp"
#include <algorithm>

@ -1,105 +0,0 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
// Copyright (C) 2016, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
// OpenVX related functions
#include "precomp.hpp"
#include "opencv2/core/utils/tls.hpp"
#include "opencv2/core/ovx.hpp"
#include "opencv2/core/openvx/ovx_defs.hpp"
namespace cv
{
namespace ovx
{
#ifdef HAVE_OPENVX
// Simple TLSData<ivx::Context> doesn't work, because default constructor doesn't create any OpenVX context.
struct OpenVXTLSData
{
OpenVXTLSData() : ctx(ivx::Context::create()) {}
ivx::Context ctx;
};
static TLSData<OpenVXTLSData>& getOpenVXTLSData()
{
CV_SINGLETON_LAZY_INIT_REF(TLSData<OpenVXTLSData>, new TLSData<OpenVXTLSData>())
}
struct OpenVXCleanupFunctor
{
~OpenVXCleanupFunctor() { getOpenVXTLSData().cleanup(); }
};
static OpenVXCleanupFunctor g_openvx_cleanup_functor;
ivx::Context& getOpenVXContext()
{
return getOpenVXTLSData().get()->ctx;
}
#endif
} // namespace
bool haveOpenVX()
{
#ifdef HAVE_OPENVX
static int g_haveOpenVX = -1;
if(g_haveOpenVX < 0)
{
try
{
ivx::Context context = ovx::getOpenVXContext();
vx_uint16 vComp = ivx::compiledWithVersion();
vx_uint16 vCurr = context.version();
g_haveOpenVX =
VX_VERSION_MAJOR(vComp) == VX_VERSION_MAJOR(vCurr) &&
VX_VERSION_MINOR(vComp) == VX_VERSION_MINOR(vCurr)
? 1 : 0;
}
catch(const ivx::WrapperError&)
{ g_haveOpenVX = 0; }
catch(const ivx::RuntimeError&)
{ g_haveOpenVX = 0; }
}
return g_haveOpenVX == 1;
#else
return false;
#endif
}
bool useOpenVX()
{
#ifdef HAVE_OPENVX
CoreTLSData& data = getCoreTlsData();
if (data.useOpenVX < 0)
{
// enabled (if available) by default
data.useOpenVX = haveOpenVX() ? 1 : 0;
}
return data.useOpenVX > 0;
#else
return false;
#endif
}
void setUseOpenVX(bool flag)
{
#ifdef HAVE_OPENVX
if( haveOpenVX() )
{
CoreTLSData& data = getCoreTlsData();
data.useOpenVX = flag ? 1 : 0;
}
#else
CV_Assert(!flag && "OpenVX support isn't enabled at compile time");
#endif
}
} // namespace cv

@ -353,9 +353,6 @@ struct CoreTLSData
//#endif
useIPP(-1),
useIPP_NE(-1)
#ifdef HAVE_OPENVX
,useOpenVX(-1)
#endif
{}
RNG rng;
@ -366,9 +363,6 @@ struct CoreTLSData
//#endif
int useIPP; // 1 - use, 0 - do not use, -1 - auto/not initialized
int useIPP_NE; // 1 - use, 0 - do not use, -1 - auto/not initialized
#ifdef HAVE_OPENVX
int useOpenVX; // 1 - use, 0 - do not use, -1 - auto/not initialized
#endif
};
CoreTLSData& getCoreTlsData();

@ -171,10 +171,6 @@ public:
#ifdef HAVE_OPENCL
if (result.durationImplOpenCL)
ok &= this->printf(",tOCL=%lld", (long long int)result.durationImplOpenCL);
#endif
#ifdef HAVE_OPENVX
if (result.durationImplOpenVX)
ok &= this->printf(",tOVX=%lld", (long long int)result.durationImplOpenVX);
#endif
ok &= this->printf("\n");
return ok;
@ -359,10 +355,6 @@ void Region::Impl::leaveRegion(TraceManagerThreadLocal& ctx)
#ifdef HAVE_OPENCL
if (result.durationImplOpenCL)
__itt_metadata_add(domain, itt_id, __itt_string_handle_create("tOpenCL"), __itt_metadata_u64, 1, &result.durationImplOpenCL);
#endif
#ifdef HAVE_OPENVX
if (result.durationImplOpenVX)
__itt_metadata_add(domain, itt_id, __itt_string_handle_create("tOpenVX"), __itt_metadata_u64, 1, &result.durationImplOpenVX);
#endif
__itt_task_end(domain);
}
@ -492,12 +484,6 @@ Region::Region(const LocationStaticStorage& location) :
if (!ctx.stat_status.ignoreDepthImplOpenCL)
ctx.stat_status.ignoreDepthImplOpenCL = currentDepth;
break;
#endif
#ifdef HAVE_OPENVX
case REGION_FLAG_IMPL_OPENVX:
if (!ctx.stat_status.ignoreDepthImplOpenVX)
ctx.stat_status.ignoreDepthImplOpenVX = currentDepth;
break;
#endif
default:
break;
@ -615,11 +601,6 @@ void Region::destroy()
cv::ocl::finish();
myCodePath = Impl::CODE_PATH_OPENCL;
break;
#endif
#ifdef HAVE_OPENVX
case REGION_FLAG_IMPL_OPENVX:
myCodePath = Impl::CODE_PATH_OPENVX;
break;
#endif
default:
break;
@ -665,19 +646,6 @@ void Region::destroy()
ctx.stat.durationImplOpenCL = duration;
}
break;
#endif
#ifdef HAVE_OPENVX
case Impl::CODE_PATH_OPENVX:
if (ctx.stat_status.ignoreDepthImplOpenVX == currentDepth)
{
ctx.stat.durationImplOpenVX += duration;
ctx.stat_status.ignoreDepthImplOpenVX = 0;
}
else if (active)
{
ctx.stat.durationImplOpenVX = duration;
}
break;
#endif
default:
break;

@ -49,8 +49,6 @@ The references are:
#include "opencv2/core/hal/intrin.hpp"
#include "opencv2/core/utils/buffer_area.private.hpp"
#include "opencv2/core/openvx/ovx_defs.hpp"
namespace cv
{
@ -371,70 +369,6 @@ static bool ocl_FAST( InputArray _img, std::vector<KeyPoint>& keypoints,
#endif
#ifdef HAVE_OPENVX
namespace ovx {
template <> inline bool skipSmallImages<VX_KERNEL_FAST_CORNERS>(int w, int h) { return w*h < 800 * 600; }
}
static bool openvx_FAST(InputArray _img, std::vector<KeyPoint>& keypoints,
int _threshold, bool nonmaxSuppression, int type)
{
using namespace ivx;
// Nonmax suppression is done differently in OpenCV than in OpenVX
// 9/16 is the only supported mode in OpenVX
if(nonmaxSuppression || type != FastFeatureDetector::TYPE_9_16)
return false;
Mat imgMat = _img.getMat();
if(imgMat.empty() || imgMat.type() != CV_8UC1)
return false;
if (ovx::skipSmallImages<VX_KERNEL_FAST_CORNERS>(imgMat.cols, imgMat.rows))
return false;
try
{
Context context = ovx::getOpenVXContext();
Image img = Image::createFromHandle(context, Image::matTypeToFormat(imgMat.type()),
Image::createAddressing(imgMat), (void*)imgMat.data);
ivx::Scalar threshold = ivx::Scalar::create<VX_TYPE_FLOAT32>(context, _threshold);
vx_size capacity = imgMat.cols * imgMat.rows;
Array corners = Array::create(context, VX_TYPE_KEYPOINT, capacity);
ivx::Scalar numCorners = ivx::Scalar::create<VX_TYPE_SIZE>(context, 0);
IVX_CHECK_STATUS(vxuFastCorners(context, img, threshold, (vx_bool)nonmaxSuppression, corners, numCorners));
size_t nPoints = numCorners.getValue<vx_size>();
keypoints.clear(); keypoints.reserve(nPoints);
std::vector<vx_keypoint_t> vxCorners;
corners.copyTo(vxCorners);
for(size_t i = 0; i < nPoints; i++)
{
vx_keypoint_t kp = vxCorners[i];
//if nonmaxSuppression is false, kp.strength is undefined
keypoints.push_back(KeyPoint((float)kp.x, (float)kp.y, 7.f, -1, kp.strength));
}
#ifdef VX_VERSION_1_1
//we should take user memory back before release
//(it's not done automatically according to standard)
img.swapHandle();
#endif
}
catch (const RuntimeError & e)
{
VX_DbgThrow(e.what());
}
catch (const WrapperError & e)
{
VX_DbgThrow(e.what());
}
return true;
}
#endif
static inline int hal_FAST(cv::Mat& src, std::vector<KeyPoint>& keypoints, int threshold, bool nonmax_suppression, FastFeatureDetector::DetectorType type)
{
@ -507,9 +441,6 @@ void FAST(InputArray _img, std::vector<KeyPoint>& keypoints, int threshold, bool
CALL_HAL(fast, cv_hal_FAST, img.data, img.step, img.cols, img.rows,
(uchar*)(keypoints.data()), &keypoints_count, threshold, nonmax_suppression, type);
CV_OVX_RUN(true,
openvx_FAST(_img, keypoints, threshold, nonmax_suppression, type))
switch(type) {
case FastFeatureDetector::TYPE_5_8:
FAST_t<8>(_img, keypoints, threshold, nonmax_suppression);

@ -47,7 +47,6 @@
#define CV_CPU_OPTIMIZATION_DECLARATIONS_ONLY
#include "accum.simd.hpp"
#include "accum.simd_declarations.hpp"
#include "opencv2/core/openvx/ovx_defs.hpp"
namespace cv
{
@ -231,80 +230,6 @@ static bool ipp_accumulate(InputArray _src, InputOutputArray _dst, InputArray _m
}
#endif
#ifdef HAVE_OPENVX
namespace cv
{
enum
{
VX_ACCUMULATE_OP = 0,
VX_ACCUMULATE_SQUARE_OP = 1,
VX_ACCUMULATE_WEIGHTED_OP = 2
};
namespace ovx {
template <> inline bool skipSmallImages<VX_KERNEL_ACCUMULATE>(int w, int h) { return w*h < 120 * 60; }
}
static bool openvx_accumulate(InputArray _src, InputOutputArray _dst, InputArray _mask, double _weight, int opType)
{
Mat srcMat = _src.getMat(), dstMat = _dst.getMat();
if (ovx::skipSmallImages<VX_KERNEL_ACCUMULATE>(srcMat.cols, srcMat.rows))
return false;
if(!_mask.empty() ||
(opType == VX_ACCUMULATE_WEIGHTED_OP && dstMat.type() != CV_8UC1 ) ||
(opType != VX_ACCUMULATE_WEIGHTED_OP && dstMat.type() != CV_16SC1 ) ||
srcMat.type() != CV_8UC1)
{
return false;
}
//TODO: handle different number of channels (channel extract && channel combine)
//TODO: handle mask (threshold mask to 0xff && bitwise AND with src)
//(both things can be done by creating a graph)
try
{
ivx::Context context = ovx::getOpenVXContext();
ivx::Image srcImage = ivx::Image::createFromHandle(context, ivx::Image::matTypeToFormat(srcMat.type()),
ivx::Image::createAddressing(srcMat), srcMat.data);
ivx::Image dstImage = ivx::Image::createFromHandle(context, ivx::Image::matTypeToFormat(dstMat.type()),
ivx::Image::createAddressing(dstMat), dstMat.data);
ivx::Scalar shift = ivx::Scalar::create<VX_TYPE_UINT32>(context, 0);
ivx::Scalar alpha = ivx::Scalar::create<VX_TYPE_FLOAT32>(context, _weight);
switch (opType)
{
case VX_ACCUMULATE_OP:
ivx::IVX_CHECK_STATUS(vxuAccumulateImage(context, srcImage, dstImage));
break;
case VX_ACCUMULATE_SQUARE_OP:
ivx::IVX_CHECK_STATUS(vxuAccumulateSquareImage(context, srcImage, shift, dstImage));
break;
case VX_ACCUMULATE_WEIGHTED_OP:
ivx::IVX_CHECK_STATUS(vxuAccumulateWeightedImage(context, srcImage, alpha, dstImage));
break;
default:
break;
}
#ifdef VX_VERSION_1_1
//we should take user memory back before release
//(it's not done automatically according to standard)
srcImage.swapHandle(); dstImage.swapHandle();
#endif
}
catch (const ivx::RuntimeError & e)
{
VX_DbgThrow(e.what());
}
catch (const ivx::WrapperError & e)
{
VX_DbgThrow(e.what());
}
return true;
}
}
#endif
void cv::accumulate( InputArray _src, InputOutputArray _dst, InputArray _mask )
{
CV_INSTRUMENT_REGION();
@ -321,9 +246,6 @@ void cv::accumulate( InputArray _src, InputOutputArray _dst, InputArray _mask )
CV_IPP_RUN((_src.dims() <= 2 || (_src.isContinuous() && _dst.isContinuous() && (_mask.empty() || _mask.isContinuous()))),
ipp_accumulate(_src, _dst, _mask));
CV_OVX_RUN(_src.dims() <= 2,
openvx_accumulate(_src, _dst, _mask, 0.0, VX_ACCUMULATE_OP))
Mat src = _src.getMat(), dst = _dst.getMat(), mask = _mask.getMat();
@ -420,9 +342,6 @@ void cv::accumulateSquare( InputArray _src, InputOutputArray _dst, InputArray _m
CV_IPP_RUN((_src.dims() <= 2 || (_src.isContinuous() && _dst.isContinuous() && (_mask.empty() || _mask.isContinuous()))),
ipp_accumulate_square(_src, _dst, _mask));
CV_OVX_RUN(_src.dims() <= 2,
openvx_accumulate(_src, _dst, _mask, 0.0, VX_ACCUMULATE_SQUARE_OP))
Mat src = _src.getMat(), dst = _dst.getMat(), mask = _mask.getMat();
int fidx = getAccTabIdx(sdepth, ddepth);
@ -624,9 +543,6 @@ void cv::accumulateWeighted( InputArray _src, InputOutputArray _dst,
CV_IPP_RUN((_src.dims() <= 2 || (_src.isContinuous() && _dst.isContinuous() && _mask.isContinuous())), ipp_accumulate_weighted(_src, _dst, alpha, _mask));
CV_OVX_RUN(_src.dims() <= 2,
openvx_accumulate(_src, _dst, _mask, alpha, VX_ACCUMULATE_WEIGHTED_OP))
Mat src = _src.getMat(), dst = _dst.getMat(), mask = _mask.getMat();

@ -48,8 +48,6 @@
#include "opencv2/core/hal/intrin.hpp"
#include "opencl_kernels_imgproc.hpp"
#include "opencv2/core/openvx/ovx_defs.hpp"
#include "box_filter.simd.hpp"
#include "box_filter.simd_declarations.hpp" // defines CV_CPU_DISPATCH_MODES_ALL=AVX2,...,BASELINE based on CMakeLists.txt content
@ -315,79 +313,6 @@ Ptr<FilterEngine> createBoxFilter(int srcType, int dstType, Size ksize,
CV_CPU_DISPATCH_MODES_ALL);
}
#ifdef HAVE_OPENVX
namespace ovx {
template <> inline bool skipSmallImages<VX_KERNEL_BOX_3x3>(int w, int h) { return w*h < 640 * 480; }
}
static bool openvx_boxfilter(InputArray _src, OutputArray _dst, int ddepth,
Size ksize, Point anchor,
bool normalize, int borderType)
{
if (ddepth < 0)
ddepth = CV_8UC1;
if (_src.type() != CV_8UC1 || ddepth != CV_8U || !normalize ||
_src.cols() < 3 || _src.rows() < 3 ||
ksize.width != 3 || ksize.height != 3 ||
(anchor.x >= 0 && anchor.x != 1) ||
(anchor.y >= 0 && anchor.y != 1) ||
ovx::skipSmallImages<VX_KERNEL_BOX_3x3>(_src.cols(), _src.rows()))
return false;
Mat src = _src.getMat();
if ((borderType & BORDER_ISOLATED) == 0 && src.isSubmatrix())
return false; //Process isolated borders only
vx_enum border;
switch (borderType & ~BORDER_ISOLATED)
{
case BORDER_CONSTANT:
border = VX_BORDER_CONSTANT;
break;
case BORDER_REPLICATE:
border = VX_BORDER_REPLICATE;
break;
default:
return false;
}
_dst.create(src.size(), CV_8UC1);
Mat dst = _dst.getMat();
try
{
ivx::Context ctx = ovx::getOpenVXContext();
Mat a;
if (dst.data != src.data)
a = src;
else
src.copyTo(a);
ivx::Image
ia = ivx::Image::createFromHandle(ctx, VX_DF_IMAGE_U8,
ivx::Image::createAddressing(a.cols, a.rows, 1, (vx_int32)(a.step)), a.data),
ib = ivx::Image::createFromHandle(ctx, VX_DF_IMAGE_U8,
ivx::Image::createAddressing(dst.cols, dst.rows, 1, (vx_int32)(dst.step)), dst.data);
//ATTENTION: VX_CONTEXT_IMMEDIATE_BORDER attribute change could lead to strange issues in multi-threaded environments
//since OpenVX standard says nothing about thread-safety for now
ivx::border_t prevBorder = ctx.immediateBorder();
ctx.setImmediateBorder(border, (vx_uint8)(0));
ivx::IVX_CHECK_STATUS(vxuBox3x3(ctx, ia, ib));
ctx.setImmediateBorder(prevBorder);
}
catch (const ivx::RuntimeError & e)
{
VX_DbgThrow(e.what());
}
catch (const ivx::WrapperError & e)
{
VX_DbgThrow(e.what());
}
return true;
}
#endif
#if 0 //defined(HAVE_IPP)
static bool ipp_boxfilter(Mat &src, Mat &dst, Size ksize, Point anchor, bool normalize, int borderType)
@ -475,9 +400,6 @@ void boxFilter(InputArray _src, OutputArray _dst, int ddepth,
ofs.x, ofs.y, wsz.width - src.cols - ofs.x, wsz.height - src.rows - ofs.y, ksize.width, ksize.height,
anchor.x, anchor.y, normalize, borderType&~BORDER_ISOLATED);
CV_OVX_RUN(true,
openvx_boxfilter(src, dst, ddepth, ksize, anchor, normalize, borderType))
//CV_IPP_RUN_FAST(ipp_boxfilter(src, dst, ksize, anchor, normalize, borderType));
borderType = (borderType&~BORDER_ISOLATED);

@ -45,8 +45,6 @@
#include "opencv2/core/hal/intrin.hpp"
#include <deque>
#include "opencv2/core/openvx/ovx_defs.hpp"
namespace cv
{
@ -761,65 +759,6 @@ private:
finalPass& operator=(const finalPass&); // = delete
};
#ifdef HAVE_OPENVX
namespace ovx {
template <> inline bool skipSmallImages<VX_KERNEL_CANNY_EDGE_DETECTOR>(int w, int h) { return w*h < 640 * 480; }
}
static bool openvx_canny(const Mat& src, Mat& dst, int loVal, int hiVal, int kSize, bool useL2)
{
using namespace ivx;
Context context = ovx::getOpenVXContext();
try
{
Image _src = Image::createFromHandle(
context,
Image::matTypeToFormat(src.type()),
Image::createAddressing(src),
src.data );
Image _dst = Image::createFromHandle(
context,
Image::matTypeToFormat(dst.type()),
Image::createAddressing(dst),
dst.data );
Threshold threshold = Threshold::createRange(context, VX_TYPE_UINT8, saturate_cast<uchar>(loVal), saturate_cast<uchar>(hiVal));
#if 0
// the code below is disabled because vxuCannyEdgeDetector()
// ignores context attribute VX_CONTEXT_IMMEDIATE_BORDER
// FIXME: may fail in multithread case
border_t prevBorder = context.immediateBorder();
context.setImmediateBorder(VX_BORDER_REPLICATE);
IVX_CHECK_STATUS( vxuCannyEdgeDetector(context, _src, threshold, kSize, (useL2 ? VX_NORM_L2 : VX_NORM_L1), _dst) );
context.setImmediateBorder(prevBorder);
#else
// alternative code without vxuCannyEdgeDetector()
Graph graph = Graph::create(context);
ivx::Node node = ivx::Node(vxCannyEdgeDetectorNode(graph, _src, threshold, kSize, (useL2 ? VX_NORM_L2 : VX_NORM_L1), _dst) );
node.setBorder(VX_BORDER_REPLICATE);
graph.verify();
graph.process();
#endif
#ifdef VX_VERSION_1_1
_src.swapHandle();
_dst.swapHandle();
#endif
}
catch(const WrapperError& e)
{
VX_DbgThrow(e.what());
}
catch(const RuntimeError& e)
{
VX_DbgThrow(e.what());
}
return true;
}
#endif // HAVE_OPENVX
void Canny( InputArray _src, OutputArray _dst,
double low_thresh, double high_thresh,
int aperture_size, bool L2gradient )
@ -864,21 +803,6 @@ void Canny( InputArray _src, OutputArray _dst,
CALL_HAL(canny, cv_hal_canny, src.data, src.step, dst.data, dst.step, src.cols, src.rows, src.channels(),
low_thresh, high_thresh, aperture_size, L2gradient);
CV_OVX_RUN(
false && /* disabling due to accuracy issues */
src.type() == CV_8UC1 &&
!src.isSubmatrix() &&
src.cols >= aperture_size &&
src.rows >= aperture_size &&
!ovx::skipSmallImages<VX_KERNEL_CANNY_EDGE_DETECTOR>(src.cols, src.rows),
openvx_canny(
src,
dst,
cvFloor(low_thresh),
cvFloor(high_thresh),
aperture_size,
L2gradient ) )
CV_IPP_RUN_FAST(ipp_Canny(src, Mat(), Mat(), dst, (float)low_thresh, (float)high_thresh, L2gradient, aperture_size))
if (L2gradient)

@ -43,7 +43,6 @@
#include "precomp.hpp"
#include "opencl_kernels_imgproc.hpp"
#include "opencv2/core/openvx/ovx_defs.hpp"
#include "filter.hpp"
/****************************************************************************************\
@ -182,83 +181,6 @@ cv::Ptr<cv::FilterEngine> cv::createDerivFilter(int srcType, int dstType,
kx, ky, Point(-1,-1), 0, borderType );
}
#ifdef HAVE_OPENVX
namespace cv
{
namespace ovx {
template <> inline bool skipSmallImages<VX_KERNEL_SOBEL_3x3>(int w, int h) { return w*h < 320 * 240; }
}
static bool openvx_sobel(InputArray _src, OutputArray _dst,
int dx, int dy, int ksize,
double scale, double delta, int borderType)
{
if (_src.type() != CV_8UC1 || _dst.type() != CV_16SC1 ||
ksize != 3 || scale != 1.0 || delta != 0.0 ||
(dx | dy) != 1 || (dx + dy) != 1 ||
_src.cols() < ksize || _src.rows() < ksize ||
ovx::skipSmallImages<VX_KERNEL_SOBEL_3x3>(_src.cols(), _src.rows())
)
return false;
Mat src = _src.getMat();
Mat dst = _dst.getMat();
if ((borderType & BORDER_ISOLATED) == 0 && src.isSubmatrix())
return false; //Process isolated borders only
vx_enum border;
switch (borderType & ~BORDER_ISOLATED)
{
case BORDER_CONSTANT:
border = VX_BORDER_CONSTANT;
break;
case BORDER_REPLICATE:
// border = VX_BORDER_REPLICATE;
// break;
default:
return false;
}
try
{
ivx::Context ctx = ovx::getOpenVXContext();
//if ((vx_size)ksize > ctx.convolutionMaxDimension())
// return false;
Mat a;
if (dst.data != src.data)
a = src;
else
src.copyTo(a);
ivx::Image
ia = ivx::Image::createFromHandle(ctx, VX_DF_IMAGE_U8,
ivx::Image::createAddressing(a.cols, a.rows, 1, (vx_int32)(a.step)), a.data),
ib = ivx::Image::createFromHandle(ctx, VX_DF_IMAGE_S16,
ivx::Image::createAddressing(dst.cols, dst.rows, 2, (vx_int32)(dst.step)), dst.data);
//ATTENTION: VX_CONTEXT_IMMEDIATE_BORDER attribute change could lead to strange issues in multi-threaded environments
//since OpenVX standard says nothing about thread-safety for now
ivx::border_t prevBorder = ctx.immediateBorder();
ctx.setImmediateBorder(border, (vx_uint8)(0));
if(dx)
ivx::IVX_CHECK_STATUS(vxuSobel3x3(ctx, ia, ib, NULL));
else
ivx::IVX_CHECK_STATUS(vxuSobel3x3(ctx, ia, NULL, ib));
ctx.setImmediateBorder(prevBorder);
}
catch (const ivx::RuntimeError & e)
{
VX_DbgThrow(e.what());
}
catch (const ivx::WrapperError & e)
{
VX_DbgThrow(e.what());
}
return true;
}
}
#endif
#if 0 //defined HAVE_IPP
namespace cv
@ -456,8 +378,6 @@ void cv::Sobel( InputArray _src, OutputArray _dst, int ddepth, int dx, int dy,
CALL_HAL(sobel, cv_hal_sobel, src.ptr(), src.step, dst.ptr(), dst.step, src.cols, src.rows, sdepth, ddepth, cn,
ofs.x, ofs.y, wsz.width - src.cols - ofs.x, wsz.height - src.rows - ofs.y, dx, dy, ksize, scale, delta, borderType&~BORDER_ISOLATED);
CV_OVX_RUN(true,
openvx_sobel(src, dst, dx, dy, ksize, scale, delta, borderType))
//CV_IPP_RUN_FAST(ipp_Deriv(src, dst, dx, dy, ksize, scale, delta, borderType));

@ -42,8 +42,6 @@
#include "precomp.hpp"
#include "opencl_kernels_imgproc.hpp"
#include "opencv2/core/openvx/ovx_defs.hpp"
#include <cstdio>
#include <vector>
#include <iostream>
@ -274,95 +272,6 @@ static bool ocl_goodFeaturesToTrack( InputArray _image, OutputArray _corners,
#endif
#ifdef HAVE_OPENVX
struct VxKeypointsComparator
{
bool operator () (const vx_keypoint_t& a, const vx_keypoint_t& b)
{
return a.strength > b.strength;
}
};
static bool openvx_harris(Mat image, OutputArray _corners,
int _maxCorners, double _qualityLevel, double _minDistance,
int _blockSize, int _gradientSize, double _harrisK)
{
using namespace ivx;
if(image.type() != CV_8UC1) return false;
//OpenVX implementations don't have to provide other sizes
if(!(_blockSize == 3 || _blockSize == 5 || _blockSize == 7)) return false;
try
{
Context context = ovx::getOpenVXContext();
Image ovxImage = Image::createFromHandle(context, Image::matTypeToFormat(image.type()),
Image::createAddressing(image), image.data);
//The minimum threshold which to eliminate Harris Corner scores (computed using the normalized Sobel kernel).
//set to 0, we'll filter it later by threshold
ivx::Scalar strengthThresh = ivx::Scalar::create<VX_TYPE_FLOAT32>(context, 0);
//The gradient window size to use on the input.
vx_int32 gradientSize = _gradientSize;
//The block window size used to compute the harris corner score
vx_int32 blockSize = _blockSize;
//The scalar sensitivity threshold k from the Harris-Stephens equation
ivx::Scalar sensivity = ivx::Scalar::create<VX_TYPE_FLOAT32>(context, _harrisK);
//The radial Euclidean distance for non-maximum suppression
ivx::Scalar minDistance = ivx::Scalar::create<VX_TYPE_FLOAT32>(context, _minDistance);
vx_size capacity = image.cols * image.rows;
Array corners = Array::create(context, VX_TYPE_KEYPOINT, capacity);
ivx::Scalar numCorners = ivx::Scalar::create<VX_TYPE_SIZE>(context, 0);
IVX_CHECK_STATUS(vxuHarrisCorners(context, ovxImage, strengthThresh, minDistance, sensivity,
gradientSize, blockSize, corners, numCorners));
std::vector<vx_keypoint_t> vxKeypoints;
corners.copyTo(vxKeypoints);
std::sort(vxKeypoints.begin(), vxKeypoints.end(), VxKeypointsComparator());
vx_float32 maxStrength = 0.0f;
if(vxKeypoints.size() > 0)
maxStrength = vxKeypoints[0].strength;
size_t maxKeypoints = min((size_t)_maxCorners, vxKeypoints.size());
std::vector<Point2f> keypoints;
keypoints.reserve(maxKeypoints);
for(size_t i = 0; i < maxKeypoints; i++)
{
vx_keypoint_t kp = vxKeypoints[i];
if(kp.strength < maxStrength*_qualityLevel) break;
keypoints.push_back(Point2f((float)kp.x, (float)kp.y));
}
Mat(keypoints).convertTo(_corners, _corners.fixedType() ? _corners.type() : CV_32F);
#ifdef VX_VERSION_1_1
//we should take user memory back before release
//(it's not done automatically according to standard)
ovxImage.swapHandle();
#endif
}
catch (const RuntimeError & e)
{
VX_DbgThrow(e.what());
}
catch (const WrapperError & e)
{
VX_DbgThrow(e.what());
}
return true;
}
#endif
}
void cv::goodFeaturesToTrack( InputArray image, OutputArray corners,
@ -403,11 +312,6 @@ void cv::goodFeaturesToTrack( InputArray _image, OutputArray _corners,
return;
}
// Disabled due to bad accuracy
CV_OVX_RUN(false && useHarrisDetector && _mask.empty() &&
!ovx::skipSmallImages<VX_KERNEL_HARRIS_CORNERS>(image.cols, image.rows),
openvx_harris(image, _corners, maxCorners, qualityLevel, minDistance, blockSize, gradientSize, harrisK))
if( useHarrisDetector )
cornerHarris( image, eig, blockSize, gradientSize, harrisK );
else

@ -43,8 +43,6 @@
#include "opencl_kernels_imgproc.hpp"
#include "opencv2/core/hal/intrin.hpp"
#include "opencv2/core/openvx/ovx_defs.hpp"
#include "opencv2/core/utils/tls.hpp"
void cvSetHistBinRanges( CvHistogram* hist, float** ranges, int uniform );
@ -838,64 +836,6 @@ private:
}
#ifdef HAVE_OPENVX
namespace cv
{
namespace ovx {
template <> inline bool skipSmallImages<VX_KERNEL_HISTOGRAM>(int w, int h) { return w*h < 2048 * 1536; }
}
static bool openvx_calchist(const Mat& image, OutputArray _hist, const int histSize,
const float* _range)
{
vx_int32 offset = (vx_int32)(_range[0]);
vx_uint32 range = (vx_uint32)(_range[1] - _range[0]);
if (float(offset) != _range[0] || float(range) != (_range[1] - _range[0]))
return false;
size_t total_size = image.total();
int rows = image.dims > 1 ? image.size[0] : 1, cols = rows ? (int)(total_size / rows) : 0;
if (image.dims > 2 && !(image.isContinuous() && cols > 0 && (size_t)rows*cols == total_size))
return false;
try
{
ivx::Context ctx = ovx::getOpenVXContext();
#if VX_VERSION <= VX_VERSION_1_0
if (ctx.vendorID() == VX_ID_KHRONOS && (range % histSize))
return false;
#endif
ivx::Image
img = ivx::Image::createFromHandle(ctx, VX_DF_IMAGE_U8,
ivx::Image::createAddressing(cols, rows, 1, (vx_int32)(image.step[0])), image.data);
ivx::Distribution vxHist = ivx::Distribution::create(ctx, histSize, offset, range);
ivx::IVX_CHECK_STATUS(vxuHistogram(ctx, img, vxHist));
_hist.create(1, &histSize, CV_32F);
Mat hist = _hist.getMat(), ihist = hist;
ihist.flags = (ihist.flags & ~CV_MAT_TYPE_MASK) | CV_32S;
vxHist.copyTo(ihist);
ihist.convertTo(hist, CV_32F);
#ifdef VX_VERSION_1_1
img.swapHandle();
#endif
}
catch (const ivx::RuntimeError & e)
{
VX_DbgThrow(e.what());
}
catch (const ivx::WrapperError & e)
{
VX_DbgThrow(e.what());
}
return true;
}
}
#endif
#ifdef HAVE_IPP
#define IPP_HISTOGRAM_PARALLEL 1
namespace cv
@ -956,14 +896,6 @@ void cv::calcHist( const Mat* images, int nimages, const int* channels,
CV_Assert(images && nimages > 0);
CV_OVX_RUN(
images && histSize &&
nimages == 1 && images[0].type() == CV_8UC1 && dims == 1 && _mask.getMat().empty() &&
(!channels || channels[0] == 0) && !accumulate && uniform &&
ranges && ranges[0] &&
!ovx::skipSmallImages<VX_KERNEL_HISTOGRAM>(images[0].cols, images[0].rows),
openvx_calchist(images[0], _hist, histSize[0], ranges[0]))
Mat mask = _mask.getMat();
CV_Assert(dims > 0 && histSize);
@ -2611,43 +2543,6 @@ static bool ocl_equalizeHist(InputArray _src, OutputArray _dst)
#endif
#ifdef HAVE_OPENVX
namespace cv
{
static bool openvx_equalize_hist(Mat srcMat, Mat dstMat)
{
using namespace ivx;
try
{
Context context = ovx::getOpenVXContext();
Image srcImage = Image::createFromHandle(context, Image::matTypeToFormat(srcMat.type()),
Image::createAddressing(srcMat), srcMat.data);
Image dstImage = Image::createFromHandle(context, Image::matTypeToFormat(dstMat.type()),
Image::createAddressing(dstMat), dstMat.data);
IVX_CHECK_STATUS(vxuEqualizeHist(context, srcImage, dstImage));
#ifdef VX_VERSION_1_1
//we should take user memory back before release
//(it's not done automatically according to standard)
srcImage.swapHandle(); dstImage.swapHandle();
#endif
}
catch (const RuntimeError & e)
{
VX_DbgThrow(e.what());
}
catch (const WrapperError & e)
{
VX_DbgThrow(e.what());
}
return true;
}
}
#endif
void cv::equalizeHist( InputArray _src, OutputArray _dst )
{
CV_INSTRUMENT_REGION();
@ -2664,9 +2559,6 @@ void cv::equalizeHist( InputArray _src, OutputArray _dst )
_dst.create( src.size(), src.type() );
Mat dst = _dst.getMat();
CV_OVX_RUN(!ovx::skipSmallImages<VX_KERNEL_EQUALIZE_HISTOGRAM>(src.cols, src.rows),
openvx_equalize_hist(src, dst))
Mutex histogramLockInstance;
const int hist_sz = EqualizeHistCalcHist_Invoker::HIST_SZ;

@ -52,7 +52,6 @@
#include "hal_replacement.hpp"
#include <opencv2/core/utils/configuration.private.hpp>
#include "opencv2/core/hal/intrin.hpp"
#include "opencv2/core/openvx/ovx_defs.hpp"
#include "opencv2/core/softfloat.hpp"
#include "imgwarp.hpp"
@ -1573,94 +1572,6 @@ static bool ocl_logPolar(InputArray _src, OutputArray _dst,
#endif
#ifdef HAVE_OPENVX
static bool openvx_remap(Mat src, Mat dst, Mat map1, Mat map2, int interpolation, const Scalar& borderValue)
{
vx_interpolation_type_e inter_type;
switch (interpolation)
{
case INTER_LINEAR:
#if VX_VERSION > VX_VERSION_1_0
inter_type = VX_INTERPOLATION_BILINEAR;
#else
inter_type = VX_INTERPOLATION_TYPE_BILINEAR;
#endif
break;
case INTER_NEAREST:
/* NEAREST_NEIGHBOR mode disabled since OpenCV round half to even while OpenVX sample implementation round half up
#if VX_VERSION > VX_VERSION_1_0
inter_type = VX_INTERPOLATION_NEAREST_NEIGHBOR;
#else
inter_type = VX_INTERPOLATION_TYPE_NEAREST_NEIGHBOR;
#endif
if (!map1.empty())
for (int y = 0; y < map1.rows; ++y)
{
float* line = map1.ptr<float>(y);
for (int x = 0; x < map1.cols; ++x)
line[x] = cvRound(line[x]);
}
if (!map2.empty())
for (int y = 0; y < map2.rows; ++y)
{
float* line = map2.ptr<float>(y);
for (int x = 0; x < map2.cols; ++x)
line[x] = cvRound(line[x]);
}
break;
*/
case INTER_AREA://AREA interpolation mode is unsupported
default:
return false;
}
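// Note on the disabled INTER_NEAREST branch above: OpenCV's cvRound() rounds ties to the nearest
// even value, while the OpenVX sample implementation rounds them up; e.g. cvRound(2.5) == 2 but a
// round-half-up scheme yields 3, so nearest-neighbour remapping could pick a different source
// pixel whenever a map coordinate falls exactly on a half-pixel boundary.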
try
{
ivx::Context ctx = ovx::getOpenVXContext();
Mat a;
if (dst.data != src.data)
a = src;
else
src.copyTo(a);
ivx::Image
ia = ivx::Image::createFromHandle(ctx, VX_DF_IMAGE_U8,
ivx::Image::createAddressing(a.cols, a.rows, 1, (vx_int32)(a.step)), a.data),
ib = ivx::Image::createFromHandle(ctx, VX_DF_IMAGE_U8,
ivx::Image::createAddressing(dst.cols, dst.rows, 1, (vx_int32)(dst.step)), dst.data);
//ATTENTION: VX_CONTEXT_IMMEDIATE_BORDER attribute change could lead to strange issues in multi-threaded environments
//since OpenVX standard says nothing about thread-safety for now
ivx::border_t prevBorder = ctx.immediateBorder();
ctx.setImmediateBorder(VX_BORDER_CONSTANT, (vx_uint8)(borderValue[0]));
ivx::Remap map = ivx::Remap::create(ctx, src.cols, src.rows, dst.cols, dst.rows);
if (map1.empty()) map.setMappings(map2);
else if (map2.empty()) map.setMappings(map1);
else map.setMappings(map1, map2);
ivx::IVX_CHECK_STATUS(vxuRemap(ctx, ia, map, inter_type, ib));
#ifdef VX_VERSION_1_1
ib.swapHandle();
ia.swapHandle();
#endif
ctx.setImmediateBorder(prevBorder);
}
catch (const ivx::RuntimeError & e)
{
CV_Error(cv::Error::StsInternal, e.what());
return false;
}
catch (const ivx::WrapperError & e)
{
CV_Error(cv::Error::StsInternal, e.what());
return false;
}
return true;
}
#endif
#if defined HAVE_IPP && !IPP_DISABLE_REMAP
typedef IppStatus (CV_STDCALL * ippiRemap)(const void * pSrc, IppiSize srcSize, int srcStep, IppiRect srcRoi,
@ -1800,17 +1711,6 @@ void cv::remap( InputArray _src, OutputArray _dst,
Mat dst = _dst.getMat();
CV_OVX_RUN(
src.type() == CV_8UC1 && dst.type() == CV_8UC1 &&
!ovx::skipSmallImages<VX_KERNEL_REMAP>(src.cols, src.rows) &&
(borderType& ~BORDER_ISOLATED) == BORDER_CONSTANT &&
((map1.type() == CV_32FC2 && map2.empty() && map1.size == dst.size) ||
(map1.type() == CV_32FC1 && map2.type() == CV_32FC1 && map1.size == dst.size && map2.size == dst.size) ||
(map1.empty() && map2.type() == CV_32FC2 && map2.size == dst.size)) &&
((borderType & BORDER_ISOLATED) != 0 || !src.isSubmatrix()) &&
!hasRelativeFlag,
openvx_remap(src, dst, map1, map2, interpolation, borderValue));
CV_Assert( dst.cols < SHRT_MAX && dst.rows < SHRT_MAX && src.cols < SHRT_MAX && src.rows < SHRT_MAX );
if( dst.data == src.data )

@ -48,8 +48,6 @@
#include "opencv2/core/hal/intrin.hpp"
#include "opencl_kernels_imgproc.hpp"
#include "opencv2/core/openvx/ovx_defs.hpp"
#include "median_blur.simd.hpp"
#include "median_blur.simd_declarations.hpp" // defines CV_CPU_DISPATCH_MODES_ALL=AVX2,...,BASELINE based on CMakeLists.txt content
@ -112,97 +110,6 @@ static bool ocl_medianFilter(InputArray _src, OutputArray _dst, int m)
#endif
#ifdef HAVE_OPENVX
namespace ovx {
template <> inline bool skipSmallImages<VX_KERNEL_MEDIAN_3x3>(int w, int h) { return w*h < 1280 * 720; }
}
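// With the specialization above, frames smaller than 1280*720 = 921600 pixels bypass OpenVX and
// go straight to the OpenCV median filter; e.g. a 640x480 input (307200 pixels) never used it.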
static bool openvx_medianFilter(InputArray _src, OutputArray _dst, int ksize)
{
if (_src.type() != CV_8UC1 || _dst.type() != CV_8U
#ifndef VX_VERSION_1_1
|| ksize != 3
#endif
)
return false;
Mat src = _src.getMat();
Mat dst = _dst.getMat();
if (
#ifdef VX_VERSION_1_1
ksize != 3 ? ovx::skipSmallImages<VX_KERNEL_NON_LINEAR_FILTER>(src.cols, src.rows) :
#endif
ovx::skipSmallImages<VX_KERNEL_MEDIAN_3x3>(src.cols, src.rows)
)
return false;
try
{
ivx::Context ctx = ovx::getOpenVXContext();
#ifdef VX_VERSION_1_1
if ((vx_size)ksize > ctx.nonlinearMaxDimension())
return false;
#endif
Mat a;
if (dst.data != src.data)
a = src;
else
src.copyTo(a);
ivx::Image
ia = ivx::Image::createFromHandle(ctx, VX_DF_IMAGE_U8,
ivx::Image::createAddressing(a.cols, a.rows, 1, (vx_int32)(a.step)), a.data),
ib = ivx::Image::createFromHandle(ctx, VX_DF_IMAGE_U8,
ivx::Image::createAddressing(dst.cols, dst.rows, 1, (vx_int32)(dst.step)), dst.data);
//ATTENTION: VX_CONTEXT_IMMEDIATE_BORDER attribute change could lead to strange issues in multi-threaded environments
//since OpenVX standard says nothing about thread-safety for now
ivx::border_t prevBorder = ctx.immediateBorder();
ctx.setImmediateBorder(VX_BORDER_REPLICATE);
#ifdef VX_VERSION_1_1
if (ksize == 3)
#endif
{
ivx::IVX_CHECK_STATUS(vxuMedian3x3(ctx, ia, ib));
}
#ifdef VX_VERSION_1_1
else
{
ivx::Matrix mtx;
if(ksize == 5)
mtx = ivx::Matrix::createFromPattern(ctx, VX_PATTERN_BOX, ksize, ksize);
else
{
vx_size supportedSize;
ivx::IVX_CHECK_STATUS(vxQueryContext(ctx, VX_CONTEXT_NONLINEAR_MAX_DIMENSION, &supportedSize, sizeof(supportedSize)));
if ((vx_size)ksize > supportedSize)
{
ctx.setImmediateBorder(prevBorder);
return false;
}
Mat mask(ksize, ksize, CV_8UC1, Scalar(255));
mtx = ivx::Matrix::create(ctx, VX_TYPE_UINT8, ksize, ksize);
mtx.copyFrom(mask);
}
ivx::IVX_CHECK_STATUS(vxuNonLinearFilter(ctx, VX_NONLINEAR_FILTER_MEDIAN, ia, mtx, ib));
}
#endif
ctx.setImmediateBorder(prevBorder);
}
catch (const ivx::RuntimeError & e)
{
VX_DbgThrow(e.what());
}
catch (const ivx::WrapperError & e)
{
VX_DbgThrow(e.what());
}
return true;
}
#endif
#if 0 //defined HAVE_IPP
static bool ipp_medianFilter(Mat &src0, Mat &dst, int ksize)
{
@ -300,9 +207,6 @@ void medianBlur( InputArray _src0, OutputArray _dst, int ksize )
CALL_HAL(medianBlur, cv_hal_medianBlur, src0.data, src0.step, dst.data, dst.step, src0.cols, src0.rows, src0.depth(),
src0.channels(), ksize);
CV_OVX_RUN(true,
openvx_medianFilter(_src0, _dst, ksize))
//CV_IPP_RUN_FAST(ipp_medianFilter(src0, dst, ksize));
CV_CPU_DISPATCH(medianBlur, (src0, dst, ksize),

@ -45,7 +45,6 @@
#include "opencl_kernels_imgproc.hpp"
#include "opencv2/core/hal/intrin.hpp"
#include "opencv2/core/openvx/ovx_defs.hpp"
namespace cv
{
@ -1329,85 +1328,6 @@ static bool ipp_pyrdown( InputArray _src, OutputArray _dst, const Size& _dsz, in
}
#endif
#ifdef HAVE_OPENVX
namespace cv
{
static bool openvx_pyrDown( InputArray _src, OutputArray _dst, const Size& _dsz, int borderType )
{
using namespace ivx;
Mat srcMat = _src.getMat();
if (ovx::skipSmallImages<VX_KERNEL_HALFSCALE_GAUSSIAN>(srcMat.cols, srcMat.rows))
return false;
CV_Assert(!srcMat.empty());
Size ssize = _src.size();
Size acceptableSize = Size((ssize.width + 1) / 2, (ssize.height + 1) / 2);
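// Halving rounds up: e.g. (illustration) a 641x481 source gives an acceptable destination size of
// 321x241, and any explicitly requested _dsz has to match it exactly (or be left empty).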
// OpenVX limitations
if((srcMat.type() != CV_8U) ||
(borderType != BORDER_REPLICATE) ||
(_dsz != acceptableSize && !_dsz.empty()))
return false;
// The only border mode which is supported by both cv::pyrDown() and OpenVX
// and produces predictable results
ivx::border_t borderMode;
borderMode.mode = VX_BORDER_REPLICATE;
_dst.create( acceptableSize, srcMat.type() );
Mat dstMat = _dst.getMat();
CV_Assert( ssize.width > 0 && ssize.height > 0 &&
std::abs(acceptableSize.width*2 - ssize.width) <= 2 &&
std::abs(acceptableSize.height*2 - ssize.height) <= 2 );
try
{
Context context = ovx::getOpenVXContext();
if(context.vendorID() == VX_ID_KHRONOS)
{
// This implementation performs floor-like rounding
// (OpenCV uses floor(x+0.5)-like rounding)
// and ignores border mode (and loses a 1-pixel border)
return false;
}
Image srcImg = Image::createFromHandle(context, Image::matTypeToFormat(srcMat.type()),
Image::createAddressing(srcMat), (void*)srcMat.data);
Image dstImg = Image::createFromHandle(context, Image::matTypeToFormat(dstMat.type()),
Image::createAddressing(dstMat), (void*)dstMat.data);
ivx::Scalar kernelSize = ivx::Scalar::create<VX_TYPE_INT32>(context, 5);
Graph graph = Graph::create(context);
ivx::Node halfNode = ivx::Node::create(graph, VX_KERNEL_HALFSCALE_GAUSSIAN, srcImg, dstImg, kernelSize);
halfNode.setBorder(borderMode);
graph.verify();
graph.process();
#ifdef VX_VERSION_1_1
//we should take user memory back before release
//(it's not done automatically according to standard)
srcImg.swapHandle(); dstImg.swapHandle();
#endif
}
catch (const RuntimeError & e)
{
VX_DbgThrow(e.what());
}
catch (const WrapperError & e)
{
VX_DbgThrow(e.what());
}
return true;
}
}
#endif
void cv::pyrDown( InputArray _src, OutputArray _dst, const Size& _dsz, int borderType )
{
CV_INSTRUMENT_REGION();
@ -1417,9 +1337,6 @@ void cv::pyrDown( InputArray _src, OutputArray _dst, const Size& _dsz, int borde
CV_OCL_RUN(_src.dims() <= 2 && _dst.isUMat(),
ocl_pyrDown(_src, _dst, _dsz, borderType))
CV_OVX_RUN(_src.dims() <= 2,
openvx_pyrDown(_src, _dst, _dsz, borderType))
Mat src = _src.getMat();
Size dsz = _dsz.empty() ? Size((src.cols + 1)/2, (src.rows + 1)/2) : _dsz;
_dst.create( dsz, src.type() );

@ -53,7 +53,6 @@
#include "opencv2/core/hal/intrin.hpp"
#include "opencv2/core/utils/buffer_area.private.hpp"
#include "opencv2/core/openvx/ovx_defs.hpp"
#include "resize.hpp"
#include "opencv2/core/softfloat.hpp"

@ -53,8 +53,6 @@
#include "opencv2/core/hal/intrin.hpp"
#include "opencl_kernels_imgproc.hpp"
#include "opencv2/core/openvx/ovx_defs.hpp"
#include "filter.hpp"
#include "opencv2/core/softfloat.hpp"
@ -386,88 +384,6 @@ static bool ocl_GaussianBlur_8UC1(InputArray _src, OutputArray _dst, Size ksize,
#endif
#ifdef HAVE_OPENVX
namespace ovx {
template <> inline bool skipSmallImages<VX_KERNEL_GAUSSIAN_3x3>(int w, int h) { return w*h < 320 * 240; }
}
static bool openvx_gaussianBlur(InputArray _src, OutputArray _dst, Size ksize,
double sigma1, double sigma2, int borderType)
{
if (sigma2 <= 0)
sigma2 = sigma1;
// automatic detection of kernel size from sigma
if (ksize.width <= 0 && sigma1 > 0)
ksize.width = cvRound(sigma1*6 + 1) | 1;
if (ksize.height <= 0 && sigma2 > 0)
ksize.height = cvRound(sigma2*6 + 1) | 1;
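// The "| 1" keeps the derived aperture odd, e.g. (illustration) sigma = 1.5 gives
// cvRound(1.5*6 + 1) | 1 = 10 | 1 = 11; only a 3x3 kernel ever passes the size check below.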
if (_src.type() != CV_8UC1 ||
_src.cols() < 3 || _src.rows() < 3 ||
ksize.width != 3 || ksize.height != 3)
return false;
sigma1 = std::max(sigma1, 0.);
sigma2 = std::max(sigma2, 0.);
if (!(sigma1 == 0.0 || (sigma1 - 0.8) < DBL_EPSILON) || !(sigma2 == 0.0 || (sigma2 - 0.8) < DBL_EPSILON) ||
ovx::skipSmallImages<VX_KERNEL_GAUSSIAN_3x3>(_src.cols(), _src.rows()))
return false;
Mat src = _src.getMat();
Mat dst = _dst.getMat();
if ((borderType & BORDER_ISOLATED) == 0 && src.isSubmatrix())
return false; //Process isolated borders only
vx_enum border;
switch (borderType & ~BORDER_ISOLATED)
{
case BORDER_CONSTANT:
border = VX_BORDER_CONSTANT;
break;
case BORDER_REPLICATE:
border = VX_BORDER_REPLICATE;
break;
default:
return false;
}
try
{
ivx::Context ctx = ovx::getOpenVXContext();
Mat a;
if (dst.data != src.data)
a = src;
else
src.copyTo(a);
ivx::Image
ia = ivx::Image::createFromHandle(ctx, VX_DF_IMAGE_U8,
ivx::Image::createAddressing(a.cols, a.rows, 1, (vx_int32)(a.step)), a.data),
ib = ivx::Image::createFromHandle(ctx, VX_DF_IMAGE_U8,
ivx::Image::createAddressing(dst.cols, dst.rows, 1, (vx_int32)(dst.step)), dst.data);
//ATTENTION: VX_CONTEXT_IMMEDIATE_BORDER attribute change could lead to strange issues in multi-threaded environments
//since OpenVX standard says nothing about thread-safety for now
ivx::border_t prevBorder = ctx.immediateBorder();
ctx.setImmediateBorder(border, (vx_uint8)(0));
ivx::IVX_CHECK_STATUS(vxuGaussian3x3(ctx, ia, ib));
ctx.setImmediateBorder(prevBorder);
}
catch (const ivx::RuntimeError & e)
{
VX_DbgThrow(e.what());
}
catch (const ivx::WrapperError & e)
{
VX_DbgThrow(e.what());
}
return true;
}
#endif
#if defined ENABLE_IPP_GAUSSIAN_BLUR // see CMake's OPENCV_IPP_GAUSSIAN_BLUR option
#define IPP_DISABLE_GAUSSIAN_BLUR_LARGE_KERNELS_1TH 1
@ -746,9 +662,6 @@ void GaussianBlur(InputArray _src, OutputArray _dst, Size ksize,
ofs.x, ofs.y, wsz.width - src.cols - ofs.x, wsz.height - src.rows - ofs.y, ksize.width, ksize.height,
sigma1, sigma2, borderType&~BORDER_ISOLATED);
CV_OVX_RUN(true,
openvx_gaussianBlur(src, dst, ksize, sigma1, sigma2, borderType))
#if defined ENABLE_IPP_GAUSSIAN_BLUR
// IPP is not bit-exact to OpenCV implementation
CV_IPP_RUN_FAST(ipp_GaussianBlur(src, dst, ksize, sigma1, sigma2, borderType));

@ -44,8 +44,6 @@
#include "opencl_kernels_imgproc.hpp"
#include "opencv2/core/hal/intrin.hpp"
#include "opencv2/core/openvx/ovx_defs.hpp"
namespace cv
{
@ -1443,98 +1441,6 @@ static bool ocl_threshold( InputArray _src, OutputArray _dst, double & thresh, d
}
#endif
#ifdef HAVE_OPENVX
#define IMPL_OPENVX_TOZERO 1
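// THRESH_TOZERO(_INV) has no direct OpenVX counterpart; with IMPL_OPENVX_TOZERO enabled the helper
// below emulates it by first producing a 0/255 mask with vxuThreshold and then AND-ing that mask
// with the source via vxuAnd, i.e. for THRESH_TOZERO conceptually dst = src & (src > thresh ? 255 : 0)
// (with the inverted mask for the _INV variant).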
static bool openvx_threshold(Mat src, Mat dst, int thresh, int maxval, int type)
{
Mat a = src;
int trueVal, falseVal;
switch (type)
{
case THRESH_BINARY:
#ifndef VX_VERSION_1_1
if (maxval != 255)
return false;
#endif
trueVal = maxval;
falseVal = 0;
break;
case THRESH_TOZERO:
#if IMPL_OPENVX_TOZERO
trueVal = 255;
falseVal = 0;
if (dst.data == src.data)
{
a = Mat(src.size(), src.type());
src.copyTo(a);
}
break;
#endif
case THRESH_BINARY_INV:
#ifdef VX_VERSION_1_1
trueVal = 0;
falseVal = maxval;
break;
#endif
case THRESH_TOZERO_INV:
#ifdef VX_VERSION_1_1
#if IMPL_OPENVX_TOZERO
trueVal = 0;
falseVal = 255;
if (dst.data == src.data)
{
a = Mat(src.size(), src.type());
src.copyTo(a);
}
break;
#endif
#endif
case THRESH_TRUNC:
default:
return false;
}
try
{
ivx::Context ctx = ovx::getOpenVXContext();
ivx::Threshold thh = ivx::Threshold::createBinary(ctx, VX_TYPE_UINT8, thresh);
thh.setValueTrue(trueVal);
thh.setValueFalse(falseVal);
ivx::Image
ia = ivx::Image::createFromHandle(ctx, VX_DF_IMAGE_U8,
ivx::Image::createAddressing(a.cols*a.channels(), a.rows, 1, (vx_int32)(a.step)), src.data),
ib = ivx::Image::createFromHandle(ctx, VX_DF_IMAGE_U8,
ivx::Image::createAddressing(dst.cols*dst.channels(), dst.rows, 1, (vx_int32)(dst.step)), dst.data);
ivx::IVX_CHECK_STATUS(vxuThreshold(ctx, ia, thh, ib));
#if IMPL_OPENVX_TOZERO
if (type == THRESH_TOZERO || type == THRESH_TOZERO_INV)
{
ivx::Image
ic = ivx::Image::createFromHandle(ctx, VX_DF_IMAGE_U8,
ivx::Image::createAddressing(dst.cols*dst.channels(), dst.rows, 1, (vx_int32)(dst.step)), dst.data);
ivx::IVX_CHECK_STATUS(vxuAnd(ctx, ib, ia, ic));
}
#endif
}
catch (const ivx::RuntimeError & e)
{
VX_DbgThrow(e.what());
}
catch (const ivx::WrapperError & e)
{
VX_DbgThrow(e.what());
}
return true;
}
#endif
}
double cv::threshold( InputArray _src, OutputArray _dst, double thresh, double maxval, int type )
@ -1590,8 +1496,6 @@ double cv::threshold( InputArray _src, OutputArray _dst, double thresh, double m
return thresh;
}
CV_OVX_RUN(!ovx::skipSmallImages<VX_KERNEL_THRESHOLD>(src.cols, src.rows),
openvx_threshold(src, dst, ithresh, imaxval, type), (double)ithresh)
thresh = ithresh;
maxval = imaxval;

@ -316,108 +316,5 @@ int CV_CannyTest::validate_test_results( int test_case_idx )
TEST(Imgproc_Canny, accuracy) { CV_CannyTest test; test.safe_run(); }
TEST(Imgproc_Canny, accuracy_deriv) { CV_CannyTest test(true); test.safe_run(); }
/*
* Comparing OpenVX based implementation with the main one
*/
#ifndef IMPLEMENT_PARAM_CLASS
#define IMPLEMENT_PARAM_CLASS(name, type) \
class name \
{ \
public: \
name ( type arg = type ()) : val_(arg) {} \
operator type () const {return val_;} \
private: \
type val_; \
}; \
inline void PrintTo( name param, std::ostream* os) \
{ \
*os << #name << "(" << testing::PrintToString(static_cast< type >(param)) << ")"; \
}
#endif // IMPLEMENT_PARAM_CLASS
IMPLEMENT_PARAM_CLASS(ImagePath, string)
IMPLEMENT_PARAM_CLASS(ApertureSize, int)
IMPLEMENT_PARAM_CLASS(L2gradient, bool)
PARAM_TEST_CASE(CannyVX, ImagePath, ApertureSize, L2gradient)
{
string imgPath;
int kSize;
bool useL2;
Mat src, dst;
virtual void SetUp()
{
imgPath = GET_PARAM(0);
kSize = GET_PARAM(1);
useL2 = GET_PARAM(2);
}
void loadImage()
{
src = cv::imread(cvtest::TS::ptr()->get_data_path() + imgPath, IMREAD_GRAYSCALE);
ASSERT_FALSE(src.empty()) << "can't load image: " << imgPath;
}
};
TEST_P(CannyVX, Accuracy)
{
if(haveOpenVX())
{
loadImage();
setUseOpenVX(false);
Mat canny;
cv::Canny(src, canny, 100, 150, 3);
setUseOpenVX(true);
Mat cannyVX;
cv::Canny(src, cannyVX, 100, 150, 3);
// 'smart' diff check (excluding isolated pixels)
Mat diff, diff1;
absdiff(canny, cannyVX, diff);
boxFilter(diff, diff1, -1, Size(3,3));
const int minPixelsAround = 3; // empirical number
diff1 = diff1 > 255/9 * minPixelsAround;
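// boxFilter() averages the 0/255 absdiff mask over a 3x3 window, so a local mean above
// 255/9 * 3 (~84) means roughly three or more of the nine neighbouring pixels differ;
// the erode() below then discards isolated single-pixel discrepancies before counting.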
erode(diff1, diff1, Mat());
double error = cv::norm(diff1, NORM_L1) / 255;
const int maxError = std::min(10, diff.size().area()/100); // empirical number
if(error > maxError)
{
string outPath =
string("CannyVX-diff-") +
imgPath + '-' +
'k' + char(kSize+'0') + '-' +
(useL2 ? "l2" : "l1");
std::replace(outPath.begin(), outPath.end(), '/', '_');
std::replace(outPath.begin(), outPath.end(), '\\', '_');
std::replace(outPath.begin(), outPath.end(), '.', '_');
imwrite(outPath+".png", diff);
}
ASSERT_LE(error, maxError);
}
}
INSTANTIATE_TEST_CASE_P(
ImgProc, CannyVX,
testing::Combine(
testing::Values(
string("shared/baboon.png"),
string("shared/fruits.png"),
string("shared/lena.png"),
string("shared/pic1.png"),
string("shared/pic3.png"),
string("shared/pic5.png"),
string("shared/pic6.png")
),
testing::Values(ApertureSize(3), ApertureSize(5)),
testing::Values(L2gradient(false), L2gradient(true))
)
);
}} // namespace
/* End of file. */

@ -49,8 +49,6 @@
#include "opencv2/3d.hpp"
#endif
#include "opencv2/core/openvx/ovx_defs.hpp"
#define CV_DESCALE(x,n) (((x) + (1 << ((n)-1))) >> (n))
namespace
@ -1084,154 +1082,6 @@ namespace
}
#endif
#ifdef HAVE_OPENVX
bool openvx_pyrlk(InputArray _prevImg, InputArray _nextImg, InputArray _prevPts, InputOutputArray _nextPts,
OutputArray _status, OutputArray _err)
{
using namespace ivx;
// Pyramids as inputs are not acceptable because there's no (direct or simple) way
// to build vx_pyramid on user data
if(_prevImg.kind() != _InputArray::MAT || _nextImg.kind() != _InputArray::MAT)
return false;
Mat prevImgMat = _prevImg.getMat(), nextImgMat = _nextImg.getMat();
if(prevImgMat.type() != CV_8UC1 || nextImgMat.type() != CV_8UC1)
return false;
if (ovx::skipSmallImages<VX_KERNEL_OPTICAL_FLOW_PYR_LK>(prevImgMat.cols, prevImgMat.rows))
return false;
CV_Assert(prevImgMat.size() == nextImgMat.size());
Mat prevPtsMat = _prevPts.getMat();
int checkPrev = prevPtsMat.checkVector(2, CV_32F, false);
CV_Assert( checkPrev >= 0 );
size_t npoints = checkPrev;
if( !(flags & OPTFLOW_USE_INITIAL_FLOW) )
_nextPts.create(prevPtsMat.size(), prevPtsMat.type(), -1, true);
Mat nextPtsMat = _nextPts.getMat();
CV_Assert( nextPtsMat.checkVector(2, CV_32F, false) == (int)npoints );
_status.create((int)npoints, 1, CV_8U, -1, true);
Mat statusMat = _status.getMat();
uchar* status = statusMat.ptr();
for(size_t i = 0; i < npoints; i++ )
status[i] = true;
// OpenVX doesn't return detection errors
if( _err.needed() )
{
return false;
}
try
{
Context context = ovx::getOpenVXContext();
if(context.vendorID() == VX_ID_KHRONOS)
{
// PyrLK in OVX 1.0.1 performs vxCommitImagePatch incorrectly and crashes
if(VX_VERSION == VX_VERSION_1_0)
return false;
// Implementation ignores border mode
// So check that minimal size of image in pyramid is big enough
int width = prevImgMat.cols, height = prevImgMat.rows;
for(int i = 0; i < maxLevel+1; i++)
{
if(width < winSize.width + 1 || height < winSize.height + 1)
return false;
else
{
width /= 2; height /= 2;
}
}
}
Image prevImg = Image::createFromHandle(context, Image::matTypeToFormat(prevImgMat.type()),
Image::createAddressing(prevImgMat), (void*)prevImgMat.data);
Image nextImg = Image::createFromHandle(context, Image::matTypeToFormat(nextImgMat.type()),
Image::createAddressing(nextImgMat), (void*)nextImgMat.data);
Graph graph = Graph::create(context);
Pyramid prevPyr = Pyramid::createVirtual(graph, (vx_size)maxLevel+1, VX_SCALE_PYRAMID_HALF,
prevImg.width(), prevImg.height(), prevImg.format());
Pyramid nextPyr = Pyramid::createVirtual(graph, (vx_size)maxLevel+1, VX_SCALE_PYRAMID_HALF,
nextImg.width(), nextImg.height(), nextImg.format());
ivx::Node::create(graph, VX_KERNEL_GAUSSIAN_PYRAMID, prevImg, prevPyr);
ivx::Node::create(graph, VX_KERNEL_GAUSSIAN_PYRAMID, nextImg, nextPyr);
Array prevPts = Array::create(context, VX_TYPE_KEYPOINT, npoints);
Array estimatedPts = Array::create(context, VX_TYPE_KEYPOINT, npoints);
Array nextPts = Array::create(context, VX_TYPE_KEYPOINT, npoints);
std::vector<vx_keypoint_t> vxPrevPts(npoints), vxEstPts(npoints), vxNextPts(npoints);
for(size_t i = 0; i < npoints; i++)
{
vx_keypoint_t& prevPt = vxPrevPts[i]; vx_keypoint_t& estPt = vxEstPts[i];
prevPt.x = prevPtsMat.at<Point2f>(i).x; prevPt.y = prevPtsMat.at<Point2f>(i).y;
estPt.x = nextPtsMat.at<Point2f>(i).x; estPt.y = nextPtsMat.at<Point2f>(i).y;
prevPt.tracking_status = estPt.tracking_status = vx_true_e;
}
prevPts.addItems(vxPrevPts); estimatedPts.addItems(vxEstPts);
if( (criteria.type & TermCriteria::COUNT) == 0 )
criteria.maxCount = 30;
else
criteria.maxCount = std::min(std::max(criteria.maxCount, 0), 100);
if( (criteria.type & TermCriteria::EPS) == 0 )
criteria.epsilon = 0.01;
else
criteria.epsilon = std::min(std::max(criteria.epsilon, 0.), 10.);
criteria.epsilon *= criteria.epsilon;
vx_enum termEnum = (criteria.type == TermCriteria::COUNT) ? VX_TERM_CRITERIA_ITERATIONS :
(criteria.type == TermCriteria::EPS) ? VX_TERM_CRITERIA_EPSILON :
VX_TERM_CRITERIA_BOTH;
//minEigThreshold is fixed to 0.0001f
ivx::Scalar termination = ivx::Scalar::create<VX_TYPE_ENUM>(context, termEnum);
ivx::Scalar epsilon = ivx::Scalar::create<VX_TYPE_FLOAT32>(context, criteria.epsilon);
ivx::Scalar numIterations = ivx::Scalar::create<VX_TYPE_UINT32>(context, criteria.maxCount);
ivx::Scalar useInitial = ivx::Scalar::create<VX_TYPE_BOOL>(context, (vx_bool)(flags & OPTFLOW_USE_INITIAL_FLOW));
//assume winSize is square
ivx::Scalar windowSize = ivx::Scalar::create<VX_TYPE_SIZE>(context, (vx_size)winSize.width);
ivx::Node::create(graph, VX_KERNEL_OPTICAL_FLOW_PYR_LK, prevPyr, nextPyr, prevPts, estimatedPts,
nextPts, termination, epsilon, numIterations, useInitial, windowSize);
graph.verify();
graph.process();
nextPts.copyTo(vxNextPts);
for(size_t i = 0; i < npoints; i++)
{
vx_keypoint_t kp = vxNextPts[i];
nextPtsMat.at<Point2f>(i) = Point2f(kp.x, kp.y);
statusMat.at<uchar>(i) = (bool)kp.tracking_status;
}
#ifdef VX_VERSION_1_1
//we should take user memory back before release
//(it's not done automatically according to standard)
prevImg.swapHandle(); nextImg.swapHandle();
#endif
}
catch (const RuntimeError & e)
{
VX_DbgThrow(e.what());
}
catch (const WrapperError & e)
{
VX_DbgThrow(e.what());
}
return true;
}
#endif
};
@ -1247,10 +1097,6 @@ void SparsePyrLKOpticalFlowImpl::calc( InputArray _prevImg, InputArray _nextImg,
ocl::Image2D::isFormatSupported(CV_32F, 1, false),
ocl_calcOpticalFlowPyrLK(_prevImg, _nextImg, _prevPts, _nextPts, _status, _err))
// Disabled due to bad accuracy
CV_OVX_RUN(false,
openvx_pyrlk(_prevImg, _nextImg, _prevPts, _nextPts, _status, _err))
Mat prevPtsMat = _prevPts.getMat();
const int derivDepth = DataType<cv::detail::deriv_type>::depth;

@ -103,7 +103,6 @@ class Builder:
"-DWITH_WEBP=OFF",
"-DWITH_OPENEXR=OFF",
"-DWITH_OPENGL=OFF",
"-DWITH_OPENVX=OFF",
"-DWITH_OPENNI=OFF",
"-DWITH_OPENNI2=OFF",
"-DWITH_PNG=OFF",

@ -33,9 +33,6 @@ endif()
if((NOT ANDROID) AND HAVE_OPENGL)
add_subdirectory(opengl)
endif()
if(HAVE_OPENVX)
add_subdirectory(openvx)
endif()
if(UNIX AND NOT ANDROID AND HAVE_VA)
add_subdirectory(va_intel)
endif()
@ -131,7 +128,6 @@ add_subdirectory(dnn)
add_subdirectory(opencl)
add_subdirectory(sycl)
# add_subdirectory(opengl)
# add_subdirectory(openvx)
add_subdirectory(tapi)
# add_subdirectory(va_intel)

@ -1,25 +0,0 @@
ocv_install_example_src(cpp *.cpp *.hpp CMakeLists.txt)
cmake_minimum_required(VERSION 2.8.12.2)
set(OPENCV_OPENVX_SAMPLE_REQUIRED_DEPS
opencv_core
opencv_imgproc
opencv_imgcodecs
opencv_videoio
opencv_highgui)
ocv_check_dependencies(${OPENCV_OPENVX_SAMPLE_REQUIRED_DEPS})
if(NOT BUILD_EXAMPLES OR NOT OCV_DEPENDENCIES_FOUND)
return()
endif()
project(openvx_samples)
ocv_include_modules_recurse(${OPENCV_OPENVX_SAMPLE_REQUIRED_DEPS})
add_definitions(-DIVX_USE_OPENCV)
add_definitions(-DIVX_HIDE_INFO_WARNINGS)
file(GLOB_RECURSE cpp_samples RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} *.cpp)
foreach(sample_filename ${cpp_samples})
ocv_define_sample(tgt ${sample_filename} openvx)
ocv_target_link_libraries(${tgt} PRIVATE ${OPENCV_LINKER_LIBS} ${OPENCV_OPENVX_SAMPLE_REQUIRED_DEPS})
endforeach()

@ -1,385 +0,0 @@
#include <iostream>
#include <stdexcept>
//OpenVX includes
#include <VX/vx.h>
//OpenCV includes
#include "opencv2/core.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui.hpp"
#ifndef VX_VERSION_1_1
const vx_enum VX_IMAGE_FORMAT = VX_IMAGE_ATTRIBUTE_FORMAT;
const vx_enum VX_IMAGE_WIDTH = VX_IMAGE_ATTRIBUTE_WIDTH;
const vx_enum VX_IMAGE_HEIGHT = VX_IMAGE_ATTRIBUTE_HEIGHT;
const vx_enum VX_MEMORY_TYPE_HOST = VX_IMPORT_TYPE_HOST;
const vx_enum VX_MEMORY_TYPE_NONE = VX_IMPORT_TYPE_NONE;
const vx_enum VX_THRESHOLD_THRESHOLD_VALUE = VX_THRESHOLD_ATTRIBUTE_THRESHOLD_VALUE;
const vx_enum VX_THRESHOLD_THRESHOLD_LOWER = VX_THRESHOLD_ATTRIBUTE_THRESHOLD_LOWER;
const vx_enum VX_THRESHOLD_THRESHOLD_UPPER = VX_THRESHOLD_ATTRIBUTE_THRESHOLD_UPPER;
typedef uintptr_t vx_map_id;
#endif
enum UserMemoryMode
{
COPY, USER_MEM
};
vx_image convertCvMatToVxImage(vx_context context, cv::Mat image, bool toCopy);
cv::Mat copyVxImageToCvMat(vx_image ovxImage);
void swapVxImage(vx_image ovxImage);
vx_status createProcessingGraph(vx_image inputImage, vx_image outputImage, vx_graph& graph);
int ovxDemo(std::string inputPath, UserMemoryMode mode);
vx_image convertCvMatToVxImage(vx_context context, cv::Mat image, bool toCopy)
{
if (!(!image.empty() && image.dims <= 2 && image.channels() == 1))
throw std::runtime_error("Invalid format");
vx_uint32 width = image.cols;
vx_uint32 height = image.rows;
vx_df_image color;
switch (image.depth())
{
case CV_8U:
color = VX_DF_IMAGE_U8;
break;
case CV_16U:
color = VX_DF_IMAGE_U16;
break;
case CV_16S:
color = VX_DF_IMAGE_S16;
break;
case CV_32S:
color = VX_DF_IMAGE_S32;
break;
default:
throw std::runtime_error("Invalid format");
break;
}
vx_imagepatch_addressing_t addr;
addr.dim_x = width;
addr.dim_y = height;
addr.stride_x = (vx_uint32)image.elemSize();
addr.stride_y = (vx_uint32)image.step.p[0];
vx_uint8* ovxData = image.data;
vx_image ovxImage;
if (toCopy)
{
ovxImage = vxCreateImage(context, width, height, color);
if (vxGetStatus((vx_reference)ovxImage) != VX_SUCCESS)
throw std::runtime_error("Failed to create image");
vx_rectangle_t rect;
vx_status status = vxGetValidRegionImage(ovxImage, &rect);
if (status != VX_SUCCESS)
throw std::runtime_error("Failed to get valid region");
#ifdef VX_VERSION_1_1
status = vxCopyImagePatch(ovxImage, &rect, 0, &addr, ovxData, VX_WRITE_ONLY, VX_MEMORY_TYPE_HOST);
if (status != VX_SUCCESS)
throw std::runtime_error("Failed to copy image patch");
#else
status = vxAccessImagePatch(ovxImage, &rect, 0, &addr, (void**)&ovxData, VX_WRITE_ONLY);
if (status != VX_SUCCESS)
throw std::runtime_error("Failed to access image patch");
status = vxCommitImagePatch(ovxImage, &rect, 0, &addr, ovxData);
if (status != VX_SUCCESS)
throw std::runtime_error("Failed to commit image patch");
#endif
}
else
{
ovxImage = vxCreateImageFromHandle(context, color, &addr, (void**)&ovxData, VX_MEMORY_TYPE_HOST);
if (vxGetStatus((vx_reference)ovxImage) != VX_SUCCESS)
throw std::runtime_error("Failed to create image from handle");
}
return ovxImage;
}
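// Usage sketch (illustrative; assumes a valid vx_context 'context' and an 8-bit single-channel
// cv::Mat 'img' such as the one loaded in ovxDemo() below):
//   vx_image copied = convertCvMatToVxImage(context, img, true);   // pixels copied into OpenVX
//   vx_image wrapped = convertCvMatToVxImage(context, img, false); // wraps img.data directly;
//   // for the wrapped case, call swapVxImage() on it before touching img again or releasing it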
cv::Mat copyVxImageToCvMat(vx_image ovxImage)
{
vx_status status;
vx_df_image df_image = 0;
vx_uint32 width, height;
status = vxQueryImage(ovxImage, VX_IMAGE_FORMAT, &df_image, sizeof(vx_df_image));
if (status != VX_SUCCESS)
throw std::runtime_error("Failed to query image");
status = vxQueryImage(ovxImage, VX_IMAGE_WIDTH, &width, sizeof(vx_uint32));
if (status != VX_SUCCESS)
throw std::runtime_error("Failed to query image");
status = vxQueryImage(ovxImage, VX_IMAGE_HEIGHT, &height, sizeof(vx_uint32));
if (status != VX_SUCCESS)
throw std::runtime_error("Failed to query image");
if (!(width > 0 && height > 0)) throw std::runtime_error("Invalid format");
int depth;
switch (df_image)
{
case VX_DF_IMAGE_U8:
depth = CV_8U;
break;
case VX_DF_IMAGE_U16:
depth = CV_16U;
break;
case VX_DF_IMAGE_S16:
depth = CV_16S;
break;
case VX_DF_IMAGE_S32:
depth = CV_32S;
break;
default:
throw std::runtime_error("Invalid format");
break;
}
cv::Mat image(height, width, CV_MAKE_TYPE(depth, 1));
vx_rectangle_t rect;
rect.start_x = rect.start_y = 0;
rect.end_x = width; rect.end_y = height;
vx_imagepatch_addressing_t addr;
addr.dim_x = width;
addr.dim_y = height;
addr.stride_x = (vx_uint32)image.elemSize();
addr.stride_y = (vx_uint32)image.step.p[0];
vx_uint8* matData = image.data;
#ifdef VX_VERSION_1_1
status = vxCopyImagePatch(ovxImage, &rect, 0, &addr, matData, VX_READ_ONLY, VX_MEMORY_TYPE_HOST);
if (status != VX_SUCCESS)
throw std::runtime_error("Failed to copy image patch");
#else
status = vxAccessImagePatch(ovxImage, &rect, 0, &addr, (void**)&matData, VX_READ_ONLY);
if (status != VX_SUCCESS)
throw std::runtime_error("Failed to access image patch");
status = vxCommitImagePatch(ovxImage, &rect, 0, &addr, matData);
if (status != VX_SUCCESS)
throw std::runtime_error("Failed to commit image patch");
#endif
return image;
}
void swapVxImage(vx_image ovxImage)
{
#ifdef VX_VERSION_1_1
vx_status status;
vx_memory_type_e memType;
status = vxQueryImage(ovxImage, VX_IMAGE_MEMORY_TYPE, &memType, sizeof(vx_memory_type_e));
if (status != VX_SUCCESS)
throw std::runtime_error("Failed to query image");
if (memType == VX_MEMORY_TYPE_NONE)
{
//was created by copying user data
throw std::runtime_error("Image wasn't created from user handle");
}
else
{
//was created from user handle
status = vxSwapImageHandle(ovxImage, NULL, NULL, 0);
if (status != VX_SUCCESS)
throw std::runtime_error("Failed to swap image handle");
}
#else
//not supported until OpenVX 1.1
(void) ovxImage;
#endif
}
vx_status createProcessingGraph(vx_image inputImage, vx_image outputImage, vx_graph& graph)
{
vx_status status;
vx_context context = vxGetContext((vx_reference)inputImage);
status = vxGetStatus((vx_reference)context);
if(status != VX_SUCCESS) return status;
graph = vxCreateGraph(context);
status = vxGetStatus((vx_reference)graph);
if (status != VX_SUCCESS) return status;
vx_uint32 width, height;
status = vxQueryImage(inputImage, VX_IMAGE_WIDTH, &width, sizeof(vx_uint32));
if (status != VX_SUCCESS) return status;
status = vxQueryImage(inputImage, VX_IMAGE_HEIGHT, &height, sizeof(vx_uint32));
if (status != VX_SUCCESS) return status;
// Intermediate images
vx_image
smoothed = vxCreateVirtualImage(graph, 0, 0, VX_DF_IMAGE_VIRT),
cannied = vxCreateVirtualImage(graph, 0, 0, VX_DF_IMAGE_VIRT),
halfImg = vxCreateImage(context, width, height, VX_DF_IMAGE_U8),
halfCanny = vxCreateImage(context, width, height, VX_DF_IMAGE_U8);
vx_image virtualImages[] = {smoothed, cannied, halfImg, halfCanny};
for(size_t i = 0; i < sizeof(virtualImages)/sizeof(vx_image); i++)
{
status = vxGetStatus((vx_reference)virtualImages[i]);
if (status != VX_SUCCESS) return status;
}
// Constants
vx_uint32 threshValue = 50;
vx_threshold thresh = vxCreateThreshold(context, VX_THRESHOLD_TYPE_BINARY, VX_TYPE_UINT8);
vxSetThresholdAttribute(thresh, VX_THRESHOLD_THRESHOLD_VALUE,
&threshValue, sizeof(threshValue));
vx_uint32 threshCannyMin = 127;
vx_uint32 threshCannyMax = 192;
vx_threshold threshCanny = vxCreateThreshold(context, VX_THRESHOLD_TYPE_RANGE, VX_TYPE_UINT8);
vxSetThresholdAttribute(threshCanny, VX_THRESHOLD_THRESHOLD_LOWER, &threshCannyMin,
sizeof(threshCannyMin));
vxSetThresholdAttribute(threshCanny, VX_THRESHOLD_THRESHOLD_UPPER, &threshCannyMax,
sizeof(threshCannyMax));
vx_float32 alphaValue = 0.5;
vx_scalar alpha = vxCreateScalar(context, VX_TYPE_FLOAT32, &alphaValue);
// Sequence of meaningless image operations
vx_node nodes[] = {
vxGaussian3x3Node(graph, inputImage, smoothed),
vxCannyEdgeDetectorNode(graph, smoothed, threshCanny, 3, VX_NORM_L2, cannied),
vxAccumulateWeightedImageNode(graph, inputImage, alpha, halfImg),
vxAccumulateWeightedImageNode(graph, cannied, alpha, halfCanny),
vxAddNode(graph, halfImg, halfCanny, VX_CONVERT_POLICY_SATURATE, outputImage)
};
for (size_t i = 0; i < sizeof(nodes) / sizeof(vx_node); i++)
{
status = vxGetStatus((vx_reference)nodes[i]);
if (status != VX_SUCCESS) return status;
}
status = vxVerifyGraph(graph);
return status;
}
int ovxDemo(std::string inputPath, UserMemoryMode mode)
{
cv::Mat image = cv::imread(inputPath, cv::IMREAD_GRAYSCALE);
if (image.empty()) return -1;
//check image format
if (image.depth() != CV_8U || image.channels() != 1) return -1;
vx_status status;
vx_context context = vxCreateContext();
status = vxGetStatus((vx_reference)context);
if (status != VX_SUCCESS) return status;
//put user data from cv::Mat to vx_image
vx_image ovxImage;
ovxImage = convertCvMatToVxImage(context, image, mode == COPY);
vx_uint32 width = image.cols, height = image.rows;
vx_image ovxResult;
cv::Mat output;
if (mode == COPY)
{
//we will copy data from vx_image to cv::Mat
ovxResult = vxCreateImage(context, width, height, VX_DF_IMAGE_U8);
if (vxGetStatus((vx_reference)ovxResult) != VX_SUCCESS)
throw std::runtime_error("Failed to create image");
}
else
{
//create vx_image based on user data, no copying required
output = cv::Mat(height, width, CV_8U, cv::Scalar(0));
ovxResult = convertCvMatToVxImage(context, output, false);
}
vx_graph graph;
status = createProcessingGraph(ovxImage, ovxResult, graph);
if (status != VX_SUCCESS) return status;
// Graph execution
status = vxProcessGraph(graph);
if (status != VX_SUCCESS) return status;
//getting resulting image in cv::Mat
if (mode == COPY)
{
output = copyVxImageToCvMat(ovxResult);
}
else
{
//we should take user memory back from vx_image before using it (even before reading)
swapVxImage(ovxResult);
}
//here output goes
cv::imshow("processing result", output);
cv::waitKey(0);
//we need to take user memory back before releasing the image
if (mode == USER_MEM)
swapVxImage(ovxImage);
cv::destroyAllWindows();
status = vxReleaseContext(&context);
return status;
}
int main(int argc, char *argv[])
{
const std::string keys =
"{help h usage ? | | }"
"{image | <none> | image to be processed}"
"{mode | copy | user memory interaction mode: \n"
"copy: create VX images and copy data to/from them\n"
"user_mem: use handles to user-allocated memory}"
;
cv::CommandLineParser parser(argc, argv, keys);
parser.about("OpenVX interoperability sample demonstrating standard OpenVX API."
"The application loads an image, processes it with OpenVX graph and outputs result in a window");
if (parser.has("help"))
{
parser.printMessage();
return 0;
}
std::string imgPath = parser.get<std::string>("image");
std::string modeString = parser.get<std::string>("mode");
UserMemoryMode mode;
if(modeString == "copy")
{
mode = COPY;
}
else if(modeString == "user_mem")
{
mode = USER_MEM;
}
else if(modeString == "map")
{
std::cerr << modeString << " is not implemented in this sample" << std::endl;
return -1;
}
else
{
std::cerr << modeString << ": unknown memory mode" << std::endl;
return -1;
}
if (!parser.check())
{
parser.printErrors();
return -1;
}
return ovxDemo(imgPath, mode);
}

@ -1,214 +0,0 @@
#include <iostream>
#include <stdexcept>
//wrappers
#include "ivx.hpp"
//OpenCV includes
#include "opencv2/core.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui.hpp"
enum UserMemoryMode
{
COPY, USER_MEM, MAP
};
ivx::Graph createProcessingGraph(ivx::Image& inputImage, ivx::Image& outputImage);
int ovxDemo(std::string inputPath, UserMemoryMode mode);
ivx::Graph createProcessingGraph(ivx::Image& inputImage, ivx::Image& outputImage)
{
using namespace ivx;
Context context = inputImage.get<Context>();
Graph graph = Graph::create(context);
vx_uint32 width = inputImage.width();
vx_uint32 height = inputImage.height();
// Intermediate images
Image
smoothed = Image::createVirtual(graph),
cannied = Image::createVirtual(graph),
halfImg = Image::create(context, width, height, VX_DF_IMAGE_U8),
halfCanny = Image::create(context, width, height, VX_DF_IMAGE_U8);
// Constants
vx_uint32 threshCannyMin = 127;
vx_uint32 threshCannyMax = 192;
Threshold threshCanny = Threshold::createRange(context, VX_TYPE_UINT8, threshCannyMin, threshCannyMax);
ivx::Scalar alpha = ivx::Scalar::create<VX_TYPE_FLOAT32>(context, 0.5);
// Sequence of some image operations
// Node can also be added in function-like style
nodes::gaussian3x3(graph, inputImage, smoothed);
Node::create(graph, VX_KERNEL_CANNY_EDGE_DETECTOR, smoothed, threshCanny,
ivx::Scalar::create<VX_TYPE_INT32>(context, 3),
ivx::Scalar::create<VX_TYPE_ENUM>(context, VX_NORM_L2), cannied);
Node::create(graph, VX_KERNEL_ACCUMULATE_WEIGHTED, inputImage, alpha, halfImg);
Node::create(graph, VX_KERNEL_ACCUMULATE_WEIGHTED, cannied, alpha, halfCanny);
Node::create(graph, VX_KERNEL_ADD, halfImg, halfCanny,
ivx::Scalar::create<VX_TYPE_ENUM>(context, VX_CONVERT_POLICY_SATURATE), outputImage);
graph.verify();
return graph;
}
int ovxDemo(std::string inputPath, UserMemoryMode mode)
{
using namespace cv;
using namespace ivx;
Mat image = imread(inputPath, IMREAD_GRAYSCALE);
if (image.empty()) return -1;
//check image format
if (image.depth() != CV_8U || image.channels() != 1) return -1;
try
{
Context context = Context::create();
//put user data from cv::Mat to vx_image
vx_df_image color = Image::matTypeToFormat(image.type());
vx_uint32 width = image.cols, height = image.rows;
Image ivxImage;
if (mode == COPY)
{
ivxImage = Image::create(context, width, height, color);
ivxImage.copyFrom(0, image);
}
else
{
ivxImage = Image::createFromHandle(context, color, Image::createAddressing(image), image.data);
}
Image ivxResult;
Image::Patch resultPatch;
Mat output;
if (mode == COPY || mode == MAP)
{
//we will copy or map data from vx_image to cv::Mat
ivxResult = ivx::Image::create(context, width, height, VX_DF_IMAGE_U8);
}
else // if (mode == MAP_TO_VX)
{
//create vx_image based on user data, no copying required
output = cv::Mat(height, width, CV_8U, cv::Scalar(0));
ivxResult = Image::createFromHandle(context, Image::matTypeToFormat(CV_8U),
Image::createAddressing(output), output.data);
}
Graph graph = createProcessingGraph(ivxImage, ivxResult);
// Graph execution
graph.process();
//getting resulting image in cv::Mat
if (mode == COPY)
{
ivxResult.copyTo(0, output);
}
else if (mode == MAP)
{
//create cv::Mat based on vx_image mapped data
resultPatch.map(ivxResult, 0, ivxResult.getValidRegion());
//generally this is a very bad idea!
//but in our case unmap() won't happen while output is still in use
output = resultPatch.getMat();
}
else // if (mode == MAP_TO_VX)
{
#ifdef VX_VERSION_1_1
//we should take user memory back from vx_image before using it (even before reading)
ivxResult.swapHandle();
#endif
}
//here output goes
cv::imshow("processing result", output);
cv::waitKey(0);
cv::destroyAllWindows();
#ifdef VX_VERSION_1_1
if (mode != COPY)
{
//we should take user memory back before release
//(it's not done automatically according to standard)
ivxImage.swapHandle();
if (mode == USER_MEM) ivxResult.swapHandle();
}
#endif
//the line is unnecessary since unmapping is done on destruction of patch
//resultPatch.unmap();
}
catch (const ivx::RuntimeError& e)
{
std::cerr << "Error: code = " << e.status() << ", message = " << e.what() << std::endl;
return e.status();
}
catch (const ivx::WrapperError& e)
{
std::cerr << "Error: message = " << e.what() << std::endl;
return -1;
}
return 0;
}
int main(int argc, char *argv[])
{
const std::string keys =
"{help h usage ? | | }"
"{image | <none> | image to be processed}"
"{mode | copy | user memory interaction mode: \n"
"copy: create VX images and copy data to/from them\n"
"user_mem: use handles to user-allocated memory\n"
"map: map resulting VX image to user memory}"
;
cv::CommandLineParser parser(argc, argv, keys);
parser.about("OpenVX interoperability sample demonstrating OpenVX wrappers usage."
"The application loads an image, processes it with OpenVX graph and outputs result in a window");
if (parser.has("help"))
{
parser.printMessage();
return 0;
}
std::string imgPath = parser.get<std::string>("image");
std::string modeString = parser.get<std::string>("mode");
UserMemoryMode mode;
if(modeString == "copy")
{
mode = COPY;
}
else if(modeString == "user_mem")
{
mode = USER_MEM;
}
else if(modeString == "map")
{
mode = MAP;
}
else
{
std::cerr << modeString << ": unknown memory mode" << std::endl;
return -1;
}
if (!parser.check())
{
parser.printErrors();
return -1;
}
return ovxDemo(imgPath, mode);
}

@ -1,250 +0,0 @@
#include <iostream>
#include <stdexcept>
//wrappers
#include "ivx.hpp"
//OpenCV includes
#include "opencv2/core.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui.hpp"
enum UserMemoryMode
{
COPY, USER_MEM, MAP
};
ivx::Graph createProcessingGraph(ivx::Image& inputImage, ivx::Image& outputImage);
int ovxDemo(std::string inputPath, UserMemoryMode mode);
ivx::Graph createProcessingGraph(ivx::Image& inputImage, ivx::Image& outputImage)
{
using namespace ivx;
Context context = inputImage.get<Context>();
Graph graph = Graph::create(context);
vx_uint32 width = inputImage.width();
vx_uint32 height = inputImage.height();
// Intermediate images
Image
yuv = Image::createVirtual(graph, 0, 0, VX_DF_IMAGE_YUV4),
gray = Image::createVirtual(graph),
smoothed = Image::createVirtual(graph),
cannied = Image::createVirtual(graph),
halfImg = Image::create(context, width, height, VX_DF_IMAGE_U8),
halfCanny = Image::create(context, width, height, VX_DF_IMAGE_U8);
// Constants
vx_uint32 threshCannyMin = 127;
vx_uint32 threshCannyMax = 192;
Threshold threshCanny = Threshold::createRange(context, VX_TYPE_UINT8, threshCannyMin, threshCannyMax);
ivx::Scalar alpha = ivx::Scalar::create<VX_TYPE_FLOAT32>(context, 0.5);
// Sequence of some image operations
Node::create(graph, VX_KERNEL_COLOR_CONVERT, inputImage, yuv);
Node::create(graph, VX_KERNEL_CHANNEL_EXTRACT, yuv,
ivx::Scalar::create<VX_TYPE_ENUM>(context, VX_CHANNEL_Y), gray);
//node can also be added in function-like style
nodes::gaussian3x3(graph, gray, smoothed);
Node::create(graph, VX_KERNEL_CANNY_EDGE_DETECTOR, smoothed, threshCanny,
ivx::Scalar::create<VX_TYPE_INT32>(context, 3),
ivx::Scalar::create<VX_TYPE_ENUM>(context, VX_NORM_L2), cannied);
Node::create(graph, VX_KERNEL_ACCUMULATE_WEIGHTED, gray, alpha, halfImg);
Node::create(graph, VX_KERNEL_ACCUMULATE_WEIGHTED, cannied, alpha, halfCanny);
Node::create(graph, VX_KERNEL_ADD, halfImg, halfCanny,
ivx::Scalar::create<VX_TYPE_ENUM>(context, VX_CONVERT_POLICY_SATURATE), outputImage);
graph.verify();
return graph;
}
int ovxDemo(std::string inputPath, UserMemoryMode mode)
{
using namespace cv;
using namespace ivx;
Mat frame;
VideoCapture vc(inputPath);
if (!vc.isOpened())
return -1;
vc >> frame;
if (frame.empty()) return -1;
//check frame format
if (frame.type() != CV_8UC3) return -1;
try
{
Context context = Context::create();
//put user data from cv::Mat to vx_image
vx_df_image color = Image::matTypeToFormat(frame.type());
vx_uint32 width = frame.cols, height = frame.rows;
Image ivxImage;
if (mode == COPY)
{
ivxImage = Image::create(context, width, height, color);
}
else
{
ivxImage = Image::createFromHandle(context, color, Image::createAddressing(frame), frame.data);
}
Image ivxResult;
Mat output;
if (mode == COPY || mode == MAP)
{
//we will copy or map data from vx_image to cv::Mat
ivxResult = ivx::Image::create(context, width, height, VX_DF_IMAGE_U8);
}
else // if (mode == MAP_TO_VX)
{
//create vx_image based on user data, no copying required
output = cv::Mat(height, width, CV_8U, cv::Scalar(0));
ivxResult = Image::createFromHandle(context, Image::matTypeToFormat(CV_8U),
Image::createAddressing(output), output.data);
}
Graph graph = createProcessingGraph(ivxImage, ivxResult);
bool stop = false;
while (!stop)
{
if (mode == COPY) ivxImage.copyFrom(0, frame);
// Graph execution
graph.process();
//getting resulting image in cv::Mat
Image::Patch resultPatch;
std::vector<void*> ptrs;
std::vector<void*> prevPtrs(ivxResult.planes());
if (mode == COPY)
{
ivxResult.copyTo(0, output);
}
else if (mode == MAP)
{
//create cv::Mat based on vx_image mapped data
resultPatch.map(ivxResult, 0, ivxResult.getValidRegion(), VX_READ_AND_WRITE);
//generally this is a very bad idea!
//but in our case unmap() won't happen while output is still in use
output = resultPatch.getMat();
}
else // if(mode == MAP_TO_VX)
{
#ifdef VX_VERSION_1_1
//we should take user memory back from vx_image before using it (even before reading)
ivxResult.swapHandle(ptrs, prevPtrs);
#endif
}
//here output goes
imshow("press q to quit", output);
if ((char)waitKey(1) == 'q') stop = true;
#ifdef VX_VERSION_1_1
//restore handle
if (mode == USER_MEM)
{
ivxResult.swapHandle(prevPtrs, ptrs);
}
#endif
//this line is unnecessary since unmapping is done on destruction of patch
//resultPatch.unmap();
//grab next frame
Mat temp = frame;
vc >> frame;
if (frame.empty()) stop = true;
if (mode != COPY && frame.data != temp.data)
{
//frame was reallocated, pointer to data changed
frame.copyTo(temp);
}
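// In the non-COPY modes ivxImage wraps the first frame's buffer (created via createFromHandle
// above), so each newly captured frame has to end up at that same address; copying the
// reallocated frame back into 'temp' keeps the wrapped memory current.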
}
destroyAllWindows();
#ifdef VX_VERSION_1_1
if (mode != COPY)
{
//we should take user memory back before release
//(it's not done automatically according to standard)
ivxImage.swapHandle();
if (mode == USER_MEM) ivxResult.swapHandle();
}
#endif
}
catch (const ivx::RuntimeError& e)
{
std::cerr << "Error: code = " << e.status() << ", message = " << e.what() << std::endl;
return e.status();
}
catch (const ivx::WrapperError& e)
{
std::cerr << "Error: message = " << e.what() << std::endl;
return -1;
}
return 0;
}
int main(int argc, char *argv[])
{
const std::string keys =
"{help h usage ? | | }"
"{video | <none> | video file to be processed}"
"{mode | copy | user memory interaction mode: \n"
"copy: create VX images and copy data to/from them\n"
"user_mem: use handles to user-allocated memory\n"
"map: map resulting VX image to user memory}"
;
cv::CommandLineParser parser(argc, argv, keys);
parser.about("OpenVX interoperability sample demonstrating OpenVX wrappers usage."
"The application opens a video and processes it with OpenVX graph while outputting result in a window");
if (parser.has("help"))
{
parser.printMessage();
return 0;
}
std::string videoPath = parser.get<std::string>("video");
std::string modeString = parser.get<std::string>("mode");
UserMemoryMode mode;
if(modeString == "copy")
{
mode = COPY;
}
else if(modeString == "user_mem")
{
mode = USER_MEM;
}
else if(modeString == "map")
{
mode = MAP;
}
else
{
std::cerr << modeString << ": unknown memory mode" << std::endl;
return -1;
}
if (!parser.check())
{
parser.printErrors();
return -1;
}
return ovxDemo(videoPath, mode);
}