dnn: drop legacy Inference Engine NN builder API

pull/21591/head
Alexander Alekhin 3 years ago
parent a00a0dbfcd
commit effce0573b
  1. cmake/OpenCVDetectInferenceEngine.cmake (106)
  2. modules/dnn/CMakeLists.txt (7)
  3. modules/dnn/include/opencv2/dnn/dnn.hpp (2)
  4. modules/dnn/include/opencv2/dnn/utils/inference_engine.hpp (8)
  5. modules/dnn/src/dnn.cpp (565)
  6. modules/dnn/src/layers/batch_norm_layer.cpp (17)
  7. modules/dnn/src/layers/blank_layer.cpp (32)
  8. modules/dnn/src/layers/concat_layer.cpp (20)
  9. modules/dnn/src/layers/const_layer.cpp (17)
  10. modules/dnn/src/layers/convolution_layer.cpp (170)
  11. modules/dnn/src/layers/detection_output_layer.cpp (26)
  12. modules/dnn/src/layers/elementwise_layers.cpp (130)
  13. modules/dnn/src/layers/eltwise_layer.cpp (35)
  14. modules/dnn/src/layers/flatten_layer.cpp (26)
  15. modules/dnn/src/layers/fully_connected_layer.cpp (26)
  16. modules/dnn/src/layers/lrn_layer.cpp (29)
  17. modules/dnn/src/layers/mvn_layer.cpp (16)
  18. modules/dnn/src/layers/normalize_bbox_layer.cpp (57)
  19. modules/dnn/src/layers/padding_layer.cpp (29)
  20. modules/dnn/src/layers/permute_layer.cpp (19)
  21. modules/dnn/src/layers/pooling_layer.cpp (75)
  22. modules/dnn/src/layers/prior_box_layer.cpp (65)
  23. modules/dnn/src/layers/proposal_layer.cpp (36)
  24. modules/dnn/src/layers/reorg_layer.cpp (15)
  25. modules/dnn/src/layers/reshape_layer.cpp (16)
  26. modules/dnn/src/layers/resize_layer.cpp (33)
  27. modules/dnn/src/layers/scale_layer.cpp (37)
  28. modules/dnn/src/layers/slice_layer.cpp (63)
  29. modules/dnn/src/layers/softmax_layer.cpp (19)
  30. modules/dnn/src/op_inf_engine.cpp (1011)
  31. modules/dnn/src/op_inf_engine.hpp (192)
  32. modules/dnn/test/test_common.impl.hpp (11)
  33. modules/dnn/test/test_ie_models.cpp (13)
  34. modules/dnn/test/test_layers.cpp (21)
  35. modules/dnn/test/test_misc.cpp (49)

@ -23,80 +23,6 @@ endif()
# ======================
macro(ocv_ie_find_extra_libraries find_prefix find_suffix)
file(GLOB libraries "${INF_ENGINE_LIB_DIRS}/${find_prefix}inference_engine*${find_suffix}")
foreach(full_path IN LISTS libraries)
get_filename_component(library "${full_path}" NAME_WE)
string(REPLACE "${find_prefix}" "" library "${library}")
if(library STREQUAL "inference_engine" OR library STREQUAL "inference_engined")
# skip
else()
add_library(${library} UNKNOWN IMPORTED)
set_target_properties(${library} PROPERTIES
IMPORTED_LOCATION "${full_path}")
list(APPEND custom_libraries ${library})
endif()
endforeach()
endmacro()
function(add_custom_ie_build _inc _lib _lib_rel _lib_dbg _msg)
if(NOT _inc OR NOT (_lib OR _lib_rel OR _lib_dbg))
return()
endif()
if(NOT _lib)
if(_lib_rel)
set(_lib "${_lib_rel}")
else()
set(_lib "${_lib_dbg}")
endif()
endif()
add_library(inference_engine UNKNOWN IMPORTED)
set_target_properties(inference_engine PROPERTIES
IMPORTED_LOCATION "${_lib}"
IMPORTED_IMPLIB_RELEASE "${_lib_rel}"
IMPORTED_IMPLIB_DEBUG "${_lib_dbg}"
INTERFACE_INCLUDE_DIRECTORIES "${_inc}"
)
set(custom_libraries "")
set(__prefixes "${CMAKE_FIND_LIBRARY_PREFIXES}")
if(NOT __prefixes)
set(__prefixes "_empty_")
endif()
foreach(find_prefix ${__prefixes})
if(find_prefix STREQUAL "_empty_") # foreach doesn't iterate over empty elements
set(find_prefix "")
endif()
if(NOT DEFINED INFERENCE_ENGINE_FIND_LIBRARY_SUFFIXES) # allow custom override
set(INFERENCE_ENGINE_FIND_LIBRARY_SUFFIXES ${CMAKE_FIND_LIBRARY_SUFFIXES})
if(APPLE)
ocv_list_filterout(INFERENCE_ENGINE_FIND_LIBRARY_SUFFIXES "^.so$") # skip plugins (can't be linked)
endif()
endif()
foreach(find_suffix ${INFERENCE_ENGINE_FIND_LIBRARY_SUFFIXES})
ocv_ie_find_extra_libraries("${find_prefix}" "${find_suffix}")
endforeach()
if(NOT CMAKE_FIND_LIBRARY_SUFFIXES)
ocv_ie_find_extra_libraries("${find_prefix}" "")
endif()
endforeach()
if(NOT INF_ENGINE_RELEASE VERSION_GREATER "2018050000")
find_library(INF_ENGINE_OMP_LIBRARY iomp5 PATHS "${INF_ENGINE_OMP_DIR}" NO_DEFAULT_PATH)
if(NOT INF_ENGINE_OMP_LIBRARY)
message(WARNING "OpenMP for IE have not been found. Set INF_ENGINE_OMP_DIR variable if you experience build errors.")
endif()
endif()
if(EXISTS "${INF_ENGINE_OMP_LIBRARY}")
set_target_properties(inference_engine PROPERTIES IMPORTED_LINK_INTERFACE_LIBRARIES "${INF_ENGINE_OMP_LIBRARY}")
endif()
set(INF_ENGINE_VERSION "Unknown" CACHE STRING "")
set(INF_ENGINE_TARGET "inference_engine;${custom_libraries}" PARENT_SCOPE)
message(STATUS "Detected InferenceEngine: ${_msg}")
endfunction()
# ======================
find_package(InferenceEngine QUIET)
if(InferenceEngine_FOUND)
set(INF_ENGINE_TARGET ${InferenceEngine_LIBRARIES})
@ -118,38 +44,6 @@ elseif(DEFINED INF_ENGINE_RELEASE)
endif()
set(INF_ENGINE_RELEASE "${INF_ENGINE_RELEASE_INIT}" CACHE STRING "Force IE version, should be in form YYYYAABBCC (e.g. 2020.1.0.2 -> 2020010002)")
if(NOT INF_ENGINE_TARGET AND INF_ENGINE_LIB_DIRS AND INF_ENGINE_INCLUDE_DIRS)
find_path(ie_custom_inc "inference_engine.hpp" PATHS "${INF_ENGINE_INCLUDE_DIRS}" NO_DEFAULT_PATH)
if(CMAKE_BUILD_TYPE STREQUAL "Debug")
find_library(ie_custom_lib_dbg "inference_engined" PATHS "${INF_ENGINE_LIB_DIRS}" NO_DEFAULT_PATH) # Win32 and MacOSX
endif()
find_library(ie_custom_lib "inference_engine" PATHS "${INF_ENGINE_LIB_DIRS}" NO_DEFAULT_PATH)
find_library(ie_custom_lib_rel "inference_engine" PATHS "${INF_ENGINE_LIB_DIRS}/Release" NO_DEFAULT_PATH)
find_library(ie_custom_lib_dbg "inference_engine" PATHS "${INF_ENGINE_LIB_DIRS}/Debug" NO_DEFAULT_PATH)
add_custom_ie_build("${ie_custom_inc}" "${ie_custom_lib}" "${ie_custom_lib_rel}" "${ie_custom_lib_dbg}" "INF_ENGINE_{INCLUDE,LIB}_DIRS")
endif()
set(_loc "$ENV{INTEL_OPENVINO_DIR}")
if(NOT _loc AND DEFINED ENV{INTEL_CVSDK_DIR})
set(_loc "$ENV{INTEL_CVSDK_DIR}") # OpenVINO 2018.x
endif()
if(NOT INF_ENGINE_TARGET AND _loc)
if(NOT INF_ENGINE_RELEASE VERSION_GREATER "2018050000")
set(INF_ENGINE_PLATFORM_DEFAULT "ubuntu_16.04")
else()
set(INF_ENGINE_PLATFORM_DEFAULT "")
endif()
set(INF_ENGINE_PLATFORM "${INF_ENGINE_PLATFORM_DEFAULT}" CACHE STRING "InferenceEngine platform (library dir)")
find_path(ie_custom_env_inc "inference_engine.hpp" PATHS "${_loc}/deployment_tools/inference_engine/include" NO_DEFAULT_PATH)
if(CMAKE_BUILD_TYPE STREQUAL "Debug")
find_library(ie_custom_env_lib_dbg "inference_engined" PATHS "${_loc}/deployment_tools/inference_engine/lib/${INF_ENGINE_PLATFORM}/intel64" NO_DEFAULT_PATH)
endif()
find_library(ie_custom_env_lib "inference_engine" PATHS "${_loc}/deployment_tools/inference_engine/lib/${INF_ENGINE_PLATFORM}/intel64" NO_DEFAULT_PATH)
find_library(ie_custom_env_lib_rel "inference_engine" PATHS "${_loc}/deployment_tools/inference_engine/lib/intel64/Release" NO_DEFAULT_PATH)
find_library(ie_custom_env_lib_dbg "inference_engine" PATHS "${_loc}/deployment_tools/inference_engine/lib/intel64/Debug" NO_DEFAULT_PATH)
add_custom_ie_build("${ie_custom_env_inc}" "${ie_custom_env_lib}" "${ie_custom_env_lib_rel}" "${ie_custom_env_lib_dbg}" "OpenVINO (${_loc})")
endif()
set(tgts)
set(defs)

@ -115,7 +115,12 @@ elseif(CMAKE_CXX_COMPILER_ID STREQUAL "Clang")
endif()
set(dnn_runtime_libs "")
if(TARGET ocv.3rdparty.openvino)
ocv_option(OPENCV_DNN_OPENVINO "Build with OpenVINO support (2021.4+)" (TARGET ocv.3rdparty.openvino))
if(TARGET ocv.3rdparty.openvino AND OPENCV_DNN_OPENVINO)
if(NOT HAVE_OPENVINO AND NOT HAVE_NGRAPH)
message(FATAL_ERROR "DNN: Inference Engine is not supported without enabled 'nGraph'. Check build configuration.")
endif()
list(APPEND dnn_runtime_libs ocv.3rdparty.openvino)
endif()

@ -284,8 +284,6 @@ CV__DNN_EXPERIMENTAL_NS_BEGIN
*/
virtual Ptr<BackendNode> initHalide(const std::vector<Ptr<BackendWrapper> > &inputs);
virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> > &inputs);
virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> > &inputs, const std::vector<Ptr<BackendNode> >& nodes);
/**

@ -15,14 +15,18 @@ CV__DNN_EXPERIMENTAL_NS_BEGIN
/* Values for 'OPENCV_DNN_BACKEND_INFERENCE_ENGINE_TYPE' parameter */
/// @deprecated
#define CV_DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_API "NN_BUILDER"
/// @deprecated
#define CV_DNN_BACKEND_INFERENCE_ENGINE_NGRAPH "NGRAPH"
/** @brief Returns Inference Engine internal backend API.
*
* See values of `CV_DNN_BACKEND_INFERENCE_ENGINE_*` macros.
*
* Default value is controlled through `OPENCV_DNN_BACKEND_INFERENCE_ENGINE_TYPE` runtime parameter (environment variable).
* `OPENCV_DNN_BACKEND_INFERENCE_ENGINE_TYPE` runtime parameter (environment variable) is ignored since 4.6.0.
*
* @deprecated
*/
CV_EXPORTS_W cv::String getInferenceEngineBackendType();
@ -31,6 +35,8 @@ CV_EXPORTS_W cv::String getInferenceEngineBackendType();
* See values of `CV_DNN_BACKEND_INFERENCE_ENGINE_*` macros.
*
* @returns previous value of internal backend API
*
* @deprecated
*/
CV_EXPORTS_W cv::String setInferenceEngineBackendType(const cv::String& newBackendType);
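With the NN builder path gone, the only Inference Engine backend that remains is the nGraph-based one, and `DNN_BACKEND_INFERENCE_ENGINE` simply resolves to it. A minimal usage sketch (illustrative only, not part of this patch; the model file names are placeholders):

#include <opencv2/dnn.hpp>

int main()
{
    // Paths are placeholders for an OpenVINO IR model.
    cv::dnn::Net net = cv::dnn::readNetFromModelOptimizer("model.xml", "model.bin");

    // After this change, DNN_BACKEND_INFERENCE_ENGINE maps to the nGraph backend,
    // so requesting it explicitly is equivalent.
    net.setPreferableBackend(cv::dnn::DNN_BACKEND_INFERENCE_ENGINE_NGRAPH);
    net.setPreferableTarget(cv::dnn::DNN_TARGET_CPU);
    return 0;
}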

@ -164,40 +164,24 @@ private:
#ifdef HAVE_INF_ENGINE
if (checkIETarget(DNN_TARGET_CPU)) {
#ifdef HAVE_DNN_IE_NN_BUILDER_2019
backends.push_back(std::make_pair(DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019, DNN_TARGET_CPU));
#endif
#ifdef HAVE_DNN_NGRAPH
backends.push_back(std::make_pair(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH, DNN_TARGET_CPU));
#endif
}
if (checkIETarget(DNN_TARGET_MYRIAD)) {
#ifdef HAVE_DNN_IE_NN_BUILDER_2019
backends.push_back(std::make_pair(DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019, DNN_TARGET_MYRIAD));
#endif
#ifdef HAVE_DNN_NGRAPH
backends.push_back(std::make_pair(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH, DNN_TARGET_MYRIAD));
#endif
}
#ifdef HAVE_DNN_IE_NN_BUILDER_2019
if (checkIETarget(DNN_TARGET_FPGA))
backends.push_back(std::make_pair(DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019, DNN_TARGET_FPGA));
#endif
#ifdef HAVE_OPENCL
if (cv::ocl::useOpenCL() && ocl::Device::getDefault().isIntel())
{
if (checkIETarget(DNN_TARGET_OPENCL)) {
#ifdef HAVE_DNN_IE_NN_BUILDER_2019
backends.push_back(std::make_pair(DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019, DNN_TARGET_OPENCL));
#endif
#ifdef HAVE_DNN_NGRAPH
backends.push_back(std::make_pair(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH, DNN_TARGET_OPENCL));
#endif
}
if (checkIETarget(DNN_TARGET_OPENCL_FP16)) {
#ifdef HAVE_DNN_IE_NN_BUILDER_2019
backends.push_back(std::make_pair(DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019, DNN_TARGET_OPENCL_FP16));
#endif
#ifdef HAVE_DNN_NGRAPH
backends.push_back(std::make_pair(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH, DNN_TARGET_OPENCL_FP16));
#endif
@ -232,7 +216,7 @@ std::vector<Target> getAvailableTargets(Backend be)
be = (Backend)PARAM_DNN_BACKEND_DEFAULT;
#ifdef HAVE_INF_ENGINE
if (be == DNN_BACKEND_INFERENCE_ENGINE)
be = getInferenceEngineBackendTypeParam();
be = DNN_BACKEND_INFERENCE_ENGINE_NGRAPH;
#endif
std::vector<Target> result;
@ -588,8 +572,7 @@ struct DataLayer : public Layer
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
return backendId == DNN_BACKEND_OPENCV ||
(backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && inputsData.size() == 1);
return backendId == DNN_BACKEND_OPENCV;
}
void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr) CV_OVERRIDE
@ -780,39 +763,6 @@ struct DataLayer : public Layer
}
}
#ifdef HAVE_DNN_IE_NN_BUILDER_2019
virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
{
CV_CheckEQ(inputsData.size(), (size_t)1, "");
CV_CheckEQ(inputsData[0].dims, 4, "");
const size_t numChannels = inputsData[0].size[1];
CV_Assert(numChannels <= 4);
// Scale
InferenceEngine::TensorDesc td(InferenceEngine::Precision::FP32, {numChannels},
InferenceEngine::Layout::C);
auto weights = InferenceEngine::make_shared_blob<float>(td);
weights->allocate();
float* weight_buf = weights->buffer().as<float*>();
std::fill(weight_buf, weight_buf + numChannels, scaleFactors[0]);
// Mean subtraction
auto biases = InferenceEngine::make_shared_blob<float>(td);
biases->allocate();
float* bias_buf = biases->buffer().as<float*>();
for (int i = 0; i < numChannels; ++i)
{
bias_buf[i] = -means[0][i] * scaleFactors[0];
}
InferenceEngine::Builder::Layer ieLayer = InferenceEngine::Builder::ScaleShiftLayer(name);
addConstantData("weights", weights, ieLayer);
addConstantData("biases", biases, ieLayer);
return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
}
#endif // HAVE_DNN_IE_NN_BUILDER_2019
std::vector<String> outNames;
std::vector<MatShape> shapes;
@ -1070,18 +1020,14 @@ static Ptr<BackendWrapper> wrapMat(int backendId, int targetId, cv::Mat& m)
}
else if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
{
#ifdef HAVE_DNN_IE_NN_BUILDER_2019
return Ptr<BackendWrapper>(new InfEngineBackendWrapper(targetId, m));
#else
CV_Error(Error::StsNotImplemented, "This OpenCV version is built without Inference Engine NN Builder API support");
#endif
CV_ERROR_DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019;
}
else if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
{
#ifdef HAVE_DNN_NGRAPH
return Ptr<BackendWrapper>(new NgraphBackendWrapper(targetId, m));
#else
CV_Error(Error::StsNotImplemented, "This OpenCV version is built without support of Inference Engine + nGraph");
CV_Error(Error::StsNotImplemented, "This OpenCV version is built without support of OpenVINO / Inference Engine + nGraph");
#endif
}
else
@ -1183,7 +1129,7 @@ struct Net::Impl : public detail::NetImplBase
}
else if (preferableBackend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
{
return wrapMat(preferableBackend, preferableTarget, host);
CV_ERROR_DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019;
}
else if (preferableBackend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
{
@ -1285,7 +1231,7 @@ struct Net::Impl : public detail::NetImplBase
preferableBackend = (Backend)PARAM_DNN_BACKEND_DEFAULT;
#ifdef HAVE_INF_ENGINE
if (preferableBackend == DNN_BACKEND_INFERENCE_ENGINE)
preferableBackend = getInferenceEngineBackendTypeParam();
preferableBackend = DNN_BACKEND_INFERENCE_ENGINE_NGRAPH; // = getInferenceEngineBackendTypeParam();
#endif
CV_Assert(preferableBackend != DNN_BACKEND_OPENCV ||
@ -1296,8 +1242,7 @@ struct Net::Impl : public detail::NetImplBase
preferableTarget == DNN_TARGET_CPU ||
preferableTarget == DNN_TARGET_OPENCL);
#ifdef HAVE_INF_ENGINE
if (preferableBackend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 ||
preferableBackend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
if (preferableBackend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
{
CV_Assert(
(preferableTarget == DNN_TARGET_CPU && (!isArmComputePlugin() || preferableBackend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)) ||
@ -1552,14 +1497,6 @@ struct Net::Impl : public detail::NetImplBase
CV_Assert(preferableTarget == DNN_TARGET_CPU || IS_DNN_OPENCL_TARGET(preferableTarget));
else if (preferableBackend == DNN_BACKEND_HALIDE)
initHalideBackend();
else if (preferableBackend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
{
#ifdef HAVE_DNN_IE_NN_BUILDER_2019
initInfEngineBackend(blobsToKeep_);
#else
CV_Assert(false && "This OpenCV version is built without Inference Engine NN Builder API support");
#endif
}
else if (preferableBackend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
{
#ifdef HAVE_DNN_NGRAPH
@ -1569,7 +1506,7 @@ struct Net::Impl : public detail::NetImplBase
#endif
}
else
CV_Error(Error::StsNotImplemented, "Unknown backend identifier");
CV_Error(Error::StsNotImplemented, cv::format("Unknown backend identifier: %d", preferableBackend));
}
void initHalideBackend()
@ -1627,314 +1564,6 @@ struct Net::Impl : public detail::NetImplBase
}
}
#ifdef HAVE_DNN_IE_NN_BUILDER_2019
// Before launching Inference Engine graph we need to specify output blobs.
// This function requests output blobs based on inputs references of
// layers from default backend or layers from different graphs.
void addInfEngineNetOutputs(LayerData &ld)
{
CV_TRACE_FUNCTION();
Ptr<InfEngineBackendNet> layerNet;
if (ld.backendNodes.find(preferableBackend) != ld.backendNodes.end())
{
Ptr<BackendNode> node = ld.backendNodes[preferableBackend];
if (!node.empty())
{
Ptr<InfEngineBackendNode> ieNode = node.dynamicCast<InfEngineBackendNode>();
CV_Assert(!ieNode.empty()); CV_Assert(!ieNode->net.empty());
layerNet = ieNode->net;
}
}
// For an every input reference we check that it belongs to one of
// the Inference Engine backend graphs. Request an output blob if it is.
// Do nothing if layer's input is from the same graph.
for (int i = 0; i < ld.inputBlobsId.size(); ++i)
{
LayerData &inpLd = layers[ld.inputBlobsId[i].lid];
Ptr<BackendNode> inpNode = inpLd.backendNodes[preferableBackend];
if (!inpNode.empty())
{
Ptr<InfEngineBackendNode> ieInpNode = inpNode.dynamicCast<InfEngineBackendNode>();
CV_Assert(!ieInpNode.empty()); CV_Assert(!ieInpNode->net.empty());
if (layerNet != ieInpNode->net)
{
// layerNet is empty or nodes are from different graphs.
ieInpNode->net->addOutput(ieInpNode->layer.getName());
}
}
}
}
void initInfEngineBackend(const std::vector<LayerPin>& blobsToKeep_)
{
CV_TRACE_FUNCTION();
CV_Assert_N(preferableBackend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019, haveInfEngine());
MapIdToLayerData::iterator it;
Ptr<InfEngineBackendNet> net;
for (it = layers.begin(); it != layers.end(); ++it)
{
LayerData &ld = it->second;
if (ld.id == 0)
{
CV_Assert((netInputLayer->outNames.empty() && ld.outputBlobsWrappers.size() == 1) ||
(netInputLayer->outNames.size() == ld.outputBlobsWrappers.size()));
for (int i = 0; i < ld.outputBlobsWrappers.size(); ++i)
{
InferenceEngine::DataPtr dataPtr = infEngineDataNode(ld.outputBlobsWrappers[i]);
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LE(2019010000)
dataPtr->name = netInputLayer->outNames.empty() ? ld.name : netInputLayer->outNames[i];
#else
dataPtr->setName(netInputLayer->outNames.empty() ? ld.name : netInputLayer->outNames[i]);
#endif
}
}
else
{
for (int i = 0; i < ld.outputBlobsWrappers.size(); ++i)
{
InferenceEngine::DataPtr dataPtr = infEngineDataNode(ld.outputBlobsWrappers[i]);
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LE(2019010000)
dataPtr->name = ld.name;
#else
dataPtr->setName(ld.name);
#endif
}
}
}
if (skipInfEngineInit)
{
Ptr<BackendNode> node = layers[lastLayerId].backendNodes[preferableBackend];
CV_Assert(!node.empty());
Ptr<InfEngineBackendNode> ieNode = node.dynamicCast<InfEngineBackendNode>();
CV_Assert(!ieNode.empty());
ieNode->net->reset();
for (it = layers.begin(); it != layers.end(); ++it)
{
LayerData &ld = it->second;
if (ld.id == 0)
{
for (int i = 0; i < ld.inputBlobsWrappers.size(); ++i)
{
InferenceEngine::DataPtr dataPtr = infEngineDataNode(ld.inputBlobsWrappers[i]);
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LE(2019010000)
dataPtr->name = netInputLayer->outNames[i];
#else
dataPtr->setName(netInputLayer->outNames[i]);
#endif
}
}
else
{
for (int i = 0; i < ld.outputBlobsWrappers.size(); ++i)
{
InferenceEngine::DataPtr dataPtr = infEngineDataNode(ld.outputBlobsWrappers[i]);
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LE(2019010000)
dataPtr->name = ld.name;
#else
dataPtr->setName(ld.name);
#endif
}
}
ieNode->net->addBlobs(ld.inputBlobsWrappers);
ieNode->net->addBlobs(ld.outputBlobsWrappers);
ld.skip = true;
}
layers[lastLayerId].skip = false;
ieNode->net->init((Target)preferableTarget);
return;
}
// Build Inference Engine networks from sets of layers that support this
// backend. Split a whole model on several Inference Engine networks if
// some of layers are not implemented.
bool supportsCPUFallback = preferableTarget == DNN_TARGET_CPU ||
BackendRegistry::checkIETarget(DNN_TARGET_CPU);
// Set of all input and output blobs wrappers for current network.
std::map<LayerPin, Ptr<BackendWrapper> > netBlobsWrappers;
for (it = layers.begin(); it != layers.end(); ++it)
{
LayerData &ld = it->second;
if (ld.id == 0 && ld.skip)
continue;
bool fused = ld.skip;
Ptr<Layer> layer = ld.layerInstance;
if (!fused && !layer->supportBackend(preferableBackend))
{
bool customizable = ld.id != 0 &&
INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2019R2) &&
supportsCPUFallback;
// TODO: there is a bug in Myriad plugin with custom layers shape infer.
if (preferableTarget == DNN_TARGET_MYRIAD)
{
for (int i = 0; customizable && i < ld.inputBlobs.size(); ++i)
{
customizable = ld.inputBlobs[i]->size[0] == 1;
}
}
// TODO: fix these workarounds
if (preferableTarget == DNN_TARGET_MYRIAD ||
preferableTarget == DNN_TARGET_OPENCL ||
preferableTarget == DNN_TARGET_OPENCL_FP16)
customizable &= ld.type != "Concat";
if (preferableTarget == DNN_TARGET_OPENCL ||
preferableTarget == DNN_TARGET_OPENCL_FP16)
customizable &= ld.type != "Power";
if (preferableTarget == DNN_TARGET_OPENCL)
customizable &= ld.type != "Eltwise";
if (!customizable)
{
addInfEngineNetOutputs(ld);
net = Ptr<InfEngineBackendNet>();
netBlobsWrappers.clear(); // Is not used for R5 release but we don't wrap it to #ifdef.
layer->preferableTarget = DNN_TARGET_CPU;
continue;
}
}
ld.skip = true; // Initially skip all Inference Engine supported layers.
// Create a new network if one of inputs from different Inference Engine graph.
for (int i = 0; i < ld.inputBlobsId.size(); ++i)
{
LayerData &inpLd = layers[ld.inputBlobsId[i].lid];
Ptr<BackendNode> inpNode = inpLd.backendNodes[preferableBackend];
if (!inpNode.empty())
{
Ptr<InfEngineBackendNode> ieInpNode = inpNode.dynamicCast<InfEngineBackendNode>();
CV_Assert(!ieInpNode.empty()); CV_Assert(!ieInpNode->net.empty());
if (ieInpNode->net != net)
{
net = Ptr<InfEngineBackendNet>();
netBlobsWrappers.clear(); // Is not used for R5 release but we don't wrap it to #ifdef.
break;
}
}
}
Ptr<BackendNode> node;
if (!net.empty())
{
if (fused)
{
bool inPlace = ld.inputBlobsId.size() == 1 && ld.outputBlobs.size() == 1 &&
ld.inputBlobs[0]->data == ld.outputBlobs[0].data;
CV_Assert(inPlace);
node = layers[ld.inputBlobsId[0].lid].backendNodes[preferableBackend];
ld.inputBlobsWrappers = layers[ld.inputBlobsId[0].lid].inputBlobsWrappers;
}
}
else
net = Ptr<InfEngineBackendNet>(new InfEngineBackendNet());
if (!fused)
{
if (layer->supportBackend(preferableBackend))
node = layer->initInfEngine(ld.inputBlobsWrappers);
else
{
node = Ptr<BackendNode>(new InfEngineBackendNode(
ld.layerInstance, ld.inputBlobs, ld.outputBlobs, ld.internals));
}
}
else if (node.empty())
continue;
CV_Assert(!node.empty());
ld.backendNodes[preferableBackend] = node;
Ptr<InfEngineBackendNode> ieNode = node.dynamicCast<InfEngineBackendNode>();
CV_Assert(!ieNode.empty());
ieNode->net = net;
for (const auto& pin : blobsToKeep_)
{
if (pin.lid == ld.id)
{
ieNode->net->addOutput(ieNode->layer.getName());
break;
}
}
// Convert weights in FP16 for specific targets.
if ((preferableTarget == DNN_TARGET_OPENCL_FP16 ||
preferableTarget == DNN_TARGET_MYRIAD ||
preferableTarget == DNN_TARGET_FPGA) && !fused)
{
#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2019R1)
for (const std::string& name : {"weights", "biases"})
{
auto it = ieNode->layer.getParameters().find(name);
if (it != ieNode->layer.getParameters().end())
{
InferenceEngine::Blob::Ptr bp = it->second.as<InferenceEngine::Blob::Ptr>();
it->second = convertFp16(std::const_pointer_cast<InferenceEngine::Blob>(bp));
}
}
#else
auto& blobs = ieNode->layer.getConstantData();
if (blobs.empty())
{
// In case of non weightable layer we have to specify
// it's precision adding dummy blob.
auto blob = InferenceEngine::make_shared_blob<int16_t>(
InferenceEngine::Precision::FP16,
InferenceEngine::Layout::C, {1});
blob->allocate();
blobs[""] = blob;
}
else
{
for (auto& it : blobs)
it.second = convertFp16(std::const_pointer_cast<InferenceEngine::Blob>(it.second));
}
#endif
}
if (!fused)
net->addLayer(ieNode->layer);
net->connect(ld.inputBlobsWrappers, ld.outputBlobsWrappers, ieNode->layer.getName());
net->addBlobs(ld.inputBlobsWrappers);
net->addBlobs(ld.outputBlobsWrappers);
addInfEngineNetOutputs(ld);
}
// Initialize all networks.
for (MapIdToLayerData::reverse_iterator it = layers.rbegin(); it != layers.rend(); ++it)
{
LayerData &ld = it->second;
if (ld.backendNodes.find(preferableBackend) == ld.backendNodes.end())
continue;
Ptr<BackendNode> node = ld.backendNodes[preferableBackend];
if (node.empty())
continue;
Ptr<InfEngineBackendNode> ieNode = node.dynamicCast<InfEngineBackendNode>();
if (ieNode.empty())
continue;
CV_Assert(!ieNode->net.empty());
if (!ieNode->net->isInitialized())
{
ieNode->net->init((Target)preferableTarget);
ld.skip = false;
}
}
}
#endif // HAVE_DNN_IE_NN_BUILDER_2019
#ifdef HAVE_DNN_NGRAPH
/** mark input pins as outputs from other subnetworks
@ -1979,7 +1608,7 @@ struct Net::Impl : public detail::NetImplBase
void initNgraphBackend(const std::vector<LayerPin>& blobsToKeep_)
{
CV_TRACE_FUNCTION();
CV_Assert_N(preferableBackend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH, haveInfEngine());
CV_CheckEQ(preferableBackend, DNN_BACKEND_INFERENCE_ENGINE_NGRAPH, "");
Ptr<InfEngineNgraphNet> net;
@ -2425,7 +2054,6 @@ struct Net::Impl : public detail::NetImplBase
CV_TRACE_FUNCTION();
if(!fusion || (preferableBackend != DNN_BACKEND_OPENCV &&
preferableBackend != DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 &&
preferableBackend != DNN_BACKEND_INFERENCE_ENGINE_NGRAPH))
return;
@ -3024,14 +2652,12 @@ struct Net::Impl : public detail::NetImplBase
{
forwardHalide(ld.outputBlobsWrappers, node);
}
else if (preferableBackend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
{
forwardInfEngine(ld.outputBlobsWrappers, node, isAsync);
}
#ifdef HAVE_INF_ENGINE
else if (preferableBackend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
{
forwardNgraph(ld.outputBlobsWrappers, node, isAsync);
}
#endif
else
{
CV_Error(Error::StsNotImplemented, "Unknown backend identifier");
@ -3347,27 +2973,13 @@ struct Net::Impl : public detail::NetImplBase
// Transfer data to CPU if it's require.
ld.outputBlobsWrappers[pin.oid]->copyToHost();
}
CV_Assert(preferableBackend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || preferableBackend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH);
CV_Assert(preferableBackend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH);
if (preferableBackend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019) {
#ifdef HAVE_DNN_IE_NN_BUILDER_2019
Ptr<InfEngineBackendWrapper> wrapper = ld.outputBlobsWrappers[pin.oid].dynamicCast<InfEngineBackendWrapper>();
return std::move(wrapper->futureMat);
#else
CV_Error(Error::StsNotImplemented, "This OpenCV version is built without Inference Engine NN Builder API support");
#endif
}
else if (preferableBackend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
{
#ifdef HAVE_DNN_NGRAPH
Ptr<NgraphBackendWrapper> wrapper = ld.outputBlobsWrappers[pin.oid].dynamicCast<NgraphBackendWrapper>();
return std::move(wrapper->futureMat);
Ptr<NgraphBackendWrapper> wrapper = ld.outputBlobsWrappers[pin.oid].dynamicCast<NgraphBackendWrapper>();
return std::move(wrapper->futureMat);
#else
CV_Error(Error::StsNotImplemented, "This OpenCV version is built without support of Inference Engine + nGraph");
#endif
}
CV_Error(Error::StsNotImplemented, "DNN: OpenVINO/nGraph backend is required");
#endif // HAVE_INF_ENGINE
CV_Error(Error::StsNotImplemented, "DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 backend is required");
}
AsyncArray getBlobAsync(String outputName)
@ -3441,40 +3053,18 @@ Net Net::Impl::createNetworkFromModelOptimizer(InferenceEngine::CNNNetwork& ieNe
CV_TRACE_REGION_NEXT("backendNode");
Ptr<BackendNode> backendNode;
#ifdef HAVE_DNN_NGRAPH
if (DNN_BACKEND_INFERENCE_ENGINE_NGRAPH == getInferenceEngineBackendTypeParam())
{
auto fake_node = std::make_shared<ngraph::op::Parameter>(ngraph::element::f32, ngraph::Shape{});
Ptr<InfEngineNgraphNode> backendNodeNGraph(new InfEngineNgraphNode(fake_node));
backendNodeNGraph->net = Ptr<InfEngineNgraphNet>(new InfEngineNgraphNet(*(cvNet.impl), ieNet));
backendNode = backendNodeNGraph;
}
else
#endif
{
#ifdef HAVE_DNN_IE_NN_BUILDER_2019
Ptr<InfEngineBackendNode> backendNodeNN(new InfEngineBackendNode(InferenceEngine::Builder::Layer("")));
backendNodeNN->net = Ptr<InfEngineBackendNet>(new InfEngineBackendNet(ieNet));
backendNode = backendNodeNN;
#else
CV_Error(Error::StsNotImplemented, "This OpenCV version is built without Inference Engine NN Builder API support");
#endif
}
CV_TRACE_REGION_NEXT("register_outputs");
#ifdef HAVE_DNN_NGRAPH
auto ngraphFunction = ieNet.getFunction();
#if INF_ENGINE_VER_MAJOR_LT(INF_ENGINE_RELEASE_2020_2)
std::list< std::shared_ptr<ngraph::Node> > ngraphOperations;
#else
std::vector< std::shared_ptr<ngraph::Node> > ngraphOperations;
#endif
if (ngraphFunction)
{
ngraphOperations = ngraphFunction->get_ops();
}
#endif
CV_Assert(ngraphFunction);
std::vector< std::shared_ptr<ngraph::Node> > ngraphOperations = ngraphFunction->get_ops();
for (auto& it : ieNet.getOutputsInfo())
{
@ -3486,8 +3076,6 @@ Net Net::Impl::createNetworkFromModelOptimizer(InferenceEngine::CNNNetwork& ieNe
LayerData& ld = cvNet.impl->layers[lid];
#ifdef HAVE_DNN_NGRAPH
if (DNN_BACKEND_INFERENCE_ENGINE_NGRAPH == getInferenceEngineBackendTypeParam())
{
Ptr<Layer> cvLayer(new NgraphBackendLayer(ieNet));
cvLayer->name = outputName;
@ -3495,44 +3083,18 @@ Net Net::Impl::createNetworkFromModelOptimizer(InferenceEngine::CNNNetwork& ieNe
auto process_layer = [&](const std::string& name) -> bool
{
if (ngraphFunction)
{
CV_TRACE_REGION("ngraph_function");
for (const auto& op : ngraphOperations)
{
CV_Assert(op);
if (op->get_friendly_name() == name)
{
const std::string typeName = op->get_type_info().name;
cvLayer->type = typeName;
return true;
}
}
return false;
}
else
CV_TRACE_REGION("ngraph_function");
for (const auto& op : ngraphOperations)
{
#if INF_ENGINE_VER_MAJOR_GT(INF_ENGINE_RELEASE_2020_4)
CV_Error(Error::StsNotImplemented, "This OpenCV version is built with Inference Engine which has dropped IR v7 support");
#else
CV_TRACE_REGION("legacy_cnn_layer");
try
CV_Assert(op);
if (op->get_friendly_name() == name)
{
InferenceEngine::CNNLayerPtr ieLayer = ieNet.getLayerByName(name.c_str());
CV_Assert(ieLayer);
cvLayer->type = ieLayer->type;
const std::string typeName = op->get_type_info().name;
cvLayer->type = typeName;
return true;
}
catch (const std::exception& e)
{
CV_UNUSED(e);
CV_LOG_DEBUG(NULL, "IE layer extraction failure: '" << name << "' - " << e.what());
return false;
}
#endif
}
return false;
};
bool found = process_layer(outputName);
@ -3551,37 +3113,6 @@ Net Net::Impl::createNetworkFromModelOptimizer(InferenceEngine::CNNNetwork& ieNe
ld.layerInstance = cvLayer;
ld.backendNodes[DNN_BACKEND_INFERENCE_ENGINE_NGRAPH] = backendNode;
}
else
#endif
{
#ifdef HAVE_DNN_IE_NN_BUILDER_2019
Ptr<Layer> cvLayer(new InfEngineBackendLayer(ieNet));
InferenceEngine::CNNLayerPtr ieLayer;
try
{
ieLayer = ieNet.getLayerByName(outputName.c_str());
}
catch (...)
{
auto pos = outputName.rfind('.'); // cut port number: ".0"
if (pos != std::string::npos)
{
std::string layerName = outputName.substr(0, pos);
ieLayer = ieNet.getLayerByName(layerName.c_str());
}
}
CV_Assert(ieLayer);
cvLayer->name = outputName;
cvLayer->type = ieLayer->type;
ld.layerInstance = cvLayer;
ld.backendNodes[DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019] = backendNode;
#else
CV_Error(Error::StsNotImplemented, "This OpenCV version is built without Inference Engine NN Builder API support");
#endif
}
for (int i = 0; i < inputsNames.size(); ++i)
cvNet.connect(0, i, lid, i);
@ -3589,7 +3120,7 @@ Net Net::Impl::createNetworkFromModelOptimizer(InferenceEngine::CNNNetwork& ieNe
CV_TRACE_REGION_NEXT("finalize");
cvNet.setPreferableBackend(getInferenceEngineBackendTypeParam());
cvNet.setPreferableBackend(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH);
cvNet.impl->skipInfEngineInit = true;
return cvNet;
@ -3606,16 +3137,8 @@ Net Net::readFromModelOptimizer(const String& xml, const String& bin)
FPDenormalsIgnoreHintScope fp_denormals_ignore_scope;
#if INF_ENGINE_VER_MAJOR_LE(INF_ENGINE_RELEASE_2019R3)
InferenceEngine::CNNNetReader reader;
reader.ReadNetwork(xml);
reader.ReadWeights(bin);
InferenceEngine::CNNNetwork ieNet = reader.getNetwork();
#else
InferenceEngine::Core& ie = getCore("");
InferenceEngine::CNNNetwork ieNet = ie.ReadNetwork(xml, bin);
#endif
return Impl::createNetworkFromModelOptimizer(ieNet);
#endif // HAVE_INF_ENGINE
@ -3644,26 +3167,6 @@ Net Net::readFromModelOptimizer(
FPDenormalsIgnoreHintScope fp_denormals_ignore_scope;
#if INF_ENGINE_VER_MAJOR_LE(INF_ENGINE_RELEASE_2019R3)
InferenceEngine::CNNNetReader reader;
try
{
reader.ReadNetwork(bufferModelConfigPtr, bufferModelConfigSize);
InferenceEngine::TensorDesc tensorDesc(InferenceEngine::Precision::U8, { bufferWeightsSize }, InferenceEngine::Layout::C);
InferenceEngine::TBlob<uint8_t>::Ptr weightsBlobPtr(new InferenceEngine::TBlob<uint8_t>(tensorDesc));
weightsBlobPtr->allocate();
std::memcpy(weightsBlobPtr->buffer(), (uchar*)bufferWeightsPtr, bufferWeightsSize);
reader.SetWeights(weightsBlobPtr);
}
catch (const std::exception& e)
{
CV_Error(Error::StsError, std::string("DNN: IE failed to load model: ") + e.what());
}
InferenceEngine::CNNNetwork ieNet = reader.getNetwork();
#else
InferenceEngine::Core& ie = getCore("");
std::string model; model.assign((char*)bufferModelConfigPtr, bufferModelConfigSize);
@ -3680,7 +3183,6 @@ Net Net::readFromModelOptimizer(
{
CV_Error(Error::StsError, std::string("DNN: IE failed to load model: ") + e.what());
}
#endif
return Impl::createNetworkFromModelOptimizer(ieNet);
#endif // HAVE_INF_ENGINE
@ -3775,8 +3277,8 @@ AsyncArray Net::forwardAsync(const String& outputName)
std::vector<LayerPin> pins(1, impl->getPinByAlias(layerName));
impl->setUpNet(pins);
if (!(impl->preferableBackend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || impl->preferableBackend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH))
CV_Error(Error::StsNotImplemented, "DNN: Asynchronous forward is supported for Inference Engine backends only");
if (impl->preferableBackend != DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
CV_Error(Error::StsNotImplemented, "DNN: Asynchronous forward is supported for Inference Engine backend only");
impl->isAsync = true;
impl->forwardToLayer(impl->getLayerData(layerName));
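After this hunk, asynchronous forward is only available with the nGraph backend. A hedged caller-side sketch (model paths and the zero input image are placeholders, not from this patch):

#include <opencv2/dnn.hpp>

int main()
{
    cv::dnn::Net net = cv::dnn::readNetFromModelOptimizer("model.xml", "model.bin");
    net.setPreferableBackend(cv::dnn::DNN_BACKEND_INFERENCE_ENGINE_NGRAPH);

    cv::Mat img = cv::Mat::zeros(224, 224, CV_8UC3);   // placeholder input image
    net.setInput(cv::dnn::blobFromImage(img));

    cv::AsyncArray futureOut = net.forwardAsync();      // errors out on any other backend
    cv::Mat out;
    futureOut.get(out);                                  // blocks until the result is ready
    return 0;
}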
@ -3932,7 +3434,7 @@ void Net::setPreferableBackend(int backendId)
#ifdef HAVE_INF_ENGINE
if (backendId == DNN_BACKEND_INFERENCE_ENGINE)
backendId = getInferenceEngineBackendTypeParam();
backendId = DNN_BACKEND_INFERENCE_ENGINE_NGRAPH;
#endif
if( impl->preferableBackend != backendId )
@ -4172,8 +3674,8 @@ string Net::Impl::dump() const
case DNN_BACKEND_DEFAULT: backend = "DEFAULT/"; break;
case DNN_BACKEND_HALIDE: backend = "HALIDE/"; break;
case DNN_BACKEND_INFERENCE_ENGINE: // fallthru
case DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019: backend = "DLIE/"; break;
case DNN_BACKEND_INFERENCE_ENGINE_NGRAPH: backend = "NGRAPH/"; break;
case DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019: // fallthru
case DNN_BACKEND_INFERENCE_ENGINE_NGRAPH: backend = "OpenVINO/"; break;
case DNN_BACKEND_OPENCV: backend = "OCV/"; break;
// don't use default:
}
@ -4749,13 +4251,6 @@ Ptr<BackendNode> Layer::initHalide(const std::vector<Ptr<BackendWrapper> > &)
return Ptr<BackendNode>();
}
Ptr<BackendNode> Layer::initInfEngine(const std::vector<Ptr<BackendWrapper> > &)
{
CV_Error(Error::StsNotImplemented, "Inference Engine pipeline of " + type +
" layers is not defined.");
return Ptr<BackendNode>();
}
Ptr<BackendNode> Layer::initNgraph(const std::vector<Ptr<BackendWrapper> > & inputs, const std::vector<Ptr<BackendNode> >& nodes)
{
CV_Error(Error::StsNotImplemented, "Inference Engine pipeline of " + type +

@ -163,9 +163,12 @@ public:
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
#ifdef HAVE_INF_ENGINE
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
return preferableTarget == DNN_TARGET_CPU || dims == 4;
#endif
return (backendId == DNN_BACKEND_OPENCV) ||
(backendId == DNN_BACKEND_HALIDE && haveHalide()) ||
((backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) && haveInfEngine() && (preferableTarget == DNN_TARGET_CPU || dims == 4));
(backendId == DNN_BACKEND_HALIDE && haveHalide());
}
#ifdef HAVE_OPENCL
@ -361,16 +364,6 @@ public:
}
#endif // HAVE_HALIDE
#ifdef HAVE_DNN_IE_NN_BUILDER_2019
virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
{
InferenceEngine::Builder::Layer ieLayer = InferenceEngine::Builder::ScaleShiftLayer(name);
const size_t numChannels = weights_.total();
addConstantData("weights", wrapToInfEngineBlob(weights_, {numChannels}, InferenceEngine::Layout::C), ieLayer);
addConstantData("biases", wrapToInfEngineBlob(bias_, {numChannels}, InferenceEngine::Layout::C), ieLayer);
return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
}
#endif // HAVE_DNN_IE_NN_BUILDER_2019
#ifdef HAVE_DNN_NGRAPH
virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs, const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE

@ -57,8 +57,11 @@ public:
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
return backendId == DNN_BACKEND_OPENCV ||
((backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) && haveInfEngine());
#ifdef HAVE_INF_ENGINE
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
return true;
#endif
return backendId == DNN_BACKEND_OPENCV;
}
bool getMemoryShapes(const std::vector<MatShape> &inputs,
@ -108,31 +111,6 @@ public:
inputs[i].copyTo(outputs[i]);
}
#ifdef HAVE_DNN_IE_NN_BUILDER_2019
virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >& inputs) CV_OVERRIDE
{
InferenceEngine::DataPtr input = infEngineDataNode(inputs[0]);
std::vector<size_t> dims = input->getDims();
CV_Assert(!dims.empty());
InferenceEngine::Builder::Layer ieLayer(name);
ieLayer.setName(name);
if (preferableTarget == DNN_TARGET_MYRIAD)
{
ieLayer.setType("Copy");
}
else
{
ieLayer.setType("Split");
ieLayer.getParameters()["axis"] = dims.size() - 1;
ieLayer.getParameters()["out_sizes"] = dims[0];
}
ieLayer.setInputPorts({InferenceEngine::Port(dims)});
ieLayer.setOutputPorts(std::vector<InferenceEngine::Port>(1));
return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
}
#endif // HAVE_DNN_IE_NN_BUILDER_2019
#ifdef HAVE_DNN_NGRAPH
virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs,

@ -104,10 +104,12 @@ public:
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
#ifdef HAVE_INF_ENGINE
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
return true;
#endif
return backendId == DNN_BACKEND_OPENCV ||
(backendId == DNN_BACKEND_HALIDE && haveHalide() && axis == 1 && !padding) || // By channels
(backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && haveInfEngine() && !padding) ||
backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH;
(backendId == DNN_BACKEND_HALIDE && haveHalide() && axis == 1 && !padding);
}
class ChannelConcatInvoker : public ParallelLoopBody
@ -300,18 +302,6 @@ public:
return Ptr<BackendNode>();
}
#ifdef HAVE_DNN_IE_NN_BUILDER_2019
virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >& inputs) CV_OVERRIDE
{
InferenceEngine::DataPtr input = infEngineDataNode(inputs[0]);
InferenceEngine::Builder::ConcatLayer ieLayer(name);
ieLayer.setAxis(normalize_axis(axis, input->getDims().size()));
ieLayer.setInputPorts(std::vector<InferenceEngine::Port>(inputs.size()));
return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
}
#endif // HAVE_DNN_IE_NN_BUILDER_2019
#ifdef HAVE_DNN_NGRAPH
virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs,

@ -27,9 +27,11 @@ public:
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
return backendId == DNN_BACKEND_OPENCV ||
backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 ||
backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH;
#ifdef HAVE_INF_ENGINE
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
return true;
#endif
return backendId == DNN_BACKEND_OPENCV;
}
virtual bool getMemoryShapes(const std::vector<MatShape> &inputs,
@ -68,15 +70,6 @@ public:
blobs[0].copyTo(outputs[0]);
}
#ifdef HAVE_DNN_IE_NN_BUILDER_2019
virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
{
InferenceEngine::Builder::ConstLayer ieLayer(name);
ieLayer.setData(wrapToInfEngineBlob(blobs[0]));
return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
}
#endif // HAVE_DNN_IE_NN_BUILDER_2019
#ifdef HAVE_DNN_NGRAPH
virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs,

@ -272,7 +272,7 @@ public:
{
size_t ksize = kernel_size.size();
#ifdef HAVE_INF_ENGINE
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
{
bool isArmTarget = preferableTarget == DNN_TARGET_CPU && isArmComputePlugin();
if (isArmTarget && blobs.empty())
@ -281,7 +281,8 @@ public:
return isArmTarget;
if (ksize == 3)
return preferableTarget != DNN_TARGET_MYRIAD && !isArmTarget;
if ((backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || preferableTarget != DNN_TARGET_MYRIAD) && blobs.empty())
bool isMyriad = preferableTarget == DNN_TARGET_MYRIAD;
if (!isMyriad && blobs.empty())
return false;
return (preferableTarget != DNN_TARGET_MYRIAD || dilation.width == dilation.height);
}
@ -514,68 +515,6 @@ public:
return Ptr<BackendNode>();
}
#ifdef HAVE_DNN_IE_NN_BUILDER_2019
virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> > &inputs) CV_OVERRIDE
{
InferenceEngine::DataPtr input = infEngineDataNode(inputs[0]);
std::vector<size_t> dims = input->getDims();
CV_Assert(dims.size() == 4 || dims.size() == 5);
const int inpCn = dims[1];
const int outCn = blobs[0].size[0];
const int inpGroupCn = blobs[0].size[1];
const int group = inpCn / inpGroupCn;
InferenceEngine::Layout layout = (dims.size() == 4) ? InferenceEngine::Layout::OIHW :
InferenceEngine::Layout::NCDHW;
auto ieWeights = wrapToInfEngineBlob(blobs[0], layout);
if (fusedWeights)
{
if (weightsMat.isContinuous())
{
Mat cvWeights = weightsMat.reshape(1, blobs[0].dims, blobs[0].size);
ieWeights = wrapToInfEngineBlob(cvWeights, layout);
}
else
{
ieWeights = InferenceEngine::make_shared_blob<float>({
InferenceEngine::Precision::FP32,
ieWeights->getTensorDesc().getDims(), layout
});
ieWeights->allocate();
Mat newWeights = infEngineBlobToMat(ieWeights).reshape(1, outCn);
Mat cvWeights = weightsMat.colRange(0, newWeights.cols);
cvWeights.copyTo(newWeights);
}
}
InferenceEngine::Blob::Ptr ieBiases;
if (hasBias() || fusedBias)
{
Mat biasesMat({outCn}, CV_32F, &biasvec[0]);
ieBiases = wrapToInfEngineBlob(biasesMat, {(size_t)outCn}, InferenceEngine::Layout::C);
}
InferenceEngine::Builder::ConvolutionLayer ieLayer(name);
ieLayer.setKernel(kernel_size);
ieLayer.setStrides(strides);
ieLayer.setDilation(dilations);
ieLayer.setPaddingsBegin(pads_begin);
ieLayer.setPaddingsEnd(pads_end);
ieLayer.setGroup((size_t)group);
ieLayer.setOutDepth((size_t)outCn);
InferenceEngine::Builder::Layer l = ieLayer;
addConstantData("weights", ieWeights, l);
if (ieBiases)
addConstantData("biases", ieBiases, l);
if (!padMode.empty())
l.getParameters()["auto_pad"] = padMode == "VALID" ? std::string("valid") : std::string("same_upper");
return Ptr<BackendNode>(new InfEngineBackendNode(l));
}
#endif // HAVE_DNN_IE_NN_BUILDER_2019
#ifdef HAVE_DNN_NGRAPH
virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> > &inputs,
@ -1792,52 +1731,6 @@ public:
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) {
return group == 1;
}
#ifdef HAVE_DNN_IE_NN_BUILDER_2019
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
{
if (kernel_size.size() == 3 && preferableTarget != DNN_TARGET_CPU) {
return false;
}
if (std::accumulate(adjust_pads.begin(), adjust_pads.end(), 0, std::plus<size_t>()) > 0)
{
if (padMode.empty())
{
if (preferableTarget != DNN_TARGET_CPU && group != 1)
{
for (int i = 0; i < adjust_pads.size(); i++) {
if (adjust_pads[i] && pads_begin[i])
return false;
}
}
for (int i = 0; i < adjust_pads.size(); i++) {
if (pads_end[i] < adjust_pads[i])
return false;
}
return true;
}
else if (padMode == "SAME")
{
for (int i = 0; i < adjust_pads.size(); i++) {
if (kernel_size[i] < pads_begin[i] + 1 + adjust_pads[i])
return false;
}
return true;
}
else if (padMode == "VALID")
return false;
}
if (group != 1)
{
return preferableTarget == DNN_TARGET_CPU;
}
if (preferableTarget == DNN_TARGET_OPENCL || preferableTarget == DNN_TARGET_OPENCL_FP16)
return std::accumulate(dilations.begin(), dilations.end(), 1, std::multiplies<size_t>()) == 1;
return true;
}
#endif // HAVE_DNN_IE_NN_BUILDER_2019
#endif // HAVE_INF_ENGINE
{
return kernel_size.size() == 2 && (backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_HALIDE);
@ -2423,63 +2316,6 @@ public:
return Ptr<BackendNode>();
}
#ifdef HAVE_DNN_IE_NN_BUILDER_2019
virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> > &) CV_OVERRIDE
{
InferenceEngine::Layout layout = blobs[0].dims == 5? InferenceEngine::Layout::NCDHW :
InferenceEngine::Layout::OIHW;
auto ieWeights = wrapToInfEngineBlob(blobs[0], layout);
if (fusedWeights)
{
ieWeights = InferenceEngine::make_shared_blob<float>({
InferenceEngine::Precision::FP32,
ieWeights->getTensorDesc().getDims(), layout
});
ieWeights->allocate();
int inpCn = blobs[0].size[0];
Mat newWeights = infEngineBlobToMat(ieWeights).reshape(1, inpCn);
transpose(weightsMat, newWeights);
}
const int outGroupCn = blobs[0].size[1]; // Weights are in IOHW or OIDHW layout
const int group = numOutput / outGroupCn;
InferenceEngine::Builder::DeconvolutionLayer ieLayer(name);
ieLayer.setKernel(kernel_size);
ieLayer.setStrides(strides);
ieLayer.setDilation(dilations);
ieLayer.setPaddingsBegin(pads_begin);
if (padMode.empty())
{
std::vector<size_t> paddings_end;
for (int i = 0; i < pads_end.size(); i++) {
paddings_end.push_back(pads_end[i] - adjust_pads[i]);
}
ieLayer.setPaddingsEnd(paddings_end);
}
else if (padMode == "SAME")
{
std::vector<size_t> paddings_end;
for (int i = 0; i < pads_begin.size(); i++) {
paddings_end.push_back(kernel_size[i] - pads_begin[i] - 1 - adjust_pads[i]);
}
ieLayer.setPaddingsEnd(paddings_end);
}
ieLayer.setGroup((size_t)group);
ieLayer.setOutDepth((size_t)numOutput);
InferenceEngine::Builder::Layer l = ieLayer;
addConstantData("weights", ieWeights, l);
if (hasBias())
addConstantData("biases", wrapToInfEngineBlob(biasesMat, {(size_t)numOutput}, InferenceEngine::Layout::C), l);
return Ptr<BackendNode>(new InfEngineBackendNode(l));
}
#endif // HAVE_DNN_IE_NN_BUILDER_2019
#ifdef HAVE_DNN_NGRAPH
virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> > &inputs,

@ -215,7 +215,7 @@ public:
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
return backendId == DNN_BACKEND_OPENCV ||
((backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) && !_locPredTransposed && _bboxesNormalized);
(backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && !_locPredTransposed && _bboxesNormalized);
}
bool getMemoryShapes(const std::vector<MatShape> &inputs,
@ -945,30 +945,6 @@ public:
}
}
#ifdef HAVE_DNN_IE_NN_BUILDER_2019
virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
{
InferenceEngine::Builder::DetectionOutputLayer ieLayer(name);
ieLayer.setNumClasses(_numClasses);
ieLayer.setShareLocation(_shareLocation);
ieLayer.setBackgroudLabelId(_backgroundLabelId);
ieLayer.setNMSThreshold(_nmsThreshold);
ieLayer.setTopK(_topK > 0 ? _topK : _keepTopK);
ieLayer.setKeepTopK(_keepTopK);
ieLayer.setConfidenceThreshold(_confidenceThreshold);
ieLayer.setVariantEncodedInTarget(_varianceEncodedInTarget);
ieLayer.setCodeType("caffe.PriorBoxParameter." + _codeType);
ieLayer.setInputPorts(std::vector<InferenceEngine::Port>(3));
InferenceEngine::Builder::Layer l = ieLayer;
l.getParameters()["eta"] = std::string("1.0");
l.getParameters()["clip"] = _clip;
return Ptr<BackendNode>(new InfEngineBackendNode(l));
}
#endif // HAVE_DNN_IE_NN_BUILDER_2019
#ifdef HAVE_DNN_NGRAPH
virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs, const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE

@ -156,14 +156,6 @@ public:
return Ptr<BackendNode>();
}
#ifdef HAVE_DNN_IE_NN_BUILDER_2019
virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
{
InferenceEngine::Builder::Layer ieLayer = func.initInfEngineBuilderAPI();
ieLayer.setName(this->name);
return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
}
#endif // HAVE_DNN_IE_NN_BUILDER_2019
#ifdef HAVE_DNN_NGRAPH
virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs, const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
@ -272,10 +264,6 @@ struct ReLUFunctor : public BaseFunctor
bool supportBackend(int backendId, int)
{
#ifdef HAVE_DNN_IE_NN_BUILDER_2019
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
return slope >= 0 || !INF_ENGINE_VER_MAJOR_EQ(INF_ENGINE_RELEASE_2019R1);
#endif
#ifdef HAVE_DNN_NGRAPH
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
return true;
@ -373,13 +361,6 @@ struct ReLUFunctor : public BaseFunctor
}
#endif // HAVE_HALIDE
#ifdef HAVE_DNN_IE_NN_BUILDER_2019
InferenceEngine::Builder::Layer initInfEngineBuilderAPI()
{
return InferenceEngine::Builder::ReLULayer("").setNegativeSlope(slope);
}
#endif // HAVE_DNN_IE_NN_BUILDER_2019
#ifdef HAVE_DNN_NGRAPH
std::shared_ptr<ngraph::Node> initNgraphAPI(const std::shared_ptr<ngraph::Node>& node)
{
@ -407,8 +388,12 @@ struct ReLU6Functor : public BaseFunctor
bool supportBackend(int backendId, int)
{
return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_HALIDE ||
backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH;
#ifdef HAVE_INF_ENGINE
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
return true;
#endif
return backendId == DNN_BACKEND_OPENCV ||
backendId == DNN_BACKEND_HALIDE;
}
void apply(const float* srcptr, float* dstptr, int len, size_t planeSize, int cn0, int cn1) const
@ -483,12 +468,6 @@ struct ReLU6Functor : public BaseFunctor
}
#endif // HAVE_HALIDE
#ifdef HAVE_DNN_IE_NN_BUILDER_2019
InferenceEngine::Builder::Layer initInfEngineBuilderAPI()
{
return InferenceEngine::Builder::ClampLayer("").setMinValue(minValue).setMaxValue(maxValue);
}
#endif // HAVE_DNN_IE_NN_BUILDER_2019
#ifdef HAVE_DNN_NGRAPH
std::shared_ptr<ngraph::Node> initNgraphAPI(const std::shared_ptr<ngraph::Node>& node)
@ -546,12 +525,6 @@ struct BaseDefaultFunctor : public BaseFunctor
inline void setKernelParams(ocl::Kernel& kernel) const {}
#ifdef HAVE_DNN_IE_NN_BUILDER_2019
InferenceEngine::Builder::Layer initInfEngineBuilderAPI()
{
CV_Error(Error::StsNotImplemented, "");
}
#endif // HAVE_DNN_IE_NN_BUILDER_2019
#ifdef HAVE_DNN_NGRAPH
std::shared_ptr<ngraph::Node> initNgraphAPI(const std::shared_ptr<ngraph::Node>& node)
@ -570,8 +543,12 @@ struct TanHFunctor : public BaseDefaultFunctor<TanHFunctor>
bool supportBackend(int backendId, int)
{
return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_HALIDE ||
backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH;
#ifdef HAVE_INF_ENGINE
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
return true;
#endif
return backendId == DNN_BACKEND_OPENCV ||
backendId == DNN_BACKEND_HALIDE;
}
inline float calculate(float x) const
@ -587,13 +564,6 @@ struct TanHFunctor : public BaseDefaultFunctor<TanHFunctor>
}
#endif // HAVE_HALIDE
#ifdef HAVE_DNN_IE_NN_BUILDER_2019
InferenceEngine::Builder::Layer initInfEngineBuilderAPI()
{
return InferenceEngine::Builder::TanHLayer("");
}
#endif // HAVE_DNN_IE_NN_BUILDER_2019
#ifdef HAVE_DNN_NGRAPH
std::shared_ptr<ngraph::Node> initNgraphAPI(const std::shared_ptr<ngraph::Node>& node)
{
@ -700,8 +670,12 @@ struct SigmoidFunctor : public BaseDefaultFunctor<SigmoidFunctor>
bool supportBackend(int backendId, int)
{
return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_HALIDE ||
backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH;
#ifdef HAVE_INF_ENGINE
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
return true;
#endif
return backendId == DNN_BACKEND_OPENCV ||
backendId == DNN_BACKEND_HALIDE;
}
inline float calculate(float x) const
@ -717,12 +691,6 @@ struct SigmoidFunctor : public BaseDefaultFunctor<SigmoidFunctor>
}
#endif // HAVE_HALIDE
#ifdef HAVE_DNN_IE_NN_BUILDER_2019
InferenceEngine::Builder::Layer initInfEngineBuilderAPI()
{
return InferenceEngine::Builder::SigmoidLayer("");
}
#endif // HAVE_DNN_IE_NN_BUILDER_2019
#ifdef HAVE_DNN_NGRAPH
std::shared_ptr<ngraph::Node> initNgraphAPI(const std::shared_ptr<ngraph::Node>& node)
@ -746,8 +714,12 @@ struct ELUFunctor : public BaseDefaultFunctor<ELUFunctor>
bool supportBackend(int backendId, int)
{
return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_HALIDE ||
backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH;
#ifdef HAVE_INF_ENGINE
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
return true;
#endif
return backendId == DNN_BACKEND_OPENCV ||
backendId == DNN_BACKEND_HALIDE;
}
inline float calculate(float x) const
@ -768,13 +740,6 @@ struct ELUFunctor : public BaseDefaultFunctor<ELUFunctor>
}
#endif // HAVE_HALIDE
#ifdef HAVE_DNN_IE_NN_BUILDER_2019
InferenceEngine::Builder::Layer initInfEngineBuilderAPI()
{
return InferenceEngine::Builder::ELULayer("");
}
#endif // HAVE_DNN_IE_NN_BUILDER_2019
#ifdef HAVE_DNN_NGRAPH
std::shared_ptr<ngraph::Node> initNgraphAPI(const std::shared_ptr<ngraph::Node>& node)
{
@ -795,8 +760,8 @@ struct AbsValFunctor : public BaseDefaultFunctor<AbsValFunctor>
bool supportBackend(int backendId, int)
{
#ifdef HAVE_INF_ENGINE
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
return !INF_ENGINE_VER_MAJOR_EQ(INF_ENGINE_RELEASE_2019R1);
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
return true;
#endif
return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_HALIDE;
}
@ -814,12 +779,6 @@ struct AbsValFunctor : public BaseDefaultFunctor<AbsValFunctor>
}
#endif // HAVE_HALIDE
#ifdef HAVE_DNN_IE_NN_BUILDER_2019
InferenceEngine::Builder::Layer initInfEngineBuilderAPI()
{
return InferenceEngine::Builder::ReLULayer("").setNegativeSlope(-0.999999f);
}
#endif // HAVE_DNN_IE_NN_BUILDER_2019
#ifdef HAVE_DNN_NGRAPH
std::shared_ptr<ngraph::Node> initNgraphAPI(const std::shared_ptr<ngraph::Node>& node)
@ -880,10 +839,14 @@ struct PowerFunctor : public BaseFunctor
bool supportBackend(int backendId, int targetId)
{
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
return (targetId != DNN_TARGET_OPENCL && targetId != DNN_TARGET_OPENCL_FP16) || power == 1.0 || power == 0.5;
else
return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_HALIDE || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH;
#ifdef HAVE_INF_ENGINE
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
return true;
#endif
{
return backendId == DNN_BACKEND_OPENCV ||
backendId == DNN_BACKEND_HALIDE;
}
}
void finalize()
@ -968,14 +931,6 @@ struct PowerFunctor : public BaseFunctor
}
#endif // HAVE_HALIDE
#ifdef HAVE_DNN_IE_NN_BUILDER_2019
InferenceEngine::Builder::Layer initInfEngineBuilderAPI()
{
return InferenceEngine::Builder::PowerLayer("").setPower(power)
.setScale(scale)
.setShift(shift);
}
#endif // HAVE_DNN_IE_NN_BUILDER_2019
#ifdef HAVE_DNN_NGRAPH
std::shared_ptr<ngraph::Node> initNgraphAPI(const std::shared_ptr<ngraph::Node>& node)
@ -1104,8 +1059,12 @@ struct ChannelsPReLUFunctor : public BaseFunctor
bool supportBackend(int backendId, int)
{
return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_HALIDE ||
backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH;
#ifdef HAVE_INF_ENGINE
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
return true;
#endif
return backendId == DNN_BACKEND_OPENCV ||
backendId == DNN_BACKEND_HALIDE;
}
void apply(const float* srcptr, float* dstptr, int len, size_t planeSize, int cn0, int cn1) const
@ -1188,15 +1147,6 @@ struct ChannelsPReLUFunctor : public BaseFunctor
}
#endif // HAVE_HALIDE
#ifdef HAVE_DNN_IE_NN_BUILDER_2019
InferenceEngine::Builder::Layer initInfEngineBuilderAPI()
{
InferenceEngine::Builder::Layer l = InferenceEngine::Builder::PReLULayer("");
const size_t numChannels = scale.total();
addConstantData("weights", wrapToInfEngineBlob(scale, {numChannels}, InferenceEngine::Layout::C), l);
return l;
}
#endif // HAVE_DNN_IE_NN_BUILDER_2019
#ifdef HAVE_DNN_NGRAPH
std::shared_ptr<ngraph::Node> initNgraphAPI(const std::shared_ptr<ngraph::Node>& node)

@ -154,10 +154,13 @@ public:
if (hasVecInput && ELTWISE_CHANNNELS_SAME)
return backendId == DNN_BACKEND_OPENCV;
#ifdef HAVE_INF_ENGINE
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
return channelsMode == ELTWISE_CHANNNELS_SAME;
#endif
return backendId == DNN_BACKEND_OPENCV ||
backendId == DNN_BACKEND_HALIDE ||
((((backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && (preferableTarget != DNN_TARGET_OPENCL || coeffs.empty()))
|| backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) && channelsMode == ELTWISE_CHANNNELS_SAME));
backendId == DNN_BACKEND_HALIDE;
}
bool getMemoryShapes(const std::vector<MatShape> &inputs,
@ -752,32 +755,6 @@ public:
return Ptr<BackendNode>();
}
#ifdef HAVE_DNN_IE_NN_BUILDER_2019
virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >& inputs) CV_OVERRIDE
{
InferenceEngine::Builder::EltwiseLayer ieLayer(name);
ieLayer.setInputPorts(std::vector<InferenceEngine::Port>(inputs.size()));
if (op == SUM)
ieLayer.setEltwiseType(InferenceEngine::Builder::EltwiseLayer::EltwiseType::SUM);
else if (op == PROD)
ieLayer.setEltwiseType(InferenceEngine::Builder::EltwiseLayer::EltwiseType::MUL);
else if (op == DIV)
ieLayer.setEltwiseType(InferenceEngine::Builder::EltwiseLayer::EltwiseType::DIV);
else if (op == MAX)
ieLayer.setEltwiseType(InferenceEngine::Builder::EltwiseLayer::EltwiseType::MAX);
else
CV_Error(Error::StsNotImplemented, "Unsupported eltwise operation");
InferenceEngine::Builder::Layer l = ieLayer;
if (!coeffs.empty())
l.getParameters()["coeff"] = coeffs;
return Ptr<BackendNode>(new InfEngineBackendNode(l));
}
#endif // HAVE_DNN_IE_NN_BUILDER_2019
#ifdef HAVE_DNN_NGRAPH
virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs,

@ -66,8 +66,11 @@ public:
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
return backendId == DNN_BACKEND_OPENCV ||
((backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) && haveInfEngine());
#ifdef HAVE_INF_ENGINE
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
return true;
#endif
return backendId == DNN_BACKEND_OPENCV;
}
bool getMemoryShapes(const std::vector<MatShape> &inputs,
@ -163,24 +166,11 @@ public:
}
}
#ifdef HAVE_DNN_IE_NN_BUILDER_2019
virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >& inputs) CV_OVERRIDE
{
InferenceEngine::Builder::Layer ieLayer(name);
ieLayer.setName(name);
ieLayer.setType("Flatten");
ieLayer.getParameters()["axis"] = (size_t)_startAxis;
ieLayer.getParameters()["end_axis"] = _endAxis; // Do not cast to size_t because it might be negative.
ieLayer.setInputPorts(std::vector<InferenceEngine::Port>(1));
ieLayer.setOutputPorts(std::vector<InferenceEngine::Port>(1));
return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
}
#endif // HAVE_DNN_IE_NN_BUILDER_2019
#ifdef HAVE_DNN_NGRAPH
virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs,
const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
{
virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs,
const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
{
auto& ieInpNode = nodes[0].dynamicCast<InfEngineNgraphNode>()->node;
std::vector<size_t> dims = ieInpNode->get_shape();

@ -143,10 +143,13 @@ public:
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
#ifdef HAVE_INF_ENGINE
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
return axis == 1;
#endif
return backendId == DNN_BACKEND_OPENCV ||
(backendId == DNN_BACKEND_HALIDE && haveHalide() && axis == 1) ||
(((backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && !blobs.empty()) ||
backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) && axis == 1);
(backendId == DNN_BACKEND_HALIDE && haveHalide() && axis == 1);
}
virtual bool setActivation(const Ptr<ActivationLayer>& layer) CV_OVERRIDE
@ -534,23 +537,6 @@ public:
return Ptr<BackendNode>();
}
#ifdef HAVE_DNN_IE_NN_BUILDER_2019
virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
{
InferenceEngine::Builder::FullyConnectedLayer ieLayer(name);
const int outNum = blobs[0].size[0];
ieLayer.setOutputNum(outNum);
InferenceEngine::Builder::Layer l = ieLayer;
addConstantData("weights", wrapToInfEngineBlob(blobs[0], {(size_t)blobs[0].size[0], (size_t)blobs[0].size[1], 1, 1}, InferenceEngine::Layout::OIHW), l);
if (bias)
addConstantData("biases", wrapToInfEngineBlob(blobs[1], {(size_t)outNum}, InferenceEngine::Layout::C), l);
return Ptr<BackendNode>(new InfEngineBackendNode(l));
}
#endif // HAVE_DNN_IE_NN_BUILDER_2019
#ifdef HAVE_DNN_NGRAPH
virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs,

@ -92,13 +92,12 @@ public:
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019) {
#ifdef HAVE_INF_ENGINE
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
return bias == (int)bias;
}
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) {
return bias == (int)bias;
}
return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_HALIDE;
#endif
return backendId == DNN_BACKEND_OPENCV ||
backendId == DNN_BACKEND_HALIDE;
}
#ifdef HAVE_OPENCL
@ -385,24 +384,6 @@ public:
#endif // HAVE_HALIDE
}
#ifdef HAVE_DNN_IE_NN_BUILDER_2019
virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
{
float alphaSize = alpha;
if (!normBySize)
alphaSize *= (type == SPATIAL_NRM ? size*size : size);
InferenceEngine::Builder::NormLayer ieLayer(name);
ieLayer.setSize(size);
ieLayer.setAlpha(alphaSize);
ieLayer.setBeta(beta);
ieLayer.setAcrossMaps(type == CHANNEL_NRM);
InferenceEngine::Builder::Layer l = ieLayer;
l.getParameters()["k"] = bias;
return Ptr<BackendNode>(new InfEngineBackendNode(l));
}
#endif // HAVE_DNN_IE_NN_BUILDER_2019
#ifdef HAVE_DNN_NGRAPH
virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs, const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE

@ -118,11 +118,7 @@ public:
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
#ifdef HAVE_DNN_IE_NN_BUILDER_2019
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
return !zeroDev && (preferableTarget != DNN_TARGET_MYRIAD || eps <= 1e-7f);
#endif
#ifdef HAVE_DNN_NGRAPH
#ifdef HAVE_INF_ENGINE
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
return true;
#endif
@ -378,16 +374,6 @@ public:
}
}
#ifdef HAVE_DNN_IE_NN_BUILDER_2019
virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
{
InferenceEngine::Builder::MVNLayer ieLayer(name);
ieLayer.setAcrossChannels(acrossChannels);
ieLayer.setNormalize(normVariance);
ieLayer.setEpsilon(eps);
return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
}
#endif // HAVE_DNN_IE_NN_BUILDER_2019
#ifdef HAVE_DNN_NGRAPH
virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs,

@ -64,16 +64,15 @@ public:
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
#ifdef HAVE_INF_ENGINE
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
{
if (pnorm != 2)
return false;
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && preferableTarget == DNN_TARGET_MYRIAD)
return !acrossSpatial;
return startAxis == 1;
}
#endif
return backendId == DNN_BACKEND_OPENCV;
}
@ -261,56 +260,6 @@ public:
}
}
#ifdef HAVE_DNN_IE_NN_BUILDER_2019
virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >& inputs) CV_OVERRIDE
{
InferenceEngine::DataPtr input = infEngineDataNode(inputs[0]);
std::vector<size_t> dims = input->getDims();
if (dims.size() == 4)
{
InferenceEngine::Builder::NormalizeLayer ieLayer(name);
ieLayer.setChannelShared(false);
ieLayer.setAcrossMaps(acrossSpatial);
ieLayer.setEpsilon(epsilon);
InferenceEngine::Builder::Layer l = ieLayer;
const int numChannels = dims[1];
InferenceEngine::Blob::Ptr weights;
if (blobs.empty())
{
weights = InferenceEngine::make_shared_blob<float>({
InferenceEngine::Precision::FP32,
{(size_t)numChannels}, InferenceEngine::Layout::C
});
weights->allocate();
Mat weightsMat = infEngineBlobToMat(weights).reshape(1, numChannels);
Mat(numChannels, 1, CV_32F, Scalar(1)).copyTo(weightsMat);
l.getParameters()["channel_shared"] = false;
}
else
{
CV_Assert(numChannels == blobs[0].total());
weights = wrapToInfEngineBlob(blobs[0], {(size_t)numChannels}, InferenceEngine::Layout::C);
l.getParameters()["channel_shared"] = blobs[0].total() == 1;
}
addConstantData("weights", weights, l);
l.getParameters()["across_spatial"] = acrossSpatial;
return Ptr<BackendNode>(new InfEngineBackendNode(l));
}
else
{
InferenceEngine::Builder::GRNLayer ieLayer(name);
ieLayer.setBeta(epsilon);
InferenceEngine::Builder::Layer l = ieLayer;
l.getParameters()["bias"] = epsilon;
return Ptr<BackendNode>(new InfEngineBackendNode(l));
}
}
#endif // HAVE_DNN_IE_NN_BUILDER_2019
#ifdef HAVE_DNN_NGRAPH
virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs,

@ -96,9 +96,10 @@ public:
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
#ifdef HAVE_INF_ENGINE
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
{
if (INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2019R1) && preferableTarget == DNN_TARGET_MYRIAD)
bool isMyriad = preferableTarget == DNN_TARGET_MYRIAD;
if (isMyriad)
return dstRanges.size() == 4 && paddings[0].first == 0 && paddings[0].second == 0;
return (dstRanges.size() <= 4 || !isArmComputePlugin());
@ -188,30 +189,6 @@ public:
return Ptr<BackendNode>();
}
#ifdef HAVE_DNN_IE_NN_BUILDER_2019
virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
{
InferenceEngine::Builder::Layer ieLayer(name);
ieLayer.setName(name);
ieLayer.setType("Pad");
std::vector<int> begins(paddings.size(), 0), ends(paddings.size(), 0);
for (int i = 0; i < paddings.size(); ++i)
{
begins[i] = paddings[i].first;
ends[i] = paddings[i].second;
}
ieLayer.getParameters()["pads_begin"] = begins;
ieLayer.getParameters()["pads_end"] = ends;
ieLayer.getParameters()["pad_mode"] = paddingType;
if (paddingType == "constant")
ieLayer.getParameters()["pad_value"] = paddingValue;
ieLayer.setInputPorts(std::vector<InferenceEngine::Port>(1));
ieLayer.setOutputPorts(std::vector<InferenceEngine::Port>(1));
return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
}
#endif
#ifdef HAVE_DNN_NGRAPH
virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs,

@ -106,11 +106,14 @@ public:
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
#ifdef HAVE_INF_ENGINE
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && preferableTarget == DNN_TARGET_CPU)
return _order.size() <= 4 || !isArmComputePlugin();
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
{
if (preferableTarget == DNN_TARGET_CPU)
return _order.size() <= 4 || !isArmComputePlugin();
return true;
}
#endif
return backendId == DNN_BACKEND_OPENCV ||
((backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) && haveInfEngine());
return backendId == DNN_BACKEND_OPENCV;
}
bool getMemoryShapes(const std::vector<MatShape> &inputs,
@ -379,14 +382,6 @@ public:
}
}
#ifdef HAVE_DNN_IE_NN_BUILDER_2019
virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
{
InferenceEngine::Builder::PermuteLayer ieLayer(name);
ieLayer.setOrder(_order);
return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
}
#endif // HAVE_DNN_IE_NN_BUILDER_2019
#ifdef HAVE_DNN_NGRAPH
virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs,

@ -182,34 +182,13 @@ public:
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
#ifdef HAVE_DNN_IE_NN_BUILDER_2019
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
{
if (computeMaxIdx)
return false;
if (kernel_size.size() == 3)
return preferableTarget == DNN_TARGET_CPU;
if (kernel_size.size() == 1)
return false;
if (preferableTarget == DNN_TARGET_MYRIAD) {
#if INF_ENGINE_VER_MAJOR_LE(INF_ENGINE_RELEASE_2019R1)
if (type == MAX && (pads_begin[1] == 1 && pads_begin[0] == 1) && (strides[0] == 2 && strides[1] == 2)) {
return !isMyriadX();
}
#endif
return type == MAX || type == AVE;
}
else
return type != STOCHASTIC && type != SUM;
}
#endif
#ifdef HAVE_INF_ENGINE
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
{
#ifdef HAVE_DNN_NGRAPH
return !computeMaxIdx && type != STOCHASTIC && kernel_size.size() > 1 && (kernel_size.size() != 3 || !isArmComputePlugin());
#endif
}
else if (backendId == DNN_BACKEND_OPENCV)
#endif
if (backendId == DNN_BACKEND_OPENCV)
{
if (kernel_size.size() == 3)
return preferableTarget == DNN_TARGET_CPU;
@ -336,54 +315,6 @@ public:
return Ptr<BackendNode>();
}
#ifdef HAVE_DNN_IE_NN_BUILDER_2019
virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
{
if (type == MAX || type == AVE)
{
InferenceEngine::Builder::PoolingLayer ieLayer(name);
ieLayer.setKernel(kernel_size);
ieLayer.setStrides(strides);
ieLayer.setPaddingsBegin(pads_begin);
ieLayer.setPaddingsEnd(pads_end);
ieLayer.setPoolingType(type == MAX ?
InferenceEngine::Builder::PoolingLayer::PoolingType::MAX :
InferenceEngine::Builder::PoolingLayer::PoolingType::AVG);
ieLayer.setRoundingType(ceilMode ?
InferenceEngine::Builder::PoolingLayer::RoundingType::CEIL :
InferenceEngine::Builder::PoolingLayer::RoundingType::FLOOR);
ieLayer.setExcludePad(!avePoolPaddedArea);
InferenceEngine::Builder::Layer l = ieLayer;
if (!padMode.empty())
l.getParameters()["auto_pad"] = padMode == "VALID" ? std::string("valid") : std::string("same_upper");
return Ptr<BackendNode>(new InfEngineBackendNode(l));
}
else if (type == ROI)
{
InferenceEngine::Builder::ROIPoolingLayer ieLayer(name);
ieLayer.setSpatialScale(spatialScale);
ieLayer.setPooled({pooledSize.height, pooledSize.width});
ieLayer.setInputPorts(std::vector<InferenceEngine::Port>(2));
return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
}
else if (type == PSROI)
{
InferenceEngine::Builder::PSROIPoolingLayer ieLayer(name);
ieLayer.setSpatialScale(spatialScale);
ieLayer.setOutputDim(psRoiOutChannels);
ieLayer.setGroupSize(pooledSize.width);
ieLayer.setInputPorts(std::vector<InferenceEngine::Port>(2));
return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
}
else
CV_Error(Error::StsNotImplemented, "Unsupported pooling type");
return Ptr<BackendNode>();
}
#endif // HAVE_DNN_IE_NN_BUILDER_2019
#ifdef HAVE_DNN_NGRAPH

@ -288,9 +288,7 @@ public:
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
return _explicitSizes || _stepX == _stepY;
#endif
return backendId == DNN_BACKEND_OPENCV ||
(backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && haveInfEngine() &&
( _explicitSizes || (_minSize.size() == 1 && _maxSize.size() <= 1)));
return backendId == DNN_BACKEND_OPENCV;
}
bool getMemoryShapes(const std::vector<MatShape> &inputs,
@ -499,67 +497,6 @@ public:
}
}
#ifdef HAVE_DNN_IE_NN_BUILDER_2019
virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
{
if (_explicitSizes)
{
InferenceEngine::Builder::PriorBoxClusteredLayer ieLayer(name);
ieLayer.setSteps({_stepY, _stepX});
CV_CheckEQ(_offsetsX.size(), (size_t)1, ""); CV_CheckEQ(_offsetsY.size(), (size_t)1, ""); CV_CheckEQ(_offsetsX[0], _offsetsY[0], "");
ieLayer.setOffset(_offsetsX[0]);
ieLayer.setClip(_clip);
ieLayer.setFlip(false); // We already flipped aspect ratios.
InferenceEngine::Builder::Layer l = ieLayer;
CV_Assert_N(!_boxWidths.empty(), !_boxHeights.empty(), !_variance.empty());
CV_Assert(_boxWidths.size() == _boxHeights.size());
l.getParameters()["width"] = _boxWidths;
l.getParameters()["height"] = _boxHeights;
l.getParameters()["variance"] = _variance;
return Ptr<BackendNode>(new InfEngineBackendNode(l));
}
else
{
InferenceEngine::Builder::PriorBoxLayer ieLayer(name);
CV_Assert(!_explicitSizes);
ieLayer.setMinSize(_minSize[0]);
if (!_maxSize.empty())
ieLayer.setMaxSize(_maxSize[0]);
CV_CheckEQ(_offsetsX.size(), (size_t)1, ""); CV_CheckEQ(_offsetsY.size(), (size_t)1, ""); CV_CheckEQ(_offsetsX[0], _offsetsY[0], "");
ieLayer.setOffset(_offsetsX[0]);
ieLayer.setClip(_clip);
ieLayer.setFlip(false); // We already flipped aspect ratios.
InferenceEngine::Builder::Layer l = ieLayer;
if (_stepX == _stepY)
{
l.getParameters()["step"] = _stepX;
l.getParameters()["step_h"] = 0.0f;
l.getParameters()["step_w"] = 0.0f;
}
else
{
l.getParameters()["step"] = 0.0f;
l.getParameters()["step_h"] = _stepY;
l.getParameters()["step_w"] = _stepX;
}
if (!_aspectRatios.empty())
{
l.getParameters()["aspect_ratio"] = _aspectRatios;
}
CV_Assert(!_variance.empty());
l.getParameters()["variance"] = _variance;
return Ptr<BackendNode>(new InfEngineBackendNode(l));
}
}
#endif // HAVE_DNN_IE_NN_BUILDER_2019
#ifdef HAVE_DNN_NGRAPH
virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs, const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE

@ -95,8 +95,14 @@ public:
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
return backendId == DNN_BACKEND_OPENCV ||
((backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) && preferableTarget != DNN_TARGET_MYRIAD);
#ifdef HAVE_INF_ENGINE
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
{
bool isMyriad = preferableTarget == DNN_TARGET_MYRIAD;
return !isMyriad;
}
#endif
return backendId == DNN_BACKEND_OPENCV;
}
bool getMemoryShapes(const std::vector<MatShape> &inputs,
@ -332,32 +338,6 @@ public:
layerOutputs[0].col(2).copyTo(dst);
}
#ifdef HAVE_DNN_IE_NN_BUILDER_2019
virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
{
InferenceEngine::Builder::ProposalLayer ieLayer(name);
ieLayer.setBaseSize(baseSize);
ieLayer.setFeatStride(featStride);
ieLayer.setMinSize(16);
ieLayer.setNMSThresh(nmsThreshold);
ieLayer.setPostNMSTopN(keepTopAfterNMS);
ieLayer.setPreNMSTopN(keepTopBeforeNMS);
std::vector<float> scalesVec(scales.size());
for (int i = 0; i < scales.size(); ++i)
scalesVec[i] = scales.get<float>(i);
ieLayer.setScale(scalesVec);
std::vector<float> ratiosVec(ratios.size());
for (int i = 0; i < ratios.size(); ++i)
ratiosVec[i] = ratios.get<float>(i);
ieLayer.setRatio(ratiosVec);
return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
}
#endif // HAVE_DNN_IE_NN_BUILDER_2019
#ifdef HAVE_DNN_NGRAPH
virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs,

@ -145,8 +145,11 @@ public:
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 ||
backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH;
#ifdef HAVE_INF_ENGINE
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
return true;
#endif
return backendId == DNN_BACKEND_OPENCV;
}
#ifdef HAVE_OPENCL
@ -189,14 +192,6 @@ public:
permute->forward(inputs, outputs, internals_arr);
}
#ifdef HAVE_DNN_IE_NN_BUILDER_2019
virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
{
InferenceEngine::Builder::ReorgYoloLayer ieLayer(name);
ieLayer.setStride(reorgStride);
return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
}
#endif // HAVE_DNN_IE_NN_BUILDER_2019
#ifdef HAVE_DNN_NGRAPH
virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> > &inputs,

@ -195,8 +195,11 @@ public:
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
return backendId == DNN_BACKEND_OPENCV ||
((backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) && haveInfEngine());
#ifdef HAVE_INF_ENGINE
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
return true;
#endif
return backendId == DNN_BACKEND_OPENCV;
}
bool getMemoryShapes(const std::vector<MatShape> &inputs,
@ -296,15 +299,6 @@ public:
}
}
#ifdef HAVE_DNN_IE_NN_BUILDER_2019
virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >& inputs) CV_OVERRIDE
{
InferenceEngine::Builder::ReshapeLayer ieLayer(name);
CV_Assert(outShapes.size() == 1);
ieLayer.setDims(outShapes[0]);
return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
}
#endif // HAVE_DNN_IE_NN_BUILDER_2019
#ifdef HAVE_DNN_NGRAPH
virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs,

@ -66,7 +66,7 @@ public:
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
#ifdef HAVE_INF_ENGINE
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
{
return (interpolation == "nearest" && scaleWidth == scaleHeight) ||
(interpolation == "bilinear");
@ -226,37 +226,6 @@ public:
CV_Error(Error::StsNotImplemented, "Unknown interpolation: " + interpolation);
}
#ifdef HAVE_DNN_IE_NN_BUILDER_2019
virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
{
InferenceEngine::Builder::Layer ieLayer(name);
ieLayer.setName(name);
if (interpolation == "nearest")
{
ieLayer.setType("Resample");
ieLayer.getParameters()["type"] = std::string("caffe.ResampleParameter.NEAREST");
ieLayer.getParameters()["antialias"] = false;
if (scaleWidth != scaleHeight)
CV_Error(Error::StsNotImplemented, "resample with sw != sh");
ieLayer.getParameters()["factor"] = 1.0f / scaleWidth;
}
else if (interpolation == "bilinear")
{
ieLayer.setType("Interp");
ieLayer.getParameters()["pad_beg"] = 0;
ieLayer.getParameters()["pad_end"] = 0;
ieLayer.getParameters()["align_corners"] = alignCorners;
}
else
CV_Error(Error::StsNotImplemented, "Unsupported interpolation: " + interpolation);
ieLayer.getParameters()["width"] = outWidth;
ieLayer.getParameters()["height"] = outHeight;
ieLayer.setInputPorts(std::vector<InferenceEngine::Port>(1));
ieLayer.setOutputPorts(std::vector<InferenceEngine::Port>(1));
return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
}
#endif // HAVE_DNN_IE_NN_BUILDER_2019
#ifdef HAVE_DNN_NGRAPH
virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs,

@ -53,9 +53,12 @@ public:
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_HALIDE ||
(backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && axis == 1 && !blobs.empty()) ||
(backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && axis > 0);
#ifdef HAVE_INF_ENGINE
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
return axis > 0;
#endif
return backendId == DNN_BACKEND_OPENCV ||
backendId == DNN_BACKEND_HALIDE;
}
void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr) CV_OVERRIDE
@ -197,34 +200,6 @@ public:
}
#endif // HAVE_HALIDE
#ifdef HAVE_DNN_IE_NN_BUILDER_2019
virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
{
InferenceEngine::Builder::Layer l = InferenceEngine::Builder::ScaleShiftLayer(name);
CV_Assert(!blobs.empty());
const size_t numChannels = blobs[0].total();
if (hasWeights)
{
addConstantData("weights", wrapToInfEngineBlob(blobs[0], {numChannels}, InferenceEngine::Layout::C), l);
}
else
{
auto weights = InferenceEngine::make_shared_blob<float>({
InferenceEngine::Precision::FP32, {(size_t)numChannels},
InferenceEngine::Layout::C
});
weights->allocate();
float* buf = weights->buffer().as<float*>();
std::fill(buf, buf + numChannels, 1);
addConstantData("weights", weights, l);
}
if (hasBias)
addConstantData("biases", wrapToInfEngineBlob(blobs.back(), {numChannels}, InferenceEngine::Layout::C), l);
return Ptr<BackendNode>(new InfEngineBackendNode(l));
}
#endif // HAVE_DNN_IE_NN_BUILDER_2019
#ifdef HAVE_DNN_NGRAPH
virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs, const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE

@ -160,12 +160,7 @@ public:
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
#ifdef HAVE_DNN_IE_NN_BUILDER_2019
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
return INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2019R1) &&
sliceRanges.size() == 1 && sliceRanges[0].size() == 4 && !hasSteps;
#endif
#ifdef HAVE_DNN_NGRAPH
#ifdef HAVE_INF_ENGINE
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
return sliceRanges.size() == 1 && !hasSteps;
#endif
@ -557,62 +552,6 @@ public:
}
}
#ifdef HAVE_DNN_IE_NN_BUILDER_2019
#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2019R1)
virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >& inputs) CV_OVERRIDE
{
CV_Assert_N(finalSliceRanges.size() == 1, inputs.size() <= 2);
std::vector<size_t> axes, offsets, dims;
int from, to, step;
int numDims = finalSliceRanges[0].size();
if (preferableTarget == DNN_TARGET_MYRIAD)
{
from = axis;
to = numDims;
step = 1;
}
else
{
from = numDims - 1;
to = axis - 1;
step = -1;
}
for (int i = from; i != to; i += step)
{
axes.push_back(i);
offsets.push_back(finalSliceRanges[0][i].start);
dims.push_back(finalSliceRanges[0][i].size());
}
InferenceEngine::Builder::Layer ieLayer(name);
ieLayer.setName(name);
ieLayer.setType("Crop");
ieLayer.getParameters()["axis"] = axes;
ieLayer.getParameters()["dim"] = dims;
ieLayer.getParameters()["offset"] = offsets;
ieLayer.setInputPorts(std::vector<InferenceEngine::Port>(2));
ieLayer.setOutputPorts(std::vector<InferenceEngine::Port>(1));
if (inputs.size() != 2)
{
std::vector<size_t> outShape(numDims);
for (int i = 0; i < numDims; ++i)
outShape[i] = finalSliceRanges[0][i].size();
ieLayer.getInputPorts()[1].setParameter("type", "weights");
auto shapeSource = InferenceEngine::make_shared_blob<float>({
InferenceEngine::Precision::FP32, outShape,
InferenceEngine::Layout::ANY
});
shapeSource->allocate();
addConstantData("weights", shapeSource, ieLayer);
}
return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
}
#endif
#endif
#ifdef HAVE_DNN_NGRAPH
virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs,

@ -90,10 +90,12 @@ public:
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
#ifdef HAVE_INF_ENGINE
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
return true;
#endif
return backendId == DNN_BACKEND_OPENCV ||
(backendId == DNN_BACKEND_HALIDE && haveHalide() && axisRaw == 1) ||
backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH ||
(backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && haveInfEngine() && !logSoftMax);
(backendId == DNN_BACKEND_HALIDE && haveHalide() && axisRaw == 1);
}
#ifdef HAVE_OPENCL
@ -312,17 +314,6 @@ public:
return Ptr<BackendNode>();
}
#ifdef HAVE_DNN_IE_NN_BUILDER_2019
virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >& inputs) CV_OVERRIDE
{
InferenceEngine::DataPtr input = infEngineDataNode(inputs[0]);
InferenceEngine::Builder::SoftMaxLayer ieLayer(name);
ieLayer.setAxis(normalize_axis(axisRaw, input->getDims().size()));
return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
}
#endif // HAVE_DNN_IE_NN_BUILDER_2019
#ifdef HAVE_DNN_NGRAPH
virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs,

File diff suppressed because it is too large

@ -48,37 +48,16 @@
#pragma GCC diagnostic ignored "-Wsuggest-override"
#endif
#if defined(HAVE_DNN_IE_NN_BUILDER_2019) || INF_ENGINE_VER_MAJOR_EQ(INF_ENGINE_RELEASE_2020_4)
//#define INFERENCE_ENGINE_DEPRECATED // turn off deprecation warnings from IE
//there is no way to suppress warnings from IE only at this moment, so we are forced to suppress warnings globally
#if defined(__GNUC__)
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
#endif
#ifdef _MSC_VER
#pragma warning(disable: 4996) // was declared deprecated
#endif
#endif
#if defined(__GNUC__) && INF_ENGINE_VER_MAJOR_LT(INF_ENGINE_RELEASE_2020_1)
#pragma GCC visibility push(default)
#endif
#include <inference_engine.hpp>
#ifdef HAVE_DNN_IE_NN_BUILDER_2019
#include <ie_builders.hpp>
#endif
#if defined(__GNUC__) && INF_ENGINE_VER_MAJOR_LT(INF_ENGINE_RELEASE_2020_1)
#pragma GCC visibility pop
#endif
#if defined(__GNUC__) && __GNUC__ >= 5
//#pragma GCC diagnostic pop
#endif
#endif // HAVE_INF_ENGINE
#define CV_ERROR_DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 do { CV_Error(Error::StsNotImplemented, "This OpenCV version is built without Inference Engine NN Builder API support (legacy API is not supported anymore)"); } while (0)
namespace cv { namespace dnn {
#ifdef HAVE_INF_ENGINE
@ -90,167 +69,6 @@ Mat infEngineBlobToMat(const InferenceEngine::Blob::Ptr& blob);
void infEngineBlobsToMats(const std::vector<InferenceEngine::Blob::Ptr>& blobs,
std::vector<Mat>& mats);
#ifdef HAVE_DNN_IE_NN_BUILDER_2019
class InfEngineBackendNet
{
public:
InfEngineBackendNet();
InfEngineBackendNet(InferenceEngine::CNNNetwork& net);
void addLayer(InferenceEngine::Builder::Layer& layer);
void addOutput(const std::string& name);
void connect(const std::vector<Ptr<BackendWrapper> >& inputs,
const std::vector<Ptr<BackendWrapper> >& outputs,
const std::string& layerName);
bool isInitialized();
void init(Target targetId);
void forward(const std::vector<Ptr<BackendWrapper> >& outBlobsWrappers,
bool isAsync);
void initPlugin(InferenceEngine::CNNNetwork& net);
void addBlobs(const std::vector<cv::Ptr<BackendWrapper> >& ptrs);
void reset();
private:
InferenceEngine::Builder::Network netBuilder;
InferenceEngine::ExecutableNetwork netExec;
InferenceEngine::BlobMap allBlobs;
std::string device_name;
#if INF_ENGINE_VER_MAJOR_LE(2019010000)
InferenceEngine::InferenceEnginePluginPtr enginePtr;
InferenceEngine::InferencePlugin plugin;
#else
bool isInit = false;
#endif
struct InfEngineReqWrapper
{
InfEngineReqWrapper() : isReady(true) {}
void makePromises(const std::vector<Ptr<BackendWrapper> >& outs);
InferenceEngine::InferRequest req;
std::vector<cv::AsyncPromise> outProms;
std::vector<std::string> outsNames;
bool isReady;
};
std::vector<Ptr<InfEngineReqWrapper> > infRequests;
InferenceEngine::CNNNetwork cnn;
bool hasNetOwner;
std::map<std::string, int> layers;
std::vector<std::string> requestedOutputs;
std::set<std::pair<int, int> > unconnectedPorts;
};
class InfEngineBackendNode : public BackendNode
{
public:
InfEngineBackendNode(const InferenceEngine::Builder::Layer& layer);
InfEngineBackendNode(Ptr<Layer>& layer, std::vector<Mat*>& inputs,
std::vector<Mat>& outputs, std::vector<Mat>& internals);
void connect(std::vector<Ptr<BackendWrapper> >& inputs,
std::vector<Ptr<BackendWrapper> >& outputs);
// Inference Engine network object that allows to obtain the outputs of this layer.
InferenceEngine::Builder::Layer layer;
Ptr<InfEngineBackendNet> net;
// CPU fallback in case of unsupported Inference Engine layer.
Ptr<dnn::Layer> cvLayer;
};
class InfEngineBackendWrapper : public BackendWrapper
{
public:
InfEngineBackendWrapper(int targetId, const Mat& m);
InfEngineBackendWrapper(Ptr<BackendWrapper> wrapper);
~InfEngineBackendWrapper();
static Ptr<BackendWrapper> create(Ptr<BackendWrapper> wrapper);
virtual void copyToHost() CV_OVERRIDE;
virtual void setHostDirty() CV_OVERRIDE;
InferenceEngine::DataPtr dataPtr;
InferenceEngine::Blob::Ptr blob;
AsyncArray futureMat;
};
InferenceEngine::Blob::Ptr wrapToInfEngineBlob(const Mat& m, InferenceEngine::Layout layout = InferenceEngine::Layout::ANY);
InferenceEngine::Blob::Ptr wrapToInfEngineBlob(const Mat& m, const std::vector<size_t>& shape, InferenceEngine::Layout layout);
InferenceEngine::DataPtr infEngineDataNode(const Ptr<BackendWrapper>& ptr);
// Convert Inference Engine blob with FP32 precision to FP16 precision.
// Allocates memory for a new blob.
InferenceEngine::Blob::Ptr convertFp16(const InferenceEngine::Blob::Ptr& blob);
void addConstantData(const std::string& name, InferenceEngine::Blob::Ptr data, InferenceEngine::Builder::Layer& l);
// This is a fake class to run networks from Model Optimizer. Objects of that
// class simulate responses of layers are imported by OpenCV and supported by
// Inference Engine. The main difference is that they do not perform forward pass.
class InfEngineBackendLayer : public Layer
{
public:
InfEngineBackendLayer(const InferenceEngine::CNNNetwork &t_net_) : t_net(t_net_) {};
virtual bool getMemoryShapes(const std::vector<MatShape> &inputs,
const int requiredOutputs,
std::vector<MatShape> &outputs,
std::vector<MatShape> &internals) const CV_OVERRIDE;
virtual void forward(InputArrayOfArrays inputs, OutputArrayOfArrays outputs,
OutputArrayOfArrays internals) CV_OVERRIDE;
virtual bool supportBackend(int backendId) CV_OVERRIDE;
private:
InferenceEngine::CNNNetwork t_net;
};
class InfEngineExtension : public InferenceEngine::IExtension
{
public:
#if INF_ENGINE_VER_MAJOR_LT(INF_ENGINE_RELEASE_2020_2)
virtual void SetLogCallback(InferenceEngine::IErrorListener&) noexcept {}
#endif
virtual void Unload() noexcept {}
virtual void Release() noexcept {}
virtual void GetVersion(const InferenceEngine::Version*&) const noexcept {}
virtual InferenceEngine::StatusCode getPrimitiveTypes(char**&, unsigned int&,
InferenceEngine::ResponseDesc*) noexcept
{
return InferenceEngine::StatusCode::OK;
}
InferenceEngine::StatusCode getFactoryFor(InferenceEngine::ILayerImplFactory*& factory,
const InferenceEngine::CNNLayer* cnnLayer,
InferenceEngine::ResponseDesc* resp) noexcept;
};
#endif // HAVE_DNN_IE_NN_BUILDER_2019
CV__DNN_EXPERIMENTAL_NS_BEGIN
@ -271,14 +89,8 @@ static inline std::vector<T> getShape(const Mat& mat)
return result;
}
#endif // HAVE_INF_ENGINE
bool haveInfEngine();
void forwardInfEngine(const std::vector<Ptr<BackendWrapper> >& outBlobsWrappers,
Ptr<BackendNode>& node, bool isAsync);
}} // namespace dnn, namespace cv
#endif // __OPENCV_DNN_OP_INF_ENGINE_HPP__

@ -256,16 +256,6 @@ testing::internal::ParamGenerator< tuple<Backend, Target> > dnnBackendsAndTarget
std::vector< tuple<Backend, Target> > targets;
std::vector< Target > available;
{
available = getAvailableTargets(DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019);
for (std::vector< Target >::const_iterator i = available.begin(); i != available.end(); ++i)
{
if (*i == DNN_TARGET_MYRIAD && !withVPU)
continue;
targets.push_back(make_tuple(DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019, *i));
}
}
{
available = getAvailableTargets(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH);
for (std::vector< Target >::const_iterator i = available.begin(); i != available.end(); ++i)
@ -274,7 +264,6 @@ testing::internal::ParamGenerator< tuple<Backend, Target> > dnnBackendsAndTarget
continue;
targets.push_back(make_tuple(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH, *i));
}
}
return testing::ValuesIn(targets);

@ -371,17 +371,17 @@ TEST_P(DNNTestOpenVINO, models)
|| modelName == "person-vehicle-bike-detection-2004" // 2021.4+: ncDeviceOpen:1013 Failed to find booted device after boot
)
)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_DNN_BACKEND_INFERENCE_ENGINE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
if (targetId == DNN_TARGET_OPENCL && (false
|| modelName == "face-detection-0106" // Operation: 2278 of type ExperimentalDetectronPriorGridGenerator(op::v6) is not supported
)
)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL, CV_DNN_BACKEND_INFERENCE_ENGINE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
if (targetId == DNN_TARGET_OPENCL_FP16 && (false
|| modelName == "face-detection-0106" // Operation: 2278 of type ExperimentalDetectronPriorGridGenerator(op::v6) is not supported
)
)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16, CV_DNN_BACKEND_INFERENCE_ENGINE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
#endif
#if INF_ENGINE_VER_MAJOR_GE(2020020000)
@ -397,12 +397,7 @@ TEST_P(DNNTestOpenVINO, models)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
#endif
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
setInferenceEngineBackendType(CV_DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_API);
else if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
setInferenceEngineBackendType(CV_DNN_BACKEND_INFERENCE_ENGINE_NGRAPH);
else
FAIL() << "Unknown backendId";
ASSERT_EQ(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH, backendId);
bool isFP16 = (targetId == DNN_TARGET_OPENCL_FP16 || targetId == DNN_TARGET_MYRIAD);

@ -1211,12 +1211,7 @@ TEST_P(Layer_Test_Convolution_DLDT, Accuracy)
if (backendId != DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && backendId != DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
throw SkipTestException("No support for async forward");
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
setInferenceEngineBackendType(CV_DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_API);
else if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
setInferenceEngineBackendType(CV_DNN_BACKEND_INFERENCE_ENGINE_NGRAPH);
else
FAIL() << "Unknown backendId";
ASSERT_EQ(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH, backendId);
Net netDefault = readNet(_tf("layer_convolution.caffemodel"), _tf("layer_convolution.prototxt"));
Net net = readNet(_tf("layer_convolution.xml"), _tf("layer_convolution.bin"));
@ -1256,12 +1251,7 @@ TEST_P(Layer_Test_Convolution_DLDT, setInput_uint8)
if (backendId != DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && backendId != DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
throw SkipTestException("No support for async forward");
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
setInferenceEngineBackendType(CV_DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_API);
else if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
setInferenceEngineBackendType(CV_DNN_BACKEND_INFERENCE_ENGINE_NGRAPH);
else
FAIL() << "Unknown backendId";
ASSERT_EQ(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH, backendId);
int blobSize[] = {2, 6, 75, 113};
Mat inputs[] = {Mat(4, &blobSize[0], CV_8U), Mat()};
@ -1294,12 +1284,7 @@ TEST_P(Layer_Test_Convolution_DLDT, multithreading)
if (backendId != DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && backendId != DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
throw SkipTestException("No support for async forward");
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
setInferenceEngineBackendType(CV_DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_API);
else if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
setInferenceEngineBackendType(CV_DNN_BACKEND_INFERENCE_ENGINE_NGRAPH);
else
FAIL() << "Unknown backendId";
ASSERT_EQ(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH, backendId);
std::string xmlPath = _tf("layer_convolution.xml");
std::string binPath = _tf("layer_convolution.bin");

@ -117,12 +117,7 @@ void test_readNet_IE_do_not_call_setInput(Backend backendId)
const std::string& model = findDataFile("dnn/layers/layer_convolution.bin");
const std::string& proto = findDataFile("dnn/layers/layer_convolution.xml");
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
setInferenceEngineBackendType(CV_DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_API);
else if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
setInferenceEngineBackendType(CV_DNN_BACKEND_INFERENCE_ENGINE_NGRAPH);
else
FAIL() << "Unknown backendId";
ASSERT_EQ(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH, backendId);
Net net = readNet(model, proto);
net.setPreferableBackend(backendId);
@ -458,12 +453,7 @@ TEST_P(Async, model_optimizer_pipeline_set_and_forward_single)
const std::string& model = findDataFile("dnn/layers/layer_convolution.bin");
const std::string& proto = findDataFile("dnn/layers/layer_convolution.xml");
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
setInferenceEngineBackendType(CV_DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_API);
else if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
setInferenceEngineBackendType(CV_DNN_BACKEND_INFERENCE_ENGINE_NGRAPH);
else
FAIL() << "Unknown backendId";
ASSERT_EQ(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH, backendId);
Net netSync = readNet(model, proto);
netSync.setPreferableBackend(backendId);
@ -519,12 +509,7 @@ TEST_P(Async, model_optimizer_pipeline_set_and_forward_all)
const std::string& model = findDataFile("dnn/layers/layer_convolution.bin");
const std::string& proto = findDataFile("dnn/layers/layer_convolution.xml");
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
setInferenceEngineBackendType(CV_DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_API);
else if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
setInferenceEngineBackendType(CV_DNN_BACKEND_INFERENCE_ENGINE_NGRAPH);
else
FAIL() << "Unknown backendId";
ASSERT_EQ(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH, backendId);
Net netSync = readNet(model, proto);
netSync.setPreferableBackend(backendId);
@ -582,12 +567,7 @@ TEST_P(Async, create_layer_pipeline_set_and_forward_all)
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && dtype == CV_8U)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
setInferenceEngineBackendType(CV_DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_API);
else if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
setInferenceEngineBackendType(CV_DNN_BACKEND_INFERENCE_ENGINE_NGRAPH);
else
FAIL() << "Unknown backendId";
ASSERT_EQ(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH, backendId);
Net netSync;
Net netAsync;
@ -693,12 +673,7 @@ TEST_P(Test_Model_Optimizer, forward_two_nets)
const std::string& model = findDataFile("dnn/layers/layer_convolution.bin");
const std::string& proto = findDataFile("dnn/layers/layer_convolution.xml");
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
setInferenceEngineBackendType(CV_DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_API);
else if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
setInferenceEngineBackendType(CV_DNN_BACKEND_INFERENCE_ENGINE_NGRAPH);
else
FAIL() << "Unknown backendId";
ASSERT_EQ(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH, backendId);
Net net0 = readNet(model, proto);
net0.setPreferableTarget(targetId);
@ -737,12 +712,7 @@ TEST_P(Test_Model_Optimizer, readFromBuffer)
const std::string& weightsFile = findDataFile("dnn/layers/layer_convolution.bin");
const std::string& modelFile = findDataFile("dnn/layers/layer_convolution.xml");
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
setInferenceEngineBackendType(CV_DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_API);
else if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
setInferenceEngineBackendType(CV_DNN_BACKEND_INFERENCE_ENGINE_NGRAPH);
else
FAIL() << "Unknown backendId";
ASSERT_EQ(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH, backendId);
Net net1 = readNetFromModelOptimizer(modelFile, weightsFile);
net1.setPreferableBackend(backendId);
@ -789,12 +759,7 @@ TEST_P(Test_Model_Optimizer, flexible_inputs)
const std::string& model = findDataFile("dnn/layers/layer_convolution.bin");
const std::string& proto = findDataFile("dnn/layers/layer_convolution.xml");
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
setInferenceEngineBackendType(CV_DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_API);
else if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
setInferenceEngineBackendType(CV_DNN_BACKEND_INFERENCE_ENGINE_NGRAPH);
else
FAIL() << "Unknown backendId";
ASSERT_EQ(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH, backendId);
Net net0 = readNet(model, proto);
net0.setPreferableTarget(targetId);
