diff --git a/cmake/OpenCVDetectInferenceEngine.cmake b/cmake/OpenCVDetectInferenceEngine.cmake
index 58a16f6d54..c0279d74d7 100644
--- a/cmake/OpenCVDetectInferenceEngine.cmake
+++ b/cmake/OpenCVDetectInferenceEngine.cmake
@@ -23,80 +23,6 @@ endif()
 
 # ======================
 
-macro(ocv_ie_find_extra_libraries find_prefix find_suffix)
-  file(GLOB libraries "${INF_ENGINE_LIB_DIRS}/${find_prefix}inference_engine*${find_suffix}")
-  foreach(full_path IN LISTS libraries)
-    get_filename_component(library "${full_path}" NAME_WE)
-    string(REPLACE "${find_prefix}" "" library "${library}")
-    if(library STREQUAL "inference_engine" OR library STREQUAL "inference_engined")
-      # skip
-    else()
-      add_library(${library} UNKNOWN IMPORTED)
-      set_target_properties(${library} PROPERTIES
-          IMPORTED_LOCATION "${full_path}")
-      list(APPEND custom_libraries ${library})
-    endif()
-  endforeach()
-endmacro()
-
-function(add_custom_ie_build _inc _lib _lib_rel _lib_dbg _msg)
-  if(NOT _inc OR NOT (_lib OR _lib_rel OR _lib_dbg))
-    return()
-  endif()
-  if(NOT _lib)
-    if(_lib_rel)
-      set(_lib "${_lib_rel}")
-    else()
-      set(_lib "${_lib_dbg}")
-    endif()
-  endif()
-  add_library(inference_engine UNKNOWN IMPORTED)
-  set_target_properties(inference_engine PROPERTIES
-    IMPORTED_LOCATION "${_lib}"
-    IMPORTED_IMPLIB_RELEASE "${_lib_rel}"
-    IMPORTED_IMPLIB_DEBUG "${_lib_dbg}"
-    INTERFACE_INCLUDE_DIRECTORIES "${_inc}"
-  )
-
-  set(custom_libraries "")
-  set(__prefixes "${CMAKE_FIND_LIBRARY_PREFIXES}")
-  if(NOT __prefixes)
-    set(__prefixes "_empty_")
-  endif()
-  foreach(find_prefix ${__prefixes})
-    if(find_prefix STREQUAL "_empty_")  # foreach doesn't iterate over empty elements
-      set(find_prefix "")
-    endif()
-    if(NOT DEFINED INFERENCE_ENGINE_FIND_LIBRARY_SUFFIXES)  # allow custom override
-      set(INFERENCE_ENGINE_FIND_LIBRARY_SUFFIXES ${CMAKE_FIND_LIBRARY_SUFFIXES})
-      if(APPLE)
-        ocv_list_filterout(INFERENCE_ENGINE_FIND_LIBRARY_SUFFIXES "^.so$")  # skip plugins (can't be linked)
-      endif()
-    endif()
-    foreach(find_suffix ${INFERENCE_ENGINE_FIND_LIBRARY_SUFFIXES})
-      ocv_ie_find_extra_libraries("${find_prefix}" "${find_suffix}")
-    endforeach()
-    if(NOT CMAKE_FIND_LIBRARY_SUFFIXES)
-      ocv_ie_find_extra_libraries("${find_prefix}" "")
-    endif()
-  endforeach()
-
-  if(NOT INF_ENGINE_RELEASE VERSION_GREATER "2018050000")
-    find_library(INF_ENGINE_OMP_LIBRARY iomp5 PATHS "${INF_ENGINE_OMP_DIR}" NO_DEFAULT_PATH)
-    if(NOT INF_ENGINE_OMP_LIBRARY)
-      message(WARNING "OpenMP for IE have not been found. Set INF_ENGINE_OMP_DIR variable if you experience build errors.")
-    endif()
-  endif()
-  if(EXISTS "${INF_ENGINE_OMP_LIBRARY}")
-    set_target_properties(inference_engine PROPERTIES IMPORTED_LINK_INTERFACE_LIBRARIES "${INF_ENGINE_OMP_LIBRARY}")
-  endif()
-  set(INF_ENGINE_VERSION "Unknown" CACHE STRING "")
-  set(INF_ENGINE_TARGET "inference_engine;${custom_libraries}" PARENT_SCOPE)
-  message(STATUS "Detected InferenceEngine: ${_msg}")
-endfunction()
-
-# ======================
-
 find_package(InferenceEngine QUIET)
 if(InferenceEngine_FOUND)
   set(INF_ENGINE_TARGET ${InferenceEngine_LIBRARIES})
@@ -118,38 +44,6 @@ elseif(DEFINED INF_ENGINE_RELEASE)
 endif()
 set(INF_ENGINE_RELEASE "${INF_ENGINE_RELEASE_INIT}" CACHE STRING "Force IE version, should be in form YYYYAABBCC (e.g. 2020.1.0.2 -> 2020010002)")
 
-if(NOT INF_ENGINE_TARGET AND INF_ENGINE_LIB_DIRS AND INF_ENGINE_INCLUDE_DIRS)
-  find_path(ie_custom_inc "inference_engine.hpp" PATHS "${INF_ENGINE_INCLUDE_DIRS}" NO_DEFAULT_PATH)
-  if(CMAKE_BUILD_TYPE STREQUAL "Debug")
-    find_library(ie_custom_lib_dbg "inference_engined" PATHS "${INF_ENGINE_LIB_DIRS}" NO_DEFAULT_PATH)  # Win32 and MacOSX
-  endif()
-  find_library(ie_custom_lib "inference_engine" PATHS "${INF_ENGINE_LIB_DIRS}" NO_DEFAULT_PATH)
-  find_library(ie_custom_lib_rel "inference_engine" PATHS "${INF_ENGINE_LIB_DIRS}/Release" NO_DEFAULT_PATH)
-  find_library(ie_custom_lib_dbg "inference_engine" PATHS "${INF_ENGINE_LIB_DIRS}/Debug" NO_DEFAULT_PATH)
-  add_custom_ie_build("${ie_custom_inc}" "${ie_custom_lib}" "${ie_custom_lib_rel}" "${ie_custom_lib_dbg}" "INF_ENGINE_{INCLUDE,LIB}_DIRS")
-endif()
-
-set(_loc "$ENV{INTEL_OPENVINO_DIR}")
-if(NOT _loc AND DEFINED ENV{INTEL_CVSDK_DIR})
-  set(_loc "$ENV{INTEL_CVSDK_DIR}")  # OpenVINO 2018.x
-endif()
-if(NOT INF_ENGINE_TARGET AND _loc)
-  if(NOT INF_ENGINE_RELEASE VERSION_GREATER "2018050000")
-    set(INF_ENGINE_PLATFORM_DEFAULT "ubuntu_16.04")
-  else()
-    set(INF_ENGINE_PLATFORM_DEFAULT "")
-  endif()
-  set(INF_ENGINE_PLATFORM "${INF_ENGINE_PLATFORM_DEFAULT}" CACHE STRING "InferenceEngine platform (library dir)")
-  find_path(ie_custom_env_inc "inference_engine.hpp" PATHS "${_loc}/deployment_tools/inference_engine/include" NO_DEFAULT_PATH)
-  if(CMAKE_BUILD_TYPE STREQUAL "Debug")
-    find_library(ie_custom_env_lib_dbg "inference_engined" PATHS "${_loc}/deployment_tools/inference_engine/lib/${INF_ENGINE_PLATFORM}/intel64" NO_DEFAULT_PATH)
-  endif()
-  find_library(ie_custom_env_lib "inference_engine" PATHS "${_loc}/deployment_tools/inference_engine/lib/${INF_ENGINE_PLATFORM}/intel64" NO_DEFAULT_PATH)
-  find_library(ie_custom_env_lib_rel "inference_engine" PATHS "${_loc}/deployment_tools/inference_engine/lib/intel64/Release" NO_DEFAULT_PATH)
-  find_library(ie_custom_env_lib_dbg "inference_engine" PATHS "${_loc}/deployment_tools/inference_engine/lib/intel64/Debug" NO_DEFAULT_PATH)
-  add_custom_ie_build("${ie_custom_env_inc}" "${ie_custom_env_lib}" "${ie_custom_env_lib_rel}" "${ie_custom_env_lib_dbg}" "OpenVINO (${_loc})")
-endif()
-
 set(tgts)
 set(defs)
diff --git a/modules/dnn/CMakeLists.txt b/modules/dnn/CMakeLists.txt
index 8b117a952d..4019356392 100644
--- a/modules/dnn/CMakeLists.txt
+++ b/modules/dnn/CMakeLists.txt
@@ -115,7 +115,12 @@ elseif(CMAKE_CXX_COMPILER_ID STREQUAL "Clang")
 endif()
 
 set(dnn_runtime_libs "")
-if(TARGET ocv.3rdparty.openvino)
+
+ocv_option(OPENCV_DNN_OPENVINO "Build with OpenVINO support (2021.4+)" (TARGET ocv.3rdparty.openvino))
+if(TARGET ocv.3rdparty.openvino AND OPENCV_DNN_OPENVINO)
+  if(NOT HAVE_OPENVINO AND NOT HAVE_NGRAPH)
+    message(FATAL_ERROR "DNN: Inference Engine is not supported without enabled 'nGraph'. Check build configuration.")
+  endif()
   list(APPEND dnn_runtime_libs ocv.3rdparty.openvino)
 endif()
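Note: with OpenVINO support now behind an explicit OPENCV_DNN_OPENVINO option, a build can end up without any Inference Engine backend even when OpenVINO is installed. A minimal runtime probe (a sketch, not part of this patch; it relies only on the public cv::dnn::getAvailableBackends() helper) lets an application verify the backend survived the build:

    // Sketch: check that this OpenCV build ships the OpenVINO/nGraph DNN backend.
    #include <opencv2/dnn.hpp>
    #include <iostream>

    int main()
    {
        bool haveNgraphBackend = false;
        for (const auto& bt : cv::dnn::getAvailableBackends())  // (backend, target) pairs
        {
            if (bt.first == cv::dnn::DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
                haveNgraphBackend = true;
        }
        std::cout << "OpenVINO/nGraph backend: " << (haveNgraphBackend ? "yes" : "no") << std::endl;
        return 0;
    }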
diff --git a/modules/dnn/include/opencv2/dnn/dnn.hpp b/modules/dnn/include/opencv2/dnn/dnn.hpp
index 8c9b32cdd7..00a147e260 100644
--- a/modules/dnn/include/opencv2/dnn/dnn.hpp
+++ b/modules/dnn/include/opencv2/dnn/dnn.hpp
@@ -284,8 +284,6 @@ CV__DNN_EXPERIMENTAL_NS_BEGIN
          */
         virtual Ptr<BackendNode> initHalide(const std::vector<Ptr<BackendWrapper> > &inputs);
 
-        virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> > &inputs);
-
         virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> > &inputs, const std::vector<Ptr<BackendNode> >& nodes);
 
         /**
diff --git a/modules/dnn/include/opencv2/dnn/utils/inference_engine.hpp b/modules/dnn/include/opencv2/dnn/utils/inference_engine.hpp
index 4a7e9e0786..c8c3d73337 100644
--- a/modules/dnn/include/opencv2/dnn/utils/inference_engine.hpp
+++ b/modules/dnn/include/opencv2/dnn/utils/inference_engine.hpp
@@ -15,14 +15,18 @@ CV__DNN_EXPERIMENTAL_NS_BEGIN
 
 /* Values for 'OPENCV_DNN_BACKEND_INFERENCE_ENGINE_TYPE' parameter */
+/// @deprecated
 #define CV_DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_API "NN_BUILDER"
+/// @deprecated
 #define CV_DNN_BACKEND_INFERENCE_ENGINE_NGRAPH "NGRAPH"
 
 /** @brief Returns Inference Engine internal backend API.
  *
  * See values of `CV_DNN_BACKEND_INFERENCE_ENGINE_*` macros.
  *
- * Default value is controlled through `OPENCV_DNN_BACKEND_INFERENCE_ENGINE_TYPE` runtime parameter (environment variable).
+ * `OPENCV_DNN_BACKEND_INFERENCE_ENGINE_TYPE` runtime parameter (environment variable) is ignored since 4.6.0.
+ *
+ * @deprecated
  */
 CV_EXPORTS_W cv::String getInferenceEngineBackendType();
 
@@ -31,6 +35,8 @@ CV_EXPORTS_W cv::String getInferenceEngineBackendType();
  * See values of `CV_DNN_BACKEND_INFERENCE_ENGINE_*` macros.
  *
  * @returns previous value of internal backend API
+ *
+ * @deprecated
  */
 CV_EXPORTS_W cv::String setInferenceEngineBackendType(const cv::String& newBackendType);
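Note: both macros and both accessors above are kept only as deprecated stubs. With the NN Builder path removed, DNN_BACKEND_INFERENCE_ENGINE always resolves to the nGraph implementation, and neither the environment variable nor setInferenceEngineBackendType() switches anything anymore. A sketch of the selection that still has an effect (explicit backend and target on the Net; "model.onnx" is a placeholder path):

    #include <opencv2/dnn.hpp>

    cv::dnn::Net net = cv::dnn::readNet("model.onnx");
    // DNN_BACKEND_INFERENCE_ENGINE is now a plain synonym for the nGraph backend;
    // setInferenceEngineBackendType() no longer selects an implementation.
    net.setPreferableBackend(cv::dnn::DNN_BACKEND_INFERENCE_ENGINE);
    net.setPreferableTarget(cv::dnn::DNN_TARGET_CPU);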
diff --git a/modules/dnn/src/dnn.cpp b/modules/dnn/src/dnn.cpp
index 2e2112064e..cbebad7e7c 100644
--- a/modules/dnn/src/dnn.cpp
+++ b/modules/dnn/src/dnn.cpp
@@ -164,40 +164,24 @@ private:
 #ifdef HAVE_INF_ENGINE
         if (checkIETarget(DNN_TARGET_CPU)) {
-#ifdef HAVE_DNN_IE_NN_BUILDER_2019
-            backends.push_back(std::make_pair(DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019, DNN_TARGET_CPU));
-#endif
 #ifdef HAVE_DNN_NGRAPH
             backends.push_back(std::make_pair(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH, DNN_TARGET_CPU));
 #endif
         }
         if (checkIETarget(DNN_TARGET_MYRIAD)) {
-#ifdef HAVE_DNN_IE_NN_BUILDER_2019
-            backends.push_back(std::make_pair(DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019, DNN_TARGET_MYRIAD));
-#endif
 #ifdef HAVE_DNN_NGRAPH
             backends.push_back(std::make_pair(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH, DNN_TARGET_MYRIAD));
 #endif
         }
-#ifdef HAVE_DNN_IE_NN_BUILDER_2019
-        if (checkIETarget(DNN_TARGET_FPGA))
-            backends.push_back(std::make_pair(DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019, DNN_TARGET_FPGA));
-#endif
 #ifdef HAVE_OPENCL
         if (cv::ocl::useOpenCL() && ocl::Device::getDefault().isIntel())
         {
             if (checkIETarget(DNN_TARGET_OPENCL)) {
-#ifdef HAVE_DNN_IE_NN_BUILDER_2019
-                backends.push_back(std::make_pair(DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019, DNN_TARGET_OPENCL));
-#endif
 #ifdef HAVE_DNN_NGRAPH
                 backends.push_back(std::make_pair(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH, DNN_TARGET_OPENCL));
 #endif
             }
             if (checkIETarget(DNN_TARGET_OPENCL_FP16)) {
-#ifdef HAVE_DNN_IE_NN_BUILDER_2019
-                backends.push_back(std::make_pair(DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019, DNN_TARGET_OPENCL_FP16));
-#endif
 #ifdef HAVE_DNN_NGRAPH
                 backends.push_back(std::make_pair(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH, DNN_TARGET_OPENCL_FP16));
 #endif
@@ -232,7 +216,7 @@ std::vector<Target> getAvailableTargets(Backend be)
         be = (Backend)PARAM_DNN_BACKEND_DEFAULT;
 #ifdef HAVE_INF_ENGINE
     if (be == DNN_BACKEND_INFERENCE_ENGINE)
-        be = getInferenceEngineBackendTypeParam();
+        be = DNN_BACKEND_INFERENCE_ENGINE_NGRAPH;
 #endif
 
     std::vector<Target> result;
@@ -588,8 +572,7 @@ struct DataLayer : public Layer
 
     virtual bool supportBackend(int backendId) CV_OVERRIDE
     {
-        return backendId == DNN_BACKEND_OPENCV ||
-               (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && inputsData.size() == 1);
+        return backendId == DNN_BACKEND_OPENCV;
     }
 
     void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr) CV_OVERRIDE
@@ -780,39 +763,6 @@ struct DataLayer : public Layer
         }
     }
 
-#ifdef HAVE_DNN_IE_NN_BUILDER_2019
-    virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
-    {
-        CV_CheckEQ(inputsData.size(), (size_t)1, "");
-        CV_CheckEQ(inputsData[0].dims, 4, "");
-        const size_t numChannels = inputsData[0].size[1];
-        CV_Assert(numChannels <= 4);
-
-        // Scale
-        InferenceEngine::TensorDesc td(InferenceEngine::Precision::FP32, {numChannels},
-                                       InferenceEngine::Layout::C);
-        auto weights = InferenceEngine::make_shared_blob<float>(td);
-        weights->allocate();
-
-        float* weight_buf = weights->buffer().as<float*>();
-        std::fill(weight_buf, weight_buf + numChannels, scaleFactors[0]);
-
-        // Mean subtraction
-        auto biases = InferenceEngine::make_shared_blob<float>(td);
-        biases->allocate();
-        float* bias_buf = biases->buffer().as<float*>();
-
-        for (int i = 0; i < numChannels; ++i)
-        {
-            bias_buf[i] = -means[0][i] * scaleFactors[0];
-        }
-
-        InferenceEngine::Builder::Layer ieLayer = InferenceEngine::Builder::ScaleShiftLayer(name);
-        addConstantData("weights", weights, ieLayer);
-        addConstantData("biases", biases, ieLayer);
-        return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
-    }
-#endif  // HAVE_DNN_IE_NN_BUILDER_2019
 
     std::vector<String> outNames;
     std::vector<MatShape> shapes;
@@ -1070,18 +1020,14 @@ static Ptr<BackendWrapper> wrapMat(int backendId, int targetId, cv::Mat& m)
     }
     else if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
     {
-#ifdef HAVE_DNN_IE_NN_BUILDER_2019
-        return Ptr<BackendWrapper>(new InfEngineBackendWrapper(targetId, m));
-#else
-        CV_Error(Error::StsNotImplemented, "This OpenCV version is built without Inference Engine NN Builder API support");
-#endif
+        CV_ERROR_DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019;
     }
     else if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
     {
 #ifdef HAVE_DNN_NGRAPH
         return Ptr<BackendWrapper>(new NgraphBackendWrapper(targetId, m));
 #else
-        CV_Error(Error::StsNotImplemented, "This OpenCV version is built without support of Inference Engine + nGraph");
+        CV_Error(Error::StsNotImplemented, "This OpenCV version is built without support of OpenVINO / Inference Engine + nGraph");
 #endif
     }
     else
@@ -1183,7 +1129,7 @@ struct Net::Impl : public detail::NetImplBase
         }
         else if (preferableBackend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
         {
-            return wrapMat(preferableBackend, preferableTarget, host);
+            CV_ERROR_DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019;
         }
         else if (preferableBackend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
         {
@@ -1285,7 +1231,7 @@ struct Net::Impl : public detail::NetImplBase
             preferableBackend = (Backend)PARAM_DNN_BACKEND_DEFAULT;
 #ifdef HAVE_INF_ENGINE
         if (preferableBackend == DNN_BACKEND_INFERENCE_ENGINE)
-            preferableBackend = getInferenceEngineBackendTypeParam();
+            preferableBackend = DNN_BACKEND_INFERENCE_ENGINE_NGRAPH;  // = getInferenceEngineBackendTypeParam();
 #endif
 
         CV_Assert(preferableBackend != DNN_BACKEND_OPENCV ||
@@ -1296,8 +1242,7 @@ struct Net::Impl : public detail::NetImplBase
                   preferableTarget == DNN_TARGET_CPU ||
                   preferableTarget == DNN_TARGET_OPENCL);
 #ifdef HAVE_INF_ENGINE
-        if (preferableBackend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 ||
-            preferableBackend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+        if (preferableBackend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
         {
             CV_Assert(
                   (preferableTarget == DNN_TARGET_CPU && (!isArmComputePlugin() || preferableBackend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)) ||
@@ -1552,14 +1497,6 @@ struct Net::Impl : public detail::NetImplBase
             CV_Assert(preferableTarget == DNN_TARGET_CPU || IS_DNN_OPENCL_TARGET(preferableTarget));
         else if (preferableBackend == DNN_BACKEND_HALIDE)
             initHalideBackend();
-        else if (preferableBackend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
-        {
-#ifdef HAVE_DNN_IE_NN_BUILDER_2019
-            initInfEngineBackend(blobsToKeep_);
-#else
-            CV_Assert(false && "This OpenCV version is built without Inference Engine NN Builder API support");
-#endif
-        }
         else if (preferableBackend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
         {
 #ifdef HAVE_DNN_NGRAPH
@@ -1569,7 +1506,7 @@ struct Net::Impl : public detail::NetImplBase
 #endif
         }
         else
-            CV_Error(Error::StsNotImplemented, "Unknown backend identifier");
+            CV_Error(Error::StsNotImplemented, cv::format("Unknown backend identifier: %d", preferableBackend));
     }
 
     void initHalideBackend()
@@ -1627,314 +1564,6 @@ struct Net::Impl : public detail::NetImplBase
         }
     }
 
-#ifdef HAVE_DNN_IE_NN_BUILDER_2019
-    // Before launching Inference Engine graph we need to specify output blobs.
-    // This function requests output blobs based on inputs references of
-    // layers from default backend or layers from different graphs.
-    void addInfEngineNetOutputs(LayerData &ld)
-    {
-        CV_TRACE_FUNCTION();
-        Ptr<InfEngineBackendNet> layerNet;
-        if (ld.backendNodes.find(preferableBackend) != ld.backendNodes.end())
-        {
-            Ptr<BackendNode> node = ld.backendNodes[preferableBackend];
-            if (!node.empty())
-            {
-                Ptr<InfEngineBackendNode> ieNode = node.dynamicCast<InfEngineBackendNode>();
-                CV_Assert(!ieNode.empty()); CV_Assert(!ieNode->net.empty());
-                layerNet = ieNode->net;
-            }
-        }
-        // For an every input reference we check that it belongs to one of
-        // the Inference Engine backend graphs. Request an output blob if it is.
-        // Do nothing if layer's input is from the same graph.
-        for (int i = 0; i < ld.inputBlobsId.size(); ++i)
-        {
-            LayerData &inpLd = layers[ld.inputBlobsId[i].lid];
-            Ptr<BackendNode> inpNode = inpLd.backendNodes[preferableBackend];
-            if (!inpNode.empty())
-            {
-                Ptr<InfEngineBackendNode> ieInpNode = inpNode.dynamicCast<InfEngineBackendNode>();
-                CV_Assert(!ieInpNode.empty()); CV_Assert(!ieInpNode->net.empty());
-                if (layerNet != ieInpNode->net)
-                {
-                    // layerNet is empty or nodes are from different graphs.
-                    ieInpNode->net->addOutput(ieInpNode->layer.getName());
-                }
-            }
-        }
-    }
-
-    void initInfEngineBackend(const std::vector<LayerPin>& blobsToKeep_)
-    {
-        CV_TRACE_FUNCTION();
-        CV_Assert_N(preferableBackend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019, haveInfEngine());
-        MapIdToLayerData::iterator it;
-        Ptr<InfEngineBackendNet> net;
-
-        for (it = layers.begin(); it != layers.end(); ++it)
-        {
-            LayerData &ld = it->second;
-            if (ld.id == 0)
-            {
-                CV_Assert((netInputLayer->outNames.empty() && ld.outputBlobsWrappers.size() == 1) ||
-                          (netInputLayer->outNames.size() == ld.outputBlobsWrappers.size()));
-                for (int i = 0; i < ld.outputBlobsWrappers.size(); ++i)
-                {
-                    InferenceEngine::DataPtr dataPtr = infEngineDataNode(ld.outputBlobsWrappers[i]);
-#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LE(2019010000)
-                    dataPtr->name = netInputLayer->outNames.empty() ? ld.name : netInputLayer->outNames[i];
-#else
-                    dataPtr->setName(netInputLayer->outNames.empty() ? ld.name : netInputLayer->outNames[i]);
-#endif
-                }
-            }
-            else
-            {
-                for (int i = 0; i < ld.outputBlobsWrappers.size(); ++i)
-                {
-                    InferenceEngine::DataPtr dataPtr = infEngineDataNode(ld.outputBlobsWrappers[i]);
-#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LE(2019010000)
-                    dataPtr->name = ld.name;
-#else
-                    dataPtr->setName(ld.name);
-#endif
-                }
-            }
-        }
-
-        if (skipInfEngineInit)
-        {
-            Ptr<BackendNode> node = layers[lastLayerId].backendNodes[preferableBackend];
-            CV_Assert(!node.empty());
-
-            Ptr<InfEngineBackendNode> ieNode = node.dynamicCast<InfEngineBackendNode>();
-            CV_Assert(!ieNode.empty());
-            ieNode->net->reset();
-
-            for (it = layers.begin(); it != layers.end(); ++it)
-            {
-                LayerData &ld = it->second;
-                if (ld.id == 0)
-                {
-                    for (int i = 0; i < ld.inputBlobsWrappers.size(); ++i)
-                    {
-                        InferenceEngine::DataPtr dataPtr = infEngineDataNode(ld.inputBlobsWrappers[i]);
-#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LE(2019010000)
-                        dataPtr->name = netInputLayer->outNames[i];
-#else
-                        dataPtr->setName(netInputLayer->outNames[i]);
-#endif
-                    }
-                }
-                else
-                {
-                    for (int i = 0; i < ld.outputBlobsWrappers.size(); ++i)
-                    {
-                        InferenceEngine::DataPtr dataPtr = infEngineDataNode(ld.outputBlobsWrappers[i]);
-#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LE(2019010000)
-                        dataPtr->name = ld.name;
-#else
-                        dataPtr->setName(ld.name);
-#endif
-                    }
-                }
-                ieNode->net->addBlobs(ld.inputBlobsWrappers);
-                ieNode->net->addBlobs(ld.outputBlobsWrappers);
-                ld.skip = true;
-            }
-            layers[lastLayerId].skip = false;
-            ieNode->net->init((Target)preferableTarget);
-            return;
-        }
-
-        // Build Inference Engine networks from sets of layers that support this
-        // backend. Split a whole model on several Inference Engine networks if
-        // some of layers are not implemented.
-
-        bool supportsCPUFallback = preferableTarget == DNN_TARGET_CPU ||
-                                   BackendRegistry::checkIETarget(DNN_TARGET_CPU);
-
-        // Set of all input and output blobs wrappers for current network.
-        std::map<LayerPin, Ptr<BackendWrapper> > netBlobsWrappers;
-        for (it = layers.begin(); it != layers.end(); ++it)
-        {
-            LayerData &ld = it->second;
-            if (ld.id == 0 && ld.skip)
-                continue;
-            bool fused = ld.skip;
-
-            Ptr<Layer> layer = ld.layerInstance;
-            if (!fused && !layer->supportBackend(preferableBackend))
-            {
-                bool customizable = ld.id != 0 &&
-                                    INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2019R2) &&
-                                    supportsCPUFallback;
-                // TODO: there is a bug in Myriad plugin with custom layers shape infer.
-                if (preferableTarget == DNN_TARGET_MYRIAD)
-                {
-                    for (int i = 0; customizable && i < ld.inputBlobs.size(); ++i)
-                    {
-                        customizable = ld.inputBlobs[i]->size[0] == 1;
-                    }
-                }
-
-                // TODO: fix these workarounds
-                if (preferableTarget == DNN_TARGET_MYRIAD ||
-                    preferableTarget == DNN_TARGET_OPENCL ||
-                    preferableTarget == DNN_TARGET_OPENCL_FP16)
-                    customizable &= ld.type != "Concat";
-
-                if (preferableTarget == DNN_TARGET_OPENCL ||
-                    preferableTarget == DNN_TARGET_OPENCL_FP16)
-                    customizable &= ld.type != "Power";
-
-                if (preferableTarget == DNN_TARGET_OPENCL)
-                    customizable &= ld.type != "Eltwise";
-
-                if (!customizable)
-                {
-                    addInfEngineNetOutputs(ld);
-                    net = Ptr<InfEngineBackendNet>();
-                    netBlobsWrappers.clear();  // Is not used for R5 release but we don't wrap it to #ifdef.
-                    layer->preferableTarget = DNN_TARGET_CPU;
-                    continue;
-                }
-            }
-            ld.skip = true;  // Initially skip all Inference Engine supported layers.
-
-            // Create a new network if one of inputs from different Inference Engine graph.
-            for (int i = 0; i < ld.inputBlobsId.size(); ++i)
-            {
-                LayerData &inpLd = layers[ld.inputBlobsId[i].lid];
-                Ptr<BackendNode> inpNode = inpLd.backendNodes[preferableBackend];
-                if (!inpNode.empty())
-                {
-                    Ptr<InfEngineBackendNode> ieInpNode = inpNode.dynamicCast<InfEngineBackendNode>();
-                    CV_Assert(!ieInpNode.empty()); CV_Assert(!ieInpNode->net.empty());
-                    if (ieInpNode->net != net)
-                    {
-                        net = Ptr<InfEngineBackendNet>();
-                        netBlobsWrappers.clear();  // Is not used for R5 release but we don't wrap it to #ifdef.
-                        break;
-                    }
-                }
-            }
-
-            Ptr<BackendNode> node;
-            if (!net.empty())
-            {
-                if (fused)
-                {
-                    bool inPlace = ld.inputBlobsId.size() == 1 && ld.outputBlobs.size() == 1 &&
-                                   ld.inputBlobs[0]->data == ld.outputBlobs[0].data;
-                    CV_Assert(inPlace);
-                    node = layers[ld.inputBlobsId[0].lid].backendNodes[preferableBackend];
-                    ld.inputBlobsWrappers = layers[ld.inputBlobsId[0].lid].inputBlobsWrappers;
-                }
-            }
-            else
-                net = Ptr<InfEngineBackendNet>(new InfEngineBackendNet());
-
-            if (!fused)
-            {
-                if (layer->supportBackend(preferableBackend))
-                    node = layer->initInfEngine(ld.inputBlobsWrappers);
-                else
-                {
-                    node = Ptr<BackendNode>(new InfEngineBackendNode(
-                        ld.layerInstance, ld.inputBlobs, ld.outputBlobs, ld.internals));
-                }
-            }
-            else if (node.empty())
-                continue;
-
-            CV_Assert(!node.empty());
-            ld.backendNodes[preferableBackend] = node;
-
-            Ptr<InfEngineBackendNode> ieNode = node.dynamicCast<InfEngineBackendNode>();
-            CV_Assert(!ieNode.empty());
-            ieNode->net = net;
-
-            for (const auto& pin : blobsToKeep_)
-            {
-                if (pin.lid == ld.id)
-                {
-                    ieNode->net->addOutput(ieNode->layer.getName());
-                    break;
-                }
-            }
-
-            // Convert weights in FP16 for specific targets.
-            if ((preferableTarget == DNN_TARGET_OPENCL_FP16 ||
-                 preferableTarget == DNN_TARGET_MYRIAD ||
-                 preferableTarget == DNN_TARGET_FPGA) && !fused)
-            {
-#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2019R1)
-                for (const std::string& name : {"weights", "biases"})
-                {
-                    auto it = ieNode->layer.getParameters().find(name);
-                    if (it != ieNode->layer.getParameters().end())
-                    {
-                        InferenceEngine::Blob::Ptr bp = it->second.as<InferenceEngine::Blob::Ptr>();
-                        it->second = convertFp16(std::const_pointer_cast<InferenceEngine::Blob>(bp));
-                    }
-                }
-#else
-                auto& blobs = ieNode->layer.getConstantData();
-                if (blobs.empty())
-                {
-                    // In case of non weightable layer we have to specify
-                    // it's precision adding dummy blob.
-                    auto blob = InferenceEngine::make_shared_blob<int16_t>(
-                                    InferenceEngine::Precision::FP16,
-                                    InferenceEngine::Layout::C, {1});
-                    blob->allocate();
-                    blobs[""] = blob;
-                }
-                else
-                {
-                    for (auto& it : blobs)
-                        it.second = convertFp16(std::const_pointer_cast<InferenceEngine::Blob>(it.second));
-                }
-#endif
-            }
-
-            if (!fused)
-                net->addLayer(ieNode->layer);
-
-            net->connect(ld.inputBlobsWrappers, ld.outputBlobsWrappers, ieNode->layer.getName());
-            net->addBlobs(ld.inputBlobsWrappers);
-            net->addBlobs(ld.outputBlobsWrappers);
-            addInfEngineNetOutputs(ld);
-        }
-
-        // Initialize all networks.
-        for (MapIdToLayerData::reverse_iterator it = layers.rbegin(); it != layers.rend(); ++it)
-        {
-            LayerData &ld = it->second;
-            if (ld.backendNodes.find(preferableBackend) == ld.backendNodes.end())
-                continue;
-
-            Ptr<BackendNode> node = ld.backendNodes[preferableBackend];
-            if (node.empty())
-                continue;
-
-            Ptr<InfEngineBackendNode> ieNode = node.dynamicCast<InfEngineBackendNode>();
-            if (ieNode.empty())
-                continue;
-
-            CV_Assert(!ieNode->net.empty());
-
-            if (!ieNode->net->isInitialized())
-            {
-                ieNode->net->init((Target)preferableTarget);
-                ld.skip = false;
-            }
-        }
-    }
-#endif  // HAVE_DNN_IE_NN_BUILDER_2019
-
 #ifdef HAVE_DNN_NGRAPH
     /** mark input pins as outputs from other subnetworks
@@ -1979,7 +1608,7 @@ struct Net::Impl : public detail::NetImplBase
     void initNgraphBackend(const std::vector<LayerPin>& blobsToKeep_)
     {
         CV_TRACE_FUNCTION();
-        CV_Assert_N(preferableBackend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH, haveInfEngine());
+        CV_CheckEQ(preferableBackend, DNN_BACKEND_INFERENCE_ENGINE_NGRAPH, "");
 
         Ptr<InfEngineNgraphNet> net;
 
@@ -2425,7 +2054,6 @@ struct Net::Impl : public detail::NetImplBase
         CV_TRACE_FUNCTION();
 
         if(!fusion || (preferableBackend != DNN_BACKEND_OPENCV &&
-                       preferableBackend != DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 &&
                        preferableBackend != DNN_BACKEND_INFERENCE_ENGINE_NGRAPH))
             return;
 
@@ -3024,14 +2652,12 @@ struct Net::Impl : public detail::NetImplBase
             {
                 forwardHalide(ld.outputBlobsWrappers, node);
             }
-            else if (preferableBackend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
-            {
-                forwardInfEngine(ld.outputBlobsWrappers, node, isAsync);
-            }
+#ifdef HAVE_INF_ENGINE
             else if (preferableBackend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
             {
                 forwardNgraph(ld.outputBlobsWrappers, node, isAsync);
             }
+#endif
             else
             {
                 CV_Error(Error::StsNotImplemented, "Unknown backend identifier");
@@ -3347,27 +2973,13 @@ struct Net::Impl : public detail::NetImplBase
                 // Transfer data to CPU if it's require.
                 ld.outputBlobsWrappers[pin.oid]->copyToHost();
             }
-            CV_Assert(preferableBackend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || preferableBackend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH);
+            CV_Assert(preferableBackend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH);
 
-            if (preferableBackend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019) {
-#ifdef HAVE_DNN_IE_NN_BUILDER_2019
-                Ptr<InfEngineBackendWrapper> wrapper = ld.outputBlobsWrappers[pin.oid].dynamicCast<InfEngineBackendWrapper>();
-                return std::move(wrapper->futureMat);
-#else
-                CV_Error(Error::StsNotImplemented, "This OpenCV version is built without Inference Engine NN Builder API support");
-#endif
-            }
-            else if (preferableBackend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
-            {
 #ifdef HAVE_DNN_NGRAPH
-                Ptr<NgraphBackendWrapper> wrapper = ld.outputBlobsWrappers[pin.oid].dynamicCast<NgraphBackendWrapper>();
-                return std::move(wrapper->futureMat);
+            Ptr<NgraphBackendWrapper> wrapper = ld.outputBlobsWrappers[pin.oid].dynamicCast<NgraphBackendWrapper>();
+            return std::move(wrapper->futureMat);
 #else
-                CV_Error(Error::StsNotImplemented, "This OpenCV version is built without support of Inference Engine + nGraph");
-#endif
-            }
+            CV_Error(Error::StsNotImplemented, "DNN: OpenVINO/nGraph backend is required");
 #endif  // HAVE_INF_ENGINE
-            CV_Error(Error::StsNotImplemented, "DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 backend is required");
     }
 
     AsyncArray getBlobAsync(String outputName)
@@ -3441,40 +3053,18 @@ Net Net::Impl::createNetworkFromModelOptimizer(InferenceEngine::CNNNetwork& ieNet)
 
     CV_TRACE_REGION_NEXT("backendNode");
     Ptr<BackendNode> backendNode;
-#ifdef HAVE_DNN_NGRAPH
-    if (DNN_BACKEND_INFERENCE_ENGINE_NGRAPH == getInferenceEngineBackendTypeParam())
     {
         auto fake_node = std::make_shared<ngraph::op::Parameter>(ngraph::element::f32, ngraph::Shape{});
         Ptr<InfEngineNgraphNode> backendNodeNGraph(new InfEngineNgraphNode(fake_node));
         backendNodeNGraph->net = Ptr<InfEngineNgraphNet>(new InfEngineNgraphNet(*(cvNet.impl), ieNet));
        backendNode = backendNodeNGraph;
     }
-    else
-#endif
-    {
-#ifdef HAVE_DNN_IE_NN_BUILDER_2019
-        Ptr<InfEngineBackendNode> backendNodeNN(new InfEngineBackendNode(InferenceEngine::Builder::Layer("")));
-        backendNodeNN->net = Ptr<InfEngineBackendNet>(new InfEngineBackendNet(ieNet));
-        backendNode = backendNodeNN;
-#else
-        CV_Error(Error::StsNotImplemented, "This OpenCV version is built without Inference Engine NN Builder API support");
-#endif
-    }
 
     CV_TRACE_REGION_NEXT("register_outputs");
 
-#ifdef HAVE_DNN_NGRAPH
     auto ngraphFunction = ieNet.getFunction();
-#if INF_ENGINE_VER_MAJOR_LT(INF_ENGINE_RELEASE_2020_2)
-    std::list< std::shared_ptr<ngraph::Node> > ngraphOperations;
-#else
-    std::vector< std::shared_ptr<ngraph::Node> > ngraphOperations;
-#endif
-    if (ngraphFunction)
-    {
-        ngraphOperations = ngraphFunction->get_ops();
-    }
-#endif
+    CV_Assert(ngraphFunction);
+    std::vector< std::shared_ptr<ngraph::Node> > ngraphOperations = ngraphFunction->get_ops();
 
     for (auto& it : ieNet.getOutputsInfo())
     {
@@ -3486,8 +3076,6 @@ Net Net::Impl::createNetworkFromModelOptimizer(InferenceEngine::CNNNetwork& ieNet)
 
         LayerData& ld = cvNet.impl->layers[lid];
 
-#ifdef HAVE_DNN_NGRAPH
-        if (DNN_BACKEND_INFERENCE_ENGINE_NGRAPH == getInferenceEngineBackendTypeParam())
         {
             Ptr<Layer> cvLayer(new NgraphBackendLayer(ieNet));
             cvLayer->name = outputName;
 
             auto process_layer = [&](const std::string& name) -> bool
             {
-                if (ngraphFunction)
-                {
-                    CV_TRACE_REGION("ngraph_function");
-                    for (const auto& op : ngraphOperations)
-                    {
-                        CV_Assert(op);
-                        if (op->get_friendly_name() == name)
-                        {
-                            const std::string typeName = op->get_type_info().name;
-                            cvLayer->type = typeName;
-                            return true;
-                        }
-                    }
-                    return false;
-                }
-                else
+                CV_TRACE_REGION("ngraph_function");
+                for (const auto& op : ngraphOperations)
                 {
-#if INF_ENGINE_VER_MAJOR_GT(INF_ENGINE_RELEASE_2020_4)
-                    CV_Error(Error::StsNotImplemented, "This OpenCV version is built with Inference Engine which has dropped IR v7 support");
-#else
-                    CV_TRACE_REGION("legacy_cnn_layer");
-                    try
+                    CV_Assert(op);
+                    if (op->get_friendly_name() == name)
                     {
-                        InferenceEngine::CNNLayerPtr ieLayer = ieNet.getLayerByName(name.c_str());
-                        CV_Assert(ieLayer);
-
-                        cvLayer->type = ieLayer->type;
+                        const std::string typeName = op->get_type_info().name;
+                        cvLayer->type = typeName;
                         return true;
                     }
-                    catch (const std::exception& e)
-                    {
-                        CV_UNUSED(e);
-                        CV_LOG_DEBUG(NULL, "IE layer extraction failure: '" << name << "' - " << e.what());
-                        return false;
-                    }
-#endif
                 }
+                return false;
             };
 
             bool found = process_layer(outputName);
@@ -3551,37 +3113,6 @@
             ld.layerInstance = cvLayer;
             ld.backendNodes[DNN_BACKEND_INFERENCE_ENGINE_NGRAPH] = backendNode;
         }
-        else
-#endif
-        {
-#ifdef HAVE_DNN_IE_NN_BUILDER_2019
-            Ptr<Layer> cvLayer(new InfEngineBackendLayer(ieNet));
-
-            InferenceEngine::CNNLayerPtr ieLayer;
-            try
-            {
-                ieLayer = ieNet.getLayerByName(outputName.c_str());
-            }
-            catch (...)
-            {
-                auto pos = outputName.rfind('.');  // cut port number: ".0"
-                if (pos != std::string::npos)
-                {
-                    std::string layerName = outputName.substr(0, pos);
-                    ieLayer = ieNet.getLayerByName(layerName.c_str());
-                }
-            }
-            CV_Assert(ieLayer);
-
-            cvLayer->name = outputName;
-            cvLayer->type = ieLayer->type;
-            ld.layerInstance = cvLayer;
-
-            ld.backendNodes[DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019] = backendNode;
-#else
-            CV_Error(Error::StsNotImplemented, "This OpenCV version is built without Inference Engine NN Builder API support");
-#endif
-        }
 
         for (int i = 0; i < inputsNames.size(); ++i)
             cvNet.connect(0, i, lid, i);
@@ -3589,7 +3120,7 @@
 
     CV_TRACE_REGION_NEXT("finalize");
 
-    cvNet.setPreferableBackend(getInferenceEngineBackendTypeParam());
+    cvNet.setPreferableBackend(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH);
 
     cvNet.impl->skipInfEngineInit = true;
     return cvNet;
@@ -3606,16 +3137,8 @@ Net Net::readFromModelOptimizer(const String& xml, const String& bin)
 
     FPDenormalsIgnoreHintScope fp_denormals_ignore_scope;
 
-#if INF_ENGINE_VER_MAJOR_LE(INF_ENGINE_RELEASE_2019R3)
-    InferenceEngine::CNNNetReader reader;
-    reader.ReadNetwork(xml);
-    reader.ReadWeights(bin);
-
-    InferenceEngine::CNNNetwork ieNet = reader.getNetwork();
-#else
     InferenceEngine::Core& ie = getCore("");
     InferenceEngine::CNNNetwork ieNet = ie.ReadNetwork(xml, bin);
-#endif
 
     return Impl::createNetworkFromModelOptimizer(ieNet);
 #endif  // HAVE_INF_ENGINE
@@ -3644,26 +3167,6 @@ Net Net::readFromModelOptimizer(
 
     FPDenormalsIgnoreHintScope fp_denormals_ignore_scope;
 
-#if INF_ENGINE_VER_MAJOR_LE(INF_ENGINE_RELEASE_2019R3)
-    InferenceEngine::CNNNetReader reader;
-
-    try
-    {
-        reader.ReadNetwork(bufferModelConfigPtr, bufferModelConfigSize);
-
-        InferenceEngine::TensorDesc tensorDesc(InferenceEngine::Precision::U8, { bufferWeightsSize }, InferenceEngine::Layout::C);
-        InferenceEngine::TBlob<uint8_t>::Ptr weightsBlobPtr(new InferenceEngine::TBlob<uint8_t>(tensorDesc));
-        weightsBlobPtr->allocate();
-        std::memcpy(weightsBlobPtr->buffer(), (uchar*)bufferWeightsPtr, bufferWeightsSize);
-        reader.SetWeights(weightsBlobPtr);
-    }
-    catch (const std::exception& e)
-    {
-        CV_Error(Error::StsError, std::string("DNN: IE failed to load model: ") + e.what());
-    }
-
-    InferenceEngine::CNNNetwork ieNet = reader.getNetwork();
-#else
     InferenceEngine::Core& ie = getCore("");
     std::string model; model.assign((char*)bufferModelConfigPtr, bufferModelConfigSize);
@@ -3680,7 +3183,6 @@ Net Net::readFromModelOptimizer(
     {
         CV_Error(Error::StsError, std::string("DNN: IE failed to load model: ") + e.what());
     }
-#endif
 
     return Impl::createNetworkFromModelOptimizer(ieNet);
 #endif  // HAVE_INF_ENGINE
@@ -3775,8 +3277,8 @@ AsyncArray Net::forwardAsync(const String& outputName)
     std::vector<LayerPin> pins(1, impl->getPinByAlias(layerName));
     impl->setUpNet(pins);
 
-    if (!(impl->preferableBackend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || impl->preferableBackend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH))
-        CV_Error(Error::StsNotImplemented, "DNN: Asynchronous forward is supported for Inference Engine backends only");
+    if (impl->preferableBackend != DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+        CV_Error(Error::StsNotImplemented, "DNN: Asynchronous forward is supported for Inference Engine backend only");
 
     impl->isAsync = true;
     impl->forwardToLayer(impl->getLayerData(layerName));
@@ -3932,7 +3434,7 @@ void Net::setPreferableBackend(int backendId)
 
 #ifdef HAVE_INF_ENGINE
     if (backendId == DNN_BACKEND_INFERENCE_ENGINE)
-        backendId = getInferenceEngineBackendTypeParam();
+        backendId = DNN_BACKEND_INFERENCE_ENGINE_NGRAPH;
 #endif
 
     if( impl->preferableBackend != backendId )
@@ -4172,8 +3674,8 @@ string Net::Impl::dump() const
             case DNN_BACKEND_DEFAULT: backend = "DEFAULT/"; break;
             case DNN_BACKEND_HALIDE: backend = "HALIDE/"; break;
             case DNN_BACKEND_INFERENCE_ENGINE: // fallthru
-            case DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019: backend = "DLIE/"; break;
-            case DNN_BACKEND_INFERENCE_ENGINE_NGRAPH: backend = "NGRAPH/"; break;
+            case DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019: // fallthru
+            case DNN_BACKEND_INFERENCE_ENGINE_NGRAPH: backend = "OpenVINO/"; break;
             case DNN_BACKEND_OPENCV: backend = "OCV/"; break;
             // don't use default:
         }
@@ -4749,13 +4251,6 @@ Ptr<BackendNode> Layer::initHalide(const std::vector<Ptr<BackendWrapper> > &)
     return Ptr<BackendNode>();
 }
 
-Ptr<BackendNode> Layer::initInfEngine(const std::vector<Ptr<BackendWrapper> > &)
-{
-    CV_Error(Error::StsNotImplemented, "Inference Engine pipeline of " + type +
-                                       " layers is not defined.");
-    return Ptr<BackendNode>();
-}
-
 Ptr<BackendNode> Layer::initNgraph(const std::vector<Ptr<BackendWrapper> > & inputs, const std::vector<Ptr<BackendNode> >& nodes)
 {
     CV_Error(Error::StsNotImplemented, "Inference Engine pipeline of " + type +
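Note: after this change forwardAsync() works only with the nGraph backend, which readFromModelOptimizer() now always selects. A usage sketch (assumes an OpenVINO IR pair; "model.xml"/"model.bin" and the 1x3x224x224 input shape are placeholders):

    #include <opencv2/dnn.hpp>

    cv::dnn::Net net = cv::dnn::Net::readFromModelOptimizer("model.xml", "model.bin");
    // The loader pins the net to DNN_BACKEND_INFERENCE_ENGINE_NGRAPH already.
    cv::Mat blob(std::vector<int>{1, 3, 224, 224}, CV_32F, cv::Scalar(0));
    net.setInput(blob);
    cv::AsyncArray futureOut = net.forwardAsync();  // throws on any other backend
    cv::Mat out;
    futureOut.get(out);  // blocks until the asynchronous request completes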
diff --git a/modules/dnn/src/layers/batch_norm_layer.cpp b/modules/dnn/src/layers/batch_norm_layer.cpp
index dcb4005975..212d34a061 100644
--- a/modules/dnn/src/layers/batch_norm_layer.cpp
+++ b/modules/dnn/src/layers/batch_norm_layer.cpp
@@ -163,9 +163,12 @@ public:
 
     virtual bool supportBackend(int backendId) CV_OVERRIDE
     {
+#ifdef HAVE_INF_ENGINE
+        if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+            return preferableTarget == DNN_TARGET_CPU || dims == 4;
+#endif
         return (backendId == DNN_BACKEND_OPENCV) ||
-               (backendId == DNN_BACKEND_HALIDE && haveHalide()) ||
-               ((backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) && haveInfEngine() && (preferableTarget == DNN_TARGET_CPU || dims == 4));
+               (backendId == DNN_BACKEND_HALIDE && haveHalide());
     }
 
 #ifdef HAVE_OPENCL
@@ -361,16 +364,6 @@ public:
     }
 #endif  // HAVE_HALIDE
 
-#ifdef HAVE_DNN_IE_NN_BUILDER_2019
-    virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
-    {
-        InferenceEngine::Builder::Layer ieLayer = InferenceEngine::Builder::ScaleShiftLayer(name);
-        const size_t numChannels = weights_.total();
-        addConstantData("weights", wrapToInfEngineBlob(weights_, {numChannels}, InferenceEngine::Layout::C), ieLayer);
-        addConstantData("biases", wrapToInfEngineBlob(bias_, {numChannels}, InferenceEngine::Layout::C), ieLayer);
-        return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
-    }
-#endif  // HAVE_DNN_IE_NN_BUILDER_2019
 
 #ifdef HAVE_DNN_NGRAPH
     virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs, const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
diff --git a/modules/dnn/src/layers/blank_layer.cpp b/modules/dnn/src/layers/blank_layer.cpp
index e7b385e969..e461ea1d93 100644
--- a/modules/dnn/src/layers/blank_layer.cpp
+++ b/modules/dnn/src/layers/blank_layer.cpp
@@ -57,8 +57,11 @@ public:
 
     virtual bool supportBackend(int backendId) CV_OVERRIDE
     {
-        return backendId == DNN_BACKEND_OPENCV ||
-               ((backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) && haveInfEngine());
+#ifdef HAVE_INF_ENGINE
+        if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+            return true;
+#endif
+        return backendId == DNN_BACKEND_OPENCV;
     }
 
     bool getMemoryShapes(const std::vector<MatShape> &inputs,
@@ -108,31 +111,6 @@ public:
                 inputs[i].copyTo(outputs[i]);
     }
 
-#ifdef HAVE_DNN_IE_NN_BUILDER_2019
-    virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >& inputs) CV_OVERRIDE
-    {
-        InferenceEngine::DataPtr input = infEngineDataNode(inputs[0]);
-        std::vector<size_t> dims = input->getDims();
-        CV_Assert(!dims.empty());
-
-        InferenceEngine::Builder::Layer ieLayer(name);
-        ieLayer.setName(name);
-        if (preferableTarget == DNN_TARGET_MYRIAD)
-        {
-            ieLayer.setType("Copy");
-        }
-        else
-        {
-            ieLayer.setType("Split");
-            ieLayer.getParameters()["axis"] = dims.size() - 1;
-            ieLayer.getParameters()["out_sizes"] = dims[0];
-        }
-        ieLayer.setInputPorts({InferenceEngine::Port(dims)});
-        ieLayer.setOutputPorts(std::vector<InferenceEngine::Port>(1));
-        return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
-    }
-#endif  // HAVE_DNN_IE_NN_BUILDER_2019
-
 #ifdef HAVE_DNN_NGRAPH
     virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs,
diff --git a/modules/dnn/src/layers/concat_layer.cpp b/modules/dnn/src/layers/concat_layer.cpp
index 2f92c69e05..57241daf01 100644
--- a/modules/dnn/src/layers/concat_layer.cpp
+++ b/modules/dnn/src/layers/concat_layer.cpp
@@ -104,10 +104,12 @@ public:
 
     virtual bool supportBackend(int backendId) CV_OVERRIDE
     {
+#ifdef HAVE_INF_ENGINE
+        if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+            return true;
+#endif
         return backendId == DNN_BACKEND_OPENCV ||
-               (backendId == DNN_BACKEND_HALIDE && haveHalide() && axis == 1 && !padding) ||  // By channels
-               (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && haveInfEngine() && !padding) ||
-               backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH;
+               (backendId == DNN_BACKEND_HALIDE && haveHalide() && axis == 1 && !padding);  // By channels
     }
 
     class ChannelConcatInvoker : public ParallelLoopBody
@@ -300,18 +302,6 @@ public:
         return Ptr<BackendNode>();
     }
 
-#ifdef HAVE_DNN_IE_NN_BUILDER_2019
-    virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >& inputs) CV_OVERRIDE
-    {
-        InferenceEngine::DataPtr input = infEngineDataNode(inputs[0]);
-
-        InferenceEngine::Builder::ConcatLayer ieLayer(name);
-        ieLayer.setAxis(normalize_axis(axis, input->getDims().size()));
-        ieLayer.setInputPorts(std::vector<InferenceEngine::Port>(inputs.size()));
-        return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
-    }
-#endif  // HAVE_DNN_IE_NN_BUILDER_2019
-
 #ifdef HAVE_DNN_NGRAPH
     virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs,
diff --git a/modules/dnn/src/layers/const_layer.cpp b/modules/dnn/src/layers/const_layer.cpp
index bc23064bee..428740d664 100644
--- a/modules/dnn/src/layers/const_layer.cpp
+++ b/modules/dnn/src/layers/const_layer.cpp
@@ -27,9 +27,11 @@ public:
 
     virtual bool supportBackend(int backendId) CV_OVERRIDE
     {
-        return backendId == DNN_BACKEND_OPENCV ||
-               backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 ||
-               backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH;
+#ifdef HAVE_INF_ENGINE
+        if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+            return true;
+#endif
+        return backendId == DNN_BACKEND_OPENCV;
     }
 
     virtual bool getMemoryShapes(const std::vector<MatShape> &inputs,
@@ -68,15 +70,6 @@ public:
         blobs[0].copyTo(outputs[0]);
     }
 
-#ifdef HAVE_DNN_IE_NN_BUILDER_2019
-    virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
-    {
-        InferenceEngine::Builder::ConstLayer ieLayer(name);
-        ieLayer.setData(wrapToInfEngineBlob(blobs[0]));
-        return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
-    }
-#endif  // HAVE_DNN_IE_NN_BUILDER_2019
-
 #ifdef HAVE_DNN_NGRAPH
     virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs,
diff --git a/modules/dnn/src/layers/convolution_layer.cpp b/modules/dnn/src/layers/convolution_layer.cpp
index e160cfeebe..ebe912b485 100644
--- a/modules/dnn/src/layers/convolution_layer.cpp
+++ b/modules/dnn/src/layers/convolution_layer.cpp
@@ -272,7 +272,7 @@ public:
     {
         size_t ksize = kernel_size.size();
 #ifdef HAVE_INF_ENGINE
-        if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+        if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
         {
             bool isArmTarget = preferableTarget == DNN_TARGET_CPU && isArmComputePlugin();
             if (isArmTarget && blobs.empty())
@@ -281,7 +281,8 @@ public:
                 return isArmTarget;
             if (ksize == 3)
                 return preferableTarget != DNN_TARGET_MYRIAD && !isArmTarget;
-            if ((backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || preferableTarget != DNN_TARGET_MYRIAD) && blobs.empty())
+            bool isMyriad = preferableTarget == DNN_TARGET_MYRIAD;
+            if (!isMyriad && blobs.empty())
                 return false;
             return (preferableTarget != DNN_TARGET_MYRIAD || dilation.width == dilation.height);
         }
@@ -514,68 +515,6 @@ public:
         return Ptr<BackendNode>();
     }
 
-#ifdef HAVE_DNN_IE_NN_BUILDER_2019
-    virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> > &inputs) CV_OVERRIDE
-    {
-        InferenceEngine::DataPtr input = infEngineDataNode(inputs[0]);
-        std::vector<size_t> dims = input->getDims();
-        CV_Assert(dims.size() == 4 || dims.size() == 5);
-        const int inpCn = dims[1];
-        const int outCn = blobs[0].size[0];
-        const int inpGroupCn = blobs[0].size[1];
-        const int group = inpCn / inpGroupCn;
-        InferenceEngine::Layout layout = (dims.size() == 4) ? InferenceEngine::Layout::OIHW :
InferenceEngine::Layout::OIHW : - InferenceEngine::Layout::NCDHW; - - auto ieWeights = wrapToInfEngineBlob(blobs[0], layout); - if (fusedWeights) - { - if (weightsMat.isContinuous()) - { - Mat cvWeights = weightsMat.reshape(1, blobs[0].dims, blobs[0].size); - ieWeights = wrapToInfEngineBlob(cvWeights, layout); - } - else - { - ieWeights = InferenceEngine::make_shared_blob({ - InferenceEngine::Precision::FP32, - ieWeights->getTensorDesc().getDims(), layout - }); - ieWeights->allocate(); - - Mat newWeights = infEngineBlobToMat(ieWeights).reshape(1, outCn); - Mat cvWeights = weightsMat.colRange(0, newWeights.cols); - cvWeights.copyTo(newWeights); - } - } - InferenceEngine::Blob::Ptr ieBiases; - if (hasBias() || fusedBias) - { - Mat biasesMat({outCn}, CV_32F, &biasvec[0]); - ieBiases = wrapToInfEngineBlob(biasesMat, {(size_t)outCn}, InferenceEngine::Layout::C); - } - - InferenceEngine::Builder::ConvolutionLayer ieLayer(name); - - ieLayer.setKernel(kernel_size); - ieLayer.setStrides(strides); - ieLayer.setDilation(dilations); - ieLayer.setPaddingsBegin(pads_begin); - ieLayer.setPaddingsEnd(pads_end); - ieLayer.setGroup((size_t)group); - ieLayer.setOutDepth((size_t)outCn); - - InferenceEngine::Builder::Layer l = ieLayer; - addConstantData("weights", ieWeights, l); - if (ieBiases) - addConstantData("biases", ieBiases, l); - - if (!padMode.empty()) - l.getParameters()["auto_pad"] = padMode == "VALID" ? std::string("valid") : std::string("same_upper"); - - return Ptr(new InfEngineBackendNode(l)); - } -#endif // HAVE_DNN_IE_NN_BUILDER_2019 #ifdef HAVE_DNN_NGRAPH virtual Ptr initNgraph(const std::vector > &inputs, @@ -1792,52 +1731,6 @@ public: if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) { return group == 1; } - -#ifdef HAVE_DNN_IE_NN_BUILDER_2019 - if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019) - { - if (kernel_size.size() == 3 && preferableTarget != DNN_TARGET_CPU) { - return false; - } - - if (std::accumulate(adjust_pads.begin(), adjust_pads.end(), 0, std::plus()) > 0) - { - if (padMode.empty()) - { - if (preferableTarget != DNN_TARGET_CPU && group != 1) - { - for (int i = 0; i < adjust_pads.size(); i++) { - if (adjust_pads[i] && pads_begin[i]) - return false; - } - } - for (int i = 0; i < adjust_pads.size(); i++) { - if (pads_end[i] < adjust_pads[i]) - return false; - } - return true; - } - else if (padMode == "SAME") - { - for (int i = 0; i < adjust_pads.size(); i++) { - if (kernel_size[i] < pads_begin[i] + 1 + adjust_pads[i]) - return false; - } - return true; - } - else if (padMode == "VALID") - return false; - } - - if (group != 1) - { - return preferableTarget == DNN_TARGET_CPU; - } - if (preferableTarget == DNN_TARGET_OPENCL || preferableTarget == DNN_TARGET_OPENCL_FP16) - return std::accumulate(dilations.begin(), dilations.end(), 1, std::multiplies()) == 1; - return true; - } -#endif // HAVE_DNN_IE_NN_BUILDER_2019 #endif // HAVE_INF_ENGINE { return kernel_size.size() == 2 && (backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_HALIDE); @@ -2423,63 +2316,6 @@ public: return Ptr(); } -#ifdef HAVE_DNN_IE_NN_BUILDER_2019 - virtual Ptr initInfEngine(const std::vector > &) CV_OVERRIDE - { - InferenceEngine::Layout layout = blobs[0].dims == 5? 
InferenceEngine::Layout::NCDHW : - InferenceEngine::Layout::OIHW; - - auto ieWeights = wrapToInfEngineBlob(blobs[0], layout); - if (fusedWeights) - { - ieWeights = InferenceEngine::make_shared_blob({ - InferenceEngine::Precision::FP32, - ieWeights->getTensorDesc().getDims(), layout - }); - ieWeights->allocate(); - - int inpCn = blobs[0].size[0]; - Mat newWeights = infEngineBlobToMat(ieWeights).reshape(1, inpCn); - transpose(weightsMat, newWeights); - } - - const int outGroupCn = blobs[0].size[1]; // Weights are in IOHW or OIDHW layout - const int group = numOutput / outGroupCn; - - InferenceEngine::Builder::DeconvolutionLayer ieLayer(name); - - ieLayer.setKernel(kernel_size); - ieLayer.setStrides(strides); - ieLayer.setDilation(dilations); - ieLayer.setPaddingsBegin(pads_begin); - - if (padMode.empty()) - { - std::vector paddings_end; - for (int i = 0; i < pads_end.size(); i++) { - paddings_end.push_back(pads_end[i] - adjust_pads[i]); - } - ieLayer.setPaddingsEnd(paddings_end); - } - else if (padMode == "SAME") - { - std::vector paddings_end; - for (int i = 0; i < pads_begin.size(); i++) { - paddings_end.push_back(kernel_size[i] - pads_begin[i] - 1 - adjust_pads[i]); - } - ieLayer.setPaddingsEnd(paddings_end); - } - ieLayer.setGroup((size_t)group); - ieLayer.setOutDepth((size_t)numOutput); - - InferenceEngine::Builder::Layer l = ieLayer; - addConstantData("weights", ieWeights, l); - if (hasBias()) - addConstantData("biases", wrapToInfEngineBlob(biasesMat, {(size_t)numOutput}, InferenceEngine::Layout::C), l); - return Ptr(new InfEngineBackendNode(l)); - } -#endif // HAVE_DNN_IE_NN_BUILDER_2019 - #ifdef HAVE_DNN_NGRAPH virtual Ptr initNgraph(const std::vector > &inputs, diff --git a/modules/dnn/src/layers/detection_output_layer.cpp b/modules/dnn/src/layers/detection_output_layer.cpp index 614b3a6462..80408210d4 100644 --- a/modules/dnn/src/layers/detection_output_layer.cpp +++ b/modules/dnn/src/layers/detection_output_layer.cpp @@ -215,7 +215,7 @@ public: virtual bool supportBackend(int backendId) CV_OVERRIDE { return backendId == DNN_BACKEND_OPENCV || - ((backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) && !_locPredTransposed && _bboxesNormalized); + (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && !_locPredTransposed && _bboxesNormalized); } bool getMemoryShapes(const std::vector &inputs, @@ -945,30 +945,6 @@ public: } } -#ifdef HAVE_DNN_IE_NN_BUILDER_2019 - virtual Ptr initInfEngine(const std::vector >&) CV_OVERRIDE - { - InferenceEngine::Builder::DetectionOutputLayer ieLayer(name); - - ieLayer.setNumClasses(_numClasses); - ieLayer.setShareLocation(_shareLocation); - ieLayer.setBackgroudLabelId(_backgroundLabelId); - ieLayer.setNMSThreshold(_nmsThreshold); - ieLayer.setTopK(_topK > 0 ? _topK : _keepTopK); - ieLayer.setKeepTopK(_keepTopK); - ieLayer.setConfidenceThreshold(_confidenceThreshold); - ieLayer.setVariantEncodedInTarget(_varianceEncodedInTarget); - ieLayer.setCodeType("caffe.PriorBoxParameter." 
+ _codeType); - ieLayer.setInputPorts(std::vector(3)); - - InferenceEngine::Builder::Layer l = ieLayer; - l.getParameters()["eta"] = std::string("1.0"); - l.getParameters()["clip"] = _clip; - - return Ptr(new InfEngineBackendNode(l)); - } -#endif // HAVE_DNN_IE_NN_BUILDER_2019 - #ifdef HAVE_DNN_NGRAPH virtual Ptr initNgraph(const std::vector >& inputs, const std::vector >& nodes) CV_OVERRIDE diff --git a/modules/dnn/src/layers/elementwise_layers.cpp b/modules/dnn/src/layers/elementwise_layers.cpp index 21838fbe49..b84523eb93 100644 --- a/modules/dnn/src/layers/elementwise_layers.cpp +++ b/modules/dnn/src/layers/elementwise_layers.cpp @@ -156,14 +156,6 @@ public: return Ptr(); } -#ifdef HAVE_DNN_IE_NN_BUILDER_2019 - virtual Ptr initInfEngine(const std::vector >&) CV_OVERRIDE - { - InferenceEngine::Builder::Layer ieLayer = func.initInfEngineBuilderAPI(); - ieLayer.setName(this->name); - return Ptr(new InfEngineBackendNode(ieLayer)); - } -#endif // HAVE_DNN_IE_NN_BUILDER_2019 #ifdef HAVE_DNN_NGRAPH virtual Ptr initNgraph(const std::vector >& inputs, const std::vector >& nodes) CV_OVERRIDE @@ -272,10 +264,6 @@ struct ReLUFunctor : public BaseFunctor bool supportBackend(int backendId, int) { -#ifdef HAVE_DNN_IE_NN_BUILDER_2019 - if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019) - return slope >= 0 || !INF_ENGINE_VER_MAJOR_EQ(INF_ENGINE_RELEASE_2019R1); -#endif #ifdef HAVE_DNN_NGRAPH if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) return true; @@ -373,13 +361,6 @@ struct ReLUFunctor : public BaseFunctor } #endif // HAVE_HALIDE -#ifdef HAVE_DNN_IE_NN_BUILDER_2019 - InferenceEngine::Builder::Layer initInfEngineBuilderAPI() - { - return InferenceEngine::Builder::ReLULayer("").setNegativeSlope(slope); - } -#endif // HAVE_DNN_IE_NN_BUILDER_2019 - #ifdef HAVE_DNN_NGRAPH std::shared_ptr initNgraphAPI(const std::shared_ptr& node) { @@ -407,8 +388,12 @@ struct ReLU6Functor : public BaseFunctor bool supportBackend(int backendId, int) { - return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_HALIDE || - backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH; +#ifdef HAVE_INF_ENGINE + if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) + return true; +#endif + return backendId == DNN_BACKEND_OPENCV || + backendId == DNN_BACKEND_HALIDE; } void apply(const float* srcptr, float* dstptr, int len, size_t planeSize, int cn0, int cn1) const @@ -483,12 +468,6 @@ struct ReLU6Functor : public BaseFunctor } #endif // HAVE_HALIDE -#ifdef HAVE_DNN_IE_NN_BUILDER_2019 - InferenceEngine::Builder::Layer initInfEngineBuilderAPI() - { - return InferenceEngine::Builder::ClampLayer("").setMinValue(minValue).setMaxValue(maxValue); - } -#endif // HAVE_DNN_IE_NN_BUILDER_2019 #ifdef HAVE_DNN_NGRAPH std::shared_ptr initNgraphAPI(const std::shared_ptr& node) @@ -546,12 +525,6 @@ struct BaseDefaultFunctor : public BaseFunctor inline void setKernelParams(ocl::Kernel& kernel) const {} -#ifdef HAVE_DNN_IE_NN_BUILDER_2019 - InferenceEngine::Builder::Layer initInfEngineBuilderAPI() - { - CV_Error(Error::StsNotImplemented, ""); - } -#endif // HAVE_DNN_IE_NN_BUILDER_2019 #ifdef HAVE_DNN_NGRAPH std::shared_ptr initNgraphAPI(const std::shared_ptr& node) @@ -570,8 +543,12 @@ struct TanHFunctor : public BaseDefaultFunctor bool supportBackend(int backendId, int) { - return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_HALIDE || - backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH; 
+#ifdef HAVE_INF_ENGINE + if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) + return true; +#endif + return backendId == DNN_BACKEND_OPENCV || + backendId == DNN_BACKEND_HALIDE; } inline float calculate(float x) const @@ -587,13 +564,6 @@ struct TanHFunctor : public BaseDefaultFunctor } #endif // HAVE_HALIDE -#ifdef HAVE_DNN_IE_NN_BUILDER_2019 - InferenceEngine::Builder::Layer initInfEngineBuilderAPI() - { - return InferenceEngine::Builder::TanHLayer(""); - } -#endif // HAVE_DNN_IE_NN_BUILDER_2019 - #ifdef HAVE_DNN_NGRAPH std::shared_ptr initNgraphAPI(const std::shared_ptr& node) { @@ -700,8 +670,12 @@ struct SigmoidFunctor : public BaseDefaultFunctor bool supportBackend(int backendId, int) { - return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_HALIDE || - backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH; +#ifdef HAVE_INF_ENGINE + if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) + return true; +#endif + return backendId == DNN_BACKEND_OPENCV || + backendId == DNN_BACKEND_HALIDE; } inline float calculate(float x) const @@ -717,12 +691,6 @@ struct SigmoidFunctor : public BaseDefaultFunctor } #endif // HAVE_HALIDE -#ifdef HAVE_DNN_IE_NN_BUILDER_2019 - InferenceEngine::Builder::Layer initInfEngineBuilderAPI() - { - return InferenceEngine::Builder::SigmoidLayer(""); - } -#endif // HAVE_DNN_IE_NN_BUILDER_2019 #ifdef HAVE_DNN_NGRAPH std::shared_ptr initNgraphAPI(const std::shared_ptr& node) @@ -746,8 +714,12 @@ struct ELUFunctor : public BaseDefaultFunctor bool supportBackend(int backendId, int) { - return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_HALIDE || - backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH; +#ifdef HAVE_INF_ENGINE + if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) + return true; +#endif + return backendId == DNN_BACKEND_OPENCV || + backendId == DNN_BACKEND_HALIDE; } inline float calculate(float x) const @@ -768,13 +740,6 @@ struct ELUFunctor : public BaseDefaultFunctor } #endif // HAVE_HALIDE -#ifdef HAVE_DNN_IE_NN_BUILDER_2019 - InferenceEngine::Builder::Layer initInfEngineBuilderAPI() - { - return InferenceEngine::Builder::ELULayer(""); - } -#endif // HAVE_DNN_IE_NN_BUILDER_2019 - #ifdef HAVE_DNN_NGRAPH std::shared_ptr initNgraphAPI(const std::shared_ptr& node) { @@ -795,8 +760,8 @@ struct AbsValFunctor : public BaseDefaultFunctor bool supportBackend(int backendId, int) { #ifdef HAVE_INF_ENGINE - if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) - return !INF_ENGINE_VER_MAJOR_EQ(INF_ENGINE_RELEASE_2019R1); + if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) + return true; #endif return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_HALIDE; } @@ -814,12 +779,6 @@ struct AbsValFunctor : public BaseDefaultFunctor } #endif // HAVE_HALIDE -#ifdef HAVE_DNN_IE_NN_BUILDER_2019 - InferenceEngine::Builder::Layer initInfEngineBuilderAPI() - { - return InferenceEngine::Builder::ReLULayer("").setNegativeSlope(-0.999999f); - } -#endif // HAVE_DNN_IE_NN_BUILDER_2019 #ifdef HAVE_DNN_NGRAPH std::shared_ptr initNgraphAPI(const std::shared_ptr& node) @@ -880,10 +839,14 @@ struct PowerFunctor : public BaseFunctor bool supportBackend(int backendId, int targetId) { - if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019) - return (targetId != DNN_TARGET_OPENCL && targetId != DNN_TARGET_OPENCL_FP16) || power == 1.0 || power == 0.5; - else - 
return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_HALIDE || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH; +#ifdef HAVE_INF_ENGINE + if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) + return true; +#endif + { + return backendId == DNN_BACKEND_OPENCV || + backendId == DNN_BACKEND_HALIDE; + } } void finalize() @@ -968,14 +931,6 @@ struct PowerFunctor : public BaseFunctor } #endif // HAVE_HALIDE -#ifdef HAVE_DNN_IE_NN_BUILDER_2019 - InferenceEngine::Builder::Layer initInfEngineBuilderAPI() - { - return InferenceEngine::Builder::PowerLayer("").setPower(power) - .setScale(scale) - .setShift(shift); - } -#endif // HAVE_DNN_IE_NN_BUILDER_2019 #ifdef HAVE_DNN_NGRAPH std::shared_ptr initNgraphAPI(const std::shared_ptr& node) @@ -1104,8 +1059,12 @@ struct ChannelsPReLUFunctor : public BaseFunctor bool supportBackend(int backendId, int) { - return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_HALIDE || - backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH; +#ifdef HAVE_INF_ENGINE + if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) + return true; +#endif + return backendId == DNN_BACKEND_OPENCV || + backendId == DNN_BACKEND_HALIDE; } void apply(const float* srcptr, float* dstptr, int len, size_t planeSize, int cn0, int cn1) const @@ -1188,15 +1147,6 @@ struct ChannelsPReLUFunctor : public BaseFunctor } #endif // HAVE_HALIDE -#ifdef HAVE_DNN_IE_NN_BUILDER_2019 - InferenceEngine::Builder::Layer initInfEngineBuilderAPI() - { - InferenceEngine::Builder::Layer l = InferenceEngine::Builder::PReLULayer(""); - const size_t numChannels = scale.total(); - addConstantData("weights", wrapToInfEngineBlob(scale, {numChannels}, InferenceEngine::Layout::C), l); - return l; - } -#endif // HAVE_DNN_IE_NN_BUILDER_2019 #ifdef HAVE_DNN_NGRAPH std::shared_ptr initNgraphAPI(const std::shared_ptr& node) diff --git a/modules/dnn/src/layers/eltwise_layer.cpp b/modules/dnn/src/layers/eltwise_layer.cpp index f30bb6b43d..df84902317 100644 --- a/modules/dnn/src/layers/eltwise_layer.cpp +++ b/modules/dnn/src/layers/eltwise_layer.cpp @@ -154,10 +154,13 @@ public: if (hasVecInput && ELTWISE_CHANNNELS_SAME) return backendId == DNN_BACKEND_OPENCV; +#ifdef HAVE_INF_ENGINE + if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) + return channelsMode == ELTWISE_CHANNNELS_SAME; +#endif + return backendId == DNN_BACKEND_OPENCV || - backendId == DNN_BACKEND_HALIDE || - ((((backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && (preferableTarget != DNN_TARGET_OPENCL || coeffs.empty())) - || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) && channelsMode == ELTWISE_CHANNNELS_SAME)); + backendId == DNN_BACKEND_HALIDE; } bool getMemoryShapes(const std::vector &inputs, @@ -752,32 +755,6 @@ public: return Ptr(); } -#ifdef HAVE_DNN_IE_NN_BUILDER_2019 - virtual Ptr initInfEngine(const std::vector >& inputs) CV_OVERRIDE - { - InferenceEngine::Builder::EltwiseLayer ieLayer(name); - - ieLayer.setInputPorts(std::vector(inputs.size())); - - if (op == SUM) - ieLayer.setEltwiseType(InferenceEngine::Builder::EltwiseLayer::EltwiseType::SUM); - else if (op == PROD) - ieLayer.setEltwiseType(InferenceEngine::Builder::EltwiseLayer::EltwiseType::MUL); - else if (op == DIV) - ieLayer.setEltwiseType(InferenceEngine::Builder::EltwiseLayer::EltwiseType::DIV); - else if (op == MAX) - ieLayer.setEltwiseType(InferenceEngine::Builder::EltwiseLayer::EltwiseType::MAX); - else - CV_Error(Error::StsNotImplemented, "Unsupported eltwise operation"); - - 
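Note: the guard-first shape of supportBackend() above repeats through the remaining layer files as well: answer the nGraph query inside HAVE_INF_ENGINE first, then fall back to the OpenCV/Halide checks. As a sketch, a hypothetical layer following the new convention would read:

    virtual bool supportBackend(int backendId) CV_OVERRIDE
    {
    #ifdef HAVE_INF_ENGINE
        if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
            return true;  // or a layer-specific restriction, as in the hunks above
    #endif
        return backendId == DNN_BACKEND_OPENCV ||
               backendId == DNN_BACKEND_HALIDE;
    }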
InferenceEngine::Builder::Layer l = ieLayer; - if (!coeffs.empty()) - l.getParameters()["coeff"] = coeffs; - - return Ptr(new InfEngineBackendNode(l)); - } -#endif // HAVE_DNN_IE_NN_BUILDER_2019 - #ifdef HAVE_DNN_NGRAPH virtual Ptr initNgraph(const std::vector >& inputs, diff --git a/modules/dnn/src/layers/flatten_layer.cpp b/modules/dnn/src/layers/flatten_layer.cpp index 1e0b010167..72db42b943 100644 --- a/modules/dnn/src/layers/flatten_layer.cpp +++ b/modules/dnn/src/layers/flatten_layer.cpp @@ -66,8 +66,11 @@ public: virtual bool supportBackend(int backendId) CV_OVERRIDE { - return backendId == DNN_BACKEND_OPENCV || - ((backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) && haveInfEngine()); +#ifdef HAVE_INF_ENGINE + if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) + return true; +#endif + return backendId == DNN_BACKEND_OPENCV; } bool getMemoryShapes(const std::vector &inputs, @@ -163,24 +166,11 @@ public: } } -#ifdef HAVE_DNN_IE_NN_BUILDER_2019 - virtual Ptr initInfEngine(const std::vector >& inputs) CV_OVERRIDE - { - InferenceEngine::Builder::Layer ieLayer(name); - ieLayer.setName(name); - ieLayer.setType("Flatten"); - ieLayer.getParameters()["axis"] = (size_t)_startAxis; - ieLayer.getParameters()["end_axis"] = _endAxis; // Do not cast to size_t because it might be negative. - ieLayer.setInputPorts(std::vector(1)); - ieLayer.setOutputPorts(std::vector(1)); - return Ptr(new InfEngineBackendNode(ieLayer)); - } -#endif // HAVE_DNN_IE_NN_BUILDER_2019 #ifdef HAVE_DNN_NGRAPH -virtual Ptr initNgraph(const std::vector >& inputs, - const std::vector >& nodes) CV_OVERRIDE -{ + virtual Ptr initNgraph(const std::vector >& inputs, + const std::vector >& nodes) CV_OVERRIDE + { auto& ieInpNode = nodes[0].dynamicCast()->node; std::vector dims = ieInpNode->get_shape(); diff --git a/modules/dnn/src/layers/fully_connected_layer.cpp b/modules/dnn/src/layers/fully_connected_layer.cpp index 5acce939f1..9c1c360a20 100644 --- a/modules/dnn/src/layers/fully_connected_layer.cpp +++ b/modules/dnn/src/layers/fully_connected_layer.cpp @@ -143,10 +143,13 @@ public: virtual bool supportBackend(int backendId) CV_OVERRIDE { +#ifdef HAVE_INF_ENGINE + if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) + return axis == 1; +#endif + return backendId == DNN_BACKEND_OPENCV || - (backendId == DNN_BACKEND_HALIDE && haveHalide() && axis == 1) || - (((backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && !blobs.empty()) || - backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) && axis == 1); + (backendId == DNN_BACKEND_HALIDE && haveHalide() && axis == 1); } virtual bool setActivation(const Ptr& layer) CV_OVERRIDE @@ -534,23 +537,6 @@ public: return Ptr(); } -#ifdef HAVE_DNN_IE_NN_BUILDER_2019 - virtual Ptr initInfEngine(const std::vector >&) CV_OVERRIDE - { - InferenceEngine::Builder::FullyConnectedLayer ieLayer(name); - - const int outNum = blobs[0].size[0]; - ieLayer.setOutputNum(outNum); - - InferenceEngine::Builder::Layer l = ieLayer; - addConstantData("weights", wrapToInfEngineBlob(blobs[0], {(size_t)blobs[0].size[0], (size_t)blobs[0].size[1], 1, 1}, InferenceEngine::Layout::OIHW), l); - if (bias) - addConstantData("biases", wrapToInfEngineBlob(blobs[1], {(size_t)outNum}, InferenceEngine::Layout::C), l); - - return Ptr(new InfEngineBackendNode(l)); - } -#endif // HAVE_DNN_IE_NN_BUILDER_2019 - #ifdef HAVE_DNN_NGRAPH virtual Ptr initNgraph(const std::vector >& inputs, diff --git a/modules/dnn/src/layers/lrn_layer.cpp 
b/modules/dnn/src/layers/lrn_layer.cpp index 65cd160dfe..ecd565e505 100644 --- a/modules/dnn/src/layers/lrn_layer.cpp +++ b/modules/dnn/src/layers/lrn_layer.cpp @@ -92,13 +92,12 @@ public: virtual bool supportBackend(int backendId) CV_OVERRIDE { - if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019) { +#ifdef HAVE_INF_ENGINE + if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) return bias == (int)bias; - } - if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) { - return bias == (int)bias; - } - return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_HALIDE; +#endif + return backendId == DNN_BACKEND_OPENCV || + backendId == DNN_BACKEND_HALIDE; } #ifdef HAVE_OPENCL @@ -385,24 +384,6 @@ public: #endif // HAVE_HALIDE } -#ifdef HAVE_DNN_IE_NN_BUILDER_2019 - virtual Ptr initInfEngine(const std::vector >&) CV_OVERRIDE - { - float alphaSize = alpha; - if (!normBySize) - alphaSize *= (type == SPATIAL_NRM ? size*size : size); - - InferenceEngine::Builder::NormLayer ieLayer(name); - ieLayer.setSize(size); - ieLayer.setAlpha(alphaSize); - ieLayer.setBeta(beta); - ieLayer.setAcrossMaps(type == CHANNEL_NRM); - - InferenceEngine::Builder::Layer l = ieLayer; - l.getParameters()["k"] = bias; - return Ptr(new InfEngineBackendNode(l)); - } -#endif // HAVE_DNN_IE_NN_BUILDER_2019 #ifdef HAVE_DNN_NGRAPH virtual Ptr initNgraph(const std::vector >& inputs, const std::vector >& nodes) CV_OVERRIDE diff --git a/modules/dnn/src/layers/mvn_layer.cpp b/modules/dnn/src/layers/mvn_layer.cpp index de2b0d5690..b25456b3ce 100644 --- a/modules/dnn/src/layers/mvn_layer.cpp +++ b/modules/dnn/src/layers/mvn_layer.cpp @@ -118,11 +118,7 @@ public: virtual bool supportBackend(int backendId) CV_OVERRIDE { -#ifdef HAVE_DNN_IE_NN_BUILDER_2019 - if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019) - return !zeroDev && (preferableTarget != DNN_TARGET_MYRIAD || eps <= 1e-7f); -#endif -#ifdef HAVE_DNN_NGRAPH +#ifdef HAVE_INF_ENGINE if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) return true; #endif @@ -378,16 +374,6 @@ public: } } -#ifdef HAVE_DNN_IE_NN_BUILDER_2019 - virtual Ptr initInfEngine(const std::vector >&) CV_OVERRIDE - { - InferenceEngine::Builder::MVNLayer ieLayer(name); - ieLayer.setAcrossChannels(acrossChannels); - ieLayer.setNormalize(normVariance); - ieLayer.setEpsilon(eps); - return Ptr(new InfEngineBackendNode(ieLayer)); - } -#endif // HAVE_DNN_IE_NN_BUILDER_2019 #ifdef HAVE_DNN_NGRAPH virtual Ptr initNgraph(const std::vector >& inputs, diff --git a/modules/dnn/src/layers/normalize_bbox_layer.cpp b/modules/dnn/src/layers/normalize_bbox_layer.cpp index 001ea3bd16..37202bf863 100644 --- a/modules/dnn/src/layers/normalize_bbox_layer.cpp +++ b/modules/dnn/src/layers/normalize_bbox_layer.cpp @@ -64,16 +64,15 @@ public: virtual bool supportBackend(int backendId) CV_OVERRIDE { - if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) +#ifdef HAVE_INF_ENGINE + if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) { if (pnorm != 2) return false; - if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && preferableTarget == DNN_TARGET_MYRIAD) - return !acrossSpatial; - return startAxis == 1; } +#endif return backendId == DNN_BACKEND_OPENCV; } @@ -261,56 +260,6 @@ public: } } -#ifdef HAVE_DNN_IE_NN_BUILDER_2019 - virtual Ptr initInfEngine(const std::vector >& inputs) CV_OVERRIDE - { - InferenceEngine::DataPtr input = infEngineDataNode(inputs[0]); - std::vector dims = input->getDims(); - if (dims.size() == 4) - { - 
InferenceEngine::Builder::NormalizeLayer ieLayer(name); - - ieLayer.setChannelShared(false); - ieLayer.setAcrossMaps(acrossSpatial); - ieLayer.setEpsilon(epsilon); - - InferenceEngine::Builder::Layer l = ieLayer; - const int numChannels = dims[1]; - InferenceEngine::Blob::Ptr weights; - if (blobs.empty()) - { - weights = InferenceEngine::make_shared_blob({ - InferenceEngine::Precision::FP32, - {(size_t)numChannels}, InferenceEngine::Layout::C - }); - weights->allocate(); - - Mat weightsMat = infEngineBlobToMat(weights).reshape(1, numChannels); - Mat(numChannels, 1, CV_32F, Scalar(1)).copyTo(weightsMat); - l.getParameters()["channel_shared"] = false; - } - else - { - CV_Assert(numChannels == blobs[0].total()); - weights = wrapToInfEngineBlob(blobs[0], {(size_t)numChannels}, InferenceEngine::Layout::C); - l.getParameters()["channel_shared"] = blobs[0].total() == 1; - } - addConstantData("weights", weights, l); - l.getParameters()["across_spatial"] = acrossSpatial; - return Ptr(new InfEngineBackendNode(l)); - } - else - { - InferenceEngine::Builder::GRNLayer ieLayer(name); - ieLayer.setBeta(epsilon); - - InferenceEngine::Builder::Layer l = ieLayer; - l.getParameters()["bias"] = epsilon; - - return Ptr(new InfEngineBackendNode(l)); - } - } -#endif // HAVE_DNN_IE_NN_BUILDER_2019 #ifdef HAVE_DNN_NGRAPH virtual Ptr initNgraph(const std::vector >& inputs, diff --git a/modules/dnn/src/layers/padding_layer.cpp b/modules/dnn/src/layers/padding_layer.cpp index 9d7ca23714..a5831e6e2d 100644 --- a/modules/dnn/src/layers/padding_layer.cpp +++ b/modules/dnn/src/layers/padding_layer.cpp @@ -96,9 +96,10 @@ public: virtual bool supportBackend(int backendId) CV_OVERRIDE { #ifdef HAVE_INF_ENGINE - if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) + if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) { - if (INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2019R1) && preferableTarget == DNN_TARGET_MYRIAD) + bool isMyriad = preferableTarget == DNN_TARGET_MYRIAD; + if (isMyriad) return dstRanges.size() == 4 && paddings[0].first == 0 && paddings[0].second == 0; return (dstRanges.size() <= 4 || !isArmComputePlugin()); @@ -188,30 +189,6 @@ public: return Ptr(); } -#ifdef HAVE_DNN_IE_NN_BUILDER_2019 - virtual Ptr initInfEngine(const std::vector >&) CV_OVERRIDE - { - InferenceEngine::Builder::Layer ieLayer(name); - ieLayer.setName(name); - ieLayer.setType("Pad"); - - std::vector begins(paddings.size(), 0), ends(paddings.size(), 0); - for (int i = 0; i < paddings.size(); ++i) - { - begins[i] = paddings[i].first; - ends[i] = paddings[i].second; - } - ieLayer.getParameters()["pads_begin"] = begins; - ieLayer.getParameters()["pads_end"] = ends; - ieLayer.getParameters()["pad_mode"] = paddingType; - if (paddingType == "constant") - ieLayer.getParameters()["pad_value"] = paddingValue; - - ieLayer.setInputPorts(std::vector(1)); - ieLayer.setOutputPorts(std::vector(1)); - return Ptr(new InfEngineBackendNode(ieLayer)); - } -#endif #ifdef HAVE_DNN_NGRAPH virtual Ptr initNgraph(const std::vector >& inputs, diff --git a/modules/dnn/src/layers/permute_layer.cpp b/modules/dnn/src/layers/permute_layer.cpp index cb05e4ca52..f997ffa1d1 100644 --- a/modules/dnn/src/layers/permute_layer.cpp +++ b/modules/dnn/src/layers/permute_layer.cpp @@ -106,11 +106,14 @@ public: virtual bool supportBackend(int backendId) CV_OVERRIDE { #ifdef HAVE_INF_ENGINE - if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && preferableTarget == DNN_TARGET_CPU) - return _order.size() <= 4 || 
!isArmComputePlugin(); + if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) + { + if (preferableTarget == DNN_TARGET_CPU) + return _order.size() <= 4 || !isArmComputePlugin(); + return true; + } #endif - return backendId == DNN_BACKEND_OPENCV || - ((backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) && haveInfEngine()); + return backendId == DNN_BACKEND_OPENCV; } bool getMemoryShapes(const std::vector &inputs, @@ -379,14 +382,6 @@ public: } } -#ifdef HAVE_DNN_IE_NN_BUILDER_2019 - virtual Ptr initInfEngine(const std::vector >&) CV_OVERRIDE - { - InferenceEngine::Builder::PermuteLayer ieLayer(name); - ieLayer.setOrder(_order); - return Ptr(new InfEngineBackendNode(ieLayer)); - } -#endif // HAVE_DNN_IE_NN_BUILDER_2019 #ifdef HAVE_DNN_NGRAPH virtual Ptr initNgraph(const std::vector >& inputs, diff --git a/modules/dnn/src/layers/pooling_layer.cpp b/modules/dnn/src/layers/pooling_layer.cpp index 7067270ed6..47a8771388 100644 --- a/modules/dnn/src/layers/pooling_layer.cpp +++ b/modules/dnn/src/layers/pooling_layer.cpp @@ -182,34 +182,13 @@ public: virtual bool supportBackend(int backendId) CV_OVERRIDE { -#ifdef HAVE_DNN_IE_NN_BUILDER_2019 - if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019) - { - if (computeMaxIdx) - return false; - if (kernel_size.size() == 3) - return preferableTarget == DNN_TARGET_CPU; - if (kernel_size.size() == 1) - return false; - if (preferableTarget == DNN_TARGET_MYRIAD) { -#if INF_ENGINE_VER_MAJOR_LE(INF_ENGINE_RELEASE_2019R1) - if (type == MAX && (pads_begin[1] == 1 && pads_begin[0] == 1) && (strides[0] == 2 && strides[1] == 2)) { - return !isMyriadX(); - } -#endif - return type == MAX || type == AVE; - } - else - return type != STOCHASTIC && type != SUM; - } -#endif +#ifdef HAVE_INF_ENGINE if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) { -#ifdef HAVE_DNN_NGRAPH return !computeMaxIdx && type != STOCHASTIC && kernel_size.size() > 1 && (kernel_size.size() != 3 || !isArmComputePlugin()); -#endif } - else if (backendId == DNN_BACKEND_OPENCV) +#endif + if (backendId == DNN_BACKEND_OPENCV) { if (kernel_size.size() == 3) return preferableTarget == DNN_TARGET_CPU; @@ -336,54 +315,6 @@ public: return Ptr(); } -#ifdef HAVE_DNN_IE_NN_BUILDER_2019 - virtual Ptr initInfEngine(const std::vector >&) CV_OVERRIDE - { - if (type == MAX || type == AVE) - { - InferenceEngine::Builder::PoolingLayer ieLayer(name); - - ieLayer.setKernel(kernel_size); - ieLayer.setStrides(strides); - ieLayer.setPaddingsBegin(pads_begin); - ieLayer.setPaddingsEnd(pads_end); - - ieLayer.setPoolingType(type == MAX ? - InferenceEngine::Builder::PoolingLayer::PoolingType::MAX : - InferenceEngine::Builder::PoolingLayer::PoolingType::AVG); - ieLayer.setRoundingType(ceilMode ? - InferenceEngine::Builder::PoolingLayer::RoundingType::CEIL : - InferenceEngine::Builder::PoolingLayer::RoundingType::FLOOR); - ieLayer.setExcludePad(!avePoolPaddedArea); - - InferenceEngine::Builder::Layer l = ieLayer; - if (!padMode.empty()) - l.getParameters()["auto_pad"] = padMode == "VALID" ? 
std::string("valid") : std::string("same_upper"); - return Ptr(new InfEngineBackendNode(l)); - } - else if (type == ROI) - { - InferenceEngine::Builder::ROIPoolingLayer ieLayer(name); - ieLayer.setSpatialScale(spatialScale); - ieLayer.setPooled({pooledSize.height, pooledSize.width}); - ieLayer.setInputPorts(std::vector(2)); - return Ptr(new InfEngineBackendNode(ieLayer)); - } - else if (type == PSROI) - { - InferenceEngine::Builder::PSROIPoolingLayer ieLayer(name); - ieLayer.setSpatialScale(spatialScale); - ieLayer.setOutputDim(psRoiOutChannels); - ieLayer.setGroupSize(pooledSize.width); - ieLayer.setInputPorts(std::vector(2)); - return Ptr(new InfEngineBackendNode(ieLayer)); - } - else - CV_Error(Error::StsNotImplemented, "Unsupported pooling type"); - return Ptr(); - } -#endif // HAVE_DNN_IE_NN_BUILDER_2019 - #ifdef HAVE_DNN_NGRAPH diff --git a/modules/dnn/src/layers/prior_box_layer.cpp b/modules/dnn/src/layers/prior_box_layer.cpp index dd39ce4417..facdc7b2e7 100644 --- a/modules/dnn/src/layers/prior_box_layer.cpp +++ b/modules/dnn/src/layers/prior_box_layer.cpp @@ -288,9 +288,7 @@ public: if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) return _explicitSizes || _stepX == _stepY; #endif - return backendId == DNN_BACKEND_OPENCV || - (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && haveInfEngine() && - ( _explicitSizes || (_minSize.size() == 1 && _maxSize.size() <= 1))); + return backendId == DNN_BACKEND_OPENCV; } bool getMemoryShapes(const std::vector &inputs, @@ -499,67 +497,6 @@ public: } } -#ifdef HAVE_DNN_IE_NN_BUILDER_2019 - virtual Ptr initInfEngine(const std::vector >&) CV_OVERRIDE - { - if (_explicitSizes) - { - InferenceEngine::Builder::PriorBoxClusteredLayer ieLayer(name); - ieLayer.setSteps({_stepY, _stepX}); - - CV_CheckEQ(_offsetsX.size(), (size_t)1, ""); CV_CheckEQ(_offsetsY.size(), (size_t)1, ""); CV_CheckEQ(_offsetsX[0], _offsetsY[0], ""); - ieLayer.setOffset(_offsetsX[0]); - - ieLayer.setClip(_clip); - ieLayer.setFlip(false); // We already flipped aspect ratios. - - InferenceEngine::Builder::Layer l = ieLayer; - - CV_Assert_N(!_boxWidths.empty(), !_boxHeights.empty(), !_variance.empty()); - CV_Assert(_boxWidths.size() == _boxHeights.size()); - l.getParameters()["width"] = _boxWidths; - l.getParameters()["height"] = _boxHeights; - l.getParameters()["variance"] = _variance; - return Ptr(new InfEngineBackendNode(l)); - } - else - { - InferenceEngine::Builder::PriorBoxLayer ieLayer(name); - - CV_Assert(!_explicitSizes); - ieLayer.setMinSize(_minSize[0]); - if (!_maxSize.empty()) - ieLayer.setMaxSize(_maxSize[0]); - - CV_CheckEQ(_offsetsX.size(), (size_t)1, ""); CV_CheckEQ(_offsetsY.size(), (size_t)1, ""); CV_CheckEQ(_offsetsX[0], _offsetsY[0], ""); - ieLayer.setOffset(_offsetsX[0]); - - ieLayer.setClip(_clip); - ieLayer.setFlip(false); // We already flipped aspect ratios. 
- - InferenceEngine::Builder::Layer l = ieLayer; - if (_stepX == _stepY) - { - l.getParameters()["step"] = _stepX; - l.getParameters()["step_h"] = 0.0f; - l.getParameters()["step_w"] = 0.0f; - } - else - { - l.getParameters()["step"] = 0.0f; - l.getParameters()["step_h"] = _stepY; - l.getParameters()["step_w"] = _stepX; - } - if (!_aspectRatios.empty()) - { - l.getParameters()["aspect_ratio"] = _aspectRatios; - } - CV_Assert(!_variance.empty()); - l.getParameters()["variance"] = _variance; - return Ptr(new InfEngineBackendNode(l)); - } - } -#endif // HAVE_DNN_IE_NN_BUILDER_2019 #ifdef HAVE_DNN_NGRAPH virtual Ptr initNgraph(const std::vector >& inputs, const std::vector >& nodes) CV_OVERRIDE diff --git a/modules/dnn/src/layers/proposal_layer.cpp b/modules/dnn/src/layers/proposal_layer.cpp index 69660b8d1b..dff63676d6 100644 --- a/modules/dnn/src/layers/proposal_layer.cpp +++ b/modules/dnn/src/layers/proposal_layer.cpp @@ -95,8 +95,14 @@ public: virtual bool supportBackend(int backendId) CV_OVERRIDE { - return backendId == DNN_BACKEND_OPENCV || - ((backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) && preferableTarget != DNN_TARGET_MYRIAD); +#ifdef HAVE_INF_ENGINE + if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) + { + bool isMyriad = preferableTarget == DNN_TARGET_MYRIAD; + return !isMyriad; + } +#endif + return backendId == DNN_BACKEND_OPENCV; } bool getMemoryShapes(const std::vector &inputs, @@ -332,32 +338,6 @@ public: layerOutputs[0].col(2).copyTo(dst); } -#ifdef HAVE_DNN_IE_NN_BUILDER_2019 - virtual Ptr initInfEngine(const std::vector >&) CV_OVERRIDE - { - InferenceEngine::Builder::ProposalLayer ieLayer(name); - - ieLayer.setBaseSize(baseSize); - ieLayer.setFeatStride(featStride); - ieLayer.setMinSize(16); - ieLayer.setNMSThresh(nmsThreshold); - ieLayer.setPostNMSTopN(keepTopAfterNMS); - ieLayer.setPreNMSTopN(keepTopBeforeNMS); - - std::vector scalesVec(scales.size()); - for (int i = 0; i < scales.size(); ++i) - scalesVec[i] = scales.get(i); - ieLayer.setScale(scalesVec); - - std::vector ratiosVec(ratios.size()); - for (int i = 0; i < ratios.size(); ++i) - ratiosVec[i] = ratios.get(i); - ieLayer.setRatio(ratiosVec); - - return Ptr(new InfEngineBackendNode(ieLayer)); - } -#endif // HAVE_DNN_IE_NN_BUILDER_2019 - #ifdef HAVE_DNN_NGRAPH virtual Ptr initNgraph(const std::vector >& inputs, diff --git a/modules/dnn/src/layers/reorg_layer.cpp b/modules/dnn/src/layers/reorg_layer.cpp index 6961243865..1caa81841b 100644 --- a/modules/dnn/src/layers/reorg_layer.cpp +++ b/modules/dnn/src/layers/reorg_layer.cpp @@ -145,8 +145,11 @@ public: virtual bool supportBackend(int backendId) CV_OVERRIDE { - return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || - backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH; +#ifdef HAVE_INF_ENGINE + if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) + return true; +#endif + return backendId == DNN_BACKEND_OPENCV; } #ifdef HAVE_OPENCL @@ -189,14 +192,6 @@ public: permute->forward(inputs, outputs, internals_arr); } -#ifdef HAVE_DNN_IE_NN_BUILDER_2019 - virtual Ptr initInfEngine(const std::vector >&) CV_OVERRIDE - { - InferenceEngine::Builder::ReorgYoloLayer ieLayer(name); - ieLayer.setStride(reorgStride); - return Ptr(new InfEngineBackendNode(ieLayer)); - } -#endif // HAVE_DNN_IE_NN_BUILDER_2019 #ifdef HAVE_DNN_NGRAPH virtual Ptr initNgraph(const std::vector > &inputs, diff --git a/modules/dnn/src/layers/reshape_layer.cpp 
b/modules/dnn/src/layers/reshape_layer.cpp index 2b7cb40eb5..79539147fd 100644 --- a/modules/dnn/src/layers/reshape_layer.cpp +++ b/modules/dnn/src/layers/reshape_layer.cpp @@ -195,8 +195,11 @@ public: virtual bool supportBackend(int backendId) CV_OVERRIDE { - return backendId == DNN_BACKEND_OPENCV || - ((backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) && haveInfEngine()); +#ifdef HAVE_INF_ENGINE + if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) + return true; +#endif + return backendId == DNN_BACKEND_OPENCV; } bool getMemoryShapes(const std::vector &inputs, @@ -296,15 +299,6 @@ public: } } -#ifdef HAVE_DNN_IE_NN_BUILDER_2019 - virtual Ptr initInfEngine(const std::vector >& inputs) CV_OVERRIDE - { - InferenceEngine::Builder::ReshapeLayer ieLayer(name); - CV_Assert(outShapes.size() == 1); - ieLayer.setDims(outShapes[0]); - return Ptr(new InfEngineBackendNode(ieLayer)); - } -#endif // HAVE_DNN_IE_NN_BUILDER_2019 #ifdef HAVE_DNN_NGRAPH virtual Ptr initNgraph(const std::vector >& inputs, diff --git a/modules/dnn/src/layers/resize_layer.cpp b/modules/dnn/src/layers/resize_layer.cpp index 8b7d802ab2..5a21fcdb8c 100644 --- a/modules/dnn/src/layers/resize_layer.cpp +++ b/modules/dnn/src/layers/resize_layer.cpp @@ -66,7 +66,7 @@ public: virtual bool supportBackend(int backendId) CV_OVERRIDE { #ifdef HAVE_INF_ENGINE - if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) + if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) { return (interpolation == "nearest" && scaleWidth == scaleHeight) || (interpolation == "bilinear"); @@ -226,37 +226,6 @@ public: CV_Error(Error::StsNotImplemented, "Unknown interpolation: " + interpolation); } -#ifdef HAVE_DNN_IE_NN_BUILDER_2019 - virtual Ptr initInfEngine(const std::vector >&) CV_OVERRIDE - { - InferenceEngine::Builder::Layer ieLayer(name); - ieLayer.setName(name); - if (interpolation == "nearest") - { - ieLayer.setType("Resample"); - ieLayer.getParameters()["type"] = std::string("caffe.ResampleParameter.NEAREST"); - ieLayer.getParameters()["antialias"] = false; - if (scaleWidth != scaleHeight) - CV_Error(Error::StsNotImplemented, "resample with sw != sh"); - ieLayer.getParameters()["factor"] = 1.0f / scaleWidth; - } - else if (interpolation == "bilinear") - { - ieLayer.setType("Interp"); - ieLayer.getParameters()["pad_beg"] = 0; - ieLayer.getParameters()["pad_end"] = 0; - ieLayer.getParameters()["align_corners"] = alignCorners; - } - else - CV_Error(Error::StsNotImplemented, "Unsupported interpolation: " + interpolation); - ieLayer.getParameters()["width"] = outWidth; - ieLayer.getParameters()["height"] = outHeight; - ieLayer.setInputPorts(std::vector(1)); - ieLayer.setOutputPorts(std::vector(1)); - return Ptr(new InfEngineBackendNode(ieLayer)); - } -#endif // HAVE_DNN_IE_NN_BUILDER_2019 - #ifdef HAVE_DNN_NGRAPH virtual Ptr initNgraph(const std::vector >& inputs, diff --git a/modules/dnn/src/layers/scale_layer.cpp b/modules/dnn/src/layers/scale_layer.cpp index e8a01672ad..594b0bb624 100644 --- a/modules/dnn/src/layers/scale_layer.cpp +++ b/modules/dnn/src/layers/scale_layer.cpp @@ -53,9 +53,12 @@ public: virtual bool supportBackend(int backendId) CV_OVERRIDE { - return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_HALIDE || - (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && axis == 1 && !blobs.empty()) || - (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && axis > 0); +#ifdef HAVE_INF_ENGINE + if 
(backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) + return axis > 0; +#endif + return backendId == DNN_BACKEND_OPENCV || + backendId == DNN_BACKEND_HALIDE; } void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr) CV_OVERRIDE @@ -197,34 +200,6 @@ public: } #endif // HAVE_HALIDE -#ifdef HAVE_DNN_IE_NN_BUILDER_2019 - virtual Ptr initInfEngine(const std::vector >&) CV_OVERRIDE - { - InferenceEngine::Builder::Layer l = InferenceEngine::Builder::ScaleShiftLayer(name); - - CV_Assert(!blobs.empty()); - const size_t numChannels = blobs[0].total(); - if (hasWeights) - { - addConstantData("weights", wrapToInfEngineBlob(blobs[0], {numChannels}, InferenceEngine::Layout::C), l); - } - else - { - auto weights = InferenceEngine::make_shared_blob({ - InferenceEngine::Precision::FP32, {(size_t)numChannels}, - InferenceEngine::Layout::C - }); - weights->allocate(); - float* buf = weights->buffer().as(); - std::fill(buf, buf + numChannels, 1); - addConstantData("weights", weights, l); - } - if (hasBias) - addConstantData("biases", wrapToInfEngineBlob(blobs.back(), {numChannels}, InferenceEngine::Layout::C), l); - return Ptr(new InfEngineBackendNode(l)); - } -#endif // HAVE_DNN_IE_NN_BUILDER_2019 - #ifdef HAVE_DNN_NGRAPH virtual Ptr initNgraph(const std::vector >& inputs, const std::vector >& nodes) CV_OVERRIDE diff --git a/modules/dnn/src/layers/slice_layer.cpp b/modules/dnn/src/layers/slice_layer.cpp index a470772813..450e5b2e5f 100644 --- a/modules/dnn/src/layers/slice_layer.cpp +++ b/modules/dnn/src/layers/slice_layer.cpp @@ -160,12 +160,7 @@ public: virtual bool supportBackend(int backendId) CV_OVERRIDE { -#ifdef HAVE_DNN_IE_NN_BUILDER_2019 - if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019) - return INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2019R1) && - sliceRanges.size() == 1 && sliceRanges[0].size() == 4 && !hasSteps; -#endif -#ifdef HAVE_DNN_NGRAPH +#ifdef HAVE_INF_ENGINE if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) return sliceRanges.size() == 1 && !hasSteps; #endif @@ -557,62 +552,6 @@ public: } } -#ifdef HAVE_DNN_IE_NN_BUILDER_2019 -#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2019R1) - virtual Ptr initInfEngine(const std::vector >& inputs) CV_OVERRIDE - { - CV_Assert_N(finalSliceRanges.size() == 1, inputs.size() <= 2); - - std::vector axes, offsets, dims; - int from, to, step; - int numDims = finalSliceRanges[0].size(); - if (preferableTarget == DNN_TARGET_MYRIAD) - { - from = axis; - to = numDims; - step = 1; - } - else - { - from = numDims - 1; - to = axis - 1; - step = -1; - } - for (int i = from; i != to; i += step) - { - axes.push_back(i); - offsets.push_back(finalSliceRanges[0][i].start); - dims.push_back(finalSliceRanges[0][i].size()); - } - - InferenceEngine::Builder::Layer ieLayer(name); - ieLayer.setName(name); - ieLayer.setType("Crop"); - ieLayer.getParameters()["axis"] = axes; - ieLayer.getParameters()["dim"] = dims; - ieLayer.getParameters()["offset"] = offsets; - ieLayer.setInputPorts(std::vector(2)); - ieLayer.setOutputPorts(std::vector(1)); - - if (inputs.size() != 2) - { - std::vector outShape(numDims); - for (int i = 0; i < numDims; ++i) - outShape[i] = finalSliceRanges[0][i].size(); - - ieLayer.getInputPorts()[1].setParameter("type", "weights"); - - auto shapeSource = InferenceEngine::make_shared_blob({ - InferenceEngine::Precision::FP32, outShape, - InferenceEngine::Layout::ANY - }); - shapeSource->allocate(); - addConstantData("weights", shapeSource, ieLayer); - } - return Ptr(new 
InfEngineBackendNode(ieLayer)); - } -#endif -#endif #ifdef HAVE_DNN_NGRAPH virtual Ptr initNgraph(const std::vector >& inputs, diff --git a/modules/dnn/src/layers/softmax_layer.cpp b/modules/dnn/src/layers/softmax_layer.cpp index 29acd06402..732a20744b 100644 --- a/modules/dnn/src/layers/softmax_layer.cpp +++ b/modules/dnn/src/layers/softmax_layer.cpp @@ -90,10 +90,12 @@ public: virtual bool supportBackend(int backendId) CV_OVERRIDE { +#ifdef HAVE_INF_ENGINE + if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) + return true; +#endif return backendId == DNN_BACKEND_OPENCV || - (backendId == DNN_BACKEND_HALIDE && haveHalide() && axisRaw == 1) || - backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH || - (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && haveInfEngine() && !logSoftMax); + (backendId == DNN_BACKEND_HALIDE && haveHalide() && axisRaw == 1); } #ifdef HAVE_OPENCL @@ -312,17 +314,6 @@ public: return Ptr(); } -#ifdef HAVE_DNN_IE_NN_BUILDER_2019 - virtual Ptr initInfEngine(const std::vector >& inputs) CV_OVERRIDE - { - InferenceEngine::DataPtr input = infEngineDataNode(inputs[0]); - - InferenceEngine::Builder::SoftMaxLayer ieLayer(name); - ieLayer.setAxis(normalize_axis(axisRaw, input->getDims().size())); - - return Ptr(new InfEngineBackendNode(ieLayer)); - } -#endif // HAVE_DNN_IE_NN_BUILDER_2019 #ifdef HAVE_DNN_NGRAPH virtual Ptr initNgraph(const std::vector >& inputs, diff --git a/modules/dnn/src/op_inf_engine.cpp b/modules/dnn/src/op_inf_engine.cpp index 41783d10b5..34c2fc1b55 100644 --- a/modules/dnn/src/op_inf_engine.cpp +++ b/modules/dnn/src/op_inf_engine.cpp @@ -20,52 +20,17 @@ namespace cv { namespace dnn { #ifdef HAVE_INF_ENGINE -static Backend parseInferenceEngineBackendType(const cv::String& backend) -{ - CV_Assert(!backend.empty()); - if (backend == CV_DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) - return DNN_BACKEND_INFERENCE_ENGINE_NGRAPH; - if (backend == CV_DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_API) - return DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019; - CV_Error(Error::StsBadArg, cv::format("Unknown IE backend: %s", backend.c_str())); -} -static const char* dumpInferenceEngineBackendType(Backend backend) -{ - if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) - return CV_DNN_BACKEND_INFERENCE_ENGINE_NGRAPH; - if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019) - return CV_DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_API; - CV_Error(Error::StsBadArg, cv::format("Invalid backend ID for IE: %d", backend)); -} -Backend& getInferenceEngineBackendTypeParam() -{ - static Backend param = parseInferenceEngineBackendType( - utils::getConfigurationParameterString("OPENCV_DNN_BACKEND_INFERENCE_ENGINE_TYPE", -#ifdef HAVE_DNN_NGRAPH - CV_DNN_BACKEND_INFERENCE_ENGINE_NGRAPH -#elif defined(HAVE_DNN_IE_NN_BUILDER_2019) - CV_DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_API -#else -#error "Build configuration error: nGraph or NN Builder API backend should be enabled" -#endif - ) - ); - return param; -} - CV__DNN_EXPERIMENTAL_NS_BEGIN cv::String getInferenceEngineBackendType() { - return dumpInferenceEngineBackendType(getInferenceEngineBackendTypeParam()); + return "NGRAPH"; } cv::String setInferenceEngineBackendType(const cv::String& newBackendType) { - Backend newBackend = parseInferenceEngineBackendType(newBackendType); - Backend& param = getInferenceEngineBackendTypeParam(); - Backend old = param; - param = newBackend; - return dumpInferenceEngineBackendType(old); + if (newBackendType != "NGRAPH") + CV_Error(Error::StsNotImplemented, cv::format("DNN/IE: only NGRAPH backend is 
supported: %s", newBackendType.c_str())); + return newBackendType; } CV__DNN_EXPERIMENTAL_NS_END @@ -98,504 +63,6 @@ void infEngineBlobsToMats(const std::vector& blobs, } -#ifdef HAVE_DNN_IE_NN_BUILDER_2019 - -// For networks with input layer which has an empty name, IE generates a name id[some_number]. -// OpenCV lets users use an empty input name and to prevent unexpected naming, -// we can use some predefined name. -static std::string kDefaultInpLayerName = "empty_inp_layer_name"; -static std::string kOpenCVLayersType = "OpenCVLayer"; - -static std::string shapesToStr(const std::vector& mats) -{ - std::ostringstream shapes; - shapes << mats.size() << " "; - for (const Mat& m : mats) - { - shapes << m.dims << " "; - for (int i = 0; i < m.dims; ++i) - shapes << m.size[i] << " "; - } - return shapes.str(); -} - -static void strToShapes(const std::string& str, std::vector >& shapes) -{ - std::istringstream ss(str); - int num, dims; - ss >> num; - shapes.resize(num); - for (int i = 0; i < num; ++i) - { - ss >> dims; - shapes[i].resize(dims); - for (int j = 0; j < dims; ++j) - ss >> shapes[i][j]; - } -} - -class InfEngineCustomLayer : public InferenceEngine::ILayerExecImpl -{ -public: - explicit InfEngineCustomLayer(const InferenceEngine::CNNLayer& layer) : cnnLayer(layer) - { - std::istringstream iss(layer.GetParamAsString("impl")); - size_t ptr; - iss >> ptr; - cvLayer = (Layer*)ptr; - - std::vector > shapes; - strToShapes(layer.GetParamAsString("internals"), shapes); - internals.resize(shapes.size()); - for (int i = 0; i < shapes.size(); ++i) - internals[i].create(std::vector(shapes[i].begin(), shapes[i].end()), CV_32F); - } - - virtual InferenceEngine::StatusCode execute(std::vector& inputs, - std::vector& outputs, - InferenceEngine::ResponseDesc *resp) noexcept - { - std::vector inpMats, outMats; - infEngineBlobsToMats(inputs, inpMats); - infEngineBlobsToMats(outputs, outMats); - - try - { - cvLayer->forward(inpMats, outMats, internals); - return InferenceEngine::StatusCode::OK; - } - catch (...) 
- { - return InferenceEngine::StatusCode::GENERAL_ERROR; - } - } - - virtual InferenceEngine::StatusCode - getSupportedConfigurations(std::vector& conf, - InferenceEngine::ResponseDesc* resp) noexcept - { - std::vector inDataConfig; - std::vector outDataConfig; - for (auto& it : cnnLayer.insData) - { - InferenceEngine::DataConfig conf; - conf.desc = it.lock()->getTensorDesc(); - inDataConfig.push_back(conf); - } - - for (auto& it : cnnLayer.outData) - { - InferenceEngine::DataConfig conf; - conf.desc = it->getTensorDesc(); - outDataConfig.push_back(conf); - } - - InferenceEngine::LayerConfig layerConfig; - layerConfig.inConfs = inDataConfig; - layerConfig.outConfs = outDataConfig; - - conf.push_back(layerConfig); - return InferenceEngine::StatusCode::OK; - } - - InferenceEngine::StatusCode init(InferenceEngine::LayerConfig& config, - InferenceEngine::ResponseDesc *resp) noexcept - { - return InferenceEngine::StatusCode::OK; - } - -private: - InferenceEngine::CNNLayer cnnLayer; - dnn::Layer* cvLayer; - std::vector internals; -}; - -class InfEngineCustomLayerShapeInfer : public InferenceEngine::IShapeInferImpl -{ -public: - InferenceEngine::StatusCode - inferShapes(const std::vector& inBlobs, - const std::map& params, - const std::map& blobs, - std::vector& outShapes, - InferenceEngine::ResponseDesc* desc) noexcept override - { - strToShapes(params.at("outputs"), outShapes); - return InferenceEngine::StatusCode::OK; - } -}; - -class InfEngineCustomLayerFactory : public InferenceEngine::ILayerImplFactory { -public: - explicit InfEngineCustomLayerFactory(const InferenceEngine::CNNLayer* layer) : cnnLayer(*layer) {} - - InferenceEngine::StatusCode - getImplementations(std::vector& impls, - InferenceEngine::ResponseDesc* resp) noexcept override { - impls.push_back(std::make_shared(cnnLayer)); - return InferenceEngine::StatusCode::OK; - } - -private: - InferenceEngine::CNNLayer cnnLayer; -}; - -InferenceEngine::StatusCode InfEngineExtension::getFactoryFor( - InferenceEngine::ILayerImplFactory*& factory, - const InferenceEngine::CNNLayer* cnnLayer, - InferenceEngine::ResponseDesc* resp -) noexcept -{ - if (cnnLayer->type != kOpenCVLayersType) - return InferenceEngine::StatusCode::NOT_IMPLEMENTED; - factory = new InfEngineCustomLayerFactory(cnnLayer); - return InferenceEngine::StatusCode::OK; -} - -InfEngineBackendNode::InfEngineBackendNode(const InferenceEngine::Builder::Layer& _layer) - : BackendNode(DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019), layer(_layer) {} - - InfEngineBackendNode::InfEngineBackendNode(Ptr& cvLayer_, std::vector& inputs, - std::vector& outputs, - std::vector& internals) - : BackendNode(DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019), layer(cvLayer_->name), - cvLayer(cvLayer_) -{ - CV_Assert(!cvLayer->name.empty()); - layer.setName(cvLayer->name); - layer.setType(kOpenCVLayersType); - layer.getParameters()["impl"] = (size_t)cvLayer.get(); - layer.getParameters()["outputs"] = shapesToStr(outputs); - layer.getParameters()["internals"] = shapesToStr(internals); - layer.setInputPorts(std::vector(inputs.size())); - layer.setOutputPorts(std::vector(outputs.size())); -} - -static std::vector > -infEngineWrappers(const std::vector >& ptrs) -{ - std::vector > wrappers(ptrs.size()); - for (int i = 0; i < ptrs.size(); ++i) - { - CV_Assert(!ptrs[i].empty()); - wrappers[i] = ptrs[i].dynamicCast(); - CV_Assert(!wrappers[i].empty()); - } - return wrappers; -} - -InfEngineBackendNet::InfEngineBackendNet() : netBuilder("") -{ - hasNetOwner = false; - device_name = "CPU"; -} - 
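With the Builder-based InfEngineBackendNet machinery below on its way out, Inference Engine is reachable only through the public Net API, and DNN_BACKEND_INFERENCE_ENGINE now always means the nGraph flavour. A minimal usage sketch; the IR pair and input image are placeholders, not files from this patch:

```cpp
#include <opencv2/dnn.hpp>
#include <opencv2/imgcodecs.hpp>

int main()
{
    // Placeholder Model Optimizer IR; any converted model works the same way.
    cv::dnn::Net net = cv::dnn::readNetFromModelOptimizer("model.xml", "model.bin");
    net.setPreferableBackend(cv::dnn::DNN_BACKEND_INFERENCE_ENGINE); // nGraph-only now
    net.setPreferableTarget(cv::dnn::DNN_TARGET_CPU);

    cv::Mat blob = cv::dnn::blobFromImage(cv::imread("input.jpg"));
    net.setInput(blob);
    cv::Mat out = net.forward(); // unsupported layers fall back to the OpenCV backend
    return 0;
}
```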
-InfEngineBackendNet::InfEngineBackendNet(InferenceEngine::CNNNetwork& net) : netBuilder(""), cnn(net) -{ - hasNetOwner = true; - device_name = "CPU"; -} - -void InfEngineBackendNet::connect(const std::vector >& inputs, - const std::vector >& outputs, - const std::string& layerName) -{ - std::vector > inpWrappers = infEngineWrappers(inputs); - std::map::iterator it = layers.find(layerName); - CV_Assert(it != layers.end()); - - const int layerId = it->second; - for (size_t i = 0; i < inpWrappers.size(); ++i) - { - const auto& inp = inpWrappers[i]; - const std::string& inpName = inp->dataPtr->getName(); - - std::string inpLayerName = inpName; - size_t inpPortId = inpName.rfind('.'); - if (inpPortId != std::string::npos) - { - std::string portIdStr = inpName.substr(inpPortId + 1); - if (std::all_of(portIdStr.begin(), portIdStr.end(), ::isdigit)) - { - inpLayerName = inpName.substr(0, inpPortId); - inpPortId = atoi(portIdStr.c_str()); - } - else - inpPortId = 0; - } - else - inpPortId = 0; - - int inpId; - it = layers.find(inpLayerName); - if (it == layers.end()) - { - InferenceEngine::Builder::InputLayer inpLayer(!inpLayerName.empty() ? inpLayerName : kDefaultInpLayerName); - std::vector shape(inp->blob->getTensorDesc().getDims()); - inpLayer.setPort(InferenceEngine::Port(shape)); - inpId = netBuilder.addLayer(inpLayer); - - layers.insert({inpName, inpId}); - } - else - inpId = it->second; - - netBuilder.connect({(size_t)inpId, inpPortId}, {(size_t)layerId, i}); - unconnectedPorts.erase({inpId, inpPortId}); - } - CV_Assert(!outputs.empty()); - for (int i = 0; i < outputs.size(); ++i) - { - InferenceEngine::DataPtr dataPtr = infEngineDataNode(outputs[i]); - std::string outputName = outputs.size() > 1 ? (layerName + "." + std::to_string(i)) : layerName; -#if INF_ENGINE_VER_MAJOR_LE(INF_ENGINE_RELEASE_2019R1) - dataPtr->name = outputName; -#else - dataPtr->setName(outputName); -#endif - } -} - -void InfEngineBackendNet::init(Target targetId) -{ - if (!hasNetOwner) - { - CV_Assert(!unconnectedPorts.empty()); - for (const auto& port : unconnectedPorts) - { - InferenceEngine::Builder::OutputLayer outLayer("myconv1"); -#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2019R1) - // Inference Engine determines network precision by ports. - InferenceEngine::Precision p = (targetId == DNN_TARGET_MYRIAD || - targetId == DNN_TARGET_OPENCL_FP16) ? 
- InferenceEngine::Precision::FP16 : - InferenceEngine::Precision::FP32; - outLayer.setPort(InferenceEngine::Port({}, p)); -#endif - netBuilder.addLayer({InferenceEngine::PortInfo(port.first, port.second)}, outLayer); - } - netBuilder.getContext().addShapeInferImpl(kOpenCVLayersType, - std::make_shared()); - cnn = InferenceEngine::CNNNetwork(InferenceEngine::Builder::convertToICNNNetwork(netBuilder.build())); - } - - switch (targetId) - { - case DNN_TARGET_CPU: - device_name = "CPU"; - break; - case DNN_TARGET_OPENCL: - case DNN_TARGET_OPENCL_FP16: - device_name = "GPU"; - break; - case DNN_TARGET_MYRIAD: - device_name = "MYRIAD"; - break; - case DNN_TARGET_FPGA: - device_name = "FPGA"; - break; - default: - CV_Error(Error::StsNotImplemented, "Unknown target"); - }; - - for (const auto& name : requestedOutputs) - { - cnn.addOutput(name); - } - - for (const auto& it : cnn.getInputsInfo()) - { - const std::string& name = it.first; - auto blobIt = allBlobs.find(name); - CV_Assert(blobIt != allBlobs.end()); - it.second->setPrecision(blobIt->second->getTensorDesc().getPrecision()); - } - for (const auto& it : cnn.getOutputsInfo()) - { - const std::string& name = it.first; - auto blobIt = allBlobs.find(name); - CV_Assert(blobIt != allBlobs.end()); - it.second->setPrecision(blobIt->second->getTensorDesc().getPrecision()); // Should be always FP32 - } - - initPlugin(cnn); -} - -void InfEngineBackendNet::addLayer(InferenceEngine::Builder::Layer& layer) -{ -#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2019R1) - // Add weights to network and connect them after input blobs. - std::map& params = layer.getParameters(); - std::vector blobsIds; - std::vector portIds; - for (const std::string& name : {"weights", "biases"}) - { - bool asInput = false; - int portId = 0; - for (int i = 0; i < layer.getInputPorts().size(); ++i) - { - const auto& port = layer.getInputPorts()[i]; - auto it = port.getParameters().find("type"); - if (it != port.getParameters().end() && it->second == name) - { - portId = i; - asInput = true; - break; - } - } - - if (!asInput) - continue; - - auto it = params.find(name); - if (it != params.end()) - { - InferenceEngine::Blob::Ptr blob = it->second.as(); - params.erase(it); - int blobId = netBuilder.addLayer(InferenceEngine::Builder::ConstLayer(name).setData(blob)); - blobsIds.push_back(blobId); - portIds.push_back(portId); - } - } -#endif - - int id = netBuilder.addLayer(layer); - const std::string& layerName = layer.getName(); - - CV_Assert(layers.insert({layerName, id}).second); - for (int i = 0; i < layer.getOutputPorts().size(); ++i) - unconnectedPorts.insert({id, i}); - -#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2019R1) - // By default, all the weights are connected to last ports ids. 
- for (int i = 0; i < blobsIds.size(); ++i) - { - netBuilder.connect((size_t)blobsIds[i], {(size_t)id, (size_t)portIds[i]}); - } -#endif -} - -void InfEngineBackendNet::addOutput(const std::string& name) -{ - requestedOutputs.push_back(name); -} - -static InferenceEngine::Layout estimateLayout(const Mat& m) -{ - if (m.dims == 4) - return InferenceEngine::Layout::NCHW; - else if (m.dims == 2) - return InferenceEngine::Layout::NC; - else - return InferenceEngine::Layout::ANY; -} - -static InferenceEngine::DataPtr wrapToInfEngineDataNode(const Mat& m, const std::string& name = "") -{ - std::vector shape = getShape(m); - if (m.type() == CV_32F) - return InferenceEngine::DataPtr(new InferenceEngine::Data(name, - {InferenceEngine::Precision::FP32, shape, estimateLayout(m)})); - else if (m.type() == CV_8U) - return InferenceEngine::DataPtr(new InferenceEngine::Data(name, - {InferenceEngine::Precision::U8, shape, estimateLayout(m)})); - else - CV_Error(Error::StsNotImplemented, format("Unsupported data type %d", m.type())); -} - -InferenceEngine::Blob::Ptr wrapToInfEngineBlob(const Mat& m, const std::vector& shape, - InferenceEngine::Layout layout) -{ - if (m.type() == CV_32F) - return InferenceEngine::make_shared_blob( - {InferenceEngine::Precision::FP32, shape, layout}, (float*)m.data); - else if (m.type() == CV_8U) - return InferenceEngine::make_shared_blob( - {InferenceEngine::Precision::U8, shape, layout}, (uint8_t*)m.data); - else - CV_Error(Error::StsNotImplemented, format("Unsupported data type %d", m.type())); -} - -InferenceEngine::Blob::Ptr wrapToInfEngineBlob(const Mat& m, InferenceEngine::Layout layout) -{ - std::vector shape = getShape(m); - return wrapToInfEngineBlob(m, shape, layout); -} - -InferenceEngine::Blob::Ptr cloneBlob(const InferenceEngine::Blob::Ptr& blob) -{ - InferenceEngine::Blob::Ptr copy; - auto description = blob->getTensorDesc(); - InferenceEngine::Precision precision = description.getPrecision(); - if (precision == InferenceEngine::Precision::FP32) - { - copy = InferenceEngine::make_shared_blob(description); - } - else if (precision == InferenceEngine::Precision::U8) - { - copy = InferenceEngine::make_shared_blob(description); - } - else - CV_Error(Error::StsNotImplemented, "Unsupported blob precision"); - copy->allocate(); - return copy; -} - -InferenceEngine::DataPtr infEngineDataNode(const Ptr& ptr) -{ - CV_Assert(!ptr.empty()); - Ptr p = ptr.dynamicCast(); - CV_Assert(!p.empty()); - return p->dataPtr; -} - -InfEngineBackendWrapper::InfEngineBackendWrapper(int targetId, const cv::Mat& m) - : BackendWrapper(DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019, targetId) -{ - dataPtr = wrapToInfEngineDataNode(m); - blob = wrapToInfEngineBlob(m, estimateLayout(m)); -} - -InfEngineBackendWrapper::InfEngineBackendWrapper(Ptr wrapper) - : BackendWrapper(DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019, wrapper->targetId) -{ - Ptr ieWrapper = wrapper.dynamicCast(); - CV_Assert(!ieWrapper.empty()); - InferenceEngine::DataPtr srcData = ieWrapper->dataPtr; - - dataPtr = InferenceEngine::DataPtr(new InferenceEngine::Data(srcData->getName(), srcData->getTensorDesc())); - blob = ieWrapper->blob; -} - -Ptr InfEngineBackendWrapper::create(Ptr wrapper) -{ - return Ptr(new InfEngineBackendWrapper(wrapper)); -} - -InfEngineBackendWrapper::~InfEngineBackendWrapper() -{ - -} - -void InfEngineBackendWrapper::copyToHost() -{ - -} - -void InfEngineBackendWrapper::setHostDirty() -{ - -} - -#endif // HAVE_DNN_IE_NN_BUILDER_2019 - -#if INF_ENGINE_VER_MAJOR_LE(INF_ENGINE_RELEASE_2019R1) -static 
std::map& getSharedPlugins() -{ - static std::map sharedPlugins; - return sharedPlugins; -} -#else static bool init_IE_plugins() { // load and hold IE plugins @@ -649,7 +116,7 @@ InferenceEngine::Core& getCore(const std::string& id) : create_IE_Core_instance(id); return core; } -#endif + static bool detectArmPlugin_() { @@ -671,7 +138,7 @@ static bool detectArmPlugin_() static bool detectMyriadX_() { AutoLock lock(getInitializationMutex()); -#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2019R3) + // Lightweight detection InferenceEngine::Core& ie = getCore("MYRIAD"); const std::vector devices = ie.GetAvailableDevices(); @@ -685,481 +152,22 @@ static bool detectMyriadX_() } } return false; -#else - InferenceEngine::Builder::Network builder(""); - InferenceEngine::idx_t inpId = builder.addLayer( - InferenceEngine::Builder::InputLayer().setPort(InferenceEngine::Port({1}))); - -#if INF_ENGINE_RELEASE <= 2018050000 - InferenceEngine::idx_t clampId; - { - InferenceEngine::Builder::Layer l = InferenceEngine::Builder::ClampLayer(); - auto& blobs = l.getConstantData(); - auto blob = InferenceEngine::make_shared_blob( - InferenceEngine::Precision::FP16, - InferenceEngine::Layout::C, {1}); - blob->allocate(); - blobs[""] = blob; - clampId = builder.addLayer({inpId}, l); - } - builder.addLayer({InferenceEngine::PortInfo(clampId)}, InferenceEngine::Builder::OutputLayer()); -#else - - InferenceEngine::idx_t clampId = builder.addLayer({inpId}, InferenceEngine::Builder::ClampLayer()); - builder.addLayer({InferenceEngine::PortInfo(clampId)}, - InferenceEngine::Builder::OutputLayer().setPort(InferenceEngine::Port({}, - InferenceEngine::Precision::FP16))); -#endif - - InferenceEngine::CNNNetwork cnn = InferenceEngine::CNNNetwork( - InferenceEngine::Builder::convertToICNNNetwork(builder.build())); - -#if INF_ENGINE_VER_MAJOR_LE(INF_ENGINE_RELEASE_2019R1) - InferenceEngine::InferenceEnginePluginPtr enginePtr; - { - auto& sharedPlugins = getSharedPlugins(); - auto pluginIt = sharedPlugins.find("MYRIAD"); - if (pluginIt != sharedPlugins.end()) { - enginePtr = pluginIt->second; - } else { - auto dispatcher = InferenceEngine::PluginDispatcher({""}); - enginePtr = dispatcher.getPluginByDevice("MYRIAD"); - sharedPlugins["MYRIAD"] = enginePtr; - } - } - auto plugin = InferenceEngine::InferencePlugin(enginePtr); - try - { - auto netExec = plugin.LoadNetwork(cnn, {{"VPU_PLATFORM", "VPU_2480"}}); -#else - try - { -#if INF_ENGINE_VER_MAJOR_LE(INF_ENGINE_RELEASE_2019R3) - auto netExec = getCore("MYRIAD").LoadNetwork(cnn, "MYRIAD", {{"VPU_PLATFORM", "VPU_2480"}}); -#else - auto netExec = getCore("MYRIAD").LoadNetwork(cnn, "MYRIAD", {{"VPU_MYRIAD_PLATFORM", "VPU_MYRIAD_2480"}}); -#endif -#endif - auto infRequest = netExec.CreateInferRequest(); - } catch(...) 
{ - return false; - } - return true; -#endif } #endif // !defined(OPENCV_DNN_IE_VPU_TYPE_DEFAULT) -#ifdef HAVE_DNN_IE_NN_BUILDER_2019 - -void InfEngineBackendNet::initPlugin(InferenceEngine::CNNNetwork& net) -{ - CV_Assert(!isInitialized()); - - try - { - AutoLock lock(getInitializationMutex()); -#if INF_ENGINE_VER_MAJOR_LE(INF_ENGINE_RELEASE_2019R1) - auto& sharedPlugins = getSharedPlugins(); - auto pluginIt = sharedPlugins.find(device_name); - if (pluginIt != sharedPlugins.end()) - { - enginePtr = pluginIt->second; - } - else -#else - InferenceEngine::Core& ie = getCore(device_name); -#endif - { -#if INF_ENGINE_VER_MAJOR_LE(INF_ENGINE_RELEASE_2019R1) - auto dispatcher = InferenceEngine::PluginDispatcher({""}); - if (device_name == "FPGA") - enginePtr = dispatcher.getPluginByDevice("HETERO:FPGA,CPU"); - else - enginePtr = dispatcher.getPluginByDevice(device_name); - sharedPlugins[device_name] = enginePtr; -#else - isInit = true; -#endif - std::vector candidates; - std::string param_pluginPath = utils::getConfigurationParameterString("OPENCV_DNN_IE_EXTRA_PLUGIN_PATH", ""); - if (!param_pluginPath.empty()) - { - candidates.push_back(param_pluginPath); - } -#if INF_ENGINE_VER_MAJOR_LE(INF_ENGINE_RELEASE_2019R3) - if (device_name == "CPU" || device_name == "FPGA") - { - std::string suffixes[] = {"_avx2", "_sse4", ""}; - bool haveFeature[] = { - checkHardwareSupport(CPU_AVX2), - checkHardwareSupport(CPU_SSE4_2), - true - }; - for (int i = 0; i < 3; ++i) - { - if (!haveFeature[i]) - continue; -#ifdef _WIN32 - candidates.push_back("cpu_extension" + suffixes[i] + ".dll"); -#elif defined(__APPLE__) - candidates.push_back("libcpu_extension" + suffixes[i] + ".so"); // built as loadable module - candidates.push_back("libcpu_extension" + suffixes[i] + ".dylib"); // built as shared library -#else - candidates.push_back("libcpu_extension" + suffixes[i] + ".so"); -#endif // _WIN32 - } - } -#endif - bool found = false; - for (size_t i = 0; i != candidates.size(); ++i) - { - const std::string& libName = candidates[i]; - try - { - InferenceEngine::IExtensionPtr extension = - InferenceEngine::make_so_pointer(libName); - -#if INF_ENGINE_VER_MAJOR_LE(INF_ENGINE_RELEASE_2019R1) - enginePtr->AddExtension(extension, 0); -#else - ie.AddExtension(extension, "CPU"); -#endif - CV_LOG_INFO(NULL, "DNN-IE: Loaded extension plugin: " << libName); - found = true; - break; - } - catch(...) {} - } - if (!found && !candidates.empty()) - { - CV_LOG_WARNING(NULL, "DNN-IE: Can't load extension plugin (extra layers for some networks). Specify path via OPENCV_DNN_IE_EXTRA_PLUGIN_PATH parameter"); - } - // Some of networks can work without a library of extra layers. -#if INF_ENGINE_VER_MAJOR_GT(INF_ENGINE_RELEASE_2019R1) - // OpenCV fallbacks as extensions. - try - { - ie.AddExtension(std::make_shared(), "CPU"); - } - catch(const std::exception& e) - { - CV_LOG_INFO(NULL, "DNN-IE: Can't register OpenCV custom layers extension: " << e.what()); - } -#endif - // Limit the number of CPU threads. 
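The branch just deleted above probed for Myriad X by assembling and loading a one-layer Builder network on the VPU; the branch that survives (shown before it) merely asks the plugin for device names. A sketch of that retained probe, with the std::vector element type (stripped during extraction) restored by hand; the exact substring matched is an assumption, not verbatim source:

```cpp
// Lightweight Myriad X detection kept by this patch (sketch).
static bool detectMyriadX_sketch()
{
    InferenceEngine::Core& ie = getCore("MYRIAD"); // cached Core from this file
    const std::vector<std::string> devices = ie.GetAvailableDevices();
    for (const std::string& device : devices)
    {
        const std::string name =
            ie.GetMetric(device, METRIC_KEY(FULL_DEVICE_NAME)).as<std::string>();
        if (name.find("MyriadX") != std::string::npos) // assumed marker string
            return true;
    }
    return false;
}
```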
-#if INF_ENGINE_VER_MAJOR_LE(INF_ENGINE_RELEASE_2019R1) -#ifndef _WIN32 - enginePtr->SetConfig({{ - InferenceEngine::PluginConfigParams::KEY_CPU_THREADS_NUM, format("%d", getNumThreads()), - }}, 0); -#endif // _WIN32 -#else - if (device_name == "CPU") - ie.SetConfig({{ - InferenceEngine::PluginConfigParams::KEY_CPU_THREADS_NUM, format("%d", getNumThreads()), - }}, device_name); -#endif - } -#if INF_ENGINE_VER_MAJOR_LE(INF_ENGINE_RELEASE_2019R1) - plugin = InferenceEngine::InferencePlugin(enginePtr); - netExec = plugin.LoadNetwork(net, {}); -#else - bool isHetero = false; - if (device_name != "CPU") - { - isHetero = device_name == "FPGA"; - for (auto& layer : net) - { - if (layer->type == kOpenCVLayersType) - { - isHetero = true; -#if INF_ENGINE_VER_MAJOR_LT(INF_ENGINE_RELEASE_2019R3) - // Not sure about lower versions but in 2019R3 we do not need this - layer->affinity = "CPU"; - } - else - { - layer->affinity = device_name; -#endif - } - } - } - if (isHetero) - netExec = ie.LoadNetwork(net, "HETERO:" + device_name + ",CPU"); - else - netExec = ie.LoadNetwork(net, device_name); -#endif - } - catch (const std::exception& ex) - { - CV_Error(Error::StsError, format("Failed to initialize Inference Engine backend (device = %s): %s", device_name.c_str(), ex.what())); - } -} - -bool InfEngineBackendNet::isInitialized() -{ -#if INF_ENGINE_VER_MAJOR_LE(INF_ENGINE_RELEASE_2019R1) - return (bool)enginePtr; -#else - return isInit; -#endif -} - -void InfEngineBackendNet::reset() -{ - allBlobs.clear(); - infRequests.clear(); - isInit = false; -} - -void InfEngineBackendNet::addBlobs(const std::vector >& ptrs) -{ - auto wrappers = infEngineWrappers(ptrs); - for (const auto& wrapper : wrappers) - { - std::string name = wrapper->dataPtr->getName(); - name = name.empty() ? kDefaultInpLayerName : name; - allBlobs.insert({name, wrapper->blob}); - } -} - -void InfEngineBackendNet::InfEngineReqWrapper::makePromises(const std::vector >& outsWrappers) -{ - auto outs = infEngineWrappers(outsWrappers); - outProms.clear(); - outProms.resize(outs.size()); - outsNames.resize(outs.size()); - for (int i = 0; i < outs.size(); ++i) - { - outs[i]->futureMat = outProms[i].getArrayResult(); - outsNames[i] = outs[i]->dataPtr->getName(); - } -} - -void InfEngineBackendNet::forward(const std::vector >& outBlobsWrappers, - bool isAsync) -{ - CV_LOG_DEBUG(NULL, "InfEngineBackendNet::forward(" << (isAsync ? "async" : "sync") << ")"); - // Look for finished requests. - Ptr reqWrapper; - for (auto& wrapper : infRequests) - { - if (wrapper->isReady) - { - reqWrapper = wrapper; - break; - } - } - if (reqWrapper.empty()) - { - reqWrapper = Ptr(new InfEngineReqWrapper()); - try - { - reqWrapper->req = netExec.CreateInferRequest(); - } - catch (const std::exception& ex) - { - CV_Error(Error::StsAssert, format("Failed to initialize Inference Engine backend: %s", ex.what())); - } - infRequests.push_back(reqWrapper); - - InferenceEngine::BlobMap inpBlobs, outBlobs; - for (const auto& it : cnn.getInputsInfo()) - { - const std::string& name = it.first; - auto blobIt = allBlobs.find(name); - CV_Assert(blobIt != allBlobs.end()); - inpBlobs[name] = isAsync ? cloneBlob(blobIt->second) : blobIt->second; - } - for (const auto& it : cnn.getOutputsInfo()) - { - const std::string& name = it.first; - auto blobIt = allBlobs.find(name); - CV_Assert(blobIt != allBlobs.end()); - outBlobs[name] = isAsync ? 
cloneBlob(blobIt->second) : blobIt->second; - } - reqWrapper->req.SetInput(inpBlobs); - reqWrapper->req.SetOutput(outBlobs); - - InferenceEngine::IInferRequest::Ptr infRequestPtr = reqWrapper->req; - infRequestPtr->SetUserData(reqWrapper.get(), 0); - - infRequestPtr->SetCompletionCallback( - [](InferenceEngine::IInferRequest::Ptr request, InferenceEngine::StatusCode status) - { - CV_LOG_DEBUG(NULL, "DNN(IE): completionCallback(" << (int)status << ")"); - - InfEngineReqWrapper* wrapper; - request->GetUserData((void**)&wrapper, 0); - CV_Assert(wrapper && "Internal error"); - - size_t processedOutputs = 0; - try - { - for (; processedOutputs < wrapper->outProms.size(); ++processedOutputs) - { - const std::string& name = wrapper->outsNames[processedOutputs]; - Mat m = infEngineBlobToMat(wrapper->req.GetBlob(name)); - - try - { - CV_Assert(status == InferenceEngine::StatusCode::OK); - wrapper->outProms[processedOutputs].setValue(m.clone()); - } - catch (...) - { - try { - wrapper->outProms[processedOutputs].setException(std::current_exception()); - } catch(...) { - CV_LOG_ERROR(NULL, "DNN: Exception occurred during async inference exception propagation"); - } - } - } - } - catch (...) - { - std::exception_ptr e = std::current_exception(); - for (; processedOutputs < wrapper->outProms.size(); ++processedOutputs) - { - try { - wrapper->outProms[processedOutputs].setException(e); - } catch(...) { - CV_LOG_ERROR(NULL, "DNN: Exception occurred during async inference exception propagation"); - } - } - } - wrapper->isReady = true; - } - ); - } - if (isAsync) - { - // Copy actual data to infer request's input blobs. - for (const auto& it : cnn.getInputsInfo()) - { - const std::string& name = it.first; - auto blobIt = allBlobs.find(name); - Mat srcMat = infEngineBlobToMat(blobIt->second); - Mat dstMat = infEngineBlobToMat(reqWrapper->req.GetBlob(name)); - srcMat.copyTo(dstMat); - } - - // Set promises to output blobs wrappers. 
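All of this request pooling, promise wiring and completion-callback code serves a single public entry point that survives the cleanup: Net::forwardAsync(), which hands back a cv::AsyncArray backed by the futureMat promises set up here. A usage sketch (the helper name is illustrative):

```cpp
#include <opencv2/dnn.hpp>

// Sketch: the user-facing side of the async path above (IE backend only).
cv::AsyncArray runAsync(cv::dnn::Net& net, const cv::Mat& blob)
{
    net.setInput(blob);
    return net.forwardAsync(); // promises/callbacks stay internal
}

// Caller side:
//   cv::Mat result;
//   runAsync(net, blob).get(result); // blocks until the request completes
```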
- reqWrapper->makePromises(outBlobsWrappers); - - reqWrapper->isReady = false; - reqWrapper->req.StartAsync(); - } - else - { - reqWrapper->req.Infer(); - } -} - -bool InfEngineBackendLayer::getMemoryShapes(const std::vector &inputs, - const int requiredOutputs, - std::vector &outputs, - std::vector &internals) const -{ - InferenceEngine::ICNNNetwork::InputShapes inShapes = t_net.getInputShapes(); - InferenceEngine::ICNNNetwork::InputShapes::iterator itr; - bool equal_flag = true; - size_t i = 0; - for (itr = inShapes.begin(); itr != inShapes.end(); ++itr) - { - InferenceEngine::SizeVector currentInShape(inputs[i].begin(), inputs[i].end()); - if (itr->second != currentInShape) - { - itr->second = currentInShape; - equal_flag = false; - } - i++; - } - - if (!equal_flag) - { - InferenceEngine::CNNNetwork curr_t_net(t_net); - curr_t_net.reshape(inShapes); - } - std::vector dims = t_net.getOutputsInfo()[name]->getDims(); - outputs.push_back(MatShape(dims.begin(), dims.end())); - return false; -} - -bool InfEngineBackendLayer::supportBackend(int backendId) -{ - CV_LOG_DEBUG(NULL, "InfEngineBackendLayer::supportBackend(" << backendId << ")"); - return backendId == DNN_BACKEND_DEFAULT || - (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019); -} - -void InfEngineBackendLayer::forward(InputArrayOfArrays inputs, OutputArrayOfArrays outputs, - OutputArrayOfArrays internals) -{ - CV_Error(Error::StsInternal, "Choose Inference Engine as a preferable backend."); -} - -InferenceEngine::Blob::Ptr convertFp16(const InferenceEngine::Blob::Ptr& blob) -{ - auto halfs = InferenceEngine::make_shared_blob({ - InferenceEngine::Precision::FP16, blob->getTensorDesc().getDims(), - blob->getTensorDesc().getLayout() - }); - halfs->allocate(); - Mat floatsData(1, blob->size(), CV_32F, blob->buffer()); - Mat halfsData(1, blob->size(), CV_16SC1, halfs->buffer()); - convertFp16(floatsData, halfsData); - return halfs; -} - -void addConstantData(const std::string& name, InferenceEngine::Blob::Ptr data, - InferenceEngine::Builder::Layer& l) -{ -#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2019R1) - l.getParameters()[name] = data; -#else - l.addConstantData(name, data); -#endif -} - -#endif // HAVE_DNN_IE_NN_BUILDER_2019 - #endif // HAVE_INF_ENGINE -bool haveInfEngine() -{ -#ifdef HAVE_INF_ENGINE - return true; -#else - return false; -#endif // HAVE_INF_ENGINE -} - -void forwardInfEngine(const std::vector >& outBlobsWrappers, - Ptr& node, bool isAsync) -{ - CV_Assert(haveInfEngine()); -#ifdef HAVE_DNN_IE_NN_BUILDER_2019 - CV_Assert(!node.empty()); - Ptr ieNode = node.dynamicCast(); - CV_Assert(!ieNode.empty()); - ieNode->net->forward(outBlobsWrappers, isAsync); -#else - CV_Error(Error::StsNotImplemented, "This OpenCV version is built without Inference Engine NN Builder API support"); -#endif // HAVE_INF_ENGINE -} CV__DNN_EXPERIMENTAL_NS_BEGIN void resetMyriadDevice() { #ifdef HAVE_INF_ENGINE + CV_LOG_INFO(NULL, "DNN: Unregistering both 'MYRIAD' and 'HETERO:MYRIAD,CPU' plugins"); + AutoLock lock(getInitializationMutex()); -#if INF_ENGINE_VER_MAJOR_LE(INF_ENGINE_RELEASE_2019R1) - getSharedPlugins().erase("MYRIAD"); -#else - // Unregister both "MYRIAD" and "HETERO:MYRIAD,CPU" plugins + InferenceEngine::Core& ie = getCore("MYRIAD"); try { @@ -1167,7 +175,6 @@ void resetMyriadDevice() ie.UnregisterPlugin("HETERO"); } catch (...) 
diff --git a/modules/dnn/src/op_inf_engine.hpp b/modules/dnn/src/op_inf_engine.hpp
index a825431627..44828a144c 100644
--- a/modules/dnn/src/op_inf_engine.hpp
+++ b/modules/dnn/src/op_inf_engine.hpp
@@ -48,37 +48,16 @@
 #pragma GCC diagnostic ignored "-Wsuggest-override"
 #endif
 
-#if defined(HAVE_DNN_IE_NN_BUILDER_2019) || INF_ENGINE_VER_MAJOR_EQ(INF_ENGINE_RELEASE_2020_4)
-//#define INFERENCE_ENGINE_DEPRECATED  // turn off deprecation warnings from IE
-// there is no way to suppress warnings from IE only at this moment, so we are forced to suppress warnings globally
-#if defined(__GNUC__)
-#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
-#endif
-#ifdef _MSC_VER
-#pragma warning(disable: 4996)  // was declared deprecated
-#endif
-#endif
-
-#if defined(__GNUC__) && INF_ENGINE_VER_MAJOR_LT(INF_ENGINE_RELEASE_2020_1)
-#pragma GCC visibility push(default)
-#endif
-
 #include <inference_engine.hpp>
 
-#ifdef HAVE_DNN_IE_NN_BUILDER_2019
-#include <ie_builders.hpp>
-#endif
-
-#if defined(__GNUC__) && INF_ENGINE_VER_MAJOR_LT(INF_ENGINE_RELEASE_2020_1)
-#pragma GCC visibility pop
-#endif
-
 #if defined(__GNUC__) && __GNUC__ >= 5
 //#pragma GCC diagnostic pop
 #endif
 
 #endif  // HAVE_INF_ENGINE
 
+#define CV_ERROR_DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 do { CV_Error(Error::StsNotImplemented, "This OpenCV version is built without Inference Engine NN Builder API support (legacy API is not supported anymore)"); } while (0)
+
 namespace cv { namespace dnn {
 
 #ifdef HAVE_INF_ENGINE
@@ -90,167 +69,6 @@ Mat infEngineBlobToMat(const InferenceEngine::Blob::Ptr& blob);
 
 void infEngineBlobsToMats(const std::vector<InferenceEngine::Blob::Ptr>& blobs,
                           std::vector<Mat>& mats);
 
-#ifdef HAVE_DNN_IE_NN_BUILDER_2019
-
-class InfEngineBackendNet
-{
-public:
-    InfEngineBackendNet();
-
-    InfEngineBackendNet(InferenceEngine::CNNNetwork& net);
-
-    void addLayer(InferenceEngine::Builder::Layer& layer);
-
-    void addOutput(const std::string& name);
-
-    void connect(const std::vector<Ptr<BackendWrapper> >& inputs,
-                 const std::vector<Ptr<BackendWrapper> >& outputs,
-                 const std::string& layerName);
-
-    bool isInitialized();
-
-    void init(Target targetId);
-
-    void forward(const std::vector<Ptr<BackendWrapper> >& outBlobsWrappers,
-                 bool isAsync);
-
-    void initPlugin(InferenceEngine::CNNNetwork& net);
-
-    void addBlobs(const std::vector<cv::Ptr<BackendWrapper> >& ptrs);
-
-    void reset();
-
-private:
-    InferenceEngine::Builder::Network netBuilder;
-
-    InferenceEngine::ExecutableNetwork netExec;
-    InferenceEngine::BlobMap allBlobs;
-    std::string device_name;
-#if INF_ENGINE_VER_MAJOR_LE(2019010000)
-    InferenceEngine::InferenceEnginePluginPtr enginePtr;
-    InferenceEngine::InferencePlugin plugin;
-#else
-    bool isInit = false;
-#endif
-
-    struct InfEngineReqWrapper
-    {
-        InfEngineReqWrapper() : isReady(true) {}
-
-        void makePromises(const std::vector<Ptr<BackendWrapper> >& outs);
-
-        InferenceEngine::InferRequest req;
-        std::vector<cv::AsyncPromise> outProms;
-        std::vector<std::string> outsNames;
-        bool isReady;
-    };
-
-    std::vector<Ptr<InfEngineReqWrapper> > infRequests;
-
-    InferenceEngine::CNNNetwork cnn;
-    bool hasNetOwner;
-
-    std::map<std::string, int> layers;
-    std::vector<std::string> requestedOutputs;
-
-    std::set<std::pair<std::string, int> > unconnectedPorts;
-};
-
-class InfEngineBackendNode : public BackendNode
-{
-public:
-    InfEngineBackendNode(const InferenceEngine::Builder::Layer& layer);
-
-    InfEngineBackendNode(Ptr<Layer>& layer, std::vector<Mat*>& inputs,
-                         std::vector<Mat>& outputs, std::vector<Mat>& internals);
-
-    void connect(std::vector<Ptr<BackendWrapper> >& inputs,
-                 std::vector<Ptr<BackendWrapper> >& outputs);
-
-    // Inference Engine network object that allows to obtain the outputs of this layer.
-    InferenceEngine::Builder::Layer layer;
-    Ptr<InfEngineBackendNet> net;
-    // CPU fallback in case of unsupported Inference Engine layer.
-    Ptr<dnn::Layer> cvLayer;
-};
-
-class InfEngineBackendWrapper : public BackendWrapper
-{
-public:
-    InfEngineBackendWrapper(int targetId, const Mat& m);
-
-    InfEngineBackendWrapper(Ptr<BackendWrapper> wrapper);
-
-    ~InfEngineBackendWrapper();
-
-    static Ptr<BackendWrapper> create(Ptr<BackendWrapper> wrapper);
-
-    virtual void copyToHost() CV_OVERRIDE;
-
-    virtual void setHostDirty() CV_OVERRIDE;
-
-    InferenceEngine::DataPtr dataPtr;
-    InferenceEngine::Blob::Ptr blob;
-    AsyncArray futureMat;
-};
-
-InferenceEngine::Blob::Ptr wrapToInfEngineBlob(const Mat& m, InferenceEngine::Layout layout = InferenceEngine::Layout::ANY);
-
-InferenceEngine::Blob::Ptr wrapToInfEngineBlob(const Mat& m, const std::vector<size_t>& shape, InferenceEngine::Layout layout);
-
-InferenceEngine::DataPtr infEngineDataNode(const Ptr<BackendWrapper>& ptr);
-
-// Convert Inference Engine blob with FP32 precision to FP16 precision.
-// Allocates memory for a new blob.
-InferenceEngine::Blob::Ptr convertFp16(const InferenceEngine::Blob::Ptr& blob);
-
-void addConstantData(const std::string& name, InferenceEngine::Blob::Ptr data, InferenceEngine::Builder::Layer& l);
-
-// This is a fake class to run networks from Model Optimizer. Objects of that
-// class simulate responses of layers that are imported by OpenCV and supported by
-// Inference Engine. The main difference is that they do not perform forward pass.
-class InfEngineBackendLayer : public Layer
-{
-public:
-    InfEngineBackendLayer(const InferenceEngine::CNNNetwork &t_net_) : t_net(t_net_) {};
-
-    virtual bool getMemoryShapes(const std::vector<MatShape> &inputs,
-                                 const int requiredOutputs,
-                                 std::vector<MatShape> &outputs,
-                                 std::vector<MatShape> &internals) const CV_OVERRIDE;
-
-    virtual void forward(InputArrayOfArrays inputs, OutputArrayOfArrays outputs,
-                         OutputArrayOfArrays internals) CV_OVERRIDE;
-
-    virtual bool supportBackend(int backendId) CV_OVERRIDE;
-
-private:
-    InferenceEngine::CNNNetwork t_net;
-};
-
-class InfEngineExtension : public InferenceEngine::IExtension
-{
-public:
-#if INF_ENGINE_VER_MAJOR_LT(INF_ENGINE_RELEASE_2020_2)
-    virtual void SetLogCallback(InferenceEngine::IErrorListener&) noexcept {}
-#endif
-    virtual void Unload() noexcept {}
-    virtual void Release() noexcept {}
-    virtual void GetVersion(const InferenceEngine::Version*&) const noexcept {}
-
-    virtual InferenceEngine::StatusCode getPrimitiveTypes(char**&, unsigned int&,
-                                                          InferenceEngine::ResponseDesc*) noexcept
-    {
-        return InferenceEngine::StatusCode::OK;
-    }
-
-    InferenceEngine::StatusCode getFactoryFor(InferenceEngine::ILayerImplFactory*& factory,
-                                              const InferenceEngine::CNNLayer* cnnLayer,
-                                              InferenceEngine::ResponseDesc* resp) noexcept;
-};
-
-#endif  // HAVE_DNN_IE_NN_BUILDER_2019
-
 CV__DNN_EXPERIMENTAL_NS_BEGIN
 
@@ -271,14 +89,8 @@ static inline std::vector<size_t> getShape(const Mat& mat)
     return result;
 }
 
-
 #endif  // HAVE_INF_ENGINE
 
-bool haveInfEngine();
-
-void forwardInfEngine(const std::vector<Ptr<BackendWrapper> >& outBlobsWrappers,
-                      Ptr<BackendNode>& node, bool isAsync);
-
 }}  // namespace dnn, namespace cv
 
 #endif  // __OPENCV_DNN_OP_INF_ENGINE_HPP__
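The only addition in op_inf_engine.hpp is the `CV_ERROR_DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019` macro, so remaining call sites can fail with one uniform message. Its `do { ... } while (0)` wrapper is the usual trick for making a multi-token macro behave as a single statement. A tiny self-contained illustration with a hypothetical macro name and a thrown exception standing in for `CV_Error`:

    #include <cstdio>
    #include <stdexcept>

    // do { ... } while (0) makes the expansion exactly one statement, so the
    // macro composes safely with if/else and still requires a trailing ';'.
    #define FAIL_NN_BUILDER_REMOVED() \
        do { throw std::runtime_error("NN Builder API support was removed"); } while (0)

    static void run(bool legacyRequested)
    {
        if (legacyRequested)
            FAIL_NN_BUILDER_REMOVED();   // parses as one statement; else binds correctly
        else
            std::printf("using the nGraph-based path\n");
    }

    int main()
    {
        run(false);
        return 0;
    }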
diff --git a/modules/dnn/test/test_common.impl.hpp b/modules/dnn/test/test_common.impl.hpp
index 5546f68916..65ee07eb84 100644
--- a/modules/dnn/test/test_common.impl.hpp
+++ b/modules/dnn/test/test_common.impl.hpp
@@ -256,16 +256,6 @@ testing::internal::ParamGenerator< tuple<Backend, Target> > dnnBackendsAndTarget
     std::vector< tuple<Backend, Target> > targets;
     std::vector< Target > available;
-    {
-        available = getAvailableTargets(DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019);
-        for (std::vector< Target >::const_iterator i = available.begin(); i != available.end(); ++i)
-        {
-            if (*i == DNN_TARGET_MYRIAD && !withVPU)
-                continue;
-            targets.push_back(make_tuple(DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019, *i));
-        }
-    }
-
     {
         available = getAvailableTargets(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH);
         for (std::vector< Target >::const_iterator i = available.begin(); i != available.end(); ++i)
@@ -274,7 +264,6 @@ testing::internal::ParamGenerator< tuple<Backend, Target> > dnnBackendsAndTarget
             continue;
         targets.push_back(make_tuple(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH, *i));
     }
-    }
 
     return testing::ValuesIn(targets);
diff --git a/modules/dnn/test/test_ie_models.cpp b/modules/dnn/test/test_ie_models.cpp
index 3407e95e9b..93691a09ea 100644
--- a/modules/dnn/test/test_ie_models.cpp
+++ b/modules/dnn/test/test_ie_models.cpp
@@ -371,17 +371,17 @@ TEST_P(DNNTestOpenVINO, models)
             || modelName == "person-vehicle-bike-detection-2004"  // 2021.4+: ncDeviceOpen:1013 Failed to find booted device after boot
         )
     )
-        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_DNN_BACKEND_INFERENCE_ENGINE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
     if (targetId == DNN_TARGET_OPENCL && (false
             || modelName == "face-detection-0106"  // Operation: 2278 of type ExperimentalDetectronPriorGridGenerator(op::v6) is not supported
         )
     )
-        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL, CV_DNN_BACKEND_INFERENCE_ENGINE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
     if (targetId == DNN_TARGET_OPENCL_FP16 && (false
             || modelName == "face-detection-0106"  // Operation: 2278 of type ExperimentalDetectronPriorGridGenerator(op::v6) is not supported
         )
     )
-        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16, CV_DNN_BACKEND_INFERENCE_ENGINE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
 #endif
 
 #if INF_ENGINE_VER_MAJOR_GE(2020020000)
@@ -397,12 +397,7 @@ TEST_P(DNNTestOpenVINO, models)
     applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
 #endif
 
-    if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
-        setInferenceEngineBackendType(CV_DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_API);
-    else if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
-        setInferenceEngineBackendType(CV_DNN_BACKEND_INFERENCE_ENGINE_NGRAPH);
-    else
-        FAIL() << "Unknown backendId";
+    ASSERT_EQ(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH, backendId);
 
     bool isFP16 = (targetId == DNN_TARGET_OPENCL_FP16 || targetId == DNN_TARGET_MYRIAD);
 
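From here on the patch repeats one mechanical substitution across the test files: the six-line backend dispatch (with its `setInferenceEngineBackendType` calls) collapses to a single `ASSERT_EQ(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH, backendId);`, since nGraph is the only IE backend left. The GoogleTest semantics this relies on: `ASSERT_*` aborts the current void-returning function on failure, so the test body below the guard never runs with an unexpected backend. A hedged sketch with an illustrative stand-in enum (not OpenCV's real definition):

    #include <gtest/gtest.h>

    // Illustrative stand-in for cv::dnn::Backend; not the real OpenCV definition.
    enum Backend { DNN_BACKEND_INFERENCE_ENGINE_NGRAPH = 1000000 };

    // ASSERT_* may only appear in functions returning void; on failure it
    // records the failure and returns from this function immediately.
    static void requireNgraph(int backendId)
    {
        ASSERT_EQ(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH, backendId);
        // ... code here runs only when the assertion held ...
    }

    TEST(BackendGuard, OnlyNgraphRemains)
    {
        requireNgraph(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH);  // passes
    }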
diff --git a/modules/dnn/test/test_layers.cpp b/modules/dnn/test/test_layers.cpp
index 2d4f78c88c..66d2822fbc 100644
--- a/modules/dnn/test/test_layers.cpp
+++ b/modules/dnn/test/test_layers.cpp
@@ -1211,12 +1211,7 @@ TEST_P(Layer_Test_Convolution_DLDT, Accuracy)
     if (backendId != DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 &&
         backendId != DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
         throw SkipTestException("No support for async forward");
-    if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
-        setInferenceEngineBackendType(CV_DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_API);
-    else if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
-        setInferenceEngineBackendType(CV_DNN_BACKEND_INFERENCE_ENGINE_NGRAPH);
-    else
-        FAIL() << "Unknown backendId";
+    ASSERT_EQ(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH, backendId);
 
     Net netDefault = readNet(_tf("layer_convolution.caffemodel"), _tf("layer_convolution.prototxt"));
     Net net = readNet(_tf("layer_convolution.xml"), _tf("layer_convolution.bin"));
@@ -1256,12 +1251,7 @@ TEST_P(Layer_Test_Convolution_DLDT, setInput_uint8)
     if (backendId != DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 &&
         backendId != DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
         throw SkipTestException("No support for async forward");
-    if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
-        setInferenceEngineBackendType(CV_DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_API);
-    else if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
-        setInferenceEngineBackendType(CV_DNN_BACKEND_INFERENCE_ENGINE_NGRAPH);
-    else
-        FAIL() << "Unknown backendId";
+    ASSERT_EQ(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH, backendId);
 
     int blobSize[] = {2, 6, 75, 113};
     Mat inputs[] = {Mat(4, &blobSize[0], CV_8U), Mat()};
@@ -1294,12 +1284,7 @@ TEST_P(Layer_Test_Convolution_DLDT, multithreading)
     if (backendId != DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 &&
         backendId != DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
         throw SkipTestException("No support for async forward");
-    if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
-        setInferenceEngineBackendType(CV_DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_API);
-    else if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
-        setInferenceEngineBackendType(CV_DNN_BACKEND_INFERENCE_ENGINE_NGRAPH);
-    else
-        FAIL() << "Unknown backendId";
+    ASSERT_EQ(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH, backendId);
 
     std::string xmlPath = _tf("layer_convolution.xml");
     std::string binPath = _tf("layer_convolution.bin");
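The test_misc.cpp hunks below keep exercising the same convolution IR through `readNet` and `readNetFromModelOptimizer` under the surviving backend. For orientation, the user-facing flow these tests cover looks roughly like the following sketch; the file names are placeholders for a Model Optimizer .xml/.bin pair, and this is not code from the patch:

    #include <opencv2/core.hpp>
    #include <opencv2/dnn.hpp>

    using namespace cv;
    using namespace cv::dnn;

    int main()
    {
        // Placeholder IR pair; a real one is produced by OpenVINO's Model Optimizer.
        Net net = readNet("layer_convolution.xml", "layer_convolution.bin");

        // With the NN Builder path gone, the IE backend always means nGraph.
        net.setPreferableBackend(DNN_BACKEND_INFERENCE_ENGINE);
        net.setPreferableTarget(DNN_TARGET_CPU);

        int blobSize[] = {2, 6, 75, 113};          // shape mirrored from the tests
        Mat input(4, blobSize, CV_32F, Scalar(0));
        net.setInput(input);

        Mat out = net.forward();                   // synchronous inference
        return out.empty() ? 1 : 0;
    }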
diff --git a/modules/dnn/test/test_misc.cpp b/modules/dnn/test/test_misc.cpp
index 0de503ac7d..f4658904f3 100644
--- a/modules/dnn/test/test_misc.cpp
+++ b/modules/dnn/test/test_misc.cpp
@@ -117,12 +117,7 @@ void test_readNet_IE_do_not_call_setInput(Backend backendId)
     const std::string& model = findDataFile("dnn/layers/layer_convolution.bin");
     const std::string& proto = findDataFile("dnn/layers/layer_convolution.xml");
 
-    if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
-        setInferenceEngineBackendType(CV_DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_API);
-    else if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
-        setInferenceEngineBackendType(CV_DNN_BACKEND_INFERENCE_ENGINE_NGRAPH);
-    else
-        FAIL() << "Unknown backendId";
+    ASSERT_EQ(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH, backendId);
 
     Net net = readNet(model, proto);
     net.setPreferableBackend(backendId);
@@ -458,12 +453,7 @@ TEST_P(Async, model_optimizer_pipeline_set_and_forward_single)
     const std::string& model = findDataFile("dnn/layers/layer_convolution.bin");
     const std::string& proto = findDataFile("dnn/layers/layer_convolution.xml");
 
-    if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
-        setInferenceEngineBackendType(CV_DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_API);
-    else if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
-        setInferenceEngineBackendType(CV_DNN_BACKEND_INFERENCE_ENGINE_NGRAPH);
-    else
-        FAIL() << "Unknown backendId";
+    ASSERT_EQ(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH, backendId);
 
     Net netSync = readNet(model, proto);
     netSync.setPreferableBackend(backendId);
@@ -519,12 +509,7 @@ TEST_P(Async, model_optimizer_pipeline_set_and_forward_all)
     const std::string& model = findDataFile("dnn/layers/layer_convolution.bin");
     const std::string& proto = findDataFile("dnn/layers/layer_convolution.xml");
 
-    if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
-        setInferenceEngineBackendType(CV_DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_API);
-    else if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
-        setInferenceEngineBackendType(CV_DNN_BACKEND_INFERENCE_ENGINE_NGRAPH);
-    else
-        FAIL() << "Unknown backendId";
+    ASSERT_EQ(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH, backendId);
 
     Net netSync = readNet(model, proto);
     netSync.setPreferableBackend(backendId);
@@ -582,12 +567,7 @@ TEST_P(Async, create_layer_pipeline_set_and_forward_all)
     if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && dtype == CV_8U)
         applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
 
-    if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
-        setInferenceEngineBackendType(CV_DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_API);
-    else if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
-        setInferenceEngineBackendType(CV_DNN_BACKEND_INFERENCE_ENGINE_NGRAPH);
-    else
-        FAIL() << "Unknown backendId";
+    ASSERT_EQ(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH, backendId);
 
     Net netSync;
     Net netAsync;
@@ -693,12 +673,7 @@ TEST_P(Test_Model_Optimizer, forward_two_nets)
     const std::string& model = findDataFile("dnn/layers/layer_convolution.bin");
     const std::string& proto = findDataFile("dnn/layers/layer_convolution.xml");
 
-    if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
-        setInferenceEngineBackendType(CV_DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_API);
-    else if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
-        setInferenceEngineBackendType(CV_DNN_BACKEND_INFERENCE_ENGINE_NGRAPH);
-    else
-        FAIL() << "Unknown backendId";
+    ASSERT_EQ(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH, backendId);
 
     Net net0 = readNet(model, proto);
     net0.setPreferableTarget(targetId);
@@ -737,12 +712,7 @@ TEST_P(Test_Model_Optimizer, readFromBuffer)
     const std::string& weightsFile = findDataFile("dnn/layers/layer_convolution.bin");
     const std::string& modelFile = findDataFile("dnn/layers/layer_convolution.xml");
 
-    if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
-        setInferenceEngineBackendType(CV_DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_API);
-    else if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
-        setInferenceEngineBackendType(CV_DNN_BACKEND_INFERENCE_ENGINE_NGRAPH);
-    else
-        FAIL() << "Unknown backendId";
+    ASSERT_EQ(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH, backendId);
 
     Net net1 = readNetFromModelOptimizer(modelFile, weightsFile);
     net1.setPreferableBackend(backendId);
@@ -789,12 +759,7 @@ TEST_P(Test_Model_Optimizer, flexible_inputs)
     const std::string& model = findDataFile("dnn/layers/layer_convolution.bin");
     const std::string& proto = findDataFile("dnn/layers/layer_convolution.xml");
 
-    if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
-        setInferenceEngineBackendType(CV_DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_API);
-    else if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
-        setInferenceEngineBackendType(CV_DNN_BACKEND_INFERENCE_ENGINE_NGRAPH);
-    else
-        FAIL() << "Unknown backendId";
+    ASSERT_EQ(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH, backendId);
 
     Net net0 = readNet(model, proto);
     net0.setPreferableTarget(targetId);
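The `Async` tests retained above cover the asynchronous API that outlives this cleanup at the user level: `Net::forwardAsync()` returns a `cv::AsyncArray`, and the result is fetched with `get()`. A hedged usage sketch, not code from the patch (placeholder file names; `forwardAsync()` throws unless OpenCV was built with Inference Engine support):

    #include <opencv2/core/async.hpp>
    #include <opencv2/dnn.hpp>

    using namespace cv;
    using namespace cv::dnn;

    int main()
    {
        Net net = readNet("layer_convolution.xml", "layer_convolution.bin");
        net.setPreferableBackend(DNN_BACKEND_INFERENCE_ENGINE);  // nGraph-based backend

        int blobSize[] = {2, 6, 75, 113};
        Mat input(4, blobSize, CV_32F, Scalar(1));
        net.setInput(input);

        AsyncArray futureMat = net.forwardAsync();  // queues the request
        Mat result;
        futureMat.get(result);                      // blocks until inference completes
        return result.empty() ? 1 : 0;
    }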