diff --git a/modules/dnn/perf/perf_net.cpp b/modules/dnn/perf/perf_net.cpp
index c0628dba96..d4c962e741 100644
--- a/modules/dnn/perf/perf_net.cpp
+++ b/modules/dnn/perf/perf_net.cpp
@@ -108,7 +108,7 @@ PERF_TEST_P_(DNNTestNetwork, Inception_5h)
 
 PERF_TEST_P_(DNNTestNetwork, ENet)
 {
-    if ((backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019) ||
+    if ((backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target != DNN_TARGET_CPU) ||
         (backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16))
         throw SkipTestException("");
     processNet("dnn/Enet-model-best.net", "", "enet.yml",
diff --git a/modules/dnn/src/dnn.cpp b/modules/dnn/src/dnn.cpp
index ad2e52766b..af7ca3794b 100644
--- a/modules/dnn/src/dnn.cpp
+++ b/modules/dnn/src/dnn.cpp
@@ -1624,7 +1624,7 @@ struct Net::Impl
         Ptr<Layer> layer = ld.layerInstance;
         if (!fused && !layer->supportBackend(preferableBackend))
         {
-            bool customizable = ld.id != 0 && ld.outputBlobs.size() == 1 &&
+            bool customizable = ld.id != 0 && INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2019R2) &&
                                 supportsCPUFallback;
 
             // TODO: there is a bug in Myriad plugin with custom layers shape infer.
diff --git a/modules/dnn/src/op_inf_engine.cpp b/modules/dnn/src/op_inf_engine.cpp
index 17b2d7271b..0a35194995 100644
--- a/modules/dnn/src/op_inf_engine.cpp
+++ b/modules/dnn/src/op_inf_engine.cpp
@@ -278,11 +278,28 @@ void InfEngineBackendNet::connect(const std::vector<Ptr<BackendWrapper> >& input
     {
         const auto& inp = inpWrappers[i];
         const std::string& inpName = inp->dataPtr->getName();
+
+        std::string inpLayerName = inpName;
+        size_t inpPortId = inpName.rfind('.');
+        if (inpPortId != std::string::npos)
+        {
+            std::string portIdStr = inpName.substr(inpPortId + 1);
+            if (std::all_of(portIdStr.begin(), portIdStr.end(), ::isdigit))
+            {
+                inpLayerName = inpName.substr(0, inpPortId);
+                inpPortId = atoi(portIdStr.c_str());
+            }
+            else
+                inpPortId = 0;
+        }
+        else
+            inpPortId = 0;
+
         int inpId;
-        it = layers.find(inpName);
+        it = layers.find(inpLayerName);
         if (it == layers.end())
         {
-            InferenceEngine::Builder::InputLayer inpLayer(!inpName.empty() ? inpName : kDefaultInpLayerName);
+            InferenceEngine::Builder::InputLayer inpLayer(!inpLayerName.empty() ? inpLayerName : kDefaultInpLayerName);
             std::vector<size_t> shape(inp->blob->getTensorDesc().getDims());
             inpLayer.setPort(InferenceEngine::Port(shape));
             inpId = netBuilder.addLayer(inpLayer);
@@ -292,24 +309,28 @@ void InfEngineBackendNet::connect(const std::vector<Ptr<BackendWrapper> >& input
         else
             inpId = it->second;
 
-        netBuilder.connect((size_t)inpId, {(size_t)layerId, i});
-        unconnectedLayersIds.erase(inpId);
+        netBuilder.connect({(size_t)inpId, inpPortId}, {(size_t)layerId, i});
+        unconnectedPorts.erase({inpId, inpPortId});
     }
     CV_Assert(!outputs.empty());
-    InferenceEngine::DataPtr dataPtr = infEngineDataNode(outputs[0]);
+    for (int i = 0; i < outputs.size(); ++i)
+    {
+        InferenceEngine::DataPtr dataPtr = infEngineDataNode(outputs[i]);
+        std::string outputName = outputs.size() > 1 ? (layerName + "." + std::to_string(i)) : layerName;
 #if INF_ENGINE_VER_MAJOR_LE(INF_ENGINE_RELEASE_2019R1)
-    dataPtr->name = layerName;
+        dataPtr->name = outputName;
 #else
-    dataPtr->setName(layerName);
+        dataPtr->setName(outputName);
 #endif
+    }
 }
 
 void InfEngineBackendNet::init(Target targetId)
 {
     if (!hasNetOwner)
     {
-        CV_Assert(!unconnectedLayersIds.empty());
-        for (int id : unconnectedLayersIds)
+        CV_Assert(!unconnectedPorts.empty());
+        for (const auto& port : unconnectedPorts)
         {
             InferenceEngine::Builder::OutputLayer outLayer("myconv1");
 #if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2019R1)
@@ -320,7 +341,7 @@ void InfEngineBackendNet::init(Target targetId)
                                              InferenceEngine::Precision::FP32;
             outLayer.setPort(InferenceEngine::Port({}, p));
 #endif
-            netBuilder.addLayer({InferenceEngine::PortInfo(id)}, outLayer);
+            netBuilder.addLayer({InferenceEngine::PortInfo(port.first, port.second)}, outLayer);
         }
         netBuilder.getContext().addShapeInferImpl(kOpenCVLayersType,
                                                   std::make_shared<InfEngineCustomLayerShapeInfer>());
@@ -409,8 +430,10 @@ void InfEngineBackendNet::addLayer(InferenceEngine::Builder::Layer& layer)
 
     int id = netBuilder.addLayer(layer);
     const std::string& layerName = layer.getName();
+
     CV_Assert(layers.insert({layerName, id}).second);
-    unconnectedLayersIds.insert(id);
+    for (int i = 0; i < layer.getOutputPorts().size(); ++i)
+        unconnectedPorts.insert({id, i});
 
 #if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2019R1)
     // By default, all the weights are connected to last ports ids.
diff --git a/modules/dnn/src/op_inf_engine.hpp b/modules/dnn/src/op_inf_engine.hpp
index 8524ad9477..217eb9a008 100644
--- a/modules/dnn/src/op_inf_engine.hpp
+++ b/modules/dnn/src/op_inf_engine.hpp
@@ -132,7 +132,7 @@ private:
     std::map<std::string, int> layers;
     std::vector<std::string> requestedOutputs;
 
-    std::set<int> unconnectedLayersIds;
+    std::set<std::pair<int, int> > unconnectedPorts;
 };
 
 class InfEngineBackendNode : public BackendNode
diff --git a/modules/dnn/test/test_tf_importer.cpp b/modules/dnn/test/test_tf_importer.cpp
index 53578134e3..8826fa09ff 100644
--- a/modules/dnn/test/test_tf_importer.cpp
+++ b/modules/dnn/test/test_tf_importer.cpp
@@ -717,9 +717,8 @@ TEST_P(Test_TensorFlow_layers, lstm)
 
 TEST_P(Test_TensorFlow_layers, split)
 {
-    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target == DNN_TARGET_MYRIAD &&
-        getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_2)
-        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_2, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target == DNN_TARGET_MYRIAD)
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
     runTensorFlowNet("split");
 }
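
Note on the naming convention this patch introduces: a layer with more than one output blob now publishes its i-th blob under the name "<layerName>.<i>", and connect() parses that suffix back into a (layer id, port id) pair before wiring the NN Builder graph. Below is a minimal standalone sketch of that parsing step; parseBlobName is an illustrative helper, not a function added by the patch.

#include <algorithm>
#include <cassert>
#include <cctype>
#include <cstdlib>
#include <string>
#include <utility>

// parseBlobName is a hypothetical helper (not part of the patch) showing how
// connect() decodes blob names of the form "<layerName>.<portId>":
//   "conv1.2" -> {"conv1", 2}
//   "conv1"   -> {"conv1", 0}    (no suffix: single-output layer, port 0)
//   "block.a" -> {"block.a", 0}  (suffix is not all digits: part of the name)
static std::pair<std::string, int> parseBlobName(const std::string& name)
{
    size_t dot = name.rfind('.');
    if (dot != std::string::npos)
    {
        std::string suffix = name.substr(dot + 1);
        if (!suffix.empty() &&
            std::all_of(suffix.begin(), suffix.end(), ::isdigit))
            return std::make_pair(name.substr(0, dot), std::atoi(suffix.c_str()));
    }
    return std::make_pair(name, 0);  // no port suffix: default port 0
}

int main()
{
    assert(parseBlobName("split.1") == std::make_pair(std::string("split"), 1));
    assert(parseBlobName("conv2d") == std::make_pair(std::string("conv2d"), 0));
    assert(parseBlobName("block.a") == std::make_pair(std::string("block.a"), 0));
    return 0;
}

As in the patch, only an all-digit suffix is treated as a port id, so a layer whose own name contains a dot (e.g. "block.a") still resolves to port 0 of that layer; a layer name that itself ends in ".<digits>" would, however, be misread as a port reference, which is the trade-off of encoding ports in blob names.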