Enable MaxPooling with indices in Inference Engine

pull/16014/head
Dmitry Kurtaev 5 years ago
parent eb44e0a556
commit d8e10f3a8d
  1. 2
      modules/dnn/perf/perf_net.cpp
  2. 2
      modules/dnn/src/dnn.cpp
  3. 45
      modules/dnn/src/op_inf_engine.cpp
  4. 2
      modules/dnn/src/op_inf_engine.hpp
  5. 5
      modules/dnn/test/test_tf_importer.cpp

@@ -108,7 +108,7 @@ PERF_TEST_P_(DNNTestNetwork, Inception_5h)
PERF_TEST_P_(DNNTestNetwork, ENet) PERF_TEST_P_(DNNTestNetwork, ENet)
{ {
if ((backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019) || if ((backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target != DNN_TARGET_CPU) ||
(backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16)) (backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16))
throw SkipTestException(""); throw SkipTestException("");
processNet("dnn/Enet-model-best.net", "", "enet.yml", processNet("dnn/Enet-model-best.net", "", "enet.yml",

@@ -1624,7 +1624,7 @@ struct Net::Impl
Ptr<Layer> layer = ld.layerInstance; Ptr<Layer> layer = ld.layerInstance;
if (!fused && !layer->supportBackend(preferableBackend)) if (!fused && !layer->supportBackend(preferableBackend))
{ {
bool customizable = ld.id != 0 && ld.outputBlobs.size() == 1 && bool customizable = ld.id != 0 &&
INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2019R2) && INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2019R2) &&
supportsCPUFallback; supportsCPUFallback;
// TODO: there is a bug in Myriad plugin with custom layers shape infer. // TODO: there is a bug in Myriad plugin with custom layers shape infer.

@@ -278,11 +278,28 @@ void InfEngineBackendNet::connect(const std::vector<Ptr<BackendWrapper> >& input
{ {
const auto& inp = inpWrappers[i]; const auto& inp = inpWrappers[i];
const std::string& inpName = inp->dataPtr->getName(); const std::string& inpName = inp->dataPtr->getName();
std::string inpLayerName = inpName;
size_t inpPortId = inpName.rfind('.');
if (inpPortId != std::string::npos)
{
std::string portIdStr = inpName.substr(inpPortId + 1);
if (std::all_of(portIdStr.begin(), portIdStr.end(), ::isdigit))
{
inpLayerName = inpName.substr(0, inpPortId);
inpPortId = atoi(portIdStr.c_str());
}
else
inpPortId = 0;
}
else
inpPortId = 0;
int inpId; int inpId;
it = layers.find(inpName); it = layers.find(inpLayerName);
if (it == layers.end()) if (it == layers.end())
{ {
InferenceEngine::Builder::InputLayer inpLayer(!inpName.empty() ? inpName : kDefaultInpLayerName); InferenceEngine::Builder::InputLayer inpLayer(!inpLayerName.empty() ? inpLayerName : kDefaultInpLayerName);
std::vector<size_t> shape(inp->blob->getTensorDesc().getDims()); std::vector<size_t> shape(inp->blob->getTensorDesc().getDims());
inpLayer.setPort(InferenceEngine::Port(shape)); inpLayer.setPort(InferenceEngine::Port(shape));
inpId = netBuilder.addLayer(inpLayer); inpId = netBuilder.addLayer(inpLayer);
@@ -292,24 +309,28 @@ void InfEngineBackendNet::connect(const std::vector<Ptr<BackendWrapper> >& input
else else
inpId = it->second; inpId = it->second;
netBuilder.connect((size_t)inpId, {(size_t)layerId, i}); netBuilder.connect({(size_t)inpId, inpPortId}, {(size_t)layerId, i});
unconnectedLayersIds.erase(inpId); unconnectedPorts.erase({inpId, inpPortId});
} }
CV_Assert(!outputs.empty()); CV_Assert(!outputs.empty());
InferenceEngine::DataPtr dataPtr = infEngineDataNode(outputs[0]); for (int i = 0; i < outputs.size(); ++i)
{
InferenceEngine::DataPtr dataPtr = infEngineDataNode(outputs[i]);
std::string outputName = outputs.size() > 1 ? (layerName + "." + std::to_string(i)) : layerName;
#if INF_ENGINE_VER_MAJOR_LE(INF_ENGINE_RELEASE_2019R1) #if INF_ENGINE_VER_MAJOR_LE(INF_ENGINE_RELEASE_2019R1)
dataPtr->name = layerName; dataPtr->name = outputName;
#else #else
dataPtr->setName(layerName); dataPtr->setName(outputName);
#endif #endif
}
} }
void InfEngineBackendNet::init(Target targetId) void InfEngineBackendNet::init(Target targetId)
{ {
if (!hasNetOwner) if (!hasNetOwner)
{ {
CV_Assert(!unconnectedLayersIds.empty()); CV_Assert(!unconnectedPorts.empty());
for (int id : unconnectedLayersIds) for (const auto& port : unconnectedPorts)
{ {
InferenceEngine::Builder::OutputLayer outLayer("myconv1"); InferenceEngine::Builder::OutputLayer outLayer("myconv1");
#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2019R1) #if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2019R1)
@@ -320,7 +341,7 @@ void InfEngineBackendNet::init(Target targetId)
InferenceEngine::Precision::FP32; InferenceEngine::Precision::FP32;
outLayer.setPort(InferenceEngine::Port({}, p)); outLayer.setPort(InferenceEngine::Port({}, p));
#endif #endif
netBuilder.addLayer({InferenceEngine::PortInfo(id)}, outLayer); netBuilder.addLayer({InferenceEngine::PortInfo(port.first, port.second)}, outLayer);
} }
netBuilder.getContext().addShapeInferImpl(kOpenCVLayersType, netBuilder.getContext().addShapeInferImpl(kOpenCVLayersType,
std::make_shared<InfEngineCustomLayerShapeInfer>()); std::make_shared<InfEngineCustomLayerShapeInfer>());
@@ -409,8 +430,10 @@ void InfEngineBackendNet::addLayer(InferenceEngine::Builder::Layer& layer)
int id = netBuilder.addLayer(layer); int id = netBuilder.addLayer(layer);
const std::string& layerName = layer.getName(); const std::string& layerName = layer.getName();
CV_Assert(layers.insert({layerName, id}).second); CV_Assert(layers.insert({layerName, id}).second);
unconnectedLayersIds.insert(id); for (int i = 0; i < layer.getOutputPorts().size(); ++i)
unconnectedPorts.insert({id, i});
#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2019R1) #if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2019R1)
// By default, all the weights are connected to last ports ids. // By default, all the weights are connected to last ports ids.

@@ -132,7 +132,7 @@ private:
std::map<std::string, int> layers; std::map<std::string, int> layers;
std::vector<std::string> requestedOutputs; std::vector<std::string> requestedOutputs;
std::set<int> unconnectedLayersIds; std::set<std::pair<int, int> > unconnectedPorts;
}; };
class InfEngineBackendNode : public BackendNode class InfEngineBackendNode : public BackendNode

@@ -717,9 +717,8 @@ TEST_P(Test_TensorFlow_layers, lstm)
TEST_P(Test_TensorFlow_layers, split) TEST_P(Test_TensorFlow_layers, split)
{ {
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target == DNN_TARGET_MYRIAD && if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target == DNN_TARGET_MYRIAD)
getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_2) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_2, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
runTensorFlowNet("split"); runTensorFlowNet("split");
} }

Loading…
Cancel
Save