Merge pull request #16014 from dkurt:dnn_ie_pooling_with_indices

Alexander Alekhin 5 years ago
commit 38180c2c97
Changed files:
  1. modules/dnn/perf/perf_net.cpp (2 changed lines)
  2. modules/dnn/src/dnn.cpp (2 changed lines)
  3. modules/dnn/src/op_inf_engine.cpp (45 changed lines)
  4. modules/dnn/src/op_inf_engine.hpp (2 changed lines)
  5. modules/dnn/test/test_tf_importer.cpp (5 changed lines)

@@ -108,7 +108,7 @@ PERF_TEST_P_(DNNTestNetwork, Inception_5h)
 PERF_TEST_P_(DNNTestNetwork, ENet)
 {
-    if ((backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019) ||
+    if ((backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target != DNN_TARGET_CPU) ||
         (backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16))
         throw SkipTestException("");
     processNet("dnn/Enet-model-best.net", "", "enet.yml",

@@ -1624,7 +1624,7 @@ struct Net::Impl
             Ptr<Layer> layer = ld.layerInstance;
             if (!fused && !layer->supportBackend(preferableBackend))
             {
-                bool customizable = ld.id != 0 && ld.outputBlobs.size() == 1 &&
+                bool customizable = ld.id != 0 &&
                                     INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2019R2) &&
                                     supportsCPUFallback;
                 // TODO: there is a bug in Myriad plugin with custom layers shape infer.

@@ -278,11 +278,28 @@ void InfEngineBackendNet::connect(const std::vector<Ptr<BackendWrapper> >& input
     {
         const auto& inp = inpWrappers[i];
         const std::string& inpName = inp->dataPtr->getName();
+        std::string inpLayerName = inpName;
+        size_t inpPortId = inpName.rfind('.');
+        if (inpPortId != std::string::npos)
+        {
+            std::string portIdStr = inpName.substr(inpPortId + 1);
+            if (std::all_of(portIdStr.begin(), portIdStr.end(), ::isdigit))
+            {
+                inpLayerName = inpName.substr(0, inpPortId);
+                inpPortId = atoi(portIdStr.c_str());
+            }
+            else
+                inpPortId = 0;
+        }
+        else
+            inpPortId = 0;
+
         int inpId;
-        it = layers.find(inpName);
+        it = layers.find(inpLayerName);
         if (it == layers.end())
         {
-            InferenceEngine::Builder::InputLayer inpLayer(!inpName.empty() ? inpName : kDefaultInpLayerName);
+            InferenceEngine::Builder::InputLayer inpLayer(!inpLayerName.empty() ? inpLayerName : kDefaultInpLayerName);
             std::vector<size_t> shape(inp->blob->getTensorDesc().getDims());
             inpLayer.setPort(InferenceEngine::Port(shape));
             inpId = netBuilder.addLayer(inpLayer);
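The hunk above introduces a naming convention: an input blob named "pool1.1" addresses output port 1 of layer "pool1", while a bare "pool1" (or a name whose suffix after the last '.' is not purely numeric) refers to port 0. A minimal standalone sketch of that rule; the helper name parsePortName is hypothetical, since the real logic lives inline in InfEngineBackendNet::connect:

#include <algorithm>
#include <cctype>
#include <cstdlib>
#include <string>
#include <utility>

// Split "layerName.portId" into its two parts; names without a numeric
// suffix map to port 0, mirroring the parsing added in the diff above.
static std::pair<std::string, size_t> parsePortName(const std::string& inpName)
{
    std::string layerName = inpName;
    size_t portId = 0;
    size_t dotPos = inpName.rfind('.');
    if (dotPos != std::string::npos)
    {
        std::string portIdStr = inpName.substr(dotPos + 1);
        if (!portIdStr.empty() &&
            std::all_of(portIdStr.begin(), portIdStr.end(), ::isdigit))
        {
            layerName = inpName.substr(0, dotPos);
            portId = atoi(portIdStr.c_str());
        }
    }
    return {layerName, portId};  // e.g. {"pool1", 1} for "pool1.1"
}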
@@ -292,24 +309,28 @@ void InfEngineBackendNet::connect(const std::vector<Ptr<BackendWrapper> >& input
         else
             inpId = it->second;
-        netBuilder.connect((size_t)inpId, {(size_t)layerId, i});
-        unconnectedLayersIds.erase(inpId);
+        netBuilder.connect({(size_t)inpId, inpPortId}, {(size_t)layerId, i});
+        unconnectedPorts.erase({inpId, inpPortId});
     }
     CV_Assert(!outputs.empty());
-    InferenceEngine::DataPtr dataPtr = infEngineDataNode(outputs[0]);
+    for (int i = 0; i < outputs.size(); ++i)
+    {
+        InferenceEngine::DataPtr dataPtr = infEngineDataNode(outputs[i]);
+        std::string outputName = outputs.size() > 1 ? (layerName + "." + std::to_string(i)) : layerName;
 #if INF_ENGINE_VER_MAJOR_LE(INF_ENGINE_RELEASE_2019R1)
-    dataPtr->name = layerName;
+        dataPtr->name = outputName;
 #else
-    dataPtr->setName(layerName);
+        dataPtr->setName(outputName);
 #endif
+    }
 }

 void InfEngineBackendNet::init(Target targetId)
 {
     if (!hasNetOwner)
     {
-        CV_Assert(!unconnectedLayersIds.empty());
-        for (int id : unconnectedLayersIds)
+        CV_Assert(!unconnectedPorts.empty());
+        for (const auto& port : unconnectedPorts)
         {
             InferenceEngine::Builder::OutputLayer outLayer("myconv1");
 #if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2019R1)
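The naming loop above is the producer side of the same convention: each output of a multi-output layer (e.g. a pooling layer returning both values and indices) is named "<layer>.<index>", while a single-output layer keeps its plain name. A tiny sketch of that rule, with illustrative names rather than the PR's code:

#include <iostream>
#include <string>
#include <vector>

int main()
{
    std::string layerName = "pool1";  // hypothetical multi-output layer
    size_t numOutputs = 2;            // e.g. values + indices
    std::vector<std::string> names;
    for (size_t i = 0; i < numOutputs; ++i)
        names.push_back(numOutputs > 1 ? layerName + "." + std::to_string(i)
                                       : layerName);
    for (const std::string& n : names)
        std::cout << n << '\n';       // prints "pool1.0" and "pool1.1"
}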
@@ -320,7 +341,7 @@ void InfEngineBackendNet::init(Target targetId)
                                InferenceEngine::Precision::FP32;
             outLayer.setPort(InferenceEngine::Port({}, p));
 #endif
-            netBuilder.addLayer({InferenceEngine::PortInfo(id)}, outLayer);
+            netBuilder.addLayer({InferenceEngine::PortInfo(port.first, port.second)}, outLayer);
         }
         netBuilder.getContext().addShapeInferImpl(kOpenCVLayersType,
                                                   std::make_shared<InfEngineCustomLayerShapeInfer>());
@@ -409,8 +430,10 @@ void InfEngineBackendNet::addLayer(InferenceEngine::Builder::Layer& layer)
     int id = netBuilder.addLayer(layer);
     const std::string& layerName = layer.getName();
     CV_Assert(layers.insert({layerName, id}).second);
-    unconnectedLayersIds.insert(id);
+    for (int i = 0; i < layer.getOutputPorts().size(); ++i)
+        unconnectedPorts.insert({id, i});

 #if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2019R1)
     // By default, all the weights are connected to last ports ids.
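Taken together, these hunks replace the set of unconnected layer ids with a set of unconnected (layerId, portId) pairs: every output port is registered in addLayer(), erased in connect() once something consumes it, and any pair still present at init() time gets an explicit OutputLayer attached. A minimal sketch of that lifecycle, assuming illustrative ids and names (not the actual class members):

#include <set>
#include <utility>

int main()
{
    std::set<std::pair<int, int> > unconnectedPorts;

    // addLayer(): layer 7 exposes two output ports
    // (e.g. pooling with indices).
    int id = 7;
    for (int i = 0; i < 2; ++i)
        unconnectedPorts.insert({id, i});

    // connect(): port 0 of layer 7 feeds another layer,
    // so it is no longer an output candidate.
    unconnectedPorts.erase({7, 0});

    // init(): {7, 1} is still unconnected, so it would
    // become a network output. Exit code is 1 here.
    return (int)unconnectedPorts.size();
}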

@@ -132,7 +132,7 @@ private:
     std::map<std::string, int> layers;
     std::vector<std::string> requestedOutputs;

-    std::set<int> unconnectedLayersIds;
+    std::set<std::pair<int, int> > unconnectedPorts;
 };

 class InfEngineBackendNode : public BackendNode

@@ -717,9 +717,8 @@ TEST_P(Test_TensorFlow_layers, lstm)
 TEST_P(Test_TensorFlow_layers, split)
 {
-    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target == DNN_TARGET_MYRIAD &&
-        getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_2)
-        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_2, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target == DNN_TARGET_MYRIAD)
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
     runTensorFlowNet("split");
 }
