Merge pull request #14454 from dkurt:dnn_tf_subgraph_fusion

commit 1c092a181d
Alexander Alekhin, 6 years ago
ref: pull/14462/head^2
  1. modules/dnn/src/tensorflow/tf_graph_simplifier.cpp (65 changed lines)
  2. modules/dnn/src/tensorflow/tf_importer.cpp (19 changed lines)
  3. modules/dnn/test/test_tf_importer.cpp (7 changed lines)

modules/dnn/src/tensorflow/tf_graph_simplifier.cpp

@@ -79,7 +79,7 @@ public:
         }
     }
 
-    static const tensorflow::NodeDef& getInputNode(const tensorflow::GraphDef& net,
+    static int getInputNodeId(const tensorflow::GraphDef& net,
                               const tensorflow::NodeDef& node,
                               int inpId)
     {
@@ -92,7 +92,7 @@ public:
         for (int i = 0; i < numNodes; ++i)
         {
             if (net.node(i).name() == name)
-                return net.node(i);
+                return i;
         }
         CV_Error(Error::StsParseError, "Input node with name " + name + " not found");
     }
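
Returning the node's index instead of a const reference lets the same lookup serve both the read-only checks in match() and the mutable access needed when a fused node is rewired later in this file (see the hunk below that switches to net.mutable_node()). A minimal sketch of the two kinds of access the index enables, assuming the protobuf-generated tensorflow::GraphDef/NodeDef types used by tf_graph_simplifier.cpp; inpId stands for a value returned by getInputNodeId():

    void accessByIndex(tensorflow::GraphDef& net, int inpId)
    {
        const tensorflow::NodeDef& inpNode = net.node(inpId);     // read-only view, as in match()
        tensorflow::NodeDef* editable = net.mutable_node(inpId);  // mutable view for rewriting the node
        (void)inpNode;
        (void)editable;
    }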
@@ -104,36 +104,46 @@ public:
         matchedNodesIds.clear();
         matchedNodesIds.reserve(nodesToFuse.size());
 
-        int numNodes = net.node_size();
-        for (int i = 0; i < nodesToFuse.size(); ++i)
-        {
-            while (nodeId < numNodes && net.node(nodeId).op() == "Const")
-            {
-                nodeId += 1;
-            }
-            if (nodeId > numNodes - 1)
-                return false;
+        std::queue<int> nodesToMatch;
+        std::queue<int> targetNodes;
+        nodesToMatch.push(nodeId);
+        targetNodes.push(nodesToFuse.back());
+        while (!nodesToMatch.empty())
+        {
+            int nodeToMatch = nodesToMatch.front();
+            int targetNodeId = targetNodes.front();
+            nodesToMatch.pop();
+            targetNodes.pop();
 
-            const tensorflow::NodeDef& node = net.node(nodeId);
-            if (node.op() != nodes[nodesToFuse[i]])
+            if (std::find(matchedNodesIds.begin(), matchedNodesIds.end(), nodeToMatch) !=
+                matchedNodesIds.end())
+                continue;
+
+            const tensorflow::NodeDef& node = net.node(nodeToMatch);
+            if (node.op() != nodes[targetNodeId])
                 return false;
 
-            std::vector<int>& inputNodes = inputs[nodesToFuse[i]];
+            std::vector<int>& inputNodes = inputs[targetNodeId];
             if (inputNodes.size() != node.input_size())
                 return false;
             for (int j = 0; j < inputNodes.size(); ++j)
             {
                 if (nodes[inputNodes[j]].empty())  // Unknown input node type.
                     continue;
-                const tensorflow::NodeDef& inpNode = getInputNode(net, node, j);
-                if (inpNode.op() != nodes[inputNodes[j]])
+                nodeId = getInputNodeId(net, node, j);
+                const tensorflow::NodeDef& inpNode = net.node(nodeId);
+                if (inpNode.op() != "Const")
+                {
+                    nodesToMatch.push(nodeId);
+                    targetNodes.push(inputNodes[j]);
+                }
+                else if (nodes[inputNodes[j]] != "Const")
                     return false;
             }
-            matchedNodesIds.push_back(nodeId);
-            nodeId += 1;
+            matchedNodesIds.push_back(nodeToMatch);
         }
+
+        std::sort(matchedNodesIds.begin(), matchedNodesIds.end());
         return true;
     }
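
The old loop assumed the nodes of a fused pattern occupy consecutive (non-Const) positions in the GraphDef, which is fragile for graphs whose nodes are serialized in a different order. The rewritten match() instead starts from the pattern's output node (nodesToFuse.back()) and walks input edges with two parallel queues, checking Const inputs in place and sorting the matched indices at the end. The standalone sketch below reproduces the same queue-driven traversal on a toy adjacency-list graph; the ToyNode/ToyPattern types are illustrative, not OpenCV or TensorFlow API, and the wildcard/Const special cases of the real code are left out:

    #include <algorithm>
    #include <queue>
    #include <string>
    #include <vector>

    struct ToyNode    { std::string op; std::vector<int> inputs; };  // graph node: op type + producer ids
    struct ToyPattern { std::vector<std::string> ops;                // expected op per pattern node
                        std::vector<std::vector<int> > inputs; };    // pattern edges, same indexing

    // Match the pattern's last node against graph node rootId, traversing input
    // edges breadth-first, the same way the new Subgraph::match() does.
    static bool matchFrom(const std::vector<ToyNode>& graph, const ToyPattern& pat,
                          int rootId, std::vector<int>& matchedIds)
    {
        matchedIds.clear();
        std::queue<int> graphIds, patternIds;
        graphIds.push(rootId);
        patternIds.push((int)pat.ops.size() - 1);
        while (!graphIds.empty())
        {
            int g = graphIds.front(); graphIds.pop();
            int p = patternIds.front(); patternIds.pop();
            if (std::find(matchedIds.begin(), matchedIds.end(), g) != matchedIds.end())
                continue;  // node already matched through another path
            if (graph[g].op != pat.ops[p] || graph[g].inputs.size() != pat.inputs[p].size())
                return false;
            for (size_t j = 0; j < pat.inputs[p].size(); ++j)
            {
                graphIds.push(graph[g].inputs[j]);  // actual producer of input j
                patternIds.push(pat.inputs[p][j]);  // pattern node it must match
            }
            matchedIds.push_back(g);
        }
        std::sort(matchedIds.begin(), matchedIds.end());  // callers index into the sorted ids
        return true;
    }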
@@ -181,7 +191,7 @@ public:
         std::vector<tensorflow::NodeDef*> inputNodes(inputsNames.size());
         for (int i = 0; i < inputsNames.size(); ++i)
         {
-            inputNodes[i] = (tensorflow::NodeDef*)&getInputNode(net, *node, i);
+            inputNodes[i] = net.mutable_node(getInputNodeId(net, *node, i));
         }
         finalize(net, node, inputNodes);
     }
@@ -354,7 +364,7 @@ public:
     {
         if (!Subgraph::match(net, nodeId, matchedNodesIds))
             return false;
-        Mat maxValue = getTensorContent(net.node(nodeId + 1).attr().at("value").tensor());
+        Mat maxValue = getTensorContent(net.node(matchedNodesIds.front() + 1).attr().at("value").tensor());
         return maxValue.type() == CV_32FC1 && maxValue.total() == 1 && maxValue.at<float>(0) == 6;
     }
 };
@@ -384,6 +394,17 @@ public:
         setFusedNode("Reshape", ids);
     }
 
+    virtual bool match(const tensorflow::GraphDef& net, int nodeId, std::vector<int>& matchedNodesIds) CV_OVERRIDE
+    {
+        const tensorflow::NodeDef& node = net.node(nodeId);
+        if (node.input_size() == 0)
+            return false;
+
+        inpName = node.input(0);
+        return Subgraph::match(net, nodeId, matchedNodesIds);
+    }
+
     virtual void finalize(tensorflow::GraphDef&, tensorflow::NodeDef* fusedNode,
                           std::vector<tensorflow::NodeDef*>& inputNodes) CV_OVERRIDE
     {
@@ -395,6 +416,7 @@ public:
         }
         tensorflow::TensorProto* shapeTensor = inputNodes[1]->mutable_attr()->at("value").mutable_tensor();
         fusedNode->mutable_input()->DeleteSubrange(2, numOutDims - 1);
+        fusedNode->set_input(0, inpName);
 
         shapeTensor->clear_int_val();
         for (int i = 0; i < shape.size(); ++i)
@@ -405,6 +427,7 @@ public:
 
 private:
     int numOutDims;
+    std::string inpName;
 };
 
 class L2NormalizeSubgraph : public Subgraph
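
The three hunks above work together: the new match() override records the name of input 0 of the node where matching starts, the new inpName member carries it, and finalize() pins it back as input 0 of the fused Reshape node after the surplus inputs are trimmed. A minimal sketch of the two protobuf calls involved (the wrapper function is illustrative; tensorflow::NodeDef comes from the generated graph headers):

    #include <string>

    static void rewriteFusedInputs(tensorflow::NodeDef* fusedNode, const std::string& inpName, int numOutDims)
    {
        // Remove numOutDims - 1 entries from the input list, starting at index 2.
        fusedNode->mutable_input()->DeleteSubrange(2, numOutDims - 1);
        // Re-point input 0 at the tensor name recorded during match().
        fusedNode->set_input(0, inpName);
    }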
@@ -685,9 +708,9 @@ void simplifySubgraphs(tensorflow::GraphDef& net)
     subgraphs.push_back(Ptr<Subgraph>(new DeconvolutionSameKerasSubgraph()));
     subgraphs.push_back(Ptr<Subgraph>(new ResizeBilinearSubgraph()));
     subgraphs.push_back(Ptr<Subgraph>(new UpsamplingKerasSubgraph()));
-    subgraphs.push_back(Ptr<Subgraph>(new ReshapeAsShapeSubgraph()));
     subgraphs.push_back(Ptr<Subgraph>(new SoftMaxSlimSubgraph()));
     subgraphs.push_back(Ptr<Subgraph>(new SoftMaxSlimV2Subgraph()));
+    subgraphs.push_back(Ptr<Subgraph>(new ReshapeAsShapeSubgraph()));
 
     int numNodes = net.node_size();
     std::vector<int> matchedNodesIds;

modules/dnn/src/tensorflow/tf_importer.cpp

@@ -1126,7 +1126,15 @@ void TFImporter::populateNet(Net dstNet)
             {
                 Mat newShape = getTensorContent(getConstBlob(layer, value_id, 1));
-                if (newShape.total() != 4 && inpLayout == DATA_LAYOUT_NHWC)
+                if (inpLayout == DATA_LAYOUT_NHWC)
                 {
+                    if (newShape.total() == 4)
+                    {
+                        // NHWC->NCHW
+                        std::swap(*newShape.ptr<int32_t>(0, 2), *newShape.ptr<int32_t>(0, 3));
+                        std::swap(*newShape.ptr<int32_t>(0, 1), *newShape.ptr<int32_t>(0, 2));
+                    }
+                    if (newShape.total() != 4 || newShape.at<int>(1) == 1)
+                    {
                         LayerParams permLP;
                         int order[] = {0, 2, 3, 1};  // From OpenCV's NCHW to NHWC.
@@ -1140,11 +1148,6 @@ void TFImporter::populateNet(Net dstNet)
                         inpId = Pin(permName);
                         inpLayout = DATA_LAYOUT_NCHW;
                     }
-                else if (newShape.total() == 4 && inpLayout == DATA_LAYOUT_NHWC)
-                {
-                    // NHWC->NCHW
-                    std::swap(*newShape.ptr<int32_t>(0, 2), *newShape.ptr<int32_t>(0, 3));
-                    std::swap(*newShape.ptr<int32_t>(0, 1), *newShape.ptr<int32_t>(0, 2));
-                }
                 }
                 layerParams.set("dim", DictValue::arrayInt<int*>(newShape.ptr<int>(), newShape.total()));
@@ -1381,7 +1384,9 @@ void TFImporter::populateNet(Net dstNet)
             // num_split
             // 1st blob is dims tensor
             int axis = getConstBlob(layer, value_id, 0).int_val().Get(0);
-            layerParams.set("axis", toNCHW(axis));
+            if (getDataLayout(name, data_layouts) == DATA_LAYOUT_NHWC)
+                axis = toNCHW(axis);
+            layerParams.set("axis", axis);
 
             int id = dstNet.addLayer(name, "Slice", layerParams);
             layer_id[name] = id;
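
The Split fix makes the axis conversion conditional: the TensorFlow axis is remapped to NCHW numbering only when the producing layer is tracked as NHWC, and is used as-is otherwise. The remapping itself boils down to a fixed table; the helper below is only illustrative (the importer uses its own toNCHW(), and negative Python-style axes are not handled in this sketch):

    #include <cassert>

    // NHWC axis index -> NCHW axis index: N stays 0, H -> 2, W -> 3, C -> 1.
    static int nhwcAxisToNchw(int axis)
    {
        static const int remap[4] = {0, 2, 3, 1};
        assert(0 <= axis && axis < 4);
        return remap[axis];
    }

    int main()
    {
        // A TensorFlow Split over channels (axis 3 in NHWC) becomes a Slice over
        // axis 1 once the blob is stored as NCHW.
        assert(nhwcAxisToNchw(3) == 1);
        return 0;
    }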

modules/dnn/test/test_tf_importer.cpp

@@ -675,6 +675,13 @@ TEST_P(Test_TensorFlow_layers, relu6)
     runTensorFlowNet("keras_relu6", /*hasText*/ true);
 }
 
+TEST_P(Test_TensorFlow_layers, subpixel)
+{
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE)
+        throw SkipTestException("");
+    runTensorFlowNet("subpixel");
+}
+
 TEST_P(Test_TensorFlow_layers, keras_mobilenet_head)
 {
     runTensorFlowNet("keras_mobilenet_head");
