Fix multiple inputs for models from Intel's Model Optimizer

pull/11294/head
Dmitry Kurtaev 7 years ago
parent 070ec313f2
commit 4ef6c91583
Changed files:
  1. modules/dnn/src/dnn.cpp (3 lines changed)
  2. modules/dnn/test/test_layers.cpp (38 lines changed)

@@ -1944,7 +1944,8 @@ Net Net::readFromModelOptimizer(const String& xml, const String& bin)
         ld.layerInstance = Ptr<Layer>(new InfEngineBackendLayer(it.second));
         ld.backendNodes[DNN_BACKEND_INFERENCE_ENGINE] = backendNode;
-        cvNet.connect(0, 0, lid, 0);
+        for (int i = 0; i < inputsNames.size(); ++i)
+            cvNet.connect(0, i, lid, i);
     }
     cvNet.setPreferableBackend(DNN_BACKEND_INFERENCE_ENGINE);
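
Note: the hunk above wires every named input of the Model Optimizer network to the DNN input layer instead of only the first one. A minimal usage sketch of how such a multi-input model can then be fed; the file names, input names, and shapes below are placeholders, not part of this commit, and it assumes OpenCV was built with Inference Engine support:

    #include <opencv2/dnn.hpp>

    int main()
    {
        // Load an IR pair produced by the Model Optimizer (paths are placeholders).
        cv::dnn::Net net = cv::dnn::readNet("model.xml", "model.bin");

        // One setInput call per named input; shapes must match the IR description.
        int sz[] = {1, 2, 3};
        cv::Mat first(3, sz, CV_32F), second(3, sz, CV_32F);
        first.setTo(cv::Scalar(1.0));
        second.setTo(cv::Scalar(2.0));
        net.setInput(first, "data");
        net.setInput(second, "second_input");

        // Models loaded this way default to the Inference Engine backend
        // (see setPreferableBackend in the hunk above).
        cv::Mat out = net.forward();
        return 0;
    }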

@@ -866,6 +866,44 @@ TEST(Layer_Test_Convolution_DLDT, Accuracy)
     normAssert(outDefault, out);
 }
+// 1. Create a .prototxt file with the following network:
+// layer {
+//   type: "Input" name: "data" top: "data"
+//   input_param { shape { dim: 1 dim: 2 dim: 3 } }
+// }
+// layer {
+//   type: "Input" name: "second_input" top: "second_input"
+//   input_param { shape { dim: 1 dim: 2 dim: 3 } }
+// }
+// layer {
+//   type: "Eltwise" name: "output" top: "output"
+//   bottom: "data" bottom: "second_input"
+//   eltwise_param { operation: SUM }
+// }
+//
+// 2. Create a .caffemodel file using Caffe:
+//
+//     import caffe
+//     net = caffe.Net('/path/to/prototxt', caffe.TEST)
+//     net.save('/path/to/caffemodel')
+//
+// 3. Convert using ModelOptimizer.
+TEST(Test_DLDT, two_inputs)
+{
+    Net net = readNet(_tf("net_two_inputs.xml"), _tf("net_two_inputs.bin"));
+    int inpSize[] = {1, 2, 3};
+    Mat firstInp(3, &inpSize[0], CV_32F);
+    Mat secondInp(3, &inpSize[0], CV_32F);
+    randu(firstInp, -1, 1);
+    randu(secondInp, -1, 1);
+    net.setInput(firstInp, "data");
+    net.setInput(secondInp, "second_input");
+    Mat out = net.forward();
+    normAssert(out, firstInp + secondInp);
+}
 #endif // HAVE_INF_ENGINE
 }} // namespace
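
The added test checks the output against the expected elementwise sum with normAssert, a helper from the dnn test suite. As a rough sketch only (not the actual helper; the error thresholds here are illustrative assumptions), the kind of comparison it performs looks like this:

    #include <opencv2/ts.hpp>  // cvtest::norm and gtest's EXPECT_LE

    // Illustrative approximation of a normAssert-style check: bound the mean
    // absolute and the maximum absolute difference between the reference blob
    // and the produced blob.
    static void checkClose(const cv::Mat& ref, const cv::Mat& out,
                           double l1 = 1e-5, double lInf = 1e-4)
    {
        double normL1 = cvtest::norm(ref, out, cv::NORM_L1) / ref.total();
        double normInf = cvtest::norm(ref, out, cv::NORM_INF);
        EXPECT_LE(normL1, l1);    // average per-element error
        EXPECT_LE(normInf, lInf); // worst-case per-element error
    }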
