Added fix for other size

pull/13608/head
Alexander Nesterov 6 years ago
parent dcdbaef348
commit 97c3bcb1b7
Files changed:
  1. modules/dnn/src/dnn.cpp (2 changes)
  2. modules/dnn/src/op_inf_engine.cpp (30 changes)
  3. modules/dnn/src/op_inf_engine.hpp (4 changes)
  4. modules/dnn/test/test_layers.cpp (19 changes)
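The patch replaces InfEngineBackendLayer's cached output DataPtr with the full CNNNetwork, so getMemoryShapes can reshape the network whenever the actual input sizes differ from the ones recorded in the IR. A minimal repro sketch of the scenario being fixed (not part of the patch; the input names "data" and "second_input" are assumptions):

// Sketch only: feed an IR model inputs whose size differs from the one
// the IR was generated with. Before this commit, the backend layer
// reported the original dims, so output Mats got the wrong shape.
#include <opencv2/core.hpp>
#include <opencv2/dnn.hpp>

int main()
{
    cv::dnn::Net net = cv::dnn::readNet("net_two_inputs.xml", "net_two_inputs.bin");
    net.setPreferableBackend(cv::dnn::DNN_BACKEND_INFERENCE_ENGINE);

    int sz[] = {5, 5, 5};  // differs from the {1, 2, 3} baked into the IR
    cv::Mat first(3, sz, CV_32F), second(3, sz, CV_32F);
    cv::randu(first, 0, 255);
    cv::randu(second, 0, 255);

    net.setInput(first, "data");           // hypothetical input name
    net.setInput(second, "second_input");  // hypothetical input name
    cv::Mat out = net.forward();           // output dims now follow the reshaped net
    return 0;
}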

@@ -2600,7 +2600,7 @@ Net Net::readFromModelOptimizer(const String& xml, const String& bin)
         backendNode->net = Ptr<InfEngineBackendNet>(new InfEngineBackendNet(ieNet));
         for (auto& it : ieNet.getOutputsInfo())
         {
-            Ptr<Layer> cvLayer(new InfEngineBackendLayer(it.second));
+            Ptr<Layer> cvLayer(new InfEngineBackendLayer(ieNet));
             InferenceEngine::CNNLayerPtr ieLayer = ieNet.getLayerByName(it.first.c_str());
             CV_Assert(ieLayer);

@@ -718,19 +718,33 @@ Mat infEngineBlobToMat(const InferenceEngine::Blob::Ptr& blob)
     return Mat(size, CV_32F, (void*)blob->buffer());
 }
 
-InfEngineBackendLayer::InfEngineBackendLayer(const InferenceEngine::DataPtr& output_)
-{
-    output = output_;
-}
-
 bool InfEngineBackendLayer::getMemoryShapes(const std::vector<MatShape> &inputs,
                                             const int requiredOutputs,
                                             std::vector<MatShape> &outputs,
                                             std::vector<MatShape> &internals) const
 {
-    std::vector<size_t> dims = output->dims;
-    std::vector<int> shape(dims.rbegin(), dims.rend());
-    outputs.assign(1, shape);
+    InferenceEngine::ICNNNetwork::InputShapes inShapes = t_net.getInputShapes();
+    InferenceEngine::ICNNNetwork::InputShapes::iterator itr;
+    bool equal_flag = true;
+    size_t i = 0;
+    for (itr = inShapes.begin(); itr != inShapes.end(); ++itr)
+    {
+        InferenceEngine::SizeVector currentInShape(inputs[i].begin(), inputs[i].end());
+        if (itr->second != currentInShape)
+        {
+            itr->second = currentInShape;
+            equal_flag = false;
+        }
+        i++;
+    }
+
+    if (!equal_flag)
+    {
+        InferenceEngine::CNNNetwork curr_t_net(t_net);
+        curr_t_net.reshape(inShapes);
+    }
+
+    std::vector<size_t> dims = t_net.getOutputsInfo()[name]->getDims();
+    outputs.push_back(MatShape(dims.begin(), dims.end()));
     return false;
 }
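For context, the added branch follows the classic (pre-2020) Inference Engine reshape pattern: read the input-shape map, overwrite the entries that changed, then ask the network to re-propagate shapes. A standalone sketch under that old API; the file names and the input name "data" are placeholders:

#include <inference_engine.hpp>

void reshapeToNewInput()
{
    InferenceEngine::CNNNetReader reader;
    reader.ReadNetwork("model.xml");   // placeholder IR file names
    reader.ReadWeights("model.bin");
    InferenceEngine::CNNNetwork network = reader.getNetwork();

    // InputShapes is a std::map<std::string, SizeVector>.
    InferenceEngine::ICNNNetwork::InputShapes shapes = network.getInputShapes();
    shapes["data"] = {1, 3, 5, 5};     // "data" is a placeholder input name
    network.reshape(shapes);           // re-propagates dims through every layer
}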

@@ -260,7 +260,7 @@ InferenceEngine::TBlob<int16_t>::Ptr convertFp16(const InferenceEngine::Blob::Ptr& blob)
 class InfEngineBackendLayer : public Layer
 {
 public:
-    InfEngineBackendLayer(const InferenceEngine::DataPtr& output);
+    InfEngineBackendLayer(const InferenceEngine::CNNNetwork &t_net_) : t_net(t_net_) {};
 
     virtual bool getMemoryShapes(const std::vector<MatShape> &inputs,
                                  const int requiredOutputs,
@@ -273,7 +273,7 @@ public:
     virtual bool supportBackend(int backendId) CV_OVERRIDE;
 
 private:
-    InferenceEngine::DataPtr output;
+    InferenceEngine::CNNNetwork t_net;
 };
 
 #endif  // HAVE_INF_ENGINE

@@ -1008,8 +1008,8 @@ INSTANTIATE_TEST_CASE_P(/**/, Layer_Test_Convolution_DLDT,
 //   net.save('/path/to/caffemodel')
 //
 // 3. Convert using ModelOptimizer.
-typedef testing::TestWithParam<tuple<int, int, Target> > Test_DLDT_two_inputs;
-TEST_P(Test_DLDT_two_inputs, as_IR)
+typedef testing::TestWithParam<tuple<int, int, Target, std::vector<int> > > Test_DLDT_two_inputs_3dim;
+TEST_P(Test_DLDT_two_inputs_3dim, as_IR)
 {
     int firstInpType = get<0>(GetParam());
     int secondInpType = get<1>(GetParam());
@@ -1021,9 +1021,9 @@ TEST_P(Test_DLDT_two_inputs, as_IR)
 #endif
     Net net = readNet(_tf("net_two_inputs.xml"), _tf("net_two_inputs.bin"));
-    int inpSize[] = {1, 2, 3};
-    Mat firstInp(3, &inpSize[0], firstInpType);
-    Mat secondInp(3, &inpSize[0], secondInpType);
+    std::vector<int> inpSize = get<3>(GetParam());
+    Mat firstInp(3, inpSize.data(), firstInpType);
+    Mat secondInp(3, inpSize.data(), secondInpType);
     randu(firstInp, 0, 255);
     randu(secondInp, 0, 255);
@@ -1046,6 +1046,15 @@ TEST_P(Test_DLDT_two_inputs, as_IR)
     }
 }
 
+std::vector< std::vector<int> > list_sizes{ {1, 2, 3}, {3, 2, 1}, {5, 5, 5}, {13, 7, 11} };
+
+INSTANTIATE_TEST_CASE_P(/*nothing*/, Test_DLDT_two_inputs_3dim, Combine(
+  Values(CV_8U, CV_32F), Values(CV_8U, CV_32F),
+  testing::ValuesIn(getAvailableTargets(DNN_BACKEND_INFERENCE_ENGINE)),
+  testing::ValuesIn(list_sizes)
+));
+
+typedef testing::TestWithParam<tuple<int, int, Target> > Test_DLDT_two_inputs;
 TEST_P(Test_DLDT_two_inputs, as_backend)
 {
     static const float kScale = 0.5f;
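For readers unfamiliar with googletest parameter generators: Combine takes the cross product of its arguments, so the instantiation above runs as_IR once per (input type, input type, target, shape) combination. A self-contained sketch of the same pattern, with illustrative names not taken from the patch:

#include <gtest/gtest.h>
#include <tuple>
#include <vector>

// Hypothetical fixture mirroring the structure of Test_DLDT_two_inputs_3dim.
typedef testing::TestWithParam<std::tuple<int, std::vector<int> > > ShapeParamTest;

TEST_P(ShapeParamTest, cross_product)
{
    int depth = std::get<0>(GetParam());
    std::vector<int> shape = std::get<1>(GetParam());
    EXPECT_EQ(shape.size(), 3u);  // every listed shape is 3-dimensional
    (void)depth;
}

static std::vector<std::vector<int> > shapes{ {1, 2, 3}, {5, 5, 5} };

// 2 depths x 2 shapes = 4 generated test instances.
INSTANTIATE_TEST_CASE_P(/*nothing*/, ShapeParamTest, testing::Combine(
    testing::Values(0, 5),  // stand-ins for CV_8U, CV_32F in the real test
    testing::ValuesIn(shapes)
));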
