Remove ASSERT_ANY_THROW checks for Myriad plugin and FP32 networks

pull/13692/head
Dmitry Kurtaev 6 years ago
parent 4f668e1023
commit ff775b2e54
  1. modules/dnn/src/dnn.cpp (8 changes)
  2. modules/dnn/src/layers/convolution_layer.cpp (28 changes)
  3. modules/dnn/src/layers/pooling_layer.cpp (8 changes)
  4. modules/dnn/src/op_inf_engine.cpp (6 changes)
  5. modules/dnn/test/test_layers.cpp (67 changes)

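In short, the tests no longer expect net.forward() to throw on Myriad (which accepts only FP16 IR models); instead they load an FP16 variant of each IR model when the target is FP16-only. A minimal sketch of the selection pattern used throughout the test changes below; it assumes the "*_fp16.xml/.bin" files exist in the test data and that targetId and _tf() come from the test fixture:

    // Hedged sketch: pick the FP16 IR files for FP16-only targets, the FP32 files otherwise.
    std::string suffix = (targetId == DNN_TARGET_OPENCL_FP16 || targetId == DNN_TARGET_MYRIAD) ? "_fp16" : "";
    cv::dnn::Net net = cv::dnn::readNet(_tf("layer_convolution" + suffix + ".xml"),
                                        _tf("layer_convolution" + suffix + ".bin"));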
@@ -142,7 +142,13 @@ private:
#else
cv::dnn::Net net;
cv::dnn::LayerParams lp;
-net.addLayerToPrev("testLayer", "Identity", lp);
+lp.set("kernel_size", 1);
+lp.set("num_output", 1);
+lp.set("bias_term", false);
+lp.type = "Convolution";
+lp.name = "testLayer";
+lp.blobs.push_back(Mat({1, 2, 1, 1}, CV_32F, Scalar(1)));
+net.addLayerToPrev(lp.name, lp.type, lp);
net.setPreferableBackend(cv::dnn::DNN_BACKEND_INFERENCE_ENGINE);
net.setPreferableTarget(target);
static int inpDims[] = {1, 2, 3, 4};

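The dnn.cpp hunk above swaps the dummy Identity layer used to probe Inference Engine targets for a tiny 1x1 Convolution with constant weights, which every plugin (including Myriad) can build. A hedged sketch of such a probe as a complete function; the function name, the setInput() call and the try/catch tail are assumptions, since the hunk only shows the network construction:

    // Hedged sketch (assumed wrapper): build a minimal 1x1 Convolution net and try a
    // forward pass to find out whether the requested target is actually usable.
    static bool checkIETarget(int target)
    {
        cv::dnn::Net net;
        cv::dnn::LayerParams lp;
        lp.set("kernel_size", 1);
        lp.set("num_output", 1);
        lp.set("bias_term", false);
        lp.type = "Convolution";
        lp.name = "testLayer";
        lp.blobs.push_back(cv::Mat({1, 2, 1, 1}, CV_32F, cv::Scalar(1)));  // OIHW: 1 output, 2 input channels
        net.addLayerToPrev(lp.name, lp.type, lp);
        net.setPreferableBackend(cv::dnn::DNN_BACKEND_INFERENCE_ENGINE);
        net.setPreferableTarget(target);
        static int inpDims[] = {1, 2, 3, 4};  // NCHW input shape
        net.setInput(cv::Mat(4, inpDims, CV_32FC1, cv::Scalar(0)));
        try { net.forward(); } catch (...) { return false; }
        return true;
    }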
@@ -481,13 +481,13 @@ public:
#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
InferenceEngine::Builder::ConvolutionLayer ieLayer(name);
-ieLayer.setKernel({kernel.height, kernel.width});
-ieLayer.setStrides({stride.height, stride.width});
-ieLayer.setDilation({dilation.height, dilation.width});
-ieLayer.setPaddingsBegin({pad.height, pad.width});
-ieLayer.setPaddingsEnd({pad.height, pad.width});
-ieLayer.setGroup(group);
-ieLayer.setOutDepth(outCn);
+ieLayer.setKernel({(size_t)kernel.height, (size_t)kernel.width});
+ieLayer.setStrides({(size_t)stride.height, (size_t)stride.width});
+ieLayer.setDilation({(size_t)dilation.height, (size_t)dilation.width});
+ieLayer.setPaddingsBegin({(size_t)pad.height, (size_t)pad.width});
+ieLayer.setPaddingsEnd({(size_t)pad.height, (size_t)pad.width});
+ieLayer.setGroup((size_t)group);
+ieLayer.setOutDepth((size_t)outCn);
ieLayer.setWeights(ieWeights);
if (ieBiases)
@@ -1713,13 +1713,13 @@ public:
InferenceEngine::Builder::DeconvolutionLayer ieLayer(name);
-ieLayer.setKernel({kernel.height, kernel.width});
-ieLayer.setStrides({stride.height, stride.width});
-ieLayer.setDilation({dilation.height, dilation.width});
-ieLayer.setPaddingsBegin({pad.height, pad.width});
-ieLayer.setPaddingsEnd({pad.height, pad.width});
-ieLayer.setGroup(group);
-ieLayer.setOutDepth(numOutput);
+ieLayer.setKernel({(size_t)kernel.height, (size_t)kernel.width});
+ieLayer.setStrides({(size_t)stride.height, (size_t)stride.width});
+ieLayer.setDilation({(size_t)dilation.height, (size_t)dilation.width});
+ieLayer.setPaddingsBegin({(size_t)pad.height, (size_t)pad.width});
+ieLayer.setPaddingsEnd({(size_t)pad.height, (size_t)pad.width});
+ieLayer.setGroup((size_t)group);
+ieLayer.setOutDepth((size_t)numOutput);
ieLayer.setWeights(wrapToInfEngineBlob(blobs[0], InferenceEngine::Layout::OIHW));
if (hasBias())

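The only change in the two convolution hunks above (and in the pooling hunk that follows) is the explicit size_t casts. Assuming the 2018R5 Builder setters take std::vector<size_t>, brace-initializing such a vector from the int members of cv::Size is a narrowing conversion that compilers reject or warn about. A small standalone illustration, not from the patch:

    #include <opencv2/core.hpp>
    #include <vector>

    int main()
    {
        cv::Size kernel(3, 3);  // height/width are plain ints
        // std::vector<size_t> bad = {kernel.height, kernel.width};              // narrowing: int -> size_t in braced list
        std::vector<size_t> ok = {(size_t)kernel.height, (size_t)kernel.width};  // explicit casts, as in the patch
        (void)ok;
        return 0;
    }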
@@ -261,10 +261,10 @@ public:
if (type == MAX || type == AVE)
{
InferenceEngine::Builder::PoolingLayer ieLayer(name);
-ieLayer.setKernel({kernel.height, kernel.width});
-ieLayer.setStrides({stride.height, stride.width});
-ieLayer.setPaddingsBegin({pad_t, pad_l});
-ieLayer.setPaddingsEnd({pad_b, pad_r});
+ieLayer.setKernel({(size_t)kernel.height, (size_t)kernel.width});
+ieLayer.setStrides({(size_t)stride.height, (size_t)stride.width});
+ieLayer.setPaddingsBegin({(size_t)pad_t, (size_t)pad_l});
+ieLayer.setPaddingsEnd({(size_t)pad_b, (size_t)pad_r});
ieLayer.setPoolingType(type == MAX ?
InferenceEngine::Builder::PoolingLayer::PoolingType::MAX :
InferenceEngine::Builder::PoolingLayer::PoolingType::AVG);

@@ -82,7 +82,7 @@ void InfEngineBackendNet::connect(const std::vector<Ptr<BackendWrapper> >& input
CV_Assert(it != layers.end());
const int layerId = it->second;
-for (int i = 0; i < inpWrappers.size(); ++i)
+for (size_t i = 0; i < inpWrappers.size(); ++i)
{
const auto& inp = inpWrappers[i];
const std::string& inpName = inp->dataPtr->name;
@@ -103,7 +103,7 @@ void InfEngineBackendNet::connect(const std::vector<Ptr<BackendWrapper> >& input
else
inpId = it->second;
-netBuilder.connect(inpId, {layerId, i});
+netBuilder.connect((size_t)inpId, {(size_t)layerId, i});
unconnectedLayersIds.erase(inpId);
}
CV_Assert(!outputs.empty());
@@ -119,7 +119,7 @@ void InfEngineBackendNet::init(int targetId)
for (int id : unconnectedLayersIds)
{
InferenceEngine::Builder::OutputLayer outLayer("myconv1");
-netBuilder.addLayer({id}, outLayer);
+netBuilder.addLayer({InferenceEngine::PortInfo(id)}, outLayer);
}
cnn = InferenceEngine::CNNNetwork(InferenceEngine::Builder::convertToICNNNetwork(netBuilder.build()));
}

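The op_inf_engine.cpp hunks follow the same theme: the Builder graph addresses layers and ports through size_t-based PortInfo values, so the int layer ids and loop indices are cast or wrapped explicitly. A hedged fragment of the connection pattern, assuming a netBuilder of type InferenceEngine::Builder::Network is in scope and that PortInfo(layerId, portId = 0) is the 2018R5 constructor:

    // Hedged sketch: connect output port 0 of inpId to input port i of layerId,
    // and expose an unconnected layer through an explicit OutputLayer.
    size_t inpId = 0, layerId = 1, i = 0;                      // placeholder ids for illustration
    netBuilder.connect((size_t)inpId, {(size_t)layerId, i});   // PortInfo built implicitly from size_t
    netBuilder.addLayer({InferenceEngine::PortInfo(layerId)},
                        InferenceEngine::Builder::OutputLayer("out"));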
@@ -923,8 +923,9 @@ TEST_P(Layer_Test_Convolution_DLDT, Accuracy)
{
Target targetId = GetParam();
+std::string suffix = (targetId == DNN_TARGET_OPENCL_FP16 || targetId == DNN_TARGET_MYRIAD) ? "_fp16" : "";
Net netDefault = readNet(_tf("layer_convolution.caffemodel"), _tf("layer_convolution.prototxt"));
-Net net = readNet(_tf("layer_convolution.xml"), _tf("layer_convolution.bin"));
+Net net = readNet(_tf("layer_convolution" + suffix + ".xml"), _tf("layer_convolution" + suffix + ".bin"));
Mat inp = blobFromNPY(_tf("blob.npy"));
@@ -935,22 +936,15 @@ TEST_P(Layer_Test_Convolution_DLDT, Accuracy)
net.setInput(inp);
net.setPreferableTarget(targetId);
-if (targetId != DNN_TARGET_MYRIAD)
-{
-Mat out = net.forward();
+Mat out = net.forward();
-normAssert(outDefault, out);
+double l1 = (targetId == DNN_TARGET_OPENCL_FP16 || targetId == DNN_TARGET_MYRIAD) ? 1.4e-3 : 1e-5;
+double lInf = (targetId == DNN_TARGET_OPENCL_FP16 || targetId == DNN_TARGET_MYRIAD) ? 1.8e-2 : 1e-4;
+normAssert(outDefault, out, "", l1, lInf);
-std::vector<int> outLayers = net.getUnconnectedOutLayers();
-ASSERT_EQ(net.getLayer(outLayers[0])->name, "output_merge");
-ASSERT_EQ(net.getLayer(outLayers[0])->type, "Concat");
-}
-else
-{
-// An assertion is expected because the model is in FP32 format but
-// Myriad plugin supports only FP16 models.
-ASSERT_ANY_THROW(net.forward());
-}
+std::vector<int> outLayers = net.getUnconnectedOutLayers();
+ASSERT_EQ(net.getLayer(outLayers[0])->name, "output");
+ASSERT_EQ(net.getLayer(outLayers[0])->type, "Convolution");
}
TEST_P(Layer_Test_Convolution_DLDT, setInput_uint8)
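Because the FP16 models cannot match the FP32 Caffe reference bit-for-bit, the exact normAssert in the hunk above is replaced by relaxed average (l1) and per-element (lInf) thresholds for the FP16 targets. The ballpark follows from half precision keeping only 10 mantissa bits; a tiny standalone check of that unit roundoff (intuition, not part of the patch):

    #include <cmath>
    #include <cstdio>

    int main()
    {
        // One FP16 rounding step has a relative error of roughly 2^-11 ~= 4.9e-4; summed
        // over a convolution's multiply-adds this is consistent with l1 ~ 1.4e-3 and
        // lInf ~ 1.8e-2 above, versus 1e-5 / 1e-4 for the FP32 targets.
        std::printf("FP16 unit roundoff: %g\n", std::ldexp(1.0, -11));
        return 0;
    }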
@@ -962,23 +956,16 @@ TEST_P(Layer_Test_Convolution_DLDT, setInput_uint8)
randu(inputs[0], 0, 255);
inputs[0].convertTo(inputs[1], CV_32F);
+std::string suffix = (targetId == DNN_TARGET_OPENCL_FP16 || targetId == DNN_TARGET_MYRIAD) ? "_fp16" : "";
Mat outs[2];
for (int i = 0; i < 2; ++i)
{
-Net net = readNet(_tf("layer_convolution.xml"), _tf("layer_convolution.bin"));
+Net net = readNet(_tf("layer_convolution" + suffix + ".xml"), _tf("layer_convolution" + suffix + ".bin"));
net.setPreferableTarget(targetId);
net.setInput(inputs[i]);
-if (targetId != DNN_TARGET_MYRIAD)
-{
-outs[i] = net.forward();
-ASSERT_EQ(outs[i].type(), CV_32F);
-}
-else
-{
-// An assertion is expected because the model is in FP32 format but
-// Myriad plugin supports only FP16 models.
-ASSERT_ANY_THROW(net.forward());
-}
+outs[i] = net.forward();
+ASSERT_EQ(outs[i].type(), CV_32F);
}
if (targetId != DNN_TARGET_MYRIAD)
normAssert(outs[0], outs[1]);
@@ -1020,7 +1007,8 @@ TEST_P(Test_DLDT_two_inputs_3dim, as_IR)
throw SkipTestException("Test is enabled starts from OpenVINO 2018R4");
#endif
-Net net = readNet(_tf("net_two_inputs.xml"), _tf("net_two_inputs.bin"));
+std::string suffix = (targetId == DNN_TARGET_OPENCL_FP16 || targetId == DNN_TARGET_MYRIAD) ? "_fp16" : "";
+Net net = readNet(_tf("net_two_inputs" + suffix + ".xml"), _tf("net_two_inputs.bin"));
std::vector<int> inpSize = get<3>(GetParam());
Mat firstInp(3, inpSize.data(), firstInpType);
Mat secondInp(3, inpSize.data(), secondInpType);
@@ -1030,20 +1018,17 @@ TEST_P(Test_DLDT_two_inputs_3dim, as_IR)
net.setInput(firstInp, "data");
net.setInput(secondInp, "second_input");
net.setPreferableTarget(targetId);
-if (targetId != DNN_TARGET_MYRIAD)
-{
-Mat out = net.forward();
-Mat ref;
-cv::add(firstInp, secondInp, ref, Mat(), CV_32F);
-normAssert(out, ref);
-}
-else
-{
-// An assertion is expected because the model is in FP32 format but
-// Myriad plugin supports only FP16 models.
-ASSERT_ANY_THROW(net.forward());
-}
+double l1 = ((targetId == DNN_TARGET_OPENCL_FP16 || targetId == DNN_TARGET_MYRIAD) &&
+            (firstInpType == CV_32F || secondInpType == CV_32F)) ? 0.06 : 0.0;
+double lInf = ((targetId == DNN_TARGET_OPENCL_FP16 || targetId == DNN_TARGET_MYRIAD) &&
+              (firstInpType == CV_32F || secondInpType == CV_32F)) ? 0.23 : 0.0;
+Mat out = net.forward();
+Mat ref;
+cv::add(firstInp, secondInp, ref, Mat(), CV_32F);
+normAssert(out, ref, "", l1, lInf);
}
std::vector< std::vector<int> > list_sizes{ {1, 2, 3}, {3, 2, 1}, {5, 5, 5}, {13, 7, 11} };
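Note that in the two-input test the tolerances are non-zero only when one of the inputs is CV_32F on an FP16 target: 8-bit values 0..255 are exactly representable in half precision, while random FP32 values lose mantissa bits when the plugin converts them. A standalone sanity check of that intuition (uses CV_16F, available in OpenCV 4.x; not part of the patch):

    #include <opencv2/core.hpp>
    #include <cstdio>

    int main()
    {
        cv::Mat vals(1, 256, CV_32F), half, back;
        for (int i = 0; i < 256; ++i) vals.at<float>(i) = (float)i;  // all possible U8 values
        vals.convertTo(half, CV_16F);                                // FP32 -> FP16
        half.convertTo(back, CV_32F);                                // FP16 -> FP32
        std::printf("max |x - fp16(x)| over 0..255: %g\n",
                    cv::norm(vals, back, cv::NORM_INF));             // prints 0
        return 0;
    }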
